diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..83d5fd2
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,26 @@
+.DS_Store
+.bloop
+.bsp
+.bsp/
+.history
+.metals
+.vscode/
+/*.iml
+/.classpath
+/.idea
+/.idea_modules
+/.project
+/.settings
+/RUNNING_PID
+/bin/
+/out
+conf/felix-cache
+dist
+felix-cache
+js/node_modules
+logs
+project/target
+shared/.js
+shared/.jvm
+target
+tmp
diff --git a/README.md b/README.md
index c6e0bd8..e5c4fc6 100644
--- a/README.md
+++ b/README.md
@@ -1,2 +1,24 @@
-# modules
-Description
+# Development
+
+## IntelliJ
+
+Open Build Tools -> sbt and make sure to enable sbt shell for project reloads/builds.
+
+## Build Javascript Libraries
+
+```
+fastOptJS::webpack
+```
+
+## Run WebServer
+
+```
+python -m SimpleHTTPServer
+```
+
+## Compile Cycle
+
+```
+~compile
+```
+
\ No newline at end of file
diff --git a/airbyte.jq b/airbyte.jq
new file mode 100644
index 0000000..7c0038b
--- /dev/null
+++ b/airbyte.jq
@@ -0,0 +1,35 @@
+def p($p): "\($p)";
+
+# input: array of arrays
+def row2html:
+ reduce .[] as $value ("Name | Type | Description |\n---|---|---|\n";
diff --git a/jvm/src/main/resources/airbyte/destination-kafka.json b/jvm/src/main/resources/airbyte/destination-kafka.json
new file mode 100644
--- /dev/null
+++ b/jvm/src/main/resources/airbyte/destination-kafka.json
+{
+ "documentationUrl": "https://docs.airbyte.io/integrations/destinations/kafka",
+ "connectionSpecification": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Kafka Destination Spec",
+ "type": "object",
+ "properties": {
+ "bootstrap_servers": {
+ "title": "Bootstrap Servers",
+ "description": "A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down).",
+ "type": "string",
+ "examples": ["kafka-broker1:9092,kafka-broker2:9092"]
+ },
+ "topic_pattern": {
+ "title": "Topic Pattern",
+ "description": "Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.",
+ "type": "string",
+ "examples": ["sample.topic", "{namespace}.{stream}.sample"]
+ },
+ "test_topic": {
+ "title": "Test Topic",
+ "description": "Topic to test if Airbyte can produce messages.",
+ "type": "string",
+ "examples": ["test.topic"]
+ },
+ "sync_producer": {
+ "title": "Sync Producer",
+ "description": "Wait synchronously until the record has been sent to Kafka.",
+ "type": "boolean",
+ "default": false
+ },
+ "protocol": {
+ "title": "Protocol",
+ "type": "object",
+ "description": "Protocol used to communicate with brokers.",
+ "oneOf": [
+ {
+ "title": "PLAINTEXT",
+ "required": ["security_protocol"],
+ "properties": {
+ "security_protocol": {
+ "type": "string",
+ "enum": ["PLAINTEXT"],
+ "default": "PLAINTEXT"
+ }
+ }
+ },
+ {
+ "title": "SASL PLAINTEXT",
+ "required": [
+ "security_protocol",
+ "sasl_mechanism",
+ "sasl_jaas_config"
+ ],
+ "properties": {
+ "security_protocol": {
+ "type": "string",
+ "enum": ["SASL_PLAINTEXT"],
+ "default": "SASL_PLAINTEXT"
+ },
+ "sasl_mechanism": {
+ "title": "SASL Mechanism",
+ "description": "SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.",
+ "type": "string",
+ "default": "PLAIN",
+ "enum": ["PLAIN"]
+ },
+ "sasl_jaas_config": {
+ "title": "SASL JAAS Config",
+ "description": "JAAS login context parameters for SASL connections in the format used by JAAS configuration files.",
+ "type": "string",
+ "default": "",
+ "airbyte_secret": true
+ }
+ }
+ },
+ {
+ "title": "SASL SSL",
+ "required": [
+ "security_protocol",
+ "sasl_mechanism",
+ "sasl_jaas_config"
+ ],
+ "properties": {
+ "security_protocol": {
+ "type": "string",
+ "enum": ["SASL_SSL"],
+ "default": "SASL_SSL"
+ },
+ "sasl_mechanism": {
+ "title": "SASL Mechanism",
+ "description": "SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.",
+ "type": "string",
+ "default": "GSSAPI",
+ "enum": [
+ "GSSAPI",
+ "OAUTHBEARER",
+ "SCRAM-SHA-256",
+ "SCRAM-SHA-512",
+ "PLAIN"
+ ]
+ },
+ "sasl_jaas_config": {
+ "title": "SASL JAAS Config",
+ "description": "JAAS login context parameters for SASL connections in the format used by JAAS configuration files.",
+ "type": "string",
+ "default": "",
+ "airbyte_secret": true
+ }
+ }
+ }
+ ]
+ },
+ "client_id": {
+ "title": "Client ID",
+ "description": "An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.",
+ "type": "string",
+ "examples": ["airbyte-producer"]
+ },
+ "acks": {
+ "title": "ACKs",
+ "description": "The number of acknowledgments the producer requires the leader to have received before considering a request complete. This controls the durability of records that are sent.",
+ "type": "string",
+ "default": "1",
+ "enum": ["0", "1", "all"]
+ },
+ "enable_idempotence": {
+ "title": "Enable Idempotence",
+ "description": "When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream.",
+ "type": "boolean",
+ "default": false
+ },
+ "compression_type": {
+ "title": "Compression Type",
+ "description": "The compression type for all data generated by the producer.",
+ "type": "string",
+ "default": "none",
+ "enum": ["none", "gzip", "snappy", "lz4", "zstd"]
+ },
+ "batch_size": {
+ "title": "Batch Size",
+ "description": "The producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition.",
+ "type": "integer",
+ "examples": [16384]
+ },
+ "linger_ms": {
+ "title": "Linger ms",
+ "description": "The producer groups together any records that arrive in between request transmissions into a single batched request.",
+ "type": "string",
+ "examples": [0]
+ },
+ "max_in_flight_requests_per_connection": {
+ "title": "Max in Flight Requests per Connection",
+ "description": "The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5.",
+ "type": "integer",
+ "examples": [5]
+ },
+ "client_dns_lookup": {
+ "title": "Client DNS Lookup",
+ "description": "Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses.",
+ "type": "string",
+ "default": "use_all_dns_ips",
+ "enum": [
+ "default",
+ "use_all_dns_ips",
+ "resolve_canonical_bootstrap_servers_only",
+ "use_all_dns_ips"
+ ]
+ },
+ "buffer_memory": {
+ "title": "Buffer Memory",
+ "description": "The total bytes of memory the producer can use to buffer records waiting to be sent to the server.",
+ "type": "string",
+ "examples": 33554432
+ },
+ "max_request_size": {
+ "title": "Max Request Size",
+ "description": "The maximum size of a request in bytes.",
+ "type": "integer",
+ "examples": [1048576]
+ },
+ "retries": {
+ "title": "Retries",
+ "description": "Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error.",
+ "type": "integer",
+ "examples": [2147483647]
+ },
+ "socket_connection_setup_timeout_ms": {
+ "title": "Socket Connection Setup Timeout",
+ "description": "The amount of time the client will wait for the socket connection to be established.",
+ "type": "string",
+ "examples": [10000]
+ },
+ "socket_connection_setup_timeout_max_ms": {
+ "title": "Socket Connection Setup Max Timeout",
+ "description": "The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum.",
+ "type": "string",
+ "examples": [30000]
+ },
+ "max_block_ms": {
+ "title": "Max Block ms",
+ "description": "The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block.",
+ "type": "string",
+ "examples": [60000]
+ },
+ "request_timeout_ms": {
+ "title": "Request Timeout",
+ "description": "The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.",
+ "type": "integer",
+ "examples": [30000]
+ },
+ "delivery_timeout_ms": {
+ "title": "Delivery Timeout",
+ "description": "An upper bound on the time to report success or failure after a call to 'send()' returns.",
+ "type": "integer",
+ "examples": [120000]
+ },
+ "send_buffer_bytes": {
+ "title": "Send Buffer bytes",
+ "description": "The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used.",
+ "type": "integer",
+ "examples": [131072]
+ },
+ "receive_buffer_bytes": {
+ "title": "Receive Buffer bytes",
+ "description": "The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.",
+ "type": "integer",
+ "examples": [32768]
+ }
+ }
+ }
+}
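Each of these resource files is an Airbyte connector definition: the top-level flags describe sync capabilities, and `connectionSpecification` is a draft-07 JSON Schema that Airbyte renders as a settings form and validates user configs against. Below is a minimal sketch of that validation step, assuming Python with the third-party `jsonschema` package; the file path and the config values are illustrative only, not part of this PR.

```
import json

from jsonschema import ValidationError, validate  # pip install jsonschema

# Load the spec added above and pull out its JSON Schema portion.
with open("jvm/src/main/resources/airbyte/destination-kafka.json") as f:
    spec = json.load(f)
schema = spec["connectionSpecification"]

# A hypothetical user-supplied Kafka destination config (placeholder values).
config = {
    "bootstrap_servers": "kafka-broker1:9092,kafka-broker2:9092",
    "topic_pattern": "{namespace}.{stream}.sample",
    "protocol": {"security_protocol": "PLAINTEXT"},
    "acks": "1",
    "enable_idempotence": False,
    "compression_type": "none",
    "client_dns_lookup": "use_all_dns_ips",
}

try:
    validate(instance=config, schema=schema)
    print("config satisfies the connectionSpecification")
except ValidationError as err:
    print(f"config rejected: {err.message}")
```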
diff --git a/jvm/src/main/resources/airbyte/destination-keen.json b/jvm/src/main/resources/airbyte/destination-keen.json
new file mode 100644
index 0000000..56abae7
--- /dev/null
+++ b/jvm/src/main/resources/airbyte/destination-keen.json
@@ -0,0 +1,35 @@
+{
+ "documentationUrl": "https://docs.airbyte.io/integrations/destinations/keen",
+ "supportsIncremental": true,
+ "supportsNormalization": false,
+ "supportsDBT": false,
+ "supported_destination_sync_modes": ["overwrite", "append"],
+ "connectionSpecification": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Keen Spec",
+ "type": "object",
+ "required": ["project_id", "api_key"],
+ "additionalProperties": false,
+ "properties": {
+ "project_id": {
+ "description": "To get Keen Project ID, navigate to the Access tab from the left-hand, side panel and check the Project Details section.",
+ "title": "Project ID",
+ "type": "string",
+ "examples": ["58b4acc22ba938934e888322e"]
+ },
+ "api_key": {
+ "title": "API Key",
+ "description": "To get Keen Master API Key, navigate to the Access tab from the left-hand, side panel and check the Project Details section.",
+ "type": "string",
+ "examples": ["ABCDEFGHIJKLMNOPRSTUWXYZ"],
+ "airbyte_secret": true
+ },
+ "infer_timestamp": {
+ "title": "Infer Timestamp",
+ "description": "Allow connector to guess keen.timestamp value based on the streamed data.",
+ "type": "boolean",
+ "default": true
+ }
+ }
+ }
+}
diff --git a/jvm/src/main/resources/airbyte/destination-kinesis.json b/jvm/src/main/resources/airbyte/destination-kinesis.json
new file mode 100644
index 0000000..65aea2d
--- /dev/null
+++ b/jvm/src/main/resources/airbyte/destination-kinesis.json
@@ -0,0 +1,58 @@
+{
+ "documentationUrl": "https://docs.airbyte.io/integrations/destinations/kinesis",
+ "supportsIncremental": true,
+ "supportsNormalization": false,
+ "supportsDBT": false,
+ "supported_destination_sync_modes": ["append"],
+ "connectionSpecification": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Kinesis Destination Spec",
+ "type": "object",
+ "required": ["shardCount", "accessKey", "privateKey", "bufferSize"],
+ "additionalProperties": true,
+ "properties": {
+ "endpoint": {
+ "title": "Endpoint",
+ "description": "AWS Kinesis endpoint.",
+ "type": "string",
+ "order": 0
+ },
+ "region": {
+ "title": "Region",
+ "description": "AWS region. Your account determines the Regions that are available to you.",
+ "type": "string",
+ "order": 1
+ },
+ "shardCount": {
+ "title": "Shard Count",
+ "description": "Number of shards to which the data should be streamed.",
+ "type": "integer",
+ "default": 5,
+ "order": 2
+ },
+ "accessKey": {
+ "title": "Access Key",
+ "description": "Generate the AWS Access Key for current user.",
+ "airbyte_secret": true,
+ "type": "string",
+ "order": 3
+ },
+ "privateKey": {
+ "title": "Private Key",
+ "description": "The AWS Private Key - a string of numbers and letters that are unique for each account, also known as a \"recovery phrase\".",
+ "airbyte_secret": true,
+ "type": "string",
+ "order": 4
+ },
+ "bufferSize": {
+ "title": "Buffer Size",
+ "description": "Buffer size for storing kinesis records before being batch streamed.",
+ "type": "integer",
+ "minimum": 1,
+ "maximum": 500,
+ "default": 100,
+ "order": 5
+ }
+ }
+ }
+}
diff --git a/jvm/src/main/resources/airbyte/destination-kvdb.json b/jvm/src/main/resources/airbyte/destination-kvdb.json
new file mode 100644
index 0000000..e394c1e
--- /dev/null
+++ b/jvm/src/main/resources/airbyte/destination-kvdb.json
@@ -0,0 +1,28 @@
+{
+ "documentationUrl": "https://kvdb.io/docs/api/",
+ "supported_destination_sync_modes": ["overwrite", "append"],
+ "supportsIncremental": true,
+ "supportsDBT": false,
+ "supportsNormalization": false,
+ "connectionSpecification": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Destination KVdb",
+ "type": "object",
+ "required": ["bucket_id", "secret_key"],
+ "additionalProperties": false,
+ "properties": {
+ "bucket_id": {
+ "title": "Bucket ID",
+ "type": "string",
+ "description": "The ID of your KVdb bucket.",
+ "order": 1
+ },
+ "secret_key": {
+ "title": "Secret Key",
+ "type": "string",
+ "description": "Your bucket Secret Key.",
+ "order": 2
+ }
+ }
+ }
+}
diff --git a/jvm/src/main/resources/airbyte/destination-local-json.json b/jvm/src/main/resources/airbyte/destination-local-json.json
new file mode 100644
index 0000000..d6fc2f3
--- /dev/null
+++ b/jvm/src/main/resources/airbyte/destination-local-json.json
@@ -0,0 +1,22 @@
+{
+ "documentationUrl": "https://docs.airbyte.io/integrations/destinations/local-json",
+ "supportsIncremental": true,
+ "supportsNormalization": false,
+ "supportsDBT": false,
+ "supported_destination_sync_modes": ["overwrite", "append"],
+ "connectionSpecification": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Local Json Destination Spec",
+ "type": "object",
+ "required": ["destination_path"],
+ "additionalProperties": false,
+ "properties": {
+ "destination_path": {
+ "description": "Path to the directory where json files will be written. The files will be placed inside that local mount. For more information check out our docs",
+ "title": "Destination Path",
+ "type": "string",
+ "examples": ["/json_data"]
+ }
+ }
+ }
+}
diff --git a/jvm/src/main/resources/airbyte/destination-mariadb-columnstore.json b/jvm/src/main/resources/airbyte/destination-mariadb-columnstore.json
new file mode 100644
index 0000000..0c84779
--- /dev/null
+++ b/jvm/src/main/resources/airbyte/destination-mariadb-columnstore.json
@@ -0,0 +1,51 @@
+{
+ "documentationUrl": "https://docs.airbyte.io/integrations/destinations/mariadb-columnstore",
+ "supportsIncremental": true,
+ "supportsNormalization": false,
+ "supportsDBT": false,
+ "supported_destination_sync_modes": ["overwrite", "append"],
+ "connectionSpecification": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "MariaDB Columnstore Destination Spec",
+ "type": "object",
+ "required": ["host", "port", "username", "database"],
+ "additionalProperties": true,
+ "properties": {
+ "host": {
+ "title": "Host",
+ "description": "The Hostname of the database.",
+ "type": "string",
+ "order": 0
+ },
+ "port": {
+ "title": "Port",
+ "description": "The Port of the database.",
+ "type": "integer",
+ "minimum": 0,
+ "maximum": 65536,
+ "default": 3306,
+ "examples": ["3306"],
+ "order": 1
+ },
+ "database": {
+ "title": "Database",
+ "description": "Name of the database.",
+ "type": "string",
+ "order": 2
+ },
+ "username": {
+ "title": "Username",
+ "description": "The Username which is used to access the database.",
+ "type": "string",
+ "order": 3
+ },
+ "password": {
+ "title": "Password",
+ "description": "The Password associated with the username.",
+ "type": "string",
+ "airbyte_secret": true,
+ "order": 4
+ }
+ }
+ }
+}
diff --git a/jvm/src/main/resources/airbyte/destination-meilisearch.json b/jvm/src/main/resources/airbyte/destination-meilisearch.json
new file mode 100644
index 0000000..e3d0095
--- /dev/null
+++ b/jvm/src/main/resources/airbyte/destination-meilisearch.json
@@ -0,0 +1,29 @@
+{
+ "documentationUrl": "https://docs.airbyte.io/integrations/destinations/meilisearch",
+ "supportsIncremental": true,
+ "supportsNormalization": false,
+ "supportsDBT": false,
+ "supported_destination_sync_modes": ["overwrite", "append"],
+ "connectionSpecification": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "MeiliSearch Destination Spec",
+ "type": "object",
+ "required": ["host"],
+ "additionalProperties": true,
+ "properties": {
+ "host": {
+ "title": "Host",
+ "description": "Hostname of the MeiliSearch instance.",
+ "type": "string",
+ "order": 0
+ },
+ "api_key": {
+ "title": "API Key",
+ "airbyte_secret": true,
+ "description": "MeiliSearch API Key. See the docs for more information on how to obtain this key.",
+ "type": "string",
+ "order": 1
+ }
+ }
+ }
+}
diff --git a/jvm/src/main/resources/airbyte/destination-mongodb.json b/jvm/src/main/resources/airbyte/destination-mongodb.json
new file mode 100644
index 0000000..f9b8c16
--- /dev/null
+++ b/jvm/src/main/resources/airbyte/destination-mongodb.json
@@ -0,0 +1,148 @@
+{
+ "documentationUrl": "https://docs.airbyte.io/integrations/destinations/mongodb",
+ "supportsIncremental": true,
+ "supportsNormalization": false,
+ "supportsDBT": false,
+ "supported_destination_sync_modes": ["overwrite", "append"],
+ "connectionSpecification": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "MongoDB Destination Spec",
+ "type": "object",
+ "required": ["database", "auth_type"],
+ "properties": {
+ "instance_type": {
+ "description": "MongoDb instance to connect to. For MongoDB Atlas and Replica Set TLS connection is used by default.",
+ "title": "MongoDb Instance Type",
+ "type": "object",
+ "order": 0,
+ "oneOf": [
+ {
+ "title": "Standalone MongoDb Instance",
+ "required": ["instance", "host", "port"],
+ "properties": {
+ "instance": {
+ "type": "string",
+ "enum": ["standalone"],
+ "default": "standalone"
+ },
+ "host": {
+ "title": "Host",
+ "type": "string",
+ "description": "The Host of a Mongo database to be replicated.",
+ "order": 0
+ },
+ "port": {
+ "title": "Port",
+ "type": "integer",
+ "description": "The Port of a Mongo database to be replicated.",
+ "minimum": 0,
+ "maximum": 65536,
+ "default": 27017,
+ "examples": ["27017"],
+ "order": 1
+ },
+ "tls": {
+ "title": "TLS Connection",
+ "type": "boolean",
+ "description": "Indicates whether TLS encryption protocol will be used to connect to MongoDB. It is recommended to use TLS connection if possible. For more information see documentation.",
+ "default": false,
+ "order": 2
+ }
+ }
+ },
+ {
+ "title": "Replica Set",
+ "required": ["instance", "server_addresses"],
+ "properties": {
+ "instance": {
+ "type": "string",
+ "enum": ["replica"],
+ "default": "replica"
+ },
+ "server_addresses": {
+ "title": "Server addresses",
+ "type": "string",
+ "description": "The members of a replica set. Please specify `host`:`port` of each member seperated by comma.",
+ "examples": ["host1:27017,host2:27017,host3:27017"],
+ "order": 0
+ },
+ "replica_set": {
+ "title": "Replica Set",
+ "type": "string",
+ "description": "A replica set name.",
+ "order": 1
+ }
+ }
+ },
+ {
+ "title": "MongoDB Atlas",
+ "required": ["instance", "cluster_url"],
+ "properties": {
+ "instance": {
+ "type": "string",
+ "enum": ["atlas"],
+ "default": "atlas"
+ },
+ "cluster_url": {
+ "title": "Cluster URL",
+ "type": "string",
+ "description": "URL of a cluster to connect to.",
+ "order": 0
+ }
+ }
+ }
+ ]
+ },
+ "database": {
+ "title": "DB Name",
+ "description": "Name of the database.",
+ "type": "string",
+ "order": 2
+ },
+ "auth_type": {
+ "title": "Authorization type",
+ "type": "object",
+ "description": "Authorization type.",
+ "oneOf": [
+ {
+ "title": "None",
+ "description": "None.",
+ "required": ["authorization"],
+ "type": "object",
+ "properties": {
+ "authorization": {
+ "type": "string",
+ "const": "none"
+ }
+ }
+ },
+ {
+ "title": "Login/Password",
+ "description": "Login/Password.",
+ "required": ["authorization", "username", "password"],
+ "type": "object",
+ "properties": {
+ "authorization": {
+ "type": "string",
+ "const": "login/password"
+ },
+ "username": {
+ "title": "User",
+ "description": "Username to use to access the database.",
+ "type": "string",
+ "order": 1
+ },
+ "password": {
+ "title": "Password",
+ "description": "Password associated with the username.",
+ "type": "string",
+ "airbyte_secret": true,
+ "order": 2
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+}
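For specs that use `oneOf`, a config selects a branch by supplying that branch's discriminator field plus its required companions. Below is a sketch of a config document shaped to the MongoDB spec above, picking the MongoDB Atlas branch of `instance_type` and the Login/Password branch of `auth_type`; all values are placeholders.

```
import json

# Hypothetical MongoDB destination config (placeholder values only).
mongodb_config = {
    "instance_type": {
        "instance": "atlas",  # selects the "MongoDB Atlas" branch
        "cluster_url": "cluster0.example.mongodb.net",
    },
    "database": "airbyte_db",
    "auth_type": {
        "authorization": "login/password",  # selects the "Login/Password" branch
        "username": "airbyte",
        "password": "change-me",
    },
}

print(json.dumps(mongodb_config, indent=2))
```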
diff --git a/jvm/src/main/resources/airbyte/destination-mqtt.json b/jvm/src/main/resources/airbyte/destination-mqtt.json
new file mode 100644
index 0000000..759bc66
--- /dev/null
+++ b/jvm/src/main/resources/airbyte/destination-mqtt.json
@@ -0,0 +1,108 @@
+{
+ "documentationUrl": "https://docs.airbyte.io/integrations/destinations/mqtt",
+ "supportsIncremental": true,
+ "supportsNormalization": false,
+ "supportsDBT": false,
+ "supported_destination_sync_modes": ["append"],
+ "connectionSpecification": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "MQTT Destination Spec",
+ "type": "object",
+ "required": [
+ "broker_host",
+ "broker_port",
+ "use_tls",
+ "topic_pattern",
+ "publisher_sync",
+ "connect_timeout",
+ "automatic_reconnect",
+ "clean_session",
+ "message_retained",
+ "message_qos"
+ ],
+ "additionalProperties": true,
+ "properties": {
+ "broker_host": {
+ "title": "MQTT broker host",
+ "description": "Host of the broker to connect to.",
+ "type": "string"
+ },
+ "broker_port": {
+ "title": "MQTT broker port",
+ "description": "Port of the broker.",
+ "type": "integer"
+ },
+ "use_tls": {
+ "title": "Use TLS",
+ "description": "Whether to use TLS encryption on the connection.",
+ "type": "boolean",
+ "default": false
+ },
+ "username": {
+ "title": "Username",
+ "description": "User name to use for the connection.",
+ "type": "string"
+ },
+ "password": {
+ "title": "Password",
+ "description": "Password to use for the connection.",
+ "type": "string",
+ "airbyte_secret": true
+ },
+ "topic_pattern": {
+ "title": "Topic pattern",
+ "description": "Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.",
+ "type": "string",
+ "examples": ["sample.topic", "{namespace}/{stream}/sample"]
+ },
+ "topic_test": {
+ "title": "Test topic",
+ "description": "Topic to test if Airbyte can produce messages.",
+ "type": "string",
+ "examples": ["test/topic"]
+ },
+ "client": {
+ "title": "Client ID",
+ "description": "A client identifier that is unique on the server being connected to.",
+ "type": "string",
+ "examples": ["airbyte-client1"]
+ },
+ "publisher_sync": {
+ "title": "Sync publisher",
+ "description": "Wait synchronously until the record has been sent to the broker.",
+ "type": "boolean",
+ "default": false
+ },
+ "connect_timeout": {
+ "title": "Connect timeout",
+ "description": " Maximum time interval (in seconds) the client will wait for the network connection to the MQTT server to be established.",
+ "type": "integer",
+ "default": 30
+ },
+ "automatic_reconnect": {
+ "title": "Automatic reconnect",
+ "description": "Whether the client will automatically attempt to reconnect to the server if the connection is lost.",
+ "type": "boolean",
+ "default": true
+ },
+ "clean_session": {
+ "title": "Clean session",
+ "description": "Whether the client and server should remember state across restarts and reconnects.",
+ "type": "boolean",
+ "default": true
+ },
+ "message_retained": {
+ "title": "Message retained",
+ "description": "Whether or not the publish message should be retained by the messaging engine.",
+ "type": "boolean",
+ "default": false
+ },
+ "message_qos": {
+ "title": "Message QoS",
+ "description": "Quality of service used for each message to be delivered.",
+ "default": "AT_LEAST_ONCE",
+ "enum": ["AT_MOST_ONCE", "AT_LEAST_ONCE", "EXACTLY_ONCE"]
+ }
+ }
+ }
+}
diff --git a/jvm/src/main/resources/airbyte/destination-mssql.json b/jvm/src/main/resources/airbyte/destination-mssql.json
new file mode 100644
index 0000000..3aff969
--- /dev/null
+++ b/jvm/src/main/resources/airbyte/destination-mssql.json
@@ -0,0 +1,120 @@
+{
+ "documentationUrl": "https://docs.airbyte.io/integrations/destinations/mssql",
+ "supportsIncremental": true,
+ "supportsNormalization": true,
+ "supportsDBT": true,
+ "supported_destination_sync_modes": ["overwrite", "append", "append_dedup"],
+ "connectionSpecification": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "MS SQL Server Destination Spec",
+ "type": "object",
+ "required": ["host", "port", "username", "database", "schema"],
+ "properties": {
+ "host": {
+ "title": "Host",
+ "description": "The host name of the MSSQL database.",
+ "type": "string",
+ "order": 0
+ },
+ "port": {
+ "title": "Port",
+ "description": "The port of the MSSQL database.",
+ "type": "integer",
+ "minimum": 0,
+ "maximum": 65536,
+ "default": 1433,
+ "examples": ["1433"],
+ "order": 1
+ },
+ "database": {
+ "title": "DB Name",
+ "description": "The name of the MSSQL database.",
+ "type": "string",
+ "order": 2
+ },
+ "schema": {
+ "title": "Default Schema",
+ "description": "The default schema tables are written to if the source does not specify a namespace. The usual value for this field is \"public\".",
+ "type": "string",
+ "examples": ["public"],
+ "default": "public",
+ "order": 3
+ },
+ "username": {
+ "title": "User",
+ "description": "The username which is used to access the database.",
+ "type": "string",
+ "order": 4
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password associated with this username.",
+ "type": "string",
+ "airbyte_secret": true,
+ "order": 5
+ },
+ "jdbc_url_params": {
+ "title": "JDBC URL Params",
+ "description": "Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).",
+ "type": "string",
+ "order": 6
+ },
+ "ssl_method": {
+ "title": "SSL Method",
+ "type": "object",
+ "description": "The encryption method which is used to communicate with the database.",
+ "order": 7,
+ "oneOf": [
+ {
+ "title": "Unencrypted",
+ "description": "The data transfer will not be encrypted.",
+ "required": ["ssl_method"],
+ "type": "object",
+ "properties": {
+ "ssl_method": {
+ "type": "string",
+ "const": "unencrypted",
+ "enum": ["unencrypted"],
+ "default": "unencrypted"
+ }
+ }
+ },
+ {
+ "title": "Encrypted (trust server certificate)",
+ "description": "Use the certificate provided by the server without verification. (For testing purposes only!)",
+ "required": ["ssl_method"],
+ "type": "object",
+ "properties": {
+ "ssl_method": {
+ "type": "string",
+ "const": "encrypted_trust_server_certificate",
+ "enum": ["encrypted_trust_server_certificate"],
+ "default": "encrypted_trust_server_certificate"
+ }
+ }
+ },
+ {
+ "title": "Encrypted (verify certificate)",
+ "description": "Verify and use the certificate provided by the server.",
+ "required": ["ssl_method", "trustStoreName", "trustStorePassword"],
+ "type": "object",
+ "properties": {
+ "ssl_method": {
+ "type": "string",
+ "const": "encrypted_verify_certificate",
+ "enum": ["encrypted_verify_certificate"],
+ "default": "encrypted_verify_certificate"
+ },
+ "hostNameInCertificate": {
+ "title": "Host Name In Certificate",
+ "type": "string",
+ "description": "Specifies the host name of the server. The value of this property must match the subject property of the certificate.",
+ "order": 8
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+}
diff --git a/jvm/src/main/resources/airbyte/destination-mysql.json b/jvm/src/main/resources/airbyte/destination-mysql.json
new file mode 100644
index 0000000..0605667
--- /dev/null
+++ b/jvm/src/main/resources/airbyte/destination-mysql.json
@@ -0,0 +1,64 @@
+{
+ "documentationUrl": "https://docs.airbyte.io/integrations/destinations/mysql",
+ "supportsIncremental": true,
+ "supportsNormalization": true,
+ "supportsDBT": true,
+ "supported_destination_sync_modes": ["overwrite", "append"],
+ "connectionSpecification": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "MySQL Destination Spec",
+ "type": "object",
+ "required": ["host", "port", "username", "database"],
+ "additionalProperties": true,
+ "properties": {
+ "host": {
+ "title": "Host",
+ "description": "Hostname of the database.",
+ "type": "string",
+ "order": 0
+ },
+ "port": {
+ "title": "Port",
+ "description": "Port of the database.",
+ "type": "integer",
+ "minimum": 0,
+ "maximum": 65536,
+ "default": 3306,
+ "examples": ["3306"],
+ "order": 1
+ },
+ "database": {
+ "title": "DB Name",
+ "description": "Name of the database.",
+ "type": "string",
+ "order": 2
+ },
+ "username": {
+ "title": "User",
+ "description": "Username to use to access the database.",
+ "type": "string",
+ "order": 3
+ },
+ "password": {
+ "title": "Password",
+ "description": "Password associated with the username.",
+ "type": "string",
+ "airbyte_secret": true,
+ "order": 4
+ },
+ "ssl": {
+ "title": "SSL Connection",
+ "description": "Encrypt data using SSL.",
+ "type": "boolean",
+ "default": true,
+ "order": 5
+ },
+ "jdbc_url_params": {
+ "description": "Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).",
+ "title": "JDBC URL Params",
+ "type": "string",
+ "order": 6
+ }
+ }
+ }
+}
diff --git a/jvm/src/main/resources/airbyte/destination-oracle.json b/jvm/src/main/resources/airbyte/destination-oracle.json
new file mode 100644
index 0000000..d7fb730
--- /dev/null
+++ b/jvm/src/main/resources/airbyte/destination-oracle.json
@@ -0,0 +1,126 @@
+{
+ "documentationUrl": "https://docs.airbyte.io/integrations/destinations/oracle",
+ "supportsIncremental": true,
+ "supportsNormalization": false,
+ "supportsDBT": false,
+ "supported_destination_sync_modes": ["overwrite", "append"],
+ "connectionSpecification": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Oracle Destination Spec",
+ "type": "object",
+ "required": ["host", "port", "username", "sid"],
+ "additionalProperties": true,
+ "properties": {
+ "host": {
+ "title": "Host",
+ "description": "The hostname of the database.",
+ "type": "string",
+ "order": 0
+ },
+ "port": {
+ "title": "Port",
+ "description": "The port of the database.",
+ "type": "integer",
+ "minimum": 0,
+ "maximum": 65536,
+ "default": 1521,
+ "examples": ["1521"],
+ "order": 1
+ },
+ "sid": {
+ "title": "SID",
+ "description": "The System Identifier uniquely distinguishes the instance from any other instance on the same computer.",
+ "type": "string",
+ "order": 2
+ },
+ "username": {
+ "title": "User",
+ "description": "The username to access the database. This user must have CREATE USER privileges in the database.",
+ "type": "string",
+ "order": 3
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password associated with the username.",
+ "type": "string",
+ "airbyte_secret": true,
+ "order": 4
+ },
+ "jdbc_url_params": {
+ "description": "Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).",
+ "title": "JDBC URL Params",
+ "type": "string",
+ "order": 5
+ },
+ "schema": {
+ "title": "Default Schema",
+ "description": "The default schema is used as the target schema for all statements issued from the connection that do not explicitly specify a schema name. The usual value for this field is \"airbyte\". In Oracle, schemas and users are the same thing, so the \"user\" parameter is used as the login credentials and this is used for the default Airbyte message schema.",
+ "type": "string",
+ "examples": ["airbyte"],
+ "default": "airbyte",
+ "order": 6
+ },
+ "encryption": {
+ "title": "Encryption",
+ "type": "object",
+ "description": "The encryption method which is used when communicating with the database.",
+ "order": 7,
+ "oneOf": [
+ {
+ "title": "Unencrypted",
+ "description": "Data transfer will not be encrypted.",
+ "required": ["encryption_method"],
+ "properties": {
+ "encryption_method": {
+ "type": "string",
+ "const": "unencrypted",
+ "enum": ["unencrypted"],
+ "default": "unencrypted"
+ }
+ }
+ },
+ {
+ "title": "Native Network Encryption (NNE)",
+ "description": "The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports.",
+ "required": ["encryption_method"],
+ "properties": {
+ "encryption_method": {
+ "type": "string",
+ "const": "client_nne",
+ "enum": ["client_nne"],
+ "default": "client_nne"
+ },
+ "encryption_algorithm": {
+ "type": "string",
+ "description": "This parameter defines the database encryption algorithm.",
+ "title": "Encryption Algorithm",
+ "default": "AES256",
+ "enum": ["AES256", "RC4_56", "3DES168"]
+ }
+ }
+ },
+ {
+ "title": "TLS Encrypted (verify certificate)",
+ "description": "Verify and use the certificate provided by the server.",
+ "required": ["encryption_method", "ssl_certificate"],
+ "properties": {
+ "encryption_method": {
+ "type": "string",
+ "const": "encrypted_verify_certificate",
+ "enum": ["encrypted_verify_certificate"],
+ "default": "encrypted_verify_certificate"
+ },
+ "ssl_certificate": {
+ "title": "SSL PEM file",
+ "description": "Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations.",
+ "type": "string",
+ "airbyte_secret": true,
+ "multiline": true
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+}
diff --git a/jvm/src/main/resources/airbyte/destination-postgres.json b/jvm/src/main/resources/airbyte/destination-postgres.json
new file mode 100644
index 0000000..9117125
--- /dev/null
+++ b/jvm/src/main/resources/airbyte/destination-postgres.json
@@ -0,0 +1,221 @@
+{
+ "documentationUrl": "https://docs.airbyte.io/integrations/destinations/postgres",
+ "supportsIncremental": true,
+ "supportsNormalization": true,
+ "supportsDBT": true,
+ "supported_destination_sync_modes": ["overwrite", "append", "append_dedup"],
+ "connectionSpecification": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Postgres Destination Spec",
+ "type": "object",
+ "required": ["host", "port", "username", "database", "schema"],
+ "additionalProperties": true,
+ "properties": {
+ "host": {
+ "title": "Host",
+ "description": "Hostname of the database.",
+ "type": "string",
+ "order": 0
+ },
+ "port": {
+ "title": "Port",
+ "description": "Port of the database.",
+ "type": "integer",
+ "minimum": 0,
+ "maximum": 65536,
+ "default": 5432,
+ "examples": ["5432"],
+ "order": 1
+ },
+ "database": {
+ "title": "DB Name",
+ "description": "Name of the database.",
+ "type": "string",
+ "order": 2
+ },
+ "schema": {
+ "title": "Default Schema",
+ "description": "The default schema tables are written to if the source does not specify a namespace. The usual value for this field is \"public\".",
+ "type": "string",
+ "examples": ["public"],
+ "default": "public",
+ "order": 3
+ },
+ "username": {
+ "title": "User",
+ "description": "Username to use to access the database.",
+ "type": "string",
+ "order": 4
+ },
+ "password": {
+ "title": "Password",
+ "description": "Password associated with the username.",
+ "type": "string",
+ "airbyte_secret": true,
+ "order": 5
+ },
+ "ssl": {
+ "title": "SSL Connection",
+ "description": "Encrypt data using SSL. When activating SSL, please select one of the connection modes.",
+ "type": "boolean",
+ "default": false,
+ "order": 6
+ },
+ "ssl_mode": {
+ "title": "SSL modes",
+ "description": "SSL connection modes. \n disable - Chose this mode to disable encryption of communication between Airbyte and destination database\n allow - Chose this mode to enable encryption only when required by the source database\n prefer - Chose this mode to allow unencrypted connection only if the source database does not support encryption\n require - Chose this mode to always require encryption. If the source database server does not support encryption, connection will fail\n verify-ca - Chose this mode to always require encryption and to verify that the source database server has a valid SSL certificate\n verify-full - This is the most secure mode. Chose this mode to always require encryption and to verify the identity of the source database server\n See more information - in the docs.",
+ "type": "object",
+ "order": 7,
+ "oneOf": [
+ {
+ "title": "disable",
+ "additionalProperties": false,
+ "description": "Disable SSL.",
+ "required": ["mode"],
+ "properties": {
+ "mode": {
+ "type": "string",
+ "const": "disable",
+ "enum": ["disable"],
+ "default": "disable",
+ "order": 0
+ }
+ }
+ },
+ {
+ "title": "allow",
+ "additionalProperties": false,
+ "description": "Allow SSL mode.",
+ "required": ["mode"],
+ "properties": {
+ "mode": {
+ "type": "string",
+ "const": "allow",
+ "enum": ["allow"],
+ "default": "allow",
+ "order": 0
+ }
+ }
+ },
+ {
+ "title": "prefer",
+ "additionalProperties": false,
+ "description": "Prefer SSL mode.",
+ "required": ["mode"],
+ "properties": {
+ "mode": {
+ "type": "string",
+ "const": "prefer",
+ "enum": ["prefer"],
+ "default": "prefer",
+ "order": 0
+ }
+ }
+ },
+ {
+ "title": "require",
+ "additionalProperties": false,
+ "description": "Require SSL mode.",
+ "required": ["mode"],
+ "properties": {
+ "mode": {
+ "type": "string",
+ "const": "require",
+ "enum": ["require"],
+ "default": "require",
+ "order": 0
+ }
+ }
+ },
+ {
+ "title": "verify-ca",
+ "additionalProperties": false,
+ "description": "Verify-ca SSL mode.",
+ "required": ["mode", "ca_certificate"],
+ "properties": {
+ "mode": {
+ "type": "string",
+ "const": "verify-ca",
+ "enum": ["verify-ca"],
+ "default": "verify-ca",
+ "order": 0
+ },
+ "ca_certificate": {
+ "type": "string",
+ "title": "CA certificate",
+ "description": "CA certificate",
+ "airbyte_secret": true,
+ "multiline": true,
+ "order": 1
+ },
+ "client_key_password": {
+ "type": "string",
+ "title": "Client key password (Optional)",
+ "description": "Password for keystorage. This field is optional. If you do not add it - the password will be generated automatically.",
+ "airbyte_secret": true,
+ "order": 4
+ }
+ }
+ },
+ {
+ "title": "verify-full",
+ "additionalProperties": false,
+ "description": "Verify-full SSL mode.",
+ "required": [
+ "mode",
+ "ca_certificate",
+ "client_certificate",
+ "client_key"
+ ],
+ "properties": {
+ "mode": {
+ "type": "string",
+ "const": "verify-full",
+ "enum": ["verify-full"],
+ "default": "verify-full",
+ "order": 0
+ },
+ "ca_certificate": {
+ "type": "string",
+ "title": "CA certificate",
+ "description": "CA certificate",
+ "airbyte_secret": true,
+ "multiline": true,
+ "order": 1
+ },
+ "client_certificate": {
+ "type": "string",
+ "title": "Client certificate",
+ "description": "Client certificate",
+ "airbyte_secret": true,
+ "multiline": true,
+ "order": 2
+ },
+ "client_key": {
+ "type": "string",
+ "title": "Client key",
+ "description": "Client key",
+ "airbyte_secret": true,
+ "multiline": true,
+ "order": 3
+ },
+ "client_key_password": {
+ "type": "string",
+ "title": "Client key password (Optional)",
+ "description": "Password for keystorage. This field is optional. If you do not add it - the password will be generated automatically.",
+ "airbyte_secret": true,
+ "order": 4
+ }
+ }
+ }
+ ]
+ },
+ "jdbc_url_params": {
+ "description": "Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).",
+ "title": "JDBC URL Params",
+ "type": "string",
+ "order": 8
+ }
+ }
+ }
+}
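The `ssl_mode` object above is itself a `oneOf`: exactly one mode branch may match, and the certificate-verifying branches additionally require the corresponding certificate fields. A small sketch follows, again assuming the `jsonschema` package, showing two values that each match exactly one branch; certificate contents are placeholders.

```
import json

from jsonschema import validate  # pip install jsonschema

with open("jvm/src/main/resources/airbyte/destination-postgres.json") as f:
    spec = json.load(f)
ssl_mode_schema = spec["connectionSpecification"]["properties"]["ssl_mode"]

# "require" branch: only "mode" is needed (additionalProperties is false).
validate(instance={"mode": "require"}, schema=ssl_mode_schema)

# "verify-ca" branch: "mode" plus the required "ca_certificate".
validate(
    instance={
        "mode": "verify-ca",
        "ca_certificate": "-----BEGIN CERTIFICATE-----\n...placeholder...\n-----END CERTIFICATE-----",
    },
    schema=ssl_mode_schema,
)
```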
diff --git a/jvm/src/main/resources/airbyte/destination-pubsub.json b/jvm/src/main/resources/airbyte/destination-pubsub.json
new file mode 100644
index 0000000..82bd13c
--- /dev/null
+++ b/jvm/src/main/resources/airbyte/destination-pubsub.json
@@ -0,0 +1,32 @@
+{
+ "documentationUrl": "https://docs.airbyte.io/integrations/destinations/pubsub",
+ "supportsIncremental": true,
+ "supportsNormalization": false,
+ "supportsDBT": false,
+ "supported_destination_sync_modes": ["append"],
+ "connectionSpecification": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Google PubSub Destination Spec",
+ "type": "object",
+ "required": ["project_id", "topic_id", "credentials_json"],
+ "additionalProperties": true,
+ "properties": {
+ "project_id": {
+ "type": "string",
+ "description": "The GCP project ID for the project containing the target PubSub.",
+ "title": "Project ID"
+ },
+ "topic_id": {
+ "type": "string",
+ "description": "The PubSub topic ID in the given GCP project ID.",
+ "title": "PubSub Topic ID"
+ },
+ "credentials_json": {
+ "type": "string",
+ "description": "The contents of the JSON service account key. Check out the docs if you need help generating this key.",
+ "title": "Credentials JSON",
+ "airbyte_secret": true
+ }
+ }
+ }
+}
diff --git a/jvm/src/main/resources/airbyte/destination-pulsar.json b/jvm/src/main/resources/airbyte/destination-pulsar.json
new file mode 100644
index 0000000..7dc40a0
--- /dev/null
+++ b/jvm/src/main/resources/airbyte/destination-pulsar.json
@@ -0,0 +1,137 @@
+{
+ "documentationUrl": "https://docs.airbyte.io/integrations/destinations/pulsar",
+ "supportsIncremental": true,
+ "supportsNormalization": false,
+ "supportsDBT": false,
+ "supported_destination_sync_modes": ["append"],
+ "connectionSpecification": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Pulsar Destination Spec",
+ "type": "object",
+ "required": [
+ "brokers",
+ "use_tls",
+ "topic_type",
+ "topic_tenant",
+ "topic_namespace",
+ "topic_pattern",
+ "compression_type",
+ "send_timeout_ms",
+ "max_pending_messages",
+ "max_pending_messages_across_partitions",
+ "batching_enabled",
+ "batching_max_messages",
+ "batching_max_publish_delay",
+ "block_if_queue_full"
+ ],
+ "additionalProperties": true,
+ "properties": {
+ "brokers": {
+ "title": "Pulsar brokers",
+ "description": "A list of host/port pairs to use for establishing the initial connection to the Pulsar cluster.",
+ "type": "string",
+ "examples": ["broker1:6650,broker2:6650"]
+ },
+ "use_tls": {
+ "title": "Use TLS",
+ "description": "Whether to use TLS encryption on the connection.",
+ "type": "boolean",
+ "default": false
+ },
+ "topic_type": {
+ "title": "Topic type",
+ "description": "It identifies type of topic. Pulsar supports two kind of topics: persistent and non-persistent. In persistent topic, all messages are durably persisted on disk (that means on multiple disks unless the broker is standalone), whereas non-persistent topic does not persist message into storage disk.",
+ "type": "string",
+ "default": "persistent",
+ "enum": ["persistent", "non-persistent"]
+ },
+ "topic_tenant": {
+ "title": "Topic tenant",
+ "description": "The topic tenant within the instance. Tenants are essential to multi-tenancy in Pulsar, and spread across clusters.",
+ "type": "string",
+ "default": "public",
+ "examples": ["public"]
+ },
+ "topic_namespace": {
+ "title": "Topic namespace",
+ "description": "The administrative unit of the topic, which acts as a grouping mechanism for related topics. Most topic configuration is performed at the namespace level. Each tenant has one or multiple namespaces.",
+ "type": "string",
+ "default": "default",
+ "examples": ["default"]
+ },
+ "topic_pattern": {
+ "title": "Topic pattern",
+ "description": "Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.",
+ "type": "string",
+ "examples": ["sample.topic", "{namespace}.{stream}.sample"]
+ },
+ "topic_test": {
+ "title": "Test topic",
+ "description": "Topic to test if Airbyte can produce messages.",
+ "type": "string",
+ "examples": ["test.topic"]
+ },
+ "producer_name": {
+ "title": "Producer name",
+ "description": "Name for the producer. If not filled, the system will generate a globally unique name which can be accessed with.",
+ "type": "string",
+ "examples": ["airbyte-producer"]
+ },
+ "producer_sync": {
+ "title": "Sync producer",
+ "description": "Wait synchronously until the record has been sent to Pulsar.",
+ "type": "boolean",
+ "default": false
+ },
+ "compression_type": {
+ "title": "Compression type",
+ "description": "Compression type for the producer.",
+ "type": "string",
+ "default": "NONE",
+ "enum": ["NONE", "LZ4", "ZLIB", "ZSTD", "SNAPPY"]
+ },
+ "send_timeout_ms": {
+ "title": "Message send timeout",
+ "description": "If a message is not acknowledged by a server before the send-timeout expires, an error occurs (in ms).",
+ "type": "integer",
+ "default": 30000
+ },
+ "max_pending_messages": {
+ "title": "Max pending messages",
+ "description": "The maximum size of a queue holding pending messages.",
+ "type": "integer",
+ "default": 1000
+ },
+ "max_pending_messages_across_partitions": {
+ "title": "Max pending messages across partitions",
+ "description": "The maximum number of pending messages across partitions.",
+ "type": "integer",
+ "default": 50000
+ },
+ "batching_enabled": {
+ "title": "Enable batching",
+ "description": "Control whether automatic batching of messages is enabled for the producer.",
+ "type": "boolean",
+ "default": true
+ },
+ "batching_max_messages": {
+ "title": "Batching max messages",
+ "description": "Maximum number of messages permitted in a batch.",
+ "type": "integer",
+ "default": 1000
+ },
+ "batching_max_publish_delay": {
+ "title": "Batching max publish delay",
+ "description": " Time period in milliseconds within which the messages sent will be batched.",
+ "type": "integer",
+ "default": 1
+ },
+ "block_if_queue_full": {
+ "title": "Block if queue is full",
+ "description": "If the send operation should block when the outgoing message queue is full.",
+ "type": "boolean",
+ "default": false
+ }
+ }
+ }
+}
diff --git a/jvm/src/main/resources/airbyte/destination-rabbitmq.json b/jvm/src/main/resources/airbyte/destination-rabbitmq.json
new file mode 100644
index 0000000..212fbcd
--- /dev/null
+++ b/jvm/src/main/resources/airbyte/destination-rabbitmq.json
@@ -0,0 +1,49 @@
+{
+ "documentationUrl": "https://docs.airbyte.io/integrations/destinations/rabbitmq",
+ "supported_destination_sync_modes": ["append"],
+ "supportsIncremental": true,
+ "supportsDBT": false,
+ "supportsNormalization": false,
+ "connectionSpecification": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Destination Rabbitmq",
+ "type": "object",
+ "required": ["host", "routing_key"],
+ "additionalProperties": false,
+ "properties": {
+ "ssl": {
+ "type": "boolean",
+ "description": "SSL enabled.",
+ "default": true
+ },
+ "host": {
+ "type": "string",
+ "description": "The RabbitMQ host name."
+ },
+ "port": {
+ "type": "integer",
+ "description": "The RabbitMQ port."
+ },
+ "virtual_host": {
+ "type": "string",
+ "description": "The RabbitMQ virtual host name."
+ },
+ "username": {
+ "type": "string",
+ "description": "The username to connect."
+ },
+ "password": {
+ "type": "string",
+ "description": "The password to connect."
+ },
+ "exchange": {
+ "type": "string",
+ "description": "The exchange name."
+ },
+ "routing_key": {
+ "type": "string",
+ "description": "The routing key."
+ }
+ }
+ }
+}
diff --git a/jvm/src/main/resources/airbyte/destination-redis.json b/jvm/src/main/resources/airbyte/destination-redis.json
new file mode 100644
index 0000000..ef2de6b
--- /dev/null
+++ b/jvm/src/main/resources/airbyte/destination-redis.json
@@ -0,0 +1,53 @@
+{
+ "documentationUrl": "https://docs.airbyte.io/integrations/destinations/redis",
+ "supportsIncremental": true,
+ "supportsNormalization": false,
+ "supportsDBT": false,
+ "supported_destination_sync_modes": ["overwrite", "append"],
+ "connectionSpecification": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Redis Destination Spec",
+ "type": "object",
+ "required": ["host", "port", "username", "password", "cache_type"],
+ "additionalProperties": false,
+ "properties": {
+ "host": {
+ "title": "Host",
+ "description": "Redis host to connect to.",
+ "type": "string",
+ "examples": ["localhost,127.0.0.1"],
+ "order": 1
+ },
+ "port": {
+ "title": "Port",
+ "description": "Port of Redis.",
+ "type": "integer",
+ "minimum": 0,
+ "maximum": 65536,
+ "default": 6379,
+ "order": 2
+ },
+ "username": {
+ "title": "Username",
+ "description": "Username associated with Redis.",
+ "type": "string",
+ "order": 3
+ },
+ "password": {
+ "title": "Password",
+ "description": "Password associated with Redis.",
+ "type": "string",
+ "airbyte_secret": true,
+ "order": 4
+ },
+ "cache_type": {
+ "title": "Cache type",
+ "type": "string",
+ "default": "hash",
+ "description": "Redis cache type to store data in.",
+ "enum": ["hash"],
+ "order": 5
+ }
+ }
+ }
+}
diff --git a/jvm/src/main/resources/airbyte/destination-redshift.json b/jvm/src/main/resources/airbyte/destination-redshift.json
new file mode 100644
index 0000000..dd724d1
--- /dev/null
+++ b/jvm/src/main/resources/airbyte/destination-redshift.json
@@ -0,0 +1,216 @@
+{
+ "documentationUrl": "https://docs.airbyte.io/integrations/destinations/redshift",
+ "supportsIncremental": true,
+ "supportsNormalization": true,
+ "supportsDBT": true,
+ "supported_destination_sync_modes": ["overwrite", "append", "append_dedup"],
+ "connectionSpecification": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Redshift Destination Spec",
+ "type": "object",
+ "required": ["host", "port", "database", "username", "password", "schema"],
+ "additionalProperties": true,
+ "properties": {
+ "host": {
+ "description": "Host Endpoint of the Redshift Cluster (must include the cluster-id, region and end with .redshift.amazonaws.com)",
+ "type": "string",
+ "title": "Host",
+ "order": 1
+ },
+ "port": {
+ "description": "Port of the database.",
+ "type": "integer",
+ "minimum": 0,
+ "maximum": 65536,
+ "default": 5439,
+ "examples": ["5439"],
+ "title": "Port",
+ "order": 2
+ },
+ "username": {
+ "description": "Username to use to access the database.",
+ "type": "string",
+ "title": "Username",
+ "order": 3
+ },
+ "password": {
+ "description": "Password associated with the username.",
+ "type": "string",
+ "airbyte_secret": true,
+ "title": "Password",
+ "order": 4
+ },
+ "database": {
+ "description": "Name of the database.",
+ "type": "string",
+ "title": "Database",
+ "order": 5
+ },
+ "schema": {
+ "description": "The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is \"public\".",
+ "type": "string",
+ "examples": ["public"],
+ "default": "public",
+ "title": "Default Schema",
+ "order": 6
+ },
+ "jdbc_url_params": {
+ "title": "JDBC URL Params",
+ "description": "Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).",
+ "type": "string",
+ "order": 7
+ },
+ "uploading_method": {
+ "title": "Uploading Method",
+ "type": "object",
+ "description": "The method how the data will be uploaded to the database.",
+ "order": 8,
+ "oneOf": [
+ {
+ "title": "Standard",
+ "required": ["method"],
+ "properties": {
+ "method": {
+ "type": "string",
+ "const": "Standard"
+ }
+ }
+ },
+ {
+ "title": "S3 Staging",
+ "required": [
+ "method",
+ "s3_bucket_name",
+ "s3_bucket_region",
+ "access_key_id",
+ "secret_access_key"
+ ],
+ "properties": {
+ "method": {
+ "type": "string",
+ "const": "S3 Staging"
+ },
+ "s3_bucket_name": {
+ "title": "S3 Bucket Name",
+ "type": "string",
+ "description": "The name of the staging S3 bucket to use if utilising a COPY strategy. COPY is recommended for production workloads for better speed and scalability. See AWS docs for more details.",
+ "examples": ["airbyte.staging"]
+ },
+ "s3_bucket_path": {
+ "title": "S3 Bucket Path (Optional)",
+ "type": "string",
+ "description": "The directory under the S3 bucket where data will be written. If not provided, then defaults to the root directory. See path's name recommendations for more details.",
+ "examples": ["data_sync/test"]
+ },
+ "s3_bucket_region": {
+ "title": "S3 Bucket Region",
+ "type": "string",
+ "default": "",
+ "description": "The region of the S3 staging bucket to use if utilising a COPY strategy. See AWS docs for details.",
+ "enum": [
+ "",
+ "us-east-1",
+ "us-east-2",
+ "us-west-1",
+ "us-west-2",
+ "af-south-1",
+ "ap-east-1",
+ "ap-south-1",
+ "ap-northeast-1",
+ "ap-northeast-2",
+ "ap-northeast-3",
+ "ap-southeast-1",
+ "ap-southeast-2",
+ "ca-central-1",
+ "cn-north-1",
+ "cn-northwest-1",
+ "eu-central-1",
+ "eu-north-1",
+ "eu-south-1",
+ "eu-west-1",
+ "eu-west-2",
+ "eu-west-3",
+ "sa-east-1",
+ "me-south-1"
+ ]
+ },
+ "file_name_pattern": {
+ "type": "string",
+ "description": "The pattern allows you to set the file-name format for the S3 staging file(s)",
+ "title": "S3 Filename pattern (Optional)",
+ "examples": [
+ "{date}",
+ "{date:yyyy_MM}",
+ "{timestamp}",
+ "{part_number}",
+ "{sync_id}"
+ ],
+ "order": 8
+ },
+ "access_key_id": {
+ "type": "string",
+ "description": "This ID grants access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. See AWS docs on how to generate an access key ID and secret access key.",
+ "title": "S3 Key Id",
+ "airbyte_secret": true
+ },
+ "secret_access_key": {
+ "type": "string",
+ "description": "The corresponding secret to the above access key id. See AWS docs on how to generate an access key ID and secret access key.",
+ "title": "S3 Access Key",
+ "airbyte_secret": true
+ },
+ "purge_staging_data": {
+ "title": "Purge Staging Files and Tables (Optional)",
+ "type": "boolean",
+ "description": "Whether to delete the staging files from S3 after completing the sync. See docs for details.",
+ "default": true
+ },
+ "encryption": {
+ "title": "Encryption",
+ "type": "object",
+ "description": "How to encrypt the staging data",
+ "default": { "encryption_type": "none" },
+ "oneOf": [
+ {
+ "title": "No encryption",
+ "description": "Staging data will be stored in plaintext.",
+ "type": "object",
+ "required": ["encryption_type"],
+ "properties": {
+ "encryption_type": {
+ "type": "string",
+ "const": "none",
+ "enum": ["none"],
+ "default": "none"
+ }
+ }
+ },
+ {
+ "title": "AES-CBC envelope encryption",
+ "description": "Staging data will be encrypted using AES-CBC envelope encryption.",
+ "type": "object",
+ "required": ["encryption_type"],
+ "properties": {
+ "encryption_type": {
+ "type": "string",
+ "const": "aes_cbc_envelope",
+ "enum": ["aes_cbc_envelope"],
+ "default": "aes_cbc_envelope"
+ },
+ "key_encrypting_key": {
+ "type": "string",
+ "title": "Key",
+ "description": "The key, base64-encoded. Must be either 128, 192, or 256 bits. Leave blank to have Airbyte generate an ephemeral key for each sync.",
+ "airbyte_secret": true
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+}
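The Redshift spec nests a second `oneOf` (`encryption`) inside the "S3 Staging" branch of `uploading_method`. Below is a sketch of a value that selects S3 staging with AES-CBC envelope encryption; the bucket, region, and keys are placeholders, and the `jsonschema` package is assumed as before.

```
import json

from jsonschema import validate  # pip install jsonschema

with open("jvm/src/main/resources/airbyte/destination-redshift.json") as f:
    spec = json.load(f)
uploading_schema = spec["connectionSpecification"]["properties"]["uploading_method"]

s3_staging = {
    "method": "S3 Staging",
    "s3_bucket_name": "airbyte.staging",
    "s3_bucket_region": "us-east-1",
    "access_key_id": "AKIA-PLACEHOLDER",
    "secret_access_key": "placeholder-secret",
    "purge_staging_data": True,
    "encryption": {"encryption_type": "aes_cbc_envelope"},  # nested oneOf branch
}

validate(instance=s3_staging, schema=uploading_schema)
```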
diff --git a/jvm/src/main/resources/airbyte/destination-rockset.json b/jvm/src/main/resources/airbyte/destination-rockset.json
new file mode 100644
index 0000000..7eedb8e
--- /dev/null
+++ b/jvm/src/main/resources/airbyte/destination-rockset.json
@@ -0,0 +1,39 @@
+{
+ "documentationUrl": "https://docs.airbyte.io/integrations/destinations/rockset",
+ "supportsIncremental": true,
+ "supported_destination_sync_modes": ["append", "overwrite"],
+ "connectionSpecification": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Rockset Destination Spec",
+ "type": "object",
+ "required": ["api_key", "workspace"],
+ "additionalProperties": false,
+ "properties": {
+ "api_key": {
+ "title": "Api Key",
+ "description": "Rockset api key",
+ "type": "string",
+ "order": 0,
+ "airbyte_secret": true
+ },
+ "workspace": {
+ "title": "Workspace",
+ "description": "The Rockset workspace in which collections will be created + written to.",
+ "type": "string",
+ "examples": ["commons", "my_workspace"],
+ "default": "commons",
+ "airbyte_secret": false,
+ "order": 1
+ },
+ "api_server": {
+ "title": "Api Server",
+ "description": "Rockset api URL",
+ "type": "string",
+ "airbyte_secret": false,
+ "default": "https://api.rs2.usw2.rockset.com",
+ "pattern": "^https:\\/\\/.*.rockset.com$",
+ "order": 2
+ }
+ }
+ }
+}
diff --git a/jvm/src/main/resources/airbyte/destination-s3.json b/jvm/src/main/resources/airbyte/destination-s3.json
new file mode 100644
index 0000000..db4022e
--- /dev/null
+++ b/jvm/src/main/resources/airbyte/destination-s3.json
@@ -0,0 +1,390 @@
+{
+ "documentationUrl": "https://docs.airbyte.io/integrations/destinations/s3",
+ "supportsIncremental": true,
+ "supportsNormalization": false,
+ "supportsDBT": false,
+ "supported_destination_sync_modes": ["overwrite", "append"],
+ "connectionSpecification": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "S3 Destination Spec",
+ "type": "object",
+ "required": [
+ "s3_bucket_name",
+ "s3_bucket_path",
+ "s3_bucket_region",
+ "format"
+ ],
+ "properties": {
+ "access_key_id": {
+ "type": "string",
+ "description": "The access key ID to access the S3 bucket. Airbyte requires Read and Write permissions to the given bucket. Read more here.",
+ "title": "S3 Key ID *",
+ "airbyte_secret": true,
+ "examples": ["A012345678910EXAMPLE"],
+ "order": 0
+ },
+ "secret_access_key": {
+ "type": "string",
+ "description": "The corresponding secret to the access key ID. Read more here",
+ "title": "S3 Access Key *",
+ "airbyte_secret": true,
+ "examples": ["a012345678910ABCDEFGH/AbCdEfGhEXAMPLEKEY"],
+ "order": 1
+ },
+ "s3_bucket_name": {
+ "title": "S3 Bucket Name",
+ "type": "string",
+ "description": "The name of the S3 bucket. Read more here.",
+ "examples": ["airbyte_sync"],
+ "order": 2
+ },
+ "s3_bucket_path": {
+ "title": "S3 Bucket Path",
+ "description": "Directory under the S3 bucket where data will be written. Read more here",
+ "type": "string",
+ "examples": ["data_sync/test"],
+ "order": 3
+ },
+ "s3_bucket_region": {
+ "title": "S3 Bucket Region",
+ "type": "string",
+ "default": "",
+ "description": "The region of the S3 bucket. See here for all region codes.",
+ "enum": [
+ "",
+ "us-east-1",
+ "us-east-2",
+ "us-west-1",
+ "us-west-2",
+ "af-south-1",
+ "ap-east-1",
+ "ap-south-1",
+ "ap-northeast-1",
+ "ap-northeast-2",
+ "ap-northeast-3",
+ "ap-southeast-1",
+ "ap-southeast-2",
+ "ca-central-1",
+ "cn-north-1",
+ "cn-northwest-1",
+ "eu-central-1",
+ "eu-north-1",
+ "eu-south-1",
+ "eu-west-1",
+ "eu-west-2",
+ "eu-west-3",
+ "sa-east-1",
+ "me-south-1",
+ "us-gov-east-1",
+ "us-gov-west-1"
+ ],
+ "order": 4
+ },
+ "format": {
+ "title": "Output Format *",
+ "type": "object",
+ "description": "Format of the data output. See here for more details",
+ "oneOf": [
+ {
+ "title": "Avro: Apache Avro",
+ "required": ["format_type", "compression_codec"],
+ "properties": {
+ "format_type": {
+ "title": "Format Type *",
+ "type": "string",
+ "enum": ["Avro"],
+ "default": "Avro",
+ "order": 0
+ },
+ "compression_codec": {
+ "title": "Compression Codec *",
+                "description": "The compression algorithm used to compress data. Defaults to no compression.",
+ "type": "object",
+ "oneOf": [
+ {
+ "title": "No Compression",
+ "required": ["codec"],
+ "properties": {
+ "codec": {
+ "type": "string",
+ "enum": ["no compression"],
+ "default": "no compression"
+ }
+ }
+ },
+ {
+ "title": "Deflate",
+ "required": ["codec", "compression_level"],
+ "properties": {
+ "codec": {
+ "type": "string",
+ "enum": ["Deflate"],
+ "default": "Deflate"
+ },
+ "compression_level": {
+ "title": "Deflate Level",
+ "description": "0: no compression & fastest, 9: best compression & slowest.",
+ "type": "integer",
+ "default": 0,
+ "minimum": 0,
+ "maximum": 9
+ }
+ }
+ },
+ {
+ "title": "bzip2",
+ "required": ["codec"],
+ "properties": {
+ "codec": {
+ "type": "string",
+ "enum": ["bzip2"],
+ "default": "bzip2"
+ }
+ }
+ },
+ {
+ "title": "xz",
+ "required": ["codec", "compression_level"],
+ "properties": {
+ "codec": {
+ "type": "string",
+ "enum": ["xz"],
+ "default": "xz"
+ },
+ "compression_level": {
+ "title": "Compression Level",
+ "description": "See here for details.",
+ "type": "integer",
+ "default": 6,
+ "minimum": 0,
+ "maximum": 9
+ }
+ }
+ },
+ {
+ "title": "zstandard",
+ "required": ["codec", "compression_level"],
+ "properties": {
+ "codec": {
+ "type": "string",
+ "enum": ["zstandard"],
+ "default": "zstandard"
+ },
+ "compression_level": {
+ "title": "Compression Level",
+ "description": "Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory.",
+ "type": "integer",
+ "default": 3,
+ "minimum": -5,
+ "maximum": 22
+ },
+ "include_checksum": {
+ "title": "Include Checksum",
+ "description": "If true, include a checksum with each data block.",
+ "type": "boolean",
+ "default": false
+ }
+ }
+ },
+ {
+ "title": "snappy",
+ "required": ["codec"],
+ "properties": {
+ "codec": {
+ "type": "string",
+ "enum": ["snappy"],
+ "default": "snappy"
+ }
+ }
+ }
+ ],
+ "order": 1
+ }
+ }
+ },
+ {
+ "title": "CSV: Comma-Separated Values",
+ "required": ["format_type", "flattening"],
+ "properties": {
+ "format_type": {
+ "title": "Format Type *",
+ "type": "string",
+ "enum": ["CSV"],
+ "default": "CSV"
+ },
+ "flattening": {
+ "type": "string",
+ "title": "Normalization (Flattening)",
+ "description": "Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details.",
+ "default": "No flattening",
+ "enum": ["No flattening", "Root level flattening"]
+ },
+ "compression": {
+ "title": "Compression",
+ "type": "object",
+ "description": "Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: \".csv.gz\").",
+ "oneOf": [
+ {
+ "title": "No Compression",
+                    "required": ["compression_type"],
+ "properties": {
+ "compression_type": {
+ "type": "string",
+ "enum": ["No Compression"],
+ "default": "No Compression"
+ }
+ }
+ },
+ {
+ "title": "GZIP",
+                    "required": ["compression_type"],
+ "properties": {
+ "compression_type": {
+ "type": "string",
+ "enum": ["GZIP"],
+ "default": "GZIP"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ {
+ "title": "JSON Lines: Newline-delimited JSON",
+ "required": ["format_type"],
+ "properties": {
+ "format_type": {
+ "title": "Format Type *",
+ "type": "string",
+ "enum": ["JSONL"],
+ "default": "JSONL"
+ },
+ "compression": {
+ "title": "Compression",
+ "type": "object",
+ "description": "Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: \".jsonl.gz\").",
+ "oneOf": [
+ {
+ "title": "No Compression",
+                    "required": ["compression_type"],
+ "properties": {
+ "compression_type": {
+ "type": "string",
+ "enum": ["No Compression"],
+ "default": "No Compression"
+ }
+ }
+ },
+ {
+ "title": "GZIP",
+                    "required": ["compression_type"],
+ "properties": {
+ "compression_type": {
+ "type": "string",
+ "enum": ["GZIP"],
+ "default": "GZIP"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ {
+ "title": "Parquet: Columnar Storage",
+ "required": ["format_type"],
+ "properties": {
+ "format_type": {
+ "title": "Format Type *",
+ "type": "string",
+ "enum": ["Parquet"],
+ "default": "Parquet"
+ },
+ "compression_codec": {
+ "title": "Compression Codec (Optional)",
+ "description": "The compression algorithm used to compress data pages.",
+ "type": "string",
+ "enum": [
+ "UNCOMPRESSED",
+ "SNAPPY",
+ "GZIP",
+ "LZO",
+ "BROTLI",
+ "LZ4",
+ "ZSTD"
+ ],
+ "default": "UNCOMPRESSED"
+ },
+ "block_size_mb": {
+ "title": "Block Size (Row Group Size) (MB) (Optional)",
+ "description": "This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB.",
+ "type": "integer",
+ "default": 128,
+ "examples": [128]
+ },
+ "max_padding_size_mb": {
+ "title": "Max Padding Size (MB) (Optional)",
+ "description": "Maximum size allowed as padding to align row groups. This is also the minimum size of a row group. Default: 8 MB.",
+ "type": "integer",
+ "default": 8,
+ "examples": [8]
+ },
+ "page_size_kb": {
+ "title": "Page Size (KB) (Optional)",
+ "description": "The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB.",
+ "type": "integer",
+ "default": 1024,
+ "examples": [1024]
+ },
+ "dictionary_page_size_kb": {
+ "title": "Dictionary Page Size (KB) (Optional)",
+                "description": "There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for the dictionary. Default: 1024 KB.",
+ "type": "integer",
+ "default": 1024,
+ "examples": [1024]
+ },
+ "dictionary_encoding": {
+ "title": "Dictionary Encoding (Optional)",
+ "description": "Default: true.",
+ "type": "boolean",
+ "default": true
+ }
+ }
+ }
+ ],
+ "order": 5
+ },
+ "s3_endpoint": {
+ "title": "Endpoint (Optional)",
+ "type": "string",
+ "default": "",
+ "description": "Your S3 endpoint url. Read more here",
+ "examples": ["http://localhost:9000"],
+ "order": 6
+ },
+ "s3_path_format": {
+ "title": "S3 Path Format (Optional)",
+ "description": "Format string on how data will be organized inside the S3 bucket directory. Read more here",
+ "type": "string",
+ "examples": [
+ "${NAMESPACE}/${STREAM_NAME}/${YEAR}_${MONTH}_${DAY}_${EPOCH}_"
+ ],
+ "order": 7
+ },
+ "file_name_pattern": {
+ "type": "string",
+ "description": "The pattern allows you to set the file-name format for the S3 staging file(s)",
+ "title": "S3 Filename pattern (Optional)",
+ "examples": [
+ "{date}",
+ "{date:yyyy_MM}",
+ "{timestamp}",
+ "{part_number}",
+ "{sync_id}"
+ ],
+ "order": 8
+ }
+ }
+ }
+}
diff --git a/jvm/src/main/resources/airbyte/destination-scylla.json b/jvm/src/main/resources/airbyte/destination-scylla.json
new file mode 100644
index 0000000..6fbed67
--- /dev/null
+++ b/jvm/src/main/resources/airbyte/destination-scylla.json
@@ -0,0 +1,57 @@
+{
+ "documentationUrl": "https://docs.airbyte.io/integrations/destinations/scylla",
+ "supportsIncremental": true,
+ "supportsNormalization": false,
+ "supportsDBT": false,
+ "supported_destination_sync_modes": ["overwrite", "append"],
+ "connectionSpecification": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Scylla Destination Spec",
+ "type": "object",
+ "required": ["keyspace", "username", "password", "address", "port"],
+ "additionalProperties": true,
+ "properties": {
+ "keyspace": {
+ "title": "Keyspace",
+ "description": "Default Scylla keyspace to create data in.",
+ "type": "string",
+ "order": 0
+ },
+ "username": {
+ "title": "Username",
+ "description": "Username to use to access Scylla.",
+ "type": "string",
+ "order": 1
+ },
+ "password": {
+ "title": "Password",
+ "description": "Password associated with Scylla.",
+ "type": "string",
+ "airbyte_secret": true,
+ "order": 2
+ },
+ "address": {
+ "title": "Address",
+ "description": "Address to connect to.",
+ "type": "string",
+ "order": 3
+ },
+ "port": {
+ "title": "Port",
+ "description": "Port of Scylla.",
+ "type": "integer",
+ "minimum": 0,
+ "maximum": 65536,
+ "default": 9042,
+ "order": 4
+ },
+ "replication": {
+ "title": "Replication factor",
+ "type": "integer",
+ "description": "Indicates to how many nodes the data should be replicated to.",
+ "default": 1,
+ "order": 5
+ }
+ }
+ }
+}
diff --git a/jvm/src/main/resources/airbyte/destination-sftp-json.json b/jvm/src/main/resources/airbyte/destination-sftp-json.json
new file mode 100644
index 0000000..920997d
--- /dev/null
+++ b/jvm/src/main/resources/airbyte/destination-sftp-json.json
@@ -0,0 +1,52 @@
+{
+ "documentationUrl": "https://docs.airbyte.io/integrations/destinations/sftp-json",
+ "supported_destination_sync_modes": ["overwrite", "append"],
+ "supportsIncremental": true,
+ "supportsDBT": false,
+ "supportsNormalization": false,
+ "connectionSpecification": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Destination SFTP JSON",
+ "type": "object",
+ "required": ["host", "username", "password", "destination_path"],
+ "additionalProperties": false,
+ "properties": {
+ "host": {
+ "title": "Host",
+ "description": "Hostname of the SFTP server.",
+ "type": "string",
+ "order": 0
+ },
+ "port": {
+ "title": "Port",
+ "description": "Port of the SFTP server.",
+ "type": "integer",
+ "minimum": 0,
+ "maximum": 65536,
+ "default": 22,
+ "examples": [22],
+ "order": 1
+ },
+ "username": {
+ "title": "User",
+ "description": "Username to use to access the SFTP server.",
+ "type": "string",
+ "order": 2
+ },
+ "password": {
+ "title": "Password",
+ "description": "Password associated with the username.",
+ "type": "string",
+ "airbyte_secret": true,
+ "order": 3
+ },
+ "destination_path": {
+ "title": "Destination path",
+ "type": "string",
+ "description": "Path to the directory where json files will be written.",
+ "examples": ["/json_data"],
+ "order": 4
+ }
+ }
+ }
+}
diff --git a/jvm/src/main/resources/airbyte/destination-snowflake.json b/jvm/src/main/resources/airbyte/destination-snowflake.json
new file mode 100644
index 0000000..e834527
--- /dev/null
+++ b/jvm/src/main/resources/airbyte/destination-snowflake.json
@@ -0,0 +1,477 @@
+{
+ "documentationUrl": "https://docs.airbyte.io/integrations/destinations/snowflake",
+ "supportsIncremental": true,
+ "supportsNormalization": true,
+ "supportsDBT": true,
+ "supported_destination_sync_modes": ["overwrite", "append", "append_dedup"],
+ "connectionSpecification": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Snowflake Destination Spec",
+ "type": "object",
+ "required": ["host", "role", "warehouse", "database", "schema", "username"],
+ "additionalProperties": true,
+ "properties": {
+ "host": {
+ "description": "Enter your Snowflake account's locator (in the format host1:port1,host2:port2,...
. Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down).
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server.
+datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses.
+datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.
+datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer.
+datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns.
+datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream.
+datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request.
+datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block.
+datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5.
+datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes.
+datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers.
+datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.
+datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.
+datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error.
+datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used.
+datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum.
+datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established.
+datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka.
+datasources.section.destination-kafka.test_topic.description=Topic to test if harana can produce messages.
+datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-keen.api_key.title=API Key
+datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp
+datasources.section.destination-keen.project_id.title=Project ID
+datasources.section.destination-keen.api_key.description=To get Keen Master API Key, navigate to the Access tab from the left-hand side panel and check the Project Details section.
+datasources.section.destination-keen.infer_timestamp.description=Allow connector to guess keen.timestamp value based on the streamed data.
+datasources.section.destination-keen.project_id.description=To get Keen Project ID, navigate to the Access tab from the left-hand side panel and check the Project Details section.
+datasources.section.destination-kinesis.accessKey.title=Access Key
+datasources.section.destination-kinesis.bufferSize.title=Buffer Size
+datasources.section.destination-kinesis.endpoint.title=Endpoint
+datasources.section.destination-kinesis.privateKey.title=Private Key
+datasources.section.destination-kinesis.region.title=Region
+datasources.section.destination-kinesis.shardCount.title=Shard Count
+datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for current user.
+datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing kinesis records before being batch streamed.
+datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint.
+datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that are unique for each account, also known as a "recovery phrase".
+datasources.section.destination-kinesis.region.description=AWS region. Your account determines the Regions that are available to you.
+datasources.section.destination-kinesis.shardCount.description=Number of shards to which the data should be streamed.
+datasources.section.destination-kvdb.bucket_id.title=Bucket ID
+datasources.section.destination-kvdb.secret_key.title=Secret Key
+datasources.section.destination-kvdb.bucket_id.description=The ID of your KVdb bucket.
+datasources.section.destination-kvdb.secret_key.description=Your bucket Secret Key.
+datasources.section.destination-local-json.destination_path.title=Destination Path
+datasources.section.destination-local-json.destination_path.description=Path to the directory where json files will be written. The files will be placed inside that local mount. For more information check out our docs
+datasources.section.destination-mariadb-columnstore.database.title=Database
+datasources.section.destination-mariadb-columnstore.host.title=Host
+datasources.section.destination-mariadb-columnstore.password.title=Password
+datasources.section.destination-mariadb-columnstore.port.title=Port
+datasources.section.destination-mariadb-columnstore.username.title=Username
+datasources.section.destination-mariadb-columnstore.database.description=Name of the database.
+datasources.section.destination-mariadb-columnstore.host.description=The Hostname of the database.
+datasources.section.destination-mariadb-columnstore.password.description=The Password associated with the username.
+datasources.section.destination-mariadb-columnstore.port.description=The Port of the database.
+datasources.section.destination-mariadb-columnstore.username.description=The Username which is used to access the database.
+datasources.section.destination-meilisearch.api_key.title=API Key
+datasources.section.destination-meilisearch.host.title=Host
+datasources.section.destination-meilisearch.api_key.description=MeiliSearch API Key. See the docs for more information on how to obtain this key.
+datasources.section.destination-meilisearch.host.description=Hostname of the MeiliSearch instance.
+datasources.section.destination-mongodb.auth_type.oneOf.0.title=None
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.title=Password
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.title=User
+datasources.section.destination-mongodb.auth_type.oneOf.1.title=Login/Password
+datasources.section.destination-mongodb.auth_type.title=Authorization type
+datasources.section.destination-mongodb.database.title=DB Name
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.title=Host
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.title=Port
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.title=TLS Connection
+datasources.section.destination-mongodb.instance_type.oneOf.0.title=Standalone MongoDb Instance
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.title=Server addresses
+datasources.section.destination-mongodb.instance_type.oneOf.1.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL
+datasources.section.destination-mongodb.instance_type.oneOf.2.title=MongoDB Atlas
+datasources.section.destination-mongodb.instance_type.title=MongoDb Instance Type
+datasources.section.destination-mongodb.auth_type.description=Authorization type.
+datasources.section.destination-mongodb.auth_type.oneOf.0.description=None.
+datasources.section.destination-mongodb.auth_type.oneOf.1.description=Login/Password.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.description=Password associated with the username.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.description=Username to use to access the database.
+datasources.section.destination-mongodb.database.description=Name of the database.
+datasources.section.destination-mongodb.instance_type.description=MongoDb instance to connect to. For MongoDB Atlas and Replica Set, a TLS connection is used by default.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.description=The Host of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.description=The Port of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.description=Indicates whether TLS encryption protocol will be used to connect to MongoDB. It is recommended to use TLS connection if possible. For more information see documentation.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.description=A replica set name.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify `host`:`port` of each member separated by a comma.
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.description=URL of a cluster to connect to.
+datasources.section.destination-mqtt.automatic_reconnect.title=Automatic reconnect
+datasources.section.destination-mqtt.broker_host.title=MQTT broker host
+datasources.section.destination-mqtt.broker_port.title=MQTT broker port
+datasources.section.destination-mqtt.clean_session.title=Clean session
+datasources.section.destination-mqtt.client.title=Client ID
+datasources.section.destination-mqtt.connect_timeout.title=Connect timeout
+datasources.section.destination-mqtt.message_qos.title=Message QoS
+datasources.section.destination-mqtt.message_retained.title=Message retained
+datasources.section.destination-mqtt.password.title=Password
+datasources.section.destination-mqtt.publisher_sync.title=Sync publisher
+datasources.section.destination-mqtt.topic_pattern.title=Topic pattern
+datasources.section.destination-mqtt.topic_test.title=Test topic
+datasources.section.destination-mqtt.use_tls.title=Use TLS
+datasources.section.destination-mqtt.username.title=Username
+datasources.section.destination-mqtt.automatic_reconnect.description=Whether the client will automatically attempt to reconnect to the server if the connection is lost.
+datasources.section.destination-mqtt.broker_host.description=Host of the broker to connect to.
+datasources.section.destination-mqtt.broker_port.description=Port of the broker.
+datasources.section.destination-mqtt.clean_session.description=Whether the client and server should remember state across restarts and reconnects.
+datasources.section.destination-mqtt.client.description=A client identifier that is unique on the server being connected to.
+datasources.section.destination-mqtt.connect_timeout.description= Maximum time interval (in seconds) the client will wait for the network connection to the MQTT server to be established.
+datasources.section.destination-mqtt.message_qos.description=Quality of service used for each message to be delivered.
+datasources.section.destination-mqtt.message_retained.description=Whether or not the publish message should be retained by the messaging engine.
+datasources.section.destination-mqtt.password.description=Password to use for the connection.
+datasources.section.destination-mqtt.publisher_sync.description=Wait synchronously until the record has been sent to the broker.
+datasources.section.destination-mqtt.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-mqtt.topic_test.description=Topic to test if harana can produce messages.
+datasources.section.destination-mqtt.use_tls.description=Whether to use TLS encryption on the connection.
+datasources.section.destination-mqtt.username.description=User name to use for the connection.
+datasources.section.destination-mssql.database.title=DB Name
+datasources.section.destination-mssql.host.title=Host
+datasources.section.destination-mssql.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-mssql.password.title=Password
+datasources.section.destination-mssql.port.title=Port
+datasources.section.destination-mssql.schema.title=Default Schema
+datasources.section.destination-mssql.ssl_method.oneOf.0.title=Unencrypted
+datasources.section.destination-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate)
+datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate
+datasources.section.destination-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate)
+datasources.section.destination-mssql.ssl_method.title=SSL Method
+datasources.section.destination-mssql.username.title=User
+datasources.section.destination-mssql.database.description=The name of the MSSQL database.
+datasources.section.destination-mssql.host.description=The host name of the MSSQL database.
+datasources.section.destination-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-mssql.password.description=The password associated with this username.
+datasources.section.destination-mssql.port.description=The port of the MSSQL database.
+datasources.section.destination-mssql.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public".
+datasources.section.destination-mssql.ssl_method.description=The encryption method which is used to communicate with the database.
+datasources.section.destination-mssql.ssl_method.oneOf.0.description=The data transfer will not be encrypted.
+datasources.section.destination-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!)
+datasources.section.destination-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate.
+datasources.section.destination-mssql.username.description=The username which is used to access the database.
+datasources.section.destination-mysql.database.title=DB Name
+datasources.section.destination-mysql.host.title=Host
+datasources.section.destination-mysql.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-mysql.password.title=Password
+datasources.section.destination-mysql.port.title=Port
+datasources.section.destination-mysql.ssl.title=SSL Connection
+datasources.section.destination-mysql.username.title=User
+datasources.section.destination-mysql.database.description=Name of the database.
+datasources.section.destination-mysql.host.description=Hostname of the database.
+datasources.section.destination-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-mysql.password.description=Password associated with the username.
+datasources.section.destination-mysql.port.description=Port of the database.
+datasources.section.destination-mysql.ssl.description=Encrypt data using SSL.
+datasources.section.destination-mysql.username.description=Username to use to access the database.
+datasources.section.destination-oracle.encryption.oneOf.0.title=Unencrypted
+datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.title=Encryption Algorithm
+datasources.section.destination-oracle.encryption.oneOf.1.title=Native Network Encryption (NNE)
+datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.title=SSL PEM file
+datasources.section.destination-oracle.encryption.oneOf.2.title=TLS Encrypted (verify certificate)
+datasources.section.destination-oracle.encryption.title=Encryption
+datasources.section.destination-oracle.host.title=Host
+datasources.section.destination-oracle.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-oracle.password.title=Password
+datasources.section.destination-oracle.port.title=Port
+datasources.section.destination-oracle.schema.title=Default Schema
+datasources.section.destination-oracle.sid.title=SID
+datasources.section.destination-oracle.username.title=User
+datasources.section.destination-oracle.encryption.description=The encryption method which is used when communicating with the database.
+datasources.section.destination-oracle.encryption.oneOf.0.description=Data transfer will not be encrypted.
+datasources.section.destination-oracle.encryption.oneOf.1.description=The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports.
+datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.description=This parameter defines the database encryption algorithm.
+datasources.section.destination-oracle.encryption.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations.
+datasources.section.destination-oracle.host.description=The hostname of the database.
+datasources.section.destination-oracle.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-oracle.password.description=The password associated with the username.
+datasources.section.destination-oracle.port.description=The port of the database.
+datasources.section.destination-oracle.schema.description=The default schema is used as the target schema for all statements issued from the connection that do not explicitly specify a schema name. The usual value for this field is "harana". In Oracle, schemas and users are the same thing, so the "user" parameter is used as the login credentials and this is used for the default harana message schema.
+datasources.section.destination-oracle.sid.description=The System Identifier uniquely distinguishes the instance from any other instance on the same computer.
+datasources.section.destination-oracle.username.description=The username to access the database. This user must have CREATE USER privileges in the database.
+datasources.section.destination-postgres.database.title=DB Name
+datasources.section.destination-postgres.host.title=Host
+datasources.section.destination-postgres.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-postgres.password.title=Password
+datasources.section.destination-postgres.port.title=Port
+datasources.section.destination-postgres.schema.title=Default Schema
+datasources.section.destination-postgres.ssl.title=SSL Connection
+datasources.section.destination-postgres.ssl_mode.oneOf.0.title=disable
+datasources.section.destination-postgres.ssl_mode.oneOf.1.title=allow
+datasources.section.destination-postgres.ssl_mode.oneOf.2.title=prefer
+datasources.section.destination-postgres.ssl_mode.oneOf.3.title=require
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.title=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.title=Client key password
+datasources.section.destination-postgres.ssl_mode.oneOf.4.title=verify-ca
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.title=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.title=Client certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.title=Client key
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.title=Client key password
+datasources.section.destination-postgres.ssl_mode.oneOf.5.title=verify-full
+datasources.section.destination-postgres.ssl_mode.title=SSL modes
+datasources.section.destination-postgres.username.title=User
+datasources.section.destination-postgres.database.description=Name of the database.
+datasources.section.destination-postgres.host.description=Hostname of the database.
+datasources.section.destination-postgres.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-postgres.password.description=Password associated with the username.
+datasources.section.destination-postgres.port.description=Port of the database.
+datasources.section.destination-postgres.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public".
+datasources.section.destination-postgres.ssl.description=Encrypt data using SSL. When activating SSL, please select one of the connection modes.
+datasources.section.destination-postgres.ssl_mode.description=SSL connection modes.
+datasources.section.destination-postgres.ssl_mode.oneOf.0.description=Disable SSL.
+datasources.section.destination-postgres.ssl_mode.oneOf.1.description=Allow SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.2.description=Prefer SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.3.description=Require SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.4.description=Verify-ca SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.description=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.description=Password for the key storage. This field is optional; if you do not add it, the password will be generated automatically.
+datasources.section.destination-postgres.ssl_mode.oneOf.5.description=Verify-full SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.description=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.description=Client certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.description=Client key
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.description=Password for the key storage. This field is optional; if you do not add it, the password will be generated automatically.
+datasources.section.destination-postgres.username.description=Username to use to access the database.
+datasources.section.destination-pubsub.credentials_json.title=Credentials JSON
+datasources.section.destination-pubsub.project_id.title=Project ID
+datasources.section.destination-pubsub.topic_id.title=PubSub Topic ID
+datasources.section.destination-pubsub.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key.
+datasources.section.destination-pubsub.project_id.description=The GCP project ID for the project containing the target PubSub.
+datasources.section.destination-pubsub.topic_id.description=The PubSub topic ID in the given GCP project ID.
+datasources.section.destination-pulsar.batching_enabled.title=Enable batching
+datasources.section.destination-pulsar.batching_max_messages.title=Batching max messages
+datasources.section.destination-pulsar.batching_max_publish_delay.title=Batching max publish delay
+datasources.section.destination-pulsar.block_if_queue_full.title=Block if queue is full
+datasources.section.destination-pulsar.brokers.title=Pulsar brokers
+datasources.section.destination-pulsar.compression_type.title=Compression type
+datasources.section.destination-pulsar.max_pending_messages.title=Max pending messages
+datasources.section.destination-pulsar.max_pending_messages_across_partitions.title=Max pending messages across partitions
+datasources.section.destination-pulsar.producer_name.title=Producer name
+datasources.section.destination-pulsar.producer_sync.title=Sync producer
+datasources.section.destination-pulsar.send_timeout_ms.title=Message send timeout
+datasources.section.destination-pulsar.topic_namespace.title=Topic namespace
+datasources.section.destination-pulsar.topic_pattern.title=Topic pattern
+datasources.section.destination-pulsar.topic_tenant.title=Topic tenant
+datasources.section.destination-pulsar.topic_test.title=Test topic
+datasources.section.destination-pulsar.topic_type.title=Topic type
+datasources.section.destination-pulsar.use_tls.title=Use TLS
+datasources.section.destination-pulsar.batching_enabled.description=Control whether automatic batching of messages is enabled for the producer.
+datasources.section.destination-pulsar.batching_max_messages.description=Maximum number of messages permitted in a batch.
+datasources.section.destination-pulsar.batching_max_publish_delay.description= Time period in milliseconds within which the messages sent will be batched.
+datasources.section.destination-pulsar.block_if_queue_full.description=If the send operation should block when the outgoing message queue is full.
+datasources.section.destination-pulsar.brokers.description=A list of host/port pairs to use for establishing the initial connection to the Pulsar cluster.
+datasources.section.destination-pulsar.compression_type.description=Compression type for the producer.
+datasources.section.destination-pulsar.max_pending_messages.description=The maximum size of a queue holding pending messages.
+datasources.section.destination-pulsar.max_pending_messages_across_partitions.description=The maximum number of pending messages across partitions.
+datasources.section.destination-pulsar.producer_name.description=Name for the producer. If not filled, the system will generate a globally unique name.
+datasources.section.destination-pulsar.producer_sync.description=Wait synchronously until the record has been sent to Pulsar.
+datasources.section.destination-pulsar.send_timeout_ms.description=If a message is not acknowledged by a server before the send-timeout expires, an error occurs (in ms).
+datasources.section.destination-pulsar.topic_namespace.description=The administrative unit of the topic, which acts as a grouping mechanism for related topics. Most topic configuration is performed at the namespace level. Each tenant has one or multiple namespaces.
+datasources.section.destination-pulsar.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-pulsar.topic_tenant.description=The topic tenant within the instance. Tenants are essential to multi-tenancy in Pulsar, and spread across clusters.
+datasources.section.destination-pulsar.topic_test.description=Topic to test if harana can produce messages.
+datasources.section.destination-pulsar.topic_type.description=It identifies the type of topic. Pulsar supports two kinds of topics: persistent and non-persistent. In a persistent topic, all messages are durably persisted on disk (that means on multiple disks unless the broker is standalone), whereas a non-persistent topic does not persist messages to disk.
+datasources.section.destination-pulsar.use_tls.description=Whether to use TLS encryption on the connection.
+datasources.section.destination-rabbitmq.exchange.description=The exchange name.
+datasources.section.destination-rabbitmq.host.description=The RabbitMQ host name.
+datasources.section.destination-rabbitmq.password.description=The password to connect.
+datasources.section.destination-rabbitmq.port.description=The RabbitMQ port.
+datasources.section.destination-rabbitmq.routing_key.description=The routing key.
+datasources.section.destination-rabbitmq.ssl.description=SSL enabled.
+datasources.section.destination-rabbitmq.username.description=The username to connect.
+datasources.section.destination-rabbitmq.virtual_host.description=The RabbitMQ virtual host name.
+datasources.section.destination-redis.cache_type.title=Cache type
+datasources.section.destination-redis.host.title=Host
+datasources.section.destination-redis.password.title=Password
+datasources.section.destination-redis.port.title=Port
+datasources.section.destination-redis.username.title=Username
+datasources.section.destination-redis.cache_type.description=Redis cache type to store data in.
+datasources.section.destination-redis.host.description=Redis host to connect to.
+datasources.section.destination-redis.password.description=Password associated with Redis.
+datasources.section.destination-redis.port.description=Port of Redis.
+datasources.section.destination-redis.username.description=Username associated with Redis.
+datasources.section.destination-redshift.database.title=Database
+datasources.section.destination-redshift.host.title=Host
+datasources.section.destination-redshift.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-redshift.password.title=Password
+datasources.section.destination-redshift.port.title=Port
+datasources.section.destination-redshift.schema.title=Default Schema
+datasources.section.destination-redshift.uploading_method.oneOf.0.title=Standard
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.title=S3 Key Id
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.title=No encryption
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.title=Key
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.title=AES-CBC envelope encryption
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.title=Encryption
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.title=S3 Filename pattern
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.title=Purge Staging Files and Tables
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.title=S3 Bucket Name
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.title=S3 Bucket Path
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.title=S3 Bucket Region
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.title=S3 Access Key
+datasources.section.destination-redshift.uploading_method.oneOf.1.title=S3 Staging
+datasources.section.destination-redshift.uploading_method.title=Uploading Method
+datasources.section.destination-redshift.username.title=Username
+datasources.section.destination-redshift.database.description=Name of the database.
+datasources.section.destination-redshift.host.description=Host Endpoint of the Redshift Cluster (must include the cluster-id, region and end with .redshift.amazonaws.com)
+datasources.section.destination-redshift.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-redshift.password.description=Password associated with the username.
+datasources.section.destination-redshift.port.description=Port of the database.
+datasources.section.destination-redshift.schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public".
+datasources.section.destination-redshift.uploading_method.description=The method by which the data will be uploaded to the database.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.description=This ID grants access to the above S3 staging bucket. harana requires Read and Write permissions to the given bucket. See AWS docs on how to generate an access key ID and secret access key.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.description=How to encrypt the staging data
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.description=Staging data will be stored in plaintext.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.description=Staging data will be encrypted using AES-CBC envelope encryption.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.description=The key, base64-encoded. Must be either 128, 192, or 256 bits. Leave blank to have harana generate an ephemeral key for each sync.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.description=Whether to delete the staging files from S3 after completing the sync. See docs for details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.description=The name of the staging S3 bucket to use if utilising a COPY strategy. COPY is recommended for production workloads for better speed and scalability. See AWS docs for more details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. If not provided, then defaults to the root directory. See path's name recommendations for more details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a COPY strategy. See AWS docs for details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.description=The corresponding secret to the above access key id. See AWS docs on how to generate an access key ID and secret access key.
+datasources.section.destination-redshift.username.description=Username to use to access the database.
+datasources.section.destination-rockset.api_key.title=Api Key
+datasources.section.destination-rockset.api_server.title=Api Server
+datasources.section.destination-rockset.workspace.title=Workspace
+datasources.section.destination-rockset.api_key.description=Rockset api key
+datasources.section.destination-rockset.api_server.description=Rockset api URL
+datasources.section.destination-rockset.workspace.description=The Rockset workspace in which collections will be created and written to.
+datasources.section.destination-s3.access_key_id.title=S3 Key ID *
+datasources.section.destination-s3.file_name_pattern.title=S3 Filename pattern
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate Level
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.title=Compression Codec *
+datasources.section.destination-s3.format.oneOf.0.properties.format_type.title=Format Type *
+datasources.section.destination-s3.format.oneOf.0.title=Avro: Apache Avro
+datasources.section.destination-s3.format.oneOf.1.properties.compression.oneOf.0.title=No Compression
+datasources.section.destination-s3.format.oneOf.1.properties.compression.oneOf.1.title=GZIP
+datasources.section.destination-s3.format.oneOf.1.properties.compression.title=Compression
+datasources.section.destination-s3.format.oneOf.1.properties.flattening.title=Normalization (Flattening)
+datasources.section.destination-s3.format.oneOf.1.properties.format_type.title=Format Type *
+datasources.section.destination-s3.format.oneOf.1.title=CSV: Comma-Separated Values
+datasources.section.destination-s3.format.oneOf.2.properties.compression.oneOf.0.title=No Compression
+datasources.section.destination-s3.format.oneOf.2.properties.compression.oneOf.1.title=GZIP
+datasources.section.destination-s3.format.oneOf.2.properties.compression.title=Compression
+datasources.section.destination-s3.format.oneOf.2.properties.format_type.title=Format Type *
+datasources.section.destination-s3.format.oneOf.2.title=JSON Lines: Newline-delimited JSON
+datasources.section.destination-s3.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB)
+datasources.section.destination-s3.format.oneOf.3.properties.compression_codec.title=Compression Codec
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB)
+datasources.section.destination-s3.format.oneOf.3.properties.format_type.title=Format Type *
+datasources.section.destination-s3.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB)
+datasources.section.destination-s3.format.oneOf.3.properties.page_size_kb.title=Page Size (KB)
+datasources.section.destination-s3.format.oneOf.3.title=Parquet: Columnar Storage
+datasources.section.destination-s3.format.title=Output Format *
+datasources.section.destination-s3.s3_bucket_name.title=S3 Bucket Name
+datasources.section.destination-s3.s3_bucket_path.title=S3 Bucket Path
+datasources.section.destination-s3.s3_bucket_region.title=S3 Bucket Region
+datasources.section.destination-s3.s3_endpoint.title=Endpoint
+datasources.section.destination-s3.s3_path_format.title=S3 Path Format
+datasources.section.destination-s3.secret_access_key.title=S3 Access Key *
+datasources.section.destination-s3.access_key_id.description=The access key ID to access the S3 bucket. harana requires Read and Write permissions to the given bucket. Read more here.
+datasources.section.destination-s3.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s)
+datasources.section.destination-s3.format.description=Format of the data output. See here for more details
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Defaults to no compression.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=See here for details.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block.
+datasources.section.destination-s3.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz").
+datasources.section.destination-s3.format.oneOf.1.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details.
+datasources.section.destination-s3.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz").
+datasources.section.destination-s3.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB.
+datasources.section.destination-s3.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages.
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_encoding.description=Default: true.
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for dictionary. Default: 1024 KB.
+datasources.section.destination-s3.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. This is also the minimum size of a row group. Default: 8 MB.
+datasources.section.destination-s3.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB.
+datasources.section.destination-s3.s3_bucket_name.description=The name of the S3 bucket. Read more here.
+datasources.section.destination-s3.s3_bucket_path.description=Directory under the S3 bucket where data will be written. Read more here
+datasources.section.destination-s3.s3_bucket_region.description=The region of the S3 bucket. See here for all region codes.
+datasources.section.destination-s3.s3_endpoint.description=Your S3 endpoint url. Read more here
+datasources.section.destination-s3.s3_path_format.description=Format string on how data will be organized inside the S3 bucket directory. Read more here
+datasources.section.destination-s3.secret_access_key.description=The corresponding secret to the access key ID. Read more here
+datasources.section.destination-scylla.address.title=Address
+datasources.section.destination-scylla.keyspace.title=Keyspace
+datasources.section.destination-scylla.password.title=Password
+datasources.section.destination-scylla.port.title=Port
+datasources.section.destination-scylla.replication.title=Replication factor
+datasources.section.destination-scylla.username.title=Username
+datasources.section.destination-scylla.address.description=Address to connect to.
+datasources.section.destination-scylla.keyspace.description=Default Scylla keyspace to create data in.
+datasources.section.destination-scylla.password.description=Password associated with Scylla.
+datasources.section.destination-scylla.port.description=Port of Scylla.
+datasources.section.destination-scylla.replication.description=Indicates how many nodes the data should be replicated to.
+datasources.section.destination-scylla.username.description=Username to use to access Scylla.
+datasources.section.destination-sftp-json.destination_path.title=Destination path
+datasources.section.destination-sftp-json.host.title=Host
+datasources.section.destination-sftp-json.password.title=Password
+datasources.section.destination-sftp-json.port.title=Port
+datasources.section.destination-sftp-json.username.title=User
+datasources.section.destination-sftp-json.destination_path.description=Path to the directory where JSON files will be written.
+datasources.section.destination-sftp-json.host.description=Hostname of the SFTP server.
+datasources.section.destination-sftp-json.password.description=Password associated with the username.
+datasources.section.destination-sftp-json.port.description=Port of the SFTP server.
+datasources.section.destination-sftp-json.username.description=Username to use to access the SFTP server.
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.access_token.title=Access Token
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.refresh_token.title=Refresh Token
+datasources.section.destination-snowflake.credentials.oneOf.0.title=OAuth2.0
+datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key.title=Private Key
+datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key_password.title=Passphrase
+datasources.section.destination-snowflake.credentials.oneOf.1.title=Key Pair Authentication
+datasources.section.destination-snowflake.credentials.oneOf.2.properties.password.title=Password
+datasources.section.destination-snowflake.credentials.oneOf.2.title=Username and Password
+datasources.section.destination-snowflake.credentials.title=Authorization Method
+datasources.section.destination-snowflake.database.title=Database
+datasources.section.destination-snowflake.host.title=Host
+datasources.section.destination-snowflake.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-snowflake.loading_method.oneOf.0.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.0.title=Select another option
+datasources.section.destination-snowflake.loading_method.oneOf.1.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.1.title=[Recommended] Internal Staging
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.access_key_id.title=AWS access key ID
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.0.title=No encryption
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.properties.key_encrypting_key.title=Key
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.title=AES-CBC envelope encryption
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.title=Encryption
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.file_name_pattern.title=S3 Filename pattern
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.purge_staging_data.title=Purge Staging Files and Tables
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_name.title=S3 Bucket Name
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_region.title=S3 Bucket Region
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.secret_access_key.title=AWS secret access key
+datasources.section.destination-snowflake.loading_method.oneOf.2.title=AWS S3 Staging
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.bucket_name.title=Cloud Storage bucket name
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.credentials_json.title=Google Application Credentials
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.project_id.title=Google Cloud project ID
+datasources.section.destination-snowflake.loading_method.oneOf.3.title=Google Cloud Storage Staging
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_account_name.title=Azure Blob Storage account name
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_container_name.title=Azure Blob Storage Container Name
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_endpoint_domain_name.title=Azure Blob Storage Endpoint
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_sas_token.title=SAS Token
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.4.title=Azure Blob Storage Staging
+datasources.section.destination-snowflake.loading_method.title=Data Staging Method
+datasources.section.destination-snowflake.role.title=Role
+datasources.section.destination-snowflake.schema.title=Default Schema
+datasources.section.destination-snowflake.username.title=Username
+datasources.section.destination-snowflake.warehouse.title=Warehouse
+datasources.section.destination-snowflake.credentials.description=
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.access_token.description=Enter your application's Access Token
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_id.description=Enter your application's Client ID
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_secret.description=Enter your application's Client secret
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.refresh_token.description=Enter your application's Refresh Token
+datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key.description=RSA Private key to use for Snowflake connection. See the docs for more information on how to obtain this key.
+datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key_password.description=Passphrase for private key
+datasources.section.destination-snowflake.credentials.oneOf.2.properties.password.description=Enter the password associated with the username.
+datasources.section.destination-snowflake.database.description=Enter the name of the database you want to sync data into
+datasources.section.destination-snowflake.host.description=Enter your Snowflake account's locator
+datasources.section.source-kafka.bootstrap_servers.description=A list of host/port pairs to use for establishing the initial connection to the Kafka cluster, in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down).
+datasources.section.source-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses.
+datasources.section.source-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.
+datasources.section.source-kafka.enable_auto_commit.description=If true, the consumer's offset will be periodically committed in the background.
+datasources.section.source-kafka.group_id.description=The Group ID is how you distinguish different consumer groups.
+datasources.section.source-kafka.max_poll_records.description=The maximum number of records returned in a single call to poll(). Note that max_poll_records does not impact the underlying fetching behavior. The consumer will cache the records from each fetch request and return them incrementally from each poll.
+datasources.section.source-kafka.polling_time.description=Amount of time the Kafka connector should try to poll for messages.
+datasources.section.source-kafka.protocol.description=The Protocol used to communicate with brokers.
+datasources.section.source-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=The JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.source-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=The SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.source-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=The JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.source-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=The SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.source-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.
+datasources.section.source-kafka.repeated_calls.description=The number of repeated calls to poll() if no messages were received.
+datasources.section.source-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.
+datasources.section.source-kafka.retry_backoff_ms.description=The amount of time to wait before attempting to retry a failed request to a given topic partition. This avoids repeatedly sending requests in a tight loop under some failure scenarios.
+datasources.section.source-kafka.subscription.description=You can choose to manually assign a list of partitions, or subscribe to all topics matching specified pattern to get dynamically assigned partitions.
+datasources.section.source-kafka.subscription.oneOf.0.properties.subscription_type.description=Manually assign a list of partitions to this consumer. This interface does not allow for incremental assignment and will replace the previous assignment (if there is one).
+datasources.section.source-kafka.subscription.oneOf.1.properties.subscription_type.description=The Topic pattern from which the records will be read.
+datasources.section.source-kafka.test_topic.description=The topic used to test whether harana can consume messages.
+datasources.section.source-klaviyo.api_key.title=API Key
+datasources.section.source-klaviyo.start_date.title=Start Date
+datasources.section.source-klaviyo.api_key.description=Klaviyo API Key. See our docs if you need help finding this key.
+datasources.section.source-klaviyo.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.
+datasources.section.source-kustomer-singer.api_token.title=API Token
+datasources.section.source-kustomer-singer.start_date.title=Start Date
+datasources.section.source-kustomer-singer.api_token.description=Kustomer API Token. See the docs on how to obtain this
+datasources.section.source-kustomer-singer.start_date.description=The date from which you'd like to replicate the data
+datasources.section.source-kyriba.domain.title=Domain
+datasources.section.source-kyriba.end_date.title=End Date
+datasources.section.source-kyriba.password.title=Password
+datasources.section.source-kyriba.start_date.title=Start Date
+datasources.section.source-kyriba.username.title=Username
+datasources.section.source-kyriba.domain.description=Kyriba domain
+datasources.section.source-kyriba.end_date.description=The date the sync should end. If left empty, the sync will run to the current date.
+datasources.section.source-kyriba.password.description=Password to be used in basic auth
+datasources.section.source-kyriba.start_date.description=The date the sync should start from.
+datasources.section.source-kyriba.username.description=Username to be used in basic auth
+datasources.section.source-lemlist.api_key.title=API key
+datasources.section.source-lemlist.api_key.description=Lemlist API key.
+datasources.section.source-linkedin-ads.account_ids.title=Account IDs
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_secret.title=Client secret
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.refresh_token.title=Refresh token
+datasources.section.source-linkedin-ads.credentials.oneOf.0.title=OAuth2.0
+datasources.section.source-linkedin-ads.credentials.oneOf.1.properties.access_token.title=Access token
+datasources.section.source-linkedin-ads.credentials.oneOf.1.title=Access token
+datasources.section.source-linkedin-ads.credentials.title=Authentication *
+datasources.section.source-linkedin-ads.start_date.title=Start date
+datasources.section.source-linkedin-ads.account_ids.description=Specify the account IDs, separated by a space, to pull the data from. Leave empty if you want to pull the data from all associated accounts. See the LinkedIn Ads docs for more info.
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_id.description=The client ID of the LinkedIn Ads developer application.
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_secret.description=The client secret of the LinkedIn Ads developer application.
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.refresh_token.description=The key to refresh the expired access token.
+datasources.section.source-linkedin-ads.credentials.oneOf.1.properties.access_token.description=The token value generated using the authentication code. See the docs to obtain yours.
+datasources.section.source-linkedin-ads.start_date.description=UTC date in the format 2020-09-17. Any data before this date will not be replicated.
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_secret.title=Client secret
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.refresh_token.title=Refresh token
+datasources.section.source-linkedin-pages.credentials.oneOf.0.title=OAuth2.0
+datasources.section.source-linkedin-pages.credentials.oneOf.1.properties.access_token.title=Access token
+datasources.section.source-linkedin-pages.credentials.oneOf.1.title=Access token
+datasources.section.source-linkedin-pages.credentials.title=Authentication *
+datasources.section.source-linkedin-pages.org_id.title=Organization ID
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_id.description=The client ID of the LinkedIn developer application.
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_secret.description=The client secret of the LinkedIn developer application.
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.refresh_token.description=The token value generated using the LinkedIn Developers OAuth Token Tools. See the docs to obtain yours.
+datasources.section.source-linkedin-pages.credentials.oneOf.1.properties.access_token.description=The token value generated using the LinkedIn Developers OAuth Token Tools. See the docs to obtain yours.
+datasources.section.source-linkedin-pages.org_id.description=Specify the Organization ID
+datasources.section.source-linnworks.application_id.title=Application ID
+datasources.section.source-linnworks.application_secret.title=Application Secret
+datasources.section.source-linnworks.start_date.title=Start Date
+datasources.section.source-linnworks.token.title=API Token
+datasources.section.source-linnworks.application_id.description=Linnworks Application ID
+datasources.section.source-linnworks.application_secret.description=Linnworks Application Secret
+datasources.section.source-linnworks.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.
+datasources.section.source-looker.client_id.title=Client ID
+datasources.section.source-looker.client_secret.title=Client Secret
+datasources.section.source-looker.domain.title=Domain
+datasources.section.source-looker.run_look_ids.title=Look IDs to Run
+datasources.section.source-looker.client_id.description=The Client ID is the first part of an API3 key that is specific to each Looker user. See the docs for more information on how to generate this key.
+datasources.section.source-looker.client_secret.description=The Client Secret is the second part of an API3 key.
+datasources.section.source-looker.domain.description=Domain for your Looker account, e.g. harana.cloud.looker.com, looker.[clientname].com, or an IP address
+datasources.section.source-looker.run_look_ids.description=The IDs of any Looks to run (optional)
+datasources.section.source-mailchimp.credentials.oneOf.0.properties.access_token.title=Access Token
+datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.source-mailchimp.credentials.oneOf.0.title=OAuth2.0
+datasources.section.source-mailchimp.credentials.oneOf.1.properties.apikey.title=API Key
+datasources.section.source-mailchimp.credentials.oneOf.1.title=API Key
+datasources.section.source-mailchimp.credentials.title=Authentication *
+datasources.section.source-mailchimp.credentials.oneOf.0.properties.access_token.description=An access token generated using the above client ID and secret.
+datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application.
+datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application.
+datasources.section.source-mailchimp.credentials.oneOf.1.properties.apikey.description=Mailchimp API Key. See the docs for information on how to generate this key.
+datasources.section.source-mailgun.domain_region.title=Domain Region Code
+datasources.section.source-mailgun.private_key.title=Private API Key
+datasources.section.source-mailgun.start_date.title=Replication Start Date
+datasources.section.source-mailgun.domain_region.description=Domain region code. 'EU' or 'US' are possible values. The default is 'US'.
+datasources.section.source-mailgun.private_key.description=Primary account API key to access your Mailgun data.
+datasources.section.source-mailgun.start_date.description=UTC date and time in the format 2020-10-01 00:00:00. Any data before this date will not be replicated. If omitted, defaults to 3 days ago.
+datasources.section.source-marketo.client_id.title=Client ID
+datasources.section.source-marketo.client_secret.title=Client Secret
+datasources.section.source-marketo.domain_url.title=Domain URL
+datasources.section.source-marketo.start_date.title=Start Date
+datasources.section.source-marketo.client_id.description=The Client ID of your Marketo developer application. See the docs for info on how to obtain this.
+datasources.section.source-marketo.client_secret.description=The Client Secret of your Marketo developer application. See the docs for info on how to obtain this.
+datasources.section.source-marketo.domain_url.description=Your Marketo Base URL. See the docs for info on how to obtain this.
+datasources.section.source-marketo.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.refresh_token.title=Refresh Token
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.tenant_id.title=Directory (tenant) ID
+datasources.section.source-microsoft-teams.credentials.oneOf.0.title=Authenticate via Microsoft (OAuth 2.0)
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_id.title=Client ID
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_secret.title=Client Secret
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.tenant_id.title=Directory (tenant) ID
+datasources.section.source-microsoft-teams.credentials.oneOf.1.title=Authenticate via Microsoft
+datasources.section.source-microsoft-teams.credentials.title=Authentication mechanism
+datasources.section.source-microsoft-teams.period.title=Period
+datasources.section.source-microsoft-teams.credentials.description=Choose how to authenticate to Microsoft
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_id.description=The Client ID of your Microsoft Teams developer application.
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Microsoft Teams developer application.
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.refresh_token.description=A Refresh Token to renew the expired Access Token.
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.tenant_id.description=A globally unique identifier (GUID) that is different than your organization name or domain. Follow these steps to obtain it: open one of the Teams where you belong inside the Teams Application -> Click on the … next to the Team title -> Click on Get link to team -> Copy the link to the team and grab the tenant ID from the URL
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_id.description=The Client ID of your Microsoft Teams developer application.
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_secret.description=The Client Secret of your Microsoft Teams developer application.
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.tenant_id.description=A globally unique identifier (GUID) that is different than your organization name or domain. Follow these steps to obtain it: open one of the Teams where you belong inside the Teams Application -> Click on the … next to the Team title -> Click on Get link to team -> Copy the link to the team and grab the tenant ID from the URL
+datasources.section.source-microsoft-teams.period.description=Specifies the length of time over which the Team Device Report stream is aggregated. The supported values are: D7, D30, D90, and D180.
+datasources.section.source-mixpanel.api_secret.title=Project Secret
+datasources.section.source-mixpanel.attribution_window.title=Attribution Window
+datasources.section.source-mixpanel.date_window_size.title=Date slicing window
+datasources.section.source-mixpanel.end_date.title=End Date
+datasources.section.source-mixpanel.project_timezone.title=Project Timezone
+datasources.section.source-mixpanel.region.title=Region
+datasources.section.source-mixpanel.select_properties_by_default.title=Select Properties By Default
+datasources.section.source-mixpanel.start_date.title=Start Date
+datasources.section.source-mixpanel.api_secret.description=Mixpanel project secret. See the docs for more information on how to obtain this.
+datasources.section.source-mixpanel.attribution_window.description=A period of time for attributing results to ads, and the lookback period after those actions occur during which ad results are counted. The default attribution window is 5 days.
+datasources.section.source-mixpanel.date_window_size.description=Defines the window size in days used to slice through data. You can reduce it if the amount of data in each window is too big for your environment.
+datasources.section.source-mixpanel.end_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data after this date will not be replicated. Leave empty to always sync to the most recent date.
+datasources.section.source-mixpanel.project_timezone.description=Time zone in which integer date times are stored. The project timezone may be found in the project settings in the Mixpanel console.
+datasources.section.source-mixpanel.region.description=The region of the Mixpanel domain instance, either US or EU.
+datasources.section.source-mixpanel.select_properties_by_default.description=Setting this config parameter to TRUE ensures that new properties on events and engage records are captured. Otherwise new properties will be ignored.
+datasources.section.source-mixpanel.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. If this option is not set, the connector will replicate data from up to one year ago by default.
+datasources.section.source-monday.credentials.oneOf.0.properties.access_token.title=Access Token
+datasources.section.source-monday.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-monday.credentials.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.source-monday.credentials.oneOf.0.properties.subdomain.title=Subdomain/Slug
+datasources.section.source-monday.credentials.oneOf.0.title=OAuth2.0
+datasources.section.source-monday.credentials.oneOf.1.properties.api_token.title=Personal API Token
+datasources.section.source-monday.credentials.oneOf.1.title=API Token
+datasources.section.source-monday.credentials.title=Authorization Method
+datasources.section.source-monday.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests.
+datasources.section.source-monday.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application.
+datasources.section.source-monday.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application.
+datasources.section.source-monday.credentials.oneOf.0.properties.subdomain.description=Slug/subdomain of the account, or the first part of the URL that comes before .monday.com
+datasources.section.source-monday.credentials.oneOf.1.properties.api_token.description=API Token for making authenticated requests.
+datasources.section.source-mongodb.auth_source.title=Authentication source
+datasources.section.source-mongodb.database.title=Database name
+datasources.section.source-mongodb.host.title=Host
+datasources.section.source-mongodb.password.title=Password
+datasources.section.source-mongodb.port.title=Port
+datasources.section.source-mongodb.replica_set.title=Replica Set
+datasources.section.source-mongodb.ssl.title=TLS connection
+datasources.section.source-mongodb.user.title=User
+datasources.section.source-mongodb.auth_source.description=Authentication source where user information is stored. See the Mongo docs for more info.
+datasources.section.source-mongodb.database.description=Database to be replicated.
+datasources.section.source-mongodb.host.description=Host of a Mongo database to be replicated.
+datasources.section.source-mongodb.password.description=Password
+datasources.section.source-mongodb.port.description=Port of a Mongo database to be replicated.
+datasources.section.source-mongodb.replica_set.description=The name of the set to filter servers by, when connecting to a replica set (Under this condition, the 'TLS connection' value automatically becomes 'true'). See the Mongo docs for more info.
+datasources.section.source-mongodb.ssl.description=If this switch is enabled, TLS connections will be used to connect to MongoDB.
+datasources.section.source-mongodb.user.description=User
+datasources.section.source-mongodb-v2.auth_source.title=Authentication Source
+datasources.section.source-mongodb-v2.database.title=Database Name
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.host.title=Host
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.port.title=Port
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.tls.title=TLS Connection
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.title=Standalone MongoDb Instance
+datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.replica_set.title=Replica Set
+datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.server_addresses.title=Server Addresses
+datasources.section.source-mongodb-v2.instance_type.oneOf.1.title=Replica Set
+datasources.section.source-mongodb-v2.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL
+datasources.section.source-mongodb-v2.instance_type.oneOf.2.title=MongoDB Atlas
+datasources.section.source-mongodb-v2.instance_type.title=MongoDb Instance Type
+datasources.section.source-mongodb-v2.password.title=Password
+datasources.section.source-mongodb-v2.user.title=User
+datasources.section.source-mongodb-v2.auth_source.description=The authentication source where the user information is stored.
+datasources.section.source-mongodb-v2.database.description=The database you want to replicate.
+datasources.section.source-mongodb-v2.instance_type.description=The MongoDb instance to connect to. For MongoDB Atlas and Replica Set, a TLS connection is used by default.
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.host.description=The host name of the Mongo database.
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.port.description=The port of the Mongo database.
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.tls.description=Indicates whether TLS encryption protocol will be used to connect to MongoDB. It is recommended to use TLS connection if possible. For more information see documentation.
+datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.replica_set.description=A replica set in MongoDB is a group of mongod processes that maintain the same data set.
+datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify `host`:`port` of each member separated by a comma.
+datasources.section.source-mongodb-v2.instance_type.oneOf.2.properties.cluster_url.description=The URL of a cluster to connect to.
+datasources.section.source-mongodb-v2.password.description=The password associated with this username.
+datasources.section.source-mongodb-v2.user.description=The username which is used to access the database.
+datasources.section.source-mssql.database.title=Database
+datasources.section.source-mssql.host.title=Host
+datasources.section.source-mssql.jdbc_url_params.title=JDBC URL Params
+datasources.section.source-mssql.password.title=Password
+datasources.section.source-mssql.port.title=Port
+datasources.section.source-mssql.replication_method.oneOf.0.title=Standard
+datasources.section.source-mssql.replication_method.oneOf.1.properties.data_to_sync.title=Data to Sync
+datasources.section.source-mssql.replication_method.oneOf.1.properties.snapshot_isolation.title=Initial Snapshot Isolation Level
+datasources.section.source-mssql.replication_method.oneOf.1.title=Logical Replication (CDC)
+datasources.section.source-mssql.replication_method.title=Replication Method
+datasources.section.source-mssql.schemas.title=Schemas
+datasources.section.source-mssql.ssl_method.oneOf.0.title=Unencrypted
+datasources.section.source-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate)
+datasources.section.source-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate
+datasources.section.source-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate)
+datasources.section.source-mssql.ssl_method.title=SSL Method
+datasources.section.source-mssql.username.title=Username
+datasources.section.source-mssql.database.description=The name of the database.
+datasources.section.source-mssql.host.description=The hostname of the database.
+datasources.section.source-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.source-mssql.password.description=The password associated with the username.
+datasources.section.source-mssql.port.description=The port of the database.
+datasources.section.source-mssql.replication_method.description=The replication method used for extracting data from the database. STANDARD replication requires no setup on the DB side but will not be able to represent deletions incrementally. CDC uses {TBC} to detect inserts, updates, and deletes. This needs to be configured on the source database itself.
+datasources.section.source-mssql.replication_method.oneOf.0.description=Standard replication requires no setup on the DB side but will not be able to represent deletions incrementally.
+datasources.section.source-mssql.replication_method.oneOf.1.description=CDC uses {TBC} to detect inserts, updates, and deletes. This needs to be configured on the source database itself.
+datasources.section.source-mssql.replication_method.oneOf.1.properties.data_to_sync.description=What data should be synced under the CDC. "Existing and New" will read existing data as a snapshot, and sync new changes through CDC. "New Changes Only" will skip the initial snapshot, and only sync new changes through CDC.
+datasources.section.source-mssql.replication_method.oneOf.1.properties.snapshot_isolation.description=Existing data in the database are synced through an initial snapshot. This parameter controls the isolation level that will be used during the initial snapshotting. If you choose the "Snapshot" level, you must enable the snapshot isolation mode on the database.
+datasources.section.source-mssql.schemas.description=The list of schemas to sync from. Defaults to user. Case sensitive.
+datasources.section.source-mssql.ssl_method.description=The encryption method which is used when communicating with the database.
+datasources.section.source-mssql.ssl_method.oneOf.0.description=Data transfer will not be encrypted.
+datasources.section.source-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!)
+datasources.section.source-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.source-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate.
+datasources.section.source-mssql.username.description=The username which is used to access the database.
+datasources.section.source-my-hours.email.title=Email
+datasources.section.source-my-hours.logs_batch_size.title=Time logs batch size
+datasources.section.source-my-hours.password.title=Password
+datasources.section.source-my-hours.start_date.title=Start Date
+datasources.section.source-my-hours.email.description=Your My Hours username
+datasources.section.source-my-hours.logs_batch_size.description=Pagination size used for retrieving logs in days
+datasources.section.source-my-hours.password.description=The password associated with the username
+datasources.section.source-my-hours.start_date.description=Start date for collecting time logs
+datasources.section.source-mysql.database.title=Database
+datasources.section.source-mysql.host.title=Host
+datasources.section.source-mysql.jdbc_url_params.title=JDBC URL Params
+datasources.section.source-mysql.password.title=Password
+datasources.section.source-mysql.port.title=Port
+datasources.section.source-mysql.replication_method.oneOf.0.title=STANDARD
+datasources.section.source-mysql.replication_method.oneOf.1.title=Logical Replication (CDC)
+datasources.section.source-mysql.replication_method.title=Replication Method
+datasources.section.source-mysql.ssl.title=SSL Connection
+datasources.section.source-mysql.ssl_mode.oneOf.0.title=preferred
+datasources.section.source-mysql.ssl_mode.oneOf.1.title=required
+datasources.section.source-mysql.ssl_mode.oneOf.2.properties.ca_certificate.title=CA certificate
+datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_certificate.title=Client certificate
+datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_key.title=Client key
+datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_key_password.title=Client key password
+datasources.section.source-mysql.ssl_mode.oneOf.2.title=Verify CA
+datasources.section.source-mysql.ssl_mode.oneOf.3.properties.ca_certificate.title=CA certificate
+datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_certificate.title=Client certificate
+datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_key.title=Client key
+datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_key_password.title=Client key password
+datasources.section.source-mysql.ssl_mode.oneOf.3.title=Verify Identity
+datasources.section.source-mysql.ssl_mode.title=SSL modes
+datasources.section.source-mysql.username.title=Username
+datasources.section.source-mysql.database.description=The database name.
+datasources.section.source-mysql.host.description=The host name of the database.
+datasources.section.source-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.source-mysql.password.description=The password associated with the username.
+datasources.section.source-mysql.port.description=The port to connect to.
+datasources.section.source-mysql.replication_method.description=Replication method to use for extracting data from the database.
+datasources.section.source-mysql.replication_method.oneOf.0.description=Standard replication requires no setup on the DB side but will not be able to represent deletions incrementally.
+datasources.section.source-mysql.replication_method.oneOf.1.description=CDC uses the Binlog to detect inserts, updates, and deletes. This needs to be configured on the source database itself.
+datasources.section.source-mysql.ssl.description=Encrypt data using SSL.
+datasources.section.source-mysql.ssl_mode.description=SSL connection modes.
+datasources.section.destination-kafka.bootstrap_servers.description=A list of host/port pairs to use for establishing the initial connection to the Kafka cluster, in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down).
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server.
+datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses.
+datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.
+datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer.
+datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns.
+datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream.
+datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request.
+datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block.
+datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5.
+datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes.
+datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers.
+datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.
+datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.
+datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error.
+datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used.
+datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum.
+datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established.
+datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka.
+datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-keen.api_key.title=API Key
+datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp
+datasources.section.destination-keen.project_id.title=Project ID
+datasources.section.destination-keen.api_key.description=To get Keen Master API Key, navigate to the Access tab from the left-hand side panel and check the Project Details section.
+datasources.section.destination-keen.infer_timestamp.description=Allow connector to guess keen.timestamp value based on the streamed data.
+datasources.section.destination-keen.project_id.description=To get Keen Project ID, navigate to the Access tab from the left-hand side panel and check the Project Details section.
+datasources.section.destination-kinesis.accessKey.title=Access Key
+datasources.section.destination-kinesis.bufferSize.title=Buffer Size
+datasources.section.destination-kinesis.endpoint.title=Endpoint
+datasources.section.destination-kinesis.privateKey.title=Private Key
+datasources.section.destination-kinesis.region.title=Region
+datasources.section.destination-kinesis.shardCount.title=Shard Count
+datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for current user.
+datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing Kinesis records before being batch streamed.
+datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint.
+datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that are unique for each account, also known as a "recovery phrase".
+datasources.section.destination-kinesis.region.description=AWS region. Your account determines the Regions that are available to you.
+datasources.section.destination-kinesis.shardCount.description=Number of shards to which the data should be streamed.
+datasources.section.destination-kvdb.bucket_id.title=Bucket ID
+datasources.section.destination-kvdb.secret_key.title=Secret Key
+datasources.section.destination-kvdb.bucket_id.description=The ID of your KVdb bucket.
+datasources.section.destination-kvdb.secret_key.description=Your bucket Secret Key.
+datasources.section.destination-local-json.destination_path.title=Destination Path
+datasources.section.destination-local-json.destination_path.description=Path to the directory where JSON files will be written. The files will be placed inside that local mount. For more information check out our docs
+datasources.section.destination-mariadb-columnstore.database.title=Database
+datasources.section.destination-mariadb-columnstore.host.title=Host
+datasources.section.destination-mariadb-columnstore.password.title=Password
+datasources.section.destination-mariadb-columnstore.port.title=Port
+datasources.section.destination-mariadb-columnstore.username.title=Username
+datasources.section.destination-mariadb-columnstore.database.description=Name of the database.
+datasources.section.destination-mariadb-columnstore.host.description=The Hostname of the database.
+datasources.section.destination-mariadb-columnstore.password.description=The Password associated with the username.
+datasources.section.destination-mariadb-columnstore.port.description=The Port of the database.
+datasources.section.destination-mariadb-columnstore.username.description=The Username which is used to access the database.
+datasources.section.destination-meilisearch.api_key.title=API Key
+datasources.section.destination-meilisearch.host.title=Host
+datasources.section.destination-meilisearch.api_key.description=MeiliSearch API Key. See the docs for more information on how to obtain this key.
+datasources.section.destination-meilisearch.host.description=Hostname of the MeiliSearch instance.
+datasources.section.destination-mongodb.auth_type.oneOf.0.title=None
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.title=Password
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.title=User
+datasources.section.destination-mongodb.auth_type.oneOf.1.title=Login/Password
+datasources.section.destination-mongodb.auth_type.title=Authorization type
+datasources.section.destination-mongodb.database.title=DB Name
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.title=Host
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.title=Port
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.title=TLS Connection
+datasources.section.destination-mongodb.instance_type.oneOf.0.title=Standalone MongoDb Instance
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.title=Server addresses
+datasources.section.destination-mongodb.instance_type.oneOf.1.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL
+datasources.section.destination-mongodb.instance_type.oneOf.2.title=MongoDB Atlas
+datasources.section.destination-mongodb.instance_type.title=MongoDb Instance Type
+datasources.section.destination-mongodb.auth_type.description=Authorization type.
+datasources.section.destination-mongodb.auth_type.oneOf.0.description=None.
+datasources.section.destination-mongodb.auth_type.oneOf.1.description=Login/Password.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.description=Password associated with the username.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.description=Username to use to access the database.
+datasources.section.destination-mongodb.database.description=Name of the database.
+datasources.section.destination-mongodb.instance_type.description=MongoDb instance to connect to. For MongoDB Atlas and Replica Set, a TLS connection is used by default.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.description=The Host of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.description=The Port of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.description=Indicates whether TLS encryption protocol will be used to connect to MongoDB. It is recommended to use TLS connection if possible. For more information see documentation.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.description=A replica set name.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify `host`:`port` of each member separated by a comma.
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.description=URL of a cluster to connect to.
+datasources.section.destination-mqtt.automatic_reconnect.title=Automatic reconnect
+datasources.section.destination-mqtt.broker_host.title=MQTT broker host
+datasources.section.destination-mqtt.broker_port.title=MQTT broker port
+datasources.section.destination-mqtt.clean_session.title=Clean session
+datasources.section.destination-mqtt.client.title=Client ID
+datasources.section.destination-mqtt.connect_timeout.title=Connect timeout
+datasources.section.destination-mqtt.message_qos.title=Message QoS
+datasources.section.destination-mqtt.message_retained.title=Message retained
+datasources.section.destination-mqtt.password.title=Password
+datasources.section.destination-mqtt.publisher_sync.title=Sync publisher
+datasources.section.destination-mqtt.topic_pattern.title=Topic pattern
+datasources.section.destination-mqtt.topic_test.title=Test topic
+datasources.section.destination-mqtt.use_tls.title=Use TLS
+datasources.section.destination-mqtt.username.title=Username
+datasources.section.destination-mqtt.automatic_reconnect.description=Whether the client will automatically attempt to reconnect to the server if the connection is lost.
+datasources.section.destination-mqtt.broker_host.description=Host of the broker to connect to.
+datasources.section.destination-mqtt.broker_port.description=Port of the broker.
+datasources.section.destination-mqtt.clean_session.description=Whether the client and server should remember state across restarts and reconnects.
+datasources.section.destination-mqtt.client.description=A client identifier that is unique on the server being connected to.
+datasources.section.destination-mqtt.connect_timeout.description=Maximum time interval (in seconds) the client will wait for the network connection to the MQTT server to be established.
+datasources.section.destination-mqtt.message_qos.description=Quality of service used for each message to be delivered.
+datasources.section.destination-mqtt.message_retained.description=Whether or not the published message should be retained by the messaging engine.
+datasources.section.destination-mqtt.password.description=Password to use for the connection.
+datasources.section.destination-mqtt.publisher_sync.description=Wait synchronously until the record has been sent to the broker.
+datasources.section.destination-mqtt.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-mqtt.topic_test.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-mqtt.use_tls.description=Whether to use TLS encryption on the connection.
+datasources.section.destination-mqtt.username.description=User name to use for the connection.
+datasources.section.destination-mssql.database.title=DB Name
+datasources.section.destination-mssql.host.title=Host
+datasources.section.destination-mssql.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-mssql.password.title=Password
+datasources.section.destination-mssql.port.title=Port
+datasources.section.destination-mssql.schema.title=Default Schema
+datasources.section.destination-mssql.ssl_method.oneOf.0.title=Unencrypted
+datasources.section.destination-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate)
+datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate
+datasources.section.destination-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate)
+datasources.section.destination-mssql.ssl_method.title=SSL Method
+datasources.section.destination-mssql.username.title=User
+datasources.section.destination-mssql.database.description=The name of the MSSQL database.
+datasources.section.destination-mssql.host.description=The host name of the MSSQL database.
+datasources.section.destination-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-mssql.password.description=The password associated with this username.
+datasources.section.destination-mssql.port.description=The port of the MSSQL database.
+datasources.section.destination-mssql.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public".
+datasources.section.destination-mssql.ssl_method.description=The encryption method which is used to communicate with the database.
+datasources.section.destination-mssql.ssl_method.oneOf.0.description=The data transfer will not be encrypted.
+datasources.section.destination-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!)
+datasources.section.destination-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate.
+datasources.section.destination-mssql.username.description=The username which is used to access the database.
+datasources.section.destination-mysql.database.title=DB Name
+datasources.section.destination-mysql.host.title=Host
+datasources.section.destination-mysql.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-mysql.password.title=Password
+datasources.section.destination-mysql.port.title=Port
+datasources.section.destination-mysql.ssl.title=SSL Connection
+datasources.section.destination-mysql.username.title=User
+datasources.section.destination-mysql.database.description=Name of the database.
+datasources.section.destination-mysql.host.description=Hostname of the database.
+datasources.section.destination-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-mysql.password.description=Password associated with the username.
+datasources.section.destination-mysql.port.description=Port of the database.
+datasources.section.destination-mysql.ssl.description=Encrypt data using SSL.
+datasources.section.destination-mysql.username.description=Username to use to access the database.
+datasources.section.destination-oracle.encryption.oneOf.0.title=Unencrypted
+datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.title=Encryption Algorithm
+datasources.section.destination-oracle.encryption.oneOf.1.title=Native Network Encryption (NNE)
+datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.title=SSL PEM file
+datasources.section.destination-oracle.encryption.oneOf.2.title=TLS Encrypted (verify certificate)
+datasources.section.destination-oracle.encryption.title=Encryption
+datasources.section.destination-oracle.host.title=Host
+datasources.section.destination-oracle.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-oracle.password.title=Password
+datasources.section.destination-oracle.port.title=Port
+datasources.section.destination-oracle.schema.title=Default Schema
+datasources.section.destination-oracle.sid.title=SID
+datasources.section.destination-oracle.username.title=User
+datasources.section.destination-oracle.encryption.description=The encryption method which is used when communicating with the database.
+datasources.section.destination-oracle.encryption.oneOf.0.description=Data transfer will not be encrypted.
+datasources.section.destination-oracle.encryption.oneOf.1.description=The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports.
+datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.description=This parameter defines the database encryption algorithm.
+datasources.section.destination-oracle.encryption.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations.
+datasources.section.destination-oracle.host.description=The hostname of the database.
+datasources.section.destination-oracle.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-oracle.password.description=The password associated with the username.
+datasources.section.destination-oracle.port.description=The port of the database.
+datasources.section.destination-oracle.schema.description=The default schema is used as the target schema for all statements issued from the connection that do not explicitly specify a schema name. The usual value for this field is "airbyte". In Oracle, schemas and users are the same thing, so the "user" parameter is used as the login credentials and this is used for the default Airbyte message schema.
+datasources.section.destination-oracle.sid.description=The System Identifier uniquely distinguishes the instance from any other instance on the same computer.
+datasources.section.destination-oracle.username.description=The username to access the database. This user must have CREATE USER privileges in the database.
+datasources.section.destination-postgres.database.title=DB Name
+datasources.section.destination-postgres.host.title=Host
+datasources.section.destination-postgres.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-postgres.password.title=Password
+datasources.section.destination-postgres.port.title=Port
+datasources.section.destination-postgres.schema.title=Default Schema
+datasources.section.destination-postgres.ssl.title=SSL Connection
+datasources.section.destination-postgres.ssl_mode.oneOf.0.title=disable
+datasources.section.destination-postgres.ssl_mode.oneOf.1.title=allow
+datasources.section.destination-postgres.ssl_mode.oneOf.2.title=prefer
+datasources.section.destination-postgres.ssl_mode.oneOf.3.title=require
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.title=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.title=Client key password (Optional)
+datasources.section.destination-postgres.ssl_mode.oneOf.4.title=verify-ca
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.title=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.title=Client certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.title=Client key
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.title=Client key password (Optional)
+datasources.section.destination-postgres.ssl_mode.oneOf.5.title=verify-full
+datasources.section.destination-postgres.ssl_mode.title=SSL modes
+datasources.section.destination-postgres.username.title=User
+datasources.section.destination-postgres.database.description=Name of the database.
+datasources.section.destination-postgres.host.description=Hostname of the database.
+datasources.section.destination-postgres.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-postgres.password.description=Password associated with the username.
+datasources.section.destination-postgres.port.description=Port of the database.
+datasources.section.destination-postgres.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public".
+datasources.section.destination-postgres.ssl.description=Encrypt data using SSL. When activating SSL, please select one of the connection modes.
+datasources.section.destination-postgres.ssl_mode.description=SSL connection modes.
+datasources.section.destination-postgres.ssl_mode.oneOf.0.description=Disable SSL.
+datasources.section.destination-postgres.ssl_mode.oneOf.1.description=Allow SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.2.description=Prefer SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.3.description=Require SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.4.description=Verify-ca SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.description=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.description=Password for the key storage. This field is optional; if you do not provide it, the password will be generated automatically.
+datasources.section.destination-postgres.ssl_mode.oneOf.5.description=Verify-full SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.description=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.description=Client certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.description=Client key
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.description=Password for the key storage. This field is optional; if you do not provide it, the password will be generated automatically.
+datasources.section.destination-postgres.username.description=Username to use to access the database.
+datasources.section.destination-pubsub.credentials_json.title=Credentials JSON
+datasources.section.destination-pubsub.project_id.title=Project ID
+datasources.section.destination-pubsub.topic_id.title=PubSub Topic ID
+datasources.section.destination-pubsub.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key.
+datasources.section.destination-pubsub.project_id.description=The GCP project ID for the project containing the target PubSub.
+datasources.section.destination-pubsub.topic_id.description=The PubSub topic ID in the given GCP project ID.
+datasources.section.destination-pulsar.batching_enabled.title=Enable batching
+datasources.section.destination-pulsar.batching_max_messages.title=Batching max messages
+datasources.section.destination-pulsar.batching_max_publish_delay.title=Batching max publish delay
+datasources.section.destination-pulsar.block_if_queue_full.title=Block if queue is full
+datasources.section.destination-pulsar.brokers.title=Pulsar brokers
+datasources.section.destination-pulsar.compression_type.title=Compression type
+datasources.section.destination-pulsar.max_pending_messages.title=Max pending messages
+datasources.section.destination-pulsar.max_pending_messages_across_partitions.title=Max pending messages across partitions
+datasources.section.destination-pulsar.producer_name.title=Producer name
+datasources.section.destination-pulsar.producer_sync.title=Sync producer
+datasources.section.destination-pulsar.send_timeout_ms.title=Message send timeout
+datasources.section.destination-pulsar.topic_namespace.title=Topic namespace
+datasources.section.destination-pulsar.topic_pattern.title=Topic pattern
+datasources.section.destination-pulsar.topic_tenant.title=Topic tenant
+datasources.section.destination-pulsar.topic_test.title=Test topic
+datasources.section.destination-pulsar.topic_type.title=Topic type
+datasources.section.destination-pulsar.use_tls.title=Use TLS
+datasources.section.destination-pulsar.batching_enabled.description=Control whether automatic batching of messages is enabled for the producer.
+datasources.section.destination-pulsar.batching_max_messages.description=Maximum number of messages permitted in a batch.
+datasources.section.destination-pulsar.batching_max_publish_delay.description=Time period in milliseconds within which the messages sent will be batched.
+datasources.section.destination-pulsar.block_if_queue_full.description=Whether the send operation should block when the outgoing message queue is full.
+datasources.section.destination-pulsar.brokers.description=A list of host/port pairs to use for establishing the initial connection to the Pulsar cluster.
+datasources.section.destination-pulsar.compression_type.description=Compression type for the producer.
+datasources.section.destination-pulsar.max_pending_messages.description=The maximum size of a queue holding pending messages.
+datasources.section.destination-pulsar.max_pending_messages_across_partitions.description=The maximum number of pending messages across partitions.
+datasources.section.destination-pulsar.producer_name.description=Name for the producer. If not filled, the system will generate a globally unique name.
+datasources.section.destination-pulsar.producer_sync.description=Wait synchronously until the record has been sent to Pulsar.
+datasources.section.destination-pulsar.send_timeout_ms.description=If a message is not acknowledged by the server before the send timeout (in ms) expires, an error occurs.
+datasources.section.destination-pulsar.topic_namespace.description=The administrative unit of the topic, which acts as a grouping mechanism for related topics. Most topic configuration is performed at the namespace level. Each tenant has one or multiple namespaces.
+datasources.section.destination-pulsar.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-pulsar.topic_tenant.description=The topic tenant within the instance. Tenants are essential to multi-tenancy in Pulsar, and are spread across clusters.
+datasources.section.destination-pulsar.topic_test.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-pulsar.topic_type.description=It identifies the type of topic. Pulsar supports two kinds of topics: persistent and non-persistent. In a persistent topic, all messages are durably persisted on disk (on multiple disks unless the broker is standalone), whereas a non-persistent topic does not persist messages to disk.
+datasources.section.destination-pulsar.use_tls.description=Whether to use TLS encryption on the connection.
+datasources.section.destination-rabbitmq.exchange.description=The exchange name.
+datasources.section.destination-rabbitmq.host.description=The RabbitMQ host name.
+datasources.section.destination-rabbitmq.password.description=The password to connect.
+datasources.section.destination-rabbitmq.port.description=The RabbitMQ port.
+datasources.section.destination-rabbitmq.routing_key.description=The routing key.
+datasources.section.destination-rabbitmq.ssl.description=Whether SSL is enabled.
+datasources.section.destination-rabbitmq.username.description=The username to connect.
+datasources.section.destination-rabbitmq.virtual_host.description=The RabbitMQ virtual host name.
+datasources.section.destination-redis.cache_type.title=Cache type
+datasources.section.destination-redis.host.title=Host
+datasources.section.destination-redis.password.title=Password
+datasources.section.destination-redis.port.title=Port
+datasources.section.destination-redis.username.title=Username
+datasources.section.destination-redis.cache_type.description=Redis cache type to store data in.
+datasources.section.destination-redis.host.description=Redis host to connect to.
+datasources.section.destination-redis.password.description=Password associated with Redis.
+datasources.section.destination-redis.port.description=Port of Redis.
+datasources.section.destination-redis.username.description=Username associated with Redis.
+datasources.section.destination-redshift.database.title=Database
+datasources.section.destination-redshift.host.title=Host
+datasources.section.destination-redshift.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-redshift.password.title=Password
+datasources.section.destination-redshift.port.title=Port
+datasources.section.destination-redshift.schema.title=Default Schema
+datasources.section.destination-redshift.uploading_method.oneOf.0.title=Standard
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.title=S3 Key Id
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.title=No encryption
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.title=Key
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.title=AES-CBC envelope encryption
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.title=Encryption
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.title=S3 Filename pattern (Optional)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.title=Purge Staging Files and Tables (Optional)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.title=S3 Bucket Name
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.title=S3 Bucket Path (Optional)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.title=S3 Bucket Region
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.title=S3 Access Key
+datasources.section.destination-redshift.uploading_method.oneOf.1.title=S3 Staging
+datasources.section.destination-redshift.uploading_method.title=Uploading Method
+datasources.section.destination-redshift.username.title=Username
+datasources.section.destination-redshift.database.description=Name of the database.
+datasources.section.destination-redshift.host.description=Host Endpoint of the Redshift Cluster (must include the cluster-id, region and end with .redshift.amazonaws.com)
+datasources.section.destination-redshift.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-redshift.password.description=Password associated with the username.
+datasources.section.destination-redshift.port.description=Port of the database.
+datasources.section.destination-redshift.schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public".
+datasources.section.destination-redshift.uploading_method.description=The method used to upload data to the database.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.description=This ID grants access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. See AWS docs on how to generate an access key ID and secret access key.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.description=How to encrypt the staging data
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.description=Staging data will be stored in plaintext.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.description=Staging data will be encrypted using AES-CBC envelope encryption.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.description=The key, base64-encoded. Must be either 128, 192, or 256 bits. Leave blank to have Airbyte generate an ephemeral key for each sync.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.description=Whether to delete the staging files from S3 after completing the sync. See docs for details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.description=The name of the staging S3 bucket to use if utilising a COPY strategy. COPY is recommended for production workloads for better speed and scalability. See AWS docs for more details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. If not provided, then defaults to the root directory. See path's name recommendations for more details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a COPY strategy. See AWS docs for details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.description=The corresponding secret to the above access key id. See AWS docs on how to generate an access key ID and secret access key.
+datasources.section.destination-redshift.username.description=Username to use to access the database.
+datasources.section.destination-rockset.api_key.title=Api Key
+datasources.section.destination-rockset.api_server.title=Api Server
+datasources.section.destination-rockset.workspace.title=Workspace
+datasources.section.destination-rockset.api_key.description=Rockset API key.
+datasources.section.destination-rockset.api_server.description=Rockset API URL.
+datasources.section.destination-rockset.workspace.description=The Rockset workspace in which collections will be created and written to.
+datasources.section.destination-s3.access_key_id.title=S3 Key ID *
+datasources.section.destination-s3.file_name_pattern.title=S3 Filename pattern (Optional)
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate Level
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.title=Compression Codec *
+datasources.section.destination-s3.format.oneOf.0.properties.format_type.title=Format Type *
+datasources.section.destination-s3.format.oneOf.0.title=Avro: Apache Avro
+datasources.section.destination-s3.format.oneOf.1.properties.compression.oneOf.0.title=No Compression
+datasources.section.destination-s3.format.oneOf.1.properties.compression.oneOf.1.title=GZIP
+datasources.section.destination-s3.format.oneOf.1.properties.compression.title=Compression
+datasources.section.destination-s3.format.oneOf.1.properties.flattening.title=Normalization (Flattening)
+datasources.section.destination-s3.format.oneOf.1.properties.format_type.title=Format Type *
+datasources.section.destination-s3.format.oneOf.1.title=CSV: Comma-Separated Values
+datasources.section.destination-s3.format.oneOf.2.properties.compression.oneOf.0.title=No Compression
+datasources.section.destination-s3.format.oneOf.2.properties.compression.oneOf.1.title=GZIP
+datasources.section.destination-s3.format.oneOf.2.properties.compression.title=Compression
+datasources.section.destination-s3.format.oneOf.2.properties.format_type.title=Format Type *
+datasources.section.destination-s3.format.oneOf.2.title=JSON Lines: Newline-delimited JSON
+datasources.section.destination-s3.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.format_type.title=Format Type *
+datasources.section.destination-s3.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional)
+datasources.section.destination-s3.format.oneOf.3.title=Parquet: Columnar Storage
+datasources.section.destination-s3.format.title=Output Format *
+datasources.section.destination-s3.s3_bucket_name.title=S3 Bucket Name
+datasources.section.destination-s3.s3_bucket_path.title=S3 Bucket Path
+datasources.section.destination-s3.s3_bucket_region.title=S3 Bucket Region
+datasources.section.destination-s3.s3_endpoint.title=Endpoint (Optional)
+datasources.section.destination-s3.s3_path_format.title=S3 Path Format (Optional)
+datasources.section.destination-s3.secret_access_key.title=S3 Access Key *
+datasources.section.destination-s3.access_key_id.description=The access key ID to access the S3 bucket. Airbyte requires Read and Write permissions to the given bucket. Read more here.
+datasources.section.destination-s3.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s)
+datasources.section.destination-s3.format.description=Format of the data output. See here for more details
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Defaults to no compression.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=See here for details.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block.
+datasources.section.destination-s3.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz").
+datasources.section.destination-s3.format.oneOf.1.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details.
+datasources.section.destination-s3.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz").
+datasources.section.destination-s3.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB.
+datasources.section.destination-s3.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages.
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_encoding.description=Default: true.
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for dictionary. Default: 1024 KB.
+datasources.section.destination-s3.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. This is also the minimum size of a row group. Default: 8 MB.
+datasources.section.destination-s3.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB.
+datasources.section.destination-s3.s3_bucket_name.description=The name of the S3 bucket. Read more here.
+datasources.section.destination-s3.s3_bucket_path.description=Directory under the S3 bucket where data will be written. Read more here
+datasources.section.destination-s3.s3_bucket_region.description=The region of the S3 bucket. See here for all region codes.
+datasources.section.destination-s3.s3_endpoint.description=Your S3 endpoint url. Read more here
+datasources.section.destination-s3.s3_path_format.description=Format string on how data will be organized inside the S3 bucket directory. Read more here
+datasources.section.destination-s3.secret_access_key.description=The corresponding secret to the access key ID. Read more here
+datasources.section.destination-scylla.address.title=Address
+datasources.section.destination-scylla.keyspace.title=Keyspace
+datasources.section.destination-scylla.password.title=Password
+datasources.section.destination-scylla.port.title=Port
+datasources.section.destination-scylla.replication.title=Replication factor
+datasources.section.destination-scylla.username.title=Username
+datasources.section.destination-scylla.address.description=Address to connect to.
+datasources.section.destination-scylla.keyspace.description=Default Scylla keyspace to create data in.
+datasources.section.destination-scylla.password.description=Password associated with Scylla.
+datasources.section.destination-scylla.port.description=Port of Scylla.
+datasources.section.destination-scylla.replication.description=Indicates how many nodes the data should be replicated to.
+datasources.section.destination-scylla.username.description=Username to use to access Scylla.
+datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID
+datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key
+datasources.section.destination-amazon-sqs.message_delay.title=Message Delay
+datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id
+datasources.section.destination-amazon-sqs.queue_url.title=Queue URL
+datasources.section.destination-amazon-sqs.region.title=AWS Region
+datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key
+datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages
+datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body.
+datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds).
+datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues.
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue
+datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue
+datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages
+datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id
+datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name
+datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn
+datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title
+datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User
+datasources.section.destination-aws-datalake.credentials.title=Authentication mode
+datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name
+datasources.section.destination-aws-datalake.region.title=AWS Region
+datasources.section.destination-aws-datalake.aws_account_id.description=Target AWS account ID.
+datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket
+datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix
+datasources.section.destination-aws-datalake.credentials.description=Choose how to authenticate to AWS.
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to S3.
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials
+datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use
+datasources.section.destination-aws-datalake.region.description=Region name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes)
+datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening)
+datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values
+datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON
+datasources.section.destination-azure-blob-storage.format.title=Output Format
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure blob storage account key.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The name of the Azure Blob Storage account.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure Blob Storage container. If it does not exist, it will be created automatically. May be left empty, in which case a container named airbytecontainer+timestamp will be created automatically.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=This is the Azure Blob Storage endpoint domain name. Leave the default value (or leave it empty if running the container from the command line) to use the native Microsoft endpoint.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The number of megabytes to buffer for the output stream to Azure. This impacts the memory footprint on workers, and may need adjustment for performance and an appropriate block size in Azure.
+datasources.section.destination-azure-blob-storage.format.description=Output data format
+datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details.
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional)
+datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source)
+datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID
+datasources.section.destination-bigquery.dataset_location.title=Dataset Location
+datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional)
+datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging
+datasources.section.destination-bigquery.loading_method.title=Loading Method
+datasources.section.destination-bigquery.project_id.title=Project ID
+datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional)
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here.
+datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty.
+datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here.
+datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here.
+datasources.section.destination-bigquery.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.
+datasources.section.destination-kafka.bootstrap_servers.description=A list of host/port pairs used to establish the initial connection to the Kafka cluster, in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down).
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server.
+datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses.
+datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.
+datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer.
+datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns.
+datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream.
+datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request.
+datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block.
+datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5.
+datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes.
+datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers.
+datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.
+datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.
+datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error.
+datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used.
+datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum.
+datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established.
+datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka.
+datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-keen.api_key.title=API Key
+datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp
+datasources.section.destination-keen.project_id.title=Project ID
+datasources.section.destination-keen.api_key.description=To get the Keen Master API Key, navigate to the Access tab from the left-hand side panel and check the Project Details section.
+datasources.section.destination-keen.infer_timestamp.description=Allow connector to guess keen.timestamp value based on the streamed data.
+datasources.section.destination-keen.project_id.description=To get the Keen Project ID, navigate to the Access tab from the left-hand side panel and check the Project Details section.
+datasources.section.destination-kinesis.accessKey.title=Access Key
+datasources.section.destination-kinesis.bufferSize.title=Buffer Size
+datasources.section.destination-kinesis.endpoint.title=Endpoint
+datasources.section.destination-kinesis.privateKey.title=Private Key
+datasources.section.destination-kinesis.region.title=Region
+datasources.section.destination-kinesis.shardCount.title=Shard Count
+datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for current user.
+datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing Kinesis records before they are batch streamed.
+datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint.
+datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that is unique for each account, also known as a "recovery phrase".
+datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID
+datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key
+datasources.section.destination-amazon-sqs.message_delay.title=Message Delay
+datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id
+datasources.section.destination-amazon-sqs.queue_url.title=Queue URL
+datasources.section.destination-amazon-sqs.region.title=AWS Region
+datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key
+datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages
+datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body.
+datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds).
+datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues.
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue
+datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue
+datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages
+datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id
+datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name
+datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn
+datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title
+datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User
+datasources.section.destination-aws-datalake.credentials.title=Authentication mode
+datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name
+datasources.section.destination-aws-datalake.region.title=AWS Region
+datasources.section.destination-aws-datalake.aws_account_id.description=Target AWS account ID
+datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket
+datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix
+datasources.section.destination-aws-datalake.credentials.description=Choose how to authenticate to AWS.
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to S3
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials
+datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use
+datasources.section.destination-aws-datalake.region.description=Region name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes)
+datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening)
+datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values
+datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON
+datasources.section.destination-azure-blob-storage.format.title=Output Format
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure blob storage account key.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The name of the Azure Blob Storage account.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure blob storage container. If it does not exist, it will be created automatically. If left empty, a container named airbytecontainer+timestamp will be created automatically.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=The Azure Blob Storage endpoint domain name. Leave the default value (or leave it empty if running the container from the command line) to use the Microsoft native endpoint from the example.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The number of megabytes to buffer for the output stream to Azure. This will impact the memory footprint on workers, but may need adjustment for performance and an appropriate block size in Azure.
+datasources.section.destination-azure-blob-storage.format.description=Output data format
+datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details.
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional)
+datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source)
+datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID
+datasources.section.destination-bigquery.dataset_location.title=Dataset Location
+datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files After Processing (Optional)
+datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging
+datasources.section.destination-bigquery.loading_method.title=Loading Method
+datasources.section.destination-bigquery.project_id.title=Project ID
+datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional)
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here.
+datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty.
+datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here.
+datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here.
+datasources.section.destination-bigquery.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.
+datasources.section.destination-kafka.bootstrap_servers.description=A list of host/port pairs to use for establishing the initial connection to the Kafka cluster, in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down).
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server.
+datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses.
+datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.
+datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer.
+datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns.
+datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream.
+datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request.
+datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block.
+datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5.
+datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes.
+datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers.
+datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.
+datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.
+datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error.
+datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used.
+datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum.
+datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established.
+datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka.
+datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-keen.api_key.title=API Key
+datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp
+datasources.section.destination-keen.project_id.title=Project ID
+datasources.section.destination-keen.api_key.description=To get the Keen Master API Key, navigate to the Access tab from the left-hand side panel and check the Project Details section.
+datasources.section.destination-keen.infer_timestamp.description=Allow the connector to guess the keen.timestamp value based on the streamed data.
+datasources.section.destination-keen.project_id.description=To get the Keen Project ID, navigate to the Access tab from the left-hand side panel and check the Project Details section.
+datasources.section.destination-kinesis.accessKey.title=Access Key
+datasources.section.destination-kinesis.bufferSize.title=Buffer Size
+datasources.section.destination-kinesis.endpoint.title=Endpoint
+datasources.section.destination-kinesis.privateKey.title=Private Key
+datasources.section.destination-kinesis.region.title=Region
+datasources.section.destination-kinesis.shardCount.title=Shard Count
+datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for the current user.
+datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing Kinesis records before they are streamed in batches.
+datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint.
+datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that is unique to each account, also known as a "recovery phrase".
+datasources.section.destination-kinesis.region.description=AWS region. Your account determines the Regions that are available to you.
+datasources.section.destination-kinesis.shardCount.description=Number of shards to which the data should be streamed.
+datasources.section.destination-kvdb.bucket_id.title=Bucket ID
+datasources.section.destination-kvdb.secret_key.title=Secret Key
+datasources.section.destination-kvdb.bucket_id.description=The ID of your KVdb bucket.
+datasources.section.destination-kvdb.secret_key.description=Your bucket Secret Key.
+datasources.section.destination-local-json.destination_path.title=Destination Path
+datasources.section.destination-local-json.destination_path.description=Path to the directory where json files will be written. The files will be placed inside that local mount. For more information check out our docs
+datasources.section.destination-mariadb-columnstore.database.title=Database
+datasources.section.destination-mariadb-columnstore.host.title=Host
+datasources.section.destination-mariadb-columnstore.password.title=Password
+datasources.section.destination-mariadb-columnstore.port.title=Port
+datasources.section.destination-mariadb-columnstore.username.title=Username
+datasources.section.destination-mariadb-columnstore.database.description=Name of the database.
+datasources.section.destination-mariadb-columnstore.host.description=The Hostname of the database.
+datasources.section.destination-mariadb-columnstore.password.description=The Password associated with the username.
+datasources.section.destination-mariadb-columnstore.port.description=The Port of the database.
+datasources.section.destination-mariadb-columnstore.username.description=The Username which is used to access the database.
+datasources.section.destination-meilisearch.api_key.title=API Key
+datasources.section.destination-meilisearch.host.title=Host
+datasources.section.destination-meilisearch.api_key.description=MeiliSearch API Key. See the docs for more information on how to obtain this key.
+datasources.section.destination-meilisearch.host.description=Hostname of the MeiliSearch instance.
+datasources.section.destination-mongodb.auth_type.oneOf.0.title=None
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.title=Password
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.title=User
+datasources.section.destination-mongodb.auth_type.oneOf.1.title=Login/Password
+datasources.section.destination-mongodb.auth_type.title=Authorization type
+datasources.section.destination-mongodb.database.title=DB Name
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.title=Host
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.title=Port
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.title=TLS Connection
+datasources.section.destination-mongodb.instance_type.oneOf.0.title=Standalone MongoDb Instance
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.title=Server addresses
+datasources.section.destination-mongodb.instance_type.oneOf.1.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL
+datasources.section.destination-mongodb.instance_type.oneOf.2.title=MongoDB Atlas
+datasources.section.destination-mongodb.instance_type.title=MongoDb Instance Type
+datasources.section.destination-mongodb.auth_type.description=Authorization type.
+datasources.section.destination-mongodb.auth_type.oneOf.0.description=None.
+datasources.section.destination-mongodb.auth_type.oneOf.1.description=Login/Password.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.description=Password associated with the username.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.description=Username to use to access the database.
+datasources.section.destination-mongodb.database.description=Name of the database.
+datasources.section.destination-mongodb.instance_type.description=MongoDb instance to connect to. For MongoDB Atlas and Replica Set, a TLS connection is used by default.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.description=The Host of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.description=The Port of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.description=Indicates whether the TLS encryption protocol will be used to connect to MongoDB. It is recommended to use a TLS connection if possible. For more information see documentation.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.description=A replica set name.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify the `host`:`port` of each member, separated by commas.
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.description=URL of a cluster to connect to.
+datasources.section.destination-mqtt.automatic_reconnect.title=Automatic reconnect
+datasources.section.destination-mqtt.broker_host.title=MQTT broker host
+datasources.section.destination-mqtt.broker_port.title=MQTT broker port
+datasources.section.destination-mqtt.clean_session.title=Clean session
+datasources.section.destination-mqtt.client.title=Client ID
+datasources.section.destination-mqtt.connect_timeout.title=Connect timeout
+datasources.section.destination-mqtt.message_qos.title=Message QoS
+datasources.section.destination-mqtt.message_retained.title=Message retained
+datasources.section.destination-mqtt.password.title=Password
+datasources.section.destination-mqtt.publisher_sync.title=Sync publisher
+datasources.section.destination-mqtt.topic_pattern.title=Topic pattern
+datasources.section.destination-mqtt.topic_test.title=Test topic
+datasources.section.destination-mqtt.use_tls.title=Use TLS
+datasources.section.destination-mqtt.username.title=Username
+datasources.section.destination-mqtt.automatic_reconnect.description=Whether the client will automatically attempt to reconnect to the server if the connection is lost.
+datasources.section.destination-mqtt.broker_host.description=Host of the broker to connect to.
+datasources.section.destination-mqtt.broker_port.description=Port of the broker.
+datasources.section.destination-mqtt.clean_session.description=Whether the client and server should remember state across restarts and reconnects.
+datasources.section.destination-mqtt.client.description=A client identifier that is unique on the server being connected to.
+datasources.section.destination-mqtt.connect_timeout.description= Maximum time interval (in seconds) the client will wait for the network connection to the MQTT server to be established.
+datasources.section.destination-mqtt.message_qos.description=Quality of service used for each message to be delivered.
+datasources.section.destination-mqtt.message_retained.description=Whether or not the published message should be retained by the messaging engine.
+datasources.section.destination-mqtt.password.description=Password to use for the connection.
+datasources.section.destination-mqtt.publisher_sync.description=Wait synchronously until the record has been sent to the broker.
+datasources.section.destination-mqtt.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-mqtt.topic_test.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-mqtt.use_tls.description=Whether to use TLS encryption on the connection.
+datasources.section.destination-mqtt.username.description=User name to use for the connection.
+datasources.section.destination-mssql.database.title=DB Name
+datasources.section.destination-mssql.host.title=Host
+datasources.section.destination-mssql.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-mssql.password.title=Password
+datasources.section.destination-mssql.port.title=Port
+datasources.section.destination-mssql.schema.title=Default Schema
+datasources.section.destination-mssql.ssl_method.oneOf.0.title=Unencrypted
+datasources.section.destination-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate)
+datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate
+datasources.section.destination-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate)
+datasources.section.destination-mssql.ssl_method.title=SSL Method
+datasources.section.destination-mssql.username.title=User
+datasources.section.destination-mssql.database.description=The name of the MSSQL database.
+datasources.section.destination-mssql.host.description=The host name of the MSSQL database.
+datasources.section.destination-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-mssql.password.description=The password associated with this username.
+datasources.section.destination-mssql.port.description=The port of the MSSQL database.
+datasources.section.destination-mssql.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public".
+datasources.section.destination-mssql.ssl_method.description=The encryption method which is used to communicate with the database.
+datasources.section.destination-mssql.ssl_method.oneOf.0.description=The data transfer will not be encrypted.
+datasources.section.destination-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!)
+datasources.section.destination-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate.
+datasources.section.destination-mssql.username.description=The username which is used to access the database.
+datasources.section.destination-mysql.database.title=DB Name
+datasources.section.destination-mysql.host.title=Host
+datasources.section.destination-mysql.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-mysql.password.title=Password
+datasources.section.destination-mysql.port.title=Port
+datasources.section.destination-mysql.ssl.title=SSL Connection
+datasources.section.destination-mysql.username.title=User
+datasources.section.destination-mysql.database.description=Name of the database.
+datasources.section.destination-mysql.host.description=Hostname of the database.
+datasources.section.destination-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-mysql.password.description=Password associated with the username.
+datasources.section.destination-mysql.port.description=Port of the database.
+datasources.section.destination-mysql.ssl.description=Encrypt data using SSL.
+datasources.section.destination-mysql.username.description=Username to use to access the database.
+datasources.section.destination-oracle.encryption.oneOf.0.title=Unencrypted
+datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.title=Encryption Algorithm
+datasources.section.destination-oracle.encryption.oneOf.1.title=Native Network Encryption (NNE)
+datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.title=SSL PEM file
+datasources.section.destination-oracle.encryption.oneOf.2.title=TLS Encrypted (verify certificate)
+datasources.section.destination-oracle.encryption.title=Encryption
+datasources.section.destination-oracle.host.title=Host
+datasources.section.destination-oracle.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-oracle.password.title=Password
+datasources.section.destination-oracle.port.title=Port
+datasources.section.destination-oracle.schema.title=Default Schema
+datasources.section.destination-oracle.sid.title=SID
+datasources.section.destination-oracle.username.title=User
+datasources.section.destination-oracle.encryption.description=The encryption method which is used when communicating with the database.
+datasources.section.destination-oracle.encryption.oneOf.0.description=Data transfer will not be encrypted.
+datasources.section.destination-oracle.encryption.oneOf.1.description=The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports.
+datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.description=This parameter defines the database encryption algorithm.
+datasources.section.destination-oracle.encryption.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations.
+datasources.section.destination-oracle.host.description=The hostname of the database.
+datasources.section.destination-oracle.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-oracle.password.description=The password associated with the username.
+datasources.section.destination-oracle.port.description=The port of the database.
+datasources.section.destination-oracle.schema.description=The default schema is used as the target schema for all statements issued from the connection that do not explicitly specify a schema name. The usual value for this field is "airbyte". In Oracle, schemas and users are the same thing, so the "user" parameter is used as the login credentials and this is used for the default Airbyte message schema.
+datasources.section.destination-oracle.sid.description=The System Identifier uniquely distinguishes the instance from any other instance on the same computer.
+datasources.section.destination-oracle.username.description=The username to access the database. This user must have CREATE USER privileges in the database.
+datasources.section.destination-postgres.database.title=DB Name
+datasources.section.destination-postgres.host.title=Host
+datasources.section.destination-postgres.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-postgres.password.title=Password
+datasources.section.destination-postgres.port.title=Port
+datasources.section.destination-postgres.schema.title=Default Schema
+datasources.section.destination-postgres.ssl.title=SSL Connection
+datasources.section.destination-postgres.ssl_mode.oneOf.0.title=disable
+datasources.section.destination-postgres.ssl_mode.oneOf.1.title=allow
+datasources.section.destination-postgres.ssl_mode.oneOf.2.title=prefer
+datasources.section.destination-postgres.ssl_mode.oneOf.3.title=require
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.title=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.title=Client key password (Optional)
+datasources.section.destination-postgres.ssl_mode.oneOf.4.title=verify-ca
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.title=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.title=Client certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.title=Client key
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.title=Client key password (Optional)
+datasources.section.destination-postgres.ssl_mode.oneOf.5.title=verify-full
+datasources.section.destination-postgres.ssl_mode.title=SSL modes
+datasources.section.destination-postgres.username.title=User
+datasources.section.destination-postgres.database.description=Name of the database.
+datasources.section.destination-postgres.host.description=Hostname of the database.
+datasources.section.destination-postgres.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-postgres.password.description=Password associated with the username.
+datasources.section.destination-postgres.port.description=Port of the database.
+datasources.section.destination-postgres.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public".
+datasources.section.destination-postgres.ssl.description=Encrypt data using SSL. When activating SSL, please select one of the connection modes.
+datasources.section.destination-postgres.ssl_mode.description=SSL connection modes.
+datasources.section.destination-postgres.ssl_mode.oneOf.0.description=Disable SSL.
+datasources.section.destination-postgres.ssl_mode.oneOf.1.description=Allow SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.2.description=Prefer SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.3.description=Require SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.4.description=Verify-ca SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.description=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.description=Password for the key storage. This field is optional. If you do not add it, the password will be generated automatically.
+datasources.section.destination-postgres.ssl_mode.oneOf.5.description=Verify-full SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.description=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.description=Client certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.description=Client key
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.description=Password for the key storage. This field is optional. If you do not add it, the password will be generated automatically.
+datasources.section.destination-postgres.username.description=Username to use to access the database.
+datasources.section.destination-pubsub.credentials_json.title=Credentials JSON
+datasources.section.destination-pubsub.project_id.title=Project ID
+datasources.section.destination-pubsub.topic_id.title=PubSub Topic ID
+datasources.section.destination-pubsub.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key.
+datasources.section.destination-pubsub.project_id.description=The GCP project ID for the project containing the target PubSub.
+datasources.section.destination-pubsub.topic_id.description=The PubSub topic ID in the given GCP project ID.
+datasources.section.destination-pulsar.batching_enabled.title=Enable batching
+datasources.section.destination-pulsar.batching_max_messages.title=Batching max messages
+datasources.section.destination-pulsar.batching_max_publish_delay.title=Batching max publish delay
+datasources.section.destination-pulsar.block_if_queue_full.title=Block if queue is full
+datasources.section.destination-pulsar.brokers.title=Pulsar brokers
+datasources.section.destination-pulsar.compression_type.title=Compression type
+datasources.section.destination-pulsar.max_pending_messages.title=Max pending messages
+datasources.section.destination-pulsar.max_pending_messages_across_partitions.title=Max pending messages across partitions
+datasources.section.destination-pulsar.producer_name.title=Producer name
+datasources.section.destination-pulsar.producer_sync.title=Sync producer
+datasources.section.destination-pulsar.send_timeout_ms.title=Message send timeout
+datasources.section.destination-pulsar.topic_namespace.title=Topic namespace
+datasources.section.destination-pulsar.topic_pattern.title=Topic pattern
+datasources.section.destination-pulsar.topic_tenant.title=Topic tenant
+datasources.section.destination-pulsar.topic_test.title=Test topic
+datasources.section.destination-pulsar.topic_type.title=Topic type
+datasources.section.destination-pulsar.use_tls.title=Use TLS
+datasources.section.destination-pulsar.batching_enabled.description=Control whether automatic batching of messages is enabled for the producer.
+datasources.section.destination-pulsar.batching_max_messages.description=Maximum number of messages permitted in a batch.
+datasources.section.destination-pulsar.batching_max_publish_delay.description= Time period in milliseconds within which the messages sent will be batched.
+datasources.section.destination-pulsar.block_if_queue_full.description=Whether the send operation should block when the outgoing message queue is full.
+datasources.section.destination-pulsar.brokers.description=A list of host/port pairs to use for establishing the initial connection to the Pulsar cluster.
+datasources.section.destination-pulsar.compression_type.description=Compression type for the producer.
+datasources.section.destination-pulsar.max_pending_messages.description=The maximum size of a queue holding pending messages.
+datasources.section.destination-pulsar.max_pending_messages_across_partitions.description=The maximum number of pending messages across partitions.
+datasources.section.destination-pulsar.producer_name.description=Name for the producer. If not filled, the system will generate a globally unique name.
+datasources.section.destination-pulsar.producer_sync.description=Wait synchronously until the record has been sent to Pulsar.
+datasources.section.destination-pulsar.send_timeout_ms.description=If a message is not acknowledged by a server before the send-timeout expires, an error occurs (in ms).
+datasources.section.destination-pulsar.topic_namespace.description=The administrative unit of the topic, which acts as a grouping mechanism for related topics. Most topic configuration is performed at the namespace level. Each tenant has one or multiple namespaces.
+datasources.section.destination-pulsar.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-pulsar.topic_tenant.description=The topic tenant within the instance. Tenants are essential to multi-tenancy in Pulsar, and spread across clusters.
+datasources.section.destination-pulsar.topic_test.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-pulsar.topic_type.description=Identifies the type of topic. Pulsar supports two kinds of topics: persistent and non-persistent. In a persistent topic, all messages are durably persisted on disk (on multiple disks unless the broker is standalone), whereas a non-persistent topic does not persist messages to storage disk.
+datasources.section.destination-pulsar.use_tls.description=Whether to use TLS encryption on the connection.
+datasources.section.destination-rabbitmq.exchange.description=The exchange name.
+datasources.section.destination-rabbitmq.host.description=The RabbitMQ host name.
+datasources.section.destination-rabbitmq.password.description=The password to connect.
+datasources.section.destination-rabbitmq.port.description=The RabbitMQ port.
+datasources.section.destination-rabbitmq.routing_key.description=The routing key.
+datasources.section.destination-rabbitmq.ssl.description=SSL enabled.
+datasources.section.destination-rabbitmq.username.description=The username to connect.
+datasources.section.destination-rabbitmq.virtual_host.description=The RabbitMQ virtual host name.
+datasources.section.destination-redis.cache_type.title=Cache type
+datasources.section.destination-redis.host.title=Host
+datasources.section.destination-redis.password.title=Password
+datasources.section.destination-redis.port.title=Port
+datasources.section.destination-redis.username.title=Username
+datasources.section.destination-redis.cache_type.description=Redis cache type to store data in.
+datasources.section.destination-redis.host.description=Redis host to connect to.
+datasources.section.destination-redis.password.description=Password associated with Redis.
+datasources.section.destination-redis.port.description=Port of Redis.
+datasources.section.destination-redis.username.description=Username associated with Redis.
+datasources.section.destination-redshift.database.title=Database
+datasources.section.destination-redshift.host.title=Host
+datasources.section.destination-redshift.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-redshift.password.title=Password
+datasources.section.destination-redshift.port.title=Port
+datasources.section.destination-redshift.schema.title=Default Schema
+datasources.section.destination-redshift.uploading_method.oneOf.0.title=Standard
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.title=S3 Key Id
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.title=No encryption
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.title=Key
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.title=AES-CBC envelope encryption
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.title=Encryption
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.title=S3 Filename pattern (Optional)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.title=Purge Staging Files and Tables (Optional)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.title=S3 Bucket Name
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.title=S3 Bucket Path (Optional)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.title=S3 Bucket Region
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.title=S3 Access Key
+datasources.section.destination-redshift.uploading_method.oneOf.1.title=S3 Staging
+datasources.section.destination-redshift.uploading_method.title=Uploading Method
+datasources.section.destination-redshift.username.title=Username
+datasources.section.destination-redshift.database.description=Name of the database.
+datasources.section.destination-redshift.host.description=Host Endpoint of the Redshift Cluster (must include the cluster-id and region, and must end with .redshift.amazonaws.com)
+datasources.section.destination-redshift.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-redshift.password.description=Password associated with the username.
+datasources.section.destination-redshift.port.description=Port of the database.
+datasources.section.destination-redshift.schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public".
+datasources.section.destination-redshift.uploading_method.description=The method by which the data will be uploaded to the database.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.description=This ID grants access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. See AWS docs on how to generate an access key ID and secret access key.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.description=How to encrypt the staging data
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.description=Staging data will be stored in plaintext.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.description=Staging data will be encrypted using AES-CBC envelope encryption.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.description=The key, base64-encoded. Must be either 128, 192, or 256 bits. Leave blank to have Airbyte generate an ephemeral key for each sync.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.description=Whether to delete the staging files from S3 after completing the sync. See docs for details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.description=The name of the staging S3 bucket to use if utilising a COPY strategy. COPY is recommended for production workloads for better speed and scalability. See AWS docs for more details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. If not provided, then defaults to the root directory. See path's name recommendations for more details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a COPY strategy. See AWS docs for details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.description=The corresponding secret to the above access key id. See AWS docs on how to generate an access key ID and secret access key.
+datasources.section.destination-redshift.username.description=Username to use to access the database.
+datasources.section.destination-rockset.api_key.title=Api Key
+datasources.section.destination-rockset.api_server.title=Api Server
+datasources.section.destination-rockset.workspace.title=Workspace
+datasources.section.destination-rockset.api_key.description=Rockset API key
+datasources.section.destination-rockset.api_server.description=Rockset API URL
+datasources.section.destination-rockset.workspace.description=The Rockset workspace in which collections will be created and written to.
+datasources.section.destination-s3.access_key_id.title=S3 Key ID *
+datasources.section.destination-s3.file_name_pattern.title=S3 Filename pattern (Optional)
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate Level
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.title=Compression Codec *
+datasources.section.destination-s3.format.oneOf.0.properties.format_type.title=Format Type *
+datasources.section.destination-s3.format.oneOf.0.title=Avro: Apache Avro
+datasources.section.destination-s3.format.oneOf.1.properties.compression.oneOf.0.title=No Compression
+datasources.section.destination-s3.format.oneOf.1.properties.compression.oneOf.1.title=GZIP
+datasources.section.destination-s3.format.oneOf.1.properties.compression.title=Compression
+datasources.section.destination-s3.format.oneOf.1.properties.flattening.title=Normalization (Flattening)
+datasources.section.destination-s3.format.oneOf.1.properties.format_type.title=Format Type *
+datasources.section.destination-s3.format.oneOf.1.title=CSV: Comma-Separated Values
+datasources.section.destination-s3.format.oneOf.2.properties.compression.oneOf.0.title=No Compression
+datasources.section.destination-s3.format.oneOf.2.properties.compression.oneOf.1.title=GZIP
+datasources.section.destination-s3.format.oneOf.2.properties.compression.title=Compression
+datasources.section.destination-s3.format.oneOf.2.properties.format_type.title=Format Type *
+datasources.section.destination-s3.format.oneOf.2.title=JSON Lines: Newline-delimited JSON
+datasources.section.destination-s3.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.format_type.title=Format Type *
+datasources.section.destination-s3.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional)
+datasources.section.destination-s3.format.oneOf.3.title=Parquet: Columnar Storage
+datasources.section.destination-s3.format.title=Output Format *
+datasources.section.destination-s3.s3_bucket_name.title=S3 Bucket Name
+datasources.section.destination-s3.s3_bucket_path.title=S3 Bucket Path
+datasources.section.destination-s3.s3_bucket_region.title=S3 Bucket Region
+datasources.section.destination-s3.s3_endpoint.title=Endpoint (Optional)
+datasources.section.destination-s3.s3_path_format.title=S3 Path Format (Optional)
+datasources.section.destination-s3.secret_access_key.title=S3 Access Key *
+datasources.section.destination-s3.access_key_id.description=The access key ID to access the S3 bucket. Airbyte requires Read and Write permissions to the given bucket. Read more here.
+datasources.section.destination-s3.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s)
+datasources.section.destination-s3.format.description=Format of the data output. See here for more details
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Defaults to no compression.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=See here for details.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block.
+datasources.section.destination-s3.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz").
+datasources.section.destination-s3.format.oneOf.1.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details.
+datasources.section.destination-s3.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz").
+datasources.section.destination-s3.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB.
+datasources.section.destination-s3.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages.
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_encoding.description=Default: true.
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size, but for the dictionary. Default: 1024 KB.
+datasources.section.destination-s3.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. This is also the minimum size of a row group. Default: 8 MB.
+datasources.section.destination-s3.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB.
+datasources.section.destination-s3.s3_bucket_name.description=The name of the S3 bucket. Read more here.
+datasources.section.destination-s3.s3_bucket_path.description=Directory under the S3 bucket where data will be written. Read more here
+datasources.section.destination-s3.s3_bucket_region.description=The region of the S3 bucket. See here for all region codes.
+datasources.section.destination-s3.s3_endpoint.description=Your S3 endpoint url. Read more here
+datasources.section.destination-s3.s3_path_format.description=Format string on how data will be organized inside the S3 bucket directory. Read more here
+datasources.section.destination-s3.secret_access_key.description=The corresponding secret to the access key ID. Read more here
+datasources.section.destination-scylla.address.title=Address
+datasources.section.destination-scylla.keyspace.title=Keyspace
+datasources.section.destination-scylla.password.title=Password
+datasources.section.destination-scylla.port.title=Port
+datasources.section.destination-scylla.replication.title=Replication factor
+datasources.section.destination-scylla.username.title=Username
+datasources.section.destination-scylla.address.description=Address to connect to.
+datasources.section.destination-scylla.keyspace.description=Default Scylla keyspace to create data in.
+datasources.section.destination-scylla.password.description=Password associated with Scylla.
+datasources.section.destination-scylla.port.description=Port of Scylla.
+datasources.section.destination-scylla.replication.description=Indicates how many nodes the data should be replicated to.
+datasources.section.destination-scylla.username.description=Username to use to access Scylla.
+datasources.section.destination-sftp-json.destination_path.title=Destination path
+datasources.section.destination-sftp-json.host.title=Host
+datasources.section.destination-sftp-json.password.title=Password
+datasources.section.destination-sftp-json.port.title=Port
+datasources.section.destination-sftp-json.username.title=User
+datasources.section.destination-sftp-json.destination_path.description=Path to the directory where json files will be written.
+datasources.section.destination-sftp-json.host.description=Hostname of the SFTP server.
+datasources.section.destination-sftp-json.password.description=Password associated with the username.
+datasources.section.destination-sftp-json.port.description=Port of the SFTP server.
+datasources.section.destination-sftp-json.username.description=Username to use to access the SFTP server.
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.access_token.title=Access Token
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.refresh_token.title=Refresh Token
+datasources.section.destination-snowflake.credentials.oneOf.0.title=OAuth2.0
+datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key.title=Private Key
+datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key_password.title=Passphrase (Optional)
+datasources.section.destination-snowflake.credentials.oneOf.1.title=Key Pair Authentication
+datasources.section.destination-snowflake.credentials.oneOf.2.properties.password.title=Password
+datasources.section.destination-snowflake.credentials.oneOf.2.title=Username and Password
+datasources.section.destination-snowflake.credentials.title=Authorization Method
+datasources.section.destination-snowflake.database.title=Database
+datasources.section.destination-snowflake.host.title=Host
+datasources.section.destination-snowflake.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-snowflake.loading_method.oneOf.0.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.0.title=Select another option
+datasources.section.destination-snowflake.loading_method.oneOf.1.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.1.title=[Recommended] Internal Staging
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.access_key_id.title=AWS access key ID
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.0.title=No encryption
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.properties.key_encrypting_key.title=Key
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.title=AES-CBC envelope encryption
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.title=Encryption
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.file_name_pattern.title=S3 Filename pattern (Optional)
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.purge_staging_data.title=Purge Staging Files and Tables
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_name.title=S3 Bucket Name
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_region.title=S3 Bucket Region
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.secret_access_key.title=AWS secret access key
+datasources.section.destination-snowflake.loading_method.oneOf.2.title=AWS S3 Staging
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.bucket_name.title=Cloud Storage bucket name
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.credentials_json.title=Google Application Credentials
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.project_id.title=Google Cloud project ID
+datasources.section.destination-snowflake.loading_method.oneOf.3.title=Google Cloud Storage Staging
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_account_name.title=Azure Blob Storage account name
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_container_name.title=Azure Blob Storage Container Name
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_endpoint_domain_name.title=Azure Blob Storage Endpoint
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_sas_token.title=SAS Token
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.4.title=Azure Blob Storage Staging
+datasources.section.destination-snowflake.loading_method.title=Data Staging Method
+datasources.section.destination-snowflake.role.title=Role
+datasources.section.destination-snowflake.schema.title=Default Schema
+datasources.section.destination-snowflake.username.title=Username
+datasources.section.destination-snowflake.warehouse.title=Warehouse
+datasources.section.destination-snowflake.credentials.description=
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.access_token.description=Enter your application's Access Token
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_id.description=Enter your application's Client ID
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_secret.description=Enter your application's Client secret
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.refresh_token.description=Enter your application's Refresh Token
+datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key.description=RSA Private key to use for Snowflake connection. See the docs for more information on how to obtain this key.
+datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key_password.description=Passphrase for private key
+datasources.section.destination-snowflake.credentials.oneOf.2.properties.password.description=Enter the password associated with the username.
+datasources.section.destination-snowflake.database.description=Enter the name of the database you want to sync data into
+datasources.section.destination-snowflake.host.description=Enter your Snowflake account's locator.
+datasources.section.source-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses.
+datasources.section.source-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.
+datasources.section.source-kafka.enable_auto_commit.description=If true, the consumer's offset will be periodically committed in the background.
+datasources.section.source-kafka.group_id.description=The Group ID is how you distinguish different consumer groups.
+datasources.section.source-kafka.max_poll_records.description=The maximum number of records returned in a single call to poll(). Note that max_poll_records does not impact the underlying fetching behavior. The consumer will cache the records from each fetch request and return them incrementally from each poll.
+datasources.section.source-kafka.polling_time.description=Amount of time the Kafka connector should try to poll for messages.
+datasources.section.source-kafka.protocol.description=The Protocol used to communicate with brokers.
+datasources.section.source-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=The JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.source-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=The SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.source-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=The JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.source-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=The SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.source-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.
+datasources.section.source-kafka.repeated_calls.description=The number of repeated calls to poll() if no messages were received.
+datasources.section.source-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.
+datasources.section.source-kafka.retry_backoff_ms.description=The amount of time to wait before attempting to retry a failed request to a given topic partition. This avoids repeatedly sending requests in a tight loop under some failure scenarios.
+datasources.section.source-kafka.subscription.description=You can choose to manually assign a list of partitions, or subscribe to all topics matching specified pattern to get dynamically assigned partitions.
+datasources.section.source-kafka.subscription.oneOf.0.properties.subscription_type.description=Manually assign a list of partitions to this consumer. This interface does not allow for incremental assignment and will replace the previous assignment (if there is one).
+datasources.section.source-kafka.subscription.oneOf.1.properties.subscription_type.description=The Topic pattern from which the records will be read.
+datasources.section.source-kafka.test_topic.description=The topic to test whether Airbyte can consume messages.
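
The source-kafka keys above describe standard Apache Kafka consumer settings (bootstrap servers, group id, SASL protocol, subscription by topic pattern, polling). As a hedged illustration only, not the connector's actual code, here is roughly how those options map onto the plain Kafka Java consumer; the broker list, group id, credentials and topic pattern are made-up example values:

```
// Minimal sketch (not the connector's implementation) of how the source-kafka
// options above map onto the standard Kafka Java consumer. All values are examples.
import java.time.Duration;
import java.util.Properties;
import java.util.regex.Pattern;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class SourceKafkaSketch {
  public static void main(String[] args) {
    Properties props = new Properties();
    props.put("bootstrap.servers", "kafka-broker1:9092,kafka-broker2:9092"); // bootstrap_servers
    props.put("group.id", "airbyte-consumer-group");                          // group_id
    props.put("client.id", "airbyte-source-kafka");                           // client_id
    props.put("enable.auto.commit", "true");                                  // enable_auto_commit
    props.put("max.poll.records", "500");                                     // max_poll_records
    props.put("security.protocol", "SASL_PLAINTEXT");                         // protocol
    props.put("sasl.mechanism", "PLAIN");                                     // sasl_mechanism
    props.put("sasl.jaas.config",                                             // sasl_jaas_config
        "org.apache.kafka.common.security.plain.PlainLoginModule required "
        + "username=\"user\" password=\"secret\";");
    props.put("key.deserializer",
        "org.apache.kafka.common.serialization.StringDeserializer");
    props.put("value.deserializer",
        "org.apache.kafka.common.serialization.StringDeserializer");

    try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
      // subscription: "subscribe to all topics matching specified pattern"
      consumer.subscribe(Pattern.compile("sample\\..*"));
      // polling_time: how long a single poll() waits for records
      ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
      for (ConsumerRecord<String, String> record : records) {
        System.out.printf("%s -> %s%n", record.key(), record.value());
      }
    }
  }
}
```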
+datasources.section.source-klaviyo.api_key.title=Api Key
+datasources.section.source-klaviyo.start_date.title=Start Date
+datasources.section.source-klaviyo.api_key.description=Klaviyo API Key. See our docs if you need help finding this key.
+datasources.section.source-klaviyo.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.
+datasources.section.source-kustomer-singer.api_token.title=API Token
+datasources.section.source-kustomer-singer.start_date.title=Start Date
+datasources.section.source-kustomer-singer.api_token.description=Kustomer API Token. See the docs on how to obtain this
+datasources.section.source-kustomer-singer.start_date.description=The date from which you'd like to replicate the data
+datasources.section.source-kyriba.domain.title=Domain
+datasources.section.source-kyriba.end_date.title=End Date
+datasources.section.source-kyriba.password.title=Password
+datasources.section.source-kyriba.start_date.title=Start Date
+datasources.section.source-kyriba.username.title=Username
+datasources.section.source-kyriba.domain.description=Kyriba domain
+datasources.section.source-kyriba.end_date.description=The date the sync should end. If let empty the sync will run to the current date.
+datasources.section.source-kyriba.password.description=Password to be used in basic auth
+datasources.section.source-kyriba.start_date.description=The date the sync should start from.
+datasources.section.source-kyriba.username.description=Username to be used in basic auth
+datasources.section.source-lemlist.api_key.title=API key
+datasources.section.source-lemlist.api_key.description=Lemlist API key.
+datasources.section.source-linkedin-ads.account_ids.title=Account IDs (Optional)
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_secret.title=Client secret
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.refresh_token.title=Refresh token
+datasources.section.source-linkedin-ads.credentials.oneOf.0.title=OAuth2.0
+datasources.section.source-linkedin-ads.credentials.oneOf.1.properties.access_token.title=Access token
+datasources.section.source-linkedin-ads.credentials.oneOf.1.title=Access token
+datasources.section.source-linkedin-ads.credentials.title=Authentication *
+datasources.section.source-linkedin-ads.start_date.title=Start date
+datasources.section.source-linkedin-ads.account_ids.description=Specify the account IDs, separated by a space, to pull the data from. Leave empty if you want to pull the data from all associated accounts. See the LinkedIn Ads docs for more info.
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_id.description=The client ID of the LinkedIn Ads developer application.
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_secret.description=The client secret of the LinkedIn Ads developer application.
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.refresh_token.description=The key to refresh the expired access token.
+datasources.section.source-linkedin-ads.credentials.oneOf.1.properties.access_token.description=The token value generated using the authentication code. See the docs to obtain yours.
+datasources.section.source-linkedin-ads.start_date.description=UTC date in the format 2020-09-17. Any data before this date will not be replicated.
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_secret.title=Client secret
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.refresh_token.title=Refresh token
+datasources.section.source-linkedin-pages.credentials.oneOf.0.title=OAuth2.0
+datasources.section.source-linkedin-pages.credentials.oneOf.1.properties.access_token.title=Access token
+datasources.section.source-linkedin-pages.credentials.oneOf.1.title=Access token
+datasources.section.source-linkedin-pages.credentials.title=Authentication *
+datasources.section.source-linkedin-pages.org_id.title=Organization ID
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_id.description=The client ID of the LinkedIn developer application.
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_secret.description=The client secret of the LinkedIn developer application.
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.refresh_token.description=The token value generated using the LinkedIn Developers OAuth Token Tools. See the docs to obtain yours.
+datasources.section.source-linkedin-pages.credentials.oneOf.1.properties.access_token.description=The token value generated using the LinkedIn Developers OAuth Token Tools. See the docs to obtain yours.
+datasources.section.source-linkedin-pages.org_id.description=Specify the Organization ID
+datasources.section.source-linnworks.application_id.title=Application ID
+datasources.section.source-linnworks.application_secret.title=Application Secret
+datasources.section.source-linnworks.start_date.title=Start Date
+datasources.section.source-linnworks.token.title=API Token
+datasources.section.source-linnworks.application_id.description=Linnworks Application ID
+datasources.section.source-linnworks.application_secret.description=Linnworks Application Secret
+datasources.section.source-linnworks.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.
+datasources.section.source-looker.client_id.title=Client ID
+datasources.section.source-looker.client_secret.title=Client Secret
+datasources.section.source-looker.domain.title=Domain
+datasources.section.source-looker.run_look_ids.title=Look IDs to Run
+datasources.section.source-looker.client_id.description=The Client ID is the first part of an API3 key that is specific to each Looker user. See the docs for more information on how to generate this key.
+datasources.section.source-looker.client_secret.description=The Client Secret is the second part of an API3 key.
+datasources.section.source-looker.domain.description=Domain for your Looker account, e.g. airbyte.cloud.looker.com, looker.[clientname].com, or an IP address
+datasources.section.source-looker.run_look_ids.description=The IDs of any Looks to run (optional)
+datasources.section.source-mailchimp.credentials.oneOf.0.properties.access_token.title=Access Token
+datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.source-mailchimp.credentials.oneOf.0.title=OAuth2.0
+datasources.section.source-mailchimp.credentials.oneOf.1.properties.apikey.title=API Key
+datasources.section.source-mailchimp.credentials.oneOf.1.title=API Key
+datasources.section.source-mailchimp.credentials.title=Authentication *
+datasources.section.source-mailchimp.credentials.oneOf.0.properties.access_token.description=An access token generated using the above client ID and secret.
+datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application.
+datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application.
+datasources.section.source-mailchimp.credentials.oneOf.1.properties.apikey.description=Mailchimp API Key. See the docs for information on how to generate this key.
+datasources.section.source-mailgun.domain_region.title=Domain Region Code
+datasources.section.source-mailgun.private_key.title=Private API Key
+datasources.section.source-mailgun.start_date.title=Replication Start Date
+datasources.section.source-mailgun.domain_region.description=Domain region code. 'EU' or 'US' are possible values. The default is 'US'.
+datasources.section.source-mailgun.private_key.description=Primary account API key to access your Mailgun data.
+datasources.section.source-mailgun.start_date.description=UTC date and time in the format 2020-10-01 00:00:00. Any data before this date will not be replicated. If omitted, defaults to 3 days ago.
+datasources.section.source-marketo.client_id.title=Client ID
+datasources.section.source-marketo.client_secret.title=Client Secret
+datasources.section.source-marketo.domain_url.title=Domain URL
+datasources.section.source-marketo.start_date.title=Start Date
+datasources.section.source-marketo.client_id.description=The Client ID of your Marketo developer application. See the docs for info on how to obtain this.
+datasources.section.source-marketo.client_secret.description=The Client Secret of your Marketo developer application. See the docs for info on how to obtain this.
+datasources.section.source-marketo.domain_url.description=Your Marketo Base URL. See the docs for info on how to obtain this.
+datasources.section.source-marketo.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.refresh_token.title=Refresh Token
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.tenant_id.title=Directory (tenant) ID
+datasources.section.source-microsoft-teams.credentials.oneOf.0.title=Authenticate via Microsoft (OAuth 2.0)
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_id.title=Client ID
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_secret.title=Client Secret
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.tenant_id.title=Directory (tenant) ID
+datasources.section.source-microsoft-teams.credentials.oneOf.1.title=Authenticate via Microsoft
+datasources.section.source-microsoft-teams.credentials.title=Authentication mechanism
+datasources.section.source-microsoft-teams.period.title=Period
+datasources.section.source-microsoft-teams.credentials.description=Choose how to authenticate to Microsoft
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_id.description=The Client ID of your Microsoft Teams developer application.
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Microsoft Teams developer application.
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.refresh_token.description=A Refresh Token to renew the expired Access Token.
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.tenant_id.description=A globally unique identifier (GUID) that is different from your organization name or domain. Follow these steps to obtain it: open one of the Teams where you belong inside the Teams Application -> Click on the … next to the Team title -> Click on Get link to team -> Copy the link to the team and grab the tenant ID from the URL
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_id.description=The Client ID of your Microsoft Teams developer application.
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_secret.description=The Client Secret of your Microsoft Teams developer application.
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.tenant_id.description=A globally unique identifier (GUID) that is different from your organization name or domain. Follow these steps to obtain it: open one of the Teams where you belong inside the Teams Application -> Click on the … next to the Team title -> Click on Get link to team -> Copy the link to the team and grab the tenant ID from the URL
+datasources.section.source-microsoft-teams.period.description=Specifies the length of time over which the Team Device Report stream is aggregated. The supported values are: D7, D30, D90, and D180.
+datasources.section.source-mixpanel.api_secret.title=Project Secret
+datasources.section.source-mixpanel.attribution_window.title=Attribution Window
+datasources.section.source-mixpanel.date_window_size.title=Date slicing window
+datasources.section.source-mixpanel.end_date.title=End Date
+datasources.section.source-mixpanel.project_timezone.title=Project Timezone
+datasources.section.source-mixpanel.region.title=Region
+datasources.section.source-mixpanel.select_properties_by_default.title=Select Properties By Default
+datasources.section.source-mixpanel.start_date.title=Start Date
+datasources.section.source-mixpanel.api_secret.description=Mixpanel project secret. See the docs for more information on how to obtain this.
+datasources.section.source-mixpanel.attribution_window.description=A period of time for attributing results to ads, and the lookback period after those actions occur during which ad results are counted. The default attribution window is 5 days.
+datasources.section.source-mixpanel.date_window_size.description=Defines the window size in days that is used to slice through data. You can reduce it if the amount of data in each window is too big for your environment.
+datasources.section.source-mixpanel.end_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data after this date will not be replicated. Leave empty to always sync to the most recent date.
+datasources.section.source-mixpanel.project_timezone.description=Time zone in which integer date times are stored. The project timezone may be found in the project settings in the Mixpanel console.
+datasources.section.source-mixpanel.region.description=The region of the Mixpanel domain instance, either US or EU.
+datasources.section.source-mixpanel.select_properties_by_default.description=Setting this config parameter to TRUE ensures that new properties on events and engage records are captured. Otherwise new properties will be ignored.
+datasources.section.source-mixpanel.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. If this option is not set, the connector will replicate data from up to one year ago by default.
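
The start_date, end_date and date_window_size keys above describe slicing the replication range into fixed-size windows. A rough sketch of that slicing, with a hypothetical 30-day window and example dates (not connector defaults):

```
// Rough illustration of slicing a start/end range into fixed-size date windows,
// as described by start_date, end_date and date_window_size above.
// The dates and the 30-day window are hypothetical values.
import java.time.LocalDate;

public class DateWindowSketch {
  public static void main(String[] args) {
    LocalDate start = LocalDate.parse("2017-01-25"); // start_date
    LocalDate end = LocalDate.parse("2017-04-01");   // end_date (or today if left empty)
    int windowDays = 30;                             // date_window_size

    LocalDate windowStart = start;
    while (!windowStart.isAfter(end)) {
      LocalDate windowEnd = windowStart.plusDays(windowDays - 1);
      if (windowEnd.isAfter(end)) {
        windowEnd = end;
      }
      System.out.println("query window: " + windowStart + " .. " + windowEnd);
      windowStart = windowEnd.plusDays(1);
    }
  }
}
```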
+datasources.section.source-monday.credentials.oneOf.0.properties.access_token.title=Access Token
+datasources.section.source-monday.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-monday.credentials.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.source-monday.credentials.oneOf.0.properties.subdomain.title=Subdomain/Slug (Optional)
+datasources.section.source-monday.credentials.oneOf.0.title=OAuth2.0
+datasources.section.source-monday.credentials.oneOf.1.properties.api_token.title=Personal API Token
+datasources.section.source-monday.credentials.oneOf.1.title=API Token
+datasources.section.source-monday.credentials.title=Authorization Method
+datasources.section.source-monday.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests.
+datasources.section.source-monday.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application.
+datasources.section.source-monday.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application.
+datasources.section.source-monday.credentials.oneOf.0.properties.subdomain.description=Slug/subdomain of the account, or the first part of the URL that comes before .monday.com
+datasources.section.source-monday.credentials.oneOf.1.properties.api_token.description=API Token for making authenticated requests.
+datasources.section.source-mongodb.auth_source.title=Authentication source
+datasources.section.source-mongodb.database.title=Database name
+datasources.section.source-mongodb.host.title=Host
+datasources.section.source-mongodb.password.title=Password
+datasources.section.source-mongodb.port.title=Port
+datasources.section.source-mongodb.replica_set.title=Replica Set
+datasources.section.source-mongodb.ssl.title=TLS connection
+datasources.section.source-mongodb.user.title=User
+datasources.section.source-mongodb.auth_source.description=Authentication source where user information is stored. See the Mongo docs for more info.
+datasources.section.source-mongodb.database.description=Database to be replicated.
+datasources.section.source-mongodb.host.description=Host of a Mongo database to be replicated.
+datasources.section.source-mongodb.password.description=Password
+datasources.section.source-mongodb.port.description=Port of a Mongo database to be replicated.
+datasources.section.source-mongodb.replica_set.description=The name of the set to filter servers by, when connecting to a replica set (Under this condition, the 'TLS connection' value automatically becomes 'true'). See the Mongo docs for more info.
+datasources.section.source-mongodb.ssl.description=If this switch is enabled, TLS connections will be used to connect to MongoDB.
+datasources.section.source-mongodb.user.description=User
+datasources.section.source-mongodb-v2.auth_source.title=Authentication Source
+datasources.section.source-mongodb-v2.database.title=Database Name
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.host.title=Host
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.port.title=Port
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.tls.title=TLS Connection
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.title=Standalone MongoDb Instance
+datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.replica_set.title=Replica Set
+datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.server_addresses.title=Server Addresses
+datasources.section.source-mongodb-v2.instance_type.oneOf.1.title=Replica Set
+datasources.section.source-mongodb-v2.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL
+datasources.section.source-mongodb-v2.instance_type.oneOf.2.title=MongoDB Atlas
+datasources.section.source-mongodb-v2.instance_type.title=MongoDb Instance Type
+datasources.section.source-mongodb-v2.password.title=Password
+datasources.section.source-mongodb-v2.user.title=User
+datasources.section.source-mongodb-v2.auth_source.description=The authentication source where the user information is stored.
+datasources.section.source-mongodb-v2.database.description=The database you want to replicate.
+datasources.section.source-mongodb-v2.instance_type.description=The MongoDb instance to connect to. For MongoDB Atlas and Replica Set, a TLS connection is used by default.
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.host.description=The host name of the Mongo database.
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.port.description=The port of the Mongo database.
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.tls.description=Indicates whether TLS encryption protocol will be used to connect to MongoDB. It is recommended to use TLS connection if possible. For more information see documentation.
+datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.replica_set.description=A replica set in MongoDB is a group of mongod processes that maintain the same data set.
+datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify `host`:`port` of each member, separated by commas.
+datasources.section.source-mongodb-v2.instance_type.oneOf.2.properties.cluster_url.description=The URL of a cluster to connect to.
+datasources.section.source-mongodb-v2.password.description=The password associated with this username.
+datasources.section.source-mongodb-v2.user.description=The username which is used to access the database.
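
The source-mongodb-v2 instance_type options above choose between a standalone host/port, a replica set with several server addresses, and an Atlas cluster URL. A hedged sketch of the connection strings each variant typically corresponds to; hosts, ports, the replica set name, credentials and the database name are placeholders:

```
// Sketch of the connection strings the three instance_type variants typically map to.
// Hosts, ports, replica set name, credentials and database name are placeholders.
import com.mongodb.client.MongoClient;
import com.mongodb.client.MongoClients;

public class MongoInstanceTypeSketch {
  public static void main(String[] args) {
    // Standalone MongoDb Instance: host + port (+ tls); auth_source picks the auth database
    String standalone = "mongodb://user:pass@mongo-host:27017/?authSource=admin&tls=false";

    // Replica Set: comma-separated host:port members plus the replica set name
    String replicaSet = "mongodb://user:pass@host1:27017,host2:27017/?replicaSet=rs0&authSource=admin";

    // MongoDB Atlas: cluster URL using the mongodb+srv scheme (TLS by default)
    String atlas = "mongodb+srv://user:pass@cluster-url.example.mongodb.net/";

    try (MongoClient client = MongoClients.create(standalone)) {
      // database: the database you want to replicate
      for (String name : client.getDatabase("my_database").listCollectionNames()) {
        System.out.println(name);
      }
    }
  }
}
```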
+datasources.section.source-mssql.database.title=Database
+datasources.section.source-mssql.host.title=Host
+datasources.section.source-mssql.jdbc_url_params.title=JDBC URL Params
+datasources.section.source-mssql.password.title=Password
+datasources.section.source-mssql.port.title=Port
+datasources.section.source-mssql.replication_method.oneOf.0.title=Standard
+datasources.section.source-mssql.replication_method.oneOf.1.properties.data_to_sync.title=Data to Sync
+datasources.section.source-mssql.replication_method.oneOf.1.properties.snapshot_isolation.title=Initial Snapshot Isolation Level
+datasources.section.source-mssql.replication_method.oneOf.1.title=Logical Replication (CDC)
+datasources.section.source-mssql.replication_method.title=Replication Method
+datasources.section.source-mssql.schemas.title=Schemas
+datasources.section.source-mssql.ssl_method.oneOf.0.title=Unencrypted
+datasources.section.source-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate)
+datasources.section.source-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate
+datasources.section.source-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate)
+datasources.section.source-mssql.ssl_method.title=SSL Method
+datasources.section.source-mssql.username.title=Username
+datasources.section.source-mssql.database.description=The name of the database.
+datasources.section.source-mssql.host.description=The hostname of the database.
+datasources.section.source-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.source-mssql.password.description=The password associated with the username.
+datasources.section.source-mssql.port.description=The port of the database.
+datasources.section.source-mssql.replication_method.description=The replication method used for extracting data from the database. STANDARD replication requires no setup on the DB side but will not be able to represent deletions incrementally. CDC uses {TBC} to detect inserts, updates, and deletes. This needs to be configured on the source database itself.
+datasources.section.source-mssql.replication_method.oneOf.0.description=Standard replication requires no setup on the DB side but will not be able to represent deletions incrementally.
+datasources.section.source-mssql.replication_method.oneOf.1.description=CDC uses {TBC} to detect inserts, updates, and deletes. This needs to be configured on the source database itself.
+datasources.section.source-mssql.replication_method.oneOf.1.properties.data_to_sync.description=What data should be synced under the CDC. "Existing and New" will read existing data as a snapshot, and sync new changes through CDC. "New Changes Only" will skip the initial snapshot, and only sync new changes through CDC.
+datasources.section.source-mssql.replication_method.oneOf.1.properties.snapshot_isolation.description=Existing data in the database are synced through an initial snapshot. This parameter controls the isolation level that will be used during the initial snapshotting. If you choose the "Snapshot" level, you must enable the snapshot isolation mode on the database.
+datasources.section.source-mssql.schemas.description=The list of schemas to sync from. Defaults to user. Case sensitive.
+datasources.section.source-mssql.ssl_method.description=The encryption method which is used when communicating with the database.
+datasources.section.source-mssql.ssl_method.oneOf.0.description=Data transfer will not be encrypted.
+datasources.section.source-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!)
+datasources.section.source-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.source-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate.
+datasources.section.source-mssql.username.description=The username which is used to access the database.
+datasources.section.source-my-hours.email.title=Email
+datasources.section.source-my-hours.logs_batch_size.title=Time logs batch size
+datasources.section.source-my-hours.password.title=Password
+datasources.section.source-my-hours.start_date.title=Start Date
+datasources.section.source-my-hours.email.description=Your My Hours username
+datasources.section.source-my-hours.logs_batch_size.description=Pagination size used for retrieving logs in days
+datasources.section.source-my-hours.password.description=The password associated with the username
+datasources.section.source-my-hours.start_date.description=Start date for collecting time logs
+datasources.section.source-mysql.database.title=Database
+datasources.section.source-mysql.host.title=Host
+datasources.section.source-mysql.jdbc_url_params.title=JDBC URL Params
+datasources.section.source-mysql.password.title=Password
+datasources.section.source-mysql.port.title=Port
+datasources.section.source-mysql.replication_method.oneOf.0.title=STANDARD
+datasources.section.source-mysql.replication_method.oneOf.1.title=Logical Replication (CDC)
+datasources.section.source-mysql.replication_method.title=Replication Method
+datasources.section.source-mysql.ssl.title=SSL Connection
+datasources.section.source-mysql.ssl_mode.oneOf.0.title=preferred
+datasources.section.source-mysql.ssl_mode.oneOf.1.title=required
+datasources.section.source-mysql.ssl_mode.oneOf.2.properties.ca_certificate.title=CA certificate
+datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_certificate.title=Client certificate
+datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_key.title=Client key
+datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_key_password.title=Client key password (Optional)
+datasources.section.source-mysql.ssl_mode.oneOf.2.title=Verify CA
+datasources.section.source-mysql.ssl_mode.oneOf.3.properties.ca_certificate.title=CA certificate
+datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_certificate.title=Client certificate
+datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_key.title=Client key
+datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_key_password.title=Client key password (Optional)
+datasources.section.source-mysql.ssl_mode.oneOf.3.title=Verify Identity
+datasources.section.source-mysql.ssl_mode.title=SSL modes
+datasources.section.source-mysql.username.title=Username
+datasources.section.source-mysql.database.description=The database name.
+datasources.section.source-mysql.host.description=The host name of the database.
+datasources.section.source-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.source-mysql.password.description=The password associated with the username.
+datasources.section.source-mysql.port.description=The port to connect to.
+datasources.section.source-mysql.replication_method.description=Replication method to use for extracting data from the database.
+datasources.section.source-mysql.replication_method.oneOf.0.description=Standard replication requires no setup on the DB side but will not be able to represent deletions incrementally.
+datasources.section.source-mysql.replication_method.oneOf.1.description=CDC uses the Binlog to detect inserts, updates, and deletes. This needs to be configured on the source database itself.
+datasources.section.source-mysql.ssl.description=Encrypt data using SSL.
+datasources.section.source-mysql.ssl_mode.description=SSL connection modes.
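
The jdbc_url_params key above (used by source-mysql and several other connectors in this file) describes extra 'key=value' pairs joined with '&' and appended to the JDBC URL. A minimal sketch of what such a MySQL JDBC URL could look like; host, port, database, credentials and the chosen parameters are illustrative only:

```
// Sketch of how jdbc_url_params ("key=value" pairs joined with '&') end up on a
// MySQL JDBC URL. Requires the MySQL Connector/J driver on the classpath.
// Host, port, database, credentials and the parameters are example values.
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;

public class JdbcUrlParamsSketch {
  public static void main(String[] args) throws SQLException {
    String host = "mysql-host";
    int port = 3306;
    String database = "mydb";
    String jdbcUrlParams = "useSSL=true&serverTimezone=UTC"; // jdbc_url_params

    // Final URL: jdbc:mysql://mysql-host:3306/mydb?useSSL=true&serverTimezone=UTC
    String url = String.format("jdbc:mysql://%s:%d/%s?%s", host, port, database, jdbcUrlParams);
    try (Connection conn = DriverManager.getConnection(url, "username", "password")) {
      System.out.println("connected: " + conn.getMetaData().getURL());
    }
  }
}
```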
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server.
+datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses.
+datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.
+datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer.
+datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns.
+datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream.
+datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request.
+datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block.
+datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5.
+datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes.
+datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers.
+datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.
+datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.
+datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error.
+datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used.
+datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum.
+datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established.
+datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka.
+datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
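
The destination-kafka keys above are standard Kafka producer settings plus a topic_pattern that may contain '{namespace}' and '{stream}' placeholders. A minimal, hedged sketch of a producer using a few of these options together with a naive placeholder substitution; the broker list, namespace/stream values and the record payload are examples, not the connector's implementation:

```
// Minimal sketch of a Kafka producer using some of the destination-kafka options above,
// plus a naive '{namespace}'/'{stream}' substitution for topic_pattern. All values are examples.
import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;

public class DestinationKafkaSketch {
  public static void main(String[] args) throws Exception {
    Properties props = new Properties();
    props.put("bootstrap.servers", "kafka-broker1:9092,kafka-broker2:9092"); // bootstrap_servers
    props.put("enable.idempotence", "true");                                  // enable_idempotence
    props.put("acks", "all");                                                 // required for idempotence
    props.put("compression.type", "gzip");                                    // compression_type
    props.put("key.serializer",
        "org.apache.kafka.common.serialization.StringSerializer");
    props.put("value.serializer",
        "org.apache.kafka.common.serialization.StringSerializer");

    // topic_pattern: '{namespace}' and '{stream}' replaced with per-record values
    String topic = "{namespace}.{stream}.sample"
        .replace("{namespace}", "public")
        .replace("{stream}", "users");

    try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
      // sync_producer=true corresponds to blocking until the record is acknowledged
      producer.send(new ProducerRecord<>(topic, "key", "{\"id\": 1}")).get();
    }
  }
}
```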
+datasources.section.destination-keen.api_key.title=API Key
+datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp
+datasources.section.destination-keen.project_id.title=Project ID
+datasources.section.destination-keen.api_key.description=To get the Keen Master API Key, navigate to the Access tab from the left-hand side panel and check the Project Details section.
+datasources.section.destination-keen.infer_timestamp.description=Allow connector to guess keen.timestamp value based on the streamed data.
+datasources.section.destination-keen.project_id.description=To get the Keen Project ID, navigate to the Access tab from the left-hand side panel and check the Project Details section.
+datasources.section.destination-kinesis.accessKey.title=Access Key
+datasources.section.destination-kinesis.bufferSize.title=Buffer Size
+datasources.section.destination-kinesis.endpoint.title=Endpoint
+datasources.section.destination-kinesis.privateKey.title=Private Key
+datasources.section.destination-kinesis.region.title=Region
+datasources.section.destination-kinesis.shardCount.title=Shard Count
+datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for current user.
+datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing kinesis records before being batch streamed.
+datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint.
+datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that are unique for each account, also known as a "recovery phrase".
+datasources.section.destination-kinesis.region.description=AWS region. Your account determines the Regions that are available to you.
+datasources.section.destination-kinesis.shardCount.description=Number of shards to which the data should be streamed.
+datasources.section.destination-kvdb.bucket_id.title=Bucket ID
+datasources.section.destination-kvdb.secret_key.title=Secret Key
+datasources.section.destination-kvdb.bucket_id.description=The ID of your KVdb bucket.
+datasources.section.destination-kvdb.secret_key.description=Your bucket Secret Key.
+datasources.section.destination-local-json.destination_path.title=Destination Path
+datasources.section.destination-local-json.destination_path.description=Path to the directory where json files will be written. The files will be placed inside that local mount. For more information check out our docs
+datasources.section.destination-mariadb-columnstore.database.title=Database
+datasources.section.destination-mariadb-columnstore.host.title=Host
+datasources.section.destination-mariadb-columnstore.password.title=Password
+datasources.section.destination-mariadb-columnstore.port.title=Port
+datasources.section.destination-mariadb-columnstore.username.title=Username
+datasources.section.destination-mariadb-columnstore.database.description=Name of the database.
+datasources.section.destination-mariadb-columnstore.host.description=The Hostname of the database.
+datasources.section.destination-mariadb-columnstore.password.description=The Password associated with the username.
+datasources.section.destination-mariadb-columnstore.port.description=The Port of the database.
+datasources.section.destination-mariadb-columnstore.username.description=The Username which is used to access the database.
+datasources.section.destination-meilisearch.api_key.title=API Key
+datasources.section.destination-meilisearch.host.title=Host
+datasources.section.destination-meilisearch.api_key.description=MeiliSearch API Key. See the docs for more information on how to obtain this key.
+datasources.section.destination-meilisearch.host.description=Hostname of the MeiliSearch instance.
+datasources.section.destination-mongodb.auth_type.oneOf.0.title=None
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.title=Password
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.title=User
+datasources.section.destination-mongodb.auth_type.oneOf.1.title=Login/Password
+datasources.section.destination-mongodb.auth_type.title=Authorization type
+datasources.section.destination-mongodb.database.title=DB Name
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.title=Host
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.title=Port
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.title=TLS Connection
+datasources.section.destination-mongodb.instance_type.oneOf.0.title=Standalone MongoDb Instance
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.title=Server addresses
+datasources.section.destination-mongodb.instance_type.oneOf.1.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL
+datasources.section.destination-mongodb.instance_type.oneOf.2.title=MongoDB Atlas
+datasources.section.destination-mongodb.instance_type.title=MongoDb Instance Type
+datasources.section.destination-mongodb.auth_type.description=Authorization type.
+datasources.section.destination-mongodb.auth_type.oneOf.0.description=None.
+datasources.section.destination-mongodb.auth_type.oneOf.1.description=Login/Password.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.description=Password associated with the username.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.description=Username to use to access the database.
+datasources.section.destination-mongodb.database.description=Name of the database.
+datasources.section.destination-mongodb.instance_type.description=MongoDb instance to connect to. For MongoDB Atlas and Replica Set, a TLS connection is used by default.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.description=The Host of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.description=The Port of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.description=Indicates whether TLS encryption protocol will be used to connect to MongoDB. It is recommended to use TLS connection if possible. For more information see documentation.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.description=A replica set name.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify `host`:`port` of each member, separated by commas.
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.description=URL of a cluster to connect to.
+datasources.section.destination-mqtt.automatic_reconnect.title=Automatic reconnect
+datasources.section.destination-mqtt.broker_host.title=MQTT broker host
+datasources.section.destination-mqtt.broker_port.title=MQTT broker port
+datasources.section.destination-mqtt.clean_session.title=Clean session
+datasources.section.destination-mqtt.client.title=Client ID
+datasources.section.destination-mqtt.connect_timeout.title=Connect timeout
+datasources.section.destination-mqtt.message_qos.title=Message QoS
+datasources.section.destination-mqtt.message_retained.title=Message retained
+datasources.section.destination-mqtt.password.title=Password
+datasources.section.destination-mqtt.publisher_sync.title=Sync publisher
+datasources.section.destination-mqtt.topic_pattern.title=Topic pattern
+datasources.section.destination-mqtt.topic_test.title=Test topic
+datasources.section.destination-mqtt.use_tls.title=Use TLS
+datasources.section.destination-mqtt.username.title=Username
+datasources.section.destination-mqtt.automatic_reconnect.description=Whether the client will automatically attempt to reconnect to the server if the connection is lost.
+datasources.section.destination-mqtt.broker_host.description=Host of the broker to connect to.
+datasources.section.destination-mqtt.broker_port.description=Port of the broker.
+datasources.section.destination-mqtt.clean_session.description=Whether the client and server should remember state across restarts and reconnects.
+datasources.section.destination-mqtt.client.description=A client identifier that is unique on the server being connected to.
+datasources.section.destination-mqtt.connect_timeout.description=Maximum time interval (in seconds) the client will wait for the network connection to the MQTT server to be established.
+datasources.section.destination-mqtt.message_qos.description=Quality of service used for each message to be delivered.
+datasources.section.destination-mqtt.message_retained.description=Whether or not the publish message should be retained by the messaging engine.
+datasources.section.destination-mqtt.password.description=Password to use for the connection.
+datasources.section.destination-mqtt.publisher_sync.description=Wait synchronously until the record has been sent to the broker.
+datasources.section.destination-mqtt.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-mqtt.topic_test.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-mqtt.use_tls.description=Whether to use TLS encryption on the connection.
+datasources.section.destination-mqtt.username.description=User name to use for the connection.
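
The destination-mqtt keys above map closely onto a standard MQTT client's connect and publish options. A hedged sketch using the Eclipse Paho Java client; the broker URI, credentials, topic, payload and QoS are placeholder values, not the connector's code:

```
// Hedged sketch of how the destination-mqtt options above map onto the Eclipse Paho
// Java client. Broker host/port, credentials, topic and QoS are placeholder values.
import org.eclipse.paho.client.mqttv3.MqttClient;
import org.eclipse.paho.client.mqttv3.MqttConnectOptions;
import org.eclipse.paho.client.mqttv3.MqttMessage;

public class DestinationMqttSketch {
  public static void main(String[] args) throws Exception {
    // use_tls=false -> tcp://, use_tls=true -> ssl:// (broker_host / broker_port)
    String brokerUri = "tcp://mqtt-broker:1883";

    MqttClient client = new MqttClient(brokerUri, "airbyte-client-id"); // client
    MqttConnectOptions options = new MqttConnectOptions();
    options.setAutomaticReconnect(true);            // automatic_reconnect
    options.setCleanSession(true);                  // clean_session
    options.setConnectionTimeout(30);               // connect_timeout (seconds)
    options.setUserName("username");                // username
    options.setPassword("password".toCharArray());  // password
    client.connect(options);

    MqttMessage message = new MqttMessage("{\"id\": 1}".getBytes());
    message.setQos(1);          // message_qos
    message.setRetained(false); // message_retained
    client.publish("public.users.sample", message); // topic_pattern after substitution
    client.disconnect();
  }
}
```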
+datasources.section.destination-mssql.database.title=DB Name
+datasources.section.destination-mssql.host.title=Host
+datasources.section.destination-mssql.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-mssql.password.title=Password
+datasources.section.destination-mssql.port.title=Port
+datasources.section.destination-mssql.schema.title=Default Schema
+datasources.section.destination-mssql.ssl_method.oneOf.0.title=Unencrypted
+datasources.section.destination-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate)
+datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate
+datasources.section.destination-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate)
+datasources.section.destination-mssql.ssl_method.title=SSL Method
+datasources.section.destination-mssql.username.title=User
+datasources.section.destination-mssql.database.description=The name of the MSSQL database.
+datasources.section.destination-mssql.host.description=The host name of the MSSQL database.
+datasources.section.destination-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-mssql.password.description=The password associated with this username.
+datasources.section.destination-mssql.port.description=The port of the MSSQL database.
+datasources.section.destination-mssql.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public".
+datasources.section.destination-mssql.ssl_method.description=The encryption method which is used to communicate with the database.
+datasources.section.destination-mssql.ssl_method.oneOf.0.description=The data transfer will not be encrypted.
+datasources.section.destination-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!)
+datasources.section.destination-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate.
+datasources.section.destination-mssql.username.description=The username which is used to access the database.
+datasources.section.destination-mysql.database.title=DB Name
+datasources.section.destination-mysql.host.title=Host
+datasources.section.destination-mysql.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-mysql.password.title=Password
+datasources.section.destination-mysql.port.title=Port
+datasources.section.destination-mysql.ssl.title=SSL Connection
+datasources.section.destination-mysql.username.title=User
+datasources.section.destination-mysql.database.description=Name of the database.
+datasources.section.destination-mysql.host.description=Hostname of the database.
+datasources.section.destination-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-mysql.password.description=Password associated with the username.
+datasources.section.destination-mysql.port.description=Port of the database.
+datasources.section.destination-mysql.ssl.description=Encrypt data using SSL.
+datasources.section.destination-mysql.username.description=Username to use to access the database.
+datasources.section.destination-oracle.encryption.oneOf.0.title=Unencrypted
+datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.title=Encryption Algorithm
+datasources.section.destination-oracle.encryption.oneOf.1.title=Native Network Encryption (NNE)
+datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.title=SSL PEM file
+datasources.section.destination-oracle.encryption.oneOf.2.title=TLS Encrypted (verify certificate)
+datasources.section.destination-oracle.encryption.title=Encryption
+datasources.section.destination-oracle.host.title=Host
+datasources.section.destination-oracle.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-oracle.password.title=Password
+datasources.section.destination-oracle.port.title=Port
+datasources.section.destination-oracle.schema.title=Default Schema
+datasources.section.destination-oracle.sid.title=SID
+datasources.section.destination-oracle.username.title=User
+datasources.section.destination-oracle.encryption.description=The encryption method which is used when communicating with the database.
+datasources.section.destination-oracle.encryption.oneOf.0.description=Data transfer will not be encrypted.
+datasources.section.destination-oracle.encryption.oneOf.1.description=The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports.
+datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.description=This parameter defines the database encryption algorithm.
+datasources.section.destination-oracle.encryption.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations.
+datasources.section.destination-oracle.host.description=The hostname of the database.
+datasources.section.destination-oracle.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-oracle.password.description=The password associated with the username.
+datasources.section.destination-oracle.port.description=The port of the database.
+datasources.section.destination-oracle.schema.description=The default schema is used as the target schema for all statements issued from the connection that do not explicitly specify a schema name. The usual value for this field is "airbyte". In Oracle, schemas and users are the same thing, so the "user" parameter is used as the login credentials and this is used for the default Airbyte message schema.
+datasources.section.destination-oracle.sid.description=The System Identifier uniquely distinguishes the instance from any other instance on the same computer.
+datasources.section.destination-oracle.username.description=The username to access the database. This user must have CREATE USER privileges in the database.
+datasources.section.destination-postgres.database.title=DB Name
+datasources.section.destination-postgres.host.title=Host
+datasources.section.destination-postgres.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-postgres.password.title=Password
+datasources.section.destination-postgres.port.title=Port
+datasources.section.destination-postgres.schema.title=Default Schema
+datasources.section.destination-postgres.ssl.title=SSL Connection
+datasources.section.destination-postgres.ssl_mode.oneOf.0.title=disable
+datasources.section.destination-postgres.ssl_mode.oneOf.1.title=allow
+datasources.section.destination-postgres.ssl_mode.oneOf.2.title=prefer
+datasources.section.destination-postgres.ssl_mode.oneOf.3.title=require
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.title=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.title=Client key password (Optional)
+datasources.section.destination-postgres.ssl_mode.oneOf.4.title=verify-ca
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.title=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.title=Client certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.title=Client key
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.title=Client key password (Optional)
+datasources.section.destination-postgres.ssl_mode.oneOf.5.title=verify-full
+datasources.section.destination-postgres.ssl_mode.title=SSL modes
+datasources.section.destination-postgres.username.title=User
+datasources.section.destination-postgres.database.description=Name of the database.
+datasources.section.destination-postgres.host.description=Hostname of the database.
+datasources.section.destination-postgres.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-postgres.password.description=Password associated with the username.
+datasources.section.destination-postgres.port.description=Port of the database.
+datasources.section.destination-postgres.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public".
+datasources.section.destination-postgres.ssl.description=Encrypt data using SSL. When activating SSL, please select one of the connection modes.
+datasources.section.destination-postgres.ssl_mode.description=SSL connection modes.
+datasources.section.destination-postgres.ssl_mode.oneOf.0.description=Disable SSL.
+datasources.section.destination-postgres.ssl_mode.oneOf.1.description=Allow SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.2.description=Prefer SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.3.description=Require SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.4.description=Verify-ca SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.description=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.description=Password for key storage. This field is optional. If you do not add it, the password will be generated automatically.
+datasources.section.destination-postgres.ssl_mode.oneOf.5.description=Verify-full SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.description=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.description=Client certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.description=Client key
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.description=Password for key storage. This field is optional. If you do not add it, the password will be generated automatically.
+datasources.section.destination-postgres.username.description=Username to use to access the database.
+datasources.section.destination-pubsub.credentials_json.title=Credentials JSON
+datasources.section.destination-pubsub.project_id.title=Project ID
+datasources.section.destination-pubsub.topic_id.title=PubSub Topic ID
+datasources.section.destination-pubsub.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key.
+datasources.section.destination-pubsub.project_id.description=The GCP project ID for the project containing the target PubSub.
+datasources.section.destination-pubsub.topic_id.description=The PubSub topic ID in the given GCP project ID.
+datasources.section.destination-pulsar.batching_enabled.title=Enable batching
+datasources.section.destination-pulsar.batching_max_messages.title=Batching max messages
+datasources.section.destination-pulsar.batching_max_publish_delay.title=Batching max publish delay
+datasources.section.destination-pulsar.block_if_queue_full.title=Block if queue is full
+datasources.section.destination-pulsar.brokers.title=Pulsar brokers
+datasources.section.destination-pulsar.compression_type.title=Compression type
+datasources.section.destination-pulsar.max_pending_messages.title=Max pending messages
+datasources.section.destination-pulsar.max_pending_messages_across_partitions.title=Max pending messages across partitions
+datasources.section.destination-pulsar.producer_name.title=Producer name
+datasources.section.destination-pulsar.producer_sync.title=Sync producer
+datasources.section.destination-pulsar.send_timeout_ms.title=Message send timeout
+datasources.section.destination-pulsar.topic_namespace.title=Topic namespace
+datasources.section.destination-pulsar.topic_pattern.title=Topic pattern
+datasources.section.destination-pulsar.topic_tenant.title=Topic tenant
+datasources.section.destination-pulsar.topic_test.title=Test topic
+datasources.section.destination-pulsar.topic_type.title=Topic type
+datasources.section.destination-pulsar.use_tls.title=Use TLS
+datasources.section.destination-pulsar.batching_enabled.description=Control whether automatic batching of messages is enabled for the producer.
+datasources.section.destination-pulsar.batching_max_messages.description=Maximum number of messages permitted in a batch.
+datasources.section.destination-pulsar.batching_max_publish_delay.description=Time period in milliseconds within which the messages sent will be batched.
+datasources.section.destination-pulsar.block_if_queue_full.description=If the send operation should block when the outgoing message queue is full.
+datasources.section.destination-pulsar.brokers.description=A list of host/port pairs to use for establishing the initial connection to the Pulsar cluster.
+datasources.section.destination-pulsar.compression_type.description=Compression type for the producer.
+datasources.section.destination-pulsar.max_pending_messages.description=The maximum size of a queue holding pending messages.
+datasources.section.destination-pulsar.max_pending_messages_across_partitions.description=The maximum number of pending messages across partitions.
+datasources.section.destination-pulsar.producer_name.description=Name for the producer. If not filled, the system will generate a globally unique name.
+datasources.section.destination-pulsar.producer_sync.description=Wait synchronously until the record has been sent to Pulsar.
+datasources.section.destination-pulsar.send_timeout_ms.description=If a message is not acknowledged by a server before the send-timeout expires, an error occurs (in ms).
+datasources.section.destination-pulsar.topic_namespace.description=The administrative unit of the topic, which acts as a grouping mechanism for related topics. Most topic configuration is performed at the namespace level. Each tenant has one or multiple namespaces.
+datasources.section.destination-pulsar.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-pulsar.topic_tenant.description=The topic tenant within the instance. Tenants are essential to multi-tenancy in Pulsar and are spread across clusters.
+datasources.section.destination-pulsar.topic_test.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-pulsar.topic_type.description=Identifies the type of topic. Pulsar supports two kinds of topics: persistent and non-persistent. In a persistent topic, all messages are durably persisted on disk (that is, on multiple disks unless the broker is standalone), whereas a non-persistent topic does not persist messages to storage disk.
+datasources.section.destination-pulsar.use_tls.description=Whether to use TLS encryption on the connection.
+datasources.section.destination-rabbitmq.exchange.description=The exchange name.
+datasources.section.destination-rabbitmq.host.description=The RabbitMQ host name.
+datasources.section.destination-rabbitmq.password.description=The password to connect.
+datasources.section.destination-rabbitmq.port.description=The RabbitMQ port.
+datasources.section.destination-rabbitmq.routing_key.description=The routing key.
+datasources.section.destination-rabbitmq.ssl.description=SSL enabled.
+datasources.section.destination-rabbitmq.username.description=The username to connect.
+datasources.section.destination-rabbitmq.virtual_host.description=The RabbitMQ virtual host name.
+datasources.section.destination-redis.cache_type.title=Cache type
+datasources.section.destination-redis.host.title=Host
+datasources.section.destination-redis.password.title=Password
+datasources.section.destination-redis.port.title=Port
+datasources.section.destination-redis.username.title=Username
+datasources.section.destination-redis.cache_type.description=Redis cache type to store data in.
+datasources.section.destination-redis.host.description=Redis host to connect to.
+datasources.section.destination-redis.password.description=Password associated with Redis.
+datasources.section.destination-redis.port.description=Port of Redis.
+datasources.section.destination-redis.username.description=Username associated with Redis.
+datasources.section.destination-redshift.database.title=Database
+datasources.section.destination-redshift.host.title=Host
+datasources.section.destination-redshift.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-redshift.password.title=Password
+datasources.section.destination-redshift.port.title=Port
+datasources.section.destination-redshift.schema.title=Default Schema
+datasources.section.destination-redshift.uploading_method.oneOf.0.title=Standard
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.title=S3 Key Id
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.title=No encryption
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.title=Key
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.title=AES-CBC envelope encryption
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.title=Encryption
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.title=S3 Filename pattern (Optional)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.title=Purge Staging Files and Tables (Optional)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.title=S3 Bucket Name
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.title=S3 Bucket Path (Optional)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.title=S3 Bucket Region
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.title=S3 Access Key
+datasources.section.destination-redshift.uploading_method.oneOf.1.title=S3 Staging
+datasources.section.destination-redshift.uploading_method.title=Uploading Method
+datasources.section.destination-redshift.username.title=Username
+datasources.section.destination-redshift.database.description=Name of the database.
+datasources.section.destination-redshift.host.description=Host Endpoint of the Redshift Cluster (must include the cluster-id, region and end with .redshift.amazonaws.com)
+datasources.section.destination-redshift.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-redshift.password.description=Password associated with the username.
+datasources.section.destination-redshift.port.description=Port of the database.
+datasources.section.destination-redshift.schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public".
+datasources.section.destination-redshift.uploading_method.description=The method used to upload data to the database.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.description=This ID grants access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. See AWS docs on how to generate an access key ID and secret access key.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.description=How to encrypt the staging data
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.description=Staging data will be stored in plaintext.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.description=Staging data will be encrypted using AES-CBC envelope encryption.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.description=The key, base64-encoded. Must be either 128, 192, or 256 bits. Leave blank to have Airbyte generate an ephemeral key for each sync.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.description=Whether to delete the staging files from S3 after completing the sync. See docs for details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.description=The name of the staging S3 bucket to use if utilising a COPY strategy. COPY is recommended for production workloads for better speed and scalability. See AWS docs for more details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. If not provided, then defaults to the root directory. See path's name recommendations for more details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a COPY strategy. See AWS docs for details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.description=The corresponding secret to the above access key id. See AWS docs on how to generate an access key ID and secret access key.
+datasources.section.destination-redshift.username.description=Username to use to access the database.
+datasources.section.destination-rockset.api_key.title=Api Key
+datasources.section.destination-rockset.api_server.title=Api Server
+datasources.section.destination-rockset.workspace.title=Workspace
+datasources.section.destination-rockset.api_key.description=Rockset API key
+datasources.section.destination-rockset.api_server.description=Rockset API URL
+datasources.section.destination-rockset.workspace.description=The Rockset workspace in which collections will be created and written to.
+datasources.section.destination-s3.access_key_id.title=S3 Key ID *
+datasources.section.destination-s3.file_name_pattern.title=S3 Filename pattern (Optional)
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate Level
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.title=Compression Codec *
+datasources.section.destination-s3.format.oneOf.0.properties.format_type.title=Format Type *
+datasources.section.destination-s3.format.oneOf.0.title=Avro: Apache Avro
+datasources.section.destination-s3.format.oneOf.1.properties.compression.oneOf.0.title=No Compression
+datasources.section.destination-s3.format.oneOf.1.properties.compression.oneOf.1.title=GZIP
+datasources.section.destination-s3.format.oneOf.1.properties.compression.title=Compression
+datasources.section.destination-s3.format.oneOf.1.properties.flattening.title=Normalization (Flattening)
+datasources.section.destination-s3.format.oneOf.1.properties.format_type.title=Format Type *
+datasources.section.destination-s3.format.oneOf.1.title=CSV: Comma-Separated Values
+datasources.section.destination-s3.format.oneOf.2.properties.compression.oneOf.0.title=No Compression
+datasources.section.destination-s3.format.oneOf.2.properties.compression.oneOf.1.title=GZIP
+datasources.section.destination-s3.format.oneOf.2.properties.compression.title=Compression
+datasources.section.destination-s3.format.oneOf.2.properties.format_type.title=Format Type *
+datasources.section.destination-s3.format.oneOf.2.title=JSON Lines: Newline-delimited JSON
+datasources.section.destination-s3.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.format_type.title=Format Type *
+datasources.section.destination-s3.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional)
+datasources.section.destination-s3.format.oneOf.3.title=Parquet: Columnar Storage
+datasources.section.destination-s3.format.title=Output Format *
+datasources.section.destination-s3.s3_bucket_name.title=S3 Bucket Name
+datasources.section.destination-s3.s3_bucket_path.title=S3 Bucket Path
+datasources.section.destination-s3.s3_bucket_region.title=S3 Bucket Region
+datasources.section.destination-s3.s3_endpoint.title=Endpoint (Optional)
+datasources.section.destination-s3.s3_path_format.title=S3 Path Format (Optional)
+datasources.section.destination-s3.secret_access_key.title=S3 Access Key *
+datasources.section.destination-s3.access_key_id.description=The access key ID to access the S3 bucket. Airbyte requires Read and Write permissions to the given bucket. Read more here.
+datasources.section.destination-s3.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s)
+datasources.section.destination-s3.format.description=Format of the data output. See here for more details
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Defaults to no compression.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=See here for details.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block.
+datasources.section.destination-s3.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz").
+datasources.section.destination-s3.format.oneOf.1.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details.
+datasources.section.destination-s3.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz").
+datasources.section.destination-s3.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB.
+datasources.section.destination-s3.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages.
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_encoding.description=Default: true.
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for dictionary. Default: 1024 KB.
+datasources.section.destination-s3.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. This is also the minimum size of a row group. Default: 8 MB.
+datasources.section.destination-s3.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB.
+datasources.section.destination-s3.s3_bucket_name.description=The name of the S3 bucket. Read more here.
+datasources.section.destination-s3.s3_bucket_path.description=Directory under the S3 bucket where data will be written. Read more here
+datasources.section.destination-s3.s3_bucket_region.description=The region of the S3 bucket. See here for all region codes.
+datasources.section.destination-s3.s3_endpoint.description=Your S3 endpoint url. Read more here
+datasources.section.destination-s3.s3_path_format.description=Format string on how data will be organized inside the S3 bucket directory. Read more here
+datasources.section.destination-s3.secret_access_key.description=The corresponding secret to the access key ID. Read more here
+datasources.section.destination-scylla.address.title=Address
+datasources.section.destination-scylla.keyspace.title=Keyspace
+datasources.section.destination-scylla.password.title=Password
+datasources.section.destination-scylla.port.title=Port
+datasources.section.destination-scylla.replication.title=Replication factor
+datasources.section.destination-scylla.username.title=Username
+datasources.section.destination-scylla.address.description=Address to connect to.
+datasources.section.destination-scylla.keyspace.description=Default Scylla keyspace to create data in.
+datasources.section.destination-scylla.password.description=Password associated with Scylla.
+datasources.section.destination-scylla.port.description=Port of Scylla.
+datasources.section.destination-scylla.replication.description=Indicates how many nodes the data should be replicated to.
+datasources.section.destination-scylla.username.description=Username to use to access Scylla.
+datasources.section.destination-sftp-json.destination_path.title=Destination path
+datasources.section.destination-sftp-json.host.title=Host
+datasources.section.destination-sftp-json.password.title=Password
+datasources.section.destination-sftp-json.port.title=Port
+datasources.section.destination-sftp-json.username.title=User
+datasources.section.destination-sftp-json.destination_path.description=Path to the directory where json files will be written.
+datasources.section.destination-sftp-json.host.description=Hostname of the SFTP server.
+datasources.section.destination-sftp-json.password.description=Password associated with the username.
+datasources.section.destination-sftp-json.port.description=Port of the SFTP server.
+datasources.section.destination-sftp-json.username.description=Username to use to access the SFTP server.
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.access_token.title=Access Token
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.refresh_token.title=Refresh Token
+datasources.section.destination-snowflake.credentials.oneOf.0.title=OAuth2.0
+datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key.title=Private Key
+datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key_password.title=Passphrase (Optional)
+datasources.section.destination-snowflake.credentials.oneOf.1.title=Key Pair Authentication
+datasources.section.destination-snowflake.credentials.oneOf.2.properties.password.title=Password
+datasources.section.destination-snowflake.credentials.oneOf.2.title=Username and Password
+datasources.section.destination-snowflake.credentials.title=Authorization Method
+datasources.section.destination-snowflake.database.title=Database
+datasources.section.destination-snowflake.host.title=Host
+datasources.section.destination-snowflake.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-snowflake.loading_method.oneOf.0.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.0.title=Select another option
+datasources.section.destination-snowflake.loading_method.oneOf.1.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.1.title=[Recommended] Internal Staging
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.access_key_id.title=AWS access key ID
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.0.title=No encryption
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.properties.key_encrypting_key.title=Key
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.title=AES-CBC envelope encryption
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.title=Encryption
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.file_name_pattern.title=S3 Filename pattern (Optional)
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.purge_staging_data.title=Purge Staging Files and Tables
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_name.title=S3 Bucket Name
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_region.title=S3 Bucket Region
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.secret_access_key.title=AWS secret access key
+datasources.section.destination-snowflake.loading_method.oneOf.2.title=AWS S3 Staging
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.bucket_name.title=Cloud Storage bucket name
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.credentials_json.title=Google Application Credentials
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.project_id.title=Google Cloud project ID
+datasources.section.destination-snowflake.loading_method.oneOf.3.title=Google Cloud Storage Staging
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_account_name.title=Azure Blob Storage account name
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_container_name.title=Azure Blob Storage Container Name
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_endpoint_domain_name.title=Azure Blob Storage Endpoint
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_sas_token.title=SAS Token
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.4.title=Azure Blob Storage Staging
+datasources.section.destination-snowflake.loading_method.title=Data Staging Method
+datasources.section.destination-snowflake.role.title=Role
+datasources.section.destination-snowflake.schema.title=Default Schema
+datasources.section.destination-snowflake.username.title=Username
+datasources.section.destination-snowflake.warehouse.title=Warehouse
+datasources.section.destination-snowflake.credentials.description=
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.access_token.description=Enter your application's Access Token
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_id.description=Enter your application's Client ID
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_secret.description=Enter your application's Client secret
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.refresh_token.description=Enter your application's Refresh Token
+datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key.description=RSA Private key to use for Snowflake connection. See the docs for more information on how to obtain this key.
+datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key_password.description=Passphrase for private key
+datasources.section.destination-snowflake.credentials.oneOf.2.properties.password.description=Enter the password associated with the username.
+datasources.section.destination-snowflake.database.description=Enter the name of the database you want to sync data into
+datasources.section.destination-snowflake.host.description=Enter your Snowflake account's locator.
+datasources.section.destination-bigquery.loading_method.description=Loading method used to send select the way data will be uploaded to BigQuery. host1:port1,host2:port2,...
. Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down).
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server.
+datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses.
+datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.
+datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer.
+datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns.
+datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream.
+datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request.
+datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block.
+datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5.
+datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes.
+datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers.
+datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.
+datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.
+datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error.
+datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used.
+datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum.
+datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established.
+datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka.
+datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID
+datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key
+datasources.section.destination-amazon-sqs.message_delay.title=Message Delay
+datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id
+datasources.section.destination-amazon-sqs.queue_url.title=Queue URL
+datasources.section.destination-amazon-sqs.region.title=AWS Region
+datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key
+datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages
+datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body.
+datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds).
+datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues.
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue
+datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue
+datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages
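+# Illustrative note (not part of the generated spec): with message_body_key set to "data", a hypothetical input
+# record {"data": {"id": 1}, "emitted_at": 123} would be sent with body {"id": 1}; if unset, the whole record data is sent.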
+datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id
+datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name
+datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn
+datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title
+datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User
+datasources.section.destination-aws-datalake.credentials.title=Authentication mode
+datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name
+datasources.section.destination-aws-datalake.region.title=AWS Region
+datasources.section.destination-aws-datalake.aws_account_id.description=The target AWS account ID.
+datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket
+datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix
+datasources.section.destination-aws-datalake.credentials.description=Choose how to authenticate to AWS.
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=The IAM role that will be assumed to write data to S3.
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials
+datasources.section.destination-aws-datalake.lakeformation_database_name.description=The AWS Lake Formation database to use.
+datasources.section.destination-aws-datalake.region.description=Region name
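+# Illustrative note (not part of the generated spec): an IAM role ARN typically looks like
+# arn:aws:iam::123456789012:role/airbyte-lakeformation-writer and must allow writing to the target S3 bucket and Lake Formation database.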
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes)
+datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening)
+datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values
+datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON
+datasources.section.destination-azure-blob-storage.format.title=Output Format
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure blob storage account key.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The name of the Azure Blob Storage account.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure Blob Storage container. If it does not exist, it will be created automatically. If left empty, a container named airbytecontainer+timestamp will be created automatically.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=The Azure Blob Storage endpoint domain name. Leave the default value (or leave it empty when running the container from the command line) to use the native Microsoft endpoint shown in the example.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The number of megabytes to buffer for the output stream to Azure. This affects the memory footprint on workers, but may need adjustment for performance and an appropriate block size in Azure.
+datasources.section.destination-azure-blob-storage.format.description=Output data format
+datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details.
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional)
+datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source)
+datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID
+datasources.section.destination-bigquery.dataset_location.title=Dataset Location
+datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional)
+datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging
+datasources.section.destination-bigquery.loading_method.title=Loading Method
+datasources.section.destination-bigquery.project_id.title=Project ID
+datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional)
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here.
+datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty.
+datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here.
+datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here.
+datasources.section.destination-bigquery.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.
+datasources.section.destination-kafka.bootstrap_servers.description=A list of host/port pairs used for establishing the initial connection to the Kafka cluster, in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down).
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server.
+datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses.
+datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.
+datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer.
+datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns.
+datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream.
+datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request.
+datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block.
+datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5.
+datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes.
+datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers.
+datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.
+datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.
+datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error.
+datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used.
+datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum.
+datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established.
+datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka.
+datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
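+# Illustrative note (not part of the generated spec): a topic_pattern such as {namespace}.{stream}.events would route
+# records from namespace "public" and stream "users" to the topic public.users.events, normalized to Kafka naming rules.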
+datasources.section.destination-keen.api_key.title=API Key
+datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp
+datasources.section.destination-keen.project_id.title=Project ID
+datasources.section.destination-keen.api_key.description=To get your Keen Master API Key, navigate to the Access tab in the left-hand side panel and check the Project Details section.
+datasources.section.destination-keen.infer_timestamp.description=Allow the connector to guess the keen.timestamp value based on the streamed data.
+datasources.section.destination-keen.project_id.description=To get your Keen Project ID, navigate to the Access tab in the left-hand side panel and check the Project Details section.
+datasources.section.destination-kinesis.accessKey.title=Access Key
+datasources.section.destination-kinesis.bufferSize.title=Buffer Size
+datasources.section.destination-kinesis.endpoint.title=Endpoint
+datasources.section.destination-kinesis.privateKey.title=Private Key
+datasources.section.destination-kinesis.region.title=Region
+datasources.section.destination-kinesis.shardCount.title=Shard Count
+datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for current user.
+datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing kinesis records before being batch streamed.
+datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint.
+datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that is unique to each account, also known as a "recovery phrase".
+datasources.section.destination-kinesis.region.description=AWS region. Your account determines the Regions that are available to you.
+datasources.section.destination-kinesis.shardCount.description=Number of shards to which the data should be streamed.
+datasources.section.destination-kvdb.bucket_id.title=Bucket ID
+datasources.section.destination-kvdb.secret_key.title=Secret Key
+datasources.section.destination-kvdb.bucket_id.description=The ID of your KVdb bucket.
+datasources.section.destination-kvdb.secret_key.description=Your bucket Secret Key.
+datasources.section.destination-local-json.destination_path.title=Destination Path
+datasources.section.destination-local-json.destination_path.description=Path to the directory where json files will be written. The files will be placed inside that local mount. For more information check out our docs
+datasources.section.destination-mariadb-columnstore.database.title=Database
+datasources.section.destination-mariadb-columnstore.host.title=Host
+datasources.section.destination-mariadb-columnstore.password.title=Password
+datasources.section.destination-mariadb-columnstore.port.title=Port
+datasources.section.destination-mariadb-columnstore.username.title=Username
+datasources.section.destination-mariadb-columnstore.database.description=Name of the database.
+datasources.section.destination-mariadb-columnstore.host.description=The Hostname of the database.
+datasources.section.destination-mariadb-columnstore.password.description=The Password associated with the username.
+datasources.section.destination-mariadb-columnstore.port.description=The Port of the database.
+datasources.section.destination-mariadb-columnstore.username.description=The Username which is used to access the database.
+datasources.section.destination-meilisearch.api_key.title=API Key
+datasources.section.destination-meilisearch.host.title=Host
+datasources.section.destination-meilisearch.api_key.description=MeiliSearch API Key. See the docs for more information on how to obtain this key.
+datasources.section.destination-meilisearch.host.description=Hostname of the MeiliSearch instance.
+datasources.section.destination-mongodb.auth_type.oneOf.0.title=None
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.title=Password
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.title=User
+datasources.section.destination-mongodb.auth_type.oneOf.1.title=Login/Password
+datasources.section.destination-mongodb.auth_type.title=Authorization type
+datasources.section.destination-mongodb.database.title=DB Name
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.title=Host
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.title=Port
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.title=TLS Connection
+datasources.section.destination-mongodb.instance_type.oneOf.0.title=Standalone MongoDb Instance
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.title=Server addresses
+datasources.section.destination-mongodb.instance_type.oneOf.1.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL
+datasources.section.destination-mongodb.instance_type.oneOf.2.title=MongoDB Atlas
+datasources.section.destination-mongodb.instance_type.title=MongoDb Instance Type
+datasources.section.destination-mongodb.auth_type.description=Authorization type.
+datasources.section.destination-mongodb.auth_type.oneOf.0.description=None.
+datasources.section.destination-mongodb.auth_type.oneOf.1.description=Login/Password.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.description=Password associated with the username.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.description=Username to use to access the database.
+datasources.section.destination-mongodb.database.description=Name of the database.
+datasources.section.destination-mongodb.instance_type.description=The MongoDb instance to connect to. For MongoDB Atlas and Replica Set, a TLS connection is used by default.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.description=The Host of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.description=The Port of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.description=Indicates whether TLS encryption protocol will be used to connect to MongoDB. It is recommended to use TLS connection if possible. For more information see documentation.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.description=A replica set name.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify the `host`:`port` of each member, separated by commas.
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.description=URL of a cluster to connect to.
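+# Illustrative note (not part of the generated spec): for a replica set, server_addresses could be
+# mongo1:27017,mongo2:27017,mongo3:27017 with replica_set set to the set name, e.g. rs0.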
+datasources.section.destination-mqtt.automatic_reconnect.title=Automatic reconnect
+datasources.section.destination-mqtt.broker_host.title=MQTT broker host
+datasources.section.destination-mqtt.broker_port.title=MQTT broker port
+datasources.section.destination-mqtt.clean_session.title=Clean session
+datasources.section.destination-mqtt.client.title=Client ID
+datasources.section.destination-mqtt.connect_timeout.title=Connect timeout
+datasources.section.destination-mqtt.message_qos.title=Message QoS
+datasources.section.destination-mqtt.message_retained.title=Message retained
+datasources.section.destination-mqtt.password.title=Password
+datasources.section.destination-mqtt.publisher_sync.title=Sync publisher
+datasources.section.destination-mqtt.topic_pattern.title=Topic pattern
+datasources.section.destination-mqtt.topic_test.title=Test topic
+datasources.section.destination-mqtt.use_tls.title=Use TLS
+datasources.section.destination-mqtt.username.title=Username
+datasources.section.destination-mqtt.automatic_reconnect.description=Whether the client will automatically attempt to reconnect to the server if the connection is lost.
+datasources.section.destination-mqtt.broker_host.description=Host of the broker to connect to.
+datasources.section.destination-mqtt.broker_port.description=Port of the broker.
+datasources.section.destination-mqtt.clean_session.description=Whether the client and server should remember state across restarts and reconnects.
+datasources.section.destination-mqtt.client.description=A client identifier that is unique on the server being connected to.
+datasources.section.destination-mqtt.connect_timeout.description=Maximum time interval (in seconds) the client will wait for the network connection to the MQTT server to be established.
+datasources.section.destination-mqtt.message_qos.description=Quality of service used for each message to be delivered.
+datasources.section.destination-mqtt.message_retained.description=Whether or not the publish message should be retained by the messaging engine.
+datasources.section.destination-mqtt.password.description=Password to use for the connection.
+datasources.section.destination-mqtt.publisher_sync.description=Wait synchronously until the record has been sent to the broker.
+datasources.section.destination-mqtt.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-mqtt.topic_test.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-mqtt.use_tls.description=Whether to use TLS encryption on the connection.
+datasources.section.destination-mqtt.username.description=User name to use for the connection.
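+# Illustrative note (not part of the generated spec): message_qos follows standard MQTT semantics -
+# 0 delivers at most once, 1 at least once, and 2 exactly once; higher levels add delivery overhead.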
+datasources.section.destination-mssql.database.title=DB Name
+datasources.section.destination-mssql.host.title=Host
+datasources.section.destination-mssql.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-mssql.password.title=Password
+datasources.section.destination-mssql.port.title=Port
+datasources.section.destination-mssql.schema.title=Default Schema
+datasources.section.destination-mssql.ssl_method.oneOf.0.title=Unencrypted
+datasources.section.destination-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate)
+datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate
+datasources.section.destination-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate)
+datasources.section.destination-mssql.ssl_method.title=SSL Method
+datasources.section.destination-mssql.username.title=User
+datasources.section.destination-mssql.database.description=The name of the MSSQL database.
+datasources.section.destination-mssql.host.description=The host name of the MSSQL database.
+datasources.section.destination-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-mssql.password.description=The password associated with this username.
+datasources.section.destination-mssql.port.description=The port of the MSSQL database.
+datasources.section.destination-mssql.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public".
+datasources.section.destination-mssql.ssl_method.description=The encryption method which is used to communicate with the database.
+datasources.section.destination-mssql.ssl_method.oneOf.0.description=The data transfer will not be encrypted.
+datasources.section.destination-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!)
+datasources.section.destination-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate.
+datasources.section.destination-mssql.username.description=The username which is used to access the database.
+datasources.section.destination-mysql.database.title=DB Name
+datasources.section.destination-mysql.host.title=Host
+datasources.section.destination-mysql.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-mysql.password.title=Password
+datasources.section.destination-mysql.port.title=Port
+datasources.section.destination-mysql.ssl.title=SSL Connection
+datasources.section.destination-mysql.username.title=User
+datasources.section.destination-mysql.database.description=Name of the database.
+datasources.section.destination-mysql.host.description=Hostname of the database.
+datasources.section.destination-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-mysql.password.description=Password associated with the username.
+datasources.section.destination-mysql.port.description=Port of the database.
+datasources.section.destination-mysql.ssl.description=Encrypt data using SSL.
+datasources.section.destination-mysql.username.description=Username to use to access the database.
+datasources.section.destination-oracle.encryption.oneOf.0.title=Unencrypted
+datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.title=Encryption Algorithm
+datasources.section.destination-oracle.encryption.oneOf.1.title=Native Network Encryption (NNE)
+datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.title=SSL PEM file
+datasources.section.destination-oracle.encryption.oneOf.2.title=TLS Encrypted (verify certificate)
+datasources.section.destination-oracle.encryption.title=Encryption
+datasources.section.destination-oracle.host.title=Host
+datasources.section.destination-oracle.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-oracle.password.title=Password
+datasources.section.destination-oracle.port.title=Port
+datasources.section.destination-oracle.schema.title=Default Schema
+datasources.section.destination-oracle.sid.title=SID
+datasources.section.destination-oracle.username.title=User
+datasources.section.destination-oracle.encryption.description=The encryption method which is used when communicating with the database.
+datasources.section.destination-oracle.encryption.oneOf.0.description=Data transfer will not be encrypted.
+datasources.section.destination-oracle.encryption.oneOf.1.description=The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports.
+datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.description=This parameter defines the database encryption algorithm.
+datasources.section.destination-oracle.encryption.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations.
+datasources.section.destination-oracle.host.description=The hostname of the database.
+datasources.section.destination-oracle.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-oracle.password.description=The password associated with the username.
+datasources.section.destination-oracle.port.description=The port of the database.
+datasources.section.destination-oracle.schema.description=The default schema is used as the target schema for all statements issued from the connection that do not explicitly specify a schema name. The usual value for this field is "airbyte". In Oracle, schemas and users are the same thing, so the "user" parameter is used as the login credentials and this is used for the default Airbyte message schema.
+datasources.section.destination-oracle.sid.description=The System Identifier uniquely distinguishes the instance from any other instance on the same computer.
+datasources.section.destination-oracle.username.description=The username to access the database. This user must have CREATE USER privileges in the database.
+datasources.section.destination-mongodb.auth_type.oneOf.0.title=None
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.title=Password
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.title=User
+datasources.section.destination-mongodb.auth_type.oneOf.1.title=Login/Password
+datasources.section.destination-mongodb.auth_type.title=Authorization type
+datasources.section.destination-mongodb.database.title=DB Name
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.title=Host
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.title=Port
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.title=TLS Connection
+datasources.section.destination-mongodb.instance_type.oneOf.0.title=Standalone MongoDb Instance
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.title=Server addresses
+datasources.section.destination-mongodb.instance_type.oneOf.1.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL
+datasources.section.destination-mongodb.instance_type.oneOf.2.title=MongoDB Atlas
+datasources.section.destination-mongodb.instance_type.title=MongoDb Instance Type
+datasources.section.destination-mongodb.auth_type.description=Authorization type.
+datasources.section.destination-mongodb.auth_type.oneOf.0.description=None.
+datasources.section.destination-mongodb.auth_type.oneOf.1.description=Login/Password.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.description=Password associated with the username.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.description=Username to use to access the database.
+datasources.section.destination-mongodb.database.description=Name of the database.
+datasources.section.destination-mongodb.instance_type.description=The MongoDB instance to connect to. For MongoDB Atlas and Replica Set, a TLS connection is used by default.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.description=The Host of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.description=The Port of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.description=Indicates whether TLS encryption protocol will be used to connect to MongoDB. It is recommended to use TLS connection if possible. For more information see documentation.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.description=A replica set name.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify `host`:`port` of each member separated by a comma.
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.description=URL of a cluster to connect to.
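+# Illustrative example (hypothetical hostnames, not part of the connector spec): for a three-member replica set,
+# server_addresses would take the form 'mongo1:27017,mongo2:27017,mongo3:27017'.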
+datasources.section.destination-mqtt.automatic_reconnect.title=Automatic reconnect
+datasources.section.destination-mqtt.broker_host.title=MQTT broker host
+datasources.section.destination-mqtt.broker_port.title=MQTT broker port
+datasources.section.destination-mqtt.clean_session.title=Clean session
+datasources.section.destination-mqtt.client.title=Client ID
+datasources.section.destination-mqtt.connect_timeout.title=Connect timeout
+datasources.section.destination-mqtt.message_qos.title=Message QoS
+datasources.section.destination-mqtt.message_retained.title=Message retained
+datasources.section.destination-mqtt.password.title=Password
+datasources.section.destination-mqtt.publisher_sync.title=Sync publisher
+datasources.section.destination-mqtt.topic_pattern.title=Topic pattern
+datasources.section.destination-mqtt.topic_test.title=Test topic
+datasources.section.destination-mqtt.use_tls.title=Use TLS
+datasources.section.destination-mqtt.username.title=Username
+datasources.section.destination-mqtt.automatic_reconnect.description=Whether the client will automatically attempt to reconnect to the server if the connection is lost.
+datasources.section.destination-mqtt.broker_host.description=Host of the broker to connect to.
+datasources.section.destination-mqtt.broker_port.description=Port of the broker.
+datasources.section.destination-mqtt.clean_session.description=Whether the client and server should remember state across restarts and reconnects.
+datasources.section.destination-mqtt.client.description=A client identifier that is unique on the server being connected to.
+datasources.section.destination-mqtt.connect_timeout.description=Maximum time interval (in seconds) the client will wait for the network connection to the MQTT server to be established.
+datasources.section.destination-mqtt.message_qos.description=Quality of service used for each message to be delivered.
+datasources.section.destination-mqtt.message_retained.description=Whether or not the publish message should be retained by the messaging engine.
+datasources.section.destination-mqtt.password.description=Password to use for the connection.
+datasources.section.destination-mqtt.publisher_sync.description=Wait synchronously until the record has been sent to the broker.
+datasources.section.destination-mqtt.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-mqtt.topic_test.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-mqtt.use_tls.description=Whether to use TLS encryption on the connection.
+datasources.section.destination-mqtt.username.description=User name to use for the connection.
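+# Illustrative note (standard MQTT semantics, not connector-specific): message_qos is typically one of
+# 0 (at most once), 1 (at least once) or 2 (exactly once); higher levels trade throughput for stronger delivery guarantees.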
+datasources.section.destination-mssql.database.title=DB Name
+datasources.section.destination-mssql.host.title=Host
+datasources.section.destination-mssql.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-mssql.password.title=Password
+datasources.section.destination-mssql.port.title=Port
+datasources.section.destination-mssql.schema.title=Default Schema
+datasources.section.destination-mssql.ssl_method.oneOf.0.title=Unencrypted
+datasources.section.destination-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate)
+datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate
+datasources.section.destination-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate)
+datasources.section.destination-mssql.ssl_method.title=SSL Method
+datasources.section.destination-mssql.username.title=User
+datasources.section.destination-mssql.database.description=The name of the MSSQL database.
+datasources.section.destination-mssql.host.description=The host name of the MSSQL database.
+datasources.section.destination-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-mssql.password.description=The password associated with this username.
+datasources.section.destination-mssql.port.description=The port of the MSSQL database.
+datasources.section.destination-mssql.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public".
+datasources.section.destination-mssql.ssl_method.description=The encryption method which is used to communicate with the database.
+datasources.section.destination-mssql.ssl_method.oneOf.0.description=The data transfer will not be encrypted.
+datasources.section.destination-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!)
+datasources.section.destination-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate.
+datasources.section.destination-mssql.username.description=The username which is used to access the database.
+datasources.section.destination-mysql.database.title=DB Name
+datasources.section.destination-mysql.host.title=Host
+datasources.section.destination-mysql.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-mysql.password.title=Password
+datasources.section.destination-mysql.port.title=Port
+datasources.section.destination-mysql.ssl.title=SSL Connection
+datasources.section.destination-mysql.username.title=User
+datasources.section.destination-mysql.database.description=Name of the database.
+datasources.section.destination-mysql.host.description=Hostname of the database.
+datasources.section.destination-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-mysql.password.description=Password associated with the username.
+datasources.section.destination-mysql.port.description=Port of the database.
+datasources.section.destination-mysql.ssl.description=Encrypt data using SSL.
+datasources.section.destination-mysql.username.description=Username to use to access the database.
+datasources.section.destination-oracle.encryption.oneOf.0.title=Unencrypted
+datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.title=Encryption Algorithm
+datasources.section.destination-oracle.encryption.oneOf.1.title=Native Network Encryption (NNE)
+datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.title=SSL PEM file
+datasources.section.destination-oracle.encryption.oneOf.2.title=TLS Encrypted (verify certificate)
+datasources.section.destination-oracle.encryption.title=Encryption
+datasources.section.destination-oracle.host.title=Host
+datasources.section.destination-oracle.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-oracle.password.title=Password
+datasources.section.destination-oracle.port.title=Port
+datasources.section.destination-oracle.schema.title=Default Schema
+datasources.section.destination-oracle.sid.title=SID
+datasources.section.destination-oracle.username.title=User
+datasources.section.destination-oracle.encryption.description=The encryption method which is used when communicating with the database.
+datasources.section.destination-oracle.encryption.oneOf.0.description=Data transfer will not be encrypted.
+datasources.section.destination-oracle.encryption.oneOf.1.description=The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports.
+datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.description=This parameter defines the database encryption algorithm.
+datasources.section.destination-oracle.encryption.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations.
+datasources.section.destination-oracle.host.description=The hostname of the database.
+datasources.section.destination-oracle.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-oracle.password.description=The password associated with the username.
+datasources.section.destination-oracle.port.description=The port of the database.
+datasources.section.destination-oracle.schema.description=The default schema is used as the target schema for all statements issued from the connection that do not explicitly specify a schema name. The usual value for this field is "airbyte". In Oracle, schemas and users are the same thing, so the "user" parameter is used as the login credentials and this is used for the default Airbyte message schema.
+datasources.section.destination-oracle.sid.description=The System Identifier uniquely distinguishes the instance from any other instance on the same computer.
+datasources.section.destination-oracle.username.description=The username to access the database. This user must have CREATE USER privileges in the database.
+datasources.section.destination-postgres.database.title=DB Name
+datasources.section.destination-postgres.host.title=Host
+datasources.section.destination-postgres.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-postgres.password.title=Password
+datasources.section.destination-postgres.port.title=Port
+datasources.section.destination-postgres.schema.title=Default Schema
+datasources.section.destination-postgres.ssl.title=SSL Connection
+datasources.section.destination-postgres.ssl_mode.oneOf.0.title=disable
+datasources.section.destination-postgres.ssl_mode.oneOf.1.title=allow
+datasources.section.destination-postgres.ssl_mode.oneOf.2.title=prefer
+datasources.section.destination-postgres.ssl_mode.oneOf.3.title=require
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.title=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.title=Client key password (Optional)
+datasources.section.destination-postgres.ssl_mode.oneOf.4.title=verify-ca
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.title=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.title=Client certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.title=Client key
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.title=Client key password (Optional)
+datasources.section.destination-postgres.ssl_mode.oneOf.5.title=verify-full
+datasources.section.destination-postgres.ssl_mode.title=SSL modes
+datasources.section.destination-postgres.username.title=User
+datasources.section.destination-postgres.database.description=Name of the database.
+datasources.section.destination-postgres.host.description=Hostname of the database.
+datasources.section.destination-postgres.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-postgres.password.description=Password associated with the username.
+datasources.section.destination-postgres.port.description=Port of the database.
+datasources.section.destination-postgres.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public".
+datasources.section.destination-postgres.ssl.description=Encrypt data using SSL. When activating SSL, please select one of the connection modes.
+datasources.section.destination-postgres.ssl_mode.description=SSL connection modes.
+datasources.section.destination-postgres.ssl_mode.oneOf.0.description=Disable SSL.
+datasources.section.destination-postgres.ssl_mode.oneOf.1.description=Allow SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.2.description=Prefer SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.3.description=Require SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.4.description=Verify-ca SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.description=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.description=Password for the key storage. This field is optional; if you do not provide one, a password will be generated automatically.
+datasources.section.destination-postgres.ssl_mode.oneOf.5.description=Verify-full SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.description=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.description=Client certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.description=Client key
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.description=Password for the key storage. This field is optional; if you do not provide one, a password will be generated automatically.
+datasources.section.destination-postgres.username.description=Username to use to access the database.
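+# Illustrative note (standard PostgreSQL SSL semantics, not connector-specific): 'verify-ca' checks that the server
+# certificate is signed by the provided CA certificate, while 'verify-full' additionally verifies that the server
+# host name matches the certificate.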
+datasources.section.destination-pubsub.credentials_json.title=Credentials JSON
+datasources.section.destination-pubsub.project_id.title=Project ID
+datasources.section.destination-pubsub.topic_id.title=PubSub Topic ID
+datasources.section.destination-pubsub.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key.
+datasources.section.destination-pubsub.project_id.description=The GCP project ID for the project containing the target PubSub.
+datasources.section.destination-pubsub.topic_id.description=The PubSub topic ID in the given GCP project ID.
+datasources.section.destination-pulsar.batching_enabled.title=Enable batching
+datasources.section.destination-pulsar.batching_max_messages.title=Batching max messages
+datasources.section.destination-pulsar.batching_max_publish_delay.title=Batching max publish delay
+datasources.section.destination-pulsar.block_if_queue_full.title=Block if queue is full
+datasources.section.destination-pulsar.brokers.title=Pulsar brokers
+datasources.section.destination-pulsar.compression_type.title=Compression type
+datasources.section.destination-pulsar.max_pending_messages.title=Max pending messages
+datasources.section.destination-pulsar.max_pending_messages_across_partitions.title=Max pending messages across partitions
+datasources.section.destination-pulsar.producer_name.title=Producer name
+datasources.section.destination-pulsar.producer_sync.title=Sync producer
+datasources.section.destination-pulsar.send_timeout_ms.title=Message send timeout
+datasources.section.destination-pulsar.topic_namespace.title=Topic namespace
+datasources.section.destination-pulsar.topic_pattern.title=Topic pattern
+datasources.section.destination-pulsar.topic_tenant.title=Topic tenant
+datasources.section.destination-pulsar.topic_test.title=Test topic
+datasources.section.destination-pulsar.topic_type.title=Topic type
+datasources.section.destination-pulsar.use_tls.title=Use TLS
+datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID
+datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key
+datasources.section.destination-amazon-sqs.message_delay.title=Message Delay
+datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id
+datasources.section.destination-amazon-sqs.queue_url.title=Queue URL
+datasources.section.destination-amazon-sqs.region.title=AWS Region
+datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key
+datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages
+datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body.
+datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds).
+datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues.
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue
+datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue
+datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages
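+# Illustrative example (hypothetical key name, not part of the connector spec): with message_body_key='data', only the
+# value stored under the 'data' key of each input record is sent as the SQS message body; when unset, the full record
+# data is sent.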
+datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id
+datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name
+datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn
+datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title
+datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User
+datasources.section.destination-aws-datalake.credentials.title=Authentication mode
+datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name
+datasources.section.destination-aws-datalake.region.title=AWS Region
+datasources.section.destination-aws-datalake.aws_account_id.description=Target AWS account ID.
+datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket
+datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix
+datasources.section.destination-aws-datalake.credentials.description=Choose how to authenticate to AWS.
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to S3.
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials
+datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use
+datasources.section.destination-aws-datalake.region.description=Region name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes)
+datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening)
+datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values
+datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON
+datasources.section.destination-azure-blob-storage.format.title=Output Format
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure blob storage account key.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The name of the Azure Blob Storage account.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure Blob Storage container. If it does not exist, it will be created automatically. If left empty, a container named airbytecontainer+timestamp will be created automatically.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=This is the Azure Blob Storage endpoint domain name. Leave the default value (or leave it empty if running the container from the command line) to use the Microsoft native endpoint from the example.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The amount of megabytes to buffer for the output stream to Azure. This will impact memory footprint on workers, but may need adjustment for performance and appropriate block size in Azure.
+datasources.section.destination-azure-blob-storage.format.description=Output data format
+datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details.
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional)
+datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source)
+datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID
+datasources.section.destination-bigquery.dataset_location.title=Dataset Location
+datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional)
+datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging
+datasources.section.destination-bigquery.loading_method.title=Loading Method
+datasources.section.destination-bigquery.project_id.title=Project ID
+datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional)
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX=15) in megabytes for each table. This is the size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here.
+datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty.
+datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here.
+datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here.
+datasources.section.destination-bigquery.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.
+datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum.
+datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established.
+datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka.
+datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-keen.api_key.title=API Key
+datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp
+datasources.section.destination-keen.project_id.title=Project ID
+datasources.section.destination-keen.api_key.description=To get Keen Master API Key, navigate to the Access tab from the left-hand, side panel and check the Project Details section.
+datasources.section.destination-keen.infer_timestamp.description=Allow connector to guess keen.timestamp value based on the streamed data.
+datasources.section.destination-keen.project_id.description=To get Keen Project ID, navigate to the Access tab from the left-hand, side panel and check the Project Details section.
+datasources.section.destination-kinesis.accessKey.title=Access Key
+datasources.section.destination-kinesis.bufferSize.title=Buffer Size
+datasources.section.destination-kinesis.endpoint.title=Endpoint
+datasources.section.destination-kinesis.privateKey.title=Private Key
+datasources.section.destination-kinesis.region.title=Region
+datasources.section.destination-kinesis.shardCount.title=Shard Count
+datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for current user.
+datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing kinesis records before being batch streamed.
+datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint.
+datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that are unique for each account, also known as a "recovery phrase".
+datasources.section.destination-kinesis.region.description=AWS region. Your account determines the Regions that are available to you.
+datasources.section.destination-kinesis.shardCount.description=Number of shards to which the data should be streamed.
+datasources.section.destination-kvdb.bucket_id.title=Bucket ID
+datasources.section.destination-kvdb.secret_key.title=Secret Key
+datasources.section.destination-kvdb.bucket_id.description=The ID of your KVdb bucket.
+datasources.section.destination-kvdb.secret_key.description=Your bucket Secret Key.
+datasources.section.destination-local-json.destination_path.title=Destination Path
+datasources.section.destination-local-json.destination_path.description=Path to the directory where json files will be written. The files will be placed inside that local mount. For more information check out our docs
+datasources.section.destination-mariadb-columnstore.database.title=Database
+datasources.section.destination-mariadb-columnstore.host.title=Host
+datasources.section.destination-mariadb-columnstore.password.title=Password
+datasources.section.destination-mariadb-columnstore.port.title=Port
+datasources.section.destination-mariadb-columnstore.username.title=Username
+datasources.section.destination-mariadb-columnstore.database.description=Name of the database.
+datasources.section.destination-mariadb-columnstore.host.description=The Hostname of the database.
+datasources.section.destination-mariadb-columnstore.password.description=The Password associated with the username.
+datasources.section.destination-mariadb-columnstore.port.description=The Port of the database.
+datasources.section.destination-mariadb-columnstore.username.description=The Username which is used to access the database.
+datasources.section.destination-meilisearch.api_key.title=API Key
+datasources.section.destination-meilisearch.host.title=Host
+datasources.section.destination-meilisearch.api_key.description=MeiliSearch API Key. See the docs for more information on how to obtain this key.
+datasources.section.destination-meilisearch.host.description=Hostname of the MeiliSearch instance.
+datasources.section.destination-mongodb.auth_type.oneOf.0.title=None
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.title=Password
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.title=User
+datasources.section.destination-mongodb.auth_type.oneOf.1.title=Login/Password
+datasources.section.destination-mongodb.auth_type.title=Authorization type
+datasources.section.destination-mongodb.database.title=DB Name
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.title=Host
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.title=Port
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.title=TLS Connection
+datasources.section.destination-mongodb.instance_type.oneOf.0.title=Standalone MongoDb Instance
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.title=Server addresses
+datasources.section.destination-mongodb.instance_type.oneOf.1.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL
+datasources.section.destination-mongodb.instance_type.oneOf.2.title=MongoDB Atlas
+datasources.section.destination-mongodb.instance_type.title=MongoDb Instance Type
+datasources.section.destination-mongodb.auth_type.description=Authorization type.
+datasources.section.destination-mongodb.auth_type.oneOf.0.description=None.
+datasources.section.destination-mongodb.auth_type.oneOf.1.description=Login/Password.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.description=Password associated with the username.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.description=Username to use to access the database.
+datasources.section.destination-mongodb.database.description=Name of the database.
+datasources.section.destination-mongodb.instance_type.description=MongoDb instance to connect to. For MongoDB Atlas and Replica Set TLS connection is used by default.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.description=The Host of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.description=The Port of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.description=Indicates whether TLS encryption protocol will be used to connect to MongoDB. It is recommended to use TLS connection if possible. For more information see documentation.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.description=A replica set name.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify `host`:`port` of each member seperated by comma.
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.description=URL of a cluster to connect to.
+datasources.section.destination-mqtt.automatic_reconnect.title=Automatic reconnect
+datasources.section.destination-mqtt.broker_host.title=MQTT broker host
+datasources.section.destination-mqtt.broker_port.title=MQTT broker port
+datasources.section.destination-mqtt.clean_session.title=Clean session
+datasources.section.destination-mqtt.client.title=Client ID
+datasources.section.destination-mqtt.connect_timeout.title=Connect timeout
+datasources.section.destination-mqtt.message_qos.title=Message QoS
+datasources.section.destination-mqtt.message_retained.title=Message retained
+datasources.section.destination-mqtt.password.title=Password
+datasources.section.destination-mqtt.publisher_sync.title=Sync publisher
+datasources.section.destination-mqtt.topic_pattern.title=Topic pattern
+datasources.section.destination-mqtt.topic_test.title=Test topic
+datasources.section.destination-mqtt.use_tls.title=Use TLS
+datasources.section.destination-mqtt.username.title=Username
+datasources.section.destination-mqtt.automatic_reconnect.description=Whether the client will automatically attempt to reconnect to the server if the connection is lost.
+datasources.section.destination-mqtt.broker_host.description=Host of the broker to connect to.
+datasources.section.destination-mqtt.broker_port.description=Port of the broker.
+datasources.section.destination-mqtt.clean_session.description=Whether the client and server should remember state across restarts and reconnects.
+datasources.section.destination-mqtt.client.description=A client identifier that is unique on the server being connected to.
+datasources.section.destination-mqtt.connect_timeout.description= Maximum time interval (in seconds) the client will wait for the network connection to the MQTT server to be established.
+datasources.section.destination-mqtt.message_qos.description=Quality of service used for each message to be delivered.
+datasources.section.destination-mqtt.message_retained.description=Whether or not the publish message should be retained by the messaging engine.
+datasources.section.destination-mqtt.password.description=Password to use for the connection.
+datasources.section.destination-mqtt.publisher_sync.description=Wait synchronously until the record has been sent to the broker.
+datasources.section.destination-mqtt.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-mqtt.topic_test.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-mqtt.use_tls.description=Whether to use TLS encryption on the connection.
+datasources.section.destination-mqtt.username.description=User name to use for the connection.
+datasources.section.destination-mssql.database.title=DB Name
+datasources.section.destination-mssql.host.title=Host
+datasources.section.destination-mssql.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-mssql.password.title=Password
+datasources.section.destination-mssql.port.title=Port
+datasources.section.destination-mssql.schema.title=Default Schema
+datasources.section.destination-mssql.ssl_method.oneOf.0.title=Unencrypted
+datasources.section.destination-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate)
+datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate
+datasources.section.destination-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate)
+datasources.section.destination-mssql.ssl_method.title=SSL Method
+datasources.section.destination-mssql.username.title=User
+datasources.section.destination-mssql.database.description=The name of the MSSQL database.
+datasources.section.destination-mssql.host.description=The host name of the MSSQL database.
+datasources.section.destination-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-mssql.password.description=The password associated with this username.
+datasources.section.destination-mssql.port.description=The port of the MSSQL database.
+datasources.section.destination-mssql.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public".
+datasources.section.destination-mssql.ssl_method.description=The encryption method which is used to communicate with the database.
+datasources.section.destination-mssql.ssl_method.oneOf.0.description=The data transfer will not be encrypted.
+datasources.section.destination-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!)
+datasources.section.destination-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate.
+datasources.section.destination-mssql.username.description=The username which is used to access the database.
+datasources.section.destination-mysql.database.title=DB Name
+datasources.section.destination-mysql.host.title=Host
+datasources.section.destination-mysql.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-mysql.password.title=Password
+datasources.section.destination-mysql.port.title=Port
+datasources.section.destination-mysql.ssl.title=SSL Connection
+datasources.section.destination-mysql.username.title=User
+datasources.section.destination-mysql.database.description=Name of the database.
+datasources.section.destination-mysql.host.description=Hostname of the database.
+datasources.section.destination-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-mysql.password.description=Password associated with the username.
+datasources.section.destination-mysql.port.description=Port of the database.
+datasources.section.destination-mysql.ssl.description=Encrypt data using SSL.
+datasources.section.destination-mysql.username.description=Username to use to access the database.
+datasources.section.destination-oracle.encryption.oneOf.0.title=Unencrypted
+datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.title=Encryption Algorithm
+datasources.section.destination-oracle.encryption.oneOf.1.title=Native Network Encryption (NNE)
+datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.title=SSL PEM file
+datasources.section.destination-oracle.encryption.oneOf.2.title=TLS Encrypted (verify certificate)
+datasources.section.destination-oracle.encryption.title=Encryption
+datasources.section.destination-oracle.host.title=Host
+datasources.section.destination-oracle.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-oracle.password.title=Password
+datasources.section.destination-oracle.port.title=Port
+datasources.section.destination-oracle.schema.title=Default Schema
+datasources.section.destination-oracle.sid.title=SID
+datasources.section.destination-oracle.username.title=User
+datasources.section.destination-oracle.encryption.description=The encryption method which is used when communicating with the database.
+datasources.section.destination-oracle.encryption.oneOf.0.description=Data transfer will not be encrypted.
+datasources.section.destination-oracle.encryption.oneOf.1.description=The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports.
+datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.description=This parameter defines the database encryption algorithm.
+datasources.section.destination-oracle.encryption.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations.
+datasources.section.destination-oracle.host.description=The hostname of the database.
+datasources.section.destination-oracle.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-oracle.password.description=The password associated with the username.
+datasources.section.destination-oracle.port.description=The port of the database.
+datasources.section.destination-oracle.schema.description=The default schema is used as the target schema for all statements issued from the connection that do not explicitly specify a schema name. The usual value for this field is "airbyte". In Oracle, schemas and users are the same thing, so the "user" parameter is used as the login credentials and this is used for the default Airbyte message schema.
+datasources.section.destination-oracle.sid.description=The System Identifier uniquely distinguishes the instance from any other instance on the same computer.
+datasources.section.destination-oracle.username.description=The username to access the database. This user must have CREATE USER privileges in the database.
+datasources.section.destination-postgres.database.title=DB Name
+datasources.section.destination-postgres.host.title=Host
+datasources.section.destination-postgres.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-postgres.password.title=Password
+datasources.section.destination-postgres.port.title=Port
+datasources.section.destination-postgres.schema.title=Default Schema
+datasources.section.destination-postgres.ssl.title=SSL Connection
+datasources.section.destination-postgres.ssl_mode.oneOf.0.title=disable
+datasources.section.destination-postgres.ssl_mode.oneOf.1.title=allow
+datasources.section.destination-postgres.ssl_mode.oneOf.2.title=prefer
+datasources.section.destination-postgres.ssl_mode.oneOf.3.title=require
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.title=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.title=Client key password (Optional)
+datasources.section.destination-postgres.ssl_mode.oneOf.4.title=verify-ca
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.title=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.title=Client certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.title=Client key
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.title=Client key password (Optional)
+datasources.section.destination-postgres.ssl_mode.oneOf.5.title=verify-full
+datasources.section.destination-postgres.ssl_mode.title=SSL modes
+datasources.section.destination-postgres.username.title=User
+datasources.section.destination-postgres.database.description=Name of the database.
+datasources.section.destination-postgres.host.description=Hostname of the database.
+datasources.section.destination-postgres.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-postgres.password.description=Password associated with the username.
+datasources.section.destination-postgres.port.description=Port of the database.
+datasources.section.destination-postgres.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public".
+datasources.section.destination-postgres.ssl.description=Encrypt data using SSL. When activating SSL, please select one of the connection modes.
+datasources.section.destination-postgres.ssl_mode.description=SSL connection modes.
+datasources.section.destination-postgres.ssl_mode.oneOf.0.description=Disable SSL.
+datasources.section.destination-postgres.ssl_mode.oneOf.1.description=Allow SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.2.description=Prefer SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.3.description=Require SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.4.description=Verify-ca SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.description=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.description=Password for keystorage. This field is optional. If you do not add it - the password will be generated automatically.
+datasources.section.destination-postgres.ssl_mode.oneOf.5.description=Verify-full SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.description=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.description=Client certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.description=Client key
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.description=Password for keystorage. This field is optional. If you do not add it - the password will be generated automatically.
+datasources.section.destination-postgres.username.description=Username to use to access the database.
+datasources.section.destination-pubsub.credentials_json.title=Credentials JSON
+datasources.section.destination-pubsub.project_id.title=Project ID
+datasources.section.destination-pubsub.topic_id.title=PubSub Topic ID
+datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID
+datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key
+datasources.section.destination-amazon-sqs.message_delay.title=Message Delay
+datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id
+datasources.section.destination-amazon-sqs.queue_url.title=Queue URL
+datasources.section.destination-amazon-sqs.region.title=AWS Region
+datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key
+datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages
+datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body.
+datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds).
+datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues.
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue
+datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue
+datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages
+datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id
+datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name
+datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn
+datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title
+datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User
+datasources.section.destination-aws-datalake.credentials.title=Authentication mode
+datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name
+datasources.section.destination-aws-datalake.region.title=AWS Region
+datasources.section.destination-aws-datalake.aws_account_id.description=Target AWS account ID
+datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket
+datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix
+datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS.
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=The role that will be assumed to write data to S3
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials
+datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use
+datasources.section.destination-aws-datalake.region.description=Region name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes)
+datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening)
+datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values
+datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON
+datasources.section.destination-azure-blob-storage.format.title=Output Format
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure blob storage account key.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The name of the Azure Blob Storage account.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure Blob Storage container. If it does not exist, it will be created automatically. If left empty, a container named airbytecontainer+timestamp will be created automatically.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=The Azure Blob Storage endpoint domain name. Leave the default value (or leave it empty if running the container from the command line) to use the native Microsoft endpoint.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The amount of megabytes to buffer for the output stream to Azure. This will impact memory footprint on workers, but may need adjustment for performance and appropriate block size in Azure.
+datasources.section.destination-azure-blob-storage.format.description=Output data format
+datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details.
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional)
+datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source)
+datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID
+datasources.section.destination-bigquery.dataset_location.title=Dataset Location
+datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional)
+datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging
+datasources.section.destination-bigquery.loading_method.title=Loading Method
+datasources.section.destination-bigquery.project_id.title=Project ID
+datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional)
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here.
+datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty.
+datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here.
+datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here.
+datasources.section.destination-bigquery.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.
+datasources.section.destination-kafka.bootstrap_servers.description=A list of host/port pairs used for establishing the initial connection to the Kafka cluster, in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down).
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server.
+datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses.
+datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.
+datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer.
+datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns.
+datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream.
+datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request.
+datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block.
+datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5.
+datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes.
+datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers.
+datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.
+datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.
+datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error.
+datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used.
+datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum.
+datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established.
+datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka.
+datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
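+# Illustrative note (assumption, not part of the generated spec): the '{namespace}' and
+# '{stream}' placeholders in topic_pattern are substituted per record. For example, a
+# pattern of "{namespace}.{stream}.sample" applied to a record from namespace "public"
+# and stream "users" would resolve to the topic "public.users.sample", subject to the
+# standard topic-name normalization mentioned above.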
+datasources.section.destination-keen.api_key.title=API Key
+datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp
+datasources.section.destination-keen.project_id.title=Project ID
+datasources.section.destination-keen.api_key.description=To get the Keen Master API Key, navigate to the Access tab from the left-hand side panel and check the Project Details section.
+datasources.section.destination-keen.infer_timestamp.description=Allow connector to guess keen.timestamp value based on the streamed data.
+datasources.section.destination-keen.project_id.description=To get the Keen Project ID, navigate to the Access tab from the left-hand side panel and check the Project Details section.
+datasources.section.destination-kinesis.accessKey.title=Access Key
+datasources.section.destination-kinesis.bufferSize.title=Buffer Size
+datasources.section.destination-kinesis.endpoint.title=Endpoint
+datasources.section.destination-kinesis.privateKey.title=Private Key
+datasources.section.destination-kinesis.region.title=Region
+datasources.section.destination-kinesis.shardCount.title=Shard Count
+datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for current user.
+datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing kinesis records before being batch streamed.
+datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint.
+datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that is unique to each account, also known as a "recovery phrase".
+datasources.section.destination-kinesis.region.description=AWS region. Your account determines the Regions that are available to you.
+datasources.section.destination-kinesis.shardCount.description=Number of shards to which the data should be streamed.
+datasources.section.destination-kvdb.bucket_id.title=Bucket ID
+datasources.section.destination-kvdb.secret_key.title=Secret Key
+datasources.section.destination-kvdb.bucket_id.description=The ID of your KVdb bucket.
+datasources.section.destination-kvdb.secret_key.description=Your bucket Secret Key.
+datasources.section.destination-local-json.destination_path.title=Destination Path
+datasources.section.destination-local-json.destination_path.description=Path to the directory where json files will be written. The files will be placed inside that local mount. For more information check out our docs
+datasources.section.destination-mariadb-columnstore.database.title=Database
+datasources.section.destination-mariadb-columnstore.host.title=Host
+datasources.section.destination-mariadb-columnstore.password.title=Password
+datasources.section.destination-mariadb-columnstore.port.title=Port
+datasources.section.destination-mariadb-columnstore.username.title=Username
+datasources.section.destination-mariadb-columnstore.database.description=Name of the database.
+datasources.section.destination-mariadb-columnstore.host.description=The Hostname of the database.
+datasources.section.destination-mariadb-columnstore.password.description=The Password associated with the username.
+datasources.section.destination-mariadb-columnstore.port.description=The Port of the database.
+datasources.section.destination-mariadb-columnstore.username.description=The Username which is used to access the database.
+datasources.section.destination-meilisearch.api_key.title=API Key
+datasources.section.destination-meilisearch.host.title=Host
+datasources.section.destination-meilisearch.api_key.description=MeiliSearch API Key. See the docs for more information on how to obtain this key.
+datasources.section.destination-meilisearch.host.description=Hostname of the MeiliSearch instance.
+datasources.section.destination-mongodb.auth_type.oneOf.0.title=None
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.title=Password
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.title=User
+datasources.section.destination-mongodb.auth_type.oneOf.1.title=Login/Password
+datasources.section.destination-mongodb.auth_type.title=Authorization type
+datasources.section.destination-mongodb.database.title=DB Name
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.title=Host
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.title=Port
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.title=TLS Connection
+datasources.section.destination-mongodb.instance_type.oneOf.0.title=Standalone MongoDb Instance
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.title=Server addresses
+datasources.section.destination-mongodb.instance_type.oneOf.1.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL
+datasources.section.destination-mongodb.instance_type.oneOf.2.title=MongoDB Atlas
+datasources.section.destination-mongodb.instance_type.title=MongoDb Instance Type
+datasources.section.destination-mongodb.auth_type.description=Authorization type.
+datasources.section.destination-mongodb.auth_type.oneOf.0.description=None.
+datasources.section.destination-mongodb.auth_type.oneOf.1.description=Login/Password.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.description=Password associated with the username.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.description=Username to use to access the database.
+datasources.section.destination-mongodb.database.description=Name of the database.
+datasources.section.destination-mongodb.instance_type.description=MongoDb instance to connect to. For MongoDB Atlas and Replica Set, a TLS connection is used by default.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.description=The Host of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.description=The Port of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.description=Indicates whether TLS encryption protocol will be used to connect to MongoDB. It is recommended to use TLS connection if possible. For more information see documentation.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.description=A replica set name.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify `host`:`port` of each member separated by commas.
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.description=URL of a cluster to connect to.
+datasources.section.destination-mqtt.automatic_reconnect.title=Automatic reconnect
+datasources.section.destination-mqtt.broker_host.title=MQTT broker host
+datasources.section.destination-mqtt.broker_port.title=MQTT broker port
+datasources.section.destination-mqtt.clean_session.title=Clean session
+datasources.section.destination-mqtt.client.title=Client ID
+datasources.section.destination-mqtt.connect_timeout.title=Connect timeout
+datasources.section.destination-mqtt.message_qos.title=Message QoS
+datasources.section.destination-mqtt.message_retained.title=Message retained
+datasources.section.destination-mqtt.password.title=Password
+datasources.section.destination-mqtt.publisher_sync.title=Sync publisher
+datasources.section.destination-mqtt.topic_pattern.title=Topic pattern
+datasources.section.destination-mqtt.topic_test.title=Test topic
+datasources.section.destination-mqtt.use_tls.title=Use TLS
+datasources.section.destination-mqtt.username.title=Username
+datasources.section.destination-mqtt.automatic_reconnect.description=Whether the client will automatically attempt to reconnect to the server if the connection is lost.
+datasources.section.destination-mqtt.broker_host.description=Host of the broker to connect to.
+datasources.section.destination-mqtt.broker_port.description=Port of the broker.
+datasources.section.destination-mqtt.clean_session.description=Whether the client and server should remember state across restarts and reconnects.
+datasources.section.destination-mqtt.client.description=A client identifier that is unique on the server being connected to.
+datasources.section.destination-mqtt.connect_timeout.description= Maximum time interval (in seconds) the client will wait for the network connection to the MQTT server to be established.
+datasources.section.destination-mqtt.message_qos.description=Quality of service used for each message to be delivered.
+datasources.section.destination-mqtt.message_retained.description=Whether or not the publish message should be retained by the messaging engine.
+datasources.section.destination-mqtt.password.description=Password to use for the connection.
+datasources.section.destination-mqtt.publisher_sync.description=Wait synchronously until the record has been sent to the broker.
+datasources.section.destination-mqtt.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-mqtt.topic_test.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-mqtt.use_tls.description=Whether to use TLS encryption on the connection.
+datasources.section.destination-mqtt.username.description=User name to use for the connection.
+datasources.section.destination-mssql.database.title=DB Name
+datasources.section.destination-mssql.host.title=Host
+datasources.section.destination-mssql.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-mssql.password.title=Password
+datasources.section.destination-mssql.port.title=Port
+datasources.section.destination-mssql.schema.title=Default Schema
+datasources.section.destination-mssql.ssl_method.oneOf.0.title=Unencrypted
+datasources.section.destination-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate)
+datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate
+datasources.section.destination-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate)
+datasources.section.destination-mssql.ssl_method.title=SSL Method
+datasources.section.destination-mssql.username.title=User
+datasources.section.destination-mssql.database.description=The name of the MSSQL database.
+datasources.section.destination-mssql.host.description=The host name of the MSSQL database.
+datasources.section.destination-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-mssql.password.description=The password associated with this username.
+datasources.section.destination-mssql.port.description=The port of the MSSQL database.
+datasources.section.destination-mssql.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public".
+datasources.section.destination-mssql.ssl_method.description=The encryption method which is used to communicate with the database.
+datasources.section.destination-mssql.ssl_method.oneOf.0.description=The data transfer will not be encrypted.
+datasources.section.destination-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!)
+datasources.section.destination-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate.
+datasources.section.destination-mssql.username.description=The username which is used to access the database.
+datasources.section.destination-mysql.database.title=DB Name
+datasources.section.destination-mysql.host.title=Host
+datasources.section.destination-mysql.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-mysql.password.title=Password
+datasources.section.destination-mysql.port.title=Port
+datasources.section.destination-mysql.ssl.title=SSL Connection
+datasources.section.destination-mysql.username.title=User
+datasources.section.destination-mysql.database.description=Name of the database.
+datasources.section.destination-mysql.host.description=Hostname of the database.
+datasources.section.destination-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-mysql.password.description=Password associated with the username.
+datasources.section.destination-mysql.port.description=Port of the database.
+datasources.section.destination-mysql.ssl.description=Encrypt data using SSL.
+datasources.section.destination-mysql.username.description=Username to use to access the database.
+datasources.section.destination-oracle.encryption.oneOf.0.title=Unencrypted
+datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.title=Encryption Algorithm
+datasources.section.destination-oracle.encryption.oneOf.1.title=Native Network Encryption (NNE)
+datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.title=SSL PEM file
+datasources.section.destination-oracle.encryption.oneOf.2.title=TLS Encrypted (verify certificate)
+datasources.section.destination-oracle.encryption.title=Encryption
+datasources.section.destination-oracle.host.title=Host
+datasources.section.destination-oracle.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-oracle.password.title=Password
+datasources.section.destination-oracle.port.title=Port
+datasources.section.destination-oracle.schema.title=Default Schema
+datasources.section.destination-oracle.sid.title=SID
+datasources.section.destination-oracle.username.title=User
+datasources.section.destination-oracle.encryption.description=The encryption method which is used when communicating with the database.
+datasources.section.destination-oracle.encryption.oneOf.0.description=Data transfer will not be encrypted.
+datasources.section.destination-oracle.encryption.oneOf.1.description=The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports.
+datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.description=This parameter defines the database encryption algorithm.
+datasources.section.destination-oracle.encryption.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations.
+datasources.section.destination-oracle.host.description=The hostname of the database.
+datasources.section.destination-oracle.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-oracle.password.description=The password associated with the username.
+datasources.section.destination-oracle.port.description=The port of the database.
+datasources.section.destination-oracle.schema.description=The default schema is used as the target schema for all statements issued from the connection that do not explicitly specify a schema name. The usual value for this field is "airbyte". In Oracle, schemas and users are the same thing, so the "user" parameter is used as the login credentials and this is used for the default Airbyte message schema.
+datasources.section.destination-oracle.sid.description=The System Identifier uniquely distinguishes the instance from any other instance on the same computer.
+datasources.section.destination-oracle.username.description=The username to access the database. This user must have CREATE USER privileges in the database.
+datasources.section.destination-postgres.database.title=DB Name
+datasources.section.destination-postgres.host.title=Host
+datasources.section.destination-postgres.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-postgres.password.title=Password
+datasources.section.destination-postgres.port.title=Port
+datasources.section.destination-postgres.schema.title=Default Schema
+datasources.section.destination-postgres.ssl.title=SSL Connection
+datasources.section.destination-postgres.ssl_mode.oneOf.0.title=disable
+datasources.section.destination-postgres.ssl_mode.oneOf.1.title=allow
+datasources.section.destination-postgres.ssl_mode.oneOf.2.title=prefer
+datasources.section.destination-postgres.ssl_mode.oneOf.3.title=require
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.title=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.title=Client key password (Optional)
+datasources.section.destination-postgres.ssl_mode.oneOf.4.title=verify-ca
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.title=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.title=Client certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.title=Client key
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.title=Client key password (Optional)
+datasources.section.destination-postgres.ssl_mode.oneOf.5.title=verify-full
+datasources.section.destination-postgres.ssl_mode.title=SSL modes
+datasources.section.destination-postgres.username.title=User
+datasources.section.destination-postgres.database.description=Name of the database.
+datasources.section.destination-postgres.host.description=Hostname of the database.
+datasources.section.destination-postgres.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-postgres.password.description=Password associated with the username.
+datasources.section.destination-postgres.port.description=Port of the database.
+datasources.section.destination-postgres.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public".
+datasources.section.destination-postgres.ssl.description=Encrypt data using SSL. When activating SSL, please select one of the connection modes.
+datasources.section.destination-postgres.ssl_mode.description=SSL connection modes.
+datasources.section.destination-postgres.ssl_mode.oneOf.0.description=Disable SSL.
+datasources.section.destination-postgres.ssl_mode.oneOf.1.description=Allow SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.2.description=Prefer SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.3.description=Require SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.4.description=Verify-ca SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.description=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.description=Password for key storage. This field is optional; if you do not provide it, a password will be generated automatically.
+datasources.section.destination-postgres.ssl_mode.oneOf.5.description=Verify-full SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.description=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.description=Client certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.description=Client key
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.description=Password for key storage. This field is optional; if you do not provide it, a password will be generated automatically.
+datasources.section.destination-postgres.username.description=Username to use to access the database.
+datasources.section.destination-pubsub.credentials_json.title=Credentials JSON
+datasources.section.destination-pubsub.project_id.title=Project ID
+datasources.section.destination-pubsub.topic_id.title=PubSub Topic ID
+datasources.section.destination-pubsub.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key.
+datasources.section.destination-pubsub.project_id.description=The GCP project ID for the project containing the target PubSub.
+datasources.section.destination-pubsub.topic_id.description=The PubSub topic ID in the given GCP project ID.
+datasources.section.destination-pulsar.batching_enabled.title=Enable batching
+datasources.section.destination-pulsar.batching_max_messages.title=Batching max messages
+datasources.section.destination-pulsar.batching_max_publish_delay.title=Batching max publish delay
+datasources.section.destination-pulsar.block_if_queue_full.title=Block if queue is full
+datasources.section.destination-pulsar.brokers.title=Pulsar brokers
+datasources.section.destination-pulsar.compression_type.title=Compression type
+datasources.section.destination-pulsar.max_pending_messages.title=Max pending messages
+datasources.section.destination-pulsar.max_pending_messages_across_partitions.title=Max pending messages across partitions
+datasources.section.destination-pulsar.producer_name.title=Producer name
+datasources.section.destination-pulsar.producer_sync.title=Sync producer
+datasources.section.destination-pulsar.send_timeout_ms.title=Message send timeout
+datasources.section.destination-pulsar.topic_namespace.title=Topic namespace
+datasources.section.destination-pulsar.topic_pattern.title=Topic pattern
+datasources.section.destination-pulsar.topic_tenant.title=Topic tenant
+datasources.section.destination-pulsar.topic_test.title=Test topic
+datasources.section.destination-pulsar.topic_type.title=Topic type
+datasources.section.destination-pulsar.use_tls.title=Use TLS
+datasources.section.destination-pulsar.batching_enabled.description=Control whether automatic batching of messages is enabled for the producer.
+datasources.section.destination-pulsar.batching_max_messages.description=Maximum number of messages permitted in a batch.
+datasources.section.destination-pulsar.batching_max_publish_delay.description= Time period in milliseconds within which the messages sent will be batched.
+datasources.section.destination-pulsar.block_if_queue_full.description=Whether the send operation should block when the outgoing message queue is full.
+datasources.section.destination-pulsar.brokers.description=A list of host/port pairs to use for establishing the initial connection to the Pulsar cluster.
+datasources.section.destination-pulsar.compression_type.description=Compression type for the producer.
+datasources.section.destination-pulsar.max_pending_messages.description=The maximum size of a queue holding pending messages.
+datasources.section.destination-pulsar.max_pending_messages_across_partitions.description=The maximum number of pending messages across partitions.
+datasources.section.destination-pulsar.producer_name.description=Name for the producer. If not filled, the system will generate a globally unique name.
+datasources.section.destination-pulsar.producer_sync.description=Wait synchronously until the record has been sent to Pulsar.
+datasources.section.destination-pulsar.send_timeout_ms.description=If a message is not acknowledged by a server before the send-timeout expires, an error occurs (in ms).
+datasources.section.destination-pulsar.topic_namespace.description=The administrative unit of the topic, which acts as a grouping mechanism for related topics. Most topic configuration is performed at the namespace level. Each tenant has one or multiple namespaces.
+datasources.section.destination-pulsar.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-pulsar.topic_tenant.description=The topic tenant within the instance. Tenants are essential to multi-tenancy in Pulsar, and spread across clusters.
+datasources.section.destination-pulsar.topic_test.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-pulsar.topic_type.description=Identifies the type of topic. Pulsar supports two kinds of topics: persistent and non-persistent. In a persistent topic, all messages are durably persisted on disk (on multiple disks unless the broker is standalone), whereas a non-persistent topic does not persist messages to disk.
+datasources.section.destination-pulsar.use_tls.description=Whether to use TLS encryption on the connection.
+datasources.section.destination-rabbitmq.exchange.description=The exchange name.
+datasources.section.destination-rabbitmq.host.description=The RabbitMQ host name.
+datasources.section.destination-rabbitmq.password.description=The password to connect.
+datasources.section.destination-rabbitmq.port.description=The RabbitMQ port.
+datasources.section.destination-rabbitmq.routing_key.description=The routing key.
+datasources.section.destination-rabbitmq.ssl.description=SSL enabled.
+datasources.section.destination-rabbitmq.username.description=The username to connect.
+datasources.section.destination-rabbitmq.virtual_host.description=The RabbitMQ virtual host name.
+datasources.section.destination-redis.cache_type.title=Cache type
+datasources.section.destination-redis.host.title=Host
+datasources.section.destination-redis.password.title=Password
+datasources.section.destination-redis.port.title=Port
+datasources.section.destination-redis.username.title=Username
+datasources.section.destination-redis.cache_type.description=Redis cache type to store data in.
+datasources.section.destination-redis.host.description=Redis host to connect to.
+datasources.section.destination-redis.password.description=Password associated with Redis.
+datasources.section.destination-redis.port.description=Port of Redis.
+datasources.section.destination-redis.username.description=Username associated with Redis.
+datasources.section.destination-redshift.database.title=Database
+datasources.section.destination-redshift.host.title=Host
+datasources.section.destination-redshift.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-redshift.password.title=Password
+datasources.section.destination-redshift.port.title=Port
+datasources.section.destination-redshift.schema.title=Default Schema
+datasources.section.destination-redshift.uploading_method.oneOf.0.title=Standard
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.title=S3 Key Id
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.title=No encryption
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.title=Key
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.title=AES-CBC envelope encryption
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.title=Encryption
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.title=S3 Filename pattern (Optional)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.title=Purge Staging Files and Tables (Optional)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.title=S3 Bucket Name
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.title=S3 Bucket Path (Optional)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.title=S3 Bucket Region
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.title=S3 Access Key
+datasources.section.destination-redshift.uploading_method.oneOf.1.title=S3 Staging
+datasources.section.destination-redshift.uploading_method.title=Uploading Method
+datasources.section.destination-redshift.username.title=Username
+datasources.section.destination-redshift.database.description=Name of the database.
+datasources.section.destination-redshift.host.description=Host Endpoint of the Redshift Cluster (must include the cluster-id, region and end with .redshift.amazonaws.com)
+datasources.section.destination-redshift.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-redshift.password.description=Password associated with the username.
+datasources.section.destination-redshift.port.description=Port of the database.
+datasources.section.destination-redshift.schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public".
+datasources.section.destination-redshift.uploading_method.description=The method used to upload data to the database.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.description=This ID grants access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. See AWS docs on how to generate an access key ID and secret access key.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.description=How to encrypt the staging data
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.description=Staging data will be stored in plaintext.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.description=Staging data will be encrypted using AES-CBC envelope encryption.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.description=The key, base64-encoded. Must be either 128, 192, or 256 bits. Leave blank to have Airbyte generate an ephemeral key for each sync.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.description=Whether to delete the staging files from S3 after completing the sync. See docs for details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.description=The name of the staging S3 bucket to use if utilising a COPY strategy. COPY is recommended for production workloads for better speed and scalability. See AWS docs for more details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. If not provided, then defaults to the root directory. See path's name recommendations for more details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a COPY strategy. See AWS docs for details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.description=The corresponding secret to the above access key id. See AWS docs on how to generate an access key ID and secret access key.
+datasources.section.destination-redshift.username.description=Username to use to access the database.
+datasources.section.destination-rockset.api_key.title=API Key
+datasources.section.destination-rockset.api_server.title=API Server
+datasources.section.destination-rockset.workspace.title=Workspace
+datasources.section.destination-rockset.api_key.description=Rockset API key
+datasources.section.destination-rockset.api_server.description=Rockset API URL
+datasources.section.destination-rockset.workspace.description=The Rockset workspace in which collections will be created and written to.
+datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID
+datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key
+datasources.section.destination-amazon-sqs.message_delay.title=Message Delay
+datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id
+datasources.section.destination-amazon-sqs.queue_url.title=Queue URL
+datasources.section.destination-amazon-sqs.region.title=AWS Region
+datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key
+datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages
+datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body.
+datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds).
+datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues.
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue
+datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue
+datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages
+datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id
+datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name
+datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn
+datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title
+datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User
+datasources.section.destination-aws-datalake.credentials.title=Authentication mode
+datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name
+datasources.section.destination-aws-datalake.region.title=AWS Region
+datasources.section.destination-aws-datalake.aws_account_id.description=Target AWS account ID
+datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket
+datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix
+datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS.
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=The role that will be assumed to write data to S3
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials
+datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use
+datasources.section.destination-aws-datalake.region.description=Region name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes)
+datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening)
+datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values
+datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON
+datasources.section.destination-azure-blob-storage.format.title=Output Format
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure blob storage account key.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The name of the Azure Blob Storage account.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure Blob Storage container. If it does not exist, it will be created automatically. If left empty, a container named airbytecontainer+timestamp will be created automatically.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=The Azure Blob Storage endpoint domain name. Leave the default value (or leave it empty if running the container from the command line) to use the native Microsoft endpoint.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The amount of megabytes to buffer for the output stream to Azure. This will impact memory footprint on workers, but may need adjustment for performance and appropriate block size in Azure.
+datasources.section.destination-azure-blob-storage.format.description=Output data format
+datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details.
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional)
+datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source)
+datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID
+datasources.section.destination-bigquery.dataset_location.title=Dataset Location
+datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional)
+datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging
+datasources.section.destination-bigquery.loading_method.title=Loading Method
+datasources.section.destination-bigquery.project_id.title=Project ID
+datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional)
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here.
+datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty.
+datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here.
+datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here.
+datasources.section.destination-bigquery.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.
+datasources.section.destination-kafka.bootstrap_servers.description=A list of host/port pairs (host1:port1,host2:port2,...) used for the initial connection to the Kafka cluster. Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down).
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server.
+datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses.
+datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.
+datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer.
+datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns.
+datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream.
+datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request.
+datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block.
+datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5.
+datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes.
+datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers.
+datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.
+datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.
+datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error.
+datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used.
+datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum.
+datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established.
+datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka.
+datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
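The destination-kafka fields above describe standard Apache Kafka producer settings. The sketch below shows how such settings would typically be assembled into a producer with the Apache Kafka Java client; it is only an illustration, and the broker addresses, credentials, and topic are placeholder values.

```
import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class KafkaDestinationSketch {
    public static void main(String[] args) {
        // Keys correspond to the standard Kafka producer configs that the fields above describe.
        Properties props = new Properties();
        props.put("bootstrap.servers", "kafka-broker1:9092,kafka-broker2:9092");
        props.put("security.protocol", "SASL_PLAINTEXT");
        props.put("sasl.mechanism", "PLAIN");
        props.put("sasl.jaas.config",
                "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"user\" password=\"secret\";");
        props.put("acks", "all");
        props.put("enable.idempotence", "true");
        props.put("compression.type", "gzip");
        props.put("batch.size", "16384");
        props.put("linger.ms", "5");
        props.put("buffer.memory", "33554432");
        props.put("retries", "3");
        props.put("max.in.flight.requests.per.connection", "5");
        props.put("delivery.timeout.ms", "120000");
        props.put("request.timeout.ms", "30000");
        props.put("key.serializer", StringSerializer.class.getName());
        props.put("value.serializer", StringSerializer.class.getName());

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            // The test_topic field above plays the same role: verify that messages can be produced.
            producer.send(new ProducerRecord<>("test.topic", "key", "{\"hello\":\"world\"}"));
            producer.flush();
        }
    }
}
```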
+datasources.section.destination-keen.api_key.title=API Key
+datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp
+datasources.section.destination-keen.project_id.title=Project ID
+datasources.section.destination-keen.api_key.description=To get the Keen Master API Key, navigate to the Access tab from the left-hand side panel and check the Project Details section.
+datasources.section.destination-keen.infer_timestamp.description=Allow connector to guess keen.timestamp value based on the streamed data.
+datasources.section.destination-keen.project_id.description=To get the Keen Project ID, navigate to the Access tab from the left-hand side panel and check the Project Details section.
+datasources.section.destination-kinesis.accessKey.title=Access Key
+datasources.section.destination-kinesis.bufferSize.title=Buffer Size
+datasources.section.destination-kinesis.endpoint.title=Endpoint
+datasources.section.destination-kinesis.privateKey.title=Private Key
+datasources.section.destination-kinesis.region.title=Region
+datasources.section.destination-kinesis.shardCount.title=Shard Count
+datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for current user.
+datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing Kinesis records before they are streamed in batches.
+datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint.
+datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that is unique for each account, also known as a "recovery phrase".
+datasources.section.destination-kinesis.region.description=AWS region. Your account determines the Regions that are available to you.
+datasources.section.destination-kinesis.shardCount.description=Number of shards to which the data should be streamed.
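A hedged sketch of how the Kinesis fields above could be wired into the AWS SDK for Java v2, assuming the access key and "private key" correspond to an AWS access key ID and secret access key. Region, endpoint, stream name, and credentials are all placeholders.

```
import java.net.URI;
import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
import software.amazon.awssdk.core.SdkBytes;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.kinesis.KinesisClient;
import software.amazon.awssdk.services.kinesis.model.PutRecordRequest;

public class KinesisDestinationSketch {
    public static void main(String[] args) {
        // Placeholder values for the accessKey/privateKey/region/endpoint fields above.
        KinesisClient kinesis = KinesisClient.builder()
                .region(Region.US_EAST_1)
                .endpointOverride(URI.create("https://kinesis.us-east-1.amazonaws.com"))
                .credentialsProvider(StaticCredentialsProvider.create(
                        AwsBasicCredentials.create("ACCESS_KEY_ID", "SECRET_ACCESS_KEY")))
                .build();

        // Records are buffered and batch streamed by the connector; a single put looks like this.
        kinesis.putRecord(PutRecordRequest.builder()
                .streamName("example-stream")
                .partitionKey("example-partition-key")
                .data(SdkBytes.fromUtf8String("{\"hello\":\"world\"}"))
                .build());
    }
}
```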
+datasources.section.destination-kvdb.bucket_id.title=Bucket ID
+datasources.section.destination-kvdb.secret_key.title=Secret Key
+datasources.section.destination-kvdb.bucket_id.description=The ID of your KVdb bucket.
+datasources.section.destination-kvdb.secret_key.description=Your bucket Secret Key.
+datasources.section.destination-local-json.destination_path.title=Destination Path
+datasources.section.destination-local-json.destination_path.description=Path to the directory where JSON files will be written. The files will be placed inside that local mount. For more information, check out our docs.
+datasources.section.destination-mariadb-columnstore.database.title=Database
+datasources.section.destination-mariadb-columnstore.host.title=Host
+datasources.section.destination-mariadb-columnstore.password.title=Password
+datasources.section.destination-mariadb-columnstore.port.title=Port
+datasources.section.destination-mariadb-columnstore.username.title=Username
+datasources.section.destination-mariadb-columnstore.database.description=Name of the database.
+datasources.section.destination-mariadb-columnstore.host.description=The Hostname of the database.
+datasources.section.destination-mariadb-columnstore.password.description=The Password associated with the username.
+datasources.section.destination-mariadb-columnstore.port.description=The Port of the database.
+datasources.section.destination-mariadb-columnstore.username.description=The Username which is used to access the database.
+datasources.section.destination-meilisearch.api_key.title=API Key
+datasources.section.destination-meilisearch.host.title=Host
+datasources.section.destination-meilisearch.api_key.description=MeiliSearch API Key. See the docs for more information on how to obtain this key.
+datasources.section.destination-meilisearch.host.description=Hostname of the MeiliSearch instance.
+datasources.section.destination-mongodb.auth_type.oneOf.0.title=None
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.title=Password
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.title=User
+datasources.section.destination-mongodb.auth_type.oneOf.1.title=Login/Password
+datasources.section.destination-mongodb.auth_type.title=Authorization type
+datasources.section.destination-mongodb.database.title=DB Name
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.title=Host
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.title=Port
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.title=TLS Connection
+datasources.section.destination-mongodb.instance_type.oneOf.0.title=Standalone MongoDb Instance
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.title=Server addresses
+datasources.section.destination-mongodb.instance_type.oneOf.1.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL
+datasources.section.destination-mongodb.instance_type.oneOf.2.title=MongoDB Atlas
+datasources.section.destination-mongodb.instance_type.title=MongoDb Instance Type
+datasources.section.destination-mongodb.auth_type.description=Authorization type.
+datasources.section.destination-mongodb.auth_type.oneOf.0.description=None.
+datasources.section.destination-mongodb.auth_type.oneOf.1.description=Login/Password.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.description=Password associated with the username.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.description=Username to use to access the database.
+datasources.section.destination-mongodb.database.description=Name of the database.
+datasources.section.destination-mongodb.instance_type.description=MongoDB instance to connect to. For MongoDB Atlas and Replica Set, a TLS connection is used by default.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.description=The Host of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.description=The Port of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.description=Indicates whether TLS encryption protocol will be used to connect to MongoDB. It is recommended to use TLS connection if possible. For more information see documentation.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.description=A replica set name.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify the `host`:`port` of each member, separated by commas.
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.description=URL of a cluster to connect to.
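To make the three MongoDB instance types above concrete, here is a minimal sketch of the connection strings each one typically maps to, using the MongoDB Java driver. Hosts, replica set name, cluster URL, and credentials are placeholders.

```
import com.mongodb.client.MongoClient;
import com.mongodb.client.MongoClients;

public class MongodbDestinationSketch {
    public static void main(String[] args) {
        // Standalone instance: host + port (+ optional TLS), as in instance_type oneOf.0 above.
        MongoClient standalone =
                MongoClients.create("mongodb://user:password@mongo-host:27017/?tls=true");

        // Replica set: comma-separated server addresses plus the replica set name (oneOf.1).
        MongoClient replicaSet =
                MongoClients.create("mongodb://host1:27017,host2:27017/?replicaSet=rs0&tls=true");

        // MongoDB Atlas: cluster URL using the mongodb+srv scheme (oneOf.2).
        MongoClient atlas =
                MongoClients.create("mongodb+srv://user:password@cluster0.example.mongodb.net");

        standalone.close();
        replicaSet.close();
        atlas.close();
    }
}
```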
+datasources.section.destination-mqtt.automatic_reconnect.title=Automatic reconnect
+datasources.section.destination-mqtt.broker_host.title=MQTT broker host
+datasources.section.destination-mqtt.broker_port.title=MQTT broker port
+datasources.section.destination-mqtt.clean_session.title=Clean session
+datasources.section.destination-mqtt.client.title=Client ID
+datasources.section.destination-mqtt.connect_timeout.title=Connect timeout
+datasources.section.destination-mqtt.message_qos.title=Message QoS
+datasources.section.destination-mqtt.message_retained.title=Message retained
+datasources.section.destination-mqtt.password.title=Password
+datasources.section.destination-mqtt.publisher_sync.title=Sync publisher
+datasources.section.destination-mqtt.topic_pattern.title=Topic pattern
+datasources.section.destination-mqtt.topic_test.title=Test topic
+datasources.section.destination-mqtt.use_tls.title=Use TLS
+datasources.section.destination-mqtt.username.title=Username
+datasources.section.destination-mqtt.automatic_reconnect.description=Whether the client will automatically attempt to reconnect to the server if the connection is lost.
+datasources.section.destination-mqtt.broker_host.description=Host of the broker to connect to.
+datasources.section.destination-mqtt.broker_port.description=Port of the broker.
+datasources.section.destination-mqtt.clean_session.description=Whether the client and server should remember state across restarts and reconnects.
+datasources.section.destination-mqtt.client.description=A client identifier that is unique on the server being connected to.
+datasources.section.destination-mqtt.connect_timeout.description= Maximum time interval (in seconds) the client will wait for the network connection to the MQTT server to be established.
+datasources.section.destination-mqtt.message_qos.description=Quality of service used for each message to be delivered.
+datasources.section.destination-mqtt.message_retained.description=Whether or not the published message should be retained by the messaging engine.
+datasources.section.destination-mqtt.password.description=Password to use for the connection.
+datasources.section.destination-mqtt.publisher_sync.description=Wait synchronously until the record has been sent to the broker.
+datasources.section.destination-mqtt.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-mqtt.topic_test.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-mqtt.use_tls.description=Whether to use TLS encryption on the connection.
+datasources.section.destination-mqtt.username.description=User name to use for the connection.
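The MQTT fields above map closely onto the Eclipse Paho Java client. The sketch below shows one plausible way they combine into a connection and a publish; broker URI, client ID, credentials, and topic are placeholder values, not the connector's actual code.

```
import org.eclipse.paho.client.mqttv3.MqttClient;
import org.eclipse.paho.client.mqttv3.MqttConnectOptions;
import org.eclipse.paho.client.mqttv3.MqttException;
import org.eclipse.paho.client.mqttv3.MqttMessage;

public class MqttDestinationSketch {
    public static void main(String[] args) throws MqttException {
        // ssl:// when use_tls is set, tcp:// otherwise; host, port, and client ID are placeholders.
        MqttClient client = new MqttClient("ssl://broker.example.com:8883", "airbyte-client-id");

        MqttConnectOptions options = new MqttConnectOptions();
        options.setAutomaticReconnect(true);          // automatic_reconnect
        options.setCleanSession(true);                // clean_session
        options.setConnectionTimeout(30);             // connect_timeout (seconds)
        options.setUserName("user");                  // username
        options.setPassword("secret".toCharArray());  // password
        client.connect(options);

        MqttMessage message = new MqttMessage("{\"hello\":\"world\"}".getBytes());
        message.setQos(1);           // message_qos
        message.setRetained(false);  // message_retained
        client.publish("test.topic", message);  // topic_test / topic_pattern
        client.disconnect();
    }
}
```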
+datasources.section.destination-mssql.database.title=DB Name
+datasources.section.destination-mssql.host.title=Host
+datasources.section.destination-mssql.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-mssql.password.title=Password
+datasources.section.destination-mssql.port.title=Port
+datasources.section.destination-mssql.schema.title=Default Schema
+datasources.section.destination-mssql.ssl_method.oneOf.0.title=Unencrypted
+datasources.section.destination-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate)
+datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate
+datasources.section.destination-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate)
+datasources.section.destination-mssql.ssl_method.title=SSL Method
+datasources.section.destination-mssql.username.title=User
+datasources.section.destination-mssql.database.description=The name of the MSSQL database.
+datasources.section.destination-mssql.host.description=The host name of the MSSQL database.
+datasources.section.destination-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-mssql.password.description=The password associated with this username.
+datasources.section.destination-mssql.port.description=The port of the MSSQL database.
+datasources.section.destination-mssql.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public".
+datasources.section.destination-mssql.ssl_method.description=The encryption method which is used to communicate with the database.
+datasources.section.destination-mssql.ssl_method.oneOf.0.description=The data transfer will not be encrypted.
+datasources.section.destination-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!)
+datasources.section.destination-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate.
+datasources.section.destination-mssql.username.description=The username which is used to access the database.
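For the MSSQL settings above, the SSL method and JDBC URL params typically end up as properties on a SQL Server JDBC URL. The sketch below is illustrative only; host, database, certificate subject, and credentials are placeholders.

```
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;

public class MssqlDestinationSketch {
    public static void main(String[] args) throws SQLException {
        // encrypt/trustServerCertificate/hostNameInCertificate mirror the ssl_method options above;
        // extra jdbc_url_params would be appended as additional ;key=value pairs.
        String url = "jdbc:sqlserver://mssql-host:1433;"
                + "databaseName=mydb;"
                + "encrypt=true;"
                + "trustServerCertificate=false;"
                + "hostNameInCertificate=mssql-host.example.com";

        try (Connection connection = DriverManager.getConnection(url, "user", "password")) {
            System.out.println("Connected: " + !connection.isClosed());
        }
    }
}
```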
+datasources.section.destination-mysql.database.title=DB Name
+datasources.section.destination-mysql.host.title=Host
+datasources.section.destination-mysql.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-mysql.password.title=Password
+datasources.section.destination-mysql.port.title=Port
+datasources.section.destination-mysql.ssl.title=SSL Connection
+datasources.section.destination-mysql.username.title=User
+datasources.section.destination-mysql.database.description=Name of the database.
+datasources.section.destination-mysql.host.description=Hostname of the database.
+datasources.section.destination-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-mysql.password.description=Password associated with the username.
+datasources.section.destination-mysql.port.description=Port of the database.
+datasources.section.destination-mysql.ssl.description=Encrypt data using SSL.
+datasources.section.destination-mysql.username.description=Username to use to access the database.
+datasources.section.destination-oracle.encryption.oneOf.0.title=Unencrypted
+datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.title=Encryption Algorithm
+datasources.section.destination-oracle.encryption.oneOf.1.title=Native Network Encryption (NNE)
+datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.title=SSL PEM file
+datasources.section.destination-oracle.encryption.oneOf.2.title=TLS Encrypted (verify certificate)
+datasources.section.destination-oracle.encryption.title=Encryption
+datasources.section.destination-oracle.host.title=Host
+datasources.section.destination-oracle.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-oracle.password.title=Password
+datasources.section.destination-oracle.port.title=Port
+datasources.section.destination-oracle.schema.title=Default Schema
+datasources.section.destination-oracle.sid.title=SID
+datasources.section.destination-oracle.username.title=User
+datasources.section.destination-oracle.encryption.description=The encryption method which is used when communicating with the database.
+datasources.section.destination-oracle.encryption.oneOf.0.description=Data transfer will not be encrypted.
+datasources.section.destination-oracle.encryption.oneOf.1.description=The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports.
+datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.description=This parameter defines the database encryption algorithm.
+datasources.section.destination-oracle.encryption.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations.
+datasources.section.destination-oracle.host.description=The hostname of the database.
+datasources.section.destination-oracle.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-oracle.password.description=The password associated with the username.
+datasources.section.destination-oracle.port.description=The port of the database.
+datasources.section.destination-oracle.schema.description=The default schema is used as the target schema for all statements issued from the connection that do not explicitly specify a schema name. The usual value for this field is "airbyte". In Oracle, schemas and users are the same thing, so the "user" parameter is used as the login credentials and this is used for the default Airbyte message schema.
+datasources.section.destination-oracle.sid.description=The System Identifier uniquely distinguishes the instance from any other instance on the same computer.
+datasources.section.destination-oracle.username.description=The username to access the database. This user must have CREATE USER privileges in the database.
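As a small illustration of the Oracle host, port, and SID fields above, the sketch below opens a JDBC connection using the thin-driver SID form; all values are placeholders and encryption options are omitted.

```
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;

public class OracleDestinationSketch {
    public static void main(String[] args) throws SQLException {
        // Thin-driver URL using the host:port:sid form described above.
        String url = "jdbc:oracle:thin:@oracle-host:1521:ORCL";
        try (Connection connection = DriverManager.getConnection(url, "airbyte", "password")) {
            System.out.println("Connected: " + !connection.isClosed());
        }
    }
}
```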
+datasources.section.destination-postgres.database.title=DB Name
+datasources.section.destination-postgres.host.title=Host
+datasources.section.destination-postgres.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-postgres.password.title=Password
+datasources.section.destination-postgres.port.title=Port
+datasources.section.destination-postgres.schema.title=Default Schema
+datasources.section.destination-postgres.ssl.title=SSL Connection
+datasources.section.destination-postgres.ssl_mode.oneOf.0.title=disable
+datasources.section.destination-postgres.ssl_mode.oneOf.1.title=allow
+datasources.section.destination-postgres.ssl_mode.oneOf.2.title=prefer
+datasources.section.destination-postgres.ssl_mode.oneOf.3.title=require
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.title=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.title=Client key password (Optional)
+datasources.section.destination-postgres.ssl_mode.oneOf.4.title=verify-ca
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.title=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.title=Client certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.title=Client key
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.title=Client key password (Optional)
+datasources.section.destination-postgres.ssl_mode.oneOf.5.title=verify-full
+datasources.section.destination-postgres.ssl_mode.title=SSL modes
+datasources.section.destination-postgres.username.title=User
+datasources.section.destination-postgres.database.description=Name of the database.
+datasources.section.destination-postgres.host.description=Hostname of the database.
+datasources.section.destination-postgres.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-postgres.password.description=Password associated with the username.
+datasources.section.destination-postgres.port.description=Port of the database.
+datasources.section.destination-postgres.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public".
+datasources.section.destination-postgres.ssl.description=Encrypt data using SSL. When activating SSL, please select one of the connection modes.
+datasources.section.destination-postgres.ssl_mode.description=SSL connection modes.
+datasources.section.destination-postgres.ssl_mode.oneOf.0.description=Disable SSL.
+datasources.section.destination-postgres.ssl_mode.oneOf.1.description=Allow SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.2.description=Prefer SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.3.description=Require SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.4.description=Verify-ca SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.description=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.description=Password for the key storage. This field is optional. If you do not provide it, the password will be generated automatically.
+datasources.section.destination-postgres.ssl_mode.oneOf.5.description=Verify-full SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.description=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.description=Client certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.description=Client key
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.description=Password for the key storage. This field is optional. If you do not provide it, the password will be generated automatically.
+datasources.section.destination-postgres.username.description=Username to use to access the database.
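The Postgres SSL modes listed above correspond to the standard PostgreSQL `sslmode` values. The sketch below shows how one of them might appear on a JDBC URL; the host, database, certificate path, and credentials are placeholder values.

```
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;

public class PostgresDestinationSketch {
    public static void main(String[] args) throws SQLException {
        // sslmode matches the modes above (disable, allow, prefer, require, verify-ca, verify-full);
        // verify-ca and verify-full additionally need the CA certificate (and, for verify-full, a
        // client certificate/key). Any jdbc_url_params would be appended as further &key=value pairs.
        String url = "jdbc:postgresql://postgres-host:5432/mydb"
                + "?sslmode=verify-ca"
                + "&sslrootcert=/path/to/ca.crt";

        try (Connection connection = DriverManager.getConnection(url, "user", "password")) {
            System.out.println("Connected: " + !connection.isClosed());
        }
    }
}
```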
+datasources.section.destination-pubsub.credentials_json.title=Credentials JSON
+datasources.section.destination-pubsub.project_id.title=Project ID
+datasources.section.destination-pubsub.topic_id.title=PubSub Topic ID
+datasources.section.destination-pubsub.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key.
+datasources.section.destination-pubsub.project_id.description=The GCP project ID for the project containing the target PubSub.
+datasources.section.destination-pubsub.topic_id.description=The PubSub topic ID in the given GCP project ID.
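A brief sketch of how the Pub/Sub project ID and topic ID above are typically used with the Google Cloud Pub/Sub Java client. The project, topic, and message are placeholders, and the service account JSON is assumed to be supplied through the environment rather than inline.

```
import com.google.api.core.ApiFuture;
import com.google.cloud.pubsub.v1.Publisher;
import com.google.protobuf.ByteString;
import com.google.pubsub.v1.PubsubMessage;
import com.google.pubsub.v1.TopicName;

public class PubsubDestinationSketch {
    public static void main(String[] args) throws Exception {
        // project_id and topic_id are placeholders; credentials_json would normally be provided via
        // GOOGLE_APPLICATION_CREDENTIALS or an explicit credentials provider.
        TopicName topic = TopicName.of("my-gcp-project", "my-topic");
        Publisher publisher = Publisher.newBuilder(topic).build();
        try {
            PubsubMessage message = PubsubMessage.newBuilder()
                    .setData(ByteString.copyFromUtf8("{\"hello\":\"world\"}"))
                    .build();
            ApiFuture<String> messageId = publisher.publish(message);
            System.out.println("Published message " + messageId.get());
        } finally {
            publisher.shutdown();
        }
    }
}
```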
+datasources.section.destination-pulsar.batching_enabled.title=Enable batching
+datasources.section.destination-pulsar.batching_max_messages.title=Batching max messages
+datasources.section.destination-pulsar.batching_max_publish_delay.title=Batching max publish delay
+datasources.section.destination-pulsar.block_if_queue_full.title=Block if queue is full
+datasources.section.destination-pulsar.brokers.title=Pulsar brokers
+datasources.section.destination-pulsar.compression_type.title=Compression type
+datasources.section.destination-pulsar.max_pending_messages.title=Max pending messages
+datasources.section.destination-pulsar.max_pending_messages_across_partitions.title=Max pending messages across partitions
+datasources.section.destination-pulsar.producer_name.title=Producer name
+datasources.section.destination-pulsar.producer_sync.title=Sync producer
+datasources.section.destination-pulsar.send_timeout_ms.title=Message send timeout
+datasources.section.destination-pulsar.topic_namespace.title=Topic namespace
+datasources.section.destination-pulsar.topic_pattern.title=Topic pattern
+datasources.section.destination-pulsar.topic_tenant.title=Topic tenant
+datasources.section.destination-pulsar.topic_test.title=Test topic
+datasources.section.destination-pulsar.topic_type.title=Topic type
+datasources.section.destination-pulsar.use_tls.title=Use TLS
+datasources.section.destination-pulsar.batching_enabled.description=Control whether automatic batching of messages is enabled for the producer.
+datasources.section.destination-pulsar.batching_max_messages.description=Maximum number of messages permitted in a batch.
+datasources.section.destination-pulsar.batching_max_publish_delay.description= Time period in milliseconds within which the messages sent will be batched.
+datasources.section.destination-pulsar.block_if_queue_full.description=If the send operation should block when the outgoing message queue is full.
+datasources.section.destination-pulsar.brokers.description=A list of host/port pairs to use for establishing the initial connection to the Pulsar cluster.
+datasources.section.destination-pulsar.compression_type.description=Compression type for the producer.
+datasources.section.destination-pulsar.max_pending_messages.description=The maximum size of a queue holding pending messages.
+datasources.section.destination-pulsar.max_pending_messages_across_partitions.description=The maximum number of pending messages across partitions.
+datasources.section.destination-pulsar.producer_name.description=Name for the producer. If not filled, the system will generate a globally unique name.
+datasources.section.destination-pulsar.producer_sync.description=Wait synchronously until the record has been sent to Pulsar.
+datasources.section.destination-pulsar.send_timeout_ms.description=If a message is not acknowledged by a server before the send-timeout expires, an error occurs (in ms).
+datasources.section.destination-pulsar.topic_namespace.description=The administrative unit of the topic, which acts as a grouping mechanism for related topics. Most topic configuration is performed at the namespace level. Each tenant has one or multiple namespaces.
+datasources.section.destination-pulsar.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-pulsar.topic_tenant.description=The topic tenant within the instance. Tenants are essential to multi-tenancy in Pulsar, and spread across clusters.
+datasources.section.destination-pulsar.topic_test.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-pulsar.topic_type.description=It identifies the type of topic. Pulsar supports two kinds of topics: persistent and non-persistent. In a persistent topic, all messages are durably persisted on disk (that means on multiple disks unless the broker is standalone), whereas a non-persistent topic does not persist messages to storage disk.
+datasources.section.destination-pulsar.use_tls.description=Whether to use TLS encryption on the connection.
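The Pulsar fields above correspond to options on the Apache Pulsar Java client's producer builder. Below is a plausible sketch of how they would combine; brokers, tenant, namespace, topic, and producer name are placeholders, not the connector's actual configuration.

```
import java.util.concurrent.TimeUnit;
import org.apache.pulsar.client.api.CompressionType;
import org.apache.pulsar.client.api.Producer;
import org.apache.pulsar.client.api.PulsarClient;
import org.apache.pulsar.client.api.Schema;

public class PulsarDestinationSketch {
    public static void main(String[] args) throws Exception {
        // brokers/use_tls map to the service URL; tenant, namespace, topic type, and pattern combine
        // into the full topic name. All values are placeholders.
        PulsarClient client = PulsarClient.builder()
                .serviceUrl("pulsar://broker1:6650,broker2:6650")
                .build();

        Producer<String> producer = client.newProducer(Schema.STRING)
                .topic("persistent://public/default/test.topic")    // {type}://{tenant}/{namespace}/{topic}
                .producerName("airbyte-producer")                    // producer_name
                .enableBatching(true)                                // batching_enabled
                .batchingMaxMessages(1000)                           // batching_max_messages
                .batchingMaxPublishDelay(5, TimeUnit.MILLISECONDS)   // batching_max_publish_delay
                .blockIfQueueFull(true)                              // block_if_queue_full
                .maxPendingMessages(1000)                            // max_pending_messages
                .sendTimeout(30, TimeUnit.SECONDS)                   // send_timeout_ms
                .compressionType(CompressionType.LZ4)                // compression_type
                .create();

        producer.send("{\"hello\":\"world\"}");
        producer.close();
        client.close();
    }
}
```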
+datasources.section.destination-rabbitmq.exchange.description=The exchange name.
+datasources.section.destination-rabbitmq.host.description=The RabbitMQ host name.
+datasources.section.destination-rabbitmq.password.description=The password to connect.
+datasources.section.destination-rabbitmq.port.description=The RabbitMQ port.
+datasources.section.destination-rabbitmq.routing_key.description=The routing key.
+datasources.section.destination-rabbitmq.ssl.description=SSL enabled.
+datasources.section.destination-rabbitmq.username.description=The username to connect.
+datasources.section.destination-rabbitmq.virtual_host.description=The RabbitMQ virtual host name.
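To show how the RabbitMQ fields above fit together, here is a minimal sketch with the RabbitMQ Java client; host, port, credentials, virtual host, exchange, and routing key are all placeholder values.

```
import com.rabbitmq.client.Channel;
import com.rabbitmq.client.Connection;
import com.rabbitmq.client.ConnectionFactory;

public class RabbitmqDestinationSketch {
    public static void main(String[] args) throws Exception {
        // host, port, username, password, virtual_host, and ssl map to ConnectionFactory settings;
        // exchange and routing_key are used when publishing.
        ConnectionFactory factory = new ConnectionFactory();
        factory.setHost("rabbitmq-host");
        factory.setPort(5671);
        factory.setUsername("user");
        factory.setPassword("secret");
        factory.setVirtualHost("/");
        factory.useSslProtocol();  // only when ssl is enabled

        try (Connection connection = factory.newConnection();
             Channel channel = connection.createChannel()) {
            channel.basicPublish("my-exchange", "my-routing-key", null,
                    "{\"hello\":\"world\"}".getBytes());
        }
    }
}
```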
+datasources.section.destination-redis.cache_type.title=Cache type
+datasources.section.destination-redis.host.title=Host
+datasources.section.destination-redis.password.title=Password
+datasources.section.destination-redis.port.title=Port
+datasources.section.destination-redis.username.title=Username
+datasources.section.destination-redis.cache_type.description=Redis cache type to store data in.
+datasources.section.destination-redis.host.description=Redis host to connect to.
+datasources.section.destination-redis.password.description=Password associated with Redis.
+datasources.section.destination-redis.port.description=Port of Redis.
+datasources.section.destination-redis.username.description=Username associated with Redis.
+datasources.section.destination-redshift.database.title=Database
+datasources.section.destination-redshift.host.title=Host
+datasources.section.destination-redshift.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-redshift.password.title=Password
+datasources.section.destination-redshift.port.title=Port
+datasources.section.destination-redshift.schema.title=Default Schema
+datasources.section.destination-redshift.uploading_method.oneOf.0.title=Standard
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.title=S3 Key Id
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.title=No encryption
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.title=Key
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.title=AES-CBC envelope encryption
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.title=Encryption
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.title=S3 Filename pattern (Optional)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.title=Purge Staging Files and Tables (Optional)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.title=S3 Bucket Name
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.title=S3 Bucket Path (Optional)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.title=S3 Bucket Region
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.title=S3 Access Key
+datasources.section.destination-redshift.uploading_method.oneOf.1.title=S3 Staging
+datasources.section.destination-redshift.uploading_method.title=Uploading Method
+datasources.section.destination-redshift.username.title=Username
+datasources.section.destination-redshift.database.description=Name of the database.
+datasources.section.destination-redshift.host.description=Host Endpoint of the Redshift Cluster (must include the cluster-id, region and end with .redshift.amazonaws.com)
+datasources.section.destination-redshift.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-redshift.password.description=Password associated with the username.
+datasources.section.destination-redshift.port.description=Port of the database.
+datasources.section.destination-redshift.schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public".
+datasources.section.destination-redshift.uploading_method.description=The method used to upload data to the database.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.description=This ID grants access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. See AWS docs on how to generate an access key ID and secret access key.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.description=How to encrypt the staging data
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.description=Staging data will be stored in plaintext.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.description=Staging data will be encrypted using AES-CBC envelope encryption.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.description=The key, base64-encoded. Must be either 128, 192, or 256 bits. Leave blank to have Airbyte generate an ephemeral key for each sync.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.description=Whether to delete the staging files from S3 after completing the sync. See docs for details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.description=The name of the staging S3 bucket to use if utilising a COPY strategy. COPY is recommended for production workloads for better speed and scalability. See AWS docs for more details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. If not provided, then defaults to the root directory. See path's name recommendations for more details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a COPY strategy. See AWS docs for details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.description=The corresponding secret to the above access key id. See AWS docs on how to generate an access key ID and secret access key.
+datasources.section.destination-redshift.username.description=Username to use to access the database.
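The Redshift S3 staging method described above amounts to writing staged files to the bucket and then loading them with a COPY statement over JDBC. The sketch below illustrates that idea only; the cluster endpoint, table, bucket, key, and credentials are hypothetical placeholders, and it assumes the Redshift JDBC driver is on the classpath.

```
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class RedshiftCopySketch {
    public static void main(String[] args) throws Exception {
        // Standard inserts go straight through JDBC; S3 staging instead writes files to the
        // bucket/path above and loads them with COPY.
        String url = "jdbc:redshift://example-cluster.abc123.us-east-1.redshift.amazonaws.com:5439/dev";
        try (Connection connection = DriverManager.getConnection(url, "user", "password");
             Statement statement = connection.createStatement()) {
            statement.execute(
                "COPY public.my_table "
                + "FROM 's3://my-staging-bucket/airbyte/staged-file.csv' "
                + "ACCESS_KEY_ID '<access-key-id>' "
                + "SECRET_ACCESS_KEY '<secret-access-key>' "
                + "REGION 'us-east-1' "
                + "CSV");
        }
    }
}
```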
+datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes)
+datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening)
+datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values
+datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON
+datasources.section.destination-azure-blob-storage.format.title=Output Format
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure blob storage account key.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The account's name of the Azure Blob Storage.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure blob storage container. If not exists - will be created automatically. May be empty, then will be created automatically airbytecontainer+timestamp
+datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=This is Azure Blob Storage endpoint domain name. Leave default value (or leave it empty if run container from command line) to use Microsoft native from example.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The amount of megabytes to buffer for the output stream to Azure. This will impact memory footprint on workers, but may need adjustment for performance and appropriate block size in Azure.
+datasources.section.destination-azure-blob-storage.format.description=Output data format
+datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details.
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional)
+datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source)
+datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID
+datasources.section.destination-bigquery.dataset_location.title=Dataset Location
+datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional)
+datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging
+datasources.section.destination-bigquery.loading_method.title=Loading Method
+datasources.section.destination-bigquery.project_id.title=Project ID
+datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional)
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here.
+datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty.
+datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here.
+datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here.
+datasources.section.destination-bigquery.loading_method.description=Loading method used to send select the way data will be uploaded to BigQuery. host1:port1,host2:port2,...
. Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down).
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server.
+datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses.
+datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.
+datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer.
+datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns.
+datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream.
+datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request.
+datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block.
+datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5.
+datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes.
+datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers.
+datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.
+datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.
+datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error.
+datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used.
+datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum.
+datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established.
+datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka.
+datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-keen.api_key.title=API Key
+datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp
+datasources.section.destination-keen.project_id.title=Project ID
+datasources.section.destination-keen.api_key.description=To get Keen Master API Key, navigate to the Access tab from the left-hand, side panel and check the Project Details section.
+datasources.section.destination-keen.infer_timestamp.description=Allow connector to guess keen.timestamp value based on the streamed data.
+datasources.section.destination-keen.project_id.description=To get Keen Project ID, navigate to the Access tab from the left-hand, side panel and check the Project Details section.
+datasources.section.destination-kinesis.accessKey.title=Access Key
+datasources.section.destination-kinesis.bufferSize.title=Buffer Size
+datasources.section.destination-kinesis.endpoint.title=Endpoint
+datasources.section.destination-kinesis.privateKey.title=Private Key
+datasources.section.destination-kinesis.region.title=Region
+datasources.section.destination-kinesis.shardCount.title=Shard Count
+datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for current user.
+datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing kinesis records before being batch streamed.
+datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint.
+datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that are unique for each account, also known as a "recovery phrase".
+datasources.section.destination-kinesis.region.description=AWS region. Your account determines the Regions that are available to you.
+datasources.section.destination-kinesis.shardCount.description=Number of shards to which the data should be streamed.
+datasources.section.destination-kvdb.bucket_id.title=Bucket ID
+datasources.section.destination-kvdb.secret_key.title=Secret Key
+datasources.section.destination-kvdb.bucket_id.description=The ID of your KVdb bucket.
+datasources.section.destination-kvdb.secret_key.description=Your bucket Secret Key.
+datasources.section.destination-local-json.destination_path.title=Destination Path
+datasources.section.destination-local-json.destination_path.description=Path to the directory where JSON files will be written. The files will be placed inside that local mount. For more information, check out our docs.
+datasources.section.destination-mariadb-columnstore.database.title=Database
+datasources.section.destination-mariadb-columnstore.host.title=Host
+datasources.section.destination-mariadb-columnstore.password.title=Password
+datasources.section.destination-mariadb-columnstore.port.title=Port
+datasources.section.destination-mariadb-columnstore.username.title=Username
+datasources.section.destination-mariadb-columnstore.database.description=Name of the database.
+datasources.section.destination-mariadb-columnstore.host.description=The Hostname of the database.
+datasources.section.destination-mariadb-columnstore.password.description=The Password associated with the username.
+datasources.section.destination-mariadb-columnstore.port.description=The Port of the database.
+datasources.section.destination-mariadb-columnstore.username.description=The Username which is used to access the database.
+datasources.section.destination-meilisearch.api_key.title=API Key
+datasources.section.destination-meilisearch.host.title=Host
+datasources.section.destination-meilisearch.api_key.description=MeiliSearch API Key. See the docs for more information on how to obtain this key.
+datasources.section.destination-meilisearch.host.description=Hostname of the MeiliSearch instance.
+datasources.section.destination-mongodb.auth_type.oneOf.0.title=None
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.title=Password
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.title=User
+datasources.section.destination-mongodb.auth_type.oneOf.1.title=Login/Password
+datasources.section.destination-mongodb.auth_type.title=Authorization type
+datasources.section.destination-mongodb.database.title=DB Name
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.title=Host
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.title=Port
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.title=TLS Connection
+datasources.section.destination-mongodb.instance_type.oneOf.0.title=Standalone MongoDb Instance
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.title=Server addresses
+datasources.section.destination-mongodb.instance_type.oneOf.1.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL
+datasources.section.destination-mongodb.instance_type.oneOf.2.title=MongoDB Atlas
+datasources.section.destination-mongodb.instance_type.title=MongoDb Instance Type
+datasources.section.destination-mongodb.auth_type.description=Authorization type.
+datasources.section.destination-mongodb.auth_type.oneOf.0.description=None.
+datasources.section.destination-mongodb.auth_type.oneOf.1.description=Login/Password.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.description=Password associated with the username.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.description=Username to use to access the database.
+datasources.section.destination-mongodb.database.description=Name of the database.
+datasources.section.destination-mongodb.instance_type.description=MongoDb instance to connect to. For MongoDB Atlas and Replica Set, a TLS connection is used by default.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.description=The Host of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.description=The Port of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.description=Indicates whether the TLS encryption protocol will be used to connect to MongoDB. It is recommended to use a TLS connection if possible. For more information see documentation.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.description=A replica set name.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify the `host`:`port` of each member, separated by commas.
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.description=URL of a cluster to connect to.
+datasources.section.destination-mqtt.automatic_reconnect.title=Automatic reconnect
+datasources.section.destination-mqtt.broker_host.title=MQTT broker host
+datasources.section.destination-mqtt.broker_port.title=MQTT broker port
+datasources.section.destination-mqtt.clean_session.title=Clean session
+datasources.section.destination-mqtt.client.title=Client ID
+datasources.section.destination-mqtt.connect_timeout.title=Connect timeout
+datasources.section.destination-mqtt.message_qos.title=Message QoS
+datasources.section.destination-mqtt.message_retained.title=Message retained
+datasources.section.destination-mqtt.password.title=Password
+datasources.section.destination-mqtt.publisher_sync.title=Sync publisher
+datasources.section.destination-mqtt.topic_pattern.title=Topic pattern
+datasources.section.destination-mqtt.topic_test.title=Test topic
+datasources.section.destination-mqtt.use_tls.title=Use TLS
+datasources.section.destination-mqtt.username.title=Username
+datasources.section.destination-mqtt.automatic_reconnect.description=Whether the client will automatically attempt to reconnect to the server if the connection is lost.
+datasources.section.destination-mqtt.broker_host.description=Host of the broker to connect to.
+datasources.section.destination-mqtt.broker_port.description=Port of the broker.
+datasources.section.destination-mqtt.clean_session.description=Whether the client and server should remember state across restarts and reconnects.
+datasources.section.destination-mqtt.client.description=A client identifier that is unique on the server being connected to.
+datasources.section.destination-mqtt.connect_timeout.description=Maximum time interval (in seconds) the client will wait for the network connection to the MQTT server to be established.
+datasources.section.destination-mqtt.message_qos.description=Quality of service used for each message to be delivered.
+datasources.section.destination-mqtt.message_retained.description=Whether or not the published message should be retained by the messaging engine.
+datasources.section.destination-mqtt.password.description=Password to use for the connection.
+datasources.section.destination-mqtt.publisher_sync.description=Wait synchronously until the record has been sent to the broker.
+datasources.section.destination-mqtt.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-mqtt.topic_test.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-mqtt.use_tls.description=Whether to use TLS encryption on the connection.
+datasources.section.destination-mqtt.username.description=User name to use for the connection.
+datasources.section.destination-mssql.database.title=DB Name
+datasources.section.destination-mssql.host.title=Host
+datasources.section.destination-mssql.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-mssql.password.title=Password
+datasources.section.destination-mssql.port.title=Port
+datasources.section.destination-mssql.schema.title=Default Schema
+datasources.section.destination-mssql.ssl_method.oneOf.0.title=Unencrypted
+datasources.section.destination-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate)
+datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate
+datasources.section.destination-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate)
+datasources.section.destination-mssql.ssl_method.title=SSL Method
+datasources.section.destination-mssql.username.title=User
+datasources.section.destination-mssql.database.description=The name of the MSSQL database.
+datasources.section.destination-mssql.host.description=The host name of the MSSQL database.
+datasources.section.destination-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-mssql.password.description=The password associated with this username.
+datasources.section.destination-mssql.port.description=The port of the MSSQL database.
+datasources.section.destination-mssql.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public".
+datasources.section.destination-mssql.ssl_method.description=The encryption method which is used to communicate with the database.
+datasources.section.destination-mssql.ssl_method.oneOf.0.description=The data transfer will not be encrypted.
+datasources.section.destination-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!)
+datasources.section.destination-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate.
+datasources.section.destination-mssql.username.description=The username which is used to access the database.
+datasources.section.destination-mysql.database.title=DB Name
+datasources.section.destination-mysql.host.title=Host
+datasources.section.destination-mysql.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-mysql.password.title=Password
+datasources.section.destination-mysql.port.title=Port
+datasources.section.destination-mysql.ssl.title=SSL Connection
+datasources.section.destination-mysql.username.title=User
+datasources.section.destination-mysql.database.description=Name of the database.
+datasources.section.destination-mysql.host.description=Hostname of the database.
+datasources.section.destination-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-mysql.password.description=Password associated with the username.
+datasources.section.destination-mysql.port.description=Port of the database.
+datasources.section.destination-mysql.ssl.description=Encrypt data using SSL.
+datasources.section.destination-mysql.username.description=Username to use to access the database.
+datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID
+datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key
+datasources.section.destination-amazon-sqs.message_delay.title=Message Delay
+datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id
+datasources.section.destination-amazon-sqs.queue_url.title=Queue URL
+datasources.section.destination-amazon-sqs.region.title=AWS Region
+datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key
+datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages.
+datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body.
+datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds).
+datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues.
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue.
+datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue.
+datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages.
+datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id
+datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name
+datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn
+datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title
+datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User
+datasources.section.destination-aws-datalake.credentials.title=Authentication mode
+datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name
+datasources.section.destination-aws-datalake.region.title=AWS Region
+datasources.section.destination-aws-datalake.aws_account_id.description=The target AWS account ID.
+datasources.section.destination-aws-datalake.bucket_name.description=The name of the target S3 bucket.
+datasources.section.destination-aws-datalake.bucket_prefix.description=The target S3 bucket prefix.
+datasources.section.destination-aws-datalake.credentials.description=Choose how to authenticate to AWS.
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials.
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=The role ARN that the connector will assume to write data to S3.
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key ID.
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=AWS User Secret Access Key.
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials.
+datasources.section.destination-aws-datalake.lakeformation_database_name.description=The Lake Formation database to use.
+datasources.section.destination-aws-datalake.region.description=The AWS region name.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes)
+datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening)
+datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values
+datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON
+datasources.section.destination-azure-blob-storage.format.title=Output Format
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure blob storage account key.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The name of the Azure Blob Storage account.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure Blob Storage container. If it does not exist, it will be created automatically. If left empty, a container named airbytecontainer+timestamp will be created automatically.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=The Azure Blob Storage endpoint domain name. Leave the default value (or leave it empty if running the container from the command line) to use the native Microsoft endpoint from the example.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The number of megabytes to buffer for the output stream to Azure. This will impact the memory footprint on workers, but may need adjustment for performance and an appropriate block size in Azure.
+datasources.section.destination-azure-blob-storage.format.description=Output data format
+datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to the docs for details.
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional)
+datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source)
+datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID
+datasources.section.destination-bigquery.dataset_location.title=Dataset Location
+datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional)
+datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging
+datasources.section.destination-bigquery.loading_method.title=Loading Method
+datasources.section.destination-bigquery.project_id.title=Project ID
+datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional)
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX=15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here.
+datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty.
+datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here.
+datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here.
+datasources.section.destination-bigquery.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.
+datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.
+datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error.
+datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used.
+datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum.
+datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established.
+datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka.
+datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-keen.api_key.title=API Key
+datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp
+datasources.section.destination-keen.project_id.title=Project ID
+datasources.section.destination-keen.api_key.description=To get Keen Master API Key, navigate to the Access tab from the left-hand, side panel and check the Project Details section.
+datasources.section.destination-keen.infer_timestamp.description=Allow connector to guess keen.timestamp value based on the streamed data.
+datasources.section.destination-keen.project_id.description=To get Keen Project ID, navigate to the Access tab from the left-hand, side panel and check the Project Details section.
+datasources.section.destination-kinesis.accessKey.title=Access Key
+datasources.section.destination-kinesis.bufferSize.title=Buffer Size
+datasources.section.destination-kinesis.endpoint.title=Endpoint
+datasources.section.destination-kinesis.privateKey.title=Private Key
+datasources.section.destination-kinesis.region.title=Region
+datasources.section.destination-kinesis.shardCount.title=Shard Count
+datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for current user.
+datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing kinesis records before being batch streamed.
+datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint.
+datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that are unique for each account, also known as a "recovery phrase".
+datasources.section.destination-kinesis.region.description=AWS region. Your account determines the Regions that are available to you.
+datasources.section.destination-kinesis.shardCount.description=Number of shards to which the data should be streamed.
+datasources.section.destination-kvdb.bucket_id.title=Bucket ID
+datasources.section.destination-kvdb.secret_key.title=Secret Key
+datasources.section.destination-kvdb.bucket_id.description=The ID of your KVdb bucket.
+datasources.section.destination-kvdb.secret_key.description=Your bucket Secret Key.
+datasources.section.destination-local-json.destination_path.title=Destination Path
+datasources.section.destination-local-json.destination_path.description=Path to the directory where json files will be written. The files will be placed inside that local mount. For more information check out our docs
+datasources.section.destination-mariadb-columnstore.database.title=Database
+datasources.section.destination-mariadb-columnstore.host.title=Host
+datasources.section.destination-mariadb-columnstore.password.title=Password
+datasources.section.destination-mariadb-columnstore.port.title=Port
+datasources.section.destination-mariadb-columnstore.username.title=Username
+datasources.section.destination-mariadb-columnstore.database.description=Name of the database.
+datasources.section.destination-mariadb-columnstore.host.description=The Hostname of the database.
+datasources.section.destination-mariadb-columnstore.password.description=The Password associated with the username.
+datasources.section.destination-mariadb-columnstore.port.description=The Port of the database.
+datasources.section.destination-mariadb-columnstore.username.description=The Username which is used to access the database.
+datasources.section.destination-meilisearch.api_key.title=API Key
+datasources.section.destination-meilisearch.host.title=Host
+datasources.section.destination-meilisearch.api_key.description=MeiliSearch API Key. See the docs for more information on how to obtain this key.
+datasources.section.destination-meilisearch.host.description=Hostname of the MeiliSearch instance.
+datasources.section.destination-mongodb.auth_type.oneOf.0.title=None
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.title=Password
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.title=User
+datasources.section.destination-mongodb.auth_type.oneOf.1.title=Login/Password
+datasources.section.destination-mongodb.auth_type.title=Authorization type
+datasources.section.destination-mongodb.database.title=DB Name
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.title=Host
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.title=Port
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.title=TLS Connection
+datasources.section.destination-mongodb.instance_type.oneOf.0.title=Standalone MongoDb Instance
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.title=Server addresses
+datasources.section.destination-mongodb.instance_type.oneOf.1.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL
+datasources.section.destination-mongodb.instance_type.oneOf.2.title=MongoDB Atlas
+datasources.section.destination-mongodb.instance_type.title=MongoDb Instance Type
+datasources.section.destination-mongodb.auth_type.description=Authorization type.
+datasources.section.destination-mongodb.auth_type.oneOf.0.description=None.
+datasources.section.destination-mongodb.auth_type.oneOf.1.description=Login/Password.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.description=Password associated with the username.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.description=Username to use to access the database.
+datasources.section.destination-mongodb.database.description=Name of the database.
+datasources.section.destination-mongodb.instance_type.description=MongoDb instance to connect to. For MongoDB Atlas and Replica Set TLS connection is used by default.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.description=The Host of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.description=The Port of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.description=Indicates whether TLS encryption protocol will be used to connect to MongoDB. It is recommended to use TLS connection if possible. For more information see documentation.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.description=A replica set name.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify `host`:`port` of each member seperated by comma.
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.description=URL of a cluster to connect to.
+datasources.section.destination-mqtt.automatic_reconnect.title=Automatic reconnect
+datasources.section.destination-mqtt.broker_host.title=MQTT broker host
+datasources.section.destination-mqtt.broker_port.title=MQTT broker port
+datasources.section.destination-mqtt.clean_session.title=Clean session
+datasources.section.destination-mqtt.client.title=Client ID
+datasources.section.destination-mqtt.connect_timeout.title=Connect timeout
+datasources.section.destination-mqtt.message_qos.title=Message QoS
+datasources.section.destination-mqtt.message_retained.title=Message retained
+datasources.section.destination-mqtt.password.title=Password
+datasources.section.destination-mqtt.publisher_sync.title=Sync publisher
+datasources.section.destination-mqtt.topic_pattern.title=Topic pattern
+datasources.section.destination-mqtt.topic_test.title=Test topic
+datasources.section.destination-mqtt.use_tls.title=Use TLS
+datasources.section.destination-mqtt.username.title=Username
+datasources.section.destination-mqtt.automatic_reconnect.description=Whether the client will automatically attempt to reconnect to the server if the connection is lost.
+datasources.section.destination-mqtt.broker_host.description=Host of the broker to connect to.
+datasources.section.destination-mqtt.broker_port.description=Port of the broker.
+datasources.section.destination-mqtt.clean_session.description=Whether the client and server should remember state across restarts and reconnects.
+datasources.section.destination-mqtt.client.description=A client identifier that is unique on the server being connected to.
+datasources.section.destination-mqtt.connect_timeout.description= Maximum time interval (in seconds) the client will wait for the network connection to the MQTT server to be established.
+datasources.section.destination-mqtt.message_qos.description=Quality of service used for each message to be delivered.
+datasources.section.destination-mqtt.message_retained.description=Whether or not the publish message should be retained by the messaging engine.
+datasources.section.destination-mqtt.password.description=Password to use for the connection.
+datasources.section.destination-mqtt.publisher_sync.description=Wait synchronously until the record has been sent to the broker.
+datasources.section.destination-mqtt.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-mqtt.topic_test.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-mqtt.use_tls.description=Whether to use TLS encryption on the connection.
+datasources.section.destination-mqtt.username.description=User name to use for the connection.
+datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID
+datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key
+datasources.section.destination-amazon-sqs.message_delay.title=Message Delay
+datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id
+datasources.section.destination-amazon-sqs.queue_url.title=Queue URL
+datasources.section.destination-amazon-sqs.region.title=AWS Region
+datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key
+datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages
+datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body.
+datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds).
+datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues.
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue
+datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue
+datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages
+datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id
+datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name
+datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn
+datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title
+datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User
+datasources.section.destination-aws-datalake.credentials.title=Authentication mode
+datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name
+datasources.section.destination-aws-datalake.region.title=AWS Region
+datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id
+datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket
+datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix
+datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS.
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to s3
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials
+datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use
+datasources.section.destination-aws-datalake.region.description=Region name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes)
+datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening)
+datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values
+datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON
+datasources.section.destination-azure-blob-storage.format.title=Output Format
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure blob storage account key.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The account's name of the Azure Blob Storage.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure blob storage container. If not exists - will be created automatically. May be empty, then will be created automatically airbytecontainer+timestamp
+datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=This is Azure Blob Storage endpoint domain name. Leave default value (or leave it empty if run container from command line) to use Microsoft native from example.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The amount of megabytes to buffer for the output stream to Azure. This will impact memory footprint on workers, but may need adjustment for performance and appropriate block size in Azure.
+datasources.section.destination-azure-blob-storage.format.description=Output data format
+datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details.
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional)
+datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source)
+datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID
+datasources.section.destination-bigquery.dataset_location.title=Dataset Location
+datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional)
+datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging
+datasources.section.destination-bigquery.loading_method.title=Loading Method
+datasources.section.destination-bigquery.project_id.title=Project ID
+datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional)
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here.
+datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty.
+datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here.
+datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here.
+datasources.section.destination-bigquery.loading_method.description=Loading method used to send select the way data will be uploaded to BigQuery. host1:port1,host2:port2,...
. Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down).
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server.
+datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses.
+datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.
+datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer.
+datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns.
+datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream.
+datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request.
+datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block.
+datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5.
+datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes.
+datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers.
+datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.
+datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.
+datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error.
+datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used.
+datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum.
+datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established.
+datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka.
+datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-keen.api_key.title=API Key
+datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp
+datasources.section.destination-keen.project_id.title=Project ID
+datasources.section.destination-keen.api_key.description=To get Keen Master API Key, navigate to the Access tab from the left-hand, side panel and check the Project Details section.
+datasources.section.destination-keen.infer_timestamp.description=Allow connector to guess keen.timestamp value based on the streamed data.
+datasources.section.destination-keen.project_id.description=To get Keen Project ID, navigate to the Access tab from the left-hand, side panel and check the Project Details section.
+datasources.section.destination-kinesis.accessKey.title=Access Key
+datasources.section.destination-kinesis.bufferSize.title=Buffer Size
+datasources.section.destination-kinesis.endpoint.title=Endpoint
+datasources.section.destination-kinesis.privateKey.title=Private Key
+datasources.section.destination-kinesis.region.title=Region
+datasources.section.destination-kinesis.shardCount.title=Shard Count
+datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for current user.
+datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing kinesis records before being batch streamed.
+datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint.
+datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that are unique for each account, also known as a "recovery phrase".
+datasources.section.destination-kinesis.region.description=AWS region. Your account determines the Regions that are available to you.
+datasources.section.destination-kinesis.shardCount.description=Number of shards to which the data should be streamed.
+datasources.section.destination-kvdb.bucket_id.title=Bucket ID
+datasources.section.destination-kvdb.secret_key.title=Secret Key
+datasources.section.destination-kvdb.bucket_id.description=The ID of your KVdb bucket.
+datasources.section.destination-kvdb.secret_key.description=Your bucket Secret Key.
+datasources.section.destination-local-json.destination_path.title=Destination Path
+datasources.section.destination-local-json.destination_path.description=Path to the directory where json files will be written. The files will be placed inside that local mount. For more information check out our docs
+datasources.section.destination-mariadb-columnstore.database.title=Database
+datasources.section.destination-mariadb-columnstore.host.title=Host
+datasources.section.destination-mariadb-columnstore.password.title=Password
+datasources.section.destination-mariadb-columnstore.port.title=Port
+datasources.section.destination-mariadb-columnstore.username.title=Username
+datasources.section.destination-mariadb-columnstore.database.description=Name of the database.
+datasources.section.destination-mariadb-columnstore.host.description=The Hostname of the database.
+datasources.section.destination-mariadb-columnstore.password.description=The Password associated with the username.
+datasources.section.destination-mariadb-columnstore.port.description=The Port of the database.
+datasources.section.destination-mariadb-columnstore.username.description=The Username which is used to access the database.
+datasources.section.destination-meilisearch.api_key.title=API Key
+datasources.section.destination-meilisearch.host.title=Host
+datasources.section.destination-meilisearch.api_key.description=MeiliSearch API Key. See the docs for more information on how to obtain this key.
+datasources.section.destination-meilisearch.host.description=Hostname of the MeiliSearch instance.
+datasources.section.destination-mongodb.auth_type.oneOf.0.title=None
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.title=Password
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.title=User
+datasources.section.destination-mongodb.auth_type.oneOf.1.title=Login/Password
+datasources.section.destination-mongodb.auth_type.title=Authorization type
+datasources.section.destination-mongodb.database.title=DB Name
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.title=Host
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.title=Port
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.title=TLS Connection
+datasources.section.destination-mongodb.instance_type.oneOf.0.title=Standalone MongoDb Instance
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.title=Server addresses
+datasources.section.destination-mongodb.instance_type.oneOf.1.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL
+datasources.section.destination-mongodb.instance_type.oneOf.2.title=MongoDB Atlas
+datasources.section.destination-mongodb.instance_type.title=MongoDb Instance Type
+datasources.section.destination-mongodb.auth_type.description=Authorization type.
+datasources.section.destination-mongodb.auth_type.oneOf.0.description=None.
+datasources.section.destination-mongodb.auth_type.oneOf.1.description=Login/Password.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.description=Password associated with the username.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.description=Username to use to access the database.
+datasources.section.destination-mongodb.database.description=Name of the database.
+datasources.section.destination-mongodb.instance_type.description=MongoDb instance to connect to. For MongoDB Atlas and Replica Set, a TLS connection is used by default.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.description=The Host of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.description=The Port of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.description=Indicates whether the TLS encryption protocol will be used to connect to MongoDB. It is recommended to use a TLS connection if possible. For more information, see the documentation.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.description=A replica set name.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify `host`:`port` of each member separated by a comma.
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.description=URL of a cluster to connect to.
+datasources.section.destination-mqtt.automatic_reconnect.title=Automatic reconnect
+datasources.section.destination-mqtt.broker_host.title=MQTT broker host
+datasources.section.destination-mqtt.broker_port.title=MQTT broker port
+datasources.section.destination-mqtt.clean_session.title=Clean session
+datasources.section.destination-mqtt.client.title=Client ID
+datasources.section.destination-mqtt.connect_timeout.title=Connect timeout
+datasources.section.destination-mqtt.message_qos.title=Message QoS
+datasources.section.destination-mqtt.message_retained.title=Message retained
+datasources.section.destination-mqtt.password.title=Password
+datasources.section.destination-mqtt.publisher_sync.title=Sync publisher
+datasources.section.destination-mqtt.topic_pattern.title=Topic pattern
+datasources.section.destination-mqtt.topic_test.title=Test topic
+datasources.section.destination-mqtt.use_tls.title=Use TLS
+datasources.section.destination-mqtt.username.title=Username
+datasources.section.destination-mqtt.automatic_reconnect.description=Whether the client will automatically attempt to reconnect to the server if the connection is lost.
+datasources.section.destination-mqtt.broker_host.description=Host of the broker to connect to.
+datasources.section.destination-mqtt.broker_port.description=Port of the broker.
+datasources.section.destination-mqtt.clean_session.description=Whether the client and server should remember state across restarts and reconnects.
+datasources.section.destination-mqtt.client.description=A client identifier that is unique on the server being connected to.
+datasources.section.destination-mqtt.connect_timeout.description=Maximum time interval (in seconds) the client will wait for the network connection to the MQTT server to be established.
+datasources.section.destination-mqtt.message_qos.description=Quality of service used for each message to be delivered.
+datasources.section.destination-mqtt.message_retained.description=Whether or not the published message should be retained by the messaging engine.
+datasources.section.destination-mqtt.password.description=Password to use for the connection.
+datasources.section.destination-mqtt.publisher_sync.description=Wait synchronously until the record has been sent to the broker.
+datasources.section.destination-mqtt.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-mqtt.topic_test.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-mqtt.use_tls.description=Whether to use TLS encryption on the connection.
+datasources.section.destination-mqtt.username.description=User name to use for the connection.
+datasources.section.destination-mssql.database.title=DB Name
+datasources.section.destination-mssql.host.title=Host
+datasources.section.destination-mssql.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-mssql.password.title=Password
+datasources.section.destination-mssql.port.title=Port
+datasources.section.destination-mssql.schema.title=Default Schema
+datasources.section.destination-mssql.ssl_method.oneOf.0.title=Unencrypted
+datasources.section.destination-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate)
+datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate
+datasources.section.destination-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate)
+datasources.section.destination-mssql.ssl_method.title=SSL Method
+datasources.section.destination-mssql.username.title=User
+datasources.section.destination-mssql.database.description=The name of the MSSQL database.
+datasources.section.destination-mssql.host.description=The host name of the MSSQL database.
+datasources.section.destination-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-mssql.password.description=The password associated with this username.
+datasources.section.destination-mssql.port.description=The port of the MSSQL database.
+datasources.section.destination-mssql.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public".
+datasources.section.destination-mssql.ssl_method.description=The encryption method which is used to communicate with the database.
+datasources.section.destination-mssql.ssl_method.oneOf.0.description=The data transfer will not be encrypted.
+datasources.section.destination-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!)
+datasources.section.destination-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate.
+datasources.section.destination-mssql.username.description=The username which is used to access the database.
+datasources.section.destination-mysql.database.title=DB Name
+datasources.section.destination-mysql.host.title=Host
+datasources.section.destination-mysql.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-mysql.password.title=Password
+datasources.section.destination-mysql.port.title=Port
+datasources.section.destination-mysql.ssl.title=SSL Connection
+datasources.section.destination-mysql.username.title=User
+datasources.section.destination-mysql.database.description=Name of the database.
+datasources.section.destination-mysql.host.description=Hostname of the database.
+datasources.section.destination-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-mysql.password.description=Password associated with the username.
+datasources.section.destination-mysql.port.description=Port of the database.
+datasources.section.destination-mysql.ssl.description=Encrypt data using SSL.
+datasources.section.destination-mysql.username.description=Username to use to access the database.
+datasources.section.destination-oracle.encryption.oneOf.0.title=Unencrypted
+datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.title=Encryption Algorithm
+datasources.section.destination-oracle.encryption.oneOf.1.title=Native Network Encryption (NNE)
+datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.title=SSL PEM file
+datasources.section.destination-oracle.encryption.oneOf.2.title=TLS Encrypted (verify certificate)
+datasources.section.destination-oracle.encryption.title=Encryption
+datasources.section.destination-oracle.host.title=Host
+datasources.section.destination-oracle.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-oracle.password.title=Password
+datasources.section.destination-oracle.port.title=Port
+datasources.section.destination-oracle.schema.title=Default Schema
+datasources.section.destination-oracle.sid.title=SID
+datasources.section.destination-oracle.username.title=User
+datasources.section.destination-oracle.encryption.description=The encryption method which is used when communicating with the database.
+datasources.section.destination-oracle.encryption.oneOf.0.description=Data transfer will not be encrypted.
+datasources.section.destination-oracle.encryption.oneOf.1.description=The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports.
+datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.description=This parameter defines the database encryption algorithm.
+datasources.section.destination-oracle.encryption.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations.
+datasources.section.destination-oracle.host.description=The hostname of the database.
+datasources.section.destination-oracle.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-oracle.password.description=The password associated with the username.
+datasources.section.destination-oracle.port.description=The port of the database.
+datasources.section.destination-oracle.schema.description=The default schema is used as the target schema for all statements issued from the connection that do not explicitly specify a schema name. The usual value for this field is "airbyte". In Oracle, schemas and users are the same thing, so the "user" parameter is used as the login credentials and this is used for the default Airbyte message schema.
+datasources.section.destination-oracle.sid.description=The System Identifier uniquely distinguishes the instance from any other instance on the same computer.
+datasources.section.destination-oracle.username.description=The username to access the database. This user must have CREATE USER privileges in the database.
+datasources.section.destination-postgres.database.title=DB Name
+datasources.section.destination-postgres.host.title=Host
+datasources.section.destination-postgres.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-postgres.password.title=Password
+datasources.section.destination-postgres.port.title=Port
+datasources.section.destination-postgres.schema.title=Default Schema
+datasources.section.destination-postgres.ssl.title=SSL Connection
+datasources.section.destination-postgres.ssl_mode.oneOf.0.title=disable
+datasources.section.destination-postgres.ssl_mode.oneOf.1.title=allow
+datasources.section.destination-postgres.ssl_mode.oneOf.2.title=prefer
+datasources.section.destination-postgres.ssl_mode.oneOf.3.title=require
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.title=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.title=Client key password (Optional)
+datasources.section.destination-postgres.ssl_mode.oneOf.4.title=verify-ca
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.title=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.title=Client certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.title=Client key
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.title=Client key password (Optional)
+datasources.section.destination-postgres.ssl_mode.oneOf.5.title=verify-full
+datasources.section.destination-postgres.ssl_mode.title=SSL modes
+datasources.section.destination-postgres.username.title=User
+datasources.section.destination-postgres.database.description=Name of the database.
+datasources.section.destination-postgres.host.description=Hostname of the database.
+datasources.section.destination-postgres.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-postgres.password.description=Password associated with the username.
+datasources.section.destination-postgres.port.description=Port of the database.
+datasources.section.destination-postgres.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public".
+datasources.section.destination-postgres.ssl.description=Encrypt data using SSL. When activating SSL, please select one of the connection modes.
+datasources.section.destination-postgres.ssl_mode.description=SSL connection modes.
+datasources.section.destination-postgres.ssl_mode.oneOf.0.description=Disable SSL.
+datasources.section.destination-postgres.ssl_mode.oneOf.1.description=Allow SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.2.description=Prefer SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.3.description=Require SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.4.description=Verify-ca SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.description=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.description=Password for key storage. This field is optional. If you do not provide it, a password will be generated automatically.
+datasources.section.destination-postgres.ssl_mode.oneOf.5.description=Verify-full SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.description=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.description=Client certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.description=Client key
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.description=Password for key storage. This field is optional. If you do not provide it, a password will be generated automatically.
+datasources.section.destination-postgres.username.description=Username to use to access the database.
+datasources.section.destination-pubsub.credentials_json.title=Credentials JSON
+datasources.section.destination-pubsub.project_id.title=Project ID
+datasources.section.destination-pubsub.topic_id.title=PubSub Topic ID
+datasources.section.destination-pubsub.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key.
+datasources.section.destination-pubsub.project_id.description=The GCP project ID for the project containing the target PubSub.
+datasources.section.destination-pubsub.topic_id.description=The PubSub topic ID in the given GCP project ID.
+datasources.section.destination-pulsar.batching_enabled.title=Enable batching
+datasources.section.destination-pulsar.batching_max_messages.title=Batching max messages
+datasources.section.destination-pulsar.batching_max_publish_delay.title=Batching max publish delay
+datasources.section.destination-pulsar.block_if_queue_full.title=Block if queue is full
+datasources.section.destination-pulsar.brokers.title=Pulsar brokers
+datasources.section.destination-pulsar.compression_type.title=Compression type
+datasources.section.destination-pulsar.max_pending_messages.title=Max pending messages
+datasources.section.destination-pulsar.max_pending_messages_across_partitions.title=Max pending messages across partitions
+datasources.section.destination-pulsar.producer_name.title=Producer name
+datasources.section.destination-pulsar.producer_sync.title=Sync producer
+datasources.section.destination-pulsar.send_timeout_ms.title=Message send timeout
+datasources.section.destination-pulsar.topic_namespace.title=Topic namespace
+datasources.section.destination-pulsar.topic_pattern.title=Topic pattern
+datasources.section.destination-pulsar.topic_tenant.title=Topic tenant
+datasources.section.destination-pulsar.topic_test.title=Test topic
+datasources.section.destination-pulsar.topic_type.title=Topic type
+datasources.section.destination-pulsar.use_tls.title=Use TLS
+datasources.section.destination-pulsar.batching_enabled.description=Control whether automatic batching of messages is enabled for the producer.
+datasources.section.destination-pulsar.batching_max_messages.description=Maximum number of messages permitted in a batch.
+datasources.section.destination-pulsar.batching_max_publish_delay.description=Time period in milliseconds within which the messages sent will be batched.
+datasources.section.destination-pulsar.block_if_queue_full.description=Whether the send operation should block when the outgoing message queue is full.
+datasources.section.destination-pulsar.brokers.description=A list of host/port pairs to use for establishing the initial connection to the Pulsar cluster.
+datasources.section.destination-pulsar.compression_type.description=Compression type for the producer.
+datasources.section.destination-pulsar.max_pending_messages.description=The maximum size of a queue holding pending messages.
+datasources.section.destination-pulsar.max_pending_messages_across_partitions.description=The maximum number of pending messages across partitions.
+datasources.section.destination-pulsar.producer_name.description=Name for the producer. If not filled, the system will generate a globally unique name.
+datasources.section.destination-pulsar.producer_sync.description=Wait synchronously until the record has been sent to Pulsar.
+datasources.section.destination-pulsar.send_timeout_ms.description=If a message is not acknowledged by a server before the send-timeout expires, an error occurs (in ms).
+datasources.section.destination-pulsar.topic_namespace.description=The administrative unit of the topic, which acts as a grouping mechanism for related topics. Most topic configuration is performed at the namespace level. Each tenant has one or multiple namespaces.
+datasources.section.destination-pulsar.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-pulsar.topic_tenant.description=The topic tenant within the instance. Tenants are essential to multi-tenancy in Pulsar, and are spread across clusters.
+datasources.section.destination-pulsar.topic_test.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-pulsar.topic_type.description=It identifies the type of topic. Pulsar supports two kinds of topics: persistent and non-persistent. In a persistent topic, all messages are durably persisted on disk (that means on multiple disks unless the broker is standalone), whereas a non-persistent topic does not persist messages to storage disk.
+datasources.section.destination-pulsar.use_tls.description=Whether to use TLS encryption on the connection.
+datasources.section.destination-rabbitmq.exchange.description=The exchange name.
+datasources.section.destination-rabbitmq.host.description=The RabbitMQ host name.
+datasources.section.destination-rabbitmq.password.description=The password to connect.
+datasources.section.destination-rabbitmq.port.description=The RabbitMQ port.
+datasources.section.destination-rabbitmq.routing_key.description=The routing key.
+datasources.section.destination-rabbitmq.ssl.description=SSL enabled.
+datasources.section.destination-rabbitmq.username.description=The username to connect.
+datasources.section.destination-rabbitmq.virtual_host.description=The RabbitMQ virtual host name.
+datasources.section.destination-redis.cache_type.title=Cache type
+datasources.section.destination-redis.host.title=Host
+datasources.section.destination-redis.password.title=Password
+datasources.section.destination-redis.port.title=Port
+datasources.section.destination-redis.username.title=Username
+datasources.section.destination-redis.cache_type.description=Redis cache type to store data in.
+datasources.section.destination-redis.host.description=Redis host to connect to.
+datasources.section.destination-redis.password.description=Password associated with Redis.
+datasources.section.destination-redis.port.description=Port of Redis.
+datasources.section.destination-redis.username.description=Username associated with Redis.
+datasources.section.destination-redshift.database.title=Database
+datasources.section.destination-redshift.host.title=Host
+datasources.section.destination-redshift.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-redshift.password.title=Password
+datasources.section.destination-redshift.port.title=Port
+datasources.section.destination-redshift.schema.title=Default Schema
+datasources.section.destination-redshift.uploading_method.oneOf.0.title=Standard
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.title=S3 Key Id
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.title=No encryption
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.title=Key
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.title=AES-CBC envelope encryption
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.title=Encryption
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.title=S3 Filename pattern (Optional)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.title=Purge Staging Files and Tables (Optional)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.title=S3 Bucket Name
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.title=S3 Bucket Path (Optional)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.title=S3 Bucket Region
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.title=S3 Access Key
+datasources.section.destination-redshift.uploading_method.oneOf.1.title=S3 Staging
+datasources.section.destination-redshift.uploading_method.title=Uploading Method
+datasources.section.destination-redshift.username.title=Username
+datasources.section.destination-redshift.database.description=Name of the database.
+datasources.section.destination-redshift.host.description=Host Endpoint of the Redshift Cluster (must include the cluster-id and region, and end with .redshift.amazonaws.com).
+datasources.section.destination-redshift.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-redshift.password.description=Password associated with the username.
+datasources.section.destination-redshift.port.description=Port of the database.
+datasources.section.destination-redshift.schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public".
+datasources.section.destination-redshift.uploading_method.description=The method used to upload data to the database.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.description=This ID grants access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. See AWS docs on how to generate an access key ID and secret access key.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.description=How to encrypt the staging data
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.description=Staging data will be stored in plaintext.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.description=Staging data will be encrypted using AES-CBC envelope encryption.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.description=The key, base64-encoded. Must be either 128, 192, or 256 bits. Leave blank to have Airbyte generate an ephemeral key for each sync.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.description=Whether to delete the staging files from S3 after completing the sync. See docs for details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.description=The name of the staging S3 bucket to use if utilising a COPY strategy. COPY is recommended for production workloads for better speed and scalability. See AWS docs for more details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. If not provided, then defaults to the root directory. See path's name recommendations for more details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a COPY strategy. See AWS docs for details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.description=The corresponding secret to the above access key id. See AWS docs on how to generate an access key ID and secret access key.
+datasources.section.destination-redshift.username.description=Username to use to access the database.
+datasources.section.destination-rockset.api_key.title=Api Key
+datasources.section.destination-rockset.api_server.title=Api Server
+datasources.section.destination-rockset.workspace.title=Workspace
+datasources.section.destination-rockset.api_key.description=Rockset API key
+datasources.section.destination-rockset.api_server.description=Rockset API URL
+datasources.section.destination-rockset.workspace.description=The Rockset workspace in which collections will be created and written to.
+datasources.section.destination-s3.access_key_id.title=S3 Key ID *
+datasources.section.destination-s3.file_name_pattern.title=S3 Filename pattern (Optional)
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate Level
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.title=Compression Codec *
+datasources.section.destination-s3.format.oneOf.0.properties.format_type.title=Format Type *
+datasources.section.destination-s3.format.oneOf.0.title=Avro: Apache Avro
+datasources.section.destination-s3.format.oneOf.1.properties.compression.oneOf.0.title=No Compression
+datasources.section.destination-s3.format.oneOf.1.properties.compression.oneOf.1.title=GZIP
+datasources.section.destination-s3.format.oneOf.1.properties.compression.title=Compression
+datasources.section.destination-s3.format.oneOf.1.properties.flattening.title=Normalization (Flattening)
+datasources.section.destination-s3.format.oneOf.1.properties.format_type.title=Format Type *
+datasources.section.destination-s3.format.oneOf.1.title=CSV: Comma-Separated Values
+datasources.section.destination-s3.format.oneOf.2.properties.compression.oneOf.0.title=No Compression
+datasources.section.destination-s3.format.oneOf.2.properties.compression.oneOf.1.title=GZIP
+datasources.section.destination-s3.format.oneOf.2.properties.compression.title=Compression
+datasources.section.destination-s3.format.oneOf.2.properties.format_type.title=Format Type *
+datasources.section.destination-s3.format.oneOf.2.title=JSON Lines: Newline-delimited JSON
+datasources.section.destination-s3.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.format_type.title=Format Type *
+datasources.section.destination-s3.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional)
+datasources.section.destination-s3.format.oneOf.3.title=Parquet: Columnar Storage
+datasources.section.destination-s3.format.title=Output Format *
+datasources.section.destination-s3.s3_bucket_name.title=S3 Bucket Name
+datasources.section.destination-s3.s3_bucket_path.title=S3 Bucket Path
+datasources.section.destination-s3.s3_bucket_region.title=S3 Bucket Region
+datasources.section.destination-s3.s3_endpoint.title=Endpoint (Optional)
+datasources.section.destination-s3.s3_path_format.title=S3 Path Format (Optional)
+datasources.section.destination-s3.secret_access_key.title=S3 Access Key *
+datasources.section.destination-s3.access_key_id.description=The access key ID to access the S3 bucket. Airbyte requires Read and Write permissions to the given bucket. Read more here.
+datasources.section.destination-s3.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s)
+datasources.section.destination-s3.format.description=Format of the data output. See here for more details
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Default to no compression.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=See here for details.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block.
+datasources.section.destination-s3.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz").
+datasources.section.destination-s3.format.oneOf.1.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details.
+datasources.section.destination-s3.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz").
+datasources.section.destination-s3.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB.
+datasources.section.destination-s3.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages.
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_encoding.description=Default: true.
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for dictionary. Default: 1024 KB.
+datasources.section.destination-s3.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. This is also the minimum size of a row group. Default: 8 MB.
+datasources.section.destination-s3.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB.
+datasources.section.destination-s3.s3_bucket_name.description=The name of the S3 bucket. Read more here.
+datasources.section.destination-s3.s3_bucket_path.description=Directory under the S3 bucket where data will be written. Read more here
+datasources.section.destination-s3.s3_bucket_region.description=The region of the S3 bucket. See here for all region codes.
+datasources.section.destination-s3.s3_endpoint.description=Your S3 endpoint url. Read more here
+datasources.section.destination-s3.s3_path_format.description=Format string on how data will be organized inside the S3 bucket directory. Read more here
+datasources.section.destination-s3.secret_access_key.description=The corresponding secret to the access key ID. Read more here
+datasources.section.destination-scylla.address.title=Address
+datasources.section.destination-scylla.keyspace.title=Keyspace
+datasources.section.destination-scylla.password.title=Password
+datasources.section.destination-scylla.port.title=Port
+datasources.section.destination-scylla.replication.title=Replication factor
+datasources.section.destination-scylla.username.title=Username
+datasources.section.destination-scylla.address.description=Address to connect to.
+datasources.section.destination-scylla.keyspace.description=Default Scylla keyspace to create data in.
+datasources.section.destination-scylla.password.description=Password associated with Scylla.
+datasources.section.destination-scylla.port.description=Port of Scylla.
+datasources.section.destination-scylla.replication.description=Indicates to how many nodes the data should be replicated.
+datasources.section.destination-scylla.username.description=Username to use to access Scylla.
+datasources.section.destination-sftp-json.destination_path.title=Destination path
+datasources.section.destination-sftp-json.host.title=Host
+datasources.section.destination-sftp-json.password.title=Password
+datasources.section.destination-sftp-json.port.title=Port
+datasources.section.destination-sftp-json.username.title=User
+datasources.section.destination-sftp-json.destination_path.description=Path to the directory where JSON files will be written.
+datasources.section.destination-sftp-json.host.description=Hostname of the SFTP server.
+datasources.section.destination-sftp-json.password.description=Password associated with the username.
+datasources.section.destination-sftp-json.port.description=Port of the SFTP server.
+datasources.section.destination-sftp-json.username.description=Username to use to access the SFTP server.
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.access_token.title=Access Token
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.refresh_token.title=Refresh Token
+datasources.section.destination-snowflake.credentials.oneOf.0.title=OAuth2.0
+datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key.title=Private Key
+datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key_password.title=Passphrase (Optional)
+datasources.section.destination-snowflake.credentials.oneOf.1.title=Key Pair Authentication
+datasources.section.destination-snowflake.credentials.oneOf.2.properties.password.title=Password
+datasources.section.destination-snowflake.credentials.oneOf.2.title=Username and Password
+datasources.section.destination-snowflake.credentials.title=Authorization Method
+datasources.section.destination-snowflake.database.title=Database
+datasources.section.destination-snowflake.host.title=Host
+datasources.section.destination-snowflake.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-snowflake.loading_method.oneOf.0.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.0.title=Select another option
+datasources.section.destination-snowflake.loading_method.oneOf.1.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.1.title=[Recommended] Internal Staging
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.access_key_id.title=AWS access key ID
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.0.title=No encryption
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.properties.key_encrypting_key.title=Key
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.title=AES-CBC envelope encryption
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.title=Encryption
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.file_name_pattern.title=S3 Filename pattern (Optional)
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.purge_staging_data.title=Purge Staging Files and Tables
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_name.title=S3 Bucket Name
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_region.title=S3 Bucket Region
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.secret_access_key.title=AWS secret access key
+datasources.section.destination-snowflake.loading_method.oneOf.2.title=AWS S3 Staging
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.bucket_name.title=Cloud Storage bucket name
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.credentials_json.title=Google Application Credentials
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.project_id.title=Google Cloud project ID
+datasources.section.destination-snowflake.loading_method.oneOf.3.title=Google Cloud Storage Staging
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_account_name.title=Azure Blob Storage account name
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_container_name.title=Azure Blob Storage Container Name
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_endpoint_domain_name.title=Azure Blob Storage Endpoint
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_sas_token.title=SAS Token
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.4.title=Azure Blob Storage Staging
+datasources.section.destination-snowflake.loading_method.title=Data Staging Method
+datasources.section.destination-snowflake.role.title=Role
+datasources.section.destination-snowflake.schema.title=Default Schema
+datasources.section.destination-snowflake.username.title=Username
+datasources.section.destination-snowflake.warehouse.title=Warehouse
+datasources.section.destination-snowflake.credentials.description=
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.access_token.description=Enter your application's Access Token
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_id.description=Enter your application's Client ID
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_secret.description=Enter your application's Client secret
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.refresh_token.description=Enter your application's Refresh Token
+datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key.description=RSA Private key to use for Snowflake connection. See the docs for more information on how to obtain this key.
+datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key_password.description=Passphrase for private key
+datasources.section.destination-snowflake.credentials.oneOf.2.properties.password.description=Enter the password associated with the username.
+datasources.section.destination-snowflake.database.description=Enter the name of the database you want to sync data into
+datasources.section.destination-snowflake.host.description=Enter your Snowflake account's locator.
+datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that are unique for each account, also known as a "recovery phrase".
+datasources.section.destination-kinesis.region.description=AWS region. Your account determines the Regions that are available to you.
+datasources.section.destination-kinesis.shardCount.description=Number of shards to which the data should be streamed.
+datasources.section.destination-kvdb.bucket_id.title=Bucket ID
+datasources.section.destination-kvdb.secret_key.title=Secret Key
+datasources.section.destination-kvdb.bucket_id.description=The ID of your KVdb bucket.
+datasources.section.destination-kvdb.secret_key.description=Your bucket Secret Key.
+datasources.section.destination-local-json.destination_path.title=Destination Path
+datasources.section.destination-local-json.destination_path.description=Path to the directory where json files will be written. The files will be placed inside that local mount. For more information check out our docs
+datasources.section.destination-mariadb-columnstore.database.title=Database
+datasources.section.destination-mariadb-columnstore.host.title=Host
+datasources.section.destination-mariadb-columnstore.password.title=Password
+datasources.section.destination-mariadb-columnstore.port.title=Port
+datasources.section.destination-mariadb-columnstore.username.title=Username
+datasources.section.destination-mariadb-columnstore.database.description=Name of the database.
+datasources.section.destination-mariadb-columnstore.host.description=The Hostname of the database.
+datasources.section.destination-mariadb-columnstore.password.description=The Password associated with the username.
+datasources.section.destination-mariadb-columnstore.port.description=The Port of the database.
+datasources.section.destination-mariadb-columnstore.username.description=The Username which is used to access the database.
+datasources.section.destination-meilisearch.api_key.title=API Key
+datasources.section.destination-meilisearch.host.title=Host
+datasources.section.destination-meilisearch.api_key.description=MeiliSearch API Key. See the docs for more information on how to obtain this key.
+datasources.section.destination-meilisearch.host.description=Hostname of the MeiliSearch instance.
+datasources.section.destination-mongodb.auth_type.oneOf.0.title=None
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.title=Password
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.title=User
+datasources.section.destination-mongodb.auth_type.oneOf.1.title=Login/Password
+datasources.section.destination-mongodb.auth_type.title=Authorization type
+datasources.section.destination-mongodb.database.title=DB Name
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.title=Host
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.title=Port
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.title=TLS Connection
+datasources.section.destination-mongodb.instance_type.oneOf.0.title=Standalone MongoDb Instance
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.title=Server addresses
+datasources.section.destination-mongodb.instance_type.oneOf.1.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL
+datasources.section.destination-mongodb.instance_type.oneOf.2.title=MongoDB Atlas
+datasources.section.destination-mongodb.instance_type.title=MongoDb Instance Type
+datasources.section.destination-mongodb.auth_type.description=Authorization type.
+datasources.section.destination-mongodb.auth_type.oneOf.0.description=None.
+datasources.section.destination-mongodb.auth_type.oneOf.1.description=Login/Password.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.description=Password associated with the username.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.description=Username to use to access the database.
+datasources.section.destination-mongodb.database.description=Name of the database.
+datasources.section.destination-mongodb.instance_type.description=MongoDb instance to connect to. For MongoDB Atlas and Replica Set TLS connection is used by default.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.description=The Host of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.description=The Port of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.description=Indicates whether TLS encryption protocol will be used to connect to MongoDB. It is recommended to use TLS connection if possible. For more information see documentation.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.description=A replica set name.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify the `host`:`port` of each member, separated by commas.
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.description=URL of a cluster to connect to.
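+# Illustrative note (comment, not a localization key): the MongoDB "Server addresses" value
+# described above is a comma-separated list of host:port pairs, e.g. with hypothetical hosts:
+# mongo-node1:27017,mongo-node2:27017,mongo-node3:27017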
+datasources.section.destination-mqtt.automatic_reconnect.title=Automatic reconnect
+datasources.section.destination-mqtt.broker_host.title=MQTT broker host
+datasources.section.destination-mqtt.broker_port.title=MQTT broker port
+datasources.section.destination-mqtt.clean_session.title=Clean session
+datasources.section.destination-mqtt.client.title=Client ID
+datasources.section.destination-mqtt.connect_timeout.title=Connect timeout
+datasources.section.destination-mqtt.message_qos.title=Message QoS
+datasources.section.destination-mqtt.message_retained.title=Message retained
+datasources.section.destination-mqtt.password.title=Password
+datasources.section.destination-mqtt.publisher_sync.title=Sync publisher
+datasources.section.destination-mqtt.topic_pattern.title=Topic pattern
+datasources.section.destination-mqtt.topic_test.title=Test topic
+datasources.section.destination-mqtt.use_tls.title=Use TLS
+datasources.section.destination-mqtt.username.title=Username
+datasources.section.destination-mqtt.automatic_reconnect.description=Whether the client will automatically attempt to reconnect to the server if the connection is lost.
+datasources.section.destination-mqtt.broker_host.description=Host of the broker to connect to.
+datasources.section.destination-mqtt.broker_port.description=Port of the broker.
+datasources.section.destination-mqtt.clean_session.description=Whether the client and server should remember state across restarts and reconnects.
+datasources.section.destination-mqtt.client.description=A client identifier that is unique on the server being connected to.
+datasources.section.destination-mqtt.connect_timeout.description=Maximum time interval (in seconds) the client will wait for the network connection to the MQTT server to be established.
+datasources.section.destination-mqtt.message_qos.description=Quality of service used for each message to be delivered.
+datasources.section.destination-mqtt.message_retained.description=Whether or not the publish message should be retained by the messaging engine.
+datasources.section.destination-mqtt.password.description=Password to use for the connection.
+datasources.section.destination-mqtt.publisher_sync.description=Wait synchronously until the record has been sent to the broker.
+datasources.section.destination-mqtt.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-mqtt.topic_test.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-mqtt.use_tls.description=Whether to use TLS encryption on the connection.
+datasources.section.destination-mqtt.username.description=User name to use for the connection.
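+# Illustrative note (comment, not a localization key): the MQTT "Topic pattern" above accepts
+# '{namespace}' and '{stream}' placeholders; a hypothetical value such as
+# airbyte.{namespace}.{stream} would route each stream to its own topic, with the resulting
+# name normalized to the standard naming convention.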
+datasources.section.destination-mssql.database.title=DB Name
+datasources.section.destination-mssql.host.title=Host
+datasources.section.destination-mssql.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-mssql.password.title=Password
+datasources.section.destination-mssql.port.title=Port
+datasources.section.destination-mssql.schema.title=Default Schema
+datasources.section.destination-mssql.ssl_method.oneOf.0.title=Unencrypted
+datasources.section.destination-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate)
+datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate
+datasources.section.destination-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate)
+datasources.section.destination-mssql.ssl_method.title=SSL Method
+datasources.section.destination-mssql.username.title=User
+datasources.section.destination-mssql.database.description=The name of the MSSQL database.
+datasources.section.destination-mssql.host.description=The host name of the MSSQL database.
+datasources.section.destination-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-mssql.password.description=The password associated with this username.
+datasources.section.destination-mssql.port.description=The port of the MSSQL database.
+datasources.section.destination-mssql.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public".
+datasources.section.destination-mssql.ssl_method.description=The encryption method which is used to communicate with the database.
+datasources.section.destination-mssql.ssl_method.oneOf.0.description=The data transfer will not be encrypted.
+datasources.section.destination-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!)
+datasources.section.destination-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate.
+datasources.section.destination-mssql.username.description=The username which is used to access the database.
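+# Illustrative note (comment, not a localization key): "JDBC URL Params" for this and the other
+# JDBC-based destinations below are ampersand-separated key=value pairs appended to the
+# connection URL, e.g. key1=value1&key2=value2 (parameter names depend on the JDBC driver).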
+datasources.section.destination-mysql.database.title=DB Name
+datasources.section.destination-mysql.host.title=Host
+datasources.section.destination-mysql.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-mysql.password.title=Password
+datasources.section.destination-mysql.port.title=Port
+datasources.section.destination-mysql.ssl.title=SSL Connection
+datasources.section.destination-mysql.username.title=User
+datasources.section.destination-mysql.database.description=Name of the database.
+datasources.section.destination-mysql.host.description=Hostname of the database.
+datasources.section.destination-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-mysql.password.description=Password associated with the username.
+datasources.section.destination-mysql.port.description=Port of the database.
+datasources.section.destination-mysql.ssl.description=Encrypt data using SSL.
+datasources.section.destination-mysql.username.description=Username to use to access the database.
+datasources.section.destination-oracle.encryption.oneOf.0.title=Unencrypted
+datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.title=Encryption Algorithm
+datasources.section.destination-oracle.encryption.oneOf.1.title=Native Network Encryption (NNE)
+datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.title=SSL PEM file
+datasources.section.destination-oracle.encryption.oneOf.2.title=TLS Encrypted (verify certificate)
+datasources.section.destination-oracle.encryption.title=Encryption
+datasources.section.destination-oracle.host.title=Host
+datasources.section.destination-oracle.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-oracle.password.title=Password
+datasources.section.destination-oracle.port.title=Port
+datasources.section.destination-oracle.schema.title=Default Schema
+datasources.section.destination-oracle.sid.title=SID
+datasources.section.destination-oracle.username.title=User
+datasources.section.destination-oracle.encryption.description=The encryption method which is used when communicating with the database.
+datasources.section.destination-oracle.encryption.oneOf.0.description=Data transfer will not be encrypted.
+datasources.section.destination-oracle.encryption.oneOf.1.description=The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports.
+datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.description=This parameter defines the database encryption algorithm.
+datasources.section.destination-oracle.encryption.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations.
+datasources.section.destination-oracle.host.description=The hostname of the database.
+datasources.section.destination-oracle.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-oracle.password.description=The password associated with the username.
+datasources.section.destination-oracle.port.description=The port of the database.
+datasources.section.destination-oracle.schema.description=The default schema is used as the target schema for all statements issued from the connection that do not explicitly specify a schema name. The usual value for this field is "airbyte". In Oracle, schemas and users are the same thing, so the "user" parameter is used as the login credentials and this is used for the default Airbyte message schema.
+datasources.section.destination-oracle.sid.description=The System Identifier uniquely distinguishes the instance from any other instance on the same computer.
+datasources.section.destination-oracle.username.description=The username to access the database. This user must have CREATE USER privileges in the database.
+datasources.section.destination-postgres.database.title=DB Name
+datasources.section.destination-postgres.host.title=Host
+datasources.section.destination-postgres.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-postgres.password.title=Password
+datasources.section.destination-postgres.port.title=Port
+datasources.section.destination-postgres.schema.title=Default Schema
+datasources.section.destination-postgres.ssl.title=SSL Connection
+datasources.section.destination-postgres.ssl_mode.oneOf.0.title=disable
+datasources.section.destination-postgres.ssl_mode.oneOf.1.title=allow
+datasources.section.destination-postgres.ssl_mode.oneOf.2.title=prefer
+datasources.section.destination-postgres.ssl_mode.oneOf.3.title=require
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.title=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.title=Client key password (Optional)
+datasources.section.destination-postgres.ssl_mode.oneOf.4.title=verify-ca
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.title=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.title=Client certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.title=Client key
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.title=Client key password (Optional)
+datasources.section.destination-postgres.ssl_mode.oneOf.5.title=verify-full
+datasources.section.destination-postgres.ssl_mode.title=SSL modes
+datasources.section.destination-postgres.username.title=User
+datasources.section.destination-postgres.database.description=Name of the database.
+datasources.section.destination-postgres.host.description=Hostname of the database.
+datasources.section.destination-postgres.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-postgres.password.description=Password associated with the username.
+datasources.section.destination-postgres.port.description=Port of the database.
+datasources.section.destination-postgres.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public".
+datasources.section.destination-postgres.ssl.description=Encrypt data using SSL. When activating SSL, please select one of the connection modes.
+datasources.section.destination-postgres.ssl_mode.description=SSL connection modes.
+datasources.section.destination-postgres.ssl_mode.oneOf.0.description=Disable SSL.
+datasources.section.destination-postgres.ssl_mode.oneOf.1.description=Allow SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.2.description=Prefer SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.3.description=Require SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.4.description=Verify-ca SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.description=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.description=Password for the key storage. This field is optional; if you do not provide it, the password will be generated automatically.
+datasources.section.destination-postgres.ssl_mode.oneOf.5.description=Verify-full SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.description=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.description=Client certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.description=Client key
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.description=Password for the key storage. This field is optional; if you do not provide it, the password will be generated automatically.
+datasources.section.destination-postgres.username.description=Username to use to access the database.
+datasources.section.destination-pubsub.credentials_json.title=Credentials JSON
+datasources.section.destination-pubsub.project_id.title=Project ID
+datasources.section.destination-pubsub.topic_id.title=PubSub Topic ID
+datasources.section.destination-pubsub.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key.
+datasources.section.destination-pubsub.project_id.description=The GCP project ID for the project containing the target PubSub.
+datasources.section.destination-pubsub.topic_id.description=The PubSub topic ID in the given GCP project ID.
+datasources.section.destination-pulsar.batching_enabled.title=Enable batching
+datasources.section.destination-pulsar.batching_max_messages.title=Batching max messages
+datasources.section.destination-pulsar.batching_max_publish_delay.title=Batching max publish delay
+datasources.section.destination-pulsar.block_if_queue_full.title=Block if queue is full
+datasources.section.destination-pulsar.brokers.title=Pulsar brokers
+datasources.section.destination-pulsar.compression_type.title=Compression type
+datasources.section.destination-pulsar.max_pending_messages.title=Max pending messages
+datasources.section.destination-pulsar.max_pending_messages_across_partitions.title=Max pending messages across partitions
+datasources.section.destination-pulsar.producer_name.title=Producer name
+datasources.section.destination-pulsar.producer_sync.title=Sync producer
+datasources.section.destination-pulsar.send_timeout_ms.title=Message send timeout
+datasources.section.destination-pulsar.topic_namespace.title=Topic namespace
+datasources.section.destination-pulsar.topic_pattern.title=Topic pattern
+datasources.section.destination-pulsar.topic_tenant.title=Topic tenant
+datasources.section.destination-pulsar.topic_test.title=Test topic
+datasources.section.destination-pulsar.topic_type.title=Topic type
+datasources.section.destination-pulsar.use_tls.title=Use TLS
+datasources.section.destination-pulsar.batching_enabled.description=Control whether automatic batching of messages is enabled for the producer.
+datasources.section.destination-pulsar.batching_max_messages.description=Maximum number of messages permitted in a batch.
+datasources.section.destination-pulsar.batching_max_publish_delay.description=Time period in milliseconds within which the messages sent will be batched.
+datasources.section.destination-pulsar.block_if_queue_full.description=Whether the send operation should block when the outgoing message queue is full.
+datasources.section.destination-pulsar.brokers.description=A list of host/port pairs to use for establishing the initial connection to the Pulsar cluster.
+datasources.section.destination-pulsar.compression_type.description=Compression type for the producer.
+datasources.section.destination-pulsar.max_pending_messages.description=The maximum size of a queue holding pending messages.
+datasources.section.destination-pulsar.max_pending_messages_across_partitions.description=The maximum number of pending messages across partitions.
+datasources.section.destination-pulsar.producer_name.description=Name for the producer. If not filled, the system will generate a globally unique name.
+datasources.section.destination-pulsar.producer_sync.description=Wait synchronously until the record has been sent to Pulsar.
+datasources.section.destination-pulsar.send_timeout_ms.description=Message send timeout in milliseconds. If a message is not acknowledged by the server before the timeout expires, an error occurs.
+datasources.section.destination-pulsar.topic_namespace.description=The administrative unit of the topic, which acts as a grouping mechanism for related topics. Most topic configuration is performed at the namespace level. Each tenant has one or multiple namespaces.
+datasources.section.destination-pulsar.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-pulsar.topic_tenant.description=The topic tenant within the instance. Tenants are essential to multi-tenancy in Pulsar and are spread across clusters.
+datasources.section.destination-pulsar.topic_test.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-pulsar.topic_type.description=Identifies the type of topic. Pulsar supports two kinds of topics: persistent and non-persistent. In a persistent topic, all messages are durably persisted on disk (on multiple disks unless the broker is standalone), whereas a non-persistent topic does not persist messages to disk.
+datasources.section.destination-pulsar.use_tls.description=Whether to use TLS encryption on the connection.
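+# Illustrative note (comment, not a localization key): the Pulsar "brokers" value above is a
+# comma-separated list of host:port pairs, e.g. with hypothetical hosts:
+# pulsar-broker1:6650,pulsar-broker2:6650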
+datasources.section.destination-rabbitmq.exchange.description=The exchange name.
+datasources.section.destination-rabbitmq.host.description=The RabbitMQ host name.
+datasources.section.destination-rabbitmq.password.description=The password to connect.
+datasources.section.destination-rabbitmq.port.description=The RabbitMQ port.
+datasources.section.destination-rabbitmq.routing_key.description=The routing key.
+datasources.section.destination-rabbitmq.ssl.description=SSL enabled.
+datasources.section.destination-rabbitmq.username.description=The username to connect.
+datasources.section.destination-rabbitmq.virtual_host.description=The RabbitMQ virtual host name.
+datasources.section.destination-redis.cache_type.title=Cache type
+datasources.section.destination-redis.host.title=Host
+datasources.section.destination-redis.password.title=Password
+datasources.section.destination-redis.port.title=Port
+datasources.section.destination-redis.username.title=Username
+datasources.section.destination-redis.cache_type.description=Redis cache type to store data in.
+datasources.section.destination-redis.host.description=Redis host to connect to.
+datasources.section.destination-redis.password.description=Password associated with Redis.
+datasources.section.destination-redis.port.description=Port of Redis.
+datasources.section.destination-redis.username.description=Username associated with Redis.
+datasources.section.destination-redshift.database.title=Database
+datasources.section.destination-redshift.host.title=Host
+datasources.section.destination-redshift.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-redshift.password.title=Password
+datasources.section.destination-redshift.port.title=Port
+datasources.section.destination-redshift.schema.title=Default Schema
+datasources.section.destination-redshift.uploading_method.oneOf.0.title=Standard
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.title=S3 Key Id
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.title=No encryption
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.title=Key
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.title=AES-CBC envelope encryption
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.title=Encryption
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.title=S3 Filename pattern (Optional)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.title=Purge Staging Files and Tables (Optional)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.title=S3 Bucket Name
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.title=S3 Bucket Path (Optional)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.title=S3 Bucket Region
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.title=S3 Access Key
+datasources.section.destination-redshift.uploading_method.oneOf.1.title=S3 Staging
+datasources.section.destination-redshift.uploading_method.title=Uploading Method
+datasources.section.destination-redshift.username.title=Username
+datasources.section.destination-redshift.database.description=Name of the database.
+datasources.section.destination-redshift.host.description=Host Endpoint of the Redshift Cluster (must include the cluster-id, region and end with .redshift.amazonaws.com)
+datasources.section.destination-redshift.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-redshift.password.description=Password associated with the username.
+datasources.section.destination-redshift.port.description=Port of the database.
+datasources.section.destination-redshift.schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public".
+datasources.section.destination-redshift.uploading_method.description=The method by which data will be uploaded to the database.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.description=This ID grants access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. See AWS docs on how to generate an access key ID and secret access key.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.description=How to encrypt the staging data
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.description=Staging data will be stored in plaintext.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.description=Staging data will be encrypted using AES-CBC envelope encryption.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.description=The key, base64-encoded. Must be either 128, 192, or 256 bits. Leave blank to have Airbyte generate an ephemeral key for each sync.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.description=Whether to delete the staging files from S3 after completing the sync. See docs for details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.description=The name of the staging S3 bucket to use if utilising a COPY strategy. COPY is recommended for production workloads for better speed and scalability. See AWS docs for more details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. If not provided, then defaults to the root directory. See path's name recommendations for more details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a COPY strategy. See AWS docs for details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.description=The corresponding secret to the above access key id. See AWS docs on how to generate an access key ID and secret access key.
+datasources.section.destination-redshift.username.description=Username to use to access the database.
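+# Illustrative note (comment, not a localization key): the Redshift "Host" described above is the
+# cluster endpoint, i.e. it includes the cluster id and region and ends with
+# .redshift.amazonaws.com, e.g. a hypothetical examplecluster.abc123xyz789.us-east-1.redshift.amazonaws.com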
+datasources.section.destination-rockset.api_key.title=Api Key
+datasources.section.destination-rockset.api_server.title=Api Server
+datasources.section.destination-rockset.workspace.title=Workspace
+datasources.section.destination-rockset.api_key.description=Rockset api key
+datasources.section.destination-rockset.api_server.description=Rockset api URL
+datasources.section.destination-rockset.workspace.description=The Rockset workspace in which collections will be created and written to.
+datasources.section.destination-s3.access_key_id.title=S3 Key ID *
+datasources.section.destination-s3.file_name_pattern.title=S3 Filename pattern (Optional)
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate Level
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.title=Compression Codec *
+datasources.section.destination-s3.format.oneOf.0.properties.format_type.title=Format Type *
+datasources.section.destination-s3.format.oneOf.0.title=Avro: Apache Avro
+datasources.section.destination-s3.format.oneOf.1.properties.compression.oneOf.0.title=No Compression
+datasources.section.destination-s3.format.oneOf.1.properties.compression.oneOf.1.title=GZIP
+datasources.section.destination-s3.format.oneOf.1.properties.compression.title=Compression
+datasources.section.destination-s3.format.oneOf.1.properties.flattening.title=Normalization (Flattening)
+datasources.section.destination-s3.format.oneOf.1.properties.format_type.title=Format Type *
+datasources.section.destination-s3.format.oneOf.1.title=CSV: Comma-Separated Values
+datasources.section.destination-s3.format.oneOf.2.properties.compression.oneOf.0.title=No Compression
+datasources.section.destination-s3.format.oneOf.2.properties.compression.oneOf.1.title=GZIP
+datasources.section.destination-s3.format.oneOf.2.properties.compression.title=Compression
+datasources.section.destination-s3.format.oneOf.2.properties.format_type.title=Format Type *
+datasources.section.destination-s3.format.oneOf.2.title=JSON Lines: Newline-delimited JSON
+datasources.section.destination-s3.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.format_type.title=Format Type *
+datasources.section.destination-s3.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional)
+datasources.section.destination-s3.format.oneOf.3.title=Parquet: Columnar Storage
+datasources.section.destination-s3.format.title=Output Format *
+datasources.section.destination-s3.s3_bucket_name.title=S3 Bucket Name
+datasources.section.destination-s3.s3_bucket_path.title=S3 Bucket Path
+datasources.section.destination-s3.s3_bucket_region.title=S3 Bucket Region
+datasources.section.destination-s3.s3_endpoint.title=Endpoint (Optional)
+datasources.section.destination-s3.s3_path_format.title=S3 Path Format (Optional)
+datasources.section.destination-s3.secret_access_key.title=S3 Access Key *
+datasources.section.destination-s3.access_key_id.description=The access key ID to access the S3 bucket. Airbyte requires Read and Write permissions to the given bucket. Read more here.
+datasources.section.destination-s3.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s)
+datasources.section.destination-s3.format.description=Format of the data output. See here for more details
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Defaults to no compression.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=See here for details.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block.
+datasources.section.destination-s3.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz").
+datasources.section.destination-s3.format.oneOf.1.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details.
+datasources.section.destination-s3.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz").
+datasources.section.destination-s3.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB.
+datasources.section.destination-s3.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages.
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_encoding.description=Default: true.
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for dictionary. Default: 1024 KB.
+datasources.section.destination-s3.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. This is also the minimum size of a row group. Default: 8 MB.
+datasources.section.destination-s3.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB.
+datasources.section.destination-s3.s3_bucket_name.description=The name of the S3 bucket. Read more here.
+datasources.section.destination-s3.s3_bucket_path.description=Directory under the S3 bucket where data will be written. Read more here
+datasources.section.destination-s3.s3_bucket_region.description=The region of the S3 bucket. See here for all region codes.
+datasources.section.destination-s3.s3_endpoint.description=Your S3 endpoint url. Read more here
+datasources.section.destination-s3.s3_path_format.description=Format string on how data will be organized inside the S3 bucket directory. Read more here
+datasources.section.destination-s3.secret_access_key.description=The corresponding secret to the access key ID. Read more here
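+# Illustrative note (comment, not a localization key): "S3 Path Format" above is a template
+# string; a hypothetical value might look like ${NAMESPACE}/${STREAM_NAME}/${YEAR}_${MONTH}_${DAY}/
+# - consult the linked docs for the placeholders the connector actually supports.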
+datasources.section.destination-scylla.address.title=Address
+datasources.section.destination-scylla.keyspace.title=Keyspace
+datasources.section.destination-scylla.password.title=Password
+datasources.section.destination-scylla.port.title=Port
+datasources.section.destination-scylla.replication.title=Replication factor
+datasources.section.destination-scylla.username.title=Username
+datasources.section.destination-scylla.address.description=Address to connect to.
+datasources.section.destination-scylla.keyspace.description=Default Scylla keyspace to create data in.
+datasources.section.destination-scylla.password.description=Password associated with Scylla.
+datasources.section.destination-scylla.port.description=Port of Scylla.
+datasources.section.destination-scylla.replication.description=Indicates how many nodes the data should be replicated to.
+datasources.section.destination-scylla.username.description=Username to use to access Scylla.
+datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID
+datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key
+datasources.section.destination-amazon-sqs.message_delay.title=Message Delay
+datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id
+datasources.section.destination-amazon-sqs.queue_url.title=Queue URL
+datasources.section.destination-amazon-sqs.region.title=AWS Region
+datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key
+datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages
+datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body.
+datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds).
+datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues.
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue
+datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue
+datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages
+datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id
+datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name
+datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn
+datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title
+datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User
+datasources.section.destination-aws-datalake.credentials.title=Authentication mode
+datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name
+datasources.section.destination-aws-datalake.region.title=AWS Region
+datasources.section.destination-aws-datalake.aws_account_id.description=The target AWS account ID.
+datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket
+datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix
+datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS.
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=The role that will be assumed to write data to S3.
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials
+datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use
+datasources.section.destination-aws-datalake.region.description=Region name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes)
+datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening)
+datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values
+datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON
+datasources.section.destination-azure-blob-storage.format.title=Output Format
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure blob storage account key.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The name of the Azure Blob Storage account.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure Blob Storage container. If it does not exist, it will be created automatically. If left empty, a container named airbytecontainer+timestamp will be created automatically.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=The Azure Blob Storage endpoint domain name. Leave the default value (or leave it empty if running the container from the command line) to use the native Microsoft endpoint.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The number of megabytes to buffer for the output stream to Azure. This affects the memory footprint on workers, but may need adjustment for performance and an appropriate block size in Azure.
+datasources.section.destination-azure-blob-storage.format.description=Output data format
+datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details.
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional)
+datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source)
+datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID
+datasources.section.destination-bigquery.dataset_location.title=Dataset Location
+datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional)
+datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging
+datasources.section.destination-bigquery.loading_method.title=Loading Method
+datasources.section.destination-bigquery.project_id.title=Project ID
+datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional)
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here.
+datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty.
+datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here.
+datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here.
+datasources.section.destination-bigquery.loading_method.description=The loading method used to select how data will be uploaded to BigQuery.
+datasources.section.destination-kafka.bootstrap_servers.description=A list of host/port pairs to use for establishing the initial connection to the Kafka cluster, in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down).
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server.
+datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses.
+datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.
+datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer.
+datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns.
+datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream.
+datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request.
+datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block.
+datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5.
+datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes.
+datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers.
+datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.
+datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.
+datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error.
+datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used.
+datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum.
+datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established.
+datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka.
+datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-keen.api_key.title=API Key
+datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp
+datasources.section.destination-keen.project_id.title=Project ID
+datasources.section.destination-keen.api_key.description=To get Keen Master API Key, navigate to the Access tab from the left-hand, side panel and check the Project Details section.
+datasources.section.destination-keen.infer_timestamp.description=Allow connector to guess keen.timestamp value based on the streamed data.
+datasources.section.destination-keen.project_id.description=To get Keen Project ID, navigate to the Access tab from the left-hand, side panel and check the Project Details section.
+datasources.section.destination-kinesis.accessKey.title=Access Key
+datasources.section.destination-kinesis.bufferSize.title=Buffer Size
+datasources.section.destination-kinesis.endpoint.title=Endpoint
+datasources.section.destination-kinesis.privateKey.title=Private Key
+datasources.section.destination-kinesis.region.title=Region
+datasources.section.destination-kinesis.shardCount.title=Shard Count
+datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for current user.
+datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing Kinesis records before they are batch streamed.
+datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint.
+datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that is unique for each account, also known as a "recovery phrase".
+datasources.section.destination-kinesis.region.description=AWS region. Your account determines the Regions that are available to you.
+datasources.section.destination-kinesis.shardCount.description=Number of shards to which the data should be streamed.
+datasources.section.destination-kvdb.bucket_id.title=Bucket ID
+datasources.section.destination-kvdb.secret_key.title=Secret Key
+datasources.section.destination-kvdb.bucket_id.description=The ID of your KVdb bucket.
+datasources.section.destination-kvdb.secret_key.description=Your bucket Secret Key.
+datasources.section.destination-local-json.destination_path.title=Destination Path
+datasources.section.destination-local-json.destination_path.description=Path to the directory where json files will be written. The files will be placed inside that local mount. For more information check out our docs
+datasources.section.destination-mariadb-columnstore.database.title=Database
+datasources.section.destination-mariadb-columnstore.host.title=Host
+datasources.section.destination-mariadb-columnstore.password.title=Password
+datasources.section.destination-mariadb-columnstore.port.title=Port
+datasources.section.destination-mariadb-columnstore.username.title=Username
+datasources.section.destination-mariadb-columnstore.database.description=Name of the database.
+datasources.section.destination-mariadb-columnstore.host.description=The Hostname of the database.
+datasources.section.destination-mariadb-columnstore.password.description=The Password associated with the username.
+datasources.section.destination-mariadb-columnstore.port.description=The Port of the database.
+datasources.section.destination-mariadb-columnstore.username.description=The Username which is used to access the database.
+datasources.section.destination-meilisearch.api_key.title=API Key
+datasources.section.destination-meilisearch.host.title=Host
+datasources.section.destination-meilisearch.api_key.description=MeiliSearch API Key. See the docs for more information on how to obtain this key.
+datasources.section.destination-meilisearch.host.description=Hostname of the MeiliSearch instance.
+datasources.section.destination-mongodb.auth_type.oneOf.0.title=None
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.title=Password
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.title=User
+datasources.section.destination-mongodb.auth_type.oneOf.1.title=Login/Password
+datasources.section.destination-mongodb.auth_type.title=Authorization type
+datasources.section.destination-mongodb.database.title=DB Name
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.title=Host
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.title=Port
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.title=TLS Connection
+datasources.section.destination-mongodb.instance_type.oneOf.0.title=Standalone MongoDb Instance
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.title=Server addresses
+datasources.section.destination-mongodb.instance_type.oneOf.1.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL
+datasources.section.destination-mongodb.instance_type.oneOf.2.title=MongoDB Atlas
+datasources.section.destination-mongodb.instance_type.title=MongoDb Instance Type
+datasources.section.destination-mongodb.auth_type.description=Authorization type.
+datasources.section.destination-mongodb.auth_type.oneOf.0.description=None.
+datasources.section.destination-mongodb.auth_type.oneOf.1.description=Login/Password.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.description=Password associated with the username.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.description=Username to use to access the database.
+datasources.section.destination-mongodb.database.description=Name of the database.
+datasources.section.destination-mongodb.instance_type.description=The MongoDB instance to connect to. For MongoDB Atlas and Replica Set instances, a TLS connection is used by default.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.description=The Host of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.description=The Port of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.description=Indicates whether TLS encryption protocol will be used to connect to MongoDB. It is recommended to use TLS connection if possible. For more information see documentation.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.description=A replica set name.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify the `host`:`port` of each member, separated by commas.
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.description=URL of a cluster to connect to.
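+# Illustrative only (hypothetical hostnames; 27017 is MongoDB's default port): a three-member replica set
+# would be entered as server_addresses=mongo1.example.com:27017,mongo2.example.com:27017,mongo3.example.com:27017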
+datasources.section.destination-mqtt.automatic_reconnect.title=Automatic reconnect
+datasources.section.destination-mqtt.broker_host.title=MQTT broker host
+datasources.section.destination-mqtt.broker_port.title=MQTT broker port
+datasources.section.destination-mqtt.clean_session.title=Clean session
+datasources.section.destination-mqtt.client.title=Client ID
+datasources.section.destination-mqtt.connect_timeout.title=Connect timeout
+datasources.section.destination-mqtt.message_qos.title=Message QoS
+datasources.section.destination-mqtt.message_retained.title=Message retained
+datasources.section.destination-mqtt.password.title=Password
+datasources.section.destination-mqtt.publisher_sync.title=Sync publisher
+datasources.section.destination-mqtt.topic_pattern.title=Topic pattern
+datasources.section.destination-mqtt.topic_test.title=Test topic
+datasources.section.destination-mqtt.use_tls.title=Use TLS
+datasources.section.destination-mqtt.username.title=Username
+datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID
+datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key
+datasources.section.destination-amazon-sqs.message_delay.title=Message Delay
+datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id
+datasources.section.destination-amazon-sqs.queue_url.title=Queue URL
+datasources.section.destination-amazon-sqs.region.title=AWS Region
+datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key
+datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages
+datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body.
+datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds).
+datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues.
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue
+datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue
+datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages
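+# Illustrative only (hypothetical record): with message_body_key=data, an input record such as
+# {"data": {"id": 1}, "emitted_at": 1640000000} would produce an SQS message body of {"id": 1};
+# with no message_body_key set, the entire record data is sent as the message body.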
+datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id
+datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name
+datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn
+datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title
+datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User
+datasources.section.destination-aws-datalake.credentials.title=Authentication mode
+datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name
+datasources.section.destination-aws-datalake.region.title=AWS Region
+datasources.section.destination-aws-datalake.aws_account_id.description=Target AWS account ID
+datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket
+datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix
+datasources.section.destination-aws-datalake.credentials.description=Choose how to authenticate to AWS.
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to S3
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials
+datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use
+datasources.section.destination-aws-datalake.region.description=Region name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes)
+datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening)
+datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values
+datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON
+datasources.section.destination-azure-blob-storage.format.title=Output Format
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure blob storage account key.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The name of the Azure Blob Storage account.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure Blob Storage container. If it does not exist, it will be created automatically. If left empty, a container named airbytecontainer+timestamp will be created automatically.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=The Azure Blob Storage endpoint domain name. Leave the default value (or leave it empty if running the container from the command line) to use the native Microsoft endpoint from the example.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The number of megabytes to buffer for the output stream to Azure. This impacts the memory footprint on workers, and may need adjustment for performance and an appropriate block size in Azure.
+datasources.section.destination-azure-blob-storage.format.description=Output data format
+datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details.
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional)
+datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source)
+datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID
+datasources.section.destination-bigquery.dataset_location.title=Dataset Location
+datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional)
+datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging
+datasources.section.destination-bigquery.loading_method.title=Loading Method
+datasources.section.destination-bigquery.project_id.title=Project ID
+datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional)
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX=15 MB) for each table: the size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default value of 15 MB is used if not set explicitly. Read more here.
+datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty.
+datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here.
+datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here.
+datasources.section.destination-bigquery.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.
+datasources.section.destination-kafka.bootstrap_servers.description=A list of host/port pairs to use for establishing the initial connection to the Kafka cluster, in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down).
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server.
+datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses.
+datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.
+datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer.
+datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns.
+datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream.
+datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request.
+datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block.
+datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5.
+datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes.
+datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers.
+datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.
+datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.
+datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error.
+datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used.
+datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum.
+datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established.
+datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka.
+datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
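+# Illustrative only (hypothetical values): with topic_pattern={namespace}.{stream}.sample, a record from
+# namespace "public" and stream "users" would be routed to the topic "public.users.sample" (after the
+# name is normalized to the standard naming convention).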
+datasources.section.destination-keen.api_key.title=API Key
+datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp
+datasources.section.destination-keen.project_id.title=Project ID
+datasources.section.destination-keen.api_key.description=To get the Keen Master API Key, navigate to the Access tab in the left-hand side panel and check the Project Details section.
+datasources.section.destination-keen.infer_timestamp.description=Allow connector to guess keen.timestamp value based on the streamed data.
+datasources.section.destination-keen.project_id.description=To get the Keen Project ID, navigate to the Access tab in the left-hand side panel and check the Project Details section.
+datasources.section.destination-kinesis.accessKey.title=Access Key
+datasources.section.destination-kinesis.bufferSize.title=Buffer Size
+datasources.section.destination-kinesis.endpoint.title=Endpoint
+datasources.section.destination-kinesis.privateKey.title=Private Key
+datasources.section.destination-kinesis.region.title=Region
+datasources.section.destination-kinesis.shardCount.title=Shard Count
+datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for the current user.
+datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing Kinesis records before they are batch streamed.
+datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint.
+datasources.section.destination-mqtt.automatic_reconnect.description=Whether the client will automatically attempt to reconnect to the server if the connection is lost.
+datasources.section.destination-mqtt.broker_host.description=Host of the broker to connect to.
+datasources.section.destination-mqtt.broker_port.description=Port of the broker.
+datasources.section.destination-mqtt.clean_session.description=Whether the client and server should remember state across restarts and reconnects.
+datasources.section.destination-mqtt.client.description=A client identifier that is unique on the server being connected to.
+datasources.section.destination-mqtt.connect_timeout.description=Maximum time interval (in seconds) the client will wait for the network connection to the MQTT server to be established.
+datasources.section.destination-mqtt.message_qos.description=Quality of service used for each message to be delivered.
+datasources.section.destination-mqtt.message_retained.description=Whether or not the publish message should be retained by the messaging engine.
+datasources.section.destination-mqtt.password.description=Password to use for the connection.
+datasources.section.destination-mqtt.publisher_sync.description=Wait synchronously until the record has been sent to the broker.
+datasources.section.destination-mqtt.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-mqtt.topic_test.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-mqtt.use_tls.description=Whether to use TLS encryption on the connection.
+datasources.section.destination-mqtt.username.description=User name to use for the connection.
+datasources.section.destination-mssql.database.title=DB Name
+datasources.section.destination-mssql.host.title=Host
+datasources.section.destination-mssql.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-mssql.password.title=Password
+datasources.section.destination-mssql.port.title=Port
+datasources.section.destination-mssql.schema.title=Default Schema
+datasources.section.destination-mssql.ssl_method.oneOf.0.title=Unencrypted
+datasources.section.destination-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate)
+datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate
+datasources.section.destination-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate)
+datasources.section.destination-mssql.ssl_method.title=SSL Method
+datasources.section.destination-mssql.username.title=User
+datasources.section.destination-mssql.database.description=The name of the MSSQL database.
+datasources.section.destination-mssql.host.description=The host name of the MSSQL database.
+datasources.section.destination-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-mssql.password.description=The password associated with this username.
+datasources.section.destination-mssql.port.description=The port of the MSSQL database.
+datasources.section.destination-mssql.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public".
+datasources.section.destination-mssql.ssl_method.description=The encryption method which is used to communicate with the database.
+datasources.section.destination-mssql.ssl_method.oneOf.0.description=The data transfer will not be encrypted.
+datasources.section.destination-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!)
+datasources.section.destination-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate.
+datasources.section.destination-mssql.username.description=The username which is used to access the database.
+datasources.section.destination-mysql.database.title=DB Name
+datasources.section.destination-mysql.host.title=Host
+datasources.section.destination-mysql.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-mysql.password.title=Password
+datasources.section.destination-mysql.port.title=Port
+datasources.section.destination-mysql.ssl.title=SSL Connection
+datasources.section.destination-mysql.username.title=User
+datasources.section.destination-mysql.database.description=Name of the database.
+datasources.section.destination-mysql.host.description=Hostname of the database.
+datasources.section.destination-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-mysql.password.description=Password associated with the username.
+datasources.section.destination-mysql.port.description=Port of the database.
+datasources.section.destination-mysql.ssl.description=Encrypt data using SSL.
+datasources.section.destination-mysql.username.description=Username to use to access the database.
+datasources.section.destination-oracle.encryption.oneOf.0.title=Unencrypted
+datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.title=Encryption Algorithm
+datasources.section.destination-oracle.encryption.oneOf.1.title=Native Network Encryption (NNE)
+datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.title=SSL PEM file
+datasources.section.destination-oracle.encryption.oneOf.2.title=TLS Encrypted (verify certificate)
+datasources.section.destination-oracle.encryption.title=Encryption
+datasources.section.destination-oracle.host.title=Host
+datasources.section.destination-oracle.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-oracle.password.title=Password
+datasources.section.destination-oracle.port.title=Port
+datasources.section.destination-oracle.schema.title=Default Schema
+datasources.section.destination-oracle.sid.title=SID
+datasources.section.destination-oracle.username.title=User
+datasources.section.destination-oracle.encryption.description=The encryption method which is used when communicating with the database.
+datasources.section.destination-oracle.encryption.oneOf.0.description=Data transfer will not be encrypted.
+datasources.section.destination-oracle.encryption.oneOf.1.description=The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports.
+datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.description=This parameter defines the database encryption algorithm.
+datasources.section.destination-oracle.encryption.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations.
+datasources.section.destination-oracle.host.description=The hostname of the database.
+datasources.section.destination-oracle.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-oracle.password.description=The password associated with the username.
+datasources.section.destination-oracle.port.description=The port of the database.
+datasources.section.destination-oracle.schema.description=The default schema is used as the target schema for all statements issued from the connection that do not explicitly specify a schema name. The usual value for this field is "airbyte". In Oracle, schemas and users are the same thing, so the "user" parameter is used as the login credentials and this is used for the default Airbyte message schema.
+datasources.section.destination-oracle.sid.description=The System Identifier uniquely distinguishes the instance from any other instance on the same computer.
+datasources.section.destination-oracle.username.description=The username to access the database. This user must have CREATE USER privileges in the database.
+datasources.section.destination-postgres.database.title=DB Name
+datasources.section.destination-postgres.host.title=Host
+datasources.section.destination-postgres.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-postgres.password.title=Password
+datasources.section.destination-postgres.port.title=Port
+datasources.section.destination-postgres.schema.title=Default Schema
+datasources.section.destination-postgres.ssl.title=SSL Connection
+datasources.section.destination-postgres.ssl_mode.oneOf.0.title=disable
+datasources.section.destination-postgres.ssl_mode.oneOf.1.title=allow
+datasources.section.destination-postgres.ssl_mode.oneOf.2.title=prefer
+datasources.section.destination-postgres.ssl_mode.oneOf.3.title=require
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.title=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.title=Client key password (Optional)
+datasources.section.destination-postgres.ssl_mode.oneOf.4.title=verify-ca
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.title=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.title=Client certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.title=Client key
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.title=Client key password (Optional)
+datasources.section.destination-postgres.ssl_mode.oneOf.5.title=verify-full
+datasources.section.destination-postgres.ssl_mode.title=SSL modes
+datasources.section.destination-postgres.username.title=User
+datasources.section.destination-postgres.database.description=Name of the database.
+datasources.section.destination-postgres.host.description=Hostname of the database.
+datasources.section.destination-postgres.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-postgres.password.description=Password associated with the username.
+datasources.section.destination-postgres.port.description=Port of the database.
+datasources.section.destination-postgres.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public".
+datasources.section.destination-postgres.ssl.description=Encrypt data using SSL. When activating SSL, please select one of the connection modes.
+datasources.section.destination-postgres.ssl_mode.description=SSL connection modes.
+datasources.section.destination-postgres.ssl_mode.oneOf.0.description=Disable SSL.
+datasources.section.destination-postgres.ssl_mode.oneOf.1.description=Allow SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.2.description=Prefer SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.3.description=Require SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.4.description=Verify-ca SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.description=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.description=Password for the key storage. This field is optional. If you do not add it, the password will be generated automatically.
+datasources.section.destination-postgres.ssl_mode.oneOf.5.description=Verify-full SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.description=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.description=Client certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.description=Client key
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.description=Password for the key storage. This field is optional. If you do not add it, the password will be generated automatically.
+datasources.section.destination-postgres.username.description=Username to use to access the database.
+datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID
+datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key
+datasources.section.destination-amazon-sqs.message_delay.title=Message Delay
+datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id
+datasources.section.destination-amazon-sqs.queue_url.title=Queue URL
+datasources.section.destination-amazon-sqs.region.title=AWS Region
+datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key
+datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages
+datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body.
+datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds).
+datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues.
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue
+datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue
+datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages
+datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id
+datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name
+datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn
+datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title
+datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User
+datasources.section.destination-aws-datalake.credentials.title=Authentication mode
+datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name
+datasources.section.destination-aws-datalake.region.title=AWS Region
+datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id
+datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket
+datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix
+datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS.
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to s3
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials
+datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use
+datasources.section.destination-aws-datalake.region.description=Region name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes)
+datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening)
+datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values
+datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON
+datasources.section.destination-azure-blob-storage.format.title=Output Format
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure blob storage account key.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The account's name of the Azure Blob Storage.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure blob storage container. If not exists - will be created automatically. May be empty, then will be created automatically airbytecontainer+timestamp
+datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=This is Azure Blob Storage endpoint domain name. Leave default value (or leave it empty if run container from command line) to use Microsoft native from example.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The amount of megabytes to buffer for the output stream to Azure. This will impact memory footprint on workers, but may need adjustment for performance and appropriate block size in Azure.
+datasources.section.destination-azure-blob-storage.format.description=Output data format
+datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details.
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional)
+datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source)
+datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID
+datasources.section.destination-bigquery.dataset_location.title=Dataset Location
+datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional)
+datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging
+datasources.section.destination-bigquery.loading_method.title=Loading Method
+datasources.section.destination-bigquery.project_id.title=Project ID
+datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional)
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here.
+datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty.
+datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here.
+datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here.
+datasources.section.destination-bigquery.loading_method.description=Loading method used to send select the way data will be uploaded to BigQuery. host1:port1,host2:port2,...
. Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down).
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server.
+datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses.
+datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.
+datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer.
+datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns.
+datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream.
+datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request.
+datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block.
+datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5.
+datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes.
+datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers.
+datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.
+datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.
+datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error.
+datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used.
+datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum.
+datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established.
+datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka.
+datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-keen.api_key.title=API Key
+datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp
+datasources.section.destination-keen.project_id.title=Project ID
+datasources.section.destination-keen.api_key.description=To get Keen Master API Key, navigate to the Access tab from the left-hand, side panel and check the Project Details section.
+datasources.section.destination-keen.infer_timestamp.description=Allow connector to guess keen.timestamp value based on the streamed data.
+datasources.section.destination-keen.project_id.description=To get Keen Project ID, navigate to the Access tab from the left-hand, side panel and check the Project Details section.
+datasources.section.destination-kinesis.accessKey.title=Access Key
+datasources.section.destination-kinesis.bufferSize.title=Buffer Size
+datasources.section.destination-kinesis.endpoint.title=Endpoint
+datasources.section.destination-kinesis.privateKey.title=Private Key
+datasources.section.destination-kinesis.region.title=Region
+datasources.section.destination-kinesis.shardCount.title=Shard Count
+datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for current user.
+datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing kinesis records before being batch streamed.
+datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint.
+datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that are unique for each account, also known as a "recovery phrase".
+datasources.section.destination-kinesis.region.description=AWS region. Your account determines the Regions that are available to you.
+datasources.section.destination-kinesis.shardCount.description=Number of shards to which the data should be streamed.
+datasources.section.destination-kvdb.bucket_id.title=Bucket ID
+datasources.section.destination-kvdb.secret_key.title=Secret Key
+datasources.section.destination-kvdb.bucket_id.description=The ID of your KVdb bucket.
+datasources.section.destination-kvdb.secret_key.description=Your bucket Secret Key.
+datasources.section.destination-local-json.destination_path.title=Destination Path
+datasources.section.destination-local-json.destination_path.description=Path to the directory where json files will be written. The files will be placed inside that local mount. For more information check out our docs
+datasources.section.destination-mariadb-columnstore.database.title=Database
+datasources.section.destination-mariadb-columnstore.host.title=Host
+datasources.section.destination-mariadb-columnstore.password.title=Password
+datasources.section.destination-mariadb-columnstore.port.title=Port
+datasources.section.destination-mariadb-columnstore.username.title=Username
+datasources.section.destination-mariadb-columnstore.database.description=Name of the database.
+datasources.section.destination-mariadb-columnstore.host.description=The Hostname of the database.
+datasources.section.destination-mariadb-columnstore.password.description=The Password associated with the username.
+datasources.section.destination-mariadb-columnstore.port.description=The Port of the database.
+datasources.section.destination-mariadb-columnstore.username.description=The Username which is used to access the database.
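+# Illustrative example (not a generated key): a MariaDB ColumnStore connection would typically use values such as
+# host=columnstore.example.com, port=3306 (the MariaDB default), database=airbyte, username=airbyte_user.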
+datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID
+datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key
+datasources.section.destination-amazon-sqs.message_delay.title=Message Delay
+datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id
+datasources.section.destination-amazon-sqs.queue_url.title=Queue URL
+datasources.section.destination-amazon-sqs.region.title=AWS Region
+datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key
+datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages
+datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body.
+datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds).
+datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues.
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue
+datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue
+datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages
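+# Illustrative example (not a generated key): queue_url follows the standard SQS format, e.g.
+# https://sqs.us-east-1.amazonaws.com/123456789012/example-queue. With message_body_key=results,
+# only the value under the record's "results" key would be sent as the SQS message body.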
+datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id
+datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name
+datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn
+datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title
+datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User
+datasources.section.destination-aws-datalake.credentials.title=Authentication mode
+datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name
+datasources.section.destination-aws-datalake.region.title=AWS Region
+datasources.section.destination-aws-datalake.aws_account_id.description=Target AWS account ID.
+datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket
+datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix
+datasources.section.destination-aws-datalake.credentials.description=Choose how to authenticate to AWS.
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to S3.
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials
+datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use
+datasources.section.destination-aws-datalake.region.description=Region name
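+# Illustrative example (not a generated key): role_arn uses the standard IAM ARN format,
+# e.g. arn:aws:iam::111122223333:role/datalake-writer; the connector would assume that role to write to S3.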
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes)
+datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening)
+datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values
+datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON
+datasources.section.destination-azure-blob-storage.format.title=Output Format
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure blob storage account key.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The name of the Azure Blob Storage account.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure Blob Storage container. If it does not exist, it will be created automatically. If left empty, a container named airbytecontainer+timestamp will be created automatically.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=The Azure Blob Storage endpoint domain name. Leave the default value (or leave it empty if running the container from the command line) to use the native Microsoft endpoint from the example.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The number of megabytes to buffer for the output stream to Azure. This will impact the memory footprint on workers, but may need adjustment for performance and an appropriate block size in Azure.
+datasources.section.destination-azure-blob-storage.format.description=Output data format
+datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details.
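+# Illustrative example (not a generated key): for the public Azure cloud the endpoint domain name is
+# typically blob.core.windows.net, so the account endpoint resolves to https://<account_name>.blob.core.windows.net.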
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional)
+datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source)
+datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID
+datasources.section.destination-bigquery.dataset_location.title=Dataset Location
+datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional)
+datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging
+datasources.section.destination-bigquery.loading_method.title=Loading Method
+datasources.section.destination-bigquery.project_id.title=Project ID
+datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional)
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here.
+datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty.
+datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here.
+datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here.
+datasources.section.destination-bigquery.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.
+datasources.section.destination-kafka.bootstrap_servers.description=A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping - this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down).
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server.
+datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses.
+datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.
+datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer.
+datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns.
+datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream.
+datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request.
+datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block.
+datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5.
+datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes.
+datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers.
+datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.
+datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.
+datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error.
+datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used.
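+# Illustrative example (not a generated key): for the SASL PLAIN mechanism, sasl_jaas_config usually looks like
+# org.apache.kafka.common.security.plain.PlainLoginModule required username="myuser" password="mypassword";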
+datasources.section.destination-meilisearch.api_key.title=API Key
+datasources.section.destination-meilisearch.host.title=Host
+datasources.section.destination-meilisearch.api_key.description=MeiliSearch API Key. See the docs for more information on how to obtain this key.
+datasources.section.destination-meilisearch.host.description=Hostname of the MeiliSearch instance.
+datasources.section.destination-mongodb.auth_type.oneOf.0.title=None
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.title=Password
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.title=User
+datasources.section.destination-mongodb.auth_type.oneOf.1.title=Login/Password
+datasources.section.destination-mongodb.auth_type.title=Authorization type
+datasources.section.destination-mongodb.database.title=DB Name
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.title=Host
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.title=Port
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.title=TLS Connection
+datasources.section.destination-mongodb.instance_type.oneOf.0.title=Standalone MongoDb Instance
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.title=Server addresses
+datasources.section.destination-mongodb.instance_type.oneOf.1.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL
+datasources.section.destination-mongodb.instance_type.oneOf.2.title=MongoDB Atlas
+datasources.section.destination-mongodb.instance_type.title=MongoDb Instance Type
+datasources.section.destination-mongodb.auth_type.description=Authorization type.
+datasources.section.destination-mongodb.auth_type.oneOf.0.description=None.
+datasources.section.destination-mongodb.auth_type.oneOf.1.description=Login/Password.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.description=Password associated with the username.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.description=Username to use to access the database.
+datasources.section.destination-mongodb.database.description=Name of the database.
+datasources.section.destination-mongodb.instance_type.description=MongoDB instance to connect to. For MongoDB Atlas and Replica Set, a TLS connection is used by default.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.description=The Host of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.description=The Port of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.description=Indicates whether the TLS encryption protocol will be used to connect to MongoDB. It is recommended to use a TLS connection if possible. For more information see documentation.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.description=A replica set name.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify the `host`:`port` of each member, separated by commas.
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.description=URL of a cluster to connect to.
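+# Illustrative example (not a generated key): server_addresses for a replica set would look like
+# mongo1:27017,mongo2:27017,mongo3:27017 (27017 is the MongoDB default port); an Atlas cluster_url
+# typically looks like cluster0.ab1cd.mongodb.net.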
+datasources.section.destination-mqtt.automatic_reconnect.title=Automatic reconnect
+datasources.section.destination-mqtt.broker_host.title=MQTT broker host
+datasources.section.destination-mqtt.broker_port.title=MQTT broker port
+datasources.section.destination-mqtt.clean_session.title=Clean session
+datasources.section.destination-mqtt.client.title=Client ID
+datasources.section.destination-mqtt.connect_timeout.title=Connect timeout
+datasources.section.destination-mqtt.message_qos.title=Message QoS
+datasources.section.destination-mqtt.message_retained.title=Message retained
+datasources.section.destination-mqtt.password.title=Password
+datasources.section.destination-mqtt.publisher_sync.title=Sync publisher
+datasources.section.destination-mqtt.topic_pattern.title=Topic pattern
+datasources.section.destination-mqtt.topic_test.title=Test topic
+datasources.section.destination-mqtt.use_tls.title=Use TLS
+datasources.section.destination-mqtt.username.title=Username
+datasources.section.destination-mqtt.automatic_reconnect.description=Whether the client will automatically attempt to reconnect to the server if the connection is lost.
+datasources.section.destination-mqtt.broker_host.description=Host of the broker to connect to.
+datasources.section.destination-mqtt.broker_port.description=Port of the broker.
+datasources.section.destination-mqtt.clean_session.description=Whether the client and server should remember state across restarts and reconnects.
+datasources.section.destination-mqtt.client.description=A client identifier that is unique on the server being connected to.
+datasources.section.destination-mqtt.connect_timeout.description=Maximum time interval (in seconds) the client will wait for the network connection to the MQTT server to be established.
+datasources.section.destination-mqtt.message_qos.description=Quality of service used for each message to be delivered.
+datasources.section.destination-mqtt.message_retained.description=Whether or not the publish message should be retained by the messaging engine.
+datasources.section.destination-mqtt.password.description=Password to use for the connection.
+datasources.section.destination-mqtt.publisher_sync.description=Wait synchronously until the record has been sent to the broker.
+datasources.section.destination-mqtt.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-mqtt.topic_test.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-mqtt.use_tls.description=Whether to use TLS encryption on the connection.
+datasources.section.destination-mqtt.username.description=User name to use for the connection.
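+# Illustrative note (not a generated key): message_qos corresponds to the standard MQTT levels -
+# 0 (at most once), 1 (at least once), 2 (exactly once).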
+datasources.section.destination-mssql.database.title=DB Name
+datasources.section.destination-mssql.host.title=Host
+datasources.section.destination-mssql.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-mssql.password.title=Password
+datasources.section.destination-mssql.port.title=Port
+datasources.section.destination-mssql.schema.title=Default Schema
+datasources.section.destination-mssql.ssl_method.oneOf.0.title=Unencrypted
+datasources.section.destination-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate)
+datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate
+datasources.section.destination-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate)
+datasources.section.destination-mssql.ssl_method.title=SSL Method
+datasources.section.destination-mssql.username.title=User
+datasources.section.destination-mssql.database.description=The name of the MSSQL database.
+datasources.section.destination-mssql.host.description=The host name of the MSSQL database.
+datasources.section.destination-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-mssql.password.description=The password associated with this username.
+datasources.section.destination-mssql.port.description=The port of the MSSQL database.
+datasources.section.destination-mssql.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public".
+datasources.section.destination-mssql.ssl_method.description=The encryption method which is used to communicate with the database.
+datasources.section.destination-mssql.ssl_method.oneOf.0.description=The data transfer will not be encrypted.
+datasources.section.destination-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!)
+datasources.section.destination-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate.
+datasources.section.destination-mssql.username.description=The username which is used to access the database.
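+# Illustrative example (not a generated key): a typical configuration would be host=mssql.example.com,
+# port=1433 (the SQL Server default), with hostNameInCertificate matching the certificate subject,
+# e.g. mssql.example.com.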
+datasources.section.destination-mysql.database.title=DB Name
+datasources.section.destination-mysql.host.title=Host
+datasources.section.destination-mysql.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-mysql.password.title=Password
+datasources.section.destination-mysql.port.title=Port
+datasources.section.destination-mysql.ssl.title=SSL Connection
+datasources.section.destination-mysql.username.title=User
+datasources.section.destination-mysql.database.description=Name of the database.
+datasources.section.destination-mysql.host.description=Hostname of the database.
+datasources.section.destination-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-mysql.password.description=Password associated with the username.
+datasources.section.destination-mysql.port.description=Port of the database.
+datasources.section.destination-mysql.ssl.description=Encrypt data using SSL.
+datasources.section.destination-mysql.username.description=Username to use to access the database.
+datasources.section.destination-oracle.encryption.oneOf.0.title=Unencrypted
+datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.title=Encryption Algorithm
+datasources.section.destination-oracle.encryption.oneOf.1.title=Native Network Encryption (NNE)
+datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.title=SSL PEM file
+datasources.section.destination-oracle.encryption.oneOf.2.title=TLS Encrypted (verify certificate)
+datasources.section.destination-oracle.encryption.title=Encryption
+datasources.section.destination-oracle.host.title=Host
+datasources.section.destination-oracle.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-oracle.password.title=Password
+datasources.section.destination-oracle.port.title=Port
+datasources.section.destination-oracle.schema.title=Default Schema
+datasources.section.destination-oracle.sid.title=SID
+datasources.section.destination-oracle.username.title=User
+datasources.section.destination-oracle.encryption.description=The encryption method which is used when communicating with the database.
+datasources.section.destination-oracle.encryption.oneOf.0.description=Data transfer will not be encrypted.
+datasources.section.destination-oracle.encryption.oneOf.1.description=The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports.
+datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.description=This parameter defines the database encryption algorithm.
+datasources.section.destination-oracle.encryption.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations.
+datasources.section.destination-oracle.host.description=The hostname of the database.
+datasources.section.destination-oracle.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-oracle.password.description=The password associated with the username.
+datasources.section.destination-oracle.port.description=The port of the database.
+datasources.section.destination-oracle.schema.description=The default schema is used as the target schema for all statements issued from the connection that do not explicitly specify a schema name. The usual value for this field is "airbyte". In Oracle, schemas and users are the same thing, so the "user" parameter is used as the login credentials and this is used for the default Airbyte message schema.
+datasources.section.destination-oracle.sid.description=The System Identifier uniquely distinguishes the instance from any other instance on the same computer.
+datasources.section.destination-oracle.username.description=The username to access the database. This user must have CREATE USER privileges in the database.
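+# Illustrative example (not a generated key): Oracle listeners conventionally use port 1521, and sid is the
+# instance identifier, e.g. ORCL for a default single-instance installation.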
+datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID
+datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key
+datasources.section.destination-amazon-sqs.message_delay.title=Message Delay
+datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id
+datasources.section.destination-amazon-sqs.queue_url.title=Queue URL
+datasources.section.destination-amazon-sqs.region.title=AWS Region
+datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key
+datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages
+datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body.
+datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds).
+datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues.
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue
+datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue
+datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages
+datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID
+datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key
+datasources.section.destination-amazon-sqs.message_delay.title=Message Delay
+datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id
+datasources.section.destination-amazon-sqs.queue_url.title=Queue URL
+datasources.section.destination-amazon-sqs.region.title=AWS Region
+datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key
+datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages
+datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body.
+datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds).
+datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues.
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue
+datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue
+datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages
+datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID
+datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key
+datasources.section.destination-amazon-sqs.message_delay.title=Message Delay
+datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id
+datasources.section.destination-amazon-sqs.queue_url.title=Queue URL
+datasources.section.destination-amazon-sqs.region.title=AWS Region
+datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key
+datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages
+datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body.
+datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds).
+datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues.
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue
+datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue
+datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages
+datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id
+datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name
+datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn
+datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title
+datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User
+datasources.section.destination-aws-datalake.credentials.title=Authentication mode
+datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name
+datasources.section.destination-aws-datalake.region.title=AWS Region
+datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id
+datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket
+datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix
+datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS.
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to s3
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials
+datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use
+datasources.section.destination-aws-datalake.region.description=Region name
+datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID
+datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key
+datasources.section.destination-amazon-sqs.message_delay.title=Message Delay
+datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id
+datasources.section.destination-amazon-sqs.queue_url.title=Queue URL
+datasources.section.destination-amazon-sqs.region.title=AWS Region
+datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key
+datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages
+datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body.
+datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds).
+datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues.
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue
+datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue
+datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes)
+datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening)
+datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values
+datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON
+datasources.section.destination-azure-blob-storage.format.title=Output Format
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure blob storage account key.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The name of the Azure Blob Storage account.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure Blob Storage container. If it does not exist, it will be created automatically. May be left empty, in which case a container named airbytecontainer+timestamp will be created automatically.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=The Azure Blob Storage endpoint domain name. Leave the default value (or leave it empty if running the container from the command line) to use the native Microsoft endpoint.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The number of megabytes to buffer for the output stream to Azure. This will impact the memory footprint on workers, but may need adjustment for performance and an appropriate block size in Azure.
+datasources.section.destination-azure-blob-storage.format.description=Output data format
+datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to the docs for details.
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional)
+datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source)
+datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID
+datasources.section.destination-bigquery.dataset_location.title=Dataset Location
+datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional)
+datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging
+datasources.section.destination-bigquery.loading_method.title=Loading Method
+datasources.section.destination-bigquery.project_id.title=Project ID
+datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional)
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX=15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here.
+datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty.
+datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here.
+datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here.
+datasources.section.destination-bigquery.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.
+datasources.section.destination-kafka.bootstrap_servers.description=A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping; this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down).
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server.
+datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses.
+datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.
+datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer.
+datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns.
+datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream.
+datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request.
+datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block.
+datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5.
+datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes.
+datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers.
+datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.
+datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.
+datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error.
+datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used.
+datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum.
+datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established.
+datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka.
+datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-keen.api_key.title=API Key
+datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp
+datasources.section.destination-keen.project_id.title=Project ID
+datasources.section.destination-keen.api_key.description=To get the Keen Master API Key, navigate to the Access tab in the left-hand side panel and check the Project Details section.
+datasources.section.destination-keen.infer_timestamp.description=Allow the connector to guess the keen.timestamp value based on the streamed data.
+datasources.section.destination-keen.project_id.description=To get the Keen Project ID, navigate to the Access tab in the left-hand side panel and check the Project Details section.
+datasources.section.destination-kinesis.accessKey.title=Access Key
+datasources.section.destination-kinesis.bufferSize.title=Buffer Size
+datasources.section.destination-kinesis.endpoint.title=Endpoint
+datasources.section.destination-kinesis.privateKey.title=Private Key
+datasources.section.destination-kinesis.region.title=Region
+datasources.section.destination-kinesis.shardCount.title=Shard Count
+datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for the current user.
+datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing Kinesis records before they are batch streamed.
+datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint.
+datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that is unique to each account, also known as a "recovery phrase".
+datasources.section.destination-kinesis.region.description=AWS region. Your account determines the Regions that are available to you.
+datasources.section.destination-kinesis.shardCount.description=Number of shards to which the data should be streamed.
+datasources.section.destination-kvdb.bucket_id.title=Bucket ID
+datasources.section.destination-kvdb.secret_key.title=Secret Key
+datasources.section.destination-kvdb.bucket_id.description=The ID of your KVdb bucket.
+datasources.section.destination-kvdb.secret_key.description=Your bucket Secret Key.
+datasources.section.destination-local-json.destination_path.title=Destination Path
+datasources.section.destination-local-json.destination_path.description=Path to the directory where JSON files will be written. The files will be placed inside that local mount. For more information, check out our docs.
+datasources.section.destination-mariadb-columnstore.database.title=Database
+datasources.section.destination-mariadb-columnstore.host.title=Host
+datasources.section.destination-mariadb-columnstore.password.title=Password
+datasources.section.destination-mariadb-columnstore.port.title=Port
+datasources.section.destination-mariadb-columnstore.username.title=Username
+datasources.section.destination-mariadb-columnstore.database.description=Name of the database.
+datasources.section.destination-mariadb-columnstore.host.description=The Hostname of the database.
+datasources.section.destination-mariadb-columnstore.password.description=The Password associated with the username.
+datasources.section.destination-mariadb-columnstore.port.description=The Port of the database.
+datasources.section.destination-mariadb-columnstore.username.description=The Username which is used to access the database.
+datasources.section.destination-meilisearch.api_key.title=API Key
+datasources.section.destination-meilisearch.host.title=Host
+datasources.section.destination-meilisearch.api_key.description=MeiliSearch API Key. See the docs for more information on how to obtain this key.
+datasources.section.destination-meilisearch.host.description=Hostname of the MeiliSearch instance.
+datasources.section.destination-mongodb.auth_type.oneOf.0.title=None
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.title=Password
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.title=User
+datasources.section.destination-mongodb.auth_type.oneOf.1.title=Login/Password
+datasources.section.destination-mongodb.auth_type.title=Authorization type
+datasources.section.destination-mongodb.database.title=DB Name
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.title=Host
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.title=Port
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.title=TLS Connection
+datasources.section.destination-mongodb.instance_type.oneOf.0.title=Standalone MongoDb Instance
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.title=Server addresses
+datasources.section.destination-mongodb.instance_type.oneOf.1.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL
+datasources.section.destination-mongodb.instance_type.oneOf.2.title=MongoDB Atlas
+datasources.section.destination-mongodb.instance_type.title=MongoDb Instance Type
+datasources.section.destination-mongodb.auth_type.description=Authorization type.
+datasources.section.destination-mongodb.auth_type.oneOf.0.description=None.
+datasources.section.destination-mongodb.auth_type.oneOf.1.description=Login/Password.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.description=Password associated with the username.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.description=Username to use to access the database.
+datasources.section.destination-mongodb.database.description=Name of the database.
+datasources.section.destination-mongodb.instance_type.description=MongoDb instance to connect to. For MongoDB Atlas and Replica Set, a TLS connection is used by default.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.description=The Host of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.description=The Port of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.description=Indicates whether the TLS encryption protocol will be used to connect to MongoDB. It is recommended to use a TLS connection if possible. For more information, see the documentation.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.description=A replica set name.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify `host`:`port` of each member, separated by a comma.
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.description=URL of a cluster to connect to.
+datasources.section.destination-mqtt.automatic_reconnect.title=Automatic reconnect
+datasources.section.destination-mqtt.broker_host.title=MQTT broker host
+datasources.section.destination-mqtt.broker_port.title=MQTT broker port
+datasources.section.destination-mqtt.clean_session.title=Clean session
+datasources.section.destination-mqtt.client.title=Client ID
+datasources.section.destination-mqtt.connect_timeout.title=Connect timeout
+datasources.section.destination-mqtt.message_qos.title=Message QoS
+datasources.section.destination-mqtt.message_retained.title=Message retained
+datasources.section.destination-mqtt.password.title=Password
+datasources.section.destination-mqtt.publisher_sync.title=Sync publisher
+datasources.section.destination-mqtt.topic_pattern.title=Topic pattern
+datasources.section.destination-mqtt.topic_test.title=Test topic
+datasources.section.destination-mqtt.use_tls.title=Use TLS
+datasources.section.destination-mqtt.username.title=Username
+datasources.section.destination-mqtt.automatic_reconnect.description=Whether the client will automatically attempt to reconnect to the server if the connection is lost.
+datasources.section.destination-mqtt.broker_host.description=Host of the broker to connect to.
+datasources.section.destination-mqtt.broker_port.description=Port of the broker.
+datasources.section.destination-mqtt.clean_session.description=Whether the client and server should remember state across restarts and reconnects.
+datasources.section.destination-mqtt.client.description=A client identifier that is unique on the server being connected to.
+datasources.section.destination-mqtt.connect_timeout.description=Maximum time interval (in seconds) the client will wait for the network connection to the MQTT server to be established.
+datasources.section.destination-mqtt.message_qos.description=Quality of service used for each message to be delivered.
+datasources.section.destination-mqtt.message_retained.description=Whether or not the published message should be retained by the messaging engine.
+datasources.section.destination-mqtt.password.description=Password to use for the connection.
+datasources.section.destination-mqtt.publisher_sync.description=Wait synchronously until the record has been sent to the broker.
+datasources.section.destination-mqtt.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-mqtt.topic_test.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-mqtt.use_tls.description=Whether to use TLS encryption on the connection.
+datasources.section.destination-mqtt.username.description=User name to use for the connection.
+datasources.section.destination-mssql.database.title=DB Name
+datasources.section.destination-mssql.host.title=Host
+datasources.section.destination-mssql.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-mssql.password.title=Password
+datasources.section.destination-mssql.port.title=Port
+datasources.section.destination-mssql.schema.title=Default Schema
+datasources.section.destination-mssql.ssl_method.oneOf.0.title=Unencrypted
+datasources.section.destination-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate)
+datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate
+datasources.section.destination-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate)
+datasources.section.destination-mssql.ssl_method.title=SSL Method
+datasources.section.destination-mssql.username.title=User
+datasources.section.destination-mssql.database.description=The name of the MSSQL database.
+datasources.section.destination-mssql.host.description=The host name of the MSSQL database.
+datasources.section.destination-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-mssql.password.description=The password associated with this username.
+datasources.section.destination-mssql.port.description=The port of the MSSQL database.
+datasources.section.destination-mssql.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public".
+datasources.section.destination-mssql.ssl_method.description=The encryption method which is used to communicate with the database.
+datasources.section.destination-mssql.ssl_method.oneOf.0.description=The data transfer will not be encrypted.
+datasources.section.destination-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!)
+datasources.section.destination-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate.
+datasources.section.destination-mssql.username.description=The username which is used to access the database.
+datasources.section.destination-mysql.database.title=DB Name
+datasources.section.destination-mysql.host.title=Host
+datasources.section.destination-mysql.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-mysql.password.title=Password
+datasources.section.destination-mysql.port.title=Port
+datasources.section.destination-mysql.ssl.title=SSL Connection
+datasources.section.destination-mysql.username.title=User
+datasources.section.destination-mysql.database.description=Name of the database.
+datasources.section.destination-mysql.host.description=Hostname of the database.
+datasources.section.destination-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-mysql.password.description=Password associated with the username.
+datasources.section.destination-mysql.port.description=Port of the database.
+datasources.section.destination-mysql.ssl.description=Encrypt data using SSL.
+datasources.section.destination-mysql.username.description=Username to use to access the database.
+datasources.section.destination-oracle.encryption.oneOf.0.title=Unencrypted
+datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.title=Encryption Algorithm
+datasources.section.destination-oracle.encryption.oneOf.1.title=Native Network Encryption (NNE)
+datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.title=SSL PEM file
+datasources.section.destination-oracle.encryption.oneOf.2.title=TLS Encrypted (verify certificate)
+datasources.section.destination-oracle.encryption.title=Encryption
+datasources.section.destination-oracle.host.title=Host
+datasources.section.destination-oracle.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-oracle.password.title=Password
+datasources.section.destination-oracle.port.title=Port
+datasources.section.destination-oracle.schema.title=Default Schema
+datasources.section.destination-oracle.sid.title=SID
+datasources.section.destination-oracle.username.title=User
+datasources.section.destination-oracle.encryption.description=The encryption method which is used when communicating with the database.
+datasources.section.destination-oracle.encryption.oneOf.0.description=Data transfer will not be encrypted.
+datasources.section.destination-oracle.encryption.oneOf.1.description=The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports.
+datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.description=This parameter defines the database encryption algorithm.
+datasources.section.destination-oracle.encryption.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations.
+datasources.section.destination-oracle.host.description=The hostname of the database.
+datasources.section.destination-oracle.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-oracle.password.description=The password associated with the username.
+datasources.section.destination-oracle.port.description=The port of the database.
+datasources.section.destination-oracle.schema.description=The default schema is used as the target schema for all statements issued from the connection that do not explicitly specify a schema name. The usual value for this field is "airbyte". In Oracle, schemas and users are the same thing, so the "user" parameter is used as the login credentials and this is used for the default Airbyte message schema.
+datasources.section.destination-oracle.sid.description=The System Identifier uniquely distinguishes the instance from any other instance on the same computer.
+datasources.section.destination-oracle.username.description=The username to access the database. This user must have CREATE USER privileges in the database.
+datasources.section.destination-postgres.database.title=DB Name
+datasources.section.destination-postgres.host.title=Host
+datasources.section.destination-postgres.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-postgres.password.title=Password
+datasources.section.destination-postgres.port.title=Port
+datasources.section.destination-postgres.schema.title=Default Schema
+datasources.section.destination-postgres.ssl.title=SSL Connection
+datasources.section.destination-postgres.ssl_mode.oneOf.0.title=disable
+datasources.section.destination-postgres.ssl_mode.oneOf.1.title=allow
+datasources.section.destination-postgres.ssl_mode.oneOf.2.title=prefer
+datasources.section.destination-postgres.ssl_mode.oneOf.3.title=require
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.title=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.title=Client key password (Optional)
+datasources.section.destination-postgres.ssl_mode.oneOf.4.title=verify-ca
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.title=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.title=Client certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.title=Client key
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.title=Client key password (Optional)
+datasources.section.destination-postgres.ssl_mode.oneOf.5.title=verify-full
+datasources.section.destination-postgres.ssl_mode.title=SSL modes
+datasources.section.destination-postgres.username.title=User
+datasources.section.destination-postgres.database.description=Name of the database.
+datasources.section.destination-postgres.host.description=Hostname of the database.
+datasources.section.destination-postgres.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-postgres.password.description=Password associated with the username.
+datasources.section.destination-postgres.port.description=Port of the database.
+datasources.section.destination-postgres.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public".
+datasources.section.destination-postgres.ssl.description=Encrypt data using SSL. When activating SSL, please select one of the connection modes.
+datasources.section.destination-postgres.ssl_mode.description=SSL connection modes.
+datasources.section.destination-postgres.ssl_mode.oneOf.0.description=Disable SSL.
+datasources.section.destination-postgres.ssl_mode.oneOf.1.description=Allow SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.2.description=Prefer SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.3.description=Require SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.4.description=Verify-ca SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.description=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.description=Password for the key storage. This field is optional; if you do not provide it, the password will be generated automatically.
+datasources.section.destination-postgres.ssl_mode.oneOf.5.description=Verify-full SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.description=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.description=Client certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.description=Client key
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.description=Password for the key storage. This field is optional; if you do not provide it, the password will be generated automatically.
+datasources.section.destination-postgres.username.description=Username to use to access the database.
+datasources.section.destination-pubsub.credentials_json.title=Credentials JSON
+datasources.section.destination-pubsub.project_id.title=Project ID
+datasources.section.destination-pubsub.topic_id.title=PubSub Topic ID
+datasources.section.destination-pubsub.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key.
+datasources.section.destination-pubsub.project_id.description=The GCP project ID for the project containing the target PubSub.
+datasources.section.destination-pubsub.topic_id.description=The PubSub topic ID in the given GCP project ID.
+datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID
+datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key
+datasources.section.destination-amazon-sqs.message_delay.title=Message Delay
+datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id
+datasources.section.destination-amazon-sqs.queue_url.title=Queue URL
+datasources.section.destination-amazon-sqs.region.title=AWS Region
+datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key
+datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages
+datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body.
+datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds).
+datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues.
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue
+datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue
+datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages
+datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id
+datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name
+datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn
+datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title
+datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User
+datasources.section.destination-aws-datalake.credentials.title=Authentication mode
+datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name
+datasources.section.destination-aws-datalake.region.title=AWS Region
+datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id
+datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket
+datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix
+datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS.
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to S3
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials
+datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use
+datasources.section.destination-aws-datalake.region.description=Region name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes)
+datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening)
+datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values
+datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON
+datasources.section.destination-azure-blob-storage.format.title=Output Format
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure blob storage account key.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The name of the Azure Blob Storage account.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure Blob Storage container. If it does not exist, it will be created automatically. If left empty, a container named airbytecontainer+timestamp will be created automatically.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=The Azure Blob Storage endpoint domain name. Leave the default value (or leave it empty if running the container from the command line) to use the native Microsoft endpoint from the example.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The number of megabytes to buffer for the output stream to Azure. This affects the memory footprint on workers and may need adjustment for performance and an appropriate block size in Azure.
+datasources.section.destination-azure-blob-storage.format.description=Output data format
+datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details.
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional)
+datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source)
+datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID
+datasources.section.destination-bigquery.dataset_location.title=Dataset Location
+datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional)
+datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging
+datasources.section.destination-bigquery.loading_method.title=Loading Method
+datasources.section.destination-bigquery.project_id.title=Project ID
+datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional)
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here.
+datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty.
+datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here.
+datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here.
+datasources.section.destination-bigquery.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.
+datasources.section.destination-kafka.bootstrap_servers.description=A list of host/port pairs for establishing the initial connection to the Kafka cluster, in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down).
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server.
+datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses.
+datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.
+datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer.
+datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns.
+datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream.
+datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request.
+datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block.
+datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5.
+datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes.
+datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers.
+datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.
+datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses, the client will resend the request if necessary or fail the request if retries are exhausted.
+datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error.
+datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used.
+datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum.
+datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established.
+datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka.
+datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
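+# Illustrative note (assumption, not generated from a connector spec): topic_pattern substitutes
+# the record's namespace and stream into the pattern before normalization. For example, the pattern
+# {namespace}.{stream}.sample with namespace "public" and stream "users" resolves to
+# "public.users.sample", which is then adjusted to the connector's standard topic naming convention.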
+datasources.section.destination-keen.api_key.title=API Key
+datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp
+datasources.section.destination-keen.project_id.title=Project ID
+datasources.section.destination-keen.api_key.description=To get the Keen Master API Key, navigate to the Access tab in the left-hand side panel and check the Project Details section.
+datasources.section.destination-keen.infer_timestamp.description=Allow connector to guess keen.timestamp value based on the streamed data.
+datasources.section.destination-keen.project_id.description=To get the Keen Project ID, navigate to the Access tab in the left-hand side panel and check the Project Details section.
+datasources.section.destination-kinesis.accessKey.title=Access Key
+datasources.section.destination-kinesis.bufferSize.title=Buffer Size
+datasources.section.destination-kinesis.endpoint.title=Endpoint
+datasources.section.destination-kinesis.privateKey.title=Private Key
+datasources.section.destination-kinesis.region.title=Region
+datasources.section.destination-kinesis.shardCount.title=Shard Count
+datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for current user.
+datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing kinesis records before being batch streamed.
+datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint.
+datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that is unique to each account, also known as a "recovery phrase".
+datasources.section.destination-kinesis.region.description=AWS region. Your account determines the Regions that are available to you.
+datasources.section.destination-kinesis.shardCount.description=Number of shards to which the data should be streamed.
+datasources.section.destination-kvdb.bucket_id.title=Bucket ID
+datasources.section.destination-kvdb.secret_key.title=Secret Key
+datasources.section.destination-kvdb.bucket_id.description=The ID of your KVdb bucket.
+datasources.section.destination-kvdb.secret_key.description=Your bucket Secret Key.
+datasources.section.destination-local-json.destination_path.title=Destination Path
+datasources.section.destination-local-json.destination_path.description=Path to the directory where json files will be written. The files will be placed inside that local mount. For more information check out our docs
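+# Illustrative note (assumption, not generated from a connector spec): a destination_path such as
+# "/local/json_data" writes files beneath the configured local mount, which in a default Docker
+# setup typically maps to /tmp/airbyte_local on the host.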
+datasources.section.destination-mariadb-columnstore.database.title=Database
+datasources.section.destination-mariadb-columnstore.host.title=Host
+datasources.section.destination-mariadb-columnstore.password.title=Password
+datasources.section.destination-mariadb-columnstore.port.title=Port
+datasources.section.destination-mariadb-columnstore.username.title=Username
+datasources.section.destination-mariadb-columnstore.database.description=Name of the database.
+datasources.section.destination-mariadb-columnstore.host.description=The Hostname of the database.
+datasources.section.destination-mariadb-columnstore.password.description=The Password associated with the username.
+datasources.section.destination-mariadb-columnstore.port.description=The Port of the database.
+datasources.section.destination-mariadb-columnstore.username.description=The Username which is used to access the database.
+datasources.section.destination-meilisearch.api_key.title=API Key
+datasources.section.destination-meilisearch.host.title=Host
+datasources.section.destination-meilisearch.api_key.description=MeiliSearch API Key. See the docs for more information on how to obtain this key.
+datasources.section.destination-meilisearch.host.description=Hostname of the MeiliSearch instance.
+datasources.section.destination-mongodb.auth_type.oneOf.0.title=None
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.title=Password
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.title=User
+datasources.section.destination-mongodb.auth_type.oneOf.1.title=Login/Password
+datasources.section.destination-mongodb.auth_type.title=Authorization type
+datasources.section.destination-mongodb.database.title=DB Name
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.title=Host
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.title=Port
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.title=TLS Connection
+datasources.section.destination-mongodb.instance_type.oneOf.0.title=Standalone MongoDb Instance
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.title=Server addresses
+datasources.section.destination-mongodb.instance_type.oneOf.1.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL
+datasources.section.destination-mongodb.instance_type.oneOf.2.title=MongoDB Atlas
+datasources.section.destination-mongodb.instance_type.title=MongoDb Instance Type
+datasources.section.destination-mongodb.auth_type.description=Authorization type.
+datasources.section.destination-mongodb.auth_type.oneOf.0.description=None.
+datasources.section.destination-mongodb.auth_type.oneOf.1.description=Login/Password.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.description=Password associated with the username.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.description=Username to use to access the database.
+datasources.section.destination-mongodb.database.description=Name of the database.
+datasources.section.destination-mongodb.instance_type.description=MongoDb instance to connect to. For MongoDB Atlas and Replica Set, a TLS connection is used by default.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.description=The Host of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.description=The Port of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.description=Indicates whether the TLS encryption protocol will be used to connect to MongoDB. Using a TLS connection is recommended if possible. For more information see the documentation.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.description=A replica set name.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify the `host`:`port` of each member, separated by commas.
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.description=URL of a cluster to connect to.
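+# Illustrative note (assumption, not generated from a connector spec): for a three-member replica
+# set, server_addresses would be written as "mongo1:27017,mongo2:27017,mongo3:27017" together with
+# the replica_set name, while a standalone instance only needs host and port.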
+datasources.section.destination-mqtt.automatic_reconnect.title=Automatic reconnect
+datasources.section.destination-mqtt.broker_host.title=MQTT broker host
+datasources.section.destination-mqtt.broker_port.title=MQTT broker port
+datasources.section.destination-mqtt.clean_session.title=Clean session
+datasources.section.destination-mqtt.client.title=Client ID
+datasources.section.destination-mqtt.connect_timeout.title=Connect timeout
+datasources.section.destination-mqtt.message_qos.title=Message QoS
+datasources.section.destination-mqtt.message_retained.title=Message retained
+datasources.section.destination-mqtt.password.title=Password
+datasources.section.destination-mqtt.publisher_sync.title=Sync publisher
+datasources.section.destination-mqtt.topic_pattern.title=Topic pattern
+datasources.section.destination-mqtt.topic_test.title=Test topic
+datasources.section.destination-mqtt.use_tls.title=Use TLS
+datasources.section.destination-mqtt.username.title=Username
+datasources.section.destination-mqtt.automatic_reconnect.description=Whether the client will automatically attempt to reconnect to the server if the connection is lost.
+datasources.section.destination-mqtt.broker_host.description=Host of the broker to connect to.
+datasources.section.destination-mqtt.broker_port.description=Port of the broker.
+datasources.section.destination-mqtt.clean_session.description=Whether the client and server should remember state across restarts and reconnects.
+datasources.section.destination-mqtt.client.description=A client identifier that is unique on the server being connected to.
+datasources.section.destination-mqtt.connect_timeout.description=Maximum time interval (in seconds) the client will wait for the network connection to the MQTT server to be established.
+datasources.section.destination-mqtt.message_qos.description=Quality of service used for each message to be delivered.
+datasources.section.destination-mqtt.message_retained.description=Whether or not the publish message should be retained by the messaging engine.
+datasources.section.destination-mqtt.password.description=Password to use for the connection.
+datasources.section.destination-mqtt.publisher_sync.description=Wait synchronously until the record has been sent to the broker.
+datasources.section.destination-mqtt.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-mqtt.topic_test.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-mqtt.use_tls.description=Whether to use TLS encryption on the connection.
+datasources.section.destination-mqtt.username.description=User name to use for the connection.
+datasources.section.destination-mssql.database.title=DB Name
+datasources.section.destination-mssql.host.title=Host
+datasources.section.destination-mssql.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-mssql.password.title=Password
+datasources.section.destination-mssql.port.title=Port
+datasources.section.destination-mssql.schema.title=Default Schema
+datasources.section.destination-mssql.ssl_method.oneOf.0.title=Unencrypted
+datasources.section.destination-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate)
+datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate
+datasources.section.destination-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate)
+datasources.section.destination-mssql.ssl_method.title=SSL Method
+datasources.section.destination-mssql.username.title=User
+datasources.section.destination-mssql.database.description=The name of the MSSQL database.
+datasources.section.destination-mssql.host.description=The host name of the MSSQL database.
+datasources.section.destination-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-mssql.password.description=The password associated with this username.
+datasources.section.destination-mssql.port.description=The port of the MSSQL database.
+datasources.section.destination-mssql.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public".
+datasources.section.destination-mssql.ssl_method.description=The encryption method which is used to communicate with the database.
+datasources.section.destination-mssql.ssl_method.oneOf.0.description=The data transfer will not be encrypted.
+datasources.section.destination-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!)
+datasources.section.destination-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate.
+datasources.section.destination-mssql.username.description=The username which is used to access the database.
+datasources.section.destination-mysql.database.title=DB Name
+datasources.section.destination-mysql.host.title=Host
+datasources.section.destination-mysql.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-mysql.password.title=Password
+datasources.section.destination-mysql.port.title=Port
+datasources.section.destination-mysql.ssl.title=SSL Connection
+datasources.section.destination-mysql.username.title=User
+datasources.section.destination-mysql.database.description=Name of the database.
+datasources.section.destination-mysql.host.description=Hostname of the database.
+datasources.section.destination-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-mysql.password.description=Password associated with the username.
+datasources.section.destination-mysql.port.description=Port of the database.
+datasources.section.destination-mysql.ssl.description=Encrypt data using SSL.
+datasources.section.destination-mysql.username.description=Username to use to access the database.
+datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID
+datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key
+datasources.section.destination-amazon-sqs.message_delay.title=Message Delay
+datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id
+datasources.section.destination-amazon-sqs.queue_url.title=Queue URL
+datasources.section.destination-amazon-sqs.region.title=AWS Region
+datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key
+datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages
+datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body.
+datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds).
+datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues.
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue
+datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue
+datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages
+datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id
+datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name
+datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn
+datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title
+datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User
+datasources.section.destination-aws-datalake.credentials.title=Authentication mode
+datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name
+datasources.section.destination-aws-datalake.region.title=AWS Region
+datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id
+datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket
+datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix
+datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS.
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to s3
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials
+datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use
+datasources.section.destination-aws-datalake.region.description=Region name
+datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID
+datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key
+datasources.section.destination-amazon-sqs.message_delay.title=Message Delay
+datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id
+datasources.section.destination-amazon-sqs.queue_url.title=Queue URL
+datasources.section.destination-amazon-sqs.region.title=AWS Region
+datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key
+datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages
+datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body.
+datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds).
+datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues.
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue
+datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue
+datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages
+datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id
+datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name
+datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn
+datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title
+datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User
+datasources.section.destination-aws-datalake.credentials.title=Authentication mode
+datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name
+datasources.section.destination-aws-datalake.region.title=AWS Region
+datasources.section.destination-aws-datalake.aws_account_id.description=target aws account id
+datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket
+datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix
+datasources.section.destination-aws-datalake.credentials.description=Choose How to Authenticate to AWS.
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=Will assume this role to write data to s3
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key Id
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials
+datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use
+datasources.section.destination-aws-datalake.region.description=Region name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes)
+datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening)
+datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values
+datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON
+datasources.section.destination-azure-blob-storage.format.title=Output Format
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure blob storage account key.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The account's name of the Azure Blob Storage.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure blob storage container. If not exists - will be created automatically. May be empty, then will be created automatically airbytecontainer+timestamp
+datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=This is Azure Blob Storage endpoint domain name. Leave default value (or leave it empty if run container from command line) to use Microsoft native from example.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The amount of megabytes to buffer for the output stream to Azure. This will impact memory footprint on workers, but may need adjustment for performance and appropriate block size in Azure.
+datasources.section.destination-azure-blob-storage.format.description=Output data format
+datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details.
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional)
+datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source)
+datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID
+datasources.section.destination-bigquery.dataset_location.title=Dataset Location
+datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional)
+datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging
+datasources.section.destination-bigquery.loading_method.title=Loading Method
+datasources.section.destination-bigquery.project_id.title=Project ID
+datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional)
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here.
+datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty.
+datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here.
+datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here.
+datasources.section.destination-bigquery.loading_method.description=Loading method used to send select the way data will be uploaded to BigQuery. host1:port1,host2:port2,...
. Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down).
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server.
+datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses.
+datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.
+datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer.
+datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns.
+datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream.
+datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request.
+datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block.
+datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5.
+datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes.
+datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers.
+datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.
+datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.
+datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error.
+datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used.
+datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum.
+datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established.
+datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka.
+datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-keen.api_key.title=API Key
+datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp
+datasources.section.destination-keen.project_id.title=Project ID
+datasources.section.destination-keen.api_key.description=To get Keen Master API Key, navigate to the Access tab from the left-hand, side panel and check the Project Details section.
+datasources.section.destination-keen.infer_timestamp.description=Allow connector to guess keen.timestamp value based on the streamed data.
+datasources.section.destination-keen.project_id.description=To get Keen Project ID, navigate to the Access tab from the left-hand, side panel and check the Project Details section.
+datasources.section.destination-kinesis.accessKey.title=Access Key
+datasources.section.destination-kinesis.bufferSize.title=Buffer Size
+datasources.section.destination-kinesis.endpoint.title=Endpoint
+datasources.section.destination-kinesis.privateKey.title=Private Key
+datasources.section.destination-kinesis.region.title=Region
+datasources.section.destination-kinesis.shardCount.title=Shard Count
+datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for current user.
+datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing kinesis records before being batch streamed.
+datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint.
+datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that are unique for each account, also known as a "recovery phrase".
+datasources.section.destination-kinesis.region.description=AWS region. Your account determines the Regions that are available to you.
+datasources.section.destination-kinesis.shardCount.description=Number of shards to which the data should be streamed.
+datasources.section.destination-kvdb.bucket_id.title=Bucket ID
+datasources.section.destination-kvdb.secret_key.title=Secret Key
+datasources.section.destination-kvdb.bucket_id.description=The ID of your KVdb bucket.
+datasources.section.destination-kvdb.secret_key.description=Your bucket Secret Key.
+datasources.section.destination-local-json.destination_path.title=Destination Path
+datasources.section.destination-local-json.destination_path.description=Path to the directory where json files will be written. The files will be placed inside that local mount. For more information check out our docs
+datasources.section.destination-mariadb-columnstore.database.title=Database
+datasources.section.destination-mariadb-columnstore.host.title=Host
+datasources.section.destination-mariadb-columnstore.password.title=Password
+datasources.section.destination-mariadb-columnstore.port.title=Port
+datasources.section.destination-mariadb-columnstore.username.title=Username
+datasources.section.destination-mariadb-columnstore.database.description=Name of the database.
+datasources.section.destination-mariadb-columnstore.host.description=The Hostname of the database.
+datasources.section.destination-mariadb-columnstore.password.description=The Password associated with the username.
+datasources.section.destination-mariadb-columnstore.port.description=The Port of the database.
+datasources.section.destination-mariadb-columnstore.username.description=The Username which is used to access the database.
+datasources.section.destination-meilisearch.api_key.title=API Key
+datasources.section.destination-meilisearch.host.title=Host
+datasources.section.destination-meilisearch.api_key.description=MeiliSearch API Key. See the docs for more information on how to obtain this key.
+datasources.section.destination-meilisearch.host.description=Hostname of the MeiliSearch instance.
+datasources.section.destination-mongodb.auth_type.oneOf.0.title=None
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.title=Password
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.title=User
+datasources.section.destination-mongodb.auth_type.oneOf.1.title=Login/Password
+datasources.section.destination-mongodb.auth_type.title=Authorization type
+datasources.section.destination-mongodb.database.title=DB Name
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.title=Host
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.title=Port
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.title=TLS Connection
+datasources.section.destination-mongodb.instance_type.oneOf.0.title=Standalone MongoDb Instance
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.title=Server addresses
+datasources.section.destination-mongodb.instance_type.oneOf.1.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL
+datasources.section.destination-mongodb.instance_type.oneOf.2.title=MongoDB Atlas
+datasources.section.destination-mongodb.instance_type.title=MongoDb Instance Type
+datasources.section.destination-mongodb.auth_type.description=Authorization type.
+datasources.section.destination-mongodb.auth_type.oneOf.0.description=None.
+datasources.section.destination-mongodb.auth_type.oneOf.1.description=Login/Password.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.description=Password associated with the username.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.description=Username to use to access the database.
+datasources.section.destination-mongodb.database.description=Name of the database.
+datasources.section.destination-mongodb.instance_type.description=MongoDb instance to connect to. For MongoDB Atlas and Replica Set TLS connection is used by default.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.description=The Host of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.description=The Port of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.description=Indicates whether the TLS encryption protocol will be used to connect to MongoDB. It is recommended to use a TLS connection if possible. For more information, see the documentation.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.description=A replica set name.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify the `host`:`port` of each member, separated by commas.
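+# Illustrative example (hostnames are hypothetical), showing the expected host:port list format for the server addresses:
+# mongo-node1.example.com:27017,mongo-node2.example.com:27017,mongo-node3.example.com:27017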
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.description=URL of a cluster to connect to.
+datasources.section.destination-mqtt.automatic_reconnect.title=Automatic reconnect
+datasources.section.destination-mqtt.broker_host.title=MQTT broker host
+datasources.section.destination-mqtt.broker_port.title=MQTT broker port
+datasources.section.destination-mqtt.clean_session.title=Clean session
+datasources.section.destination-mqtt.client.title=Client ID
+datasources.section.destination-mqtt.connect_timeout.title=Connect timeout
+datasources.section.destination-mqtt.message_qos.title=Message QoS
+datasources.section.destination-mqtt.message_retained.title=Message retained
+datasources.section.destination-mqtt.password.title=Password
+datasources.section.destination-mqtt.publisher_sync.title=Sync publisher
+datasources.section.destination-mqtt.topic_pattern.title=Topic pattern
+datasources.section.destination-mqtt.topic_test.title=Test topic
+datasources.section.destination-mqtt.use_tls.title=Use TLS
+datasources.section.destination-mqtt.username.title=Username
+datasources.section.destination-mqtt.automatic_reconnect.description=Whether the client will automatically attempt to reconnect to the server if the connection is lost.
+datasources.section.destination-mqtt.broker_host.description=Host of the broker to connect to.
+datasources.section.destination-mqtt.broker_port.description=Port of the broker.
+datasources.section.destination-mqtt.clean_session.description=Whether the client and server should remember state across restarts and reconnects.
+datasources.section.destination-mqtt.client.description=A client identifier that is unique on the server being connected to.
+datasources.section.destination-mqtt.connect_timeout.description=Maximum time interval (in seconds) the client will wait for the network connection to the MQTT server to be established.
+datasources.section.destination-mqtt.message_qos.description=Quality of service used for each message to be delivered.
+datasources.section.destination-mqtt.message_retained.description=Whether or not the publish message should be retained by the messaging engine.
+datasources.section.destination-mqtt.password.description=Password to use for the connection.
+datasources.section.destination-mqtt.publisher_sync.description=Wait synchronously until the record has been sent to the broker.
+datasources.section.destination-mqtt.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
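+# Illustrative example (hypothetical names): a topic pattern of airbyte.{namespace}.{stream} would route records from stream "users" in namespace "public" to the topic airbyte.public.users, subject to the naming-convention transformation mentioned above.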
+datasources.section.destination-mqtt.topic_test.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-mqtt.use_tls.description=Whether to use TLS encryption on the connection.
+datasources.section.destination-mqtt.username.description=User name to use for the connection.
+datasources.section.destination-mssql.database.title=DB Name
+datasources.section.destination-mssql.host.title=Host
+datasources.section.destination-mssql.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-mssql.password.title=Password
+datasources.section.destination-mssql.port.title=Port
+datasources.section.destination-mssql.schema.title=Default Schema
+datasources.section.destination-mssql.ssl_method.oneOf.0.title=Unencrypted
+datasources.section.destination-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate)
+datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate
+datasources.section.destination-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate)
+datasources.section.destination-mssql.ssl_method.title=SSL Method
+datasources.section.destination-mssql.username.title=User
+datasources.section.destination-mssql.database.description=The name of the MSSQL database.
+datasources.section.destination-mssql.host.description=The host name of the MSSQL database.
+datasources.section.destination-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-mssql.password.description=The password associated with this username.
+datasources.section.destination-mssql.port.description=The port of the MSSQL database.
+datasources.section.destination-mssql.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public".
+datasources.section.destination-mssql.ssl_method.description=The encryption method which is used to communicate with the database.
+datasources.section.destination-mssql.ssl_method.oneOf.0.description=The data transfer will not be encrypted.
+datasources.section.destination-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!)
+datasources.section.destination-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate.
+datasources.section.destination-mssql.username.description=The username which is used to access the database.
+datasources.section.destination-mysql.database.title=DB Name
+datasources.section.destination-mysql.host.title=Host
+datasources.section.destination-mysql.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-mysql.password.title=Password
+datasources.section.destination-mysql.port.title=Port
+datasources.section.destination-mysql.ssl.title=SSL Connection
+datasources.section.destination-mysql.username.title=User
+datasources.section.destination-mysql.database.description=Name of the database.
+datasources.section.destination-mysql.host.description=Hostname of the database.
+datasources.section.destination-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-mysql.password.description=Password associated with the username.
+datasources.section.destination-mysql.port.description=Port of the database.
+datasources.section.destination-mysql.ssl.description=Encrypt data using SSL.
+datasources.section.destination-mysql.username.description=Username to use to access the database.
+datasources.section.destination-oracle.encryption.oneOf.0.title=Unencrypted
+datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.title=Encryption Algorithm
+datasources.section.destination-oracle.encryption.oneOf.1.title=Native Network Encryption (NNE)
+datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.title=SSL PEM file
+datasources.section.destination-oracle.encryption.oneOf.2.title=TLS Encrypted (verify certificate)
+datasources.section.destination-oracle.encryption.title=Encryption
+datasources.section.destination-oracle.host.title=Host
+datasources.section.destination-oracle.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-oracle.password.title=Password
+datasources.section.destination-oracle.port.title=Port
+datasources.section.destination-oracle.schema.title=Default Schema
+datasources.section.destination-oracle.sid.title=SID
+datasources.section.destination-oracle.username.title=User
+datasources.section.destination-oracle.encryption.description=The encryption method which is used when communicating with the database.
+datasources.section.destination-oracle.encryption.oneOf.0.description=Data transfer will not be encrypted.
+datasources.section.destination-oracle.encryption.oneOf.1.description=The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports.
+datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.description=This parameter defines the database encryption algorithm.
+datasources.section.destination-oracle.encryption.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations.
+datasources.section.destination-oracle.host.description=The hostname of the database.
+datasources.section.destination-oracle.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-oracle.password.description=The password associated with the username.
+datasources.section.destination-oracle.port.description=The port of the database.
+datasources.section.destination-oracle.schema.description=The default schema is used as the target schema for all statements issued from the connection that do not explicitly specify a schema name. The usual value for this field is "airbyte". In Oracle, schemas and users are the same thing, so the "user" parameter is used as the login credentials and this is used for the default Airbyte message schema.
+datasources.section.destination-oracle.sid.description=The System Identifier uniquely distinguishes the instance from any other instance on the same computer.
+datasources.section.destination-oracle.username.description=The username to access the database. This user must have CREATE USER privileges in the database.
+datasources.section.destination-postgres.database.title=DB Name
+datasources.section.destination-postgres.host.title=Host
+datasources.section.destination-postgres.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-postgres.password.title=Password
+datasources.section.destination-postgres.port.title=Port
+datasources.section.destination-postgres.schema.title=Default Schema
+datasources.section.destination-postgres.ssl.title=SSL Connection
+datasources.section.destination-postgres.ssl_mode.oneOf.0.title=disable
+datasources.section.destination-postgres.ssl_mode.oneOf.1.title=allow
+datasources.section.destination-postgres.ssl_mode.oneOf.2.title=prefer
+datasources.section.destination-postgres.ssl_mode.oneOf.3.title=require
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.title=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.title=Client key password (Optional)
+datasources.section.destination-postgres.ssl_mode.oneOf.4.title=verify-ca
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.title=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.title=Client certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.title=Client key
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.title=Client key password (Optional)
+datasources.section.destination-postgres.ssl_mode.oneOf.5.title=verify-full
+datasources.section.destination-postgres.ssl_mode.title=SSL modes
+datasources.section.destination-postgres.username.title=User
+datasources.section.destination-postgres.database.description=Name of the database.
+datasources.section.destination-postgres.host.description=Hostname of the database.
+datasources.section.destination-postgres.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-postgres.password.description=Password associated with the username.
+datasources.section.destination-postgres.port.description=Port of the database.
+datasources.section.destination-postgres.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public".
+datasources.section.destination-postgres.ssl.description=Encrypt data using SSL. When activating SSL, please select one of the connection modes.
+datasources.section.destination-postgres.ssl_mode.description=SSL connection modes.
+datasources.section.destination-postgres.ssl_mode.oneOf.0.description=Disable SSL.
+datasources.section.destination-postgres.ssl_mode.oneOf.1.description=Allow SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.2.description=Prefer SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.3.description=Require SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.4.description=Verify-ca SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.description=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.description=Password for the key store. This field is optional; if you do not provide it, the password will be generated automatically.
+datasources.section.destination-postgres.ssl_mode.oneOf.5.description=Verify-full SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.description=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.description=Client certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.description=Client key
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.description=Password for the key store. This field is optional; if you do not provide it, the password will be generated automatically.
+datasources.section.destination-postgres.username.description=Username to use to access the database.
+datasources.section.destination-pubsub.credentials_json.title=Credentials JSON
+datasources.section.destination-pubsub.project_id.title=Project ID
+datasources.section.destination-pubsub.topic_id.title=PubSub Topic ID
+datasources.section.destination-pubsub.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key.
+datasources.section.destination-pubsub.project_id.description=The GCP project ID for the project containing the target PubSub.
+datasources.section.destination-pubsub.topic_id.description=The PubSub topic ID in the given GCP project ID.
+datasources.section.destination-pulsar.batching_enabled.title=Enable batching
+datasources.section.destination-pulsar.batching_max_messages.title=Batching max messages
+datasources.section.destination-pulsar.batching_max_publish_delay.title=Batching max publish delay
+datasources.section.destination-pulsar.block_if_queue_full.title=Block if queue is full
+datasources.section.destination-pulsar.brokers.title=Pulsar brokers
+datasources.section.destination-pulsar.compression_type.title=Compression type
+datasources.section.destination-pulsar.max_pending_messages.title=Max pending messages
+datasources.section.destination-pulsar.max_pending_messages_across_partitions.title=Max pending messages across partitions
+datasources.section.destination-pulsar.producer_name.title=Producer name
+datasources.section.destination-pulsar.producer_sync.title=Sync producer
+datasources.section.destination-pulsar.send_timeout_ms.title=Message send timeout
+datasources.section.destination-pulsar.topic_namespace.title=Topic namespace
+datasources.section.destination-pulsar.topic_pattern.title=Topic pattern
+datasources.section.destination-pulsar.topic_tenant.title=Topic tenant
+datasources.section.destination-pulsar.topic_test.title=Test topic
+datasources.section.destination-pulsar.topic_type.title=Topic type
+datasources.section.destination-pulsar.use_tls.title=Use TLS
+datasources.section.destination-pulsar.batching_enabled.description=Control whether automatic batching of messages is enabled for the producer.
+datasources.section.destination-pulsar.batching_max_messages.description=Maximum number of messages permitted in a batch.
+datasources.section.destination-pulsar.batching_max_publish_delay.description=Time period in milliseconds within which the messages sent will be batched.
+datasources.section.destination-pulsar.block_if_queue_full.description=If the send operation should block when the outgoing message queue is full.
+datasources.section.destination-pulsar.brokers.description=A list of host/port pairs to use for establishing the initial connection to the Pulsar cluster.
+datasources.section.destination-pulsar.compression_type.description=Compression type for the producer.
+datasources.section.destination-pulsar.max_pending_messages.description=The maximum size of a queue holding pending messages.
+datasources.section.destination-pulsar.max_pending_messages_across_partitions.description=The maximum number of pending messages across partitions.
+datasources.section.destination-pulsar.producer_name.description=Name for the producer. If not filled, the system will generate a globally unique name.
+datasources.section.destination-pulsar.producer_sync.description=Wait synchronously until the record has been sent to Pulsar.
+datasources.section.destination-pulsar.send_timeout_ms.description=If a message is not acknowledged by a server before the send-timeout expires, an error occurs (in ms).
+datasources.section.destination-pulsar.topic_namespace.description=The administrative unit of the topic, which acts as a grouping mechanism for related topics. Most topic configuration is performed at the namespace level. Each tenant has one or multiple namespaces.
+datasources.section.destination-pulsar.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-pulsar.topic_tenant.description=The topic tenant within the instance. Tenants are essential to multi-tenancy in Pulsar, and can be spread across clusters.
+datasources.section.destination-pulsar.topic_test.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-pulsar.topic_type.description=Identifies the type of topic. Pulsar supports two kinds of topics: persistent and non-persistent. In a persistent topic, all messages are durably persisted on disk (on multiple disks unless the broker is standalone), whereas a non-persistent topic does not persist messages to storage disks.
+datasources.section.destination-pulsar.use_tls.description=Whether to use TLS encryption on the connection.
+datasources.section.destination-rabbitmq.exchange.description=The exchange name.
+datasources.section.destination-rabbitmq.host.description=The RabbitMQ host name.
+datasources.section.destination-rabbitmq.password.description=The password to connect.
+datasources.section.destination-rabbitmq.port.description=The RabbitMQ port.
+datasources.section.destination-rabbitmq.routing_key.description=The routing key.
+datasources.section.destination-rabbitmq.ssl.description=Whether SSL is enabled.
+datasources.section.destination-rabbitmq.username.description=The username to connect.
+datasources.section.destination-rabbitmq.virtual_host.description=The RabbitMQ virtual host name.
+datasources.section.destination-redis.cache_type.title=Cache type
+datasources.section.destination-redis.host.title=Host
+datasources.section.destination-redis.password.title=Password
+datasources.section.destination-redis.port.title=Port
+datasources.section.destination-redis.username.title=Username
+datasources.section.destination-redis.cache_type.description=Redis cache type to store data in.
+datasources.section.destination-redis.host.description=Redis host to connect to.
+datasources.section.destination-redis.password.description=Password associated with Redis.
+datasources.section.destination-redis.port.description=Port of Redis.
+datasources.section.destination-redis.username.description=Username associated with Redis.
+datasources.section.destination-redshift.database.title=Database
+datasources.section.destination-redshift.host.title=Host
+datasources.section.destination-redshift.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-redshift.password.title=Password
+datasources.section.destination-redshift.port.title=Port
+datasources.section.destination-redshift.schema.title=Default Schema
+datasources.section.destination-redshift.uploading_method.oneOf.0.title=Standard
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.title=S3 Key Id
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.title=No encryption
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.title=Key
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.title=AES-CBC envelope encryption
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.title=Encryption
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.title=S3 Filename pattern (Optional)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.title=Purge Staging Files and Tables (Optional)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.title=S3 Bucket Name
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.title=S3 Bucket Path (Optional)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.title=S3 Bucket Region
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.title=S3 Access Key
+datasources.section.destination-redshift.uploading_method.oneOf.1.title=S3 Staging
+datasources.section.destination-redshift.uploading_method.title=Uploading Method
+datasources.section.destination-redshift.username.title=Username
+datasources.section.destination-redshift.database.description=Name of the database.
+datasources.section.destination-redshift.host.description=Host Endpoint of the Redshift Cluster (must include the cluster-id and region, and must end with .redshift.amazonaws.com).
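+# Illustrative example (hypothetical cluster): examplecluster.abc123xyz789.us-west-2.redshift.amazonaws.com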
+datasources.section.destination-redshift.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-redshift.password.description=Password associated with the username.
+datasources.section.destination-redshift.port.description=Port of the database.
+datasources.section.destination-redshift.schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public".
+datasources.section.destination-redshift.uploading_method.description=The method used to upload data to the database.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.description=This ID grants access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. See AWS docs on how to generate an access key ID and secret access key.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.description=How to encrypt the staging data
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.description=Staging data will be stored in plaintext.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.description=Staging data will be encrypted using AES-CBC envelope encryption.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.description=The key, base64-encoded. Must be either 128, 192, or 256 bits. Leave blank to have Airbyte generate an ephemeral key for each sync.
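+# Illustrative example: a random 256-bit key can be generated and base64-encoded with a command such as `openssl rand -base64 32`.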
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.description=Whether to delete the staging files from S3 after completing the sync. See docs for details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.description=The name of the staging S3 bucket to use if utilising a COPY strategy. COPY is recommended for production workloads for better speed and scalability. See AWS docs for more details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. If not provided, then defaults to the root directory. See path's name recommendations for more details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a COPY strategy. See AWS docs for details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.description=The corresponding secret to the above access key id. See AWS docs on how to generate an access key ID and secret access key.
+datasources.section.destination-redshift.username.description=Username to use to access the database.
+datasources.section.destination-rockset.api_key.title=Api Key
+datasources.section.destination-rockset.api_server.title=Api Server
+datasources.section.destination-rockset.workspace.title=Workspace
+datasources.section.destination-rockset.api_key.description=Rockset API key.
+datasources.section.destination-rockset.api_server.description=Rockset API URL.
+datasources.section.destination-rockset.workspace.description=The Rockset workspace in which collections will be created and written to.
+datasources.section.destination-s3.access_key_id.title=S3 Key ID *
+datasources.section.destination-s3.file_name_pattern.title=S3 Filename pattern (Optional)
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate Level
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.title=Compression Codec *
+datasources.section.destination-s3.format.oneOf.0.properties.format_type.title=Format Type *
+datasources.section.destination-s3.format.oneOf.0.title=Avro: Apache Avro
+datasources.section.destination-s3.format.oneOf.1.properties.compression.oneOf.0.title=No Compression
+datasources.section.destination-s3.format.oneOf.1.properties.compression.oneOf.1.title=GZIP
+datasources.section.destination-s3.format.oneOf.1.properties.compression.title=Compression
+datasources.section.destination-s3.format.oneOf.1.properties.flattening.title=Normalization (Flattening)
+datasources.section.destination-s3.format.oneOf.1.properties.format_type.title=Format Type *
+datasources.section.destination-s3.format.oneOf.1.title=CSV: Comma-Separated Values
+datasources.section.destination-s3.format.oneOf.2.properties.compression.oneOf.0.title=No Compression
+datasources.section.destination-s3.format.oneOf.2.properties.compression.oneOf.1.title=GZIP
+datasources.section.destination-s3.format.oneOf.2.properties.compression.title=Compression
+datasources.section.destination-s3.format.oneOf.2.properties.format_type.title=Format Type *
+datasources.section.destination-s3.format.oneOf.2.title=JSON Lines: Newline-delimited JSON
+datasources.section.destination-s3.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.format_type.title=Format Type *
+datasources.section.destination-s3.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional)
+datasources.section.destination-s3.format.oneOf.3.title=Parquet: Columnar Storage
+datasources.section.destination-s3.format.title=Output Format *
+datasources.section.destination-s3.s3_bucket_name.title=S3 Bucket Name
+datasources.section.destination-s3.s3_bucket_path.title=S3 Bucket Path
+datasources.section.destination-s3.s3_bucket_region.title=S3 Bucket Region
+datasources.section.destination-s3.s3_endpoint.title=Endpoint (Optional)
+datasources.section.destination-s3.s3_path_format.title=S3 Path Format (Optional)
+datasources.section.destination-s3.secret_access_key.title=S3 Access Key *
+datasources.section.destination-s3.access_key_id.description=The access key ID to access the S3 bucket. Airbyte requires Read and Write permissions to the given bucket. Read more here.
+datasources.section.destination-s3.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s)
+datasources.section.destination-s3.format.description=Format of the data output. See here for more details
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Defaults to no compression.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=See here for details.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block.
+datasources.section.destination-s3.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz").
+datasources.section.destination-s3.format.oneOf.1.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details.
+datasources.section.destination-s3.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz").
+datasources.section.destination-s3.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB.
+datasources.section.destination-s3.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages.
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_encoding.description=Default: true.
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for dictionary. Default: 1024 KB.
+datasources.section.destination-s3.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. This is also the minimum size of a row group. Default: 8 MB.
+datasources.section.destination-s3.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB.
+datasources.section.destination-s3.s3_bucket_name.description=The name of the S3 bucket. Read more here.
+datasources.section.destination-s3.s3_bucket_path.description=Directory under the S3 bucket where data will be written. Read more here
+datasources.section.destination-s3.s3_bucket_region.description=The region of the S3 bucket. See here for all region codes.
+datasources.section.destination-s3.s3_endpoint.description=Your S3 endpoint url. Read more here
+datasources.section.destination-s3.s3_path_format.description=Format string on how data will be organized inside the S3 bucket directory. Read more here
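+# Illustrative example of a path format string (variable names are an assumption here, not taken from this spec): ${NAMESPACE}/${STREAM_NAME}/${YEAR}_${MONTH}_${DAY}_${EPOCH}_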
+datasources.section.destination-s3.secret_access_key.description=The corresponding secret to the access key ID. Read more here
+datasources.section.destination-amazon-sqs.access_key.title=AWS IAM Access Key ID
+datasources.section.destination-amazon-sqs.message_body_key.title=Message Body Key
+datasources.section.destination-amazon-sqs.message_delay.title=Message Delay
+datasources.section.destination-amazon-sqs.message_group_id.title=Message Group Id
+datasources.section.destination-amazon-sqs.queue_url.title=Queue URL
+datasources.section.destination-amazon-sqs.region.title=AWS Region
+datasources.section.destination-amazon-sqs.secret_key.title=AWS IAM Secret Key
+datasources.section.destination-amazon-sqs.access_key.description=The Access Key ID of the AWS IAM Role to use for sending messages
+datasources.section.destination-amazon-sqs.message_body_key.description=Use this property to extract the contents of the named key in the input record to use as the SQS message body. If not set, the entire content of the input record data is used as the message body.
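+# Illustrative example (hypothetical record): with message_body_key set to "myBody", a record {"myBody": "hello", "id": 1} would be sent with an SQS message body of "hello" rather than the full record JSON.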
+datasources.section.destination-amazon-sqs.message_delay.description=Modify the Message Delay of the individual message from the Queue's default (seconds).
+datasources.section.destination-amazon-sqs.message_group_id.description=The tag that specifies that a message belongs to a specific message group. This parameter applies only to, and is REQUIRED by, FIFO queues.
+datasources.section.destination-amazon-sqs.queue_url.description=URL of the SQS Queue
+datasources.section.destination-amazon-sqs.region.description=AWS Region of the SQS Queue
+datasources.section.destination-amazon-sqs.secret_key.description=The Secret Key of the AWS IAM Role to use for sending messages
+datasources.section.destination-aws-datalake.aws_account_id.title=AWS Account Id
+datasources.section.destination-aws-datalake.bucket_name.title=S3 Bucket Name
+datasources.section.destination-aws-datalake.bucket_prefix.title=Target S3 Bucket Prefix
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.title=Credentials Title
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.title=Target Role Arn
+datasources.section.destination-aws-datalake.credentials.oneOf.0.title=IAM Role
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.title=Access Key Id
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.title=Secret Access Key
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.title=Credentials Title
+datasources.section.destination-aws-datalake.credentials.oneOf.1.title=IAM User
+datasources.section.destination-aws-datalake.credentials.title=Authentication mode
+datasources.section.destination-aws-datalake.lakeformation_database_name.title=Lakeformation Database Name
+datasources.section.destination-aws-datalake.region.title=AWS Region
+datasources.section.destination-aws-datalake.aws_account_id.description=Target AWS account ID.
+datasources.section.destination-aws-datalake.bucket_name.description=Name of the bucket.
+datasources.section.destination-aws-datalake.bucket_prefix.description=S3 prefix.
+datasources.section.destination-aws-datalake.credentials.description=Choose how to authenticate to AWS.
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.credentials_title.description=Name of the credentials.
+datasources.section.destination-aws-datalake.credentials.oneOf.0.properties.role_arn.description=The connector will assume this role to write data to S3.
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_access_key_id.description=AWS User Access Key ID.
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.aws_secret_access_key.description=Secret Access Key.
+datasources.section.destination-aws-datalake.credentials.oneOf.1.properties.credentials_title.description=Name of the credentials.
+datasources.section.destination-aws-datalake.lakeformation_database_name.description=Which database to use.
+datasources.section.destination-aws-datalake.region.description=AWS region name.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.title=Azure Blob Storage account key
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.title=Azure Blob Storage account name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.title=Azure blob storage container (Bucket) Name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.title=Endpoint Domain Name
+datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.title=Azure Blob Storage output buffer size (Megabytes)
+datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.title=Normalization (Flattening)
+datasources.section.destination-azure-blob-storage.format.oneOf.0.title=CSV: Comma-Separated Values
+datasources.section.destination-azure-blob-storage.format.oneOf.1.title=JSON Lines: newline-delimited JSON
+datasources.section.destination-azure-blob-storage.format.title=Output Format
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_key.description=The Azure blob storage account key.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_account_name.description=The name of the Azure Blob Storage account.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_container_name.description=The name of the Azure Blob Storage container. If it does not exist, it will be created automatically. If left empty, a container named airbytecontainer+timestamp will be created automatically.
+datasources.section.destination-azure-blob-storage.azure_blob_storage_endpoint_domain_name.description=The Azure Blob Storage endpoint domain name. Leave the default value (or leave it empty if running the container from the command line) to use the Microsoft native endpoint.
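+# Illustrative note: the Microsoft-native endpoint domain is typically blob.core.windows.net.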
+datasources.section.destination-azure-blob-storage.azure_blob_storage_output_buffer_size.description=The number of megabytes to buffer for the output stream to Azure. This will impact the memory footprint on workers, but may need adjustment for performance and an appropriate block size in Azure.
+datasources.section.destination-azure-blob-storage.format.description=Output data format
+datasources.section.destination-azure-blob-storage.format.oneOf.0.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details.
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.title=Google BigQuery Client Chunk Size (Optional)
+datasources.section.destination-bigquery.credentials_json.title=Service Account Key JSON (Required for cloud, optional for open-source)
+datasources.section.destination-bigquery.dataset_id.title=Default Dataset ID
+datasources.section.destination-bigquery.dataset_location.title=Dataset Location
+datasources.section.destination-bigquery.loading_method.oneOf.0.title=Standard Inserts
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_access_id.title=HMAC Key Access ID
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.properties.hmac_key_secret.title=HMAC Key Secret
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.oneOf.0.title=HMAC key
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.credential.title=Credential
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_name.title=GCS Bucket Name
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.gcs_bucket_path.title=GCS Bucket Path
+datasources.section.destination-bigquery.loading_method.oneOf.1.properties.keep_files_in_gcs-bucket.title=GCS Tmp Files Afterward Processing (Optional)
+datasources.section.destination-bigquery.loading_method.oneOf.1.title=GCS Staging
+datasources.section.destination-bigquery.loading_method.title=Loading Method
+datasources.section.destination-bigquery.project_id.title=Project ID
+datasources.section.destination-bigquery.transformation_priority.title=Transformation Query Run Type (Optional)
+datasources.section.destination-bigquery.big_query_client_buffer_size_mb.description=Google BigQuery client's chunk (buffer) size (MIN=1, MAX = 15) for each table. The size that will be written by a single RPC. Written data will be buffered and only flushed upon reaching this size or closing the channel. The default 15MB value is used if not set explicitly. Read more here.
+datasources.section.destination-bigquery.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty.
+datasources.section.destination-bigquery.dataset_id.description=The default BigQuery Dataset ID that tables are replicated to if the source does not specify a namespace. Read more here.
+datasources.section.destination-bigquery.dataset_location.description=The location of the dataset. Warning: Changes made after creation will not be applied. Read more here.
+datasources.section.destination-bigquery.loading_method.description=Loading method used to select the way data will be uploaded to BigQuery.
+datasources.section.destination-kafka.bootstrap_servers.description=A list of host/port pairs (host1:port1,host2:port2,...) to use for establishing the initial connection to the Kafka cluster. Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down).
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server.
+datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses.
+datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.
+datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer.
+datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns.
+datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream.
+datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request.
+datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block.
+datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5.
+datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes.
+datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers.
+datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.
+datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.
+datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error.
+datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used.
+datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum.
+datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established.
+datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka.
+datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-keen.api_key.title=API Key
+datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp
+datasources.section.destination-keen.project_id.title=Project ID
+datasources.section.destination-keen.api_key.description=To get the Keen Master API Key, navigate to the Access tab from the left-hand side panel and check the Project Details section.
+datasources.section.destination-keen.infer_timestamp.description=Allow the connector to guess the keen.timestamp value based on the streamed data.
+datasources.section.destination-keen.project_id.description=To get the Keen Project ID, navigate to the Access tab from the left-hand side panel and check the Project Details section.
+datasources.section.destination-kinesis.accessKey.title=Access Key
+datasources.section.destination-kinesis.bufferSize.title=Buffer Size
+datasources.section.destination-kinesis.endpoint.title=Endpoint
+datasources.section.destination-kinesis.privateKey.title=Private Key
+datasources.section.destination-kinesis.region.title=Region
+datasources.section.destination-kinesis.shardCount.title=Shard Count
+datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for the current user.
+datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing Kinesis records before they are batch streamed.
+datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint.
+datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that is unique for each account, also known as a "recovery phrase".
+datasources.section.destination-kinesis.region.description=AWS region. Your account determines the Regions that are available to you.
+datasources.section.destination-kinesis.shardCount.description=Number of shards to which the data should be streamed.
+datasources.section.destination-kvdb.bucket_id.title=Bucket ID
+datasources.section.destination-kvdb.secret_key.title=Secret Key
+datasources.section.destination-kvdb.bucket_id.description=The ID of your KVdb bucket.
+datasources.section.destination-kvdb.secret_key.description=Your bucket Secret Key.
+datasources.section.destination-local-json.destination_path.title=Destination Path
+datasources.section.destination-local-json.destination_path.description=Path to the directory where json files will be written. The files will be placed inside that local mount. For more information check out our docs
+datasources.section.destination-mariadb-columnstore.database.title=Database
+datasources.section.destination-mariadb-columnstore.host.title=Host
+datasources.section.destination-mariadb-columnstore.password.title=Password
+datasources.section.destination-mariadb-columnstore.port.title=Port
+datasources.section.destination-mariadb-columnstore.username.title=Username
+datasources.section.destination-mariadb-columnstore.database.description=Name of the database.
+datasources.section.destination-mariadb-columnstore.host.description=The Hostname of the database.
+datasources.section.destination-mariadb-columnstore.password.description=The Password associated with the username.
+datasources.section.destination-mariadb-columnstore.port.description=The Port of the database.
+datasources.section.destination-mariadb-columnstore.username.description=The Username which is used to access the database.
+datasources.section.destination-meilisearch.api_key.title=API Key
+datasources.section.destination-meilisearch.host.title=Host
+datasources.section.destination-meilisearch.api_key.description=MeiliSearch API Key. See the docs for more information on how to obtain this key.
+datasources.section.destination-meilisearch.host.description=Hostname of the MeiliSearch instance.
+datasources.section.destination-mongodb.auth_type.oneOf.0.title=None
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.title=Password
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.title=User
+datasources.section.destination-mongodb.auth_type.oneOf.1.title=Login/Password
+datasources.section.destination-mongodb.auth_type.title=Authorization type
+datasources.section.destination-mongodb.database.title=DB Name
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.title=Host
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.title=Port
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.title=TLS Connection
+datasources.section.destination-mongodb.instance_type.oneOf.0.title=Standalone MongoDb Instance
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.title=Server addresses
+datasources.section.destination-mongodb.instance_type.oneOf.1.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL
+datasources.section.destination-mongodb.instance_type.oneOf.2.title=MongoDB Atlas
+datasources.section.destination-mongodb.instance_type.title=MongoDb Instance Type
+datasources.section.destination-mongodb.auth_type.description=Authorization type.
+datasources.section.destination-mongodb.auth_type.oneOf.0.description=None.
+datasources.section.destination-mongodb.auth_type.oneOf.1.description=Login/Password.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.description=Password associated with the username.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.description=Username to use to access the database.
+datasources.section.destination-mongodb.database.description=Name of the database.
+datasources.section.destination-mongodb.instance_type.description=MongoDb instance to connect to. For MongoDB Atlas and Replica Set TLS connection is used by default.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.description=The Host of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.description=The Port of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.description=Indicates whether TLS encryption protocol will be used to connect to MongoDB. It is recommended to use TLS connection if possible. For more information see documentation.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.description=A replica set name.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify `host`:`port` of each member separated by a comma.
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.description=URL of a cluster to connect to.
+datasources.section.destination-mqtt.automatic_reconnect.title=Automatic reconnect
+datasources.section.destination-mqtt.broker_host.title=MQTT broker host
+datasources.section.destination-mqtt.broker_port.title=MQTT broker port
+datasources.section.destination-mqtt.clean_session.title=Clean session
+datasources.section.destination-mqtt.client.title=Client ID
+datasources.section.destination-mqtt.connect_timeout.title=Connect timeout
+datasources.section.destination-mqtt.message_qos.title=Message QoS
+datasources.section.destination-mqtt.message_retained.title=Message retained
+datasources.section.destination-mqtt.password.title=Password
+datasources.section.destination-mqtt.publisher_sync.title=Sync publisher
+datasources.section.destination-mqtt.topic_pattern.title=Topic pattern
+datasources.section.destination-mqtt.topic_test.title=Test topic
+datasources.section.destination-mqtt.use_tls.title=Use TLS
+datasources.section.destination-mqtt.username.title=Username
+datasources.section.destination-mqtt.automatic_reconnect.description=Whether the client will automatically attempt to reconnect to the server if the connection is lost.
+datasources.section.destination-mqtt.broker_host.description=Host of the broker to connect to.
+datasources.section.destination-mqtt.broker_port.description=Port of the broker.
+datasources.section.destination-mqtt.clean_session.description=Whether the client and server should remember state across restarts and reconnects.
+datasources.section.destination-mqtt.client.description=A client identifier that is unique on the server being connected to.
+datasources.section.destination-mqtt.connect_timeout.description=Maximum time interval (in seconds) the client will wait for the network connection to the MQTT server to be established.
+datasources.section.destination-mqtt.message_qos.description=Quality of service used for each message to be delivered.
+datasources.section.destination-mqtt.message_retained.description=Whether or not the publish message should be retained by the messaging engine.
+datasources.section.destination-mqtt.password.description=Password to use for the connection.
+datasources.section.destination-mqtt.publisher_sync.description=Wait synchronously until the record has been sent to the broker.
+datasources.section.destination-mqtt.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-mqtt.topic_test.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-mqtt.use_tls.description=Whether to use TLS encryption on the connection.
+datasources.section.destination-mqtt.username.description=User name to use for the connection.
+datasources.section.destination-mssql.database.title=DB Name
+datasources.section.destination-mssql.host.title=Host
+datasources.section.destination-mssql.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-mssql.password.title=Password
+datasources.section.destination-mssql.port.title=Port
+datasources.section.destination-mssql.schema.title=Default Schema
+datasources.section.destination-mssql.ssl_method.oneOf.0.title=Unencrypted
+datasources.section.destination-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate)
+datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate
+datasources.section.destination-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate)
+datasources.section.destination-mssql.ssl_method.title=SSL Method
+datasources.section.destination-mssql.username.title=User
+datasources.section.destination-mssql.database.description=The name of the MSSQL database.
+datasources.section.destination-mssql.host.description=The host name of the MSSQL database.
+datasources.section.destination-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-mssql.password.description=The password associated with this username.
+datasources.section.destination-mssql.port.description=The port of the MSSQL database.
+datasources.section.destination-mssql.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public".
+datasources.section.destination-mssql.ssl_method.description=The encryption method which is used to communicate with the database.
+datasources.section.destination-mssql.ssl_method.oneOf.0.description=The data transfer will not be encrypted.
+datasources.section.destination-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!)
+datasources.section.destination-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate.
+datasources.section.destination-mssql.username.description=The username which is used to access the database.
+datasources.section.destination-mysql.database.title=DB Name
+datasources.section.destination-mysql.host.title=Host
+datasources.section.destination-mysql.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-mysql.password.title=Password
+datasources.section.destination-mysql.port.title=Port
+datasources.section.destination-mysql.ssl.title=SSL Connection
+datasources.section.destination-mysql.username.title=User
+datasources.section.destination-mysql.database.description=Name of the database.
+datasources.section.destination-mysql.host.description=Hostname of the database.
+datasources.section.destination-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-mysql.password.description=Password associated with the username.
+datasources.section.destination-mysql.port.description=Port of the database.
+datasources.section.destination-mysql.ssl.description=Encrypt data using SSL.
+datasources.section.destination-mysql.username.description=Username to use to access the database.
+datasources.section.destination-oracle.encryption.oneOf.0.title=Unencrypted
+datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.title=Encryption Algorithm
+datasources.section.destination-oracle.encryption.oneOf.1.title=Native Network Encryption (NNE)
+datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.title=SSL PEM file
+datasources.section.destination-oracle.encryption.oneOf.2.title=TLS Encrypted (verify certificate)
+datasources.section.destination-oracle.encryption.title=Encryption
+datasources.section.destination-oracle.host.title=Host
+datasources.section.destination-oracle.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-oracle.password.title=Password
+datasources.section.destination-oracle.port.title=Port
+datasources.section.destination-oracle.schema.title=Default Schema
+datasources.section.destination-oracle.sid.title=SID
+datasources.section.destination-oracle.username.title=User
+datasources.section.destination-oracle.encryption.description=The encryption method which is used when communicating with the database.
+datasources.section.destination-oracle.encryption.oneOf.0.description=Data transfer will not be encrypted.
+datasources.section.destination-oracle.encryption.oneOf.1.description=The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports.
+datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.description=This parameter defines the database encryption algorithm.
+datasources.section.destination-oracle.encryption.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations.
+datasources.section.destination-oracle.host.description=The hostname of the database.
+datasources.section.destination-oracle.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-oracle.password.description=The password associated with the username.
+datasources.section.destination-oracle.port.description=The port of the database.
+datasources.section.destination-oracle.schema.description=The default schema is used as the target schema for all statements issued from the connection that do not explicitly specify a schema name. The usual value for this field is "airbyte". In Oracle, schemas and users are the same thing, so the "user" parameter is used as the login credentials and this is used for the default Airbyte message schema.
+datasources.section.destination-oracle.sid.description=The System Identifier uniquely distinguishes the instance from any other instance on the same computer.
+datasources.section.destination-oracle.username.description=The username to access the database. This user must have CREATE USER privileges in the database.
+datasources.section.destination-postgres.database.title=DB Name
+datasources.section.destination-postgres.host.title=Host
+datasources.section.destination-postgres.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-postgres.password.title=Password
+datasources.section.destination-postgres.port.title=Port
+datasources.section.destination-postgres.schema.title=Default Schema
+datasources.section.destination-postgres.ssl.title=SSL Connection
+datasources.section.destination-postgres.ssl_mode.oneOf.0.title=disable
+datasources.section.destination-postgres.ssl_mode.oneOf.1.title=allow
+datasources.section.destination-postgres.ssl_mode.oneOf.2.title=prefer
+datasources.section.destination-postgres.ssl_mode.oneOf.3.title=require
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.title=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.title=Client key password (Optional)
+datasources.section.destination-postgres.ssl_mode.oneOf.4.title=verify-ca
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.title=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.title=Client certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.title=Client key
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.title=Client key password (Optional)
+datasources.section.destination-postgres.ssl_mode.oneOf.5.title=verify-full
+datasources.section.destination-postgres.ssl_mode.title=SSL modes
+datasources.section.destination-postgres.username.title=User
+datasources.section.destination-postgres.database.description=Name of the database.
+datasources.section.destination-postgres.host.description=Hostname of the database.
+datasources.section.destination-postgres.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-postgres.password.description=Password associated with the username.
+datasources.section.destination-postgres.port.description=Port of the database.
+datasources.section.destination-postgres.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public".
+datasources.section.destination-postgres.ssl.description=Encrypt data using SSL. When activating SSL, please select one of the connection modes.
+datasources.section.destination-postgres.ssl_mode.description=SSL connection modes.
+datasources.section.destination-postgres.ssl_mode.oneOf.0.description=Disable SSL.
+datasources.section.destination-postgres.ssl_mode.oneOf.1.description=Allow SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.2.description=Prefer SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.3.description=Require SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.4.description=Verify-ca SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.description=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.description=Password for key storage. This field is optional. If you do not add it, the password will be generated automatically.
+datasources.section.destination-postgres.ssl_mode.oneOf.5.description=Verify-full SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.description=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.description=Client certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.description=Client key
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.description=Password for key storage. This field is optional. If you do not add it, the password will be generated automatically.
+datasources.section.destination-postgres.username.description=Username to use to access the database.
+datasources.section.destination-pubsub.credentials_json.title=Credentials JSON
+datasources.section.destination-pubsub.project_id.title=Project ID
+datasources.section.destination-pubsub.topic_id.title=PubSub Topic ID
+datasources.section.destination-pubsub.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key.
+datasources.section.destination-pubsub.project_id.description=The GCP project ID for the project containing the target PubSub.
+datasources.section.destination-pubsub.topic_id.description=The PubSub topic ID in the given GCP project ID.
+datasources.section.destination-pulsar.batching_enabled.title=Enable batching
+datasources.section.destination-pulsar.batching_max_messages.title=Batching max messages
+datasources.section.destination-pulsar.batching_max_publish_delay.title=Batching max publish delay
+datasources.section.destination-pulsar.block_if_queue_full.title=Block if queue is full
+datasources.section.destination-pulsar.brokers.title=Pulsar brokers
+datasources.section.destination-pulsar.compression_type.title=Compression type
+datasources.section.destination-pulsar.max_pending_messages.title=Max pending messages
+datasources.section.destination-pulsar.max_pending_messages_across_partitions.title=Max pending messages across partitions
+datasources.section.destination-pulsar.producer_name.title=Producer name
+datasources.section.destination-pulsar.producer_sync.title=Sync producer
+datasources.section.destination-pulsar.send_timeout_ms.title=Message send timeout
+datasources.section.destination-pulsar.topic_namespace.title=Topic namespace
+datasources.section.destination-pulsar.topic_pattern.title=Topic pattern
+datasources.section.destination-pulsar.topic_tenant.title=Topic tenant
+datasources.section.destination-pulsar.topic_test.title=Test topic
+datasources.section.destination-pulsar.topic_type.title=Topic type
+datasources.section.destination-pulsar.use_tls.title=Use TLS
+datasources.section.destination-pulsar.batching_enabled.description=Control whether automatic batching of messages is enabled for the producer.
+datasources.section.destination-pulsar.batching_max_messages.description=Maximum number of messages permitted in a batch.
+datasources.section.destination-pulsar.batching_max_publish_delay.description=Time period in milliseconds within which the messages sent will be batched.
+datasources.section.destination-pulsar.block_if_queue_full.description=Whether the send operation should block when the outgoing message queue is full.
+datasources.section.destination-pulsar.brokers.description=A list of host/port pairs to use for establishing the initial connection to the Pulsar cluster.
+datasources.section.destination-pulsar.compression_type.description=Compression type for the producer.
+datasources.section.destination-pulsar.max_pending_messages.description=The maximum size of a queue holding pending messages.
+datasources.section.destination-pulsar.max_pending_messages_across_partitions.description=The maximum number of pending messages across partitions.
+datasources.section.destination-pulsar.producer_name.description=Name for the producer. If not filled, the system will generate a globally unique name.
+datasources.section.destination-pulsar.producer_sync.description=Wait synchronously until the record has been sent to Pulsar.
+datasources.section.destination-pulsar.send_timeout_ms.description=If a message is not acknowledged by a server before the send-timeout expires, an error occurs (in ms).
+datasources.section.destination-pulsar.topic_namespace.description=The administrative unit of the topic, which acts as a grouping mechanism for related topics. Most topic configuration is performed at the namespace level. Each tenant has one or multiple namespaces.
+datasources.section.destination-pulsar.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-pulsar.topic_tenant.description=The topic tenant within the instance. Tenants are essential to multi-tenancy in Pulsar, and spread across clusters.
+datasources.section.destination-pulsar.topic_test.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-pulsar.topic_type.description=It identifies the type of topic. Pulsar supports two kinds of topics: persistent and non-persistent. In a persistent topic, all messages are durably persisted on disk (that means on multiple disks unless the broker is standalone), whereas a non-persistent topic does not persist messages to storage disk.
+datasources.section.destination-pulsar.use_tls.description=Whether to use TLS encryption on the connection.
+datasources.section.destination-rabbitmq.exchange.description=The exchange name.
+datasources.section.destination-rabbitmq.host.description=The RabbitMQ host name.
+datasources.section.destination-rabbitmq.password.description=The password to connect.
+datasources.section.destination-rabbitmq.port.description=The RabbitMQ port.
+datasources.section.destination-rabbitmq.routing_key.description=The routing key.
+datasources.section.destination-rabbitmq.ssl.description=SSL enabled.
+datasources.section.destination-rabbitmq.username.description=The username to connect.
+datasources.section.destination-rabbitmq.virtual_host.description=The RabbitMQ virtual host name.
+datasources.section.destination-redis.cache_type.title=Cache type
+datasources.section.destination-redis.host.title=Host
+datasources.section.destination-redis.password.title=Password
+datasources.section.destination-redis.port.title=Port
+datasources.section.destination-redis.username.title=Username
+datasources.section.destination-redis.cache_type.description=Redis cache type to store data in.
+datasources.section.destination-redis.host.description=Redis host to connect to.
+datasources.section.destination-redis.password.description=Password associated with Redis.
+datasources.section.destination-redis.port.description=Port of Redis.
+datasources.section.destination-redis.username.description=Username associated with Redis.
+datasources.section.destination-redshift.database.title=Database
+datasources.section.destination-redshift.host.title=Host
+datasources.section.destination-redshift.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-redshift.password.title=Password
+datasources.section.destination-redshift.port.title=Port
+datasources.section.destination-redshift.schema.title=Default Schema
+datasources.section.destination-redshift.uploading_method.oneOf.0.title=Standard
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.title=S3 Key Id
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.title=No encryption
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.title=Key
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.title=AES-CBC envelope encryption
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.title=Encryption
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.title=S3 Filename pattern (Optional)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.title=Purge Staging Files and Tables (Optional)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.title=S3 Bucket Name
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.title=S3 Bucket Path (Optional)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.title=S3 Bucket Region
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.title=S3 Access Key
+datasources.section.destination-redshift.uploading_method.oneOf.1.title=S3 Staging
+datasources.section.destination-redshift.uploading_method.title=Uploading Method
+datasources.section.destination-redshift.username.title=Username
+datasources.section.destination-redshift.database.description=Name of the database.
+datasources.section.destination-redshift.host.description=Host Endpoint of the Redshift Cluster (must include the cluster-id, region and end with .redshift.amazonaws.com)
+datasources.section.destination-redshift.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-redshift.password.description=Password associated with the username.
+datasources.section.destination-redshift.port.description=Port of the database.
+datasources.section.destination-redshift.schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public".
+datasources.section.destination-redshift.uploading_method.description=The method used to upload data to the database.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.description=This ID grants access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. See AWS docs on how to generate an access key ID and secret access key.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.description=How to encrypt the staging data
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.description=Staging data will be stored in plaintext.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.description=Staging data will be encrypted using AES-CBC envelope encryption.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.description=The key, base64-encoded. Must be either 128, 192, or 256 bits. Leave blank to have Airbyte generate an ephemeral key for each sync.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.description=Whether to delete the staging files from S3 after completing the sync. See docs for details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.description=The name of the staging S3 bucket to use if utilising a COPY strategy. COPY is recommended for production workloads for better speed and scalability. See AWS docs for more details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. If not provided, then defaults to the root directory. See path's name recommendations for more details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a COPY strategy. See AWS docs for details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.description=The corresponding secret to the above access key id. See AWS docs on how to generate an access key ID and secret access key.
+datasources.section.destination-redshift.username.description=Username to use to access the database.
+datasources.section.destination-rockset.api_key.title=API Key
+datasources.section.destination-rockset.api_server.title=API Server
+datasources.section.destination-rockset.workspace.title=Workspace
+datasources.section.destination-rockset.api_key.description=Rockset API key
+datasources.section.destination-rockset.api_server.description=Rockset API URL
+datasources.section.destination-rockset.workspace.description=The Rockset workspace in which collections will be created and written to.
+datasources.section.destination-s3.access_key_id.title=S3 Key ID *
+datasources.section.destination-s3.file_name_pattern.title=S3 Filename pattern (Optional)
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate Level
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.title=Compression Codec *
+datasources.section.destination-s3.format.oneOf.0.properties.format_type.title=Format Type *
+datasources.section.destination-s3.format.oneOf.0.title=Avro: Apache Avro
+datasources.section.destination-s3.format.oneOf.1.properties.compression.oneOf.0.title=No Compression
+datasources.section.destination-s3.format.oneOf.1.properties.compression.oneOf.1.title=GZIP
+datasources.section.destination-s3.format.oneOf.1.properties.compression.title=Compression
+datasources.section.destination-s3.format.oneOf.1.properties.flattening.title=Normalization (Flattening)
+datasources.section.destination-s3.format.oneOf.1.properties.format_type.title=Format Type *
+datasources.section.destination-s3.format.oneOf.1.title=CSV: Comma-Separated Values
+datasources.section.destination-s3.format.oneOf.2.properties.compression.oneOf.0.title=No Compression
+datasources.section.destination-s3.format.oneOf.2.properties.compression.oneOf.1.title=GZIP
+datasources.section.destination-s3.format.oneOf.2.properties.compression.title=Compression
+datasources.section.destination-s3.format.oneOf.2.properties.format_type.title=Format Type *
+datasources.section.destination-s3.format.oneOf.2.title=JSON Lines: Newline-delimited JSON
+datasources.section.destination-s3.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.format_type.title=Format Type *
+datasources.section.destination-s3.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional)
+datasources.section.destination-s3.format.oneOf.3.title=Parquet: Columnar Storage
+datasources.section.destination-s3.format.title=Output Format *
+datasources.section.destination-s3.s3_bucket_name.title=S3 Bucket Name
+datasources.section.destination-s3.s3_bucket_path.title=S3 Bucket Path
+datasources.section.destination-s3.s3_bucket_region.title=S3 Bucket Region
+datasources.section.destination-s3.s3_endpoint.title=Endpoint (Optional)
+datasources.section.destination-s3.s3_path_format.title=S3 Path Format (Optional)
+datasources.section.destination-s3.secret_access_key.title=S3 Access Key *
+datasources.section.destination-s3.access_key_id.description=The access key ID to access the S3 bucket. Airbyte requires Read and Write permissions to the given bucket. Read more here.
+datasources.section.destination-s3.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s)
+datasources.section.destination-s3.format.description=Format of the data output. See here for more details
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Default to no compression.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=See here for details.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block.
+datasources.section.destination-s3.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz").
+datasources.section.destination-s3.format.oneOf.1.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details.
+datasources.section.destination-s3.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz").
+datasources.section.destination-s3.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB.
+datasources.section.destination-s3.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages.
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_encoding.description=Default: true.
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for dictionary. Default: 1024 KB.
+datasources.section.destination-s3.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. This is also the minimum size of a row group. Default: 8 MB.
+datasources.section.destination-s3.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB.
+datasources.section.destination-s3.s3_bucket_name.description=The name of the S3 bucket. Read more here.
+datasources.section.destination-s3.s3_bucket_path.description=Directory under the S3 bucket where data will be written. Read more here
+datasources.section.destination-s3.s3_bucket_region.description=The region of the S3 bucket. See here for all region codes.
+datasources.section.destination-s3.s3_endpoint.description=Your S3 endpoint url. Read more here
+datasources.section.destination-s3.s3_path_format.description=Format string on how data will be organized inside the S3 bucket directory. Read more here
+datasources.section.destination-s3.secret_access_key.description=The corresponding secret to the access key ID. Read more here
+datasources.section.destination-scylla.address.title=Address
+datasources.section.destination-scylla.keyspace.title=Keyspace
+datasources.section.destination-scylla.password.title=Password
+datasources.section.destination-scylla.port.title=Port
+datasources.section.destination-scylla.replication.title=Replication factor
+datasources.section.destination-scylla.username.title=Username
+datasources.section.destination-scylla.address.description=Address to connect to.
+datasources.section.destination-scylla.keyspace.description=Default Scylla keyspace to create data in.
+datasources.section.destination-scylla.password.description=Password associated with Scylla.
+datasources.section.destination-scylla.port.description=Port of Scylla.
+datasources.section.destination-scylla.replication.description=Indicates to how many nodes the data should be replicated.
+datasources.section.destination-scylla.username.description=Username to use to access Scylla.
+datasources.section.destination-sftp-json.destination_path.title=Destination path
+datasources.section.destination-sftp-json.host.title=Host
+datasources.section.destination-sftp-json.password.title=Password
+datasources.section.destination-sftp-json.port.title=Port
+datasources.section.destination-sftp-json.username.title=User
+datasources.section.destination-sftp-json.destination_path.description=Path to the directory where JSON files will be written.
+datasources.section.destination-sftp-json.host.description=Hostname of the SFTP server.
+datasources.section.destination-sftp-json.password.description=Password associated with the username.
+datasources.section.destination-sftp-json.port.description=Port of the SFTP server.
+datasources.section.destination-sftp-json.username.description=Username to use to access the SFTP server.
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.access_token.title=Access Token
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.refresh_token.title=Refresh Token
+datasources.section.destination-snowflake.credentials.oneOf.0.title=OAuth2.0
+datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key.title=Private Key
+datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key_password.title=Passphrase (Optional)
+datasources.section.destination-snowflake.credentials.oneOf.1.title=Key Pair Authentication
+datasources.section.destination-snowflake.credentials.oneOf.2.properties.password.title=Password
+datasources.section.destination-snowflake.credentials.oneOf.2.title=Username and Password
+datasources.section.destination-snowflake.credentials.title=Authorization Method
+datasources.section.destination-snowflake.database.title=Database
+datasources.section.destination-snowflake.host.title=Host
+datasources.section.destination-snowflake.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-snowflake.loading_method.oneOf.0.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.0.title=Select another option
+datasources.section.destination-snowflake.loading_method.oneOf.1.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.1.title=[Recommended] Internal Staging
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.access_key_id.title=AWS access key ID
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.0.title=No encryption
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.properties.key_encrypting_key.title=Key
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.title=AES-CBC envelope encryption
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.title=Encryption
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.file_name_pattern.title=S3 Filename pattern (Optional)
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.purge_staging_data.title=Purge Staging Files and Tables
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_name.title=S3 Bucket Name
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_region.title=S3 Bucket Region
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.secret_access_key.title=AWS secret access key
+datasources.section.destination-snowflake.loading_method.oneOf.2.title=AWS S3 Staging
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.bucket_name.title=Cloud Storage bucket name
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.credentials_json.title=Google Application Credentials
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.project_id.title=Google Cloud project ID
+datasources.section.destination-snowflake.loading_method.oneOf.3.title=Google Cloud Storage Staging
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_account_name.title=Azure Blob Storage account name
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_container_name.title=Azure Blob Storage Container Name
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_endpoint_domain_name.title=Azure Blob Storage Endpoint
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_sas_token.title=SAS Token
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.4.title=Azure Blob Storage Staging
+datasources.section.destination-snowflake.loading_method.title=Data Staging Method
+datasources.section.destination-snowflake.role.title=Role
+datasources.section.destination-snowflake.schema.title=Default Schema
+datasources.section.destination-snowflake.username.title=Username
+datasources.section.destination-snowflake.warehouse.title=Warehouse
+datasources.section.destination-snowflake.credentials.description=
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.access_token.description=Enter your application's Access Token
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_id.description=Enter your application's Client ID
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_secret.description=Enter your application's Client secret
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.refresh_token.description=Enter your application's Refresh Token
+datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key.description=RSA Private key to use for Snowflake connection. See the docs for more information on how to obtain this key.
+datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key_password.description=Passphrase for private key
+datasources.section.destination-snowflake.credentials.oneOf.2.properties.password.description=Enter the password associated with the username.
+datasources.section.destination-snowflake.database.description=Enter the name of the database you want to sync data into
+datasources.section.destination-snowflake.host.description=Enter your Snowflake account's locator (in the format <account_name>.<region>.<cloud>.snowflakecomputing.com)
+datasources.section.source-kafka.bootstrap_servers.description=A list of host/port pairs to use for establishing the initial connection to the Kafka cluster, in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down).
+datasources.section.source-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses.
+datasources.section.source-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.
+datasources.section.source-kafka.enable_auto_commit.description=If true, the consumer's offset will be periodically committed in the background.
+datasources.section.source-kafka.group_id.description=The Group ID is how you distinguish different consumer groups.
+datasources.section.source-kafka.max_poll_records.description=The maximum number of records returned in a single call to poll(). Note that max_poll_records does not impact the underlying fetching behavior. The consumer will cache the records from each fetch request and return them incrementally from each poll.
+datasources.section.source-kafka.polling_time.description=Amount of time the Kafka connector should try to poll for messages.
+datasources.section.source-kafka.protocol.description=The Protocol used to communicate with brokers.
+datasources.section.source-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=The JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.source-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=The SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.source-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=The JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.source-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=The SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.source-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.
+datasources.section.source-kafka.repeated_calls.description=The number of repeated calls to poll() if no messages were received.
+datasources.section.source-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses, the client will resend the request if necessary or fail the request if retries are exhausted.
+datasources.section.source-kafka.retry_backoff_ms.description=The amount of time to wait before attempting to retry a failed request to a given topic partition. This avoids repeatedly sending requests in a tight loop under some failure scenarios.
+datasources.section.source-kafka.subscription.description=You can choose to manually assign a list of partitions, or subscribe to all topics matching a specified pattern to get dynamically assigned partitions.
+datasources.section.source-kafka.subscription.oneOf.0.properties.subscription_type.description=Manually assign a list of partitions to this consumer. This interface does not allow for incremental assignment and will replace the previous assignment (if there is one).
+datasources.section.source-kafka.subscription.oneOf.1.properties.subscription_type.description=The Topic pattern from which the records will be read.
+datasources.section.source-kafka.test_topic.description=The topic to test whether Airbyte can consume messages.
+datasources.section.source-klaviyo.api_key.title=API Key
+datasources.section.source-klaviyo.start_date.title=Start Date
+datasources.section.source-klaviyo.api_key.description=Klaviyo API Key. See our docs if you need help finding this key.
+datasources.section.source-klaviyo.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.
+datasources.section.source-kustomer-singer.api_token.title=API Token
+datasources.section.source-kustomer-singer.start_date.title=Start Date
+datasources.section.source-kustomer-singer.api_token.description=Kustomer API Token. See the docs on how to obtain this
+datasources.section.source-kustomer-singer.start_date.description=The date from which you'd like to replicate the data
+datasources.section.source-kyriba.domain.title=Domain
+datasources.section.source-kyriba.end_date.title=End Date
+datasources.section.source-kyriba.password.title=Password
+datasources.section.source-kyriba.start_date.title=Start Date
+datasources.section.source-kyriba.username.title=Username
+datasources.section.source-kyriba.domain.description=Kyriba domain
+datasources.section.source-kyriba.end_date.description=The date the sync should end. If left empty, the sync will run up to the current date.
+datasources.section.source-kyriba.password.description=Password to be used in basic auth
+datasources.section.source-kyriba.start_date.description=The date the sync should start from.
+datasources.section.source-kyriba.username.description=Username to be used in basic auth
+datasources.section.source-lemlist.api_key.title=API key
+datasources.section.source-lemlist.api_key.description=Lemlist API key.
+datasources.section.source-linkedin-ads.account_ids.title=Account IDs (Optional)
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_secret.title=Client secret
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.refresh_token.title=Refresh token
+datasources.section.source-linkedin-ads.credentials.oneOf.0.title=OAuth2.0
+datasources.section.source-linkedin-ads.credentials.oneOf.1.properties.access_token.title=Access token
+datasources.section.source-linkedin-ads.credentials.oneOf.1.title=Access token
+datasources.section.source-linkedin-ads.credentials.title=Authentication *
+datasources.section.source-linkedin-ads.start_date.title=Start date
+datasources.section.source-linkedin-ads.account_ids.description=Specify the account IDs, separated by a space, to pull the data from. Leave empty if you want to pull the data from all associated accounts. See the LinkedIn Ads docs for more info.
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_id.description=The client ID of the LinkedIn Ads developer application.
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_secret.description=The client secret of the LinkedIn Ads developer application.
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.refresh_token.description=The key to refresh the expired access token.
+datasources.section.source-linkedin-ads.credentials.oneOf.1.properties.access_token.description=The token value generated using the authentication code. See the docs to obtain yours.
+datasources.section.source-linkedin-ads.start_date.description=UTC date in the format 2020-09-17. Any data before this date will not be replicated.
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_secret.title=Client secret
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.refresh_token.title=Refresh token
+datasources.section.source-linkedin-pages.credentials.oneOf.0.title=OAuth2.0
+datasources.section.source-linkedin-pages.credentials.oneOf.1.properties.access_token.title=Access token
+datasources.section.source-linkedin-pages.credentials.oneOf.1.title=Access token
+datasources.section.source-linkedin-pages.credentials.title=Authentication *
+datasources.section.source-linkedin-pages.org_id.title=Organization ID
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_id.description=The client ID of the LinkedIn developer application.
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_secret.description=The client secret of the LinkedIn developer application.
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.refresh_token.description=The token value generated using the LinkedIn Developers OAuth Token Tools. See the docs to obtain yours.
+datasources.section.source-linkedin-pages.credentials.oneOf.1.properties.access_token.description=The token value generated using the LinkedIn Developers OAuth Token Tools. See the docs to obtain yours.
+datasources.section.source-linkedin-pages.org_id.description=Specify the Organization ID
+datasources.section.source-linnworks.application_id.title=Application ID
+datasources.section.source-linnworks.application_secret.title=Application Secret
+datasources.section.source-linnworks.start_date.title=Start Date
+datasources.section.source-linnworks.token.title=API Token
+datasources.section.source-linnworks.application_id.description=Linnworks Application ID
+datasources.section.source-linnworks.application_secret.description=Linnworks Application Secret
+datasources.section.source-linnworks.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.
+datasources.section.source-looker.client_id.title=Client ID
+datasources.section.source-looker.client_secret.title=Client Secret
+datasources.section.source-looker.domain.title=Domain
+datasources.section.source-looker.run_look_ids.title=Look IDs to Run
+datasources.section.source-looker.client_id.description=The Client ID is the first part of an API3 key that is specific to each Looker user. See the docs for more information on how to generate this key.
+datasources.section.source-looker.client_secret.description=The Client Secret is the second part of an API3 key.
+datasources.section.source-looker.domain.description=Domain for your Looker account, e.g. airbyte.cloud.looker.com, looker.[clientname].com, or an IP address.
+datasources.section.source-looker.run_look_ids.description=The IDs of any Looks to run (optional)
+datasources.section.source-mailchimp.credentials.oneOf.0.properties.access_token.title=Access Token
+datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.source-mailchimp.credentials.oneOf.0.title=OAuth2.0
+datasources.section.source-mailchimp.credentials.oneOf.1.properties.apikey.title=API Key
+datasources.section.source-mailchimp.credentials.oneOf.1.title=API Key
+datasources.section.source-mailchimp.credentials.title=Authentication *
+datasources.section.source-mailchimp.credentials.oneOf.0.properties.access_token.description=An access token generated using the above client ID and secret.
+datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application.
+datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application.
+datasources.section.source-mailchimp.credentials.oneOf.1.properties.apikey.description=Mailchimp API Key. See the docs for information on how to generate this key.
+datasources.section.source-mailgun.domain_region.title=Domain Region Code
+datasources.section.source-mailgun.private_key.title=Private API Key
+datasources.section.source-mailgun.start_date.title=Replication Start Date
+datasources.section.source-mailgun.domain_region.description=Domain region code. 'EU' or 'US' are possible values. The default is 'US'.
+datasources.section.source-mailgun.private_key.description=Primary account API key to access your Mailgun data.
+datasources.section.source-mailgun.start_date.description=UTC date and time in the format 2020-10-01 00:00:00. Any data before this date will not be replicated. If omitted, defaults to 3 days ago.
+datasources.section.source-marketo.client_id.title=Client ID
+datasources.section.source-marketo.client_secret.title=Client Secret
+datasources.section.source-marketo.domain_url.title=Domain URL
+datasources.section.source-marketo.start_date.title=Start Date
+datasources.section.source-marketo.client_id.description=The Client ID of your Marketo developer application. See the docs for info on how to obtain this.
+datasources.section.source-marketo.client_secret.description=The Client Secret of your Marketo developer application. See the docs for info on how to obtain this.
+datasources.section.source-marketo.domain_url.description=Your Marketo Base URL. See the docs for info on how to obtain this.
+datasources.section.source-marketo.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.refresh_token.title=Refresh Token
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.tenant_id.title=Directory (tenant) ID
+datasources.section.source-microsoft-teams.credentials.oneOf.0.title=Authenticate via Microsoft (OAuth 2.0)
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_id.title=Client ID
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_secret.title=Client Secret
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.tenant_id.title=Directory (tenant) ID
+datasources.section.source-microsoft-teams.credentials.oneOf.1.title=Authenticate via Microsoft
+datasources.section.source-microsoft-teams.credentials.title=Authentication mechanism
+datasources.section.source-microsoft-teams.period.title=Period
+datasources.section.source-microsoft-teams.credentials.description=Choose how to authenticate to Microsoft
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_id.description=The Client ID of your Microsoft Teams developer application.
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Microsoft Teams developer application.
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.refresh_token.description=A Refresh Token to renew the expired Access Token.
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.tenant_id.description=A globally unique identifier (GUID) that is different from your organization name or domain. Follow these steps to obtain it: open one of the Teams you belong to inside the Teams application -> Click on the … next to the Team title -> Click on Get link to team -> Copy the link to the team and grab the tenant ID from the URL
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_id.description=The Client ID of your Microsoft Teams developer application.
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_secret.description=The Client Secret of your Microsoft Teams developer application.
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.tenant_id.description=A globally unique identifier (GUID) that is different from your organization name or domain. Follow these steps to obtain it: open one of the Teams you belong to inside the Teams application -> Click on the … next to the Team title -> Click on Get link to team -> Copy the link to the team and grab the tenant ID from the URL
+datasources.section.source-microsoft-teams.period.description=Specifies the length of time over which the Team Device Report stream is aggregated. The supported values are: D7, D30, D90, and D180.
+datasources.section.source-mixpanel.api_secret.title=Project Secret
+datasources.section.source-mixpanel.attribution_window.title=Attribution Window
+datasources.section.source-mixpanel.date_window_size.title=Date slicing window
+datasources.section.source-mixpanel.end_date.title=End Date
+datasources.section.source-mixpanel.project_timezone.title=Project Timezone
+datasources.section.source-mixpanel.region.title=Region
+datasources.section.source-mixpanel.select_properties_by_default.title=Select Properties By Default
+datasources.section.source-mixpanel.start_date.title=Start Date
+datasources.section.source-mixpanel.api_secret.description=Mixpanel project secret. See the docs for more information on how to obtain this.
+datasources.section.source-mixpanel.attribution_window.description=A period of time for attributing results to ads: the lookback period after those actions occur during which ad results are counted. The default attribution window is 5 days.
+datasources.section.source-mixpanel.date_window_size.description=Defines the window size in days that is used to slice through data. You can reduce it if the amount of data in each window is too big for your environment.
+datasources.section.source-mixpanel.end_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data after this date will not be replicated. Leave empty to always sync to the most recent date.
+datasources.section.source-mixpanel.project_timezone.description=Time zone in which integer date times are stored. The project timezone may be found in the project settings in the Mixpanel console.
+datasources.section.source-mixpanel.region.description=The region of the Mixpanel domain instance, either US or EU.
+datasources.section.source-mixpanel.select_properties_by_default.description=Setting this config parameter to TRUE ensures that new properties on events and engage records are captured. Otherwise new properties will be ignored.
+datasources.section.source-mixpanel.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. If this option is not set, the connector will replicate data from up to one year ago by default.
+datasources.section.source-monday.credentials.oneOf.0.properties.access_token.title=Access Token
+datasources.section.source-monday.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-monday.credentials.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.source-monday.credentials.oneOf.0.properties.subdomain.title=Subdomain/Slug (Optional)
+datasources.section.source-monday.credentials.oneOf.0.title=OAuth2.0
+datasources.section.source-monday.credentials.oneOf.1.properties.api_token.title=Personal API Token
+datasources.section.source-monday.credentials.oneOf.1.title=API Token
+datasources.section.source-monday.credentials.title=Authorization Method
+datasources.section.source-monday.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests.
+datasources.section.source-monday.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application.
+datasources.section.source-monday.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application.
+datasources.section.source-monday.credentials.oneOf.0.properties.subdomain.description=Slug/subdomain of the account, or the first part of the URL that comes before .monday.com
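+# Illustrative example (hypothetical account name, not part of the upstream spec): for an account
+# reachable at https://mycompany.monday.com, the subdomain/slug value would be "mycompany".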
+datasources.section.source-monday.credentials.oneOf.1.properties.api_token.description=API Token for making authenticated requests.
+datasources.section.source-mongodb.auth_source.title=Authentication source
+datasources.section.source-mongodb.database.title=Database name
+datasources.section.source-mongodb.host.title=Host
+datasources.section.source-mongodb.password.title=Password
+datasources.section.source-mongodb.port.title=Port
+datasources.section.source-mongodb.replica_set.title=Replica Set
+datasources.section.source-mongodb.ssl.title=TLS connection
+datasources.section.source-mongodb.user.title=User
+datasources.section.source-mongodb.auth_source.description=Authentication source where user information is stored. See the Mongo docs for more info.
+datasources.section.source-mongodb.database.description=Database to be replicated.
+datasources.section.source-mongodb.host.description=Host of a Mongo database to be replicated.
+datasources.section.source-mongodb.password.description=Password
+datasources.section.source-mongodb.port.description=Port of a Mongo database to be replicated.
+datasources.section.source-mongodb.replica_set.description=The name of the set to filter servers by, when connecting to a replica set (Under this condition, the 'TLS connection' value automatically becomes 'true'). See the Mongo docs for more info.
+datasources.section.source-mongodb.ssl.description=If this switch is enabled, TLS connections will be used to connect to MongoDB.
+datasources.section.source-mongodb.user.description=User
+datasources.section.source-mongodb-v2.auth_source.title=Authentication Source
+datasources.section.source-mongodb-v2.database.title=Database Name
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.host.title=Host
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.port.title=Port
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.tls.title=TLS Connection
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.title=Standalone MongoDb Instance
+datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.replica_set.title=Replica Set
+datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.server_addresses.title=Server Addresses
+datasources.section.source-mongodb-v2.instance_type.oneOf.1.title=Replica Set
+datasources.section.source-mongodb-v2.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL
+datasources.section.source-mongodb-v2.instance_type.oneOf.2.title=MongoDB Atlas
+datasources.section.source-mongodb-v2.instance_type.title=MongoDb Instance Type
+datasources.section.source-mongodb-v2.password.title=Password
+datasources.section.source-mongodb-v2.user.title=User
+datasources.section.source-mongodb-v2.auth_source.description=The authentication source where the user information is stored.
+datasources.section.source-mongodb-v2.database.description=The database you want to replicate.
+datasources.section.source-mongodb-v2.instance_type.description=The MongoDB instance to connect to. For MongoDB Atlas and Replica Set, a TLS connection is used by default.
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.host.description=The host name of the Mongo database.
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.port.description=The port of the Mongo database.
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.tls.description=Indicates whether TLS encryption protocol will be used to connect to MongoDB. It is recommended to use TLS connection if possible. For more information see documentation.
+datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.replica_set.description=A replica set in MongoDB is a group of mongod processes that maintain the same data set.
+datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify `host`:`port` of each member separated by a comma.
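+# Illustrative example (hypothetical hostnames; 27017 is MongoDB's default port): a three-member
+# replica set would be entered as "mongo1:27017,mongo2:27017,mongo3:27017".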
+datasources.section.source-mongodb-v2.instance_type.oneOf.2.properties.cluster_url.description=The URL of a cluster to connect to.
+datasources.section.source-mongodb-v2.password.description=The password associated with this username.
+datasources.section.source-mongodb-v2.user.description=The username which is used to access the database.
+datasources.section.source-mssql.database.title=Database
+datasources.section.source-mssql.host.title=Host
+datasources.section.source-mssql.jdbc_url_params.title=JDBC URL Params
+datasources.section.source-mssql.password.title=Password
+datasources.section.source-mssql.port.title=Port
+datasources.section.source-mssql.replication_method.oneOf.0.title=Standard
+datasources.section.source-mssql.replication_method.oneOf.1.properties.data_to_sync.title=Data to Sync
+datasources.section.source-mssql.replication_method.oneOf.1.properties.snapshot_isolation.title=Initial Snapshot Isolation Level
+datasources.section.source-mssql.replication_method.oneOf.1.title=Logical Replication (CDC)
+datasources.section.source-mssql.replication_method.title=Replication Method
+datasources.section.source-mssql.schemas.title=Schemas
+datasources.section.source-mssql.ssl_method.oneOf.0.title=Unencrypted
+datasources.section.source-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate)
+datasources.section.source-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate
+datasources.section.source-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate)
+datasources.section.source-mssql.ssl_method.title=SSL Method
+datasources.section.source-mssql.username.title=Username
+datasources.section.source-mssql.database.description=The name of the database.
+datasources.section.source-mssql.host.description=The hostname of the database.
+datasources.section.source-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.source-mssql.password.description=The password associated with the username.
+datasources.section.source-mssql.port.description=The port of the database.
+datasources.section.source-mssql.replication_method.description=The replication method used for extracting data from the database. STANDARD replication requires no setup on the DB side but will not be able to represent deletions incrementally. CDC uses {TBC} to detect inserts, updates, and deletes. This needs to be configured on the source database itself.
+datasources.section.source-mssql.replication_method.oneOf.0.description=Standard replication requires no setup on the DB side but will not be able to represent deletions incrementally.
+datasources.section.source-mssql.replication_method.oneOf.1.description=CDC uses {TBC} to detect inserts, updates, and deletes. This needs to be configured on the source database itself.
+datasources.section.source-mssql.replication_method.oneOf.1.properties.data_to_sync.description=What data should be synced under the CDC. "Existing and New" will read existing data as a snapshot, and sync new changes through CDC. "New Changes Only" will skip the initial snapshot, and only sync new changes through CDC.
+datasources.section.source-mssql.replication_method.oneOf.1.properties.snapshot_isolation.description=Existing data in the database are synced through an initial snapshot. This parameter controls the isolation level that will be used during the initial snapshotting. If you choose the "Snapshot" level, you must enable the snapshot isolation mode on the database.
+datasources.section.source-mssql.schemas.description=The list of schemas to sync from. Defaults to user. Case sensitive.
+datasources.section.source-mssql.ssl_method.description=The encryption method which is used when communicating with the database.
+datasources.section.source-mssql.ssl_method.oneOf.0.description=Data transfer will not be encrypted.
+datasources.section.source-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!)
+datasources.section.source-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.source-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate.
+datasources.section.source-mssql.username.description=The username which is used to access the database.
+datasources.section.source-my-hours.email.title=Email
+datasources.section.source-my-hours.logs_batch_size.title=Time logs batch size
+datasources.section.source-my-hours.password.title=Password
+datasources.section.source-my-hours.start_date.title=Start Date
+datasources.section.source-my-hours.email.description=Your My Hours username
+datasources.section.source-my-hours.logs_batch_size.description=Pagination size used for retrieving logs in days
+datasources.section.source-my-hours.password.description=The password associated with the username
+datasources.section.source-my-hours.start_date.description=Start date for collecting time logs
+datasources.section.source-mysql.database.title=Database
+datasources.section.source-mysql.host.title=Host
+datasources.section.source-mysql.jdbc_url_params.title=JDBC URL Params
+datasources.section.source-mysql.password.title=Password
+datasources.section.source-mysql.port.title=Port
+datasources.section.source-mysql.replication_method.oneOf.0.title=STANDARD
+datasources.section.source-mysql.replication_method.oneOf.1.title=Logical Replication (CDC)
+datasources.section.source-mysql.replication_method.title=Replication Method
+datasources.section.source-mysql.ssl.title=SSL Connection
+datasources.section.source-mysql.ssl_mode.oneOf.0.title=preferred
+datasources.section.source-mysql.ssl_mode.oneOf.1.title=required
+datasources.section.source-mysql.ssl_mode.oneOf.2.properties.ca_certificate.title=CA certificate
+datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_certificate.title=Client certificate
+datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_key.title=Client key
+datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_key_password.title=Client key password (Optional)
+datasources.section.source-mysql.ssl_mode.oneOf.2.title=Verify CA
+datasources.section.source-mysql.ssl_mode.oneOf.3.properties.ca_certificate.title=CA certificate
+datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_certificate.title=Client certificate
+datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_key.title=Client key
+datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_key_password.title=Client key password (Optional)
+datasources.section.source-mysql.ssl_mode.oneOf.3.title=Verify Identity
+datasources.section.source-mysql.ssl_mode.title=SSL modes
+datasources.section.source-mysql.username.title=Username
+datasources.section.source-mysql.database.description=The database name.
+datasources.section.source-mysql.host.description=The host name of the database.
+datasources.section.source-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.source-mysql.password.description=The password associated with the username.
+datasources.section.source-mysql.port.description=The port to connect to.
+datasources.section.source-mysql.replication_method.description=Replication method to use for extracting data from the database.
+datasources.section.source-mysql.replication_method.oneOf.0.description=Standard replication requires no setup on the DB side but will not be able to represent deletions incrementally.
+datasources.section.source-mysql.replication_method.oneOf.1.description=CDC uses the Binlog to detect inserts, updates, and deletes. This needs to be configured on the source database itself.
+datasources.section.source-mysql.ssl.description=Encrypt data using SSL.
+datasources.section.source-mysql.ssl_mode.description=SSL connection modes.
+datasources.section.destination-kafka.bootstrap_servers.description=A list of host/port pairs in the form host1:port1,host2:port2,... used for establishing the initial connection to the Kafka cluster. Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down).
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server.
+datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses.
+datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.
+datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer.
+datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns.
+datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream.
+datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request.
+datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block.
+datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5.
+datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes.
+datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers.
+datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.
+datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.
+datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error.
+datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used.
+datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum.
+datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established.
+datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka.
+datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
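+# Illustrative example (hypothetical values): with a topic pattern of "{namespace}.{stream}", a record
+# from namespace "public" and stream "users" would be routed to a topic derived from "public.users",
+# subject to the topic-name normalization mentioned above.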
+datasources.section.destination-keen.api_key.title=API Key
+datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp
+datasources.section.destination-keen.project_id.title=Project ID
+datasources.section.destination-keen.api_key.description=To get the Keen Master API Key, navigate to the Access tab from the left-hand side panel and check the Project Details section.
+datasources.section.destination-keen.infer_timestamp.description=Allow connector to guess keen.timestamp value based on the streamed data.
+datasources.section.destination-keen.project_id.description=To get the Keen Project ID, navigate to the Access tab from the left-hand side panel and check the Project Details section.
+datasources.section.destination-kinesis.accessKey.title=Access Key
+datasources.section.destination-kinesis.bufferSize.title=Buffer Size
+datasources.section.destination-kinesis.endpoint.title=Endpoint
+datasources.section.destination-kinesis.privateKey.title=Private Key
+datasources.section.destination-kinesis.region.title=Region
+datasources.section.destination-kinesis.shardCount.title=Shard Count
+datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for current user.
+datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing kinesis records before being batch streamed.
+datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint.
+datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that is unique for each account, also known as a "recovery phrase".
+datasources.section.destination-kinesis.region.description=AWS region. Your account determines the Regions that are available to you.
+datasources.section.destination-kinesis.shardCount.description=Number of shards to which the data should be streamed.
+datasources.section.destination-kvdb.bucket_id.title=Bucket ID
+datasources.section.destination-kvdb.secret_key.title=Secret Key
+datasources.section.destination-kvdb.bucket_id.description=The ID of your KVdb bucket.
+datasources.section.destination-kvdb.secret_key.description=Your bucket Secret Key.
+datasources.section.destination-local-json.destination_path.title=Destination Path
+datasources.section.destination-local-json.destination_path.description=Path to the directory where JSON files will be written. The files will be placed inside that local mount. For more information, check out our docs.
+datasources.section.destination-mariadb-columnstore.database.title=Database
+datasources.section.destination-mariadb-columnstore.host.title=Host
+datasources.section.destination-mariadb-columnstore.password.title=Password
+datasources.section.destination-mariadb-columnstore.port.title=Port
+datasources.section.destination-mariadb-columnstore.username.title=Username
+datasources.section.destination-mariadb-columnstore.database.description=Name of the database.
+datasources.section.destination-mariadb-columnstore.host.description=The Hostname of the database.
+datasources.section.destination-mariadb-columnstore.password.description=The Password associated with the username.
+datasources.section.destination-mariadb-columnstore.port.description=The Port of the database.
+datasources.section.destination-mariadb-columnstore.username.description=The Username which is used to access the database.
+datasources.section.destination-meilisearch.api_key.title=API Key
+datasources.section.destination-meilisearch.host.title=Host
+datasources.section.destination-meilisearch.api_key.description=MeiliSearch API Key. See the docs for more information on how to obtain this key.
+datasources.section.destination-meilisearch.host.description=Hostname of the MeiliSearch instance.
+datasources.section.destination-mongodb.auth_type.oneOf.0.title=None
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.title=Password
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.title=User
+datasources.section.destination-mongodb.auth_type.oneOf.1.title=Login/Password
+datasources.section.destination-mongodb.auth_type.title=Authorization type
+datasources.section.destination-mongodb.database.title=DB Name
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.title=Host
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.title=Port
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.title=TLS Connection
+datasources.section.destination-mongodb.instance_type.oneOf.0.title=Standalone MongoDb Instance
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.title=Server addresses
+datasources.section.destination-mongodb.instance_type.oneOf.1.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL
+datasources.section.destination-mongodb.instance_type.oneOf.2.title=MongoDB Atlas
+datasources.section.destination-mongodb.instance_type.title=MongoDb Instance Type
+datasources.section.destination-mongodb.auth_type.description=Authorization type.
+datasources.section.destination-mongodb.auth_type.oneOf.0.description=None.
+datasources.section.destination-mongodb.auth_type.oneOf.1.description=Login/Password.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.description=Password associated with the username.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.description=Username to use to access the database.
+datasources.section.destination-mongodb.database.description=Name of the database.
+datasources.section.destination-mongodb.instance_type.description=MongoDB instance to connect to. For MongoDB Atlas and Replica Set, a TLS connection is used by default.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.description=The Host of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.description=The Port of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.description=Indicates whether TLS encryption protocol will be used to connect to MongoDB. It is recommended to use TLS connection if possible. For more information see documentation.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.description=A replica set name.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify `host`:`port` of each member separated by a comma.
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.description=URL of a cluster to connect to.
+datasources.section.destination-mqtt.automatic_reconnect.title=Automatic reconnect
+datasources.section.destination-mqtt.broker_host.title=MQTT broker host
+datasources.section.destination-mqtt.broker_port.title=MQTT broker port
+datasources.section.destination-mqtt.clean_session.title=Clean session
+datasources.section.destination-mqtt.client.title=Client ID
+datasources.section.destination-mqtt.connect_timeout.title=Connect timeout
+datasources.section.destination-mqtt.message_qos.title=Message QoS
+datasources.section.destination-mqtt.message_retained.title=Message retained
+datasources.section.destination-mqtt.password.title=Password
+datasources.section.destination-mqtt.publisher_sync.title=Sync publisher
+datasources.section.destination-mqtt.topic_pattern.title=Topic pattern
+datasources.section.destination-mqtt.topic_test.title=Test topic
+datasources.section.destination-mqtt.use_tls.title=Use TLS
+datasources.section.destination-mqtt.username.title=Username
+datasources.section.destination-mqtt.automatic_reconnect.description=Whether the client will automatically attempt to reconnect to the server if the connection is lost.
+datasources.section.destination-mqtt.broker_host.description=Host of the broker to connect to.
+datasources.section.destination-mqtt.broker_port.description=Port of the broker.
+datasources.section.destination-mqtt.clean_session.description=Whether the client and server should remember state across restarts and reconnects.
+datasources.section.destination-mqtt.client.description=A client identifier that is unique on the server being connected to.
+datasources.section.destination-mqtt.connect_timeout.description=Maximum time interval (in seconds) the client will wait for the network connection to the MQTT server to be established.
+datasources.section.destination-mqtt.message_qos.description=Quality of service used for each message to be delivered.
+datasources.section.destination-mqtt.message_retained.description=Whether or not the publish message should be retained by the messaging engine.
+datasources.section.destination-mqtt.password.description=Password to use for the connection.
+datasources.section.destination-mqtt.publisher_sync.description=Wait synchronously until the record has been sent to the broker.
+datasources.section.destination-mqtt.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-mqtt.topic_test.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-mqtt.use_tls.description=Whether to use TLS encryption on the connection.
+datasources.section.destination-mqtt.username.description=User name to use for the connection.
+datasources.section.destination-mssql.database.title=DB Name
+datasources.section.destination-mssql.host.title=Host
+datasources.section.destination-mssql.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-mssql.password.title=Password
+datasources.section.destination-mssql.port.title=Port
+datasources.section.destination-mssql.schema.title=Default Schema
+datasources.section.destination-mssql.ssl_method.oneOf.0.title=Unencrypted
+datasources.section.destination-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate)
+datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate
+datasources.section.destination-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate)
+datasources.section.destination-mssql.ssl_method.title=SSL Method
+datasources.section.destination-mssql.username.title=User
+datasources.section.destination-mssql.database.description=The name of the MSSQL database.
+datasources.section.destination-mssql.host.description=The host name of the MSSQL database.
+datasources.section.destination-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-mssql.password.description=The password associated with this username.
+datasources.section.destination-mssql.port.description=The port of the MSSQL database.
+datasources.section.destination-mssql.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public".
+datasources.section.destination-mssql.ssl_method.description=The encryption method which is used to communicate with the database.
+datasources.section.destination-mssql.ssl_method.oneOf.0.description=The data transfer will not be encrypted.
+datasources.section.destination-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!)
+datasources.section.destination-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate.
+datasources.section.destination-mssql.username.description=The username which is used to access the database.
+datasources.section.destination-mysql.database.title=DB Name
+datasources.section.destination-mysql.host.title=Host
+datasources.section.destination-mysql.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-mysql.password.title=Password
+datasources.section.destination-mysql.port.title=Port
+datasources.section.destination-mysql.ssl.title=SSL Connection
+datasources.section.destination-mysql.username.title=User
+datasources.section.destination-mysql.database.description=Name of the database.
+datasources.section.destination-mysql.host.description=Hostname of the database.
+datasources.section.destination-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-mysql.password.description=Password associated with the username.
+datasources.section.destination-mysql.port.description=Port of the database.
+datasources.section.destination-mysql.ssl.description=Encrypt data using SSL.
+datasources.section.destination-mysql.username.description=Username to use to access the database.
+datasources.section.destination-oracle.encryption.oneOf.0.title=Unencrypted
+datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.title=Encryption Algorithm
+datasources.section.destination-oracle.encryption.oneOf.1.title=Native Network Encryption (NNE)
+datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.title=SSL PEM file
+datasources.section.destination-oracle.encryption.oneOf.2.title=TLS Encrypted (verify certificate)
+datasources.section.destination-oracle.encryption.title=Encryption
+datasources.section.destination-oracle.host.title=Host
+datasources.section.destination-oracle.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-oracle.password.title=Password
+datasources.section.destination-oracle.port.title=Port
+datasources.section.destination-oracle.schema.title=Default Schema
+datasources.section.destination-oracle.sid.title=SID
+datasources.section.destination-oracle.username.title=User
+datasources.section.destination-oracle.encryption.description=The encryption method which is used when communicating with the database.
+datasources.section.destination-oracle.encryption.oneOf.0.description=Data transfer will not be encrypted.
+datasources.section.destination-oracle.encryption.oneOf.1.description=The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports.
+datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.description=This parameter defines the database encryption algorithm.
+datasources.section.destination-oracle.encryption.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations.
+datasources.section.destination-oracle.host.description=The hostname of the database.
+datasources.section.destination-oracle.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-oracle.password.description=The password associated with the username.
+datasources.section.destination-oracle.port.description=The port of the database.
+datasources.section.destination-oracle.schema.description=The default schema is used as the target schema for all statements issued from the connection that do not explicitly specify a schema name. The usual value for this field is "airbyte". In Oracle, schemas and users are the same thing, so the "user" parameter is used as the login credentials and this is used for the default Airbyte message schema.
+datasources.section.destination-oracle.sid.description=The System Identifier uniquely distinguishes the instance from any other instance on the same computer.
+datasources.section.destination-oracle.username.description=The username to access the database. This user must have CREATE USER privileges in the database.
+datasources.section.destination-postgres.database.title=DB Name
+datasources.section.destination-postgres.host.title=Host
+datasources.section.destination-postgres.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-postgres.password.title=Password
+datasources.section.destination-postgres.port.title=Port
+datasources.section.destination-postgres.schema.title=Default Schema
+datasources.section.destination-postgres.ssl.title=SSL Connection
+datasources.section.destination-postgres.ssl_mode.oneOf.0.title=disable
+datasources.section.destination-postgres.ssl_mode.oneOf.1.title=allow
+datasources.section.destination-postgres.ssl_mode.oneOf.2.title=prefer
+datasources.section.destination-postgres.ssl_mode.oneOf.3.title=require
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.title=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.title=Client key password (Optional)
+datasources.section.destination-postgres.ssl_mode.oneOf.4.title=verify-ca
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.title=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.title=Client certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.title=Client key
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.title=Client key password (Optional)
+datasources.section.destination-postgres.ssl_mode.oneOf.5.title=verify-full
+datasources.section.destination-postgres.ssl_mode.title=SSL modes
+datasources.section.destination-postgres.username.title=User
+datasources.section.destination-postgres.database.description=Name of the database.
+datasources.section.destination-postgres.host.description=Hostname of the database.
+datasources.section.destination-postgres.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-postgres.password.description=Password associated with the username.
+datasources.section.destination-postgres.port.description=Port of the database.
+datasources.section.destination-postgres.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public".
+datasources.section.destination-postgres.ssl.description=Encrypt data using SSL. When activating SSL, please select one of the connection modes.
+datasources.section.destination-postgres.ssl_mode.description=SSL connection modes.
+datasources.section.destination-postgres.ssl_mode.oneOf.0.description=Disable SSL.
+datasources.section.destination-postgres.ssl_mode.oneOf.1.description=Allow SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.2.description=Prefer SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.3.description=Require SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.4.description=Verify-ca SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.description=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.description=Password for the key storage. This field is optional; if you do not add it, the password will be generated automatically.
+datasources.section.destination-postgres.ssl_mode.oneOf.5.description=Verify-full SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.description=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.description=Client certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.description=Client key
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.description=Password for the key storage. This field is optional; if you do not add it, the password will be generated automatically.
+datasources.section.destination-postgres.username.description=Username to use to access the database.
+datasources.section.destination-pubsub.credentials_json.title=Credentials JSON
+datasources.section.destination-pubsub.project_id.title=Project ID
+datasources.section.destination-pubsub.topic_id.title=PubSub Topic ID
+datasources.section.destination-pubsub.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key.
+datasources.section.destination-pubsub.project_id.description=The GCP project ID for the project containing the target PubSub.
+datasources.section.destination-pubsub.topic_id.description=The PubSub topic ID in the given GCP project ID.
+datasources.section.destination-pulsar.batching_enabled.title=Enable batching
+datasources.section.destination-pulsar.batching_max_messages.title=Batching max messages
+datasources.section.destination-pulsar.batching_max_publish_delay.title=Batching max publish delay
+datasources.section.destination-pulsar.block_if_queue_full.title=Block if queue is full
+datasources.section.destination-pulsar.brokers.title=Pulsar brokers
+datasources.section.destination-pulsar.compression_type.title=Compression type
+datasources.section.destination-pulsar.max_pending_messages.title=Max pending messages
+datasources.section.destination-pulsar.max_pending_messages_across_partitions.title=Max pending messages across partitions
+datasources.section.destination-pulsar.producer_name.title=Producer name
+datasources.section.destination-pulsar.producer_sync.title=Sync producer
+datasources.section.destination-pulsar.send_timeout_ms.title=Message send timeout
+datasources.section.destination-pulsar.topic_namespace.title=Topic namespace
+datasources.section.destination-pulsar.topic_pattern.title=Topic pattern
+datasources.section.destination-pulsar.topic_tenant.title=Topic tenant
+datasources.section.destination-pulsar.topic_test.title=Test topic
+datasources.section.destination-pulsar.topic_type.title=Topic type
+datasources.section.destination-pulsar.use_tls.title=Use TLS
+datasources.section.destination-pulsar.batching_enabled.description=Control whether automatic batching of messages is enabled for the producer.
+datasources.section.destination-pulsar.batching_max_messages.description=Maximum number of messages permitted in a batch.
+datasources.section.destination-pulsar.batching_max_publish_delay.description=Time period in milliseconds within which the messages sent will be batched.
+datasources.section.destination-pulsar.block_if_queue_full.description=If the send operation should block when the outgoing message queue is full.
+datasources.section.destination-pulsar.brokers.description=A list of host/port pairs to use for establishing the initial connection to the Pulsar cluster.
+datasources.section.destination-pulsar.compression_type.description=Compression type for the producer.
+datasources.section.destination-pulsar.max_pending_messages.description=The maximum size of a queue holding pending messages.
+datasources.section.destination-pulsar.max_pending_messages_across_partitions.description=The maximum number of pending messages across partitions.
+datasources.section.destination-pulsar.producer_name.description=Name for the producer. If not filled, the system will generate a globally unique name.
+datasources.section.destination-pulsar.producer_sync.description=Wait synchronously until the record has been sent to Pulsar.
+datasources.section.destination-pulsar.send_timeout_ms.description=If a message is not acknowledged by a server before the send-timeout expires, an error occurs (in ms).
+datasources.section.destination-pulsar.topic_namespace.description=The administrative unit of the topic, which acts as a grouping mechanism for related topics. Most topic configuration is performed at the namespace level. Each tenant has one or multiple namespaces.
+datasources.section.destination-pulsar.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-pulsar.topic_tenant.description=The topic tenant within the instance. Tenants are essential to multi-tenancy in Pulsar and can be spread across clusters.
+datasources.section.destination-pulsar.topic_test.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-pulsar.topic_type.description=Identifies the type of topic. Pulsar supports two kinds of topics: persistent and non-persistent. In a persistent topic, all messages are durably persisted on disk (that means on multiple disks unless the broker is standalone), whereas a non-persistent topic does not persist messages to storage disk.
+datasources.section.destination-pulsar.use_tls.description=Whether to use TLS encryption on the connection.
+datasources.section.destination-rabbitmq.exchange.description=The exchange name.
+datasources.section.destination-rabbitmq.host.description=The RabbitMQ host name.
+datasources.section.destination-rabbitmq.password.description=The password to connect.
+datasources.section.destination-rabbitmq.port.description=The RabbitMQ port.
+datasources.section.destination-rabbitmq.routing_key.description=The routing key.
+datasources.section.destination-rabbitmq.ssl.description=SSL enabled.
+datasources.section.destination-rabbitmq.username.description=The username to connect.
+datasources.section.destination-rabbitmq.virtual_host.description=The RabbitMQ virtual host name.
+datasources.section.destination-redis.cache_type.title=Cache type
+datasources.section.destination-redis.host.title=Host
+datasources.section.destination-redis.password.title=Password
+datasources.section.destination-redis.port.title=Port
+datasources.section.destination-redis.username.title=Username
+datasources.section.destination-redis.cache_type.description=Redis cache type to store data in.
+datasources.section.destination-redis.host.description=Redis host to connect to.
+datasources.section.destination-redis.password.description=Password associated with Redis.
+datasources.section.destination-redis.port.description=Port of Redis.
+datasources.section.destination-redis.username.description=Username associated with Redis.
+datasources.section.destination-redshift.database.title=Database
+datasources.section.destination-redshift.host.title=Host
+datasources.section.destination-redshift.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-redshift.password.title=Password
+datasources.section.destination-redshift.port.title=Port
+datasources.section.destination-redshift.schema.title=Default Schema
+datasources.section.destination-redshift.uploading_method.oneOf.0.title=Standard
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.title=S3 Key Id
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.title=No encryption
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.title=Key
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.title=AES-CBC envelope encryption
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.title=Encryption
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.title=S3 Filename pattern (Optional)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.title=Purge Staging Files and Tables (Optional)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.title=S3 Bucket Name
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.title=S3 Bucket Path (Optional)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.title=S3 Bucket Region
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.title=S3 Access Key
+datasources.section.destination-redshift.uploading_method.oneOf.1.title=S3 Staging
+datasources.section.destination-redshift.uploading_method.title=Uploading Method
+datasources.section.destination-redshift.username.title=Username
+datasources.section.destination-redshift.database.description=Name of the database.
+datasources.section.destination-redshift.host.description=Host Endpoint of the Redshift Cluster (must include the cluster-id, region and end with .redshift.amazonaws.com)
+datasources.section.destination-redshift.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-redshift.password.description=Password associated with the username.
+datasources.section.destination-redshift.port.description=Port of the database.
+datasources.section.destination-redshift.schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public".
+datasources.section.destination-redshift.uploading_method.description=The method used to upload data to the database.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.description=This ID grants access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. See AWS docs on how to generate an access key ID and secret access key.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.description=How to encrypt the staging data
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.description=Staging data will be stored in plaintext.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.description=Staging data will be encrypted using AES-CBC envelope encryption.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.description=The key, base64-encoded. Must be either 128, 192, or 256 bits. Leave blank to have Airbyte generate an ephemeral key for each sync.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.description=Whether to delete the staging files from S3 after completing the sync. See docs for details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.description=The name of the staging S3 bucket to use if utilising a COPY strategy. COPY is recommended for production workloads for better speed and scalability. See AWS docs for more details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. If not provided, then defaults to the root directory. See path's name recommendations for more details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a COPY strategy. See AWS docs for details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.description=The corresponding secret to the above access key id. See AWS docs on how to generate an access key ID and secret access key.
+datasources.section.destination-redshift.username.description=Username to use to access the database.
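+# Illustrative sketch only (not verified against the connector spec): an "S3 Staging"
+# uploading_method object assembled from the property names listed above; the method
+# discriminator value and all other values are placeholders.
+#   "uploading_method": {
+#     "method": "S3 Staging",
+#     "s3_bucket_name": "my-staging-bucket",
+#     "s3_bucket_path": "airbyte-staging",
+#     "s3_bucket_region": "us-east-1",
+#     "access_key_id": "<aws-access-key-id>",
+#     "secret_access_key": "<aws-secret-access-key>",
+#     "purge_staging_data": true
+#   }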
+datasources.section.destination-rockset.api_key.title=API Key
+datasources.section.destination-rockset.api_server.title=API Server
+datasources.section.destination-rockset.workspace.title=Workspace
+datasources.section.destination-rockset.api_key.description=Rockset API key
+datasources.section.destination-rockset.api_server.description=Rockset API URL
+datasources.section.destination-rockset.workspace.description=The Rockset workspace in which collections will be created and written to.
+datasources.section.destination-s3.access_key_id.title=S3 Key ID *
+datasources.section.destination-s3.file_name_pattern.title=S3 Filename pattern (Optional)
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate Level
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.title=Compression Codec *
+datasources.section.destination-s3.format.oneOf.0.properties.format_type.title=Format Type *
+datasources.section.destination-s3.format.oneOf.0.title=Avro: Apache Avro
+datasources.section.destination-s3.format.oneOf.1.properties.compression.oneOf.0.title=No Compression
+datasources.section.destination-s3.format.oneOf.1.properties.compression.oneOf.1.title=GZIP
+datasources.section.destination-s3.format.oneOf.1.properties.compression.title=Compression
+datasources.section.destination-s3.format.oneOf.1.properties.flattening.title=Normalization (Flattening)
+datasources.section.destination-s3.format.oneOf.1.properties.format_type.title=Format Type *
+datasources.section.destination-s3.format.oneOf.1.title=CSV: Comma-Separated Values
+datasources.section.destination-s3.format.oneOf.2.properties.compression.oneOf.0.title=No Compression
+datasources.section.destination-s3.format.oneOf.2.properties.compression.oneOf.1.title=GZIP
+datasources.section.destination-s3.format.oneOf.2.properties.compression.title=Compression
+datasources.section.destination-s3.format.oneOf.2.properties.format_type.title=Format Type *
+datasources.section.destination-s3.format.oneOf.2.title=JSON Lines: Newline-delimited JSON
+datasources.section.destination-s3.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.format_type.title=Format Type *
+datasources.section.destination-s3.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional)
+datasources.section.destination-s3.format.oneOf.3.title=Parquet: Columnar Storage
+datasources.section.destination-s3.format.title=Output Format *
+datasources.section.destination-s3.s3_bucket_name.title=S3 Bucket Name
+datasources.section.destination-s3.s3_bucket_path.title=S3 Bucket Path
+datasources.section.destination-s3.s3_bucket_region.title=S3 Bucket Region
+datasources.section.destination-s3.s3_endpoint.title=Endpoint (Optional)
+datasources.section.destination-s3.s3_path_format.title=S3 Path Format (Optional)
+datasources.section.destination-s3.secret_access_key.title=S3 Access Key *
+datasources.section.destination-s3.access_key_id.description=The access key ID to access the S3 bucket. Airbyte requires Read and Write permissions to the given bucket. Read more here.
+datasources.section.destination-s3.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s)
+datasources.section.destination-s3.format.description=Format of the data output. See here for more details
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Defaults to no compression.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=See here for details.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block.
+datasources.section.destination-s3.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz").
+datasources.section.destination-s3.format.oneOf.1.properties.flattening.description=Whether the input JSON data should be normalized (flattened) in the output CSV. Please refer to docs for details.
+datasources.section.destination-s3.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz").
+datasources.section.destination-s3.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB.
+datasources.section.destination-s3.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages.
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_encoding.description=Default: true.
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size, but for the dictionary. Default: 1024 KB.
+datasources.section.destination-s3.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. This is also the minimum size of a row group. Default: 8 MB.
+datasources.section.destination-s3.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB.
+datasources.section.destination-s3.s3_bucket_name.description=The name of the S3 bucket. Read more here.
+datasources.section.destination-s3.s3_bucket_path.description=Directory under the S3 bucket where data will be written. Read more here
+datasources.section.destination-s3.s3_bucket_region.description=The region of the S3 bucket. See here for all region codes.
+datasources.section.destination-s3.s3_endpoint.description=Your S3 endpoint url. Read more here
+datasources.section.destination-s3.s3_path_format.description=Format string on how data will be organized inside the S3 bucket directory. Read more here
+datasources.section.destination-s3.secret_access_key.description=The corresponding secret to the access key ID. Read more here
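+# Illustrative sketch only: an Avro output format object built from the format.oneOf
+# property names above. The "codec" field name and the enum values are assumptions,
+# not taken from the connector spec.
+#   "format": {
+#     "format_type": "Avro",
+#     "compression_codec": { "codec": "zstandard", "compression_level": 5, "include_checksum": true }
+#   }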
+datasources.section.destination-scylla.address.title=Address
+datasources.section.destination-scylla.keyspace.title=Keyspace
+datasources.section.destination-scylla.password.title=Password
+datasources.section.destination-scylla.port.title=Port
+datasources.section.destination-scylla.replication.title=Replication factor
+datasources.section.destination-scylla.username.title=Username
+datasources.section.destination-scylla.address.description=Address to connect to.
+datasources.section.destination-scylla.keyspace.description=Default Scylla keyspace to create data in.
+datasources.section.destination-scylla.password.description=Password associated with Scylla.
+datasources.section.destination-scylla.port.description=Port of Scylla.
+datasources.section.destination-scylla.replication.description=Indicates how many nodes the data should be replicated to.
+datasources.section.destination-scylla.username.description=Username to use to access Scylla.
+datasources.section.destination-sftp-json.destination_path.title=Destination path
+datasources.section.destination-sftp-json.host.title=Host
+datasources.section.destination-sftp-json.password.title=Password
+datasources.section.destination-sftp-json.port.title=Port
+datasources.section.destination-sftp-json.username.title=User
+datasources.section.destination-sftp-json.destination_path.description=Path to the directory where JSON files will be written.
+datasources.section.destination-sftp-json.host.description=Hostname of the SFTP server.
+datasources.section.destination-sftp-json.password.description=Password associated with the username.
+datasources.section.destination-sftp-json.port.description=Port of the SFTP server.
+datasources.section.destination-sftp-json.username.description=Username to use to access the SFTP server.
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.access_token.title=Access Token
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.refresh_token.title=Refresh Token
+datasources.section.destination-snowflake.credentials.oneOf.0.title=OAuth2.0
+datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key.title=Private Key
+datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key_password.title=Passphrase (Optional)
+datasources.section.destination-snowflake.credentials.oneOf.1.title=Key Pair Authentication
+datasources.section.destination-snowflake.credentials.oneOf.2.properties.password.title=Password
+datasources.section.destination-snowflake.credentials.oneOf.2.title=Username and Password
+datasources.section.destination-snowflake.credentials.title=Authorization Method
+datasources.section.destination-snowflake.database.title=Database
+datasources.section.destination-snowflake.host.title=Host
+datasources.section.destination-snowflake.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-snowflake.loading_method.oneOf.0.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.0.title=Select another option
+datasources.section.destination-snowflake.loading_method.oneOf.1.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.1.title=[Recommended] Internal Staging
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.access_key_id.title=AWS access key ID
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.0.title=No encryption
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.properties.key_encrypting_key.title=Key
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.title=AES-CBC envelope encryption
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.title=Encryption
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.file_name_pattern.title=S3 Filename pattern (Optional)
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.purge_staging_data.title=Purge Staging Files and Tables
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_name.title=S3 Bucket Name
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_region.title=S3 Bucket Region
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.secret_access_key.title=AWS secret access key
+datasources.section.destination-snowflake.loading_method.oneOf.2.title=AWS S3 Staging
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.bucket_name.title=Cloud Storage bucket name
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.credentials_json.title=Google Application Credentials
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.project_id.title=Google Cloud project ID
+datasources.section.destination-snowflake.loading_method.oneOf.3.title=Google Cloud Storage Staging
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_account_name.title=Azure Blob Storage account name
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_container_name.title=Azure Blob Storage Container Name
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_endpoint_domain_name.title=Azure Blob Storage Endpoint
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_sas_token.title=SAS Token
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.4.title=Azure Blob Storage Staging
+datasources.section.destination-snowflake.loading_method.title=Data Staging Method
+datasources.section.destination-snowflake.role.title=Role
+datasources.section.destination-snowflake.schema.title=Default Schema
+datasources.section.destination-snowflake.username.title=Username
+datasources.section.destination-snowflake.warehouse.title=Warehouse
+datasources.section.destination-snowflake.credentials.description=
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.access_token.description=Enter your application's Access Token
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_id.description=Enter your application's Client ID
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_secret.description=Enter your application's Client secret
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.refresh_token.description=Enter your application's Refresh Token
+datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key.description=RSA Private key to use for Snowflake connection. See the docs for more information on how to obtain this key.
+datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key_password.description=Passphrase for private key
+datasources.section.destination-snowflake.credentials.oneOf.2.properties.password.description=Enter the password associated with the username.
+datasources.section.destination-snowflake.database.description=Enter the name of the database you want to sync data into
+datasources.section.destination-snowflake.host.description=Enter your Snowflake account's locator (the host name used to connect to Snowflake).
+datasources.section.source-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses.
+datasources.section.source-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.
+datasources.section.source-kafka.enable_auto_commit.description=If true, the consumer's offset will be periodically committed in the background.
+datasources.section.source-kafka.group_id.description=The Group ID is how you distinguish different consumer groups.
+datasources.section.source-kafka.max_poll_records.description=The maximum number of records returned in a single call to poll(). Note that max_poll_records does not impact the underlying fetching behavior. The consumer will cache the records from each fetch request and return them incrementally from each poll.
+datasources.section.source-kafka.polling_time.description=The amount of time the Kafka connector should try to poll for messages.
+datasources.section.source-kafka.protocol.description=The Protocol used to communicate with brokers.
+datasources.section.source-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=The JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.source-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=The SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.source-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=The JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.source-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=The SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.source-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.
+datasources.section.source-kafka.repeated_calls.description=The number of repeated calls to poll() if no messages were received.
+datasources.section.source-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.
+datasources.section.source-kafka.retry_backoff_ms.description=The amount of time to wait before attempting to retry a failed request to a given topic partition. This avoids repeatedly sending requests in a tight loop under some failure scenarios.
+datasources.section.source-kafka.subscription.description=You can choose to manually assign a list of partitions, or subscribe to all topics matching specified pattern to get dynamically assigned partitions.
+datasources.section.source-kafka.subscription.oneOf.0.properties.subscription_type.description=Manually assign a list of partitions to this consumer. This interface does not allow for incremental assignment and will replace the previous assignment (if there is one).
+datasources.section.source-kafka.subscription.oneOf.1.properties.subscription_type.description=The Topic pattern from which the records will be read.
+datasources.section.source-kafka.test_topic.description=The topic used to test whether Airbyte can consume messages.
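+# Illustrative sketch only: a SASL PLAINTEXT protocol object for the Kafka source,
+# using the security_protocol / sasl_mechanism / sasl_jaas_config properties described
+# above. The JAAS line is a placeholder, not a recommended value.
+#   "protocol": {
+#     "security_protocol": "SASL_PLAINTEXT",
+#     "sasl_mechanism": "PLAIN",
+#     "sasl_jaas_config": "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"user\" password=\"secret\";"
+#   }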
+datasources.section.source-klaviyo.api_key.title=API Key
+datasources.section.source-klaviyo.start_date.title=Start Date
+datasources.section.source-klaviyo.api_key.description=Klaviyo API Key. See our docs if you need help finding this key.
+datasources.section.source-klaviyo.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.
+datasources.section.source-kustomer-singer.api_token.title=API Token
+datasources.section.source-kustomer-singer.start_date.title=Start Date
+datasources.section.source-kustomer-singer.api_token.description=Kustomer API Token. See the docs on how to obtain this
+datasources.section.source-kustomer-singer.start_date.description=The date from which you'd like to replicate the data
+datasources.section.source-kyriba.domain.title=Domain
+datasources.section.source-kyriba.end_date.title=End Date
+datasources.section.source-kyriba.password.title=Password
+datasources.section.source-kyriba.start_date.title=Start Date
+datasources.section.source-kyriba.username.title=Username
+datasources.section.source-kyriba.domain.description=Kyriba domain
+datasources.section.source-kyriba.end_date.description=The date the sync should end. If left empty, the sync will run up to the current date.
+datasources.section.source-kyriba.password.description=Password to be used in basic auth
+datasources.section.source-kyriba.start_date.description=The date the sync should start from.
+datasources.section.source-kyriba.username.description=Username to be used in basic auth
+datasources.section.source-lemlist.api_key.title=API key
+datasources.section.source-lemlist.api_key.description=Lemlist API key.
+datasources.section.source-linkedin-ads.account_ids.title=Account IDs (Optional)
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_secret.title=Client secret
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.refresh_token.title=Refresh token
+datasources.section.source-linkedin-ads.credentials.oneOf.0.title=OAuth2.0
+datasources.section.source-linkedin-ads.credentials.oneOf.1.properties.access_token.title=Access token
+datasources.section.source-linkedin-ads.credentials.oneOf.1.title=Access token
+datasources.section.source-linkedin-ads.credentials.title=Authentication *
+datasources.section.source-linkedin-ads.start_date.title=Start date
+datasources.section.source-linkedin-ads.account_ids.description=Specify the account IDs, separated by a space, to pull data from. Leave empty to pull data from all associated accounts. See the LinkedIn Ads docs for more info.
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_id.description=The client ID of the LinkedIn Ads developer application.
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_secret.description=The client secret of the LinkedIn Ads developer application.
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.refresh_token.description=The key to refresh the expired access token.
+datasources.section.source-linkedin-ads.credentials.oneOf.1.properties.access_token.description=The token value generated using the authentication code. See the docs to obtain yours.
+datasources.section.source-linkedin-ads.start_date.description=UTC date in the format 2020-09-17. Any data before this date will not be replicated.
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_secret.title=Client secret
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.refresh_token.title=Refresh token
+datasources.section.source-linkedin-pages.credentials.oneOf.0.title=OAuth2.0
+datasources.section.source-linkedin-pages.credentials.oneOf.1.properties.access_token.title=Access token
+datasources.section.source-linkedin-pages.credentials.oneOf.1.title=Access token
+datasources.section.source-linkedin-pages.credentials.title=Authentication *
+datasources.section.source-linkedin-pages.org_id.title=Organization ID
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_id.description=The client ID of the LinkedIn developer application.
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_secret.description=The client secret of the LinkedIn developer application.
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.refresh_token.description=The token value generated using the LinkedIn Developers OAuth Token Tools. See the docs to obtain yours.
+datasources.section.source-linkedin-pages.credentials.oneOf.1.properties.access_token.description=The token value generated using the LinkedIn Developers OAuth Token Tools. See the docs to obtain yours.
+datasources.section.source-linkedin-pages.org_id.description=Specify the Organization ID
+datasources.section.source-linnworks.application_id.title=Application ID
+datasources.section.source-linnworks.application_secret.title=Application Secret
+datasources.section.source-linnworks.start_date.title=Start Date
+datasources.section.source-linnworks.token.title=API Token
+datasources.section.source-linnworks.application_id.description=Linnworks Application ID
+datasources.section.source-linnworks.application_secret.description=Linnworks Application Secret
+datasources.section.source-linnworks.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.
+datasources.section.source-looker.client_id.title=Client ID
+datasources.section.source-looker.client_secret.title=Client Secret
+datasources.section.source-looker.domain.title=Domain
+datasources.section.source-looker.run_look_ids.title=Look IDs to Run
+datasources.section.source-looker.client_id.description=The Client ID is the first part of an API3 key that is specific to each Looker user. See the docs for more information on how to generate this key.
+datasources.section.source-looker.client_secret.description=The Client Secret is the second part of an API3 key.
+datasources.section.source-looker.domain.description=Domain for your Looker account, e.g. airbyte.cloud.looker.com, looker.[clientname].com, or an IP address
+datasources.section.source-looker.run_look_ids.description=The IDs of any Looks to run (optional)
+datasources.section.source-mailchimp.credentials.oneOf.0.properties.access_token.title=Access Token
+datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.source-mailchimp.credentials.oneOf.0.title=OAuth2.0
+datasources.section.source-mailchimp.credentials.oneOf.1.properties.apikey.title=API Key
+datasources.section.source-mailchimp.credentials.oneOf.1.title=API Key
+datasources.section.source-mailchimp.credentials.title=Authentication *
+datasources.section.source-mailchimp.credentials.oneOf.0.properties.access_token.description=An access token generated using the above client ID and secret.
+datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application.
+datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application.
+datasources.section.source-mailchimp.credentials.oneOf.1.properties.apikey.description=Mailchimp API Key. See the docs for information on how to generate this key.
+datasources.section.source-mailgun.domain_region.title=Domain Region Code
+datasources.section.source-mailgun.private_key.title=Private API Key
+datasources.section.source-mailgun.start_date.title=Replication Start Date
+datasources.section.source-mailgun.domain_region.description=Domain region code. 'EU' or 'US' are possible values. The default is 'US'.
+datasources.section.source-mailgun.private_key.description=Primary account API key to access your Mailgun data.
+datasources.section.source-mailgun.start_date.description=UTC date and time in the format 2020-10-01 00:00:00. Any data before this date will not be replicated. If omitted, defaults to 3 days ago.
+datasources.section.source-marketo.client_id.title=Client ID
+datasources.section.source-marketo.client_secret.title=Client Secret
+datasources.section.source-marketo.domain_url.title=Domain URL
+datasources.section.source-marketo.start_date.title=Start Date
+datasources.section.source-marketo.client_id.description=The Client ID of your Marketo developer application. See the docs for info on how to obtain this.
+datasources.section.source-marketo.client_secret.description=The Client Secret of your Marketo developer application. See the docs for info on how to obtain this.
+datasources.section.source-marketo.domain_url.description=Your Marketo Base URL. See the docs for info on how to obtain this.
+datasources.section.source-marketo.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.refresh_token.title=Refresh Token
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.tenant_id.title=Directory (tenant) ID
+datasources.section.source-microsoft-teams.credentials.oneOf.0.title=Authenticate via Microsoft (OAuth 2.0)
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_id.title=Client ID
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_secret.title=Client Secret
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.tenant_id.title=Directory (tenant) ID
+datasources.section.source-microsoft-teams.credentials.oneOf.1.title=Authenticate via Microsoft
+datasources.section.source-microsoft-teams.credentials.title=Authentication mechanism
+datasources.section.source-microsoft-teams.period.title=Period
+datasources.section.source-microsoft-teams.credentials.description=Choose how to authenticate to Microsoft
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_id.description=The Client ID of your Microsoft Teams developer application.
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Microsoft Teams developer application.
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.refresh_token.description=A Refresh Token to renew the expired Access Token.
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.tenant_id.description=A globally unique identifier (GUID) that is different from your organization name or domain. Follow these steps to obtain it: open one of the Teams you belong to inside the Teams application -> click the … next to the Team title -> click Get link to team -> copy the link to the team and grab the tenant ID from the URL
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_id.description=The Client ID of your Microsoft Teams developer application.
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_secret.description=The Client Secret of your Microsoft Teams developer application.
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.tenant_id.description=A globally unique identifier (GUID) that is different from your organization name or domain. Follow these steps to obtain it: open one of the Teams you belong to inside the Teams application -> click the … next to the Team title -> click Get link to team -> copy the link to the team and grab the tenant ID from the URL
+datasources.section.source-microsoft-teams.period.description=Specifies the length of time over which the Team Device Report stream is aggregated. The supported values are: D7, D30, D90, and D180.
+datasources.section.source-mixpanel.api_secret.title=Project Secret
+datasources.section.source-mixpanel.attribution_window.title=Attribution Window
+datasources.section.source-mixpanel.date_window_size.title=Date slicing window
+datasources.section.source-mixpanel.end_date.title=End Date
+datasources.section.source-mixpanel.project_timezone.title=Project Timezone
+datasources.section.source-mixpanel.region.title=Region
+datasources.section.source-mixpanel.select_properties_by_default.title=Select Properties By Default
+datasources.section.source-mixpanel.start_date.title=Start Date
+datasources.section.source-mixpanel.api_secret.description=Mixpanel project secret. See the docs for more information on how to obtain this.
+datasources.section.source-mixpanel.attribution_window.description=The period of time for attributing results to ads, and the lookback period after those actions occur during which ad results are counted. The default attribution window is 5 days.
+datasources.section.source-mixpanel.date_window_size.description=Defines the window size in days used to slice through the data. You can reduce it if the amount of data in each window is too big for your environment.
+datasources.section.source-mixpanel.end_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data after this date will not be replicated. Leave empty to always sync to the most recent date.
+datasources.section.source-mixpanel.project_timezone.description=Time zone in which integer date times are stored. The project timezone may be found in the project settings in the Mixpanel console.
+datasources.section.source-mixpanel.region.description=The region of the Mixpanel domain instance, either US or EU.
+datasources.section.source-mixpanel.select_properties_by_default.description=Setting this config parameter to TRUE ensures that new properties on events and engage records are captured. Otherwise new properties will be ignored.
+datasources.section.source-mixpanel.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. If this option is not set, the connector will replicate data from up to one year ago by default.
+datasources.section.source-monday.credentials.oneOf.0.properties.access_token.title=Access Token
+datasources.section.source-monday.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-monday.credentials.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.source-monday.credentials.oneOf.0.properties.subdomain.title=Subdomain/Slug (Optional)
+datasources.section.source-monday.credentials.oneOf.0.title=OAuth2.0
+datasources.section.source-monday.credentials.oneOf.1.properties.api_token.title=Personal API Token
+datasources.section.source-monday.credentials.oneOf.1.title=API Token
+datasources.section.source-monday.credentials.title=Authorization Method
+datasources.section.source-monday.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests.
+datasources.section.source-monday.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application.
+datasources.section.source-monday.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application.
+datasources.section.source-monday.credentials.oneOf.0.properties.subdomain.description=Slug/subdomain of the account, or the first part of the URL that comes before .monday.com
+datasources.section.source-monday.credentials.oneOf.1.properties.api_token.description=API Token for making authenticated requests.
+datasources.section.source-mongodb.auth_source.title=Authentication source
+datasources.section.source-mongodb.database.title=Database name
+datasources.section.source-mongodb.host.title=Host
+datasources.section.source-mongodb.password.title=Password
+datasources.section.source-mongodb.port.title=Port
+datasources.section.source-mongodb.replica_set.title=Replica Set
+datasources.section.source-mongodb.ssl.title=TLS connection
+datasources.section.source-mongodb.user.title=User
+datasources.section.source-mongodb.auth_source.description=Authentication source where user information is stored. See the Mongo docs for more info.
+datasources.section.source-mongodb.database.description=Database to be replicated.
+datasources.section.source-mongodb.host.description=Host of a Mongo database to be replicated.
+datasources.section.source-mongodb.password.description=Password
+datasources.section.source-mongodb.port.description=Port of a Mongo database to be replicated.
+datasources.section.source-mongodb.replica_set.description=The name of the replica set to filter servers by when connecting to a replica set (in this case, the 'TLS connection' value automatically becomes 'true'). See the Mongo docs for more info.
+datasources.section.source-mongodb.ssl.description=If this switch is enabled, TLS connections will be used to connect to MongoDB.
+datasources.section.source-mongodb.user.description=User
+datasources.section.source-mongodb-v2.auth_source.title=Authentication Source
+datasources.section.source-mongodb-v2.database.title=Database Name
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.host.title=Host
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.port.title=Port
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.tls.title=TLS Connection
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.title=Standalone MongoDB Instance
+datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.replica_set.title=Replica Set
+datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.server_addresses.title=Server Addresses
+datasources.section.source-mongodb-v2.instance_type.oneOf.1.title=Replica Set
+datasources.section.source-mongodb-v2.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL
+datasources.section.source-mongodb-v2.instance_type.oneOf.2.title=MongoDB Atlas
+datasources.section.source-mongodb-v2.instance_type.title=MongoDB Instance Type
+datasources.section.source-mongodb-v2.password.title=Password
+datasources.section.source-mongodb-v2.user.title=User
+datasources.section.source-mongodb-v2.auth_source.description=The authentication source where the user information is stored.
+datasources.section.source-mongodb-v2.database.description=The database you want to replicate.
+datasources.section.source-mongodb-v2.instance_type.description=The MongoDB instance to connect to. For MongoDB Atlas and Replica Set, a TLS connection is used by default.
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.host.description=The host name of the Mongo database.
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.port.description=The port of the Mongo database.
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.tls.description=Indicates whether TLS encryption protocol will be used to connect to MongoDB. It is recommended to use TLS connection if possible. For more information see documentation.
+datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.replica_set.description=A replica set in MongoDB is a group of mongod processes that maintain the same data set.
+datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify the `host`:`port` of each member, separated by commas.
+datasources.section.source-mongodb-v2.instance_type.oneOf.2.properties.cluster_url.description=The URL of a cluster to connect to.
+datasources.section.source-mongodb-v2.password.description=The password associated with this username.
+datasources.section.source-mongodb-v2.user.description=The username which is used to access the database.
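+# Illustrative sketch only: a Replica Set instance_type object using the
+# server_addresses and replica_set properties described above. The oneOf selector
+# field is omitted because its name is not shown here; all values are placeholders.
+#   "instance_type": {
+#     "server_addresses": "host1:27017,host2:27017,host3:27017",
+#     "replica_set": "rs0"
+#   }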
+datasources.section.source-mssql.database.title=Database
+datasources.section.source-mssql.host.title=Host
+datasources.section.source-mssql.jdbc_url_params.title=JDBC URL Params
+datasources.section.source-mssql.password.title=Password
+datasources.section.source-mssql.port.title=Port
+datasources.section.source-mssql.replication_method.oneOf.0.title=Standard
+datasources.section.source-mssql.replication_method.oneOf.1.properties.data_to_sync.title=Data to Sync
+datasources.section.source-mssql.replication_method.oneOf.1.properties.snapshot_isolation.title=Initial Snapshot Isolation Level
+datasources.section.source-mssql.replication_method.oneOf.1.title=Logical Replication (CDC)
+datasources.section.source-mssql.replication_method.title=Replication Method
+datasources.section.source-mssql.schemas.title=Schemas
+datasources.section.source-mssql.ssl_method.oneOf.0.title=Unencrypted
+datasources.section.source-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate)
+datasources.section.source-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate
+datasources.section.source-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate)
+datasources.section.source-mssql.ssl_method.title=SSL Method
+datasources.section.source-mssql.username.title=Username
+datasources.section.source-mssql.database.description=The name of the database.
+datasources.section.source-mssql.host.description=The hostname of the database.
+datasources.section.source-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.source-mssql.password.description=The password associated with the username.
+datasources.section.source-mssql.port.description=The port of the database.
+datasources.section.source-mssql.replication_method.description=The replication method used for extracting data from the database. STANDARD replication requires no setup on the DB side but will not be able to represent deletions incrementally. CDC uses {TBC} to detect inserts, updates, and deletes. This needs to be configured on the source database itself.
+datasources.section.source-mssql.replication_method.oneOf.0.description=Standard replication requires no setup on the DB side but will not be able to represent deletions incrementally.
+datasources.section.source-mssql.replication_method.oneOf.1.description=CDC uses {TBC} to detect inserts, updates, and deletes. This needs to be configured on the source database itself.
+datasources.section.source-mssql.replication_method.oneOf.1.properties.data_to_sync.description=What data should be synced under the CDC. "Existing and New" will read existing data as a snapshot, and sync new changes through CDC. "New Changes Only" will skip the initial snapshot, and only sync new changes through CDC.
+datasources.section.source-mssql.replication_method.oneOf.1.properties.snapshot_isolation.description=Existing data in the database are synced through an initial snapshot. This parameter controls the isolation level that will be used during the initial snapshotting. If you choose the "Snapshot" level, you must enable the snapshot isolation mode on the database.
+datasources.section.source-mssql.schemas.description=The list of schemas to sync from. Defaults to user. Case sensitive.
+datasources.section.source-mssql.ssl_method.description=The encryption method which is used when communicating with the database.
+datasources.section.source-mssql.ssl_method.oneOf.0.description=Data transfer will not be encrypted.
+datasources.section.source-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!)
+datasources.section.source-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.source-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate.
+datasources.section.source-mssql.username.description=The username which is used to access the database.
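+# Illustrative sketch only: a CDC replication_method object using the data_to_sync and
+# snapshot_isolation properties described above, with option values quoted from their
+# descriptions; the exact JSON shape is an assumption, not taken from the spec.
+#   "replication_method": {
+#     "data_to_sync": "Existing and New",
+#     "snapshot_isolation": "Snapshot"
+#   }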
+datasources.section.source-my-hours.email.title=Email
+datasources.section.source-my-hours.logs_batch_size.title=Time logs batch size
+datasources.section.source-my-hours.password.title=Password
+datasources.section.source-my-hours.start_date.title=Start Date
+datasources.section.source-my-hours.email.description=Your My Hours username
+datasources.section.source-my-hours.logs_batch_size.description=Pagination size used for retrieving logs in days
+datasources.section.source-my-hours.password.description=The password associated with the username
+datasources.section.source-my-hours.start_date.description=Start date for collecting time logs
+datasources.section.source-mysql.database.title=Database
+datasources.section.source-mysql.host.title=Host
+datasources.section.source-mysql.jdbc_url_params.title=JDBC URL Params
+datasources.section.source-mysql.password.title=Password
+datasources.section.source-mysql.port.title=Port
+datasources.section.source-mysql.replication_method.oneOf.0.title=STANDARD
+datasources.section.source-mysql.replication_method.oneOf.1.title=Logical Replication (CDC)
+datasources.section.source-mysql.replication_method.title=Replication Method
+datasources.section.source-mysql.ssl.title=SSL Connection
+datasources.section.source-mysql.ssl_mode.oneOf.0.title=preferred
+datasources.section.source-mysql.ssl_mode.oneOf.1.title=required
+datasources.section.source-mysql.ssl_mode.oneOf.2.properties.ca_certificate.title=CA certificate
+datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_certificate.title=Client certificate
+datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_key.title=Client key
+datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_key_password.title=Client key password (Optional)
+datasources.section.source-mysql.ssl_mode.oneOf.2.title=Verify CA
+datasources.section.source-mysql.ssl_mode.oneOf.3.properties.ca_certificate.title=CA certificate
+datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_certificate.title=Client certificate
+datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_key.title=Client key
+datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_key_password.title=Client key password (Optional)
+datasources.section.source-mysql.ssl_mode.oneOf.3.title=Verify Identity
+datasources.section.source-mysql.ssl_mode.title=SSL modes
+datasources.section.source-mysql.username.title=Username
+datasources.section.source-mysql.database.description=The database name.
+datasources.section.source-mysql.host.description=The host name of the database.
+datasources.section.source-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.source-mysql.password.description=The password associated with the username.
+datasources.section.source-mysql.port.description=The port to connect to.
+datasources.section.source-mysql.replication_method.description=Replication method to use for extracting data from the database.
+datasources.section.source-mysql.replication_method.oneOf.0.description=Standard replication requires no setup on the DB side but will not be able to represent deletions incrementally.
+datasources.section.source-mysql.replication_method.oneOf.1.description=CDC uses the Binlog to detect inserts, updates, and deletes. This needs to be configured on the source database itself.
+datasources.section.source-mysql.ssl.description=Encrypt data using SSL.
+datasources.section.source-mysql.ssl_mode.description=SSL connection modes.
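+# Illustrative sketch only: a "Verify CA" ssl_mode object using the certificate
+# properties described above. The mode selector name/value and the PEM contents are
+# placeholders, not verified against the connector spec.
+#   "ssl_mode": {
+#     "mode": "verify_ca",
+#     "ca_certificate": "-----BEGIN CERTIFICATE----- ...",
+#     "client_certificate": "-----BEGIN CERTIFICATE----- ...",
+#     "client_key": "-----BEGIN RSA PRIVATE KEY----- ...",
+#     "client_key_password": "<optional passphrase>"
+#   }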
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server.
+datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses.
+datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.
+datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer.
+datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns.
+datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream.
+datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request.
+datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block.
+datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5.
+datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes.
+datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers.
+datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.
+datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.
+datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error.
+datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used.
+datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum.
+datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established.
+datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka.
+datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
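The topic_pattern setting described above (and the matching MQTT and Pulsar settings further down) substitutes '{namespace}' and '{stream}' per record and then normalizes the result to a standard naming convention. A minimal sketch of that substitution, assuming normalization simply replaces characters that are not legal in a Kafka topic name (the connector's actual rules may differ):

```
import re

def resolve_topic(pattern: str, namespace: str, stream: str) -> str:
    """Substitute the {namespace}/{stream} placeholders, then normalize."""
    topic = pattern.replace("{namespace}", namespace).replace("{stream}", stream)
    # Assumption: keep only characters valid in Kafka topic names
    # (alphanumerics, '.', '_' and '-'); replace everything else with '_'.
    return re.sub(r"[^A-Za-z0-9._-]", "_", topic)

# "{namespace}.{stream}.sample" with namespace="public" and stream="users"
# resolves to "public.users.sample".
print(resolve_topic("{namespace}.{stream}.sample", "public", "users"))
```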
+datasources.section.destination-keen.api_key.title=API Key
+datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp
+datasources.section.destination-keen.project_id.title=Project ID
+datasources.section.destination-keen.api_key.description=To get Keen Master API Key, navigate to the Access tab from the left-hand side panel and check the Project Details section.
+datasources.section.destination-keen.infer_timestamp.description=Allow connector to guess keen.timestamp value based on the streamed data.
+datasources.section.destination-keen.project_id.description=To get Keen Project ID, navigate to the Access tab from the left-hand side panel and check the Project Details section.
+datasources.section.destination-kinesis.accessKey.title=Access Key
+datasources.section.destination-kinesis.bufferSize.title=Buffer Size
+datasources.section.destination-kinesis.endpoint.title=Endpoint
+datasources.section.destination-kinesis.privateKey.title=Private Key
+datasources.section.destination-kinesis.region.title=Region
+datasources.section.destination-kinesis.shardCount.title=Shard Count
+datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for the current user.
+datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing Kinesis records before they are batch streamed.
+datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint.
+datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that is unique to each account, also known as a "recovery phrase".
+datasources.section.destination-kinesis.region.description=AWS region. Your account determines the Regions that are available to you.
+datasources.section.destination-kinesis.shardCount.description=Number of shards to which the data should be streamed.
+datasources.section.destination-kvdb.bucket_id.title=Bucket ID
+datasources.section.destination-kvdb.secret_key.title=Secret Key
+datasources.section.destination-kvdb.bucket_id.description=The ID of your KVdb bucket.
+datasources.section.destination-kvdb.secret_key.description=Your bucket Secret Key.
+datasources.section.destination-local-json.destination_path.title=Destination Path
+datasources.section.destination-local-json.destination_path.description=Path to the directory where json files will be written. The files will be placed inside that local mount. For more information check out our docs
+datasources.section.destination-mariadb-columnstore.database.title=Database
+datasources.section.destination-mariadb-columnstore.host.title=Host
+datasources.section.destination-mariadb-columnstore.password.title=Password
+datasources.section.destination-mariadb-columnstore.port.title=Port
+datasources.section.destination-mariadb-columnstore.username.title=Username
+datasources.section.destination-mariadb-columnstore.database.description=Name of the database.
+datasources.section.destination-mariadb-columnstore.host.description=The Hostname of the database.
+datasources.section.destination-mariadb-columnstore.password.description=The Password associated with the username.
+datasources.section.destination-mariadb-columnstore.port.description=The Port of the database.
+datasources.section.destination-mariadb-columnstore.username.description=The Username which is used to access the database.
+datasources.section.destination-meilisearch.api_key.title=API Key
+datasources.section.destination-meilisearch.host.title=Host
+datasources.section.destination-meilisearch.api_key.description=MeiliSearch API Key. See the docs for more information on how to obtain this key.
+datasources.section.destination-meilisearch.host.description=Hostname of the MeiliSearch instance.
+datasources.section.destination-mongodb.auth_type.oneOf.0.title=None
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.title=Password
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.title=User
+datasources.section.destination-mongodb.auth_type.oneOf.1.title=Login/Password
+datasources.section.destination-mongodb.auth_type.title=Authorization type
+datasources.section.destination-mongodb.database.title=DB Name
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.title=Host
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.title=Port
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.title=TLS Connection
+datasources.section.destination-mongodb.instance_type.oneOf.0.title=Standalone MongoDb Instance
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.title=Server addresses
+datasources.section.destination-mongodb.instance_type.oneOf.1.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL
+datasources.section.destination-mongodb.instance_type.oneOf.2.title=MongoDB Atlas
+datasources.section.destination-mongodb.instance_type.title=MongoDb Instance Type
+datasources.section.destination-mongodb.auth_type.description=Authorization type.
+datasources.section.destination-mongodb.auth_type.oneOf.0.description=None.
+datasources.section.destination-mongodb.auth_type.oneOf.1.description=Login/Password.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.description=Password associated with the username.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.description=Username to use to access the database.
+datasources.section.destination-mongodb.database.description=Name of the database.
+datasources.section.destination-mongodb.instance_type.description=MongoDb instance to connect to. For MongoDB Atlas and Replica Set, a TLS connection is used by default.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.description=The Host of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.description=The Port of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.description=Indicates whether the TLS encryption protocol will be used to connect to MongoDB. It is recommended to use a TLS connection if possible. For more information, see the documentation.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.description=A replica set name.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify the `host`:`port` of each member, separated by commas.
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.description=URL of a cluster to connect to.
+datasources.section.destination-mqtt.automatic_reconnect.title=Automatic reconnect
+datasources.section.destination-mqtt.broker_host.title=MQTT broker host
+datasources.section.destination-mqtt.broker_port.title=MQTT broker port
+datasources.section.destination-mqtt.clean_session.title=Clean session
+datasources.section.destination-mqtt.client.title=Client ID
+datasources.section.destination-mqtt.connect_timeout.title=Connect timeout
+datasources.section.destination-mqtt.message_qos.title=Message QoS
+datasources.section.destination-mqtt.message_retained.title=Message retained
+datasources.section.destination-mqtt.password.title=Password
+datasources.section.destination-mqtt.publisher_sync.title=Sync publisher
+datasources.section.destination-mqtt.topic_pattern.title=Topic pattern
+datasources.section.destination-mqtt.topic_test.title=Test topic
+datasources.section.destination-mqtt.use_tls.title=Use TLS
+datasources.section.destination-mqtt.username.title=Username
+datasources.section.destination-mqtt.automatic_reconnect.description=Whether the client will automatically attempt to reconnect to the server if the connection is lost.
+datasources.section.destination-mqtt.broker_host.description=Host of the broker to connect to.
+datasources.section.destination-mqtt.broker_port.description=Port of the broker.
+datasources.section.destination-mqtt.clean_session.description=Whether the client and server should remember state across restarts and reconnects.
+datasources.section.destination-mqtt.client.description=A client identifier that is unique on the server being connected to.
+datasources.section.destination-mqtt.connect_timeout.description=Maximum time interval (in seconds) the client will wait for the network connection to the MQTT server to be established.
+datasources.section.destination-mqtt.message_qos.description=Quality of service used for each message to be delivered.
+datasources.section.destination-mqtt.message_retained.description=Whether or not the publish message should be retained by the messaging engine.
+datasources.section.destination-mqtt.password.description=Password to use for the connection.
+datasources.section.destination-mqtt.publisher_sync.description=Wait synchronously until the record has been sent to the broker.
+datasources.section.destination-mqtt.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-mqtt.topic_test.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-mqtt.use_tls.description=Whether to use TLS encryption on the connection.
+datasources.section.destination-mqtt.username.description=User name to use for the connection.
+datasources.section.destination-mssql.database.title=DB Name
+datasources.section.destination-mssql.host.title=Host
+datasources.section.destination-mssql.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-mssql.password.title=Password
+datasources.section.destination-mssql.port.title=Port
+datasources.section.destination-mssql.schema.title=Default Schema
+datasources.section.destination-mssql.ssl_method.oneOf.0.title=Unencrypted
+datasources.section.destination-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate)
+datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate
+datasources.section.destination-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate)
+datasources.section.destination-mssql.ssl_method.title=SSL Method
+datasources.section.destination-mssql.username.title=User
+datasources.section.destination-mssql.database.description=The name of the MSSQL database.
+datasources.section.destination-mssql.host.description=The host name of the MSSQL database.
+datasources.section.destination-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-mssql.password.description=The password associated with this username.
+datasources.section.destination-mssql.port.description=The port of the MSSQL database.
+datasources.section.destination-mssql.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public".
+datasources.section.destination-mssql.ssl_method.description=The encryption method which is used to communicate with the database.
+datasources.section.destination-mssql.ssl_method.oneOf.0.description=The data transfer will not be encrypted.
+datasources.section.destination-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!)
+datasources.section.destination-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate.
+datasources.section.destination-mssql.username.description=The username which is used to access the database.
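Several connectors in this file (MSSQL, MySQL, Oracle, Postgres, Redshift) share the JDBC URL Params description above: extra 'key=value' pairs joined by '&' that get appended to the JDBC URL. A rough illustration of what that appending amounts to, assuming a query-string style URL such as Postgres uses (separators differ per driver, e.g. SQL Server uses ';'):

```
def build_jdbc_url(base_url: str, jdbc_url_params: str) -> str:
    """Append user-supplied 'key1=value1&key2=value2' params to a JDBC URL."""
    if not jdbc_url_params:
        return base_url
    separator = "&" if "?" in base_url else "?"
    return base_url + separator + jdbc_url_params

# Using the example string from the description above:
print(build_jdbc_url("jdbc:postgresql://host:5432/db",
                     "key1=value1&key2=value2&key3=value3"))
# -> jdbc:postgresql://host:5432/db?key1=value1&key2=value2&key3=value3
```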
+datasources.section.destination-mysql.database.title=DB Name
+datasources.section.destination-mysql.host.title=Host
+datasources.section.destination-mysql.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-mysql.password.title=Password
+datasources.section.destination-mysql.port.title=Port
+datasources.section.destination-mysql.ssl.title=SSL Connection
+datasources.section.destination-mysql.username.title=User
+datasources.section.destination-mysql.database.description=Name of the database.
+datasources.section.destination-mysql.host.description=Hostname of the database.
+datasources.section.destination-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-mysql.password.description=Password associated with the username.
+datasources.section.destination-mysql.port.description=Port of the database.
+datasources.section.destination-mysql.ssl.description=Encrypt data using SSL.
+datasources.section.destination-mysql.username.description=Username to use to access the database.
+datasources.section.destination-oracle.encryption.oneOf.0.title=Unencrypted
+datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.title=Encryption Algorithm
+datasources.section.destination-oracle.encryption.oneOf.1.title=Native Network Encryption (NNE)
+datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.title=SSL PEM file
+datasources.section.destination-oracle.encryption.oneOf.2.title=TLS Encrypted (verify certificate)
+datasources.section.destination-oracle.encryption.title=Encryption
+datasources.section.destination-oracle.host.title=Host
+datasources.section.destination-oracle.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-oracle.password.title=Password
+datasources.section.destination-oracle.port.title=Port
+datasources.section.destination-oracle.schema.title=Default Schema
+datasources.section.destination-oracle.sid.title=SID
+datasources.section.destination-oracle.username.title=User
+datasources.section.destination-oracle.encryption.description=The encryption method which is used when communicating with the database.
+datasources.section.destination-oracle.encryption.oneOf.0.description=Data transfer will not be encrypted.
+datasources.section.destination-oracle.encryption.oneOf.1.description=The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports.
+datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.description=This parameter defines the database encryption algorithm.
+datasources.section.destination-oracle.encryption.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations.
+datasources.section.destination-oracle.host.description=The hostname of the database.
+datasources.section.destination-oracle.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-oracle.password.description=The password associated with the username.
+datasources.section.destination-oracle.port.description=The port of the database.
+datasources.section.destination-oracle.schema.description=The default schema is used as the target schema for all statements issued from the connection that do not explicitly specify a schema name. The usual value for this field is "airbyte". In Oracle, schemas and users are the same thing, so the "user" parameter is used as the login credentials and this is used for the default Airbyte message schema.
+datasources.section.destination-oracle.sid.description=The System Identifier uniquely distinguishes the instance from any other instance on the same computer.
+datasources.section.destination-oracle.username.description=The username to access the database. This user must have CREATE USER privileges in the database.
+datasources.section.destination-postgres.database.title=DB Name
+datasources.section.destination-postgres.host.title=Host
+datasources.section.destination-postgres.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-postgres.password.title=Password
+datasources.section.destination-postgres.port.title=Port
+datasources.section.destination-postgres.schema.title=Default Schema
+datasources.section.destination-postgres.ssl.title=SSL Connection
+datasources.section.destination-postgres.ssl_mode.oneOf.0.title=disable
+datasources.section.destination-postgres.ssl_mode.oneOf.1.title=allow
+datasources.section.destination-postgres.ssl_mode.oneOf.2.title=prefer
+datasources.section.destination-postgres.ssl_mode.oneOf.3.title=require
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.title=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.title=Client key password (Optional)
+datasources.section.destination-postgres.ssl_mode.oneOf.4.title=verify-ca
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.title=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.title=Client certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.title=Client key
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.title=Client key password (Optional)
+datasources.section.destination-postgres.ssl_mode.oneOf.5.title=verify-full
+datasources.section.destination-postgres.ssl_mode.title=SSL modes
+datasources.section.destination-postgres.username.title=User
+datasources.section.destination-postgres.database.description=Name of the database.
+datasources.section.destination-postgres.host.description=Hostname of the database.
+datasources.section.destination-postgres.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-postgres.password.description=Password associated with the username.
+datasources.section.destination-postgres.port.description=Port of the database.
+datasources.section.destination-postgres.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public".
+datasources.section.destination-postgres.ssl.description=Encrypt data using SSL. When activating SSL, please select one of the connection modes.
+datasources.section.destination-postgres.ssl_mode.description=SSL connection modes.
+datasources.section.destination-postgres.ssl_mode.oneOf.0.description=Disable SSL.
+datasources.section.destination-postgres.ssl_mode.oneOf.1.description=Allow SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.2.description=Prefer SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.3.description=Require SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.4.description=Verify-ca SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.description=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.description=Password for the key storage. This field is optional; if you do not provide it, the password will be generated automatically.
+datasources.section.destination-postgres.ssl_mode.oneOf.5.description=Verify-full SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.description=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.description=Client certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.description=Client key
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.description=Password for the key storage. This field is optional; if you do not provide it, the password will be generated automatically.
+datasources.section.destination-postgres.username.description=Username to use to access the database.
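The Postgres SSL mode titles above (disable, allow, prefer, require, verify-ca, verify-full) mirror the standard libpq sslmode values. As a rough sketch of what the stricter modes need on the client side (not the connector's own code; the file paths are hypothetical), verify-ca/verify-full require the CA certificate, and a client certificate/key can be supplied on top:

```
# Hypothetical paths; verify-full additionally checks that the server
# hostname matches its certificate, while verify-ca only validates the chain.
dsn = (
    "host=db.example.com port=5432 dbname=airbyte user=airbyte "
    "sslmode=verify-full "
    "sslrootcert=/secrets/ca.crt "
    "sslcert=/secrets/client.crt sslkey=/secrets/client.key"
)
print(dsn)
```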
+datasources.section.destination-pubsub.credentials_json.title=Credentials JSON
+datasources.section.destination-pubsub.project_id.title=Project ID
+datasources.section.destination-pubsub.topic_id.title=PubSub Topic ID
+datasources.section.destination-pubsub.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key.
+datasources.section.destination-pubsub.project_id.description=The GCP project ID for the project containing the target PubSub.
+datasources.section.destination-pubsub.topic_id.description=The PubSub topic ID in the given GCP project ID.
+datasources.section.destination-pulsar.batching_enabled.title=Enable batching
+datasources.section.destination-pulsar.batching_max_messages.title=Batching max messages
+datasources.section.destination-pulsar.batching_max_publish_delay.title=Batching max publish delay
+datasources.section.destination-pulsar.block_if_queue_full.title=Block if queue is full
+datasources.section.destination-pulsar.brokers.title=Pulsar brokers
+datasources.section.destination-pulsar.compression_type.title=Compression type
+datasources.section.destination-pulsar.max_pending_messages.title=Max pending messages
+datasources.section.destination-pulsar.max_pending_messages_across_partitions.title=Max pending messages across partitions
+datasources.section.destination-pulsar.producer_name.title=Producer name
+datasources.section.destination-pulsar.producer_sync.title=Sync producer
+datasources.section.destination-pulsar.send_timeout_ms.title=Message send timeout
+datasources.section.destination-pulsar.topic_namespace.title=Topic namespace
+datasources.section.destination-pulsar.topic_pattern.title=Topic pattern
+datasources.section.destination-pulsar.topic_tenant.title=Topic tenant
+datasources.section.destination-pulsar.topic_test.title=Test topic
+datasources.section.destination-pulsar.topic_type.title=Topic type
+datasources.section.destination-pulsar.use_tls.title=Use TLS
+datasources.section.destination-pulsar.batching_enabled.description=Control whether automatic batching of messages is enabled for the producer.
+datasources.section.destination-pulsar.batching_max_messages.description=Maximum number of messages permitted in a batch.
+datasources.section.destination-pulsar.batching_max_publish_delay.description=Time period in milliseconds within which the messages sent will be batched.
+datasources.section.destination-pulsar.block_if_queue_full.description=If the send operation should block when the outgoing message queue is full.
+datasources.section.destination-pulsar.brokers.description=A list of host/port pairs to use for establishing the initial connection to the Pulsar cluster.
+datasources.section.destination-pulsar.compression_type.description=Compression type for the producer.
+datasources.section.destination-pulsar.max_pending_messages.description=The maximum size of a queue holding pending messages.
+datasources.section.destination-pulsar.max_pending_messages_across_partitions.description=The maximum number of pending messages across partitions.
+datasources.section.destination-pulsar.producer_name.description=Name for the producer. If not filled, the system will generate a globally unique name.
+datasources.section.destination-pulsar.producer_sync.description=Wait synchronously until the record has been sent to Pulsar.
+datasources.section.destination-pulsar.send_timeout_ms.description=If a message is not acknowledged by a server before the send-timeout expires, an error occurs (in ms).
+datasources.section.destination-pulsar.topic_namespace.description=The administrative unit of the topic, which acts as a grouping mechanism for related topics. Most topic configuration is performed at the namespace level. Each tenant has one or multiple namespaces.
+datasources.section.destination-pulsar.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-pulsar.topic_tenant.description=The topic tenant within the instance. Tenants are essential to multi-tenancy in Pulsar, and spread across clusters.
+datasources.section.destination-pulsar.topic_test.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-pulsar.topic_type.description=It identifies the type of topic. Pulsar supports two kinds of topics: persistent and non-persistent. In a persistent topic, all messages are durably persisted on disk (that means on multiple disks unless the broker is standalone), whereas a non-persistent topic does not persist messages to storage disk.
+datasources.section.destination-pulsar.use_tls.description=Whether to use TLS encryption on the connection.
+datasources.section.destination-rabbitmq.exchange.description=The exchange name.
+datasources.section.destination-rabbitmq.host.description=The RabbitMQ host name.
+datasources.section.destination-rabbitmq.password.description=The password to connect.
+datasources.section.destination-rabbitmq.port.description=The RabbitMQ port.
+datasources.section.destination-rabbitmq.routing_key.description=The routing key.
+datasources.section.destination-rabbitmq.ssl.description=SSL enabled.
+datasources.section.destination-rabbitmq.username.description=The username to connect.
+datasources.section.destination-rabbitmq.virtual_host.description=The RabbitMQ virtual host name.
+datasources.section.destination-redis.cache_type.title=Cache type
+datasources.section.destination-redis.host.title=Host
+datasources.section.destination-redis.password.title=Password
+datasources.section.destination-redis.port.title=Port
+datasources.section.destination-redis.username.title=Username
+datasources.section.destination-redis.cache_type.description=Redis cache type to store data in.
+datasources.section.destination-redis.host.description=Redis host to connect to.
+datasources.section.destination-redis.password.description=Password associated with Redis.
+datasources.section.destination-redis.port.description=Port of Redis.
+datasources.section.destination-redis.username.description=Username associated with Redis.
+datasources.section.destination-redshift.database.title=Database
+datasources.section.destination-redshift.host.title=Host
+datasources.section.destination-redshift.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-redshift.password.title=Password
+datasources.section.destination-redshift.port.title=Port
+datasources.section.destination-redshift.schema.title=Default Schema
+datasources.section.destination-redshift.uploading_method.oneOf.0.title=Standard
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.title=S3 Key Id
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.title=No encryption
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.title=Key
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.title=AES-CBC envelope encryption
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.title=Encryption
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.title=S3 Filename pattern (Optional)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.title=Purge Staging Files and Tables (Optional)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.title=S3 Bucket Name
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.title=S3 Bucket Path (Optional)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.title=S3 Bucket Region
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.title=S3 Access Key
+datasources.section.destination-redshift.uploading_method.oneOf.1.title=S3 Staging
+datasources.section.destination-redshift.uploading_method.title=Uploading Method
+datasources.section.destination-redshift.username.title=Username
+datasources.section.destination-redshift.database.description=Name of the database.
+datasources.section.destination-redshift.host.description=Host Endpoint of the Redshift Cluster (must include the cluster-id, region and end with .redshift.amazonaws.com)
+datasources.section.destination-redshift.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-redshift.password.description=Password associated with the username.
+datasources.section.destination-redshift.port.description=Port of the database.
+datasources.section.destination-redshift.schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public".
+datasources.section.destination-redshift.uploading_method.description=The method used to upload the data to the database.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.description=This ID grants access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. See AWS docs on how to generate an access key ID and secret access key.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.description=How to encrypt the staging data
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.description=Staging data will be stored in plaintext.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.description=Staging data will be encrypted using AES-CBC envelope encryption.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.description=The key, base64-encoded. Must be either 128, 192, or 256 bits. Leave blank to have Airbyte generate an ephemeral key for each sync.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.description=Whether to delete the staging files from S3 after completing the sync. See docs for details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.description=The name of the staging S3 bucket to use if utilising a COPY strategy. COPY is recommended for production workloads for better speed and scalability. See AWS docs for more details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. If not provided, then defaults to the root directory. See path's name recommendations for more details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a COPY strategy. See AWS docs for details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.description=The corresponding secret to the above access key id. See AWS docs on how to generate an access key ID and secret access key.
+datasources.section.destination-redshift.username.description=Username to use to access the database.
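The key_encrypting_key description above asks for a base64-encoded key of 128, 192, or 256 bits (the same requirement reappears in the Snowflake S3 staging option below). A quick way to generate such a value, shown purely as an illustration:

```
import base64
import secrets

# 32 random bytes = 256 bits; use 16 or 24 bytes for a 128- or 192-bit key.
key_bytes = secrets.token_bytes(32)
print(base64.b64encode(key_bytes).decode("ascii"))
```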
+datasources.section.destination-rockset.api_key.title=Api Key
+datasources.section.destination-rockset.api_server.title=Api Server
+datasources.section.destination-rockset.workspace.title=Workspace
+datasources.section.destination-rockset.api_key.description=Rockset API key.
+datasources.section.destination-rockset.api_server.description=Rockset API URL.
+datasources.section.destination-rockset.workspace.description=The Rockset workspace in which collections will be created and written to.
+datasources.section.destination-s3.access_key_id.title=S3 Key ID *
+datasources.section.destination-s3.file_name_pattern.title=S3 Filename pattern (Optional)
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate Level
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.title=Compression Codec *
+datasources.section.destination-s3.format.oneOf.0.properties.format_type.title=Format Type *
+datasources.section.destination-s3.format.oneOf.0.title=Avro: Apache Avro
+datasources.section.destination-s3.format.oneOf.1.properties.compression.oneOf.0.title=No Compression
+datasources.section.destination-s3.format.oneOf.1.properties.compression.oneOf.1.title=GZIP
+datasources.section.destination-s3.format.oneOf.1.properties.compression.title=Compression
+datasources.section.destination-s3.format.oneOf.1.properties.flattening.title=Normalization (Flattening)
+datasources.section.destination-s3.format.oneOf.1.properties.format_type.title=Format Type *
+datasources.section.destination-s3.format.oneOf.1.title=CSV: Comma-Separated Values
+datasources.section.destination-s3.format.oneOf.2.properties.compression.oneOf.0.title=No Compression
+datasources.section.destination-s3.format.oneOf.2.properties.compression.oneOf.1.title=GZIP
+datasources.section.destination-s3.format.oneOf.2.properties.compression.title=Compression
+datasources.section.destination-s3.format.oneOf.2.properties.format_type.title=Format Type *
+datasources.section.destination-s3.format.oneOf.2.title=JSON Lines: Newline-delimited JSON
+datasources.section.destination-s3.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.format_type.title=Format Type *
+datasources.section.destination-s3.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional)
+datasources.section.destination-s3.format.oneOf.3.title=Parquet: Columnar Storage
+datasources.section.destination-s3.format.title=Output Format *
+datasources.section.destination-s3.s3_bucket_name.title=S3 Bucket Name
+datasources.section.destination-s3.s3_bucket_path.title=S3 Bucket Path
+datasources.section.destination-s3.s3_bucket_region.title=S3 Bucket Region
+datasources.section.destination-s3.s3_endpoint.title=Endpoint (Optional)
+datasources.section.destination-s3.s3_path_format.title=S3 Path Format (Optional)
+datasources.section.destination-s3.secret_access_key.title=S3 Access Key *
+datasources.section.destination-s3.access_key_id.description=The access key ID to access the S3 bucket. Airbyte requires Read and Write permissions to the given bucket. Read more here.
+datasources.section.destination-s3.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s)
+datasources.section.destination-s3.format.description=Format of the data output. See here for more details
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Defaults to no compression.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=See here for details.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block.
+datasources.section.destination-s3.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz").
+datasources.section.destination-s3.format.oneOf.1.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details.
+datasources.section.destination-s3.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz").
+datasources.section.destination-s3.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB.
+datasources.section.destination-s3.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages.
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_encoding.description=Default: true.
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for dictionary. Default: 1024 KB.
+datasources.section.destination-s3.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. This is also the minimum size of a row group. Default: 8 MB.
+datasources.section.destination-s3.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB.
+datasources.section.destination-s3.s3_bucket_name.description=The name of the S3 bucket. Read more here.
+datasources.section.destination-s3.s3_bucket_path.description=Directory under the S3 bucket where data will be written. Read more here
+datasources.section.destination-s3.s3_bucket_region.description=The region of the S3 bucket. See here for all region codes.
+datasources.section.destination-s3.s3_endpoint.description=Your S3 endpoint url. Read more here
+datasources.section.destination-s3.s3_path_format.description=Format string on how data will be organized inside the S3 bucket directory. Read more here
+datasources.section.destination-s3.secret_access_key.description=The corresponding secret to the access key ID. Read more here
+datasources.section.destination-scylla.address.title=Address
+datasources.section.destination-scylla.keyspace.title=Keyspace
+datasources.section.destination-scylla.password.title=Password
+datasources.section.destination-scylla.port.title=Port
+datasources.section.destination-scylla.replication.title=Replication factor
+datasources.section.destination-scylla.username.title=Username
+datasources.section.destination-scylla.address.description=Address to connect to.
+datasources.section.destination-scylla.keyspace.description=Default Scylla keyspace to create data in.
+datasources.section.destination-scylla.password.description=Password associated with Scylla.
+datasources.section.destination-scylla.port.description=Port of Scylla.
+datasources.section.destination-scylla.replication.description=Indicates how many nodes the data should be replicated to.
+datasources.section.destination-scylla.username.description=Username to use to access Scylla.
+datasources.section.destination-sftp-json.destination_path.title=Destination path
+datasources.section.destination-sftp-json.host.title=Host
+datasources.section.destination-sftp-json.password.title=Password
+datasources.section.destination-sftp-json.port.title=Port
+datasources.section.destination-sftp-json.username.title=User
+datasources.section.destination-sftp-json.destination_path.description=Path to the directory where json files will be written.
+datasources.section.destination-sftp-json.host.description=Hostname of the SFTP server.
+datasources.section.destination-sftp-json.password.description=Password associated with the username.
+datasources.section.destination-sftp-json.port.description=Port of the SFTP server.
+datasources.section.destination-sftp-json.username.description=Username to use to access the SFTP server.
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.access_token.title=Access Token
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.refresh_token.title=Refresh Token
+datasources.section.destination-snowflake.credentials.oneOf.0.title=OAuth2.0
+datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key.title=Private Key
+datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key_password.title=Passphrase (Optional)
+datasources.section.destination-snowflake.credentials.oneOf.1.title=Key Pair Authentication
+datasources.section.destination-snowflake.credentials.oneOf.2.properties.password.title=Password
+datasources.section.destination-snowflake.credentials.oneOf.2.title=Username and Password
+datasources.section.destination-snowflake.credentials.title=Authorization Method
+datasources.section.destination-snowflake.database.title=Database
+datasources.section.destination-snowflake.host.title=Host
+datasources.section.destination-snowflake.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-snowflake.loading_method.oneOf.0.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.0.title=Select another option
+datasources.section.destination-snowflake.loading_method.oneOf.1.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.1.title=[Recommended] Internal Staging
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.access_key_id.title=AWS access key ID
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.0.title=No encryption
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.properties.key_encrypting_key.title=Key
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.title=AES-CBC envelope encryption
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.title=Encryption
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.file_name_pattern.title=S3 Filename pattern (Optional)
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.purge_staging_data.title=Purge Staging Files and Tables
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_name.title=S3 Bucket Name
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_region.title=S3 Bucket Region
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.secret_access_key.title=AWS secret access key
+datasources.section.destination-snowflake.loading_method.oneOf.2.title=AWS S3 Staging
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.bucket_name.title=Cloud Storage bucket name
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.credentials_json.title=Google Application Credentials
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.project_id.title=Google Cloud project ID
+datasources.section.destination-snowflake.loading_method.oneOf.3.title=Google Cloud Storage Staging
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_account_name.title=Azure Blob Storage account name
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_container_name.title=Azure Blob Storage Container Name
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_endpoint_domain_name.title=Azure Blob Storage Endpoint
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_sas_token.title=SAS Token
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.4.title=Azure Blob Storage Staging
+datasources.section.destination-snowflake.loading_method.title=Data Staging Method
+datasources.section.destination-snowflake.role.title=Role
+datasources.section.destination-snowflake.schema.title=Default Schema
+datasources.section.destination-snowflake.username.title=Username
+datasources.section.destination-snowflake.warehouse.title=Warehouse
+datasources.section.destination-snowflake.credentials.description=
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.access_token.description=Enter your application's Access Token
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_id.description=Enter your application's Client ID
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_secret.description=Enter your application's Client secret
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.refresh_token.description=Enter your application's Refresh Token
+datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key.description=RSA Private key to use for Snowflake connection. See the docs for more information on how to obtain this key.
+datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key_password.description=Passphrase for private key
+datasources.section.destination-snowflake.credentials.oneOf.2.properties.password.description=Enter the password associated with the username.
+datasources.section.destination-snowflake.database.description=Enter the name of the database you want to sync data into
+datasources.section.destination-snowflake.host.description=Enter your Snowflake account's locator (in the format host1:port1,host2:port2,...
. Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down).
+datasources.section.source-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses.
+datasources.section.source-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.
+datasources.section.source-kafka.enable_auto_commit.description=If true, the consumer's offset will be periodically committed in the background.
+datasources.section.source-kafka.group_id.description=The Group ID is how you distinguish different consumer groups.
+datasources.section.source-kafka.max_poll_records.description=The maximum number of records returned in a single call to poll(). Note that max_poll_records does not impact the underlying fetching behavior. The consumer will cache the records from each fetch request and return them incrementally from each poll.
+datasources.section.source-kafka.polling_time.description=Amount of time the Kafka connector should try to poll for messages.
+datasources.section.source-kafka.protocol.description=The Protocol used to communicate with brokers.
+datasources.section.source-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=The JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.source-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=The SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.source-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=The JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.source-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=The SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.source-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.
+datasources.section.source-kafka.repeated_calls.description=The number of repeated calls to poll() if no messages were received.
+datasources.section.source-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.
+datasources.section.source-kafka.retry_backoff_ms.description=The amount of time to wait before attempting to retry a failed request to a given topic partition. This avoids repeatedly sending requests in a tight loop under some failure scenarios.
+datasources.section.source-kafka.subscription.description=You can choose to manually assign a list of partitions, or subscribe to all topics matching a specified pattern to get dynamically assigned partitions.
+datasources.section.source-kafka.subscription.oneOf.0.properties.subscription_type.description=Manually assign a list of partitions to this consumer. This interface does not allow for incremental assignment and will replace the previous assignment (if there is one).
+datasources.section.source-kafka.subscription.oneOf.1.properties.subscription_type.description=The Topic pattern from which the records will be read.
+datasources.section.source-kafka.test_topic.description=The topic used to test whether Airbyte can consume messages.
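The subscription options above distinguish manually assigning partitions from subscribing to a topic pattern. For illustration only (the connector itself is a Java application; this sketch uses the kafka-python client, and the topic/broker values are just the spec's examples), the two modes look like this:

```
from kafka import KafkaConsumer, TopicPartition

consumer = KafkaConsumer(bootstrap_servers="kafka-broker1:9092")

# Manual assignment: a fixed list of partitions; a new call replaces the
# previous assignment rather than adding to it incrementally.
consumer.assign([TopicPartition("sample.topic", 0)])

# Or: pattern subscription, so the consumer picks up every topic that
# matches the regular expression (dynamically assigned partitions).
# consumer.subscribe(pattern="sample\\..*")
```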
+datasources.section.source-klaviyo.api_key.title=Api Key
+datasources.section.source-klaviyo.start_date.title=Start Date
+datasources.section.source-klaviyo.api_key.description=Klaviyo API Key. See our docs if you need help finding this key.
+datasources.section.source-klaviyo.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.
+datasources.section.source-kustomer-singer.api_token.title=API Token
+datasources.section.source-kustomer-singer.start_date.title=Start Date
+datasources.section.source-kustomer-singer.api_token.description=Kustomer API Token. See the docs on how to obtain this
+datasources.section.source-kustomer-singer.start_date.description=The date from which you'd like to replicate the data
+datasources.section.source-kyriba.domain.title=Domain
+datasources.section.source-kyriba.end_date.title=End Date
+datasources.section.source-kyriba.password.title=Password
+datasources.section.source-kyriba.start_date.title=Start Date
+datasources.section.source-kyriba.username.title=Username
+datasources.section.source-kyriba.domain.description=Kyriba domain
+datasources.section.source-kyriba.end_date.description=The date the sync should end. If left empty, the sync will run to the current date.
+datasources.section.source-kyriba.password.description=Password to be used in basic auth
+datasources.section.source-kyriba.start_date.description=The date the sync should start from.
+datasources.section.source-kyriba.username.description=Username to be used in basic auth
+datasources.section.source-lemlist.api_key.title=API key
+datasources.section.source-lemlist.api_key.description=Lemlist API key.
+datasources.section.source-linkedin-ads.account_ids.title=Account IDs (Optional)
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_secret.title=Client secret
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.refresh_token.title=Refresh token
+datasources.section.source-linkedin-ads.credentials.oneOf.0.title=OAuth2.0
+datasources.section.source-linkedin-ads.credentials.oneOf.1.properties.access_token.title=Access token
+datasources.section.source-linkedin-ads.credentials.oneOf.1.title=Access token
+datasources.section.source-linkedin-ads.credentials.title=Authentication *
+datasources.section.source-linkedin-ads.start_date.title=Start date
+datasources.section.source-linkedin-ads.account_ids.description=Specify the account IDs, separated by a space, to pull the data from. Leave empty if you want to pull the data from all associated accounts. See the LinkedIn Ads docs for more info.
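+# Illustrative only (hypothetical IDs, not from the spec): a space-separated account_ids
+# value might look like "123456789 987654321"; leaving it blank pulls all associated accounts.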
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_id.description=The client ID of the LinkedIn Ads developer application.
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_secret.description=The client secret of the LinkedIn Ads developer application.
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.refresh_token.description=The key to refresh the expired access token.
+datasources.section.source-linkedin-ads.credentials.oneOf.1.properties.access_token.description=The token value generated using the authentication code. See the docs to obtain yours.
+datasources.section.source-linkedin-ads.start_date.description=UTC date in the format 2020-09-17. Any data before this date will not be replicated.
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_secret.title=Client secret
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.refresh_token.title=Refresh token
+datasources.section.source-linkedin-pages.credentials.oneOf.0.title=OAuth2.0
+datasources.section.source-linkedin-pages.credentials.oneOf.1.properties.access_token.title=Access token
+datasources.section.source-linkedin-pages.credentials.oneOf.1.title=Access token
+datasources.section.source-linkedin-pages.credentials.title=Authentication *
+datasources.section.source-linkedin-pages.org_id.title=Organization ID
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_id.description=The client ID of the LinkedIn developer application.
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_secret.description=The client secret of the LinkedIn developer application.
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.refresh_token.description=The token value generated using the LinkedIn Developers OAuth Token Tools. See the docs to obtain yours.
+datasources.section.source-linkedin-pages.credentials.oneOf.1.properties.access_token.description=The token value generated using the LinkedIn Developers OAuth Token Tools. See the docs to obtain yours.
+datasources.section.source-linkedin-pages.org_id.description=Specify the Organization ID
+datasources.section.source-linnworks.application_id.title=Application ID
+datasources.section.source-linnworks.application_secret.title=Application Secret
+datasources.section.source-linnworks.start_date.title=Start Date
+datasources.section.source-linnworks.token.title=API Token
+datasources.section.source-linnworks.application_id.description=Linnworks Application ID
+datasources.section.source-linnworks.application_secret.description=Linnworks Application Secret
+datasources.section.source-linnworks.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.
+datasources.section.source-looker.client_id.title=Client ID
+datasources.section.source-looker.client_secret.title=Client Secret
+datasources.section.source-looker.domain.title=Domain
+datasources.section.source-looker.run_look_ids.title=Look IDs to Run
+datasources.section.source-looker.client_id.description=The Client ID is the first part of an API3 key that is specific to each Looker user. See the docs for more information on how to generate this key.
+datasources.section.source-looker.client_secret.description=The Client Secret is the second part of an API3 key.
+datasources.section.source-looker.domain.description=Domain for your Looker account, e.g. airbyte.cloud.looker.com, looker.[clientname].com, or an IP address.
+datasources.section.source-looker.run_look_ids.description=The IDs of any Looks to run (optional)
+datasources.section.source-mailchimp.credentials.oneOf.0.properties.access_token.title=Access Token
+datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.source-mailchimp.credentials.oneOf.0.title=OAuth2.0
+datasources.section.source-mailchimp.credentials.oneOf.1.properties.apikey.title=API Key
+datasources.section.source-mailchimp.credentials.oneOf.1.title=API Key
+datasources.section.source-mailchimp.credentials.title=Authentication *
+datasources.section.source-mailchimp.credentials.oneOf.0.properties.access_token.description=An access token generated using the above client ID and secret.
+datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application.
+datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application.
+datasources.section.source-mailchimp.credentials.oneOf.1.properties.apikey.description=Mailchimp API Key. See the docs for information on how to generate this key.
+datasources.section.source-mailgun.domain_region.title=Domain Region Code
+datasources.section.source-mailgun.private_key.title=Private API Key
+datasources.section.source-mailgun.start_date.title=Replication Start Date
+datasources.section.source-mailgun.domain_region.description=Domain region code. 'EU' or 'US' are possible values. The default is 'US'.
+datasources.section.source-mailgun.private_key.description=Primary account API key to access your Mailgun data.
+datasources.section.source-mailgun.start_date.description=UTC date and time in the format 2020-10-01 00:00:00. Any data before this date will not be replicated. If omitted, defaults to 3 days ago.
+datasources.section.source-marketo.client_id.title=Client ID
+datasources.section.source-marketo.client_secret.title=Client Secret
+datasources.section.source-marketo.domain_url.title=Domain URL
+datasources.section.source-marketo.start_date.title=Start Date
+datasources.section.source-marketo.client_id.description=The Client ID of your Marketo developer application. See the docs for info on how to obtain this.
+datasources.section.source-marketo.client_secret.description=The Client Secret of your Marketo developer application. See the docs for info on how to obtain this.
+datasources.section.source-marketo.domain_url.description=Your Marketo Base URL. See the docs for info on how to obtain this.
+datasources.section.source-marketo.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.refresh_token.title=Refresh Token
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.tenant_id.title=Directory (tenant) ID
+datasources.section.source-microsoft-teams.credentials.oneOf.0.title=Authenticate via Microsoft (OAuth 2.0)
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_id.title=Client ID
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_secret.title=Client Secret
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.tenant_id.title=Directory (tenant) ID
+datasources.section.source-microsoft-teams.credentials.oneOf.1.title=Authenticate via Microsoft
+datasources.section.source-microsoft-teams.credentials.title=Authentication mechanism
+datasources.section.source-microsoft-teams.period.title=Period
+datasources.section.source-microsoft-teams.credentials.description=Choose how to authenticate to Microsoft
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_id.description=The Client ID of your Microsoft Teams developer application.
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Microsoft Teams developer application.
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.refresh_token.description=A Refresh Token to renew the expired Access Token.
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.tenant_id.description=A globally unique identifier (GUID) that is different from your organization name or domain. Follow these steps to obtain it: open one of the Teams where you belong inside the Teams Application -> Click on the … next to the Team title -> Click on Get link to team -> Copy the link to the team and grab the tenant ID from the URL.
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_id.description=The Client ID of your Microsoft Teams developer application.
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_secret.description=The Client Secret of your Microsoft Teams developer application.
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.tenant_id.description=A globally unique identifier (GUID) that is different from your organization name or domain. Follow these steps to obtain it: open one of the Teams where you belong inside the Teams Application -> Click on the … next to the Team title -> Click on Get link to team -> Copy the link to the team and grab the tenant ID from the URL.
+datasources.section.source-microsoft-teams.period.description=Specifies the length of time over which the Team Device Report stream is aggregated. The supported values are: D7, D30, D90, and D180.
+datasources.section.source-mixpanel.api_secret.title=Project Secret
+datasources.section.source-mixpanel.attribution_window.title=Attribution Window
+datasources.section.source-mixpanel.date_window_size.title=Date slicing window
+datasources.section.source-mixpanel.end_date.title=End Date
+datasources.section.source-mixpanel.project_timezone.title=Project Timezone
+datasources.section.source-mixpanel.region.title=Region
+datasources.section.source-mixpanel.select_properties_by_default.title=Select Properties By Default
+datasources.section.source-mixpanel.start_date.title=Start Date
+datasources.section.source-mixpanel.api_secret.description=Mixpanel project secret. See the docs for more information on how to obtain this.
+datasources.section.source-mixpanel.attribution_window.description=A period of time for attributing results to ads, and the lookback period after those actions occur during which ad results are counted. The default attribution window is 5 days.
+datasources.section.source-mixpanel.date_window_size.description=Defines the window size in days used to slice through the data. You can reduce it if the amount of data in each window is too big for your environment.
+datasources.section.source-mixpanel.end_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data after this date will not be replicated. Leave empty to always sync to the most recent date.
+datasources.section.source-mixpanel.project_timezone.description=Time zone in which integer date times are stored. The project timezone may be found in the project settings in the Mixpanel console.
+datasources.section.source-mixpanel.region.description=The region of the Mixpanel domain instance, either US or EU.
+datasources.section.source-mixpanel.select_properties_by_default.description=Setting this config parameter to TRUE ensures that new properties on events and engage records are captured. Otherwise new properties will be ignored.
+datasources.section.source-mixpanel.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. If this option is not set, the connector will replicate data from up to one year ago by default.
+datasources.section.source-monday.credentials.oneOf.0.properties.access_token.title=Access Token
+datasources.section.source-monday.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-monday.credentials.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.source-monday.credentials.oneOf.0.properties.subdomain.title=Subdomain/Slug (Optional)
+datasources.section.source-monday.credentials.oneOf.0.title=OAuth2.0
+datasources.section.source-monday.credentials.oneOf.1.properties.api_token.title=Personal API Token
+datasources.section.source-monday.credentials.oneOf.1.title=API Token
+datasources.section.source-monday.credentials.title=Authorization Method
+datasources.section.source-monday.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests.
+datasources.section.source-monday.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application.
+datasources.section.source-monday.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application.
+datasources.section.source-monday.credentials.oneOf.0.properties.subdomain.description=Slug/subdomain of the account, or the first part of the URL that comes before .monday.com
+datasources.section.source-monday.credentials.oneOf.1.properties.api_token.description=API Token for making authenticated requests.
+datasources.section.source-mongodb.auth_source.title=Authentication source
+datasources.section.source-mongodb.database.title=Database name
+datasources.section.source-mongodb.host.title=Host
+datasources.section.source-mongodb.password.title=Password
+datasources.section.source-mongodb.port.title=Port
+datasources.section.source-mongodb.replica_set.title=Replica Set
+datasources.section.source-mongodb.ssl.title=TLS connection
+datasources.section.source-mongodb.user.title=User
+datasources.section.source-mongodb.auth_source.description=Authentication source where user information is stored. See the Mongo docs for more info.
+datasources.section.source-mongodb.database.description=Database to be replicated.
+datasources.section.source-mongodb.host.description=Host of a Mongo database to be replicated.
+datasources.section.source-mongodb.password.description=Password
+datasources.section.source-mongodb.port.description=Port of a Mongo database to be replicated.
+datasources.section.source-mongodb.replica_set.description=The name of the set to filter servers by, when connecting to a replica set (Under this condition, the 'TLS connection' value automatically becomes 'true'). See the Mongo docs for more info.
+datasources.section.source-mongodb.ssl.description=If this switch is enabled, TLS connections will be used to connect to MongoDB.
+datasources.section.source-mongodb.user.description=User
+datasources.section.source-mongodb-v2.auth_source.title=Authentication Source
+datasources.section.source-mongodb-v2.database.title=Database Name
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.host.title=Host
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.port.title=Port
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.tls.title=TLS Connection
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.title=Standalone MongoDb Instance
+datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.replica_set.title=Replica Set
+datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.server_addresses.title=Server Addresses
+datasources.section.source-mongodb-v2.instance_type.oneOf.1.title=Replica Set
+datasources.section.source-mongodb-v2.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL
+datasources.section.source-mongodb-v2.instance_type.oneOf.2.title=MongoDB Atlas
+datasources.section.source-mongodb-v2.instance_type.title=MongoDb Instance Type
+datasources.section.source-mongodb-v2.password.title=Password
+datasources.section.source-mongodb-v2.user.title=User
+datasources.section.source-mongodb-v2.auth_source.description=The authentication source where the user information is stored.
+datasources.section.source-mongodb-v2.database.description=The database you want to replicate.
+datasources.section.source-mongodb-v2.instance_type.description=The MongoDb instance to connect to. For MongoDB Atlas and Replica Set, a TLS connection is used by default.
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.host.description=The host name of the Mongo database.
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.port.description=The port of the Mongo database.
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.tls.description=Indicates whether TLS encryption protocol will be used to connect to MongoDB. It is recommended to use TLS connection if possible. For more information see documentation.
+datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.replica_set.description=A replica set in MongoDB is a group of mongod processes that maintain the same data set.
+datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify `host`:`port` of each member separated by a comma.
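+# Illustrative only (hypothetical hosts, not from the spec): a replica-set address list is a
+# comma-separated set of host:port pairs, e.g. "mongo1.example.com:27017,mongo2.example.com:27017,mongo3.example.com:27017".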
+datasources.section.source-mongodb-v2.instance_type.oneOf.2.properties.cluster_url.description=The URL of a cluster to connect to.
+datasources.section.source-mongodb-v2.password.description=The password associated with this username.
+datasources.section.source-mongodb-v2.user.description=The username which is used to access the database.
+datasources.section.source-mssql.database.title=Database
+datasources.section.source-mssql.host.title=Host
+datasources.section.source-mssql.jdbc_url_params.title=JDBC URL Params
+datasources.section.source-mssql.password.title=Password
+datasources.section.source-mssql.port.title=Port
+datasources.section.source-mssql.replication_method.oneOf.0.title=Standard
+datasources.section.source-mssql.replication_method.oneOf.1.properties.data_to_sync.title=Data to Sync
+datasources.section.source-mssql.replication_method.oneOf.1.properties.snapshot_isolation.title=Initial Snapshot Isolation Level
+datasources.section.source-mssql.replication_method.oneOf.1.title=Logical Replication (CDC)
+datasources.section.source-mssql.replication_method.title=Replication Method
+datasources.section.source-mssql.schemas.title=Schemas
+datasources.section.source-mssql.ssl_method.oneOf.0.title=Unencrypted
+datasources.section.source-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate)
+datasources.section.source-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate
+datasources.section.source-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate)
+datasources.section.source-mssql.ssl_method.title=SSL Method
+datasources.section.source-mssql.username.title=Username
+datasources.section.source-mssql.database.description=The name of the database.
+datasources.section.source-mssql.host.description=The hostname of the database.
+datasources.section.source-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.source-mssql.password.description=The password associated with the username.
+datasources.section.source-mssql.port.description=The port of the database.
+datasources.section.source-mssql.replication_method.description=The replication method used for extracting data from the database. STANDARD replication requires no setup on the DB side but will not be able to represent deletions incrementally. CDC uses {TBC} to detect inserts, updates, and deletes. This needs to be configured on the source database itself.
+datasources.section.source-mssql.replication_method.oneOf.0.description=Standard replication requires no setup on the DB side but will not be able to represent deletions incrementally.
+datasources.section.source-mssql.replication_method.oneOf.1.description=CDC uses {TBC} to detect inserts, updates, and deletes. This needs to be configured on the source database itself.
+datasources.section.source-mssql.replication_method.oneOf.1.properties.data_to_sync.description=What data should be synced under the CDC. "Existing and New" will read existing data as a snapshot, and sync new changes through CDC. "New Changes Only" will skip the initial snapshot, and only sync new changes through CDC.
+datasources.section.source-mssql.replication_method.oneOf.1.properties.snapshot_isolation.description=Existing data in the database are synced through an initial snapshot. This parameter controls the isolation level that will be used during the initial snapshotting. If you choose the "Snapshot" level, you must enable the snapshot isolation mode on the database.
+datasources.section.source-mssql.schemas.description=The list of schemas to sync from. Defaults to user. Case sensitive.
+datasources.section.source-mssql.ssl_method.description=The encryption method which is used when communicating with the database.
+datasources.section.source-mssql.ssl_method.oneOf.0.description=Data transfer will not be encrypted.
+datasources.section.source-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!)
+datasources.section.source-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.source-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate.
+datasources.section.source-mssql.username.description=The username which is used to access the database.
+datasources.section.source-my-hours.email.title=Email
+datasources.section.source-my-hours.logs_batch_size.title=Time logs batch size
+datasources.section.source-my-hours.password.title=Password
+datasources.section.source-my-hours.start_date.title=Start Date
+datasources.section.source-my-hours.email.description=Your My Hours username
+datasources.section.source-my-hours.logs_batch_size.description=Pagination size used for retrieving logs in days
+datasources.section.source-my-hours.password.description=The password associated with the username
+datasources.section.source-my-hours.start_date.description=Start date for collecting time logs
+datasources.section.source-mysql.database.title=Database
+datasources.section.source-mysql.host.title=Host
+datasources.section.source-mysql.jdbc_url_params.title=JDBC URL Params
+datasources.section.source-mysql.password.title=Password
+datasources.section.source-mysql.port.title=Port
+datasources.section.source-mysql.replication_method.oneOf.0.title=STANDARD
+datasources.section.source-mysql.replication_method.oneOf.1.title=Logical Replication (CDC)
+datasources.section.source-mysql.replication_method.title=Replication Method
+datasources.section.source-mysql.ssl.title=SSL Connection
+datasources.section.source-mysql.ssl_mode.oneOf.0.title=preferred
+datasources.section.source-mysql.ssl_mode.oneOf.1.title=required
+datasources.section.source-mysql.ssl_mode.oneOf.2.properties.ca_certificate.title=CA certificate
+datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_certificate.title=Client certificate
+datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_key.title=Client key
+datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_key_password.title=Client key password (Optional)
+datasources.section.source-mysql.ssl_mode.oneOf.2.title=Verify CA
+datasources.section.source-mysql.ssl_mode.oneOf.3.properties.ca_certificate.title=CA certificate
+datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_certificate.title=Client certificate
+datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_key.title=Client key
+datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_key_password.title=Client key password (Optional)
+datasources.section.source-mysql.ssl_mode.oneOf.3.title=Verify Identity
+datasources.section.source-mysql.ssl_mode.title=SSL modes
+datasources.section.source-mysql.username.title=Username
+datasources.section.source-mysql.database.description=The database name.
+datasources.section.source-mysql.host.description=The host name of the database.
+datasources.section.source-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.source-mysql.password.description=The password associated with the username.
+datasources.section.source-mysql.port.description=The port to connect to.
+datasources.section.source-mysql.replication_method.description=Replication method to use for extracting data from the database.
+datasources.section.source-mysql.replication_method.oneOf.0.description=Standard replication requires no setup on the DB side but will not be able to represent deletions incrementally.
+datasources.section.source-mysql.replication_method.oneOf.1.description=CDC uses the Binlog to detect inserts, updates, and deletes. This needs to be configured on the source database itself.
+datasources.section.source-mysql.ssl.description=Encrypt data using SSL.
+datasources.section.source-mysql.ssl_mode.description=SSL connection modes.
+datasources.section.destination-kafka.bootstrap_servers.description=A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping - this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down).
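+# The connector spec lists "kafka-broker1:9092,kafka-broker2:9092" as an example bootstrap server value.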
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server.
+datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses.
+datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.
+datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer.
+datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns.
+datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream.
+datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request.
+datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block.
+datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5.
+datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes.
+datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers.
+datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.
+datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.
+datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error.
+datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used.
+datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum.
+datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established.
+datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka.
+datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
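+# Example values from the connector spec: "sample.topic" and "{namespace}.{stream}.sample". Illustratively,
+# with namespace "public" and stream "users", the latter would resolve to roughly "public.users.sample"
+# (before any standard naming-convention transformation).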
+datasources.section.destination-keen.api_key.title=API Key
+datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp
+datasources.section.destination-keen.project_id.title=Project ID
+datasources.section.destination-keen.api_key.description=To get Keen Master API Key, navigate to the Access tab from the left-hand side panel and check the Project Details section.
+datasources.section.destination-keen.infer_timestamp.description=Allow connector to guess keen.timestamp value based on the streamed data.
+datasources.section.destination-keen.project_id.description=To get Keen Project ID, navigate to the Access tab from the left-hand side panel and check the Project Details section.
+datasources.section.destination-kinesis.accessKey.title=Access Key
+datasources.section.destination-kinesis.bufferSize.title=Buffer Size
+datasources.section.destination-kinesis.endpoint.title=Endpoint
+datasources.section.destination-kinesis.privateKey.title=Private Key
+datasources.section.destination-kinesis.region.title=Region
+datasources.section.destination-kinesis.shardCount.title=Shard Count
+datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for the current user.
+datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing Kinesis records before being batch streamed.
+datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint.
+datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that are unique for each account, also known as a "recovery phrase".
+datasources.section.destination-kinesis.region.description=AWS region. Your account determines the Regions that are available to you.
+datasources.section.destination-kinesis.shardCount.description=Number of shards to which the data should be streamed.
+datasources.section.destination-kvdb.bucket_id.title=Bucket ID
+datasources.section.destination-kvdb.secret_key.title=Secret Key
+datasources.section.destination-kvdb.bucket_id.description=The ID of your KVdb bucket.
+datasources.section.destination-kvdb.secret_key.description=Your bucket Secret Key.
+datasources.section.destination-local-json.destination_path.title=Destination Path
+datasources.section.destination-local-json.destination_path.description=Path to the directory where JSON files will be written. The files will be placed inside that local mount. For more information check out our docs.
+datasources.section.destination-mariadb-columnstore.database.title=Database
+datasources.section.destination-mariadb-columnstore.host.title=Host
+datasources.section.destination-mariadb-columnstore.password.title=Password
+datasources.section.destination-mariadb-columnstore.port.title=Port
+datasources.section.destination-mariadb-columnstore.username.title=Username
+datasources.section.destination-mariadb-columnstore.database.description=Name of the database.
+datasources.section.destination-mariadb-columnstore.host.description=The Hostname of the database.
+datasources.section.destination-mariadb-columnstore.password.description=The Password associated with the username.
+datasources.section.destination-mariadb-columnstore.port.description=The Port of the database.
+datasources.section.destination-mariadb-columnstore.username.description=The Username which is used to access the database.
+datasources.section.destination-meilisearch.api_key.title=API Key
+datasources.section.destination-meilisearch.host.title=Host
+datasources.section.destination-meilisearch.api_key.description=MeiliSearch API Key. See the docs for more information on how to obtain this key.
+datasources.section.destination-meilisearch.host.description=Hostname of the MeiliSearch instance.
+datasources.section.destination-mongodb.auth_type.oneOf.0.title=None
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.title=Password
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.title=User
+datasources.section.destination-mongodb.auth_type.oneOf.1.title=Login/Password
+datasources.section.destination-mongodb.auth_type.title=Authorization type
+datasources.section.destination-mongodb.database.title=DB Name
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.title=Host
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.title=Port
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.title=TLS Connection
+datasources.section.destination-mongodb.instance_type.oneOf.0.title=Standalone MongoDb Instance
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.title=Server addresses
+datasources.section.destination-mongodb.instance_type.oneOf.1.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL
+datasources.section.destination-mongodb.instance_type.oneOf.2.title=MongoDB Atlas
+datasources.section.destination-mongodb.instance_type.title=MongoDb Instance Type
+datasources.section.destination-mongodb.auth_type.description=Authorization type.
+datasources.section.destination-mongodb.auth_type.oneOf.0.description=None.
+datasources.section.destination-mongodb.auth_type.oneOf.1.description=Login/Password.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.description=Password associated with the username.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.description=Username to use to access the database.
+datasources.section.destination-mongodb.database.description=Name of the database.
+datasources.section.destination-mongodb.instance_type.description=MongoDb instance to connect to. For MongoDB Atlas and Replica Set, a TLS connection is used by default.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.description=The Host of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.description=The Port of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.description=Indicates whether TLS encryption protocol will be used to connect to MongoDB. It is recommended to use TLS connection if possible. For more information see documentation.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.description=A replica set name.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify `host`:`port` of each member separated by a comma.
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.description=URL of a cluster to connect to.
+datasources.section.destination-mqtt.automatic_reconnect.title=Automatic reconnect
+datasources.section.destination-mqtt.broker_host.title=MQTT broker host
+datasources.section.destination-mqtt.broker_port.title=MQTT broker port
+datasources.section.destination-mqtt.clean_session.title=Clean session
+datasources.section.destination-mqtt.client.title=Client ID
+datasources.section.destination-mqtt.connect_timeout.title=Connect timeout
+datasources.section.destination-mqtt.message_qos.title=Message QoS
+datasources.section.destination-mqtt.message_retained.title=Message retained
+datasources.section.destination-mqtt.password.title=Password
+datasources.section.destination-mqtt.publisher_sync.title=Sync publisher
+datasources.section.destination-mqtt.topic_pattern.title=Topic pattern
+datasources.section.destination-mqtt.topic_test.title=Test topic
+datasources.section.destination-mqtt.use_tls.title=Use TLS
+datasources.section.destination-mqtt.username.title=Username
+datasources.section.destination-mqtt.automatic_reconnect.description=Whether the client will automatically attempt to reconnect to the server if the connection is lost.
+datasources.section.destination-mqtt.broker_host.description=Host of the broker to connect to.
+datasources.section.destination-mqtt.broker_port.description=Port of the broker.
+datasources.section.destination-mqtt.clean_session.description=Whether the client and server should remember state across restarts and reconnects.
+datasources.section.destination-mqtt.client.description=A client identifier that is unique on the server being connected to.
+datasources.section.destination-mqtt.connect_timeout.description=Maximum time interval (in seconds) the client will wait for the network connection to the MQTT server to be established.
+datasources.section.destination-mqtt.message_qos.description=Quality of service used for each message to be delivered.
+datasources.section.destination-mqtt.message_retained.description=Whether or not the publish message should be retained by the messaging engine.
+datasources.section.destination-mqtt.password.description=Password to use for the connection.
+datasources.section.destination-mqtt.publisher_sync.description=Wait synchronously until the record has been sent to the broker.
+datasources.section.destination-mqtt.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-mqtt.topic_test.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-mqtt.use_tls.description=Whether to use TLS encryption on the connection.
+datasources.section.destination-mqtt.username.description=User name to use for the connection.
+datasources.section.destination-mssql.database.title=DB Name
+datasources.section.destination-mssql.host.title=Host
+datasources.section.destination-mssql.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-mssql.password.title=Password
+datasources.section.destination-mssql.port.title=Port
+datasources.section.destination-mssql.schema.title=Default Schema
+datasources.section.destination-mssql.ssl_method.oneOf.0.title=Unencrypted
+datasources.section.destination-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate)
+datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate
+datasources.section.destination-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate)
+datasources.section.destination-mssql.ssl_method.title=SSL Method
+datasources.section.destination-mssql.username.title=User
+datasources.section.destination-mssql.database.description=The name of the MSSQL database.
+datasources.section.destination-mssql.host.description=The host name of the MSSQL database.
+datasources.section.destination-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-mssql.password.description=The password associated with this username.
+datasources.section.destination-mssql.port.description=The port of the MSSQL database.
+datasources.section.destination-mssql.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public".
+datasources.section.destination-mssql.ssl_method.description=The encryption method which is used to communicate with the database.
+datasources.section.destination-mssql.ssl_method.oneOf.0.description=The data transfer will not be encrypted.
+datasources.section.destination-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!)
+datasources.section.destination-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate.
+datasources.section.destination-mssql.username.description=The username which is used to access the database.
+datasources.section.destination-mysql.database.title=DB Name
+datasources.section.destination-mysql.host.title=Host
+datasources.section.destination-mysql.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-mysql.password.title=Password
+datasources.section.destination-mysql.port.title=Port
+datasources.section.destination-mysql.ssl.title=SSL Connection
+datasources.section.destination-mysql.username.title=User
+datasources.section.destination-mysql.database.description=Name of the database.
+datasources.section.destination-mysql.host.description=Hostname of the database.
+datasources.section.destination-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-mysql.password.description=Password associated with the username.
+datasources.section.destination-mysql.port.description=Port of the database.
+datasources.section.destination-mysql.ssl.description=Encrypt data using SSL.
+datasources.section.destination-mysql.username.description=Username to use to access the database.
+datasources.section.destination-oracle.encryption.oneOf.0.title=Unencrypted
+datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.title=Encryption Algorithm
+datasources.section.destination-oracle.encryption.oneOf.1.title=Native Network Encryption (NNE)
+datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.title=SSL PEM file
+datasources.section.destination-oracle.encryption.oneOf.2.title=TLS Encrypted (verify certificate)
+datasources.section.destination-oracle.encryption.title=Encryption
+datasources.section.destination-oracle.host.title=Host
+datasources.section.destination-oracle.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-oracle.password.title=Password
+datasources.section.destination-oracle.port.title=Port
+datasources.section.destination-oracle.schema.title=Default Schema
+datasources.section.destination-oracle.sid.title=SID
+datasources.section.destination-oracle.username.title=User
+datasources.section.destination-oracle.encryption.description=The encryption method which is used when communicating with the database.
+datasources.section.destination-oracle.encryption.oneOf.0.description=Data transfer will not be encrypted.
+datasources.section.destination-oracle.encryption.oneOf.1.description=The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports.
+datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.description=This parameter defines the database encryption algorithm.
+datasources.section.destination-oracle.encryption.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations.
+datasources.section.destination-oracle.host.description=The hostname of the database.
+datasources.section.destination-oracle.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-oracle.password.description=The password associated with the username.
+datasources.section.destination-oracle.port.description=The port of the database.
+datasources.section.destination-oracle.schema.description=The default schema is used as the target schema for all statements issued from the connection that do not explicitly specify a schema name. The usual value for this field is "airbyte". In Oracle, schemas and users are the same thing, so the "user" parameter is used as the login credentials and this is used for the default Airbyte message schema.
+datasources.section.destination-oracle.sid.description=The System Identifier uniquely distinguishes the instance from any other instance on the same computer.
+datasources.section.destination-oracle.username.description=The username to access the database. This user must have CREATE USER privileges in the database.
+datasources.section.destination-postgres.database.title=DB Name
+datasources.section.destination-postgres.host.title=Host
+datasources.section.destination-postgres.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-postgres.password.title=Password
+datasources.section.destination-postgres.port.title=Port
+datasources.section.destination-postgres.schema.title=Default Schema
+datasources.section.destination-postgres.ssl.title=SSL Connection
+datasources.section.destination-postgres.ssl_mode.oneOf.0.title=disable
+datasources.section.destination-postgres.ssl_mode.oneOf.1.title=allow
+datasources.section.destination-postgres.ssl_mode.oneOf.2.title=prefer
+datasources.section.destination-postgres.ssl_mode.oneOf.3.title=require
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.title=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.title=Client key password (Optional)
+datasources.section.destination-postgres.ssl_mode.oneOf.4.title=verify-ca
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.title=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.title=Client certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.title=Client key
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.title=Client key password (Optional)
+datasources.section.destination-postgres.ssl_mode.oneOf.5.title=verify-full
+datasources.section.destination-postgres.ssl_mode.title=SSL modes
+datasources.section.destination-postgres.username.title=User
+datasources.section.destination-postgres.database.description=Name of the database.
+datasources.section.destination-postgres.host.description=Hostname of the database.
+datasources.section.destination-postgres.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-postgres.password.description=Password associated with the username.
+datasources.section.destination-postgres.port.description=Port of the database.
+datasources.section.destination-postgres.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public".
+datasources.section.destination-postgres.ssl.description=Encrypt data using SSL. When activating SSL, please select one of the connection modes.
+datasources.section.destination-postgres.ssl_mode.description=SSL connection modes.
+datasources.section.destination-postgres.ssl_mode.oneOf.0.description=Disable SSL.
+datasources.section.destination-postgres.ssl_mode.oneOf.1.description=Allow SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.2.description=Prefer SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.3.description=Require SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.4.description=Verify-ca SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.description=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.description=Password for key storage. This field is optional. If you do not add it, the password will be generated automatically.
+datasources.section.destination-postgres.ssl_mode.oneOf.5.description=Verify-full SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.description=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.description=Client certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.description=Client key
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.description=Password for key storage. This field is optional. If you do not add it, the password will be generated automatically.
+datasources.section.destination-postgres.username.description=Username to use to access the database.
+datasources.section.destination-pubsub.credentials_json.title=Credentials JSON
+datasources.section.destination-pubsub.project_id.title=Project ID
+datasources.section.destination-pubsub.topic_id.title=PubSub Topic ID
+datasources.section.destination-pubsub.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key.
+datasources.section.destination-pubsub.project_id.description=The GCP project ID for the project containing the target PubSub.
+datasources.section.destination-pubsub.topic_id.description=The PubSub topic ID in the given GCP project ID.
+datasources.section.destination-pulsar.batching_enabled.title=Enable batching
+datasources.section.destination-pulsar.batching_max_messages.title=Batching max messages
+datasources.section.destination-pulsar.batching_max_publish_delay.title=Batching max publish delay
+datasources.section.destination-pulsar.block_if_queue_full.title=Block if queue is full
+datasources.section.destination-pulsar.brokers.title=Pulsar brokers
+datasources.section.destination-pulsar.compression_type.title=Compression type
+datasources.section.destination-pulsar.max_pending_messages.title=Max pending messages
+datasources.section.destination-pulsar.max_pending_messages_across_partitions.title=Max pending messages across partitions
+datasources.section.destination-pulsar.producer_name.title=Producer name
+datasources.section.destination-pulsar.producer_sync.title=Sync producer
+datasources.section.destination-pulsar.send_timeout_ms.title=Message send timeout
+datasources.section.destination-pulsar.topic_namespace.title=Topic namespace
+datasources.section.destination-pulsar.topic_pattern.title=Topic pattern
+datasources.section.destination-pulsar.topic_tenant.title=Topic tenant
+datasources.section.destination-pulsar.topic_test.title=Test topic
+datasources.section.destination-pulsar.topic_type.title=Topic type
+datasources.section.destination-pulsar.use_tls.title=Use TLS
+datasources.section.destination-pulsar.batching_enabled.description=Control whether automatic batching of messages is enabled for the producer.
+datasources.section.destination-pulsar.batching_max_messages.description=Maximum number of messages permitted in a batch.
+datasources.section.destination-pulsar.batching_max_publish_delay.description=Time period in milliseconds within which the messages sent will be batched.
+datasources.section.destination-pulsar.block_if_queue_full.description=Whether the send operation should block when the outgoing message queue is full.
+datasources.section.destination-pulsar.brokers.description=A list of host/port pairs to use for establishing the initial connection to the Pulsar cluster.
+datasources.section.destination-pulsar.compression_type.description=Compression type for the producer.
+datasources.section.destination-pulsar.max_pending_messages.description=The maximum size of a queue holding pending messages.
+datasources.section.destination-pulsar.max_pending_messages_across_partitions.description=The maximum number of pending messages across partitions.
+datasources.section.destination-pulsar.producer_name.description=Name for the producer. If not filled, the system will generate a globally unique name automatically.
+datasources.section.destination-pulsar.producer_sync.description=Wait synchronously until the record has been sent to Pulsar.
+datasources.section.destination-pulsar.send_timeout_ms.description=If a message is not acknowledged by a server before the send-timeout expires, an error occurs (in ms).
+datasources.section.destination-pulsar.topic_namespace.description=The administrative unit of the topic, which acts as a grouping mechanism for related topics. Most topic configuration is performed at the namespace level. Each tenant has one or multiple namespaces.
+datasources.section.destination-pulsar.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-pulsar.topic_tenant.description=The topic tenant within the instance. Tenants are essential to multi-tenancy in Pulsar, and spread across clusters.
+datasources.section.destination-pulsar.topic_test.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-pulsar.topic_type.description=Identifies the type of topic. Pulsar supports two kinds of topics: persistent and non-persistent. In a persistent topic, all messages are durably persisted on disk (that means on multiple disks unless the broker is standalone), whereas a non-persistent topic does not persist messages to storage disk.
+datasources.section.destination-pulsar.use_tls.description=Whether to use TLS encryption on the connection.
+datasources.section.destination-rabbitmq.exchange.description=The exchange name.
+datasources.section.destination-rabbitmq.host.description=The RabbitMQ host name.
+datasources.section.destination-rabbitmq.password.description=The password to connect.
+datasources.section.destination-rabbitmq.port.description=The RabbitMQ port.
+datasources.section.destination-rabbitmq.routing_key.description=The routing key.
+datasources.section.destination-rabbitmq.ssl.description=SSL enabled.
+datasources.section.destination-rabbitmq.username.description=The username to connect.
+datasources.section.destination-rabbitmq.virtual_host.description=The RabbitMQ virtual host name.
+datasources.section.destination-redis.cache_type.title=Cache type
+datasources.section.destination-redis.host.title=Host
+datasources.section.destination-redis.password.title=Password
+datasources.section.destination-redis.port.title=Port
+datasources.section.destination-redis.username.title=Username
+datasources.section.destination-redis.cache_type.description=Redis cache type to store data in.
+datasources.section.destination-redis.host.description=Redis host to connect to.
+datasources.section.destination-redis.password.description=Password associated with Redis.
+datasources.section.destination-redis.port.description=Port of Redis.
+datasources.section.destination-redis.username.description=Username associated with Redis.
+datasources.section.destination-redshift.database.title=Database
+datasources.section.destination-redshift.host.title=Host
+datasources.section.destination-redshift.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-redshift.password.title=Password
+datasources.section.destination-redshift.port.title=Port
+datasources.section.destination-redshift.schema.title=Default Schema
+datasources.section.destination-redshift.uploading_method.oneOf.0.title=Standard
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.title=S3 Key Id
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.title=No encryption
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.title=Key
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.title=AES-CBC envelope encryption
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.title=Encryption
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.title=S3 Filename pattern (Optional)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.title=Purge Staging Files and Tables (Optional)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.title=S3 Bucket Name
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.title=S3 Bucket Path (Optional)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.title=S3 Bucket Region
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.title=S3 Access Key
+datasources.section.destination-redshift.uploading_method.oneOf.1.title=S3 Staging
+datasources.section.destination-redshift.uploading_method.title=Uploading Method
+datasources.section.destination-redshift.username.title=Username
+datasources.section.destination-redshift.database.description=Name of the database.
+datasources.section.destination-redshift.host.description=Host Endpoint of the Redshift Cluster (must include the cluster-id, region and end with .redshift.amazonaws.com)
+datasources.section.destination-redshift.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-redshift.password.description=Password associated with the username.
+datasources.section.destination-redshift.port.description=Port of the database.
+datasources.section.destination-redshift.schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public".
+datasources.section.destination-redshift.uploading_method.description=The method by which data will be uploaded to the database.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.description=This ID grants access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. See AWS docs on how to generate an access key ID and secret access key.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.description=How to encrypt the staging data
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.description=Staging data will be stored in plaintext.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.description=Staging data will be encrypted using AES-CBC envelope encryption.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.description=The key, base64-encoded. Must be either 128, 192, or 256 bits. Leave blank to have Airbyte generate an ephemeral key for each sync.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.description=Whether to delete the staging files from S3 after completing the sync. See docs for details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.description=The name of the staging S3 bucket to use if utilising a COPY strategy. COPY is recommended for production workloads for better speed and scalability. See AWS docs for more details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. If not provided, then defaults to the root directory. See path's name recommendations for more details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a COPY strategy. See AWS docs for details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.description=The corresponding secret to the above access key id. See AWS docs on how to generate an access key ID and secret access key.
+datasources.section.destination-redshift.username.description=Username to use to access the database.
+datasources.section.destination-rockset.api_key.title=API Key
+datasources.section.destination-rockset.api_server.title=API Server
+datasources.section.destination-rockset.workspace.title=Workspace
+datasources.section.destination-rockset.api_key.description=Rockset API key
+datasources.section.destination-rockset.api_server.description=Rockset API URL
+datasources.section.destination-rockset.workspace.description=The Rockset workspace in which collections will be created and written to.
+datasources.section.destination-s3.access_key_id.title=S3 Key ID *
+datasources.section.destination-s3.file_name_pattern.title=S3 Filename pattern (Optional)
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate Level
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.title=Compression Codec *
+datasources.section.destination-s3.format.oneOf.0.properties.format_type.title=Format Type *
+datasources.section.destination-s3.format.oneOf.0.title=Avro: Apache Avro
+datasources.section.destination-s3.format.oneOf.1.properties.compression.oneOf.0.title=No Compression
+datasources.section.destination-s3.format.oneOf.1.properties.compression.oneOf.1.title=GZIP
+datasources.section.destination-s3.format.oneOf.1.properties.compression.title=Compression
+datasources.section.destination-s3.format.oneOf.1.properties.flattening.title=Normalization (Flattening)
+datasources.section.destination-s3.format.oneOf.1.properties.format_type.title=Format Type *
+datasources.section.destination-s3.format.oneOf.1.title=CSV: Comma-Separated Values
+datasources.section.destination-s3.format.oneOf.2.properties.compression.oneOf.0.title=No Compression
+datasources.section.destination-s3.format.oneOf.2.properties.compression.oneOf.1.title=GZIP
+datasources.section.destination-s3.format.oneOf.2.properties.compression.title=Compression
+datasources.section.destination-s3.format.oneOf.2.properties.format_type.title=Format Type *
+datasources.section.destination-s3.format.oneOf.2.title=JSON Lines: Newline-delimited JSON
+datasources.section.destination-s3.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.format_type.title=Format Type *
+datasources.section.destination-s3.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional)
+datasources.section.destination-s3.format.oneOf.3.title=Parquet: Columnar Storage
+datasources.section.destination-s3.format.title=Output Format *
+datasources.section.destination-s3.s3_bucket_name.title=S3 Bucket Name
+datasources.section.destination-s3.s3_bucket_path.title=S3 Bucket Path
+datasources.section.destination-s3.s3_bucket_region.title=S3 Bucket Region
+datasources.section.destination-s3.s3_endpoint.title=Endpoint (Optional)
+datasources.section.destination-s3.s3_path_format.title=S3 Path Format (Optional)
+datasources.section.destination-s3.secret_access_key.title=S3 Access Key *
+datasources.section.destination-s3.access_key_id.description=The access key ID to access the S3 bucket. Airbyte requires Read and Write permissions to the given bucket. Read more here.
+datasources.section.destination-s3.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s)
+datasources.section.destination-s3.format.description=Format of the data output. See here for more details
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Defaults to no compression.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=See here for details.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block.
+datasources.section.destination-s3.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz").
+datasources.section.destination-s3.format.oneOf.1.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details.
+datasources.section.destination-s3.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz").
+datasources.section.destination-s3.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB.
+datasources.section.destination-s3.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages.
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_encoding.description=Default: true.
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for dictionary. Default: 1024 KB.
+datasources.section.destination-s3.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. This is also the minimum size of a row group. Default: 8 MB.
+datasources.section.destination-s3.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB.
+datasources.section.destination-s3.s3_bucket_name.description=The name of the S3 bucket. Read more here.
+datasources.section.destination-s3.s3_bucket_path.description=Directory under the S3 bucket where data will be written. Read more here
+datasources.section.destination-s3.s3_bucket_region.description=The region of the S3 bucket. See here for all region codes.
+datasources.section.destination-s3.s3_endpoint.description=Your S3 endpoint url. Read more here
+datasources.section.destination-s3.s3_path_format.description=Format string on how data will be organized inside the S3 bucket directory. Read more here
+datasources.section.destination-s3.secret_access_key.description=The corresponding secret to the access key ID. Read more here
+datasources.section.destination-scylla.address.title=Address
+datasources.section.destination-scylla.keyspace.title=Keyspace
+datasources.section.destination-scylla.password.title=Password
+datasources.section.destination-scylla.port.title=Port
+datasources.section.destination-scylla.replication.title=Replication factor
+datasources.section.destination-scylla.username.title=Username
+datasources.section.destination-scylla.address.description=Address to connect to.
+datasources.section.destination-scylla.keyspace.description=Default Scylla keyspace to create data in.
+datasources.section.destination-scylla.password.description=Password associated with Scylla.
+datasources.section.destination-scylla.port.description=Port of Scylla.
+datasources.section.destination-scylla.replication.description=Indicates how many nodes the data should be replicated to.
+datasources.section.destination-scylla.username.description=Username to use to access Scylla.
+datasources.section.destination-sftp-json.destination_path.title=Destination path
+datasources.section.destination-sftp-json.host.title=Host
+datasources.section.destination-sftp-json.password.title=Password
+datasources.section.destination-sftp-json.port.title=Port
+datasources.section.destination-sftp-json.username.title=User
+datasources.section.destination-sftp-json.destination_path.description=Path to the directory where json files will be written.
+datasources.section.destination-sftp-json.host.description=Hostname of the SFTP server.
+datasources.section.destination-sftp-json.password.description=Password associated with the username.
+datasources.section.destination-sftp-json.port.description=Port of the SFTP server.
+datasources.section.destination-sftp-json.username.description=Username to use to access the SFTP server.
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.access_token.title=Access Token
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.refresh_token.title=Refresh Token
+datasources.section.destination-snowflake.credentials.oneOf.0.title=OAuth2.0
+datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key.title=Private Key
+datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key_password.title=Passphrase (Optional)
+datasources.section.destination-snowflake.credentials.oneOf.1.title=Key Pair Authentication
+datasources.section.destination-snowflake.credentials.oneOf.2.properties.password.title=Password
+datasources.section.destination-snowflake.credentials.oneOf.2.title=Username and Password
+datasources.section.destination-snowflake.credentials.title=Authorization Method
+datasources.section.destination-snowflake.database.title=Database
+datasources.section.destination-snowflake.host.title=Host
+datasources.section.destination-snowflake.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-snowflake.loading_method.oneOf.0.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.0.title=Select another option
+datasources.section.destination-snowflake.loading_method.oneOf.1.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.1.title=[Recommended] Internal Staging
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.access_key_id.title=AWS access key ID
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.0.title=No encryption
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.properties.key_encrypting_key.title=Key
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.title=AES-CBC envelope encryption
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.title=Encryption
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.file_name_pattern.title=S3 Filename pattern (Optional)
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.purge_staging_data.title=Purge Staging Files and Tables
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_name.title=S3 Bucket Name
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_region.title=S3 Bucket Region
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.secret_access_key.title=AWS secret access key
+datasources.section.destination-snowflake.loading_method.oneOf.2.title=AWS S3 Staging
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.bucket_name.title=Cloud Storage bucket name
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.credentials_json.title=Google Application Credentials
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.project_id.title=Google Cloud project ID
+datasources.section.destination-snowflake.loading_method.oneOf.3.title=Google Cloud Storage Staging
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_account_name.title=Azure Blob Storage account name
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_container_name.title=Azure Blob Storage Container Name
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_endpoint_domain_name.title=Azure Blob Storage Endpoint
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_sas_token.title=SAS Token
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.4.title=Azure Blob Storage Staging
+datasources.section.destination-snowflake.loading_method.title=Data Staging Method
+datasources.section.destination-snowflake.role.title=Role
+datasources.section.destination-snowflake.schema.title=Default Schema
+datasources.section.destination-snowflake.username.title=Username
+datasources.section.destination-snowflake.warehouse.title=Warehouse
+datasources.section.destination-snowflake.credentials.description=
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.access_token.description=Enter your application's Access Token
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_id.description=Enter your application's Client ID
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_secret.description=Enter your application's Client secret
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.refresh_token.description=Enter your application's Refresh Token
+datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key.description=RSA Private key to use for Snowflake connection. See the docs for more information on how to obtain this key.
+datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key_password.description=Passphrase for private key
+datasources.section.destination-snowflake.credentials.oneOf.2.properties.password.description=Enter the password associated with the username.
+datasources.section.destination-snowflake.database.description=Enter the name of the database you want to sync data into
+datasources.section.destination-snowflake.host.description=Enter your Snowflake account's locator
+datasources.section.source-kafka.bootstrap_servers.description=A list of host/port pairs to use for establishing the initial connection to the Kafka cluster, in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down).
+datasources.section.source-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses.
+datasources.section.source-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.
+datasources.section.source-kafka.enable_auto_commit.description=If true, the consumer's offset will be periodically committed in the background.
+datasources.section.source-kafka.group_id.description=The Group ID is how you distinguish different consumer groups.
+datasources.section.source-kafka.max_poll_records.description=The maximum number of records returned in a single call to poll(). Note that max_poll_records does not impact the underlying fetching behavior. The consumer will cache the records from each fetch request and return them incrementally from each poll.
+datasources.section.source-kafka.polling_time.description=Amount of time the Kafka connector should try to poll for messages.
+datasources.section.source-kafka.protocol.description=The Protocol used to communicate with brokers.
+datasources.section.source-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=The JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.source-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=The SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.source-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=The JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.source-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=The SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.source-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.
+datasources.section.source-kafka.repeated_calls.description=The number of repeated calls to poll() if no messages were received.
+datasources.section.source-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.
+datasources.section.source-kafka.retry_backoff_ms.description=The amount of time to wait before attempting to retry a failed request to a given topic partition. This avoids repeatedly sending requests in a tight loop under some failure scenarios.
+datasources.section.source-kafka.subscription.description=You can choose to manually assign a list of partitions, or subscribe to all topics matching specified pattern to get dynamically assigned partitions.
+datasources.section.source-kafka.subscription.oneOf.0.properties.subscription_type.description=Manually assign a list of partitions to this consumer. This interface does not allow for incremental assignment and will replace the previous assignment (if there is one).
+datasources.section.source-kafka.subscription.oneOf.1.properties.subscription_type.description=The Topic pattern from which the records will be read.
+datasources.section.source-kafka.test_topic.description=The topic used to test whether Airbyte can consume messages.
+datasources.section.source-klaviyo.api_key.title=API Key
+datasources.section.source-klaviyo.start_date.title=Start Date
+datasources.section.source-klaviyo.api_key.description=Klaviyo API Key. See our docs if you need help finding this key.
+datasources.section.source-klaviyo.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.
+datasources.section.source-kustomer-singer.api_token.title=API Token
+datasources.section.source-kustomer-singer.start_date.title=Start Date
+datasources.section.source-kustomer-singer.api_token.description=Kustomer API Token. See the docs on how to obtain this
+datasources.section.source-kustomer-singer.start_date.description=The date from which you'd like to replicate the data
+datasources.section.source-kyriba.domain.title=Domain
+datasources.section.source-kyriba.end_date.title=End Date
+datasources.section.source-kyriba.password.title=Password
+datasources.section.source-kyriba.start_date.title=Start Date
+datasources.section.source-kyriba.username.title=Username
+datasources.section.source-kyriba.domain.description=Kyriba domain
+datasources.section.source-kyriba.end_date.description=The date the sync should end. If left empty, the sync will run up to the current date.
+datasources.section.source-kyriba.password.description=Password to be used in basic auth
+datasources.section.source-kyriba.start_date.description=The date the sync should start from.
+datasources.section.source-kyriba.username.description=Username to be used in basic auth
+datasources.section.source-lemlist.api_key.title=API key
+datasources.section.source-lemlist.api_key.description=Lemlist API key.
+datasources.section.source-linkedin-ads.account_ids.title=Account IDs (Optional)
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_secret.title=Client secret
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.refresh_token.title=Refresh token
+datasources.section.source-linkedin-ads.credentials.oneOf.0.title=OAuth2.0
+datasources.section.source-linkedin-ads.credentials.oneOf.1.properties.access_token.title=Access token
+datasources.section.source-linkedin-ads.credentials.oneOf.1.title=Access token
+datasources.section.source-linkedin-ads.credentials.title=Authentication *
+datasources.section.source-linkedin-ads.start_date.title=Start date
+datasources.section.source-linkedin-ads.account_ids.description=Specify the account IDs to pull data from, separated by a space. Leave empty if you want to pull the data from all associated accounts. See the LinkedIn Ads docs for more info.
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_id.description=The client ID of the LinkedIn Ads developer application.
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_secret.description=The client secret of the LinkedIn Ads developer application.
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.refresh_token.description=The key to refresh the expired access token.
+datasources.section.source-linkedin-ads.credentials.oneOf.1.properties.access_token.description=The token value generated using the authentication code. See the docs to obtain yours.
+datasources.section.source-linkedin-ads.start_date.description=UTC date in the format 2020-09-17. Any data before this date will not be replicated.
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_secret.title=Client secret
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.refresh_token.title=Refresh token
+datasources.section.source-linkedin-pages.credentials.oneOf.0.title=OAuth2.0
+datasources.section.source-linkedin-pages.credentials.oneOf.1.properties.access_token.title=Access token
+datasources.section.source-linkedin-pages.credentials.oneOf.1.title=Access token
+datasources.section.source-linkedin-pages.credentials.title=Authentication *
+datasources.section.source-linkedin-pages.org_id.title=Organization ID
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_id.description=The client ID of the LinkedIn developer application.
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_secret.description=The client secret of the LinkedIn developer application.
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.refresh_token.description=The token value generated using the LinkedIn Developers OAuth Token Tools. See the docs to obtain yours.
+datasources.section.source-linkedin-pages.credentials.oneOf.1.properties.access_token.description=The token value generated using the LinkedIn Developers OAuth Token Tools. See the docs to obtain yours.
+datasources.section.source-linkedin-pages.org_id.description=Specify the Organization ID
+datasources.section.source-linnworks.application_id.title=Application ID
+datasources.section.source-linnworks.application_secret.title=Application Secret
+datasources.section.source-linnworks.start_date.title=Start Date
+datasources.section.source-linnworks.token.title=API Token
+datasources.section.source-linnworks.application_id.description=Linnworks Application ID
+datasources.section.source-linnworks.application_secret.description=Linnworks Application Secret
+datasources.section.source-linnworks.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.
+datasources.section.source-looker.client_id.title=Client ID
+datasources.section.source-looker.client_secret.title=Client Secret
+datasources.section.source-looker.domain.title=Domain
+datasources.section.source-looker.run_look_ids.title=Look IDs to Run
+datasources.section.source-looker.client_id.description=The Client ID is the first part of an API3 key that is specific to each Looker user. See the docs for more information on how to generate this key.
+datasources.section.source-looker.client_secret.description=The Client Secret is the second part of an API3 key.
+datasources.section.source-looker.domain.description=Domain for your Looker account, e.g. airbyte.cloud.looker.com, looker.[clientname].com, or an IP address
+datasources.section.source-looker.run_look_ids.description=The IDs of any Looks to run (optional)
+datasources.section.source-mailchimp.credentials.oneOf.0.properties.access_token.title=Access Token
+datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.source-mailchimp.credentials.oneOf.0.title=OAuth2.0
+datasources.section.source-mailchimp.credentials.oneOf.1.properties.apikey.title=API Key
+datasources.section.source-mailchimp.credentials.oneOf.1.title=API Key
+datasources.section.source-mailchimp.credentials.title=Authentication *
+datasources.section.source-mailchimp.credentials.oneOf.0.properties.access_token.description=An access token generated using the above client ID and secret.
+datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application.
+datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application.
+datasources.section.source-mailchimp.credentials.oneOf.1.properties.apikey.description=Mailchimp API Key. See the docs for information on how to generate this key.
+datasources.section.source-mailgun.domain_region.title=Domain Region Code
+datasources.section.source-mailgun.private_key.title=Private API Key
+datasources.section.source-mailgun.start_date.title=Replication Start Date
+datasources.section.source-mailgun.domain_region.description=Domain region code. 'EU' or 'US' are possible values. The default is 'US'.
+datasources.section.source-mailgun.private_key.description=Primary account API key to access your Mailgun data.
+datasources.section.source-mailgun.start_date.description=UTC date and time in the format 2020-10-01 00:00:00. Any data before this date will not be replicated. If omitted, defaults to 3 days ago.
+datasources.section.source-marketo.client_id.title=Client ID
+datasources.section.source-marketo.client_secret.title=Client Secret
+datasources.section.source-marketo.domain_url.title=Domain URL
+datasources.section.source-marketo.start_date.title=Start Date
+datasources.section.source-marketo.client_id.description=The Client ID of your Marketo developer application. See the docs for info on how to obtain this.
+datasources.section.source-marketo.client_secret.description=The Client Secret of your Marketo developer application. See the docs for info on how to obtain this.
+datasources.section.source-marketo.domain_url.description=Your Marketo Base URL. See the docs for info on how to obtain this.
+datasources.section.source-marketo.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.refresh_token.title=Refresh Token
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.tenant_id.title=Directory (tenant) ID
+datasources.section.source-microsoft-teams.credentials.oneOf.0.title=Authenticate via Microsoft (OAuth 2.0)
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_id.title=Client ID
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_secret.title=Client Secret
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.tenant_id.title=Directory (tenant) ID
+datasources.section.source-microsoft-teams.credentials.oneOf.1.title=Authenticate via Microsoft
+datasources.section.source-microsoft-teams.credentials.title=Authentication mechanism
+datasources.section.source-microsoft-teams.period.title=Period
+datasources.section.source-microsoft-teams.credentials.description=Choose how to authenticate to Microsoft
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_id.description=The Client ID of your Microsoft Teams developer application.
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Microsoft Teams developer application.
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.refresh_token.description=A Refresh Token to renew the expired Access Token.
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.tenant_id.description=A globally unique identifier (GUID) that is different than your organization name or domain. Follow these steps to obtain it: open one of the Teams where you belong inside the Teams Application -> Click on the … next to the Team title -> Click on Get link to team -> Copy the link to the team and grab the tenant ID from the URL
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_id.description=The Client ID of your Microsoft Teams developer application.
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_secret.description=The Client Secret of your Microsoft Teams developer application.
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.tenant_id.description=A globally unique identifier (GUID) that is different than your organization name or domain. Follow these steps to obtain it: open one of the Teams where you belong inside the Teams Application -> Click on the … next to the Team title -> Click on Get link to team -> Copy the link to the team and grab the tenant ID from the URL
+datasources.section.source-microsoft-teams.period.description=Specifies the length of time over which the Team Device Report stream is aggregated. The supported values are: D7, D30, D90, and D180.
+datasources.section.source-mixpanel.api_secret.title=Project Secret
+datasources.section.source-mixpanel.attribution_window.title=Attribution Window
+datasources.section.source-mixpanel.date_window_size.title=Date slicing window
+datasources.section.source-mixpanel.end_date.title=End Date
+datasources.section.source-mixpanel.project_timezone.title=Project Timezone
+datasources.section.source-mixpanel.region.title=Region
+datasources.section.source-mixpanel.select_properties_by_default.title=Select Properties By Default
+datasources.section.source-mixpanel.start_date.title=Start Date
+datasources.section.source-mixpanel.api_secret.description=Mixpanel project secret. See the docs for more information on how to obtain this.
+datasources.section.source-mixpanel.attribution_window.description=A period of time for attributing results to ads and the lookback period after those actions occur during which ad results are counted. The default attribution window is 5 days.
+datasources.section.source-mixpanel.date_window_size.description=Defines the window size in days that is used to slice through data. You can reduce it if the amount of data in each window is too big for your environment.
+datasources.section.source-mixpanel.end_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data after this date will not be replicated. Leave it empty to always sync to the most recent date.
+datasources.section.source-mixpanel.project_timezone.description=Time zone in which integer date times are stored. The project timezone may be found in the project settings in the Mixpanel console.
+datasources.section.source-mixpanel.region.description=The region of the Mixpanel domain instance, either US or EU.
+datasources.section.source-mixpanel.select_properties_by_default.description=Setting this config parameter to TRUE ensures that new properties on events and engage records are captured. Otherwise new properties will be ignored.
+datasources.section.source-mixpanel.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. If this option is not set, the connector will replicate data from up to one year ago by default.
+datasources.section.source-monday.credentials.oneOf.0.properties.access_token.title=Access Token
+datasources.section.source-monday.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-monday.credentials.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.source-monday.credentials.oneOf.0.properties.subdomain.title=Subdomain/Slug (Optional)
+datasources.section.source-monday.credentials.oneOf.0.title=OAuth2.0
+datasources.section.source-monday.credentials.oneOf.1.properties.api_token.title=Personal API Token
+datasources.section.source-monday.credentials.oneOf.1.title=API Token
+datasources.section.source-monday.credentials.title=Authorization Method
+datasources.section.source-monday.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests.
+datasources.section.source-monday.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application.
+datasources.section.source-monday.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application.
+datasources.section.source-monday.credentials.oneOf.0.properties.subdomain.description=Slug/subdomain of the account, or the first part of the URL that comes before .monday.com
+datasources.section.source-monday.credentials.oneOf.1.properties.api_token.description=API Token for making authenticated requests.
+datasources.section.source-mongodb.auth_source.title=Authentication source
+datasources.section.source-mongodb.database.title=Database name
+datasources.section.source-mongodb.host.title=Host
+datasources.section.source-mongodb.password.title=Password
+datasources.section.source-mongodb.port.title=Port
+datasources.section.source-mongodb.replica_set.title=Replica Set
+datasources.section.source-mongodb.ssl.title=TLS connection
+datasources.section.source-mongodb.user.title=User
+datasources.section.source-mongodb.auth_source.description=Authentication source where user information is stored. See the Mongo docs for more info.
+datasources.section.source-mongodb.database.description=Database to be replicated.
+datasources.section.source-mongodb.host.description=Host of a Mongo database to be replicated.
+datasources.section.source-mongodb.password.description=Password
+datasources.section.source-mongodb.port.description=Port of a Mongo database to be replicated.
+datasources.section.source-mongodb.replica_set.description=The name of the set to filter servers by, when connecting to a replica set (Under this condition, the 'TLS connection' value automatically becomes 'true'). See the Mongo docs for more info.
+datasources.section.source-mongodb.ssl.description=If this switch is enabled, TLS connections will be used to connect to MongoDB.
+datasources.section.source-mongodb.user.description=User
+datasources.section.source-mongodb-v2.auth_source.title=Authentication Source
+datasources.section.source-mongodb-v2.database.title=Database Name
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.host.title=Host
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.port.title=Port
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.tls.title=TLS Connection
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.title=Standalone MongoDb Instance
+datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.replica_set.title=Replica Set
+datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.server_addresses.title=Server Addresses
+datasources.section.source-mongodb-v2.instance_type.oneOf.1.title=Replica Set
+datasources.section.source-mongodb-v2.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL
+datasources.section.source-mongodb-v2.instance_type.oneOf.2.title=MongoDB Atlas
+datasources.section.source-mongodb-v2.instance_type.title=MongoDb Instance Type
+datasources.section.source-mongodb-v2.password.title=Password
+datasources.section.source-mongodb-v2.user.title=User
+datasources.section.source-mongodb-v2.auth_source.description=The authentication source where the user information is stored.
+datasources.section.source-mongodb-v2.database.description=The database you want to replicate.
+datasources.section.source-mongodb-v2.instance_type.description=The MongoDB instance to connect to. For MongoDB Atlas and Replica Set, a TLS connection is used by default.
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.host.description=The host name of the Mongo database.
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.port.description=The port of the Mongo database.
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.tls.description=Indicates whether TLS encryption protocol will be used to connect to MongoDB. It is recommended to use TLS connection if possible. For more information see documentation.
+datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.replica_set.description=A replica set in MongoDB is a group of mongod processes that maintain the same data set.
+datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify `host`:`port` of each member separated by comma.
+datasources.section.source-mongodb-v2.instance_type.oneOf.2.properties.cluster_url.description=The URL of a cluster to connect to.
+datasources.section.source-mongodb-v2.password.description=The password associated with this username.
+datasources.section.source-mongodb-v2.user.description=The username which is used to access the database.
+datasources.section.source-mssql.database.title=Database
+datasources.section.source-mssql.host.title=Host
+datasources.section.source-mssql.jdbc_url_params.title=JDBC URL Params
+datasources.section.source-mssql.password.title=Password
+datasources.section.source-mssql.port.title=Port
+datasources.section.source-mssql.replication_method.oneOf.0.title=Standard
+datasources.section.source-mssql.replication_method.oneOf.1.properties.data_to_sync.title=Data to Sync
+datasources.section.source-mssql.replication_method.oneOf.1.properties.snapshot_isolation.title=Initial Snapshot Isolation Level
+datasources.section.source-mssql.replication_method.oneOf.1.title=Logical Replication (CDC)
+datasources.section.source-mssql.replication_method.title=Replication Method
+datasources.section.source-mssql.schemas.title=Schemas
+datasources.section.source-mssql.ssl_method.oneOf.0.title=Unencrypted
+datasources.section.source-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate)
+datasources.section.source-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate
+datasources.section.source-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate)
+datasources.section.source-mssql.ssl_method.title=SSL Method
+datasources.section.source-mssql.username.title=Username
+datasources.section.source-mssql.database.description=The name of the database.
+datasources.section.source-mssql.host.description=The hostname of the database.
+datasources.section.source-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.source-mssql.password.description=The password associated with the username.
+datasources.section.source-mssql.port.description=The port of the database.
+datasources.section.source-mssql.replication_method.description=The replication method used for extracting data from the database. STANDARD replication requires no setup on the DB side but will not be able to represent deletions incrementally. CDC uses {TBC} to detect inserts, updates, and deletes. This needs to be configured on the source database itself.
+datasources.section.source-mssql.replication_method.oneOf.0.description=Standard replication requires no setup on the DB side but will not be able to represent deletions incrementally.
+datasources.section.source-mssql.replication_method.oneOf.1.description=CDC uses {TBC} to detect inserts, updates, and deletes. This needs to be configured on the source database itself.
+datasources.section.source-mssql.replication_method.oneOf.1.properties.data_to_sync.description=What data should be synced under the CDC. "Existing and New" will read existing data as a snapshot, and sync new changes through CDC. "New Changes Only" will skip the initial snapshot, and only sync new changes through CDC.
+datasources.section.source-mssql.replication_method.oneOf.1.properties.snapshot_isolation.description=Existing data in the database are synced through an initial snapshot. This parameter controls the isolation level that will be used during the initial snapshotting. If you choose the "Snapshot" level, you must enable the snapshot isolation mode on the database.
+datasources.section.source-mssql.schemas.description=The list of schemas to sync from. Defaults to user. Case sensitive.
+datasources.section.source-mssql.ssl_method.description=The encryption method which is used when communicating with the database.
+datasources.section.source-mssql.ssl_method.oneOf.0.description=Data transfer will not be encrypted.
+datasources.section.source-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!)
+datasources.section.source-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.source-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate.
+datasources.section.source-mssql.username.description=The username which is used to access the database.
+datasources.section.source-my-hours.email.title=Email
+datasources.section.source-my-hours.logs_batch_size.title=Time logs batch size
+datasources.section.source-my-hours.password.title=Password
+datasources.section.source-my-hours.start_date.title=Start Date
+datasources.section.source-my-hours.email.description=Your My Hours username
+datasources.section.source-my-hours.logs_batch_size.description=Pagination size, in days, used for retrieving time logs.
+datasources.section.source-my-hours.password.description=The password associated with the username.
+datasources.section.source-my-hours.start_date.description=Start date for collecting time logs
+datasources.section.source-mysql.database.title=Database
+datasources.section.source-mysql.host.title=Host
+datasources.section.source-mysql.jdbc_url_params.title=JDBC URL Params
+datasources.section.source-mysql.password.title=Password
+datasources.section.source-mysql.port.title=Port
+datasources.section.source-mysql.replication_method.oneOf.0.title=STANDARD
+datasources.section.source-mysql.replication_method.oneOf.1.title=Logical Replication (CDC)
+datasources.section.source-mysql.replication_method.title=Replication Method
+datasources.section.source-mysql.ssl.title=SSL Connection
+datasources.section.source-mysql.ssl_mode.oneOf.0.title=preferred
+datasources.section.source-mysql.ssl_mode.oneOf.1.title=required
+datasources.section.source-mysql.ssl_mode.oneOf.2.properties.ca_certificate.title=CA certificate
+datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_certificate.title=Client certificate
+datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_key.title=Client key
+datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_key_password.title=Client key password (Optional)
+datasources.section.source-mysql.ssl_mode.oneOf.2.title=Verify CA
+datasources.section.source-mysql.ssl_mode.oneOf.3.properties.ca_certificate.title=CA certificate
+datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_certificate.title=Client certificate
+datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_key.title=Client key
+datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_key_password.title=Client key password (Optional)
+datasources.section.source-mysql.ssl_mode.oneOf.3.title=Verify Identity
+datasources.section.source-mysql.ssl_mode.title=SSL modes
+datasources.section.source-mysql.username.title=Username
+datasources.section.source-mysql.database.description=The database name.
+datasources.section.source-mysql.host.description=The host name of the database.
+datasources.section.source-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.source-mysql.password.description=The password associated with the username.
+datasources.section.source-mysql.port.description=The port to connect to.
+datasources.section.source-mysql.replication_method.description=Replication method to use for extracting data from the database.
+datasources.section.source-mysql.replication_method.oneOf.0.description=Standard replication requires no setup on the DB side but will not be able to represent deletions incrementally.
+datasources.section.source-mysql.replication_method.oneOf.1.description=CDC uses the Binlog to detect inserts, updates, and deletes. This needs to be configured on the source database itself.
+datasources.section.source-mysql.ssl.description=Encrypt data using SSL.
+datasources.section.source-mysql.ssl_mode.description=SSL connection modes.
+datasources.section.destination-kafka.bootstrap_servers.description=A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down).
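+# Illustrative example (placeholder hostnames): bootstrap_servers accepts a comma-separated list
+# such as "kafka1:9092,kafka2:9092"; any reachable broker can bootstrap the connection.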
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server.
+datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses.
+datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.
+datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer.
+datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns.
+datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream.
+datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request.
+datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block.
+datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5.
+datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes.
+datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers.
+datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.
+datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.
+datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error.
+datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used.
+datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum.
+datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established.
+datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka.
+datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
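+# Illustrative example: with topic_pattern "{namespace}.{stream}.sample", a record from
+# namespace "public" and stream "users" would be routed to the topic "public.users.sample"
+# (subject to the connector's standard topic-name normalization).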
+datasources.section.destination-keen.api_key.title=API Key
+datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp
+datasources.section.destination-keen.project_id.title=Project ID
+datasources.section.destination-keen.api_key.description=To get Keen Master API Key, navigate to the Access tab from the left-hand side panel and check the Project Details section.
+datasources.section.destination-keen.infer_timestamp.description=Allow connector to guess keen.timestamp value based on the streamed data.
+datasources.section.destination-keen.project_id.description=To get Keen Project ID, navigate to the Access tab from the left-hand side panel and check the Project Details section.
+datasources.section.destination-kinesis.accessKey.title=Access Key
+datasources.section.destination-kinesis.bufferSize.title=Buffer Size
+datasources.section.destination-kinesis.endpoint.title=Endpoint
+datasources.section.destination-kinesis.privateKey.title=Private Key
+datasources.section.destination-kinesis.region.title=Region
+datasources.section.destination-kinesis.shardCount.title=Shard Count
+datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for current user.
+datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing kinesis records before being batch streamed.
+datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint.
+datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that is unique for each account, also known as a "recovery phrase".
+datasources.section.destination-kinesis.region.description=AWS region. Your account determines the Regions that are available to you.
+datasources.section.destination-kinesis.shardCount.description=Number of shards to which the data should be streamed.
+datasources.section.destination-kvdb.bucket_id.title=Bucket ID
+datasources.section.destination-kvdb.secret_key.title=Secret Key
+datasources.section.destination-kvdb.bucket_id.description=The ID of your KVdb bucket.
+datasources.section.destination-kvdb.secret_key.description=Your bucket Secret Key.
+datasources.section.destination-local-json.destination_path.title=Destination Path
+datasources.section.destination-local-json.destination_path.description=Path to the directory where json files will be written. The files will be placed inside that local mount. For more information check out our docs
+datasources.section.destination-mariadb-columnstore.database.title=Database
+datasources.section.destination-mariadb-columnstore.host.title=Host
+datasources.section.destination-mariadb-columnstore.password.title=Password
+datasources.section.destination-mariadb-columnstore.port.title=Port
+datasources.section.destination-mariadb-columnstore.username.title=Username
+datasources.section.destination-mariadb-columnstore.database.description=Name of the database.
+datasources.section.destination-mariadb-columnstore.host.description=The Hostname of the database.
+datasources.section.destination-mariadb-columnstore.password.description=The Password associated with the username.
+datasources.section.destination-mariadb-columnstore.port.description=The Port of the database.
+datasources.section.destination-mariadb-columnstore.username.description=The Username which is used to access the database.
+datasources.section.destination-meilisearch.api_key.title=API Key
+datasources.section.destination-meilisearch.host.title=Host
+datasources.section.destination-meilisearch.api_key.description=MeiliSearch API Key. See the docs for more information on how to obtain this key.
+datasources.section.destination-meilisearch.host.description=Hostname of the MeiliSearch instance.
+datasources.section.destination-mongodb.auth_type.oneOf.0.title=None
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.title=Password
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.title=User
+datasources.section.destination-mongodb.auth_type.oneOf.1.title=Login/Password
+datasources.section.destination-mongodb.auth_type.title=Authorization type
+datasources.section.destination-mongodb.database.title=DB Name
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.title=Host
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.title=Port
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.title=TLS Connection
+datasources.section.destination-mongodb.instance_type.oneOf.0.title=Standalone MongoDb Instance
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.title=Server addresses
+datasources.section.destination-mongodb.instance_type.oneOf.1.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL
+datasources.section.destination-mongodb.instance_type.oneOf.2.title=MongoDB Atlas
+datasources.section.destination-mongodb.instance_type.title=MongoDb Instance Type
+datasources.section.destination-mongodb.auth_type.description=Authorization type.
+datasources.section.destination-mongodb.auth_type.oneOf.0.description=None.
+datasources.section.destination-mongodb.auth_type.oneOf.1.description=Login/Password.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.description=Password associated with the username.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.description=Username to use to access the database.
+datasources.section.destination-mongodb.database.description=Name of the database.
+datasources.section.destination-mongodb.instance_type.description=MongoDb instance to connect to. For MongoDB Atlas and Replica Set, a TLS connection is used by default.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.description=The Host of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.description=The Port of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.description=Indicates whether TLS encryption protocol will be used to connect to MongoDB. It is recommended to use TLS connection if possible. For more information see documentation.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.description=A replica set name.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify `host`:`port` of each member separated by a comma.
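+# Illustrative example (placeholder hostnames): server_addresses for a three-member replica set
+# could be "mongo1:27017,mongo2:27017,mongo3:27017".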
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.description=URL of a cluster to connect to.
+datasources.section.destination-mqtt.automatic_reconnect.title=Automatic reconnect
+datasources.section.destination-mqtt.broker_host.title=MQTT broker host
+datasources.section.destination-mqtt.broker_port.title=MQTT broker port
+datasources.section.destination-mqtt.clean_session.title=Clean session
+datasources.section.destination-mqtt.client.title=Client ID
+datasources.section.destination-mqtt.connect_timeout.title=Connect timeout
+datasources.section.destination-mqtt.message_qos.title=Message QoS
+datasources.section.destination-mqtt.message_retained.title=Message retained
+datasources.section.destination-mqtt.password.title=Password
+datasources.section.destination-mqtt.publisher_sync.title=Sync publisher
+datasources.section.destination-mqtt.topic_pattern.title=Topic pattern
+datasources.section.destination-mqtt.topic_test.title=Test topic
+datasources.section.destination-mqtt.use_tls.title=Use TLS
+datasources.section.destination-mqtt.username.title=Username
+datasources.section.destination-mqtt.automatic_reconnect.description=Whether the client will automatically attempt to reconnect to the server if the connection is lost.
+datasources.section.destination-mqtt.broker_host.description=Host of the broker to connect to.
+datasources.section.destination-mqtt.broker_port.description=Port of the broker.
+datasources.section.destination-mqtt.clean_session.description=Whether the client and server should remember state across restarts and reconnects.
+datasources.section.destination-mqtt.client.description=A client identifier that is unique on the server being connected to.
+datasources.section.destination-mqtt.connect_timeout.description=Maximum time interval (in seconds) the client will wait for the network connection to the MQTT server to be established.
+datasources.section.destination-mqtt.message_qos.description=Quality of service used for each message to be delivered.
+datasources.section.destination-mqtt.message_retained.description=Whether or not the publish message should be retained by the messaging engine.
+datasources.section.destination-mqtt.password.description=Password to use for the connection.
+datasources.section.destination-mqtt.publisher_sync.description=Wait synchronously until the record has been sent to the broker.
+datasources.section.destination-mqtt.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-mqtt.topic_test.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-mqtt.use_tls.description=Whether to use TLS encryption on the connection.
+datasources.section.destination-mqtt.username.description=User name to use for the connection.
+datasources.section.destination-mssql.database.title=DB Name
+datasources.section.destination-mssql.host.title=Host
+datasources.section.destination-mssql.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-mssql.password.title=Password
+datasources.section.destination-mssql.port.title=Port
+datasources.section.destination-mssql.schema.title=Default Schema
+datasources.section.destination-mssql.ssl_method.oneOf.0.title=Unencrypted
+datasources.section.destination-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate)
+datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate
+datasources.section.destination-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate)
+datasources.section.destination-mssql.ssl_method.title=SSL Method
+datasources.section.destination-mssql.username.title=User
+datasources.section.destination-mssql.database.description=The name of the MSSQL database.
+datasources.section.destination-mssql.host.description=The host name of the MSSQL database.
+datasources.section.destination-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-mssql.password.description=The password associated with this username.
+datasources.section.destination-mssql.port.description=The port of the MSSQL database.
+datasources.section.destination-mssql.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public".
+datasources.section.destination-mssql.ssl_method.description=The encryption method which is used to communicate with the database.
+datasources.section.destination-mssql.ssl_method.oneOf.0.description=The data transfer will not be encrypted.
+datasources.section.destination-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!)
+datasources.section.destination-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate.
+datasources.section.destination-mssql.username.description=The username which is used to access the database.
+datasources.section.destination-mysql.database.title=DB Name
+datasources.section.destination-mysql.host.title=Host
+datasources.section.destination-mysql.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-mysql.password.title=Password
+datasources.section.destination-mysql.port.title=Port
+datasources.section.destination-mysql.ssl.title=SSL Connection
+datasources.section.destination-mysql.username.title=User
+datasources.section.destination-mysql.database.description=Name of the database.
+datasources.section.destination-mysql.host.description=Hostname of the database.
+datasources.section.destination-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-mysql.password.description=Password associated with the username.
+datasources.section.destination-mysql.port.description=Port of the database.
+datasources.section.destination-mysql.ssl.description=Encrypt data using SSL.
+datasources.section.destination-mysql.username.description=Username to use to access the database.
+datasources.section.destination-oracle.encryption.oneOf.0.title=Unencrypted
+datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.title=Encryption Algorithm
+datasources.section.destination-oracle.encryption.oneOf.1.title=Native Network Encryption (NNE)
+datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.title=SSL PEM file
+datasources.section.destination-oracle.encryption.oneOf.2.title=TLS Encrypted (verify certificate)
+datasources.section.destination-oracle.encryption.title=Encryption
+datasources.section.destination-oracle.host.title=Host
+datasources.section.destination-oracle.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-oracle.password.title=Password
+datasources.section.destination-oracle.port.title=Port
+datasources.section.destination-oracle.schema.title=Default Schema
+datasources.section.destination-oracle.sid.title=SID
+datasources.section.destination-oracle.username.title=User
+datasources.section.destination-oracle.encryption.description=The encryption method which is used when communicating with the database.
+datasources.section.destination-oracle.encryption.oneOf.0.description=Data transfer will not be encrypted.
+datasources.section.destination-oracle.encryption.oneOf.1.description=The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports.
+datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.description=This parameter defines the database encryption algorithm.
+datasources.section.destination-oracle.encryption.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations.
+datasources.section.destination-oracle.host.description=The hostname of the database.
+datasources.section.destination-oracle.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-oracle.password.description=The password associated with the username.
+datasources.section.destination-oracle.port.description=The port of the database.
+datasources.section.destination-oracle.schema.description=The default schema is used as the target schema for all statements issued from the connection that do not explicitly specify a schema name. The usual value for this field is "airbyte". In Oracle, schemas and users are the same thing, so the "user" parameter is used as the login credentials and this is used for the default Airbyte message schema.
+datasources.section.destination-oracle.sid.description=The System Identifier uniquely distinguishes the instance from any other instance on the same computer.
+datasources.section.destination-oracle.username.description=The username to access the database. This user must have CREATE USER privileges in the database.
+datasources.section.destination-postgres.database.title=DB Name
+datasources.section.destination-postgres.host.title=Host
+datasources.section.destination-postgres.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-postgres.password.title=Password
+datasources.section.destination-postgres.port.title=Port
+datasources.section.destination-postgres.schema.title=Default Schema
+datasources.section.destination-postgres.ssl.title=SSL Connection
+datasources.section.destination-postgres.ssl_mode.oneOf.0.title=disable
+datasources.section.destination-postgres.ssl_mode.oneOf.1.title=allow
+datasources.section.destination-postgres.ssl_mode.oneOf.2.title=prefer
+datasources.section.destination-postgres.ssl_mode.oneOf.3.title=require
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.title=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.title=Client key password (Optional)
+datasources.section.destination-postgres.ssl_mode.oneOf.4.title=verify-ca
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.title=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.title=Client certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.title=Client key
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.title=Client key password (Optional)
+datasources.section.destination-postgres.ssl_mode.oneOf.5.title=verify-full
+datasources.section.destination-postgres.ssl_mode.title=SSL modes
+datasources.section.destination-postgres.username.title=User
+datasources.section.destination-postgres.database.description=Name of the database.
+datasources.section.destination-postgres.host.description=Hostname of the database.
+datasources.section.destination-postgres.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-postgres.password.description=Password associated with the username.
+datasources.section.destination-postgres.port.description=Port of the database.
+datasources.section.destination-postgres.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public".
+datasources.section.destination-postgres.ssl.description=Encrypt data using SSL. When activating SSL, please select one of the connection modes.
+datasources.section.destination-postgres.ssl_mode.description=SSL connection modes.
+datasources.section.destination-postgres.ssl_mode.oneOf.0.description=Disable SSL.
+datasources.section.destination-postgres.ssl_mode.oneOf.1.description=Allow SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.2.description=Prefer SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.3.description=Require SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.4.description=Verify-ca SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.description=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.description=Password for the key storage. This field is optional; if you do not add it, the password will be generated automatically.
+datasources.section.destination-postgres.ssl_mode.oneOf.5.description=Verify-full SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.description=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.description=Client certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.description=Client key
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.description=Password for the key storage. This field is optional; if you do not add it, the password will be generated automatically.
+datasources.section.destination-postgres.username.description=Username to use to access the database.
+datasources.section.destination-pubsub.credentials_json.title=Credentials JSON
+datasources.section.destination-pubsub.project_id.title=Project ID
+datasources.section.destination-pubsub.topic_id.title=PubSub Topic ID
+datasources.section.destination-pubsub.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key.
+datasources.section.destination-pubsub.project_id.description=The GCP project ID for the project containing the target PubSub.
+datasources.section.destination-pubsub.topic_id.description=The PubSub topic ID in the given GCP project ID.
+datasources.section.destination-pulsar.batching_enabled.title=Enable batching
+datasources.section.destination-pulsar.batching_max_messages.title=Batching max messages
+datasources.section.destination-pulsar.batching_max_publish_delay.title=Batching max publish delay
+datasources.section.destination-pulsar.block_if_queue_full.title=Block if queue is full
+datasources.section.destination-pulsar.brokers.title=Pulsar brokers
+datasources.section.destination-pulsar.compression_type.title=Compression type
+datasources.section.destination-pulsar.max_pending_messages.title=Max pending messages
+datasources.section.destination-pulsar.max_pending_messages_across_partitions.title=Max pending messages across partitions
+datasources.section.destination-pulsar.producer_name.title=Producer name
+datasources.section.destination-pulsar.producer_sync.title=Sync producer
+datasources.section.destination-pulsar.send_timeout_ms.title=Message send timeout
+datasources.section.destination-pulsar.topic_namespace.title=Topic namespace
+datasources.section.destination-pulsar.topic_pattern.title=Topic pattern
+datasources.section.destination-pulsar.topic_tenant.title=Topic tenant
+datasources.section.destination-pulsar.topic_test.title=Test topic
+datasources.section.destination-pulsar.topic_type.title=Topic type
+datasources.section.destination-pulsar.use_tls.title=Use TLS
+datasources.section.destination-pulsar.batching_enabled.description=Control whether automatic batching of messages is enabled for the producer.
+datasources.section.destination-pulsar.batching_max_messages.description=Maximum number of messages permitted in a batch.
+datasources.section.destination-pulsar.batching_max_publish_delay.description=Time period in milliseconds within which the messages sent will be batched.
+datasources.section.destination-pulsar.block_if_queue_full.description=If the send operation should block when the outgoing message queue is full.
+datasources.section.destination-pulsar.brokers.description=A list of host/port pairs to use for establishing the initial connection to the Pulsar cluster.
+datasources.section.destination-pulsar.compression_type.description=Compression type for the producer.
+datasources.section.destination-pulsar.max_pending_messages.description=The maximum size of a queue holding pending messages.
+datasources.section.destination-pulsar.max_pending_messages_across_partitions.description=The maximum number of pending messages across partitions.
+datasources.section.destination-pulsar.producer_name.description=Name for the producer. If not filled, the system will generate a globally unique name.
+datasources.section.destination-pulsar.producer_sync.description=Wait synchronously until the record has been sent to Pulsar.
+datasources.section.destination-pulsar.send_timeout_ms.description=If a message is not acknowledged by a server before the send-timeout expires, an error occurs (in ms).
+datasources.section.destination-pulsar.topic_namespace.description=The administrative unit of the topic, which acts as a grouping mechanism for related topics. Most topic configuration is performed at the namespace level. Each tenant has one or multiple namespaces.
+datasources.section.destination-pulsar.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-pulsar.topic_tenant.description=The topic tenant within the instance. Tenants are essential to multi-tenancy in Pulsar, and spread across clusters.
+datasources.section.destination-pulsar.topic_test.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-pulsar.topic_type.description=It identifies the type of topic. Pulsar supports two kinds of topics: persistent and non-persistent. In a persistent topic, all messages are durably persisted on disk (that means on multiple disks unless the broker is standalone), whereas a non-persistent topic does not persist messages to storage disk.
+datasources.section.destination-pulsar.use_tls.description=Whether to use TLS encryption on the connection.
+datasources.section.destination-rabbitmq.exchange.description=The exchange name.
+datasources.section.destination-rabbitmq.host.description=The RabbitMQ host name.
+datasources.section.destination-rabbitmq.password.description=The password to connect.
+datasources.section.destination-rabbitmq.port.description=The RabbitMQ port.
+datasources.section.destination-rabbitmq.routing_key.description=The routing key.
+datasources.section.destination-rabbitmq.ssl.description=SSL enabled.
+datasources.section.destination-rabbitmq.username.description=The username to connect.
+datasources.section.destination-rabbitmq.virtual_host.description=The RabbitMQ virtual host name.
+datasources.section.destination-redis.cache_type.title=Cache type
+datasources.section.destination-redis.host.title=Host
+datasources.section.destination-redis.password.title=Password
+datasources.section.destination-redis.port.title=Port
+datasources.section.destination-redis.username.title=Username
+datasources.section.destination-redis.cache_type.description=Redis cache type to store data in.
+datasources.section.destination-redis.host.description=Redis host to connect to.
+datasources.section.destination-redis.password.description=Password associated with Redis.
+datasources.section.destination-redis.port.description=Port of Redis.
+datasources.section.destination-redis.username.description=Username associated with Redis.
+datasources.section.destination-redshift.database.title=Database
+datasources.section.destination-redshift.host.title=Host
+datasources.section.destination-redshift.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-redshift.password.title=Password
+datasources.section.destination-redshift.port.title=Port
+datasources.section.destination-redshift.schema.title=Default Schema
+datasources.section.destination-redshift.uploading_method.oneOf.0.title=Standard
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.title=S3 Key Id
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.title=No encryption
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.title=Key
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.title=AES-CBC envelope encryption
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.title=Encryption
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.title=S3 Filename pattern (Optional)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.title=Purge Staging Files and Tables (Optional)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.title=S3 Bucket Name
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.title=S3 Bucket Path (Optional)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.title=S3 Bucket Region
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.title=S3 Access Key
+datasources.section.destination-redshift.uploading_method.oneOf.1.title=S3 Staging
+datasources.section.destination-redshift.uploading_method.title=Uploading Method
+datasources.section.destination-redshift.username.title=Username
+datasources.section.destination-redshift.database.description=Name of the database.
+datasources.section.destination-redshift.host.description=Host Endpoint of the Redshift Cluster (must include the cluster-id, region and end with .redshift.amazonaws.com)
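+# Illustrative example (placeholder identifiers): a Redshift host endpoint typically looks like
+# "examplecluster.abc123xyz789.us-west-2.redshift.amazonaws.com".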
+datasources.section.destination-redshift.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-redshift.password.description=Password associated with the username.
+datasources.section.destination-redshift.port.description=Port of the database.
+datasources.section.destination-redshift.schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public".
+datasources.section.destination-redshift.uploading_method.description=The method used to upload data to the database.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.description=This ID grants access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. See AWS docs on how to generate an access key ID and secret access key.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.description=How to encrypt the staging data
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.description=Staging data will be stored in plaintext.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.description=Staging data will be encrypted using AES-CBC envelope encryption.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.description=The key, base64-encoded. Must be either 128, 192, or 256 bits. Leave blank to have Airbyte generate an ephemeral key for each sync.
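+# Illustrative example: a suitable 256-bit, base64-encoded key can be generated locally with
+#   openssl rand -base64 32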
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.description=Whether to delete the staging files from S3 after completing the sync. See docs for details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.description=The name of the staging S3 bucket to use if utilising a COPY strategy. COPY is recommended for production workloads for better speed and scalability. See AWS docs for more details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. If not provided, then defaults to the root directory. See path's name recommendations for more details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a COPY strategy. See AWS docs for details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.description=The corresponding secret to the above access key id. See AWS docs on how to generate an access key ID and secret access key.
+datasources.section.destination-redshift.username.description=Username to use to access the database.
+datasources.section.destination-rockset.api_key.title=Api Key
+datasources.section.destination-rockset.api_server.title=Api Server
+datasources.section.destination-rockset.workspace.title=Workspace
+datasources.section.destination-rockset.api_key.description=Rockset API key
+datasources.section.destination-rockset.api_server.description=Rockset API URL
+datasources.section.destination-rockset.workspace.description=The Rockset workspace in which collections will be created + written to.
+datasources.section.destination-s3.access_key_id.title=S3 Key ID *
+datasources.section.destination-s3.file_name_pattern.title=S3 Filename pattern (Optional)
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate Level
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.title=Compression Codec *
+datasources.section.destination-s3.format.oneOf.0.properties.format_type.title=Format Type *
+datasources.section.destination-s3.format.oneOf.0.title=Avro: Apache Avro
+datasources.section.destination-s3.format.oneOf.1.properties.compression.oneOf.0.title=No Compression
+datasources.section.destination-s3.format.oneOf.1.properties.compression.oneOf.1.title=GZIP
+datasources.section.destination-s3.format.oneOf.1.properties.compression.title=Compression
+datasources.section.destination-s3.format.oneOf.1.properties.flattening.title=Normalization (Flattening)
+datasources.section.destination-s3.format.oneOf.1.properties.format_type.title=Format Type *
+datasources.section.destination-s3.format.oneOf.1.title=CSV: Comma-Separated Values
+datasources.section.destination-s3.format.oneOf.2.properties.compression.oneOf.0.title=No Compression
+datasources.section.destination-s3.format.oneOf.2.properties.compression.oneOf.1.title=GZIP
+datasources.section.destination-s3.format.oneOf.2.properties.compression.title=Compression
+datasources.section.destination-s3.format.oneOf.2.properties.format_type.title=Format Type *
+datasources.section.destination-s3.format.oneOf.2.title=JSON Lines: Newline-delimited JSON
+datasources.section.destination-s3.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.format_type.title=Format Type *
+datasources.section.destination-s3.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional)
+datasources.section.destination-s3.format.oneOf.3.title=Parquet: Columnar Storage
+datasources.section.destination-s3.format.title=Output Format *
+datasources.section.destination-s3.s3_bucket_name.title=S3 Bucket Name
+datasources.section.destination-s3.s3_bucket_path.title=S3 Bucket Path
+datasources.section.destination-s3.s3_bucket_region.title=S3 Bucket Region
+datasources.section.destination-s3.s3_endpoint.title=Endpoint (Optional)
+datasources.section.destination-s3.s3_path_format.title=S3 Path Format (Optional)
+datasources.section.destination-s3.secret_access_key.title=S3 Access Key *
+datasources.section.destination-s3.access_key_id.description=The access key ID to access the S3 bucket. Airbyte requires Read and Write permissions to the given bucket. Read more here.
+datasources.section.destination-s3.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s)
+datasources.section.destination-s3.format.description=Format of the data output. See here for more details
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Default to no compression.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=See here for details.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block.
+datasources.section.destination-s3.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz").
+datasources.section.destination-s3.format.oneOf.1.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details.
+datasources.section.destination-s3.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz").
+datasources.section.destination-s3.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB.
+datasources.section.destination-s3.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages.
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_encoding.description=Default: true.
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for dictionary. Default: 1024 KB.
+datasources.section.destination-s3.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. This is also the minimum size of a row group. Default: 8 MB.
+datasources.section.destination-s3.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB.
+datasources.section.destination-s3.s3_bucket_name.description=The name of the S3 bucket. Read more here.
+datasources.section.destination-s3.s3_bucket_path.description=Directory under the S3 bucket where data will be written. Read more here
+datasources.section.destination-s3.s3_bucket_region.description=The region of the S3 bucket. See here for all region codes.
+datasources.section.destination-s3.s3_endpoint.description=Your S3 endpoint url. Read more here
+datasources.section.destination-s3.s3_path_format.description=Format string on how data will be organized inside the S3 bucket directory. Read more here
+datasources.section.destination-s3.secret_access_key.description=The corresponding secret to the access key ID. Read more here
+datasources.section.destination-scylla.address.title=Address
+datasources.section.destination-scylla.keyspace.title=Keyspace
+datasources.section.destination-scylla.password.title=Password
+datasources.section.destination-scylla.port.title=Port
+datasources.section.destination-scylla.replication.title=Replication factor
+datasources.section.destination-scylla.username.title=Username
+datasources.section.destination-scylla.address.description=Address to connect to.
+datasources.section.destination-scylla.keyspace.description=Default Scylla keyspace to create data in.
+datasources.section.destination-scylla.password.description=Password associated with Scylla.
+datasources.section.destination-scylla.port.description=Port of Scylla.
+datasources.section.destination-scylla.replication.description=Indicates to how many nodes the data should be replicated.
+datasources.section.destination-scylla.username.description=Username to use to access Scylla.
+datasources.section.destination-sftp-json.destination_path.title=Destination path
+datasources.section.destination-sftp-json.host.title=Host
+datasources.section.destination-sftp-json.password.title=Password
+datasources.section.destination-sftp-json.port.title=Port
+datasources.section.destination-sftp-json.username.title=User
+datasources.section.destination-sftp-json.destination_path.description=Path to the directory where json files will be written.
+datasources.section.destination-sftp-json.host.description=Hostname of the SFTP server.
+datasources.section.destination-sftp-json.password.description=Password associated with the username.
+datasources.section.destination-sftp-json.port.description=Port of the SFTP server.
+datasources.section.destination-sftp-json.username.description=Username to use to access the SFTP server.
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.access_token.title=Access Token
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.refresh_token.title=Refresh Token
+datasources.section.destination-snowflake.credentials.oneOf.0.title=OAuth2.0
+datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key.title=Private Key
+datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key_password.title=Passphrase (Optional)
+datasources.section.destination-snowflake.credentials.oneOf.1.title=Key Pair Authentication
+datasources.section.destination-snowflake.credentials.oneOf.2.properties.password.title=Password
+datasources.section.destination-snowflake.credentials.oneOf.2.title=Username and Password
+datasources.section.destination-snowflake.credentials.title=Authorization Method
+datasources.section.destination-snowflake.database.title=Database
+datasources.section.destination-snowflake.host.title=Host
+datasources.section.destination-snowflake.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-snowflake.loading_method.oneOf.0.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.0.title=Select another option
+datasources.section.destination-snowflake.loading_method.oneOf.1.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.1.title=[Recommended] Internal Staging
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.access_key_id.title=AWS access key ID
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.0.title=No encryption
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.properties.key_encrypting_key.title=Key
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.title=AES-CBC envelope encryption
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.title=Encryption
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.file_name_pattern.title=S3 Filename pattern (Optional)
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.purge_staging_data.title=Purge Staging Files and Tables
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_name.title=S3 Bucket Name
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_region.title=S3 Bucket Region
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.secret_access_key.title=AWS secret access key
+datasources.section.destination-snowflake.loading_method.oneOf.2.title=AWS S3 Staging
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.bucket_name.title=Cloud Storage bucket name
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.credentials_json.title=Google Application Credentials
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.project_id.title=Google Cloud project ID
+datasources.section.destination-snowflake.loading_method.oneOf.3.title=Google Cloud Storage Staging
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_account_name.title=Azure Blob Storage account name
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_container_name.title=Azure Blob Storage Container Name
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_endpoint_domain_name.title=Azure Blob Storage Endpoint
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_sas_token.title=SAS Token
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.4.title=Azure Blob Storage Staging
+datasources.section.destination-snowflake.loading_method.title=Data Staging Method
+datasources.section.destination-snowflake.role.title=Role
+datasources.section.destination-snowflake.schema.title=Default Schema
+datasources.section.destination-snowflake.username.title=Username
+datasources.section.destination-snowflake.warehouse.title=Warehouse
+datasources.section.destination-snowflake.credentials.description=
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.access_token.description=Enter your application's Access Token
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_id.description=Enter your application's Client ID
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_secret.description=Enter your application's Client secret
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.refresh_token.description=Enter your application's Refresh Token
+datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key.description=RSA Private key to use for Snowflake connection. See the docs for more information on how to obtain this key.
+datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key_password.description=Passphrase for private key
+datasources.section.destination-snowflake.credentials.oneOf.2.properties.password.description=Enter the password associated with the username.
+datasources.section.destination-snowflake.database.description=Enter the name of the database you want to sync data into
+datasources.section.destination-snowflake.host.description=Enter your Snowflake account's locator (in the format <account_locator>.<region>.<cloud>.snowflakecomputing.com)
+datasources.section.source-kafka.bootstrap_servers.description=A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down).
+datasources.section.source-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses.
+datasources.section.source-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.
+datasources.section.source-kafka.enable_auto_commit.description=If true, the consumer's offset will be periodically committed in the background.
+datasources.section.source-kafka.group_id.description=The Group ID is how you distinguish different consumer groups.
+datasources.section.source-kafka.max_poll_records.description=The maximum number of records returned in a single call to poll(). Note that max_poll_records does not impact the underlying fetching behavior. The consumer caches the records from each fetch request and returns them incrementally from each poll.
+datasources.section.source-kafka.polling_time.description=Amount of time Kafka connector should try to poll for messages.
+datasources.section.source-kafka.protocol.description=The Protocol used to communicate with brokers.
+datasources.section.source-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=The JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.source-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=The SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.source-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=The JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.source-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=The SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.source-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.
+datasources.section.source-kafka.repeated_calls.description=The number of repeated calls to poll() if no messages were received.
+datasources.section.source-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.
+datasources.section.source-kafka.retry_backoff_ms.description=The amount of time to wait before attempting to retry a failed request to a given topic partition. This avoids repeatedly sending requests in a tight loop under some failure scenarios.
+datasources.section.source-kafka.subscription.description=You can choose to manually assign a list of partitions, or subscribe to all topics matching specified pattern to get dynamically assigned partitions.
+datasources.section.source-kafka.subscription.oneOf.0.properties.subscription_type.description=Manually assign a list of partitions to this consumer. This interface does not allow for incremental assignment and will replace the previous assignment (if there is one).
+datasources.section.source-kafka.subscription.oneOf.1.properties.subscription_type.description=The Topic pattern from which the records will be read.
+datasources.section.source-kafka.test_topic.description=The topic to test if Airbyte can consume messages.
+datasources.section.source-klaviyo.api_key.title=API Key
+datasources.section.source-klaviyo.start_date.title=Start Date
+datasources.section.source-klaviyo.api_key.description=Klaviyo API Key. See our docs if you need help finding this key.
+datasources.section.source-klaviyo.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.
+datasources.section.source-kustomer-singer.api_token.title=API Token
+datasources.section.source-kustomer-singer.start_date.title=Start Date
+datasources.section.source-kustomer-singer.api_token.description=Kustomer API Token. See the docs on how to obtain this
+datasources.section.source-kustomer-singer.start_date.description=The date from which you'd like to replicate the data
+datasources.section.source-kyriba.domain.title=Domain
+datasources.section.source-kyriba.end_date.title=End Date
+datasources.section.source-kyriba.password.title=Password
+datasources.section.source-kyriba.start_date.title=Start Date
+datasources.section.source-kyriba.username.title=Username
+datasources.section.source-kyriba.domain.description=Kyriba domain
+datasources.section.source-kyriba.end_date.description=The date the sync should end. If left empty, the sync will run up to the current date.
+datasources.section.source-kyriba.password.description=Password to be used in basic auth
+datasources.section.source-kyriba.start_date.description=The date the sync should start from.
+datasources.section.source-kyriba.username.description=Username to be used in basic auth
+datasources.section.source-lemlist.api_key.title=API key
+datasources.section.source-lemlist.api_key.description=Lemlist API key.
+datasources.section.source-linkedin-ads.account_ids.title=Account IDs (Optional)
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_secret.title=Client secret
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.refresh_token.title=Refresh token
+datasources.section.source-linkedin-ads.credentials.oneOf.0.title=OAuth2.0
+datasources.section.source-linkedin-ads.credentials.oneOf.1.properties.access_token.title=Access token
+datasources.section.source-linkedin-ads.credentials.oneOf.1.title=Access token
+datasources.section.source-linkedin-ads.credentials.title=Authentication *
+datasources.section.source-linkedin-ads.start_date.title=Start date
+datasources.section.source-linkedin-ads.account_ids.description=Specify the account IDs, separated by a space, to pull the data from. Leave empty if you want to pull the data from all associated accounts. See the LinkedIn Ads docs for more info.
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_id.description=The client ID of the LinkedIn Ads developer application.
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_secret.description=The client secret of the LinkedIn Ads developer application.
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.refresh_token.description=The key to refresh the expired access token.
+datasources.section.source-linkedin-ads.credentials.oneOf.1.properties.access_token.description=The token value generated using the authentication code. See the docs to obtain yours.
+datasources.section.source-linkedin-ads.start_date.description=UTC date in the format 2020-09-17. Any data before this date will not be replicated.
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_secret.title=Client secret
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.refresh_token.title=Refresh token
+datasources.section.source-linkedin-pages.credentials.oneOf.0.title=OAuth2.0
+datasources.section.source-linkedin-pages.credentials.oneOf.1.properties.access_token.title=Access token
+datasources.section.source-linkedin-pages.credentials.oneOf.1.title=Access token
+datasources.section.source-linkedin-pages.credentials.title=Authentication *
+datasources.section.source-linkedin-pages.org_id.title=Organization ID
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_id.description=The client ID of the LinkedIn developer application.
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_secret.description=The client secret of the LinkedIn developer application.
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.refresh_token.description=The token value generated using the LinkedIn Developers OAuth Token Tools. See the docs to obtain yours.
+datasources.section.source-linkedin-pages.credentials.oneOf.1.properties.access_token.description=The token value generated using the LinkedIn Developers OAuth Token Tools. See the docs to obtain yours.
+datasources.section.source-linkedin-pages.org_id.description=Specify the Organization ID
+datasources.section.source-linnworks.application_id.title=Application ID
+datasources.section.source-linnworks.application_secret.title=Application Secret
+datasources.section.source-linnworks.start_date.title=Start Date
+datasources.section.source-linnworks.token.title=API Token
+datasources.section.source-linnworks.application_id.description=Linnworks Application ID
+datasources.section.source-linnworks.application_secret.description=Linnworks Application Secret
+datasources.section.source-linnworks.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.
+datasources.section.source-looker.client_id.title=Client ID
+datasources.section.source-looker.client_secret.title=Client Secret
+datasources.section.source-looker.domain.title=Domain
+datasources.section.source-looker.run_look_ids.title=Look IDs to Run
+datasources.section.source-looker.client_id.description=The Client ID is the first part of an API3 key that is specific to each Looker user. See the docs for more information on how to generate this key.
+datasources.section.source-looker.client_secret.description=The Client Secret is the second part of an API3 key.
+datasources.section.source-looker.domain.description=Domain for your Looker account, e.g. airbyte.cloud.looker.com,looker.[clientname].com,IP address
+datasources.section.source-looker.run_look_ids.description=The IDs of any Looks to run (optional)
+datasources.section.source-mailchimp.credentials.oneOf.0.properties.access_token.title=Access Token
+datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.source-mailchimp.credentials.oneOf.0.title=OAuth2.0
+datasources.section.source-mailchimp.credentials.oneOf.1.properties.apikey.title=API Key
+datasources.section.source-mailchimp.credentials.oneOf.1.title=API Key
+datasources.section.source-mailchimp.credentials.title=Authentication *
+datasources.section.source-mailchimp.credentials.oneOf.0.properties.access_token.description=An access token generated using the above client ID and secret.
+datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application.
+datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application.
+datasources.section.source-mailchimp.credentials.oneOf.1.properties.apikey.description=Mailchimp API Key. See the docs for information on how to generate this key.
+datasources.section.source-mailgun.domain_region.title=Domain Region Code
+datasources.section.source-mailgun.private_key.title=Private API Key
+datasources.section.source-mailgun.start_date.title=Replication Start Date
+datasources.section.source-mailgun.domain_region.description=Domain region code. 'EU' or 'US' are possible values. The default is 'US'.
+datasources.section.source-mailgun.private_key.description=Primary account API key to access your Mailgun data.
+datasources.section.source-mailgun.start_date.description=UTC date and time in the format 2020-10-01 00:00:00. Any data before this date will not be replicated. If omitted, defaults to 3 days ago.
+datasources.section.source-marketo.client_id.title=Client ID
+datasources.section.source-marketo.client_secret.title=Client Secret
+datasources.section.source-marketo.domain_url.title=Domain URL
+datasources.section.source-marketo.start_date.title=Start Date
+datasources.section.source-marketo.client_id.description=The Client ID of your Marketo developer application. See the docs for info on how to obtain this.
+datasources.section.source-marketo.client_secret.description=The Client Secret of your Marketo developer application. See the docs for info on how to obtain this.
+datasources.section.source-marketo.domain_url.description=Your Marketo Base URL. See the docs for info on how to obtain this.
+datasources.section.source-marketo.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.refresh_token.title=Refresh Token
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.tenant_id.title=Directory (tenant) ID
+datasources.section.source-microsoft-teams.credentials.oneOf.0.title=Authenticate via Microsoft (OAuth 2.0)
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_id.title=Client ID
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_secret.title=Client Secret
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.tenant_id.title=Directory (tenant) ID
+datasources.section.source-microsoft-teams.credentials.oneOf.1.title=Authenticate via Microsoft
+datasources.section.source-microsoft-teams.credentials.title=Authentication mechanism
+datasources.section.source-microsoft-teams.period.title=Period
+datasources.section.source-microsoft-teams.credentials.description=Choose how to authenticate to Microsoft
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_id.description=The Client ID of your Microsoft Teams developer application.
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Microsoft Teams developer application.
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.refresh_token.description=A Refresh Token to renew the expired Access Token.
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.tenant_id.description=A globally unique identifier (GUID) that is different than your organization name or domain. Follow these steps to obtain it: open one of the Teams where you belong inside the Teams Application -> Click on the … next to the Team title -> Click on Get link to team -> Copy the link to the team and grab the tenant ID from the URL.
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_id.description=The Client ID of your Microsoft Teams developer application.
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_secret.description=The Client Secret of your Microsoft Teams developer application.
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.tenant_id.description=A globally unique identifier (GUID) that is different than your organization name or domain. Follow these steps to obtain it: open one of the Teams where you belong inside the Teams Application -> Click on the … next to the Team title -> Click on Get link to team -> Copy the link to the team and grab the tenant ID from the URL.
+datasources.section.source-microsoft-teams.period.description=Specifies the length of time over which the Team Device Report stream is aggregated. The supported values are: D7, D30, D90, and D180.
+datasources.section.source-mixpanel.api_secret.title=Project Secret
+datasources.section.source-mixpanel.attribution_window.title=Attribution Window
+datasources.section.source-mixpanel.date_window_size.title=Date slicing window
+datasources.section.source-mixpanel.end_date.title=End Date
+datasources.section.source-mixpanel.project_timezone.title=Project Timezone
+datasources.section.source-mixpanel.region.title=Region
+datasources.section.source-mixpanel.select_properties_by_default.title=Select Properties By Default
+datasources.section.source-mixpanel.start_date.title=Start Date
+datasources.section.source-mixpanel.api_secret.description=Mixpanel project secret. See the docs for more information on how to obtain this.
+datasources.section.source-mixpanel.attribution_window.description=The period of time for attributing results to ads and the lookback period after those actions occur during which ad results are counted. The default attribution window is 5 days.
+datasources.section.source-mixpanel.date_window_size.description=Defines the window size in days that is used to slice through data. You can reduce it if the amount of data in each window is too big for your environment.
+datasources.section.source-mixpanel.end_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data after this date will not be replicated. Leave empty to always sync to the most recent date.
+datasources.section.source-mixpanel.project_timezone.description=Time zone in which integer date times are stored. The project timezone may be found in the project settings in the Mixpanel console.
+datasources.section.source-mixpanel.region.description=The region of the Mixpanel domain instance, either US or EU.
+datasources.section.source-mixpanel.select_properties_by_default.description=Setting this config parameter to TRUE ensures that new properties on events and engage records are captured. Otherwise new properties will be ignored.
+datasources.section.source-mixpanel.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. If this option is not set, the connector will replicate data from up to one year ago by default.
+datasources.section.source-monday.credentials.oneOf.0.properties.access_token.title=Access Token
+datasources.section.source-monday.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-monday.credentials.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.source-monday.credentials.oneOf.0.properties.subdomain.title=Subdomain/Slug (Optional)
+datasources.section.source-monday.credentials.oneOf.0.title=OAuth2.0
+datasources.section.source-monday.credentials.oneOf.1.properties.api_token.title=Personal API Token
+datasources.section.source-monday.credentials.oneOf.1.title=API Token
+datasources.section.source-monday.credentials.title=Authorization Method
+datasources.section.source-monday.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests.
+datasources.section.source-monday.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application.
+datasources.section.source-monday.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application.
+datasources.section.source-monday.credentials.oneOf.0.properties.subdomain.description=Slug/subdomain of the account, or the first part of the URL that comes before .monday.com
+datasources.section.source-monday.credentials.oneOf.1.properties.api_token.description=API Token for making authenticated requests.
+datasources.section.source-mongodb.auth_source.title=Authentication source
+datasources.section.source-mongodb.database.title=Database name
+datasources.section.source-mongodb.host.title=Host
+datasources.section.source-mongodb.password.title=Password
+datasources.section.source-mongodb.port.title=Port
+datasources.section.source-mongodb.replica_set.title=Replica Set
+datasources.section.source-mongodb.ssl.title=TLS connection
+datasources.section.source-mongodb.user.title=User
+datasources.section.source-mongodb.auth_source.description=Authentication source where user information is stored. See the Mongo docs for more info.
+datasources.section.source-mongodb.database.description=Database to be replicated.
+datasources.section.source-mongodb.host.description=Host of a Mongo database to be replicated.
+datasources.section.source-mongodb.password.description=Password
+datasources.section.source-mongodb.port.description=Port of a Mongo database to be replicated.
+datasources.section.source-mongodb.replica_set.description=The name of the set to filter servers by, when connecting to a replica set (Under this condition, the 'TLS connection' value automatically becomes 'true'). See the Mongo docs for more info.
+datasources.section.source-mongodb.ssl.description=If this switch is enabled, TLS connections will be used to connect to MongoDB.
+datasources.section.source-mongodb.user.description=User
+datasources.section.source-mongodb-v2.auth_source.title=Authentication Source
+datasources.section.source-mongodb-v2.database.title=Database Name
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.host.title=Host
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.port.title=Port
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.tls.title=TLS Connection
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.title=Standalone MongoDb Instance
+datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.replica_set.title=Replica Set
+datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.server_addresses.title=Server Addresses
+datasources.section.source-mongodb-v2.instance_type.oneOf.1.title=Replica Set
+datasources.section.source-mongodb-v2.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL
+datasources.section.source-mongodb-v2.instance_type.oneOf.2.title=MongoDB Atlas
+datasources.section.source-mongodb-v2.instance_type.title=MongoDb Instance Type
+datasources.section.source-mongodb-v2.password.title=Password
+datasources.section.source-mongodb-v2.user.title=User
+datasources.section.source-mongodb-v2.auth_source.description=The authentication source where the user information is stored.
+datasources.section.source-mongodb-v2.database.description=The database you want to replicate.
+datasources.section.source-mongodb-v2.instance_type.description=The MongoDb instance to connect to. For MongoDB Atlas and Replica Set, a TLS connection is used by default.
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.host.description=The host name of the Mongo database.
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.port.description=The port of the Mongo database.
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.tls.description=Indicates whether TLS encryption protocol will be used to connect to MongoDB. It is recommended to use TLS connection if possible. For more information see documentation.
+datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.replica_set.description=A replica set in MongoDB is a group of mongod processes that maintain the same data set.
+datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify `host`:`port` of each member separated by comma.
+datasources.section.source-mongodb-v2.instance_type.oneOf.2.properties.cluster_url.description=The URL of a cluster to connect to.
+datasources.section.source-mongodb-v2.password.description=The password associated with this username.
+datasources.section.source-mongodb-v2.user.description=The username which is used to access the database.
+datasources.section.source-mssql.database.title=Database
+datasources.section.source-mssql.host.title=Host
+datasources.section.source-mssql.jdbc_url_params.title=JDBC URL Params
+datasources.section.source-mssql.password.title=Password
+datasources.section.source-mssql.port.title=Port
+datasources.section.source-mssql.replication_method.oneOf.0.title=Standard
+datasources.section.source-mssql.replication_method.oneOf.1.properties.data_to_sync.title=Data to Sync
+datasources.section.source-mssql.replication_method.oneOf.1.properties.snapshot_isolation.title=Initial Snapshot Isolation Level
+datasources.section.source-mssql.replication_method.oneOf.1.title=Logical Replication (CDC)
+datasources.section.source-mssql.replication_method.title=Replication Method
+datasources.section.source-mssql.schemas.title=Schemas
+datasources.section.source-mssql.ssl_method.oneOf.0.title=Unencrypted
+datasources.section.source-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate)
+datasources.section.source-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate
+datasources.section.source-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate)
+datasources.section.source-mssql.ssl_method.title=SSL Method
+datasources.section.source-mssql.username.title=Username
+datasources.section.source-mssql.database.description=The name of the database.
+datasources.section.source-mssql.host.description=The hostname of the database.
+datasources.section.source-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.source-mssql.password.description=The password associated with the username.
+datasources.section.source-mssql.port.description=The port of the database.
+datasources.section.source-mssql.replication_method.description=The replication method used for extracting data from the database. STANDARD replication requires no setup on the DB side but will not be able to represent deletions incrementally. CDC uses {TBC} to detect inserts, updates, and deletes. This needs to be configured on the source database itself.
+datasources.section.source-mssql.replication_method.oneOf.0.description=Standard replication requires no setup on the DB side but will not be able to represent deletions incrementally.
+datasources.section.source-mssql.replication_method.oneOf.1.description=CDC uses {TBC} to detect inserts, updates, and deletes. This needs to be configured on the source database itself.
+datasources.section.source-mssql.replication_method.oneOf.1.properties.data_to_sync.description=What data should be synced under the CDC. "Existing and New" will read existing data as a snapshot, and sync new changes through CDC. "New Changes Only" will skip the initial snapshot, and only sync new changes through CDC.
+datasources.section.source-mssql.replication_method.oneOf.1.properties.snapshot_isolation.description=Existing data in the database are synced through an initial snapshot. This parameter controls the isolation level that will be used during the initial snapshotting. If you choose the "Snapshot" level, you must enable the snapshot isolation mode on the database.
+datasources.section.source-mssql.schemas.description=The list of schemas to sync from. Defaults to user. Case sensitive.
+datasources.section.source-mssql.ssl_method.description=The encryption method which is used when communicating with the database.
+datasources.section.source-mssql.ssl_method.oneOf.0.description=Data transfer will not be encrypted.
+datasources.section.source-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!)
+datasources.section.source-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.source-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate.
+datasources.section.source-mssql.username.description=The username which is used to access the database.
+datasources.section.source-my-hours.email.title=Email
+datasources.section.source-my-hours.logs_batch_size.title=Time logs batch size
+datasources.section.source-my-hours.password.title=Password
+datasources.section.source-my-hours.start_date.title=Start Date
+datasources.section.source-my-hours.email.description=Your My Hours username
+datasources.section.source-my-hours.logs_batch_size.description=Pagination size used for retrieving logs in days
+datasources.section.source-my-hours.password.description=The password associated with the username
+datasources.section.source-my-hours.start_date.description=Start date for collecting time logs
+datasources.section.source-mysql.database.title=Database
+datasources.section.source-mysql.host.title=Host
+datasources.section.source-mysql.jdbc_url_params.title=JDBC URL Params
+datasources.section.source-mysql.password.title=Password
+datasources.section.source-mysql.port.title=Port
+datasources.section.source-mysql.replication_method.oneOf.0.title=STANDARD
+datasources.section.source-mysql.replication_method.oneOf.1.title=Logical Replication (CDC)
+datasources.section.source-mysql.replication_method.title=Replication Method
+datasources.section.source-mysql.ssl.title=SSL Connection
+datasources.section.source-mysql.ssl_mode.oneOf.0.title=preferred
+datasources.section.source-mysql.ssl_mode.oneOf.1.title=required
+datasources.section.source-mysql.ssl_mode.oneOf.2.properties.ca_certificate.title=CA certificate
+datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_certificate.title=Client certificate
+datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_key.title=Client key
+datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_key_password.title=Client key password (Optional)
+datasources.section.source-mysql.ssl_mode.oneOf.2.title=Verify CA
+datasources.section.source-mysql.ssl_mode.oneOf.3.properties.ca_certificate.title=CA certificate
+datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_certificate.title=Client certificate
+datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_key.title=Client key
+datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_key_password.title=Client key password (Optional)
+datasources.section.source-mysql.ssl_mode.oneOf.3.title=Verify Identity
+datasources.section.source-mysql.ssl_mode.title=SSL modes
+datasources.section.source-mysql.username.title=Username
+datasources.section.source-mysql.database.description=The database name.
+datasources.section.source-mysql.host.description=The host name of the database.
+datasources.section.source-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.source-mysql.password.description=The password associated with the username.
+datasources.section.source-mysql.port.description=The port to connect to.
+datasources.section.source-mysql.replication_method.description=Replication method to use for extracting data from the database.
+datasources.section.source-mysql.replication_method.oneOf.0.description=Standard replication requires no setup on the DB side but will not be able to represent deletions incrementally.
+datasources.section.source-mysql.replication_method.oneOf.1.description=CDC uses the Binlog to detect inserts, updates, and deletes. This needs to be configured on the source database itself.
+datasources.section.source-mysql.ssl.description=Encrypt data using SSL.
+datasources.section.source-mysql.ssl_mode.description=SSL connection modes.
+datasources.section.destination-kafka.bootstrap_servers.description=A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down).
+datasources.section.destination-kafka.buffer_memory.description=The total bytes of memory the producer can use to buffer records waiting to be sent to the server.
+datasources.section.destination-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses.
+datasources.section.destination-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.
+datasources.section.destination-kafka.compression_type.description=The compression type for all data generated by the producer.
+datasources.section.destination-kafka.delivery_timeout_ms.description=An upper bound on the time to report success or failure after a call to 'send()' returns.
+datasources.section.destination-kafka.enable_idempotence.description=When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream.
+datasources.section.destination-kafka.linger_ms.description=The producer groups together any records that arrive in between request transmissions into a single batched request.
+datasources.section.destination-kafka.max_block_ms.description=The configuration controls how long the KafkaProducer's send(), partitionsFor(), initTransactions(), sendOffsetsToTransaction(), commitTransaction() and abortTransaction() methods will block.
+datasources.section.destination-kafka.max_in_flight_requests_per_connection.description=The maximum number of unacknowledged requests the client will send on a single connection before blocking. Can be greater than 1, and the maximum value supported with idempotency is 5.
+datasources.section.destination-kafka.max_request_size.description=The maximum size of a request in bytes.
+datasources.section.destination-kafka.protocol.description=Protocol used to communicate with brokers.
+datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.destination-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.destination-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.destination-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.
+datasources.section.destination-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.
+datasources.section.destination-kafka.retries.description=Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error.
+datasources.section.destination-kafka.send_buffer_bytes.description=The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used.
+datasources.section.destination-kafka.socket_connection_setup_timeout_max_ms.description=The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum.
+datasources.section.destination-kafka.socket_connection_setup_timeout_ms.description=The amount of time the client will wait for the socket connection to be established.
+datasources.section.destination-kafka.sync_producer.description=Wait synchronously until the record has been sent to Kafka.
+datasources.section.destination-kafka.test_topic.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-kafka.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-keen.api_key.title=API Key
+datasources.section.destination-keen.infer_timestamp.title=Infer Timestamp
+datasources.section.destination-keen.project_id.title=Project ID
+datasources.section.destination-keen.api_key.description=To get the Keen Master API Key, navigate to the Access tab from the left-hand side panel and check the Project Details section.
+datasources.section.destination-keen.infer_timestamp.description=Allow connector to guess keen.timestamp value based on the streamed data.
+datasources.section.destination-keen.project_id.description=To get the Keen Project ID, navigate to the Access tab from the left-hand side panel and check the Project Details section.
+datasources.section.destination-kinesis.accessKey.title=Access Key
+datasources.section.destination-kinesis.bufferSize.title=Buffer Size
+datasources.section.destination-kinesis.endpoint.title=Endpoint
+datasources.section.destination-kinesis.privateKey.title=Private Key
+datasources.section.destination-kinesis.region.title=Region
+datasources.section.destination-kinesis.shardCount.title=Shard Count
+datasources.section.destination-kinesis.accessKey.description=Generate the AWS Access Key for current user.
+datasources.section.destination-kinesis.bufferSize.description=Buffer size for storing kinesis records before being batch streamed.
+datasources.section.destination-kinesis.endpoint.description=AWS Kinesis endpoint.
+datasources.section.destination-kinesis.privateKey.description=The AWS Private Key - a string of numbers and letters that are unique for each account, also known as a "recovery phrase".
+datasources.section.destination-kinesis.region.description=AWS region. Your account determines the Regions that are available to you.
+datasources.section.destination-kinesis.shardCount.description=Number of shards to which the data should be streamed.
+datasources.section.destination-kvdb.bucket_id.title=Bucket ID
+datasources.section.destination-kvdb.secret_key.title=Secret Key
+datasources.section.destination-kvdb.bucket_id.description=The ID of your KVdb bucket.
+datasources.section.destination-kvdb.secret_key.description=Your bucket Secret Key.
+datasources.section.destination-local-json.destination_path.title=Destination Path
+datasources.section.destination-local-json.destination_path.description=Path to the directory where JSON files will be written. The files will be placed inside that local mount. For more information, check out our docs
+datasources.section.destination-mariadb-columnstore.database.title=Database
+datasources.section.destination-mariadb-columnstore.host.title=Host
+datasources.section.destination-mariadb-columnstore.password.title=Password
+datasources.section.destination-mariadb-columnstore.port.title=Port
+datasources.section.destination-mariadb-columnstore.username.title=Username
+datasources.section.destination-mariadb-columnstore.database.description=Name of the database.
+datasources.section.destination-mariadb-columnstore.host.description=The Hostname of the database.
+datasources.section.destination-mariadb-columnstore.password.description=The Password associated with the username.
+datasources.section.destination-mariadb-columnstore.port.description=The Port of the database.
+datasources.section.destination-mariadb-columnstore.username.description=The Username which is used to access the database.
+datasources.section.destination-meilisearch.api_key.title=API Key
+datasources.section.destination-meilisearch.host.title=Host
+datasources.section.destination-meilisearch.api_key.description=MeiliSearch API Key. See the docs for more information on how to obtain this key.
+datasources.section.destination-meilisearch.host.description=Hostname of the MeiliSearch instance.
+datasources.section.destination-mongodb.auth_type.oneOf.0.title=None
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.title=Password
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.title=User
+datasources.section.destination-mongodb.auth_type.oneOf.1.title=Login/Password
+datasources.section.destination-mongodb.auth_type.title=Authorization type
+datasources.section.destination-mongodb.database.title=DB Name
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.title=Host
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.title=Port
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.title=TLS Connection
+datasources.section.destination-mongodb.instance_type.oneOf.0.title=Standalone MongoDb Instance
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.title=Server addresses
+datasources.section.destination-mongodb.instance_type.oneOf.1.title=Replica Set
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL
+datasources.section.destination-mongodb.instance_type.oneOf.2.title=MongoDB Atlas
+datasources.section.destination-mongodb.instance_type.title=MongoDb Instance Type
+datasources.section.destination-mongodb.auth_type.description=Authorization type.
+datasources.section.destination-mongodb.auth_type.oneOf.0.description=None.
+datasources.section.destination-mongodb.auth_type.oneOf.1.description=Login/Password.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.password.description=Password associated with the username.
+datasources.section.destination-mongodb.auth_type.oneOf.1.properties.username.description=Username to use to access the database.
+datasources.section.destination-mongodb.database.description=Name of the database.
+datasources.section.destination-mongodb.instance_type.description=MongoDb instance to connect to. For MongoDB Atlas and Replica Set, a TLS connection is used by default.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.host.description=The Host of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.port.description=The Port of a Mongo database to be replicated.
+datasources.section.destination-mongodb.instance_type.oneOf.0.properties.tls.description=Indicates whether TLS encryption protocol will be used to connect to MongoDB. It is recommended to use TLS connection if possible. For more information see documentation.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.replica_set.description=A replica set name.
+datasources.section.destination-mongodb.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify `host`:`port` of each member separated by comma.
+datasources.section.destination-mongodb.instance_type.oneOf.2.properties.cluster_url.description=URL of a cluster to connect to.
+datasources.section.destination-mqtt.automatic_reconnect.title=Automatic reconnect
+datasources.section.destination-mqtt.broker_host.title=MQTT broker host
+datasources.section.destination-mqtt.broker_port.title=MQTT broker port
+datasources.section.destination-mqtt.clean_session.title=Clean session
+datasources.section.destination-mqtt.client.title=Client ID
+datasources.section.destination-mqtt.connect_timeout.title=Connect timeout
+datasources.section.destination-mqtt.message_qos.title=Message QoS
+datasources.section.destination-mqtt.message_retained.title=Message retained
+datasources.section.destination-mqtt.password.title=Password
+datasources.section.destination-mqtt.publisher_sync.title=Sync publisher
+datasources.section.destination-mqtt.topic_pattern.title=Topic pattern
+datasources.section.destination-mqtt.topic_test.title=Test topic
+datasources.section.destination-mqtt.use_tls.title=Use TLS
+datasources.section.destination-mqtt.username.title=Username
+datasources.section.destination-mqtt.automatic_reconnect.description=Whether the client will automatically attempt to reconnect to the server if the connection is lost.
+datasources.section.destination-mqtt.broker_host.description=Host of the broker to connect to.
+datasources.section.destination-mqtt.broker_port.description=Port of the broker.
+datasources.section.destination-mqtt.clean_session.description=Whether the client and server should remember state across restarts and reconnects.
+datasources.section.destination-mqtt.client.description=A client identifier that is unique on the server being connected to.
+datasources.section.destination-mqtt.connect_timeout.description= Maximum time interval (in seconds) the client will wait for the network connection to the MQTT server to be established.
+datasources.section.destination-mqtt.message_qos.description=Quality of service used for each message to be delivered.
+datasources.section.destination-mqtt.message_retained.description=Whether or not the publish message should be retained by the messaging engine.
+datasources.section.destination-mqtt.password.description=Password to use for the connection.
+datasources.section.destination-mqtt.publisher_sync.description=Wait synchronously until the record has been sent to the broker.
+datasources.section.destination-mqtt.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
+datasources.section.destination-mqtt.topic_test.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-mqtt.use_tls.description=Whether to use TLS encryption on the connection.
+datasources.section.destination-mqtt.username.description=User name to use for the connection.
+datasources.section.destination-mssql.database.title=DB Name
+datasources.section.destination-mssql.host.title=Host
+datasources.section.destination-mssql.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-mssql.password.title=Password
+datasources.section.destination-mssql.port.title=Port
+datasources.section.destination-mssql.schema.title=Default Schema
+datasources.section.destination-mssql.ssl_method.oneOf.0.title=Unencrypted
+datasources.section.destination-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate)
+datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate
+datasources.section.destination-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate)
+datasources.section.destination-mssql.ssl_method.title=SSL Method
+datasources.section.destination-mssql.username.title=User
+datasources.section.destination-mssql.database.description=The name of the MSSQL database.
+datasources.section.destination-mssql.host.description=The host name of the MSSQL database.
+datasources.section.destination-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-mssql.password.description=The password associated with this username.
+datasources.section.destination-mssql.port.description=The port of the MSSQL database.
+datasources.section.destination-mssql.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public".
+datasources.section.destination-mssql.ssl_method.description=The encryption method which is used to communicate with the database.
+datasources.section.destination-mssql.ssl_method.oneOf.0.description=The data transfer will not be encrypted.
+datasources.section.destination-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!)
+datasources.section.destination-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.destination-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate.
+datasources.section.destination-mssql.username.description=The username which is used to access the database.
+datasources.section.destination-mysql.database.title=DB Name
+datasources.section.destination-mysql.host.title=Host
+datasources.section.destination-mysql.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-mysql.password.title=Password
+datasources.section.destination-mysql.port.title=Port
+datasources.section.destination-mysql.ssl.title=SSL Connection
+datasources.section.destination-mysql.username.title=User
+datasources.section.destination-mysql.database.description=Name of the database.
+datasources.section.destination-mysql.host.description=Hostname of the database.
+datasources.section.destination-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-mysql.password.description=Password associated with the username.
+datasources.section.destination-mysql.port.description=Port of the database.
+datasources.section.destination-mysql.ssl.description=Encrypt data using SSL.
+datasources.section.destination-mysql.username.description=Username to use to access the database.
+datasources.section.destination-oracle.encryption.oneOf.0.title=Unencrypted
+datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.title=Encryption Algorithm
+datasources.section.destination-oracle.encryption.oneOf.1.title=Native Network Encryption (NNE)
+datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.title=SSL PEM file
+datasources.section.destination-oracle.encryption.oneOf.2.title=TLS Encrypted (verify certificate)
+datasources.section.destination-oracle.encryption.title=Encryption
+datasources.section.destination-oracle.host.title=Host
+datasources.section.destination-oracle.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-oracle.password.title=Password
+datasources.section.destination-oracle.port.title=Port
+datasources.section.destination-oracle.schema.title=Default Schema
+datasources.section.destination-oracle.sid.title=SID
+datasources.section.destination-oracle.username.title=User
+datasources.section.destination-oracle.encryption.description=The encryption method which is used when communicating with the database.
+datasources.section.destination-oracle.encryption.oneOf.0.description=Data transfer will not be encrypted.
+datasources.section.destination-oracle.encryption.oneOf.1.description=The native network encryption gives you the ability to encrypt database connections, without the configuration overhead of TCP/IP and SSL/TLS and without the need to open and listen on different ports.
+datasources.section.destination-oracle.encryption.oneOf.1.properties.encryption_algorithm.description=This parameter defines the database encryption algorithm.
+datasources.section.destination-oracle.encryption.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.destination-oracle.encryption.oneOf.2.properties.ssl_certificate.description=Privacy Enhanced Mail (PEM) files are concatenated certificate containers frequently used in certificate installations.
+datasources.section.destination-oracle.host.description=The hostname of the database.
+datasources.section.destination-oracle.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-oracle.password.description=The password associated with the username.
+datasources.section.destination-oracle.port.description=The port of the database.
+datasources.section.destination-oracle.schema.description=The default schema is used as the target schema for all statements issued from the connection that do not explicitly specify a schema name. The usual value for this field is "airbyte". In Oracle, schemas and users are the same thing, so the "user" parameter is used as the login credentials and this is used for the default Airbyte message schema.
+datasources.section.destination-oracle.sid.description=The System Identifier uniquely distinguishes the instance from any other instance on the same computer.
+datasources.section.destination-oracle.username.description=The username to access the database. This user must have CREATE USER privileges in the database.
+datasources.section.destination-postgres.database.title=DB Name
+datasources.section.destination-postgres.host.title=Host
+datasources.section.destination-postgres.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-postgres.password.title=Password
+datasources.section.destination-postgres.port.title=Port
+datasources.section.destination-postgres.schema.title=Default Schema
+datasources.section.destination-postgres.ssl.title=SSL Connection
+datasources.section.destination-postgres.ssl_mode.oneOf.0.title=disable
+datasources.section.destination-postgres.ssl_mode.oneOf.1.title=allow
+datasources.section.destination-postgres.ssl_mode.oneOf.2.title=prefer
+datasources.section.destination-postgres.ssl_mode.oneOf.3.title=require
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.title=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.title=Client key password (Optional)
+datasources.section.destination-postgres.ssl_mode.oneOf.4.title=verify-ca
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.title=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.title=Client certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.title=Client key
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.title=Client key password (Optional)
+datasources.section.destination-postgres.ssl_mode.oneOf.5.title=verify-full
+datasources.section.destination-postgres.ssl_mode.title=SSL modes
+datasources.section.destination-postgres.username.title=User
+datasources.section.destination-postgres.database.description=Name of the database.
+datasources.section.destination-postgres.host.description=Hostname of the database.
+datasources.section.destination-postgres.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-postgres.password.description=Password associated with the username.
+datasources.section.destination-postgres.port.description=Port of the database.
+datasources.section.destination-postgres.schema.description=The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public".
+datasources.section.destination-postgres.ssl.description=Encrypt data using SSL. When activating SSL, please select one of the connection modes.
+datasources.section.destination-postgres.ssl_mode.description=SSL connection modes.
+datasources.section.destination-postgres.ssl_mode.oneOf.0.description=Disable SSL.
+datasources.section.destination-postgres.ssl_mode.oneOf.1.description=Allow SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.2.description=Prefer SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.3.description=Require SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.4.description=Verify-ca SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.ca_certificate.description=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.4.properties.client_key_password.description=Password for the key storage. This field is optional; if you do not add it, the password will be generated automatically.
+datasources.section.destination-postgres.ssl_mode.oneOf.5.description=Verify-full SSL mode.
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.ca_certificate.description=CA certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_certificate.description=Client certificate
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key.description=Client key
+datasources.section.destination-postgres.ssl_mode.oneOf.5.properties.client_key_password.description=Password for the key storage. This field is optional; if you do not add it, the password will be generated automatically.
+datasources.section.destination-postgres.username.description=Username to use to access the database.
+datasources.section.destination-pubsub.credentials_json.title=Credentials JSON
+datasources.section.destination-pubsub.project_id.title=Project ID
+datasources.section.destination-pubsub.topic_id.title=PubSub Topic ID
+datasources.section.destination-pubsub.credentials_json.description=The contents of the JSON service account key. Check out the docs if you need help generating this key.
+datasources.section.destination-pubsub.project_id.description=The GCP project ID for the project containing the target PubSub.
+datasources.section.destination-pubsub.topic_id.description=The PubSub topic ID in the given GCP project ID.
+datasources.section.destination-pulsar.batching_enabled.title=Enable batching
+datasources.section.destination-pulsar.batching_max_messages.title=Batching max messages
+datasources.section.destination-pulsar.batching_max_publish_delay.title=Batching max publish delay
+datasources.section.destination-pulsar.block_if_queue_full.title=Block if queue is full
+datasources.section.destination-pulsar.brokers.title=Pulsar brokers
+datasources.section.destination-pulsar.compression_type.title=Compression type
+datasources.section.destination-pulsar.max_pending_messages.title=Max pending messages
+datasources.section.destination-pulsar.max_pending_messages_across_partitions.title=Max pending messages across partitions
+datasources.section.destination-pulsar.producer_name.title=Producer name
+datasources.section.destination-pulsar.producer_sync.title=Sync producer
+datasources.section.destination-pulsar.send_timeout_ms.title=Message send timeout
+datasources.section.destination-pulsar.topic_namespace.title=Topic namespace
+datasources.section.destination-pulsar.topic_pattern.title=Topic pattern
+datasources.section.destination-pulsar.topic_tenant.title=Topic tenant
+datasources.section.destination-pulsar.topic_test.title=Test topic
+datasources.section.destination-pulsar.topic_type.title=Topic type
+datasources.section.destination-pulsar.use_tls.title=Use TLS
+datasources.section.destination-pulsar.batching_enabled.description=Control whether automatic batching of messages is enabled for the producer.
+datasources.section.destination-pulsar.batching_max_messages.description=Maximum number of messages permitted in a batch.
+datasources.section.destination-pulsar.batching_max_publish_delay.description=Time period in milliseconds within which the messages sent will be batched.
+datasources.section.destination-pulsar.block_if_queue_full.description=If the send operation should block when the outgoing message queue is full.
+datasources.section.destination-pulsar.brokers.description=A list of host/port pairs to use for establishing the initial connection to the Pulsar cluster.
+datasources.section.destination-pulsar.compression_type.description=Compression type for the producer.
+datasources.section.destination-pulsar.max_pending_messages.description=The maximum size of a queue holding pending messages.
+datasources.section.destination-pulsar.max_pending_messages_across_partitions.description=The maximum number of pending messages across partitions.
+datasources.section.destination-pulsar.producer_name.description=Name for the producer. If not filled, the system will generate a globally unique name which can be used to access it.
+datasources.section.destination-pulsar.producer_sync.description=Wait synchronously until the record has been sent to Pulsar.
+datasources.section.destination-pulsar.send_timeout_ms.description=Message send timeout in milliseconds. If a message is not acknowledged by the server before the timeout expires, an error occurs.
+datasources.section.destination-pulsar.topic_namespace.description=The administrative unit of the topic, which acts as a grouping mechanism for related topics. Most topic configuration is performed at the namespace level. Each tenant has one or multiple namespaces.
+datasources.section.destination-pulsar.topic_pattern.description=Topic pattern in which the records will be sent. You can use patterns like '{namespace}' and/or '{stream}' to send the message to a specific topic based on these values. Notice that the topic name will be transformed to a standard naming convention.
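+# Editor's illustrative note (not part of the generated spec): with a hypothetical pattern such as
+# '{namespace}.{stream}.sample', a record from namespace 'public' and stream 'users' would be routed to a topic
+# named 'public.users.sample', before any transformation to the standard naming convention mentioned above.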
+datasources.section.destination-pulsar.topic_tenant.description=The topic tenant within the instance. Tenants are essential to multi-tenancy in Pulsar, and spread across clusters.
+datasources.section.destination-pulsar.topic_test.description=Topic to test if Airbyte can produce messages.
+datasources.section.destination-pulsar.topic_type.description=Identifies the type of topic. Pulsar supports two kinds of topics: persistent and non-persistent. In a persistent topic, all messages are durably persisted on disk (that means on multiple disks unless the broker is standalone), whereas a non-persistent topic does not persist messages to disk.
+datasources.section.destination-pulsar.use_tls.description=Whether to use TLS encryption on the connection.
+datasources.section.destination-rabbitmq.exchange.description=The exchange name.
+datasources.section.destination-rabbitmq.host.description=The RabbitMQ host name.
+datasources.section.destination-rabbitmq.password.description=The password to connect.
+datasources.section.destination-rabbitmq.port.description=The RabbitMQ port.
+datasources.section.destination-rabbitmq.routing_key.description=The routing key.
+datasources.section.destination-rabbitmq.ssl.description=SSL enabled.
+datasources.section.destination-rabbitmq.username.description=The username to connect.
+datasources.section.destination-rabbitmq.virtual_host.description=The RabbitMQ virtual host name.
+datasources.section.destination-redis.cache_type.title=Cache type
+datasources.section.destination-redis.host.title=Host
+datasources.section.destination-redis.password.title=Password
+datasources.section.destination-redis.port.title=Port
+datasources.section.destination-redis.username.title=Username
+datasources.section.destination-redis.cache_type.description=Redis cache type to store data in.
+datasources.section.destination-redis.host.description=Redis host to connect to.
+datasources.section.destination-redis.password.description=Password associated with Redis.
+datasources.section.destination-redis.port.description=Port of Redis.
+datasources.section.destination-redis.username.description=Username associated with Redis.
+datasources.section.destination-redshift.database.title=Database
+datasources.section.destination-redshift.host.title=Host
+datasources.section.destination-redshift.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-redshift.password.title=Password
+datasources.section.destination-redshift.port.title=Port
+datasources.section.destination-redshift.schema.title=Default Schema
+datasources.section.destination-redshift.uploading_method.oneOf.0.title=Standard
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.title=S3 Key Id
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.title=No encryption
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.title=Key
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.title=AES-CBC envelope encryption
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.title=Encryption
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.title=S3 Filename pattern (Optional)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.title=Purge Staging Files and Tables (Optional)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.title=S3 Bucket Name
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.title=S3 Bucket Path (Optional)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.title=S3 Bucket Region
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.title=S3 Access Key
+datasources.section.destination-redshift.uploading_method.oneOf.1.title=S3 Staging
+datasources.section.destination-redshift.uploading_method.title=Uploading Method
+datasources.section.destination-redshift.username.title=Username
+datasources.section.destination-redshift.database.description=Name of the database.
+datasources.section.destination-redshift.host.description=Host Endpoint of the Redshift Cluster (must include the cluster-id, region and end with .redshift.amazonaws.com)
+datasources.section.destination-redshift.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.destination-redshift.password.description=Password associated with the username.
+datasources.section.destination-redshift.port.description=Port of the database.
+datasources.section.destination-redshift.schema.description=The default schema tables are written to if the source does not specify a namespace. Unless specifically configured, the usual value for this field is "public".
+datasources.section.destination-redshift.uploading_method.description=The method used to upload data to the database.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.access_key_id.description=This ID grants access to the above S3 staging bucket. Airbyte requires Read and Write permissions to the given bucket. See AWS docs on how to generate an access key ID and secret access key.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.description=How to encrypt the staging data
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.0.description=Staging data will be stored in plaintext.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.description=Staging data will be encrypted using AES-CBC envelope encryption.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.encryption.oneOf.1.properties.key_encrypting_key.description=The key, base64-encoded. Must be either 128, 192, or 256 bits. Leave blank to have Airbyte generate an ephemeral key for each sync.
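+# Editor's illustrative note (not part of the generated spec): assuming the OpenSSL CLI is available, a
+# base64-encoded 256-bit key of the kind described above can be generated with `openssl rand -base64 32`.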
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s)
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.purge_staging_data.description=Whether to delete the staging files from S3 after completing the sync. See docs for details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_name.description=The name of the staging S3 bucket to use if utilising a COPY strategy. COPY is recommended for production workloads for better speed and scalability. See AWS docs for more details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_path.description=The directory under the S3 bucket where data will be written. If not provided, then defaults to the root directory. See path's name recommendations for more details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.s3_bucket_region.description=The region of the S3 staging bucket to use if utilising a COPY strategy. See AWS docs for details.
+datasources.section.destination-redshift.uploading_method.oneOf.1.properties.secret_access_key.description=The corresponding secret to the above access key id. See AWS docs on how to generate an access key ID and secret access key.
+datasources.section.destination-redshift.username.description=Username to use to access the database.
+datasources.section.destination-rockset.api_key.title=API Key
+datasources.section.destination-rockset.api_server.title=API Server
+datasources.section.destination-rockset.workspace.title=Workspace
+datasources.section.destination-rockset.api_key.description=Rockset API key
+datasources.section.destination-rockset.api_server.description=Rockset API URL
+datasources.section.destination-rockset.workspace.description=The Rockset workspace in which collections will be created and written to.
+datasources.section.destination-s3.access_key_id.title=S3 Key ID *
+datasources.section.destination-s3.file_name_pattern.title=S3 Filename pattern (Optional)
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.0.title=No Compression
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.title=Deflate Level
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.title=Deflate
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.2.title=bzip2
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.title=Compression Level
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.title=xz
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.title=Compression Level
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.title=Include Checksum
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.title=zstandard
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.5.title=snappy
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.title=Compression Codec *
+datasources.section.destination-s3.format.oneOf.0.properties.format_type.title=Format Type *
+datasources.section.destination-s3.format.oneOf.0.title=Avro: Apache Avro
+datasources.section.destination-s3.format.oneOf.1.properties.compression.oneOf.0.title=No Compression
+datasources.section.destination-s3.format.oneOf.1.properties.compression.oneOf.1.title=GZIP
+datasources.section.destination-s3.format.oneOf.1.properties.compression.title=Compression
+datasources.section.destination-s3.format.oneOf.1.properties.flattening.title=Normalization (Flattening)
+datasources.section.destination-s3.format.oneOf.1.properties.format_type.title=Format Type *
+datasources.section.destination-s3.format.oneOf.1.title=CSV: Comma-Separated Values
+datasources.section.destination-s3.format.oneOf.2.properties.compression.oneOf.0.title=No Compression
+datasources.section.destination-s3.format.oneOf.2.properties.compression.oneOf.1.title=GZIP
+datasources.section.destination-s3.format.oneOf.2.properties.compression.title=Compression
+datasources.section.destination-s3.format.oneOf.2.properties.format_type.title=Format Type *
+datasources.section.destination-s3.format.oneOf.2.title=JSON Lines: Newline-delimited JSON
+datasources.section.destination-s3.format.oneOf.3.properties.block_size_mb.title=Block Size (Row Group Size) (MB) (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.compression_codec.title=Compression Codec (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_encoding.title=Dictionary Encoding (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_page_size_kb.title=Dictionary Page Size (KB) (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.format_type.title=Format Type *
+datasources.section.destination-s3.format.oneOf.3.properties.max_padding_size_mb.title=Max Padding Size (MB) (Optional)
+datasources.section.destination-s3.format.oneOf.3.properties.page_size_kb.title=Page Size (KB) (Optional)
+datasources.section.destination-s3.format.oneOf.3.title=Parquet: Columnar Storage
+datasources.section.destination-s3.format.title=Output Format *
+datasources.section.destination-s3.s3_bucket_name.title=S3 Bucket Name
+datasources.section.destination-s3.s3_bucket_path.title=S3 Bucket Path
+datasources.section.destination-s3.s3_bucket_region.title=S3 Bucket Region
+datasources.section.destination-s3.s3_endpoint.title=Endpoint (Optional)
+datasources.section.destination-s3.s3_path_format.title=S3 Path Format (Optional)
+datasources.section.destination-s3.secret_access_key.title=S3 Access Key *
+datasources.section.destination-s3.access_key_id.description=The access key ID to access the S3 bucket. Airbyte requires Read and Write permissions to the given bucket. Read more here.
+datasources.section.destination-s3.file_name_pattern.description=The pattern allows you to set the file-name format for the S3 staging file(s)
+datasources.section.destination-s3.format.description=Format of the data output. See here for more details
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.description=The compression algorithm used to compress data. Default to no compression.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.1.properties.compression_level.description=0: no compression & fastest, 9: best compression & slowest.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.3.properties.compression_level.description=See here for details.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.compression_level.description=Negative levels are 'fast' modes akin to lz4 or snappy, levels above 9 are generally for archival purposes, and levels above 18 use a lot of memory.
+datasources.section.destination-s3.format.oneOf.0.properties.compression_codec.oneOf.4.properties.include_checksum.description=If true, include a checksum with each data block.
+datasources.section.destination-s3.format.oneOf.1.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".csv.gz").
+datasources.section.destination-s3.format.oneOf.1.properties.flattening.description=Whether the input json data should be normalized (flattened) in the output CSV. Please refer to docs for details.
+datasources.section.destination-s3.format.oneOf.2.properties.compression.description=Whether the output files should be compressed. If compression is selected, the output filename will have an extra extension (GZIP: ".jsonl.gz").
+datasources.section.destination-s3.format.oneOf.3.properties.block_size_mb.description=This is the size of a row group being buffered in memory. It limits the memory usage when writing. Larger values will improve the IO when reading, but consume more memory when writing. Default: 128 MB.
+datasources.section.destination-s3.format.oneOf.3.properties.compression_codec.description=The compression algorithm used to compress data pages.
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_encoding.description=Default: true.
+datasources.section.destination-s3.format.oneOf.3.properties.dictionary_page_size_kb.description=There is one dictionary page per column per row group when dictionary encoding is used. The dictionary page size works like the page size but for dictionary. Default: 1024 KB.
+datasources.section.destination-s3.format.oneOf.3.properties.max_padding_size_mb.description=Maximum size allowed as padding to align row groups. This is also the minimum size of a row group. Default: 8 MB.
+datasources.section.destination-s3.format.oneOf.3.properties.page_size_kb.description=The page size is for compression. A block is composed of pages. A page is the smallest unit that must be read fully to access a single record. If this value is too small, the compression will deteriorate. Default: 1024 KB.
+datasources.section.destination-s3.s3_bucket_name.description=The name of the S3 bucket. Read more here.
+datasources.section.destination-s3.s3_bucket_path.description=Directory under the S3 bucket where data will be written. Read more here
+datasources.section.destination-s3.s3_bucket_region.description=The region of the S3 bucket. See here for all region codes.
+datasources.section.destination-s3.s3_endpoint.description=Your S3 endpoint URL. Read more here
+datasources.section.destination-s3.s3_path_format.description=Format string on how data will be organized inside the S3 bucket directory. Read more here
+datasources.section.destination-s3.secret_access_key.description=The corresponding secret to the access key ID. Read more here
+datasources.section.destination-scylla.address.title=Address
+datasources.section.destination-scylla.keyspace.title=Keyspace
+datasources.section.destination-scylla.password.title=Password
+datasources.section.destination-scylla.port.title=Port
+datasources.section.destination-scylla.replication.title=Replication factor
+datasources.section.destination-scylla.username.title=Username
+datasources.section.destination-scylla.address.description=Address to connect to.
+datasources.section.destination-scylla.keyspace.description=Default Scylla keyspace to create data in.
+datasources.section.destination-scylla.password.description=Password associated with Scylla.
+datasources.section.destination-scylla.port.description=Port of Scylla.
+datasources.section.destination-scylla.replication.description=Indicates how many nodes the data should be replicated to.
+datasources.section.destination-scylla.username.description=Username to use to access Scylla.
+datasources.section.destination-sftp-json.destination_path.title=Destination path
+datasources.section.destination-sftp-json.host.title=Host
+datasources.section.destination-sftp-json.password.title=Password
+datasources.section.destination-sftp-json.port.title=Port
+datasources.section.destination-sftp-json.username.title=User
+datasources.section.destination-sftp-json.destination_path.description=Path to the directory where JSON files will be written.
+datasources.section.destination-sftp-json.host.description=Hostname of the SFTP server.
+datasources.section.destination-sftp-json.password.description=Password associated with the username.
+datasources.section.destination-sftp-json.port.description=Port of the SFTP server.
+datasources.section.destination-sftp-json.username.description=Username to use to access the SFTP server.
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.access_token.title=Access Token
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.refresh_token.title=Refresh Token
+datasources.section.destination-snowflake.credentials.oneOf.0.title=OAuth2.0
+datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key.title=Private Key
+datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key_password.title=Passphrase (Optional)
+datasources.section.destination-snowflake.credentials.oneOf.1.title=Key Pair Authentication
+datasources.section.destination-snowflake.credentials.oneOf.2.properties.password.title=Password
+datasources.section.destination-snowflake.credentials.oneOf.2.title=Username and Password
+datasources.section.destination-snowflake.credentials.title=Authorization Method
+datasources.section.destination-snowflake.database.title=Database
+datasources.section.destination-snowflake.host.title=Host
+datasources.section.destination-snowflake.jdbc_url_params.title=JDBC URL Params
+datasources.section.destination-snowflake.loading_method.oneOf.0.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.0.title=Select another option
+datasources.section.destination-snowflake.loading_method.oneOf.1.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.1.title=[Recommended] Internal Staging
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.access_key_id.title=AWS access key ID
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.0.title=No encryption
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.properties.key_encrypting_key.title=Key
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.oneOf.1.title=AES-CBC envelope encryption
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.encryption.title=Encryption
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.file_name_pattern.title=S3 Filename pattern (Optional)
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.purge_staging_data.title=Purge Staging Files and Tables
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_name.title=S3 Bucket Name
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.s3_bucket_region.title=S3 Bucket Region
+datasources.section.destination-snowflake.loading_method.oneOf.2.properties.secret_access_key.title=AWS secret access key
+datasources.section.destination-snowflake.loading_method.oneOf.2.title=AWS S3 Staging
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.bucket_name.title=Cloud Storage bucket name
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.credentials_json.title=Google Application Credentials
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.3.properties.project_id.title=Google Cloud project ID
+datasources.section.destination-snowflake.loading_method.oneOf.3.title=Google Cloud Storage Staging
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_account_name.title=Azure Blob Storage account name
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_container_name.title=Azure Blob Storage Container Name
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_endpoint_domain_name.title=Azure Blob Storage Endpoint
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.azure_blob_storage_sas_token.title=SAS Token
+datasources.section.destination-snowflake.loading_method.oneOf.4.properties.method.title=
+datasources.section.destination-snowflake.loading_method.oneOf.4.title=Azure Blob Storage Staging
+datasources.section.destination-snowflake.loading_method.title=Data Staging Method
+datasources.section.destination-snowflake.role.title=Role
+datasources.section.destination-snowflake.schema.title=Default Schema
+datasources.section.destination-snowflake.username.title=Username
+datasources.section.destination-snowflake.warehouse.title=Warehouse
+datasources.section.destination-snowflake.credentials.description=
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.access_token.description=Enter your application's Access Token
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_id.description=Enter your application's Client ID
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.client_secret.description=Enter your application's Client secret
+datasources.section.destination-snowflake.credentials.oneOf.0.properties.refresh_token.description=Enter your application's Refresh Token
+datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key.description=RSA Private key to use for Snowflake connection. See the docs for more information on how to obtain this key.
+datasources.section.destination-snowflake.credentials.oneOf.1.properties.private_key_password.description=Passphrase for private key
+datasources.section.destination-snowflake.credentials.oneOf.2.properties.password.description=Enter the password associated with the username.
+datasources.section.destination-snowflake.database.description=Enter the name of the database you want to sync data into
+datasources.section.destination-snowflake.host.description=Enter your Snowflake account's locator (in the format ...)
+datasources.section.source-kafka.bootstrap_servers.description=A list of host/port pairs in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down).
+datasources.section.source-kafka.client_dns_lookup.description=Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses.
+datasources.section.source-kafka.client_id.description=An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.
+datasources.section.source-kafka.enable_auto_commit.description=If true, the consumer's offset will be periodically committed in the background.
+datasources.section.source-kafka.group_id.description=The Group ID is how you distinguish different consumer groups.
+datasources.section.source-kafka.max_poll_records.description=The maximum number of records returned in a single call to poll(). Note that max_poll_records does not impact the underlying fetching behavior. The consumer will cache the records from each fetch request and return them incrementally from each poll.
+datasources.section.source-kafka.polling_time.description=Amount of time the Kafka connector should try to poll for messages.
+datasources.section.source-kafka.protocol.description=The Protocol used to communicate with brokers.
+datasources.section.source-kafka.protocol.oneOf.1.properties.sasl_jaas_config.description=The JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.source-kafka.protocol.oneOf.1.properties.sasl_mechanism.description=The SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.source-kafka.protocol.oneOf.2.properties.sasl_jaas_config.description=The JAAS login context parameters for SASL connections in the format used by JAAS configuration files.
+datasources.section.source-kafka.protocol.oneOf.2.properties.sasl_mechanism.description=The SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.
+datasources.section.source-kafka.receive_buffer_bytes.description=The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.
+datasources.section.source-kafka.repeated_calls.description=The number of repeated calls to poll() if no messages were received.
+datasources.section.source-kafka.request_timeout_ms.description=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.
+datasources.section.source-kafka.retry_backoff_ms.description=The amount of time to wait before attempting to retry a failed request to a given topic partition. This avoids repeatedly sending requests in a tight loop under some failure scenarios.
+datasources.section.source-kafka.subscription.description=You can choose to manually assign a list of partitions, or subscribe to all topics matching a specified pattern to get dynamically assigned partitions.
+datasources.section.source-kafka.subscription.oneOf.0.properties.subscription_type.description=Manually assign a list of partitions to this consumer. This interface does not allow for incremental assignment and will replace the previous assignment (if there is one).
+datasources.section.source-kafka.subscription.oneOf.1.properties.subscription_type.description=The Topic pattern from which the records will be read.
+datasources.section.source-kafka.test_topic.description=The topic to test whether Airbyte can consume messages.
+datasources.section.source-klaviyo.api_key.title=API Key
+datasources.section.source-klaviyo.start_date.title=Start Date
+datasources.section.source-klaviyo.api_key.description=Klaviyo API Key. See our docs if you need help finding this key.
+datasources.section.source-klaviyo.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.
+datasources.section.source-kustomer-singer.api_token.title=API Token
+datasources.section.source-kustomer-singer.start_date.title=Start Date
+datasources.section.source-kustomer-singer.api_token.description=Kustomer API Token. See the docs on how to obtain this
+datasources.section.source-kustomer-singer.start_date.description=The date from which you'd like to replicate the data
+datasources.section.source-kyriba.domain.title=Domain
+datasources.section.source-kyriba.end_date.title=End Date
+datasources.section.source-kyriba.password.title=Password
+datasources.section.source-kyriba.start_date.title=Start Date
+datasources.section.source-kyriba.username.title=Username
+datasources.section.source-kyriba.domain.description=Kyriba domain
+datasources.section.source-kyriba.end_date.description=The date the sync should end. If left empty, the sync will run to the current date.
+datasources.section.source-kyriba.password.description=Password to be used in basic auth
+datasources.section.source-kyriba.start_date.description=The date the sync should start from.
+datasources.section.source-kyriba.username.description=Username to be used in basic auth
+datasources.section.source-lemlist.api_key.title=API key
+datasources.section.source-lemlist.api_key.description=Lemlist API key.
+datasources.section.source-linkedin-ads.account_ids.title=Account IDs (Optional)
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_secret.title=Client secret
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.refresh_token.title=Refresh token
+datasources.section.source-linkedin-ads.credentials.oneOf.0.title=OAuth2.0
+datasources.section.source-linkedin-ads.credentials.oneOf.1.properties.access_token.title=Access token
+datasources.section.source-linkedin-ads.credentials.oneOf.1.title=Access token
+datasources.section.source-linkedin-ads.credentials.title=Authentication *
+datasources.section.source-linkedin-ads.start_date.title=Start date
+datasources.section.source-linkedin-ads.account_ids.description=Specify the account IDs to pull data from, separated by a space. Leave empty if you want to pull the data from all associated accounts. See the LinkedIn Ads docs for more info.
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_id.description=The client ID of the LinkedIn Ads developer application.
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.client_secret.description=The client secret of the LinkedIn Ads developer application.
+datasources.section.source-linkedin-ads.credentials.oneOf.0.properties.refresh_token.description=The key to refresh the expired access token.
+datasources.section.source-linkedin-ads.credentials.oneOf.1.properties.access_token.description=The token value generated using the authentication code. See the docs to obtain yours.
+datasources.section.source-linkedin-ads.start_date.description=UTC date in the format 2020-09-17. Any data before this date will not be replicated.
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_secret.title=Client secret
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.refresh_token.title=Refresh token
+datasources.section.source-linkedin-pages.credentials.oneOf.0.title=OAuth2.0
+datasources.section.source-linkedin-pages.credentials.oneOf.1.properties.access_token.title=Access token
+datasources.section.source-linkedin-pages.credentials.oneOf.1.title=Access token
+datasources.section.source-linkedin-pages.credentials.title=Authentication *
+datasources.section.source-linkedin-pages.org_id.title=Organization ID
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_id.description=The client ID of the LinkedIn developer application.
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.client_secret.description=The client secret of the LinkedIn developer application.
+datasources.section.source-linkedin-pages.credentials.oneOf.0.properties.refresh_token.description=The token value generated using the LinkedIn Developers OAuth Token Tools. See the docs to obtain yours.
+datasources.section.source-linkedin-pages.credentials.oneOf.1.properties.access_token.description=The token value generated using the LinkedIn Developers OAuth Token Tools. See the docs to obtain yours.
+datasources.section.source-linkedin-pages.org_id.description=Specify the Organization ID
+datasources.section.source-linnworks.application_id.title=Application ID
+datasources.section.source-linnworks.application_secret.title=Application Secret
+datasources.section.source-linnworks.start_date.title=Start Date
+datasources.section.source-linnworks.token.title=API Token
+datasources.section.source-linnworks.application_id.description=Linnworks Application ID
+datasources.section.source-linnworks.application_secret.description=Linnworks Application Secret
+datasources.section.source-linnworks.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.
+datasources.section.source-looker.client_id.title=Client ID
+datasources.section.source-looker.client_secret.title=Client Secret
+datasources.section.source-looker.domain.title=Domain
+datasources.section.source-looker.run_look_ids.title=Look IDs to Run
+datasources.section.source-looker.client_id.description=The Client ID is the first part of an API3 key that is specific to each Looker user. See the docs for more information on how to generate this key.
+datasources.section.source-looker.client_secret.description=The Client Secret is the second part of an API3 key.
+datasources.section.source-looker.domain.description=Domain for your Looker account, e.g. airbyte.cloud.looker.com, looker.[clientname].com, or an IP address
+datasources.section.source-looker.run_look_ids.description=The IDs of any Looks to run (optional)
+datasources.section.source-mailchimp.credentials.oneOf.0.properties.access_token.title=Access Token
+datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.source-mailchimp.credentials.oneOf.0.title=OAuth2.0
+datasources.section.source-mailchimp.credentials.oneOf.1.properties.apikey.title=API Key
+datasources.section.source-mailchimp.credentials.oneOf.1.title=API Key
+datasources.section.source-mailchimp.credentials.title=Authentication *
+datasources.section.source-mailchimp.credentials.oneOf.0.properties.access_token.description=An access token generated using the above client ID and secret.
+datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application.
+datasources.section.source-mailchimp.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application.
+datasources.section.source-mailchimp.credentials.oneOf.1.properties.apikey.description=Mailchimp API Key. See the docs for information on how to generate this key.
+datasources.section.source-mailgun.domain_region.title=Domain Region Code
+datasources.section.source-mailgun.private_key.title=Private API Key
+datasources.section.source-mailgun.start_date.title=Replication Start Date
+datasources.section.source-mailgun.domain_region.description=Domain region code. 'EU' or 'US' are possible values. The default is 'US'.
+datasources.section.source-mailgun.private_key.description=Primary account API key to access your Mailgun data.
+datasources.section.source-mailgun.start_date.description=UTC date and time in the format 2020-10-01 00:00:00. Any data before this date will not be replicated. If omitted, defaults to 3 days ago.
+datasources.section.source-marketo.client_id.title=Client ID
+datasources.section.source-marketo.client_secret.title=Client Secret
+datasources.section.source-marketo.domain_url.title=Domain URL
+datasources.section.source-marketo.start_date.title=Start Date
+datasources.section.source-marketo.client_id.description=The Client ID of your Marketo developer application. See the docs for info on how to obtain this.
+datasources.section.source-marketo.client_secret.description=The Client Secret of your Marketo developer application. See the docs for info on how to obtain this.
+datasources.section.source-marketo.domain_url.description=Your Marketo Base URL. See the docs for info on how to obtain this.
+datasources.section.source-marketo.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.refresh_token.title=Refresh Token
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.tenant_id.title=Directory (tenant) ID
+datasources.section.source-microsoft-teams.credentials.oneOf.0.title=Authenticate via Microsoft (OAuth 2.0)
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_id.title=Client ID
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_secret.title=Client Secret
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.tenant_id.title=Directory (tenant) ID
+datasources.section.source-microsoft-teams.credentials.oneOf.1.title=Authenticate via Microsoft
+datasources.section.source-microsoft-teams.credentials.title=Authentication mechanism
+datasources.section.source-microsoft-teams.period.title=Period
+datasources.section.source-microsoft-teams.credentials.description=Choose how to authenticate to Microsoft
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_id.description=The Client ID of your Microsoft Teams developer application.
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your Microsoft Teams developer application.
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.refresh_token.description=A Refresh Token to renew the expired Access Token.
+datasources.section.source-microsoft-teams.credentials.oneOf.0.properties.tenant_id.description=A globally unique identifier (GUID) that is different than your organization name or domain. Follow these steps to obtain it: open one of the Teams where you belong inside the Teams Application -> Click on the … next to the Team title -> Click on Get link to team -> Copy the link to the team and grab the tenant ID from the URL
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_id.description=The Client ID of your Microsoft Teams developer application.
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.client_secret.description=The Client Secret of your Microsoft Teams developer application.
+datasources.section.source-microsoft-teams.credentials.oneOf.1.properties.tenant_id.description=A globally unique identifier (GUID) that is different than your organization name or domain. Follow these steps to obtain it: open one of the Teams where you belong inside the Teams Application -> Click on the … next to the Team title -> Click on Get link to team -> Copy the link to the team and grab the tenant ID from the URL
+datasources.section.source-microsoft-teams.period.description=Specifies the length of time over which the Team Device Report stream is aggregated. The supported values are: D7, D30, D90, and D180.
+datasources.section.source-mixpanel.api_secret.title=Project Secret
+datasources.section.source-mixpanel.attribution_window.title=Attribution Window
+datasources.section.source-mixpanel.date_window_size.title=Date slicing window
+datasources.section.source-mixpanel.end_date.title=End Date
+datasources.section.source-mixpanel.project_timezone.title=Project Timezone
+datasources.section.source-mixpanel.region.title=Region
+datasources.section.source-mixpanel.select_properties_by_default.title=Select Properties By Default
+datasources.section.source-mixpanel.start_date.title=Start Date
+datasources.section.source-mixpanel.api_secret.description=Mixpanel project secret. See the docs for more information on how to obtain this.
+datasources.section.source-mixpanel.attribution_window.description=A period of time for attributing results to ads, and the lookback period after those actions occur during which ad results are counted. The default attribution window is 5 days.
+datasources.section.source-mixpanel.date_window_size.description=Defines the window size in days that is used to slice through data. You can reduce it if the amount of data in each window is too big for your environment.
+datasources.section.source-mixpanel.end_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data after this date will not be replicated. Leave empty to always sync to the most recent date.
+datasources.section.source-mixpanel.project_timezone.description=Time zone in which integer date times are stored. The project timezone may be found in the project settings in the Mixpanel console.
+datasources.section.source-mixpanel.region.description=The region of the Mixpanel domain instance, either US or EU.
+datasources.section.source-mixpanel.select_properties_by_default.description=Setting this config parameter to TRUE ensures that new properties on events and engage records are captured. Otherwise new properties will be ignored.
+datasources.section.source-mixpanel.start_date.description=UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. If this option is not set, the connector will replicate data from up to one year ago by default.
+datasources.section.source-monday.credentials.oneOf.0.properties.access_token.title=Access Token
+datasources.section.source-monday.credentials.oneOf.0.properties.client_id.title=Client ID
+datasources.section.source-monday.credentials.oneOf.0.properties.client_secret.title=Client Secret
+datasources.section.source-monday.credentials.oneOf.0.properties.subdomain.title=Subdomain/Slug (Optional)
+datasources.section.source-monday.credentials.oneOf.0.title=OAuth2.0
+datasources.section.source-monday.credentials.oneOf.1.properties.api_token.title=Personal API Token
+datasources.section.source-monday.credentials.oneOf.1.title=API Token
+datasources.section.source-monday.credentials.title=Authorization Method
+datasources.section.source-monday.credentials.oneOf.0.properties.access_token.description=Access Token for making authenticated requests.
+datasources.section.source-monday.credentials.oneOf.0.properties.client_id.description=The Client ID of your OAuth application.
+datasources.section.source-monday.credentials.oneOf.0.properties.client_secret.description=The Client Secret of your OAuth application.
+datasources.section.source-monday.credentials.oneOf.0.properties.subdomain.description=Slug/subdomain of the account, or the first part of the URL that comes before .monday.com
+datasources.section.source-monday.credentials.oneOf.1.properties.api_token.description=API Token for making authenticated requests.
+datasources.section.source-mongodb.auth_source.title=Authentication source
+datasources.section.source-mongodb.database.title=Database name
+datasources.section.source-mongodb.host.title=Host
+datasources.section.source-mongodb.password.title=Password
+datasources.section.source-mongodb.port.title=Port
+datasources.section.source-mongodb.replica_set.title=Replica Set
+datasources.section.source-mongodb.ssl.title=TLS connection
+datasources.section.source-mongodb.user.title=User
+datasources.section.source-mongodb.auth_source.description=Authentication source where user information is stored. See the Mongo docs for more info.
+datasources.section.source-mongodb.database.description=Database to be replicated.
+datasources.section.source-mongodb.host.description=Host of a Mongo database to be replicated.
+datasources.section.source-mongodb.password.description=Password
+datasources.section.source-mongodb.port.description=Port of a Mongo database to be replicated.
+datasources.section.source-mongodb.replica_set.description=The name of the set to filter servers by, when connecting to a replica set (Under this condition, the 'TLS connection' value automatically becomes 'true'). See the Mongo docs for more info.
+datasources.section.source-mongodb.ssl.description=If this switch is enabled, TLS connections will be used to connect to MongoDB.
+datasources.section.source-mongodb.user.description=User
+datasources.section.source-mongodb-v2.auth_source.title=Authentication Source
+datasources.section.source-mongodb-v2.database.title=Database Name
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.host.title=Host
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.port.title=Port
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.tls.title=TLS Connection
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.title=Standalone MongoDb Instance
+datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.replica_set.title=Replica Set
+datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.server_addresses.title=Server Addresses
+datasources.section.source-mongodb-v2.instance_type.oneOf.1.title=Replica Set
+datasources.section.source-mongodb-v2.instance_type.oneOf.2.properties.cluster_url.title=Cluster URL
+datasources.section.source-mongodb-v2.instance_type.oneOf.2.title=MongoDB Atlas
+datasources.section.source-mongodb-v2.instance_type.title=MongoDb Instance Type
+datasources.section.source-mongodb-v2.password.title=Password
+datasources.section.source-mongodb-v2.user.title=User
+datasources.section.source-mongodb-v2.auth_source.description=The authentication source where the user information is stored.
+datasources.section.source-mongodb-v2.database.description=The database you want to replicate.
+datasources.section.source-mongodb-v2.instance_type.description=The MongoDb instance to connect to. For MongoDB Atlas and Replica Set, a TLS connection is used by default.
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.host.description=The host name of the Mongo database.
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.port.description=The port of the Mongo database.
+datasources.section.source-mongodb-v2.instance_type.oneOf.0.properties.tls.description=Indicates whether TLS encryption protocol will be used to connect to MongoDB. It is recommended to use TLS connection if possible. For more information see documentation.
+datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.replica_set.description=A replica set in MongoDB is a group of mongod processes that maintain the same data set.
      +datasources.section.source-mongodb-v2.instance_type.oneOf.1.properties.server_addresses.description=The members of a replica set. Please specify `host`:`port` of each member, separated by commas.
      
+datasources.section.source-mongodb-v2.instance_type.oneOf.2.properties.cluster_url.description=The URL of a cluster to connect to.
+datasources.section.source-mongodb-v2.password.description=The password associated with this username.
+datasources.section.source-mongodb-v2.user.description=The username which is used to access the database.
+datasources.section.source-mssql.database.title=Database
+datasources.section.source-mssql.host.title=Host
+datasources.section.source-mssql.jdbc_url_params.title=JDBC URL Params
+datasources.section.source-mssql.password.title=Password
+datasources.section.source-mssql.port.title=Port
+datasources.section.source-mssql.replication_method.oneOf.0.title=Standard
+datasources.section.source-mssql.replication_method.oneOf.1.properties.data_to_sync.title=Data to Sync
+datasources.section.source-mssql.replication_method.oneOf.1.properties.snapshot_isolation.title=Initial Snapshot Isolation Level
+datasources.section.source-mssql.replication_method.oneOf.1.title=Logical Replication (CDC)
+datasources.section.source-mssql.replication_method.title=Replication Method
+datasources.section.source-mssql.schemas.title=Schemas
+datasources.section.source-mssql.ssl_method.oneOf.0.title=Unencrypted
+datasources.section.source-mssql.ssl_method.oneOf.1.title=Encrypted (trust server certificate)
+datasources.section.source-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.title=Host Name In Certificate
+datasources.section.source-mssql.ssl_method.oneOf.2.title=Encrypted (verify certificate)
+datasources.section.source-mssql.ssl_method.title=SSL Method
+datasources.section.source-mssql.username.title=Username
+datasources.section.source-mssql.database.description=The name of the database.
+datasources.section.source-mssql.host.description=The hostname of the database.
+datasources.section.source-mssql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.source-mssql.password.description=The password associated with the username.
+datasources.section.source-mssql.port.description=The port of the database.
+datasources.section.source-mssql.replication_method.description=The replication method used for extracting data from the database. STANDARD replication requires no setup on the DB side but will not be able to represent deletions incrementally. CDC uses {TBC} to detect inserts, updates, and deletes. This needs to be configured on the source database itself.
+datasources.section.source-mssql.replication_method.oneOf.0.description=Standard replication requires no setup on the DB side but will not be able to represent deletions incrementally.
+datasources.section.source-mssql.replication_method.oneOf.1.description=CDC uses {TBC} to detect inserts, updates, and deletes. This needs to be configured on the source database itself.
+datasources.section.source-mssql.replication_method.oneOf.1.properties.data_to_sync.description=What data should be synced under the CDC. "Existing and New" will read existing data as a snapshot, and sync new changes through CDC. "New Changes Only" will skip the initial snapshot, and only sync new changes through CDC.
+datasources.section.source-mssql.replication_method.oneOf.1.properties.snapshot_isolation.description=Existing data in the database are synced through an initial snapshot. This parameter controls the isolation level that will be used during the initial snapshotting. If you choose the "Snapshot" level, you must enable the snapshot isolation mode on the database.
+datasources.section.source-mssql.schemas.description=The list of schemas to sync from. Defaults to user. Case sensitive.
+datasources.section.source-mssql.ssl_method.description=The encryption method which is used when communicating with the database.
+datasources.section.source-mssql.ssl_method.oneOf.0.description=Data transfer will not be encrypted.
+datasources.section.source-mssql.ssl_method.oneOf.1.description=Use the certificate provided by the server without verification. (For testing purposes only!)
+datasources.section.source-mssql.ssl_method.oneOf.2.description=Verify and use the certificate provided by the server.
+datasources.section.source-mssql.ssl_method.oneOf.2.properties.hostNameInCertificate.description=Specifies the host name of the server. The value of this property must match the subject property of the certificate.
+datasources.section.source-mssql.username.description=The username which is used to access the database.
+datasources.section.source-my-hours.email.title=Email
+datasources.section.source-my-hours.logs_batch_size.title=Time logs batch size
+datasources.section.source-my-hours.password.title=Password
+datasources.section.source-my-hours.start_date.title=Start Date
+datasources.section.source-my-hours.email.description=Your My Hours username
+datasources.section.source-my-hours.logs_batch_size.description=Pagination size used for retrieving logs in days
      +datasources.section.source-my-hours.password.description=The password associated with the username
      
+datasources.section.source-my-hours.start_date.description=Start date for collecting time logs
+datasources.section.source-mysql.database.title=Database
+datasources.section.source-mysql.host.title=Host
+datasources.section.source-mysql.jdbc_url_params.title=JDBC URL Params
+datasources.section.source-mysql.password.title=Password
+datasources.section.source-mysql.port.title=Port
+datasources.section.source-mysql.replication_method.oneOf.0.title=STANDARD
+datasources.section.source-mysql.replication_method.oneOf.1.title=Logical Replication (CDC)
+datasources.section.source-mysql.replication_method.title=Replication Method
+datasources.section.source-mysql.ssl.title=SSL Connection
+datasources.section.source-mysql.ssl_mode.oneOf.0.title=preferred
+datasources.section.source-mysql.ssl_mode.oneOf.1.title=required
+datasources.section.source-mysql.ssl_mode.oneOf.2.properties.ca_certificate.title=CA certificate
+datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_certificate.title=Client certificate
+datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_key.title=Client key
+datasources.section.source-mysql.ssl_mode.oneOf.2.properties.client_key_password.title=Client key password (Optional)
+datasources.section.source-mysql.ssl_mode.oneOf.2.title=Verify CA
+datasources.section.source-mysql.ssl_mode.oneOf.3.properties.ca_certificate.title=CA certificate
+datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_certificate.title=Client certificate
+datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_key.title=Client key
+datasources.section.source-mysql.ssl_mode.oneOf.3.properties.client_key_password.title=Client key password (Optional)
+datasources.section.source-mysql.ssl_mode.oneOf.3.title=Verify Identity
+datasources.section.source-mysql.ssl_mode.title=SSL modes
+datasources.section.source-mysql.username.title=Username
+datasources.section.source-mysql.database.description=The database name.
+datasources.section.source-mysql.host.description=The host name of the database.
+datasources.section.source-mysql.jdbc_url_params.description=Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
+datasources.section.source-mysql.password.description=The password associated with the username.
+datasources.section.source-mysql.port.description=The port to connect to.
+datasources.section.source-mysql.replication_method.description=Replication method to use for extracting data from the database.
+datasources.section.source-mysql.replication_method.oneOf.0.description=Standard replication requires no setup on the DB side but will not be able to represent deletions incrementally.
+datasources.section.source-mysql.replication_method.oneOf.1.description=CDC uses the Binlog to detect inserts, updates, and deletes. This needs to be configured on the source database itself.
+datasources.section.source-mysql.ssl.description=Encrypt data using SSL.
      +datasources.section.source-mysql.ssl_mode.description=SSL connection modes.
      +        "description": "A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down).",
      
+ "type": "string",
+ "examples": ["kafka-broker1:9092,kafka-broker2:9092"]
+ },
+ "subscription": {
+ "title": "Subscription Method",
+ "type": "object",
+ "description": "You can choose to manually assign a list of partitions, or subscribe to all topics matching specified pattern to get dynamically assigned partitions.",
+ "oneOf": [
+ {
+ "title": "Manually assign a list of partitions",
+ "required": ["subscription_type", "topic_partitions"],
+ "properties": {
+ "subscription_type": {
+ "description": "Manually assign a list of partitions to this consumer. This interface does not allow for incremental assignment and will replace the previous assignment (if there is one).\nIf the given list of topic partitions is empty, it is treated the same as unsubscribe().",
+ "type": "string",
+ "const": "assign",
+ "enum": ["assign"],
+ "default": "assign"
+ },
+ "topic_partitions": {
+ "title": "List of topic:partition Pairs",
+ "type": "string",
+ "examples": ["sample.topic:0, sample.topic:1"]
+ }
+ }
+ },
+ {
+ "title": "Subscribe to all topics matching specified pattern",
+ "required": ["subscription_type", "topic_pattern"],
+ "properties": {
+ "subscription_type": {
+ "description": "The Topic pattern from which the records will be read.",
+ "type": "string",
+ "const": "subscribe",
+ "enum": ["subscribe"],
+ "default": "subscribe"
+ },
+ "topic_pattern": {
+ "title": "Topic Pattern",
+ "type": "string",
+ "examples": ["sample.topic"]
+ }
+ }
+ }
+ ]
+ },
+ "test_topic": {
+ "title": "Test Topic",
+ "description": "The Topic to test in case the Airbyte can consume messages.",
+ "type": "string",
+ "examples": ["test.topic"]
+ },
+ "group_id": {
+ "title": "Group ID",
+ "description": "The Group ID is how you distinguish different consumer groups.",
+ "type": "string",
+ "examples": ["group.id"]
+ },
+ "max_poll_records": {
+ "title": "Max Poll Records",
+ "description": "The maximum number of records returned in a single call to poll(). Note, that max_poll_records does not impact the underlying fetching behavior. The consumer will cache the records from each fetch request and returns them incrementally from each poll.",
+ "type": "integer",
+ "default": 500
+ },
+ "polling_time": {
+ "title": "Polling Time",
+ "description": "Amount of time Kafka connector should try to poll for messages.",
+ "type": "integer",
+ "default": 100
+ },
+ "protocol": {
+ "title": "Protocol",
+ "type": "object",
+ "description": "The Protocol used to communicate with brokers.",
+ "oneOf": [
+ {
+ "title": "PLAINTEXT",
+ "required": ["security_protocol"],
+ "properties": {
+ "security_protocol": {
+ "type": "string",
+ "enum": ["PLAINTEXT"],
+ "default": "PLAINTEXT"
+ }
+ }
+ },
+ {
+ "title": "SASL PLAINTEXT",
+ "required": [
+ "security_protocol",
+ "sasl_mechanism",
+ "sasl_jaas_config"
+ ],
+ "properties": {
+ "security_protocol": {
+ "type": "string",
+ "enum": ["SASL_PLAINTEXT"],
+ "default": "SASL_PLAINTEXT"
+ },
+ "sasl_mechanism": {
+ "title": "SASL Mechanism",
+ "description": "The SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.",
+ "type": "string",
+ "default": "PLAIN",
+ "enum": ["PLAIN"]
+ },
+ "sasl_jaas_config": {
+ "title": "SASL JAAS Config",
+ "description": "The JAAS login context parameters for SASL connections in the format used by JAAS configuration files.",
+ "type": "string",
+ "default": "",
+ "airbyte_secret": true
+ }
+ }
+ },
+ {
+ "title": "SASL SSL",
+ "required": [
+ "security_protocol",
+ "sasl_mechanism",
+ "sasl_jaas_config"
+ ],
+ "properties": {
+ "security_protocol": {
+ "type": "string",
+ "enum": ["SASL_SSL"],
+ "default": "SASL_SSL"
+ },
+ "sasl_mechanism": {
+ "title": "SASL Mechanism",
+ "description": "The SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.",
+ "type": "string",
+ "default": "GSSAPI",
+ "enum": [
+ "GSSAPI",
+ "OAUTHBEARER",
+ "SCRAM-SHA-256",
+ "SCRAM-SHA-512",
+ "PLAIN"
+ ]
+ },
+ "sasl_jaas_config": {
+ "title": "SASL JAAS Config",
+ "description": "The JAAS login context parameters for SASL connections in the format used by JAAS configuration files.",
+ "type": "string",
+ "default": "",
+ "airbyte_secret": true
+ }
+ }
+ }
+ ]
+ },
+ "client_id": {
+ "title": "Client ID",
+ "description": "An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.",
+ "type": "string",
+ "examples": ["airbyte-consumer"]
+ },
+ "enable_auto_commit": {
+ "title": "Enable Auto Commit",
+ "description": "If true, the consumer's offset will be periodically committed in the background.",
+ "type": "boolean",
+ "default": true
+ },
+ "auto_commit_interval_ms": {
+ "title": "Auto Commit Interval, ms",
+ "description": "The frequency in milliseconds that the consumer offsets are auto-committed to Kafka if enable.auto.commit is set to true.",
+ "type": "integer",
+ "default": 5000
+ },
+ "client_dns_lookup": {
+ "title": "Client DNS Lookup",
+ "description": "Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses.",
+ "type": "string",
+ "default": "use_all_dns_ips",
+ "enum": [
+ "default",
+ "use_all_dns_ips",
+ "resolve_canonical_bootstrap_servers_only"
+ ]
+ },
+ "retry_backoff_ms": {
+ "title": "Retry Backoff, ms",
+ "description": "The amount of time to wait before attempting to retry a failed request to a given topic partition. This avoids repeatedly sending requests in a tight loop under some failure scenarios.",
+ "type": "integer",
+ "default": 100
+ },
+ "request_timeout_ms": {
+ "title": "Request Timeout, ms",
+ "description": "The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.",
+ "type": "integer",
+ "default": 30000
+ },
+ "receive_buffer_bytes": {
+ "title": "Receive Buffer, bytes",
+ "description": "The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.",
+ "type": "integer",
+ "default": 32768
+ },
+ "auto_offset_reset": {
+ "title": "Auto Offset Reset",
+ "description": "What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server - earliest: automatically reset the offset to the earliest offset, latest: automatically reset the offset to the latest offset, none: throw exception to the consumer if no previous offset is found for the consumer's group, anything else: throw exception to the consumer.",
+ "type": "string",
+ "default": "latest",
+ "enum": ["latest", "earliest", "none"]
+ },
+ "repeated_calls": {
+ "title": "Repeated Calls",
+ "description": "The number of repeated calls to poll() if no messages were received.",
+ "type": "integer",
+ "default": 3
+ }
+ }
+ }
+}
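
      The consumer options in the spec above correspond almost one-to-one to standard Apache Kafka consumer settings. The following is only a hedged sketch of that mapping, not part of this diff and not the connector's actual implementation; the broker addresses, topic names and group id are the spec's own example values.

      ```java
      import java.time.Duration;
      import java.util.List;
      import java.util.Properties;

      import org.apache.kafka.clients.consumer.ConsumerRecords;
      import org.apache.kafka.clients.consumer.KafkaConsumer;
      import org.apache.kafka.common.TopicPartition;
      import org.apache.kafka.common.serialization.StringDeserializer;

      public class KafkaSourceConfigSketch {

        public static void main(String[] args) {
          // Spec fields translated to the equivalent Kafka consumer keys (illustrative values).
          Properties props = new Properties();
          props.put("bootstrap.servers", "kafka-broker1:9092,kafka-broker2:9092"); // bootstrap_servers
          props.put("group.id", "group.id");                                       // group_id
          props.put("client.id", "airbyte-consumer");                              // client_id
          props.put("max.poll.records", 500);                                      // max_poll_records
          props.put("enable.auto.commit", true);                                   // enable_auto_commit
          props.put("auto.commit.interval.ms", 5000);                              // auto_commit_interval_ms
          props.put("auto.offset.reset", "latest");                                // auto_offset_reset
          props.put("client.dns.lookup", "use_all_dns_ips");                       // client_dns_lookup
          props.put("retry.backoff.ms", 100);                                      // retry_backoff_ms
          props.put("request.timeout.ms", 30000);                                  // request_timeout_ms
          props.put("receive.buffer.bytes", 32768);                                // receive_buffer_bytes
          props.put("security.protocol", "PLAINTEXT");                             // protocol: PLAINTEXT option
          props.put("key.deserializer", StringDeserializer.class.getName());
          props.put("value.deserializer", StringDeserializer.class.getName());

          try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            // subscription_type = "assign": a fixed list of topic:partition pairs.
            consumer.assign(List.of(
                new TopicPartition("sample.topic", 0),
                new TopicPartition("sample.topic", 1)));
            // subscription_type = "subscribe" would instead use:
            // consumer.subscribe(java.util.regex.Pattern.compile("sample.topic"));

            // polling_time / repeated_calls: poll a few times, skipping empty results.
            for (int call = 0; call < 3; call++) {
              ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
              records.forEach(r ->
                  System.out.printf("%s-%d@%d: %s%n", r.topic(), r.partition(), r.offset(), r.value()));
            }
          }
        }
      }
      ```

      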
diff --git a/jvm/src/main/resources/airbyte/source-klaviyo.json b/jvm/src/main/resources/airbyte/source-klaviyo.json
new file mode 100644
index 0000000..914600b
--- /dev/null
+++ b/jvm/src/main/resources/airbyte/source-klaviyo.json
@@ -0,0 +1,25 @@
+{
+ "documentationUrl": "https://docs.airbyte.io/integrations/sources/klaviyo",
+ "changelogUrl": "https://docs.airbyte.io/integrations/sources/klaviyo",
+ "connectionSpecification": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Klaviyo Spec",
+ "type": "object",
+ "properties": {
+ "api_key": {
+ "title": "Api Key",
+ "description": "Klaviyo API Key. See our docs if you need help finding this key.",
+ "airbyte_secret": true,
+ "type": "string"
+ },
+ "start_date": {
+ "title": "Start Date",
+ "description": "UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.",
+ "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z$",
+ "examples": ["2017-01-25T00:00:00Z"],
+ "type": "string"
+ }
+ },
+ "required": ["api_key", "start_date"]
+ }
+}
diff --git a/jvm/src/main/resources/airbyte/source-kustomer-singer.json b/jvm/src/main/resources/airbyte/source-kustomer-singer.json
new file mode 100644
index 0000000..19e62c7
--- /dev/null
+++ b/jvm/src/main/resources/airbyte/source-kustomer-singer.json
@@ -0,0 +1,24 @@
+{
+ "documentationUrl": "https://docs.airbyte.io/integrations/sources/kustomer",
+ "connectionSpecification": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Source Kustomer Singer Spec",
+ "type": "object",
+ "required": ["api_token", "start_date"],
+ "additionalProperties": true,
+ "properties": {
+ "api_token": {
+ "title": "API Token",
+ "type": "string",
+ "description": "Kustomer API Token. See the docs on how to obtain this",
+ "airbyte_secret": true
+ },
+ "start_date": {
+ "title": "Start Date",
+ "type": "string",
+ "description": "The date from which you'd like to replicate the data",
+ "examples": ["2019-01-01T00:00:00Z"]
+ }
+ }
+ }
+}
diff --git a/jvm/src/main/resources/airbyte/source-kyriba.json b/jvm/src/main/resources/airbyte/source-kyriba.json
new file mode 100644
index 0000000..3705013
--- /dev/null
+++ b/jvm/src/main/resources/airbyte/source-kyriba.json
@@ -0,0 +1,44 @@
+{
+ "documentationUrl": "https://docsurl.com",
+ "connectionSpecification": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Kyriba Spec",
+ "type": "object",
+ "required": ["domain", "username", "password", "start_date"],
+ "additionalProperties": false,
+ "properties": {
+ "domain": {
+ "type": "string",
+ "description": "Kyriba domain",
+ "title": "Domain",
+ "examples": ["demo.kyriba.com"],
+ "pattern": "^[a-zA-Z0-9._-]*\\.[a-zA-Z0-9._-]*\\.[a-z]*"
+ },
+ "username": {
+ "type": "string",
+ "description": "Username to be used in basic auth",
+ "title": "Username"
+ },
+ "password": {
+ "type": "string",
+ "description": "Password to be used in basic auth",
+ "title": "Password",
+ "airbyte_secret": true
+ },
+ "start_date": {
+ "type": "string",
+ "description": "The date the sync should start from.",
+ "title": "Start Date",
+ "examples": ["2021-01-10"],
+ "pattern": "^\\d{4}\\-(0[1-9]|1[012])\\-(0[1-9]|[12][0-9]|3[01])$"
+ },
+ "end_date": {
+ "type": "string",
+ "description": "The date the sync should end. If let empty the sync will run to the current date.",
+ "title": "End Date",
+ "examples": ["2022-03-01"],
+ "pattern": "^(?:(\\d{4}\\-(0[1-9]|1[012])\\-(0[1-9]|[12][0-9]|3[01]))|)$"
+ }
+ }
+ }
+}
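
      The `pattern` fields above are ordinary regular expressions, so candidate values can be sanity-checked before they are handed to the connector. A minimal, self-contained check using the spec's own patterns and example values:

      ```java
      import java.util.regex.Pattern;

      public class KyribaDatePatternCheck {

        // Patterns copied from the spec above.
        private static final Pattern START_DATE =
            Pattern.compile("^\\d{4}\\-(0[1-9]|1[012])\\-(0[1-9]|[12][0-9]|3[01])$");
        private static final Pattern END_DATE =
            Pattern.compile("^(?:(\\d{4}\\-(0[1-9]|1[012])\\-(0[1-9]|[12][0-9]|3[01]))|)$");

        public static void main(String[] args) {
          System.out.println(START_DATE.matcher("2021-01-10").matches()); // true
          System.out.println(START_DATE.matcher("2021-13-10").matches()); // false: month 13 is rejected
          System.out.println(END_DATE.matcher("2022-03-01").matches());   // true
          System.out.println(END_DATE.matcher("").matches());             // true: end_date may be left empty
        }
      }
      ```

      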
diff --git a/jvm/src/main/resources/airbyte/source-lemlist.json b/jvm/src/main/resources/airbyte/source-lemlist.json
new file mode 100644
index 0000000..9aa5133
--- /dev/null
+++ b/jvm/src/main/resources/airbyte/source-lemlist.json
@@ -0,0 +1,18 @@
+{
+ "documentationUrl": "https://docs.airbyte.io/integrations/sources/lemlist",
+ "connectionSpecification": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Lemlist Spec",
+ "type": "object",
+ "required": ["api_key"],
+ "additionalProperties": false,
+ "properties": {
+ "api_key": {
+ "type": "string",
+ "title": "API key",
+ "description": "Lemlist API key.",
+ "airbyte_secret": true
+ }
+ }
+ }
+}
diff --git a/jvm/src/main/resources/airbyte/source-linkedin-ads.json b/jvm/src/main/resources/airbyte/source-linkedin-ads.json
new file mode 100644
index 0000000..688ab0f
--- /dev/null
+++ b/jvm/src/main/resources/airbyte/source-linkedin-ads.json
@@ -0,0 +1,88 @@
+{
+ "documentationUrl": "https://docs.airbyte.io/integrations/sources/linkedin-ads",
+ "connectionSpecification": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Linkedin Ads Spec",
+ "type": "object",
+ "required": ["start_date"],
+ "additionalProperties": true,
+ "properties": {
+ "credentials": {
+ "title": "Authentication *",
+ "type": "object",
+ "oneOf": [
+ {
+ "type": "object",
+ "title": "OAuth2.0",
+ "required": ["client_id", "client_secret", "refresh_token"],
+ "properties": {
+ "auth_method": {
+ "type": "string",
+ "const": "oAuth2.0"
+ },
+ "client_id": {
+ "type": "string",
+ "title": "Client ID",
+ "description": "The client ID of the LinkedIn Ads developer application.",
+ "airbyte_secret": true
+ },
+ "client_secret": {
+ "type": "string",
+ "title": "Client secret",
+ "description": "The client secret the LinkedIn Ads developer application.",
+ "airbyte_secret": true
+ },
+ "refresh_token": {
+ "type": "string",
+ "title": "Refresh token",
+ "description": "The key to refresh the expired access token.",
+ "airbyte_secret": true
+ }
+ }
+ },
+ {
+ "title": "Access token",
+ "type": "object",
+ "required": ["access_token"],
+ "properties": {
+ "auth_method": {
+ "type": "string",
+ "const": "access_token"
+ },
+ "access_token": {
+ "type": "string",
+ "title": "Access token",
+ "description": "The token value generated using the authentication code. See the docs to obtain yours.",
+ "airbyte_secret": true
+ }
+ }
+ }
+ ]
+ },
+ "start_date": {
+ "type": "string",
+ "title": "Start date",
+ "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}$",
+ "description": "UTC date in the format 2020-09-17. Any data before this date will not be replicated.",
+ "examples": ["2021-05-17"]
+ },
+ "account_ids": {
+ "title": "Account IDs (Optional)",
+ "type": "array",
+ "description": "Specify the account IDs separated by a space, to pull the data from. Leave empty, if you want to pull the data from all associated accounts. See the LinkedIn Ads docs for more info.",
+ "items": {
+ "type": "integer"
+ },
+ "default": []
+ }
+ }
+ },
+ "authSpecification": {
+ "auth_type": "oauth2.0",
+ "oauth2Specification": {
+ "rootObject": ["credentials", "0"],
+ "oauthFlowInitParameters": [["client_id"], ["client_secret"]],
+ "oauthFlowOutputParameters": [["refresh_token"]]
+ }
+ }
+}
diff --git a/jvm/src/main/resources/airbyte/source-linkedin-pages.json b/jvm/src/main/resources/airbyte/source-linkedin-pages.json
new file mode 100644
index 0000000..a335440
--- /dev/null
+++ b/jvm/src/main/resources/airbyte/source-linkedin-pages.json
@@ -0,0 +1,79 @@
+{
+ "documentationUrl": "https://docs.airbyte.com/integrations/sources/linkedin-pages/",
+ "connectionSpecification": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Linkedin Pages Spec",
+ "type": "object",
+ "required": ["org_id"],
+ "additionalProperties": true,
+ "properties": {
+ "org_id": {
+ "title": "Organization ID",
+ "type": "integer",
+ "airbyte_secret": true,
+ "description": "Specify the Organization ID",
+ "examples": ["123456789"]
+ },
+ "credentials": {
+ "title": "Authentication *",
+ "type": "object",
+ "oneOf": [
+ {
+ "type": "object",
+ "title": "OAuth2.0",
+ "required": ["client_id", "client_secret", "refresh_token"],
+ "properties": {
+ "auth_method": {
+ "type": "string",
+ "const": "oAuth2.0"
+ },
+ "client_id": {
+ "type": "string",
+ "title": "Client ID",
+ "description": "The client ID of the LinkedIn developer application.",
+ "airbyte_secret": true
+ },
+ "client_secret": {
+ "type": "string",
+ "title": "Client secret",
+ "description": "The client secret of the LinkedIn developer application.",
+ "airbyte_secret": true
+ },
+ "refresh_token": {
+ "type": "string",
+ "title": "Refresh token",
+ "description": "The token value generated using the LinkedIn Developers OAuth Token Tools. See the docs to obtain yours.",
+ "airbyte_secret": true
+ }
+ }
+ },
+ {
+ "title": "Access token",
+ "type": "object",
+ "required": ["access_token"],
+ "properties": {
+ "auth_method": {
+ "type": "string",
+ "const": "access_token"
+ },
+ "access_token": {
+ "type": "string",
+ "title": "Access token",
+ "description": "The token value generated using the LinkedIn Developers OAuth Token Tools. See the docs to obtain yours.",
+ "airbyte_secret": true
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "authSpecification": {
+ "auth_type": "oauth2.0",
+ "oauth2Specification": {
+ "rootObject": ["credentials", "0"],
+ "oauthFlowInitParameters": [["client_id"], ["client_secret"]],
+ "oauthFlowOutputParameters": [["refresh_token"]]
+ }
+ }
+}
diff --git a/jvm/src/main/resources/airbyte/source-linnworks.json b/jvm/src/main/resources/airbyte/source-linnworks.json
new file mode 100644
index 0000000..7afcdce
--- /dev/null
+++ b/jvm/src/main/resources/airbyte/source-linnworks.json
@@ -0,0 +1,33 @@
+{
+ "documentationUrl": "https://docs.airbyte.io/integrations/sources/linnworks",
+ "connectionSpecification": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Linnworks Spec",
+ "type": "object",
+ "required": ["application_id", "application_secret", "token", "start_date"],
+ "additionalProperties": false,
+ "properties": {
+ "application_id": {
+ "title": "Application ID.",
+ "description": "Linnworks Application ID",
+ "type": "string"
+ },
+ "application_secret": {
+ "title": "Application Secret",
+ "description": "Linnworks Application Secret",
+ "type": "string",
+ "airbyte_secret": true
+ },
+ "token": {
+ "title": "API Token",
+ "type": "string"
+ },
+ "start_date": {
+ "title": "Start Date",
+ "description": "UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.",
+ "type": "string",
+ "format": "date-time"
+ }
+ }
+ }
+}
diff --git a/jvm/src/main/resources/airbyte/source-looker.json b/jvm/src/main/resources/airbyte/source-looker.json
new file mode 100644
index 0000000..ee9f9dc
--- /dev/null
+++ b/jvm/src/main/resources/airbyte/source-looker.json
@@ -0,0 +1,41 @@
+{
+ "documentationUrl": "https://docs.airbyte.io/integrations/sources/looker",
+ "connectionSpecification": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Looker Spec",
+ "type": "object",
+ "required": ["domain", "client_id", "client_secret"],
+ "additionalProperties": false,
+ "properties": {
+ "domain": {
+ "type": "string",
+ "title": "Domain",
+ "examples": [
+ "domainname.looker.com",
+ "looker.clientname.com",
+ "123.123.124.123:8000"
+ ],
+ "description": "Domain for your Looker account, e.g. airbyte.cloud.looker.com,looker.[clientname].com,IP address"
+ },
+ "client_id": {
+ "title": "Client ID",
+ "type": "string",
+ "description": "The Client ID is first part of an API3 key that is specific to each Looker user. See the docs for more information on how to generate this key."
+ },
+ "client_secret": {
+ "title": "Client Secret",
+ "type": "string",
+ "description": "The Client Secret is second part of an API3 key."
+ },
+ "run_look_ids": {
+ "title": "Look IDs to Run",
+ "type": "array",
+ "items": {
+ "type": "string",
+ "pattern": "^[0-9]*$"
+ },
+ "description": "The IDs of any Looks to run (optional)"
+ }
+ }
+ }
+}
diff --git a/jvm/src/main/resources/airbyte/source-mailchimp.json b/jvm/src/main/resources/airbyte/source-mailchimp.json
new file mode 100644
index 0000000..acc3145
--- /dev/null
+++ b/jvm/src/main/resources/airbyte/source-mailchimp.json
@@ -0,0 +1,109 @@
+{
+ "documentationUrl": "https://docs.airbyte.io/integrations/sources/mailchimp",
+ "connectionSpecification": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Mailchimp Spec",
+ "type": "object",
+ "required": [],
+ "additionalProperties": true,
+ "properties": {
+ "credentials": {
+ "type": "object",
+ "title": "Authentication *",
+ "oneOf": [
+ {
+ "title": "OAuth2.0",
+ "type": "object",
+ "required": ["auth_type", "access_token"],
+ "properties": {
+ "auth_type": {
+ "type": "string",
+ "const": "oauth2.0",
+ "order": 0
+ },
+ "client_id": {
+ "title": "Client ID",
+ "type": "string",
+ "description": "The Client ID of your OAuth application.",
+ "airbyte_secret": true
+ },
+ "client_secret": {
+ "title": "Client Secret",
+ "type": "string",
+ "description": "The Client Secret of your OAuth application.",
+ "airbyte_secret": true
+ },
+ "access_token": {
+ "title": "Access Token",
+ "type": "string",
+ "description": "An access token generated using the above client ID and secret.",
+ "airbyte_secret": true
+ }
+ }
+ },
+ {
+ "type": "object",
+ "title": "API Key",
+ "required": ["auth_type", "apikey"],
+ "properties": {
+ "auth_type": {
+ "type": "string",
+ "const": "apikey",
+ "order": 1
+ },
+ "apikey": {
+ "type": "string",
+ "title": "API Key",
+ "description": "Mailchimp API Key. See the docs for information on how to generate this key.",
+ "airbyte_secret": true
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "advanced_auth": {
+ "auth_flow_type": "oauth2.0",
+ "predicate_key": ["credentials", "auth_type"],
+ "predicate_value": "oauth2.0",
+ "oauth_config_specification": {
+ "complete_oauth_output_specification": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "access_token": {
+ "type": "string",
+ "path_in_connector_config": ["credentials", "access_token"]
+ }
+ }
+ },
+ "complete_oauth_server_input_specification": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "client_id": {
+ "type": "string"
+ },
+ "client_secret": {
+ "type": "string"
+ }
+ }
+ },
+ "complete_oauth_server_output_specification": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "client_id": {
+ "type": "string",
+ "path_in_connector_config": ["credentials", "client_id"]
+ },
+ "client_secret": {
+ "type": "string",
+ "path_in_connector_config": ["credentials", "client_secret"]
+ }
+ }
+ }
+ }
+ }
+}
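
      The `path_in_connector_config` entries above describe where the OAuth flow's outputs are written inside the connector configuration, e.g. ["credentials", "access_token"] means config.credentials.access_token. As an illustration only, with made-up placeholder values, the completed OAuth-style configuration ends up shaped like this; the sketch uses Jackson to build it:

      ```java
      import com.fasterxml.jackson.databind.ObjectMapper;
      import com.fasterxml.jackson.databind.node.ObjectNode;

      public class MailchimpOAuthConfigSketch {

        public static void main(String[] args) throws Exception {
          ObjectMapper mapper = new ObjectMapper();

          // advanced_auth writes each output under its path_in_connector_config,
          // i.e. ["credentials", "access_token"] -> config.credentials.access_token.
          ObjectNode config = mapper.createObjectNode();
          ObjectNode credentials = config.putObject("credentials");
          credentials.put("auth_type", "oauth2.0");              // predicate_key / predicate_value
          credentials.put("client_id", "my-client-id");          // placeholder value
          credentials.put("client_secret", "my-client-secret");  // placeholder value
          credentials.put("access_token", "my-access-token");    // placeholder value

          System.out.println(mapper.writerWithDefaultPrettyPrinter().writeValueAsString(config));
        }
      }
      ```

      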
diff --git a/jvm/src/main/resources/airbyte/source-mailgun.json b/jvm/src/main/resources/airbyte/source-mailgun.json
new file mode 100644
index 0000000..25c41ff
--- /dev/null
+++ b/jvm/src/main/resources/airbyte/source-mailgun.json
@@ -0,0 +1,30 @@
+{
+ "documentationUrl": "https://docs.airbyte.io/integrations/sources/mailgun",
+ "connectionSpecification": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Source Mailgun Spec",
+ "type": "object",
+ "required": ["private_key"],
+ "additionalProperties": true,
+ "properties": {
+ "private_key": {
+ "type": "string",
+ "airbyte_secret": true,
+ "description": "Primary account API key to access your Mailgun data.",
+ "title": "Private API Key"
+ },
+ "domain_region": {
+ "type": "string",
+ "description": "Domain region code. 'EU' or 'US' are possible values. The default is 'US'.",
+ "title": "Domain Region Code"
+ },
+ "start_date": {
+ "title": "Replication Start Date",
+ "description": "UTC date and time in the format 2020-10-01 00:00:00. Any data before this date will not be replicated. If omitted, defaults to 3 days ago.",
+ "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}$",
+ "examples": ["2020-10-01 00:00:00"],
+ "type": "string"
+ }
+ }
+ }
+}
diff --git a/jvm/src/main/resources/airbyte/source-marketo.json b/jvm/src/main/resources/airbyte/source-marketo.json
new file mode 100644
index 0000000..9af488b
--- /dev/null
+++ b/jvm/src/main/resources/airbyte/source-marketo.json
@@ -0,0 +1,42 @@
+{
+ "documentationUrl": "https://docs.airbyte.io/integrations/sources/marketo",
+ "connectionSpecification": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Source Marketo Spec",
+ "type": "object",
+ "required": ["domain_url", "client_id", "client_secret", "start_date"],
+ "additionalProperties": true,
+ "properties": {
+ "domain_url": {
+ "title": "Domain URL",
+ "type": "string",
+ "order": 3,
+ "description": "Your Marketo Base URL. See the docs for info on how to obtain this.",
+ "examples": ["https://000-AAA-000.mktorest.com"],
+ "airbyte_secret": true
+ },
+ "client_id": {
+ "title": "Client ID",
+ "type": "string",
+ "description": "The Client ID of your Marketo developer application. See the docs for info on how to obtain this.",
+ "order": 0,
+ "airbyte_secret": true
+ },
+ "client_secret": {
+ "title": "Client Secret",
+ "type": "string",
+ "description": "The Client Secret of your Marketo developer application. See the docs for info on how to obtain this.",
+ "order": 1,
+ "airbyte_secret": true
+ },
+ "start_date": {
+ "title": "Start Date",
+ "type": "string",
+ "order": 2,
+ "description": "UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.",
+ "examples": ["2020-09-25T00:00:00Z"],
+ "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z$"
+ }
+ }
+ }
+}
diff --git a/jvm/src/main/resources/airbyte/source-microsoft-teams.json b/jvm/src/main/resources/airbyte/source-microsoft-teams.json
new file mode 100644
index 0000000..442abfd
--- /dev/null
+++ b/jvm/src/main/resources/airbyte/source-microsoft-teams.json
@@ -0,0 +1,151 @@
+{
+ "documentationUrl": "https://docs.airbyte.io/integrations/sources/microsoft-teams",
+ "connectionSpecification": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Microsoft Teams Spec",
+ "type": "object",
+ "required": ["period"],
+ "additionalProperties": true,
+ "properties": {
+ "period": {
+ "type": "string",
+ "title": "Period",
+ "description": "Specifies the length of time over which the Team Device Report stream is aggregated. The supported values are: D7, D30, D90, and D180.",
+ "examples": ["D7"]
+ },
+ "credentials": {
+ "title": "Authentication mechanism",
+ "description": "Choose how to authenticate to Microsoft",
+ "type": "object",
+ "oneOf": [
+ {
+ "type": "object",
+ "title": "Authenticate via Microsoft (OAuth 2.0)",
+ "required": [
+ "tenant_id",
+ "client_id",
+ "client_secret",
+ "refresh_token"
+ ],
+ "additionalProperties": false,
+ "properties": {
+ "auth_type": {
+ "type": "string",
+ "const": "Client",
+ "enum": ["Client"],
+ "default": "Client",
+ "order": 0
+ },
+ "tenant_id": {
+ "title": "Directory (tenant) ID",
+ "type": "string",
+ "description": "A globally unique identifier (GUID) that is different than your organization name or domain. Follow these steps to obtain: open one of the Teams where you belong inside the Teams Application -> Click on the … next to the Team title -> Click on Get link to team -> Copy the link to the team and grab the tenant ID form the URL"
+ },
+ "client_id": {
+ "title": "Client ID",
+ "type": "string",
+ "description": "The Client ID of your Microsoft Teams developer application."
+ },
+ "client_secret": {
+ "title": "Client Secret",
+ "type": "string",
+ "description": "The Client Secret of your Microsoft Teams developer application.",
+ "airbyte_secret": true
+ },
+ "refresh_token": {
+ "title": "Refresh Token",
+ "type": "string",
+ "description": "A Refresh Token to renew the expired Access Token.",
+ "airbyte_secret": true
+ }
+ }
+ },
+ {
+ "type": "object",
+ "title": "Authenticate via Microsoft",
+ "required": ["tenant_id", "client_id", "client_secret"],
+ "additionalProperties": false,
+ "properties": {
+ "auth_type": {
+ "type": "string",
+ "const": "Token",
+ "enum": ["Token"],
+ "default": "Token",
+ "order": 0
+ },
+ "tenant_id": {
+ "title": "Directory (tenant) ID",
+ "type": "string",
+ "description": "A globally unique identifier (GUID) that is different than your organization name or domain. Follow these steps to obtain: open one of the Teams where you belong inside the Teams Application -> Click on the … next to the Team title -> Click on Get link to team -> Copy the link to the team and grab the tenant ID form the URL"
+ },
+ "client_id": {
+ "title": "Client ID",
+ "type": "string",
+ "description": "The Client ID of your Microsoft Teams developer application."
+ },
+ "client_secret": {
+ "title": "Client Secret",
+ "type": "string",
+ "description": "The Client Secret of your Microsoft Teams developer application.",
+ "airbyte_secret": true
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "advanced_auth": {
+ "auth_flow_type": "oauth2.0",
+ "predicate_key": ["credentials", "auth_type"],
+ "predicate_value": "Client",
+ "oauth_config_specification": {
+ "complete_oauth_output_specification": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "refresh_token": {
+ "type": "string",
+ "path_in_connector_config": ["credentials", "refresh_token"]
+ }
+ }
+ },
+ "complete_oauth_server_input_specification": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "client_id": {
+ "type": "string"
+ },
+ "client_secret": {
+ "type": "string"
+ }
+ }
+ },
+ "complete_oauth_server_output_specification": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "client_id": {
+ "type": "string",
+ "path_in_connector_config": ["credentials", "client_id"]
+ },
+ "client_secret": {
+ "type": "string",
+ "path_in_connector_config": ["credentials", "client_secret"]
+ }
+ }
+ },
+ "oauth_user_input_from_connector_config_specification": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "tenant_id": {
+ "type": "string",
+ "path_in_connector_config": ["credentials", "tenant_id"]
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/jvm/src/main/resources/airbyte/source-mixpanel.json b/jvm/src/main/resources/airbyte/source-mixpanel.json
new file mode 100644
index 0000000..5f41e04
--- /dev/null
+++ b/jvm/src/main/resources/airbyte/source-mixpanel.json
@@ -0,0 +1,71 @@
+{
+ "documentationUrl": "https://docs.airbyte.io/integrations/sources/mixpanel",
+ "connectionSpecification": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Source Mixpanel Spec",
+ "type": "object",
+ "required": ["api_secret"],
+ "properties": {
+ "api_secret": {
+ "order": 0,
+ "title": "Project Secret",
+ "type": "string",
+ "description": "Mixpanel project secret. See the docs for more information on how to obtain this.",
+ "airbyte_secret": true
+ },
+ "attribution_window": {
+ "order": 1,
+ "title": "Attribution Window",
+ "type": "integer",
+ "description": " A period of time for attributing results to ads and the lookback period after those actions occur during which ad results are counted. Default attribution window is 5 days.",
+ "default": 5
+ },
+ "project_timezone": {
+ "order": 2,
+ "title": "Project Timezone",
+ "type": "string",
+ "description": "Time zone in which integer date times are stored. The project timezone may be found in the project settings in the Mixpanel console.",
+ "default": "US/Pacific",
+ "examples": ["US/Pacific", "UTC"]
+ },
+ "select_properties_by_default": {
+ "order": 3,
+ "title": "Select Properties By Default",
+ "type": "boolean",
+ "description": "Setting this config parameter to TRUE ensures that new properties on events and engage records are captured. Otherwise new properties will be ignored.",
+ "default": true
+ },
+ "start_date": {
+ "order": 4,
+ "title": "Start Date",
+ "type": "string",
+ "description": "UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. If this option is not set, the connector will replicate data from up to one year ago by default.",
+ "examples": ["2021-11-16"],
+ "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}(T[0-9]{2}:[0-9]{2}:[0-9]{2}Z)?$"
+ },
+ "end_date": {
+ "order": 5,
+ "title": "End Date",
+ "type": "string",
+ "description": "UTC date and time in the format 2017-01-25T00:00:00Z. Any data after this date will not be replicated. Left empty to always sync to most recent date",
+ "examples": ["2021-11-16"],
+ "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}(T[0-9]{2}:[0-9]{2}:[0-9]{2}Z)?$"
+ },
+ "region": {
+ "order": 6,
+ "title": "Region",
+ "description": "The region of mixpanel domain instance either US or EU.",
+ "type": "string",
+ "enum": ["US", "EU"],
+ "default": "US"
+ },
+ "date_window_size": {
+ "order": 7,
+ "title": "Date slicing window",
+ "description": "Defines window size in days, that used to slice through data. You can reduce it, if amount of data in each window is too big for your environment.",
+ "type": "integer",
+ "default": 30
+ }
+ }
+ }
+}
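
      `date_window_size` controls how the start_date to end_date range is cut into smaller slices that are requested one at a time. A minimal sketch of that idea, not the connector's actual code:

      ```java
      import java.time.LocalDate;
      import java.util.ArrayList;
      import java.util.List;

      public class DateWindowSliceSketch {

        /** Splits [start, end] into consecutive windows of at most windowSizeDays days. */
        static List<LocalDate[]> slice(LocalDate start, LocalDate end, int windowSizeDays) {
          List<LocalDate[]> windows = new ArrayList<>();
          LocalDate windowStart = start;
          while (!windowStart.isAfter(end)) {
            LocalDate windowEnd = windowStart.plusDays(windowSizeDays - 1);
            if (windowEnd.isAfter(end)) {
              windowEnd = end;
            }
            windows.add(new LocalDate[] {windowStart, windowEnd});
            windowStart = windowEnd.plusDays(1);
          }
          return windows;
        }

        public static void main(String[] args) {
          // Example: a roughly three month range sliced with the default date_window_size of 30 days.
          slice(LocalDate.parse("2021-11-16"), LocalDate.parse("2022-02-01"), 30)
              .forEach(w -> System.out.println(w[0] + " .. " + w[1]));
        }
      }
      ```

      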
diff --git a/jvm/src/main/resources/airbyte/source-monday.json b/jvm/src/main/resources/airbyte/source-monday.json
new file mode 100644
index 0000000..d558ba3
--- /dev/null
+++ b/jvm/src/main/resources/airbyte/source-monday.json
@@ -0,0 +1,131 @@
+{
+ "documentationUrl": "https://docs.airbyte.io/integrations/sources/monday",
+ "connectionSpecification": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Monday Spec",
+ "type": "object",
+ "required": [],
+ "additionalProperties": true,
+ "properties": {
+ "credentials": {
+ "title": "Authorization Method",
+ "type": "object",
+ "oneOf": [
+ {
+ "type": "object",
+ "title": "OAuth2.0",
+ "required": [
+ "auth_type",
+ "client_id",
+ "client_secret",
+ "access_token"
+ ],
+ "properties": {
+ "subdomain": {
+ "type": "string",
+ "title": "Subdomain/Slug (Optional)",
+ "description": "Slug/subdomain of the account, or the first part of the URL that comes before .monday.com",
+ "default": "",
+ "order": 0
+ },
+ "auth_type": {
+ "type": "string",
+ "const": "oauth2.0",
+ "order": 1
+ },
+ "client_id": {
+ "type": "string",
+ "title": "Client ID",
+ "description": "The Client ID of your OAuth application.",
+ "airbyte_secret": true
+ },
+ "client_secret": {
+ "type": "string",
+ "title": "Client Secret",
+ "description": "The Client Secret of your OAuth application.",
+ "airbyte_secret": true
+ },
+ "access_token": {
+ "type": "string",
+ "title": "Access Token",
+ "description": "Access Token for making authenticated requests.",
+ "airbyte_secret": true
+ }
+ }
+ },
+ {
+ "type": "object",
+ "title": "API Token",
+ "required": ["auth_type", "api_token"],
+ "properties": {
+ "auth_type": {
+ "type": "string",
+ "const": "api_token",
+ "order": 0
+ },
+ "api_token": {
+ "type": "string",
+ "title": "Personal API Token",
+ "description": "API Token for making authenticated requests.",
+ "airbyte_secret": true
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "advanced_auth": {
+ "auth_flow_type": "oauth2.0",
+ "predicate_key": ["credentials", "auth_type"],
+ "predicate_value": "oauth2.0",
+ "oauth_config_specification": {
+ "complete_oauth_output_specification": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "access_token": {
+ "type": "string",
+ "path_in_connector_config": ["credentials", "access_token"]
+ }
+ }
+ },
+ "complete_oauth_server_input_specification": {
+ "type": "object",
+ "additionalProperties": true,
+ "properties": {
+ "client_id": {
+ "type": "string"
+ },
+ "client_secret": {
+ "type": "string"
+ }
+ }
+ },
+ "complete_oauth_server_output_specification": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "client_id": {
+ "type": "string",
+ "path_in_connector_config": ["credentials", "client_id"]
+ },
+ "client_secret": {
+ "type": "string",
+ "path_in_connector_config": ["credentials", "client_secret"]
+ }
+ }
+ },
+ "oauth_user_input_from_connector_config_specification": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "subdomain": {
+ "type": "string",
+ "path_in_connector_config": ["credentials", "subdomain"]
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/jvm/src/main/resources/airbyte/source-mongodb-v2.json b/jvm/src/main/resources/airbyte/source-mongodb-v2.json
new file mode 100644
index 0000000..e78ca96
--- /dev/null
+++ b/jvm/src/main/resources/airbyte/source-mongodb-v2.json
@@ -0,0 +1,124 @@
+{
+ "documentationUrl": "https://docs.airbyte.io/integrations/sources/mongodb-v2",
+ "changelogUrl": "https://docs.airbyte.io/integrations/sources/mongodb-v2",
+ "connectionSpecification": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "MongoDb Source Spec",
+ "type": "object",
+ "required": ["database"],
+ "additionalProperties": true,
+ "properties": {
+ "instance_type": {
+ "type": "object",
+ "title": "MongoDb Instance Type",
+ "description": "The MongoDb instance to connect to. For MongoDB Atlas and Replica Set TLS connection is used by default.",
+ "order": 0,
+ "oneOf": [
+ {
+ "title": "Standalone MongoDb Instance",
+ "required": ["instance", "host", "port"],
+ "properties": {
+ "instance": {
+ "type": "string",
+ "enum": ["standalone"],
+ "default": "standalone"
+ },
+ "host": {
+ "title": "Host",
+ "type": "string",
+ "description": "The host name of the Mongo database.",
+ "order": 0
+ },
+ "port": {
+ "title": "Port",
+ "type": "integer",
+ "description": "The port of the Mongo database.",
+ "minimum": 0,
+ "maximum": 65536,
+ "default": 27017,
+ "examples": ["27017"],
+ "order": 1
+ },
+ "tls": {
+ "title": "TLS Connection",
+ "type": "boolean",
+ "description": "Indicates whether TLS encryption protocol will be used to connect to MongoDB. It is recommended to use TLS connection if possible. For more information see documentation.",
+ "default": false,
+ "order": 2
+ }
+ }
+ },
+ {
+ "title": "Replica Set",
+ "required": ["instance", "server_addresses"],
+ "properties": {
+ "instance": {
+ "type": "string",
+ "enum": ["replica"],
+ "default": "replica"
+ },
+ "server_addresses": {
+ "title": "Server Addresses",
+ "type": "string",
+ "description": "The members of a replica set. Please specify `host`:`port` of each member separated by comma.",
+ "examples": ["host1:27017,host2:27017,host3:27017"],
+ "order": 0
+ },
+ "replica_set": {
+ "title": "Replica Set",
+ "type": "string",
+ "description": "A replica set in MongoDB is a group of mongod processes that maintain the same data set.",
+ "order": 1
+ }
+ }
+ },
+ {
+ "title": "MongoDB Atlas",
+ "additionalProperties": false,
+ "required": ["instance", "cluster_url"],
+ "properties": {
+ "instance": {
+ "type": "string",
+ "enum": ["atlas"],
+ "default": "atlas"
+ },
+ "cluster_url": {
+ "title": "Cluster URL",
+ "type": "string",
+ "description": "The URL of a cluster to connect to.",
+ "order": 0
+ }
+ }
+ }
+ ]
+ },
+ "database": {
+ "title": "Database Name",
+ "type": "string",
+ "description": "The database you want to replicate.",
+ "order": 1
+ },
+ "user": {
+ "title": "User",
+ "type": "string",
+ "description": "The username which is used to access the database.",
+ "order": 2
+ },
+ "password": {
+ "title": "Password",
+ "type": "string",
+ "description": "The password associated with this username.",
+ "airbyte_secret": true,
+ "order": 3
+ },
+ "auth_source": {
+ "title": "Authentication Source",
+ "type": "string",
+ "description": "The authentication source where the user information is stored.",
+ "default": "admin",
+ "examples": ["admin"],
+ "order": 4
+ }
+ }
+ }
+}
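
      Each `instance_type` option above ultimately corresponds to a different MongoDB connection string shape. The sketch below shows the standard URI forms (standalone, replica set, Atlas SRV) with illustrative host, user and replica set names; the connector's exact URI construction may differ.

      ```java
      public class MongoConnectionStringSketch {

        public static void main(String[] args) {
          String user = "airbyte";
          String database = "mydb";
          String authSource = "admin";

          // Standalone instance: single host/port, TLS optional.
          String standalone = String.format(
              "mongodb://%s@%s:%d/%s?authSource=%s&tls=%b",
              user, "localhost", 27017, database, authSource, false);

          // Replica set: comma-separated server_addresses plus the replica set name.
          String replicaSet = String.format(
              "mongodb://%s@%s/%s?authSource=%s&replicaSet=%s&tls=true",
              user, "host1:27017,host2:27017,host3:27017", database, authSource, "rs0");

          // MongoDB Atlas: SRV-style cluster_url, TLS is implied.
          String atlas = String.format(
              "mongodb+srv://%s@%s/%s?authSource=%s",
              user, "cluster0.example.mongodb.net", database, authSource);

          System.out.println(standalone);
          System.out.println(replicaSet);
          System.out.println(atlas);
        }
      }
      ```

      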
diff --git a/jvm/src/main/resources/airbyte/source-mongodb.json b/jvm/src/main/resources/airbyte/source-mongodb.json
new file mode 100644
index 0000000..87ec218
--- /dev/null
+++ b/jvm/src/main/resources/airbyte/source-mongodb.json
@@ -0,0 +1,70 @@
+{
+ "documentationUrl": "https://docs.airbyte.io/integrations/sources/mongodb",
+ "changelogUrl": "https://docs.airbyte.io/integrations/sources/mongodb",
+ "connectionSpecification": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Mongodb Source Spec",
+ "type": "object",
+ "required": ["host", "port", "database", "user", "password", "auth_source"],
+ "additionalProperties": false,
+ "properties": {
+ "host": {
+ "title": "Host",
+ "type": "string",
+ "description": "Host of a Mongo database to be replicated.",
+ "order": 0
+ },
+ "port": {
+ "title": "Port",
+ "type": "integer",
+ "description": "Port of a Mongo database to be replicated.",
+ "minimum": 0,
+ "maximum": 65536,
+ "default": 27017,
+ "examples": ["27017"],
+ "order": 1
+ },
+ "database": {
+ "title": "Database name",
+ "type": "string",
+ "description": "Database to be replicated.",
+ "order": 2
+ },
+ "user": {
+ "title": "User",
+ "type": "string",
+ "description": "User",
+ "order": 3
+ },
+ "password": {
+ "title": "Password",
+ "type": "string",
+ "description": "Password",
+ "airbyte_secret": true,
+ "order": 4
+ },
+ "auth_source": {
+ "title": "Authentication source",
+ "type": "string",
+ "description": "Authentication source where user information is stored. See the Mongo docs for more info.",
+ "default": "admin",
+ "examples": ["admin"],
+ "order": 5
+ },
+ "replica_set": {
+ "title": "Replica Set",
+ "type": "string",
+ "description": "The name of the set to filter servers by, when connecting to a replica set (Under this condition, the 'TLS connection' value automatically becomes 'true'). See the Mongo docs for more info.",
+ "default": "",
+ "order": 6
+ },
+ "ssl": {
+ "title": "TLS connection",
+ "type": "boolean",
+ "description": "If this switch is enabled, TLS connections will be used to connect to MongoDB.",
+ "default": false,
+ "order": 7
+ }
+ }
+ }
+}
diff --git a/jvm/src/main/resources/airbyte/source-mssql.json b/jvm/src/main/resources/airbyte/source-mssql.json
new file mode 100644
index 0000000..8b8acf4
--- /dev/null
+++ b/jvm/src/main/resources/airbyte/source-mssql.json
@@ -0,0 +1,170 @@
+{
+ "documentationUrl": "https://docs.airbyte.io/integrations/destinations/mssql",
+ "connectionSpecification": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "MSSQL Source Spec",
+ "type": "object",
+ "required": ["host", "port", "database", "username"],
+ "properties": {
+ "host": {
+ "description": "The hostname of the database.",
+ "title": "Host",
+ "type": "string",
+ "order": 0
+ },
+ "port": {
+ "description": "The port of the database.",
+ "title": "Port",
+ "type": "integer",
+ "minimum": 0,
+ "maximum": 65536,
+ "examples": ["1433"],
+ "order": 1
+ },
+ "database": {
+ "description": "The name of the database.",
+ "title": "Database",
+ "type": "string",
+ "examples": ["master"],
+ "order": 2
+ },
+ "schemas": {
+ "title": "Schemas",
+ "description": "The list of schemas to sync from. Defaults to user. Case sensitive.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "minItems": 0,
+ "uniqueItems": true,
+ "default": ["dbo"],
+ "order": 3
+ },
+ "username": {
+ "description": "The username which is used to access the database.",
+ "title": "Username",
+ "type": "string",
+ "order": 4
+ },
+ "password": {
+ "description": "The password associated with the username.",
+ "title": "Password",
+ "type": "string",
+ "airbyte_secret": true,
+ "order": 5
+ },
+ "jdbc_url_params": {
+ "title": "JDBC URL Params",
+ "description": "Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).",
+ "type": "string",
+ "order": 6
+ },
+ "ssl_method": {
+ "title": "SSL Method",
+ "type": "object",
+ "description": "The encryption method which is used when communicating with the database.",
+ "order": 7,
+ "oneOf": [
+ {
+ "title": "Unencrypted",
+ "description": "Data transfer will not be encrypted.",
+ "required": ["ssl_method"],
+ "properties": {
+ "ssl_method": {
+ "type": "string",
+ "const": "unencrypted",
+ "enum": ["unencrypted"],
+ "default": "unencrypted"
+ }
+ }
+ },
+ {
+ "title": "Encrypted (trust server certificate)",
+ "description": "Use the certificate provided by the server without verification. (For testing purposes only!)",
+ "required": ["ssl_method"],
+ "properties": {
+ "ssl_method": {
+ "type": "string",
+ "const": "encrypted_trust_server_certificate",
+ "enum": ["encrypted_trust_server_certificate"],
+ "default": "encrypted_trust_server_certificate"
+ }
+ }
+ },
+ {
+ "title": "Encrypted (verify certificate)",
+ "description": "Verify and use the certificate provided by the server.",
+ "required": ["ssl_method", "trustStoreName", "trustStorePassword"],
+ "properties": {
+ "ssl_method": {
+ "type": "string",
+ "const": "encrypted_verify_certificate",
+ "enum": ["encrypted_verify_certificate"],
+ "default": "encrypted_verify_certificate"
+ },
+ "hostNameInCertificate": {
+ "title": "Host Name In Certificate",
+ "type": "string",
+ "description": "Specifies the host name of the server. The value of this property must match the subject property of the certificate.",
+ "order": 7
+ }
+ }
+ }
+ ]
+ },
+ "replication_method": {
+ "type": "object",
+ "title": "Replication Method",
+ "description": "The replication method used for extracting data from the database. STANDARD replication requires no setup on the DB side but will not be able to represent deletions incrementally. CDC uses {TBC} to detect inserts, updates, and deletes. This needs to be configured on the source database itself.",
+ "default": "STANDARD",
+ "order": 8,
+ "oneOf": [
+ {
+ "title": "Standard",
+ "description": "Standard replication requires no setup on the DB side but will not be able to represent deletions incrementally.",
+ "required": ["method"],
+ "properties": {
+ "method": {
+ "type": "string",
+ "const": "STANDARD",
+ "enum": ["STANDARD"],
+ "default": "STANDARD",
+ "order": 0
+ }
+ }
+ },
+ {
+ "title": "Logical Replication (CDC)",
+ "description": "CDC uses {TBC} to detect inserts, updates, and deletes. This needs to be configured on the source database itself.",
+ "required": ["method"],
+ "properties": {
+ "method": {
+ "type": "string",
+ "const": "CDC",
+ "enum": ["CDC"],
+ "default": "CDC",
+ "order": 0
+ },
+ "data_to_sync": {
+ "title": "Data to Sync",
+ "type": "string",
+ "default": "Existing and New",
+ "enum": ["Existing and New", "New Changes Only"],
+ "description": "What data should be synced under the CDC. \"Existing and New\" will read existing data as a snapshot, and sync new changes through CDC. \"New Changes Only\" will skip the initial snapshot, and only sync new changes through CDC.",
+ "order": 1
+ },
+ "snapshot_isolation": {
+ "title": "Initial Snapshot Isolation Level",
+ "type": "string",
+ "default": "Snapshot",
+ "enum": ["Snapshot", "Read Committed"],
+ "description": "Existing data in the database are synced through an initial snapshot. This parameter controls the isolation level that will be used during the initial snapshotting. If you choose the \"Snapshot\" level, you must enable the snapshot isolation mode on the database.",
+ "order": 2
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+}
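
      `jdbc_url_params` is a single string of 'key=value' pairs joined with '&'. A small, hedged sketch of how such a string can be split into individual properties (how the connector appends them to the JDBC URL is not shown in this diff):

      ```java
      import java.util.LinkedHashMap;
      import java.util.Map;

      public class JdbcUrlParamsSketch {

        /** Splits "key1=value1&key2=value2&key3=value3" into an ordered map. */
        static Map<String, String> parse(String jdbcUrlParams) {
          Map<String, String> params = new LinkedHashMap<>();
          if (jdbcUrlParams == null || jdbcUrlParams.isBlank()) {
            return params;
          }
          for (String pair : jdbcUrlParams.split("&")) {
            String[] kv = pair.split("=", 2);
            params.put(kv[0], kv.length > 1 ? kv[1] : "");
          }
          return params;
        }

        public static void main(String[] args) {
          System.out.println(parse("key1=value1&key2=value2&key3=value3"));
          // {key1=value1, key2=value2, key3=value3}
        }
      }
      ```

      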
diff --git a/jvm/src/main/resources/airbyte/source-my-hours.json b/jvm/src/main/resources/airbyte/source-my-hours.json
new file mode 100644
index 0000000..d1a739b
--- /dev/null
+++ b/jvm/src/main/resources/airbyte/source-my-hours.json
@@ -0,0 +1,40 @@
+{
+ "documentationUrl": "https://docs.airbyte.io/integrations/sources/my-hours",
+ "connectionSpecification": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "My Hours Spec",
+ "type": "object",
+ "required": ["email", "password", "start_date"],
+ "additionalProperties": false,
+ "properties": {
+ "email": {
+ "title": "Email",
+ "type": "string",
+ "description": "Your My Hours username",
+ "example": "john@doe.com"
+ },
+ "password": {
+ "title": "Password",
+ "type": "string",
+ "description": "The password associated to the username",
+ "airbyte_secret": true
+ },
+ "start_date": {
+ "title": "Start Date",
+ "description": "Start date for collecting time logs",
+ "examples": ["%Y-%m-%d", "2016-01-01"],
+ "type": "string",
+ "pattern": "^[0-9]{4}-[0-9]{2}-[0-9]{2}$"
+ },
+ "logs_batch_size": {
+ "title": "Time logs batch size",
+ "description": "Pagination size used for retrieving logs in days",
+ "examples": [30],
+ "type": "integer",
+ "minimum": 1,
+ "maximum": 365,
+ "default": 30
+ }
+ }
+ }
+}
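The spec above delegates validation of `start_date` and `logs_batch_size` to JSON Schema. A minimal client-side sketch of the same constraints follows; the object and method names are illustrative only, not part of the connector:

```
// Hypothetical helper mirroring the constraints declared in the My Hours spec above.
object MyHoursConfigCheck {

  // Same pattern as the spec: ^[0-9]{4}-[0-9]{2}-[0-9]{2}$
  private val StartDatePattern = "^[0-9]{4}-[0-9]{2}-[0-9]{2}$".r

  def validate(startDate: String, logsBatchSize: Int = 30): Either[String, Unit] =
    if (StartDatePattern.findFirstIn(startDate).isEmpty)
      Left(s"start_date '$startDate' must match YYYY-MM-DD, e.g. 2016-01-01")
    else if (logsBatchSize < 1 || logsBatchSize > 365)
      Left(s"logs_batch_size $logsBatchSize must be between 1 and 365")
    else
      Right(())
}
```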
diff --git a/jvm/src/main/resources/airbyte/source-mysql.json b/jvm/src/main/resources/airbyte/source-mysql.json
new file mode 100644
index 0000000..c09509f
--- /dev/null
+++ b/jvm/src/main/resources/airbyte/source-mysql.json
@@ -0,0 +1,221 @@
+{
+ "documentationUrl": "https://docs.airbyte.io/integrations/sources/mysql",
+ "connectionSpecification": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "MySql Source Spec",
+ "type": "object",
+ "required": ["host", "port", "database", "username", "replication_method"],
+ "properties": {
+ "host": {
+ "description": "The host name of the database.",
+ "title": "Host",
+ "type": "string",
+ "order": 0
+ },
+ "port": {
+ "description": "The port to connect to.",
+ "title": "Port",
+ "type": "integer",
+ "minimum": 0,
+ "maximum": 65536,
+ "default": 3306,
+ "examples": ["3306"],
+ "order": 1
+ },
+ "database": {
+ "description": "The database name.",
+ "title": "Database",
+ "type": "string",
+ "order": 2
+ },
+ "username": {
+ "description": "The username which is used to access the database.",
+ "title": "Username",
+ "type": "string",
+ "order": 3
+ },
+ "password": {
+ "description": "The password associated with the username.",
+ "title": "Password",
+ "type": "string",
+ "airbyte_secret": true,
+ "order": 4
+ },
+ "jdbc_url_params": {
+ "description": "Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).",
+ "title": "JDBC URL Params",
+ "type": "string",
+ "order": 5
+ },
+ "ssl": {
+ "title": "SSL Connection",
+ "description": "Encrypt data using SSL.",
+ "type": "boolean",
+ "default": true,
+ "order": 6
+ },
+ "ssl_mode": {
+ "title": "SSL modes",
+ "description": "SSL connection modes. + * Implementation is immutable and therefore thread-safe. + *
+ * + * @since 1.0 + * @author Yaniv Inbar + */ +public final class DateTime implements Serializable { + + private static final long serialVersionUID = 1L; + + private static final TimeZone GMT = TimeZone.getTimeZone("GMT"); + + /** Regular expression for parsing RFC3339 date/times. */ + private static final Pattern RFC3339_PATTERN = Pattern.compile( + "^(\\d{4})-(\\d{2})-(\\d{2})" // yyyy-MM-dd + + "([Tt](\\d{2}):(\\d{2}):(\\d{2})(\\.\\d+)?)?" // 'T'HH:mm:ss.milliseconds + + "([Zz]|([+-])(\\d{2}):(\\d{2}))?"); // 'Z' or time zone shift HH:mm following '+' or '-' + + /** + * Date/time value expressed as the number of ms since the Unix epoch. + * + *+ * If the time zone is specified, this value is normalized to UTC, so to format this date/time + * value, the time zone shift has to be applied. + *
+ */ + private final long value; + + /** Specifies whether this is a date-only value. */ + private final boolean dateOnly; + + /** Time zone shift from UTC in minutes or {@code 0} for date-only value. */ + private final int tzShift; + + /** + * Instantiates {@link DateTime} from a {@link Date} and {@link TimeZone}. + * + * @param date date and time + * @param zone time zone; if {@code null}, it is interpreted as {@code TimeZone.getDefault()}. + */ + public DateTime(Date date, TimeZone zone) { + this(false, date.getTime(), zone == null ? null : zone.getOffset(date.getTime()) / 60000); +} + + /** + * Instantiates {@link DateTime} from the number of milliseconds since the Unix epoch. + * + *+ * The time zone is interpreted as {@code TimeZone.getDefault()}, which may vary with + * implementation. + *
+ * + * @param value number of milliseconds since the Unix epoch (January 1, 1970, 00:00:00 GMT) + */ + public DateTime(long value) { + this(false, value, null); +} + + /** + * Instantiates {@link DateTime} from a {@link Date}. + * + *+ * The time zone is interpreted as {@code TimeZone.getDefault()}, which may vary with + * implementation. + *
+ * + * @param value date and time + */ + public DateTime(Date value) { + this(value.getTime()); +} + + /** + * Instantiates {@link DateTime} from the number of milliseconds since the Unix epoch, and a shift + * from UTC in minutes. + * + * @param value number of milliseconds since the Unix epoch (January 1, 1970, 00:00:00 GMT) + * @param tzShift time zone, represented by the number of minutes off of UTC. + */ + public DateTime(long value, int tzShift) { + this(false, value, tzShift); +} + + /** + * Instantiates {@link DateTime}, which may represent a date-only value, from the number of + * milliseconds since the Unix epoch, and a shift from UTC in minutes. + * + * @param dateOnly specifies if this should represent a date-only value + * @param value number of milliseconds since the Unix epoch (January 1, 1970, 00:00:00 GMT) + * @param tzShift time zone, represented by the number of minutes off of UTC, or {@code null} for + * {@code TimeZone.getDefault()}. + */ + public DateTime(boolean dateOnly, long value, Integer tzShift) { + this.dateOnly = dateOnly; + this.value = value; + this.tzShift = + dateOnly ? 0 : tzShift == null ? TimeZone.getDefault().getOffset(value) / 60000 : tzShift; +} + + /** + * Instantiates {@link DateTime} from an RFC 3339 + * date/time value. + * + *+ * Upgrade warning: in prior version 1.17, this method required milliseconds to be exactly 3 + * digits (if included), and did not throw an exception for all types of invalid input values, but + * starting in version 1.18, the parsing done by this method has become more strict to enforce + * that only valid RFC3339 strings are entered, and if not, it throws a + * {@link NumberFormatException}. Also, in accordance with the RFC3339 standard, any number of + * milliseconds digits is now allowed. + *
+ * + * @param value an RFC 3339 date/time value. + * @since 1.11 + */ + public DateTime(String value) { + // Note, the following refactoring is being considered: Move the implementation of parseRfc3339 + // into this constructor. Implementation of parseRfc3339 can then do + // "return new DateTime(str);". + DateTime dateTime = parseRfc3339(value); + this.dateOnly = dateTime.dateOnly; + this.value = dateTime.value; + this.tzShift = dateTime.tzShift; +} + + /** + * Returns the date/time value expressed as the number of milliseconds since the Unix epoch. + * + *+ * If the time zone is specified, this value is normalized to UTC, so to format this date/time + * value, the time zone shift has to be applied. + *
+ * + * @since 1.5 + */ + public long getValue() { + return value; +} + + /** + * Returns whether this is a date-only value. + * + * @since 1.5 + */ + public boolean isDateOnly() { + return dateOnly; +} + + /** + * Returns the time zone shift from UTC in minutes or {@code 0} for date-only value. + * + * @since 1.5 + */ + public int getTimeZoneShift() { + return tzShift; +} + + /** Formats the value as an RFC 3339 date/time string. */ + public String toStringRfc3339() { + StringBuilder sb = new StringBuilder(); + Calendar dateTime = new GregorianCalendar(GMT); + long localTime = value + (tzShift * 60000L); + dateTime.setTimeInMillis(localTime); + // date + appendInt(sb, dateTime.get(Calendar.YEAR), 4); + sb.append('-'); + appendInt(sb, dateTime.get(Calendar.MONTH) + 1, 2); + sb.append('-'); + appendInt(sb, dateTime.get(Calendar.DAY_OF_MONTH), 2); + if (!dateOnly) { + // time + sb.append('T'); + appendInt(sb, dateTime.get(Calendar.HOUR_OF_DAY), 2); + sb.append(':'); + appendInt(sb, dateTime.get(Calendar.MINUTE), 2); + sb.append(':'); + appendInt(sb, dateTime.get(Calendar.SECOND), 2); + + if (dateTime.isSet(Calendar.MILLISECOND)) { + sb.append('.'); + appendInt(sb, dateTime.get(Calendar.MILLISECOND), 3); +} + // time zone + if (tzShift == 0) { + sb.append('Z'); +} else { + int absTzShift = tzShift; + if (tzShift > 0) { + sb.append('+'); +} else { + sb.append('-'); + absTzShift = -absTzShift; +} + + int tzHours = absTzShift / 60; + int tzMinutes = absTzShift % 60; + appendInt(sb, tzHours, 2); + sb.append(':'); + appendInt(sb, tzMinutes, 2); +} +} + return sb.toString(); +} + + @Override + public String toString() { + return toStringRfc3339(); +} + + /** + * {@inheritDoc} + * + *+ * A check is added that the time zone is the same. If you ONLY want to check equality of time + * value, check equality on the {@link #getValue()}. + *
+ */ + @Override + public boolean equals(Object o) { + if (o == this) { + return true; +} + if (!(o instanceof DateTime)) { + return false; +} + DateTime other = (DateTime) o; + return dateOnly == other.dateOnly && value == other.value && tzShift == other.tzShift; +} + + @Override + public int hashCode() { + return Arrays.hashCode(new long[] {value, dateOnly ? 1 : 0, tzShift}); +} + + /** + * Parses an RFC3339 date/time value. + * + *+ * Upgrade warning: in prior version 1.17, this method required milliseconds to be exactly 3 + * digits (if included), and did not throw an exception for all types of invalid input values, but + * starting in version 1.18, the parsing done by this method has become more strict to enforce + * that only valid RFC3339 strings are entered, and if not, it throws a + * {@link NumberFormatException}. Also, in accordance with the RFC3339 standard, any number of + * milliseconds digits is now allowed. + *
+ * + *+ * For the date-only case, the time zone is ignored and the hourOfDay, minute, second, and + * millisecond parameters are set to zero. + *
+ * + * @param str Date/time string in RFC3339 format + * @throws NumberFormatException if {@code str} doesn't match the RFC3339 standard format; an + * exception is thrown if {@code str} doesn't match {@code RFC3339_REGEX} or if it + * contains a time zone shift but no time. + */ + public static DateTime parseRfc3339(String str) throws NumberFormatException { + Matcher matcher = RFC3339_PATTERN.matcher(str); + if (!matcher.matches()) { + throw new NumberFormatException("Invalid date/time format: " + str); +} + + int year = Integer.parseInt(matcher.group(1)); // yyyy + int month = Integer.parseInt(matcher.group(2)) - 1; // MM + int day = Integer.parseInt(matcher.group(3)); // dd + boolean isTimeGiven = matcher.group(4) != null; // 'T'HH:mm:ss.milliseconds + String tzShiftRegexGroup = matcher.group(9); // 'Z', or time zone shift HH:mm following '+'/'-' + boolean isTzShiftGiven = tzShiftRegexGroup != null; + int hourOfDay = 0; + int minute = 0; + int second = 0; + int milliseconds = 0; + Integer tzShiftInteger = null; + + if (isTzShiftGiven && !isTimeGiven) { + throw new NumberFormatException("Invalid date/time format, cannot specify time zone shift" + + " without specifying time: " + str); +} + + if (isTimeGiven) { + hourOfDay = Integer.parseInt(matcher.group(5)); // HH + minute = Integer.parseInt(matcher.group(6)); // mm + second = Integer.parseInt(matcher.group(7)); // ss + if (matcher.group(8) != null) { // contains .milliseconds? + milliseconds = Integer.parseInt(matcher.group(8).substring(1)); // milliseconds + // The number of digits after the dot may not be 3. Need to renormalize. + int fractionDigits = matcher.group(8).substring(1).length() - 3; + milliseconds = (int) ((float) milliseconds / Math.pow(10, fractionDigits)); +} +} + Calendar dateTime = new GregorianCalendar(GMT); + dateTime.set(year, month, day, hourOfDay, minute, second); + dateTime.set(Calendar.MILLISECOND, milliseconds); + long value = dateTime.getTimeInMillis(); + + if (isTimeGiven && isTzShiftGiven) { + int tzShift; + if (Character.toUpperCase(tzShiftRegexGroup.charAt(0)) == 'Z') { + tzShift = 0; +} else { + tzShift = Integer.parseInt(matcher.group(11)) * 60 // time zone shift HH + + Integer.parseInt(matcher.group(12)); // time zone shift mm + if (matcher.group(10).charAt(0) == '-') { // time zone shift + or - + tzShift = -tzShift; +} + value -= tzShift * 60000L; // e.g. if 1 hour ahead of UTC, subtract an hour to get UTC time +} + tzShiftInteger = tzShift; +} + return new DateTime(!isTimeGiven, value, tzShiftInteger); +} + + /** Appends a zero-padded number to a string builder. 
*/ + private static void appendInt(StringBuilder sb, int num, int numDigits) { + if (num < 0) { + sb.append('-'); + num = -num; +} + int x = num; + while (x > 0) { + x /= 10; + numDigits--; +} + for (int i = 0; i < numDigits; i++) { + sb.append('0'); +} + if (num != 0) { + sb.append(num); +} +} +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/alertmanager/LiveAlertManager.scala b/jvm/src/main/scala/com/harana/modules/alertmanager/LiveAlertManager.scala new file mode 100644 index 0000000..3092088 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/alertmanager/LiveAlertManager.scala @@ -0,0 +1,199 @@ +package com.harana.modules.alertmanager + +import com.harana.modules.core.config.Config +import com.harana.modules.core.http.Http +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.harana.modules.kubernetes.Kubernetes +import io.circe.parser._ +import io.circe.syntax._ +import io.circe.{Decoder, Encoder} +import skuber.Container.Port +import skuber.apps.StatefulSet +import skuber.{ConfigMap, Container, EnvVar, HTTPGetAction, LabelSelector, ObjectMeta, PersistentVolume, PersistentVolumeClaim, Pod, Probe, Protocol, Service, Volume} +import zio.{Task, ZIO, ZLayer} + +object LiveAlertManager { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + http <- ZIO.service[Http] + kubernetes <- ZIO.service[Kubernetes] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveAlertManager(config, http, kubernetes, logger, micrometer) + } +} + +case class LiveAlertManager(config: Config, http: Http, kubernetes: Kubernetes, logger: Logger, micrometer: Micrometer) extends AlertManager { + + override def start(name: String, + storageClassName: String, + replicas: Int = 1) = + for { + version <- config.string("alertmanager.version") + config <- alertManagerConfig(name) + + replicaArgs = if (replicas > 1) + List( + "--cluster.advertise-address=$(POD_IP):9094", + "--cluster.listen-address=0.0.0.0:9094" + ) ++ (0 to replicas).map(i => s"--cluster.peer=$name-{{ $i }}.$name-headless:9094") + else List() + + configMap = ConfigMap(name).withData(Map("alertmanager.yml" -> config)) + service = Service(name, Map("app.kubernetes.io/name" -> name), 9094) + probe = Probe(HTTPGetAction(port = Right("http"), path = "/")) + statefulSet = StatefulSet(name) + .withReplicas(replicas) + .withServiceName(name) + .withLabelSelector(LabelSelector()) + .withVolumeClaimTemplate( + PersistentVolumeClaim( + metadata = ObjectMeta(name = "storage"), + spec = Some(PersistentVolumeClaim.Spec( + accessModes = List(PersistentVolume.AccessMode.ReadWriteOnce), + storageClassName = Some(storageClassName) + )) + ) + ) + .withTemplate(Pod.Template.Spec( + metadata = ObjectMeta(labels = Map()), + spec = Some(Pod.Spec( + nodeSelector = Map("type" -> "core"), + containers = List(Container( + name = "alertmanager", + image = s"quay.io/prometheus/alertmanager:$version", + ports = List(Port(9093, Protocol.TCP, "http")), + env = List(EnvVar("POD_IP", EnvVar.FieldRef("status.podIP", "v1"))), + livenessProbe = Some(probe), + readinessProbe = Some(probe), + args = replicaArgs ++ List( + "--storage.path=/alertmanager", + "--config.file=/etc/alertmanager/alertmanager.yml" + ), + volumeMounts = List( + Volume.Mount("config", "/etc/alertmanager"), + Volume.Mount("storage", "/alertmanager") + ) + )) + )) + )) + } yield () + + + private def alertManagerConfig(name: String): Task[String] = + ZIO.attempt("") + + + def healthy: 
Task[Boolean] = + for { + domain <- config.string("alertmanager.host") + response <- http.get(s"https://$domain/-/healthy").mapError(ex => new Exception(ex.toString)) + } yield response.code() == 200 + + + def ready: Task[Boolean] = + for { + domain <- config.string("alertmanager.host") + response <- http.get(s"https://$domain/-/ready").mapError(ex => new Exception(ex.toString)) + } yield response.code() == 200 + + + def reload: Task[Unit] = + post("-/reload") + + + def status: Task[AlertManagerStatus] = + get[AlertManagerStatus]("api/v2/status") + + + def receivers: Task[List[ReceiverName]] = + get[List[ReceiverName]]("api/v2/receivers") + + + def silences(filters: Set[String] = Set()): Task[List[Silence]] = + get[List[Silence]]("api/v2/silences") + + + def silence(id: SilenceId): Task[Silence] = + get[Silence](s"api/v2/silences/${id.silenceID}") + + + def saveSilence(silence: PostableSilence): Task[SilenceId] = + postWithResponse[PostableSilence, SilenceId]("api/v2/silences", Some(silence)) + + + def deleteSilence(id: SilenceId): Task[Unit] = + delete(s"api/v2/silence/${id.silenceID}") + + + def alerts(active: Boolean = false, + silenced: Boolean = false, + inhibited: Boolean = false, + unprocessed: Boolean = false, + filters: List[String] = List(), + receiver: Option[String] = None): Task[List[Alert]] = + get[List[Alert]](s"api/v2/alerts", + Map( + "active" -> List(active.toString), + "silenced" -> List(silenced.toString), + "inhibited" -> List(inhibited.toString), + "unprocessed" -> List(unprocessed.toString), + "filters" -> filters) ++ (if (receiver.nonEmpty) Map("receiver" -> List(receiver.get)) else Map()) + ) + + def saveAlerts(alerts: List[PostableAlert]): Task[Unit] = + postWithBody[List[PostableAlert]]("api/v2/alerts", Some(alerts)) + + + def alertGroups(active: Boolean = false, + silenced: Boolean = false, + inhibited: Boolean = false, + filters: List[String] = List(), + receiver: Option[String] = None): Task[List[AlertGroup]] = + get[List[AlertGroup]]("api/v2/alerts/group", + Map( + "active" -> List(active.toString), + "silenced" -> List(silenced.toString), + "inhibited" -> List(inhibited.toString), + "filters" -> filters) ++ (if (receiver.nonEmpty) Map("receiver" -> List(receiver.get)) else Map()) + ) + + + private def get[A](url: String, parameters: Map[String, List[String]] = Map())(implicit d: Decoder[A]): Task[A] = + for { + domain <- config.string("alertmanager.host") + json <- http.getAsJson(s"https://$domain/$url", parameters).mapError(ex => new Exception(ex.toString)) + obj <- ZIO.fromEither(decode[A](json.noSpaces)) + } yield obj + + + private def post(url: String, parameters: Map[String, List[String]] = Map()): Task[Unit] = + for { + domain <- config.string("alertmanager.host") + _ <- http.postAsJson(s"https://$domain/$url", params = parameters).mapError(ex => new Exception(ex.toString)) + } yield () + + + private def postWithBody[A](url: String, obj: Option[A], parameters: Map[String, List[String]] = Map())(implicit e: Encoder[A]): Task[Unit] = + for { + domain <- config.string("alertmanager.host") + _ <- http.postAsJson(s"https://$domain/$url", body = obj.map(_.asJson.noSpaces), params = parameters).mapError(ex => new Exception(ex.toString)) + } yield () + + + private def postWithResponse[A, B](url: String, obj: Option[A], parameters: Map[String, List[String]] = Map())(implicit d: Decoder[B], e: Encoder[A]): Task[B] = + for { + domain <- config.string("alertmanager.host") + json <- http.postAsJson(s"https://$domain/$url", body = obj.map(_.asJson.noSpaces), params 
= parameters).mapError(ex => new Exception(ex.toString)) + obj <- ZIO.fromEither(decode[B](json.noSpaces)) + } yield obj + + + private def delete[A](url: String, parameters: Map[String, List[String]] = Map()): Task[Unit] = + for { + domain <- config.string("alertmanager.host") + _ <- http.deleteAsJson(s"https://$domain/$url", parameters).mapError(ex => new Exception(ex.toString)) + } yield () +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/alertmanager/models.scala b/jvm/src/main/scala/com/harana/modules/alertmanager/models.scala new file mode 100644 index 0000000..bcb3c7a --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/alertmanager/models.scala @@ -0,0 +1,104 @@ +package com.harana.modules + +import io.circe.generic.JsonCodec +import io.circe.{Decoder, Encoder} + +package object alertmanager { + + implicit val decodeDateTime: Decoder[DateTime] = + Decoder.decodeString.emap { str => Right(DateTime.parseRfc3339(str)) } + + implicit val encodeDateTime: Encoder[DateTime] = + Encoder.encodeString.contramap[DateTime](_.toStringRfc3339) + + + @JsonCodec + case class Alert(labels: Map[String, String], + annotations: Map[String, String], + receivers: List[ReceiverName], + status: AlertStatus, + fingerprint: String, + updatedAt: DateTime, + startsAt: DateTime, + endsAt: DateTime, + generatorURL: String) + + @JsonCodec + case class AlertGroup(labels: Map[String, String], + receiver: ReceiverName, + alerts: List[Alert]) + + @JsonCodec + case class AlertManagerConfig(original: String) + + @JsonCodec + case class AlertManagerStatus(cluster: ClusterStatus, + config: AlertManagerConfig, + uptime: DateTime, + versionInfo: VersionInfo) + + @JsonCodec + case class AlertStatus(inhibitedBy: List[String], + silencedBy: List[String], + state: String) + + @JsonCodec + case class ClusterStatus(name: String, + peers: List[Peer], + status: String) + + @JsonCodec + case class Labels(alertname: String, + labels: Map[String, String]) + + @JsonCodec + case class Matcher(regex: Boolean, + name: String, + value: String) + + @JsonCodec + case class Peer(address: String, + name: String) + + @JsonCodec + case class PostableAlert(labels: Map[String, String], + annotations: Map[String, String], + startsAt: String, + endsAt: String, + generatorURL: String) + + @JsonCodec + case class PostableSilence(id: String, + comment: String, + createdBy: String, + endsAt: String, + startsAt: String, + matchers: List[Matcher]) + + @JsonCodec + case class ReceiverName(name: String) + + @JsonCodec + case class Silence(id: String, + status: SilenceStatus, + updatedAt: DateTime, + comment: String, + createdBy: String, + endsAt: DateTime, + startsAt: DateTime, + matchers: List[Matcher]) + + @JsonCodec + case class SilenceId(silenceID: String) + + @JsonCodec + case class SilenceStatus(state: String) + + @JsonCodec + case class VersionInfo(branch: String, + buildDate: String, + buildUser: String, + goVersion: String, + revision: String, + version: String) +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/alluxiofs/AlluxioFs.scala b/jvm/src/main/scala/com/harana/modules/alluxiofs/AlluxioFs.scala new file mode 100644 index 0000000..93b4e19 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/alluxiofs/AlluxioFs.scala @@ -0,0 +1,70 @@ +package com.harana.modules.alluxiofs + +import alluxio.grpc.SetAclAction +import alluxio.security.authorization.AclEntry +import com.harana.sdk.shared.models.HaranaFile +import zio.Task +import zio.macros.accessible + +@accessible +trait 
AlluxioFs { + + def createDirectory(path: String, + createParent: Boolean, + username: Option[String] = None): Task[Unit] + +// def createFile(path: String, +// data: Array[Byte], +// username: Option[String] = None, +// blockSize: Option[Int] = None): Task[Unit] + + def delete(path: String, + recursive: Boolean, + username: Option[String] = None): Task[Unit] + + def exists(path: String, + username: Option[String] = None): Task[Boolean] + + def free(path: String, + username: Option[String] = None): Task[Unit] + + def info(path: String, + username: Option[String] = None): Task[HaranaFile] + + def isDirectory(path: String, + username: Option[String] = None): Task[Boolean] + + def isFile(path: String, + username: Option[String] = None): Task[Boolean] + + def list(path: String, + username: Option[String] = None): Task[List[HaranaFile]] + +// def loadFile(path: String, +// username: Option[String] = None): Task[Array[Byte]] + + def mount(path: String, + ufsPath: String, + username: Option[String] = None): Task[Unit] + + def parent(path: String, + username: Option[String] = None): Task[Option[String]] + + def persist(path: String, + username: Option[String] = None): Task[Unit] + + def rename(source: String, + destination: String, + username: Option[String] = None): Task[Unit] + + def search(path: String, query: String): Task[List[HaranaFile]] + + def setAcl(path: String, + action: SetAclAction, + entries: List[AclEntry], + username: Option[String] = None): Task[Unit] + + def unmount(path: String, + username: Option[String] = None): Task[Unit] + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/alluxiofs/LiveAlluxioFs.scala b/jvm/src/main/scala/com/harana/modules/alluxiofs/LiveAlluxioFs.scala new file mode 100644 index 0000000..381afce --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/alluxiofs/LiveAlluxioFs.scala @@ -0,0 +1,210 @@ +package com.harana.modules.alluxiofs + +import alluxio.client.file.URIStatus +import alluxio.conf.{Configuration, PropertyKey} +import alluxio.grpc.{CreateDirectoryPOptions, DeletePOptions, SetAclAction} +import alluxio.security.authorization.AclEntry +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.harana.sdk.shared.models.HaranaFile +import org.apache.commons.io.IOUtils +import zio.{Task, ZIO, ZLayer} + +import scala.jdk.CollectionConverters._ + +object LiveAlluxioFs { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveAlluxioFs(config, logger, micrometer) + } +} + +case class LiveAlluxioFs(config: Config, logger: Logger, micrometer: Micrometer) extends AlluxioFs { + + private val alluxioProperties = for { + hosts <- config.listString("alluxio.hosts", List()) + port <- config.long("alluxio.port", 19998) + properties = Configuration.global().copyProperties() + addresses = hosts.map(host => s"$host:$port").mkString(",") + _ = properties.set(PropertyKey.SECURITY_AUTHENTICATION_TYPE, "NOSASL") + _ = properties.set(PropertyKey.MASTER_RPC_ADDRESSES, addresses) + _ <- logger.info(s"Connecting to hosts: $addresses") + } yield properties + + + def createDirectory(path: String, + createParent: Boolean, + username: Option[String] = None): Task[Unit] = + for { + properties <- alluxioProperties + fs <- alluxioFs(properties, username) + options = CreateDirectoryPOptions.newBuilder().setRecursive(createParent).build() + _ <- 
ZIO.attempt(fs.createDirectory(uri(path), options)) + } yield () + +// FIXME +// def createFile(path: String, +// data: Array[Byte], +// username: Option[String] = None, +// blockSize: Option[Int] = None): Task[Unit] = +// for { +// properties <- alluxioProperties +// fs <- alluxioFs(properties, username) +// _ <- ZIO.attempt(fs.createFile(uri(path))).acquireReleaseWith(closeStream)(os => io(os.write(data)) +// ) +// } yield () + + + def delete(path: String, + recursive: Boolean, + username: Option[String] = None): Task[Unit] = + for { + properties <- alluxioProperties + fs <- alluxioFs(properties, username) + options = DeletePOptions.newBuilder().setRecursive(true).build() + _ <- ZIO.attempt(fs.delete(uri(path), options)) + } yield () + + + def exists(path: String, + username: Option[String] = None): Task[Boolean] = + for { + properties <- alluxioProperties + fs <- alluxioFs(properties, username) + result <- ZIO.attempt(fs.exists(uri(path))) + } yield result + + + def free(path: String, + username: Option[String] = None): Task[Unit] = + for { + properties <- alluxioProperties + fs <- alluxioFs(properties, username) + result <- ZIO.attempt(fs.free(uri(path))) + } yield result + + + def info(path: String, + username: Option[String] = None): Task[HaranaFile] = + for { + properties <- alluxioProperties + fs <- alluxioFs(properties, username) + result <- ZIO.attempt(fs.listStatus(uri(path)).asScala.map(toDataFile).head) + } yield result + + + def isDirectory(path: String, + username: Option[String] = None): Task[Boolean] = + for { + properties <- alluxioProperties + fs <- alluxioFs(properties, username) + result <- ZIO.attempt(fs.getStatus(uri(path)).isFolder) + } yield result + + + def isFile(path: String, username: Option[String] = None): Task[Boolean] = + isDirectory(path).map(!_) + + + def list(path: String, + username: Option[String] = None): Task[List[HaranaFile]] = + for { + properties <- alluxioProperties + fs <- alluxioFs(properties, username) + result <- ZIO.attempt(fs.listStatus(uri(path)).asScala.toList.map(toDataFile)) + } yield result + + +// FIXME +// def loadFile(path: String, +// username: Option[String] = None): Task[Array[Byte]] = +// for { +// properties <- alluxioProperties +// fs <- alluxioFs(properties, username) +// result <- ZIO.attempt(fs.openFile(uri(path))).acquireReleaseWithAuto(closeStream)(is => io(IOUtils.toByteArray(is))) +// } yield result + + + def mount(path: String, + ufsPath: String, + username: Option[String] = None): Task[Unit] = + for { + properties <- alluxioProperties + fs <- alluxioFs(properties, username) + _ <- ZIO.attempt(fs.mount(uri(path), uri(ufsPath))) + } yield () + + + def parent(path: String, + username: Option[String] = None): Task[Option[String]] = + io(Option(uri(path).getParent).map(_.getPath)) + + + def persist(path: String, + username: Option[String] = None): Task[Unit] = + for { + properties <- alluxioProperties + fs <- alluxioFs(properties, username) + _ <- ZIO.attempt(fs.persist(uri(path))) + } yield () + + + def rename(source: String, + destination: String, + username: Option[String] = None): Task[Unit] = + for { + properties <- alluxioProperties + fs <- alluxioFs(properties, username) + _ <- ZIO.attempt(fs.rename(uri(source), uri(destination))) + } yield () + + + def search(path: String, + query: String): Task[List[HaranaFile]] = + for { + properties <- alluxioProperties + fs <- alluxioFs(properties) +// _ <- ZIO.from(fs.rename(uri(source), uri(destination))) + } yield List() + + + def setAcl(path: String, + action: SetAclAction, 
+ entries: List[AclEntry], + username: Option[String] = None): Task[Unit] = + for { + properties <- alluxioProperties + fs <- alluxioFs(properties, username) + _ <- ZIO.attempt(fs.setAcl(uri(path), action, entries.asJava)) + } yield () + + + def unmount(path: String, + username: Option[String] = None): Task[Unit] = + for { + properties <- alluxioProperties + fs <- alluxioFs(properties, username) + _ <- ZIO.attempt(fs.unmount(uri(path))) + } yield () + + + private def toDataFile(uri: URIStatus): HaranaFile = { + null + } + // private def toDataFile(uri: URIStatus) = { +// uri. +// +// DataFile( +// name = file.getName.getBaseName, +// extension = if (StringUtils.isEmpty(file.getName.getExtension)) None else Some(file.getName.getExtension), +// isFolder = file.isFolder, +// lastModified = Instant.ofEpochMilli(file.getContent.getLastModifiedTime), +// size = size(file), +// tags = List() +// ) +// } +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/alluxiofs/package.scala b/jvm/src/main/scala/com/harana/modules/alluxiofs/package.scala new file mode 100644 index 0000000..0bcebc4 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/alluxiofs/package.scala @@ -0,0 +1,40 @@ +package com.harana.modules + +import alluxio.AlluxioURI +import alluxio.client.file.FileSystem +import alluxio.conf.{AlluxioProperties, InstancedConfiguration, PropertyKey} +import alluxio.exception.AlluxioException +import zio.{IO, ZIO} + +import java.io.{InputStream, OutputStream} + +package object alluxiofs { + + def alluxioFs(properties: AlluxioProperties, username: Option[String] = None) = + ZIO.succeed { + FileSystem.Factory.create( + username match { + case Some(u) => + val p = properties.copy() + p.set(PropertyKey.SECURITY_LOGIN_USERNAME, u) + new InstancedConfiguration(p) + + case None => + new InstancedConfiguration(properties) + } + ) + } + + def closeStream(is: InputStream) = + ZIO.succeed(is.close()) + + def closeStream(os: OutputStream) = + ZIO.succeed(os.close()) + + def io[A](fn: => A): IO[AlluxioException, A] = + ZIO.from(fn).refineToOrDie[AlluxioException] + + def uri(path: String) = + new AlluxioURI(path) + +} diff --git a/jvm/src/main/scala/com/harana/modules/argo/Argo.scala b/jvm/src/main/scala/com/harana/modules/argo/Argo.scala new file mode 100644 index 0000000..db54a28 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/argo/Argo.scala @@ -0,0 +1,35 @@ +package com.harana.modules.argo + +import com.harana.modules.argo.events.EventSource.EventSource +import com.harana.modules.argo.events.Rollout.Rollout +import com.harana.modules.argo.events.Sensor.Sensor +import com.harana.modules.argo.workflows.Workflow.Workflow +import com.harana.modules.argo.workflows.WorkflowTemplate.WorkflowTemplate +import skuber.api.client.KubernetesClient +import zio.Task +import zio.macros.accessible + +@accessible +trait Argo { + + def createOrUpdateEventSource(namespace: String, eventSource: EventSource, client: Option[KubernetesClient] = None): Task[EventSource] + def deleteEventSource(namespace: String, name: String, client: Option[KubernetesClient] = None): Task[Unit] + def existsEventSource(namespace: String, name: String, client: Option[KubernetesClient] = None): Task[Boolean] + + def createOrUpdateRollout(namespace: String, rollout: Rollout, client: Option[KubernetesClient] = None): Task[Rollout] + def deleteRollout(namespace: String, name: String, client: Option[KubernetesClient] = None): Task[Unit] + def existsRollout(namespace: String, name: String, client: 
Option[KubernetesClient] = None): Task[Boolean] + + def createOrUpdateSensor(namespace: String, sensor: Sensor, client: Option[KubernetesClient] = None): Task[Sensor] + def deleteSensor(namespace: String, name: String, client: Option[KubernetesClient] = None): Task[Unit] + def existsSensor(namespace: String, name: String, client: Option[KubernetesClient] = None): Task[Boolean] + + def createOrUpdateWorkflow(namespace: String, workflow: Workflow, client: Option[KubernetesClient] = None): Task[Workflow] + def deleteWorkflow(namespace: String, name: String, client: Option[KubernetesClient] = None): Task[Unit] + def existsWorkflow(namespace: String, name: String, client: Option[KubernetesClient] = None): Task[Boolean] + + def createOrUpdateWorkflowTemplate(namespace: String, template: WorkflowTemplate, client: Option[KubernetesClient] = None): Task[WorkflowTemplate] + def deleteWorkflowTemplate(namespace: String, name: String, client: Option[KubernetesClient] = None): Task[Unit] + def existsWorkflowTemplate(namespace: String, name: String, client: Option[KubernetesClient] = None): Task[Boolean] + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/argo/LiveArgo.scala b/jvm/src/main/scala/com/harana/modules/argo/LiveArgo.scala new file mode 100644 index 0000000..36bf442 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/argo/LiveArgo.scala @@ -0,0 +1,126 @@ +package com.harana.modules.argo + +import com.harana.modules.argo.events.EventSource._ +import com.harana.modules.argo.events.Rollout._ +import com.harana.modules.argo.events.Sensor._ +import com.harana.modules.argo.workflows.Workflow._ +import com.harana.modules.argo.workflows.WorkflowTemplate._ +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.harana.modules.kubernetes.Kubernetes +import play.api.libs.json.Format +import skuber.ResourceDefinition +import skuber.api.client.{KubernetesClient, LoggingContext} +import zio.{Task, ZIO, ZLayer} + +import scala.reflect.ClassTag + +object LiveArgo { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + kubernetes <- ZIO.service[Kubernetes] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveArgo(config, kubernetes, logger, micrometer) + } +} + +case class LiveArgo(config: Config, kubernetes: Kubernetes, logger: Logger, micrometer: Micrometer) extends Argo { + + def createOrUpdateEventSource(namespace: String, eventSource: EventSource, client: Option[KubernetesClient]): Task[EventSource] = + upsert[EventSource](namespace, eventSource, client) + + + def deleteEventSource(namespace: String, name: String, client: Option[KubernetesClient]): Task[Unit] = + delete[EventSource](namespace, name, client) + + + def existsEventSource(namespace: String, name: String, client: Option[KubernetesClient]): Task[Boolean] = + exists[EventSource](namespace, name, client) + + + def createOrUpdateRollout(namespace: String, rollout: Rollout, client: Option[KubernetesClient]): Task[Rollout] = + upsert[Rollout](namespace, rollout, client) + + + def deleteRollout(namespace: String, name: String, client: Option[KubernetesClient]): Task[Unit] = + delete[Rollout](namespace, name, client) + + + def existsRollout(namespace: String, name: String, client: Option[KubernetesClient]): Task[Boolean] = + exists[Rollout](namespace, name, client) + + + def createOrUpdateSensor(namespace: String, sensor: Sensor, client: Option[KubernetesClient]): 
Task[Sensor] = + upsert[Sensor](namespace, sensor, client) + + + def deleteSensor(namespace: String, name: String, client: Option[KubernetesClient]): Task[Unit] = + delete[Sensor](namespace, name, client) + + + def existsSensor(namespace: String, name: String, client: Option[KubernetesClient]): Task[Boolean] = + exists[Sensor](namespace, name, client) + + + def createOrUpdateWorkflow(namespace: String, workflow: Workflow, client: Option[KubernetesClient]): Task[Workflow] = + upsert[Workflow](namespace, workflow, client) + + + def deleteWorkflow(namespace: String, name: String, client: Option[KubernetesClient]): Task[Unit] = + delete[Workflow](namespace, name, client) + + + def existsWorkflow(namespace: String, name: String, client: Option[KubernetesClient]): Task[Boolean] = + exists[Workflow](namespace, name, client) + + + def createOrUpdateWorkflowTemplate(namespace: String, template: WorkflowTemplate, client: Option[KubernetesClient]): Task[WorkflowTemplate] = + upsert[WorkflowTemplate](namespace, template, client) + + + def deleteWorkflowTemplate(namespace: String, name: String, client: Option[KubernetesClient]): Task[Unit] = + delete[WorkflowTemplate](namespace, name, client) + + + def existsWorkflowTemplate(namespace: String, name: String, client: Option[KubernetesClient]): Task[Boolean] = + exists[WorkflowTemplate](namespace, name, client) + + + private def upsert[A <: skuber.ObjectResource](namespace: String, resource: A, client: Option[KubernetesClient])(implicit fmt: Format[A], rd: ResourceDefinition[A], lc: LoggingContext, ct: ClassTag[A]): Task[A] = + for { + client <- ZIO.fromOption(client).orElse(kubernetes.newClient) + exists <- kubernetes.exists[A](client, namespace, resource.name) + resource <- if (exists) kubernetes.update[A](client, namespace, resource) else kubernetes.create[A](client, namespace, resource) + _ <- kubernetes.close(client) + } yield resource + + + private def rename[A <: skuber.ObjectResource](namespace: String, oldName: String, newName: String, client: Option[KubernetesClient])(implicit fmt: Format[A], rd: ResourceDefinition[A], lc: LoggingContext, ct: ClassTag[A]): Task[Unit] = + for { + client <- ZIO.fromOption(client).orElse(kubernetes.newClient) + exists <- kubernetes.exists[A](client, namespace, oldName) + resource <- kubernetes.get[A](client, namespace, oldName) + _ <- ZIO.when(exists)(kubernetes.delete[A](client, namespace, oldName)) + _ <- ZIO.when(exists)(kubernetes.create[A](client, namespace, resource.get)) + _ <- kubernetes.close(client) + } yield () + + + private def delete[A <: skuber.ObjectResource](namespace: String, name: String, client: Option[KubernetesClient])(implicit fmt: Format[A], rd: ResourceDefinition[A], lc: LoggingContext, ct: ClassTag[A]): Task[Unit] = + for { + client <- ZIO.fromOption(client).orElse(kubernetes.newClient) + _ <- kubernetes.delete[A](client, namespace, name) + _ <- kubernetes.close(client) + } yield () + + + private def exists[A <: skuber.ObjectResource](namespace: String, name: String, client: Option[KubernetesClient])(implicit fmt: Format[A], rd: ResourceDefinition[A], lc: LoggingContext, ct: ClassTag[A]): Task[Boolean] = + for { + client <- ZIO.fromOption(client).orElse(kubernetes.newClient) + exists <- kubernetes.get[A](client, namespace, name).map(_.nonEmpty) + } yield exists + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/argo/events/EventSource.scala b/jvm/src/main/scala/com/harana/modules/argo/events/EventSource.scala new file mode 100644 index 0000000..3062554 --- 
/dev/null +++ b/jvm/src/main/scala/com/harana/modules/argo/events/EventSource.scala @@ -0,0 +1,204 @@ +package com.harana.modules.argo.events + +import com.harana.modules.argo._ +import io.circe.generic.JsonCodec +import play.api.libs.json.{Format, Json} +import skuber.apiextensions.CustomResourceDefinition +import skuber.{CustomResource, ListResource, ResourceDefinition} + +object EventSource { + + type EventSource = CustomResource[Spec, Status] + type EventSourceList = ListResource[EventSource] + + implicit lazy val calendarFmt: Format[Calendar] = Json.format[Calendar] + implicit lazy val catchupConfigurationFmt: Format[CatchupConfiguration] = Json.format[CatchupConfiguration] + implicit lazy val configMapPersistenceFmt: Format[ConfigMapPersistence] = Json.format[ConfigMapPersistence] + implicit lazy val eventPersistenceFmt: Format[EventPersistence] = Json.format[EventPersistence] + implicit lazy val eventSourceFilterFmt: Format[EventSourceFilter] = Json.format[EventSourceFilter] + implicit lazy val fileFmt: Format[File] = Json.format[File] + implicit lazy val genericFmt: Format[Generic] = Json.format[Generic] + implicit lazy val githubFmt: Format[Github] = Json.format[Github] + implicit lazy val gitlabFmt: Format[Gitlab] = Json.format[Gitlab] + implicit lazy val hdfsFmt: Format[Hdfs] = Json.format[Hdfs] + implicit lazy val kafkaFmt: Format[Kafka] = Json.format[Kafka] + implicit lazy val redisFmt: Format[Redis] = Json.format[Redis] + implicit lazy val resourceFmt: Format[Resource] = Json.format[Resource] + implicit lazy val slackFmt: Format[Slack] = Json.format[Slack] + implicit lazy val snsFmt: Format[SNS] = Json.format[SNS] + implicit lazy val specFmt: Format[Spec] = Json.format[Spec] + implicit lazy val sqsFmt: Format[SQS] = Json.format[SQS] + implicit lazy val stripeFmt: Format[Stripe] = Json.format[Stripe] + implicit lazy val statusFmt: Format[Status] = Json.format[Status] + implicit lazy val watchPathConfigFmt: Format[WatchPathConfig] = Json.format[WatchPathConfig] + implicit lazy val webhookFmt: Format[Webhook] = Json.format[Webhook] + + implicit lazy val resourceDefinition: ResourceDefinition[EventSource] = ResourceDefinition[EventSource]("EventSource", "argoproj.io", "v1alpha1") + val crd = CustomResourceDefinition[EventSource] + + def apply(name: String, spec: Spec) = CustomResource[Spec, Status](spec).withName(name) + + @JsonCodec + case class Calendar(exclusionDates: List[String], + interval: Option[String] = None, + schedule: Option[String] = None, + timezone: Option[String] = None, + metadata: Map[String, String] = Map(), + eventPersistence: Option[EventPersistence] = None, + filter: Option[EventSourceFilter] = None) + + @JsonCodec + case class CatchupConfiguration(enabled: Boolean, + maxDuration: String) + + @JsonCodec + case class ConfigMapPersistence(name: String, + createIfNotExist: Boolean) + + @JsonCodec + case class EventPersistence(catchupConfiguration: CatchupConfiguration, + configMapPersistence: ConfigMapPersistence) + + @JsonCodec + case class EventSourceFilter(expression: String) + + + @JsonCodec + case class File(eventType: String, + watchPathConfig: WatchPathConfig, + polling: Boolean = false, + metadata: Map[String, String] = Map(), + filter: Option[EventSourceFilter] = None) + + @JsonCodec + case class Generic(url: String, + config: Option[String] = None, + insecure: Boolean = false, + jsonBody: Boolean = true, + metadata: Map[String, String] = Map(), + authSecret: Option[String] = None, + filter: Option[EventSourceFilter] = None) + + @JsonCodec + case 
class Github(owner: String, + repository: String, + endpoint: String, + port: Long, + url: Option[String] = None, + events: List[String], + apiSecret: String, + filter: Option[EventSourceFilter] = None) + + @JsonCodec + case class Gitlab(projectId: String, + endpoint: String, + port: Long, + url: Option[String] = None, + event: String, + apiSecret: String, + baseUrl: String, + filter: Option[EventSourceFilter] = None) + + @JsonCodec + case class Hdfs(directory: String, + `type`: String, + path: String, + addresses: List[String], + hdfsUser: String, + krbCCacheSecret: Option[String] = None, + krbKeytabSecret: Option[String] = None, + krbUsername: Option[String] = None, + krbRealm: Option[String] = None, + krbConfigConfigMap: Option[String] = None, + krbServicePrincipalName: Option[String] = None, + filter: Option[EventSourceFilter] = None) + + @JsonCodec + case class Kafka(url: String, + topic: String, + partition: Int, + backOffDuration: Int, + backOffSteps: Int, + backOffFactor: Int, + backOffJitter: Int, + filter: Option[EventSourceFilter] = None) + + @JsonCodec + case class Redis(hostAddress: String, + hostPasswordSecret: Option[String] = None, + db: Option[Int] = None, + channels: List[String], + filter: Option[EventSourceFilter] = None) + + @JsonCodec + case class Resource(namespace: String, + group: Option[String] = None, + version: Option[String] = None, + resource: String, + `type`: String, + filters: Map[String, String], + filter: Option[EventSourceFilter] = None) + + @JsonCodec + case class Slack(endpoint: String, + port: Long, + token: String, + signingSecret: Option[String] = None, + filter: Option[EventSourceFilter] = None) + + @JsonCodec + case class SNS(endpoint: String, + port: Long, + url: Option[String] = None, + accessKeySecret: Option[String] = None, + secretKeySecret: Option[String] = None, + region: Option[String] = None, + filter: Option[EventSourceFilter] = None) + + case class Spec(eventBusName: String, + calendar: Map[String, Calendar] = Map(), + file: Map[String, File] = Map(), + generic: Map[String, Generic] = Map(), + github: Map[String, Github] = Map(), + gitlab: Map[String, Gitlab] = Map(), + hdfs: Map[String, Hdfs] = Map(), + kafka: Map[String, Kafka] = Map(), + redis: Map[String, Redis] = Map(), + replicas: Int = 1, + resource: Map[String, Resource] = Map(), + service: Option[Service] = None, + slack: Map[String, Slack] = Map(), + sns: Map[String, SNS] = Map(), + sqs: Map[String, SQS] = Map(), + stripe: Map[String, Stripe] = Map(), + template: Option[Template] = None, + webhook: Map[String, Webhook] = Map()) + + @JsonCodec + case class SQS(accessKeySecret: Option[String] = None, + secretKeySecret: Option[String] = None, + region: Option[String] = None, + queue: String, + waitTimeSeconds: Option[Int], + filter: Option[EventSourceFilter] = None) + + @JsonCodec + case class Stripe(endpoint: String, + port: Long, + url: Option[String] = None, + apiKeySecret: String, + eventFilters: List[String], + filter: Option[EventSourceFilter] = None) + + case class Status(createdAt: Time) + + @JsonCodec + case class WatchPathConfig(directory: Option[String] = None, + path: Option[String] = None, + pathRegexp: Option[String] = None) + + @JsonCodec + case class Webhook(endpoint: String, + port: Long, + filter: Option[EventSourceFilter] = None) +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/argo/events/Rollout.scala b/jvm/src/main/scala/com/harana/modules/argo/events/Rollout.scala new file mode 100644 index 0000000..ed840f6 --- /dev/null +++ 
b/jvm/src/main/scala/com/harana/modules/argo/events/Rollout.scala @@ -0,0 +1,101 @@ +package com.harana.modules.argo.events + +import com.harana.modules.argo._ +import play.api.libs.json.{Format, Json} +import skuber.apiextensions.CustomResourceDefinition +import skuber.{CustomResource, ListResource, ResourceDefinition} + +object Rollout { + + type Rollout = CustomResource[Spec, Status] + type RolloutList = ListResource[Rollout] + + implicit lazy val analysisFmt: Format[Analysis] = Json.format[Analysis] + implicit lazy val blueGreenFmt: Format[BlueGreen] = Json.format[BlueGreen] + implicit lazy val blueGreenStatusFmt: Format[BlueGreenStatus] = Json.format[BlueGreenStatus] + implicit lazy val canaryFmt: Format[Canary] = Json.format[Canary] + implicit lazy val canaryStatusFmt: Format[CanaryStatus] = Json.format[CanaryStatus] + implicit lazy val canaryStepFmt: Format[CanaryStep] = Json.format[CanaryStep] + implicit lazy val pauseConditionFmt: Format[PauseCondition] = Json.format[PauseCondition] + implicit lazy val rolloutConditionFmt: Format[RolloutCondition] = Json.format[RolloutCondition] + implicit lazy val specFmt: Format[Spec] = Json.format[Spec] + implicit lazy val statusFmt: Format[Status] = Json.format[Status] + implicit lazy val strategyFmt: Format[Strategy] = Json.format[Strategy] + + implicit lazy val resourceDefinition: ResourceDefinition[Rollout] = ResourceDefinition[Rollout]("Rollout", "argoproj.io", "v1alpha1") + val crd = CustomResourceDefinition[Rollout] + + def apply(name: String, spec: Spec) = CustomResource[Spec, Status](spec).withName(name) + + case class Analysis(successfulRunHistoryLimit: Option[Int] = None, + unsuccessfulRunHistoryLimit: Option[Int] = None) + + case class BlueGreen(activeService: String, + previewService: String, + previewReplicaCount: Option[Int], + autoPromotionEnabled: Option[Boolean], + autoPromotionSeconds: Option[Int], + scaleDownDelaySeconds: Option[Int], + scaleDownDelayRevisionLimit: Option[Int]) + + case class BlueGreenStatus(activeSelector: String, + previousActiveSelector: String, + previewSelector: String, + scaleDownDelayStartTime: Time, + scaleUpPreviewCheckPoint: Boolean) + + case class Canary(stableService: String, + canaryService: String, + steps: List[CanaryStep] = List(), + maxSurge: Option[String] = None, + maxUnavailable: Option[String] = None) + + case class CanaryStatus(currentBackgroundAnalysisRun: String, + currentExperiment: String, + currentStepAnalysisRun: String, + stableRS: String) + + case class CanaryStep(weight: Int, + pause: Option[Int]) + + case class PauseCondition(reason: Option[String] = None, + startTime: Time) + + case class RolloutCondition(lastTransitionTime: Time, + lastUpdateTime: Time, + message: String, + reason: String, + status: String, + `type`: String) + + case class Spec(analysis: Option[Analysis] = None, + minReadySeconds: Option[Int] = None, + paused: Boolean = false, + progressDeadlineAbort: Boolean = false, + progressDeadlineSeconds: Option[Int] = None, + replicas: Option[Int] = Some(1), + restartAt: Option[String] = None, + revisionHistoryLimit: Option[Int] = None, + selector: Option[NodeSelector] = None, + strategy: Option[Strategy] = None) + + case class Status(abort: Option[Boolean] = None, + pauseConditions: List[PauseCondition] = List(), + controllerPause: Option[Boolean] = None, + currentPodHash: Option[String] = None, + replicas: Option[Int] = None, + updatedReplicas: Option[Int] = None, + readyReplicas: Option[Int] = None, + availableReplicas: Option[Int] = None, + currentStepIndex: 
Option[Int] = None, + collisionCount: Option[Int] = None, + observedGeneration: Option[String] = None, + conditions: List[RolloutCondition] = List(), + canary: Option[CanaryStatus] = None, + blueGreen: Option[BlueGreenStatus] = None, + HPAReplicas: Option[Int] = None, + selector: Option[String] = None) + + case class Strategy(blueGreen: Option[BlueGreen] = None, + canary: Option[Canary] = None) +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/argo/events/Sensor.scala b/jvm/src/main/scala/com/harana/modules/argo/events/Sensor.scala new file mode 100644 index 0000000..fe02bb3 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/argo/events/Sensor.scala @@ -0,0 +1,152 @@ +package com.harana.modules.argo.events + +import com.harana.modules.argo._ +import com.harana.modules.argo.events.Trigger.{HttpTrigger, K8STrigger, SlackTrigger} +import com.harana.modules.argo.workflows.Workflow +import play.api.libs.json.{Format, Json} +import skuber.apiextensions.CustomResourceDefinition +import skuber.{CustomResource, ListResource, ResourceDefinition} + +object Sensor { + + type Sensor = CustomResource[Spec, Status] + type SensorList = ListResource[Sensor] + + implicit lazy val conditionsResetCriteriaFmt: Format[ConditionsResetCriteria] = Json.format[ConditionsResetCriteria] + implicit lazy val conditionsResetByTimeFmt: Format[ConditionsResetByTime] = Json.format[ConditionsResetByTime] + implicit lazy val dataFilterFmt: Format[DataFilter] = Json.format[DataFilter] + implicit lazy val eventContextFmt: Format[EventContext] = Json.format[EventContext] + implicit lazy val eventDependencyFilterFmt: Format[EventDependencyFilter] = Json.format[EventDependencyFilter] + implicit lazy val eventDependencyFmt: Format[EventDependency] = Json.format[EventDependency] + implicit lazy val exprFilterFmt: Format[ExprFilter] = Json.format[ExprFilter] + implicit lazy val httpFmt: Format[Http] = Json.format[Http] + implicit lazy val k8sResourceFmt: Format[K8SResource] = Json.format[K8SResource] + implicit lazy val parameterFmt: Format[Parameter] = Json.format[Parameter] + implicit lazy val parameterSourceFmt: Format[ParameterSource] = Json.format[ParameterSource] + implicit lazy val resourceLabelsPolicyFmt: Format[ResourceLabelsPolicy] = Json.format[ResourceLabelsPolicy] + implicit lazy val specFmt: Format[Spec] = Json.format[Spec] + implicit lazy val statusFmt: Format[Status] = Json.format[Status] + implicit lazy val statusPolicyFmt: Format[StatusPolicy] = Json.format[StatusPolicy] + implicit lazy val subscriptionFmt: Format[Subscription] = Json.format[Subscription] + implicit lazy val timeFmt: Format[Time] = Json.format[Time] + implicit lazy val timeFilterFmt: Format[TimeFilter] = Json.format[TimeFilter] + implicit lazy val triggerTemplateFmt: Format[TriggerTemplate] = Json.format[TriggerTemplate] + implicit lazy val triggerParameterFmt: Format[TriggerParameter] = Json.format[TriggerParameter] + implicit lazy val triggerParameterSourceFmt: Format[TriggerParameterSource] = Json.format[TriggerParameterSource] + implicit lazy val triggerPolicyFmt: Format[TriggerPolicy] = Json.format[TriggerPolicy] + implicit lazy val triggerFmt: Format[Trigger] = Json.format[Trigger] + + implicit lazy val resourceDefinition: ResourceDefinition[Sensor] = ResourceDefinition[Sensor]("Sensor", "argoproj.io", "v1alpha1") + val crd = CustomResourceDefinition[Sensor] + + def apply(name: String, spec: Spec) = CustomResource[Spec, Status](spec) + .withLabels(("sensors.argoproj.io/sensor-controller-instanceid", 
"argo")) + .withName(name) + + case class ConditionsResetCriteria(byTime: ConditionsResetByTime) + + case class ConditionsResetByTime(cron: String, + timezone: Option[String] = None) + + case class DataFilter(path: String, + `type`: JSONType, + value: List[String] = List(), + comparator: Option[Comparator] = None, + template: Option[String] = None) + + case class EventContext(id: String, + source: String, + specversion: String, + `type`: String, + datacontenttype: String, + subject: String, + time: Time) + + case class EventDependencyFilter(time: Option[TimeFilter] = None, + context: Option[EventContext] = None, + data: List[DataFilter] = List(), + exprs: List[ExprFilter] = List()) + + case class EventDependency(name: String, + eventSourceName: String, + eventName: String, + filters: Option[EventDependencyFilter] = None) + + case class ExprFilter(expr: String, + fields: List[PayloadField]) + + case class Http(port: Int) + + case class K8SResource(apiVersion: String, + kind: String, + metadata: ObjectMetadata, + spec: Workflow.Spec) + + case class Parameter(dest: String, + action: Option[String] = None, + src: ParameterSource) + + case class ParameterSource(contextKey: Option[String] = None, + dataKey: Option[String] = None, + event: String, + value: Option[String] = None) + + case class Policy(backoff: Backoff, + errorOnBackoffTimeout: Boolean, + resourceLabels: ResourceLabelsPolicy) + + case class ResourceLabelsPolicy(labels: String) + + case class Spec(template: Option[Template] = None, + dependencies: List[EventDependency] = List(), + errorOnFailedRound: Option[Boolean] = None, + eventBusName: Option[String] = None, + replicas: Option[Int] = None, + subscription: Option[Subscription] = None, + triggers: List[Trigger] = List()) + + case class Status(completedAt: Option[Time] = None, + lastCycleTime: Time, + message: Option[String] = None, + nodes: Option[NodeStatus] = None, + phase: String, + resources: ObjectResource, + startedAt: Option[Time] = None, + triggerCycleCount: Option[Int] = None, + triggerCycleStatus: Int) + + case class Subscription(http: Option[Http] = None) + + case class TimeFilter(start: String, + stop: String) + + case class Trigger(template: TriggerTemplate, + parameters: List[TriggerParameter] = List(), + policy: Option[TriggerPolicy] = None, + retryStrategy: Option[Backoff] = None, + rateLimit: Option[RateLimit] = None) + + case class TriggerTemplate(name: String, + conditions: Option[String] = None, + http: Option[HttpTrigger] = None, + k8s: Option[K8STrigger] = None, + slack: Option[SlackTrigger] = None, + conditionsReset: List[ConditionsResetCriteria] = List()) + + + case class TriggerParameter(src: TriggerParameterSource, + dest: String, + action: TriggerParameterOption) + + case class TriggerParameterSource(dependencyName: String, + contextKey: String, + contextTemplate: String, + dataKey: String, + dataTemplate: String, + value: String) + + type TriggerParameterOption = String + + case class TriggerPolicy(k8s: Option[K8SResourcePolicy] = None, + status: Option[StatusPolicy] = None) +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/argo/events/Trigger.scala b/jvm/src/main/scala/com/harana/modules/argo/events/Trigger.scala new file mode 100644 index 0000000..6f2c73d --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/argo/events/Trigger.scala @@ -0,0 +1,37 @@ +package com.harana.modules.argo.events + +import com.harana.modules.argo._ +import com.harana.modules.argo.events.Sensor.{K8SResource, TriggerParameter} +import 
play.api.libs.json.{Format, Json} + +object Trigger { + + implicit lazy val httpTriggerFmt: Format[HttpTrigger] = Json.format[HttpTrigger] + implicit lazy val k8sTriggerFmt: Format[K8STrigger] = Json.format[K8STrigger] + implicit lazy val k8sSourceFmt: Format[K8SSource] = Json.format[K8SSource] + implicit lazy val slackTriggerFmt: Format[SlackTrigger] = Json.format[SlackTrigger] + + case class K8STrigger(group: String, + version: String, + resource: String, + action: String, + source: K8SSource, + parameters: List[Parameter] = List()) + + case class K8SSource(resource: K8SResource) + + case class HttpTrigger(url: String, + payload: List[TriggerParameter] = List(), + tls: Option[TLSConfig] = None, + method: Option[String] = None, + parameters: List[TriggerParameter] = List(), + timeout: Option[Int] = None, + basicAuth: Option[BasicAuth] = None, + headers: Map[String, String] = Map(), + secureHeaders: List[SecureHeader] = List()) + + case class SlackTrigger(slackToken: SecretKeySelector, + channel: Option[String] = None, + message: Option[String] = None, + parameters: List[TriggerParameter] = List()) +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/argo/package.scala b/jvm/src/main/scala/com/harana/modules/argo/package.scala new file mode 100644 index 0000000..f89536a --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/argo/package.scala @@ -0,0 +1,286 @@ +package com.harana.modules + +import play.api.libs.json.{Format, Json} +import skuber.Security.{SELinuxOptions, Sysctl} +import skuber.{LocalObjectReference, PodSecurityContext} + +package object argo { + + implicit lazy val affinityFmt: Format[Affinity] = Json.format[Affinity] + implicit lazy val awsElasticBlockStoreVolumeSourceFmt: Format[AWSElasticBlockStoreVolumeSource] = Json.format[AWSElasticBlockStoreVolumeSource] + implicit lazy val backoffFmt: Format[Backoff] = Json.format[Backoff] + implicit lazy val basicAuthFmt: Format[BasicAuth] = Json.format[BasicAuth] + implicit lazy val configMapKeySelectorFmt: Format[ConfigMapKeySelector] = Json.format[ConfigMapKeySelector] + implicit lazy val configMapVolumeSourceFmt: Format[ConfigMapVolumeSource] = Json.format[ConfigMapVolumeSource] + implicit lazy val containerFmt: Format[Container] = Json.format[Container] + implicit lazy val environmentVariableFmt: Format[EnvironmentVariable] = Json.format[EnvironmentVariable] + implicit lazy val fieldsV1Fmt: Format[FieldsV1] = Json.format[FieldsV1] + implicit lazy val hostPathVolumeSourceFmt: Format[HostPathVolumeSource] = Json.format[HostPathVolumeSource] + implicit lazy val keyToPathFmt: Format[KeyToPath] = Json.format[KeyToPath] + implicit lazy val k8sResourcePolicyFmt: Format[K8SResourcePolicy] = Json.format[K8SResourcePolicy] + implicit lazy val labelSelectorRequirementFmt: Format[LabelSelectorRequirement] = Json.format[LabelSelectorRequirement] + implicit lazy val labelSelectorFmt: Format[LabelSelector] = Json.format[LabelSelector] + implicit lazy val localObjectReferenceFmt: Format[LocalObjectReference] = Json.format[LocalObjectReference] + implicit lazy val managedFieldsEntryFmt: Format[ManagedFieldsEntry] = Json.format[ManagedFieldsEntry] + implicit lazy val microTimeFmt: Format[MicroTime] = Json.format[MicroTime] + implicit lazy val nfsVolumeSourceFmt: Format[NFSVolumeSource] = Json.format[NFSVolumeSource] + implicit lazy val nodeAffinityFmt: Format[NodeAffinity] = Json.format[NodeAffinity] + implicit lazy val nodeStatusFmt: Format[NodeStatus] = Json.format[NodeStatus] + implicit lazy val 
nodeSelectorRequirementFmt: Format[NodeSelectorRequirement] = Json.format[NodeSelectorRequirement] + implicit lazy val nodeSelectorTermFmt: Format[NodeSelectorTerm] = Json.format[NodeSelectorTerm] + implicit lazy val objectMetaFmt: Format[ObjectMetadata] = Json.format[ObjectMetadata] + implicit lazy val nodeSelectorFmt: Format[NodeSelector] = Json.format[NodeSelector] + implicit lazy val objectResourceFmt: Format[ObjectResource] = Json.format[ObjectResource] + implicit lazy val ownerReferenceFmt: Format[OwnerReference] = Json.format[OwnerReference] + implicit lazy val parameterFmt: Format[Parameter] = Json.format[Parameter] + implicit lazy val payloadFieldFmt: Format[PayloadField] = Json.format[PayloadField] + implicit lazy val persistentVolumeClaimFmt: Format[PersistentVolumeClaim] = Json.format[PersistentVolumeClaim] + implicit lazy val persistentVolumeClaimVolumeSourceFmt: Format[PersistentVolumeClaimVolumeSource] = Json.format[PersistentVolumeClaimVolumeSource] + implicit lazy val podAffinityFmt: Format[PodAffinity] = Json.format[PodAffinity] + implicit lazy val podAffinityTermFmt: Format[PodAffinityTerm] = Json.format[PodAffinityTerm] + implicit lazy val podAntiAffinityFmt: Format[PodAntiAffinity] = Json.format[PodAntiAffinity] + implicit lazy val podSecurityContextFmt: Format[PodSecurityContext] = Json.format[PodSecurityContext] + implicit lazy val preferredSchedulingTermFmt: Format[PreferredSchedulingTerm] = Json.format[PreferredSchedulingTerm] + implicit lazy val rateLimitFmt: Format[RateLimit] = Json.format[RateLimit] + implicit lazy val requestsFmt: Format[Requests] = Json.format[Requests] + implicit lazy val resourcesFmt: Format[Resources] = Json.format[Resources] + implicit lazy val secretKeySelectorFmt: Format[SecretKeySelector] = Json.format[SecretKeySelector] + implicit lazy val secureHeaderFmt: Format[SecureHeader] = Json.format[SecureHeader] + implicit lazy val seLinuxOptionsFmt: Format[SELinuxOptions] = Json.format[SELinuxOptions] + implicit lazy val serviceFmt: Format[Service] = Json.format[Service] + implicit lazy val servicePortFmt: Format[ServicePort] = Json.format[ServicePort] + implicit lazy val sysctlFmt: Format[Sysctl] = Json.format[Sysctl] + implicit lazy val templateFmt: Format[Template] = Json.format[Template] + implicit lazy val templateMetdataFmt: Format[TemplateMetadata] = Json.format[TemplateMetadata] + implicit lazy val timeFmt: Format[Time] = Json.format[Time] + implicit lazy val tlsConfigFmt: Format[TLSConfig] = Json.format[TLSConfig] + implicit lazy val valueFromFmt: Format[ValueFrom] = Json.format[ValueFrom] + implicit lazy val valueFromSourceFmt: Format[ValueFromSource] = Json.format[ValueFromSource] + implicit lazy val volumeFmt: Format[Volume] = Json.format[Volume] + implicit lazy val volumeMountFmt: Format[VolumeMount] = Json.format[VolumeMount] + implicit lazy val volumeSourceFmt: Format[VolumeSource] = Json.format[VolumeSource] + implicit lazy val weightedPodAffinityTermFmt: Format[WeightedPodAffinityTerm] = Json.format[WeightedPodAffinityTerm] + + + case class Affinity(nodeAffinity: Option[NodeAffinity] = None, + podAffinity: Option[PodAffinity] = None, + podAntiAffinity: Option[PodAntiAffinity] = None) + + case class AWSElasticBlockStoreVolumeSource(fsType: Option[String] = None, + partition: Option[Int] = None, + readOnly: Option[Boolean] = None, + volumeID: Option[String] = None) + + case class BasicAuth(username: SecretKeySelector, + password: SecretKeySelector) + + case class Backoff(duration: String, + factor: Int, + jitte: Int, + steps: 
Int) + + type Comparator = String + + case class ConfigMapKeySelector(key: Option[String] = None, + name: Option[String] = None, + optional: Option[Boolean] = None) + + case class ConfigMapVolumeSource(defaultMode: Option[Int] = None, + items: List[KeyToPath] = List()) + + + case class Container(args: List[String] = List(), + command: List[String] = List(), + env: List[EnvironmentVariable] = List(), + image: String, + imagePullPolicy: Option[String] = None, + mirrorVolumeMounts: Option[Boolean] = None, + name: String, + resources: Option[Resources] = None, + volumeMounts: List[VolumeMount] = List()) + + case class EnvironmentVariable(name: String, + value: String) + + case class FieldsV1(Raw: Option[String] = None) + + case class HostPathVolumeSource(path: Option[String] = None, + `type`: Option[String] = None) + + type JSONType = String + + case class K8SResourcePolicy(labels: Map[String, String], + backoff: Backoff, + errorOnBackoffTimeout: Boolean) + + case class KeyToPath(key: Option[String] = None, + mode: Option[Int] = None, + path: Option[String] = None) + + case class LabelSelector(matchExpressions: List[LabelSelectorRequirement] = List(), + matchLabels: Map[String, String] = Map()) + + case class LabelSelectorRequirement(key: Option[String] = None, + operator: Option[String] = None, + values: List[String] = List()) + + case class ManagedFieldsEntry(apiVersion: Option[String] = None, + fieldsType: Option[String] = None, + fieldsV1: Option[FieldsV1] = None, + manager: Option[String] = None, + action: Option[String] = None, + time: Option[Time] = None) + + case class MicroTime(Time: String) + + case class NFSVolumeSource(path: Option[String] = None, + readOnly: Option[Boolean] = None, + server: Option[String] = None) + + case class NodeAffinity(preferredDuringSchedulingIgnoredDuringExecution: List[PreferredSchedulingTerm] = List(), + requiredDuringSchedulingIgnoredDuringExecution: Option[NodeSelector] = None) + + case class NodeSelector(nodeSelectorTerms: List[NodeSelectorTerm] = List()) + + case class NodeSelectorRequirement(key: Option[String] = None, + operator: Option[String] = None, + values: List[String] = List()) + + case class NodeSelectorTerm(matchExpressions: List[NodeSelectorRequirement] = List(), + matchFields: List[NodeSelectorRequirement] = List()) + + case class NodeStatus(displayName: String, + id: String, + message: Option[String] = None, + name: String, + phase: Option[String] = None, + updateTime: Option[MicroTime] = None) + + case class ObjectMetadata(annotations: Map[String, String] = Map(), + clusterName: Option[String] = None, + creationTimestamp: Option[Time] = None, + deletionGracePeriodSeconds: Option[Int] = None, + deletionTimestamp: Option[Time] = None, + finalizers: List[String] = List(), + generateName: Option[String] = None, + generation: Option[String] = None, + labels: Map[String, String] = Map(), + managedFields: List[ManagedFieldsEntry] = List(), + name: Option[String] = None, + namespace: Option[String] = None, + ownerReferences: List[OwnerReference] = List(), + resourceVersion: Option[String] = None, + selfLink: Option[String] = None, + uid: Option[String] = None) + + case class ObjectResource(deployment: ObjectMetadata, + service: ObjectMetadata) + + case class OwnerReference(apiVersion: Option[String] = None, + blockOwnerDeletion: Option[Boolean] = None, + controller: Option[Boolean] = None, + kind: Option[String] = None, + name: Option[String] = None, + uid: Option[String] = None) + + case class Parameter(default: Option[String] = None, + 
globalName: Option[String] = None, + name: String, + value: Option[String] = None, + valueFrom: Option[ValueFrom] = None) + + case class PayloadField(path: String, + name: String) + + case class PodAffinity(preferredDuringSchedulingIgnoredDuringExecution: List[WeightedPodAffinityTerm] = List(), + requiredDuringSchedulingIgnoredDuringExecution: List[PodAffinityTerm] = List()) + + case class PodAffinityTerm(labelSelector: Option[LabelSelector] = None, + namespaces: List[String] = List(), + topologyKey: Option[String] = None) + + case class PodAntiAffinity(labelSelector: Option[LabelSelector] = None, + namespaces: List[String] = List(), + topologyKey: Option[String] = None) + + case class PersistentVolumeClaim(claimName: String) + + case class PersistentVolumeClaimVolumeSource(claimName: Option[String] = None, + readOnly: Option[Boolean] = None) + + case class PreferredSchedulingTerm(preference: Option[NodeSelectorTerm] = None, + weight: Option[Int] = None) + + type RateLimitUnit = String + + case class RateLimit(unit: RateLimitUnit, + requestsPerUnit: Int) + + case class Requests(cpu: Option[String] = None, + memory: Option[String] = None) + + case class Resources(requests: Option[Requests] = None) + + case class SecureHeader(name: String, + valueFrom: ValueFromSource) + + case class SecretKeySelector(key: Option[String] = None, + name: Option[String] = None, + optional: Option[Boolean] = None) + + case class Service(ports: List[ServicePort], + clusterIP: Option[String] = None) + + case class ServicePort(protocol: String, + port: Int, + targetPort: Int) + + case class StatusPolicy(allow: List[Int]) + + case class Template(metadata: Option[TemplateMetadata] = None, + serviceAccountName: Option[String] = None, + container: Option[Container] = None, + volumes: List[Volume] = List(), + securityContext: Option[PodSecurityContext] = None, + nodeSelector: Map[String, String] = Map(), + imagePullSecrets: List[LocalObjectReference] = List(), + priorityClassName: Option[String] = None, + priority: Option[Int] = None, + affinity: Option[Affinity] = None) + + case class TemplateMetadata(name: Option[String] = None, + labels: Map[String, String] = Map(), + volumes: List[Volume] = List()) + + case class TLSConfig(caCertSecret: SecretKeySelector, + clientCertSecret: SecretKeySelector, + clientKeySecret: SecretKeySelector) + + case class Time(nanos: Int, + seconds: Long) + + case class ValueFrom(path: String) + + case class ValueFromSource(configMapKeyRef: Option[ConfigMapKeySelector] = None, + secretKeyRef: Option[SecretKeySelector] = None) + + case class Volume(name: String, + persistentVolumeClaim: Option[PersistentVolumeClaim] = None, + secret: Map[String, String] = Map()) + + case class VolumeMount(mountPath: Option[String] = None, + mountPropagation: Option[String] = None, + name: String, + readOnly: Option[Boolean] = None, + subPath: Option[String] = None, + subPathExpr: Option[String] = None) + + case class VolumeSource(awsElasticBlockStore: Option[AWSElasticBlockStoreVolumeSource] = None, + configMap: Option[ConfigMapVolumeSource] = None, + hostPath: Option[HostPathVolumeSource] = None, + nfs: Option[NFSVolumeSource] = None, + persistentVolumeClaim: Option[PersistentVolumeClaimVolumeSource] = None) + + case class WeightedPodAffinityTerm(podAffinityTerm: Option[PodAffinityTerm] = None, + weight: Option[Int] = None) +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/argo/workflows/Workflow.scala b/jvm/src/main/scala/com/harana/modules/argo/workflows/Workflow.scala new 
file mode 100644 index 0000000..d51ad60 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/argo/workflows/Workflow.scala @@ -0,0 +1,55 @@ +package com.harana.modules.argo.workflows + +import ai.x.play.json.Jsonx +import com.harana.modules.argo.{Affinity, NodeStatus, PersistentVolumeClaim, SecretKeySelector, Time} +import play.api.libs.json.{Format, Json} +import skuber.apiextensions.CustomResourceDefinition +import skuber.{CustomResource, ListResource, ResourceDefinition} + +object Workflow { + + type Workflow = CustomResource[Spec, Status] + type WorkflowList = ListResource[Workflow] + + implicit lazy val specFmt: Format[Spec] = Jsonx.formatCaseClass[Spec] + implicit lazy val statusFmt: Format[Status] = Json.format[Status] + implicit lazy val resourceDefinition: ResourceDefinition[Workflow] = ResourceDefinition[Workflow]("Workflow", "argoproj.io", "v1alpha1") + val crd = CustomResourceDefinition[Workflow] + + def apply(name: String, spec: Spec) = CustomResource[Spec, Status](spec).withName(name) + + case class Spec(activeDeadlineSeconds: Option[Int] = None, + affinity: Option[Affinity] = None, + arguments: Option[Arguments] = None, + automountServiceAccountToken: Option[Boolean] = None, + entrypoint: Option[String] = None, + hostNetwork: Option[Boolean] = None, + imagePullSecrets: List[SecretKeySelector] = List(), + nodeSelector: Option[String] = None, + onExit: Option[String] = None, + parallelism: Option[Int] = None, + podGC: Option[PodGC] = None, + podSpecPath: Option[String] = None, + priority: Option[Int] = None, + schedulerName: Option[String] = None, + serviceAccountName: Option[String] = None, + suspend: Option[Boolean] = None, + templates: List[Template] = List(), + tolerations: List[Toleration] = List(), + ttlSecondsAfterFinished: Option[Int] = None, + ttlStrategy: Option[TtlStrategy] = None, + volumeClaimTemplates: List[PersistentVolumeClaim] = List(), + volumes: List[Volume] = List()) + + case class Status(compressedNodes: String, + finishedAt: Time, + message: String, + nodes: NodeStatus, + offloadNodeStatusVersion: String, + outputs: Outputs, + persistentVolumeClaims: List[Volume] = List(), + phase: String, + startedAt: Time, + storedTemplates: Template) + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/argo/workflows/WorkflowTemplate.scala b/jvm/src/main/scala/com/harana/modules/argo/workflows/WorkflowTemplate.scala new file mode 100644 index 0000000..6476f70 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/argo/workflows/WorkflowTemplate.scala @@ -0,0 +1,53 @@ +package com.harana.modules.argo.workflows + +import ai.x.play.json.Jsonx +import com.harana.modules.argo.{Affinity, NodeStatus, PersistentVolumeClaim, SecretKeySelector, Time} +import play.api.libs.json.{Format, Json} +import skuber.apiextensions.CustomResourceDefinition +import skuber.{CustomResource, ResourceDefinition} + +object WorkflowTemplate { + + type WorkflowTemplate = CustomResource[Spec, Status] + + implicit lazy val specFmt: Format[Spec] = Jsonx.formatCaseClass[Spec] + implicit lazy val statusFmt: Format[Status] = Json.format[Status] + implicit lazy val resourceDefinition: ResourceDefinition[WorkflowTemplate] = ResourceDefinition[WorkflowTemplate]("WorkflowTemplate", "argoproj.io", "v1alpha1") + val crd = CustomResourceDefinition[WorkflowTemplate] + + def apply(name: String, spec: Spec) = CustomResource[Spec, Status](spec).withName(name) + + case class Spec(activeDeadlineSeconds: Option[Int] = None, + affinity: Option[Affinity] = None, + arguments: 
Option[Arguments] = None, + automountServiceAccountToken: Option[Boolean] = None, + entrypoint: Option[String] = None, + hostNetwork: Option[Boolean] = None, + imagePullSecrets: List[SecretKeySelector] = List(), + nodeSelector: Option[String] = None, + onExit: Option[String] = None, + parallelism: Option[Int] = None, + podGC: Option[PodGC] = None, + podSpecPath: Option[String] = None, + priority: Option[Int] = None, + schedulerName: Option[String] = None, + serviceAccountName: Option[String] = None, + suspend: Option[Boolean] = None, + templates: List[Template] = List(), + tolerations: List[Toleration] = List(), + ttlSecondsAfterFinished: Option[Int] = None, + ttlStrategy: Option[TtlStrategy] = None, + volumeClaimTemplates: List[PersistentVolumeClaim] = List(), + volumes: List[Volume] = List()) + + case class Status(compressedNodes: String, + finishedAt: Time, + message: String, + nodes: NodeStatus, + offloadNodeStatusVersion: String, + outputs: Outputs, + persistentVolumeClaims: List[Volume] = List(), + phase: String, + startedAt: Time, + storedTemplates: Template) +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/argo/workflows/package.scala b/jvm/src/main/scala/com/harana/modules/argo/workflows/package.scala new file mode 100644 index 0000000..9fd4271 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/argo/workflows/package.scala @@ -0,0 +1,235 @@ +package com.harana.modules.argo + +import ai.x.play.json.{CamelToSnakeNameEncoder, Jsonx} +import play.api.libs.json.{Format, Json} + +package object workflows { + + implicit lazy val encoder: CamelToSnakeNameEncoder = CamelToSnakeNameEncoder() + + implicit lazy val argumentsFmt: Format[Arguments] = Json.format[Arguments] + implicit lazy val artifactoryFmt: Format[Artifactory] = Json.format[Artifactory] + implicit lazy val artifactoryArtifactFmt: Format[ArtifactoryArtifact] = Json.format[ArtifactoryArtifact] + implicit lazy val artifactoryAuthFmt: Format[ArtifactoryAuth] = Json.format[ArtifactoryAuth] + implicit lazy val artifactFmt: Format[Artifact] = Json.format[Artifact] + implicit lazy val artifactLocationFmt: Format[ArtifactLocation] = Json.format[ArtifactLocation] + implicit lazy val backoffFmt: Format[Backoff] = Json.format[Backoff] + implicit lazy val continueOnFmt: Format[ContinueOn] = Json.format[ContinueOn] + implicit lazy val dagFmt: Format[DAG] = Json.format[DAG] + implicit lazy val dagTaskFmt: Format[DAGTask] = Json.format[DAGTask] + implicit lazy val executorConfigFmt: Format[ExecutorConfig] = Json.format[ExecutorConfig] + implicit lazy val gitArtifactFmt: Format[GitArtifact] = Json.format[GitArtifact] + implicit lazy val hdfsArtifactFmt: Format[HDFSArtifact] = Json.format[HDFSArtifact] + implicit lazy val hdfsConfigFmt: Format[HDFSConfig] = Json.format[HDFSConfig] + implicit lazy val hdfsKrbConfigFmt: Format[HDFSKrbConfig] = Json.format[HDFSKrbConfig] + implicit lazy val httpArtifactFmt: Format[HTTPArtifact] = Json.format[HTTPArtifact] + implicit lazy val inputsFmt: Format[Inputs] = Json.format[Inputs] + implicit lazy val metadataFmt: Format[Metadata] = Json.format[Metadata] + implicit lazy val outputsFmt: Format[Outputs] = Json.format[Outputs] + implicit lazy val podGcFmt: Format[PodGC] = Json.format[PodGC] + implicit lazy val rawFmt: Format[Raw] = Json.format[Raw] + implicit lazy val rawArtifactFmt: Format[RawArtifact] = Json.format[RawArtifact] + implicit lazy val resourceFmt: Format[Resource] = Json.format[Resource] + implicit lazy val retryStrategyFmt: Format[RetryStrategy] = 
Json.format[RetryStrategy] + implicit lazy val s3Fmt: Format[S3] = Json.format[S3] + implicit lazy val s3ArtifactFmt: Format[S3Artifact] = Json.format[S3Artifact] + implicit lazy val s3BucketFmt: Format[S3Bucket] = Json.format[S3Bucket] + implicit lazy val scriptFmt: Format[Script] = Json.format[Script] + implicit lazy val sequenceFmt: Format[Sequence] = Json.format[Sequence] + implicit lazy val sidecarFmt: Format[Sidecar] = Json.format[Sidecar] + implicit lazy val stepFmt: Format[Step] = Json.format[Step] + implicit lazy val suspendFmt: Format[Suspend] = Json.format[Suspend] + implicit lazy val templateFmt: Format[Template] = Jsonx.formatCaseClass[Template] + implicit lazy val templateRefFmt: Format[TemplateRef] = Json.format[TemplateRef] + implicit lazy val tolerationFmt: Format[Toleration] = Json.format[Toleration] + implicit lazy val ttlStrategyFmt: Format[TtlStrategy] = Json.format[TtlStrategy] + implicit lazy val volumeFmt: Format[Volume] = Json.format[Volume] + + case class Arguments(artifacts: List[Artifact] = List(), + parameters: List[Parameter] = List()) + + case class Artifactory(url: Option[String] = None, + usernameSecret: Option[SecretKeySelector] = None, + passwordSecret: Option[SecretKeySelector] = None) + + case class ArtifactoryArtifact(artifactoryAuth: Option[ArtifactoryAuth] = None, + url: Option[String] = None) + + case class ArtifactoryAuth(passwordSecret: Option[SecretKeySelector] = None, + usernameSecret: Option[SecretKeySelector] = None) + + case class Artifact(name: String, + artifactory: Option[Artifactory] = None, + http: Map[String, String] = Map(), + path: Option[String] = None, + raw: Option[Raw] = None, + s3: Option[S3] = None, + mode: Option[String] = None) + + case class ArtifactLocation(archiveLogs: Option[Boolean] = None, + artifactory: Option[ArtifactoryArtifact] = None, + git: Option[GitArtifact] = None, + hdfs: Option[HDFSArtifact] = None, + http: Option[HTTPArtifact] = None, + raw: Option[RawArtifact] = None, + s3: Option[S3Artifact] = None) + + case class Backoff(duration: Option[String] = None, + factor: Option[Int] = None, + maxDuration: Option[String] = None) + + case class ContinueOn(error: Option[Boolean] = None, + failed: Option[Boolean] = None) + + case class DAG(failFast: Option[Boolean] = None, + target: Option[String] = None, + tasks: List[DAGTask] = List()) + + case class DAGTask(arguments: Option[Arguments] = None, + continueOn: Option[ContinueOn] = None, + dependencies: List[String] = List(), + name: Option[String] = None, + onExit: Option[String] = None, + template: Option[String] = None, + templateRef: Option[TemplateRef] = None, + when: Option[String] = None, + withItems: List[String] = List(), + withSequence: Option[Sequence] = None) + + case class ExecutorConfig(serviceAccountName: Option[String] = None) + + case class GitArtifact(depth: Option[Long] = None, + fetch: List[String] = List()) + + case class HDFSArtifact(force: Option[Boolean] = None, + hDFSConfig: Option[HDFSConfig] = None, + path: Option[String] = None) + + case class HDFSConfig(hDFSKrbConfig: Option[HDFSKrbConfig] = None, + hdfsUser: Option[String] = None) + + case class HDFSKrbConfig(krbCCacheSecret: Option[SecretKeySelector] = None, + krbConfigConfigMap: Option[ConfigMapKeySelector] = None, + krbKeytabSecret: Option[SecretKeySelector] = None, + krbRealm: Option[String] = None, + krbServicePrincipalName: Option[String] = None, + krbUsername: Option[String] = None) + + case class HTTPArtifact(url: Option[String] = None) + + case class Inputs(artifacts: 
List[Artifact] = List(), + parameters: List[Parameter] = List()) + + case class Metadata(annotations: Option[String] = None, + labels: Option[String] = None) + + case class Outputs(artifacts: List[Artifact] = List(), + parameters: List[Parameter] = List(), + result: Option[String] = None) + + case class PodGC(strategy: String) + + case class Raw(data: String) + + case class RawArtifact(data: Option[String] = None) + + case class Resource(action: String, + failureCondition: Option[String] = None, + manifest: Option[String] = None, + successCondition: Option[String] = None) + + case class RetryStrategy(backoff: Option[Backoff] = None, + limit: Int, + retryPolicy: String) + + case class S3(accessKeySecret: Option[SecretKeySelector] = None, + bucket: Option[String] = None, + endpoint: Option[String] = None, + insecure: Option[Boolean] = None, + key: Option[String] = None, + region: Option[String] = None, + roleARN: Option[String] = None, + secretKeySecret: Option[SecretKeySelector] = None) + + case class S3Artifact(key: Option[String] = None, + s3bucket: Option[S3Bucket] = None) + + case class S3Bucket(accessKeySecret: Option[SecretKeySelector] = None, + bucket: Option[String] = None, + endpoint: Option[String] = None, + insecure: Option[Boolean] = None, + region: Option[Boolean] = None, + roleARN: Option[String] = None, + secretKeySecret: Option[SecretKeySelector]) + + case class Script(image: String, + command: List[String] = List(), + source: String) + + case class Sidecar(name: String, + image: String) + + case class Sequence(count: Option[String] = None, + end: Option[String] = None, + format: Option[String] = None, + start: Option[String] = None) + + case class Step(arguments: Option[Arguments] = None, + continueOn: Option[ContinueOn] = None, + name: String, + onExit: Option[String] = None, + template: Option[String] = None, + templateRef: Option[String] = None, + when: Option[String] = None, + withItems: List[String] = List(), + withParam: Option[String] = None, + withSequence: Option[Sequence] = None) + + case class Suspend(duration: Option[String] = None) + + case class Template(archiveLocation: Option[ArtifactLocation] = None, + container: Option[Container] = None, + daemon: Option[Boolean] = None, + dag: Option[DAG] = None, + executor: Option[ExecutorConfig] = None, + initContainers: List[Container] = List(), + inputs: Option[Inputs] = None, + metadata: Option[Metadata] = None, + name: String, + nodeSelector: Option[String] = None, + outputs: Option[Outputs] = None, + parallelism: Option[Int] = None, + parameters: List[Parameter] = List(), + podSpecPath: Option[String] = None, + priority: Option[Int] = None, + priorityClassName: Option[String] = None, + resource: Option[Resource] = None, + retryStrategy: Option[RetryStrategy] = None, + schedulerName: Option[String] = None, + script: Option[Script] = None, + serviceAccountName: Option[String] = None, + sidecars: List[Container] = List(), + steps: List[Step] = List(), + suspend: Option[Suspend] = None, + template: Option[String] = None, + templateRef: Option[TemplateRef] = None, + tolerations: List[Toleration] = List(), + volumes: List[Volume] = List()) + + case class TemplateRef(name: Option[String] = None, + runtimeResolution: Option[Boolean] = None, + template: Option[String] = None) + + case class Toleration(effect: Option[String] = None, + key: Option[String] = None, + operator: Option[String] = None, + tolerationSeconds: Option[Int] = None, + value: Option[String] = None) + + case class TtlStrategy(secondsAfterCompletion: 
Option[Int] = None, + secondsAfterSuccess: Option[Int] = None, + secondsAfterFailure: Option[Int] = None) + + case class Volume(name: Option[String] = None, + volumeSource: Option[VolumeSource] = None) +} diff --git a/jvm/src/main/scala/com/harana/modules/auth0/Auth0.scala b/jvm/src/main/scala/com/harana/modules/auth0/Auth0.scala new file mode 100644 index 0000000..b8bbd58 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/auth0/Auth0.scala @@ -0,0 +1,61 @@ +package com.harana.modules.auth0 + +import com.auth0.json.auth.{CreatedUser, TokenHolder, UserInfo} +import com.auth0.json.mgmt.roles.Role +import com.auth0.json.mgmt.users.User +import com.harana.modules.auth0.models.Auth0Error +import zio.IO +import zio.macros.accessible + +import java.net.URL + +@accessible +trait Auth0 { + + def authorizeUrl(redirectUri: String, + audience: Option[String] = None, + connection: Option[String] = None, + parameter: Option[(String, String)] = None, + responseType: Option[String] = None, + scope: Option[String] = None, + state: Option[String] = None): IO[Nothing, URL] + + def logoutUrl(returnToUrl: String, setClientId: Boolean, useFederated: Option[Boolean] = None): IO[Nothing, URL] + + def userInfo(accessToken: String): IO[Auth0Error, UserInfo] + + def resetPassword(email: String): IO[Auth0Error, Unit] + + def signUp(email: String, username: Option[String], password: String): IO[Auth0Error, CreatedUser] + + def login(emailOrUsername: String, password: String, realm: Option[String]): IO[Auth0Error, TokenHolder] + + def requestToken(audience: String): IO[Auth0Error, TokenHolder] + + def revokeToken(refreshToken: String): IO[Auth0Error, Unit] + + def renewAuth(refreshToken: String): IO[Auth0Error, TokenHolder] + + def exchangeCode(code: String, redirectUri: String): IO[Auth0Error, TokenHolder] + + def listByEmail(email: String): IO[Auth0Error, List[User]] + + def getUser(id: String): IO[Auth0Error, User] + + def createUser(user: User): IO[Auth0Error, User] + + def deleteUser(id: String): IO[Auth0Error, Unit] + + def updateUser(id: String, user: User): IO[Auth0Error, User] + + def getRole(id: String): IO[Auth0Error, Role] + + def createRole(role: Role): IO[Auth0Error, Role] + + def deleteRole(id: String): IO[Auth0Error, Unit] + + def updateRole(id: String, role: Role): IO[Auth0Error, Role] + + def assignUsersToRole(roleId: String, userIds: List[String]): IO[Auth0Error, Unit] + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/auth0/LiveAuth0.scala b/jvm/src/main/scala/com/harana/modules/auth0/LiveAuth0.scala new file mode 100644 index 0000000..b652fd7 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/auth0/LiveAuth0.scala @@ -0,0 +1,205 @@ +package com.harana.modules.auth0 + +import com.auth0.client.auth.AuthAPI +import com.auth0.client.mgmt.ManagementAPI +import com.auth0.client.mgmt.filter.UserFilter +import com.auth0.exception.{APIException, Auth0Exception, RateLimitException} +import com.auth0.json.auth._ +import com.auth0.json.mgmt.roles.Role +import com.auth0.json.mgmt.users.User +import com.auth0.net.Request +import com.harana.modules.auth0.models.Auth0Error +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import zio.{IO, UIO, ZIO, ZLayer} + +import java.net.URL +import scala.jdk.CollectionConverters._ + +object LiveAuth0 { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] 
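+      // Wiring note: the Config, Logger and Micrometer services resolved above are injected into LiveAuth0 and the
+      // result is exposed as a ZLayer. A usage sketch (accessor generated by @accessible; layer names assumed):
+      //   Auth0.userInfo(accessToken).provide(LiveAuth0.layer, configLayer, loggerLayer, micrometerLayer)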
+ } yield LiveAuth0(config, logger, micrometer) + } +} + +case class LiveAuth0(config: Config, logger: Logger, micrometer: Micrometer) extends Auth0 { + + private val authApi = for { + domain <- config.string("auth0.domain", "") + clientId <- config.secret("auth0-client-id") + clientSecret <- config.secret("auth0-client-secret") + } yield { + AuthAPI.newBuilder(domain, clientId, clientSecret).build() + } + + private val managementApi = for { + apiToken <- config.secret("auth0-api-token") + domain <- config.string("auth0.domain", "") + } yield { + ManagementAPI.newBuilder(domain, apiToken).build() + } + + def authorizeUrl(redirectUri: String, + audience: Option[String] = None, + connection: Option[String] = None, + parameter: Option[(String, String)] = None, + responseType: Option[String] = None, + scope: Option[String] = None, + state: Option[String] = None): UIO[URL] = + for { + a <- authApi + r <- ZIO.succeed { + var b = a.authorizeUrl(redirectUri) + if (audience.nonEmpty) b = b.withAudience(audience.get) + if (audience.nonEmpty) b = b.withAudience(audience.get) + if (connection.nonEmpty) b = b.withConnection(connection.get) + if (parameter.nonEmpty) b = b.withParameter(parameter.get._1, parameter.get._2) + if (responseType.nonEmpty) b = b.withResponseType(responseType.get) + if (scope.nonEmpty) b = b.withScope(scope.get) + if (state.nonEmpty) b = b.withState(state.get) + new URL(b.build()) + } + } yield r + + def logoutUrl(returnToUrl: String, setClientId: Boolean, useFederated: Option[Boolean] = None): UIO[URL] = + for { + a <- authApi + r <- ZIO.succeed { + var b = a.logoutUrl(returnToUrl, setClientId) + if (useFederated.nonEmpty) b = b.useFederated(useFederated.get) + new URL(b.build()) + } + } yield r + + def userInfo(accessToken: String): IO[Auth0Error, UserInfo] = + for { + a <- authApi + r <- execute(a.userInfo(accessToken)) + } yield r + + def resetPassword(email: String): IO[Auth0Error, Unit] = + for { + a <- authApi + _ <- execute(a.resetPassword(email, "Username-Password-Authentication")) + r <- ZIO.unit + } yield r + + def signUp(email: String, username: Option[String], password: String): IO[Auth0Error, CreatedUser] = + for { + a <- authApi + r <- execute( + if (username.nonEmpty) a.signUp(email, username.get, password, "Username-Password-Authentication") + else a.signUp(email, password, "Username-Password-Authentication") + ) + } yield r + + def login(emailOrUsername: String, password: String, realm: Option[String]): IO[Auth0Error, TokenHolder] = + for { + a <- authApi + r <- execute( + if (realm.nonEmpty) a.login(emailOrUsername, password, realm.get) + else a.login(emailOrUsername, password) + ) + } yield r + + def requestToken(audience: String): IO[Auth0Error, TokenHolder] = + for { + a <- authApi + r <- execute(a.requestToken(audience)) + } yield r + + def revokeToken(refreshToken: String): IO[Auth0Error, Unit] = + for { + a <- authApi + _ <- execute(a.revokeToken(refreshToken)) + r <- ZIO.unit + } yield r + + def renewAuth(refreshToken: String): IO[Auth0Error, TokenHolder] = + for { + a <- authApi + r <- execute(a.renewAuth(refreshToken)) + } yield r + + def exchangeCode(code: String, redirectUri: String): IO[Auth0Error, TokenHolder] = + for { + a <- authApi + r <- execute(a.exchangeCode(code, redirectUri)) + } yield r + + def listByEmail(email: String): IO[Auth0Error, List[User]] = + for { + m <- managementApi + r <- execute(m.users.listByEmail(email, new UserFilter)).map(_.asScala.toList) + } yield r + + def getUser(id: String): IO[Auth0Error, User] = + for { + m <- 
managementApi + r <- execute(m.users.get(id, new UserFilter())) + } yield r + + def createUser(user: User): IO[Auth0Error, User] = + for { + m <- managementApi + r <- execute(m.users.create(user)) + } yield r + + def deleteUser(id: String): IO[Auth0Error, Unit] = + for { + m <- managementApi + _ <- execute(m.users.delete(id)) + r <- ZIO.unit + } yield r + + def updateUser(id: String, user: User): IO[Auth0Error, User] = + for { + m <- managementApi + r <- execute(m.users.update(id, user)) + } yield r + + def getRole(id: String): IO[Auth0Error, Role] = + for { + m <- managementApi + r <- execute(m.roles.get(id)) + } yield r + + def createRole(role: Role): IO[Auth0Error, Role] = + for { + m <- managementApi + r <- execute(m.roles.create(role)) + } yield r + + def deleteRole(id: String): IO[Auth0Error, Unit] = + for { + m <- managementApi + _ <- execute(m.roles.delete(id)) + r <- ZIO.unit + } yield r + + def updateRole(id: String, role: Role): IO[Auth0Error, Role] = + for { + m <- managementApi + r <- execute(m.roles.update(id, role)) + } yield r + + def assignUsersToRole(roleId: String, userIds: List[String]): IO[Auth0Error, Unit] = + for { + m <- managementApi + _ <- execute(m.roles.assignUsers(roleId, userIds.asJava)) + r <- ZIO.unit + } yield r + + // Run the blocking Auth0 request inside the effect and map SDK exceptions onto the typed Auth0Error channel. + private def execute[T](request: Request[T]): IO[Auth0Error, T] = + ZIO.attempt(request.execute().getBody).mapError { + case e: RateLimitException => Auth0Error.RateLimit(e) + case e: APIException => Auth0Error.Api(e) + case e: Auth0Exception => Auth0Error.Request(e) + } + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/auth0/models/Auth0Error.scala b/jvm/src/main/scala/com/harana/modules/auth0/models/Auth0Error.scala new file mode 100644 index 0000000..5b42343 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/auth0/models/Auth0Error.scala @@ -0,0 +1,8 @@ +package com.harana.modules.auth0.models + +sealed trait Auth0Error +object Auth0Error { + case class Api(e: Exception) extends Auth0Error + case class RateLimit(e: Exception) extends Auth0Error + case class Request(e: Exception) extends Auth0Error +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/aws/AWS.scala b/jvm/src/main/scala/com/harana/modules/aws/AWS.scala new file mode 100644 index 0000000..1e3eb65 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/aws/AWS.scala @@ -0,0 +1,36 @@ +package com.harana.modules.aws + +import awscala.iam.AccessKey +import awscala.s3.S3ObjectSummary +import com.amazonaws.services.simpleemail.model.{Message, Template} +import zio.Task +import zio.macros.accessible + +import java.io.InputStream + +@accessible +trait AWS { + + def iamCreateS3User(name: String, bucket: String, prefix: String): Task[AccessKey] + def iamDeleteUser(name: String): Task[Unit] + + def s3CreateBucket(name: String): Task[Unit] + def s3List(bucket: String, prefix: Option[String]): Task[List[S3ObjectSummary]] + def s3ListAsStream(bucket: String, prefix: Option[String]): Task[Stream[Either[String, S3ObjectSummary]]] + def s3ListTags(bucket: String, at: String): Task[Map[String, String]] + def s3CopyFile(fromBucket: String, from: String, toBucket: Option[String], to: String): Task[Unit] + def s3CopyFolder(fromBucket: String, from: String, toBucket: Option[String], to: String): Task[Unit] + def s3Move(fromBucket: String, from: String, toBucket: Option[String], to: String): Task[Unit] + def s3Get(bucket: String, at: String): Task[InputStream] + def s3Put(bucket: String, at: String, inputStream: InputStream,
contentLength: Long): Task[Unit] + def s3Rename(bucket: String, from: String, to: String): Task[Unit] + def s3Delete(bucket: String, at: String): Task[Unit] + def s3Tag(bucket: String, at: String, tags: Map[String, String]): Task[Unit] + + def sesCreateTemplate(template: Template): Task[Unit] + def sesDeleteTemplate(name: String): Task[Unit] + def sesSendEmail(message: Message, to: List[String], cc: List[String], bcc: List[String], sender: String, replyTo: List[String] = List()): Task[Unit] + def sesSendTemplatedEmail(template: String, templateValues: Map[String, String], to: List[String], cc: List[String], bcc: List[String], sender: String, replyTo: List[String] = List()): Task[Unit] + def sesSendBulkTemplatedEmail(template: String, toWithTemplateValues: List[(String, Map[String, String])], sender: String, replyTo: List[String] = List()): Task[Unit] + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/aws/LiveAWS.scala b/jvm/src/main/scala/com/harana/modules/aws/LiveAWS.scala new file mode 100644 index 0000000..2aec2b8 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/aws/LiveAWS.scala @@ -0,0 +1,290 @@ +package com.harana.modules.aws + +import awscala._ +import awscala.iam.{AccessKey, IAM} +import awscala.s3.{Bucket, S3, S3ObjectSummary} +import com.amazonaws.auth._ +import com.amazonaws.services.identitymanagement.model.DeleteUserPolicyRequest +import com.amazonaws.services.s3.model._ +import com.amazonaws.services.s3.transfer.TransferManagerBuilder +import com.amazonaws.services.simpleemail.model._ +import com.amazonaws.services.simpleemail.{AmazonSimpleEmailServiceAsync, AmazonSimpleEmailServiceAsyncClient} +import com.harana.modules.aws.LiveAWS._ +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import io.circe.syntax._ +import zio.{Task, ZIO, ZLayer} + +import java.io.InputStream +import java.util.concurrent.atomic.AtomicReference +import scala.jdk.CollectionConverters._ + +object LiveAWS { + val credentialsProviderRef = new AtomicReference[Option[AWSCredentialsProvider]](None) + val iamRef = new AtomicReference[Option[IAM]](None) + val s3Ref = new AtomicReference[Option[S3]](None) + val sesRef = new AtomicReference[Option[AmazonSimpleEmailServiceAsync]](None) + + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveAWS(config, logger, micrometer) + } +} + +case class LiveAWS(config: Config, logger: Logger, micrometer: Micrometer) extends AWS { + + private def credentialsProvider = + for { + provider <- if (credentialsProviderRef.get.nonEmpty) ZIO.attempt(credentialsProviderRef.get.get) else + for { + accessId <- config.secret("aws-access-id") + secretKey <- config.secret("aws-secret-key") + credentialsFile <- config.optString("aws.credentialsFile") + useCredentialsFile <- config.boolean("aws.useCredentialsFile", default = false) + useEnvironmentVariables <- config.boolean("aws.useEnvironmentVariables", default = false) + useInstanceProfile <- config.boolean("aws.useInstanceProfile", default = false) + profile <- config.optString("aws.profile") + provider = (accessId, secretKey, credentialsFile, useCredentialsFile, useEnvironmentVariables, useInstanceProfile, profile) match { + case (_, _, _, _, true, _, _) => new EnvironmentVariableCredentialsProvider() + case (_, _, _, _, _, true, _) => InstanceProfileCredentialsProvider.getInstance() + case (a, s, _, _, 
_, _, _) => new AWSStaticCredentialsProvider(new BasicAWSCredentials(a, s)) + } + } yield provider + _ = credentialsProviderRef.set(Some(provider)) + } yield provider + + + private def iamClient = + for { + client <- if (iamRef.get.nonEmpty) ZIO.attempt(iamRef.get.get) else + for { + creds <- credentialsProvider + iam <- ZIO.attempt(IAM(creds)) + } yield iam + _ = iamRef.set(Some(client)) + } yield client + + + private def s3Client = + for { + client <- if (s3Ref.get.nonEmpty) ZIO.attempt(s3Ref.get.get) else + for { + creds <- credentialsProvider + region <- config.secret("aws-region") + s3 <- ZIO.attempt(S3(creds)(awscala.Region(region))) + } yield s3 + _ = s3Ref.set(Some(client)) + } yield client + + + private def sesClient = + for { + client <- if (sesRef.get.nonEmpty) ZIO.attempt(sesRef.get.get) else + for { + creds <- credentialsProvider + region <- config.secret("aws-region") + client <- ZIO.succeed(AmazonSimpleEmailServiceAsyncClient.asyncBuilder().withCredentials(creds).withRegion(region).build()) + } yield client + _ = sesRef.set(Some(client)) + } yield client + + + private def s3Bucket(bucket: String): Task[Bucket] = + for { + client <- s3Client + bucket <- ZIO.fromOption(client.bucket(bucket)).orElseFail(new Throwable("No available bucket")) + } yield bucket + + + def iamCreateS3User(name: String, bucket: String, prefix: String): Task[AccessKey] = + for { + client <- iamClient + user <- ZIO.attempt(client.createUser(name)) + s3arn = s"arn:aws:s3:::$bucket/$prefix" + policy = Policy(Seq(Statement(Effect.Allow, Seq(Action("s3:*")), Seq(Resource(s3arn))))) + _ = user.putPolicy(s"$name-s3", policy)(client) + accessKey = user.createAccessKey()(client) + } yield accessKey + + + def iamDeleteUser(name: String): Task[Unit] = + for { + client <- iamClient + user <- ZIO.attempt(client.user(name)) + _ <- ZIO.attempt(client.deleteUserPolicy(new DeleteUserPolicyRequest().withUserName(name).withPolicyName(s"$name-s3"))) + _ <- ZIO.foreachDiscard(user)(u => ZIO.attempt(client.delete(u))) + } yield () + + + def s3CreateBucket(name: String): Task[Unit] = + for { + client <- s3Client + _ <- ZIO.attempt(client.createBucket(name)) + } yield () + + + def s3List(bucket: String, prefix: Option[String]): Task[List[S3ObjectSummary]] = + for { + client <- s3Client + bucket <- s3Bucket(bucket) + summaries <- ZIO.attempt(client.objectSummaries(bucket, prefix.getOrElse("")).toList) + } yield summaries + + + def s3ListAsStream(bucket: String, prefix: Option[String]): Task[Stream[Either[String, S3ObjectSummary]]] = + for { + client <- s3Client + bucket <- s3Bucket(bucket) + summaries <- ZIO.attempt(client.ls(bucket, prefix.getOrElse(""))) + } yield summaries + + + def s3ListTags(bucket: String, at: String): Task[Map[String, String]] = + for { + client <- s3Client + request = new GetObjectTaggingRequest(bucket, at) + tagging <- ZIO.attempt(client.getObjectTagging(request)) + tags = tagging.getTagSet.asScala.map(t => t.getKey -> t.getValue).toMap + } yield tags + + + def s3CopyFile(fromBucket: String, from: String, toBucket: Option[String], to: String): Task[Unit] = + for { + client <- s3Client + _ <- ZIO.attempt(client.copyObject(fromBucket, from, toBucket.getOrElse(fromBucket), to)) + } yield () + + + def s3CopyFolder(fromBucket: String, from: String, toBucket: Option[String], to: String): Task[Unit] = + for { + client <- s3Client + manager <- ZIO.attempt { + val tm = TransferManagerBuilder.standard + tm.setS3Client(client) + tm.build + } + fromFiles <- ZIO.attempt(client.listObjects(fromBucket, 
s"$from/").getObjectSummaries.asScala.toList) + // Copy each object under the source prefix in parallel, preserving its key relative to that prefix. + _ <- ZIO.foreachParDiscard(fromFiles) { file => + for { + filename <- ZIO.succeed(file.getKey.replace(s"$from/", "")) + _ <- logger.debug(s"Copying $fromBucket/${file.getKey} to $toBucket/$to/$filename") + _ <- ZIO.attempt(manager.copy(fromBucket, file.getKey, toBucket.getOrElse(fromBucket), s"$to/$filename").waitForCompletion()).when(!filename.isEmpty) + } yield () + } + } yield () + + + def s3Move(fromBucket: String, from: String, toBucket: Option[String], to: String): Task[Unit] = + for { + _ <- s3CopyFile(fromBucket, from, toBucket, to) + _ <- s3Delete(fromBucket, from) + } yield () + + + def s3Rename(bucket: String, from: String, to: String): Task[Unit] = + s3Move(bucket, from, None, to) + + + def s3Get(bucket: String, at: String): Task[InputStream] = + for { + client <- s3Client + bucket <- s3Bucket(bucket) + inputStream <- ZIO.attempt(client.getObject(bucket, at).get.content) + } yield inputStream + + + def s3Put(bucket: String, at: String, inputStream: InputStream, contentLength: Long): Task[Unit] = + for { + client <- s3Client + metadata = new ObjectMetadata() + _ = metadata.setContentLength(contentLength) + _ <- ZIO.attempt(client.putObject(bucket, at, inputStream, metadata)) + } yield () + + + def s3Delete(bucket: String, at: String): Task[Unit] = + for { + client <- s3Client + _ <- ZIO.attempt(client.deleteObject(bucket, at)) + } yield () + + + def s3Tag(bucket: String, at: String, tags: Map[String, String]): Task[Unit] = + for { + client <- s3Client + s3Tags = tags.map { case (k, v) => new Tag(k, v) } + request = new SetObjectTaggingRequest(bucket, at, new ObjectTagging(s3Tags.toList.asJava)) + _ <- ZIO.attempt(client.setObjectTagging(request)) + } yield () + + + def sesCreateTemplate(template: Template): Task[Unit] = + for { + client <- sesClient + request = new CreateTemplateRequest().withTemplate(template) + _ <- ZIO.fromFutureJava(client.createTemplateAsync(request)) + } yield () + + + def sesDeleteTemplate(name: String): Task[Unit] = + for { + client <- sesClient + request = new DeleteTemplateRequest().withTemplateName(name) + _ <- ZIO.fromFutureJava(client.deleteTemplateAsync(request)) + } yield () + + + def sesSendEmail(message: Message, to: List[String], cc: List[String], bcc: List[String], sender: String, replyTo: List[String] = List()): Task[Unit] = + for { + client <- sesClient + destination = new Destination().withBccAddresses(bcc.asJava).withCcAddresses(cc.asJava).withToAddresses(to.asJava) + configurationSet <- config.string("aws.ses.configurationSet", "default") + request = new SendEmailRequest() + .withConfigurationSetName(configurationSet) + .withSource(sender) + .withDestination(destination) + .withMessage(message) + .withReplyToAddresses(replyTo.asJava) + _ <- ZIO.fromFutureJava(client.sendEmailAsync(request)) + } yield () + + + def sesSendTemplatedEmail(template: String, templateValues: Map[String, String], to: List[String], cc: List[String], bcc: List[String], sender: String, replyTo: List[String] = List()): Task[Unit] = + for { + client <- sesClient + destination = new Destination().withBccAddresses(bcc.asJava).withCcAddresses(cc.asJava).withToAddresses(to.asJava) + configurationSet <- config.string("aws.ses.configurationSet", "default") + request = new SendTemplatedEmailRequest() + .withConfigurationSetName(configurationSet) + .withSource(sender) + .withDestination(destination) + .withReplyToAddresses(replyTo.asJava) + .withTemplate(template) + .withTemplateData(templateValues.asJson.noSpaces) + _ <-
ZIO.fromFutureJava(client.sendTemplatedEmailAsync(request)) + } yield () + + + def sesSendBulkTemplatedEmail(template: String, toWithTemplateValues: List[(String, Map[String, String])], sender: String, replyTo: List[String] = List()): Task[Unit] = + for { + client <- sesClient + configurationSet <- config.string("aws.ses.configurationSet", "default") + destinations = toWithTemplateValues.map(t => + new BulkEmailDestination() + .withDestination(new Destination().withToAddresses(List(t._1).asJava)) + .withReplacementTemplateData(t._2.asJson.noSpaces) + ).asJavaCollection + request = new SendBulkTemplatedEmailRequest() + .withConfigurationSetName(configurationSet) + .withSource(sender) + .withDestinations(destinations) + .withDefaultTemplateData("{}") + .withTemplate(template) + .withReplyToAddresses(replyTo.asJava) + _ <- ZIO.fromFutureJava(client.sendBulkTemplatedEmailAsync(request)) + } yield () +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/aws_s3/AwsS3.scala b/jvm/src/main/scala/com/harana/modules/aws_s3/AwsS3.scala new file mode 100644 index 0000000..6a0d14f --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/aws_s3/AwsS3.scala @@ -0,0 +1,114 @@ +package com.harana.modules.aws_s3 + +import io.vertx.core.buffer.Buffer +import io.vertx.ext.reactivestreams.{ReactiveReadStream, ReactiveWriteStream} +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider +import software.amazon.awssdk.regions.Region +import software.amazon.awssdk.services.s3.S3AsyncClient +import software.amazon.awssdk.services.s3.model._ +import zio.Task +import zio.macros.accessible + +import java.time.Instant + +@accessible +trait AwsS3 { + + def newClient(credentials: AwsCredentialsProvider, + region: Option[Region] = None, + endpoint: Option[String] = None, + targetThroughput: Option[Double] = None): Task[S3AsyncClient] + + def createBucket(client: S3AsyncClient, bucket: String): Task[Unit] + + def deleteBucket(client: S3AsyncClient, bucket: String): Task[Unit] + + def listBuckets(client: S3AsyncClient): Task[List[Bucket]] + + def bucketExists(client: S3AsyncClient, bucket: String): Task[Boolean] + + def getBucketPolicy(client: S3AsyncClient, bucket: String): Task[String] + + def getBucketAcl(client: S3AsyncClient, bucket: String): Task[GetBucketAclResponse] + + def putBucketAcl(client: S3AsyncClient, bucket: String, acl: BucketCannedACL): Task[Unit] + + def listObjects(client: S3AsyncClient, bucket: String, prefix: Option[String] = None): Task[ListObjectsV2Response] + + def deleteObject(client: S3AsyncClient, bucket: String, key: String): Task[Unit] + + def deleteObjects(client: S3AsyncClient, bucket: String, identifiers: List[ObjectIdentifier]): Task[Unit] + + def getObject(client: S3AsyncClient, + bucket: String, + key: String, + ifMatch: Option[String] = None, + ifNoneMatch: Option[String] = None, + ifModifiedSince: Option[Instant] = None, + ifUnmodifiedSince: Option[Instant] = None, + range: Option[String] = None): Task[(GetObjectResponse, ReactiveReadStream[Buffer])] + + def putObject(client: S3AsyncClient, + bucket: String, + key: String, + writeStream: ReactiveWriteStream[Buffer], + acl: ObjectCannedACL, + contentLength: Option[Long] = None, + contentMD5: Option[String] = None, + storageClass: Option[String] = None, + tags: Map[String, String] = Map()): Task[String] + + def copyObject(client: S3AsyncClient, + sourceBucket: String, + sourceKey: String, + destinationBucket: String, + destinationKey: String, + sourceIfMatch: Option[String] = None, +
sourceIfNoneMatch: Option[String] = None, + sourceIfModifiedSince: Option[Instant] = None, + sourceIfUnmodifiedSince: Option[Instant] = None): Task[CopyObjectResult] + + def getObjectAttributes(client: S3AsyncClient, bucket: String, key: String): Task[GetObjectAttributesResponse] + + def getObjectTagging(client: S3AsyncClient, bucket: String, key: String): Task[GetObjectTaggingResponse] + + def getObjectAcl(client: S3AsyncClient, bucket: String, key: String): Task[GetObjectAclResponse] + + def putObjectAcl(client: S3AsyncClient, bucket: String, key: String, acl: ObjectCannedACL): Task[Unit] + + def putObjectTagging(client: S3AsyncClient, bucket: String, key: String, tag: Map[String, String]): Task[Unit] + + def uploadPartCopy(client: S3AsyncClient, + sourceBucket: String, + sourceKey: String, + destinationBucket: String, + destinationKey: String, + uploadId: String, + partNumber: Int, + copySourceIfMatch: Option[String], + copySourceIfNoneMatch: Option[String], + copySourceIfModifiedSince: Option[Instant], + copySourceIfUnmodifiedSince: Option[Instant], + copySourceRange: Option[String]): Task[CopyPartResult] + + def uploadPart(client: S3AsyncClient, + bucket: String, + key: String, + uploadId: String, + partNumber: Int, + writeStream: ReactiveWriteStream[Buffer], + contentLength: Option[Long] = None): Task[String] + + def listParts(client: S3AsyncClient, bucket: String, key: String, uploadId: String): Task[List[Part]] + + def listMultipartUploads(client: S3AsyncClient, bucket: String, prefix: Option[String] = None): Task[List[MultipartUpload]] + + def createMultipartUpload(client: S3AsyncClient, bucket: String, key: String, cannedACL: ObjectCannedACL): Task[String] + + def abortMultipartUpload(client: S3AsyncClient, bucket: String, key: String, uploadId: String): Task[Unit] + + def completeMultipartUpload(client: S3AsyncClient, bucket: String, key: String, uploadId: String): Task[String] + + def presignedUrl(bucketName: String, key: String, expirationMinutes: Int): Task[String] + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/aws_s3/LiveAwsS3.scala b/jvm/src/main/scala/com/harana/modules/aws_s3/LiveAwsS3.scala new file mode 100644 index 0000000..eec7508 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/aws_s3/LiveAwsS3.scala @@ -0,0 +1,267 @@ +package com.harana.modules.aws_s3 + +import com.amazonaws.services.s3.internal.ServiceUtils +import com.harana.modules.aws_s3.LiveAwsS3.presigner +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import io.vertx.core.buffer.Buffer +import io.vertx.ext.reactivestreams.{ReactiveReadStream, ReactiveWriteStream} +import org.reactivestreams.{Subscriber, Subscription} +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider +import software.amazon.awssdk.core.async.{AsyncRequestBody, AsyncResponseTransformer, SdkPublisher} +import software.amazon.awssdk.regions.Region +import software.amazon.awssdk.services.s3.S3AsyncClient +import software.amazon.awssdk.services.s3.model._ +import software.amazon.awssdk.services.s3.presigner.S3Presigner +import software.amazon.awssdk.services.s3.presigner.model.GetObjectPresignRequest +import zio.{Task, ZIO, ZLayer} + +import java.net.URI +import java.nio.ByteBuffer +import java.time.{Duration, Instant} +import java.util.Optional +import java.util.concurrent.CompletableFuture +import scala.jdk.CollectionConverters._ + +object LiveAwsS3 { + val presigner = 
S3Presigner.create() + + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveAwsS3(config, logger, micrometer) + } +} + +case class LiveAwsS3(config: Config, logger: Logger, micrometer: Micrometer) extends AwsS3 { + + def newClient(credentials: AwsCredentialsProvider, + region: Option[Region] = None, + endpoint: Option[String] = None, + targetThroughput: Option[Double] = None): Task[S3AsyncClient] = + for { + defaultRegion <- config.string("aws.defaultRegion") + clientBuilder = S3AsyncClient.crtBuilder() + .credentialsProvider(credentials) + .region(region.getOrElse(Region.of(defaultRegion))) + .targetThroughputInGbps(java.lang.Double.valueOf(targetThroughput.getOrElse(40.0))) + .minimumPartSizeInBytes(8 * 1024 * 1024) + client = if (endpoint.nonEmpty) clientBuilder.endpointOverride(URI.create(endpoint.get)).build() else clientBuilder.build() + } yield client + + def createBucket(client: S3AsyncClient, bucket: String) = + ZIO.fromCompletableFuture(client.createBucket(CreateBucketRequest.builder().bucket(bucket).build())).unit + + def deleteBucket(client: S3AsyncClient, bucket: String) = + ZIO.fromCompletableFuture(client.deleteBucket(DeleteBucketRequest.builder().bucket(bucket).build())).unit + + def listBuckets(client: S3AsyncClient) = + ZIO.fromCompletableFuture(client.listBuckets(ListBucketsRequest.builder().build())).map(_.buckets().asScala.toList) + + def bucketExists(client: S3AsyncClient, bucket: String) = + listBuckets(client).map(b => b.map(_.name()).contains(bucket)) + + def getBucketPolicy(client: S3AsyncClient, bucket: String) = + ZIO.fromCompletableFuture(client.getBucketPolicy(GetBucketPolicyRequest.builder().bucket(bucket).build())).map(_.policy()) + + def getBucketAcl(client: S3AsyncClient, bucket: String) = + ZIO.fromCompletableFuture(client.getBucketAcl(GetBucketAclRequest.builder().bucket(bucket).build())) + + def putBucketAcl(client: S3AsyncClient, bucket: String, acl: BucketCannedACL) = + ZIO.fromCompletableFuture(client.putBucketAcl(PutBucketAclRequest.builder().bucket(bucket).acl(acl).build())).unit + + def listObjects(client: S3AsyncClient, bucket: String, prefix: Option[String] = None) = { + var builder = ListObjectsV2Request.builder().bucket(bucket) + builder = if (prefix.nonEmpty) builder.prefix(prefix.get) else builder + ZIO.fromCompletableFuture(client.listObjectsV2(builder.build())) + } + + def deleteObject(client: S3AsyncClient, bucket: String, key: String) = + ZIO.fromCompletableFuture(client.deleteObject(DeleteObjectRequest.builder().bucket(bucket).key(key).build())).unit + + def deleteObjects(client: S3AsyncClient, bucket: String, identifiers: List[ObjectIdentifier]) = + ZIO.fromCompletableFuture( + client.deleteObjects(DeleteObjectsRequest.builder().bucket(bucket) + .delete(Delete.builder().objects(identifiers.asJava).build()).build()) + ).unit + + def getObject(client: S3AsyncClient, + bucket: String, + key: String, + ifMatch: Option[String] = None, + ifNoneMatch: Option[String] = None, + ifModifiedSince: Option[Instant] = None, + ifUnmodifiedSince: Option[Instant] = None, + range: Option[String] = None) = { + + val readStream = ReactiveReadStream.readStream[Buffer] + var response = Option.empty[GetObjectResponse] + + val builder = GetObjectRequest.builder() + .bucket(bucket) + .key(key) + + if (ifMatch.nonEmpty) builder.ifMatch(ifMatch.get) + if (ifNoneMatch.nonEmpty) builder.ifNoneMatch(ifNoneMatch.get) + if (ifModifiedSince.nonEmpty) 
builder.ifModifiedSince(ifModifiedSince.get) + if (ifUnmodifiedSince.nonEmpty) builder.ifUnmodifiedSince(ifUnmodifiedSince.get) + if (range.nonEmpty) builder.range(range.get) + + ZIO.fromCompletableFuture(client.getObject(builder.build(), new AsyncResponseTransformer[GetObjectResponse, Unit] { + override def onStream(publisher: SdkPublisher[ByteBuffer]) = + publisher.subscribe(new Subscriber[ByteBuffer] { + override def onSubscribe(sub: Subscription) = readStream.onSubscribe(sub) + override def onNext(t: ByteBuffer) = readStream.onNext(Buffer.buffer(t.array())) + override def onError(t: Throwable) = readStream.onError(t) + override def onComplete() = readStream.onComplete() + }) + + override def prepare() = new CompletableFuture[Unit] {} + override def onResponse(r: GetObjectResponse) = response = Some(r) + override def exceptionOccurred(error: Throwable) = readStream.onError(error) + })).as((response.get, readStream)) + } + + def getObjectAttributes(client: S3AsyncClient, bucket: String, key: String) = + ZIO.fromCompletableFuture(client.getObjectAttributes(GetObjectAttributesRequest.builder().bucket(bucket).key(key).build())) + + def getObjectTagging(client: S3AsyncClient, bucket: String, key: String) = + ZIO.fromCompletableFuture(client.getObjectTagging(GetObjectTaggingRequest.builder().bucket(bucket).key(key).build())) + + def putObject(client: S3AsyncClient, + bucket: String, + key: String, + writeStream: ReactiveWriteStream[Buffer], + acl: ObjectCannedACL, + contentLength: Option[Long] = None, + contentMD5: Option[String] = None, + storageClass: Option[String] = None, + tags: Map[String, String] = Map()) = + ZIO.fromCompletableFuture { + val builder = PutObjectRequest.builder() + .bucket(bucket) + .key(key) + .acl(acl) + .tagging(Tagging.builder().tagSet(tags.map { case (k,v) => Tag.builder().key(k).value(v).build() }.toList.asJava).build()) + + if (contentLength.nonEmpty) builder.contentLength(contentLength.get) + if (contentMD5.nonEmpty) builder.contentMD5(contentMD5.get) + if (storageClass.nonEmpty) builder.storageClass(storageClass.get) + + client.putObject(builder.build(), publisher(writeStream)) + }.map(_.eTag()) + + def copyObject(client: S3AsyncClient, + sourceBucket: String, + sourceKey: String, + destinationBucket: String, + destinationKey: String, + sourceIfMatch: Option[String] = None, + sourceIfNoneMatch: Option[String] = None, + sourceIfModifiedSince: Option[Instant] = None, + sourceIfUnmodifiedSince: Option[Instant] = None) = { + + val builder = CopyObjectRequest.builder() + .sourceBucket(sourceBucket).sourceKey(sourceKey) + .destinationBucket(destinationBucket).destinationKey(destinationKey) + + if (sourceIfMatch.nonEmpty) builder.copySourceIfMatch(sourceIfMatch.get) + if (sourceIfNoneMatch.nonEmpty) builder.copySourceIfNoneMatch(sourceIfNoneMatch.get) + if (sourceIfModifiedSince.nonEmpty) builder.copySourceIfModifiedSince(sourceIfModifiedSince.get) + if (sourceIfUnmodifiedSince.nonEmpty) builder.copySourceIfUnmodifiedSince(sourceIfUnmodifiedSince.get) + + ZIO.fromCompletableFuture(client.copyObject(builder.build())).map(_.copyObjectResult()) + } + + def getObjectAcl(client: S3AsyncClient, bucket: String, key: String) = + ZIO.fromCompletableFuture(client.getObjectAcl(GetObjectAclRequest.builder().bucket(bucket).key(key).build())) + + def putObjectAcl(client: S3AsyncClient, bucket: String, key: String, acl: ObjectCannedACL) = + ZIO.fromCompletableFuture(client.putObjectAcl(PutObjectAclRequest.builder().bucket(bucket).key(key).acl(acl).build())).unit + + def 
putObjectTagging(client: S3AsyncClient, bucket: String, key: String, tags: Map[String, String]) = + ZIO.fromCompletableFuture(client.putObjectTagging( + PutObjectTaggingRequest.builder().bucket(bucket).key(key) + .tagging(Tagging.builder().tagSet(tags.map { case (k, v) => Tag.builder().key(k).value(v).build() }.toList.asJava).build()) + .build()) + ).unit + + def uploadPartCopy(client: S3AsyncClient, + sourceBucket: String, + sourceKey: String, + destinationBucket: String, + destinationKey: String, + uploadId: String, + partNumber: Int, + copySourceIfMatch: Option[String], + copySourceIfNoneMatch: Option[String], + copySourceIfModifiedSince: Option[Instant], + copySourceIfUnmodifiedSince: Option[Instant], + copySourceRange: Option[String]) = + ZIO.fromCompletableFuture { + val builder = UploadPartCopyRequest.builder() + .sourceBucket(sourceBucket).sourceKey(sourceKey) + .destinationBucket(destinationBucket).destinationKey(destinationKey) + .partNumber(partNumber).uploadId(uploadId) + + if (copySourceRange.nonEmpty) builder.copySourceRange(copySourceRange.get) + if (copySourceIfMatch.nonEmpty) builder.copySourceIfMatch(copySourceIfMatch.get) + if (copySourceIfNoneMatch.nonEmpty) builder.copySourceIfNoneMatch(copySourceIfNoneMatch.get) + if (copySourceIfModifiedSince.nonEmpty) builder.copySourceIfModifiedSince(copySourceIfModifiedSince.get) + if (copySourceIfUnmodifiedSince.nonEmpty) builder.copySourceIfUnmodifiedSince(copySourceIfUnmodifiedSince.get) + + client.uploadPartCopy(builder.build()) + }.map(_.copyPartResult()) + + def uploadPart(client: S3AsyncClient, + bucket: String, + key: String, + uploadId: String, + partNumber: Int, + writeStream: ReactiveWriteStream[Buffer], + contentLength: Option[Long] = None) = { + val builder = UploadPartRequest.builder().bucket(bucket).key(key).partNumber(partNumber).uploadId(uploadId) + if (contentLength.nonEmpty) builder.contentLength(contentLength.get) + ZIO.fromCompletableFuture(client.uploadPart(builder.build(), AsyncRequestBody.fromPublisher(publisher(writeStream)))).map(_.eTag()) + } + + def listParts(client: S3AsyncClient, bucket: String, key: String, uploadId: String) = + ZIO.fromCompletableFuture(client.listParts(ListPartsRequest.builder().bucket(bucket).key(key).uploadId(uploadId).build())).map(_.parts().asScala.toList) + + def listMultipartUploads(client: S3AsyncClient, bucket: String, prefix: Option[String] = None) = { + val builder = ListMultipartUploadsRequest.builder().bucket(bucket) + if (prefix.nonEmpty) builder.prefix(prefix.get) + ZIO.fromCompletableFuture(client.listMultipartUploads(builder.build())).map(_.uploads().asScala.toList) + } + + def createMultipartUpload(client: S3AsyncClient, bucket: String, key: String, cannedACL: ObjectCannedACL) = + ZIO.fromCompletableFuture(client.createMultipartUpload(CreateMultipartUploadRequest.builder().bucket(bucket).key(key).acl(cannedACL).build())).map(_.uploadId()) + + def abortMultipartUpload(client: S3AsyncClient, bucket: String, key: String, uploadId: String) = + ZIO.fromCompletableFuture(client.abortMultipartUpload(AbortMultipartUploadRequest.builder().bucket(bucket).key(key).uploadId(uploadId).build())).unit + + def completeMultipartUpload(client: S3AsyncClient, bucket: String, key: String, uploadId: String) = + ZIO.fromCompletableFuture(client.completeMultipartUpload(CompleteMultipartUploadRequest.builder().bucket(bucket).key(key).uploadId(uploadId).build())).map(_.eTag()) + + def presignedUrl(bucketName: String, key: String, expirationMinutes: Int): Task[String] = { + ZIO.attempt { + 
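+      // Build a plain GetObjectRequest for the target object and wrap it in the presign request; the
+      // resulting URL can then be handed to clients that hold no AWS credentials. Note that `presigner`
+      // is created with S3Presigner.create(), i.e. with the default region/credential chain.
+      // Illustrative call: AwsS3.presignedUrl("my-bucket", "reports/2023.csv", 15)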
val getObjectRequest = GetObjectRequest.builder().bucket(bucketName).key(key).build()
+      val presignRequest = GetObjectPresignRequest.builder()
+        .signatureDuration(Duration.ofMinutes(expirationMinutes))
+        .getObjectRequest(getObjectRequest)
+        .build()
+      presigner.presignGetObject(presignRequest).url().toString
+    }
+  }
+
+  private def publisher(writeStream: ReactiveWriteStream[Buffer]) =
+    new AsyncRequestBody() {
+      def contentLength: Optional[java.lang.Long] = Optional.empty
+      def subscribe(s: Subscriber[_ >: ByteBuffer]) =
+        writeStream.subscribe(new Subscriber[Buffer] {
+          def onSubscribe(sub: Subscription) = s.onSubscribe(sub)
+          def onNext(t: Buffer) = s.onNext(t.getByteBuf.nioBuffer())
+          def onError(t: Throwable) = s.onError(t)
+          def onComplete() = s.onComplete()
+        })
+    }
+}
\ No newline at end of file
diff --git a/jvm/src/main/scala/com/harana/modules/buildpack/Buildpack.scala b/jvm/src/main/scala/com/harana/modules/buildpack/Buildpack.scala
new file mode 100644
index 0000000..848cb05
--- /dev/null
+++ b/jvm/src/main/scala/com/harana/modules/buildpack/Buildpack.scala
@@ -0,0 +1,30 @@
+package com.harana.modules.buildpack
+
+import zio.Task
+import zio.macros.accessible
+
+import java.io.File
+
+@accessible
+trait Buildpack {
+
+  def build(name: String,
+            path: File,
+            builder: Option[String] = None,
+            environmentVariables: Map[String, String] = Map(),
+            mountedVolumes: Map[File, File] = Map(),
+            network: Option[String] = None,
+            publish: Option[Boolean] = None,
+            runImage: Option[String] = None): Task[List[String]]
+
+  def setDefaultBuilder(name: String): Task[List[String]]
+
+  def rebase(name: String,
+             publish: Option[Boolean] = None,
+             runImage: Option[String] = None): Task[List[String]]
+
+}
+
+object Buildpack {
+  type ContainerId = String
+}
\ No newline at end of file
diff --git a/jvm/src/main/scala/com/harana/modules/buildpack/LiveBuildpack.scala b/jvm/src/main/scala/com/harana/modules/buildpack/LiveBuildpack.scala
new file mode 100644
index 0000000..5c53d5a
--- /dev/null
+++ b/jvm/src/main/scala/com/harana/modules/buildpack/LiveBuildpack.scala
@@ -0,0 +1,77 @@
+package com.harana.modules.buildpack
+
+import com.harana.modules.core.config.Config
+import com.harana.modules.core.logger.Logger
+import com.harana.modules.core.micrometer.Micrometer
+import zio.process.Command
+import zio.{Task, ZIO, ZLayer}
+
+import java.io.File
+import java.util.Locale
+import scala.collection.mutable
+
+object LiveBuildpack {
+  val layer = ZLayer {
+    for {
+      config     <- ZIO.service[Config]
+      logger     <- ZIO.service[Logger]
+      micrometer <- ZIO.service[Micrometer]
+    } yield LiveBuildpack(config, logger, micrometer)
+  }
+}
+
+case class LiveBuildpack(config: Config, logger: Logger, micrometer: Micrometer) extends Buildpack {
+
+  private val buildpackCmd = ZIO.attempt {
+    val os = System.getProperty("os.name").toLowerCase(Locale.ROOT)
+    val url =
+      if (os.contains("mac")) getClass.getResource("pack/mac/pack")
+      else if (os.contains("win")) getClass.getResource("pack/windows/pack.exe")
+      else getClass.getResource("pack/linux/pack")
+    url.getFile
+  }
+
+
+  def build(name: String,
+            path: File,
+            builder: Option[String] = None,
+            environmentVariables: Map[String, String] = Map(),
+            mountedVolumes: Map[File, File] = Map(),
+            network: Option[String] = None,
+            publish: Option[Boolean] = None,
+            runImage: Option[String] = None): Task[List[String]] =
+    for {
+      cmd  <- buildpackCmd
+      args <- ZIO.succeed {
+        val args = mutable.ListBuffer[String]("build", name, s"--path ${path.getAbsolutePath}")
+        if (builder.nonEmpty) args += s"--builder ${builder.get}"
+        if (environmentVariables.nonEmpty) args += s"--env ${environmentVariables.map { case (k,v) =>
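+        // Usage sketch for the Buildpack module, assuming the @accessible accessor and that the pack
+        // CLI binaries are bundled on the classpath as resolved by buildpackCmd above; the image,
+        // builder and path values are illustrative:
+        //
+        //   Buildpack.build(
+        //     name    = "registry.local/my-app:latest",
+        //     path    = new File("/workspace/my-app"),
+        //     builder = Some("paketobuildpacks/builder-jammy-base"),
+        //     publish = Some(true))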
s"$k=$v" }.mkString(",")}" + if (mountedVolumes.nonEmpty) args += s"--volume ${mountedVolumes.map { case (k,v) => s"$k:$v" }.mkString(",")}" + if (network.nonEmpty) args += s"--network ${network.get}" + if (publish.nonEmpty) args += s"--publish" + if (runImage.nonEmpty) args += s"--run-image ${runImage.get}s" + args + } + cmd <- Command(cmd, args.toSeq: _*).lines + } yield cmd.toList + + + def setDefaultBuilder(name: String): Task[List[String]] = + Command("pack", List("set-default-builder", name): _*).lines.map(_.toList) + + + def rebase(name: String, + publish: Option[Boolean] = None, + runImage: Option[String] = None): Task[List[String]] = + for { + cmd <- buildpackCmd + args <- ZIO.succeed { + val args = mutable.ListBuffer[String]() + if (publish.nonEmpty) args += s"--publish" + if (runImage.nonEmpty) args += s"--run-image ${runImage.get}s" + args + } + cmd <- Command(cmd, args.toSeq: _*).lines + } yield cmd.toList +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/calcite/Calcite.scala b/jvm/src/main/scala/com/harana/modules/calcite/Calcite.scala new file mode 100644 index 0000000..5203dbd --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/calcite/Calcite.scala @@ -0,0 +1,11 @@ +package com.harana.modules.calcite + +import zio.Task +import zio.macros.accessible + +@accessible +trait Calcite { + + def rewrite(userId: String, query: String): Task[String] + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/calcite/LiveCalcite.scala b/jvm/src/main/scala/com/harana/modules/calcite/LiveCalcite.scala new file mode 100644 index 0000000..d6547a8 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/calcite/LiveCalcite.scala @@ -0,0 +1,125 @@ +package com.harana.modules.calcite + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.harana.sdk.shared.models.common.User.UserId +import org.apache.calcite.config.Lex +import org.apache.calcite.sql.SqlKind._ +import org.apache.calcite.sql.dialect.CalciteSqlDialect +import org.apache.calcite.sql.parser.SqlParser +import org.apache.calcite.sql._ +import zio.{Task, ZLayer, ZIO} + +import scala.jdk.CollectionConverters._ + +object LiveCalcite { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveCalcite(config, logger, micrometer) + } +} + +case class LiveCalcite(config: Config, logger: Logger, micrometer: Micrometer) extends Calcite { + + def rewrite(userId: UserId, query: String): Task[String] = { + ZIO.attempt(parse(userId, query)) + } + + private val CONFIG = SqlParser.configBuilder.setLex(Lex.MYSQL).build + + + private def parse(userId: String, sql: String): String = { + val sqlParser = SqlParser.create(sql, CONFIG) + val sqlNode = sqlParser.parseStmt + + sqlNode.getKind match { + case INSERT => + val sqlInsert = sqlNode.asInstanceOf[SqlInsert] + val source = sqlInsert.getSource.asInstanceOf[SqlSelect] + parseSource(source, userId) + + case SELECT => + parseSource(sqlNode.asInstanceOf[SqlSelect], userId) + + case ORDER_BY => + println("Order by not currently supported") + + case _ => + throw new IllegalArgumentException("It must be an insert SQL, sql:" + sql) + } + + sqlNode.toSqlString(CalciteSqlDialect.DEFAULT).getSql + } + + + private def parseSource(sqlSelect: SqlSelect, userId: String): Unit = { + parseSelectList(sqlSelect.getSelectList, userId) + 
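+    // Example of the intended rewrite, assuming userId = "alice" (illustrative):
+    //   SELECT id FROM orders o JOIN customers c ON o.cid = c.id
+    // becomes, roughly (modulo Calcite's quoting and casing),
+    //   SELECT id FROM alice.orders AS o JOIN alice.customers AS c ON o.cid = c.id
+    // parseSelectList above recurses into the projection to catch nested sub-selects, while
+    // parseFrom below rewrites the FROM clause itself.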
parseFrom(sqlSelect.getFrom, userId) match { + case Some(newIdentifier) => sqlSelect.setFrom(newIdentifier) + case None => + } + } + + + private def parseSelectList(sqlNodeList: SqlNodeList, userId: String): Unit = + sqlNodeList.asScala.foreach(parseSelect(_, userId)) + + + private def parseFrom(from: SqlNode, userId: String): Option[SqlIdentifier] = + from.getKind match { + case IDENTIFIER => + val identifier = from.asInstanceOf[SqlIdentifier] + Some(identifier.setName(0, s"$userId.$identifier")) + + case AS => + val sqlBasicCall = from.asInstanceOf[SqlBasicCall] + + parseFrom(sqlBasicCall.getOperandList.asScala.head, userId) match { + case Some(newIdentifier) => sqlBasicCall.setOperand(0, newIdentifier) + case None => + } + + None + + case SELECT => + parseSource(from.asInstanceOf[SqlSelect], userId) + None + + case JOIN => + val sqlJoin = from.asInstanceOf[SqlJoin] + + parseFrom(sqlJoin.getLeft, userId) match { + case Some(newIdentifier) => sqlJoin.setLeft(newIdentifier) + case None => + } + + parseFrom(sqlJoin.getRight, userId) match { + case Some(newIdentifier) => sqlJoin.setRight(newIdentifier) + case None => + } + + None + + case _ => None + } + + + private def parseSelect(sqlNode: SqlNode, userId: String): Unit = + sqlNode.getKind match { + case IDENTIFIER => + + case AS => + val firstNode = sqlNode.asInstanceOf[SqlBasicCall].getOperandList.asScala.head + parseSelect(firstNode, userId) + + case SELECT => + parseSource(sqlNode.asInstanceOf[SqlSelect], userId) + + case _ => + + } +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/calcite/LiveCalcite2.scala b/jvm/src/main/scala/com/harana/modules/calcite/LiveCalcite2.scala new file mode 100644 index 0000000..e9d90c9 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/calcite/LiveCalcite2.scala @@ -0,0 +1,115 @@ +package com.harana.modules.calcite + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.harana.sdk.shared.models.common.User.UserId +import org.apache.calcite.sql.SqlKind._ +import org.apache.calcite.sql.dialect.CalciteSqlDialect +import org.apache.calcite.sql.parser.SqlParser +import org.apache.calcite.sql._ +import zio.{Task, ZLayer, ZIO} + +import scala.collection.mutable.ListBuffer + +object LiveCalcite2 { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveCalcite2(config, logger, micrometer) + } +} + +case class LiveCalcite2(config: Config, logger: Logger, micrometer: Micrometer) extends Calcite { + + def rewrite(userId: UserId, query: String): Task[String] = + for { + node <- ZIO.attempt(SqlParser.create(query).parseQuery()) + sqlNode <- ZIO.attempt(tableNames(userId, node)) + } yield sqlNode.toSqlString(CalciteSqlDialect.DEFAULT).getSql + + private def tableNames(userId: UserId, sqlNode: SqlNode): SqlNode = { + val node = if (sqlNode.getKind.equals(ORDER_BY)) sqlNode.asInstanceOf[SqlOrderBy].query else sqlNode + processFrom(userId, node) + } + + private def processFrom(userId: UserId, node: SqlNode): SqlNode = { + val childNode = node.asInstanceOf[SqlSelect].getFrom + if (childNode == null) return node + + childNode.getKind match { + case IDENTIFIER => + println(childNode.toSqlString(CalciteSqlDialect.DEFAULT)) + val identifier = childNode.asInstanceOf[SqlIdentifier] + val newIdentifier = identifier.setName(0, s"$userId.${identifier.names.get(0)}") + 
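+        // SqlIdentifier.setName returns a modified copy rather than mutating in place, so the new
+        // identifier has to be written back into the enclosing SELECT via setFrom below. End to end,
+        // rewrite("alice", "SELECT * FROM events") is expected to yield roughly
+        // "SELECT * FROM alice.events" (illustrative; exact quoting depends on the Calcite dialect).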
node.asInstanceOf[SqlSelect].setFrom(newIdentifier) + node + case AS => + println(childNode.toSqlString(CalciteSqlDialect.DEFAULT)) + val call = childNode.asInstanceOf[SqlBasicCall] + if (call.operand(0).isInstanceOf[SqlIdentifier]) { + val newIdentifier = setName(userId, call.operand(0).asInstanceOf[SqlIdentifier]) + call.setOperand(0, newIdentifier) + node.asInstanceOf[SqlSelect].setFrom(call) + } + node + + case JOIN => + println(childNode.toSqlString(CalciteSqlDialect.DEFAULT)) + val fromNode = childNode.asInstanceOf[SqlJoin] + + if (fromNode.getLeft.getKind.equals(AS)) { + val newLeftIdentifier = setName(userId, leftNode(fromNode)) + val newRightIdentifier = setName(userId, rightNode(fromNode)) + fromNode.getLeft.asInstanceOf[SqlBasicCall].setOperand(0, newLeftIdentifier) + fromNode.getRight.asInstanceOf[SqlBasicCall].setOperand(0, newRightIdentifier) + node + } + else { + val tables = ListBuffer[String]() + + fromNode.getLeft.getKind match { + case IDENTIFIER => + if (fromNode.getRight.getKind.equals(IDENTIFIER)) { + val newLeftIdentifier = setName(userId, fromNode.getLeft.asInstanceOf[SqlIdentifier]) + val newRightIdentifier = setName(userId, fromNode.getRight.asInstanceOf[SqlIdentifier]) + fromNode.setLeft(newLeftIdentifier) + fromNode.setRight(newRightIdentifier) + node + } else { + println(fromNode.getLeft.toString) + node + } + + case JOIN => + var leftJoin = fromNode.getLeft.asInstanceOf[SqlJoin] + + while (!leftJoin.getLeft.getKind.equals(AS) && leftJoin.getLeft.isInstanceOf[SqlJoin]) { + // tables += rightNode(leftJoin) + leftJoin = leftJoin.getLeft.asInstanceOf[SqlJoin] + } + + if (leftJoin.getLeft.isInstanceOf[SqlBasicCall]) + println(s"Left C = ${leftNode(leftJoin)}") + + if (leftJoin.getRight.isInstanceOf[SqlBasicCall]) + println(s"Right C = ${rightNode(leftJoin)}") + + node + //tables.toList ++ List(left(leftJoin), right(leftJoin)) + } + } + } + } + + private def setName(userId: UserId, identifier: SqlIdentifier) = + identifier.setName(0, s"$userId.${identifier.names.get(0)}") + + private def leftNode(node: SqlJoin) = + node.getLeft.asInstanceOf[SqlBasicCall].operand(0).asInstanceOf[SqlIdentifier] + + private def rightNode(node: SqlJoin) = + node.getRight.asInstanceOf[SqlBasicCall].operand(0).asInstanceOf[SqlIdentifier] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/clearbit/Clearbit.scala b/jvm/src/main/scala/com/harana/modules/clearbit/Clearbit.scala new file mode 100644 index 0000000..5aa33a3 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/clearbit/Clearbit.scala @@ -0,0 +1,12 @@ +package com.harana.modules.clearbit + +import com.harana.modules.clearbit.models.RiskResponse +import zio.Task +import zio.macros.accessible + +@accessible +trait Clearbit { + + def calculateRisk(emailAddress: String, ipAddress: String, firstName: String, lastName: String): Task[RiskResponse] + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/clearbit/LiveClearbit.scala b/jvm/src/main/scala/com/harana/modules/clearbit/LiveClearbit.scala new file mode 100644 index 0000000..b066d76 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/clearbit/LiveClearbit.scala @@ -0,0 +1,33 @@ +package com.harana.modules.clearbit + +import com.harana.modules.clearbit.models.RiskResponse +import com.harana.modules.core.config.Config +import com.harana.modules.core.http.Http +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import io.circe.parser._ +import zio.{Task, ZLayer, ZIO} 
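+
+// A minimal sketch of calling this module, assuming the @accessible accessor on Clearbit and that
+// Config/Http/Logger/Micrometer layers are provided together with LiveClearbit.layer; the argument
+// values are illustrative:
+//
+//   val risk: ZIO[Clearbit, Throwable, RiskResponse] =
+//     Clearbit.calculateRisk("jane@example.com", "203.0.113.10", "Jane", "Doe")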
+ +object LiveClearbit { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + http <- ZIO.service[Http] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveClearbit(config, http, logger, micrometer) + } +} + +case class LiveClearbit(config: Config, http: Http, logger: Logger, micrometer: Micrometer) extends Clearbit { + + def calculateRisk(emailAddress: String, ipAddress: String, firstName: String, lastName: String): Task[RiskResponse] = + for { + apiKey <- config.secret("clearbit-api-key") + _ <- logger.debug(s"Calculating risk for email: $emailAddress") + params = Map("email" -> emailAddress, "given_name" -> firstName, "family_name" -> lastName, "ip" -> ipAddress) + response <- http.postForm("https://risk.clearbit.com/v1/calculate", params, credentials = Some((apiKey, ""))).mapError(e => new Exception(e.toString)).onError(e => logger.error(s"Failed to calculate risk: ${e.prettyPrint}")) + risk <- ZIO.fromEither(decode[RiskResponse](response.body().string())).onError(e => logger.error(s"Failed to decode risk to RiskResponse object: ${e.prettyPrint}")) + } yield risk + +} diff --git a/jvm/src/main/scala/com/harana/modules/clearbit/models/ClearbitError.scala b/jvm/src/main/scala/com/harana/modules/clearbit/models/ClearbitError.scala new file mode 100644 index 0000000..264ff31 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/clearbit/models/ClearbitError.scala @@ -0,0 +1,3 @@ +package com.harana.modules.clearbit.models + +case class ClearbitError() \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/clearbit/models/Common.scala b/jvm/src/main/scala/com/harana/modules/clearbit/models/Common.scala new file mode 100644 index 0000000..85b8161 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/clearbit/models/Common.scala @@ -0,0 +1,34 @@ +package com.harana.modules.clearbit.models + +case class Facebook(handle: String) + +case class LinkedIn(handle: String) + +case class AngelList(handle: String, + bio: String, + blog: String, + site: String, + followers: Long, + avatar: String) + +case class Crunchbase(handle: String) + + +case class Github(handle: String, + id: Long, + avatar: String, + company: String, + blog: String, + followers: Long, + following: Long) + +case class Twitter(handle: String, + id: String, + bio: String, + followers: Long, + following: Long, + statuses: Long, + favorites: Long, + location: String, + site: String, + avatar: String) diff --git a/jvm/src/main/scala/com/harana/modules/clearbit/models/Company.scala b/jvm/src/main/scala/com/harana/modules/clearbit/models/Company.scala new file mode 100644 index 0000000..8c169ab --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/clearbit/models/Company.scala @@ -0,0 +1,66 @@ +package com.harana.modules.clearbit.models + +case class Company(id: String, + name: String, + legalName: String, + domain: String, + domainAliases: List[String], + logo: String, + site: Site, + tags: List[String], + category: Category, + description: String, + foundedYear: Integer, + location: String, + timeZone: String, + utcOffset: Long, + geo: CompanyGeo, + metrics: Metrics, + facebook: Facebook, + linkedin: LinkedIn, + twitter: Twitter, + crunchbase: Crunchbase, + emailProvider: Boolean, + `type`: String, + ticker: String, + phone: String, + indexedAt: String, + tech: List[String], + parent: Parent) + +case class Site(title: String, + h1: String, + metaDescription: String, + phoneNumbers: List[String], + emailAddresses: List[String]) + +case class 
Category(sector: String, + industryGroup: String, + industry: String, + subIndustry: String, + sicCode: String, + naicsCode: String) + +case class CompanyGeo(streetNumber: String, + streetName: String, + subPremise: String, + city: String, + state: String, + stateCode: String, + postalCode: String, + country: String, + countryCode: String, + lat: Double, + lng: Double) + +case class Metrics(alexaUsRank: Long, + alexaGlobalRank: Long, + employees: Long, + employeesRange: String, + marketCap: Long, + raised: Long, + annualRevenue: Long, + fiscalYearEnd: Long, + estimatedAnnualRevenue: String) + +case class Parent(domain: String) diff --git a/jvm/src/main/scala/com/harana/modules/clearbit/models/ModelType.scala b/jvm/src/main/scala/com/harana/modules/clearbit/models/ModelType.scala new file mode 100644 index 0000000..91fd613 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/clearbit/models/ModelType.scala @@ -0,0 +1,8 @@ +package com.harana.modules.clearbit.models + +sealed trait ModelType +object ModelType { + case object PERSON extends ModelType + case object COMPANY extends ModelType + case object PERSON_COMPANY extends ModelType +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/clearbit/models/Person.scala b/jvm/src/main/scala/com/harana/modules/clearbit/models/Person.scala new file mode 100644 index 0000000..bb14b77 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/clearbit/models/Person.scala @@ -0,0 +1,55 @@ +package com.harana.modules.clearbit.models + +case class Person(id: String, + name: Name, + email: String, + gender: String, + location: String, + timeZone: String, + utcOffset: Long, + geo: PersonGeo, + bio: String, + site: String, + avatar: String, + employment: Employment, + facebook: Facebook, + github: Github, + twitter: Twitter, + linkedin: LinkedIn, + aboutme: AboutMe, + gravatar: Gravatar, + fuzzy: Boolean, + emailProvider: Boolean, + indexedAt: String) + +case class Name(fullName: String, + givenName: String, + familyName: String) + +case class PersonGeo(city: String, + state: String, + stateCode: String, + country: String, + countryCode: String, + lat: Double, + lng: Double) + +case class Employment(name: String, + title: String, + domain: String, + role: String, + subRole: String, + seniority: String) + +case class AboutMe(handle: String, + bio: String, + avatar: String) + +case class Gravatar(handle: String, + urls: List[Url], + avatar: String, + avatars: List[Avatar]) + +case class Url(value: String, title: String) + +case class Avatar(url: String, `type`: String) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/clearbit/models/PersonCompany.scala b/jvm/src/main/scala/com/harana/modules/clearbit/models/PersonCompany.scala new file mode 100644 index 0000000..2ce81b7 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/clearbit/models/PersonCompany.scala @@ -0,0 +1,3 @@ +package com.harana.modules.clearbit.models + +case class PersonCompany(person: Person, company: Company) diff --git a/jvm/src/main/scala/com/harana/modules/clearbit/models/RiskResponse.scala b/jvm/src/main/scala/com/harana/modules/clearbit/models/RiskResponse.scala new file mode 100644 index 0000000..d92b8f0 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/clearbit/models/RiskResponse.scala @@ -0,0 +1,35 @@ +package com.harana.modules.clearbit.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class RiskResponse(id: String, + live: Boolean, + fingerprint: Boolean, + email: Email, + address: Address, 
+ ip: IP, + risk: Risk) + +@JsonCodec +case class Email(valid: Option[Boolean], + socialMatch: Option[Boolean], + companyMatch: Option[Boolean], + nameMatch: Option[Boolean], + disposable: Option[Boolean], + freeProvider: Option[Boolean], + blacklisted: Option[Boolean]) + +@JsonCodec +case class Address(geoMatch: Option[String]) + +@JsonCodec +case class IP(proxy: Option[Boolean], + geoMatch: Option[Boolean], + blacklisted: Boolean, + rateLimited: Option[Boolean]) + +@JsonCodec +case class Risk(level: String, + score: Int, + reasons: List[String]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/clearbit/models/WebhookResponse.scala b/jvm/src/main/scala/com/harana/modules/clearbit/models/WebhookResponse.scala new file mode 100644 index 0000000..c0d3ecd --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/clearbit/models/WebhookResponse.scala @@ -0,0 +1,6 @@ +package com.harana.modules.clearbit.models + +case class WebhookResponse(`type`: ModelType, + body: Object, + status: Int, + id: String) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/docker/Docker.scala b/jvm/src/main/scala/com/harana/modules/docker/Docker.scala new file mode 100644 index 0000000..b2c67bf --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/docker/Docker.scala @@ -0,0 +1,297 @@ +package com.harana.modules.docker + +import com.github.dockerjava.api.command._ +import com.github.dockerjava.api.exception.{DockerException, NotFoundException, UnauthorizedException} +import com.github.dockerjava.api.model.Network.Ipam +import com.github.dockerjava.api.model.{Service => DockerService, _} +import com.harana.modules.core.http.models.OkHttpError +import zio.macros.accessible +import zio.{IO, Queue, UIO} + +import java.io.{File, InputStream} + +@accessible +trait Docker { + + def attachContainer(id: ContainerId): IO[DockerException, InputStream] + + def auth(username: Option[String], password: Option[String], identityToken: Option[String], registryToken: Option[String]): IO[UnauthorizedException, String] + + def auth(config: Option[AuthConfig]): IO[UnauthorizedException, String] + + def authConfig(username: Option[String], password: Option[String], identityToken: Option[String], registryToken: Option[String]): UIO[AuthConfig] + + def buildImage(dockerFileOrFolder: File, tags: Set[String]): IO[DockerException, ImageId] + + def buildImage(inputStream: InputStream, tags: Set[String]): IO[DockerException, ImageId] + + def commit(id: ContainerId): IO[NotFoundException, String] + + def connectToNetwork: UIO[Unit] + + def containerDiff(id: ContainerId): IO[NotFoundException, List[ChangeLog]] + + def containerExists(containerName: String): UIO[Boolean] + + def containerNotExists(containerName: String): UIO[Boolean] + + def containerRunning(containerName: String): UIO[Boolean] + + def containerNotRunning(containerName: String): UIO[Boolean] + + def copyResourceFromContainer(id: ContainerId, resource: String, hostPath: Option[String] = None): IO[NotFoundException, InputStream] + + def copyArchiveToContainer(id: ContainerId, tarInputStream: InputStream, remotePath: Option[String] = None): IO[NotFoundException, Unit] + + def copyResourceToContainer(id: ContainerId, resource: String, remotePath: Option[String] = None): IO[NotFoundException, Unit] + + def createContainer(name: String, + imageName: String, + command: Option[String] = None, + exposedPorts: Map[Int, Int] = Map()): IO[DockerException, ContainerId] + + def createImage(repository: String, imageStream: 
InputStream): IO[NotFoundException, ImageId] + + def createNetwork(name: Option[String] = None, + attachable: Boolean = false, + checkDuplicate: Boolean = false, + driver: Option[String] = None, + enableIpv6: Boolean = false, + internal: Boolean = false, + ipam: Option[Ipam] = None, + labels: Map[String, String] = Map(), + options: Map[String, String] = Map()): IO[DockerException, NetworkId] + + def createService(spec: ServiceSpec): IO[NotFoundException, ServiceId] + + def createVolume(name: String, driver: Option[String] = None, driverOpts: Map[String, String] = Map()): IO[NotFoundException, String] + + def disconnectFromNetwork(networkId: Option[NetworkId] = None, containerId: Option[ContainerId] = None, force: Boolean = false): UIO[Unit] + + def ensureContainerIsRunning(name: String, imageName: String, command: Option[String] = None, exposedPorts: Map[Int, Int] = Map()): IO[DockerException, Unit] + + def ensureLocalRegistryIsRunning: IO[DockerException, Unit] + + def events(containerFilter: List[ContainerId] = List(), + eventFilter: List[EventId] = List(), + imageFilter: List[ImageId] = List(), + labelFilter: Map[String, String] = Map(), + withSince: Option[String] = None, + withUntil: Option[String] = None): UIO[Queue[Event]] + + def execCreate(id: ExecId, + attachStderr: Boolean = false, + attachStdin: Boolean = false, + attachStdout: Boolean = false, + cmd: List[String] = List(), + containerId: Option[ContainerId] = None, + env: List[String] = List(), + privileged: Boolean = false, + tty: Boolean = false, + user: Option[String] = None, + workingDir: Option[String] = None): IO[NotFoundException, String] + + def execStart(id: ExecId, + detach: Boolean = false, + stdIn: Option[InputStream] = None, + tty: Boolean = false): UIO[Queue[Frame]] + + def hubTags(namespace: String, + repository: String, + page: Option[Int] = None, + pageSize: Option[Int] = None): zio.Task[List[HubTag]] + + def info: UIO[Info] + + def initializeSwarm(spec: SwarmSpec): UIO[Unit] + + def inspectContainer(id: ContainerId): IO[NotFoundException, InspectContainerResponse] + + def inspectExec(id: ExecId): IO[NotFoundException, InspectExecResponse] + + def inspectImage(id: ImageId): IO[NotFoundException, InspectImageResponse] + + def inspectNetwork(id: Option[NetworkId] = None): IO[NotFoundException, Network] + + def inspectService(id: ServiceId): IO[NotFoundException, DockerService] + + def inspectSwarm: IO[NotFoundException, Swarm] + + def inspectVolume(name: String): IO[NotFoundException, InspectVolumeResponse] + + def joinSwarm(advertiseAddr: Option[String] = None, + joinToken: Option[String] = None, + listenAddr: Option[String] = None, + remoteAddrs: List[String] = List()): UIO[Unit] + + def killContainer(id: ContainerId, signal: Option[String] = None): IO[NotFoundException, Unit] + + // def launchListenContainer: IO[DockerException, List[String]] + + def leaveSwarm(force: Boolean = false): UIO[Unit] + + def listArtifactoryRepositories(registryUrl: String, + repository: String, + authConfig: AuthConfig): IO[OkHttpError, List[String]] + + def listArtifactoryTags(registryUrl: String, + repository: String, + authConfig: AuthConfig, + image: String, + maximum: Option[Int] = None): IO[OkHttpError, List[String]] + + def listContainers(ancestorFilter: List[String] = List(), + before: Option[String] = None, + exitedFilter: Option[Int] = None, + filters: Map[String, List[String]] = Map(), + idFilter: List[String] = List(), + labelFilter: Map[String, String] = Map(), + limit: Option[Int] = None, + nameFilter: List[String] = 
List(), + networkFilter: List[String] = List(), + showAll: Option[Boolean] = None, + showSize: Option[Boolean] = None, + since: Option[String] = None, + statusFilter: List[String] = List(), + volumeFilter: List[String] = List()): UIO[List[Container]] + + def listDockerRepositories(registryUrl: String): IO[OkHttpError, List[String]] + + def listDockerTags(registryUrl: String, + image: String, + maximum: Option[Int] = None): IO[OkHttpError, List[String]] + + def listImages(danglingFilter: Option[Boolean] = None, + imageNameFilter: Option[String] = None, + labelFilter: Map[String, String] = Map(), + showAll: Option[Boolean] = None): UIO[List[Image]] + + def listNetworks(filter: Option[(String, List[String])] = None, + idFilter: List[String] = List(), + nameFilter: List[String] = List()): IO[NotFoundException, List[Network]] + + def listServices(idFilter: List[String] = List(), + labelFilter: Map[String, String] = Map(), + nameFilter: List[String] = List()): IO[NotFoundException, List[DockerService]] + + def listSwarmNodes(idFilter: List[String] = List(), + membershipFilter: List[String] = List(), + nameFilter: List[String] = List(), + roleFilter: List[String] = List()): IO[NotFoundException, List[SwarmNode]] + + def listTasks(idFilter: List[String] = List(), + labelFilter: Map[String, String] = Map(), + nameFilter: List[String] = List(), + nodeFilter: List[String] = List(), + serviceFilter: List[String] = List(), + stateFilter: List[TaskState] = List()): IO[NotFoundException, List[Task]] + + def listVolumes(includeDangling: Boolean = true, filter: Option[(String, List[String])] = None) : IO[NotFoundException, List[InspectVolumeResponse]] + + def loadImage(stream: InputStream): UIO[Unit] + + def logContainer(id: ContainerId, + followStream: Option[Boolean] = None, + since: Option[Int] = None, + stdErr: Option[Boolean] = None, + stdOut: Option[Boolean] = None, + tail: Option[Int] = None, + timestamps: Option[Boolean] = None): UIO[Queue[Frame]] + + def logService(id: ServiceId, + details: Option[Boolean] = None, + follow: Option[Boolean] = None, + since: Option[Int] = None, + stdout: Option[Boolean] = None, + stderr: Option[Boolean] = None, + tail: Option[Int] = None, + timestamps: Option[Boolean] = None): UIO[Queue[Frame]] + + def logTask(id: ServiceId, + details: Option[Boolean] = None, + follow: Option[Boolean] = None, + since: Option[Int] = None, + stdout: Option[Boolean] = None, + stderr: Option[Boolean] = None, + tail: Option[Int] = None, + timestamps: Option[Boolean] = None): UIO[Queue[Frame]] + + def pauseContainer(id: ContainerId): IO[NotFoundException, Unit] + + def ping: UIO[Unit] + + def prune(pruneType: PruneType, + dangling: Option[Boolean] = None, + labelFilter: List[String] = List(), + untilFilter: Option[String] = None): IO[NotFoundException, Long] + + def pullImage(repository: String, + authConfig: Option[AuthConfig] = None, + platform: Option[String] = None, + registry: Option[String] = None, + tag: Option[String] = None): IO[DockerException, Unit] + + def pushImage(name: String, + authConfig: Option[AuthConfig] = None, + tag: Option[String] = None): IO[DockerException, Unit] + + def removeContainer(id: ContainerId, force: Boolean = false, removeVolumes: Boolean = false): IO[NotFoundException, Unit] + + def removeContainers(name: String, force: Boolean = false, removeVolumes: Boolean = false): IO[NotFoundException, Unit] + + def removeImage(id: ImageId, force: Boolean = false, prune: Boolean = true): IO[NotFoundException, Unit] + + def removeNetwork(id: NetworkId): 
IO[NotFoundException, Unit] + + def removeService(id: ServiceId): IO[NotFoundException, Unit] + + def removeVolume(name: String): IO[NotFoundException, Unit] + + def renameContainer(id: ContainerId, name: String): UIO[Unit] + + def restartContainer(id: ContainerId, timeout: Option[Int] = None): IO[DockerException, Unit] + + def saveImage(name: String, tag: Option[String] = None): IO[NotFoundException, InputStream] + + def searchImages(term: String): UIO[List[SearchItem]] + + def startContainer(id: ContainerId): IO[DockerException, Unit] + + + def startLocalRegistry: IO[DockerException, Unit] + + def stats(id: ContainerId): IO[DockerException, Statistics] + + def stopContainer(id: ContainerId, timeout: Option[Int] = None): IO[DockerException, Unit] + + def stopLocalRegistry: IO[DockerException, Unit] + + def tagImage(id: ImageId, imageNameWithRepository: String, tag: String, force: Boolean = false): UIO[Unit] + + def topContainer(id: ContainerId, psArgs: Option[String] = None): IO[NotFoundException, TopContainerResponse] + + def unpauseContainer(id: ContainerId): IO[NotFoundException, Unit] + + def updateContainer(id: ContainerId, + blkioWeight: Option[Int] = None, + cpuPeriod: Option[Int] = None, + cpuQuota: Option[Int] = None, + cpusetCpus: Option[String] = None, + cpusetMems: Option[String] = None, + cpuShares: Option[Int] = None, + kernelMemory: Option[Long] = None, + memory: Option[Long] = None, + memoryReservation: Option[Long] = None, + memorySwap: Option[Long] = None): IO[NotFoundException, UpdateContainerResponse] + + def updateService(id: ServiceId, spec: ServiceSpec): UIO[Unit] + + def updateSwarm(spec: SwarmSpec): UIO[Unit] + + def updateSwarmNode(id: SwarmId, spec: SwarmNodeSpec, version: Option[Long] = None): IO[NotFoundException, Unit] + + def version: UIO[Unit] + + def waitForContainer(id: ContainerId): IO[DockerException, Int] + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/docker/LiveDocker.scala b/jvm/src/main/scala/com/harana/modules/docker/LiveDocker.scala new file mode 100644 index 0000000..39c0454 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/docker/LiveDocker.scala @@ -0,0 +1,913 @@ +package com.harana.modules.docker + +import com.github.dockerjava.api.async.ResultCallback +import com.github.dockerjava.api.command._ +import com.github.dockerjava.api.exception.{DockerException, NotFoundException, UnauthorizedException} +import com.github.dockerjava.api.model.HostConfig.newHostConfig +import com.github.dockerjava.api.model.Network.Ipam +import com.github.dockerjava.api.model.Ports.Binding +import com.github.dockerjava.api.model.{Service => DockerService, _} +import com.github.dockerjava.core.{DefaultDockerClientConfig, DockerClientBuilder} +import com.github.dockerjava.zerodep.ZerodepDockerHttpClient +import com.harana.modules.core.config.Config +import com.harana.modules.core.http.Http +import com.harana.modules.core.http.models.OkHttpError +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.harana.modules.docker.LiveDocker.registryImage +import io.circe.parser._ +import org.json4s.DefaultFormats +import zio.{IO, Queue, UIO, ZIO, ZLayer} + +import java.io.{Closeable, File, InputStream} +import scala.jdk.CollectionConverters._ + +object LiveDocker { + implicit val formats: DefaultFormats = DefaultFormats + val registryImage = "registry:latest" + + val layer = ZLayer { + for { + config <- ZIO.service[Config] + http <- ZIO.service[Http] + logger <- 
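+      // Usage sketch for the Docker module defined above, assuming the @accessible accessors and a
+      // Docker daemon reachable on the configured socket; the image name and ports are illustrative:
+      //
+      //   val bootstrap =
+      //     for {
+      //       _  <- Docker.ensureContainerIsRunning("redis", "redis:7", exposedPorts = Map(6379 -> 6379))
+      //       cs <- Docker.listContainers(nameFilter = List("redis"))
+      //     } yield cs.headOption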
ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveDocker(config, http, logger, micrometer) + } +} + +case class LiveDocker(config: Config, http: Http, logger: Logger, micrometer: Micrometer) extends Docker { + + private val client = for { + dockerHost <- config.string("docker.host", "127.0.0.1") + dockerPort <- config.int("docker.port", 1234) + tlsVerify <- config.boolean("docker.tlsVerify", default = false) + certPath <- config.optString("docker.certPath") + registryUsername <- config.optSecret("docker-registry-username") + registryPassword <- config.optSecret("docker-registry-password") + registryEmail <- config.optString("docker.registryEmail") + registryUrl <- config.optString("docker.registryUrl") + } yield { + val config = DefaultDockerClientConfig.createDefaultConfigBuilder() + .withDockerHost("unix:///var/run/docker.sock") + //.withDockerHost(s"tcp://$dockerHost:$dockerPort") + .withDockerTlsVerify(tlsVerify) + + if (certPath.nonEmpty) config.withDockerCertPath(certPath.get) + if (registryUsername.nonEmpty) config.withRegistryUsername(registryUsername.get) + if (registryPassword.nonEmpty) config.withRegistryPassword(registryPassword.get) + if (registryEmail.nonEmpty) config.withRegistryEmail(registryEmail.get) + if (registryUrl.nonEmpty) config.withRegistryUrl(registryUrl.get) + + DockerClientBuilder.getInstance(config.build()).withDockerHttpClient(new ZerodepDockerHttpClient.Builder().build()).build() + } + + + def attachContainer(id: ContainerId): IO[DockerException, InputStream] = + client.map(_.attachContainerCmd(id).getStdin) + + + def auth(username: Option[String], password: Option[String], identityToken: Option[String], registryToken: Option[String]): IO[UnauthorizedException, String] = + for { + authConfig <- authConfig(username, password, identityToken, registryToken) + auth <- auth(Some(authConfig)) + } yield auth + + + def auth(config: Option[AuthConfig]): IO[UnauthorizedException, String] = + client.map { c => + val cmd = c.authCmd + if (config.nonEmpty) cmd.withAuthConfig(config.get) + cmd.exec().getIdentityToken + } + + + def authConfig(username: Option[String], password: Option[String], identityToken: Option[String], registryToken: Option[String]): UIO[AuthConfig] = + (username, password, identityToken, registryToken) match { + case (Some(u), Some(p), _, _) => ZIO.succeed(new AuthConfig().withUsername(u).withPassword(p)) + case (_, _, Some(it), _) => ZIO.succeed(new AuthConfig().withIdentityToken(it)) + case (_, _, _, Some(rt)) => ZIO.succeed(new AuthConfig().withRegistrytoken(rt)) + case (_, _, _, _) => ZIO.succeed(new AuthConfig()) + } + + + def buildImage(dockerFileOrFolder: File, tags: Set[String]): IO[DockerException, ImageId] = + client.flatMap { c => + ZIO.async { (cb: IO[DockerException, ImageId] => Unit) => + c.buildImageCmd(dockerFileOrFolder).withTags(tags.asJava).exec( + new BuildImageResultCallback() { + override def onNext(item: BuildResponseItem): Unit = cb(ZIO.succeed(item.getImageId)) + override def onError(throwable: Throwable): Unit = cb(ZIO.fail(throwable.asInstanceOf[DockerException])) + } + ) + } + } + + + def buildImage(inputStream: InputStream, tags: Set[String]): IO[DockerException, ImageId] = + client.flatMap { c => + ZIO.async { (cb: IO[DockerException, ImageId] => Unit) => + c.buildImageCmd(inputStream).withTags(tags.asJava).exec( + new BuildImageResultCallback() { + override def onNext(item: BuildResponseItem): Unit = cb(ZIO.succeed(item.getImageId)) + override def onError(throwable: Throwable): Unit = 
cb(ZIO.fail(throwable.asInstanceOf[DockerException])) + } + ) + } + } + + def commit(id: ContainerId): IO[NotFoundException, String] = + client.map(_.commitCmd(id).exec()) + + + def connectToNetwork: UIO[Unit] = + client.map(_.connectToNetworkCmd().exec()) + + + def containerDiff(id: ContainerId): IO[NotFoundException, List[ChangeLog]] = + client.map(_.containerDiffCmd(id).exec().asScala.toList) + + + def containerExists(containerName: String): UIO[Boolean] = + listContainers(filters = Map("name" -> List(containerName))).map(_.nonEmpty) + + + def containerNotExists(containerName: String): UIO[Boolean] = + containerExists(containerName).map(result => !result) + + + def containerRunning(containerName: String): UIO[Boolean] = + listContainers(filters = Map("name" -> List(containerName), "status" -> List("running"))).map(_.nonEmpty) + + + def containerNotRunning(containerName: String): UIO[Boolean] = + containerRunning(containerName).map(result => !result) + + + def copyResourceFromContainer(id: ContainerId, resource: String, hostPath: Option[String] = None): IO[NotFoundException, InputStream] = + client.map { c => + val cmd = c.copyArchiveFromContainerCmd(id, resource) + if (hostPath.nonEmpty) cmd.withHostPath(hostPath.get) + cmd.exec() + } + + + def copyArchiveToContainer(id: ContainerId, tarInputStream: InputStream, remotePath: Option[String] = None): IO[NotFoundException, Unit] = + client.map { c => + val cmd = c.copyArchiveToContainerCmd(id).withTarInputStream(tarInputStream) + if (remotePath.nonEmpty) cmd.withRemotePath(remotePath.get) + cmd.exec() + } + + + def copyResourceToContainer(id: ContainerId, resource: String, remotePath: Option[String] = None): IO[NotFoundException, Unit] = + client.map { c => + val cmd = c.copyArchiveToContainerCmd(id).withHostResource(resource) + if (remotePath.nonEmpty) cmd.withRemotePath(remotePath.get) + cmd.exec() + } + + def createContainer(name: String, + imageName: String, + command: Option[String] = None, + exposedPorts: Map[Int, Int] = Map()): IO[DockerException, ContainerId] = { + val bindings = new Ports() + exposedPorts.foreach { case (k, v) => bindings.bind(new ExposedPort(k), Binding.bindPort(v)) } + + client.map(_.createContainerCmd(imageName).withName(name) + .withHostConfig(newHostConfig().withPortBindings(bindings)) + .withExposedPorts(exposedPorts.keys.map(new ExposedPort(_)).toList.asJava) + .exec().getId + ) + } + + def createImage(repository: String, imageStream: InputStream): IO[NotFoundException, ImageId] = + client.map(_.createImageCmd(repository, imageStream).exec().getId) + + + def createNetwork(name: Option[String] = None, + attachable: Boolean = false, + checkDuplicate: Boolean = false, + driver: Option[String] = None, + enableIpv6: Boolean = false, + internal: Boolean = false, + ipam: Option[Ipam] = None, + labels: Map[String, String] = Map(), + options: Map[String, String] = Map()): IO[DockerException, NetworkId] = + client.map { c => + val cmd = c.createNetworkCmd + .withAttachable(attachable) + .withCheckDuplicate(checkDuplicate) + .withEnableIpv6(enableIpv6) + .withInternal(internal) + .withLabels(labels.asJava) + .withOptions(options.asJava) + if (ipam.nonEmpty) cmd.withIpam(ipam.get) + if (name.nonEmpty) cmd.withName(name.get) + cmd.exec().getId + } + + + def createService(spec: ServiceSpec): IO[NotFoundException, ServiceId] = + client.map(_.createServiceCmd(spec).exec().getId) + + + def createVolume(name: String, driver: Option[String] = None, driverOpts: Map[String, String] = Map()): IO[NotFoundException, String] = + 
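+    // As elsewhere in this class, optional arguments are only applied to the docker-java builder when
+    // they are defined. Illustrative call (volume name and driver are placeholders):
+    //
+    //   Docker.createVolume("harana-cache", driver = Some("local"))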
client.map { c => + val cmd = c.createVolumeCmd.withName(name).withDriverOpts(driverOpts.asJava) + if (driver.nonEmpty) cmd.withDriver(driver.get) + cmd.exec().getMountpoint + } + + + def disconnectFromNetwork(networkId: Option[NetworkId] = None, containerId: Option[ContainerId] = None, force: Boolean = false): UIO[Unit] = + client.map { c => + val cmd = c.disconnectFromNetworkCmd().withForce(force) + if (containerId.nonEmpty) cmd.withContainerId(containerId.get) + if (networkId.nonEmpty) cmd.withNetworkId(networkId.get) + cmd.exec() + } + + + def ensureContainerIsRunning(name: String, imageName: String, command: Option[String] = None, exposedPorts: Map[Int, Int] = Map()): IO[DockerException, Unit] = + for { + running <- containerRunning(name) + existing <- listContainers(nameFilter = List(name), showAll = Some(true )) + _ <- ZIO.when(!running)(logger.debug(s"$name not running, starting ${if (existing.isEmpty) "new" else "existing"} container.")) + _ <- ZIO.when(!running && existing.isEmpty)( + for { + _ <- pullImage(imageName) + id <- createContainer(name, imageName, command, exposedPorts) + _ <- startContainer(id) + } yield () + ) + _ <- ZIO.when(!running && existing.nonEmpty)(startContainer(existing.head.getId)) + } yield () + + + def ensureLocalRegistryIsRunning: IO[DockerException, Unit] = + ensureContainerIsRunning("registry", registryImage, exposedPorts = Map(5000 -> 5000)) + + + def events(containerFilter: List[ContainerId] = List(), + eventFilter: List[EventId] = List(), + imageFilter: List[ImageId] = List(), + labelFilter: Map[String, String] = Map(), + withSince: Option[String] = None, + withUntil: Option[String] = None): UIO[Queue[Event]] = + for { + c <- client + q <- Queue.unbounded[Event] + } yield { + val cmd = c.eventsCmd() + .withContainerFilter(containerFilter: _*) + .withEventFilter(eventFilter: _*) + .withImageFilter(imageFilter: _*) + .withLabelFilter(labelFilter.asJava) + if (withSince.nonEmpty) cmd.withSince(withSince.get) + if (withUntil.nonEmpty) cmd.withUntil(withUntil.get) + + cmd.exec( + new ResultCallback[Event]() { + override def onNext(item: Event): Unit = q.offer(item) + override def onError(throwable: Throwable): Unit = {} + override def onStart(closeable: Closeable): Unit = {} + override def onComplete(): Unit = {} + override def close(): Unit = {} + } + ) + q + } + + + def execCreate(id: ExecId, + attachStderr: Boolean = false, + attachStdin: Boolean = false, + attachStdout: Boolean = false, + cmd: List[String] = List(), + containerId: Option[ContainerId] = None, + env: List[String] = List(), + privileged: Boolean = false, + tty: Boolean = false, + user: Option[String] = None, + workingDir: Option[String] = None): IO[NotFoundException, String] = + client.map { c => + var execCreateCmd = c.execCreateCmd(id) + .withAttachStderr(attachStderr) + .withAttachStdin(attachStdin) + .withAttachStdout(attachStdout) + .withCmd(cmd: _*) + .withEnv(env.asJava) + .withPrivileged(privileged) + .withTty(tty) + if (containerId.nonEmpty) execCreateCmd = execCreateCmd.withContainerId(containerId.get) + if (user.nonEmpty) execCreateCmd = execCreateCmd.withUser(user.get) + execCreateCmd.exec().getId + } + + + def execStart(id: ExecId, + detach: Boolean = false, + stdIn: Option[InputStream] = None, + tty: Boolean = false): UIO[Queue[Frame]] = + for { + c <- client + q <- Queue.unbounded[Frame] + } yield { + val cmd = c.execStartCmd(id).withDetach(detach).withTty(tty) + if (stdIn.nonEmpty) cmd.withStdIn(stdIn.get) + cmd.exec( + new ResultCallback[Frame]() { + override def 
onNext(item: Frame): Unit = q.offer(item) + override def onError(throwable: Throwable): Unit = {} + override def onStart(closeable: Closeable): Unit = {} + override def onComplete(): Unit = {} + override def close(): Unit = {} + } + ) + q + } + + def hubTags(namespace: String, + repository: String, + page: Option[Int] = None, + pageSize: Option[Int] = None): zio.Task[List[HubTag]] = + for { + page <- ZIO.succeed(page.getOrElse(1)) + pageSize <- ZIO.succeed(pageSize.getOrElse(10)) + query = s"https://hub.docker.com/v2/namespaces/$namespace/repositories/$repository/tags?page=$page&page_size=$pageSize" + response <- http.get(query).mapError(ex => new Exception(ex.toString)) + hubTags <- ZIO.from(decode[HubPage](response.body().string())).map(_.results) + } yield hubTags + + + def info: UIO[Info] = + client.map(_.infoCmd().exec()) + + + def initializeSwarm(spec: SwarmSpec): UIO[Unit] = + client.map(_.initializeSwarmCmd(spec).exec()) + + + def inspectContainer(id: ContainerId): IO[NotFoundException, InspectContainerResponse] = + client.map(_.inspectContainerCmd(id).exec()) + + + def inspectExec(id: ExecId): IO[NotFoundException, InspectExecResponse] = + client.map(_.inspectExecCmd(id).exec()) + + + def inspectImage(id: ImageId): IO[NotFoundException, InspectImageResponse] = + client.map(_.inspectImageCmd(id).exec()) + + + def inspectNetwork(id: Option[NetworkId] = None): IO[NotFoundException, Network] = + client.map(_.inspectNetworkCmd().exec()) + + + def inspectService(id: ServiceId): IO[NotFoundException, DockerService] = + client.map(_.inspectServiceCmd(id).exec()) + + + def inspectSwarm: IO[NotFoundException, Swarm] = + client.map(_.inspectSwarmCmd().exec()) + + + def inspectVolume(name: String): IO[NotFoundException, InspectVolumeResponse] = + client.map(_.inspectVolumeCmd(name).exec()) + + + def joinSwarm(advertiseAddr: Option[String] = None, + joinToken: Option[String] = None, + listenAddr: Option[String] = None, + remoteAddrs: List[String] = List()): UIO[Unit] = + client.map { c => + val cmd = c.joinSwarmCmd().withRemoteAddrs(remoteAddrs.asJava) + if (advertiseAddr.nonEmpty) cmd.withAdvertiseAddr(advertiseAddr.get) + if (joinToken.nonEmpty) cmd.withJoinToken(joinToken.get) + if (listenAddr.nonEmpty) cmd.withListenAddr(listenAddr.get) + cmd.exec() + } + + + def killContainer(id: ContainerId, signal: Option[String] = None): IO[NotFoundException, Unit] = + client.map { c => + val cmd = c.killContainerCmd(id) + if (signal.nonEmpty) cmd.withSignal(signal.get) + cmd.exec() + } + + + // def launchListenContainer: IO[DockerException, List[String]] = + // for { + // cmd <- Command( + // "docker", "run", "-d", "-v", "/var/run/docker.sock:/var/run/docker.sock", "-p", "127.0.0.1:1234:1234", + // "bobrik/socat", "TCP-LISTEN:1234,fork", "UNIX-CONNECT:/var/run/docker.sock").lines.provide(Has(blocking)) + // } yield cmd + + + def leaveSwarm(force: Boolean = false): UIO[Unit] = + client.map(_.leaveSwarmCmd().withForceEnabled(force).exec()) + + + def listArtifactoryRepositories(registryUrl: String, + repository: String, + authConfig: AuthConfig): IO[OkHttpError, List[String]] = +null +// for { +// token <- http.getAsJson(s"$registryUrl/v2/token").map(_.hcursor.downField("token").as[String]) +// headers = Map("Authorization" -> s"Bearer $token") +// url = s"$registryUrl/api/docker/$repository/v2/_catalog" +// repositoriesJson <- http.getAsJson(url, Map(), headers).map(_.hcursor.downField("repositories").as[String]) +// repositories = repositoriesJson.children.map(_.extract[String]) +// } yield 
repositories + + + def listArtifactoryTags(registryUrl: String, + repository: String, + authConfig: AuthConfig, + image: String, + maximum: Option[Int] = None): IO[OkHttpError, List[String]] = +null +// for { +// token <- http.getAsJson(s"$registryUrl/v2/token").map(_.hcursor.downField("token").as[String]) +// headers = Map("Authorization" -> s"Bearer $token") +// url = s"$registryUrl/api/docker/$repository/v2/${image}/tags/list?n=${maximum.getOrElse(5)}" +// repositories <- http.getAsJson(url, Map(), headers).map(_.hcursor.downField("tags").as[List[String]]) +// } yield repositories + + + def listContainers(ancestorFilter: List[String] = List(), + before: Option[String] = None, + exitedFilter: Option[Int] = None, + filters: Map[String, List[String]] = Map(), + idFilter: List[String] = List(), + labelFilter: Map[String, String] = Map(), + limit: Option[Int] = None, + nameFilter: List[String] = List(), + networkFilter: List[String] = List(), + showAll: Option[Boolean] = None, + showSize: Option[Boolean] = None, + since: Option[String] = None, + statusFilter: List[String] = List(), + volumeFilter: List[String] = List()): UIO[List[Container]] = + client.map { c => + val cmd = c.listContainersCmd + if (ancestorFilter.nonEmpty) cmd.withAncestorFilter(ancestorFilter.asJava) + if (filters.nonEmpty) filters.foreach { case (k, v) => cmd.getFilters.put(k, v.asJava) } + if (idFilter.nonEmpty) cmd.withIdFilter(idFilter.asJava) + if (labelFilter.nonEmpty) cmd.withLabelFilter(labelFilter.asJava) + if (nameFilter.nonEmpty) cmd.withNameFilter(nameFilter.asJava) + if (networkFilter.nonEmpty) cmd.withNetworkFilter(networkFilter.asJava) + if (statusFilter.nonEmpty) cmd.withStatusFilter(statusFilter.asJava) + if (volumeFilter.nonEmpty) cmd.withVolumeFilter(volumeFilter.asJava) + if (before.nonEmpty) cmd.withBefore(before.get) + if (exitedFilter.nonEmpty) cmd.withExitedFilter(exitedFilter.get) + if (limit.nonEmpty) cmd.withLimit(limit.get) + if (showAll.nonEmpty) cmd.withShowAll(showAll.get) + if (showSize.nonEmpty) cmd.withShowSize(showSize.get) + cmd.exec().asScala.toList + } + + + def listDockerRepositories(registryUrl: String): IO[OkHttpError, List[String]] = + null +// for { +// url <- ZIO.from(s"$registryUrl/v2/_catalog") +// repositoriesJson <- http.getAsJson(url, Map()).map(_ \ "repositories") +// repositories = repositoriesJson.children.map(_.extract[String]) +// } yield repositories + + + def listDockerTags(registryUrl: String, + image: String, + maximum: Option[Int] = None): IO[OkHttpError, List[String]] = + null +// for { +// url <- ZIO.from(s"$registryUrl/v2/${image}/tags/list?n=${maximum.getOrElse(5)}") +// repositories <- http.getAsJson(url, Map()).map(json => (json \ "tags").extract[List[String]]) +// } yield repositories + + + def listImages(danglingFilter: Option[Boolean] = None, + imageNameFilter: Option[String] = None, + labelFilter: Map[String, String] = Map(), + showAll: Option[Boolean] = None): UIO[List[Image]] = + client.map { c => + val cmd = c.listImagesCmd + .withLabelFilter(labelFilter.asJava) + if (danglingFilter.nonEmpty) cmd.withDanglingFilter(danglingFilter.get) + if (imageNameFilter.nonEmpty) cmd.withImageNameFilter(imageNameFilter.get) + if (showAll.nonEmpty) cmd.withShowAll(showAll.get) + cmd.exec().asScala.toList + } + + + def listNetworks(filter: Option[(String, List[String])] = None, + idFilter: List[String] = List(), + nameFilter: List[String] = List()): IO[NotFoundException, List[Network]] = + client.map { c => + val cmd = c.listNetworksCmd() + .withIdFilter(idFilter: 
_*) + .withNameFilter(nameFilter: _*) + if (filter.nonEmpty) cmd.withFilter(filter.get._1, filter.get._2.asJava) + cmd.exec().asScala.toList + } + + + def listServices(idFilter: List[String] = List(), + labelFilter: Map[String, String] = Map(), + nameFilter: List[String] = List()): IO[NotFoundException, List[DockerService]] = + client.map(_.listServicesCmd() + .withIdFilter(idFilter.asJava) + .withLabelFilter(labelFilter.asJava) + .withNameFilter(nameFilter.asJava) + .exec().asScala.toList) + + + def listSwarmNodes(idFilter: List[String] = List(), + membershipFilter: List[String] = List(), + nameFilter: List[String] = List(), + roleFilter: List[String] = List()): IO[NotFoundException, List[SwarmNode]] = + client.map(_.listSwarmNodesCmd() + .withIdFilter(idFilter.asJava) + .withMembershipFilter(membershipFilter.asJava) + .withNameFilter(nameFilter.asJava) + .withRoleFilter(roleFilter.asJava) + .exec().asScala.toList) + + + def listTasks(idFilter: List[String] = List(), + labelFilter: Map[String, String] = Map(), + nameFilter: List[String] = List(), + nodeFilter: List[String] = List(), + serviceFilter: List[String] = List(), + stateFilter: List[TaskState] = List()): IO[NotFoundException, List[Task]] = + client.map(_.listTasksCmd() + .withIdFilter(idFilter: _*) + .withLabelFilter(labelFilter.asJava) + .withNameFilter(nameFilter: _*) + .withNodeFilter(nodeFilter: _*) + .withServiceFilter(serviceFilter: _*) + .withStateFilter(stateFilter: _*) + .exec().asScala.toList) + + + def listVolumes(includeDangling: Boolean = true, filter: Option[(String, List[String])] = None): IO[NotFoundException, List[InspectVolumeResponse]] = + client.map { c => + val cmd = c.listVolumesCmd.withDanglingFilter(includeDangling) + if (filter.nonEmpty) cmd.withFilter(filter.get._1, filter.get._2.asJava) + cmd.exec().getVolumes.asScala.toList + } + + + def loadImage(stream: InputStream): UIO[Unit] = + client.map(_.loadImageCmd(stream).exec()) + + + def logContainer(id: ContainerId, + followStream: Option[Boolean] = None, + since: Option[Int] = None, + stderr: Option[Boolean] = None, + stdout: Option[Boolean] = None, + tail: Option[Int] = None, + timestamps: Option[Boolean] = None): UIO[Queue[Frame]] = + for { + c <- client + q <- Queue.unbounded[Frame] + } yield { + val cmd = c.logContainerCmd(id) + if (followStream.nonEmpty) cmd.withFollowStream(followStream.get) + if (since.nonEmpty) cmd.withSince(since.get) + if (stderr.nonEmpty) cmd.withStdErr(stderr.get) + if (stdout.nonEmpty) cmd.withStdOut(stdout.get) + if (tail.nonEmpty) cmd.withTail(tail.get) + if (timestamps.nonEmpty) cmd.withTimestamps(timestamps.get) + + cmd.exec( + new ResultCallback[Frame]() { + override def onNext(item: Frame): Unit = q.offer(item) + override def onError(throwable: Throwable): Unit = {} + override def onStart(closeable: Closeable): Unit = {} + override def onComplete(): Unit = {} + override def close(): Unit = {} + } + ) + q + } + + + def logService(id: ServiceId, + details: Option[Boolean] = None, + follow: Option[Boolean] = None, + since: Option[Int] = None, + stdout: Option[Boolean] = None, + stderr: Option[Boolean] = None, + tail: Option[Int] = None, + timestamps: Option[Boolean] = None): UIO[Queue[Frame]] = + for { + c <- client + q <- Queue.unbounded[Frame] + } yield { + val cmd = c.logServiceCmd(id) + if (details.nonEmpty) cmd.withDetails(details.get) + if (follow.nonEmpty) cmd.withFollow(follow.get) + if (since.nonEmpty) cmd.withSince(since.get) + if (stderr.nonEmpty) cmd.withStderr(stderr.get) + if (stdout.nonEmpty) 
cmd.withStdout(stdout.get) + if (tail.nonEmpty) cmd.withTail(tail.get) + if (timestamps.nonEmpty) cmd.withTimestamps(timestamps.get) + + cmd.exec( + new ResultCallback[Frame]() { + override def onNext(item: Frame): Unit = q.offer(item) + override def onError(throwable: Throwable): Unit = {} + override def onStart(closeable: Closeable): Unit = {} + override def onComplete(): Unit = {} + override def close(): Unit = {} + } + ) + q + } + + + def logTask(id: ServiceId, + details: Option[Boolean] = None, + follow: Option[Boolean] = None, + since: Option[Int] = None, + stdout: Option[Boolean] = None, + stderr: Option[Boolean] = None, + tail: Option[Int] = None, + timestamps: Option[Boolean] = None): UIO[Queue[Frame]] = + for { + c <- client + q <- Queue.unbounded[Frame] + } yield { + val cmd = c.logTaskCmd(id) + if (details.nonEmpty) cmd.withDetails(details.get) + if (follow.nonEmpty) cmd.withFollow(follow.get) + if (since.nonEmpty) cmd.withSince(since.get) + if (stderr.nonEmpty) cmd.withStderr(stderr.get) + if (stdout.nonEmpty) cmd.withStdout(stdout.get) + if (tail.nonEmpty) cmd.withTail(tail.get) + if (timestamps.nonEmpty) cmd.withTimestamps(timestamps.get) + + cmd.exec( + new ResultCallback[Frame]() { + override def onNext(item: Frame): Unit = q.offer(item) + override def onError(throwable: Throwable): Unit = {} + override def onStart(closeable: Closeable): Unit = {} + override def onComplete(): Unit = {} + override def close(): Unit = {} + } + ) + q + } + + + def pauseContainer(id: ContainerId): IO[NotFoundException, Unit] = + client.map(_.pauseContainerCmd(id).exec()) + + + def ping: UIO[Unit] = + client.map(_.pingCmd().exec()) + + + def prune(pruneType: PruneType, + dangling: Option[Boolean] = None, + labelFilter: List[String] = List(), + untilFilter: Option[String] = None): IO[NotFoundException, Long] = + client.map { c => + val cmd = c.pruneCmd(pruneType).withLabelFilter(labelFilter: _*) + if (dangling.nonEmpty) cmd.withDangling(dangling.get) + if (untilFilter.nonEmpty) cmd.withUntilFilter(untilFilter.get) + cmd.exec().getSpaceReclaimed + } + + + def pullImage(repository: String, + authConfig: Option[AuthConfig] = None, + platform: Option[String] = None, + registry: Option[String] = None, + tag: Option[String] = None): IO[DockerException, Unit] = + client.flatMap { c => + ZIO.async { cb => + val cmd = c.pullImageCmd(repository) + if (authConfig.nonEmpty) cmd.withAuthConfig(authConfig.get) + if (platform.nonEmpty) cmd.withPlatform(platform.get) + if (registry.nonEmpty) cmd.withRegistry(registry.get) + if (tag.nonEmpty) cmd.withTag(tag.get) + + cmd.exec( + new ResultCallback[PullResponseItem]() { + override def onNext(item: PullResponseItem): Unit = {} + override def onError(throwable: Throwable): Unit = cb(ZIO.fail(throwable.asInstanceOf[DockerException])) + override def onStart(closeable: Closeable): Unit = {} + override def onComplete(): Unit = cb(ZIO.unit) + override def close(): Unit = {} + } + ) + } + } + + + def pushImage(name: String, + authConfig: Option[AuthConfig] = None, + tag: Option[String] = None): IO[DockerException, Unit] = + client.flatMap { c => + ZIO.async { (cb: IO[DockerException, Unit] => Unit) => + val cmd = c.pushImageCmd(name) + if (authConfig.nonEmpty) cmd.withAuthConfig(authConfig.get) + if (tag.nonEmpty) cmd.withTag(tag.get) + + cmd.exec( + new ResultCallback[PushResponseItem]() { + override def onNext(item: PushResponseItem): Unit = {} + override def onError(throwable: Throwable): Unit = cb(ZIO.fail(throwable.asInstanceOf[DockerException])) + override def 
onStart(closeable: Closeable): Unit = {} + override def onComplete(): Unit = cb(ZIO.unit) + override def close(): Unit = {} + } + ) + } + } + + + def removeContainer(id: ContainerId, force: Boolean = false, removeVolumes: Boolean = false): IO[NotFoundException, Unit] = + client.map(_.removeContainerCmd(id).withForce(force).withRemoveVolumes(removeVolumes).exec()) + + + def removeContainers(name: String, force: Boolean = false, removeVolumes: Boolean = false): IO[NotFoundException, Unit] = + for { + containers <- listContainers(nameFilter = List(name)) + _ <- ZIO.foreach(containers)(c => removeContainer(c.getId, force, removeVolumes)) + } yield () + + + def removeImage(id: ImageId, force: Boolean = false, prune: Boolean = true): IO[NotFoundException, Unit] = + client.map(_.removeImageCmd(id).withForce(force).withNoPrune(!prune).exec()) + + + def removeNetwork(id: NetworkId): IO[NotFoundException, Unit] = + client.map(_.removeNetworkCmd(id).exec()) + + + def removeService(id: ServiceId): IO[NotFoundException, Unit] = + client.map(_.removeServiceCmd(id).exec()) + + + def removeVolume(name: String): IO[NotFoundException, Unit] = + client.map(_.removeVolumeCmd(name).exec()) + + + def renameContainer(id: ContainerId, name: String): UIO[Unit] = + client.map(_.renameContainerCmd(id).withName(name).exec()) + + + def restartContainer(id: ContainerId, timeout: Option[Int] = None): IO[DockerException, Unit] = + client.map { c => + val cmd = c.restartContainerCmd(id) + if (timeout.nonEmpty) cmd.withTimeout(timeout.get) + cmd.exec() + } + + + def saveImage(name: String, tag: Option[String] = None): IO[NotFoundException, InputStream] = + client.map { c => + val cmd = c.saveImageCmd(name) + if (tag.nonEmpty) cmd.withTag(tag.get) + cmd.exec() + } + + + def searchImages(term: String): UIO[List[SearchItem]] = + client.map(_.searchImagesCmd(term).exec().asScala.toList) + + + def startContainer(id: ContainerId): IO[DockerException, Unit] = + client.map(_.startContainerCmd(id).exec()) + + + def startLocalRegistry: IO[DockerException, Unit] = + for { + _ <- logger.info("Starting Docker Registry") + _ <- ZIO.whenZIO(containerNotRunning("registry")) { + for { + _ <- logger.debug("Existing Docker Registry container not found. 
Starting a new one.") + _ <- pullImage(registryImage) + id <- createContainer("registry", registryImage, exposedPorts = Map(5000 -> 5000)) + _ <- startContainer(id) + } yield () + } + } yield () + + + def stats(id: ContainerId): IO[DockerException, Statistics] = + client.flatMap { c => + ZIO.async { (cb: IO[DockerException, Statistics] => Unit) => + c.statsCmd(id).exec( + new ResultCallback[Statistics]() { + override def onNext(item: Statistics): Unit = cb(ZIO.succeed(item)) + override def onError(throwable: Throwable): Unit = cb(ZIO.fail(throwable.asInstanceOf[DockerException])) + override def onStart(closeable: Closeable): Unit = {} + override def onComplete(): Unit = {} + override def close(): Unit = {} + } + ) + } + } + + + def stopContainer(id: ContainerId, timeout: Option[Int] = None): IO[DockerException, Unit] = + client.map { c => + val cmd = c.stopContainerCmd(id) + if (timeout.nonEmpty) cmd.withTimeout(timeout.get) + cmd.exec() + } + + + def stopLocalRegistry: IO[DockerException, Unit] = + for { + _ <- logger.info("Stopping Zookeeper") + containers <- listContainers(nameFilter = List("zookeeper")) + _ <- ZIO.foreachDiscard(containers.map(_.getId))(id => stopContainer(id)) + } yield () + + def tagImage(id: ImageId, imageNameWithRepository: String, tag: String, force: Boolean = false): UIO[Unit] = + client.map(_.tagImageCmd(id, imageNameWithRepository, tag).withForce(force).exec()) + + + def topContainer(id: ContainerId, psArgs: Option[String] = None): IO[NotFoundException, TopContainerResponse] = + client.map { c => + val cmd = c.topContainerCmd(id) + if (psArgs.nonEmpty) cmd.withPsArgs(psArgs.get) + cmd.exec() + } + + + def unpauseContainer(id: ContainerId): IO[NotFoundException, Unit] = + client.map(_.unpauseContainerCmd(id).exec()) + + + def updateContainer(id: ContainerId, + blkioWeight: Option[Int] = None, + cpuPeriod: Option[Int] = None, + cpuQuota: Option[Int] = None, + cpusetCpus: Option[String] = None, + cpusetMems: Option[String] = None, + cpuShares: Option[Int] = None, + kernelMemory: Option[Long] = None, + memory: Option[Long] = None, + memoryReservation: Option[Long] = None, + memorySwap: Option[Long] = None): IO[NotFoundException, UpdateContainerResponse] = + client.map { c => + val cmd = c.updateContainerCmd(id) + if (blkioWeight.nonEmpty) cmd.withBlkioWeight(blkioWeight.get) + if (cpuPeriod.nonEmpty) cmd.withCpuPeriod(cpuPeriod.get) + if (cpuQuota.nonEmpty) cmd.withCpuQuota(cpuQuota.get) + if (cpusetCpus.nonEmpty) cmd.withCpusetCpus(cpusetCpus.get) + if (cpusetMems.nonEmpty) cmd.withCpusetMems(cpusetMems.get) + if (cpuShares.nonEmpty) cmd.withCpuShares(cpuShares.get) + if (kernelMemory.nonEmpty) cmd.withKernelMemory(kernelMemory.get) + if (memory.nonEmpty) cmd.withMemory(memory.get) + if (memoryReservation.nonEmpty) cmd.withMemoryReservation(memoryReservation.get) + if (memorySwap.nonEmpty) cmd.withMemorySwap(memorySwap.get) + cmd.exec() + } + + + def updateService(id: ServiceId, spec: ServiceSpec): UIO[Unit] = + client.map(_.updateServiceCmd(id, spec).exec()) + + + def updateSwarm(spec: SwarmSpec): UIO[Unit] = + client.map(_.updateSwarmCmd(spec).exec()) + + + def updateSwarmNode(id: SwarmId, spec: SwarmNodeSpec, version: Option[Long] = None): IO[NotFoundException, Unit] = + client.map { c => + val cmd = c.updateSwarmNodeCmd().withSwarmNodeId(id).withSwarmNodeSpec(spec) + if (version.nonEmpty) cmd.withVersion(version.get) + cmd.exec() + } + + + def version: UIO[Unit] = + client.map(_.versionCmd().exec()) + + + def waitForContainer(id: ContainerId): 
IO[DockerException, Int] = + client.flatMap { c => + ZIO.async { (cb: IO[DockerException, Int] => Unit) => + c.waitContainerCmd(id).exec( + new ResultCallback[WaitResponse]() { + override def onNext(item: WaitResponse): Unit = cb(ZIO.succeed(item.getStatusCode)) + override def onError(throwable: Throwable): Unit = cb(ZIO.fail(throwable.asInstanceOf[DockerException])) + override def onStart(closeable: Closeable): Unit = {} + override def onComplete(): Unit = {} + override def close(): Unit = {} + } + ) + } + } +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/docker/package.scala b/jvm/src/main/scala/com/harana/modules/docker/package.scala new file mode 100644 index 0000000..8925530 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/docker/package.scala @@ -0,0 +1,53 @@ +package com.harana.modules + +import io.circe.generic.JsonCodec + +package object docker { + + type ContainerId = String + type EventId = String + type ExecId = String + type ImageId = String + type NetworkId = String + type ServiceId = String + type SwarmId = String + type TaskId = String + + @JsonCodec + case class HubPage(count: Int, + next: Option[String], + previous: Option[String], + results: List[HubTag]) + + + @JsonCodec + case class HubTag(creator: Long, + id: Long, + images: List[HubImage] = List(), + last_updated: String, + last_updater: Long, + last_updater_username: String, + name: String, + repository: Long, + full_size: Long, + v2: Boolean, + tag_status: String, + tag_last_pulled: Option[String], + tag_last_pushed: Option[String], + media_type: String, + digest: String) + + @JsonCodec + case class HubImage(architecture: String, + features: String, + variant: Option[String], + digest: String, + os: String, + os_features: String, + os_version: Option[String], + size: Long, + status: String, + last_pulled: Option[String], + last_pushed: Option[String]) + +} diff --git a/jvm/src/main/scala/com/harana/modules/dremio/Dremio.scala b/jvm/src/main/scala/com/harana/modules/dremio/Dremio.scala new file mode 100644 index 0000000..0c8ec30 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/Dremio.scala @@ -0,0 +1,29 @@ +package com.harana.modules.dremio + +import com.harana.modules.dremio.models._ +import io.circe.Decoder +import zio.Task +import zio.macros.accessible + +@accessible +trait Dremio { + + def jobStatus(id: JobId): Task[JobStatus] + def jobResults(id: JobId, offset: Option[Int], limit: Option[Int]): Task[JobResults] + def cancelJob(id: JobId): Task[Unit] + + def getCatalog: Task[List[EntitySummary]] + def getCatalogEntity[E <: Entity](idOrPath: Either[EntityId, String])(implicit d: Decoder[E], m: Manifest[E]): Task[E] + def getCatalogEntityTags(id: EntityId): Task[List[String]] + def getCatalogEntityWiki(id: EntityId): Task[String] + + def updateCatalogEntity[E <: Entity](id: EntityId, entity: E): Task[Unit] + def updateCatalogEntityTags(id: EntityId, tags: List[String]): Task[Unit] + def updateCatalogEntityWiki(id: EntityId, text: String): Task[Unit] + def deleteCatalogEntity(id: EntityId): Task[Unit] + + def refreshCatalogEntity(id: EntityId): Task[Unit] + + def sql(sql: String): Task[JobId] + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/dremio/LiveDremio.scala b/jvm/src/main/scala/com/harana/modules/dremio/LiveDremio.scala new file mode 100644 index 0000000..2f604e5 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/LiveDremio.scala @@ -0,0 +1,161 @@ +package com.harana.modules.dremio + +import 
com.harana.modules.core.config.Config +import com.harana.modules.core.http.Http +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.harana.modules.dremio.models._ +import io.circe.syntax._ +import io.circe.{Decoder, Json} +import zio.{Task, ZLayer, ZIO} + +object LiveDremio { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + http <- ZIO.service[Http] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveDremio(config, http, logger, micrometer) + } +} + +case class LiveDremio(config: Config, http: Http, logger: Logger, micrometer: Micrometer) extends Dremio { + + def jobStatus(id: JobId): Task[JobStatus] = + for { + _ <- logger.debug(s"Getting job status: $id") + response <- httpGet(s"/api/v3/job/$id") + jobStatus <- ZIO.fromTry(response.as[JobStatus].toTry) + } yield jobStatus + + + def jobResults(id: JobId, offset: Option[Int], limit: Option[Int]): Task[JobResults] = + for { + _ <- logger.debug(s"Getting job results: $id") + response <- httpGet(s"/api/v3/job/$id/results?offset=${offset.getOrElse("")}&limit=${limit.getOrElse("")}") + jobResults <- ZIO.fromTry(response.as[JobResults].toTry) + } yield jobResults + + + def cancelJob(id: JobId): Task[Unit] = + for { + _ <- logger.debug(s"Cancelling job: $id") + _ <- httpPost(s"/api/v3/job/$id/cancel", None) + } yield () + + + def getCatalog: Task[List[EntitySummary]] = + for { + _ <- logger.debug(s"Getting catalog") + response <- httpGet(s"/api/v3/catalog") + entities <- ZIO.fromTry(response.hcursor.downField("data").as[List[EntitySummary]].toTry) + } yield entities + + + def getCatalogEntity[E <: Entity](idOrPath: Either[EntityId, String])(implicit d: Decoder[E], m: Manifest[E]): Task[E] = + for { + _ <- logger.debug(s"Getting catalog entity: ${idOrPath.toString}") + url = idOrPath match { + case Left(u) => s"/api/v3/catalog/$u" + case Right(u) => s"/api/v3/catalog/by-path/$u" + } + json <- httpGet(url) + entity <- ZIO.fromTry(json.as[E].toTry) + } yield entity + + + def getCatalogEntityTags(id: EntityId): Task[List[String]] = + for { + _ <- logger.debug(s"Getting catalog entity tags: $id") + response <- httpGet(s"/api/v3/catalog/$id/collaboration/tag") + tags <- ZIO.fromTry(response.hcursor.downField("tags").as[List[String]].toTry) + } yield tags + + + def getCatalogEntityWiki(id: EntityId): Task[String] = + for { + _ <- logger.debug(s"Getting catalog entity wiki: $id") + response <- httpGet(s"/api/v3/catalog/$id/collaboration/wiki") + wiki <- ZIO.fromTry(response.hcursor.downField("text").as[String].toTry) + } yield wiki + + + // FIXME: entity is currently ignored - no update request is sent yet + def updateCatalogEntity[E <: Entity](id: EntityId, entity: E): Task[Unit] = + for { + _ <- logger.debug(s"Updating catalog entity: $id") + + } yield () + + + def updateCatalogEntityTags(id: EntityId, tags: List[String]): Task[Unit] = + for { + _ <- logger.debug(s"Updating catalog entity tags: $id") + body <- ZIO.attempt(Map("tags" -> tags).asJson.noSpaces) + _ <- httpPost(s"/api/v3/catalog/$id/collaboration/tag", Some(body)) + } yield () + + + def updateCatalogEntityWiki(id: EntityId, text: String): Task[Unit] = + for { + _ <- logger.debug(s"Updating catalog entity wiki: $id") + body <- ZIO.attempt(Map("text" -> text).asJson.noSpaces) + _ <- httpPost(s"/api/v3/catalog/$id/collaboration/wiki", Some(body)) + } yield () + + + def deleteCatalogEntity(id: EntityId): Task[Unit] = + for { + _ <- logger.debug(s"Deleting catalog entity: $id") + _ <- httpDelete(s"/api/v3/catalog/$id") + } yield () + + + def 
refreshCatalogEntity(id: EntityId): Task[Unit] = + for { + _ <- logger.debug(s"Refreshing catalog entity: $id") + _ <- httpPost(s"/api/v3/catalog/$id/refresh", None) + } yield () + + + def sql(sql: String): Task[JobId] = + for { + _ <- logger.debug(s"SQL query: $sql") + response <- httpPost("/api/v3/sql", Some(Map("sql" -> sql).asJson.noSpaces)) + jobId <- ZIO.fromTry(response.hcursor.downField("id").as[String].toTry) + } yield jobId + + + private def getToken: Task[String] = + for { + username <- config.secret("dremio-username") + password <- config.secret("dremio-password") + body <- ZIO.attempt(Map("username" -> username, "password" -> password).asJson.noSpaces) + response <- httpPost("/api/v2/login", Some(body)) + token <- ZIO.fromTry(response.hcursor.downField("token").as[String].toTry) + } yield s"_dremio{$token}" + + + private def httpGet(suffix: String): Task[Json] = + for { + token <- getToken + host <- config.secret("dremio-host") + response <- http.getAsJson(s"http://$host$suffix", credentials = Some((token, ""))).mapError(e => new Exception(e.toString)) + } yield response + + + private def httpDelete(suffix: String): Task[Json] = + for { + token <- getToken + host <- config.secret("dremio-host") + response <- http.deleteAsJson(s"http://$host$suffix", credentials = Some((token, ""))).mapError(e => new Exception(e.toString)) + } yield response + + + private def httpPost(suffix: String, body: Option[String]): Task[Json] = + for { + token <- getToken + host <- config.secret("dremio-host") + response <- http.postAsJson(s"http://$host$suffix", mimeType = Some("application/json"), body = body, credentials = Some((token, ""))).mapError(e => new Exception(e.toString)) + } yield response +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/AccelerationRefreshPolicy.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/AccelerationRefreshPolicy.scala new file mode 100644 index 0000000..4f14fd8 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/AccelerationRefreshPolicy.scala @@ -0,0 +1,9 @@ +package com.harana.modules.dremio.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class AccelerationRefreshPolicy(refreshPeriodMs: Long, + gracePeriodMs: Long, + method: AccelerationRefreshPolicyMethod, + refreshField: String) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/Catalog.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/Catalog.scala new file mode 100644 index 0000000..75be716 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/Catalog.scala @@ -0,0 +1,6 @@ +package com.harana.modules.dremio.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class Catalog(data: List[EntitySummary]) diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/Dataset.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/Dataset.scala new file mode 100644 index 0000000..4f726b2 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/Dataset.scala @@ -0,0 +1,18 @@ +package com.harana.modules.dremio.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class Dataset(entityType: String = "dataset", + id: String, + path: String, + tag: String, + `type`: DatasetType, + fields: List[DatasetField], + createdAt: String, + accelerationRefreshPolicy: Option[AccelerationRefreshPolicy], + sql: Option[String], + sqlContext: String, +// FIXME +// format: DatasetFormat, + approximateStatisticsAllowed: Boolean) extends 
Entity diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/DatasetField.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/DatasetField.scala new file mode 100644 index 0000000..3c2d41e --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/DatasetField.scala @@ -0,0 +1,7 @@ +package com.harana.modules.dremio.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class DatasetField(name: String, + `type`: DatasetFieldType) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/DatasetFieldType.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/DatasetFieldType.scala new file mode 100644 index 0000000..dccf169 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/DatasetFieldType.scala @@ -0,0 +1,9 @@ +package com.harana.modules.dremio.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class DatasetFieldType(name: DatasetFieldName, + subSchema: DatasetField, + precision: Int, + scale: Int) diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/DatasetFormat.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/DatasetFormat.scala new file mode 100644 index 0000000..ca08d0d --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/DatasetFormat.scala @@ -0,0 +1,34 @@ +package com.harana.modules.dremio.models + +import io.circe._ +import org.latestbit.circe.adt.codec._ + +// FIXME +//sealed trait DatasetFormat +//object DatasetFormat { +// +// implicit val encoder : Encoder[DatasetFormat] = JsonTaggedAdtCodec.createEncoder[DatasetFormat]("type") +// implicit val decoder : Decoder[DatasetFormat] = JsonTaggedAdtCodec.createDecoder[DatasetFormat]("type") +// +// case class Excel(sheetName: String, +// extractHeader: Boolean, +// hasMergedCells: Boolean) extends DatasetFormat +// +// case class JSON() extends DatasetFormat +// +// case class Parquet() extends DatasetFormat +// +// case class Text(fieldDelimiter: String, +// lineDelimiter: String, +// quote: String, +// comment: String, +// escape: String, +// skipFirstLine: Boolean, +// extractHeader: Boolean, +// trimHeader: Boolean, +// autoGenerateColumnNames: Boolean) extends DatasetFormat +// +// case class XLS(sheetName: String, +// extractHeader: Boolean, +// hasMergedCells: Boolean) extends DatasetFormat +//} diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/Entity.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/Entity.scala new file mode 100644 index 0000000..df7b12a --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/Entity.scala @@ -0,0 +1,8 @@ +package com.harana.modules.dremio.models + +abstract class Entity { + val entityType: String + val id: String + val path: String + val tag: String +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/EntitySummary.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/EntitySummary.scala new file mode 100644 index 0000000..cec83fb --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/EntitySummary.scala @@ -0,0 +1,11 @@ +package com.harana.modules.dremio.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class EntitySummary(id: String, + path: String, + tag: String, + `type`: String, + datasetType: String, + containerType: String) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/File.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/File.scala new file mode 100644 
index 0000000..e30c9f5 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/File.scala @@ -0,0 +1,9 @@ +package com.harana.modules.dremio.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class File(entityType: String = "file", + id: String, + path: String, + tag: String) extends Entity \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/Folder.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/Folder.scala new file mode 100644 index 0000000..3097a9a --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/Folder.scala @@ -0,0 +1,10 @@ +package com.harana.modules.dremio.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class Folder(entityType: String = "folder", + id: String, + path: String, + tag: String, + children: List[EntitySummary]) extends Entity \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/JobAccelerationRelationship.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/JobAccelerationRelationship.scala new file mode 100644 index 0000000..aac9ccc --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/JobAccelerationRelationship.scala @@ -0,0 +1,8 @@ +package com.harana.modules.dremio.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class JobAccelerationRelationship(reflectionId: String, + datasetId: String, + relationship: JobAccelerationRelationshipType) diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/JobAccelerationStatus.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/JobAccelerationStatus.scala new file mode 100644 index 0000000..9c05c19 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/JobAccelerationStatus.scala @@ -0,0 +1,6 @@ +package com.harana.modules.dremio.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class JobAccelerationStatus(reflectionRelationships: List[JobAccelerationRelationship]) diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/JobFailure.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/JobFailure.scala new file mode 100644 index 0000000..5fa956a --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/JobFailure.scala @@ -0,0 +1,7 @@ +package com.harana.modules.dremio.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class JobFailure(errorMessage: String, + moreInfo: String) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/JobResults.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/JobResults.scala new file mode 100644 index 0000000..00d62d7 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/JobResults.scala @@ -0,0 +1,9 @@ +package com.harana.modules.dremio.models + +import io.circe.Json +import io.circe.generic.JsonCodec + +@JsonCodec +case class JobResults(rowCount: Int, + schema: List[DatasetField], + rows: List[Json]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/JobStatus.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/JobStatus.scala new file mode 100644 index 0000000..7d94270 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/JobStatus.scala @@ -0,0 +1,12 @@ +package com.harana.modules.dremio.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class JobStatus(jobState: JobStateJobQueryType, + queryType: JobQueryType, + startedAt: String, + endedAt: String, + rowCount: 
Option[Int], + acceleration: Option[JobAccelerationStatus], + errorMessage: String) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/Source.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/Source.scala new file mode 100644 index 0000000..7eb7dd1 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/Source.scala @@ -0,0 +1,6 @@ +package com.harana.modules.dremio.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class Source() diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/SourceMetadataPolicy.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/SourceMetadataPolicy.scala new file mode 100644 index 0000000..1625e08 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/SourceMetadataPolicy.scala @@ -0,0 +1,10 @@ +package com.harana.modules.dremio.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class SourceMetadataPolicy(authTTLMs: Long, + datasetRefreshAfterMs: Long, + datasetExpireAfterMs: Long, + namesRefreshMs: Long, + datasetUpdateMode: DatasetUpdateMode) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/Space.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/Space.scala new file mode 100644 index 0000000..344de41 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/Space.scala @@ -0,0 +1,11 @@ +package com.harana.modules.dremio.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class Space(entityType: String = "space", + id: String, + name: String, + tag: String, + path: String = "", + children: List[EntitySummary]) extends Entity \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/package.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/package.scala new file mode 100644 index 0000000..7271772 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/package.scala @@ -0,0 +1,150 @@ +package com.harana.modules.dremio + +import enumeratum.values._ +import io.circe.generic.JsonCodec + +package object models { + + type EntityId = String + type JobId = String + + sealed abstract class AWSElasticsearchAuthType(val value: String) extends StringEnumEntry + case object AWSElasticsearchAuthType extends StringEnum[AWSElasticsearchAuthType] with StringCirceEnum[AWSElasticsearchAuthType] { + case object AccessKey extends AWSElasticsearchAuthType("ACCESS_KEY") + case object EC2Metadata extends AWSElasticsearchAuthType("EC2_METADATA") + case object None extends AWSElasticsearchAuthType("NONE") + val values = findValues + } + + sealed abstract class AWSElasticsearchEncryptionValidationMode(val value: String) extends StringEnumEntry + case object AWSElasticsearchEncryptionValidationMode extends StringEnum[AWSElasticsearchEncryptionValidationMode] with StringCirceEnum[AWSElasticsearchEncryptionValidationMode] { + case object CertificateAndHostnameValidation extends AWSElasticsearchEncryptionValidationMode("CERTIFICATE_AND_HOSTNAME_VALIDATION") + case object CertificateOnlyValidation extends AWSElasticsearchEncryptionValidationMode("CERTIFICATE_ONLY_VALIDATION") + case object NoValidation extends AWSElasticsearchEncryptionValidationMode("NO_VALIDATION") + val values = findValues + } + + sealed abstract class StandardAuthType(val value: String) extends StringEnumEntry + case object StandardAuthType extends StringEnum[StandardAuthType] with StringCirceEnum[StandardAuthType] { + case object Anonymous extends 
StandardAuthType("ANONYMOUS") + case object Master extends StandardAuthType("MASTER") + val values = findValues + } + + sealed abstract class DatasetUpdateMode(val value: String) extends StringEnumEntry + case object DatasetUpdateMode extends StringEnum[DatasetUpdateMode] with StringCirceEnum[DatasetUpdateMode] { + case object Anonymous extends DatasetUpdateMode("PREFETCH") + case object Master extends DatasetUpdateMode("PREFETCH_QUERIED") + case object Inline extends DatasetUpdateMode("INLINE") + val values = findValues + } + + sealed abstract class ContainerType(val value: String) extends StringEnumEntry + case object ContainerType extends StringEnum[ContainerType] with StringCirceEnum[ContainerType] { + case object Home extends ContainerType("HOME") + case object Folder extends ContainerType("FOLDER") + case object Source extends ContainerType("SOURCE") + case object Space extends ContainerType("SPACE") + val values = findValues + } + + sealed abstract class DatasetType(val value: String) extends StringEnumEntry + case object DatasetType extends StringEnum[DatasetType] with StringCirceEnum[DatasetType] { + case object Physical extends DatasetType("PHYSICAL_DATASET") + case object Virtual extends DatasetType("VIRTUAL_DATASET") + val values = findValues + } + + sealed abstract class EntitySummaryType(val value: String) extends StringEnumEntry + case object EntitySummaryType extends StringEnum[EntitySummaryType] with StringCirceEnum[EntitySummaryType] { + case object Dataset extends EntitySummaryType("DATASET") + case object Container extends EntitySummaryType("CONTAINER") + case object File extends EntitySummaryType("FILE") + val values = findValues + } + + sealed abstract class EntitySummaryDatasetType(val value: String) extends StringEnumEntry + case object EntitySummaryDatasetType extends StringEnum[EntitySummaryDatasetType] with StringCirceEnum[EntitySummaryDatasetType] { + case object Virtual extends EntitySummaryDatasetType("VIRTUAL") + case object Promoted extends EntitySummaryDatasetType("PROMOTED") + case object Direct extends EntitySummaryDatasetType("DIRECT") + val values = findValues + } + + sealed abstract class AccelerationRefreshPolicyMethod(val value: String) extends StringEnumEntry + case object AccelerationRefreshPolicyMethod extends StringEnum[AccelerationRefreshPolicyMethod] with StringCirceEnum[AccelerationRefreshPolicyMethod] { + case object Full extends AccelerationRefreshPolicyMethod("FULL") + case object Incremental extends AccelerationRefreshPolicyMethod("INCREMENTAL") + val values = findValues + } + + sealed abstract class JobStateJobQueryType(val value: String) extends StringEnumEntry + case object JobStateJobQueryType extends StringEnum[JobStateJobQueryType] with StringCirceEnum[JobStateJobQueryType] { + case object Pending extends JobStateJobQueryType("PENDING") + case object MetadataRetrieval extends JobStateJobQueryType("METADATA_RETRIEVAL") + case object Planning extends JobStateJobQueryType("PLANNING") + case object Queued extends JobStateJobQueryType("QUEUED") + case object EngineStart extends JobStateJobQueryType("ENGINE_START") + case object ExecutionPlanning extends JobStateJobQueryType("EXECUTION_PLANNING") + case object Starting extends JobStateJobQueryType("STARTING") + case object Running extends JobStateJobQueryType("RUNNING") + case object Completed extends JobStateJobQueryType("COMPLETED") + case object Cancelled extends JobStateJobQueryType("CANCELED") + case object Failed extends JobStateJobQueryType("FAILED") + val values = findValues + } + 
+ sealed abstract class JobQueryType(val value: String) extends StringEnumEntry + case object JobQueryType extends StringEnum[JobQueryType] with StringCirceEnum[JobQueryType] { + case object UIRun extends JobQueryType("UI_RUN") + case object UIPreview extends JobQueryType("UI_PREVIEW") + case object UIInternalPreview extends JobQueryType("UI_INTERNAL_PREVIEW") + case object UIInternalRun extends JobQueryType("UI_INTERNAL_RUN") + case object UIExport extends JobQueryType("UI_EXPORT") + case object ODBC extends JobQueryType("ODBC") + case object JDBC extends JobQueryType("JDBC") + case object REST extends JobQueryType("REST") + case object AcceleratorCreate extends JobQueryType("ACCELERATOR_CREATE") + case object AcceleratorDrop extends JobQueryType("ACCELERATOR_DROP") + case object Unknown extends JobQueryType("UNKNOWN") + case object PrepareInernal extends JobQueryType("PREPARE_INTERNAL") + case object AcceleratorExplain extends JobQueryType("ACCELERATOR_EXPLAIN") + case object UIInitialPreview extends JobQueryType("UI_INITIAL_PREVIEW") + val values = findValues + } + + sealed abstract class JobAccelerationRelationshipType(val value: String) extends StringEnumEntry + case object JobAccelerationRelationshipType extends StringEnum[JobAccelerationRelationshipType] with StringCirceEnum[JobAccelerationRelationshipType] { + case object Considered extends JobAccelerationRelationshipType("CONSIDERED") + case object Matched extends JobAccelerationRelationshipType("MATCHED") + case object Chosen extends JobAccelerationRelationshipType("CHOSEN") + val values = findValues + } + + sealed abstract class DatasetFieldName(val value: String) extends StringEnumEntry + case object DatasetFieldName extends StringEnum[DatasetFieldName] with StringCirceEnum[DatasetFieldName] { + case object Struct extends DatasetFieldName("STRUCT") + case object List extends DatasetFieldName("LIST") + case object Union extends DatasetFieldName("UNION") + case object Integer extends DatasetFieldName("INTEGER") + case object Bigint extends DatasetFieldName("BIGINT") + case object Float extends DatasetFieldName("FLOAT") + case object Double extends DatasetFieldName("DOUBLE") + case object Varchar extends DatasetFieldName("VARCHAR") + case object Varbinary extends DatasetFieldName("VARBINARY") + case object Boolean extends DatasetFieldName("BOOLEAN") + case object Decimal extends DatasetFieldName("DECIMAL") + case object Time extends DatasetFieldName("TIME") + case object Date extends DatasetFieldName("DATE") + case object Timestamp extends DatasetFieldName("TIMESTAMP") + case object IntervalDayToSecond extends DatasetFieldName("INTERVAL DAY TO SECOND") + case object IntervalDayToMonth extends DatasetFieldName("INTERVAL YEAR TO MONTH") + val values = findValues + } + + @JsonCodec + case class Host(hostname: String, port: Int) + + @JsonCodec + case class Property(name: String, value: String) +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/sources/AWSElasticsearch.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/AWSElasticsearch.scala new file mode 100644 index 0000000..238aa53 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/AWSElasticsearch.scala @@ -0,0 +1,23 @@ +package com.harana.modules.dremio.models.sources + +import com.harana.modules.dremio.models.{AWSElasticsearchAuthType, AWSElasticsearchEncryptionValidationMode} +import io.circe.generic.JsonCodec + +@JsonCodec +case class AWSElasticsearch(hostname: String, + port: Int, + 
authenticationType: AWSElasticsearchAuthType, + accessKey: String, + accessSecret: String, + overwriteRegion: Boolean, + regionName: String, + scriptsEnabled: Boolean, + showHiddenIndices: Boolean, + showIdColumn: Boolean, + readTimeoutMillis: Long, + scrollTimeoutMillis: Long, + usePainless: Boolean, + scrollSize: Int, + allowPushdownOnNormalizedOrAnalyzedFields: Boolean, + warnOnRowCountMismatch: Boolean, + encryptionValidationMode: AWSElasticsearchEncryptionValidationMode) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/sources/AWSRedshift.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/AWSRedshift.scala new file mode 100644 index 0000000..c2f8755 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/AWSRedshift.scala @@ -0,0 +1,11 @@ +package com.harana.modules.dremio.models.sources + +import com.harana.modules.dremio.models.StandardAuthType +import io.circe.generic.JsonCodec + +@JsonCodec +case class AWSRedshift(username: String, + password: String, + authenticationType: StandardAuthType, + fetchSize: Int, + connectionString: String) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/sources/AWSS3.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/AWSS3.scala new file mode 100644 index 0000000..3a3fbb8 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/AWSS3.scala @@ -0,0 +1,11 @@ +package com.harana.modules.dremio.models.sources + +import com.harana.modules.dremio.models.Property +import io.circe.generic.JsonCodec + +@JsonCodec +case class AWSS3(accessKey: String, + accessSecret: String, + secure: Boolean, + externalBucketList: List[String], + propertyList: List[Property]) diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/sources/AzureDataLakeStorage.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/AzureDataLakeStorage.scala new file mode 100644 index 0000000..20957bc --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/AzureDataLakeStorage.scala @@ -0,0 +1,10 @@ +package com.harana.modules.dremio.models.sources + +import io.circe.generic.JsonCodec + +@JsonCodec +case class AzureDataLakeStorage(mode: String = "CLIENT_KEY", + accountName: String, + clientId: String, + clientKeyRefreshUrl: String, + clientKeyPassword: String) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/sources/AzureStorage.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/AzureStorage.scala new file mode 100644 index 0000000..821abfd --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/AzureStorage.scala @@ -0,0 +1,13 @@ +package com.harana.modules.dremio.models.sources + +import com.harana.modules.dremio.models.Property +import io.circe.generic.JsonCodec + +@JsonCodec +case class AzureStorage(accountKind: String, + accountName: String, + accessKey: String, + enableSSL: Boolean, + rootPath: String, + containers: List[String], + propertyList: List[Property]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/sources/Elasticsearch.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/Elasticsearch.scala new file mode 100644 index 0000000..18d5752 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/Elasticsearch.scala @@ -0,0 +1,19 @@ +package com.harana.modules.dremio.models.sources + +import 
com.harana.modules.dremio.models.{Host, StandardAuthType} +import io.circe.generic.JsonCodec + +@JsonCodec +case class Elasticsearch(username: String, + password: String, + hostList: List[Host], + authenticationType: StandardAuthType, + scriptsEnabled: Option[Boolean], + showHiddenIndices: Option[Boolean], + sslEnabled: Option[Boolean], + showIdColumn: Option[Boolean], + readTimeoutMillis: Option[Long], + scrollTimeoutMillis: Option[Long], + usePainless: Option[Boolean], + useWhitelist: Option[Boolean], + scrollSize: Option[Int]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/sources/HDFS.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/HDFS.scala new file mode 100644 index 0000000..743ed2a --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/HDFS.scala @@ -0,0 +1,11 @@ +package com.harana.modules.dremio.models.sources + +import com.harana.modules.dremio.models.Property +import io.circe.generic.JsonCodec + +@JsonCodec +case class HDFS(hostname: String, + port: String, + kerberosPrincipal: String, + enableSasl: Option[Boolean], + propertyList: List[Property]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/sources/Hive.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/Hive.scala new file mode 100644 index 0000000..d47e317 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/Hive.scala @@ -0,0 +1,11 @@ +package com.harana.modules.dremio.models.sources + +import com.harana.modules.dremio.models.Property +import io.circe.generic.JsonCodec + +@JsonCodec +case class Hive(hostname: String, + port: String, + kerberosPrincipal: String, + enableSasl: Option[Boolean], + propertyList: List[Property]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/sources/MongoDB.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/MongoDB.scala new file mode 100644 index 0000000..c3ae66d --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/MongoDB.scala @@ -0,0 +1,16 @@ +package com.harana.modules.dremio.models.sources + +import com.harana.modules.dremio.models.{Host, Property, StandardAuthType} +import io.circe.generic.JsonCodec + +@JsonCodec +case class MongoDB(username: String, + password: String, + hostList: List[Host], + useSsl: Boolean, + authenticationType: StandardAuthType, + authDatabase: String, + authenticationTimeoutMillis: Long, + secondaryReadsOnly: Boolean, + subpartitionSize: Int, + propertyList: List[Property]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/sources/MySQL.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/MySQL.scala new file mode 100644 index 0000000..1ed551b --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/MySQL.scala @@ -0,0 +1,12 @@ +package com.harana.modules.dremio.models.sources + +import com.harana.modules.dremio.models.StandardAuthType +import io.circe.generic.JsonCodec + +@JsonCodec +case class MySQL(username: String, + password: String, + hostname: String, + port: String, + authenticationType: StandardAuthType, + fetchSize: Int) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/sources/Oracle.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/Oracle.scala new file mode 100644 index 0000000..2ac1e1d --- /dev/null +++ 
b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/Oracle.scala @@ -0,0 +1,13 @@ +package com.harana.modules.dremio.models.sources + +import com.harana.modules.dremio.models.StandardAuthType +import io.circe.generic.JsonCodec + +@JsonCodec +case class Oracle(username: String, + password: String, + instance: String, + hostname: String, + port: String, + authenticationType: StandardAuthType, + fetchSize: Int) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/sources/PostgreSQL.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/PostgreSQL.scala new file mode 100644 index 0000000..0402e0b --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/PostgreSQL.scala @@ -0,0 +1,13 @@ +package com.harana.modules.dremio.models.sources + +import com.harana.modules.dremio.models.StandardAuthType +import io.circe.generic.JsonCodec + +@JsonCodec +case class PostgreSQL(username: String, + password: String, + hostname: String, + port: String, + authenticationType: StandardAuthType, + fetchSize: Int, + databaseName: String) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/sources/SQLServer.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/SQLServer.scala new file mode 100644 index 0000000..b1b51b8 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/SQLServer.scala @@ -0,0 +1,14 @@ +package com.harana.modules.dremio.models.sources + +import com.harana.modules.dremio.models.StandardAuthType +import io.circe.generic.JsonCodec + +@JsonCodec +case class SQLServer(username: String, + password: String, + hostname: String, + port: String, + authenticationType: StandardAuthType, + fetchSize: Int, + database: Option[String], + showOnlyConnectiondatabase: Option[Boolean]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/dremio/models/sources/Snowflake.scala b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/Snowflake.scala new file mode 100644 index 0000000..09f4c4a --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/dremio/models/sources/Snowflake.scala @@ -0,0 +1,10 @@ +package com.harana.modules.dremio.models.sources + +import com.harana.modules.dremio.models.Property +import io.circe.generic.JsonCodec + +@JsonCodec +case class Snowflake(account: String, + username: String, + password: String, + propertyList: List[Property]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/email/Email.scala b/jvm/src/main/scala/com/harana/modules/email/Email.scala new file mode 100644 index 0000000..3c6562b --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/email/Email.scala @@ -0,0 +1,18 @@ +package com.harana.modules.email + +import org.apache.commons.mail.EmailException +import zio.IO +import zio.macros.accessible + +@accessible +trait Email { + + def isValid(email: String): Boolean + + def domain(email: String): String + + def obfuscate(email: String): String + + def send(message: EmailMessage): IO[EmailException, String] + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/email/LiveEmail.scala b/jvm/src/main/scala/com/harana/modules/email/LiveEmail.scala new file mode 100644 index 0000000..6a039dc --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/email/LiveEmail.scala @@ -0,0 +1,78 @@ +package com.harana.modules.email + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import 
com.harana.modules.core.micrometer.Micrometer +import com.harana.modules.email.models.{EmailAddress => hrmcEmailAddress} +import org.apache.commons.mail.{EmailAttachment, EmailException, HtmlEmail, MultiPartEmail, SimpleEmail} +import zio.{IO, ZIO, ZLayer} + +import javax.mail.internet.InternetAddress +import scala.jdk.CollectionConverters._ + +object LiveEmail { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveEmail(config, logger, micrometer) + } +} + +case class LiveEmail(config: Config, logger: Logger, micrometer: Micrometer) extends Email { + + def isValid(email: String): Boolean = + hrmcEmailAddress.isValid(email) + + def domain(email: String): String = + hrmcEmailAddress.Domain(email) + + def obfuscate(email: String): String = + hrmcEmailAddress(email).obfuscated.value + + def send(message: EmailMessage): IO[EmailException, String] = { + + val format = + if (message.attachments.nonEmpty) MultiPart + else if (message.richMessage.nonEmpty) Rich + else Plain + + val commonsMail = format match { + case Plain => new SimpleEmail().setMsg(message.message) + case Rich => new HtmlEmail().setHtmlMsg(message.richMessage.get).setTextMsg(message.message) + case MultiPart => + val multipartEmail = new MultiPartEmail() + message.attachments.foreach { file => + val attachment = new EmailAttachment() + attachment.setPath(file.getAbsolutePath) + attachment.setDisposition(EmailAttachment.ATTACHMENT) + attachment.setName(file.getName) + multipartEmail.attach(attachment) + } + multipartEmail.setMsg(message.message) + } + + message.to.foreach(ea => commonsMail.addTo(ea)) + message.cc.foreach(cc => commonsMail.addCc(cc)) + message.bcc.foreach(bcc => commonsMail.addBcc(bcc)) + + for { + host <- config.secret("email-host") + auth <- config.boolean("email.useAuthentication", default = false) + username <- config.secret("email-username") + password <- config.secret("email-password") + ssl <- config.boolean("email.useSSL", default = true) + port <- config.int("email.port", if (ssl) 25 else 587) + } yield { + commonsMail.setHostName(host) + if (auth) commonsMail.setAuthentication(username, password) + commonsMail.setSSLOnConnect(ssl) + commonsMail.setSmtpPort(port) + commonsMail.setFrom(message.from._1, message.from._2) + commonsMail.setSubject(message.subject) + if (message.replyTo.nonEmpty) commonsMail.setReplyTo(List(new InternetAddress(message.replyTo.get)).asJava) + commonsMail.send() + } + } +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/email/models/EmailAddress.scala b/jvm/src/main/scala/com/harana/modules/email/models/EmailAddress.scala new file mode 100644 index 0000000..5083783 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/email/models/EmailAddress.scala @@ -0,0 +1,29 @@ +package com.harana.modules.email.models + +case class EmailAddress(value: String) extends StringValue { + + val (mailbox, domain): (EmailAddress.Mailbox, EmailAddress.Domain) = value match { + case EmailAddress.validEmail(m, d) => (EmailAddress.Mailbox(m), EmailAddress.Domain(d)) + case invalidEmail => throw new IllegalArgumentException(s"'$invalidEmail' is not a valid email address") + } + + lazy val obfuscated = ObfuscatedEmailAddress.apply(value) +} + +object EmailAddress { + final val validDomain = """^([a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*)$""".r + final val validEmail = """^([a-zA-Z0-9.!#$%&’'*+/=?^_`{|}~-]+)@([a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*)$""".r + + def isValid(email: String) = 
email match { + case validEmail(_,_) => true + case invalidEmail => false + } + + case class Mailbox private[EmailAddress] (value: String) extends StringValue + case class Domain(value: String) extends StringValue { + value match { + case EmailAddress.validDomain(_) => // + case invalidDomain => throw new IllegalArgumentException(s"'$invalidDomain' is not a valid email domain") + } + } +} diff --git a/jvm/src/main/scala/com/harana/modules/email/models/ObfuscatedEmailAddress.scala b/jvm/src/main/scala/com/harana/modules/email/models/ObfuscatedEmailAddress.scala new file mode 100644 index 0000000..b68fce7 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/email/models/ObfuscatedEmailAddress.scala @@ -0,0 +1,30 @@ +package com.harana.modules.email.models + +trait ObfuscatedEmailAddress { + val value: String + override def toString: String = value +} + +object ObfuscatedEmailAddress { + final val shortMailbox = "(.{1,2})".r + final val longMailbox = "(.)(.*)(.)".r + + import EmailAddress.validEmail + + implicit def obfuscatedEmailToString(e: ObfuscatedEmailAddress): String = e.value + + def apply(plainEmailAddress: String): ObfuscatedEmailAddress = new ObfuscatedEmailAddress { + val value = plainEmailAddress match { + case validEmail(shortMailbox(m), domain) => + s"${obscure(m)}@$domain" + + case validEmail(longMailbox(firstLetter,middle,lastLetter), domain) => + s"$firstLetter${obscure(middle)}$lastLetter@$domain" + + case invalidEmail => + throw new IllegalArgumentException(s"Cannot obfuscate invalid email address '$invalidEmail'") + } + } + + private def obscure(text: String) = "*" * text.length +} diff --git a/jvm/src/main/scala/com/harana/modules/email/models/StringValue.scala b/jvm/src/main/scala/com/harana/modules/email/models/StringValue.scala new file mode 100644 index 0000000..d6534a2 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/email/models/StringValue.scala @@ -0,0 +1,11 @@ +package com.harana.modules.email.models + +object StringValue { + import scala.language.implicitConversions + implicit def stringValueToString(e: StringValue): String = e.value +} + +trait StringValue { + def value: String + override def toString: String = value +} diff --git a/jvm/src/main/scala/com/harana/modules/email/package.scala b/jvm/src/main/scala/com/harana/modules/email/package.scala new file mode 100644 index 0000000..e31a593 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/email/package.scala @@ -0,0 +1,23 @@ +package com.harana.modules + +import com.harana.modules.email.models.EmailAddress + +import java.io.File + +package object email { + + sealed abstract class MailType + case object Plain extends MailType + case object Rich extends MailType + case object MultiPart extends MailType + + case class EmailMessage(from: (EmailAddress, String), + replyTo: Option[EmailAddress] = None, + to: List[EmailAddress], + cc: List[EmailAddress] = List(), + bcc: List[EmailAddress] = List(), + subject: String, + message: String, + richMessage: Option[String] = None, + attachments: List[File] = List()) +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/facebook/Facebook.scala b/jvm/src/main/scala/com/harana/modules/facebook/Facebook.scala new file mode 100644 index 0000000..1cc3190 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/facebook/Facebook.scala @@ -0,0 +1,188 @@ +package com.harana.modules.facebook + +import com.facebook.ads.sdk.Campaign.{EnumBidStrategy, EnumObjective, EnumSpecialAdCategory} +import com.facebook.ads.sdk._ +import zio.Task 
+import zio.macros.accessible + +@accessible +trait Facebook { + + def createAd(adAccountId: String, + adLabels: Option[String] = None, + adsetId: Option[String] = None, + adsetSpec: Option[AdSet] = None, + audienceId: Option[String] = None, + bidAmount: Option[Long] = None, + creative: Option[AdCreative] = None, + dateFormat: Option[String] = None, + displaySequence: Option[Long] = None, + draftAdgroupId: Option[String] = None, + engagementAudience: Option[Boolean] = None, + executionOptions: List[Ad.EnumExecutionOptions] = List(), + includeDemoLinkHashes: Option[Boolean] = None, + name: Option[String] = None, + priority: Option[Long] = None, + sourceAdId: Option[String] = None, + status: Option[Ad.EnumStatus] = None, + trackingSpecs: Option[String] = None): Task[Ad] + + def createCampaign(adAccountId: String, + adLabels: Option[String] = None, + bidStrategy: Option[EnumBidStrategy] = None, + budgetRebalanceFlag: Option[Boolean] = None, + buyingType: Option[String] = None, + dailyBudget: Option[Long] = None, + executionOptions: List[Campaign.EnumExecutionOptions] = List(), + iterativeSplitTestConfigs: Option[String] = None, + lifetimeBudget: Option[Long] = None, + name: Option[String] = None, + objective: Option[EnumObjective] = None, + pacingTypes: List[String] = List(), + promotedObject: Option[String] = None, + sourceCampaignId: Option[String] = None, + specialAdCategory: Option[EnumSpecialAdCategory] = None, + spendCap: Option[Long] = None, + status: Option[Campaign.EnumStatus] = None, + topLineId: Option[String] = None, + upstreamEvents: Map[String, String] = Map()): Task[Campaign] + + def adAccount(adAccountId: String): Task[AdAccount] + + def adActivities(adAccountId: String): Task[List[AdActivity]] + + def adCreatives(adAccountId: String): Task[List[AdCreative]] + + def adCreativesByLabels(adAccountId: String): Task[List[AdCreative]] + + def adImages(adAccountId: String): Task[List[AdImage]] + + def adLabels(adAccountId: String): Task[List[AdLabel]] + + def adPlacePageSets(adAccountId: String): Task[List[AdPlacePageSet]] + + def adPlayables(adAccountId: String): Task[List[PlayableContent]] + + def adRulesHistory(adAccountId: String): Task[List[AdAccountAdRulesHistory]] + + def adRulesLibrary(adAccountId: String): Task[List[AdRule]] + + def ads(adAccountId: String): Task[List[Ad]] + + def adsByLabels(adAccountId: String): Task[List[Ad]] + + def adSets(adAccountId: String): Task[List[AdSet]] + + def adSetsByLabels(adAccountId: String): Task[List[AdSet]] + + def adPixels(adAccountId: String): Task[List[AdsPixel]] + + def adStudies(adAccountId: String): Task[List[AdStudy]] + + def adVolume(adAccountId: String): Task[List[AdAccountAdVolume]] + + def adAdvertisableApplications(adAccountId: String): Task[List[Application]] + + def adAffectedAdSets(adAccountId: String): Task[List[AdSet]] + + def adAgencies(adAccountId: String): Task[List[Business]] + + def adApplications(adAccountId: String): Task[List[Application]] + + def adAssignedUsers(adAccountId: String): Task[List[AssignedUser]] + + def adAsyncRequests(adAccountId: String): Task[List[AsyncRequest]] + + def adCampaigns(adAccountId: String): Task[List[Campaign]] + + def adCampaignsByLabels(adAccountId: String): Task[List[Campaign]] + + def adCustomAudiences(adAccountId: String): Task[List[CustomAudience]] + + def adCustomAudiencesTOS(adAccountId: String): Task[List[CustomAudiencesTOS]] + + def adCustomConversions(adAccountId: String): Task[List[CustomConversion]] + + def adDeliveryEstimate(adAccountId: String): 
Task[List[AdAccountDeliveryEstimate]] + + def adDeprecatedTargetingAdSets(adAccountId: String): Task[List[AdSet]] + + def adImpactingAdStudies(adAccountId: String): Task[List[AdStudy]] + + def adInsights(adAccountId: String): Task[List[AdsInsights]] + + def adInstagramAccounts(adAccountId: String): Task[List[InstagramUser]] + + def adMatchedSearchApplications(adAccountId: String): Task[List[AdAccountMatchedSearchApplicationsEdgeData]] + + def adMaxBid(adAccountId: String): Task[List[AdAccountMaxBid]] + + def adMinimumBudgets(adAccountId: String): Task[List[MinimumBudget]] + + def adOfflineConversionDataSets(adAccountId: String): Task[List[OfflineConversionDataSet]] + + def adOnBehalfRequests(adAccountId: String): Task[List[BusinessOwnedObjectOnBehalfOfRequest]] + + def adPromotePages(adAccountId: String): Task[List[Page]] + + def adPublisherBlockLists(adAccountId: String): Task[List[PublisherBlockList]] + + def adReachEstimate(adAccountId: String): Task[List[AdAccountReachEstimate]] + + def adReachFrequencyPredictions(adAccountId: String): Task[List[ReachFrequencyPrediction]] + + def adSavedAudiences(adAccountId: String): Task[List[SavedAudience]] + + def adSubscribedApps(adAccountId: String): Task[List[AdAccountSubscribedApps]] + + def adTargetingBrowse(adAccountId: String): Task[List[AdAccountTargetingUnified]] + + def adTargetingSearch(adAccountId: String): Task[List[AdAccountTargetingUnified]] + + def adTargetingSentenceLines(adAccountId: String): Task[List[TargetingSentenceLine]] + + def adTargetingSuggestions(adAccountId: String): Task[List[AdAccountTargetingUnified]] + + def adTargetingValidation(adAccountId: String): Task[List[AdAccountTargetingUnified]] + + def adTracking(adAccountId: String): Task[List[AdAccountTrackingData]] + + def adUsers(adAccountId: String): Task[List[AdAccountUser]] + + def userAccounts(userId: String): Task[List[Page]] + + def userAdAccounts(userId: String): Task[List[AdAccount]] + + def userAdStudies(userId: String): Task[List[AdStudy]] + + def userAlbums(userId: String): Task[List[Album]] + + def userAppRequests(userId: String): Task[List[AppRequest]] + + def userAssignedAdAccounts(userId: String): Task[List[AdAccount]] + + def userAssignedBusinessAssetGroups(userId: String): Task[List[BusinessAssetGroup]] + + def userAssignedPages(userId: String): Task[List[Page]] + + def userAssignedProductCatalogs(userId: String): Task[List[ProductCatalog]] + + def userBusinesses(userId: String): Task[List[Business]] + + def userBusinessUsers(userId: String): Task[List[BusinessUser]] + + def userConversations(userId: String): Task[List[UnifiedThread]] + + def userCustomLabels(userId: String): Task[List[PageUserMessageThreadLabel]] + + def userEvents(userId: String): Task[List[Event]] + + def userFriends(userId: String): Task[List[User]] + + def userAdVideos(userId: String): Task[List[AdVideo]] + + def video(videoId: String): Task[AdVideo] + + def videoThumbnails(videoId: String): Task[List[VideoThumbnail]] + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/facebook/LiveFacebook.scala b/jvm/src/main/scala/com/harana/modules/facebook/LiveFacebook.scala new file mode 100644 index 0000000..ea34cd5 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/facebook/LiveFacebook.scala @@ -0,0 +1,350 @@ +package com.harana.modules.facebook + +import com.facebook.ads.sdk.Campaign.{EnumBidStrategy, EnumObjective, EnumSpecialAdCategory} +import com.facebook.ads.sdk._ +import com.harana.modules.core.config.Config +import 
com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import zio.ZIO.attemptBlocking +import zio.{Task, ZIO, ZLayer} + +import scala.jdk.CollectionConverters._ + +object LiveFacebook { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveFacebook(config, logger, micrometer) + } +} + +case class LiveFacebook(config: Config, logger: Logger, micrometer: Micrometer) extends Facebook { + + private val apiContext = for { + accessToken <- config.secret("facebook-access-token") + appSecret <- config.secret("facebook-app-secret") + } yield { + new APIContext(accessToken, appSecret) + } + + def createAd(adAccountId: String, + adLabels: Option[String] = None, + adsetId: Option[String] = None, + adsetSpec: Option[AdSet] = None, + audienceId: Option[String] = None, + bidAmount: Option[Long] = None, + creative: Option[AdCreative] = None, + dateFormat: Option[String] = None, + displaySequence: Option[Long] = None, + draftAdgroupId: Option[String] = None, + engagementAudience: Option[Boolean] = None, + executionOptions: List[Ad.EnumExecutionOptions] = List(), + includeDemoLinkHashes: Option[Boolean] = None, + name: Option[String] = None, + priority: Option[Long] = None, + sourceAdId: Option[String] = None, + status: Option[Ad.EnumStatus] = None, + trackingSpecs: Option[String] = None): Task[Ad] = + for { + ac <- apiContext + ad <- ZIO.attempt { + var ad = new AdAccount(adAccountId, ac).createAd() + if (adLabels.nonEmpty) ad.setAdlabels(adLabels.get) + if (adsetId.nonEmpty) ad.setAdsetId(adsetId.get) + if (adsetSpec.nonEmpty) ad.setAdsetSpec(adsetSpec.get) + if (audienceId.nonEmpty) ad.setAudienceId(audienceId.get) + if (bidAmount.nonEmpty) ad.setBidAmount(bidAmount.get) + if (creative.nonEmpty) ad.setCreative(creative.get) + if (dateFormat.nonEmpty) ad.setDateFormat(dateFormat.get) + if (displaySequence.nonEmpty) ad.setDisplaySequence(displaySequence.get) + if (draftAdgroupId.nonEmpty) ad.setDraftAdgroupId(draftAdgroupId.get) + if (engagementAudience.nonEmpty) ad.setEngagementAudience(engagementAudience.get) + ad.setExecutionOptions(executionOptions.asJava) + if (includeDemoLinkHashes.nonEmpty) ad.setIncludeDemolinkHashes(includeDemoLinkHashes.get) + if (name.nonEmpty) ad.setName(name.get) + if (priority.nonEmpty) ad.setPriority(priority.get) + if (sourceAdId.nonEmpty) ad.setSourceAdId(sourceAdId.get) + if (status.nonEmpty) ad.setStatus(status.get) + if (trackingSpecs.nonEmpty) ad.setTrackingSpecs(trackingSpecs.get) + + ad.execute() + } + } yield ad + + def createCampaign(adAccountId: String, + adLabels: Option[String] = None, + bidStrategy: Option[EnumBidStrategy] = None, + budgetRebalanceFlag: Option[Boolean] = None, + buyingType: Option[String] = None, + dailyBudget: Option[Long] = None, + executionOptions: List[Campaign.EnumExecutionOptions] = List(), + iterativeSplitTestConfigs: Option[String] = None, + lifetimeBudget: Option[Long] = None, + name: Option[String] = None, + objective: Option[EnumObjective] = None, + pacingTypes: List[String] = List(), + promotedObject: Option[String] = None, + sourceCampaignId: Option[String] = None, + specialAdCategory: Option[EnumSpecialAdCategory] = None, + spendCap: Option[Long] = None, + status: Option[Campaign.EnumStatus] = None, + topLineId: Option[String] = None, + upstreamEvents: Map[String, String] = Map()): Task[Campaign] = + for { + ac <- apiContext + campaign <- ZIO.attempt { + var campaign = new AdAccount(adAccountId, 
ac).createCampaign() + if (adLabels.nonEmpty) campaign = campaign.setAdlabels(adLabels.get) + if (bidStrategy.nonEmpty) campaign = campaign.setBidStrategy(bidStrategy.get) + if (buyingType.nonEmpty) campaign.setBuyingType(buyingType.get) + if (dailyBudget.nonEmpty) campaign.setDailyBudget(dailyBudget.get) + campaign.setExecutionOptions(executionOptions.asJava) + if (iterativeSplitTestConfigs.nonEmpty) campaign.setIterativeSplitTestConfigs(iterativeSplitTestConfigs.get) + if (lifetimeBudget.nonEmpty) campaign.setLifetimeBudget(lifetimeBudget.get) + if (name.nonEmpty) campaign.setName(name.get) + if (objective.nonEmpty) campaign.setObjective(objective.get) + campaign.setPacingType(pacingTypes.asJava) + if (promotedObject.nonEmpty) campaign.setPromotedObject(promotedObject.get) + if (sourceCampaignId.nonEmpty) campaign.setSourceCampaignId(sourceCampaignId.get) + if (spendCap.nonEmpty) campaign.setSpendCap(spendCap.get) + if (status.nonEmpty) campaign.setStatus(status.get) + if (topLineId.nonEmpty) campaign.setToplineId(topLineId.get) + campaign.setUpstreamEvents(upstreamEvents.asJava) + campaign.execute() + } + } yield campaign + + def adAccount(adAccountId: String): Task[AdAccount] = + for { + ac <- apiContext + pages <- ZIO.attempt(new AdAccount(adAccountId, ac).get.requestAllFields.execute()) + } yield pages + + def adActivities(adAccountId: String): Task[List[AdActivity]] = + adAccount(adAccountId, _.getActivities.requestAllFields.execute()) + + def adCreatives(adAccountId: String): Task[List[AdCreative]] = + adAccount(adAccountId, _.getAdCreatives.requestAllFields.execute()) + + def adCreativesByLabels(adAccountId: String): Task[List[AdCreative]] = + adAccount(adAccountId, _.getAdCreativesByLabels.requestAllFields.execute()) + + def adImages(adAccountId: String): Task[List[AdImage]] = + adAccount(adAccountId, _.getAdImages.requestAllFields.execute()) + + def adLabels(adAccountId: String): Task[List[AdLabel]] = + adAccount(adAccountId, _.getAdLabels.requestAllFields.execute()) + + def adPlacePageSets(adAccountId: String): Task[List[AdPlacePageSet]] = + adAccount(adAccountId, _.getAdPlacePageSets.requestAllFields.execute()) + + def adPlayables(adAccountId: String): Task[List[PlayableContent]] = + adAccount(adAccountId, _.getAdPlayables.requestAllFields.execute()) + + def adRulesHistory(adAccountId: String): Task[List[AdAccountAdRulesHistory]] = + adAccount(adAccountId, _.getAdRulesHistory.requestAllFields.execute()) + + def adRulesLibrary(adAccountId: String): Task[List[AdRule]] = + adAccount(adAccountId, _.getAdRulesLibrary.requestAllFields.execute()) + + def ads(adAccountId: String): Task[List[Ad]] = + adAccount(adAccountId, _.getAds.requestAllFields.execute()) + + def adsByLabels(adAccountId: String): Task[List[Ad]] = + adAccount(adAccountId, _.getAdsByLabels.requestAllFields.execute()) + + def adSets(adAccountId: String): Task[List[AdSet]] = + adAccount(adAccountId, _.getAdSets.requestAllFields.execute()) + + def adSetsByLabels(adAccountId: String): Task[List[AdSet]] = + adAccount(adAccountId, _.getAdSetsByLabels.requestAllFields.execute()) + + def adPixels(adAccountId: String): Task[List[AdsPixel]] = + adAccount(adAccountId, _.getAdsPixels.requestAllFields.execute()) + + def adStudies(adAccountId: String): Task[List[AdStudy]] = + adAccount(adAccountId, _.getAdStudies.requestAllFields.execute()) + + def adVolume(adAccountId: String): Task[List[AdAccountAdVolume]] = + adAccount(adAccountId, _.getAdsVolume.requestAllFields.execute()) + + def adAdvertisableApplications(adAccountId: 
String): Task[List[Application]] = + adAccount(adAccountId, _.getAdvertisableApplications.requestAllFields.execute()) + + def adAffectedAdSets(adAccountId: String): Task[List[AdSet]] = + adAccount(adAccountId, _.getAffectedAdSets.requestAllFields.execute()) + + def adAgencies(adAccountId: String): Task[List[Business]] = + adAccount(adAccountId, _.getAgencies.requestAllFields.execute()) + + def adApplications(adAccountId: String): Task[List[Application]] = + adAccount(adAccountId, _.getApplications.requestAllFields.execute()) + + def adAssignedUsers(adAccountId: String): Task[List[AssignedUser]] = + adAccount(adAccountId, _.getAssignedUsers.requestAllFields.execute()) + + def adAsyncRequests(adAccountId: String): Task[List[AsyncRequest]] = + adAccount(adAccountId, _.getAsyncRequests.requestAllFields.execute()) + + def adCampaigns(adAccountId: String): Task[List[Campaign]] = + adAccount(adAccountId, _.getCampaigns.requestAllFields.execute()) + + def adCampaignsByLabels(adAccountId: String): Task[List[Campaign]] = + adAccount(adAccountId, _.getCampaignsByLabels.requestAllFields.execute()) + + def adCustomAudiences(adAccountId: String): Task[List[CustomAudience]] = + adAccount(adAccountId, _.getCustomAudiences.requestAllFields.execute()) + + def adCustomAudiencesTOS(adAccountId: String): Task[List[CustomAudiencesTOS]] = + adAccount(adAccountId, _.getCustomAudiencesTos.requestAllFields.execute()) + + def adCustomConversions(adAccountId: String): Task[List[CustomConversion]] = + adAccount(adAccountId, _.getCustomConversions.requestAllFields.execute()) + + def adDeliveryEstimate(adAccountId: String): Task[List[AdAccountDeliveryEstimate]] = + adAccount(adAccountId, _.getDeliveryEstimate.requestAllFields.execute()) + + def adDeprecatedTargetingAdSets(adAccountId: String): Task[List[AdSet]] = + adAccount(adAccountId, _.getDeprecatedTargetingAdSets.requestAllFields.execute()) + + def adImpactingAdStudies(adAccountId: String): Task[List[AdStudy]] = + adAccount(adAccountId, _.getImpactingAdStudies.requestAllFields.execute()) + + def adInsights(adAccountId: String): Task[List[AdsInsights]] = + adAccount(adAccountId, _.getInsights.requestAllFields.execute()) + + def adInstagramAccounts(adAccountId: String): Task[List[InstagramUser]] = + adAccount(adAccountId, _.getInstagramAccounts.requestAllFields.execute()) + + def adMatchedSearchApplications(adAccountId: String): Task[List[AdAccountMatchedSearchApplicationsEdgeData]] = + adAccount(adAccountId, _.getMatchedSearchApplications.requestAllFields.execute()) + + def adMaxBid(adAccountId: String): Task[List[AdAccountMaxBid]] = + adAccount(adAccountId, _.getMaxBid.requestAllFields.execute()) + + def adMinimumBudgets(adAccountId: String): Task[List[MinimumBudget]] = + adAccount(adAccountId, _.getMinimumBudgets.requestAllFields.execute()) + + def adOfflineConversionDataSets(adAccountId: String): Task[List[OfflineConversionDataSet]] = + adAccount(adAccountId, _.getOfflineConversionDataSets.requestAllFields.execute()) + + def adOnBehalfRequests(adAccountId: String): Task[List[BusinessOwnedObjectOnBehalfOfRequest]] = + adAccount(adAccountId, _.getOnBehalfRequests.requestAllFields.execute()) + + def adPromotePages(adAccountId: String): Task[List[Page]] = + adAccount(adAccountId, _.getPromotePages.requestAllFields.execute()) + + def adPublisherBlockLists(adAccountId: String): Task[List[PublisherBlockList]] = + adAccount(adAccountId, _.getPublisherBlockLists.requestAllFields.execute()) + + def adReachEstimate(adAccountId: String): Task[List[AdAccountReachEstimate]] = 
+ adAccount(adAccountId, _.getReachEstimate.requestAllFields.execute()) + + def adReachFrequencyPredictions(adAccountId: String): Task[List[ReachFrequencyPrediction]] = + adAccount(adAccountId, _.getReachFrequencyPredictions.requestAllFields.execute()) + + def adSavedAudiences(adAccountId: String): Task[List[SavedAudience]] = + adAccount(adAccountId, _.getSavedAudiences.requestAllFields.execute()) + + def adSubscribedApps(adAccountId: String): Task[List[AdAccountSubscribedApps]] = + adAccount(adAccountId, _.getSubscribedApps.requestAllFields.execute()) + + def adTargetingBrowse(adAccountId: String): Task[List[AdAccountTargetingUnified]] = + adAccount(adAccountId, _.getTargetingBrowse.requestAllFields.execute()) + + def adTargetingSearch(adAccountId: String): Task[List[AdAccountTargetingUnified]] = + adAccount(adAccountId, _.getTargetingSearch.requestAllFields.execute()) + + def adTargetingSentenceLines(adAccountId: String): Task[List[TargetingSentenceLine]] = + adAccount(adAccountId, _.getTargetingSentenceLines.requestAllFields.execute()) + + def adTargetingSuggestions(adAccountId: String): Task[List[AdAccountTargetingUnified]] = + adAccount(adAccountId, _.getTargetingSuggestions.requestAllFields.execute()) + + def adTargetingValidation(adAccountId: String): Task[List[AdAccountTargetingUnified]] = + adAccount(adAccountId, _.getTargetingValidation.requestAllFields.execute()) + + def adTracking(adAccountId: String): Task[List[AdAccountTrackingData]] = + adAccount(adAccountId, _.getTracking.requestAllFields.execute()) + + def adUsers(adAccountId: String): Task[List[AdAccountUser]] = + adAccount(adAccountId, _.getUsers.requestAllFields.execute()) + + def userAccounts(userId: String): Task[List[Page]] = + user(userId, _.getAccounts.requestAllFields.execute()) + + def userAdAccounts(userId: String): Task[List[AdAccount]] = + user(userId, _.getAdAccounts.requestAllFields.execute()) + + def userAdStudies(userId: String): Task[List[AdStudy]] = + user(userId, _.getAdStudies.requestAllFields.execute()) + + def userAlbums(userId: String): Task[List[Album]] = + user(userId, _.getAlbums.requestAllFields.execute()) + + def userAppRequests(userId: String): Task[List[AppRequest]] = + user(userId, _.getAppRequests.requestAllFields.execute()) + + def userAssignedAdAccounts(userId: String): Task[List[AdAccount]] = + user(userId, _.getAssignedAdAccounts.requestAllFields.execute()) + + def userAssignedBusinessAssetGroups(userId: String): Task[List[BusinessAssetGroup]] = + user(userId, _.getAssignedBusinessAssetGroups.requestAllFields.execute()) + + def userAssignedPages(userId: String): Task[List[Page]] = + user(userId, _.getAssignedPages.requestAllFields.execute()) + + def userAssignedProductCatalogs(userId: String): Task[List[ProductCatalog]] = + user(userId, _.getAssignedProductCatalogs.requestAllFields.execute()) + + def userBusinesses(userId: String): Task[List[Business]] = + user(userId, _.getBusinesses.requestAllFields.execute()) + + def userBusinessUsers(userId: String): Task[List[BusinessUser]] = + user(userId, _.getBusinessUsers.requestAllFields.execute()) + + def userConversations(userId: String): Task[List[UnifiedThread]] = + user(userId, _.getConversations.requestAllFields.execute()) + + def userCustomLabels(userId: String): Task[List[PageUserMessageThreadLabel]] = + user(userId, _.getCustomLabels.requestAllFields.execute()) + + def userEvents(userId: String): Task[List[Event]] = + user(userId, _.getEvents.requestAllFields.execute()) + + def userFriends(userId: String): Task[List[User]] = + 
user(userId, _.getFriends.requestAllFields.execute()) + + def userAdVideos(userId: String): Task[List[AdVideo]] = + user(userId, _.getVideos.requestAllFields.execute()) + + def video(videoId: String): Task[AdVideo] = + for { + ac <- apiContext + video <- ZIO.attempt(new AdVideo(videoId, ac).get().requestAllFields.execute()) + } yield video + + def videoThumbnails(videoId: String): Task[List[VideoThumbnail]] = + for { + ac <- apiContext + thumbnails <- ZIO.attempt(new AdVideo(videoId, ac).getThumbnails.requestAllFields.execute()) + } yield thumbnails + + + private def adAccount[A <: APINode](adAccountId: String, fn: AdAccount => APINodeList[A]): Task[List[A]] = + for { + ac <- apiContext + list <- attemptBlocking(fn(new AdAccount(adAccountId, ac))) + } yield list + + private def user[A <: APINode](userId: String, fn: User => APINodeList[A]): Task[List[A]] = + for { + ac <- apiContext + list <- attemptBlocking(fn(new User(userId, ac))) + } yield list + + private implicit def toList[A <: APINode](nodeList: APINodeList[A]): List[A] = + nodeList.iterator().asScala.toList +} diff --git a/jvm/src/main/scala/com/harana/modules/file/File.scala b/jvm/src/main/scala/com/harana/modules/file/File.scala new file mode 100644 index 0000000..b5767c4 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/file/File.scala @@ -0,0 +1,41 @@ +package com.harana.modules.file + +import io.circe.{Decoder, Encoder} +import io.vertx.core.buffer.Buffer +import io.vertx.core.streams.ReadStream +import io.vertx.ext.reactivestreams.ReactiveWriteStream +import one.jasyncfio.AsyncFile +import zio.Task +import zio.macros.accessible + +import java.nio.ByteBuffer +import java.nio.file.Path + +@accessible +trait File { + + def readStream(path: Path, range: Option[(Long, Long)] = None): Task[ReadStream[Buffer]] + def read(file: Either[Path, AsyncFile], buffer: ByteBuffer, position: Option[Int] = None): Task[Int] + def readJson[A](file: Path)(implicit decoder: Decoder[A]): Task[A] + def readString(file: Path): Task[String] + + def writeAwsStream(path: Path, + stream: ReactiveWriteStream[Buffer], + length: Long, + onStart: Option[() => Any] = None, + onStop: Option[() => Any] = None): Task[Unit] + def writeStream(path: Path, + stream: ReactiveWriteStream[Buffer], + length: Long, + onStart: Option[() => Any] = None, + onStop: Option[() => Any] = None, + onData: Option[Buffer => (Buffer, Boolean)] = None): Task[Unit] + def write(file: Either[Path, AsyncFile], buffer: ByteBuffer, position: Option[Int] = None): Task[Int] + def writeJson[A](file: Path, obj: A)(implicit encoder: Encoder[A]): Task[Unit] + def writeString(file: Path, string: String): Task[Unit] + + def merge(sourcePaths: List[Path], targetPath: Path): Task[Unit] + + def close(file: Either[Path, AsyncFile]): Task[Unit] + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/file/LiveFile.scala b/jvm/src/main/scala/com/harana/modules/file/LiveFile.scala new file mode 100644 index 0000000..4fc4c59 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/file/LiveFile.scala @@ -0,0 +1,235 @@ +package com.harana.modules.file + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.harana.modules.file.LiveFile.{chunk_size, eventExecutor} +import com.harana.modules.vertx.models.streams.AsyncFileReadStream +import io.circe.syntax.EncoderOps +import io.circe.{Decoder, Encoder, jawn} +import io.vertx.core.buffer.Buffer +import 
io.vertx.core.streams.ReadStream +import io.vertx.ext.reactivestreams.ReactiveWriteStream +import one.jasyncfio.{AsyncFile, EventExecutor} +import org.apache.commons.lang3.SystemUtils +import org.reactivestreams.{Subscriber, Subscription} +import zio.{Task, ZIO, ZLayer} + +import java.io.{FileInputStream, FileOutputStream} +import java.nio.ByteBuffer +import java.nio.charset.StandardCharsets +import java.nio.file.{Files, Path, StandardOpenOption} +import java.util + +object LiveFile { + val chunk_size = 1024 + val eventExecutor = if (SystemUtils.IS_OS_LINUX) Some(EventExecutor.initDefault()) else None + + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveFile(config, logger, micrometer) + } +} + +case class LiveFile(config: Config, logger: Logger, micrometer: Micrometer) extends File { + + def readStream(path: Path, range: Option[(Long, Long)] = None): Task[ReadStream[Buffer]] = + ZIO.attempt(new AsyncFileReadStream(path.toFile.getAbsolutePath, range)) + + + def read(file: Either[Path, AsyncFile], buffer: ByteBuffer, position: Option[Int] = None) = { + file match { + case Left(path) => + ZIO.attempt { + val bytes = Files.readAllBytes(path) + buffer.put(bytes) + bytes.size + } + + case Right(file) => + ZIO.fromFutureJava(if (position.nonEmpty) file.read(buffer, position.get) else file.read(buffer)).map(_.toInt) + } + } + + + def readJson[A](path: Path)(implicit decoder: Decoder[A]): Task[A] = + ZIO.fromEither(jawn.decode[A](Files.readString(path))) + + + def readString(path: Path): Task[String] = + ZIO.attempt(Files.readString(path)) + + + def writeAwsStream(path: Path, + stream: ReactiveWriteStream[Buffer], + length: Long, + onStart: Option[() => Any] = None, + onStop: Option[() => Any] = None): Task[Unit] = { + var emptyChunk = false + var headerEndPos = -1 + var chunkEndPos = -1 + val crlf = "\r\n".getBytes(StandardCharsets.UTF_8) + val delimiter = ";".getBytes(StandardCharsets.UTF_8) + val buffer = Buffer.buffer() + + def countUntil(data: Buffer, sequence: Array[Byte], start: Int): Int = { + for (i <- start to data.length()) { + if (i + sequence.length < data.length()) { + val bytes = data.getBytes(i, i + sequence.length) + if (util.Arrays.equals(bytes, sequence)) return i + } + } + -1 + } + + writeStream(path, stream, length, onStart, onStop, Some(data => { + buffer.appendBuffer(data) + + if (headerEndPos == -1) { + val delimiterPos = countUntil(buffer, delimiter, 0) + delimiter.length + emptyChunk = buffer.slice(0,2).toString(StandardCharsets.UTF_8).equals("0;") + headerEndPos = countUntil(buffer, crlf, delimiterPos) + crlf.length + } + + if (headerEndPos > 0 && chunkEndPos == -1) { + chunkEndPos = countUntil(buffer, crlf, headerEndPos) + } + + if (headerEndPos > 0 && chunkEndPos > 0) { + (buffer.slice(headerEndPos + 1, chunkEndPos), true) + } else { + (Buffer.buffer(), emptyChunk) + } + }) + ) + } + + + def writeStream(path: Path, + stream: ReactiveWriteStream[Buffer], + length: Long, + onStart: Option[() => Any] = None, + onStop: Option[() => Any] = None, + onData: Option[Buffer => (Buffer, Boolean)] = None): Task[Unit] = + if (eventExecutor.nonEmpty) + for { + file <- ZIO.fromCompletableFuture(AsyncFile.open(path, eventExecutor.get)) + _ <- ZIO.async((cb: Task[Unit] => Unit) => + stream.subscribe(new Subscriber[Buffer] { + var subscription: Subscription = _ + var remaining = length + + override def onSubscribe(sub: Subscription) = { + subscription = sub + if (onStart.nonEmpty) 
onStart.get.apply()
+          sub.request(if (remaining > chunk_size) chunk_size else remaining)
+        }
+
+        override def onNext(t: Buffer) = {
+          val onDataResult = onData.map(_.apply(t))
+          val data = if (onDataResult.nonEmpty) onDataResult.get._1 else t
+          file.write(data.getByteBuf.nioBuffer())
+
+          remaining -= data.length()
+          if (remaining == 0 || (onDataResult.nonEmpty && onDataResult.get._2)) {
+            subscription.cancel()
+            onComplete()
+          } else {
+            subscription.request(if (remaining > chunk_size) chunk_size else remaining)
+          }
+        }
+
+        // Fail the effect instead of throwing inside the reactive-streams callback
+        override def onError(t: Throwable) = cb(ZIO.fail(t))
+
+        override def onComplete() = {
+          file.close()
+          if (onStop.nonEmpty) onStop.get.apply()
+          cb(ZIO.unit)
+        }
+      })
+    )
+    } yield ()
+  else
+    ZIO.async { (cb: Task[Unit] => Unit) =>
+      stream.subscribe(new Subscriber[Buffer] {
+        var subscription: Subscription = _
+        var remaining = length
+        var fos: FileOutputStream = _
+
+        override def onSubscribe(sub: Subscription) = {
+          subscription = sub
+          fos = new FileOutputStream(path.toFile)
+          if (onStart.nonEmpty) onStart.get.apply()
+          subscription.request(if (remaining > chunk_size) chunk_size else remaining)
+        }
+
+        override def onNext(t: Buffer) = {
+          val onDataResult = onData.map(_.apply(t))
+          val data = if (onDataResult.nonEmpty) onDataResult.get._1 else t
+          fos.write(data.getBytes)
+          remaining -= data.length()
+
+          if (remaining <= 0 || (onDataResult.nonEmpty && onDataResult.get._2)) {
+            subscription.cancel()
+            onComplete()
+          } else
+            subscription.request(if (remaining > chunk_size) chunk_size else remaining)
+        }
+
+        override def onError(t: Throwable) = cb(ZIO.fail(t))
+
+        override def onComplete() = {
+          fos.close()
+          if (onStop.nonEmpty) onStop.get.apply()
+          cb(ZIO.unit)
+        }
+      })
+    }
+
+
+  def write(file: Either[Path, AsyncFile], buffer: ByteBuffer, position: Option[Int] = None) =
+    file match {
+      case Left(path) =>
+        ZIO.attempt {
+          val array = buffer.array()
+          Files.write(path, array)
+          array.size
+        }
+
+      case Right(file) =>
+        ZIO.fromFutureJava(if (position.nonEmpty) file.write(buffer, position.get) else file.write(buffer)).map(_.toInt)
+    }
+
+
+  def writeJson[A](path: Path, obj: A)(implicit encoder: Encoder[A]): Task[Unit] = {
+    ZIO.attempt(Files.writeString(path, obj.asJson.noSpaces, StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.CREATE, StandardOpenOption.SYNC)).unit
+  }
+
+
+  def writeString(path: Path, string: String): Task[Unit] = {
+    ZIO.attempt(Files.writeString(path, string, StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.CREATE, StandardOpenOption.SYNC)).unit
+  }
+
+
+  def merge(sourcePaths: List[Path], targetPath: Path): Task[Unit] =
+    ZIO.attempt {
+      // Append each source file to the end of the target file
+      val target = new FileOutputStream(targetPath.toFile, true).getChannel
+      sourcePaths.foreach { path =>
+        val source = new FileInputStream(path.toFile).getChannel
+        source.transferTo(0, source.size(), target)
+        source.close()
+      }
+      target.close()
+    }
+
+
+  def close(file: Either[Path, AsyncFile]) =
+    file match {
+      case Left(path) =>
+        ZIO.unit
+      case Right(file) =>
+        ZIO.fromFutureJava(file.close()).unit
+    }
+}
\ No newline at end of file
diff --git a/jvm/src/main/scala/com/harana/modules/git/Git.scala b/jvm/src/main/scala/com/harana/modules/git/Git.scala
new file mode 100644
index 0000000..7cf5406
--- /dev/null
+++ b/jvm/src/main/scala/com/harana/modules/git/Git.scala
@@ -0,0 +1,36 @@
+package com.harana.modules.git
+
+import org.eclipse.jgit.api.{Git => JGit}
+import org.eclipse.jgit.lib.Ref
+import zio.Task
+import zio.macros.accessible
+
+import java.io.File
+
+@accessible
+trait Git {
+
+  def clone(uri:
String, + localDirectory: File, + branch: Option[String] = None, + username: Option[String] = None, + password: Option[String] = None, + oauthToken: Option[String] = None): Task[JGit] + + def checkout(git: JGit, branchTagOrCommit: String): Task[Ref] + + def branch(git: JGit, + branch: String, + track: Boolean = true): Task[Ref] + + def refresh(git: JGit): Task[Unit] + + def hasChanged(git: JGit): Task[Boolean] + + def mostRecentCommitHash(git: JGit): Task[Option[String]] + + def filesForCommit(git: JGit, hash: String): Task[List[File]] + + def latestFiles(git: JGit): Task[List[File]] + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/git/LiveGit.scala b/jvm/src/main/scala/com/harana/modules/git/LiveGit.scala new file mode 100644 index 0000000..4248a04 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/git/LiveGit.scala @@ -0,0 +1,105 @@ +package com.harana.modules.git + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import org.eclipse.jgit.api.{CreateBranchCommand, Git => JGit} +import org.eclipse.jgit.lib.{ObjectId, Ref} +import org.eclipse.jgit.transport.UsernamePasswordCredentialsProvider +import org.eclipse.jgit.treewalk.TreeWalk +import zio.{Task, ZIO, ZLayer} + +import java.io.File +import scala.collection.mutable + +object LiveGit { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveGit(config, logger, micrometer) + } +} + +case class LiveGit(config: Config, logger: Logger, micrometer: Micrometer) extends Git { + + def clone(uri: String, + localDirectory: File, + branch: Option[String] = None, + username: Option[String] = None, + password: Option[String] = None, + oauthToken: Option[String] = None): Task[JGit] = + for { + git <- ZIO.succeed { + val cloneCommand = JGit.cloneRepository().setDirectory(localDirectory).setURI(uri) + if (branch.nonEmpty) cloneCommand.setBranch(branch.get) + if (username.nonEmpty && password.nonEmpty) cloneCommand.setCredentialsProvider(new UsernamePasswordCredentialsProvider(username.get, password.get)) + if (oauthToken.nonEmpty) cloneCommand.setCredentialsProvider(new UsernamePasswordCredentialsProvider(oauthToken.get, "")) + cloneCommand.call() + } + } yield git + + + def checkout(git: JGit, branchTagOrCommit: String): Task[Ref] = + for { + ref <- ZIO.attempt(git.checkout().setName(branchTagOrCommit).call()) + } yield ref + + + def branch(git: JGit, branch: String, track: Boolean = true): Task[Ref] = + ZIO.attempt { + git + .checkout + .setCreateBranch(true) + .setName(branch) + .setUpstreamMode(CreateBranchCommand.SetupUpstreamMode.TRACK) + .setStartPoint("origin/" + branch) + .call() + } + + + def refresh(git: JGit): Task[Unit] = + for { + _ <- ZIO.attempt(git.pull().call()) + } yield () + + + def hasChanged(git: JGit): Task[Boolean] = + for { + prevCommit <- mostRecentCommitHash(git) + _ <- refresh(git) + newCommit <- mostRecentCommitHash(git) + changed = prevCommit.isEmpty && newCommit.nonEmpty || prevCommit.nonEmpty && newCommit.nonEmpty && prevCommit != newCommit + } yield changed + + + def mostRecentCommitHash(git: JGit): Task[Option[String]] = + for { + it <- ZIO.attempt(git.log.setMaxCount(1).call.iterator()).option + hash = if (it.nonEmpty && it.get.hasNext) Some(it.get.next().getName) else None + } yield hash + + + def filesForCommit(git: JGit, hash: String): Task[List[File]] = { + ZIO.attempt { + val 
treeWalk = new TreeWalk(git.getRepository)
+      // A commit hash cannot be walked directly: resolve the commit to its tree before walking
+      val revWalk = new org.eclipse.jgit.revwalk.RevWalk(git.getRepository)
+      treeWalk.addTree(revWalk.parseCommit(ObjectId.fromString(hash)).getTree)
+      treeWalk.setRecursive(true)
+
+      val paths = mutable.ListBuffer[File]()
+
+      while (treeWalk.next) paths += new File(treeWalk.getPathString)
+      treeWalk.close()
+      revWalk.close()
+
+      paths.toList
+    }
+  }
+
+
+  def latestFiles(git: JGit): Task[List[File]] =
+    for {
+      hash <- mostRecentCommitHash(git)
+      files <- ZIO.ifZIO(ZIO.succeed(hash.nonEmpty))(filesForCommit(git, hash.get), ZIO.attempt(List()))
+      _ <- logger.debug("Latest files end")
+    } yield files
+}
\ No newline at end of file
diff --git a/jvm/src/main/scala/com/harana/modules/google/Google.scala b/jvm/src/main/scala/com/harana/modules/google/Google.scala
new file mode 100644
index 0000000..45c2526
--- /dev/null
+++ b/jvm/src/main/scala/com/harana/modules/google/Google.scala
@@ -0,0 +1,21 @@
+package com.harana.modules.google
+
+import zio.Task
+import zio.macros.accessible
+
+@accessible
+trait Google {
+
+  def pageView(clientId: String, page: String, title: String): Task[String]
+
+  def event(clientId: String, category: String, action: String, label: String, value: String): Task[Event]
+
+  def exception(clientId: String, description: String, fatal: Boolean): Task[String]
+
+  def time(clientId: String, category: String, variable: String, time: Long, label: String): Task[Event]
+
+  def send(event: Event): Task[Unit]
+
+  def batch(events: List[Event]): Task[Unit]
+
+}
\ No newline at end of file
diff --git a/jvm/src/main/scala/com/harana/modules/google/LiveGoogle.scala b/jvm/src/main/scala/com/harana/modules/google/LiveGoogle.scala
new file mode 100644
index 0000000..a996feb
--- /dev/null
+++ b/jvm/src/main/scala/com/harana/modules/google/LiveGoogle.scala
@@ -0,0 +1,104 @@
+package com.harana.modules.google
+
+import com.harana.modules.core.config.Config
+import com.harana.modules.core.http.Http
+import com.harana.modules.core.logger.Logger
+import com.harana.modules.core.micrometer.Micrometer
+import zio.{Task, ZIO, ZLayer}
+
+object LiveGoogle {
+  val layer = ZLayer {
+    for {
+      config <- ZIO.service[Config]
+      http <- ZIO.service[Http]
+      logger <- ZIO.service[Logger]
+      micrometer <- ZIO.service[Micrometer]
+    } yield LiveGoogle(config, http, logger, micrometer)
+  }
+}
+
+case class LiveGoogle(config: Config, http: Http, logger: Logger, micrometer: Micrometer) extends Google {
+
+  def pageView(clientId: String, page: String, title: String): Task[Event] =
+    for {
+      propertyId <- config.string("google.tags.propertyId")
+      domain <- config.string("http.domain", "domain")
+      event <- ZIO.attempt(
+        // Encode the hit as key=value pairs joined with '&'
+        Map(
+          "v" -> 1,
+          "tid" -> propertyId,
+          "cid" -> clientId,
+          "t" -> "pageview",
+          "dh" -> domain,
+          "dp" -> (if (page.startsWith("/")) page else s"/$page"),
+          "dt" -> title
+        ).map { case (k, v) => s"$k=$v" }.mkString("&")
+      )
+    } yield event
+
+
+  def event(clientId: String, category: String, action: String, label: String, value: String): Task[Event] =
+    for {
+      propertyId <- config.string("google.tags.propertyId")
+      event <- ZIO.attempt(
+        Map(
+          "v" -> 1,
+          "tid" -> propertyId,
+          "cid" -> clientId,
+          "t" -> "event",
+          "ec" -> category,
+          "ea" -> action,
+          "el" -> label,
+          "ev" -> value
+        ).map { case (k, v) => s"$k=$v" }.mkString("&")
+      )
+    } yield event
+
+
+  def exception(clientId: String, description: String, fatal: Boolean): Task[Event] =
+    for {
+      propertyId <- config.string("google.tags.propertyId")
+      event <- ZIO.attempt(
+        Map(
+          "v" -> 1,
+          "tid" -> propertyId,
+          "cid" -> clientId,
+          "t" -> "event",
+          "exd" -> description,
+          "exf" -> (if (fatal) 1 else 0)
+        ).map { case (k, v) => s"$k=$v" }.mkString("&")
+      )
+    } yield event
+
+
+  def time(clientId: String, category: String, variable: String, time: Long, label: String): Task[Event] =
+    for {
+      propertyId <- config.string("google.tags.propertyId")
+      event <- ZIO.attempt(
+        Map(
+          "v" -> 1,
+          "tid" -> propertyId,
+          "cid" -> clientId,
+          "t" -> "timing",
+          "utc" -> category,
+          "utv" -> variable,
+          "utt" -> time,
+          "utl" -> label
+        ).map { case (k, v) => s"$k=$v" }.mkString("&")
+      )
+    } yield event
+
+
+  def send(event: Event): Task[Unit] =
+    for {
+      url <- config.string("google.tags.url")
+      _ <- http.post(s"$url/collect", Some(event)).mapError(e => new Exception(e.toString))
+    } yield ()
+
+
+  def batch(events: List[Event]): Task[Unit] =
+    for {
+      url <- config.string("google.tags.url")
+      _ <- http.post(s"$url/batch", Some(events.mkString("\n"))).mapError(e => new Exception(e.toString))
+    } yield ()
+}
\ No newline at end of file
diff --git a/jvm/src/main/scala/com/harana/modules/google/package.scala b/jvm/src/main/scala/com/harana/modules/google/package.scala
new file mode 100644
index 0000000..6aabd81
--- /dev/null
+++ b/jvm/src/main/scala/com/harana/modules/google/package.scala
@@ -0,0 +1,7 @@
+package com.harana.modules
+
+package object google {
+
+  type Event = String
+
+}
diff --git a/jvm/src/main/scala/com/harana/modules/handlebars/Handlebars.scala b/jvm/src/main/scala/com/harana/modules/handlebars/Handlebars.scala
new file mode 100644
index 0000000..88c2162
--- /dev/null
+++ b/jvm/src/main/scala/com/harana/modules/handlebars/Handlebars.scala
@@ -0,0 +1,13 @@
+package com.harana.modules.handlebars
+
+import zio.Task
+import zio.macros.accessible
+
+@accessible
+trait Handlebars {
+
+  def renderPath(path: String, props: Map[String, Object]): Task[String]
+
+  def renderString(name: String, props: Map[String, Object]): Task[String]
+
+}
\ No newline at end of file
diff --git a/jvm/src/main/scala/com/harana/modules/handlebars/LiveHandlebars.scala b/jvm/src/main/scala/com/harana/modules/handlebars/LiveHandlebars.scala
new file mode 100644
index 0000000..460b28d
--- /dev/null
+++ b/jvm/src/main/scala/com/harana/modules/handlebars/LiveHandlebars.scala
@@ -0,0 +1,46 @@
+package com.harana.modules.handlebars
+
+import com.github.jknack.handlebars.{Context, Handlebars => CoreHandlebars}
+import com.github.jknack.handlebars.context.{JavaBeanValueResolver, MapValueResolver, MethodValueResolver}
+import com.github.jknack.handlebars.io.ClassPathTemplateLoader
+import com.harana.modules.core.config.Config
+import com.harana.modules.core.logger.Logger
+import com.harana.modules.core.micrometer.Micrometer
+import zio.{Task, ZIO, ZLayer}
+
+object LiveHandlebars {
+  val layer = ZLayer {
+    for {
+      config <- ZIO.service[Config]
+      logger <- ZIO.service[Logger]
+      micrometer <- ZIO.service[Micrometer]
+    } yield LiveHandlebars(config, logger, micrometer)
+  }
+}
+
+case class LiveHandlebars(config: Config, logger: Logger, micrometer: Micrometer) extends Handlebars {
+
+  private val handlebars = {
+    val l = new ClassPathTemplateLoader
+    l.setPrefix("/templates/")
+    l.setSuffix(".hbs")
+    val hb = new CoreHandlebars(l)
+    hb.registerHelper("each", ScalaEachHelper)
+    hb.infiniteLoops(true)
+  }
+
+  def renderPath(name: String, props: Map[String, Object]): Task[String] =
+    ZIO.attempt(handlebars.compile(name)(context(props)))
+
+
+  def renderString(content: String, props: Map[String, Object]): Task[String] =
+    ZIO.attempt(handlebars.compileInline(content)(context(props)))
+
+
+  private def context(props: Map[String, Object]) =
+    Context
+      .newBuilder(props)
+      .resolver(ScalaResolver, MapValueResolver.INSTANCE, MethodValueResolver.INSTANCE,
JavaBeanValueResolver.INSTANCE + ).build() + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/handlebars/ScalaResolver.scala b/jvm/src/main/scala/com/harana/modules/handlebars/ScalaResolver.scala new file mode 100644 index 0000000..a02e3d6 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/handlebars/ScalaResolver.scala @@ -0,0 +1,59 @@ +package com.harana.modules.handlebars + +import com.github.jknack.handlebars.context.MapValueResolver +import com.github.jknack.handlebars.helper.EachHelper +import com.github.jknack.handlebars.{Helper, Options, ValueResolver} + +import scala.jdk.CollectionConverters._ +import scala.reflect.runtime.{universe => ru} +import scala.util.Try + +object ScalaResolver extends ValueResolver { + + private val rootMirror = ru.runtimeMirror(getClass.getClassLoader) + + private def methodMirrorFor(context: AnyRef, name: String): Option[ru.MethodMirror] = { + val meta = rootMirror.reflect(context) + val optAccessor = meta.symbol.info.decls find { m => + m.isMethod && m.isPublic && m.name.toString == name + } + optAccessor.map(a => meta.reflectMethod(a.asMethod)) + } + + override def resolve(context: AnyRef, name: String): AnyRef = context match { + case m: collection.Map[_,_] => MapValueResolver.INSTANCE.resolve(m.asJava, name) + case _ => + val optMM = methodMirrorFor(context, name) + val ret = optMM.fold(ValueResolver.UNRESOLVED)(m => resolve(m.apply())): AnyRef + println(s"...returning ${ret.toString}") + ret + } + + override def resolve(context: scala.Any): AnyRef = context match { + case m: collection.Map[_,_] => MapValueResolver.INSTANCE.resolve(m.asJava) + case Some(x: AnyRef) => x + case None => null + case x: AnyRef => x + } + + override def propertySet(context: scala.Any): java.util.Set[java.util.Map.Entry[String, AnyRef]] = context match { + case m: collection.Map[_,_] => + MapValueResolver.INSTANCE.propertySet(m.asJava) + case _ => + println(s"ScalaMemberResolver.propertySet in context: [${context.getClass.getName}]") + val meta = rootMirror.reflect(context) + val accessors = meta.symbol.info.decls.filter(m => m.isMethod && m.isPublic).toSeq + val results = for { + a <- accessors + v <- Try(meta.reflectMethod(a.asMethod).apply()).toOption + } yield a.name.toString -> v.asInstanceOf[AnyRef] + results.toMap.asJava.entrySet + } +} + +object ScalaEachHelper extends Helper[AnyRef] { + override def apply(context: scala.AnyRef, options: Options): AnyRef = context match { + case iter: Iterable[_] => EachHelper.INSTANCE.apply(iter.asJava, options) + case _ => EachHelper.INSTANCE.apply(context, options) + } +} diff --git a/jvm/src/main/scala/com/harana/modules/ignite/Ignite.scala b/jvm/src/main/scala/com/harana/modules/ignite/Ignite.scala new file mode 100644 index 0000000..5a8ae69 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/ignite/Ignite.scala @@ -0,0 +1,94 @@ +package com.harana.modules.ignite + +import zio.Task +import zio.macros.accessible + +@accessible +trait Ignite { + + def attach(vmId: String, + quiet: Option[Boolean] = None): Task[List[String]] + + def completion(quiet: Option[Boolean] = None): Task[List[String]] + + def copyToVM(source: String, + destination: String, + quiet: Option[Boolean] = None, + timeout: Option[Int] = None): Task[List[String]] + + def createVM(ociImage: String, + copyFiles: List[String] = List(), + cpus: Option[Int] = None, + disk: Option[Int] = None, + labels: Map[String, String] = Map(), + memory: Option[Int] = None, + name: Option[String] = None, + ports: Map[String, String] = 
Map(), + quiet: Option[Boolean] = None, + ssh: Option[String] = None, + volumes: List[String] = List()): Task[List[String]] + + def exec(vmId: String, + command: String, + quiet: Option[Boolean] = None, + timeout: Option[Int] = None, + tty: Option[Boolean] = None): Task[List[String]] + + def importImage(ociImage: String, + quiet: Option[Boolean] = None): Task[List[String]] + + def inspectVM(vmId: String, + outputFormat: Option[String] = None, + quiet: Option[Boolean] = None, + template: Option[String] = None): Task[List[String]] + + def killVMs(vmIds: List[String], + quiet: Option[Boolean] = None): Task[List[String]] + + def listImages(quiet: Option[Boolean] = None): Task[List[String]] + + def listVMs(all: Option[Boolean] = None, + filter: Option[String] = None, + quiet: Option[Boolean] = None, + template: Option[String] = None): Task[List[String]] + + def logs(vmId: String, + quiet: Option[Boolean] = None): Task[List[String]] + + def removeImages(imageIds: List[String], + force: Option[Boolean] = None, + quiet: Option[Boolean] = None): Task[List[String]] + + def removeVMs(vmIds: List[String], + force: Option[Boolean] = None, + quiet: Option[Boolean] = None): Task[List[String]] + + def runVM(ociImage: String, + copyFiles: List[String] = List(), + cpus: Option[Int] = None, + disk: Option[Int] = None, + interactive: Option[Boolean] = None, + labels: Map[String, String] = Map(), + memory: Option[Int] = None, + name: Option[String] = None, + ports: Map[String, String] = Map(), + quiet: Option[Boolean] = None, + ssh: Option[String] = None, + volumes: List[String] = List()): Task[List[String]] + + def ssh(vmId: String, + quiet: Option[Boolean] = None, + timeout: Option[Int] = None, + tty: Option[Boolean] = None): Task[List[String]] + + def startVM(vmId: String, + interactive: Option[Boolean] = None, + quiet: Option[Boolean] = None): Task[List[String]] + + def stopVM(vmId: List[String], + force: Option[Boolean] = None, + quiet: Option[Boolean] = None): Task[List[String]] + + def version(outputFormat: Option[String] = None, + quiet: Option[Boolean] = None): Task[List[String]] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/ignite/LiveIgnite.scala b/jvm/src/main/scala/com/harana/modules/ignite/LiveIgnite.scala new file mode 100644 index 0000000..eee23da --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/ignite/LiveIgnite.scala @@ -0,0 +1,235 @@ +package com.harana.modules.ignite + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import zio.process.Command +import zio.{Task, UIO, ZIO, ZLayer} + +object LiveIgnite { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveIgnite(config, logger, micrometer) + } +} + +case class LiveIgnite(config: Config, logger: Logger, micrometer: Micrometer) extends Ignite { + + def attach(vmId: String, quiet: Option[Boolean] = None): Task[List[String]] = + for { + quiet <- booleanArg("quiet", quiet) + cmd <- Command("ignite", List("action", vmId) ++ quiet: _*).lines + } yield cmd.toList + + + def completion(quiet: Option[Boolean] = None): Task[List[String]] = + for { + quiet <- booleanArg("quiet", quiet) + cmd <- Command("ignite", List("completion") ++ quiet: _*).lines + } yield cmd.toList + + + def copyToVM(source: String, + destination: String, + quiet: Option[Boolean] = None, + timeout: Option[Int] = None): Task[List[String]] = + for { 
+ quiet <- booleanArg("quiet", quiet) + timeout <- intArg("timeout", timeout) + cmd <- Command("ignite", List("cp", source, destination) ++ quiet ++ timeout: _*).lines + } yield cmd.toList + + + def createVM(ociImage: String, + copyFiles: List[String] = List(), + cpus: Option[Int] = None, + disk: Option[Int] = None, + labels: Map[String, String] = Map(), + memory: Option[Int] = None, + name: Option[String] = None, + ports: Map[String, String] = Map(), + quiet: Option[Boolean] = None, + ssh: Option[String] = None, + volumes: List[String] = List()): Task[List[String]] = + for { + cpusArg <- intArg("cpus", cpus, includeValue = true) + diskArg <- intArg("size", disk, includeValue = true) + labelsArg = if (labels.isEmpty) List() else List("l", labels.map { case (k,v) => s"$k=$v" }.mkString) + memoryArg <- intArg("memory", memory, includeValue = true) + nameArg <- stringArg("name", name, includeValue = true) + quietArg <- booleanArg("quiet", quiet) + sshArg = ssh.map(s => List(s"ssh=$s")).getOrElse(List()) + args = cpusArg ++ diskArg ++ labelsArg ++ memoryArg ++ nameArg ++ quietArg ++ sshArg + cmd <- Command("ignite", List("create", ociImage) ++ args.toSeq: _*).lines + } yield cmd.toList + + + def exec(vmId: String, + command: String, + quiet: Option[Boolean] = None, + timeout: Option[Int] = None, + tty: Option[Boolean] = None): Task[List[String]] = + for { + quiet <- booleanArg("quiet", quiet) + timeout <- intArg("timeout", timeout) + tty <- booleanArg("tty", tty) + cmd <- Command("ignite", List("exec", command) ++ quiet ++ timeout ++ tty: _*).lines + } yield cmd.toList + + + def importImage(ociImage: String, + quiet: Option[Boolean] = None): Task[List[String]] = + for { + quiet <- booleanArg("quiet", quiet) + cmd <- Command("ignite", List("image", "import", ociImage) ++ quiet: _*).lines + } yield cmd.toList + + + def inspectVM(vmId: String, + outputFormat: Option[String] = None, + quiet: Option[Boolean] = None, + template: Option[String] = None): Task[List[String]] = + for { + outputFormat <- stringArg("output", outputFormat) + quiet <- booleanArg("quiet", quiet) + template <- stringArg("t", template, includeValue = true) + cmd <- Command("ignite", List("inspect", "vm", vmId) ++ outputFormat ++ quiet ++ template: _*).lines + } yield cmd.toList + + + def killVMs(vmIds: List[String], + quiet: Option[Boolean] = None): Task[List[String]] = + for { + quiet <- booleanArg("quiet", quiet) + cmd <- Command("ignite", List("kill") ++ vmIds ++ quiet: _*).lines + } yield cmd.toList + + + def listImages(quiet: Option[Boolean] = None): Task[List[String]] = + for { + quiet <- booleanArg("quiet", quiet) + cmd <- Command("ignite", List("image", "ls") ++ quiet: _*).lines + } yield cmd.toList + + + def listVMs(all: Option[Boolean] = None, + filter: Option[String] = None, + quiet: Option[Boolean] = None, + template: Option[String] = None): Task[List[String]] = + for { + all <- booleanArg("all", all) + filter <- stringArg("filter", filter, includeValue = true) + quiet <- booleanArg("quiet", quiet) + template <- stringArg("template", template) + cmd <- Command("ignite", List("ps") ++ all ++ filter ++ quiet ++ template: _*).lines + } yield cmd.toList + + + def logs(vmId: String, + quiet: Option[Boolean] = None): Task[List[String]] = + for { + quiet <- booleanArg("quiet", quiet) + cmd <- Command("ignite", List("logs", vmId) ++ quiet: _*).lines + } yield cmd.toList + + + def removeImages(imageIds: List[String], + force: Option[Boolean] = None, + quiet: Option[Boolean] = None): Task[List[String]] = + for { + force <- 
booleanArg("force", force) + quiet <- booleanArg("quiet", quiet) + cmd <- Command("ignite", List("rmi") ++ imageIds ++ force ++ quiet: _*).lines + } yield cmd.toList + + + def removeVMs(vmIds: List[String], + force: Option[Boolean] = None, + quiet: Option[Boolean] = None): Task[List[String]] = + for { + force <- booleanArg("force", force) + quiet <- booleanArg("quiet", quiet) + cmd <- Command("ignite", List("rm") ++ vmIds ++ force ++ quiet: _*).lines + } yield cmd.toList + + + def runVM(ociImage: String, + copyFiles: List[String] = List(), + cpus: Option[Int] = None, + disk: Option[Int] = None, + interactive: Option[Boolean] = None, + labels: Map[String, String] = Map(), + memory: Option[Int] = None, + name: Option[String] = None, + ports: Map[String, String] = Map(), + quiet: Option[Boolean] = None, + ssh: Option[String] = None, + volumes: List[String] = List()): Task[List[String]] = + for { + cpusArg <- intArg("cpus", cpus, includeValue = true) + copyFilesArg = if (copyFiles.isEmpty) List() else List("copy-files", copyFiles.mkString) + diskArg <- intArg("size", disk, includeValue = true) + interactiveArg <- booleanArg("i", interactive) + labelsArg = if (labels.isEmpty) List() else List("l", labels.map { case (k,v) => s"$k=$v" }.mkString) + memoryArg <- intArg("memory", memory, includeValue = true) + nameArg <- stringArg("name", name, includeValue = true) + quietArg <- booleanArg("quiet", quiet) + sshesArg = ssh.map(s => List(s"ssh=$s")).getOrElse(List()) + args = copyFilesArg ++ cpusArg ++ diskArg ++ interactiveArg ++ labelsArg ++ memoryArg ++ nameArg ++ quietArg ++ sshesArg + cmd <- Command("ignite", List("run", ociImage) ++ args.toSeq: _*).lines + } yield cmd.toList + + + def ssh(vmId: String, + quiet: Option[Boolean] = None, + timeout: Option[Int] = None, + tty: Option[Boolean] = None): Task[List[String]] = + for { + quiet <- booleanArg("quiet", quiet) + timeout <- intArg("timeout", timeout, includeValue = true) + tty <- booleanArg("tty", tty) + cmd <- Command("ignite", List("ssh", vmId) ++ quiet ++ timeout ++ tty: _*).lines + } yield cmd.toList + + + def startVM(vmId: String, + interactive: Option[Boolean] = None, + quiet: Option[Boolean] = None): Task[List[String]] = + for { + interactive <- booleanArg("interactive", interactive) + quiet <- booleanArg("quiet", quiet) + cmd <- Command("ignite", List("start", vmId) ++ interactive ++ quiet: _*).lines + } yield cmd.toList + + + def stopVM(vmId: List[String], + force: Option[Boolean] = None, + quiet: Option[Boolean] = None): Task[List[String]] = + for { + force <- booleanArg("force-kill", force) + quiet <- booleanArg("quiet", quiet) + cmd <- Command("ignite", List("stop") ++ vmId ++ force ++ quiet: _*).lines + } yield cmd.toList + + + def version(outputFormat: Option[String] = None, + quiet: Option[Boolean] = None): Task[List[String]] = + for { + outputFormat <- stringArg("output", outputFormat, includeValue = true) + quiet <- booleanArg("quiet", quiet) + cmd <- Command("ignite", List("version") ++ outputFormat ++ quiet: _*).lines + } yield cmd.toList + + + private def booleanArg(name: String, arg: Option[Boolean], includeValue: Boolean = false): UIO[List[String]] = + ZIO.succeed(arg.map(v => List(name) ++ (if (includeValue) List(v.toString) else List())).getOrElse(List())) + + private def intArg(name: String, arg: Option[Int], includeValue: Boolean = false): UIO[List[String]] = + ZIO.succeed(arg.map(v => List(name) ++ (if (includeValue) List(v.toString) else List())).getOrElse(List())) + + private def stringArg(name: String, arg: 
Option[String], includeValue: Boolean = false): UIO[List[String]] = + ZIO.succeed(arg.map(v => List(name) ++ (if (includeValue) List(v) else List())).getOrElse(List())) +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/jsoup/Jsoup.scala b/jvm/src/main/scala/com/harana/modules/jsoup/Jsoup.scala new file mode 100644 index 0000000..babb81c --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/jsoup/Jsoup.scala @@ -0,0 +1,49 @@ +package com.harana.modules.jsoup + +import com.harana.modules.jsoup.models._ +import org.jsoup.nodes.{Document, Element, Node} +import org.jsoup.select.Elements +import zio.macros.accessible +import zio.stream._ +import zio.{IO, UIO} + +import java.io.{BufferedInputStream, File} +import java.net.URL + +@accessible +trait Jsoup { + def parse(file: File): IO[JsoupError, Document] + + def parse(string: String, fragment: Boolean = false): IO[JsoupError, Document] + + def parse(string: String, baseUri: String): IO[JsoupError, Document] + + def parse(url: URL, connectionOptions: ConnectionOptions): IO[JsoupError, Document] + + def parse(urlStream: Stream[JsoupError, URL], connectionOptions: ConnectionOptions): UIO[Stream[JsoupError, Document]] + + def elementStream(doc: Document, selector: String): UIO[Stream[JsoupError, Element]] + + def linkStream(doc: Document): UIO[Stream[JsoupError, URL]] + + def mediaStream(doc: Document): UIO[Stream[JsoupError, URL]] + + def stream(url: URL, connectionOptions: ConnectionOptions): IO[JsoupError, BufferedInputStream] + + def download(url: URL, path: File, connectionOptions: ConnectionOptions): IO[JsoupError, Unit] + + def mirror(url: URL, downloadDir: File, connectionOptions: ConnectionOptions): IO[JsoupError, Unit] + + def recursiveDownload(startDoc: Document, + navigateSelector: String, + downloadSelector: String, + downloadDir: File, + shouldDownload: Document => Boolean, + connectionOptions: ConnectionOptions): IO[JsoupError, Unit] +} + +object Jsoup { + implicit def enrichElements(xs: Elements): RichElements = new RichElements(xs) + implicit def enrichElement(el: Element): RichElement = new RichElement(el) + implicit def enrichNodeList[N <: Node](l: java.util.List[N]): RichNodeList[N] = new RichNodeList(l) +} diff --git a/jvm/src/main/scala/com/harana/modules/jsoup/models/AttributeOption.scala b/jvm/src/main/scala/com/harana/modules/jsoup/models/AttributeOption.scala new file mode 100644 index 0000000..7d304a8 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/jsoup/models/AttributeOption.scala @@ -0,0 +1,34 @@ +package com.harana.modules.jsoup.models + +import org.jsoup.nodes.Attributes + +import scala.jdk.CollectionConverters._ +import scala.util.matching.Regex + +trait AttributeOption extends ElementTarget { + def attribute(name: String): Option[String] = EmptyStringToOption(target.attr(name)) + def attributeRegex(nameRegex: Regex): Option[String] = AttributeRegexToOption(target.attributes, nameRegex) +} + +trait ElementsAttributeOption extends ElementsTarget { + def attribute(name: String): Option[String] = EmptyStringToOption(target.attr(name)) + + def attributeRegex(nameRegex: Regex): Option[String] = { + val elems = target.listIterator.asScala + elems.collectFirst(Function.unlift(elem => AttributeRegexToOption(elem.attributes, nameRegex))) + } +} + +object EmptyStringToOption { + def apply(ss: String): Option[String] = ss match { + case "" => None + case s: String => Some(s) + } +} + +object AttributeRegexToOption { + def apply(attributes: Attributes, nameRegex: Regex): 
Option[String] = { + val atts = attributes.asList.asScala + atts.find(att => nameRegex.findFirstIn(att.getKey).nonEmpty).flatMap(att => EmptyStringToOption(att.getValue)) + } +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/jsoup/models/ClosestElement.scala b/jvm/src/main/scala/com/harana/modules/jsoup/models/ClosestElement.scala new file mode 100644 index 0000000..366bf4b --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/jsoup/models/ClosestElement.scala @@ -0,0 +1,66 @@ +package com.harana.modules.jsoup.models + +import org.jsoup.nodes.Element +import org.jsoup.select.Elements + +object ClosestFinder extends JsoupImplicits { + + def findClosestOption(selector: String, elem: Element): Option[Element] = { + enrichElements(elem.select(selector)).headOption.orElse { + elem.parents.headOption.flatMap { _ => + findClosestOption(selector, elem.parents) + } + } + } + + def findClosestOption(selector: String, elems: Elements): Option[Element] = { + elems.select(selector).headOption.orElse { + elems.parents.headOption.flatMap { _ => + findClosestOption(selector, elems.parents) + } + } + } + + /** Returns an Elements - i.e. it doesn't just grab the first */ + def findClosest(selector: String, elems: Elements): Elements = { + elems.headOption.fold(elems) { _ => + val here = elems.select(selector) + here.headOption.fold(findClosest(selector, elems.parents))(_ => here) + } + } + + def findClosestBeforeOption(selector: String, elem: Element): Option[Element] = + findClosest(selector, new Elements(elem)).find(_.isBefore(elem)) + + def findClosestAfterOption(selector: String, elem: Element): Option[Element] = + findClosest(selector, new Elements(elem)).find(_.isAfter(elem)) + +} + +trait ClosestElement extends ElementTarget { + + def closestOption(selector: String): Option[Element] = + ClosestFinder.findClosestOption(selector, target) + + def closest(selector: String): Elements = + ClosestFinder.findClosest(selector, new Elements(target)) + + def closestBeforeOption(selector: String): Option[Element] = + ClosestFinder.findClosestBeforeOption(selector, target) + + def closestAfterOption(selector: String): Option[Element] = + ClosestFinder.findClosestAfterOption(selector, target) + +} + +trait ClosestElements extends ElementsTarget { + + def closestOption(selector: String): Option[Element] = { + ClosestFinder.findClosestOption(selector, target) + } + + def closest(selector: String): Elements = { + ClosestFinder.findClosest(selector, target) + } + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/jsoup/models/ConnectionOptions.scala b/jvm/src/main/scala/com/harana/modules/jsoup/models/ConnectionOptions.scala new file mode 100644 index 0000000..26054ec --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/jsoup/models/ConnectionOptions.scala @@ -0,0 +1,20 @@ +package com.harana.modules.jsoup.models + +import org.jsoup.Connection + +import javax.net.ssl.SSLSocketFactory + +case class ConnectionOptions(cookies: Map[String, String] = Map(), + data: Map[String, String] = Map(), + followRedirects: Option[Boolean] = None, + headers: Map[String, String] = Map(), + ignoreContentType: Option[Boolean] = None, + ignoreHttpErrors: Option[Boolean] = None, + maxBodySize: Option[Int] = None, + method: Option[Connection.Method] = None, + postDataCharset: Option[String] = None, + proxy: Option[Proxy] = None, + referrer: Option[String] = None, + requestBody: Option[String] = None, + sslSocketFactory: Option[SSLSocketFactory] = None, + timeout: 
Option[Int] = None) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/jsoup/models/DocumentPositioning.scala b/jvm/src/main/scala/com/harana/modules/jsoup/models/DocumentPositioning.scala new file mode 100644 index 0000000..4ecf66d --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/jsoup/models/DocumentPositioning.scala @@ -0,0 +1,36 @@ +package com.harana.modules.jsoup.models + +import org.jsoup.nodes.Element + +import scala.annotation.tailrec + +trait DocumentPositioning extends ElementTarget { + + lazy val documentCoordinates = coordinatesOf(List(), Some(target)).reverse + + @tailrec + private def coordinatesOf(accum: List[Int], maybeElement: Option[Element]): List[Int] = { + if (maybeElement.isEmpty) { + accum + } else { + val elem = maybeElement.get + coordinatesOf(accum :+ elem.siblingIndex, Option(elem.parent)) + } + } + + private def compareCoordinates(maybeOther: Option[DocumentPositioning])(f: (Int, Int) => Boolean): Boolean = { + maybeOther.fold(false) { other => + val zip = documentCoordinates.zipAll(other.documentCoordinates, 0, 0) + val maybeFirstDiff = zip.dropWhile(dc => dc._1 == dc._2).headOption + maybeFirstDiff.fold(false) { diff => + f(diff._1, diff._2) + } + } + } + + def isBefore(other: DocumentPositioning): Boolean = + compareCoordinates(Option(other))((a, b) => a < b) + + def isAfter(other: DocumentPositioning): Boolean = + compareCoordinates(Option(other))((a, b) => a > b) +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/jsoup/models/ElementTarget.scala b/jvm/src/main/scala/com/harana/modules/jsoup/models/ElementTarget.scala new file mode 100644 index 0000000..20960ee --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/jsoup/models/ElementTarget.scala @@ -0,0 +1,11 @@ +package com.harana.modules.jsoup.models + +import org.jsoup.nodes.Element +import org.jsoup.select.Elements + +trait Target[T] { + val target: T +} + +trait ElementTarget extends Target[Element] +trait ElementsTarget extends Target[Elements] \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/jsoup/models/JsoupError.scala b/jvm/src/main/scala/com/harana/modules/jsoup/models/JsoupError.scala new file mode 100644 index 0000000..c4a030c --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/jsoup/models/JsoupError.scala @@ -0,0 +1,8 @@ +package com.harana.modules.jsoup.models + +sealed trait JsoupError extends Product with Serializable +object JsoupError { + case object NotFound extends JsoupError + case object AlreadyStopped extends JsoupError + case class Exception(t: Throwable) extends JsoupError +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/jsoup/models/JsoupImplicits.scala b/jvm/src/main/scala/com/harana/modules/jsoup/models/JsoupImplicits.scala new file mode 100644 index 0000000..4b10a57 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/jsoup/models/JsoupImplicits.scala @@ -0,0 +1,36 @@ +package com.harana.modules.jsoup.models + +import org.jsoup.nodes._ +import org.jsoup.select.Elements + +import scala.jdk.CollectionConverters._ + +trait JsoupImplicits { + implicit def enrichElements(xs: Elements): RichElements = new RichElements(xs) + implicit def enrichElement(el: Element): RichElement = new RichElement(el) + implicit def enrichNodeList[N <: Node](l: java.util.List[N]): RichNodeList[N] = new RichNodeList(l) +} + +object JsoupImplicits extends JsoupImplicits + +class RichElements(val target: Elements) + extends Iterable[Element] + with 
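The ConnectionOptions model above drives how connections are made; a small illustrative value (header, method and timeout chosen arbitrarily) might look like the following, with any failure surfacing as a `JsoupError` on the effect's error channel.

```
import com.harana.modules.jsoup.models.ConnectionOptions
import org.jsoup.Connection

// Sketch only: a ConnectionOptions value with a few common settings.
val options = ConnectionOptions(
  headers         = Map("User-Agent" -> "harana-modules"),
  followRedirects = Some(true),
  method          = Some(Connection.Method.GET),
  timeout         = Some(10000)   // milliseconds, as in org.jsoup
)
```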
ClosestElements + with ElementsAttributeOption { + + def iterator: Iterator[Element] = { + target.asScala.iterator + } +} + +class RichElement(val target: Element) + extends ClosestElement + with DocumentPositioning + with AttributeOption { +} + +class RichNodeList[N <: Node](val target: java.util.List[N]) extends Iterable[Node] { + def iterator: Iterator[Node] = { + target.asScala.iterator + } +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/kind/Kind.scala b/jvm/src/main/scala/com/harana/modules/kind/Kind.scala new file mode 100644 index 0000000..112a778 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/kind/Kind.scala @@ -0,0 +1,39 @@ +package com.harana.modules.kind + +import com.harana.modules.kind.models.Cluster +import zio.Task +import zio.macros.accessible + +import java.io.File + +@accessible +trait Kind { + + def createCluster(name: String, + cluster: Option[Cluster] = None, + kubeConfig: Option[File] = None, + nodeImage: Option[String] = None, + retainNodesOnFailure: Boolean = false, + waitForControlPlane: Int = 0): Task[List[String]] + + def deleteCluster(name: String, + kubeConfig: Option[File] = None): Task[Unit] + + def listClusters: Task[List[String]] + + def listNodes(name: String): Task[List[String]] + + def buildBaseImage(image: String): Task[Unit] + + def buildNodeImage(image: String): Task[Unit] + + def loadImage(image: String): Task[Unit] + + def exportLogs(path: Option[File] = None): Task[Unit] + + def exportKubeConfig(name: String, + path: Option[File] = None): Task[Unit] + + def printKubeConfig(name: String, + internalAddress: Boolean = false): Task[Unit] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/kind/LiveKind.scala b/jvm/src/main/scala/com/harana/modules/kind/LiveKind.scala new file mode 100644 index 0000000..44cf0ad --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/kind/LiveKind.scala @@ -0,0 +1,94 @@ +package com.harana.modules.kind + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.harana.modules.kind.models.Cluster +import zio.process.Command +import zio.{Task, ZIO, ZLayer} + +import java.io.File +import scala.collection.mutable + +object LiveKind { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveKind(config, logger, micrometer) + } +} + +case class LiveKind(config: Config, logger: Logger, micrometer: Micrometer) extends Kind { + + def createCluster(name: String, + cluster: Option[Cluster] = None, + kubeConfig: Option[File] = None, + nodeImage: Option[String] = None, + retainNodesOnFailure: Boolean = false, + waitForControlPlane: Int = 0) = + for { + _ <- logger.info(s"Creating Kind cluster: $name") + args <- ZIO.succeed { + val args = mutable.ListBuffer[String]("create", "cluster", "--name", name) + if (cluster.nonEmpty) args += s"--config ${generateConfig(cluster.get)}" + if (kubeConfig.nonEmpty) args += s"--kubeconfig ${kubeConfig.get.getAbsolutePath}" + if (nodeImage.nonEmpty) args += s"--image ${nodeImage.get}" + if (retainNodesOnFailure) args += s"--retain ${retainNodesOnFailure.toString}" + if (waitForControlPlane > 0) args += s"--wait ${waitForControlPlane}s" + args + } + cmd <- Command("kind", args.toSeq: _*).lines + } yield cmd.toList + + + def deleteCluster(name: String, kubeConfig: Option[File] = None): Task[Unit] = + for { + _ <- logger.info(s"Deleting 
Kind cluster: $name") + args <- ZIO.succeed { + val args = mutable.ListBuffer[String]("delete", "cluster", "--name", name) + if (kubeConfig.nonEmpty) args += s"--kubeconfig ${kubeConfig.get.getAbsolutePath}" + args + } + _ <- Command("kind", args.toSeq: _*).lines + } yield () + + + def listClusters: Task[List[String]] = + for { + cmd <- Command("kind", "get", "clusters").lines + } yield cmd.toList + + + def listNodes(name: String): Task[List[String]] = + for { + cmd <- Command("kind", List("get", "nodes", "--name", name): _*).lines + } yield cmd.toList + + + def buildBaseImage(image: String): Task[Unit] = + null + + + def buildNodeImage(image: String): Task[Unit] = + null + + + def loadImage(image: String): Task[Unit] = + null + + + def exportLogs(path: Option[File] = None): Task[Unit] = + null + + + def exportKubeConfig(name: String, + path: Option[File] = None): Task[Unit] = + null + + + def printKubeConfig(name: String, + internalAddress: Boolean = false): Task[Unit] = + null +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/kind/models/Cluster.scala b/jvm/src/main/scala/com/harana/modules/kind/models/Cluster.scala new file mode 100644 index 0000000..55864b3 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/kind/models/Cluster.scala @@ -0,0 +1,22 @@ +package com.harana.modules.kind.models + +case class Cluster(nodes: List[Node], + apiServerListenAddress: Option[String] = None, + apiServerListenPort: Option[String] = None, + disableDefaultCNI: Option[Boolean] = None, + ipFamily: Option[String] = None, + podSubnet: Option[String] = None, + serviceSubnet: Option[String] = None) + +case class Node(role: Option[String] = None, + image: Option[String] = None, + extraMounts: List[Mount] = List(), + extraPortMappings: List[PortMapping] = List()) + +case class Mount(hostPath: String, + containerPath: String) + +case class PortMapping(containerPort: Int, + hostPort: Int, + listenAddress: Option[String] = None, + protocol: Option[String] = None) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/kind/package.scala b/jvm/src/main/scala/com/harana/modules/kind/package.scala new file mode 100644 index 0000000..b842458 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/kind/package.scala @@ -0,0 +1,11 @@ +package com.harana.modules + +import com.harana.modules.kind.models.Cluster + +import java.io.File + +package object kind { + + @inline + def generateConfig(cluster: Cluster): File = null +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/kubernetes/Kubernetes.scala b/jvm/src/main/scala/com/harana/modules/kubernetes/Kubernetes.scala new file mode 100644 index 0000000..00af4b1 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/kubernetes/Kubernetes.scala @@ -0,0 +1,106 @@ +package com.harana.modules.kubernetes + +import akka.stream.scaladsl.Source +import akka.util.ByteString +import play.api.libs.json.{Format, Writes} +import skuber._ +import skuber.api.client._ +import skuber.api.patch.Patch +import skuber.apiextensions.CustomResourceDefinition +import zio.macros.accessible +import zio.stream.ZStream +import zio.{IO, Task} + +import scala.concurrent.Promise + +@accessible +trait Kubernetes { + def newClient: IO[K8SException, KubernetesClient] + + def get[O <: ObjectResource](client: KubernetesClient, namespace: String, name: String)(implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): IO[K8SException, Option[O]] + + def exists[O <: ObjectResource](client: KubernetesClient, namespace: 
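A minimal usage sketch for the Kind module and cluster model above: a single control-plane node that maps container port 80 to host port 8080. The cluster name, ports and wait time are illustrative, and it assumes `generateConfig` (currently a stub) is implemented so a config file can actually be produced.

```
import com.harana.modules.kind.Kind
import com.harana.modules.kind.models.{Cluster, Node, PortMapping}
import zio.Task

// Sketch only: create a small development cluster and wait for its control plane.
def devCluster(kind: Kind): Task[List[String]] = {
  val cluster = Cluster(nodes = List(
    Node(role = Some("control-plane"),
         extraPortMappings = List(PortMapping(containerPort = 80, hostPort = 8080)))
  ))
  kind.createCluster("dev", cluster = Some(cluster), waitForControlPlane = 60)
}
```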
String, name: String)(implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): IO[K8SException, Boolean] + + def save(client: KubernetesClient, namespace: String, crd: CustomResourceDefinition): IO[K8SException, CustomResourceDefinition] + + def createNamespace(client: KubernetesClient, namespace: String)(implicit lc: LoggingContext): IO[K8SException, Namespace] + + def create[O <: ObjectResource](client: KubernetesClient, namespace: String, obj: O)(implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): IO[K8SException, O] + + def createPodAndWait(client: KubernetesClient, namespace: String, obj: Pod, startupTime: Long)(implicit fmt: Format[Pod], rd: ResourceDefinition[Pod], lc: LoggingContext): IO[K8SException, Pod] + + def podInState(client: KubernetesClient, namespace: String, name: String, desiredState: String)(implicit fmt: Format[Pod], rd: ResourceDefinition[Pod], lc: LoggingContext): IO[K8SException, Boolean] + + def podTerminating(client: KubernetesClient, namespace: String, name: String)(implicit fmt: Format[Pod], rd: ResourceDefinition[Pod], lc: LoggingContext): IO[K8SException, Boolean] + + def waitForPodToTerminate(client: KubernetesClient, namespace: String, name: String)(implicit fmt: Format[Pod], rd: ResourceDefinition[Pod], lc: LoggingContext): IO[K8SException, Unit] + + def update[O <: ObjectResource](client: KubernetesClient, namespace: String, obj: O)(implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): IO[K8SException, O] + + def delete[O <: ObjectResource](client: KubernetesClient, namespace: String, name: String, gracePeriodSeconds: Int = -1)(implicit rd: ResourceDefinition[O], lc: LoggingContext): IO[K8SException, Unit] + + def deleteWithOptions[O <: ObjectResource](client: KubernetesClient, namespace: String, name: String, options: DeleteOptions)(implicit rd: ResourceDefinition[O], lc: LoggingContext): IO[K8SException, Unit] + + def deleteAll[O <: ObjectResource](client: KubernetesClient, namespace: String)(implicit fmt: Format[ListResource[O]], rd: ResourceDefinition[ListResource[O]], lc: LoggingContext): IO[K8SException, ListResource[O]] + + def deleteAllSelected[O <: ObjectResource](client: KubernetesClient, namespace: String, labelSelector: LabelSelector)(implicit fmt: Format[ListResource[O]], rd: ResourceDefinition[ListResource[O]], lc: LoggingContext): IO[K8SException, ListResource[O]] + + def getNamespaceNames(client: KubernetesClient)(implicit lc: LoggingContext): IO[K8SException, List[String]] + + def listByNamespace[O <: ObjectResource](client: KubernetesClient)(implicit fmt: Format[ListResource[O]], rd: ResourceDefinition[ListResource[O]], lc: LoggingContext): IO[K8SException, Map[String, ListResource[O]]] + + def list[O <: ObjectResource](client: KubernetesClient, namespace: String)(implicit fmt: Format[ListResource[O]], rd: ResourceDefinition[ListResource[O]], lc: LoggingContext): IO[K8SException, ListResource[O]] + + def listSelected[O <: ObjectResource](client: KubernetesClient, namespace: String, labelSelector: LabelSelector)(implicit fmt: Format[ListResource[O]], rd: ResourceDefinition[ListResource[O]], lc: LoggingContext): IO[K8SException, ListResource[O]] + + def listWithOptions[O <: ObjectResource](client: KubernetesClient, namespace: String, options: ListOptions)(implicit fmt: Format[ListResource[O]], rd: ResourceDefinition[ListResource[O]], lc: LoggingContext): IO[K8SException, ListResource[O]] + + def updateStatus[O <: ObjectResource](client: KubernetesClient, namespace: String, obj: 
O)(implicit fmt: Format[O], rd: ResourceDefinition[O], statusEv: HasStatusSubresource[O], lc: LoggingContext): IO[K8SException, O] + + def getStatus[O <: ObjectResource](client: KubernetesClient, namespace: String, name: String)(implicit fmt: Format[O], rd: ResourceDefinition[O], statusEv: HasStatusSubresource[O], lc: LoggingContext): IO[K8SException, O] + + def watch[O <: ObjectResource](client: KubernetesClient, namespace: String, obj: O)(implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): IO[K8SException, Source[WatchEvent[O], _]] + + def watch[O <: ObjectResource](client: KubernetesClient, namespace: String, name: String, sinceResourceVersion: Option[String] = None, bufSize: Int = 10000)( + implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): IO[K8SException, Source[WatchEvent[O], _]] + + def watchAll[O <: ObjectResource](client: KubernetesClient, namespace: String, sinceResourceVersion: Option[String] = None, bufSize: Int = 10000)( + implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): IO[K8SException, Source[WatchEvent[O], _]] + + def watchContinuously[O <: ObjectResource](client: KubernetesClient, namespace: String, obj: O)(implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): IO[K8SException, Source[WatchEvent[O], _]] + + def watchContinuously[O <: ObjectResource](client: KubernetesClient, namespace: String, name: String, sinceResourceVersion: Option[String] = None, bufSize: Int = 10000)( + implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): IO[K8SException, Source[WatchEvent[O], _]] + + def watchAllContinuously[O <: ObjectResource](client: KubernetesClient, namespace: String, sinceResourceVersion: Option[String] = None, bufSize: Int = 10000)( + implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): IO[K8SException, Source[WatchEvent[O], _]] + + def watchWithOptions[O <: ObjectResource](client: KubernetesClient, namespace: String, options: ListOptions, bufsize: Int = 10000)( + implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): IO[K8SException, Source[WatchEvent[O], _]] + + def getScale[O <: ObjectResource](client: KubernetesClient, namespace: String, objName: String)(implicit rd: ResourceDefinition[O], sc: Scale.SubresourceSpec[O], lc: LoggingContext) : IO[K8SException, Scale] + + def updateScale[O <: ObjectResource](client: KubernetesClient, namespace: String, objName: String, scale: Scale)(implicit rd: ResourceDefinition[O], sc: Scale.SubresourceSpec[O], lc: LoggingContext): IO[K8SException, Scale] + + def patch[P <: Patch, O <: ObjectResource](client: KubernetesClient, namespace: String, name: String, patchData: P) + (implicit patchfmt: Writes[P], fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext = RequestLoggingContext()): IO[K8SException, O] + + def getPodLogSource(client: KubernetesClient, namespace: String, name: String, queryParams: Pod.LogQueryParams)(implicit lc: LoggingContext): IO[K8SException, Source[ByteString, _]] + + def exec(client: KubernetesClient, + namespace: String, + podName: String, + command: Seq[String], + containerName: Option[String] = None, + stdin: Option[ZStream[Any, Nothing, String]] = None, + stdout: Option[String => Task[Unit]] = None, + stderr: Option[String => Task[Unit]] = None, + tty: Boolean = false, + maybeClose: Option[Promise[Unit]] = None)(implicit lc: LoggingContext): IO[K8SException, Unit] + + def getServerAPIVersions(client: KubernetesClient)(implicit lc: LoggingContext): 
IO[K8SException, List[String]] + + def resourceFromFile[A <: ObjectResource](fileName: String)(implicit fmt: Format[A]): Task[A] + + def close(client: KubernetesClient): IO[K8SException, Unit] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/kubernetes/LiveKubernetes.scala b/jvm/src/main/scala/com/harana/modules/kubernetes/LiveKubernetes.scala new file mode 100644 index 0000000..4b5b682 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/kubernetes/LiveKubernetes.scala @@ -0,0 +1,251 @@ +package com.harana.modules.kubernetes + +import akka.actor.ActorSystem +import akka.stream.Materializer +import akka.stream.scaladsl.{Sink, Source} +import akka.util.ByteString +import com.fasterxml.jackson.databind.ObjectMapper +import com.fasterxml.jackson.dataformat.yaml.YAMLFactory +import com.harana.modules.core.app.App.runEffect +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.harana.modules.kubernetes.LiveKubernetes._ +import play.api.libs.json.{Format, Json, Writes} +import skuber.api.Configuration +import skuber.api.client.{KubernetesClient, LoggingContext, RequestLoggingContext, WatchEvent} +import skuber.api.patch.Patch +import skuber.apiextensions.CustomResourceDefinition +import skuber.json.format.namespaceFormat +import skuber.{K8SException, k8sInit, _} +import zio.interop.reactivestreams.streamToPublisher +import zio.{Clock, _} +import zio.stream.ZStream + +import scala.concurrent.ExecutionContext.Implicits.global +import scala.concurrent.Promise + +object LiveKubernetes { + val yamlReader = new ObjectMapper(new YAMLFactory) + val jsonWriter = new ObjectMapper() + + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveKubernetes(config, logger, micrometer) + } +} + +case class LiveKubernetes(config: Config, logger: Logger, micrometer: Micrometer) extends Kubernetes { + + def newClient: IO[K8SException, KubernetesClient] = { + val cld = classOf[ActorSystem].getClassLoader + implicit val system = ActorSystem("Kubernetes", classLoader = Some(cld)) + ZIO.attempt(k8sInit(Configuration.parseKubeconfigFile().get)).refineToOrDie[K8SException] + } + + + def get[O <: ObjectResource](client: KubernetesClient, namespace: String, name: String)(implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): IO[K8SException, Option[O]] = + ZIO.fromFuture { _ => client.usingNamespace(namespace).getOption[O](name)(fmt, rd, lc) }.refineToOrDie[K8SException] + + + def exists[O <: ObjectResource](client: KubernetesClient, namespace: String, name: String)(implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): IO[K8SException, Boolean] = + ZIO.fromFuture { _ => client.usingNamespace(namespace).getOption(name)(fmt, rd, lc).map(_.nonEmpty) }.refineToOrDie[K8SException] + + + def save(client: KubernetesClient, namespace: String, crd: CustomResourceDefinition): IO[K8SException, CustomResourceDefinition] = + ZIO.fromFuture { _ => + client.usingNamespace(namespace).create(crd).recoverWith { + case alreadyExists: K8SException if alreadyExists.status.code.contains(409) => + client.get[CustomResourceDefinition](crd.name).flatMap { existing => + val currentVersion = existing.metadata.resourceVersion + val newMeta = crd.metadata.copy(resourceVersion = currentVersion) + val updatedObj = crd.copy(metadata = newMeta) + client.update(updatedObj) + } + } + + 
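A minimal usage sketch for the Kubernetes module trait above, assuming the standard skuber implicits (`skuber.json.format._`) are in scope; the namespace and pod name are illustrative.

```
import com.harana.modules.kubernetes.Kubernetes
import skuber._
import skuber.api.client.{K8SException, LoggingContext, RequestLoggingContext}
import skuber.json.format._
import zio.IO

// Sketch only: open a client, look up a pod and list namespaces, then close the client.
def podAndNamespaces(k8s: Kubernetes): IO[K8SException, (Option[Pod], List[String])] = {
  implicit val lc: LoggingContext = RequestLoggingContext()
  for {
    client     <- k8s.newClient
    pod        <- k8s.get[Pod](client, "default", "my-pod")
    namespaces <- k8s.getNamespaceNames(client)
    _          <- k8s.close(client)
  } yield (pod, namespaces)
}
```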
}.refineToOrDie[K8SException] + + + def createNamespace(client: KubernetesClient, namespace: String)(implicit lc: LoggingContext): IO[K8SException, Namespace] = + ZIO.fromFuture { _ => client.create[Namespace](Namespace(namespace)) }.refineToOrDie[K8SException] + + + def create[O <: ObjectResource](client: KubernetesClient, namespace: String, obj: O)(implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): IO[K8SException, O] = + ZIO.fromFuture { _ => client.usingNamespace(namespace).create[O](obj)(fmt, rd, lc) }.refineToOrDie[K8SException] + + + def createPodAndWait(client: KubernetesClient, namespace: String, obj: Pod, startupTime: Long)(implicit fmt: Format[Pod], rd: ResourceDefinition[Pod], lc: LoggingContext): IO[K8SException, Pod] = + for { + pod <- create[Pod](client, namespace, obj) + schedule = Schedule.fixed(500.milliseconds) && Schedule.recurUntil[Boolean](running => running == true) + _ <- podInState(client, namespace, obj.name, "running").repeat(schedule) + } yield pod + + + def podTerminating(client: KubernetesClient, namespace: String, name: String)(implicit fmt: Format[Pod], rd: ResourceDefinition[Pod], lc: LoggingContext): IO[K8SException, Boolean] = + get[Pod](client, namespace, name).map(pod => pod.flatMap(p => p.metadata.deletionTimestamp).nonEmpty) + + + def waitForPodToTerminate(client: KubernetesClient, namespace: String, name: String)(implicit fmt: Format[Pod], rd: ResourceDefinition[Pod], lc: LoggingContext): IO[K8SException, Unit] = + for { + schedule <- ZIO.succeed(Schedule.fixed(500.milliseconds) && Schedule.recurWhile[Boolean](terminating => terminating == true)) + _ <- (for { + terminating <- podTerminating(client, namespace, name) + exists <- exists[Pod](client, namespace, name) + } yield terminating && exists).repeat(schedule) + } yield () + + + def podInState(client: KubernetesClient, namespace: String, name: String, desiredState: String)(implicit fmt: Format[Pod], rd: ResourceDefinition[Pod], lc: LoggingContext): IO[K8SException, Boolean] = + get[Pod](client, namespace, name).map(maybePod => + for { + pod <- maybePod + status <- pod.status + containerStatus <- status.containerStatuses.headOption + containerState <- containerStatus.state + ready = containerState.id == desiredState + } yield ready + ).map(_.getOrElse(false)) + + + def update[O <: ObjectResource](client: KubernetesClient, namespace: String, obj: O)(implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): IO[K8SException, O] = + ZIO.fromFuture { _ => client.usingNamespace(namespace).update[O](obj)(fmt, rd, lc) }.refineToOrDie[K8SException] + + + def delete[O <: ObjectResource](client: KubernetesClient, namespace: String, name: String, gracePeriodSeconds: Int = -1)(implicit rd: ResourceDefinition[O], lc: LoggingContext): IO[K8SException, Unit] = + ZIO.fromFuture { _ => client.usingNamespace(namespace).delete[O](name, gracePeriodSeconds)(rd, lc) }.refineToOrDie[K8SException] + + + def deleteWithOptions[O <: ObjectResource](client: KubernetesClient, namespace: String, name: String, options: DeleteOptions)(implicit rd: ResourceDefinition[O], lc: LoggingContext): IO[K8SException, Unit] = + ZIO.fromFuture { _ => client.usingNamespace(namespace).deleteWithOptions[O](name, options)(rd, lc) }.refineToOrDie[K8SException] + + + def deleteAll[O <: ObjectResource](client: KubernetesClient, namespace: String)(implicit fmt: Format[ListResource[O]], rd: ResourceDefinition[ListResource[O]], lc: LoggingContext): IO[K8SException, ListResource[O]] = + ZIO.fromFuture { _ => 
client.usingNamespace(namespace).deleteAll[ListResource[O]]()(fmt, rd, lc) }.refineToOrDie[K8SException] + + + def deleteAllSelected[O <: ObjectResource](client: KubernetesClient, namespace: String, labelSelector: LabelSelector)(implicit fmt: Format[ListResource[O]], rd: ResourceDefinition[ListResource[O]], lc: LoggingContext): IO[K8SException, ListResource[O]] = + ZIO.fromFuture { _ => client.usingNamespace(namespace).deleteAllSelected[ListResource[O]](labelSelector)(fmt, rd, lc) }.refineToOrDie[K8SException] + + + def getNamespaceNames(client: KubernetesClient)(implicit lc: LoggingContext): IO[K8SException, List[String]] = + ZIO.fromFuture { _ => client.getNamespaceNames(lc) }.refineToOrDie[K8SException] + + + def listByNamespace[O <: ObjectResource](client: KubernetesClient)(implicit fmt: Format[ListResource[O]], rd: ResourceDefinition[ListResource[O]], lc: LoggingContext): IO[K8SException, Map[String, ListResource[O]]] = + ZIO.fromFuture { _ => client.listByNamespace[ListResource[O]]()(fmt, rd, lc) }.refineToOrDie[K8SException] + + + def list[O <: ObjectResource](client: KubernetesClient, namespace: String)(implicit fmt: Format[ListResource[O]], rd: ResourceDefinition[ListResource[O]], lc: LoggingContext): IO[K8SException, ListResource[O]] = + ZIO.fromFuture { _ => client.usingNamespace(namespace).list[ListResource[O]]()(fmt, rd, lc) }.refineToOrDie[K8SException] + + + def listSelected[O <: ObjectResource](client: KubernetesClient, namespace: String, labelSelector: LabelSelector)(implicit fmt: Format[ListResource[O]], rd: ResourceDefinition[ListResource[O]], lc: LoggingContext): IO[K8SException, ListResource[O]] = + ZIO.fromFuture { _ => client.usingNamespace(namespace).listSelected[ListResource[O]](labelSelector)(fmt, rd, lc) }.refineToOrDie[K8SException] + + + def listWithOptions[O <: ObjectResource](client: KubernetesClient, namespace: String, options: ListOptions)(implicit fmt: Format[ListResource[O]], rd: ResourceDefinition[ListResource[O]], lc: LoggingContext): IO[K8SException, ListResource[O]] = + ZIO.fromFuture { _ => client.usingNamespace(namespace).listWithOptions[ListResource[O]](options)(fmt, rd, lc) }.refineToOrDie[K8SException] + + + def updateStatus[O <: ObjectResource](client: KubernetesClient, namespace: String, obj: O)(implicit fmt: Format[O], rd: ResourceDefinition[O], statusEv: HasStatusSubresource[O], lc: LoggingContext): IO[K8SException, O] = + ZIO.fromFuture { _ => client.usingNamespace(namespace).updateStatus[O](obj)(fmt, rd, statusEv, lc) }.refineToOrDie[K8SException] + + + def getStatus[O <: ObjectResource](client: KubernetesClient, namespace: String, name: String)(implicit fmt: Format[O], rd: ResourceDefinition[O], statusEv: HasStatusSubresource[O], lc: LoggingContext): IO[K8SException, O] = + ZIO.fromFuture { _ => client.usingNamespace(namespace).getStatus[O](name)(fmt, rd, statusEv, lc) }.refineToOrDie[K8SException] + + + def watch[O <: ObjectResource](client: KubernetesClient, namespace: String, obj: O)(implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): IO[K8SException, Source[WatchEvent[O], _]] = + ZIO.fromFuture { _ => client.usingNamespace(namespace).watch[O](obj)(fmt, rd, lc) }.refineToOrDie[K8SException] + + + def watch[O <: ObjectResource](client: KubernetesClient, namespace: String, name: String, sinceResourceVersion: Option[String] = None, bufSize: Int = 10000)(implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): IO[K8SException, Source[WatchEvent[O], _]] = + ZIO.fromFuture { _ => 
client.usingNamespace(namespace).watch[O](name, sinceResourceVersion, bufSize)(fmt, rd, lc) }.refineToOrDie[K8SException] + + + def watchAll[O <: ObjectResource](client: KubernetesClient, namespace: String, sinceResourceVersion: Option[String] = None, bufSize: Int = 10000)(implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): IO[K8SException, Source[WatchEvent[O], _]] = + ZIO.fromFuture { _ => client.usingNamespace(namespace).watchAll[O](sinceResourceVersion, bufSize)(fmt, rd, lc) }.refineToOrDie[K8SException] + + + def watchContinuously[O <: ObjectResource](client: KubernetesClient, namespace: String, obj: O)(implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): IO[K8SException, Source[WatchEvent[O], _]] = + ZIO.from(client.usingNamespace(namespace).watchContinuously[O](obj)(fmt, rd, lc)).refineToOrDie[K8SException] + + + def watchContinuously[O <: ObjectResource](client: KubernetesClient, namespace: String, name: String, sinceResourceVersion: Option[String] = None, bufSize: Int = 10000)(implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): IO[K8SException, Source[WatchEvent[O], _]] = + ZIO.from(client.usingNamespace(namespace).watchContinuously[O](name, sinceResourceVersion, bufSize)(fmt, rd, lc)).refineToOrDie[K8SException] + + + def watchAllContinuously[O <: ObjectResource](client: KubernetesClient, namespace: String, sinceResourceVersion: Option[String] = None, bufSize: Int = 10000)(implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): IO[K8SException, Source[WatchEvent[O], _]] = + ZIO.from(client.usingNamespace(namespace).watchAllContinuously[O](sinceResourceVersion, bufSize)(fmt, rd, lc)).refineToOrDie[K8SException] + + + def watchWithOptions[O <: ObjectResource](client: KubernetesClient, namespace: String, options: ListOptions, bufSize: Int = 10000)(implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): IO[K8SException, Source[WatchEvent[O], _]] = + ZIO.from(client.usingNamespace(namespace).watchWithOptions[O](options, bufSize)(fmt, rd, lc)).refineToOrDie[K8SException] + + + def getScale[O <: ObjectResource](client: KubernetesClient, namespace: String, objName: String)(implicit rd: ResourceDefinition[O], sc: Scale.SubresourceSpec[O], lc: LoggingContext) : IO[K8SException, Scale] = + ZIO.fromFuture { _ => client.usingNamespace(namespace).getScale[O](objName)(rd, sc, lc) }.refineToOrDie[K8SException] + + + def updateScale[O <: ObjectResource](client: KubernetesClient, namespace: String, objName: String, scale: Scale)(implicit rd: ResourceDefinition[O], sc: Scale.SubresourceSpec[O], lc: LoggingContext): IO[K8SException, Scale] = + ZIO.fromFuture { _ => client.usingNamespace(namespace).updateScale[O](objName, scale)(rd, sc, lc) }.refineToOrDie[K8SException] + + + def patch[P <: Patch, O <: ObjectResource](client: KubernetesClient, namespace: String, name: String, patchData: P)(implicit patchfmt: Writes[P], fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext = RequestLoggingContext()): IO[K8SException, O] = + ZIO.fromFuture { _ => client.patch[P, O](name, patchData, Some(namespace))(patchfmt, fmt, rd, lc) }.refineToOrDie[K8SException] + + + def getPodLogSource(client: KubernetesClient, namespace: String, name: String, queryParams: Pod.LogQueryParams)(implicit lc: LoggingContext): IO[K8SException, Source[ByteString, _]] = + ZIO.fromFuture { _ => client.getPodLogSource(name, queryParams, Some(namespace))(lc) }.refineToOrDie[K8SException] + + + def exec(client: KubernetesClient, + 
namespace: String, + podName: String, + command: Seq[String], + containerName: Option[String] = None, + stdin: Option[ZStream[Any, Nothing, String]] = None, + stdout: Option[String => Task[Unit]] = None, + stderr: Option[String => Task[Unit]] = None, + tty: Boolean = false, + close: Option[Promise[Unit]] = None)(implicit lc: LoggingContext): IO[K8SException, Unit] = + for { + source <- if (stdin.nonEmpty) + for { + publisher <- stdin.get.toPublisher + source = Some(Source.fromPublisher(publisher)) + } yield source + else ZIO.none + + sinkOut = if (stdout.nonEmpty) Some(Sink.foreach[String](s => runEffect(stdout.get(s)))) else None + sinkErr = if (stderr.nonEmpty) Some(Sink.foreach[String](s => runEffect(stderr.get(s)))) else None + + _ <- ZIO.fromFuture { _ => + client.usingNamespace(namespace).exec(podName, command, containerName, source, sinkOut, sinkErr, tty, close)(lc) + }.refineToOrDie[K8SException] + } yield () + + + def getServerAPIVersions(client: KubernetesClient)(implicit lc: LoggingContext): IO[K8SException, List[String]] = + ZIO.fromFuture { _ => client.getServerAPIVersions(lc) }.refineToOrDie[K8SException] + + + def resourceFromFile[A <: ObjectResource](fileName: String)(implicit fmt: Format[A]): Task[A] = + for { + yaml <- ZIO.attempt(scala.io.Source.fromResource(fileName).mkString) + obj <- ZIO.attempt(yamlReader.readValue(yaml, classOf[Object])) + .onError(ex => logger.info(s"Failed to parse YAML for: $fileName with message: ${ex.prettyPrint}")) + json <- ZIO.attempt(jsonWriter.writeValueAsString(obj)) + resource <- ZIO.attempt(Json.parse(json).as[A]) + .onError(ex => logger.info(s"Failed to convert YAML to object for: $fileName with message: ${ex.prettyPrint}")) + } yield resource + + + def close(client: KubernetesClient): IO[K8SException, Unit] = + ZIO.from(client.close).refineToOrDie[K8SException] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/kubernetes_s3/KubernetesS3.scala b/jvm/src/main/scala/com/harana/modules/kubernetes_s3/KubernetesS3.scala new file mode 100644 index 0000000..fbf4619 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/kubernetes_s3/KubernetesS3.scala @@ -0,0 +1,18 @@ +package com.harana.modules.kubernetes_s3 + +import zio.Task +import zio.macros.accessible + +@accessible +trait KubernetesS3 { + + def createPersistentVolumeClaim(namePrefix: String, + namespace: String, + s3StorageClassName: String, + s3Endpoint: String, + s3Bucket: String, + s3Path: String, + s3AccessKeyId: String, + s3SecretAccessKey: String, + s3Capacity: Int): Task[String] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/kubernetes_s3/LiveKubernetesS3.scala b/jvm/src/main/scala/com/harana/modules/kubernetes_s3/LiveKubernetesS3.scala new file mode 100644 index 0000000..7192048 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/kubernetes_s3/LiveKubernetesS3.scala @@ -0,0 +1,76 @@ +package com.harana.modules.kubernetes_s3 + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.harana.modules.kubernetes.Kubernetes +import io.circe.syntax._ +import skuber.PersistentVolume.AccessMode +import skuber.Resource.Quantity +import skuber.Volume.GenericVolumeSource +import skuber._ +import skuber.json.format._ +import zio.{ZIO, ZLayer} + +object LiveKubernetesS3 { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + kubernetes <- ZIO.service[Kubernetes] + logger <- ZIO.service[Logger] + micrometer 
<- ZIO.service[Micrometer] + } yield LiveKubernetesS3(config, kubernetes, logger, micrometer) + } +} + +case class LiveKubernetesS3(config: Config, kubernetes: Kubernetes, logger: Logger, micrometer: Micrometer) extends KubernetesS3 { + + def createPersistentVolumeClaim(namePrefix: String, + namespace: String, + s3StorageClassName: String, + s3Endpoint: String, + s3Bucket: String, + s3Path: String, + s3AccessKeyId: String, + s3SecretAccessKey: String, + s3Capacity: Int) = + for { + _ <- logger.info(s"Creating volume with prefix: $namePrefix") + + client <- kubernetes.newClient + + _ <- kubernetes.create[Secret](client, namespace, + Secret(metadata = ObjectMeta(name = s"$namePrefix-secret", namespace = namespace), + data = Map("accessKeyID" -> s3AccessKeyId.getBytes, "secretAccessKey" -> s3SecretAccessKey.getBytes, "endpoint" -> s3Endpoint.getBytes) + )) + + _ <- kubernetes.create[PersistentVolume](client, namespace, + PersistentVolume(metadata = ObjectMeta(name = s"$namePrefix-pv", namespace = namespace), + spec = Some(PersistentVolume.Spec( + accessModes = List(AccessMode.ReadWriteMany), + capacity = Map(Resource.storage -> Quantity(s"${s3Capacity}Gi")), + claimRef = Some(ObjectReference(name = s"$namePrefix-pvc", namespace = namespace)), +// storageClassName = Some(s3StorageClassName), + source = GenericVolumeSource( + Map( + "csi" -> Map( + "driver" -> s"ru.yandex.s3.csi".asJson, + "controllerPublishSecretRef" -> Map("name" -> s"$namePrefix-secret", "namespace" -> namespace).asJson, + "nodePublishSecretRef" -> Map("name" -> s"$namePrefix-secret", "namespace" -> namespace).asJson, + "nodeStageSecretRef" -> Map("name" -> s"$namePrefix-secret", "namespace" -> namespace).asJson, + "volumeAttributes" -> Map("capacity" -> s"${s3Capacity}Gi", "mounter" -> "geesfs").asJson, + "volumeHandle" -> s"$s3Bucket/$s3Path".asJson + ).asJson + ).asJson.noSpaces + ) + )))) + + _ <- kubernetes.create[PersistentVolumeClaim](client, namespace, + PersistentVolumeClaim(metadata = ObjectMeta(name = s"$namePrefix-pvc", namespace = namespace), + spec = Some(PersistentVolumeClaim.Spec( + accessModes = List(AccessMode.ReadWriteMany), + resources = Some(Resource.Requirements(requests = Map(Resource.storage -> Quantity(s"${s3Capacity}Gi")))) + )) + )) + } yield s"$namePrefix-pvc" +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/ldap/Ldap.scala b/jvm/src/main/scala/com/harana/modules/ldap/Ldap.scala new file mode 100644 index 0000000..4a101bf --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/ldap/Ldap.scala @@ -0,0 +1,15 @@ +package com.harana.modules.ldap + +import zio.Task +import zio.macros.accessible + +@accessible +trait Ldap { + + def createUser(emailAddress: String, password: String): Task[Unit] + + def deleteUser(emailAddress: String): Task[Unit] + + def setPassword(emailAddress: String, password: String): Task[Unit] + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/ldap/LiveLdap.scala b/jvm/src/main/scala/com/harana/modules/ldap/LiveLdap.scala new file mode 100644 index 0000000..d8dfcdc --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/ldap/LiveLdap.scala @@ -0,0 +1,92 @@ +package com.harana.modules.ldap + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.unboundid.ldap.sdk._ +import zio.{Task, ZIO, ZLayer} + +object LiveLdap { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] 
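A minimal usage sketch for the KubernetesS3 module above; every value passed below (storage class, endpoint, bucket, credentials, capacity in Gi) is illustrative.

```
import com.harana.modules.kubernetes_s3.KubernetesS3
import zio.Task

// Sketch only: request a 10Gi S3-backed persistent volume claim and return its name.
def s3Claim(k8sS3: KubernetesS3): Task[String] =
  k8sS3.createPersistentVolumeClaim(
    namePrefix         = "analytics",
    namespace          = "default",
    s3StorageClassName = "csi-s3",
    s3Endpoint         = "https://s3.example.com",
    s3Bucket           = "my-bucket",
    s3Path             = "workspace",
    s3AccessKeyId      = "ACCESS_KEY_ID",
    s3SecretAccessKey  = "SECRET_ACCESS_KEY",
    s3Capacity         = 10
  )
```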
+ micrometer <- ZIO.service[Micrometer] + } yield LiveLdap(config, logger, micrometer) + } +} + +case class LiveLdap(config: Config, logger: Logger, micrometer: Micrometer) extends Ldap { + + def createUser(emailAddress: String, password: String): Task[Unit] = + for { + connection <- getConnection + entry = new Entry(dn(emailAddress)) { addAttribute("userPassword", password) } + result <- ZIO.async { (cb: Task[Unit] => Unit) => + connection.asyncAdd(new AddRequest(entry), new AsyncResultListener { + override def ldapResultReceived(requestID: AsyncRequestID, ldapResult: LDAPResult): Unit = { + if (ldapResult.getResultCode.equals(ResultCode.SUCCESS)) cb(ZIO.unit) + else cb(ZIO.fail(new Exception(ldapResult.getDiagnosticMessage))) + } + }) + } + } yield result + + + def deleteUser(emailAddress: String): Task[Unit] = + for { + connection <- getConnection + result <- ZIO.async { (cb: Task[Unit] => Unit) => + connection.asyncDelete(new DeleteRequest(dn(emailAddress)), new AsyncResultListener { + override def ldapResultReceived(requestID: AsyncRequestID, ldapResult: LDAPResult): Unit = { + if (ldapResult.getResultCode.equals(ResultCode.SUCCESS)) cb(ZIO.unit) + else cb(ZIO.fail(new Exception(ldapResult.getDiagnosticMessage))) + } + }) + } + } yield result + + + def setPassword(emailAddress: String, password: String): Task[Unit] = + for { + connection <- getConnection + _ <- bind(connection) + modifyRequest = new ModifyRequest(dn(emailAddress), new Modification(ModificationType.REPLACE, "userPassword", password)) + result <- ZIO.async { (cb: Task[Unit] => Unit) => + connection.asyncModify(modifyRequest, new AsyncResultListener { + override def ldapResultReceived(requestID: AsyncRequestID, ldapResult: LDAPResult): Unit = { + if (ldapResult.getResultCode.equals(ResultCode.SUCCESS)) cb(ZIO.unit) + else cb(ZIO.fail(new Exception(ldapResult.getDiagnosticMessage))) + } + }) + } + } yield () + + + private def bind(connection: LDAPConnection): Task[Unit] = + for { + bindUsername <- config.secret("ldap-bind-username") + bindPassword <- config.secret("ldap-bind-password") + bindTimeout <- config.long("auth.ldap.bindTimeout") + bindRequest <- ZIO.succeed { + val br = new SimpleBindRequest(s"cn=$bindUsername", bindPassword) + br.setResponseTimeoutMillis(bindTimeout) + br + } + _ <- ZIO.attempt(connection.bind(bindRequest)) + } yield () + + + private def getConnection: Task[LDAPConnection] = + for { + host <- config.secret("ldap-bind-host") + port <- config.int("auth.ldap.port") + connectTimeout <- config.int("auth.ldap.connectTimeout") + connection = new LDAPConnection() + _ <- ZIO.attempt(connection.connect(host, port, connectTimeout)) + } yield connection + + + private def dn(username: String) = + s"uid=$username,dc=harana,dc=com" +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/meilisearch/LiveMeilisearch.scala b/jvm/src/main/scala/com/harana/modules/meilisearch/LiveMeilisearch.scala new file mode 100644 index 0000000..e419c2f --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/meilisearch/LiveMeilisearch.scala @@ -0,0 +1,81 @@ +package com.harana.modules.meilisearch + +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.meilisearch.sdk.model.TaskInfo +import com.meilisearch.sdk.{Client, Config, SearchRequest, TasksHandler} +import io.circe.Encoder +import io.circe.syntax.EncoderOps +import zio.{Clock, Schedule, ZIO, ZLayer, durationInt} +import zio.Duration._ + +import scala.jdk.CollectionConverters._ + 
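A minimal usage sketch for the Ldap module above, assuming the bind configuration used by LiveLdap is present; the email address and passwords are illustrative.

```
import com.harana.modules.ldap.Ldap
import zio.Task

// Sketch only: provision a user and then rotate their password.
def provisionUser(ldap: Ldap): Task[Unit] =
  for {
    _ <- ldap.createUser("jane@example.com", "initial-password")
    _ <- ldap.setPassword("jane@example.com", "rotated-password")
  } yield ()
```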
+object LiveMeilisearch { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveMeilisearch(config, logger, micrometer) + } +} + +case class LiveMeilisearch(config: Config, logger: Logger, micrometer: Micrometer) extends Meilisearch { + + def newClient(host: String, port: Option[Long] = None, apiKey: Option[String] = None) = + ZIO.attempt { + val url = s"http://$host:${port.getOrElse(7000)}" + new Client(if (apiKey.isEmpty) new Config(url) else new Config(url, apiKey.get)) + } + + + def createIndex(client: Client, index: String, primaryKey: Option[String] = None) = + executeTask(client, if (primaryKey.isEmpty) client.createIndex(index) else client.createIndex(index, primaryKey.get)) + + + def deleteIndex(client: Client, index: String) = + executeTask(client, client.deleteIndex(index)) + + + def addObjects[T](client: Client, index: String, objects: List[T], inBatches: Boolean = false)(implicit encoder: Encoder[T]) = + executeTask(client, client.index(index).addDocuments(objects.asJson.noSpaces)) + + + def updateObjects[T](client: Client, index: String, objects: List[T], inBatches: Boolean = false)(implicit encoder: Encoder[T]) = + executeTask(client, client.index(index).updateDocuments(objects.asJson.noSpaces)) + + + def deleteObjects(client: Client, index: String, ids: List[String]) = + executeTask(client, client.index(index).deleteDocuments(ids.asJava)) + + + def deleteAllObjects(client: Client, index: String) = + executeTask(client, client.index(index).deleteAllDocuments()) + + + def search(client: Client, index: String, query: String) = + ZIO.attempt(client.index(index).search(query).getHits.asScala.map(_.asScala.toMap).toList) + + + def search(client: Client, index: String, request: SearchRequest) = + ZIO.attempt(client.index(index).search(request).getHits.asScala.map(_.asScala.toMap).toList) + + + def stopWords(client: Client, index: String) = + ZIO.attempt(client.index(index).getStopWordsSettings.toList) + + + def updateStopWords(client: Client, index: String, stopWords: List[String]) = + ZIO.attempt(client.index(index).updateStopWordsSettings(stopWords.toArray)) + + + private def executeTask(client: Client, fn: => TaskInfo) = + for { + id <- ZIO.succeed(fn.getTaskUid) + schedule = Schedule.fixed(50 milliseconds) && Schedule.recurUntil[String](status => + status != TasksHandler.SUCCEEDED && status != TasksHandler.FAILED + ) + _ <- ZIO.attempt(client.getTask(id).getStatus).repeat(schedule) + } yield () +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/meilisearch/Meilisearch.scala b/jvm/src/main/scala/com/harana/modules/meilisearch/Meilisearch.scala new file mode 100644 index 0000000..8656819 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/meilisearch/Meilisearch.scala @@ -0,0 +1,33 @@ +package com.harana.modules.meilisearch + +import com.meilisearch.sdk.{Client, SearchRequest} +import io.circe.Encoder +import zio.Task +import zio.macros.accessible + +@accessible +trait Meilisearch { + + def newClient(host: String, port: Option[Long] = None, apiKey: Option[String] = None): Task[Client] + + def createIndex(client: Client, index: String, primaryKey: Option[String] = None): Task[Unit] + + def deleteIndex(client: Client, index: String): Task[Unit] + + def addObjects[T](client: Client, index: String, objects: List[T], inBatches: Boolean = false)(implicit encoder: Encoder[T]): Task[Unit] + + def updateObjects[T](client: Client, index: String, objects: 
List[T], inBatches: Boolean = false)(implicit encoder: Encoder[T]): Task[Unit] + + def deleteObjects(client: Client, index: String, ids: List[String]): Task[Unit] + + def deleteAllObjects(client: Client, index: String): Task[Unit] + + def search(client: Client, index: String, query: String): Task[List[Map[String, AnyRef]]] + + def search(client: Client, index: String, request: SearchRequest): Task[Unit] + + def stopWords(client: Client, index: String): Task[List[String]] + + def updateStopWords(client: Client, index: String, stopWords: List[String]): Task[Unit] + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/mixpanel/LiveMixpanel.scala b/jvm/src/main/scala/com/harana/modules/mixpanel/LiveMixpanel.scala new file mode 100644 index 0000000..9681b60 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/mixpanel/LiveMixpanel.scala @@ -0,0 +1,166 @@ +package com.harana.modules.mixpanel + + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.mixpanel.mixpanelapi.{ClientDelivery, MessageBuilder, MixpanelAPI} +import org.json.{JSONArray, JSONObject} +import zio.{ZIO, ZLayer} + +import java.util.UUID +import scala.jdk.CollectionConverters._ + +object LiveMixpanel { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveMixpanel(config, logger, micrometer) + } +} + +case class LiveMixpanel(config: Config, logger: Logger, micrometer: Micrometer) extends Mixpanel { + + private val api = new MixpanelAPI() + private val messageBuilder = config.secret("mixpanel-token").map(t => new MessageBuilder(t)) + + def append(id: UUID, properties: Map[String, Object], modifiers: Map[String, Object] = Map()) = + for { + mb <- messageBuilder + r <- ZIO.attempt(mb.append(id.toString, toJson(properties), toJson(modifiers))) + } yield r + + + def delete(id: UUID, modifiers: Map[String, Object] = Map()) = + for { + mb <- messageBuilder + r <- ZIO.attempt(mb.delete(id.toString, toJson(modifiers))) + } yield r + + + def event(id: UUID, name: String, properties: Map[String, Object]) = + for { + mb <- messageBuilder + r <- ZIO.attempt(mb.event(id.toString, name, toJson(properties))) + } yield r + + + def groupDelete(groupKey: String, groupId: String, modifiers: Map[String, Object] = Map()) = + for { + mb <- messageBuilder + r <- ZIO.attempt(mb.groupDelete(groupKey, groupId, toJson(modifiers))) + } yield r + + + def groupMessage(groupKey: String, groupId: String, actionType: String, properties: Map[String, Object], modifiers: Map[String, Object] = Map()) = + for { + mb <- messageBuilder + r <- ZIO.attempt(mb.groupMessage(groupKey, groupId, actionType, toJson(properties), toJson(modifiers))) + } yield r + + + def groupRemove(groupKey: String, groupId: String, properties: Map[String, Object], modifiers: Map[String, Object] = Map()) = + for { + mb <- messageBuilder + r <- ZIO.attempt(mb.groupRemove(groupKey, groupId, toJson(properties), toJson(modifiers))) + } yield r + + + def groupSet(groupKey: String, groupId: String, properties: Map[String, Object], modifiers: Map[String, Object] = Map()) = + for { + mb <- messageBuilder + r <- ZIO.attempt(mb.groupSet(groupKey, groupId, toJson(properties), toJson(modifiers))) + } yield r + + + def groupSetOnce(groupKey: String, groupId: String, properties: Map[String, Object], modifiers: Map[String, Object] = Map()) = + for { + mb <- messageBuilder + r <- 
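A minimal usage sketch for the Meilisearch module, assuming a local server; the host, port, index name and documents are illustrative, and circe's built-in `Encoder[Map[String, String]]` is used so no custom encoder is needed.

```
import com.harana.modules.meilisearch.Meilisearch
import zio.Task

// Sketch only: create an index, add two documents and run a search.
def indexAndSearch(meili: Meilisearch): Task[List[Map[String, AnyRef]]] =
  for {
    client <- meili.newClient("localhost", port = Some(7700L))
    _      <- meili.createIndex(client, "movies", primaryKey = Some("id"))
    _      <- meili.addObjects(client, "movies", List(
                Map("id" -> "1", "title" -> "Carol"),
                Map("id" -> "2", "title" -> "Wonder Woman")))
    hits   <- meili.search(client, "movies", "wonder")
  } yield hits
```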
ZIO.attempt(mb.groupSetOnce(groupKey, groupId, toJson(properties), toJson(modifiers))) + } yield r + + + def groupUnion(groupKey: String, groupId: String, properties: Map[String, JSONArray], modifiers: Map[String, Object] = Map()) = + for { + mb <- messageBuilder + r <- ZIO.attempt(mb.groupUnion(groupKey, groupId, properties.asJava, toJson(modifiers))) + } yield r + + + def groupUnset(groupKey: String, groupId: String, propertyNames: List[String], modifiers: Map[String, Object] = Map()) = + for { + mb <- messageBuilder + r <- ZIO.attempt(mb.groupUnset(groupKey, groupId, propertyNames.asJava, toJson(modifiers))) + } yield r + + + def increment(id: UUID, properties: Map[String, Long], modifiers: Map[String, String] = Map()) = + for { + mb <- messageBuilder + r <- ZIO.attempt(mb.increment(id.toString, properties.view.mapValues(Long.box).toMap.asJava, toJson(modifiers))) + } yield r + + + def peopleMessage(id: UUID, actionType: String, properties: Map[String, Object], modifiers: Map[String, Object] = Map()) = + for { + mb <- messageBuilder + r <- ZIO.attempt(mb.peopleMessage(id.toString, actionType, toJson(properties), toJson(modifiers))) + } yield r + + + def remove(id: UUID, properties: Map[String, Object], modifiers: Map[String, Object] = Map()) = + for { + mb <- messageBuilder + r <- ZIO.attempt(mb.remove(id.toString, toJson(properties), toJson(modifiers))) + } yield r + + + def send(messages: List[JSONObject]) = { + val delivery = new ClientDelivery() + messages.foreach(delivery.addMessage) + ZIO.from(api.deliver(delivery)).unit + } + + def set(id: UUID, properties: Map[String, Object], modifiers: Map[String, Object] = Map()) = + for { + mb <- messageBuilder + r <- ZIO.attempt(mb.set(id.toString, toJson(properties), toJson(modifiers))) + } yield r + + + def setOnce(id: UUID, properties: Map[String, Object], modifiers: Map[String, Object] = Map()) = + for { + mb <- messageBuilder + r <- ZIO.attempt(mb.setOnce(id.toString, toJson(properties), toJson(modifiers))) + } yield r + + + def trackCharge(id: UUID, amount: Double, properties: Map[String, Object], modifiers: Map[String, Object] = Map()) = + for { + mb <- messageBuilder + r <- ZIO.attempt(mb.trackCharge(id.toString, amount, toJson(properties), toJson(modifiers))) + } yield r + + + def union(id: UUID, properties: Map[String, JSONArray], modifiers: Map[String, Object] = Map()) = + for { + mb <- messageBuilder + r <- ZIO.attempt(mb.union(id.toString, properties.asJava, toJson(modifiers))) + } yield r + + + def unset(id: UUID, propertyNames: List[String], modifiers: Map[String, String] = Map()) = + for { + mb <- messageBuilder + r <- ZIO.attempt(mb.unset(id.toString, propertyNames.asJava, toJson(modifiers))) + } yield r + + + private def toJson(properties: Map[String, Object]) = { + val json = new JSONObject + properties.foreach(p => json.put(p._1, p._2)) + json + } +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/mixpanel/Mixpanel.scala b/jvm/src/main/scala/com/harana/modules/mixpanel/Mixpanel.scala new file mode 100644 index 0000000..662f9ad --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/mixpanel/Mixpanel.scala @@ -0,0 +1,48 @@ +package com.harana.modules.mixpanel + +import org.json.{JSONArray, JSONObject} +import zio.Task +import zio.macros.accessible + +import java.util.UUID + +@accessible +trait Mixpanel { + def append(id: UUID, properties: Map[String, Object], modifiers: Map[String, Object] = Map()): Task[JSONObject] + + def delete(id: UUID, modifiers: Map[String, Object] = Map()): 
Task[JSONObject] + + def event(id: UUID, name: String, properties: Map[String, Object]): Task[JSONObject] + + def groupDelete(groupKey: String, groupId: String, modifiers: Map[String, Object] = Map()): Task[JSONObject] + + def groupMessage(groupKey: String, groupId: String, actionType: String, properties: Map[String, Object], modifiers: Map[String, Object] = Map()): Task[JSONObject] + + def groupRemove(groupKey: String, groupId: String, properties: Map[String, Object], modifiers: Map[String, Object] = Map()): Task[JSONObject] + + def groupSet(groupKey: String, groupId: String, properties: Map[String, Object], modifiers: Map[String, Object] = Map()): Task[JSONObject] + + def groupSetOnce(groupKey: String, groupId: String, properties: Map[String, Object], modifiers: Map[String, Object] = Map()): Task[JSONObject] + + def groupUnion(groupKey: String, groupId: String, properties: Map[String, JSONArray], modifiers: Map[String, Object] = Map()): Task[JSONObject] + + def groupUnset(groupKey: String, groupId: String, propertyNames: List[String], modifiers: Map[String, Object] = Map()): Task[JSONObject] + + def increment(id: UUID, properties: Map[String, Long], modifiers: Map[String, String] = Map()): Task[JSONObject] + + def peopleMessage(id: UUID, actionType: String, properties: Map[String, Object], modifiers: Map[String, Object] = Map()): Task[JSONObject] + + def remove(id: UUID, properties: Map[String, Object], modifiers: Map[String, Object] = Map()): Task[JSONObject] + + def send(messages: List[JSONObject]): Task[Unit] + + def set(id: UUID, properties: Map[String, Object], modifiers: Map[String, Object] = Map()): Task[JSONObject] + + def setOnce(id: UUID, properties: Map[String, Object], modifiers: Map[String, Object] = Map()): Task[JSONObject] + + def trackCharge(id: UUID, amount: Double, properties: Map[String, Object], modifiers: Map[String, Object] = Map()): Task[JSONObject] + + def union(id: UUID, properties: Map[String, JSONArray], modifiers: Map[String, Object] = Map()): Task[JSONObject] + + def unset(id: UUID, propertyNames: List[String], modifiers: Map[String, String] = Map()): Task[JSONObject] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/ognl/LiveOgnl.scala b/jvm/src/main/scala/com/harana/modules/ognl/LiveOgnl.scala new file mode 100644 index 0000000..2adcafd --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/ognl/LiveOgnl.scala @@ -0,0 +1,32 @@ +package com.harana.modules.ognl + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.harana.modules.ognl.models.{OgnlMemberAccess, OgnlObjectPropertyAccessor} +import ognl.{DefaultClassResolver, OgnlRuntime, Ognl => jOgnl} +import zio.{Task, ZIO, ZLayer} + +import scala.jdk.CollectionConverters._ + +object LiveOgnl { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveOgnl(config, logger, micrometer) + } +} + +case class LiveOgnl(config: Config, logger: Logger, micrometer: Micrometer) extends Ognl { + + OgnlRuntime.setPropertyAccessor(classOf[Object], new OgnlObjectPropertyAccessor()) + + def render(expression: String, context: Map[String, Any]): Task[Any] = { + val ognlContext = jOgnl.createDefaultContext(context.asJava, new OgnlMemberAccess, new DefaultClassResolver, null) + val ognlExpression = jOgnl.parseExpression(expression) + ZIO.succeed(jOgnl.getValue(ognlExpression, ognlContext, 
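A minimal usage sketch for the Mixpanel module trait above; the distinct id, event name and property values are illustrative.

```
import com.harana.modules.mixpanel.Mixpanel
import zio.Task

import java.util.UUID

// Sketch only: record a sign-up event, update the profile, then deliver both messages.
def trackSignup(mixpanel: Mixpanel, userId: UUID): Task[Unit] =
  for {
    event   <- mixpanel.event(userId, "Signed Up", Map("plan" -> "free"))
    profile <- mixpanel.set(userId, Map("$email" -> "jane@example.com"))
    _       <- mixpanel.send(List(event, profile))
  } yield ()
```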
context.asJava)) + } + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/ognl/Ognl.scala b/jvm/src/main/scala/com/harana/modules/ognl/Ognl.scala new file mode 100644 index 0000000..6434919 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/ognl/Ognl.scala @@ -0,0 +1,9 @@ +package com.harana.modules.ognl + +import zio.Task +import zio.macros.accessible + +@accessible +trait Ognl { + def render(expression: String, context: Map[String, Any]): Task[Any] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/ognl/models/OgnlMemberAccess.java b/jvm/src/main/scala/com/harana/modules/ognl/models/OgnlMemberAccess.java new file mode 100644 index 0000000..57429fc --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/ognl/models/OgnlMemberAccess.java @@ -0,0 +1,34 @@ +package com.harana.modules.ognl.models; + +import ognl.MemberAccess; + +import java.lang.reflect.AccessibleObject; +import java.lang.reflect.Member; +import java.lang.reflect.Modifier; +import java.util.Map; + +public class OgnlMemberAccess implements MemberAccess { + + @Override + public Object setup(Map context, Object target, Member member, String propertyName) { + Object result = null; + if (isAccessible(context, target, member, propertyName)) { + AccessibleObject accessible = (AccessibleObject) member; + if (!accessible.isAccessible()) { + result = Boolean.FALSE; + accessible.setAccessible(true); + } + } + return result; + } + + @Override + public void restore(Map context, Object target, Member member, String propertyName, Object state) { + if (state != null) ((AccessibleObject) member).setAccessible(((Boolean) state)); + } + + @Override + public boolean isAccessible(Map context, Object target, Member member, String propertyName) { + return Modifier.isPublic(member.getModifiers()); + } +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/ognl/models/OgnlObjectPropertyAccessor.scala b/jvm/src/main/scala/com/harana/modules/ognl/models/OgnlObjectPropertyAccessor.scala new file mode 100644 index 0000000..11b29c1 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/ognl/models/OgnlObjectPropertyAccessor.scala @@ -0,0 +1,79 @@ +package com.harana.modules.ognl.models + +import ognl.{ObjectPropertyAccessor, OgnlContext, OgnlException, OgnlRuntime} + +import java.beans.IntrospectionException +import java.util +import scala.jdk.CollectionConverters._ + +class OgnlObjectPropertyAccessor extends ObjectPropertyAccessor { + + override def getPossibleProperty(context: util.Map[_, _], target: scala.Any, name: String): AnyRef = { + if (isCaseClass(target)) { + getCaseClassFieldValueByName(target, name) match { + case Some(x) => return x.asInstanceOf[AnyRef] + case None => + } + } + + var result: Object = null + val ognlContext: OgnlContext = context.asInstanceOf[OgnlContext] + + try { + result = OgnlRuntime.getMethodValue(ognlContext, target, name, true) + if (result == OgnlRuntime.NotFound) { + result = OgnlRuntime.getFieldValue(ognlContext, target, name, true) + } + } + catch { + case ex: IntrospectionException => throw new OgnlException(name, ex) + case ex: OgnlException => throw ex + case ex: Exception => throw new OgnlException(name, ex) + } + + result + } + + def getCaseClassFieldValueByName(targetClass: Any, fieldName: String): Option[Any] = { + val productInstance = targetClass.asInstanceOf[Product] + val fieldsNameToValueMap = productInstance.getClass.getDeclaredFields.map(_.getName) + .zip(productInstance.productIterator).toMap + 
fieldsNameToValueMap.get(fieldName) + } + + def isCaseClass(instance: Any) = { + import reflect.runtime.universe._ + val typeMirror = runtimeMirror(instance.getClass.getClassLoader) + val instanceMirror = typeMirror.reflect(instance) + val symbol = instanceMirror.symbol + symbol.isCaseClass + } + + override def setPossibleProperty(context: util.Map[_, _], target: scala.Any, name: String, value: scala.Any): AnyRef = super.setPossibleProperty(context, target, name, value) + + override def hasGetProperty(context: OgnlContext, target: scala.Any, oname: scala.Any): Boolean = super.hasGetProperty(context, target, oname) + + override def hasGetProperty(context: util.Map[_, _], target: scala.Any, oname: scala.Any): Boolean = super.hasGetProperty(context, target, oname) + + override def hasSetProperty(context: OgnlContext, target: scala.Any, oname: scala.Any): Boolean = super.hasSetProperty(context, target, oname) + + override def hasSetProperty(context: util.Map[_, _], target: scala.Any, oname: scala.Any): Boolean = super.hasSetProperty(context, target, oname) + + override def getProperty(context: util.Map[_, _], target: scala.Any, oname: scala.Any): AnyRef = { + val result = super.getProperty(context, target, oname) + result match { + case c: List[_] => c.asJava + case c: Map[_, _] => c.asJava + case c: Set[_] => c.asJava + case _ => result + } + } + + override def setProperty(context: util.Map[_, _], target: scala.Any, oname: scala.Any, value: scala.Any): Unit = super.setProperty(context, target, oname, value) + + override def getPropertyClass(context: OgnlContext, target: scala.Any, index: scala.Any): Class[_] = super.getPropertyClass(context, target, index) + + override def getSourceAccessor(context: OgnlContext, target: scala.Any, index: scala.Any): String = super.getSourceAccessor(context, target, index) + + override def getSourceSetter(context: OgnlContext, target: scala.Any, index: scala.Any): String = super.getSourceSetter(context, target, index) +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/ohc/LiveOHC.scala b/jvm/src/main/scala/com/harana/modules/ohc/LiveOHC.scala new file mode 100644 index 0000000..01d2a46 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/ohc/LiveOHC.scala @@ -0,0 +1,85 @@ +package com.harana.modules.ohc + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import org.caffinitas.ohc.{CacheLoader, DirectValueAccess, OHCache, OHCacheBuilder} +import zio.{ZIO, ZLayer} + +import java.nio.ByteBuffer +import scala.jdk.CollectionConverters._ + +object LiveOHC { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveOHC(config, logger, micrometer) + } +} + +case class LiveOHC(config: Config, logger: Logger, micrometer: Micrometer) extends OHC { + + def newCache[K, V](hashTableSize: Option[Int] = None, + chunkSize: Option[Int] = None, + capacity: Option[Long] = None, + segmentCount: Option[Int] = None) = { + val builder = OHCacheBuilder.newBuilder[K, V]() + if (hashTableSize.nonEmpty) builder.hashTableSize(hashTableSize.get) + if (chunkSize.nonEmpty) builder.chunkSize(chunkSize.get) + if (capacity.nonEmpty) builder.capacity(capacity.get) + if (segmentCount.nonEmpty) builder.segmentCount(segmentCount.get) + ZIO.succeed(builder.build()) + } + + def put[K, V](cache: OHCache[K, V], key: K, value: V, expireAt: Option[Long] = None) = + ZIO.succeed(if 
(expireAt.nonEmpty) cache.put(key, value, expireAt.get) else cache.put(key, value)) + + + def putIfAbsent[K, V](cache: OHCache[K, V], key: K, value: V, expireAt: Option[Long] = None) = + ZIO.succeed(if (expireAt.nonEmpty) cache.putIfAbsent(key, value, expireAt.get) else cache.putIfAbsent(key, value)) + + + def putAll[K, V](cache: OHCache[K, V], values: Map[K, V]) = + ZIO.succeed(cache.putAll(values.asJava)) + + + def addOrReplace[K, V](cache: OHCache[K, V], key: K, oldValue: V, newValue: V, expireAt: Option[Long] = None) = + ZIO.succeed(if (expireAt.nonEmpty) cache.addOrReplace(key, oldValue, newValue, expireAt.get) else cache.addOrReplace(key, oldValue, newValue)) + + + def remove[K, V](cache: OHCache[K, V], key: K) = + ZIO.succeed(cache.remove(key)) + + + def removeAll[K, V](cache: OHCache[K, V], keys: Set[K]) = + ZIO.succeed(cache.removeAll(keys.asJava)) + + + def clear[K, V](cache: OHCache[K, V]) = + ZIO.succeed(cache.clear()) + + + def get[K, V](cache: OHCache[K, V], key: K) = + ZIO.succeed(cache.get(key)) + + +// FIXME +// def getAsBytes[K, V](cache: OHCache[K, V], key: K, updateLRU: Boolean = false) = +// ZIO.acquireReleaseWith[OHCache[K, V], DirectValueAccess, ByteBuffer](ZIO.succeed(cache.getDirect(key, updateLRU)), d => ZIO.succeed(d.close()), d => ZIO.succeed(d.buffer())) + + + def getWithLoader[K, V](cache: OHCache[K, V], key: K, loader: CacheLoader[K, V], expireAt: Option[Long] = None) = + ZIO.fromFutureJava( + if (expireAt.nonEmpty) cache.getWithLoaderAsync(key, loader, expireAt.get) else cache.getWithLoaderAsync(key, loader) + ).orDie + + + def containsKey[K, V](cache: OHCache[K, V], key: K) = + ZIO.succeed(cache.containsKey(key)) + + + def size[K, V](cache: OHCache[K, V]) = + ZIO.succeed(cache.size()) +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/ohc/OHC.scala b/jvm/src/main/scala/com/harana/modules/ohc/OHC.scala new file mode 100644 index 0000000..dc3abc4 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/ohc/OHC.scala @@ -0,0 +1,39 @@ +package com.harana.modules.ohc + +import org.caffinitas.ohc.{CacheLoader, OHCache} +import zio.UIO +import zio.macros.accessible + +import java.nio.ByteBuffer + +@accessible +trait OHC { + def newCache[K, V](hashTableSize: Option[Int] = None, + chunkSize: Option[Int] = None, + capacity: Option[Long] = None, + segmentCount: Option[Int] = None): UIO[OHCache[K, V]] + + def put[K, V](cache: OHCache[K, V], key: K, value: V, expireAt: Option[Long] = None): UIO[Boolean] + + def putIfAbsent[K, V](cache: OHCache[K, V], key: K, value: V, expireAt: Option[Long] = None): UIO[Boolean] + + def putAll[K, V](cache: OHCache[K, V], values: Map[K, V]): UIO[Unit] + + def addOrReplace[K, V](cache: OHCache[K, V], key: K, oldValue: V, newValue: V, expireAt: Option[Long] = None): UIO[Boolean] + + def remove[K, V](cache: OHCache[K, V], key: K): UIO[Boolean] + + def removeAll[K, V](cache: OHCache[K, V], keys: Set[K]): UIO[Unit] + + def clear[K, V](cache: OHCache[K, V]): UIO[Unit] + + def get[K, V](cache: OHCache[K, V], key: K): UIO[V] + +// def getAsBytes[K, V](cache: OHCache[K, V], key: K, updateLRU: Boolean = false): UIO[ByteBuffer] + + def getWithLoader[K, V](cache: OHCache[K, V], key: K, loader: CacheLoader[K, V], expireAt: Option[Long] = None): UIO[V] + + def containsKey[K, V](cache: OHCache[K, V], key: K): UIO[Boolean] + + def size[K, V](cache: OHCache[K, V]): UIO[Long] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/plugin/LivePlugin.scala 
b/jvm/src/main/scala/com/harana/modules/plugin/LivePlugin.scala new file mode 100644 index 0000000..d55243d --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/plugin/LivePlugin.scala @@ -0,0 +1,170 @@ +package com.harana.modules.plugin + +import com.harana.models.{PluginInfo, PluginServiceInfo} +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.harana.modules.ohc.OHC +import com.harana.modules.plugin.LivePlugin._ +import com.harana.modules.plugin.models.PluginError +import com.harana.sdk.shared.plugin.Service +import com.harana.sdk.shared.plugin.Service.{ServiceId => PluginServiceId} +import org.apache.felix.framework.Felix +import org.osgi.framework.Constants +import zio._ + +import java.io.File +import java.net.URL +import java.util +import scala.reflect.ClassTag + +object LivePlugin { + + val scalazVersion = "7.2.7" + val scalaVersion = scala.util.Properties.versionNumberString + + val systemPackages = + "scala;version=" + scalaVersion + + ",scala.annotation;version=" + scalaVersion + + ",scala.collection;version=" + scalaVersion + + ",scala.collection.convert;version=" + scalaVersion + + ",scala.collection.generic;version=" + scalaVersion + + ",scala.collection.immutable;version=" + scalaVersion + + ",scala.collection.mutable;version=" + scalaVersion + + ",scala.collection.parallel;version=" + scalaVersion + + ",scala.collection.parallel.immutable;version=" + scalaVersion + + ",scala.collection.parallel.mutable;version=" + scalaVersion + + ",scala.collection.script;version=" + scalaVersion + + ",scala.concurrent;version=" + scalaVersion + + ",scala.concurrent.duration;version=" + scalaVersion + + ",scala.io;version=" + scalaVersion + + ",scala.math;version=" + scalaVersion + + ",scala.reflect.api;version=" + scalaVersion + + ",scala.reflect.internal;version=" + scalaVersion + + ",scala.reflect.internal.util;version=" + scalaVersion + + ",scala.reflect;version=" + scalaVersion + + ",scala.reflect.macros.blackbox;version=" + scalaVersion + + ",scala.reflect.macros.whitebox;version=" + scalaVersion + + ",scala.reflect.macros.contexts;version=" + scalaVersion + + ",scala.reflect.macros;version=" + scalaVersion + + ",scala.runtime;version=" + scalaVersion + + ",scala.runtime.java8;version=" + scalaVersion + + ",scala.tools.nsc;version=" + scalaVersion + + ",scala.tools.nsc.ast;version=" + scalaVersion + + ",scala.tools.nsc.typechecker;version=" + scalaVersion + + ",scala.sys;version=" + scalaVersion + + ",scala.util;version=" + scalaVersion + + ",scala.util.control;version=" + scalaVersion + + ",scala.util.hashing;version=" + scalaVersion + + ",scala.annotation;version=" + scalaVersion + + ",scala.util.matching;version=" + scalaVersion + + ",scala.xml;version=1.0.6" + + ",scalaz;version=" + scalazVersion + + ",scalaz.syntax;version=" + scalazVersion + + ",org.osgi.framework;version=1.6.0" + + ",org.osgi.service,services,utils,com.fasterxml.jackson,play,io.gatling.jsonpath,android.util,org.joda.convert,org.apache.felix.scr,org.slf4j,io.dropwizard.metrics.healthchecks" + + val sdkPackages = + "com.harana.sdk.components" + + ",com.harana.sdk.components.basic" + + ",com.harana.sdk.components.cards" + + ",com.harana.sdk.components.elements" + + ",com.harana.sdk.components.lists" + + ",com.harana.sdk.components.maps" + + ",com.harana.sdk.components.panels" + + ",com.harana.sdk.components.structure" + + ",com.harana.sdk.components.widgets" + + ",com.harana.sdk.models" + + 
",com.harana.sdk.parameters" + + ",com.harana.sdk.plugin" + + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LivePlugin(config, logger, micrometer) + } +} + +case class LivePlugin(config: Config, logger: Logger, micrometer: Micrometer) extends Plugin { + + private val bundlesDirectory = config.string("plugins.bundleDir").map(f => new File(f)) + private val pluginsDirectory = config.string("plugins.pluginsDir").map(f => new File(f)) + private val cacheDirectory = config.string("plugins.cacheDir").map(f => new File(f)) + + private val bundleContext = + for { + bundlesDir <- bundlesDirectory + pluginsDir <- pluginsDirectory + props <- ZIO.succeed { + val props = new util.HashMap[String, String] + props.put(Constants.FRAMEWORK_SYSTEMPACKAGES_EXTRA, systemPackages + "," + sdkPackages) + props.put(Constants.FRAMEWORK_STORAGE_CLEAN, Constants.FRAMEWORK_STORAGE_CLEAN_ONFIRSTINIT) + props.put(Constants.FRAMEWORK_STORAGE, cacheDirectory.toString) + props.put("felix.shutdown.hook", "true") + props.put("felix.service.urlhandlers", "true") + props.put("felix.fileinstall.dir", pluginsDir.getAbsolutePath) + props.put("felix.fileinstall.noInitialDelay", "true") + props.put("felix.fileinstall.log.level", "4") + props + } + felix = new Felix(props) + _ <- ZIO.attempt(felix.start()).mapError(PluginError.Exception) + _ <- installSystemBundles(bundlesDir, felix.getBundleContext) + _ <- ZIO.attempt(felix.init()).mapError(PluginError.Exception) + } yield felix.getBundleContext + + + def findPlugins[T <: Service](implicit cmf: ClassTag[T]): IO[PluginError, List[PluginInfo]] = + for { + context <- bundleContext + plugins = context.getBundles.map(b => PluginInfo(b.getSymbolicName, "", b.getVersion.getMajor.toLong)).toList + } yield plugins + + + def findServices[T <: Service](implicit cmf: ClassTag[T]): IO[PluginError, Map[PluginServiceId, T]] = + for { + context <- bundleContext + references = context.getAllServiceReferences(cmf.runtimeClass.getName, null) + services <- { + if (references == null) ZIO.fail(PluginError.NoServicesFound) + else ZIO.succeed( + references.map { ref => + val id = ref.getProperty("id").asInstanceOf[PluginServiceId] + val service = context.getService(ref).asInstanceOf[T] + (id, service) + }.toMap + ) + } + } yield services + + + def findServiceInfos[T <: Service](implicit cmf: ClassTag[T]): IO[PluginError, List[PluginServiceInfo]] = + for { + services <- findServices + serviceInfos = services.map { case (serviceId, any) => PluginServiceInfo()}.toList + } yield serviceInfos + + + def getService[T <: Service](serviceId: PluginServiceId)(implicit cmf: ClassTag[T]): IO[PluginError, T] = + for { + services <- findServices(cmf) + service <- services.get(serviceId) match { + case Some(x) => ZIO.succeed(x) + case None => ZIO.fail(PluginError.NoServiceFound) + } + } yield service + + + def getResource(className: String, resourcePath: String): IO[PluginError, URL] = + for { + context <- bundleContext + resource <- context.getBundles + .find(_.getEntry(className.replace(".", "/") + ".class") != null) + .map(_.getEntry(resourcePath)) match { + case Some(x) => ZIO.succeed(x) + case None => ZIO.fail(PluginError.NoResourceFound) + } + } yield resource +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/plugin/Plugin.scala b/jvm/src/main/scala/com/harana/modules/plugin/Plugin.scala new file mode 100644 index 0000000..dedf56b --- /dev/null +++ 
b/jvm/src/main/scala/com/harana/modules/plugin/Plugin.scala @@ -0,0 +1,24 @@ +package com.harana.modules.plugin + +import com.harana.models.{PluginInfo, PluginServiceInfo} +import com.harana.modules.plugin.models.PluginError +import com.harana.sdk.shared.plugin.Service +import com.harana.sdk.shared.plugin.Service.ServiceId +import zio.IO +import zio.macros.accessible + +import java.net.URL +import scala.reflect.ClassTag + +@accessible +trait Plugin { + def findPlugins[T <: Service](implicit cmf: ClassTag[T]): IO[PluginError, List[PluginInfo]] + + def findServices[T <: Service](implicit cmf: ClassTag[T]): IO[PluginError, Map[ServiceId, T]] + + def findServiceInfos[T <: Service](implicit cmf: ClassTag[T]): IO[PluginError, List[PluginServiceInfo]] + + def getService[T <: Service](serviceId: ServiceId)(implicit cmf: ClassTag[T]): IO[PluginError, T] + + def getResource(className: String, fileName: String): IO[PluginError, URL] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/plugin/models/PluginError.scala b/jvm/src/main/scala/com/harana/modules/plugin/models/PluginError.scala new file mode 100644 index 0000000..27d650d --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/plugin/models/PluginError.scala @@ -0,0 +1,9 @@ +package com.harana.modules.plugin.models + +sealed trait PluginError +object PluginError { + case object NoResourceFound extends PluginError + case object NoServicesFound extends PluginError + case object NoServiceFound extends PluginError + case class Exception(t: Throwable) extends PluginError +} diff --git a/jvm/src/main/scala/com/harana/modules/plugin/package.scala b/jvm/src/main/scala/com/harana/modules/plugin/package.scala new file mode 100644 index 0000000..b258a7e --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/plugin/package.scala @@ -0,0 +1,45 @@ +package com.harana.modules + +import com.harana.modules.plugin.models.PluginError +import org.osgi.framework.BundleContext +import zio.{IO, ZIO} + +import java.io.File +import java.nio.file.{Files, Path, Paths} + +package object plugin { + + def installPlugin(bundleContext: BundleContext, bundleLocation: String): IO[PluginError, Unit] = + ZIO.attempt { + bundleContext.installBundle(bundleLocation).start() + }.mapError(PluginError.Exception) + + def installSystemBundles(bundlesDirectory: File, bundleContext: BundleContext): IO[PluginError, Unit] = + for { + files <- ZIO.succeed(bundlesDirectory.listFiles.filter(_.isFile).filter(_.getName.endsWith("jar"))) + bundles <- ZIO.attempt(files.map(b => bundleContext.installBundle(s"file:${b.getAbsolutePath}"))).mapError(PluginError.Exception) + _ <- ZIO.attempt(bundles.foreach(_.start())).mapError(PluginError.Exception) + } yield () + + def uninstallPlugin(bundleContext: BundleContext, bundleLocation: String): IO[PluginError, Unit] = + ZIO.attempt { + bundleContext.getBundles + .filter(_.getLocation == bundleLocation) + .foreach { bundle => + bundle.uninstall() + } + }.mapError(PluginError.Exception) + + def removePlugin(pluginsDirectory: File, pluginName: String): IO[PluginError, Unit] = + ZIO.attempt { + pluginsDirectory.listFiles + .filter(_.isFile) + .filter(_.getName == pluginName) + .foreach(file => if (file.exists()) file.delete()) + }.mapError(PluginError.Exception) + + def copyPlugin(pluginsDirectory: File, filePath: String): IO[PluginError, Path] = + ZIO.attempt { + Files.copy(Paths.get(filePath), Paths.get(pluginsDirectory + "/" + new File(filePath).getName)) + }.mapError(PluginError.Exception) +} \ No newline at end of file 
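A minimal usage sketch for the plugin package helpers above (illustrative only: hotDeploy is a hypothetical name; it assumes a running Felix BundleContext and an existing plugins directory):

    // Sketch, not part of the patch: copy a plugin jar into the watched plugins
    // directory, then install and start it in the running OSGi container.
    import com.harana.modules.plugin._
    import org.osgi.framework.BundleContext
    import java.io.File

    def hotDeploy(context: BundleContext, pluginsDir: File, jarPath: String) =
      for {
        copied <- copyPlugin(pluginsDir, jarPath)
        _      <- installPlugin(context, s"file:${copied.toFile.getAbsolutePath}")
      } yield ()
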
diff --git a/jvm/src/main/scala/com/harana/modules/projects/LiveProjects.scala b/jvm/src/main/scala/com/harana/modules/projects/LiveProjects.scala new file mode 100644 index 0000000..efd48f7 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/LiveProjects.scala @@ -0,0 +1,351 @@ +package com.harana.modules.projects + +import com.harana.designer.backend.modules.projects.models.{Trigger, _} +import com.harana.modules.argo.events.EventSource._ +import com.harana.modules.argo.events.Rollout.{BlueGreen, Canary, Rollout, Strategy} +import com.harana.modules.argo.events.Sensor.{EventDependency, Http, K8SResource, Sensor, Subscription, TriggerTemplate} +import com.harana.modules.argo.events.Trigger.{K8SSource, K8STrigger} +import com.harana.modules.argo.events._ +import com.harana.modules.argo.workflows._ +import com.harana.modules.argo.{EnvironmentVariable, ObjectMetadata, Requests, Resources, VolumeMount, Container => ArgoContainer, Template => ArgoTemplate} +import com.harana.modules.buildpack.Buildpack +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.harana.modules.docker.Docker +import com.harana.modules.git.Git +import com.harana.modules.kubernetes.Kubernetes +import com.harana.modules.vertx.Vertx +import io.circe.yaml.parser +import io.scalaland.chimney.dsl._ +import org.apache.commons.io.FileUtils +import org.eclipse.jgit.api.{Git => JGit} +import zio.{Clock, _} + +import java.io.{File => JFile} +import java.nio.charset.Charset +import java.nio.file.Files +import java.text.SimpleDateFormat +import java.util.Date +import java.util.concurrent.atomic.AtomicReference +import scala.jdk.CollectionConverters._ + +object LiveProjects { + val layer = ZLayer { + for { + buildpack <- ZIO.service[Buildpack] + config <- ZIO.service[Config] + docker <- ZIO.service[Docker] + git <- ZIO.service[Git] + kubernetes <- ZIO.service[Kubernetes] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + vertx <- ZIO.service[Vertx] + } yield LiveProjects(buildpack, config, docker, git, kubernetes, logger, micrometer, vertx) + } +} + +case class LiveProjects(buildpack: Buildpack, + config: Config, + docker: Docker, + git: Git, + kubernetes: Kubernetes, + logger: Logger, + micrometer: Micrometer, + vertx: Vertx) extends Projects { + + private val projectsRepository = new AtomicReference[JGit]() + private val allProjects = new AtomicReference[Set[Project]](Set()) + private val allRepositories = new AtomicReference[Map[(Project, String), JGit]](Map()) + private val tempDirectory = Files.createTempDirectory("harana-projects").toFile + private val dateFormatter = new SimpleDateFormat("yyyy-MM-dd@HH:mm:ss") + + private val repositoryAuthConfig = + for { + username <- config.optString("projects.docker.repository.username") + password <- config.optString("projects.docker.repository.password") + identityToken <- config.optString("projects.docker.repository.identityToken") + registryToken <- config.optString("projects.docker.repository.registryToken") + authConfig = dockerAuthConfig(username, password, identityToken, registryToken) + } yield authConfig + + + def setup(namespace: String): Task[Unit] = + for { + authConfig <- repositoryAuthConfig + + _ <- logger.info("Creating Argo Events/Workflow CRDs") + client <- kubernetes.newClient + _ <- kubernetes.save(client, namespace, EventSource.crd) + _ <- kubernetes.save(client, namespace, Rollout.crd) + _ <- kubernetes.save(client, namespace, 
Sensor.crd) + _ <- kubernetes.save(client, namespace, Workflow.crd) + _ <- kubernetes.save(client, namespace, WorkflowTemplate.crd) + _ <- kubernetes.close(client) + + _ <- logger.info("Pulling Python/Scala build images") + pythonImage <- config.optString("projects.build.pythonImage") + scalaImage <- config.optString("projects.build.scalaImage") + _ <- ZIO.when(pythonImage.nonEmpty)(docker.pullImage(pythonImage.get)) + _ <- ZIO.when(scalaImage.nonEmpty)(docker.pullImage(scalaImage.get, authConfig)) + + _ <- logger.info("Setting default Buildpack builder") + defaultBuilder <- config.optString("projects.build.buildpack.defaultBuilder") + _ <- ZIO.when(defaultBuilder.nonEmpty)(buildpack.setDefaultBuilder(defaultBuilder.get)) + + _ <- logger.info("Cloning Projects Git repository") + _ <- cloneProjects + + } yield () + + + def startMonitoring(namespace: String): Task[Unit] = + for { + _ <- refreshProjects.repeat(Schedule.spaced(5.seconds)) + } yield () + + + def stopMonitoring(namespace: String): Task[Unit] = { + ZIO.unit + } + + private def cloneProjects: Task[Unit] = + for { + url <- config.string("projects.git.url") + branch <- config.string("projects.git.branch", "master") + username <- config.optString("projects.git.username") + password <- config.optString("projects.git.password") + oauthToken <- config.optString("projects.git.oauthToken") + gitRepository <- git.clone(url, tempDirectory, Some(branch), username, password, oauthToken) + _ = projectsRepository.set(gitRepository) + } yield () + + + private def refreshProjects: Task[Set[Project]] = + for { + hasChanged <- git.hasChanged(projectsRepository.get) + _ <- logger.debug(s"Refreshing Projects Git repository: ${if (hasChanged) "changed" else "not changed" }") + projects <- ZIO.ifZIO(ZIO.succeed(hasChanged))(changedProjects(projectsRepository.get), ZIO.attempt(Set[Project]())) + } yield projects + + + private def cloneRepository(project: Project, repository: Repository): Task[Unit] = + for { + branchTagOrCommit <- ZIO.succeed(repository.git.branch.orElse(repository.git.tag.orElse(repository.git.commit.orElse(None)))) + directory = new JFile(tempDirectory, s"$project/${repository.name}") + gitRepository <- git.clone(repository.git.url, directory, None, repository.git.username, repository.git.password, repository.git.oauthToken) + _ <- git.checkout(gitRepository, branchTagOrCommit.getOrElse("master")) + _ = allRepositories.set(allRepositories.get + ((project, repository.name) -> gitRepository)) + } yield () + + + private def changedProjects(projectsRepo: JGit): Task[Set[Project]] = + for { + foundProjects <- findProjects(projectsRepo.getRepository.getDirectory.getParentFile) + changedProjects = allProjects.get.filterNot(foundProjects) + _ <- logger.debug(s"Changed projects: ${changedProjects.map(_.title).mkString(", ")}").when(changedProjects.nonEmpty) + } yield changedProjects + + + private def findProjects(directory: JFile): Task[Set[Project]] = + for { + files <- ZIO.succeed(FileUtils.listFiles(directory, Array("yml"), true).asScala.toList) + _ <- logger.debug(s"Found files: ${files.map(_.getAbsolutePath).mkString(", ")}") + ymls <- ZIO.attempt(files.map(f => (f.getName, FileUtils.readFileToString(f, Charset.defaultCharset())))) + parsedProjects <- ZIO.foreach(ymls)(parseProject) + foundProjects = parsedProjects.filter(_.nonEmpty).map(_.get).toSet + _ <- logger.info(s"Found projects: ${foundProjects.map(_.title)}") + } yield foundProjects + + + private def parseProject(yml: (String, String)): UIO[Option[Project]] = + for { + json <- 
ZIO.succeed(parser.parse(yml._2)) + _ <- ZIO.when(json.isLeft)(logger.error(s"Failed to parse YAML: ${yml._1} due to error: ${json.left.get.getMessage}")) + _ <- ZIO.when(json.isRight && json.toOption.get.as[Project].isLeft)(logger.error(s"Failed to parse project: ${yml._1} due to error: ${json.toOption.get.as[Project].left.get.getMessage}")) + project = json.toOption.flatMap(_.as[Project].toOption) + } yield project + + + private def refreshRepositories(project: Project): Task[Set[Repository]] = + project.repositories match { + case Some(repositories) => + for { + _ <- logger.debug(s"Refreshing repositories for project: ${project.title}") + changedReposRef <- Ref.make(Set[Repository]()) + _ <- ZIO.foreach(repositories) { r => + if (allRepositories.get.contains((project, r.name))) + for { + gitRepo <- ZIO.succeed(allRepositories.get()((project, r.name))) + hasChanged <- git.hasChanged(gitRepo) + _ <- changedReposRef.getAndUpdate(_ + r).when(hasChanged) + } yield () + else + cloneRepository(project, r) *> changedReposRef.getAndUpdate(_ + r) + } + changedRepos <- changedReposRef.get + } yield changedRepos + case None => ZIO.attempt(Set()) + } + + + private def buildContainers(project: Project): Task[List[Boolean]] = + for { + _ <- logger.debug(s"Building containers for project: ${project.title}") + successes <- ZIO.foreachPar(project.containers) { c => + val imageName = s"${name(project.title)}_${name(c.name)}" + val date = dateFormatter.format(new Date()) + + // Prefer a Buildpack build when an auto repository is configured, otherwise fall back to a Dockerfile build. + if (c.auto.flatMap(_.repository).nonEmpty) + for { + git <- ZIO.succeed(allRepositories.get()((project, c.auto.get.repository.get))) + success <- buildpack.build(s"$imageName:$date", git.getRepository.getDirectory).map(!_.mkString(",").contains("ERROR")) + } yield success + else if (c.docker.flatMap(_.repository).nonEmpty) + for { + git <- ZIO.succeed(allRepositories.get()((project, c.docker.get.repository.get))) + dockerFile = new JFile(git.getRepository.getDirectory, c.docker.get.path.getOrElse("Dockerfile")) + success <- docker.buildImage(dockerFile, Set(date)).option.map(_.nonEmpty) + } yield success + else + ZIO.succeed(false) + } + } yield successes + + + private def pipelineTriggers(project: Project): UIO[List[(Pipeline, List[Trigger])]] = + ZIO.succeed(project.pipelines.map(_.map(p => (p, p.start.triggers.getOrElse(List())))).getOrElse(List())) + + + private def eventSources(eventBusName: String, project: Project): UIO[List[EventSource]] = + for { + triggers <- pipelineTriggers(project).map(_.flatMap(_._2)) + eventSources <- ZIO.foreach(triggers) { trigger => + for { + eventSourceName <- ZIO.succeed(s"${name(project.title)}-${name(trigger.name)}-eventsource") + spec = EventSource.Spec( + calendar = trigger.calendar.map(c => Map(trigger.name -> c)).getOrElse(Map()), + eventBusName = eventBusName, + file = trigger.file.map(c => Map(trigger.name -> c)).getOrElse(Map()), + github = trigger.github.map(c => Map(trigger.name -> c)).getOrElse(Map()), + gitlab = trigger.gitlab.map(c => Map(trigger.name -> c)).getOrElse(Map()), + hdfs = trigger.hdfs.map(c => Map(trigger.name -> c)).getOrElse(Map()), + kafka = trigger.kafka.map(c => Map(trigger.name -> c)).getOrElse(Map()), + redis = trigger.redis.map(c => Map(trigger.name -> c)).getOrElse(Map()), + resource = trigger.resource.map(c => Map(trigger.name -> c)).getOrElse(Map()), + slack = trigger.slack.map(c => Map(trigger.name -> c)).getOrElse(Map()), + sns = trigger.sns.map(c => Map(trigger.name -> c)).getOrElse(Map()), + sqs = trigger.sqs.map(c => Map(trigger.name -> c)).getOrElse(Map()), + stripe = 
trigger.stripe.map(c => Map(trigger.name -> c)).getOrElse(Map()), + webhook = trigger.webhook.map(c => Map(trigger.name -> c)).getOrElse(Map()) + ) + eventSource = EventSource(eventSourceName, spec) + } yield eventSource + } + } yield eventSources + + + private def rollouts(project: Project): UIO[List[Rollout]] = + project.daemons match { + case Some(daemons) => ZIO.foreach(daemons) { daemon => + for { + strategy <- ZIO.succeed(daemon.strategy.map(s => Strategy(s.blueGreen.map(_.into[BlueGreen].transform), s.canary.map(_.into[Canary].transform)))) + containers <- ZIO.foreach(daemon.containers)(argoContainer(project, _)) + spec = Rollout.Spec(minReadySeconds = daemon.minReadySeconds, replicas = daemon.replicas, revisionHistoryLimit = daemon.revisionHistoryLimit, strategy = strategy) + rollout = Rollout(daemon.name, spec) + } yield rollout + } + case None => ZIO.succeed(List.empty) + } + + + private def sensors(project: Project): UIO[List[Sensor]] = + for { + pipelineTriggers <- pipelineTriggers(project).map(pt => pt.filter(_._2.isEmpty)) + argoContainer = ArgoContainer( + name = "sensor", + image = "argoproj/sensor:v0.13.0", + imagePullPolicy = Some("Always") + ) + template = ArgoTemplate(container = Some(argoContainer)) + sensors <- ZIO.foreach(pipelineTriggers){ pipelineTrigger => + for { + prefix <- ZIO.succeed(s"${name(project.title)}-${name(pipelineTrigger._1.name)}") + dependencies = pipelineTrigger._2.map(t => EventDependency(s"${name(t.name)}-gateway", s"${name(project.title)}-${name(t.name)}-gateway", "example")) + subscription = Subscription(Some(Http(9300))) + metadata = ObjectMetadata(generateName = Some(s"$prefix-workflow-")) + workflow <- argoWorkflow(project, pipelineTrigger._1) + k8sResource = K8SResource("argoproj.io/v1alpha1", "Workflow", metadata, workflow) + k8sTrigger = K8STrigger("argoproj.io", "v1alpha1", "workflows", "create", K8SSource(k8sResource)) + triggers = List(Sensor.Trigger(template = TriggerTemplate(s"$prefix-workflow", k8s = Some(k8sTrigger)))) + sensor = Sensor(s"$prefix-workflow", Sensor.Spec(Some(template), dependencies, subscription = Some(subscription), triggers = triggers)) + } yield sensor + } + } yield sensors + + + private def argoWorkflow(project: Project, pipeline: Pipeline): UIO[Workflow.Spec] = + for { + entrypoint <- ZIO.succeed(pipeline.start.action) + containers <- ZIO.foreach(pipeline.actions.map(_.container))(argoContainer(project, _)) + templates = containers.map(c => Template(container = Some(c), name = c.name)) + dagTasks = pipeline.actions.map(a => DAGTask(name = Some(a.name), dependencies = a.dependencies.getOrElse(List()), template = Some(a.name))) + dagTemplate = Template(name = pipeline.name, dag = Some(DAG(tasks = dagTasks))) + workflow = Workflow.Spec(entrypoint = Some(entrypoint), templates = templates :+ dagTemplate) + } yield workflow + + + private def name(title: String) = + title.toLowerCase.replaceAll("\\s", "-") + + + private def argoContainer(project: Project, container: Container): UIO[ArgoContainer] = + for { + globalContainer <- ZIO.succeed(project.containers.filter(_.name.equals(container.name)).head) + args = container.arguments.getOrElse(globalContainer.arguments.getOrElse(List())) + command = container.command.getOrElse(globalContainer.command.getOrElse(List())) + environmentVariables = container.environmentVariables.map(_ ++ globalContainer.environmentVariables.getOrElse(List())) + envs = environmentVariables.getOrElse(List()).map(e => EnvironmentVariable(e.name, e.value)) + image = 
globalContainer.docker.flatMap(_.image).getOrElse(s"${globalContainer.name}:latest") + imagePullPolicy = container.imagePullPolicy.orElse(globalContainer.imagePullPolicy) + resources = Resources(container.resources.orElse(globalContainer.resources).map(_.into[Requests].transform)) + volumeMounts = container.volumeMounts.getOrElse(globalContainer.volumeMounts.getOrElse(List())).map(_.into[VolumeMount].transform) + argoContainer = ArgoContainer(args, command, envs, image, imagePullPolicy, None, container.name, Some(resources), volumeMounts) + } yield argoContainer + + + private def projectDeployed(eventBusName: String, namespace: String, project: Project): Task[Boolean] = + for { + client <- kubernetes.newClient + e <- customResourcesDeployed[EventSource](client, namespace, eventSources(eventBusName, project))(kubernetes) + r <- customResourcesDeployed[Rollout](client, namespace, rollouts(project))(kubernetes) + s <- customResourcesDeployed[Sensor](client, namespace, sensors(project))(kubernetes) + deployed = e && r && s + _ <- kubernetes.close(client) + } yield deployed + + + private def deployProject(eventBusName: String, namespace: String, project: Project): Task[Boolean] = + for { + _ <- logger.debug(s"Deploying project: ${project.title} to namespace: $namespace") + client <- kubernetes.newClient + e <- deployCustomResources[EventSource](client, namespace, eventSources(eventBusName, project))(kubernetes) + r <- deployCustomResources[Rollout](client, namespace, rollouts(project))(kubernetes) + s <- deployCustomResources[Sensor](client, namespace, sensors(project))(kubernetes) + deployed = e && r && s + _ <- kubernetes.close(client) + } yield deployed + + + private def undeployProject(eventBusName: String, namespace: String, project: Project): Task[Boolean] = + for { + _ <- logger.debug(s"Un-deploying project: ${project.title} from namespace: $namespace") + client <- kubernetes.newClient + e <- undeployCustomResources[EventSource](client, namespace, eventSources(eventBusName, project))(kubernetes) + r <- undeployCustomResources[Rollout](client, namespace, rollouts(project))(kubernetes) + s <- undeployCustomResources[Sensor](client, namespace, sensors(project))(kubernetes) + undeployed = e && r && s + _ <- kubernetes.close(client) + } yield undeployed +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/projects/Projects.scala b/jvm/src/main/scala/com/harana/modules/projects/Projects.scala new file mode 100644 index 0000000..0160807 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/Projects.scala @@ -0,0 +1,13 @@ +package com.harana.modules.projects + +import zio.Task +import zio.macros.accessible + +@accessible +trait Projects { + def setup(namespace: String): Task[Unit] + + def startMonitoring(namespace: String): Task[Unit] + + def stopMonitoring(namespace: String): Task[Unit] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/Action.scala b/jvm/src/main/scala/com/harana/modules/projects/models/Action.scala new file mode 100644 index 0000000..85b955d --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/Action.scala @@ -0,0 +1,10 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class Action(name: String, + container: Container, + parameters: Option[List[Parameter]], + dependencies: Option[List[String]], + withItems: Option[List[String]]) \ No newline at end of file diff --git 
a/jvm/src/main/scala/com/harana/modules/projects/models/Artifactory.scala b/jvm/src/main/scala/com/harana/modules/projects/models/Artifactory.scala new file mode 100644 index 0000000..d584a33 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/Artifactory.scala @@ -0,0 +1,9 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class Artifactory(url: String, + path: Option[String], + username: Option[String], + password: Option[String]) diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/Author.scala b/jvm/src/main/scala/com/harana/modules/projects/models/Author.scala new file mode 100644 index 0000000..5b5b122 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/Author.scala @@ -0,0 +1,7 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class Author(name: String, + email: String) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/Auto.scala b/jvm/src/main/scala/com/harana/modules/projects/models/Auto.scala new file mode 100644 index 0000000..0ebbf1e --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/Auto.scala @@ -0,0 +1,8 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class Auto(repository: Option[String], + path: Option[String], + builder: Option[String]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/BlueGreen.scala b/jvm/src/main/scala/com/harana/modules/projects/models/BlueGreen.scala new file mode 100644 index 0000000..c683dda --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/BlueGreen.scala @@ -0,0 +1,12 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class BlueGreen(activeService: String, + previewService: String, + previewReplicaCount: Option[Int], + autoPromotionEnabled: Option[Boolean], + autoPromotionSeconds: Option[Int], + scaleDownDelaySeconds: Option[Int], + scaleDownDelayRevisionLimit: Option[Int]) diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/Canary.scala b/jvm/src/main/scala/com/harana/modules/projects/models/Canary.scala new file mode 100644 index 0000000..ddf6ad8 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/Canary.scala @@ -0,0 +1,10 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class Canary(stableService: String, + canaryService: String, + steps: List[CanaryStep] = List(), + maxSurge: Option[String] = None, + maxUnavailable: Option[String] = None) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/CanaryStep.scala b/jvm/src/main/scala/com/harana/modules/projects/models/CanaryStep.scala new file mode 100644 index 0000000..4f5f565 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/CanaryStep.scala @@ -0,0 +1,7 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class CanaryStep(weight: Int, + pause: Option[Int]) diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/Container.scala b/jvm/src/main/scala/com/harana/modules/projects/models/Container.scala new file mode 100644 index 0000000..701cf87 --- /dev/null +++ 
b/jvm/src/main/scala/com/harana/modules/projects/models/Container.scala @@ -0,0 +1,18 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class Container(name: String, + arguments: Option[List[String]], + auto: Option[Auto], + command: Option[List[String]], + docker: Option[Docker], + environmentVariables: Option[List[EnvironmentVariable]], + imagePullPolicy: Option[String], + ports: Option[List[Port]], + python: Option[Python], + resources: Option[Resources], + scala: Option[Scala], + version: Option[String], + volumeMounts: Option[List[VolumeMount]]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/Daemon.scala b/jvm/src/main/scala/com/harana/modules/projects/models/Daemon.scala new file mode 100644 index 0000000..9523d55 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/Daemon.scala @@ -0,0 +1,12 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class Daemon(name: String, + minReadySeconds: Option[Int], + replicas: Option[Int], + revisionHistoryLimit: Option[Int], + containers: List[Container], + start: Option[DaemonStart], + strategy: Option[Strategy]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/DaemonStart.scala b/jvm/src/main/scala/com/harana/modules/projects/models/DaemonStart.scala new file mode 100644 index 0000000..3184998 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/DaemonStart.scala @@ -0,0 +1,6 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class DaemonStart(triggers: Option[List[Trigger]]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/Docker.scala b/jvm/src/main/scala/com/harana/modules/projects/models/Docker.scala new file mode 100644 index 0000000..fbb0230 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/Docker.scala @@ -0,0 +1,8 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class Docker(image: Option[String], + repository: Option[String], + path: Option[String]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/Email.scala b/jvm/src/main/scala/com/harana/modules/projects/models/Email.scala new file mode 100644 index 0000000..e8fd6a8 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/Email.scala @@ -0,0 +1,6 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class Email(address: String) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/EnvironmentVariable.scala b/jvm/src/main/scala/com/harana/modules/projects/models/EnvironmentVariable.scala new file mode 100644 index 0000000..2d322e3 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/EnvironmentVariable.scala @@ -0,0 +1,6 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class EnvironmentVariable(name: String, value: String) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/Git.scala b/jvm/src/main/scala/com/harana/modules/projects/models/Git.scala new file mode 100644 index 0000000..ddab28f --- 
/dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/Git.scala @@ -0,0 +1,13 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class Git(url: String, + path: Option[String], + branch: Option[String], + tag: Option[String], + commit: Option[String], + username: Option[String], + password: Option[String], + oauthToken: Option[String]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/Notification.scala b/jvm/src/main/scala/com/harana/modules/projects/models/Notification.scala new file mode 100644 index 0000000..9d0aa03 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/Notification.scala @@ -0,0 +1,8 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class Notification(name: String, + email: Option[Email], + slack: Option[NotificationSlack]) diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/NotificationSlack.scala b/jvm/src/main/scala/com/harana/modules/projects/models/NotificationSlack.scala new file mode 100644 index 0000000..5288d7f --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/NotificationSlack.scala @@ -0,0 +1,6 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class NotificationSlack(channel: String) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/Parameter.scala b/jvm/src/main/scala/com/harana/modules/projects/models/Parameter.scala new file mode 100644 index 0000000..fdfe4eb --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/Parameter.scala @@ -0,0 +1,6 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class Parameter(name: String, value: String) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/Pipeline.scala b/jvm/src/main/scala/com/harana/modules/projects/models/Pipeline.scala new file mode 100644 index 0000000..5d3e96c --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/Pipeline.scala @@ -0,0 +1,8 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class Pipeline(name: String, + start: PipelineStart, + actions: List[Action]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/PipelineStart.scala b/jvm/src/main/scala/com/harana/modules/projects/models/PipelineStart.scala new file mode 100644 index 0000000..c82825b --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/PipelineStart.scala @@ -0,0 +1,7 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class PipelineStart(action: String, + triggers: Option[List[Trigger]]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/Port.scala b/jvm/src/main/scala/com/harana/modules/projects/models/Port.scala new file mode 100644 index 0000000..37a529c --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/Port.scala @@ -0,0 +1,8 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class Port(name: String, + internal: Option[Int], + external: Option[Int]) \ No newline at end of 
file diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/Project.scala b/jvm/src/main/scala/com/harana/modules/projects/models/Project.scala new file mode 100644 index 0000000..b60357b --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/Project.scala @@ -0,0 +1,14 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class Project(title: String, + description: String, + author: Author, + parameters: Option[List[Parameter]], + repositories: Option[List[Repository]], + containers: List[Container], + pipelines: Option[List[Pipeline]], + daemons: Option[List[Daemon]], + notifications: Option[List[Notification]]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/Python.scala b/jvm/src/main/scala/com/harana/modules/projects/models/Python.scala new file mode 100644 index 0000000..17d6fa1 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/Python.scala @@ -0,0 +1,9 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class Python(repository: Option[String], + path: Option[String], + file: Option[String], + baseImage: Option[String]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/Repository.scala b/jvm/src/main/scala/com/harana/modules/projects/models/Repository.scala new file mode 100644 index 0000000..1e9be50 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/Repository.scala @@ -0,0 +1,7 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class Repository(name: String, + git: Git) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/Resources.scala b/jvm/src/main/scala/com/harana/modules/projects/models/Resources.scala new file mode 100644 index 0000000..4c98ead --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/Resources.scala @@ -0,0 +1,7 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class Resources(cpu: Option[String], + memory: Option[String]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/SBT.scala b/jvm/src/main/scala/com/harana/modules/projects/models/SBT.scala new file mode 100644 index 0000000..8771c54 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/SBT.scala @@ -0,0 +1,6 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class SBT(memory: Option[String]) diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/Scala.scala b/jvm/src/main/scala/com/harana/modules/projects/models/Scala.scala new file mode 100644 index 0000000..2484c92 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/Scala.scala @@ -0,0 +1,9 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class Scala(repository: Option[String], + path: Option[String], + baseImage: Option[String], + sbt: Option[SBT]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/Strategy.scala b/jvm/src/main/scala/com/harana/modules/projects/models/Strategy.scala new file mode 100644 index 0000000..ca01e68 --- /dev/null +++ 
b/jvm/src/main/scala/com/harana/modules/projects/models/Strategy.scala @@ -0,0 +1,7 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class Strategy(blueGreen: Option[BlueGreen], + canary: Option[Canary]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/Trigger.scala b/jvm/src/main/scala/com/harana/modules/projects/models/Trigger.scala new file mode 100644 index 0000000..a179778 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/Trigger.scala @@ -0,0 +1,20 @@ +package com.harana.designer.backend.modules.projects.models + +import com.harana.modules.argo.events.EventSource._ +import io.circe.generic.JsonCodec + +@JsonCodec +case class Trigger(name: String, + calendar: Option[Calendar] = None, + file: Option[File] = None, + github: Option[Github] = None, + gitlab: Option[Gitlab] = None, + hdfs: Option[Hdfs] = None, + kafka: Option[Kafka] = None, + redis: Option[Redis] = None, + resource: Option[Resource] = None, + slack: Option[Slack] = None, + sns: Option[SNS] = None, + sqs: Option[SQS] = None, + stripe: Option[Stripe] = None, + webhook: Option[Webhook] = None) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/projects/models/VolumeMount.scala b/jvm/src/main/scala/com/harana/modules/projects/models/VolumeMount.scala new file mode 100644 index 0000000..7b057f6 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/models/VolumeMount.scala @@ -0,0 +1,11 @@ +package com.harana.designer.backend.modules.projects.models + +import io.circe.generic.JsonCodec + +@JsonCodec +case class VolumeMount(mountPath: Option[String] = None, + mountPropagation: Option[String] = None, + name: String, + readOnly: Option[Boolean] = None, + subPath: Option[String] = None, + subPathExpr: Option[String] = None) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/projects/package.scala b/jvm/src/main/scala/com/harana/modules/projects/package.scala new file mode 100644 index 0000000..1dc6d25 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/projects/package.scala @@ -0,0 +1,53 @@ +package com.harana.modules + +import com.github.dockerjava.api.model.AuthConfig +import com.harana.modules.kubernetes.Kubernetes +import play.api.libs.json.Format +import skuber.api.client.{KubernetesClient, LoggingContext} +import skuber.{ObjectResource, ResourceDefinition} +import zio.{Task, UIO, ZIO} + +package object projects { + + def dockerAuthConfig(username: Option[String], password: Option[String], identityToken: Option[String], registryToken: Option[String]): Option[AuthConfig] = + (username, password, identityToken, registryToken) match { + case (Some(u), Some(p), _, _ ) => Some(new AuthConfig().withUsername(u).withPassword(p)) + case (_, _, Some(it), _ ) => Some(new AuthConfig().withIdentityToken(it)) + case (_, _, _, Some(rt) ) => Some(new AuthConfig().withRegistrytoken(rt)) + case (_, _, _, _) => None + } + + + def customResourcesDeployed[A <: ObjectResource](client: KubernetesClient, + namespace: String, + customResources: UIO[List[A]])(kubernetes: Kubernetes) + (implicit fmt: Format[A], rd: ResourceDefinition[A], lc: LoggingContext): Task[Boolean] = + for { + cr <- customResources + deployed <- ZIO.foreach(cr) { cr => + kubernetes.get[A](client, namespace, cr.name).map(o => cr.metadata.resourceVersion.equals(o.get.metadata.resourceVersion)).either + } + } yield deployed.exists(_.isLeft) + + + + def 
deployCustomResources[A <: ObjectResource](client: KubernetesClient, + namespace: String, + customResources: UIO[List[A]])(kubernetes: Kubernetes) + (implicit fmt: Format[A], rd: ResourceDefinition[A], lc: LoggingContext): Task[Boolean] = + for { + cr <- customResources + deployed <- ZIO.foreach(cr)(cr => kubernetes.create[A](client, namespace, cr).option) + } yield deployed.exists(_.nonEmpty) + + + + def undeployCustomResources[A <: ObjectResource](client: KubernetesClient, + namespace: String, + customResources: UIO[List[A]])(kubernetes: Kubernetes) + (implicit fmt: Format[A], rd: ResourceDefinition[A], lc: LoggingContext): Task[Boolean] = + for { + cr <- customResources + deployed <- ZIO.foreach(cr)(cr => kubernetes.delete[A](client, namespace, cr.name).option) + } yield deployed.exists(_.nonEmpty) +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/salesforce/LiveSalesforce.scala b/jvm/src/main/scala/com/harana/modules/salesforce/LiveSalesforce.scala new file mode 100644 index 0000000..813acb0 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/salesforce/LiveSalesforce.scala @@ -0,0 +1,105 @@ +package com.harana.modules.salesforce + +import com.harana.modules.core.config.Config +import com.harana.modules.core.http.Http +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.harana.modules.salesforce.models.{SalesforceError, SalesforceQuota} +import io.circe.Json +import io.circe.optics.JsonPath +import zio.{IO, ZIO, ZLayer} + +object LiveSalesforce { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + http <- ZIO.service[Http] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveSalesforce(config, http, logger, micrometer) + } +} + +case class LiveSalesforce(config: Config, http: Http, logger: Logger, micrometer: Micrometer) extends Salesforce { + + private val loginTokenUrl = "https://login.salesforce.com/services/oauth2/token" + + private val accessToken = + for { + username <- config.secret("salesforce-username") + password <- config.secret("salesforce-password") + clientId <- config.secret("salesforce-client-id") + clientSecret <- config.secret("salesforce-client-secret") + securityToken <- config.secret("salesforce-security-token") + grantType <- config.string("salesforce.grantType") + response <- http.postAsJson(loginTokenUrl, params = Map( + "grant_type" -> List(grantType), + "client_id" -> List(clientId), + "client_secret" -> List(clientSecret), + "username" -> List(username), + "password" -> List(s"$password$securityToken") + )).mapBoth(SalesforceError.ConnectionError, JsonPath.root.access_token.string.getOption) + } yield response + + + def quota: IO[SalesforceError, SalesforceQuota] = + null +// for { +// baseUrl <- config.string("salesforce.baseUrl") +// apiVersion <- config.int("salesforce.apiVersion") +// json <- get(s"$baseUrl/services/data/v$apiVersion/limits") +// response <- (json \ "DailyApiRequests").toOption match { +// case Some(value) => +// val max = (value \ "Max").toString.toFloat +// val remaining = (value \ "Remaining").toString.toFloat +// val used = max - remaining +// val percent: Float = (used / max) * 100 +// IO.succeed(SalesforceQuota(used.toInt, remaining.toInt, percent.toInt)) +// +// case None => +// IO.fail(SalesforceError.ParseError) +// } +// } yield response + + + def describeObject(name: String): IO[SalesforceError, Json] = + for { + baseUrl <- config.string("salesforce.baseUrl") + apiVersion <- 
config.int("salesforce.apiVersion") + json <- get(s"$baseUrl/services/data/v$apiVersion/sobjects/$name/describe") + } yield json + + + def objectList: IO[SalesforceError, Json] = + null +// for { +// baseUrl <- config.string("salesforce.baseUrl") +// apiVersion <- config.int("salesforce.apiVersion") +// json <- get(s"$baseUrl/services/data/v$apiVersion/sobjects") +// response <- (json \ "sobjects") match { +// case JArray(x) => IO.succeed(x) +// case _ => IO.fail(SalesforceError.ParseError) +// } +// } yield response + + + def objectNames: IO[SalesforceError, List[String]] = { + null + } +// objectList.map(_.map { +// item => (item \ "name").toString +// }) +// } + + + private def get(url: String, query: Option[String] = None): IO[SalesforceError, Json] = { + accessToken.flatMap { token => + val headers = Map( + "Authorization" -> s"Bearer $token", + "Content-Type" -> "application/json" + ) + val params = query.map { q => Map("q" -> List(q)) }.getOrElse(Map()) + http.getAsJson(url, params, headers).mapError(SalesforceError.ConnectionError) + } + } +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/salesforce/Salesforce.scala b/jvm/src/main/scala/com/harana/modules/salesforce/Salesforce.scala new file mode 100644 index 0000000..4904067 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/salesforce/Salesforce.scala @@ -0,0 +1,17 @@ +package com.harana.modules.salesforce + +import com.harana.modules.salesforce.models.{SalesforceError, SalesforceQuota} +import io.circe.Json +import zio.IO +import zio.macros.accessible + +@accessible +trait Salesforce { + def quota: IO[SalesforceError, SalesforceQuota] + + def describeObject(name: String): IO[SalesforceError, Json] + + def objectList: IO[SalesforceError, Json] + + def objectNames: IO[SalesforceError, List[String]] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/salesforce/models/SalesforceError.scala b/jvm/src/main/scala/com/harana/modules/salesforce/models/SalesforceError.scala new file mode 100644 index 0000000..b27dfa8 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/salesforce/models/SalesforceError.scala @@ -0,0 +1,9 @@ +package com.harana.modules.salesforce.models + +import com.harana.modules.core.http.models.OkHttpError + +sealed trait SalesforceError +object SalesforceError { + case object ParseError extends SalesforceError + case class ConnectionError(err: OkHttpError) extends SalesforceError +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/salesforce/models/SalesforceQuota.scala b/jvm/src/main/scala/com/harana/modules/salesforce/models/SalesforceQuota.scala new file mode 100644 index 0000000..20b8268 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/salesforce/models/SalesforceQuota.scala @@ -0,0 +1,5 @@ +package com.harana.modules.salesforce.models + +case class SalesforceQuota(used: Int, + remaining: Int, + percentage: Int) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/segment/LiveSegment.scala b/jvm/src/main/scala/com/harana/modules/segment/LiveSegment.scala new file mode 100644 index 0000000..57af1b9 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/segment/LiveSegment.scala @@ -0,0 +1,70 @@ +package com.harana.modules.segment + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.harana.modules.segment.models.SegmentOptions +import com.segment.analytics.Analytics 
+import com.segment.analytics.messages._ +import zio.{IO, ZIO, ZLayer} + +import scala.jdk.CollectionConverters._ + +object LiveSegment { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveSegment(config, logger, micrometer) + } +} + +case class LiveSegment(config: Config, logger: Logger, micrometer: Micrometer) extends Segment { + + private val analytics = config.secret("segment-write-key").map(c => Analytics.builder(c).build) + + def alias(previousId: String, userId: String, options: SegmentOptions): IO[Nothing, Unit] = { + val builder = AliasMessage.builder(previousId) + sendMessage(builder, userId, options) + } + + def group(userId: String, groupId: String, traits: Map[String, _], options: SegmentOptions): IO[Nothing, Unit] = { + val builder = GroupMessage.builder(groupId).traits(traits.asJava) + sendMessage(builder, userId, options) + } + + def identify(userId: String, traits: Map[String, _], options: SegmentOptions): IO[Nothing, Unit] = { + val builder = IdentifyMessage.builder().traits(traits.asJava) + sendMessage(builder, userId, options) + } + + def page(userId: String, name: String, properties: Map[String, _], options: SegmentOptions): IO[Nothing, Unit] = { + val builder = PageMessage.builder(name).properties(properties.asJava) + sendMessage(builder, userId, options) + } + + def screen(userId: String, name: String, properties: Map[String, _], options: SegmentOptions): IO[Nothing, Unit] = { + val builder = ScreenMessage.builder(name).properties(properties.asJava) + sendMessage(builder, userId, options) + } + + def track(userId: String, event: String, properties: Map[String, _], options: SegmentOptions): IO[Nothing, Unit] = { + val builder = TrackMessage.builder(event).properties(properties.asJava) + sendMessage(builder, userId, options) + } + + def flush: IO[Nothing, Unit] = + analytics.map(_.flush()) + + private def sendMessage(builder: MessageBuilder[_ <: Message, _ <: MessageBuilder[_ <: Message, _ <: AnyRef]], + userId: String, + options: SegmentOptions) = { + if (options.isAnonymous) builder.anonymousId(userId) else builder.userId(userId) + if (options.timestamp.nonEmpty) builder.timestamp(options.timestamp.get) + //if (options.integrationOptions.nonEmpty) builder.integrationOptions(options.integrationOptions.get._1, options.integrationOptions.get._2.asJava) + builder.context(options.context.asJava) + options.enabledIntegrations.foreach { i => builder.enableIntegration(i._1, i._2) } + analytics.map(_.enqueue(builder)) + } +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/segment/Segment.scala b/jvm/src/main/scala/com/harana/modules/segment/Segment.scala new file mode 100644 index 0000000..2acc7b2 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/segment/Segment.scala @@ -0,0 +1,22 @@ +package com.harana.modules.segment + +import com.harana.modules.segment.models.SegmentOptions +import zio.IO +import zio.macros.accessible + +@accessible +trait Segment { + def alias(previousId: String, userId: String, options: SegmentOptions): IO[Nothing, Unit] + + def group(userId: String, groupId: String, traits: Map[String, _], options: SegmentOptions): IO[Nothing, Unit] + + def identify(userId: String, traits: Map[String, _], options: SegmentOptions): IO[Nothing, Unit] + + def page(userId: String, name: String, properties: Map[String, _], options: SegmentOptions): IO[Nothing, Unit] + + def screen(userId: String, name: String, properties: Map[String, _], 
options: SegmentOptions): IO[Nothing, Unit] + + def track(userId: String, event: String, properties: Map[String, _], options: SegmentOptions): IO[Nothing, Unit] + + def flush: IO[Nothing, Unit] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/segment/models/SegmentOptions.scala b/jvm/src/main/scala/com/harana/modules/segment/models/SegmentOptions.scala new file mode 100644 index 0000000..d168e88 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/segment/models/SegmentOptions.scala @@ -0,0 +1,10 @@ +package com.harana.modules.segment.models + +import java.util.Date + +case class SegmentOptions(context: Map[String, _] = Map(), + enabledIntegrations: Map[String, Boolean] = Map(), + integrationOptions: Option[(String, Map[String, _ <: Object])] = None, + isAnonymous: Boolean = false, + messageId: Option[String] = None, + timestamp: Option[Date] = None){} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/sentry/LiveSentry.scala b/jvm/src/main/scala/com/harana/modules/sentry/LiveSentry.scala new file mode 100644 index 0000000..1aa98c1 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/sentry/LiveSentry.scala @@ -0,0 +1,32 @@ +package com.harana.modules.sentry + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import io.sentry.{Breadcrumb, Sentry => ioSentry} +import zio.{UIO, ZIO, ZLayer} + +object LiveSentry { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveSentry(config, logger, micrometer) + } +} + +case class LiveSentry(config: Config, logger: Logger, micrometer: Micrometer) extends Sentry { + + // FIXME + config.string("sentry.dsn", "").map(ioSentry.init) + + def addBreadcrumb(message: String): UIO[Unit] = { + ZIO.succeed(ioSentry.addBreadcrumb(message)) + } + + def addBreadcrumb(breadcrumb: Breadcrumb): UIO[Unit] = { + ZIO.succeed(ioSentry.addBreadcrumb(breadcrumb)) + } + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/sentry/Sentry.scala b/jvm/src/main/scala/com/harana/modules/sentry/Sentry.scala new file mode 100644 index 0000000..c8a33d2 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/sentry/Sentry.scala @@ -0,0 +1,11 @@ +package com.harana.modules.sentry + +import io.sentry.Breadcrumb +import zio.UIO +import zio.macros.accessible + +@accessible +trait Sentry { + def addBreadcrumb(message: String): UIO[Unit] + def addBreadcrumb(breadcrumb: Breadcrumb): UIO[Unit] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/shopify/LiveShopify.scala b/jvm/src/main/scala/com/harana/modules/shopify/LiveShopify.scala new file mode 100644 index 0000000..944a0d2 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/LiveShopify.scala @@ -0,0 +1,304 @@ +package com.harana.modules.shopify + +import com.harana.modules.core.config.Config +import com.harana.modules.core.http.Http +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.harana.modules.shopify.models._ +import io.circe.parser._ +import io.circe.syntax.EncoderOps +import io.circe.{Decoder, Encoder} +import purecsv.safe._ +import zio.{Ref, Task, ZIO, ZLayer} + +import java.io.File +import java.time.ZoneId +import java.time.temporal.{ChronoUnit, TemporalAdjusters} +import scala.util.Try + +object LiveShopify { + val layer = ZLayer { + for 
{ + config <- ZIO.service[Config] + http <- ZIO.service[Http] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveShopify(config, http, logger, micrometer) + } +} + +case class LiveShopify(config: Config, http: Http, logger: Logger, micrometer: Micrometer) extends Shopify { + + def forecastInventory(implicit connection: ShopifyConnection): Task[List[Output]] = + for { + products <- products(limit = Some(250), status = Some("active")).map(_.items) + _ <- logger.info(s"Number of products: ${products.size}") + productMap = products.map(p => p.id -> p).toMap + variants = products.flatMap(p => p.variants) + variantsMap = variants.map(v => v.id -> v).toMap + _ <- logger.info(s"Number of variants: ${variantsMap.size}") + + outputs = variants.map(v => Output(productMap(v.productId).title, v.title, v.sku.getOrElse(""), v.id, v.option1.getOrElse(""), v.option2.getOrElse(""), v.option3.getOrElse(""), "0", "0", "0", "0", "0")) + orders <- all(orders(limit = Some(250), status = Some("any"))) + _ <- logger.info(s"Number of orders: ${orders.size}") + + ordersByDate = orders.groupBy(o => o.createdAt.atZone(ZoneId.systemDefault()).`with`(TemporalAdjusters.firstDayOfMonth()).truncatedTo(ChronoUnit.DAYS)) + lineItemsByDate = ordersByDate.view.mapValues(orders => orders.flatMap(_.lineItems.map(li => (lineItemTitle(li), li.quantity)))) + lineItemsByVariantIdMap = orders.flatMap(_.lineItems.map(li => li.variantId -> li)).toMap + groupedLineItemsByDate = lineItemsByDate.mapValues(lineItems => lineItems.groupBy(_._1).view.mapValues(_.map(_._2).sum).toList.sortBy(_._1)) + + groupedLineItemsByDateMap = groupedLineItemsByDate.mapValues(sumByKeys).mapValues(_.toMap) + sortedDates = groupedLineItemsByDate.keys.toList.sortBy(_.toString).take(3) + _ <- logger.info(s"Dates to output: ${sortedDates.map(_.toString)}") + + middleOutputs = outputs.map { o => + if (lineItemsByVariantIdMap.contains(o.variantId)) { + val lineItem = lineItemsByVariantIdMap(o.variantId) + + val month1 = Try(groupedLineItemsByDateMap(sortedDates.head)(lineItemTitle(lineItem))).toOption.getOrElse(0L) + val month2 = Try(groupedLineItemsByDateMap(sortedDates(1))(lineItemTitle(lineItem))).toOption.getOrElse(0L) + val month3 = Try(groupedLineItemsByDateMap(sortedDates(2))(lineItemTitle(lineItem))).toOption.getOrElse(0L) + val total = month1 + month2 + month3 + o.copy(month1Sales = month1.toString, month2Sales = month2.toString, month3Sales = month3.toString, totalSales = total.toString) + } else { + o.copy(month1Sales = "-", month2Sales = "-", month3Sales = "-", totalSales = "-") + } + } + + location <- locations(connection).map(_.items.head) + _ <- logger.info(s"Found location with id: ${location.id}") + + inventoryLevels <- all(inventoryLevels(limit = Some(250), locationIds = List(location.id))) + inventoryLevelsMap = inventoryLevels.map(il => il.inventoryItemId -> il.available).toMap + finalOutputs = middleOutputs.map(o => o.copy(inventoryLevel = Try(inventoryLevelsMap(variantsMap(o.variantId).inventoryItemId).toString).toOption.getOrElse("-"))) + + _ = finalOutputs.writeCSVToFile(new File("/tmp/output.csv")) + + } yield finalOutputs + + + private def sumByKeys[A](tuples: List[(A, Long)]) : List[(A, Long)] = { + tuples.groupBy(_._1).view.mapValues(_.map(_._2).sum).toList + } + + + private def lineItemTitle(li: LineItem) = + s"${li.productId}-${li.variantId}" + + + def orders(ids: List[String] = List(), + limit: Option[Int] = None, + sinceId: Option[String] = None, + createdAtMin: Option[String] = None, + 
createdAtMax: Option[String] = None, + updatedAtMin: Option[String] = None, + updatedAtMax: Option[String] = None, + processedAtMin: Option[String] = None, + processedAtMax: Option[String] = None, + attributionAppId: Option[String] = None, + status: Option[String] = None, + financialStatus: Option[String] = None, + fulfillment_status: Option[String] = None, + fields: List[String] = List())(implicit connection: ShopifyConnection): Task[Page[Order]] = + getList[Order](s"orders", Map( + "ids" -> ids.mkString(","), + "limit" -> limit.getOrElse(50).toString, + "status" -> status.getOrElse("") + )) + + + def products(ids: List[String] = List(), + limit: Option[Int] = None, + sinceId: Option[String] = None, + title: Option[String] = None, + vendor: Option[String] = None, + handle: Option[String] = None, + productType: Option[String] = None, + status: Option[String] = None, + collectionId: Option[String] = None, + createdAtMin: Option[String] = None, + createdAtMax: Option[String] = None, + updatedAtMin: Option[String] = None, + updatedAtMax: Option[String] = None, + processedAtMin: Option[String] = None, + processedAtMax: Option[String] = None, + publishedStatus: Option[String] = None, + fields: List[String] = List())(implicit connection: ShopifyConnection): Task[Page[models.Product]] = + getList[models.Product](s"products", Map( + "ids" -> ids.mkString(","), + "limit" -> limit.getOrElse(50).toString, + "status" -> status.getOrElse("")) + ) + + + def inventoryLevels(inventoryItemIds: List[Long] = List(), + locationIds: List[Long] = List(), + limit: Option[Int] = None, + updatedAtMin: Option[String] = None) + (implicit connection: ShopifyConnection): Task[Page[InventoryLevel]] = + getList[InventoryLevel](s"inventory_levels", Map( + "limit" -> limit.getOrElse(50).toString, + "location_ids" -> locationIds.map(_.toString).mkString(","))) + + + def locations(implicit connection: ShopifyConnection): Task[Page[Location]] = + getList[Location](s"locations", Map()) + + + def customer(id: String, + fields: List[String] = List()) + (implicit connection: ShopifyConnection): Task[Customer] = + get[Customer](s"customers/$id", Map()) + + + def createCustomerMetafield(customerId: String, metafield: Metafield)(implicit connection: ShopifyConnection): Task[Metafield] = + post[Metafield](s"customers/$customerId/metafields", Map(), metafield) + + + def customerMetafield(customerId: String, metafieldId: String)(implicit connection: ShopifyConnection): Task[Metafield] = + get[Metafield](s"customers/$customerId/metafields/$metafieldId.json", Map()) + + + def setCustomerMetafield(customerId: String, metafieldId: String, metafieldType: String, metafieldValue: String)(implicit connection: ShopifyConnection): Task[Unit] = + post[String](s"customers/$customerId/metafields/$metafieldId.json", Map(), + Map("metafield" -> Map("id" -> metafieldId, "value" -> metafieldValue, "type" -> metafieldType)).asJson.noSpaces + ).ignore + + def orderMetafield(orderId: String, metafieldId: String)(implicit connection: ShopifyConnection): Task[Metafield] = + get[Metafield](s"orders/$orderId/metafields/$metafieldId.json", Map()) + + + def setOrderMetafield(orderId: String, metafieldId: String, metafieldType: String, metafieldValue: String)(implicit connection: ShopifyConnection): Task[Unit] = + post[String](s"orders/$orderId/metafields/$metafieldId.json", Map(), + Map("metafield" -> Map("id" -> metafieldId, "value" -> metafieldValue, "type" -> metafieldType)).asJson.noSpaces + ).ignore + + + def product(id: String, + fields: List[String] = 
List())(implicit connection: ShopifyConnection): Task[models.Product] = + get[models.Product](s"products/$id", Map()) + + + def productVariants(productId: Long, + limit: Option[Int] = None, + presentmentCurrencies: List[String] = List(), + sinceId: Option[String] = None, + fields: List[String] = List())(implicit connection: ShopifyConnection): Task[Page[ProductVariant]] = + getList[ProductVariant](s"products/$productId/variants", Map( + "fields" -> fields.mkString(","), + "limit" -> limit.getOrElse(50).toString, + "presentment_currencies" -> presentmentCurrencies.mkString(","), + "since_id" -> sinceId.getOrElse(""))) + + + def uploadImage(sourceUrl: String)(implicit connection: ShopifyConnection): Task[Unit] = + for { + url <- ZIO.succeed(s"https://${connection.subdomain}.myshopify.com/admin/api/2023-07/graphql.json") + _ <- http.post( + url, + credentials = Some((connection.apiKey, connection.password)), + body = Some( + s"""{ + "query": "mutation fileCreate($$files: [FileCreateInput!]!) { fileCreate(files: $$files) { files { alt createdAt } } }", + "variables": { + "files": { + "contentType": "IMAGE", + "originalSource": "$sourceUrl" + } + } + }""" + ) + ).mapBoth(e => new Exception(e.toString), _.body().string()) + } yield () + + + def previousPage[T](page: Page[T])(implicit connection: ShopifyConnection, d: Decoder[T]): Task[Option[Page[T]]] = + ZIO.foreach(page.previousUrl)(url => getUrlList[T](url, Map())) + + + def nextPage[T](page: Page[T])(implicit connection: ShopifyConnection, d: Decoder[T]): Task[Option[Page[T]]] = + ZIO.foreach(page.nextUrl)(url => getUrlList[T](url, Map())) + + + def all[T](fn: => Task[Page[T]])(implicit connection: ShopifyConnection, d : Decoder[T]): Task[List[T]] = + for { + itemsRef <- Ref.make[List[T]](List()) + initialPage <- fn + currentPageRef <- Ref.make[Option[Page[T]]](Some(initialPage)) + _ <- (for { + currentPage <- currentPageRef.get + existingItems <- itemsRef.get + _ <- itemsRef.set (existingItems ++ currentPage.get.items) + nextPage <- ZIO.foreach(currentPage)(nextPage[T](_)).map(_.flatten) + _ <- currentPageRef.set(nextPage) + } yield ()).repeatWhileZIO { _ => currentPageRef.get.map(_.nonEmpty) } + items <- itemsRef.get + } yield items + + + private def get[T](endpoint: String, parameters: Map[String, String]) + (implicit connection: ShopifyConnection, d: Decoder[T]): Task[T] = + for { + url <- ZIO.succeed(s"https://${connection.subdomain}.myshopify.com/admin/api/2023-04/$endpoint.json") + response <- http.get(url, params = parameters.map{ case (k, v) => k -> List(v) }, credentials = Some((connection.apiKey, connection.password))).mapBoth(e => new Exception(e.toString), _.body().string()) + obj <- ZIO.fromEither(decode[T](response)) + } yield obj + + + private def getList[T](endpoint: String, parameters: Map[String, String]) + (implicit connection: ShopifyConnection, d: Decoder[T]): Task[Page[T]] = + for { + url <- ZIO.succeed(s"https://${connection.subdomain}.myshopify.com/admin/api/2023-04/$endpoint.json") + page <- getUrlList[T](url, parameters) + } yield page + + + private def getUrlList[T](url: String, parameters: Map[String, String]) + (implicit connection: ShopifyConnection, d: Decoder[T]): Task[Page[T]] = + for { + response <- http.get(url, params = parameters.map{ case (k, v) => k -> List(v) }, credentials = Some((connection.apiKey, connection.password))).mapError(e => new Exception(e.toString)) + + rel = Option(response.header("link")) + relUrl = rel.map(r => r.substring(1, r.indexOf(">"))) + relType = rel.map(r => 
r.substring(r.indexOf("rel=")+5, r.length-1)) + + cursor <- ZIO.attempt(parse(response.body().string).toOption.get.hcursor) + root <- ZIO.attempt(cursor.keys.get.head) + json <- ZIO.attempt(cursor.downField(root).focus.get) + items <- ZIO.fromEither(json.as[List[T]]).onError(e => logger.error(e.prettyPrint)) + + page = (rel, relType) match { + case (None, _) => Page(None, None, items) + case (Some(_), Some(rt)) if (rt == "previous") => Page(relUrl, None, items) + case (Some(_), Some(rt)) if (rt == "next") => Page(None, relUrl, items) + } + } yield page + + + private def post[T](endpoint: String, parameters: Map[String, String], body: T) + (implicit connection: ShopifyConnection, d: Decoder[T], e: Encoder[T]): Task[T] = + for { + url <- ZIO.succeed(s"https://${connection.subdomain}.myshopify.com/admin/api/2023-04/$endpoint.json") + response <- http.post( + url, + params = parameters.map { case (k, v) => k -> List(v) }, + credentials = Some((connection.apiKey, connection.password)), + body = Some(body.asJson.noSpaces) + ).mapBoth(e => new Exception(e.toString), _.body().string()) + obj <- ZIO.fromEither(decode[T](response)) + } yield obj + + + private def put[T](endpoint: String, parameters: Map[String, String], body: T) + (implicit connection: ShopifyConnection, d: Decoder[T], e: Encoder[T]): Task[T] = + for { + url <- ZIO.succeed(s"https://${connection.subdomain}.myshopify.com/admin/api/2023-04/$endpoint.json") + response <- http.put( + url, + params = parameters.map { case (k, v) => k -> List(v) }, + credentials = Some((connection.apiKey, connection.password)), + body = Some(body.asJson.noSpaces) + ).mapBoth(e => new Exception(e.toString), _.body().string()) + obj <- ZIO.fromEither(decode[T](response)) + } yield obj +} diff --git a/jvm/src/main/scala/com/harana/modules/shopify/Shopify.scala b/jvm/src/main/scala/com/harana/modules/shopify/Shopify.scala new file mode 100644 index 0000000..e035008 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/Shopify.scala @@ -0,0 +1,80 @@ +package com.harana.modules.shopify + +import com.harana.modules.shopify.models._ +import io.circe.Decoder +import zio.Task +import zio.macros.accessible + +@accessible +trait Shopify { + + def all[T](fn: => Task[Page[T]])(implicit connection: ShopifyConnection, d: Decoder[T]): Task[List[T]] + + def previousPage[T](page: Page[T])(implicit connection: ShopifyConnection, d: Decoder[T]): Task[Option[Page[T]]] + + def nextPage[T](page: Page[T])(implicit connection: ShopifyConnection, d: Decoder[T]): Task[Option[Page[T]]] + + def forecastInventory(implicit connection: ShopifyConnection): Task[List[Output]] + + def orders(ids: List[String] = List(), + limit: Option[Int] = None, + sinceId: Option[String] = None, + createdAtMin: Option[String] = None, + createdAtMax: Option[String] = None, + updatedAtMin: Option[String] = None, + updatedAtMax: Option[String] = None, + processedAtMin: Option[String] = None, + processedAtMax: Option[String] = None, + attributionAppId: Option[String] = None, + status: Option[String] = None, + financialStatus: Option[String] = None, + fulfillment_status: Option[String] = None, + fields: List[String] = List())(implicit connection: ShopifyConnection): Task[Page[Order]] + + def products(ids: List[String] = List(), + limit: Option[Int] = None, + sinceId: Option[String] = None, + title: Option[String] = None, + vendor: Option[String] = None, + handle: Option[String] = None, + productType: Option[String] = None, + status: Option[String] = None, + collectionId: Option[String] = None, + 
createdAtMin: Option[String] = None, + createdAtMax: Option[String] = None, + updatedAtMin: Option[String] = None, + updatedAtMax: Option[String] = None, + processedAtMin: Option[String] = None, + processedAtMax: Option[String] = None, + publishedStatus: Option[String] = None, + fields: List[String] = List())(implicit connection: ShopifyConnection): Task[Page[models.Product]] + + def inventoryLevels(inventoryItemIds: List[Long] = List(), + locationIds: List[Long] = List(), + limit: Option[Int] = None, + updatedAtMin: Option[String] = None)(implicit connection: ShopifyConnection): Task[Page[InventoryLevel]] + + def customer(id: String, fields: List[String] = List())(implicit connection: ShopifyConnection): Task[Customer] + + def customerMetafield(customerId: String, metafieldId: String)(implicit connection: ShopifyConnection): Task[Metafield] + + def createCustomerMetafield(customerId: String, metafield: Metafield)(implicit connection: ShopifyConnection): Task[Metafield] + + def setCustomerMetafield(customerId: String, metafieldId: String, metafieldType: String, metafieldValue: String)(implicit connection: ShopifyConnection): Task[Unit] + + def orderMetafield(orderId: String, metafieldId: String)(implicit connection: ShopifyConnection): Task[Metafield] + + def setOrderMetafield(orderId: String, metafieldId: String, metafieldType: String, metafieldValue: String)(implicit connection: ShopifyConnection): Task[Unit] + + def locations(implicit connection: ShopifyConnection): Task[Page[Location]] + + def product(id: String, fields: List[String] = List())(implicit connection: ShopifyConnection): Task[models.Product] + + def productVariants(productId: Long, + limit: Option[Int] = None, + presentmentCurrencies: List[String] = List(), + sinceId: Option[String] = None, + fields: List[String] = List())(implicit connection: ShopifyConnection): Task[Page[ProductVariant]] + + def uploadImage(sourceUrl: String)(implicit connection: ShopifyConnection): Task[Unit] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/shopify/models/Address.scala b/jvm/src/main/scala/com/harana/modules/shopify/models/Address.scala new file mode 100644 index 0000000..567232d --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/models/Address.scala @@ -0,0 +1,17 @@ +package com.harana.modules.shopify.models + +case class Address(firstName: String, + lastName: String, + name: String, + company: Option[String], + address1: String, + address2: Option[String], + city: String, + zip: String, + province: String, + country: String, + provinceCode: String, + countryCode: String, + phone: Option[String], + latitude: Option[BigDecimal], + longitude: Option[BigDecimal]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/shopify/models/Attribute.scala b/jvm/src/main/scala/com/harana/modules/shopify/models/Attribute.scala new file mode 100644 index 0000000..6cdad2c --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/models/Attribute.scala @@ -0,0 +1,5 @@ +package com.harana.modules.shopify.models + +case class Attribute(name: String, + value: String) + diff --git a/jvm/src/main/scala/com/harana/modules/shopify/models/Customer.scala b/jvm/src/main/scala/com/harana/modules/shopify/models/Customer.scala new file mode 100644 index 0000000..3da0df1 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/models/Customer.scala @@ -0,0 +1,18 @@ +package com.harana.modules.shopify.models + +import java.time.Instant + +case class Customer(id: Long, + email: 
String, + acceptsMarketing: Boolean, + createdAt: Instant, + updatedAt: Instant, + firstName: String, + lastName: String, + phone: Option[String], + ordersCount: Long, + state: String, + totalSpent: BigDecimal, + note: Option[String], + verifiedEmail: Option[Boolean], + addresses: Set[Address]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/shopify/models/Fulfilment.scala b/jvm/src/main/scala/com/harana/modules/shopify/models/Fulfilment.scala new file mode 100644 index 0000000..59b22cc --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/models/Fulfilment.scala @@ -0,0 +1,15 @@ +package com.harana.modules.shopify.models + +import java.time.Instant + +case class Fulfilment(id: Long, + orderId: Long, + status: String, + createdAt: Instant, + updatedAt: Instant, + trackingCompany: Option[String], + trackingNumber: Option[String], + lineItems: List[LineItem], + trackingUrl: Option[String], + trackingUrls: List[String], + locationId: Option[Long]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/shopify/models/Image.scala b/jvm/src/main/scala/com/harana/modules/shopify/models/Image.scala new file mode 100644 index 0000000..e97646a --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/models/Image.scala @@ -0,0 +1,8 @@ +package com.harana.modules.shopify.models + +case class Image(id: Long, + productId: Long, + name: Option[String], + position: Int, + source: Option[String], + variantIds: List[Long]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/shopify/models/InventoryLevel.scala b/jvm/src/main/scala/com/harana/modules/shopify/models/InventoryLevel.scala new file mode 100644 index 0000000..5f4357d --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/models/InventoryLevel.scala @@ -0,0 +1,5 @@ +package com.harana.modules.shopify.models + +case class InventoryLevel(inventoryItemId: Long, + locationId: Option[Long], + available: Long) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/shopify/models/InventoryPolicy.scala b/jvm/src/main/scala/com/harana/modules/shopify/models/InventoryPolicy.scala new file mode 100644 index 0000000..0a37aab --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/models/InventoryPolicy.scala @@ -0,0 +1,11 @@ +package com.harana.modules.shopify.models + +import enumeratum.values.{StringCirceEnum, StringEnum, StringEnumEntry} + +sealed abstract class InventoryPolicy(val value: String) extends StringEnumEntry + +object InventoryPolicy extends StringEnum[InventoryPolicy] with StringCirceEnum[InventoryPolicy] { + case object Continue extends InventoryPolicy("continue") + case object Deny extends InventoryPolicy("deny") + val values = findValues +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/shopify/models/LineItem.scala b/jvm/src/main/scala/com/harana/modules/shopify/models/LineItem.scala new file mode 100644 index 0000000..b6149a2 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/models/LineItem.scala @@ -0,0 +1,21 @@ +package com.harana.modules.shopify.models + +case class LineItem(id: Long, + variantId: Long, + title: String, + quantity: Long, + price: BigDecimal, + grams: Long, + sku: Option[String], + variantTitle: String, + vendor: String, + productId: Long, + requiresShipping: Boolean, + taxable: Boolean, + giftCard: Boolean, + name: String, + variantInventoryManagement: Option[String], + fulfillableQuantity: Long, + totalDiscount: BigDecimal, + 
fulfillmentStatus: Option[String], + fulfillmentService: String) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/shopify/models/Location.scala b/jvm/src/main/scala/com/harana/modules/shopify/models/Location.scala new file mode 100644 index 0000000..70acb87 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/models/Location.scala @@ -0,0 +1,14 @@ +package com.harana.modules.shopify.models + +case class Location(id: Long, + name: String, + address1: String, + address2: String, + city: String, + zip: String, + country: String, + phone: String, + province: String, + countryCode: String, + countryName: String, + provinceCode: String) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/shopify/models/Metafield.scala b/jvm/src/main/scala/com/harana/modules/shopify/models/Metafield.scala new file mode 100644 index 0000000..0cb22ab --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/models/Metafield.scala @@ -0,0 +1,15 @@ +package com.harana.modules.shopify.models + +import java.time.Instant + +case class Metafield(id: Long, + namespace: String, + key: String, + value: String, + description: Option[String], + owner_id: Long, + owner_resource: String, + created_at: Option[Instant], + updated_at: Option[Instant], + `type`: String, + admin_graphql_api_id: String) diff --git a/jvm/src/main/scala/com/harana/modules/shopify/models/MetafieldType.scala b/jvm/src/main/scala/com/harana/modules/shopify/models/MetafieldType.scala new file mode 100644 index 0000000..c32194b --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/models/MetafieldType.scala @@ -0,0 +1,53 @@ +package com.harana.modules.shopify.models + +import enumeratum.values.{StringCirceEnum, StringEnum, StringEnumEntry} + +sealed abstract class MetafieldType(val value: String) extends StringEnumEntry + +object MetafieldType extends StringEnum[MetafieldType] with StringCirceEnum[MetafieldType] { + case object Boolean extends MetafieldType("boolean") + case object Color extends MetafieldType("color") + case object Date extends MetafieldType("date") + case object DateTime extends MetafieldType("date_time") + case object Dimension extends MetafieldType("dimension") + case object Json extends MetafieldType("json") + case object Money extends MetafieldType("money") + case object MultiLineTextField extends MetafieldType("multi_line_text_field") + case object NumberDecimal extends MetafieldType("number_decimal") + case object NumberInteger extends MetafieldType("number_integer") + case object Rating extends MetafieldType("rating") + case object RichTextField extends MetafieldType("rich_text_field") + case object SingleLineTextField extends MetafieldType("single_line_text_field") + case object Url extends MetafieldType("url") + case object Volume extends MetafieldType("volume") + case object Weight extends MetafieldType("weight") + + case object CollectionReference extends MetafieldType("collection_reference") + case object FileReference extends MetafieldType("file_reference") + case object MetaobjectReference extends MetafieldType("metaobject_reference") + case object MixedReference extends MetafieldType("mixed_reference") + case object PageReference extends MetafieldType("page_reference") + case object ProductReference extends MetafieldType("product_reference") + case object VariantReference extends MetafieldType("variant_reference") + + case object ListCollectionReference extends MetafieldType("list.collection_reference") + case object ListColor extends 
MetafieldType("list.color") + case object ListDate extends MetafieldType("list.date") + case object ListDateTime extends MetafieldType("list.date_time") + case object ListDimension extends MetafieldType("list.dimension") + case object ListFileReference extends MetafieldType("list.file_reference") + case object ListMetaobjectReference extends MetafieldType("list.metaobject_reference") + case object ListMixedReference extends MetafieldType("list.mixed_reference") + case object ListNumberInteger extends MetafieldType("list.number_integer") + case object ListNumberDecimal extends MetafieldType("list.number_decimal") + case object ListPageReference extends MetafieldType("list.page_reference") + case object ListProductReference extends MetafieldType("list.product_reference") + case object ListRating extends MetafieldType("list.rating") + case object ListSingleLineTextField extends MetafieldType("list.single_line_text_field") + case object ListUrl extends MetafieldType("list.url") + case object ListVariantReference extends MetafieldType("list.variant_reference") + case object ListVolume extends MetafieldType("list.volume") + case object ListWeight extends MetafieldType("list.weight") + + val values = findValues +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/shopify/models/Metaobject.scala b/jvm/src/main/scala/com/harana/modules/shopify/models/Metaobject.scala new file mode 100644 index 0000000..40d6d09 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/models/Metaobject.scala @@ -0,0 +1,5 @@ +package com.harana.modules.shopify.models + +case class Metaobject(id: String, + `type`: String, + fields: List[Metafield]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/shopify/models/MetaobjectDefinition.scala b/jvm/src/main/scala/com/harana/modules/shopify/models/MetaobjectDefinition.scala new file mode 100644 index 0000000..79c5999 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/models/MetaobjectDefinition.scala @@ -0,0 +1,5 @@ +package com.harana.modules.shopify.models + +case class MetaobjectDefinition(id: String, + `type`: String, + fieldDefinitions: List[Metafield]) diff --git a/jvm/src/main/scala/com/harana/modules/shopify/models/Order.scala b/jvm/src/main/scala/com/harana/modules/shopify/models/Order.scala new file mode 100644 index 0000000..264bb99 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/models/Order.scala @@ -0,0 +1,47 @@ +package com.harana.modules.shopify.models + +import java.time.Instant + +case class Order(id: Long, + email: String, + closedAt: Option[Instant], + createdAt: Instant, + updatedAt: Instant, + number: Int, + note: Option[String], + token: String, + totalPrice: BigDecimal, + subtotalPrice: BigDecimal, + totalWeight: Long, + totalTax: BigDecimal, + taxesIncluded: Boolean = false, + currency: Option[String], + financialStatus: String, + totalDiscounts: BigDecimal, + totalLineItemsPrice: BigDecimal, + cartToken: String, + buyerAcceptsMarketing: Boolean = false, + name: String, + referringSite: String, + landingSite: String, + cancelledAt: Option[Instant], + cancelReason: Option[String], + userId: Option[Long], + locationId: Option[Long], + processedAt: Instant, + browserIp: String, + orderNumber: Long, + processingMethod: String, + sourceName: String, + fulfillmentStatus: Option[String], + tags: String, + orderStatusUrl: String, + lineItems: List[LineItem], + fulfillments: List[Fulfilment], + billingAddress: Address, + shippingAddress: Option[Address], + 
customer: Customer, + shippingLines: List[ShippingLine], + taxLines: List[TaxLine], + noteAttributes: List[Attribute], + refunds: List[Refund]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/shopify/models/Page.scala b/jvm/src/main/scala/com/harana/modules/shopify/models/Page.scala new file mode 100644 index 0000000..c55ee80 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/models/Page.scala @@ -0,0 +1,5 @@ +package com.harana.modules.shopify.models + +case class Page[T](previousUrl: Option[String], + nextUrl: Option[String], + items: List[T]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/shopify/models/Product.scala b/jvm/src/main/scala/com/harana/modules/shopify/models/Product.scala new file mode 100644 index 0000000..401c797 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/models/Product.scala @@ -0,0 +1,18 @@ +package com.harana.modules.shopify.models + +import java.time.Instant + +case class Product(id: Long, + title: String, + productType: String, + bodyHtml: String, + vendor: String, + tags: String, + options: List[ProductOption], + metafieldsGlobalTitleTag: Option[String], + metafieldsGlobalDescriptionTag: Option[String], + images: List[Image], + image: Image, + variants: List[ProductVariant], + publishedAt: Option[Instant], + published: Option[Boolean]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/shopify/models/ProductOption.scala b/jvm/src/main/scala/com/harana/modules/shopify/models/ProductOption.scala new file mode 100644 index 0000000..9321aa2 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/models/ProductOption.scala @@ -0,0 +1,7 @@ +package com.harana.modules.shopify.models + +case class ProductOption(id: Long, + productId: Long, + name: String, + position: Int, + values: List[String]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/shopify/models/ProductVariant.scala b/jvm/src/main/scala/com/harana/modules/shopify/models/ProductVariant.scala new file mode 100644 index 0000000..872ba30 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/models/ProductVariant.scala @@ -0,0 +1,23 @@ +package com.harana.modules.shopify.models + +case class ProductVariant(id: Long, + productId: Long, + title: String, + price: BigDecimal, + compareAtPrice: Option[BigDecimal], + sku: Option[String], + barcode: Option[String], + position: Int, + grams: Long, + inventoryQuantity: Long, + imageId: Option[Long], + inventoryPolicy: Option[String], + inventoryManagement: Option[String], + option1: Option[String], + option2: Option[String], + option3: Option[String], + fulfillmentService: Option[String], + requiresShipping: Boolean, + taxable: Boolean, + inventoryItemId: Long, + available: Option[Long]) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/shopify/models/Refund.scala b/jvm/src/main/scala/com/harana/modules/shopify/models/Refund.scala new file mode 100644 index 0000000..97d74ac --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/models/Refund.scala @@ -0,0 +1,14 @@ +package com.harana.modules.shopify.models + +import java.time.Instant + +case class Refund(id: Long, + orderId: Long, + createdAt: Instant, + note: Option[String], + userId: Option[Long] , + processedAt: Instant, + refundLineItems: List[RefundLineItem], + shipping: Option[RefundShippingDetails], + transactions: List[Transaction], + currency: Option[String]) \ No newline at end of file diff --git 
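
A quick usage sketch of the pagination helpers defined in LiveShopify above: `all` keeps following the next-page URL parsed from the `link` response header until no further page exists, which is how `forecastInventory` collects every order. The connection values below are placeholders.

```
import com.harana.modules.shopify._
import com.harana.modules.shopify.models.{Order, ShopifyConnection}
import zio.Task

object ShopifyPagingExample {
  // Placeholder credentials; the implicit connection is threaded through every call.
  implicit val connection: ShopifyConnection =
    ShopifyConnection("my-shop", "api-key", "password")

  // Walks every page of orders, 250 at a time, and concatenates the items.
  def allOrders(shopify: Shopify): Task[List[Order]] =
    shopify.all(shopify.orders(limit = Some(250), status = Some("any")))
}
```
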
a/jvm/src/main/scala/com/harana/modules/shopify/models/RefundLineItem.scala b/jvm/src/main/scala/com/harana/modules/shopify/models/RefundLineItem.scala new file mode 100644 index 0000000..2760dd6 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/models/RefundLineItem.scala @@ -0,0 +1,10 @@ +package com.harana.modules.shopify.models + +case class RefundLineItem(id: Long, + quantity: Long, + lineItemId: Long, + locationId: Option[Long], + restockType: String, + subtotal: BigDecimal, + totalTax: BigDecimal, + lineItem: LineItem) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/shopify/models/RefundShippingDetails.scala b/jvm/src/main/scala/com/harana/modules/shopify/models/RefundShippingDetails.scala new file mode 100644 index 0000000..4facc2d --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/models/RefundShippingDetails.scala @@ -0,0 +1,6 @@ +package com.harana.modules.shopify.models + +case class RefundShippingDetails(amount: BigDecimal, + tax: BigDecimal, + maximumRefundable: Option[BigDecimal], + fullRefund: Boolean) diff --git a/jvm/src/main/scala/com/harana/modules/shopify/models/ShippingLine.scala b/jvm/src/main/scala/com/harana/modules/shopify/models/ShippingLine.scala new file mode 100644 index 0000000..46208ab --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/models/ShippingLine.scala @@ -0,0 +1,7 @@ +package com.harana.modules.shopify.models + +case class ShippingLine(id: Long, + title: String, + price: BigDecimal, + code: String, + source: String) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/shopify/models/ShopifyConnection.scala b/jvm/src/main/scala/com/harana/modules/shopify/models/ShopifyConnection.scala new file mode 100644 index 0000000..610ec34 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/models/ShopifyConnection.scala @@ -0,0 +1,5 @@ +package com.harana.modules.shopify.models + +case class ShopifyConnection(subdomain: String, + apiKey: String, + password: String) diff --git a/jvm/src/main/scala/com/harana/modules/shopify/models/TaxLine.scala b/jvm/src/main/scala/com/harana/modules/shopify/models/TaxLine.scala new file mode 100644 index 0000000..bb35fa2 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/models/TaxLine.scala @@ -0,0 +1,5 @@ +package com.harana.modules.shopify.models + +case class TaxLine(title: String, + price: BigDecimal, + rate: BigDecimal) diff --git a/jvm/src/main/scala/com/harana/modules/shopify/models/Transaction.scala b/jvm/src/main/scala/com/harana/modules/shopify/models/Transaction.scala new file mode 100644 index 0000000..05acdfd --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/models/Transaction.scala @@ -0,0 +1,10 @@ +package com.harana.modules.shopify.models + +case class Transaction(orderId: Long, + kind: String, + gateway: String, + parentId: Long, + amount: BigDecimal, + currency: Option[String], + maximumRefundable: Option[BigDecimal], + receipt: TransactionReceipt) \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/shopify/models/TransactionReceipt.scala b/jvm/src/main/scala/com/harana/modules/shopify/models/TransactionReceipt.scala new file mode 100644 index 0000000..089e307 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/models/TransactionReceipt.scala @@ -0,0 +1,3 @@ +package com.harana.modules.shopify.models + +case class TransactionReceipt(applePay: Option[Boolean]) \ No newline at end of file diff --git 
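
The shopify package object that follows configures circe with `withSnakeCaseMemberNames`, so the snake_case JSON returned by the Shopify REST API maps directly onto the camelCase case classes above. A minimal decoding sketch (values are made up):

```
import com.harana.modules.shopify._
import com.harana.modules.shopify.models.InventoryLevel
import io.circe.parser.decode

object DecodeExample {
  // snake_case keys from the API are mapped onto camelCase fields by the configuration.
  val json  = """{"inventory_item_id": 42, "location_id": 7, "available": 3}"""
  val level = decode[InventoryLevel](json) // Right(InventoryLevel(42, Some(7), 3))
}
```
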
a/jvm/src/main/scala/com/harana/modules/shopify/package.scala b/jvm/src/main/scala/com/harana/modules/shopify/package.scala new file mode 100644 index 0000000..fd45daf --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify/package.scala @@ -0,0 +1,80 @@ +package com.harana.modules + +import com.harana.modules.shopify.models._ +import io.circe.generic.extras.Configuration +import io.circe.generic.extras.semiauto._ +import io.circe.{Decoder, Encoder} + +import java.time.Instant +import java.time.format.DateTimeFormatter +import scala.util.Try + +package object shopify { + + case class Output(productTitle: String, + variantTitle: String, + variantSku: String, + variantId: Long, + variantOption1: String, + variantOption2: String, + variantOption3: String, + month1Sales: String, + month2Sales: String, + month3Sales: String, + totalSales: String, + inventoryLevel: String) + + implicit val jsonConfig: Configuration = Configuration.default.withSnakeCaseMemberNames.withSnakeCaseConstructorNames.withDefaults + + implicit val encodeInstant: Encoder[Instant] = Encoder.encodeString.contramap[Instant](_.toString) + implicit val decodeInstant: Decoder[Instant] = Decoder.decodeString.emapTry { str => Try(Instant.from(DateTimeFormatter.ISO_OFFSET_DATE_TIME.parse(str))) } + + implicit val addressEncoder: Encoder[Address] = deriveConfiguredEncoder[Address] + implicit val addressDecoder: Decoder[Address] = deriveConfiguredDecoder[Address] + implicit val attributeEncoder: Encoder[Attribute] = deriveConfiguredEncoder[Attribute] + implicit val attributeDecoder: Decoder[Attribute] = deriveConfiguredDecoder[Attribute] + implicit val customerEncoder: Encoder[Customer] = deriveConfiguredEncoder[Customer] + implicit val customerDecoder: Decoder[Customer] = deriveConfiguredDecoder[Customer] + implicit val fulfilmentEncoder: Encoder[Fulfilment] = deriveConfiguredEncoder[Fulfilment] + implicit val fulfilmentDecoder: Decoder[Fulfilment] = deriveConfiguredDecoder[Fulfilment] + implicit val imageEncoder: Encoder[Image] = deriveConfiguredEncoder[Image] + implicit val imageDecoder: Decoder[Image] = deriveConfiguredDecoder[Image] + implicit val inventoryLevelEncoder: Encoder[InventoryLevel] = deriveConfiguredEncoder[InventoryLevel] + implicit val inventoryLevelDecoder: Decoder[InventoryLevel] = deriveConfiguredDecoder[InventoryLevel] + implicit val inventoryPolicyEncoder: Encoder[InventoryPolicy] = deriveConfiguredEncoder[InventoryPolicy] + implicit val inventoryPolicyDecoder: Decoder[InventoryPolicy] = deriveConfiguredDecoder[InventoryPolicy] + implicit val lineItemEncoder: Encoder[LineItem] = deriveConfiguredEncoder[LineItem] + implicit val lineItemDecoder: Decoder[LineItem] = deriveConfiguredDecoder[LineItem] + implicit val locationEncoder: Encoder[Location] = deriveConfiguredEncoder[Location] + implicit val locationDecoder: Decoder[Location] = deriveConfiguredDecoder[Location] + implicit val metafieldEncoder: Encoder[Metafield] = deriveConfiguredEncoder[Metafield] + implicit val metafieldDecoder: Decoder[Metafield] = deriveConfiguredDecoder[Metafield] + implicit val metaobjectEncoder: Encoder[Metaobject] = deriveConfiguredEncoder[Metaobject] + implicit val metaobjectDecoder: Decoder[Metaobject] = deriveConfiguredDecoder[Metaobject] + implicit val metaobjectDefinitionEncoder: Encoder[MetaobjectDefinition] = deriveConfiguredEncoder[MetaobjectDefinition] + implicit val metaobjectDefinitionDecoder: Decoder[MetaobjectDefinition] = deriveConfiguredDecoder[MetaobjectDefinition] + implicit val metafieldTypeEncoder: 
Encoder[MetafieldType] = deriveConfiguredEncoder[MetafieldType] + implicit val metafieldTypeDecoder: Decoder[MetafieldType] = deriveConfiguredDecoder[MetafieldType] + implicit val orderEncoder: Encoder[Order] = deriveConfiguredEncoder[Order] + implicit val orderDecoder: Decoder[Order] = deriveConfiguredDecoder[Order] + implicit val productEncoder: Encoder[Product] = deriveConfiguredEncoder[Product] + implicit val productDecoder: Decoder[Product] = deriveConfiguredDecoder[Product] + implicit val productOptionEncoder: Encoder[ProductOption] = deriveConfiguredEncoder[ProductOption] + implicit val productOptionDecoder: Decoder[ProductOption] = deriveConfiguredDecoder[ProductOption] + implicit val productVariantEncoder: Encoder[ProductVariant] = deriveConfiguredEncoder[ProductVariant] + implicit val productVariantDecoder: Decoder[ProductVariant] = deriveConfiguredDecoder[ProductVariant] + implicit val refundEncoder: Encoder[Refund] = deriveConfiguredEncoder[Refund] + implicit val refundDecoder: Decoder[Refund] = deriveConfiguredDecoder[Refund] + implicit val refundLineItemEncoder: Encoder[RefundLineItem] = deriveConfiguredEncoder[RefundLineItem] + implicit val refundLineItemDecoder: Decoder[RefundLineItem] = deriveConfiguredDecoder[RefundLineItem] + implicit val refundShippingDetailsEncoder: Encoder[RefundShippingDetails] = deriveConfiguredEncoder[RefundShippingDetails] + implicit val refundShippingDetailsDecoder: Decoder[RefundShippingDetails] = deriveConfiguredDecoder[RefundShippingDetails] + implicit val shippingLineEncoder: Encoder[ShippingLine] = deriveConfiguredEncoder[ShippingLine] + implicit val shippingLineDecoder: Decoder[ShippingLine] = deriveConfiguredDecoder[ShippingLine] + implicit val taxLineEncoder: Encoder[TaxLine] = deriveConfiguredEncoder[TaxLine] + implicit val taxLineDecoder: Decoder[TaxLine] = deriveConfiguredDecoder[TaxLine] + implicit val transactionEncoder: Encoder[Transaction] = deriveConfiguredEncoder[Transaction] + implicit val transactionDecoder: Decoder[Transaction] = deriveConfiguredDecoder[Transaction] + implicit val transactionReceiptEncoder: Encoder[TransactionReceipt] = deriveConfiguredEncoder[TransactionReceipt] + implicit val transactionReceiptDecoder: Decoder[TransactionReceipt] = deriveConfiguredDecoder[TransactionReceipt] +} diff --git a/jvm/src/main/scala/com/harana/modules/shopify_app/LiveShopifyApp.scala b/jvm/src/main/scala/com/harana/modules/shopify_app/LiveShopifyApp.scala new file mode 100644 index 0000000..f801522 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/shopify_app/LiveShopifyApp.scala @@ -0,0 +1,188 @@ +package com.harana.modules.shopify_app + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.shopify.ShopifySdk +import com.shopify.model._ +import org.joda.time.DateTime +import zio.{Task, ZIO, ZLayer} + +import java.util.concurrent.atomic.AtomicReference +import scala.jdk.CollectionConverters._ + +object LiveShopifyApp { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveShopifyApp(config, logger, micrometer) + } +} + +case class LiveShopifyApp(config: Config, logger: Logger, micrometer: Micrometer) extends ShopifyApp { + + def activateRecurringApplicationCharge(subdomain: String, accessToken: String, chargeId: String): Task[ShopifyRecurringApplicationCharge] = + sdk(subdomain, 
accessToken).map(_.activateRecurringApplicationCharge(chargeId)) + + def cancelFulfillment(subdomain: String, accessToken: String, orderId: String, fulfillmentId: String): Task[ShopifyFulfillment] = + sdk(subdomain, accessToken).map(_.cancelFulfillment(orderId, fulfillmentId)) + + def cancelOrder(subdomain: String, accessToken: String, orderId: String, reason: String): Task[ShopifyOrder] = + sdk(subdomain, accessToken).map(_.cancelOrder(orderId, reason)) + + def closeOrder(subdomain: String, accessToken: String, orderId: String): Task[ShopifyOrder] = + sdk(subdomain, accessToken).map(_.closeOrder(orderId)) + + def createCustomCollection(subdomain: String, accessToken: String, request: ShopifyCustomCollectionCreationRequest): Task[ShopifyCustomCollection] = + sdk(subdomain, accessToken).map(_.createCustomCollection(request)) + + def createFulfillment(subdomain: String, accessToken: String, request: ShopifyFulfillmentCreationRequest): Task[ShopifyFulfillment] = + sdk(subdomain, accessToken).map(_.createFulfillment(request)) + + def createGiftCard(subdomain: String, accessToken: String, request: ShopifyGiftCardCreationRequest): Task[ShopifyGiftCard] = + sdk(subdomain, accessToken).map(_.createGiftCard(request)) + + def createOrder(subdomain: String, accessToken: String, request: ShopifyOrderCreationRequest): Task[ShopifyOrder] = + sdk(subdomain, accessToken).map(_.createOrder(request)) + + def createProduct(subdomain: String, accessToken: String, request: ShopifyProductCreationRequest): Task[ShopifyProduct] = + sdk(subdomain, accessToken).map(_.createProduct(request)) + + def createProductMetafield(subdomain: String, accessToken: String, request: ShopifyProductMetafieldCreationRequest): Task[Metafield] = + sdk(subdomain, accessToken).map(_.createProductMetafield(request)) + + def createRecurringApplicationCharge(subdomain: String, accessToken: String, request: ShopifyRecurringApplicationChargeCreationRequest): Task[ShopifyRecurringApplicationCharge] = + sdk(subdomain, accessToken).map(_.createRecurringApplicationCharge(request)) + + def createVariantMetafield(subdomain: String, accessToken: String, request: ShopifyVariantMetafieldCreationRequest): Task[Metafield] = + sdk(subdomain, accessToken).map(_.createVariantMetafield(request)) + + def deleteProduct(subdomain: String, accessToken: String, productId: String): Task[Boolean] = + sdk(subdomain, accessToken).map(_.deleteProduct(productId)) + + def getAccessToken(subdomain: String, accessToken: String): Task[String] = + sdk(subdomain, accessToken).map(_.getAccessToken) + + def getCustomCollections(subdomain: String, accessToken: String, pageInfo: String, pageSize: Int): Task[ShopifyPage[ShopifyCustomCollection]] = + sdk(subdomain, accessToken).map(_.getCustomCollections(pageInfo, pageSize)) + + def getCustomCollections(subdomain: String, accessToken: String, pageSize: Int): Task[ShopifyPage[ShopifyCustomCollection]] = + sdk(subdomain, accessToken).map(_.getCustomCollections(pageSize)) + + def getCustomCollections(subdomain: String, accessToken: String): Task[List[ShopifyCustomCollection]] = + sdk(subdomain, accessToken).map(_.getCustomCollections.asScala.toList) + + def getCustomer(subdomain: String, accessToken: String, customerId: String): Task[ShopifyCustomer] = + sdk(subdomain, accessToken).map(_.getCustomer(customerId)) + + def getCustomers(subdomain: String, accessToken: String, request: ShopifyGetCustomersRequest): Task[ShopifyPage[ShopifyCustomer]] = + sdk(subdomain, accessToken).map(_.getCustomers(request)) + + def 
getLocations(subdomain: String, accessToken: String): Task[List[ShopifyLocation]] = + sdk(subdomain, accessToken).map(_.getLocations.asScala.toList) + + def getOrder(subdomain: String, accessToken: String, orderId: String): Task[ShopifyOrder] = + sdk(subdomain, accessToken).map(_.getOrder(orderId)) + + def getOrderMetafields(subdomain: String, accessToken: String, orderId: String): Task[List[Metafield]] = + sdk(subdomain, accessToken).map(_.getOrderMetafields(orderId).asScala.toList) + + def getOrderRisks(subdomain: String, accessToken: String, orderId: String): Task[List[ShopifyOrderRisk]] = + sdk(subdomain, accessToken).map(_.getOrderRisks(orderId).asScala.toList) + + def getOrderTransactions(subdomain: String, accessToken: String, orderId: String): Task[List[ShopifyTransaction]] = + sdk(subdomain, accessToken).map(_.getOrderTransactions(orderId).asScala.toList) + + def getOrders(subdomain: String, accessToken: String, minimumCreationDate: DateTime): Task[ShopifyPage[ShopifyOrder]] = + sdk(subdomain, accessToken).map(_.getOrders(minimumCreationDate)) + + def getOrders(subdomain: String, accessToken: String, minimumCreationDate: DateTime, maximumCreationDate: DateTime): Task[ShopifyPage[ShopifyOrder]] = + sdk(subdomain, accessToken).map(_.getOrders(minimumCreationDate, maximumCreationDate)) + + def getOrders(subdomain: String, accessToken: String, minimumCreationDate: DateTime, maximumCreationDate: DateTime, appId: String): Task[ShopifyPage[ShopifyOrder]] = + sdk(subdomain, accessToken).map(_.getOrders(minimumCreationDate, maximumCreationDate, appId)) + + def getOrders(subdomain: String, accessToken: String, minimumCreationDate: DateTime, maximumCreationDate: DateTime, appId: String, pageSize: Int): Task[ShopifyPage[ShopifyOrder]] = + sdk(subdomain, accessToken).map(_.getOrders(minimumCreationDate, maximumCreationDate, appId, pageSize)) + + def getOrders(subdomain: String, accessToken: String, minimumCreationDate: DateTime, maximumCreationDate: DateTime, pageSize: Int): Task[ShopifyPage[ShopifyOrder]] = + sdk(subdomain, accessToken).map(_.getOrders(minimumCreationDate, maximumCreationDate, pageSize)) + + def getOrders(subdomain: String, accessToken: String, minimumCreationDate: DateTime, pageSize: Int): Task[ShopifyPage[ShopifyOrder]] = + sdk(subdomain, accessToken).map(_.getOrders(minimumCreationDate, pageSize)) + + def getOrders(subdomain: String, accessToken: String, pageInfo: String, pageSize: Int): Task[ShopifyPage[ShopifyOrder]] = + sdk(subdomain, accessToken).map(_.getOrders(pageInfo, pageSize)) + + def getOrders(subdomain: String, accessToken: String, pageSize: Int): Task[ShopifyPage[ShopifyOrder]] = + sdk(subdomain, accessToken).map(_.getOrders(pageSize)) + + def getOrders(subdomain: String, accessToken: String): Task[ShopifyPage[ShopifyOrder]] = + sdk(subdomain, accessToken).map(_.getOrders) + + def getProduct(subdomain: String, accessToken: String, productId: String): Task[ShopifyProduct] = + sdk(subdomain, accessToken).map(_.getProduct(productId)) + + def getProductCount(subdomain: String, accessToken: String): Task[Int] = + sdk(subdomain, accessToken).map(_.getProductCount) + + def getProductMetafields(subdomain: String, accessToken: String, productId: String): Task[List[Metafield]] = + sdk(subdomain, accessToken).map(_.getProductMetafields(productId).asScala.toList) + + def getProducts(subdomain: String, accessToken: String, pageInfo: String, pageSize: Int): Task[ShopifyPage[ShopifyProduct]] = + sdk(subdomain, accessToken).map(_.getProducts(pageInfo, pageSize)) + + def 
getProducts(subdomain: String, accessToken: String, pageSize: Int): Task[ShopifyPage[ShopifyProduct]] = + sdk(subdomain, accessToken).map(_.getProducts(pageSize)) + + def getProducts(subdomain: String, accessToken: String): Task[ShopifyProducts] = + sdk(subdomain, accessToken).map(_.getProducts) + + def getRecurringApplicationCharge(subdomain: String, accessToken: String, chargeId: String): Task[ShopifyRecurringApplicationCharge] = + sdk(subdomain, accessToken).map(_.getRecurringApplicationCharge(chargeId)) + + def getShop(subdomain: String, accessToken: String): Task[ShopifyShop] = + sdk(subdomain, accessToken).map(_.getShop) + + def getUpdatedOrdersCreatedBefore(subdomain: String, accessToken: String, minimumUpdatedAtDate: DateTime, maximumUpdatedAtDate: DateTime, maximumCreatedAtDate: DateTime, pageSize: Int): Task[ShopifyPage[ShopifyOrder]] = + sdk(subdomain, accessToken).map(_.getUpdatedOrdersCreatedBefore(minimumUpdatedAtDate, maximumUpdatedAtDate, maximumCreatedAtDate, pageSize)) + + def getVariant(subdomain: String, accessToken: String, variantId: String): Task[ShopifyVariant] = + sdk(subdomain, accessToken).map(_.getVariant(variantId)) + + def getVariantMetafields(subdomain: String, accessToken: String, variantId: String): Task[List[Metafield]] = + sdk(subdomain, accessToken).map(_.getVariantMetafields(variantId).asScala.toList) + + def refund(subdomain: String, accessToken: String, request: ShopifyRefundCreationRequest): Task[ShopifyRefund] = + sdk(subdomain, accessToken).map(_.refund(request)) + + def revokeOAuthToken(subdomain: String, accessToken: String): Task[Boolean] = + sdk(subdomain, accessToken).map(_.revokeOAuthToken) + + def searchCustomers(subdomain: String, accessToken: String, query: String): Task[ShopifyPage[ShopifyCustomer]] = + sdk(subdomain, accessToken).map(_.searchCustomers(query)) + + def updateCustomer(subdomain: String, accessToken: String, request: ShopifyCustomerUpdateRequest): Task[ShopifyCustomer] = + sdk(subdomain, accessToken).map(_.updateCustomer(request)) + + def updateFulfillment(subdomain: String, accessToken: String, request: ShopifyFulfillmentUpdateRequest): Task[ShopifyFulfillment] = + sdk(subdomain, accessToken).map(_.updateFulfillment(request)) + + def updateInventoryLevel(subdomain: String, accessToken: String, inventoryItemId: String, locationId: String, quantity: Long): Task[ShopifyInventoryLevel] = + sdk(subdomain, accessToken).map(_.updateInventoryLevel(inventoryItemId, locationId, quantity)) + + def updateOrderShippingAddress(subdomain: String, accessToken: String, request: ShopifyOrderShippingAddressUpdateRequest): Task[ShopifyOrder] = + sdk(subdomain, accessToken).map(_.updateOrderShippingAddress(request)) + + def updateProduct(subdomain: String, accessToken: String, request: ShopifyProductUpdateRequest): Task[ShopifyProduct] = + sdk(subdomain, accessToken).map(_.updateProduct(request)) + + def updateVariant(subdomain: String, accessToken: String, request: ShopifyVariantUpdateRequest): Task[ShopifyVariant] = + sdk(subdomain, accessToken).map(_.updateVariant(request)) + + private def sdk(subdomain: String, accessToken: String): Task[ShopifySdk] = + ZIO.attempt(ShopifySdk.newBuilder().withSubdomain(subdomain).withAccessToken(accessToken).build()) + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/shopify_app/ShopifyApp.scala b/jvm/src/main/scala/com/harana/modules/shopify_app/ShopifyApp.scala new file mode 100644 index 0000000..0f326a5 --- /dev/null +++ 
b/jvm/src/main/scala/com/harana/modules/shopify_app/ShopifyApp.scala @@ -0,0 +1,115 @@ +package com.harana.modules.shopify_app + +import com.shopify.model._ +import org.joda.time.DateTime +import zio.Task +import zio.macros.accessible + +@accessible +trait ShopifyApp { + def activateRecurringApplicationCharge(subdomain: String, accessToken: String, chargeId: String): Task[ShopifyRecurringApplicationCharge] + + def cancelFulfillment(subdomain: String, accessToken: String, orderId: String, fulfillmentId: String): Task[ShopifyFulfillment] + + def cancelOrder(subdomain: String, accessToken: String, orderId: String, reason: String): Task[ShopifyOrder] + + def closeOrder(subdomain: String, accessToken: String, orderId: String): Task[ShopifyOrder] + + def createCustomCollection(subdomain: String, accessToken: String, request: ShopifyCustomCollectionCreationRequest): Task[ShopifyCustomCollection] + + def createFulfillment(subdomain: String, accessToken: String, request: ShopifyFulfillmentCreationRequest): Task[ShopifyFulfillment] + + def createGiftCard(subdomain: String, accessToken: String, request: ShopifyGiftCardCreationRequest): Task[ShopifyGiftCard] + + def createOrder(subdomain: String, accessToken: String, request: ShopifyOrderCreationRequest): Task[ShopifyOrder] + + def createProduct(subdomain: String, accessToken: String, request: ShopifyProductCreationRequest): Task[ShopifyProduct] + + def createProductMetafield(subdomain: String, accessToken: String, request: ShopifyProductMetafieldCreationRequest): Task[Metafield] + + def createRecurringApplicationCharge(subdomain: String, accessToken: String, request: ShopifyRecurringApplicationChargeCreationRequest): Task[ShopifyRecurringApplicationCharge] + + def createVariantMetafield(subdomain: String, accessToken: String, request: ShopifyVariantMetafieldCreationRequest): Task[Metafield] + + def deleteProduct(subdomain: String, accessToken: String, productId: String): Task[Boolean] + + def getAccessToken(subdomain: String, accessToken: String): Task[String] + + def getCustomCollections(subdomain: String, accessToken: String, pageInfo: String, pageSize: Int): Task[ShopifyPage[ShopifyCustomCollection]] + + def getCustomCollections(subdomain: String, accessToken: String, pageSize: Int): Task[ShopifyPage[ShopifyCustomCollection]] + + def getCustomCollections(subdomain: String, accessToken: String): Task[List[ShopifyCustomCollection]] + + def getCustomer(subdomain: String, accessToken: String, customerId: String): Task[ShopifyCustomer] + + def getCustomers(subdomain: String, accessToken: String, request: ShopifyGetCustomersRequest): Task[ShopifyPage[ShopifyCustomer]] + + def getLocations(subdomain: String, accessToken: String): Task[List[ShopifyLocation]] + + def getOrder(subdomain: String, accessToken: String, orderId: String): Task[ShopifyOrder] + + def getOrderMetafields(subdomain: String, accessToken: String, orderId: String): Task[List[Metafield]] + + def getOrderRisks(subdomain: String, accessToken: String, orderId: String): Task[List[ShopifyOrderRisk]] + + def getOrderTransactions(subdomain: String, accessToken: String, orderId: String): Task[List[ShopifyTransaction]] + + def getOrders(subdomain: String, accessToken: String, minimumCreationDate: DateTime): Task[ShopifyPage[ShopifyOrder]] + + def getOrders(subdomain: String, accessToken: String, minimumCreationDate: DateTime, maximumCreationDate: DateTime): Task[ShopifyPage[ShopifyOrder]] + + def getOrders(subdomain: String, accessToken: String, minimumCreationDate: DateTime, maximumCreationDate: 
DateTime, appId: String): Task[ShopifyPage[ShopifyOrder]] + + def getOrders(subdomain: String, accessToken: String, minimumCreationDate: DateTime, maximumCreationDate: DateTime, appId: String, pageSize: Int): Task[ShopifyPage[ShopifyOrder]] + + def getOrders(subdomain: String, accessToken: String, minimumCreationDate: DateTime, maximumCreationDate: DateTime, pageSize: Int): Task[ShopifyPage[ShopifyOrder]] + + def getOrders(subdomain: String, accessToken: String, minimumCreationDate: DateTime, pageSize: Int): Task[ShopifyPage[ShopifyOrder]] + + def getOrders(subdomain: String, accessToken: String, pageInfo: String, pageSize: Int): Task[ShopifyPage[ShopifyOrder]] + + def getOrders(subdomain: String, accessToken: String, pageSize: Int): Task[ShopifyPage[ShopifyOrder]] + + def getOrders(subdomain: String, accessToken: String): Task[ShopifyPage[ShopifyOrder]] + + def getProduct(subdomain: String, accessToken: String, productId: String): Task[ShopifyProduct] + + def getProductCount(subdomain: String, accessToken: String): Task[Int] + + def getProductMetafields(subdomain: String, accessToken: String, productId: String): Task[List[Metafield]] + + def getProducts(subdomain: String, accessToken: String, pageInfo: String, pageSize: Int): Task[ShopifyPage[ShopifyProduct]] + + def getProducts(subdomain: String, accessToken: String, pageSize: Int): Task[ShopifyPage[ShopifyProduct]] + + def getProducts(subdomain: String, accessToken: String): Task[ShopifyProducts] + + def getRecurringApplicationCharge(subdomain: String, accessToken: String, chargeId: String): Task[ShopifyRecurringApplicationCharge] + + def getShop(subdomain: String, accessToken: String): Task[ShopifyShop] + + def getUpdatedOrdersCreatedBefore(subdomain: String, accessToken: String, minimumUpdatedAtDate: DateTime, maximumUpdatedAtDate: DateTime, maximumCreatedAtDate: DateTime, pageSize: Int): Task[ShopifyPage[ShopifyOrder]] + + def getVariant(subdomain: String, accessToken: String, variantId: String): Task[ShopifyVariant] + + def getVariantMetafields(subdomain: String, accessToken: String, variantId: String): Task[List[Metafield]] + + def refund(subdomain: String, accessToken: String, request: ShopifyRefundCreationRequest): Task[ShopifyRefund] + + def revokeOAuthToken(subdomain: String, accessToken: String): Task[Boolean] + + def searchCustomers(subdomain: String, accessToken: String, query: String): Task[ShopifyPage[ShopifyCustomer]] + + def updateCustomer(subdomain: String, accessToken: String, request: ShopifyCustomerUpdateRequest): Task[ShopifyCustomer] + + def updateFulfillment(subdomain: String, accessToken: String, request: ShopifyFulfillmentUpdateRequest): Task[ShopifyFulfillment] + + def updateInventoryLevel(subdomain: String, accessToken: String, inventoryItemId: String, locationId: String, quantity: Long): Task[ShopifyInventoryLevel] + + def updateOrderShippingAddress(subdomain: String, accessToken: String, request: ShopifyOrderShippingAddressUpdateRequest): Task[ShopifyOrder] + + def updateProduct(subdomain: String, accessToken: String, request: ShopifyProductUpdateRequest): Task[ShopifyProduct] + + def updateVariant(subdomain: String, accessToken: String, request: ShopifyVariantUpdateRequest): Task[ShopifyVariant] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/slack/LiveSlack.scala b/jvm/src/main/scala/com/harana/modules/slack/LiveSlack.scala new file mode 100644 index 0000000..2575134 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/slack/LiveSlack.scala @@ -0,0 +1,599 @@ +package 
com.harana.modules.slack + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.hubspot.slack.client.methods.params.auth.AuthRevokeParams +import com.hubspot.slack.client.methods.params.channels._ +import com.hubspot.slack.client.methods.params.chat._ +import com.hubspot.slack.client.methods.params.conversations._ +import com.hubspot.slack.client.methods.params.dialog.DialogOpenParams +import com.hubspot.slack.client.methods.params.files.{FilesSharedPublicUrlParams, FilesUploadParams} +import com.hubspot.slack.client.methods.params.group.{GroupsKickParams, GroupsListParams} +import com.hubspot.slack.client.methods.params.im.ImOpenParams +import com.hubspot.slack.client.methods.params.reactions.ReactionsAddParams +import com.hubspot.slack.client.methods.params.search.SearchMessagesParams +import com.hubspot.slack.client.methods.params.usergroups._ +import com.hubspot.slack.client.methods.params.usergroups.users.UsergroupUsersUpdateParams +import com.hubspot.slack.client.methods.params.users.{UserEmailParams, UsersInfoParams, UsersListParams} +import com.hubspot.slack.client.methods.{ResultSort, ResultSortOrder} +import com.hubspot.slack.client.models.conversations.{Conversation, ConversationType} +import com.hubspot.slack.client.models.dialog.SlackDialog +import com.hubspot.slack.client.models.files.SlackFile +import com.hubspot.slack.client.models.group.SlackGroup +import com.hubspot.slack.client.models.response.auth.AuthTestResponse +import com.hubspot.slack.client.models.response.{MessagePage, SlackError} +import com.hubspot.slack.client.models.teams.SlackTeam +import com.hubspot.slack.client.models.usergroups.SlackUsergroup +import com.hubspot.slack.client.models.users.SlackUser +import com.hubspot.slack.client.models.{Attachment, LiteMessage, SlackChannel} +import com.hubspot.slack.client.{SlackClient, SlackClientFactory, SlackClientRuntimeConfig} +import zio.{IO, ZIO, ZLayer} + +import scala.jdk.CollectionConverters._ + +object LiveSlack { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveSlack(config, logger, micrometer) + } +} + +case class LiveSlack(config: Config, logger: Logger, micrometer: Micrometer) extends Slack { + + def newClient(token: String): IO[Nothing, SlackClient] = { + val config = SlackClientRuntimeConfig.builder() + .setTokenSupplier(() => token) + .build() + ZIO.succeed(SlackClientFactory.defaultFactory().build(config)) + } + + + def Auth(client: SlackClient): IO[Either[SlackError, Throwable], AuthTestResponse] = + client.testAuth() + + + def revokeAuth(client: SlackClient): IO[Either[SlackError, Throwable], Boolean] = + client.revokeAuth(AuthRevokeParams.builder().build()).map(_.isRevoked) + + + def searchMessages(client: SlackClient, + count: Int, + page: Int, + query: String, + shouldHighlight: Boolean, + sort: ResultSort, + sortOrder: ResultSortOrder): IO[Either[SlackError, Throwable], MessagePage] = + client.searchMessages( + SearchMessagesParams.builder() + .setCount(count) + .setPage(page) + .setQuery(query) + .setShouldHighlight(shouldHighlight) + .setSort(sort) + .setSortOrder(sortOrder) + .build() + ).map(_.getMessages) + + + def findReplies(client: SlackClient, + channelId: String, + threadTs: String): IO[Either[SlackError, Throwable], List[LiteMessage]] = + client.findReplies( + FindRepliesParams.builder() + .setChannelId(channelId) + 
.setThreadTs(threadTs) + .build() + ).map(_.getMessages.asScala.toList) + + + def findUser(client: SlackClient, + userId: String, + includeLocale: Boolean): IO[Either[SlackError, Throwable], SlackUser] = + client.findUser( + UsersInfoParams.builder() + .setIncludeLocale(includeLocale) + .setUserId(userId) + .build() + ).map(_.getUser) + + + def lookupUserByEmail(client: SlackClient, + email: String): IO[Either[SlackError, Throwable], SlackUser] = + client.lookupUserByEmail( + UserEmailParams.builder() + .setEmail(email) + .build() + ).map(_.getUser) + + + def listUsers(client: SlackClient): IO[Either[SlackError, Throwable], List[SlackUser]] = + client.listUsers() + + + def listUsersPaginated(client: SlackClient, + cursor: Option[String], + limit: Option[Int]): IO[Either[SlackError, Throwable], List[SlackUser]] = + client.listUsersPaginated( + UsersListParams.builder() + .setCursor(cursor) + .setLimit(limit) + .build() + ).map(_.getMembers.asScala.toList) + + + def listChannels(client: SlackClient, + cursor: Option[String], + limit: Option[Int], + shouldExcludeArchived: Boolean, + shouldExcludeMembers: Boolean): IO[Either[SlackError, Throwable], List[SlackChannel]] = + client.listChannels( + ChannelsListParams.builder() + .setCursor(cursor) + .setLimit(limit) + .setShouldExcludeArchived(shouldExcludeArchived) + .setShouldExcludeMembers(shouldExcludeMembers) + .build() + ) + + + def channelHistory(client: SlackClient, + channelId: String, + count: Option[Int], + inclusive: Boolean): IO[Either[SlackError, Throwable], List[LiteMessage]] = + client.channelHistory( + ChannelsHistoryParams.builder() + .setChannelId(channelId) + .setCount(count) + .setInclusive(inclusive) + .build() + ) + + + def getChannelByName(client: SlackClient, + channelName: String, + shouldExcludeArchived: Boolean, + shouldExcludeMembers: Boolean): IO[Either[SlackError, Throwable], SlackChannel] = + client.getChannelByName( + channelName, + ChannelsFilter.builder() + .setShouldExcludeArchived(shouldExcludeArchived) + .setShouldExcludeMembers(shouldExcludeMembers) + .build() + ) + + + def getChannelInfo(client: SlackClient, + channelId: String, + includeLocale: Boolean): IO[Either[SlackError, Throwable], SlackChannel] = + client.getChannelInfo( + ChannelsInfoParams.builder() + .setChannelId(channelId) + .setIncludeLocale(includeLocale) + .build() + ).map(_.getChannel) + + + def kickUserFromChannel(client: SlackClient, + channelId: String, + userId: String): IO[Either[SlackError, Throwable], Unit] = + client.kickUserFromChannel( + ChannelsKickParams.builder() + .setChannelId(channelId) + .setUserId(userId) + .build() + ).unit + + + def listGroups(client: SlackClient, + shouldExcludeArchived: Boolean, + shouldExcludeMembers: Boolean): IO[Either[SlackError, Throwable], List[SlackGroup]] = + client.listGroups( + GroupsListParams.builder() + .setShouldExcludeArchived(shouldExcludeArchived) + .setShouldExcludeMembers(shouldExcludeMembers) + .build()) + + + def kickUserFromGroup(client: SlackClient, + channelId: String, + userId: String): IO[Either[SlackError, Throwable], Unit] = + client.kickUserFromGroup( + GroupsKickParams.builder() + .setChannelId(channelId) + .setUserId(userId) + .build() + ).unit + + + def openIm(client: SlackClient, + includeLocale: Boolean, + returnIm: Boolean, + userId: String): IO[Either[SlackError, Throwable], Unit] = + client.openIm( + ImOpenParams.builder() + .setIncludeLocale(includeLocale) + .setReturnIm(returnIm) + .setUserId(userId) + .build() + ).unit + + + def postMessage(client: SlackClient, + 
asUser: Boolean, + attachments: List[Attachment], + channelId: String, + iconEmoji: Option[String], + iconUrl: Option[String], + replyBroadcast: Boolean, + shouldLinkNames: Boolean, + text: Option[String], + threadTs: Option[String]): IO[Either[SlackError, Throwable], String] = + client.postMessage( + ChatPostMessageParams.builder() + .setAsUser(asUser) + .setAttachments(attachments.asJava) + .setChannelId(channelId) + .setIconEmoji(iconEmoji) + .setIconUrl(iconUrl) + .setLinkNames(shouldLinkNames) + .setReplyBroadcast(replyBroadcast) + .setText(text) + .setThreadTs(threadTs) + .build() + ).map(_.getTs) + + + def postEphemeralMessage(client: SlackClient, + attachments: List[Attachment], + channelId: String, + parseMode: String, + sendAsUser: Boolean, + shouldLinkNames: Boolean, + text: Option[String], + threadTs: Option[String], + userToSendTo: String): IO[Either[SlackError, Throwable], String] = + client.postEphemeralMessage( + ChatPostEphemeralMessageParams.builder() + .setAttachments(attachments.asJava) + .setChannelId(channelId) + .setParseMode(parseMode) + .setSendAsUser(sendAsUser) + .setShouldLinkNames(shouldLinkNames) + .setText(text) + .setThreadTs(threadTs) + .setUserToSendTo(userToSendTo) + .build() + ).map(_.getMessageTs) + + + def updateMessage(client: SlackClient, + asUser: Boolean, + attachments: List[Attachment], + channelId: String, + parse: String, + shouldLinkNames: Boolean, + text: Option[String], + ts: String): IO[Either[SlackError, Throwable], Unit] = + client.updateMessage( + ChatUpdateMessageParams.builder() + .setAsUser(asUser) + .setAttachments(attachments.asJava) + .setChannelId(channelId) + .setParse(parse) + .setShouldLinkNames(shouldLinkNames) + .setText(text) + .setTs(ts) + .build() + ).unit + + + def getPermalink(client: SlackClient, + channelId: String, + messageTs: String): IO[Either[SlackError, Throwable], String] = + client.getPermalink( + ChatGetPermalinkParams.builder() + .setChannelId(channelId) + .setMessageTs(messageTs) + .build() + ).map(_.getPermalink) + + + def deleteMessage(client: SlackClient, + asUser: Boolean, + channelId: String, + messageToDeleteTs: String): IO[Either[SlackError, Throwable], Unit] = + client.deleteMessage( + ChatDeleteParams.builder() + .setAsUser(asUser) + .setChannelId(channelId) + .setMessageToDeleteTs(messageToDeleteTs) + .build() + ).unit + + + def listConversations(client: SlackClient, + conversationTypes: List[ConversationType], + cursor: Option[String], + limit: Option[Int], + shouldExcludeArchived: Boolean): IO[Either[SlackError, Throwable], List[Conversation]] = + client.listConversations( + ConversationsListParams.builder() + .setConversationTypes(conversationTypes.asJava) + .setCursor(cursor) + .setLimit(limit) + .setShouldExcludeArchived(shouldExcludeArchived) + .build()) + + + def usersConversations(client: SlackClient, + cursor: Option[String], + limit: Option[Int], + shouldExcludeArchived: Boolean, + userId: Option[String]): IO[Either[SlackError, Throwable], List[Conversation]] = + client.usersConversations( + ConversationsUserParams.builder() + .setCursor(cursor) + .setLimit(limit) + .setShouldExcludeArchived(shouldExcludeArchived) + .setUserId(userId) + .build() + ) + + + def createConversation(client: SlackClient, + isPrivate: Boolean, + name: String): IO[Either[SlackError, Throwable], Unit] = + client.createConversation( + ConversationCreateParams.builder() + .setIsPrivate(isPrivate) + .setName(name) + .build() + ).unit + + + def inviteToConversation(client: SlackClient, + channelId: String, + users: 
List[String]): IO[Either[SlackError, Throwable], Unit] = + client.inviteToConversation( + ConversationInviteParams.builder() + .setChannelId(channelId) + .setUsers(users.asJava) + .build() + ).unit + + + def unarchiveConversation(client: SlackClient, channelId: String): IO[Either[SlackError, Throwable], Unit] = + client.unarchiveConversation( + ConversationUnarchiveParams.builder() + .setChannelId(channelId) + .build() + ).unit + + + def getConversationHistory(client: SlackClient, + channelId: String, + inclusive: Boolean, + limit: Option[Int], + newestTimestamp: Option[String], + oldestTimestamp: Option[String]): IO[Either[SlackError, Throwable], List[LiteMessage]] = + client.getConversationHistory( + ConversationsHistoryParams.builder() + .setChannelId(channelId) + .setInclusive(inclusive) + .setLimit(limit) + .setNewestTimestamp(newestTimestamp) + .setOldestTimestamp(oldestTimestamp) + .build() + ) + + + def archiveConversation(client: SlackClient, + channelId: String): IO[Either[SlackError, Throwable], Unit] = + client.archiveConversation( + ConversationArchiveParams.builder() + .setChannelId(channelId) + .build() + ).unit + + + def getConversationInfo(client: SlackClient, + conversationId: String, + includeLocale: Boolean): IO[Either[SlackError, Throwable], Conversation] = + client.getConversationInfo( + ConversationsInfoParams.builder() + .setConversationId(conversationId) + .setIncludeLocale(includeLocale) + .build() + ).map(_.getConversation) + + + def getConversationReplies(client: SlackClient, + channel: String, + ts: String): IO[Either[SlackError, Throwable], List[LiteMessage]] = + client.getConversationReplies( + ConversationsRepliesParams.builder() + .setChannel(channel) + .setTs(ts) + .build() + ).map(_.getMessages.asScala.toList) + + + def getConversationByName(client: SlackClient, + conversationName: String, + conversationTypes: List[ConversationType], + shouldExcludeArchived: Boolean): IO[Either[SlackError, Throwable], Conversation] = + client.getConversationByName( + conversationName, + ConversationsFilter.builder() + .setConversationTypes(conversationTypes.asJava) + .setShouldExcludeArchived(shouldExcludeArchived) + .build() + ).map(c => Conversation.builder().setId(c.getId).setName(c.getName).build()) + + + def openConversation(client: SlackClient, + channelId: Option[String], + returnIm: Boolean, + users: List[String]): IO[Either[SlackError, Throwable], Conversation] = + client.openConversation( + ConversationOpenParams.builder() + .setChannelId(channelId) + .setReturnIm(returnIm) + .setUsers(users.asJava) + .build() + ).map(_.getConversation) + + + // Usergroups + def createUsergroup(client: SlackClient, + description: Option[String], + handle: Option[String], + includeCount: Boolean, + name: String, + rawChannelIds: List[String]): IO[Either[SlackError, Throwable], SlackUsergroup] = + client.createUsergroup( + UsergroupCreateParams.builder() + .setDescription(description) + .setHandle(handle) + .setIncludeCount(includeCount) + .setName(name) + .setRawChannelIds(rawChannelIds.asJava) + .build() + ).map(_.getUsergroup) + + + def listUsergroups(client: SlackClient, + includeCount: Boolean, + includeDisabled: Boolean, + includeUsers: Boolean): IO[Either[SlackError, Throwable], List[SlackUsergroup]] = + client.listUsergroups( + UsergroupListParams.builder() + .setIncludeCount(includeCount) + .setIncludeDisabled(includeDisabled) + .setIncludeUsers(includeUsers) + .build() + ) + + + def updateUsergroup(client: SlackClient, + description: Option[String], + handle: 
Option[String], + includeCount: Boolean, + name: Option[String], + rawChannelIds: List[String], + userGroupId: String): IO[Either[SlackError, Throwable], SlackUsergroup] = + client.updateUsergroup( + UsergroupUpdateParams.builder() + .setDescription(description) + .setHandle(handle) + .setIncludeCount(includeCount) + .setName(name) + .setRawChannelIds(rawChannelIds.asJava) + .setUsergroupId(userGroupId) + .build() + ).map(_.getUsergroup) + + + def enableUsergroup(client: SlackClient, + includeCount: Boolean, + userGroupId: String): IO[Either[SlackError, Throwable], Unit] = { + client.enableUsergroup( + UsergroupEnableParams.builder() + .setIncludeCount(includeCount) + .setUsergroupId(userGroupId) + .build() + ).unit + } + + + def disableUsergroup(client: SlackClient, + includeCount: Boolean, + userGroupId: String): IO[Either[SlackError, Throwable], Unit] = + client.disableUsergroup( + UsergroupDisableParams.builder() + .setIncludeCount(includeCount) + .setUsergroupId(userGroupId) + .build() + ).unit + + + def updateUsergroupUsers(client: SlackClient, + includeCount: Boolean, + rawUserIds: List[String], + userGroupId: String): IO[Either[SlackError, Throwable], SlackUsergroup] = + client.updateUsergroupUsers( + UsergroupUsersUpdateParams.builder() + .setIncludeCount(includeCount) + .setRawUserIds(rawUserIds.asJava) + .setUsergroupId(userGroupId) + .build() + ).map(_.getUsergroup) + + + def openDialog(client: SlackClient, + slackDialog: SlackDialog, + triggerId: String): IO[Either[SlackError, Throwable], Unit] = + client.openDialog( + DialogOpenParams.builder() + .setDialog(slackDialog) + .setTriggerId(triggerId) + .build() + ).unit + + + def addReaction(client: SlackClient, + channel: Option[String], + file: Option[String], + fileComment: Option[String], + name: String, + timestamp: Option[String]): IO[Either[SlackError, Throwable], Unit] = + client.addReaction( + ReactionsAddParams.builder() + .setChannel(channel) + .setFile(file) + .setFileComment(fileComment) + .setName(name) + .setTimestamp(timestamp) + .build() + ).unit + + + def getTeamInfo(client: SlackClient): IO[Either[SlackError, Throwable], SlackTeam] = + client.getTeamInfo.map(_.getSlackTeam) + + + def uploadFile(client: SlackClient, + channels: List[String], + content: Option[String], + filename: Option[String], + initialComment: Option[String], + threadTs: Option[String], + title: Option[String]): IO[Either[SlackError, Throwable], SlackFile] = + client.uploadFile( + FilesUploadParams.builder() + .setChannels(channels.asJava) + .setContent(content) + .setFilename(filename) + .setInitialComment(initialComment) + .setThreadTs(threadTs) + .setTitle(title) + .build() + ).map(_.getFile) + + + def shareFilePublically(client: SlackClient, + fileId: String): IO[Either[SlackError, Throwable], SlackFile] = + client.shareFilePublically( + FilesSharedPublicUrlParams.builder() + .setFileId(fileId) + .build() + ).map(_.getFile) + + + def listEmoji(client: SlackClient): IO[Either[SlackError, Throwable], Map[String, String]] = + client.listEmoji.map(_.getEmoji.asScala.toMap) +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/slack/Slack.scala b/jvm/src/main/scala/com/harana/modules/slack/Slack.scala new file mode 100644 index 0000000..d4d46f1 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/slack/Slack.scala @@ -0,0 +1,236 @@ +package com.harana.modules.slack + +import com.hubspot.slack.client.SlackClient +import com.hubspot.slack.client.methods.{ResultSort, ResultSortOrder} +import 
com.hubspot.slack.client.models.conversations.{Conversation, ConversationType} +import com.hubspot.slack.client.models.dialog.SlackDialog +import com.hubspot.slack.client.models.files.SlackFile +import com.hubspot.slack.client.models.group.SlackGroup +import com.hubspot.slack.client.models.response.auth.AuthTestResponse +import com.hubspot.slack.client.models.response.{MessagePage, SlackError} +import com.hubspot.slack.client.models.teams.SlackTeam +import com.hubspot.slack.client.models.usergroups.SlackUsergroup +import com.hubspot.slack.client.models.users.SlackUser +import com.hubspot.slack.client.models.{Attachment, LiteMessage, SlackChannel} +import zio.IO +import zio.macros.accessible + +@accessible +trait Slack { + def newClient(token: String): IO[Nothing, SlackClient] + + def Auth(client: SlackClient): IO[Either[SlackError, Throwable], AuthTestResponse] + + def revokeAuth(client: SlackClient): IO[Either[SlackError, Throwable], Boolean] + + def searchMessages(client: SlackClient, + count: Int, + page: Int, + query: String, + shouldHighlight: Boolean, + sort: ResultSort, + sortOrder: ResultSortOrder): IO[Either[SlackError, Throwable], MessagePage] + + def findReplies(client: SlackClient, + channelId: String, + threadTs: String): IO[Either[SlackError, Throwable], List[LiteMessage]] + + def findUser(client: SlackClient, + userId: String, + includeLocale: Boolean): IO[Either[SlackError, Throwable], SlackUser] + + def lookupUserByEmail(client: SlackClient, + email: String): IO[Either[SlackError, Throwable], SlackUser] + + def listUsers(client: SlackClient): IO[Either[SlackError, Throwable], List[SlackUser]] + + def listUsersPaginated(client: SlackClient, + cursor: Option[String], + limit: Option[Int]): IO[Either[SlackError, Throwable], List[SlackUser]] + + def listChannels(client: SlackClient, + cursor: Option[String], + limit: Option[Int], + shouldExcludeArchived: Boolean, + shouldExcludeMembers: Boolean): IO[Either[SlackError, Throwable], List[SlackChannel]] + + def channelHistory(client: SlackClient, + channelId: String, + count: Option[Int], + inclusive: Boolean): IO[Either[SlackError, Throwable], List[LiteMessage]] + + def getChannelByName(client: SlackClient, + channelName: String, + shouldExcludeArchived: Boolean, + shouldExcludeMembers: Boolean): IO[Either[SlackError, Throwable], SlackChannel] + + def getChannelInfo(client: SlackClient, + channelId: String, + includeLocale: Boolean): IO[Either[SlackError, Throwable], SlackChannel] + + def kickUserFromChannel(client: SlackClient, + channelId: String, + userId: String): IO[Either[SlackError, Throwable], Unit] + + def listGroups(client: SlackClient, + shouldExcludeArchived: Boolean, + shouldExcludeMembers: Boolean): IO[Either[SlackError, Throwable], List[SlackGroup]] + + def kickUserFromGroup(client: SlackClient, + channelId: String, + userId: String): IO[Either[SlackError, Throwable], Unit] + + def openIm(client: SlackClient, + includeLocale: Boolean, + returnIm: Boolean, + userId: String): IO[Either[SlackError, Throwable], Unit] + + def postMessage(client: SlackClient, + asUser: Boolean, + attachments: List[Attachment], + channelId: String, + iconEmoji: Option[String], + iconUrl: Option[String], + replyBroadcast: Boolean, + shouldLinkNames: Boolean, + text: Option[String], + threadTs: Option[String]): IO[Either[SlackError, Throwable], String] + + def postEphemeralMessage(client: SlackClient, + attachments: List[Attachment], + channelId: String, + parseMode: String, + sendAsUser: Boolean, + shouldLinkNames: Boolean, + text: 
Option[String], + threadTs: Option[String], + userToSendTo: String): IO[Either[SlackError, Throwable], String] + + def updateMessage(client: SlackClient, + asUser: Boolean, + attachments: List[Attachment], + channelId: String, + parse: String, + shouldLinkNames: Boolean, + text: Option[String], + ts: String): IO[Either[SlackError, Throwable], Unit] + + def getPermalink(client: SlackClient, + channelId: String, + messageTs: String): IO[Either[SlackError, Throwable], String] + + def deleteMessage(client: SlackClient, + asUser: Boolean, + channelId: String, + messageToDeleteTs: String): IO[Either[SlackError, Throwable], Unit] + + def listConversations(client: SlackClient, + conversationTypes: List[ConversationType], + cursor: Option[String], + limit: Option[Int], + shouldExcludeArchived: Boolean): IO[Either[SlackError, Throwable], List[Conversation]] + + def usersConversations(client: SlackClient, + cursor: Option[String], + limit: Option[Int], + shouldExcludeArchived: Boolean, + userId: Option[String]): IO[Either[SlackError, Throwable], List[Conversation]] + + def createConversation(client: SlackClient, + isPrivate: Boolean, + name: String): IO[Either[SlackError, Throwable], Unit] + + def inviteToConversation(client: SlackClient, + channelId: String, + users: List[String]): IO[Either[SlackError, Throwable], Unit] + + def unarchiveConversation(client: SlackClient, + channelId: String): IO[Either[SlackError, Throwable], Unit] + + def getConversationHistory(client: SlackClient, + channelId: String, + inclusive: Boolean, + limit: Option[Int], + newestTimestamp: Option[String], + oldestTimestamp: Option[String]): IO[Either[SlackError, Throwable], List[LiteMessage]] + + def archiveConversation(client: SlackClient, + channelId: String): IO[Either[SlackError, Throwable], Unit] + + def getConversationInfo(client: SlackClient, + conversationId: String, + includeLocale: Boolean): IO[Either[SlackError, Throwable], Conversation] + + def getConversationReplies(client: SlackClient, + channel: String, + ts: String): IO[Either[SlackError, Throwable], List[LiteMessage]] + + def getConversationByName(client: SlackClient, + conversationName: String, + conversationTypes: List[ConversationType], + shouldExcludeArchived: Boolean): IO[Either[SlackError, Throwable], Conversation] + + def openConversation(client: SlackClient, + channelId: Option[String], + returnIm: Boolean, + users: List[String]): IO[Either[SlackError, Throwable], Conversation] + + def createUsergroup(client: SlackClient, + description: Option[String], + handle: Option[String], + includeCount: Boolean, + name: String, + rawChannelIds: List[String]): IO[Either[SlackError, Throwable], SlackUsergroup] + + def listUsergroups(client: SlackClient, + includeCount: Boolean, + includeDisabled: Boolean, + includeUsers: Boolean): IO[Either[SlackError, Throwable], List[SlackUsergroup]] + + def updateUsergroup(client: SlackClient, + description: Option[String], + handle: Option[String], + includeCount: Boolean, + name: Option[String], + rawChannelIds: List[String], + userGroupId: String): IO[Either[SlackError, Throwable], SlackUsergroup] + + def enableUsergroup(client: SlackClient, + includeCount: Boolean, + userGroupId: String): IO[Either[SlackError, Throwable], Unit] + + def disableUsergroup(client: SlackClient, + includeCount: Boolean, + userGroupId: String): IO[Either[SlackError, Throwable], Unit] + + def updateUsergroupUsers(client: SlackClient, + includeCount: Boolean, + rawUserIds: List[String], + userGroupId: String): IO[Either[SlackError, Throwable], 
SlackUsergroup] + + def openDialog(client: SlackClient, + slackDialog: SlackDialog, + triggerId: String): IO[Either[SlackError, Throwable], Unit] + + def addReaction(client: SlackClient, + channel: Option[String], + file: Option[String], + fileComment: Option[String], + name: String, + timestamp: Option[String]): IO[Either[SlackError, Throwable], Unit] + + def getTeamInfo(client: SlackClient): IO[Either[SlackError, Throwable], SlackTeam] + + def uploadFile(client: SlackClient, + channels: List[String], + content: Option[String], + filename: Option[String], + initialComment: Option[String], + threadTs: Option[String], + title: Option[String]): IO[Either[SlackError, Throwable], SlackFile] + + def shareFilePublically(client: SlackClient, + fileId: String): IO[Either[SlackError, Throwable], SlackFile] + + def listEmoji(client: SlackClient): IO[Either[SlackError, Throwable], Map[String, String]] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/slack/package.scala b/jvm/src/main/scala/com/harana/modules/slack/package.scala new file mode 100644 index 0000000..4ccc070 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/slack/package.scala @@ -0,0 +1,39 @@ +package com.harana.modules + +import com.hubspot.algebra.Result +import com.hubspot.slack.client.models.response.SlackError +import zio.{IO, ZIO} + +import java.util.Optional +import java.util.concurrent.CompletableFuture +import scala.compat.java8.FutureConverters._ +import scala.compat.java8.OptionConverters._ +import scala.concurrent.ExecutionContext.Implicits.global +import scala.jdk.CollectionConverters._ + +package object slack { + + implicit def toIO[SlackError, A](fn: CompletableFuture[Result[A, SlackError]]): IO[Either[SlackError, Throwable], A] = + ZIO.async { (cb: IO[Either[SlackError, Throwable], A] => Unit) => + fn.toScala.onComplete { f => + f.toEither match { + case Left(t) => cb(ZIO.fail(Right(t))) + case Right(x) => try { + if (x.isOk) cb(ZIO.succeed(x.unwrapOrElseThrow())) + else cb(ZIO.fail(Left(x.unwrapErrOrElseThrow()))) + } catch { + case e: Exception => cb(ZIO.fail(Right(e))) + } + } + } + } + + implicit def toIOIterable[A](fn: java.lang.Iterable[CompletableFuture[Result[java.util.List[A], SlackError]]]): IO[Either[SlackError, Throwable], List[A]] = + ZIO.foreach(fn.asScala.toList)(toIO).map(_.flatMap(_.asScala.toList)) + + implicit def toOptionalInt(opt: Option[Int]): Optional[Integer] = + opt.map { o => new Integer(o) }.asJava + + implicit def toOptionalDefault[A](opt: Option[A]): Optional[A] = + opt.asJava +} diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeAccounts.scala b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeAccounts.scala new file mode 100644 index 0000000..01814f3 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeAccounts.scala @@ -0,0 +1,94 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.outr.stripe._ +import com.outr.stripe.connect._ +import zio.{IO, ZIO, ZLayer} + +object LiveStripeAccounts { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripeAccounts(config, logger, micrometer) + } +} + +case class LiveStripeAccounts(config: Config, logger: Logger, micrometer: Micrometer) extends StripeAccounts { + + private val client = config.secret("stripe-secret-key").map(key 
=> new Stripe(key).accounts) + + def create(country: Option[String] = None, + email: Option[String] = None, + custom: Boolean = false, + accountToken: Option[String] = None, + businessLogo: Option[String] = None, + businessName: Option[String] = None, + businessPrimaryColor: Option[String] = None, + businessURL: Option[String] = None, + legalEntity: Option[LegalEntity] = None, + tosAcceptance: Option[Acceptance] = None): IO[ResponseError, Account] = + for { + c <- client + r <- execute(c.create(country, email, custom, accountToken, businessLogo, businessName, businessPrimaryColor, businessURL, legalEntity, tosAcceptance)) + } yield r + + + def byId(accountId: String): IO[ResponseError, Account] = + for { + c <- client + r <- execute(c.byId(accountId)) + } yield r + + + def update(accountId: String, + businessLogo: Option[String] = None, + businessName: Option[String] = None, + businessPrimaryColor: Option[String] = None, + businessUrl: Option[String] = None, + debitNegativeBalances: Option[Boolean] = None, + declineChargeOn: Option[DeclineChargeOn] = None, + defaultCurrency: Option[String] = None, + email: Option[String] = None, + externalAccount: Option[String] = None, + legalEntity: Option[LegalEntity] = None, + metadata: Map[String, String] = Map.empty, + productDescription: Option[String] = None, + statementDescriptor: Option[String] = None, + supportEmail: Option[String] = None, + supportPhone: Option[String] = None, + supportUrl: Option[String] = None, + tosAcceptance: Option[Acceptance] = None, + transferSchedule: Option[TransferSchedule] = None, + transferStatementDescriptor: Option[String] = None): IO[ResponseError, Account] = + for { + c <- client + r <- execute(c.update(accountId, businessLogo, businessName, businessPrimaryColor, businessUrl, debitNegativeBalances, declineChargeOn, + defaultCurrency, email, externalAccount, legalEntity, metadata, productDescription, statementDescriptor, supportEmail, supportPhone, supportUrl, + tosAcceptance, transferSchedule, transferStatementDescriptor)) + } yield r + + + def delete(accountId: String): IO[ResponseError, Deleted] = + for { + c <- client + r <- execute(c.delete(accountId)) + } yield r + + + def reject(accountId: String, reason: String): IO[ResponseError, Account] = + for { + c <- client + r <- execute(c.reject(accountId, reason)) + } yield r + + + def list(config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[Account]] = + for { + c <- client + r <- execute(c.list(config)) + } yield r +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeApplicationFeeRefunds.scala b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeApplicationFeeRefunds.scala new file mode 100644 index 0000000..d3b69db --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeApplicationFeeRefunds.scala @@ -0,0 +1,52 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.outr.stripe._ +import com.outr.stripe.connect.FeeRefund +import zio.{IO, ZIO, ZLayer} + +object LiveStripeApplicationFeeRefunds { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripeApplicationFeeRefunds(config, logger, micrometer) + } +} + +case class LiveStripeApplicationFeeRefunds(config: Config, logger: Logger, micrometer: Micrometer) extends 
StripeApplicationFeeRefunds { + + private val client = config.secret("stripe-secret-key").map(key => new Stripe(key).applicationFees.refunds) + + def create(feeId: String, + amount: Option[Money] = None, + metadata: Map[String, String] = Map.empty): IO[ResponseError, FeeRefund] = + for { + c <- client + r <- execute(c.create(feeId, amount, metadata)) + } yield r + + + def byId(feeId: String, refundId: String): IO[ResponseError, FeeRefund] = + for { + c <- client + r <- execute(c.byId(feeId, refundId)) + } yield r + + + def update(feeId: String, refundId: String, metadata: Map[String, String] = Map.empty): IO[ResponseError, FeeRefund] = + for { + c <- client + r <- execute(c.update(feeId, refundId, metadata)) + } yield r + + + def list(feeId: String, config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[FeeRefund]] = + for { + c <- client + r <- execute(c.list(feeId, config)) + } yield r +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeApplicationFees.scala b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeApplicationFees.scala new file mode 100644 index 0000000..d1503e0 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeApplicationFees.scala @@ -0,0 +1,39 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.outr.stripe._ +import com.outr.stripe.connect.ApplicationFee +import zio.{IO, ZIO, ZLayer} + +object LiveStripeApplicationFees { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripeApplicationFees(config, logger, micrometer) + } +} + +case class LiveStripeApplicationFees(config: Config, logger: Logger, micrometer: Micrometer) extends StripeApplicationFees { + + private val client = config.secret("stripe-secret-key").map(key => new Stripe(key).applicationFees) + + def byId(feeId: String): IO[ResponseError, ApplicationFee] = + for { + c <- client + r <- execute(c.byId(feeId)) + } yield r + + + def list(charge: Option[String] = None, + created: Option[TimestampFilter] = None, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[ApplicationFee]] = + for { + c <- client + r <- execute(c.list(charge, created, config)) + } yield r + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeBalance.scala b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeBalance.scala new file mode 100644 index 0000000..5757987 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeBalance.scala @@ -0,0 +1,50 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.outr.stripe._ +import com.outr.stripe.balance.{Balance, BalanceTransaction} +import zio.{IO, ZIO, ZLayer} + +object LiveStripeBalance { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripeBalance(config, logger, micrometer) + } +} + +case class LiveStripeBalance(config: Config, logger: Logger, micrometer: Micrometer) extends StripeBalance { + + private val client = config.secret("stripe-secret-key").map(key => new Stripe(key).balance) + + def get: IO[ResponseError, Balance] = + for 
{ + c <- client + r <- execute(c.apply()) + } yield r + + + def byId(id: String, config: QueryConfig = QueryConfig.default): IO[ResponseError, BalanceTransaction] = + for { + c <- client + r <- execute(c.byId(id, config)) + } yield r + + + def list(availableOn: Option[TimestampFilter] = None, + created: Option[TimestampFilter] = None, + currency: Option[String] = None, + source: Option[String] = None, + transfer: Option[String] = None, + `type`: Option[String] = None, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[BalanceTransaction]] = + for { + c <- client + r <- execute(c.list(availableOn, created, currency, source, transfer, `type`, config)) + } yield r + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeCharges.scala b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeCharges.scala new file mode 100644 index 0000000..839004c --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeCharges.scala @@ -0,0 +1,84 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.outr.stripe._ +import com.outr.stripe.charge.{Charge, FraudDetails, Shipping} +import zio.{IO, ZIO, ZLayer} + +import scala.language.implicitConversions + +object LiveStripeCharges { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripeCharges(config, logger, micrometer) + } +} + +case class LiveStripeCharges(config: Config, logger: Logger, micrometer: Micrometer) extends StripeCharges { + + private val client = config.secret("stripe-secret-key").map(key => new Stripe(key).charges) + + def create(amount: Money, + currency: String, + applicationFee: Option[Money] = None, + capture: Boolean = true, + description: Option[String] = None, + destination: Option[String] = None, + metadata: Map[String, String] = Map.empty, + receiptEmail: Option[String] = None, + shipping: Option[Shipping] = None, + customer: Option[String] = None, + source: Option[String] = None, + statementDescriptor: Option[String] = None): IO[ResponseError, Charge] = + for { + c <- client + r <- execute(c.create(amount, currency, applicationFee, capture, description, destination, metadata, receiptEmail, shipping, + customer, source, statementDescriptor)) + } yield r + + + def byId(chargeId: String): IO[ResponseError, Charge] = + for { + c <- client + r <- execute(c.byId(chargeId)) + } yield r + + + def update(chargeId: String, + description: Option[String] = None, + fraudDetails: Option[FraudDetails] = None, + metadata: Map[String, String] = Map.empty, + receiptEmail: Option[String] = None, + shipping: Option[Shipping] = None): IO[ResponseError, Charge] = + for { + c <- client + r <- execute(c.update(chargeId, description, fraudDetails, metadata, receiptEmail, shipping)) + } yield r + + + def capture(chargeId: String, + amount: Option[Money] = None, + applicationFee: Option[Money] = None, + receiptEmail: Option[String] = None, + statementDescriptor: Option[String] = None): IO[ResponseError, Charge] = + for { + c <- client + r <- execute(c.capture(chargeId, amount, applicationFee, receiptEmail, statementDescriptor)) + } yield r + + + def list(created: Option[TimestampFilter] = None, + customer: Option[String] = None, + source: Option[String] = None, + config: QueryConfig = QueryConfig.default): IO[ResponseError, 
StripeList[Charge]] = + for { + c <- client + r <- execute(c.list(created, customer, source, config)) + } yield r + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeCountrySpecs.scala b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeCountrySpecs.scala new file mode 100644 index 0000000..cde1388 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeCountrySpecs.scala @@ -0,0 +1,37 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.outr.stripe.connect.CountrySpec +import com.outr.stripe.{QueryConfig, ResponseError, Stripe, StripeList} +import zio.{IO, ZIO, ZLayer} + +object LiveStripeCountrySpecs { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripeCountrySpecs(config, logger, micrometer) + } +} + +case class LiveStripeCountrySpecs(config: Config, logger: Logger, micrometer: Micrometer) extends StripeCountrySpecs { + + private val client = config.secret("stripe-secret-key").map(key => new Stripe(key).countrySpecs) + + def list(config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[CountrySpec]] = + for { + c <- client + r <- execute(c.list(config)) + } yield r + + + def byId(countryCode: String): IO[ResponseError, CountrySpec] = + for { + c <- client + r <- execute(c.byId(countryCode)) + } yield r + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeCoupons.scala b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeCoupons.scala new file mode 100644 index 0000000..8a79579 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeCoupons.scala @@ -0,0 +1,67 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.outr.stripe._ +import com.outr.stripe.subscription.Coupon +import zio.{IO, ZIO, ZLayer} + +object LiveStripeCoupons { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripeCoupons(config, logger, micrometer) + } +} + +case class LiveStripeCoupons(config: Config, logger: Logger, micrometer: Micrometer) extends StripeCoupons { + + private val client = config.secret("stripe-secret-key").map(key => new Stripe(key).coupons) + + def create(couponId: String, + duration: String, + amountOff: Option[Money] = None, + currency: Option[String] = None, + durationInMonths: Option[Int] = None, + maxRedemptions: Option[Int] = None, + metadata: Map[String, String] = Map.empty, + percentOff: Option[Int] = None, + redeemBy: Option[Long] = None): IO[ResponseError, Coupon] = + for { + c <- client + r <- execute(c.create(couponId, duration, amountOff, currency, durationInMonths, maxRedemptions, metadata, percentOff, redeemBy)) + } yield r + + + def byId(couponId: String): IO[ResponseError, Coupon] = + for { + c <- client + r <- execute(c.byId(couponId)) + } yield r + + + def update(couponId: String, metadata: Map[String, String]): IO[ResponseError, Coupon] = + for { + c <- client + r <- execute(c.update(couponId, metadata)) + } yield r + + + def delete(couponId: String): IO[ResponseError, Deleted] = + for { + c <- client + r <- execute(c.delete(couponId)) + } 
yield r + + + def list(created: Option[TimestampFilter] = None, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[Coupon]] = + for { + c <- client + r <- execute(c.list(created, config)) + } yield r + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeCustomerBankAccounts.scala b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeCustomerBankAccounts.scala new file mode 100644 index 0000000..9a4fe45 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeCustomerBankAccounts.scala @@ -0,0 +1,76 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.outr.stripe._ +import com.outr.stripe.charge.BankAccount +import zio.{IO, ZIO, ZLayer} + +object LiveStripeCustomerBankAccounts { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripeCustomerBankAccounts(config, logger, micrometer) + } +} + +case class LiveStripeCustomerBankAccounts(config: Config, logger: Logger, micrometer: Micrometer) extends StripeCustomerBankAccounts { + + private val client = config.secret("stripe-secret-key").map(key => new Stripe(key).customers.sources.bankAccounts) + + def create(customerId: String, + source: Option[String] = None, + defaultForCurrency: Option[String] = None, + metadata: Map[String, String] = Map.empty): IO[ResponseError, BankAccount] = + for { + c <- client + r <- execute(c.create(customerId, source, defaultForCurrency, metadata)) + } yield r + + + def byId(customerId: String, bankAccountId: String): IO[ResponseError, BankAccount] = + for { + c <- client + r <- execute(c.byId(customerId, bankAccountId)) + } yield r + + + def update(customerId: String, + bankAccountId: String, + accountHolderName: Option[String] = None, + accountHolderType: Option[String] = None, + metadata: Map[String, String] = Map.empty): IO[ResponseError, BankAccount] = + for { + c <- client + r <- execute(c.update(customerId, bankAccountId, accountHolderName, accountHolderType, metadata)) + } yield r + + + def verify(customerId: String, + bankAccountId: String, + amount1: Option[Money] = None, + amount2: Option[Money] = None, + verificationMethod: Option[String] = None): IO[ResponseError, BankAccount] = + for { + c <- client + r <- execute(c.verify(customerId, bankAccountId, amount1, amount2, verificationMethod)) + } yield r + + + def delete(customerId: String, bankAccountId: String): IO[ResponseError, Deleted] = + for { + c <- client + r <- execute(c.delete(customerId, bankAccountId)) + } yield r + + + def list(customerId: String, config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[BankAccount]] = + for { + c <- client + r <- execute(c.list(customerId, config)) + } yield r + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeCustomerCreditCards.scala b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeCustomerCreditCards.scala new file mode 100644 index 0000000..12332c7 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeCustomerCreditCards.scala @@ -0,0 +1,73 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.outr.stripe._ +import 
com.outr.stripe.charge.Card +import zio.{IO, ZIO, ZLayer} + +object LiveStripeCustomerCreditCards { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripeCustomerCreditCards(config, logger, micrometer) + } +} + +case class LiveStripeCustomerCreditCards(config: Config, logger: Logger, micrometer: Micrometer) extends StripeCustomerCreditCards { + + private val client = config.secret("stripe-secret-key").map(key => new Stripe(key).customers.sources.cards) + + def create(customerId: String, + source: Option[String] = None, + externalAccount: Option[String] = None, + defaultForCurrency: Option[String] = None, + metadata: Map[String, String] = Map.empty): IO[ResponseError, Card] = + for { + c <- client + r <- execute(c.create(customerId, source, externalAccount, defaultForCurrency, metadata)) + } yield r + + + def byId(customerId: String, cardId: String): IO[ResponseError, Card] = + for { + c <- client + r <- execute(c.byId(customerId, cardId)) + } yield r + + + def update(customerId: String, + cardId: String, + addressCity: Option[String] = None, + addressCountry: Option[String] = None, + addressLine1: Option[String] = None, + addressLine2: Option[String] = None, + addressState: Option[String] = None, + addressZip: Option[String] = None, + defaultForCurrency: Option[String] = None, + expMonth: Option[Int] = None, + expYear: Option[Int] = None, + metadata: Map[String, String] = Map.empty, + name: Option[String] = None): IO[ResponseError, Card] = + for { + c <- client + r <- execute(c.update(customerId, cardId, addressCity, addressCountry, addressLine1, addressLine2, addressState, addressZip, defaultForCurrency, + expMonth, expYear, metadata, name)) + } yield r + + def delete(customerId: String, cardId: String): IO[ResponseError, Deleted] = + for { + c <- client + r <- execute(c.delete(customerId, cardId)) + } yield r + + def list(customerId: String, config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[Card]] = + for { + c <- client + r <- execute(c.list(customerId, config)) + } yield r + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeCustomers.scala b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeCustomers.scala new file mode 100644 index 0000000..2905ac3 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeCustomers.scala @@ -0,0 +1,90 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.outr.stripe._ +import com.outr.stripe.charge.{Address, Card, Shipping} +import com.outr.stripe.customer.Customer +import zio.{IO, ZIO, ZLayer} + +object LiveStripeCustomers { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripeCustomers(config, logger, micrometer) + } +} + +case class LiveStripeCustomers(config: Config, logger: Logger, micrometer: Micrometer) extends StripeCustomers { + + private val client = config.secret("stripe-secret-key").map(key => new Stripe(key).customers) + + def create(address: Option[Address] = None, + balance: Option[Money] = None, + coupon: Option[String] = None, + description: Option[String] = None, + email: Option[String] = None, + invoicePrefix: Option[String] = None, + metadata: Map[String, String] = Map.empty, + name: Option[String] 
= None, + nextInvoiceSequence: Option[Int] = None, + paymentMethodId: Option[String] = None, + phone: Option[String] = None, + promotionCode: Option[String] = None, + shipping: Option[Shipping] = None, + source: Option[Card] = None, + taxExempt: Option[String] = None): IO[ResponseError, Customer] = + for { + c <- client + r <- execute(c.create(address, balance, coupon, description, email, invoicePrefix, metadata, name, nextInvoiceSequence, paymentMethodId, phone, promotionCode, shipping, source, taxExempt)) + } yield r + + + def byId(customerId: String): IO[ResponseError, Customer] = + for { + c <- client + r <- execute(c.byId(customerId)) + } yield r + + + def update(customerId: String, + address: Option[Address] = None, + balance: Option[Money] = None, + coupon: Option[String] = None, + defaultSource: Option[String] = None, + description: Option[String] = None, + email: Option[String] = None, + invoicePrefix: Option[String] = None, + metadata: Map[String, String] = Map.empty, + name: Option[String] = None, + nextInvoiceSequence: Option[Int] = None, + phone: Option[String] = None, + promotionCode: Option[String] = None, + shipping: Option[Shipping] = None, + source: Option[Card] = None, + taxExempt: Option[String] = None): IO[ResponseError, Customer] = + for { + c <- client + r <- execute(c.update(customerId, address, balance, coupon, defaultSource, description, email, invoicePrefix, metadata, name, nextInvoiceSequence, phone, promotionCode, shipping, source, taxExempt)) + } yield r + + + def delete(customerId: String): IO[ResponseError, Deleted] = + for { + c <- client + r <- execute(c.delete(customerId)) + } yield r + + + def list(created: Option[TimestampFilter] = None, + config: QueryConfig = QueryConfig.default, + email: Option[String] = None): IO[ResponseError, StripeList[Customer]] = + for { + c <- client + r <- execute(c.list(created, config, email)) + } yield r + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeDiscounts.scala b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeDiscounts.scala new file mode 100644 index 0000000..63901c7 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeDiscounts.scala @@ -0,0 +1,36 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.outr.stripe.{Deleted, ResponseError, Stripe} +import zio.{IO, ZIO, ZLayer} + +object LiveStripeDiscounts { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripeDiscounts(config, logger, micrometer) + } +} + +case class LiveStripeDiscounts(config: Config, logger: Logger, micrometer: Micrometer) extends StripeDiscounts { + + private val client = config.secret("stripe-secret-key").map(key => new Stripe(key).discounts) + + def deleteCustomerDiscount(customerId: String): IO[ResponseError, Deleted] = + for { + c <- client + r <- execute(c.deleteCustomerDiscount(customerId)) + } yield r + + + def deleteSubscriptionDiscount(subscriptionId: String): IO[ResponseError, Deleted] = + for { + c <- client + r <- execute(c.deleteSubscriptionDiscount(subscriptionId)) + } yield r + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeDisputes.scala b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeDisputes.scala new file mode 100644 index 0000000..8195a5a --- 
/dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeDisputes.scala @@ -0,0 +1,54 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.outr.stripe._ +import com.outr.stripe.dispute.{Dispute, DisputeEvidence} +import zio.{IO, ZIO, ZLayer} + +object LiveStripeDisputes { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripeDisputes(config, logger, micrometer) + } +} + +case class LiveStripeDisputes(config: Config, logger: Logger, micrometer: Micrometer) extends StripeDisputes { + + private val client = config.secret("stripe-secret-key").map(key => new Stripe(key).disputes) + + def byId(disputeId: String): IO[ResponseError, Dispute] = + for { + c <- client + r <- execute(c.byId(disputeId)) + } yield r + + + def update(disputeId: String, + evidence: Option[DisputeEvidence] = None, + metadata: Map[String, String]): IO[ResponseError, Dispute] = + for { + c <- client + r <- execute(c.update(disputeId, evidence, metadata)) + } yield r + + + def close(disputeId: String): IO[ResponseError, Dispute] = + for { + c <- client + r <- execute(c.close(disputeId)) + } yield r + + + def list(created: Option[TimestampFilter] = None, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[Dispute]] = + for { + c <- client + r <- execute(c.list(created, config)) + } yield r + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeEvents.scala b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeEvents.scala new file mode 100644 index 0000000..fbe6f91 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeEvents.scala @@ -0,0 +1,40 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.outr.stripe._ +import com.outr.stripe.event.Event +import zio.{IO, ZIO, ZLayer} + +object LiveStripeEvents { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripeEvents(config, logger, micrometer) + } +} + +case class LiveStripeEvents(config: Config, logger: Logger, micrometer: Micrometer) extends StripeEvents { + + private val client = config.secret("stripe-secret-key").map(key => new Stripe(key).events) + + def byId(eventId: String): IO[ResponseError, Event] = + for { + c <- client + r <- execute(c.byId(eventId)) + } yield r + + + def list(created: Option[TimestampFilter] = None, + `type`: Option[String] = None, + types: List[String] = Nil, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[Event]] = + for { + c <- client + r <- execute(c.list(created, `type`, types, config)) + } yield r + +} diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeExternalBankAccounts.scala b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeExternalBankAccounts.scala new file mode 100644 index 0000000..68188e5 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeExternalBankAccounts.scala @@ -0,0 +1,65 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import 
com.outr.stripe._ +import com.outr.stripe.charge.BankAccount +import zio.{IO, ZIO, ZLayer} + +object LiveStripeExternalBankAccounts { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripeExternalBankAccounts(config, logger, micrometer) + } +} + +case class LiveStripeExternalBankAccounts(config: Config, logger: Logger, micrometer: Micrometer) extends StripeExternalBankAccounts { + + private val client = config.secret("stripe-secret-key").map(key => new Stripe(key).accounts.external.bankAccounts) + + def create(accountId: String, + source: Option[String] = None, + externalAccount: Option[String] = None, + defaultForCurrency: Option[String] = None, + metadata: Map[String, String] = Map.empty): IO[ResponseError, BankAccount] = + for { + c <- client + r <- execute(c.create(accountId, source, externalAccount, defaultForCurrency ,metadata)) + } yield r + + + def byId(accountId: String, bankAccountId: String): IO[ResponseError, BankAccount] = + for { + c <- client + r <- execute(c.byId(accountId, bankAccountId)) + } yield r + + + def update(accountId: String, + bankAccountId: String, + defaultForCurrency: Option[String] = None, + metadata: Map[String, String] = Map.empty): IO[ResponseError, BankAccount] = + for { + c <- client + r <- execute(c.update(accountId, bankAccountId, defaultForCurrency, metadata)) + } yield r + + + def delete(accountId: String, bankAccountId: String): IO[ResponseError, Deleted] = + for { + c <- client + r <- execute(c.delete(accountId, bankAccountId)) + } yield r + + + def list(accountId: String, config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[BankAccount]] = + for { + c <- client + r <- execute(c.list(accountId, config)) + } yield r + +} diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeExternalCreditCards.scala b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeExternalCreditCards.scala new file mode 100644 index 0000000..cba1383 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeExternalCreditCards.scala @@ -0,0 +1,75 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.outr.stripe._ +import com.outr.stripe.charge.Card +import zio.{IO, ZIO, ZLayer} + +object LiveStripeExternalCreditCards { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripeExternalCreditCards(config, logger, micrometer) + } +} + +case class LiveStripeExternalCreditCards(config: Config, logger: Logger, micrometer: Micrometer) extends StripeExternalCreditCards { + + private val client = config.secret("stripe-secret-key").map(key => new Stripe(key).accounts.external.cards) + + def create(accountId: String, + source: Option[String] = None, + externalAccount: Option[String] = None, + defaultForCurrency: Option[String] = None, + metadata: Map[String, String] = Map.empty): IO[ResponseError, Card] = + for { + c <- client + r <- execute(c.create(accountId, source, externalAccount, defaultForCurrency, metadata)) + } yield r + + + def byId(accountId: String, cardId: String): IO[ResponseError, Card] = + for { + c <- client + r <- execute(c.byId(accountId, cardId)) + } yield r + + + def update(accountId: String, + cardId: String, + addressCity: Option[String] = None, + addressCountry: Option[String] 
= None, + addressLine1: Option[String] = None, + addressLine2: Option[String] = None, + addressState: Option[String] = None, + addressZip: Option[String] = None, + defaultForCurrency: Option[String] = None, + expMonth: Option[Int] = None, + expYear: Option[Int] = None, + metadata: Map[String, String] = Map.empty, + name: Option[String] = None): IO[ResponseError, Card] = + for { + c <- client + r <- execute(c.update(accountId, cardId, addressCity, addressCountry, addressLine1, addressLine2, addressState, addressZip, + defaultForCurrency, expMonth, expYear, metadata, name)) + } yield r + + + def delete(accountId: String, cardId: String): IO[ResponseError, Deleted] = + for { + c <- client + r <- execute(c.delete(accountId, cardId)) + } yield r + + + def list(accountId: String, config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[Card]] = + for { + c <- client + r <- execute(c.list(accountId, config)) + } yield r + +} diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeInvoiceItems.scala b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeInvoiceItems.scala new file mode 100644 index 0000000..cb0df24 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeInvoiceItems.scala @@ -0,0 +1,71 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.outr.stripe._ +import com.outr.stripe.subscription.InvoiceItem +import zio.{IO, ZIO, ZLayer} + +object LiveStripeInvoiceItems { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripeInvoiceItems(config, logger, micrometer) + } +} + +case class LiveStripeInvoiceItems(config: Config, logger: Logger, micrometer: Micrometer) extends StripeInvoiceItems { + + private val client = config.secret("stripe-secret-key").map(key => new Stripe(key).invoiceItems) + + def create(amount: Money, + currency: String, + customerId: String, + description: Option[String] = None, + discountable: Option[Boolean] = None, + invoice: Option[String] = None, + metadata: Map[String, String] = Map.empty, + subscription: Option[String] = None): IO[ResponseError, InvoiceItem] = + for { + c <- client + r <- execute(c.create(amount, currency, customerId, description, discountable, invoice, metadata, subscription)) + } yield r + + + def byId(invoiceItemId: String): IO[ResponseError, InvoiceItem] = + for { + c <- client + r <- execute(c.byId(invoiceItemId)) + } yield r + + + def update(invoiceItemId: String, + amount: Option[Money] = None, + description: Option[String] = None, + discountable: Option[Boolean] = None, + metadata: Map[String, String] = Map.empty): IO[ResponseError, InvoiceItem] = + for { + c <- client + r <- execute(c.update(invoiceItemId, amount, description, discountable, metadata)) + } yield r + + + def delete(invoiceItemId: String): IO[ResponseError, Deleted] = + for { + c <- client + r <- execute(c.delete(invoiceItemId)) + } yield r + + + def list(created: Option[TimestampFilter] = None, + customer: Option[String] = None, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[InvoiceItem]] = + for { + c <- client + r <- execute(c.list(created, customer, config)) + } yield r + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeInvoices.scala 
b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeInvoices.scala new file mode 100644 index 0000000..f643ca1 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeInvoices.scala @@ -0,0 +1,104 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.outr.stripe._ +import com.outr.stripe.subscription.{Invoice, InvoiceLine} +import zio.{IO, ZIO, ZLayer} + +object LiveStripeInvoices { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripeInvoices(config, logger, micrometer) + } +} + +case class LiveStripeInvoices(config: Config, logger: Logger, micrometer: Micrometer) extends StripeInvoices { + + private val client = config.secret("stripe-secret-key").map(key => new Stripe(key).invoices) + + def create(customerId: String, + applicationFee: Option[Money] = None, + description: Option[String] = None, + metadata: Map[String, String] = Map.empty, + statementDescriptor: Option[String] = None, + subscription: Option[String] = None, + taxPercent: Option[BigDecimal] = None): IO[ResponseError, Invoice] = + for { + c <- client + r <- execute(c.create(customerId, applicationFee, description, metadata, statementDescriptor, subscription, taxPercent)) + } yield r + + + def byId(invoiceId: String): IO[ResponseError, Invoice] = + for { + c <- client + r <- execute(c.byId(invoiceId)) + } yield r + + + def linesById(invoiceId: String, + coupon: Option[String] = None, + customer: Option[String] = None, + subscription: Option[String] = None, + subscriptionPlan: Option[String] = None, + subscriptionProrate: Option[String] = None, + subscriptionProrationDate: Option[Long] = None, + subscriptionQuantity: Option[Int] = None, + subscriptionTrialEnd: Option[Long] = None, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[InvoiceLine]] = + for { + c <- client + r <- execute(c.linesById(invoiceId, coupon, customer, subscription, subscriptionPlan, subscriptionProrate, subscriptionProrationDate, subscriptionQuantity, + subscriptionTrialEnd, config)) + } yield r + + + def upcoming(customerId: String, + coupon: Option[String] = None, + subscription: Option[String] = None, + subscriptionPlan: Option[String] = None, + subscriptionProrate: Option[String] = None, + subscriptionProrationDate: Option[Long] = None, + subscriptionQuantity: Option[Int] = None, + subscriptionTrialEnd: Option[Long] = None): IO[ResponseError, Invoice] = + for { + c <- client + r <- execute(c.upcoming(customerId, coupon, subscription, subscriptionPlan, subscriptionProrate, subscriptionProrationDate, subscriptionQuantity, subscriptionTrialEnd)) + } yield r + + + def update(invoiceId: String, + applicationFee: Option[Money] = None, + closed: Option[Boolean] = None, + description: Option[String] = None, + forgiven: Option[Boolean] = None, + metadata: Map[String, String] = Map.empty, + statementDescriptor: Option[String] = None, + taxPercent: Option[BigDecimal] = None): IO[ResponseError, Invoice] = + for { + c <- client + r <- execute(c.update(invoiceId, applicationFee, closed, description, forgiven, metadata, statementDescriptor, taxPercent)) + } yield r + + + def pay(invoiceId: String): IO[ResponseError, Invoice] = + for { + c <- client + r <- execute(c.pay(invoiceId)) + } yield r + + + def list(customerId: Option[String] = None, + date: Option[TimestampFilter] = 
None, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[Invoice]] = + for { + c <- client + r <- execute(c.list(customerId, date, config)) + } yield r + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripePlans.scala b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripePlans.scala new file mode 100644 index 0000000..34edf8e --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripePlans.scala @@ -0,0 +1,73 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.outr.stripe._ +import com.outr.stripe.subscription.Plan +import zio.{IO, ZIO, ZLayer} + +object LiveStripePlans { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripePlans(config, logger, micrometer) + } +} + +case class LiveStripePlans(config: Config, logger: Logger, micrometer: Micrometer) extends StripePlans { + + private val client = config.secret("stripe-secret-key").map(key => new Stripe(key).plans) + + def create(planId: String, + amount: Money, + currency: String, + interval: String, + productId: String, + intervalCount: Int = 1, + metadata: Map[String, String] = Map.empty, + nickname: Option[String], + trialPeriodDays: Option[Int] = None): IO[ResponseError, Plan] = + for { + c <- client + r <- execute(c.create(planId, amount, currency, interval, productId, intervalCount, metadata, nickname, trialPeriodDays)) + } yield r + + + def byId(planId: String): IO[ResponseError, Plan] = + for { + c <- client + r <- execute(c.byId(planId)) + } yield r + + + def update(planId: String, + metadata: Map[String, String] = Map.empty, + name: Option[String] = None, + productId: Option[String] = None, + statementDescriptor: Option[String] = None, + trialPeriodDays: Option[Int] = None): IO[ResponseError, Plan] = + for { + c <- client + r <- execute(c.update(planId, metadata, name, productId, statementDescriptor, trialPeriodDays)) + } yield r + + + def delete(planId: String): IO[ResponseError, Deleted] = + for { + c <- client + r <- execute(c.delete(planId)) + } yield r + + + def list(active: Option[Boolean] = None, + created: Option[TimestampFilter] = None, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[Plan]] = + for { + c <- client + r <- execute(c.list(active, created, config)) + } yield r + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripePrices.scala b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripePrices.scala new file mode 100644 index 0000000..509bf45 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripePrices.scala @@ -0,0 +1,82 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.outr.stripe._ +import com.outr.stripe.price.{Price, Recurring, Tier, TransformQuantity} +import zio.{IO, ZIO, ZLayer} + +object LiveStripePrices { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripePrices(config, logger, micrometer) + } +} + +case class LiveStripePrices(config: Config, logger: Logger, micrometer: Micrometer) extends StripePrices { + + private val client = 
config.secret("stripe-secret-key").map(key => new Stripe(key).prices) + + def create(currency: String, + active: Option[Boolean] = None, + billingScheme: Option[String] = None, + lookupKey: Option[String] = None, + metadata: Map[String, String] = Map.empty, + nickname: Option[String] = None, + recurring: Option[Recurring] = None, + tiers: List[Tier] = List(), + tiersMode: Option[String] = None, + transferLookupKey: Option[Boolean] = None, + transformQuantity: Option[TransformQuantity] = None, + unitAmount: Option[Int] = None, + unitAmountDecimal: Option[BigDecimal] = None): IO[ResponseError, Price] = + for { + c <- client + r <- execute(c.create(currency, active, billingScheme, lookupKey, metadata, nickname, recurring, tiers, tiersMode, transferLookupKey, transformQuantity, unitAmount, unitAmountDecimal)) + } yield r + + + def byId(priceId: String): IO[ResponseError, Price] = + for { + c <- client + r <- execute(c.byId(priceId)) + } yield r + + + def update(priceId: String, + active: Option[Boolean] = None, + lookupKey: Option[String] = None, + metadata: Map[String, String] = Map.empty, + nickname: Option[String] = None, + transferLookupKey: Option[Boolean] = None): IO[ResponseError, Price] = + for { + c <- client + r <- execute(c.update(priceId, active, lookupKey, metadata, nickname, transferLookupKey)) + } yield r + + + def delete(priceId: String): IO[ResponseError, Deleted] = + for { + c <- client + r <- execute(c.delete(priceId)) + } yield r + + + def list(active: Option[Boolean] = None, + currency: Option[String] = None, + created: Option[TimestampFilter] = None, + config: QueryConfig = QueryConfig.default, + endingBefore: Option[String] = None, + limit: Option[Int] = None, + productId: Option[String] = None, + `type`: Option[String] = None): IO[ResponseError, StripeList[Price]] = + for { + c <- client + r <- execute(c.list(active, currency, created, config, endingBefore, limit, productId, `type`)) + } yield r + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeProducts.scala b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeProducts.scala new file mode 100644 index 0000000..bfd8064 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeProducts.scala @@ -0,0 +1,94 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.outr.stripe._ +import com.outr.stripe.product.{PackageDimensions, Product => StripeProduct} +import zio.{IO, ZIO, ZLayer} + +object LiveStripeProducts { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripeProducts(config, logger, micrometer) + } +} + +case class LiveStripeProducts(config: Config, logger: Logger, micrometer: Micrometer) extends StripeProducts { + + private val client = config.secret("stripe-secret-key").map(key => new Stripe(key).products) + + def create(name: String, + active: Option[Boolean] = None, + attributes: List[String] = List.empty, + caption: Option[String] = None, + deactivateOn: List[String] = List.empty, + description: Option[String] = None, + images: List[String] = List.empty, + liveMode: Option[Boolean] = None, + metadata: Map[String, String] = Map.empty, + packageDimensions: Option[PackageDimensions] = None, + productId: Option[String] = None, + shippable: Option[Boolean] = None, + statementDescriptor: Option[String] = 
None, + `type`: Option[String] = None, + unitLabel: Option[String] = None, + url: Option[String] = None): IO[ResponseError, StripeProduct] = + for { + c <- client + r <- execute(c.create(name, active, attributes, caption, deactivateOn, description, images, liveMode, metadata, packageDimensions, productId, shippable, statementDescriptor, `type`, unitLabel, url)) + } yield r + + + def byId(productId: String): IO[ResponseError, StripeProduct] = + for { + c <- client + r <- execute(c.byId(productId)) + } yield r + + + def update(productId: String, + active: Option[Boolean] = None, + attributes: List[String] = List.empty, + caption: Option[String] = None, + deactivateOn: List[String] = List.empty, + description: Option[String] = None, + images: List[String] = List.empty, + liveMode: Option[Boolean] = None, + metadata: Map[String, String] = Map.empty, + name: Option[String] = None, + packageDimensions: Option[PackageDimensions] = None, + shippable: Option[Boolean] = None, + statementDescriptor: Option[String] = None, + `type`: Option[String] = None, + unitLabel: Option[String] = None, + url: Option[String] = None): IO[ResponseError, StripeProduct] = + for { + c <- client + r <- execute(c.update(productId, active, attributes, caption, deactivateOn, description, images, liveMode, metadata, name, packageDimensions, shippable, statementDescriptor, `type`, unitLabel, url)) + } yield r + + + def delete(productId: String): IO[ResponseError, Deleted] = + for { + c <- client + r <- execute(c.delete(productId)) + } yield r + + + def list(active: Option[Boolean] = None, + created: Option[TimestampFilter] = None, + config: QueryConfig = QueryConfig.default, + ids: List[String] = Nil, + shippable: Option[Boolean] = None, + `type`: Option[String] = None, + url: Option[String] = None): IO[ResponseError, StripeList[StripeProduct]] = + for { + c <- client + r <- execute(c.list(active, created, config, ids, shippable, `type`, url)) + } yield r + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeRefunds.scala b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeRefunds.scala new file mode 100644 index 0000000..ea109f6 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeRefunds.scala @@ -0,0 +1,57 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.outr.stripe._ +import com.outr.stripe.refund.Refund +import zio.{IO, ZIO, ZLayer} + +object LiveStripeRefunds { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripeRefunds(config, logger, micrometer) + } +} + +case class LiveStripeRefunds(config: Config, logger: Logger, micrometer: Micrometer) extends StripeRefunds { + + private val client = config.secret("stripe-secret-key").map(key => new Stripe(key).refunds) + + def create(chargeId: String, + amount: Option[Money] = None, + metadata: Map[String, String] = Map.empty, + reason: Option[String] = None, + refundApplicationFee: Boolean = false, + reverseTransfer: Boolean = false): IO[ResponseError, Refund] = + for { + c <- client + r <- execute(c.create(chargeId, amount, metadata, reason, refundApplicationFee, reverseTransfer)) + } yield r + + + def byId(refundId: String): IO[ResponseError, Refund] = + for { + c <- client + r <- execute(c.byId(refundId)) + } yield r + + + def update(refundId: String, 
metadata: Map[String, String] = Map.empty): IO[ResponseError, Refund] = + for { + c <- client + r <- execute(c.update(refundId, metadata)) + } yield r + + + def list(chargeId: Option[String] = None, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[Refund]] = + for { + c <- client + r <- execute(c.list(chargeId, config)) + } yield r + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeSubscriptionItems.scala b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeSubscriptionItems.scala new file mode 100644 index 0000000..3c379fa --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeSubscriptionItems.scala @@ -0,0 +1,76 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.outr.stripe._ +import com.outr.stripe.subscription.SubscriptionItem +import zio.{IO, ZIO, ZLayer} + +object LiveStripeSubscriptionItems { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripeSubscriptionItems(config, logger, micrometer) + } +} + +case class LiveStripeSubscriptionItems(config: Config, logger: Logger, micrometer: Micrometer) extends StripeSubscriptionItems { + + private val client = config.secret("stripe-secret-key").map(key => new Stripe(key).subscriptionItems) + + def create(subscriptionId: String, + billingThresholds: Map[String, String] = Map(), + metadata: Map[String, String] = Map(), + paymentBehavior: Option[String] = None, + priceId: Option[String] = None, + prorationBehavior: Option[String] = None, + prorationDate: Option[Long] = None, + quantity: Option[Int] = None, + taxRates: List[String] = List()): IO[ResponseError, SubscriptionItem] = + for { + c <- client + r <- execute(c.create(subscriptionId, billingThresholds, metadata, paymentBehavior, priceId, prorationBehavior, prorationDate, quantity, taxRates)) + } yield r + + + def byId(subscriptionItemId: String): IO[ResponseError, SubscriptionItem] = + for { + c <- client + r <- execute(c.byId(subscriptionItemId)) + } yield r + + + def update(subscriptionItemId: String, + billingThresholds: Map[String, String] = Map(), + metadata: Map[String, String] = Map(), + offSession: Option[Boolean] = None, + paymentBehavior: Option[String] = None, + priceId: Option[String] = None, + prorationBehavior: Option[String] = None, + prorationDate: Option[Long] = None, + quantity: Option[Int] = None, + taxRates: List[String] = List()): IO[ResponseError, SubscriptionItem] = + for { + c <- client + r <- execute(c.update(subscriptionItemId, billingThresholds, metadata, offSession, paymentBehavior, priceId, prorationBehavior, prorationDate, quantity, taxRates)) + } yield r + + + def delete(subscriptionItemId: String): IO[ResponseError, Deleted] = + for { + c <- client + r <- execute(c.delete(subscriptionItemId)) + } yield r + + + def list(subscription: String, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[SubscriptionItem]] = + for { + c <- client + r <- execute(c.list(subscription, config)) + } yield r + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeSubscriptions.scala b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeSubscriptions.scala new file mode 100644 index 0000000..e27dca7 --- /dev/null +++ 
b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeSubscriptions.scala @@ -0,0 +1,85 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.outr.stripe._ +import com.outr.stripe.subscription.{CreateSubscriptionItem, Subscription} +import zio.{IO, ZIO, ZLayer} + +object LiveStripeSubscriptions { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripeSubscriptions(config, logger, micrometer) + } +} + +case class LiveStripeSubscriptions(config: Config, logger: Logger, micrometer: Micrometer) extends StripeSubscriptions { + + private val client = config.secret("stripe-secret-key").map(key => new Stripe(key).subscriptions) + + def create(customerId: String, + items: List[CreateSubscriptionItem], + applicationFeePercent: Option[BigDecimal] = None, + coupon: Option[String] = None, + metadata: Map[String, String] = Map.empty, + prorate: Option[Boolean] = None, + quantity: Option[Int] = None, + source: Option[String] = None, + taxPercent: Option[BigDecimal] = None, + trialEnd: Option[Long] = None, + trialPeriodDays: Option[Int] = None): IO[ResponseError, Subscription] = + for { + c <- client + r <- execute(c.create(customerId, items, applicationFeePercent, coupon, metadata, prorate, quantity, source, taxPercent, trialEnd, trialPeriodDays)) + } yield r + + + def byId(subscriptionId: String): IO[ResponseError, Subscription] = + for { + c <- client + r <- execute(c.byId(subscriptionId)) + } yield r + + + def update(subscriptionId: String, + items: List[CreateSubscriptionItem] = List(), + applicationFeePercent: Option[BigDecimal] = None, + coupon: Option[String] = None, + metadata: Map[String, String] = Map.empty, + prorate: Option[Boolean] = None, + prorationDate: Option[Long] = None, + quantity: Option[Int] = None, + source: Option[String] = None, + taxPercent: Option[BigDecimal], + trialEnd: Option[Long] = None, + trialPeriodDays: Option[Int] = None): IO[ResponseError, Subscription] = + for { + c <- client + r <- execute(c.update(subscriptionId, items, applicationFeePercent, coupon, metadata, prorate, prorationDate, quantity, source, taxPercent, trialEnd, trialPeriodDays)) + } yield r + + + def cancel(customerId: String, + subscriptionId: String, + atPeriodEnd: Boolean = false): IO[ResponseError, Subscription] = + for { + c <- client + r <- execute(c.cancel(customerId, subscriptionId, atPeriodEnd)) + } yield r + + + def list(created: Option[TimestampFilter] = None, + customer: Option[String] = None, + plan: Option[String] = None, + status: Option[String] = None, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[Subscription]] = + for { + c <- client + r <- execute(c.list(created, customer, plan, status, config)) + } yield r + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeTokens.scala b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeTokens.scala new file mode 100644 index 0000000..d0f2feb --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeTokens.scala @@ -0,0 +1,41 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.outr.stripe.charge.{BankAccount, Card, PII} +import com.outr.stripe.token.Token +import 
com.outr.stripe.{ResponseError, Stripe} +import zio.{IO, ZIO, ZLayer} + +object LiveStripeTokens { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripeTokens(config, logger, micrometer) + } +} + +case class LiveStripeTokens(config: Config, logger: Logger, micrometer: Micrometer) extends StripeTokens { + + private val client = config.secret("stripe-secret-key").map(key => new Stripe(key).tokens) + + def create(card: Option[Card] = None, + bankAccount: Option[BankAccount] = None, + pii: Option[PII] = None, + customerId: Option[String] = None): IO[ResponseError, Token] = + for { + c <- client + r <- execute(c.create(card, bankAccount, pii, customerId)) + } yield r + + + def byId(tokenId: String): IO[ResponseError, Token] = + for { + c <- client + r <- execute(c.byId(tokenId)) + } yield r + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeTransferReversals.scala b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeTransferReversals.scala new file mode 100644 index 0000000..55e6c0a --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeTransferReversals.scala @@ -0,0 +1,59 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.outr.stripe._ +import com.outr.stripe.transfer.TransferReversal +import zio.{IO, ZIO, ZLayer} + +object LiveStripeTransferReversals { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripeTransferReversals(config, logger, micrometer) + } +} + +case class LiveStripeTransferReversals(config: Config, logger: Logger, micrometer: Micrometer) extends StripeTransferReversals { + + private val client = config.secret("stripe-secret-key").map(key => new Stripe(key).transfers.reversals) + + def create(transferId: String, + amount: Option[Money] = None, + description: Option[String] = None, + metadata: Map[String, String] = Map.empty, + refundApplicationFee: Boolean = false): IO[ResponseError, TransferReversal] = + for { + c <- client + r <- execute(c.create(transferId, amount, description, metadata, refundApplicationFee)) + } yield r + + + def byId(transferId: String, transferReversalId: String): IO[ResponseError, TransferReversal] = + for { + c <- client + r <- execute(c.byId(transferId, transferReversalId)) + } yield r + + + def update(transferId: String, + transferReversalId: String, + description: Option[String] = None, + metadata: Map[String, String] = Map.empty): IO[ResponseError, TransferReversal] = + for { + c <- client + r <- execute(c.update(transferId, transferReversalId, description, metadata)) + } yield r + + + def list(transferId: String, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[TransferReversal]] = + for { + c <- client + r <- execute(c.list(transferId, config)) + } yield r + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeTransfers.scala b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeTransfers.scala new file mode 100644 index 0000000..352e916 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeTransfers.scala @@ -0,0 +1,67 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import 
com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.outr.stripe._ +import com.outr.stripe.transfer.Transfer +import zio.{IO, ZIO, ZLayer} + +object LiveStripeTransfers { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripeTransfers(config, logger, micrometer) + } +} + +case class LiveStripeTransfers(config: Config, logger: Logger, micrometer: Micrometer) extends StripeTransfers { + + private val client = config.secret("stripe-secret-key").map(key => new Stripe(key).transfers) + + def create(amount: Money, + currency: String, + destination: String, + applicationFee: Option[Money] = None, + description: Option[String] = None, + metadata: Map[String, String] = Map.empty, + sourceTransaction: Option[String] = None, + statementDescriptor: Option[String] = None, + sourceType: String = "card", + method: String = "standard"): IO[ResponseError, Transfer] = + for { + c <- client + r <- execute(c.create(amount, currency, destination, applicationFee, description, metadata, sourceTransaction, statementDescriptor, sourceType, method)) + } yield r + + + def byId(transferId: String): IO[ResponseError, Transfer] = + for { + c <- client + r <- execute(c.byId(transferId)) + } yield r + + + def update(transferId: String, + description: Option[String] = None, + metadata: Map[String, String] = Map.empty): IO[ResponseError, Transfer] = + for { + c <- client + r <- execute(c.update(transferId, description, metadata)) + } yield r + + + def list(created: Option[TimestampFilter] = None, + date: Option[TimestampFilter] = None, + destination: Option[String] = None, + recipient: Option[String] = None, + status: Option[String] = None, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[Transfer]] = + for { + c <- client + r <- execute(c.list(created, date, destination, recipient, status, config)) + } yield r + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeUI.scala b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeUI.scala new file mode 100644 index 0000000..9187439 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/LiveStripeUI.scala @@ -0,0 +1,50 @@ +package com.harana.modules.stripe + +import com.harana.modules.core.config.Config +import com.harana.modules.core.http.Http +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import zio.{Task, ZIO, ZLayer} + +object LiveStripeUI { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + http <- ZIO.service[Http] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveStripeUI(config, http, logger, micrometer) + } +} + +case class LiveStripeUI(config: Config, http: Http, logger: Logger, micrometer: Micrometer) extends StripeUI { + + def billingPortalUrl(customerId: String, returnUrl: String): Task[String] = + for { + apiKey <- config.secret("stripe-secret-key") + formBody = Map( + "customer" -> customerId, + "return_url" -> returnUrl + ) + response <- http.postFormAsJson("https://api.stripe.com/v1/billing_portal/sessions", formBody, credentials = Some((apiKey, ""))).mapError(e => new Exception(e.toString)) + url <- ZIO.fromTry(response.hcursor.downField("url").as[String].toTry) + } yield url + + + def createCheckoutSession(customerId: String, priceId: String, successUrl: String, cancelUrl: String): Task[String] = + for { + apiKey <- 
config.secret("stripe-secret-key") + formBody = Map( + "cancel_url" -> cancelUrl, + "customer" -> customerId, + "line_items[][price]" -> priceId, + "line_items[][quantity]" -> "1", + "mode" -> "subscription", + "payment_method_types[]" -> "card", + "success_url" -> successUrl + ) + response <- http.postFormAsJson("https://api.stripe.com/v1/checkout/sessions", formBody, credentials = Some((apiKey, ""))).mapError(e => new Exception(e.toString)) + id <- ZIO.fromTry(response.hcursor.downField("id").as[String].toTry) + } yield id + +} diff --git a/jvm/src/main/scala/com/harana/modules/stripe/StripeAccounts.scala b/jvm/src/main/scala/com/harana/modules/stripe/StripeAccounts.scala new file mode 100644 index 0000000..c0be14f --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/StripeAccounts.scala @@ -0,0 +1,49 @@ +package com.harana.modules.stripe + +import com.outr.stripe.connect._ +import com.outr.stripe.{Deleted, QueryConfig, ResponseError, StripeList} +import zio.IO +import zio.macros.accessible + +@accessible +trait StripeAccounts { + def create(country: Option[String] = None, + email: Option[String] = None, + custom: Boolean = false, + accountToken: Option[String] = None, + businessLogo: Option[String] = None, + businessName: Option[String] = None, + businessPrimaryColor: Option[String] = None, + businessURL: Option[String] = None, + legalEntity: Option[LegalEntity] = None, + tosAcceptance: Option[Acceptance] = None): IO[ResponseError, Account] + + def byId(accountId: String): IO[ResponseError, Account] + + def update(accountId: String, + businessLogo: Option[String] = None, + businessName: Option[String] = None, + businessPrimaryColor: Option[String] = None, + businessUrl: Option[String] = None, + debitNegativeBalances: Option[Boolean] = None, + declineChargeOn: Option[DeclineChargeOn] = None, + defaultCurrency: Option[String] = None, + email: Option[String] = None, + externalAccount: Option[String] = None, + legalEntity: Option[LegalEntity] = None, + metadata: Map[String, String] = Map.empty, + productDescription: Option[String] = None, + statementDescriptor: Option[String] = None, + supportEmail: Option[String] = None, + supportPhone: Option[String] = None, + supportUrl: Option[String] = None, + tosAcceptance: Option[Acceptance] = None, + transferSchedule: Option[TransferSchedule] = None, + transferStatementDescriptor: Option[String] = None): IO[ResponseError, Account] + + def delete(accountId: String): IO[ResponseError, Deleted] + + def reject(accountId: String, reason: String): IO[ResponseError, Account] + + def list(config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[Account]] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/StripeApplicationFeeRefunds.scala b/jvm/src/main/scala/com/harana/modules/stripe/StripeApplicationFeeRefunds.scala new file mode 100644 index 0000000..c67d709 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/StripeApplicationFeeRefunds.scala @@ -0,0 +1,19 @@ +package com.harana.modules.stripe + +import com.outr.stripe.connect.FeeRefund +import com.outr.stripe.{Money, QueryConfig, ResponseError, StripeList} +import zio.IO +import zio.macros.accessible + +@accessible +trait StripeApplicationFeeRefunds { + def create(feeId: String, + amount: Option[Money] = None, + metadata: Map[String, String] = Map.empty): IO[ResponseError, FeeRefund] + + def byId(feeId: String, refundId: String): IO[ResponseError, FeeRefund] + + def update(feeId: String, refundId: String, metadata: Map[String, 
String] = Map.empty): IO[ResponseError, FeeRefund] + + def list(feeId: String, config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[FeeRefund]] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/StripeApplicationFees.scala b/jvm/src/main/scala/com/harana/modules/stripe/StripeApplicationFees.scala new file mode 100644 index 0000000..8cc04eb --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/StripeApplicationFees.scala @@ -0,0 +1,15 @@ +package com.harana.modules.stripe + +import com.outr.stripe.connect.ApplicationFee +import com.outr.stripe.{QueryConfig, ResponseError, StripeList, TimestampFilter} +import zio.IO +import zio.macros.accessible + +@accessible +trait StripeApplicationFees { + def byId(feeId: String): IO[ResponseError, ApplicationFee] + + def list(charge: Option[String] = None, + created: Option[TimestampFilter] = None, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[ApplicationFee]] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/StripeBalance.scala b/jvm/src/main/scala/com/harana/modules/stripe/StripeBalance.scala new file mode 100644 index 0000000..ee4f985 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/StripeBalance.scala @@ -0,0 +1,21 @@ +package com.harana.modules.stripe + +import com.outr.stripe.balance.{Balance, BalanceTransaction} +import com.outr.stripe.{QueryConfig, ResponseError, StripeList, TimestampFilter} +import zio.IO +import zio.macros.accessible + +@accessible +trait StripeBalance { + def get: IO[ResponseError, Balance] + + def byId(id: String, config: QueryConfig = QueryConfig.default): IO[ResponseError, BalanceTransaction] + + def list(availableOn: Option[TimestampFilter] = None, + created: Option[TimestampFilter] = None, + currency: Option[String] = None, + source: Option[String] = None, + transfer: Option[String] = None, + `type`: Option[String] = None, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[BalanceTransaction]] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/StripeCharges.scala b/jvm/src/main/scala/com/harana/modules/stripe/StripeCharges.scala new file mode 100644 index 0000000..896780a --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/StripeCharges.scala @@ -0,0 +1,42 @@ +package com.harana.modules.stripe + +import com.outr.stripe._ +import com.outr.stripe.charge.{Charge, FraudDetails, Shipping} +import zio.IO +import zio.macros.accessible + +@accessible +trait StripeCharges { + def create(amount: Money, + currency: String, + applicationFee: Option[Money] = None, + capture: Boolean = true, + description: Option[String] = None, + destination: Option[String] = None, + metadata: Map[String, String] = Map.empty, + receiptEmail: Option[String] = None, + shipping: Option[Shipping] = None, + customer: Option[String] = None, + source: Option[String] = None, + statementDescriptor: Option[String] = None): IO[ResponseError, Charge] + + def byId(chargeId: String): IO[ResponseError, Charge] + + def update(chargeId: String, + description: Option[String] = None, + fraudDetails: Option[FraudDetails] = None, + metadata: Map[String, String] = Map.empty, + receiptEmail: Option[String] = None, + shipping: Option[Shipping] = None): IO[ResponseError, Charge] + + def capture(chargeId: String, + amount: Option[Money] = None, + applicationFee: Option[Money] = None, + receiptEmail: Option[String] = None, + statementDescriptor: 
Option[String] = None): IO[ResponseError, Charge] + + def list(created: Option[TimestampFilter] = None, + customer: Option[String] = None, + source: Option[String] = None, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[Charge]] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/StripeCountrySpecs.scala b/jvm/src/main/scala/com/harana/modules/stripe/StripeCountrySpecs.scala new file mode 100644 index 0000000..134e981 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/StripeCountrySpecs.scala @@ -0,0 +1,13 @@ +package com.harana.modules.stripe + +import com.outr.stripe.connect.CountrySpec +import com.outr.stripe.{QueryConfig, ResponseError, StripeList} +import zio.IO +import zio.macros.accessible + +@accessible +trait StripeCountrySpecs { + def list(config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[CountrySpec]] + + def byId(countryCode: String): IO[ResponseError, CountrySpec] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/StripeCoupons.scala b/jvm/src/main/scala/com/harana/modules/stripe/StripeCoupons.scala new file mode 100644 index 0000000..ea33f17 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/StripeCoupons.scala @@ -0,0 +1,28 @@ +package com.harana.modules.stripe + +import com.outr.stripe._ +import com.outr.stripe.subscription.Coupon +import zio.IO +import zio.macros.accessible + +@accessible +trait StripeCoupons { + def create(couponId: String, + duration: String, + amountOff: Option[Money] = None, + currency: Option[String] = None, + durationInMonths: Option[Int] = None, + maxRedemptions: Option[Int] = None, + metadata: Map[String, String] = Map.empty, + percentOff: Option[Int] = None, + redeemBy: Option[Long] = None): IO[ResponseError, Coupon] + + def byId(couponId: String): IO[ResponseError, Coupon] + + def update(couponId: String, metadata: Map[String, String]): IO[ResponseError, Coupon] + + def delete(couponId: String): IO[ResponseError, Deleted] + + def list(created: Option[TimestampFilter] = None, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[Coupon]] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/StripeCustomerBankAccounts.scala b/jvm/src/main/scala/com/harana/modules/stripe/StripeCustomerBankAccounts.scala new file mode 100644 index 0000000..61781ca --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/StripeCustomerBankAccounts.scala @@ -0,0 +1,32 @@ +package com.harana.modules.stripe + +import com.outr.stripe._ +import com.outr.stripe.charge.BankAccount +import zio.IO +import zio.macros.accessible + +@accessible +trait StripeCustomerBankAccounts { + def create(customerId: String, + source: Option[String] = None, + defaultForCurrency: Option[String] = None, + metadata: Map[String, String] = Map.empty): IO[ResponseError, BankAccount] + + def byId(customerId: String, bankAccountId: String): IO[ResponseError, BankAccount] + + def update(customerId: String, + bankAccountId: String, + accountHolderName: Option[String] = None, + accountHolderType: Option[String] = None, + metadata: Map[String, String] = Map.empty): IO[ResponseError, BankAccount] + + def verify(customerId: String, + bankAccountId: String, + amount1: Option[Money] = None, + amount2: Option[Money] = None, + verificationMethod: Option[String] = None): IO[ResponseError, BankAccount] + + def delete(customerId: String, bankAccountId: String): IO[ResponseError, Deleted] + + def 
list(customerId: String, config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[BankAccount]] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/StripeCustomerCreditCards.scala b/jvm/src/main/scala/com/harana/modules/stripe/StripeCustomerCreditCards.scala new file mode 100644 index 0000000..cd7c3d1 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/StripeCustomerCreditCards.scala @@ -0,0 +1,35 @@ +package com.harana.modules.stripe + +import com.outr.stripe.charge.Card +import com.outr.stripe.{Deleted, QueryConfig, ResponseError, StripeList} +import zio.IO +import zio.macros.accessible + +@accessible +trait StripeCustomerCreditCards { + def create(customerId: String, + source: Option[String] = None, + externalAccount: Option[String] = None, + defaultForCurrency: Option[String] = None, + metadata: Map[String, String] = Map.empty): IO[ResponseError, Card] + + def byId(customerId: String, cardId: String): IO[ResponseError, Card] + + def update(customerId: String, + cardId: String, + addressCity: Option[String] = None, + addressCountry: Option[String] = None, + addressLine1: Option[String] = None, + addressLine2: Option[String] = None, + addressState: Option[String] = None, + addressZip: Option[String] = None, + defaultForCurrency: Option[String] = None, + expMonth: Option[Int] = None, + expYear: Option[Int] = None, + metadata: Map[String, String] = Map.empty, + name: Option[String] = None): IO[ResponseError, Card] + + def delete(customerId: String, cardId: String): IO[ResponseError, Deleted] + + def list(customerId: String, config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[Card]] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/StripeCustomers.scala b/jvm/src/main/scala/com/harana/modules/stripe/StripeCustomers.scala new file mode 100644 index 0000000..01c4fc7 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/StripeCustomers.scala @@ -0,0 +1,51 @@ +package com.harana.modules.stripe + +import com.outr.stripe._ +import com.outr.stripe.charge.{Address, Card, Shipping} +import com.outr.stripe.customer.Customer +import zio.IO +import zio.macros.accessible + +@accessible +trait StripeCustomers { + def create(address: Option[Address] = None, + balance: Option[Money] = None, + coupon: Option[String] = None, + description: Option[String] = None, + email: Option[String] = None, + invoicePrefix: Option[String] = None, + metadata: Map[String, String] = Map.empty, + name: Option[String] = None, + nextInvoiceSequence: Option[Int] = None, + paymentMethodId: Option[String] = None, + phone: Option[String] = None, + promotionCode: Option[String] = None, + shipping: Option[Shipping] = None, + source: Option[Card] = None, + taxExempt: Option[String] = None): IO[ResponseError, Customer] + + def byId(customerId: String): IO[ResponseError, Customer] + + def update(customerId: String, + address: Option[Address] = None, + balance: Option[Money] = None, + coupon: Option[String] = None, + defaultSource: Option[String] = None, + description: Option[String] = None, + email: Option[String] = None, + invoicePrefix: Option[String] = None, + metadata: Map[String, String] = Map.empty, + name: Option[String] = None, + nextInvoiceSequence: Option[Int] = None, + phone: Option[String] = None, + promotionCode: Option[String] = None, + shipping: Option[Shipping] = None, + source: Option[Card] = None, + taxExempt: Option[String] = None): IO[ResponseError, Customer] + + def 
delete(customerId: String): IO[ResponseError, Deleted] + + def list(created: Option[TimestampFilter] = None, + config: QueryConfig = QueryConfig.default, + email: Option[String] = None): IO[ResponseError, StripeList[Customer]] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/StripeDiscounts.scala b/jvm/src/main/scala/com/harana/modules/stripe/StripeDiscounts.scala new file mode 100644 index 0000000..29911fc --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/StripeDiscounts.scala @@ -0,0 +1,12 @@ +package com.harana.modules.stripe + +import com.outr.stripe.{Deleted, ResponseError} +import zio.IO +import zio.macros.accessible + +@accessible +trait StripeDiscounts { + def deleteCustomerDiscount(customerId: String): IO[ResponseError, Deleted] + + def deleteSubscriptionDiscount(subscriptionId: String): IO[ResponseError, Deleted] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/StripeDisputes.scala b/jvm/src/main/scala/com/harana/modules/stripe/StripeDisputes.scala new file mode 100644 index 0000000..62048ab --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/StripeDisputes.scala @@ -0,0 +1,20 @@ +package com.harana.modules.stripe + +import com.outr.stripe.dispute.{Dispute, DisputeEvidence} +import com.outr.stripe.{QueryConfig, ResponseError, StripeList, TimestampFilter} +import zio.IO +import zio.macros.accessible + +@accessible +trait StripeDisputes { + def byId(disputeId: String): IO[ResponseError, Dispute] + + def update(disputeId: String, + evidence: Option[DisputeEvidence] = None, + metadata: Map[String, String]): IO[ResponseError, Dispute] + + def close(disputeId: String): IO[ResponseError, Dispute] + + def list(created: Option[TimestampFilter] = None, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[Dispute]] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/StripeEvents.scala b/jvm/src/main/scala/com/harana/modules/stripe/StripeEvents.scala new file mode 100644 index 0000000..f9b6665 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/StripeEvents.scala @@ -0,0 +1,16 @@ +package com.harana.modules.stripe + +import com.outr.stripe.event.Event +import com.outr.stripe.{QueryConfig, ResponseError, StripeList, TimestampFilter} +import zio.IO +import zio.macros.accessible + +@accessible +trait StripeEvents { + def byId(eventId: String): IO[ResponseError, Event] + + def list(created: Option[TimestampFilter] = None, + `type`: Option[String] = None, + types: List[String] = Nil, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[Event]] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/StripeExternalBankAccounts.scala b/jvm/src/main/scala/com/harana/modules/stripe/StripeExternalBankAccounts.scala new file mode 100644 index 0000000..845736f --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/StripeExternalBankAccounts.scala @@ -0,0 +1,26 @@ +package com.harana.modules.stripe + +import com.outr.stripe.charge.BankAccount +import com.outr.stripe.{Deleted, QueryConfig, ResponseError, StripeList} +import zio.IO +import zio.macros.accessible + +@accessible +trait StripeExternalBankAccounts { + def create(accountId: String, + source: Option[String] = None, + externalAccount: Option[String] = None, + defaultForCurrency: Option[String] = None, + metadata: Map[String, String] = Map.empty): IO[ResponseError, BankAccount] + + def byId(accountId: String, 
bankAccountId: String): IO[ResponseError, BankAccount] + + def update(accountId: String, + bankAccountId: String, + defaultForCurrency: Option[String] = None, + metadata: Map[String, String] = Map.empty): IO[ResponseError, BankAccount] + + def delete(accountId: String, bankAccountId: String): IO[ResponseError, Deleted] + + def list(accountId: String, config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[BankAccount]] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/StripeExternalCreditCards.scala b/jvm/src/main/scala/com/harana/modules/stripe/StripeExternalCreditCards.scala new file mode 100644 index 0000000..404f63b --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/StripeExternalCreditCards.scala @@ -0,0 +1,35 @@ +package com.harana.modules.stripe + +import com.outr.stripe.charge.Card +import com.outr.stripe.{Deleted, QueryConfig, ResponseError, StripeList} +import zio.IO +import zio.macros.accessible + +@accessible +trait StripeExternalCreditCards { + def create(accountId: String, + source: Option[String] = None, + externalAccount: Option[String] = None, + defaultForCurrency: Option[String] = None, + metadata: Map[String, String] = Map.empty): IO[ResponseError, Card] + + def byId(accountId: String, cardId: String): IO[ResponseError, Card] + + def update(accountId: String, + cardId: String, + addressCity: Option[String] = None, + addressCountry: Option[String] = None, + addressLine1: Option[String] = None, + addressLine2: Option[String] = None, + addressState: Option[String] = None, + addressZip: Option[String] = None, + defaultForCurrency: Option[String] = None, + expMonth: Option[Int] = None, + expYear: Option[Int] = None, + metadata: Map[String, String] = Map.empty, + name: Option[String] = None): IO[ResponseError, Card] + + def delete(accountId: String, cardId: String): IO[ResponseError, Deleted] + + def list(accountId: String, config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[Card]] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/StripeInvoiceItems.scala b/jvm/src/main/scala/com/harana/modules/stripe/StripeInvoiceItems.scala new file mode 100644 index 0000000..5bad7b2 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/StripeInvoiceItems.scala @@ -0,0 +1,32 @@ +package com.harana.modules.stripe + +import com.outr.stripe._ +import com.outr.stripe.subscription.InvoiceItem +import zio.IO +import zio.macros.accessible + +@accessible +trait StripeInvoiceItems { + def create(amount: Money, + currency: String, + customerId: String, + description: Option[String] = None, + discountable: Option[Boolean] = None, + invoice: Option[String] = None, + metadata: Map[String, String] = Map.empty, + subscription: Option[String] = None): IO[ResponseError, InvoiceItem] + + def byId(invoiceItemId: String): IO[ResponseError, InvoiceItem] + + def update(invoiceItemId: String, + amount: Option[Money] = None, + description: Option[String] = None, + discountable: Option[Boolean] = None, + metadata: Map[String, String] = Map.empty): IO[ResponseError, InvoiceItem] + + def delete(invoiceItemId: String): IO[ResponseError, Deleted] + + def list(created: Option[TimestampFilter] = None, + customer: Option[String] = None, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[InvoiceItem]] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/StripeInvoices.scala 
b/jvm/src/main/scala/com/harana/modules/stripe/StripeInvoices.scala new file mode 100644 index 0000000..c722533 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/StripeInvoices.scala @@ -0,0 +1,54 @@ +package com.harana.modules.stripe + +import com.outr.stripe._ +import com.outr.stripe.subscription.{Invoice, InvoiceLine} +import zio.IO +import zio.macros.accessible + +@accessible +trait StripeInvoices { + def create(customerId: String, + applicationFee: Option[Money] = None, + description: Option[String] = None, + metadata: Map[String, String] = Map.empty, + statementDescriptor: Option[String] = None, + subscription: Option[String] = None, + taxPercent: Option[BigDecimal] = None): IO[ResponseError, Invoice] + + def byId(invoiceId: String): IO[ResponseError, Invoice] + + def linesById(invoiceId: String, + coupon: Option[String] = None, + customer: Option[String] = None, + subscription: Option[String] = None, + subscriptionPlan: Option[String] = None, + subscriptionProrate: Option[String] = None, + subscriptionProrationDate: Option[Long] = None, + subscriptionQuantity: Option[Int] = None, + subscriptionTrialEnd: Option[Long] = None, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[InvoiceLine]] + + def upcoming(customerId: String, + coupon: Option[String] = None, + subscription: Option[String] = None, + subscriptionPlan: Option[String] = None, + subscriptionProrate: Option[String] = None, + subscriptionProrationDate: Option[Long] = None, + subscriptionQuantity: Option[Int] = None, + subscriptionTrialEnd: Option[Long] = None): IO[ResponseError, Invoice] + + def update(invoiceId: String, + applicationFee: Option[Money] = None, + closed: Option[Boolean] = None, + description: Option[String] = None, + forgiven: Option[Boolean] = None, + metadata: Map[String, String] = Map.empty, + statementDescriptor: Option[String] = None, + taxPercent: Option[BigDecimal] = None): IO[ResponseError, Invoice] + + def pay(invoiceId: String): IO[ResponseError, Invoice] + + def list(customerId: Option[String] = None, + date: Option[TimestampFilter] = None, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[Invoice]] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/StripePlans.scala b/jvm/src/main/scala/com/harana/modules/stripe/StripePlans.scala new file mode 100644 index 0000000..6c8f268 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/StripePlans.scala @@ -0,0 +1,34 @@ +package com.harana.modules.stripe + +import com.outr.stripe._ +import com.outr.stripe.subscription.Plan +import zio.IO +import zio.macros.accessible + +@accessible +trait StripePlans { + def create(planId: String, + amount: Money, + currency: String, + interval: String, + productId: String, + intervalCount: Int = 1, + metadata: Map[String, String] = Map.empty, + nickname: Option[String], + trialPeriodDays: Option[Int] = None): IO[ResponseError, Plan] + + def byId(planId: String): IO[ResponseError, Plan] + + def update(planId: String, + metadata: Map[String, String] = Map.empty, + name: Option[String] = None, + productId: Option[String] = None, + statementDescriptor: Option[String] = None, + trialPeriodDays: Option[Int] = None): IO[ResponseError, Plan] + + def delete(planId: String): IO[ResponseError, Deleted] + + def list(active: Option[Boolean] = None, + created: Option[TimestampFilter] = None, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[Plan]] +} \ No newline at end of file diff --git 
a/jvm/src/main/scala/com/harana/modules/stripe/StripePrices.scala b/jvm/src/main/scala/com/harana/modules/stripe/StripePrices.scala new file mode 100644 index 0000000..0b6fc87 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/StripePrices.scala @@ -0,0 +1,47 @@ +package com.harana.modules.stripe + +import com.outr.stripe._ +import com.outr.stripe.price.{Price, Recurring, Tier, TransformQuantity} +import zio.IO +import zio.macros.accessible + +@accessible +trait StripePrices { + def create(currency: String, + active: Option[Boolean] = None, + billingScheme: Option[String] = None, + lookupKey: Option[String] = None, + metadata: Map[String, String] = Map.empty, + nickname: Option[String] = None, + recurring: Option[Recurring] = None, + tiers: List[Tier] = List(), + tiersMode: Option[String] = None, + transferLookupKey: Option[Boolean] = None, + transformQuantity: Option[TransformQuantity] = None, + unitAmount: Option[Int] = None, + unitAmountDecimal: Option[BigDecimal] = None): IO[ResponseError, Price] + + + def byId(priceId: String): IO[ResponseError, Price] + + + def update(priceId: String, + active: Option[Boolean] = None, + lookupKey: Option[String] = None, + metadata: Map[String, String] = Map.empty, + nickname: Option[String] = None, + transferLookupKey: Option[Boolean] = None): IO[ResponseError, Price] + + + def delete(planId: String): IO[ResponseError, Deleted] + + + def list(active: Option[Boolean] = None, + currency: Option[String] = None, + created: Option[TimestampFilter] = None, + config: QueryConfig = QueryConfig.default, + endingBefore: Option[String] = None, + limit: Option[Int] = None, + productId: Option[String] = None, + `type`: Option[String] = None): IO[ResponseError, StripeList[Price]] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/StripeProducts.scala b/jvm/src/main/scala/com/harana/modules/stripe/StripeProducts.scala new file mode 100644 index 0000000..01517dd --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/StripeProducts.scala @@ -0,0 +1,55 @@ +package com.harana.modules.stripe + +import com.outr.stripe._ +import com.outr.stripe.product.{PackageDimensions, Product => StripeProduct} +import zio.IO +import zio.macros.accessible + +@accessible +trait StripeProducts { + def create(name: String, + active: Option[Boolean] = None, + attributes: List[String] = List.empty, + caption: Option[String] = None, + deactivateOn: List[String] = List.empty, + description: Option[String] = None, + images: List[String] = List.empty, + liveMode: Option[Boolean] = None, + metadata: Map[String, String] = Map.empty, + packageDimensions: Option[PackageDimensions] = None, + productId: Option[String] = None, + shippable: Option[Boolean] = None, + statementDescriptor: Option[String] = None, + `type`: Option[String] = None, + unitLabel: Option[String] = None, + url: Option[String] = None): IO[ResponseError, StripeProduct] + + def byId(productId: String): IO[ResponseError, StripeProduct] + + def update(productId: String, + active: Option[Boolean] = None, + attributes: List[String] = List.empty, + caption: Option[String] = None, + deactivateOn: List[String] = List.empty, + description: Option[String] = None, + images: List[String] = List.empty, + liveMode: Option[Boolean] = None, + metadata: Map[String, String] = Map.empty, + name: Option[String] = None, + packageDimensions: Option[PackageDimensions] = None, + shippable: Option[Boolean] = None, + statementDescriptor: Option[String] = None, + `type`: Option[String] = None, + 
unitLabel: Option[String] = None, + url: Option[String] = None): IO[ResponseError, StripeProduct] + + def delete(productId: String): IO[ResponseError, Deleted] + + def list(active: Option[Boolean] = None, + created: Option[TimestampFilter] = None, + config: QueryConfig = QueryConfig.default, + ids: List[String] = Nil, + shippable: Option[Boolean] = None, + `type`: Option[String] = None, + url: Option[String] = None): IO[ResponseError, StripeList[StripeProduct]] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/StripeRefunds.scala b/jvm/src/main/scala/com/harana/modules/stripe/StripeRefunds.scala new file mode 100644 index 0000000..55bb324 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/StripeRefunds.scala @@ -0,0 +1,23 @@ +package com.harana.modules.stripe + +import com.outr.stripe.refund.Refund +import com.outr.stripe.{Money, QueryConfig, ResponseError, StripeList} +import zio.IO +import zio.macros.accessible + +@accessible +trait StripeRefunds { + def create(chargeId: String, + amount: Option[Money] = None, + metadata: Map[String, String] = Map.empty, + reason: Option[String] = None, + refundApplicationFee: Boolean = false, + reverseTransfer: Boolean = false): IO[ResponseError, Refund] + + def byId(refundId: String): IO[ResponseError, Refund] + + def update(refundId: String, metadata: Map[String, String] = Map.empty): IO[ResponseError, Refund] + + def list(chargeId: Option[String] = None, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[Refund]] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/StripeSubscriptionItems.scala b/jvm/src/main/scala/com/harana/modules/stripe/StripeSubscriptionItems.scala new file mode 100644 index 0000000..9d807e2 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/StripeSubscriptionItems.scala @@ -0,0 +1,37 @@ +package com.harana.modules.stripe + +import com.outr.stripe.subscription.SubscriptionItem +import com.outr.stripe.{Deleted, QueryConfig, ResponseError, StripeList} +import zio.IO +import zio.macros.accessible + +@accessible +trait StripeSubscriptionItems { + def create(subscriptionId: String, + billingThresholds: Map[String, String] = Map(), + metadata: Map[String, String] = Map(), + paymentBehavior: Option[String] = None, + priceId: Option[String] = None, + prorationBehavior: Option[String] = None, + prorationDate: Option[Long] = None, + quantity: Option[Int] = None, + taxRates: List[String] = List()): IO[ResponseError, SubscriptionItem] + + def byId(subscriptionItemId: String): IO[ResponseError, SubscriptionItem] + + def update(subscriptionItemId: String, + billingThresholds: Map[String, String] = Map(), + metadata: Map[String, String] = Map(), + offSession: Option[Boolean] = None, + paymentBehavior: Option[String] = None, + priceId: Option[String] = None, + prorationBehavior: Option[String] = None, + prorationDate: Option[Long] = None, + quantity: Option[Int] = None, + taxRates: List[String] = List()): IO[ResponseError, SubscriptionItem] + + def delete(subscriptionItemId: String): IO[ResponseError, Deleted] + + def list(subscription: String, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[SubscriptionItem]] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/StripeSubscriptions.scala b/jvm/src/main/scala/com/harana/modules/stripe/StripeSubscriptions.scala new file mode 100644 index 0000000..8845267 --- /dev/null +++ 
b/jvm/src/main/scala/com/harana/modules/stripe/StripeSubscriptions.scala @@ -0,0 +1,46 @@ +package com.harana.modules.stripe + +import com.outr.stripe.subscription.{CreateSubscriptionItem, Subscription} +import com.outr.stripe.{QueryConfig, ResponseError, StripeList, TimestampFilter} +import zio.IO +import zio.macros.accessible + +@accessible +trait StripeSubscriptions { + def create(customerId: String, + items: List[CreateSubscriptionItem], + applicationFeePercent: Option[BigDecimal] = None, + coupon: Option[String] = None, + metadata: Map[String, String] = Map.empty, + prorate: Option[Boolean] = None, + quantity: Option[Int] = None, + source: Option[String] = None, + taxPercent: Option[BigDecimal] = None, + trialEnd: Option[Long] = None, + trialPeriodDays: Option[Int] = None): IO[ResponseError, Subscription] + + def byId(subscriptionId: String): IO[ResponseError, Subscription] + + def update(subscriptionId: String, + items: List[CreateSubscriptionItem] = List(), + applicationFeePercent: Option[BigDecimal] = None, + coupon: Option[String] = None, + metadata: Map[String, String] = Map.empty, + prorate: Option[Boolean] = None, + prorationDate: Option[Long] = None, + quantity: Option[Int] = None, + source: Option[String] = None, + taxPercent: Option[BigDecimal], + trialEnd: Option[Long] = None, + trialPeriodDays: Option[Int] = None): IO[ResponseError, Subscription] + + def cancel(customerId: String, + subscriptionId: String, + atPeriodEnd: Boolean = false): IO[ResponseError, Subscription] + + def list(created: Option[TimestampFilter] = None, + customer: Option[String] = None, + plan: Option[String] = None, + status: Option[String] = None, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[Subscription]] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/StripeTokens.scala b/jvm/src/main/scala/com/harana/modules/stripe/StripeTokens.scala new file mode 100644 index 0000000..98e1ace --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/StripeTokens.scala @@ -0,0 +1,17 @@ +package com.harana.modules.stripe + +import com.outr.stripe.ResponseError +import com.outr.stripe.charge.{BankAccount, Card, PII} +import com.outr.stripe.token.Token +import zio.IO +import zio.macros.accessible + +@accessible +trait StripeTokens { + def create(card: Option[Card] = None, + bankAccount: Option[BankAccount] = None, + pii: Option[PII] = None, + customerId: Option[String] = None): IO[ResponseError, Token] + + def byId(tokenId: String): IO[ResponseError, Token] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/StripeTransferReversals.scala b/jvm/src/main/scala/com/harana/modules/stripe/StripeTransferReversals.scala new file mode 100644 index 0000000..6e650e2 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/StripeTransferReversals.scala @@ -0,0 +1,25 @@ +package com.harana.modules.stripe + +import com.outr.stripe.transfer.TransferReversal +import com.outr.stripe.{Money, QueryConfig, ResponseError, StripeList} +import zio.IO +import zio.macros.accessible + +@accessible +trait StripeTransferReversals { + def create(transferId: String, + amount: Option[Money] = None, + description: Option[String] = None, + metadata: Map[String, String] = Map.empty, + refundApplicationFee: Boolean = false): IO[ResponseError, TransferReversal] + + def byId(transferId: String, transferReversalId: String): IO[ResponseError, TransferReversal] + + def update(transferId: String, + transferReversalId: String, + 
description: Option[String] = None, + metadata: Map[String, String] = Map.empty): IO[ResponseError, TransferReversal] + + def list(transferId: String, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[TransferReversal]] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/StripeTransfers.scala b/jvm/src/main/scala/com/harana/modules/stripe/StripeTransfers.scala new file mode 100644 index 0000000..ac1b26a --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/StripeTransfers.scala @@ -0,0 +1,33 @@ +package com.harana.modules.stripe + +import com.outr.stripe._ +import com.outr.stripe.transfer.Transfer +import zio.IO +import zio.macros.accessible + +@accessible +trait StripeTransfers { + def create(amount: Money, + currency: String, + destination: String, + applicationFee: Option[Money] = None, + description: Option[String] = None, + metadata: Map[String, String] = Map.empty, + sourceTransaction: Option[String] = None, + statementDescriptor: Option[String] = None, + sourceType: String = "card", + method: String = "standard"): IO[ResponseError, Transfer] + + def byId(transferId: String): IO[ResponseError, Transfer] + + def update(transferId: String, + description: Option[String] = None, + metadata: Map[String, String] = Map.empty): IO[ResponseError, Transfer] + + def list(created: Option[TimestampFilter] = None, + date: Option[TimestampFilter] = None, + destination: Option[String] = None, + recipient: Option[String] = None, + status: Option[String] = None, + config: QueryConfig = QueryConfig.default): IO[ResponseError, StripeList[Transfer]] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/StripeUI.scala b/jvm/src/main/scala/com/harana/modules/stripe/StripeUI.scala new file mode 100644 index 0000000..a61c1e2 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/StripeUI.scala @@ -0,0 +1,11 @@ +package com.harana.modules.stripe + +import zio.Task +import zio.macros.accessible + +@accessible +trait StripeUI { + def billingPortalUrl(customerId: String, returnUrl: String): Task[String] + + def createCheckoutSession(customerId: String, priceId: String, successUrl: String, cancelUrl: String): Task[String] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/stripe/package.scala b/jvm/src/main/scala/com/harana/modules/stripe/package.scala new file mode 100644 index 0000000..d4b7cf1 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/stripe/package.scala @@ -0,0 +1,15 @@ +package com.harana.modules + +import zio.{IO, ZIO} + +import scala.concurrent.Future + +package object stripe { + + def execute[E, A](output: Future[Either[E, A]]): IO[E, A] = + ZIO.succeed(output).flatMap { o => + ZIO.fromFuture { _ => + o + }.orDie.absolve + } +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/thumbnailator/LiveThumbnailator.scala b/jvm/src/main/scala/com/harana/modules/thumbnailator/LiveThumbnailator.scala new file mode 100644 index 0000000..cbc18de --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/thumbnailator/LiveThumbnailator.scala @@ -0,0 +1,44 @@ +package com.harana.modules.thumbnailator + +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.harana.modules.thumbnailator.streams.ByteBufferInputStream +import com.harana.modules.vertx.models.streams.VertxBufferOutputStream +import io.vertx.core.buffer.Buffer +import 
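A self-contained sketch of the `execute` helper defined in the `stripe` package object: it lifts the `Future[Either[E, A]]` results returned by the underlying client into a typed `IO[E, A]`, with `Left` values surfacing as ZIO errors and a failed `Future` treated as a defect via `orDie`. The futures below are synthetic, purely for illustration.

```scala
import com.harana.modules.stripe.execute
import zio.IO
import scala.concurrent.Future

// Right becomes a success, Left becomes a typed error.
val ok:  IO[String, Int] = execute(Future.successful(Right(42)))
val bad: IO[String, Int] = execute(Future.successful(Left("card_declined")))
```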
net.coobird.thumbnailator.Thumbnails +import zio.{Task, ZIO, ZLayer} + +import java.nio.ByteBuffer +import scala.jdk.CollectionConverters._ + +object LiveThumbnailator { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveThumbnailator(config, logger, micrometer) + } +} + +case class LiveThumbnailator(config: Config, logger: Logger, micrometer: Micrometer) extends Thumbnailator { + + def thumbnailAsVertxBuffer(byteBuffer: ByteBuffer, + width: Option[Int] = None, + height: Option[Int] = None, + keepAspectRatio: Boolean = true, + outputFormat: String = "JPEG"): Task[Buffer] = + ZIO.attempt { + val builder = Thumbnails.fromInputStreams(Seq(new ByteBufferInputStream(byteBuffer)).asJava) + val os = new VertxBufferOutputStream + + if (width.nonEmpty) builder.width(width.get) + if (height.nonEmpty) builder.height(height.get) + builder.keepAspectRatio(keepAspectRatio) + builder.outputFormat(outputFormat) + builder.toOutputStream(os) + os.buffer + } + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/thumbnailator/Thumbnailator.scala b/jvm/src/main/scala/com/harana/modules/thumbnailator/Thumbnailator.scala new file mode 100644 index 0000000..3282daf --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/thumbnailator/Thumbnailator.scala @@ -0,0 +1,17 @@ +package com.harana.modules.thumbnailator + +import io.vertx.core.buffer.Buffer +import zio.Task +import zio.macros.accessible + +import java.nio.ByteBuffer + +@accessible +trait Thumbnailator { + + def thumbnailAsVertxBuffer(byteBuffer: ByteBuffer, + width: Option[Int] = None, + height: Option[Int] = None, + keepAspectRatio: Boolean = true, + outputFormat: String = "JPEG"): Task[Buffer] +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/thumbnailator/streams/ByteBufferInputStream.scala b/jvm/src/main/scala/com/harana/modules/thumbnailator/streams/ByteBufferInputStream.scala new file mode 100644 index 0000000..a6482c8 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/thumbnailator/streams/ByteBufferInputStream.scala @@ -0,0 +1,49 @@ +package com.harana.modules.thumbnailator.streams + +import java.io.InputStream +import java.nio.ByteBuffer + +/** + * Wraps a ByteBuffer so it can be used with interfaces that require an InputStream. The + * buffer should not be modified outside of the reader until reading is complete. 
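A short usage sketch (illustration only) for the `Thumbnailator` service above: scale an in-memory image down to a 256px-wide JPEG and get the result back as a Vert.x `Buffer`. The `imageBytes` argument is a placeholder for real image data.

```scala
import com.harana.modules.thumbnailator.Thumbnailator
import io.vertx.core.buffer.Buffer
import zio.ZIO
import java.nio.ByteBuffer

// Wrap the raw bytes in a ByteBuffer and ask the service for a resized JPEG.
def thumbnail(imageBytes: Array[Byte]): ZIO[Thumbnailator, Throwable, Buffer] =
  ZIO.serviceWithZIO[Thumbnailator](
    _.thumbnailAsVertxBuffer(ByteBuffer.wrap(imageBytes), width = Some(256))
  )
```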
+ */ +class ByteBufferInputStream(buffer: ByteBuffer) extends InputStream { + + override def read(): Int = { + if (buffer.hasRemaining) buffer.get() else -1 + } + + override def read(buf: Array[Byte], offset: Int, length: Int): Int = { + if (buffer.hasRemaining) { + val readLength = math.min(buffer.remaining(), length) + buffer.get(buf, offset, readLength) + readLength + } else { + -1 + } + } + + override def available(): Int = { + buffer.remaining() + } + + override def skip(n: Long): Long = { + val skipAmount = math.min(buffer.remaining(), n).toInt + buffer.position(buffer.position() + skipAmount) + skipAmount + } + + override def markSupported(): Boolean = true + + override def mark(readlimit: Int): Unit = { + buffer.mark() + } + + override def reset(): Unit = { + buffer.reset() + } + + override def close(): Unit = { + buffer.flip() + } +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/vertx/Handlers.scala b/jvm/src/main/scala/com/harana/modules/vertx/Handlers.scala new file mode 100644 index 0000000..9650f2d --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/vertx/Handlers.scala @@ -0,0 +1,80 @@ +package com.harana.modules.vertx + +import io.vertx.core.buffer.Buffer +import io.vertx.core.json.JsonObject +import io.vertx.core.{AsyncResult, Handler, Vertx => VX} +import io.vertx.ext.bridge.PermittedOptions +import io.vertx.ext.web.RoutingContext +import io.vertx.ext.web.handler.sockjs.{SockJSBridgeOptions, SockJSHandler} +import io.vertx.ext.web.templ.handlebars.HandlebarsTemplateEngine +import org.jose4j.jwk.{JsonWebKey, JsonWebKeySet} +import org.pac4j.core.config.Config +import org.pac4j.core.context.session.SessionStore +import org.pac4j.core.exception.http.HttpAction +import org.pac4j.core.util.Pac4jConstants +import org.pac4j.http.client.indirect.FormClient +import org.pac4j.vertx.VertxWebContext +import org.pac4j.vertx.handler.impl.{LogoutHandler, LogoutHandlerOptions} + +import javax.ws.rs.core.{HttpHeaders, MediaType} + +object Handlers { + + def sock(vx: VX, inboundPermitted: List[String], outboundPermitted: List[String]) = { + val bridgeOptions = new SockJSBridgeOptions() + inboundPermitted.foreach(regex => bridgeOptions.addInboundPermitted(new PermittedOptions().setAddressRegex(regex))) + outboundPermitted.foreach(regex => bridgeOptions.addOutboundPermitted(new PermittedOptions().setAddressRegex(regex))) + SockJSHandler.create(vx).bridge(bridgeOptions) + } + + + def jwks(jwks: JsonWebKeySet): Handler[RoutingContext] = { + rc: RoutingContext => { + rc.response + .putHeader(HttpHeaders.CONTENT_TYPE, MediaType.APPLICATION_JSON) + .end(jwks.toJson(JsonWebKey.OutputControlLevel.PUBLIC_ONLY)) + } + } + + + def forceLogin(config: Config, sessionStore: SessionStore): Handler[RoutingContext] = { + rc: RoutingContext => { + val context = new VertxWebContext(rc, sessionStore) + try { + val client = config.getClients.findClient(context.getRequestParameter(Pac4jConstants.DEFAULT_CLIENT_NAME_PARAMETER).get) + val action = client.get.getRedirectionAction(context, sessionStore) + val adapter = config.getHttpActionAdapter + adapter.adapt(action.get, context) + } catch { + case h: HttpAction => rc.fail(h) + } + } + } + + + def loginForm(vx: VX, config: Config, templateFileName: String, parameters: Map[String, AnyRef]): Handler[RoutingContext] = { + rc: RoutingContext => { + val url = config.getClients.findClient("FormClient").get.asInstanceOf[FormClient].getCallbackUrl + template(vx, rc, templateFileName, parameters ++ Map("url" -> url)) + } + } + + + def 
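A tiny sketch (not part of the diff) of the `ByteBufferInputStream` wrapper in use: any `InputStream`-based API can consume a `ByteBuffer` directly, keeping in mind that reads advance the buffer's position. The bytes below are placeholders.

```scala
import com.harana.modules.thumbnailator.streams.ByteBufferInputStream
import java.nio.ByteBuffer

val buffer = ByteBuffer.wrap(Array[Byte](1, 2, 3))   // placeholder data
val in     = new ByteBufferInputStream(buffer)

val out  = new Array[Byte](3)
val read = in.read(out, 0, out.length)               // reads 3 bytes, advancing the buffer
```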
centralLogout(vx: VX, config: Config, sessionStore: SessionStore, postLogoutUrl: String): Handler[RoutingContext] = { + val options = new LogoutHandlerOptions().setCentralLogout(true).setLocalLogout(false).setDefaultUrl(postLogoutUrl) + new LogoutHandler(vx, sessionStore, options, config) + } + + + def template(vx: VX, rc: RoutingContext, templateFileName: String, parameters: Map[String, AnyRef]): Unit = { + val engine = HandlebarsTemplateEngine.create(vx) + val json = new JsonObject() + parameters.foreach { p => json.put(p._1, p._2) } + engine.render(json, templateFileName, new Handler[AsyncResult[Buffer]] { + override def handle(result: AsyncResult[Buffer]): Unit = { + if (result.succeeded()) rc.response.end(result.result()) else rc.fail(result.cause()) + } + }) + } + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/vertx/LiveVertx.scala b/jvm/src/main/scala/com/harana/modules/vertx/LiveVertx.scala new file mode 100644 index 0000000..71d9086 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/vertx/LiveVertx.scala @@ -0,0 +1,515 @@ +package com.harana.modules.vertx + +import com.harana.modules.core.app.App.runEffect +import com.harana.modules.core.config.Config +import com.harana.modules.core.logger.Logger +import com.harana.modules.core.micrometer.Micrometer +import com.harana.modules.vertx.gc.GCHealthCheck +import com.harana.modules.vertx.models._ +import com.harana.modules.vertx.proxy.{WSURI, WebProxyClient, WebProxyClientOptions} +import io.vertx.core.eventbus._ +import io.vertx.core.file.FileSystemOptions +import io.vertx.core.http.{HttpServer, HttpServerOptions, WebSocket} +import io.vertx.core.json.JsonObject +import io.vertx.core.net.{JksOptions, NetServer, NetServerOptions} +import io.vertx.core.shareddata.{AsyncMap, Counter, Lock} +import io.vertx.core.{AsyncResult, Context, Handler, VertxOptions, Vertx => VX} +import io.vertx.ext.bridge.{BridgeOptions, PermittedOptions} +import io.vertx.ext.eventbus.bridge.tcp.TcpEventBusBridge +import io.vertx.ext.web.client.{WebClient, WebClientOptions} +import io.vertx.ext.web.handler.{BodyHandler, CorsHandler, SessionHandler} +import io.vertx.ext.web.sstore.cookie.CookieSessionStore +import io.vertx.ext.web.templ.handlebars.HandlebarsTemplateEngine +import io.vertx.ext.web.{Router, RoutingContext} +import io.vertx.micrometer.{MicrometerMetricsOptions, PrometheusScrapingHandler, VertxPrometheusOptions} +import io.vertx.servicediscovery.{Record, ServiceDiscovery} +import io.vertx.spi.cluster.zookeeper.ZookeeperClusterManager +import org.jose4j.jwk.JsonWebKeySet +import org.pac4j.core.client.Clients +import org.pac4j.core.config.{Config => Pac4jConfig} +import org.pac4j.core.profile.UserProfile +import org.pac4j.vertx.context.session.VertxSessionStore +import org.pac4j.vertx.handler.impl._ +import org.pac4j.vertx.http.VertxHttpActionAdapter +import org.pac4j.vertx.{VertxProfileManager, VertxWebContext} +import zio.{Runtime, Task, UIO, Unsafe, ZIO, ZLayer} + +import java.io.File +import java.net.URI +import java.nio.file.Files +import java.util.Base64 +import java.util.concurrent.atomic.AtomicReference +import scala.collection.concurrent.{TrieMap, Map => ConcurrentMap} +import scala.compat.java8.FunctionConverters.asJavaFunction +import scala.compat.java8.OptionConverters._ +import scala.jdk.CollectionConverters._ + +object LiveVertx { + val layer = ZLayer { + for { + config <- ZIO.service[Config] + logger <- ZIO.service[Logger] + micrometer <- ZIO.service[Micrometer] + } yield LiveVertx(config, logger, 
micrometer) + } +} + +case class LiveVertx(config: Config, logger: Logger, micrometer: Micrometer) extends Vertx { + + System.setProperty("org.jboss.logging.provider", "log4j2") + System.setProperty("vertx.logger-delegate-factory-class-name", "io.vertx.core.logging.Log4j2LogDelegateFactory") + + private val vertxRef = new AtomicReference[VX](null) + private val serviceDiscoveryRef = new AtomicReference[Option[ServiceDiscovery]](None) + private val serviceDiscoveryListeners: ConcurrentMap[String, Record => Unit] = TrieMap.empty + + private def vertx(clustered: Boolean) = + for { + vertxBlockedThreads <- config.long("vertx.blockedThreadsCheckInterval", 10000L) + + zookeeperHost <- config.optSecret("zookeeper-host") + zookeeperPrefix <- config.optString("zookeeper.prefix") + + listenHost <- config.string("http.listenHost") + publicHost <- config.string("http.publicHost", sys.env.getOrElse("POD_IP", listenHost)) + eventBusPort <- config.int("http.eventBusPort", 10000) + + eventBusOptions = new EventBusOptions() + .setClusterPublicHost(publicHost) + .setClusterPublicPort(eventBusPort) + .setLogActivity(true) + + fileSystemOptions = new FileSystemOptions().setFileCachingEnabled(false) + + registry <- micrometer.registry + + clusterManager <- if (zookeeperHost.nonEmpty) ZIO.some { + val zkConfig = new JsonObject() + zkConfig.put("zookeeperHosts", zookeeperHost) + zkConfig.put("rootPath", zookeeperPrefix.map(p => s"$p.vertx").getOrElse("vertx")) + new ZookeeperClusterManager(zkConfig) + } else ZIO.none + + vertxOptions = new VertxOptions() + .setBlockedThreadCheckInterval(vertxBlockedThreads) + .setEventBusOptions(eventBusOptions) + .setFileSystemOptions(fileSystemOptions) + .setMetricsOptions(new MicrometerMetricsOptions() + .setMicrometerRegistry(registry) + .setPrometheusOptions(new VertxPrometheusOptions().setEnabled(true)).setEnabled(true) + ) + + vx <- ZIO.async { (cb: Task[VX] => Unit) => + if (clustered) + VX.clusteredVertx( + vertxOptions.setClusterManager(clusterManager.get), + (result: AsyncResult[VX]) => if (result.succeeded()) cb(ZIO.succeed(result.result)) else cb(ZIO.fail(result.cause())) + ) + else + cb(ZIO.succeed(VX.vertx(vertxOptions))) + } + } yield vx + + + private def serviceDiscovery: Task[ServiceDiscovery] = + for { + serviceDiscovery <- if (serviceDiscoveryRef.get.nonEmpty) ZIO.attempt(serviceDiscoveryRef.get.get) else ZIO.attempt(ServiceDiscovery.create(vertxRef.get())) + _ = serviceDiscoveryRef.set(Some(serviceDiscovery)) + } yield serviceDiscovery + + + def underlying: UIO[VX] = + ZIO.succeed(vertxRef.get) + + + def subscribe(address: Address, `type`: String, onMessage: String => Task[Unit]): Task[MessageConsumer[String]] = + for { + result <- ZIO.async { (cb: Task[MessageConsumer[String]] => Unit) => + val consumer = vertxRef.get().eventBus.consumer(address, (message: Message[String]) => { + if (message.headers().get("type").equals(`type`)) { + val body = if (message.body() == null) null else new String(Base64.getDecoder.decode(message.body())) + runEffect(onMessage(body)) + }} + ) + consumer.completionHandler((result: AsyncResult[Void]) => + if (result.succeeded()) cb(logger.debug(s"Subscribed to address: $address").as(consumer)) + else cb(logger.error(s"Failed to subscribe to address: $address") *> ZIO.fail(result.cause())) + ) + } + } yield result + + + def unsubscribe(consumer: MessageConsumer[String]): Task[Unit] = + for { + result <- ZIO.async { (cb: Task[Unit] => Unit) => + consumer.unregister((result: AsyncResult[Void]) => + if (result.succeeded()) 
cb(logger.debug(s"Unsubscribed from address: ${consumer.address()}").unit) + else cb(logger.error(s"Failed to unsubscribe from address: ${consumer.address()}") *> ZIO.fail(result.cause())) + ) + } + } yield result + + + def publishMessage(address: Address, `type`: String, message: String): Task[Unit] = + for { + m <- ZIO.attempt(Base64.getEncoder.encode(message.getBytes("UTF-8"))) + _ <- ZIO.attempt(vertxRef.get().eventBus.publish(address, new String(m), new DeliveryOptions().addHeader("type", `type`))) + } yield () + + + def publishMessage(address: Address, `type`: String): Task[Unit] = + ZIO.attempt(vertxRef.get().eventBus.send(address, null, new DeliveryOptions().addHeader("type", `type`))) + + + def sendMessage(address: Address, `type`: String, message: String): Task[Unit] = + for { + m <- ZIO.attempt(Base64.getEncoder.encode(message.getBytes("UTF-8"))) + _ <- ZIO.attempt(vertxRef.get().eventBus.send(address, new String(m), new DeliveryOptions().addHeader("type", `type`))) + } yield () + + + def sendMessage(address: Address, `type`: String): Task[Unit] = + ZIO.attempt(vertxRef.get().eventBus.send(address, null, new DeliveryOptions().addHeader("type", `type`))) + + + def service(name: String): Task[Option[Record]] = + for { + sd <- serviceDiscovery + fn = (record: Record) => Boolean.box(record.getName.equals(name)) + record <- ZIO.async { (cb: Task[Option[Record]] => Unit) => + sd.getRecord(asJavaFunction(fn), (result: AsyncResult[Record]) => + if (result.succeeded()) cb(ZIO.succeed(Option(result.result()))) else cb(ZIO.fail(result.cause())) + ) + } + } yield record + + + def services(filters: Map[String, String]): Task[List[Record]] = + for { + sd <- serviceDiscovery + json = new JsonObject() + _ = filters.foreach { case (k, v) => json.put(k, v) } + record <- ZIO.async { (cb: Task[List[Record]] => Unit) => + sd.getRecords(json, (result: AsyncResult[java.util.List[Record]]) => + if (result.succeeded()) cb(ZIO.succeed(result.result().asScala.toList)) else cb(ZIO.fail(result.cause())) + ) + } + } yield record + + + def registerServiceListener(name: String, onChange: Record => Unit): UIO[Unit] = + ZIO.succeed(serviceDiscoveryListeners.put(name, onChange)) + + + def deregisterServiceListener(name: String): UIO[Unit] = + ZIO.succeed(serviceDiscoveryListeners.remove(name)) + + + def lock(name: String): Task[Lock] = + ZIO.async { (cb: Task[Lock] => Unit) => + vertxRef.get().sharedData().getLock(name, (result: AsyncResult[Lock]) => + if (result.succeeded()) cb(ZIO.succeed(result.result())) else cb(ZIO.fail(result.cause())) + ) + } + + + def lockWithTimeout(name: String, timeoutSeconds: String, onLock: Lock => Task[Unit]): Task[Lock] = + ZIO.async { (cb: Task[Lock] => Unit) => + vertxRef.get().sharedData().getLock(name, (result: AsyncResult[Lock]) => + if (result.succeeded()) cb(ZIO.succeed(result.result())) else cb(ZIO.fail(result.cause())) + ) + } + + + def getCounter(name: String): Task[Counter] = + ZIO.async { (cb: Task[Counter] => Unit) => + vertxRef.get().sharedData().getCounter(name, (result: AsyncResult[Counter]) => + if (result.succeeded()) cb(ZIO.succeed(result.result())) else cb(ZIO.fail(result.cause())) + ) + } + + + private def withMap[K, V, X](name: String, fn: (AsyncMap[K, V], Handler[AsyncResult[X]]) => Unit): Task[X] = + for { + map <- getMap[K, V](name) + result <- ZIO.async { (cb: Task[X] => Unit) => + fn(map, result => if (result.succeeded()) cb(ZIO.succeed(result.result())) else cb(ZIO.fail(result.cause()))) + } + } yield result + + + def clearMap[K, V](name: String): 
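A usage sketch (illustration only) of the event-bus surface shown above: subscribe to an address/type pair and publish a payload to it. The address and payload are placeholders, `Address` is assumed to be the module's plain `String` alias, and Base64 encoding/decoding of the payload is handled inside the module, so callers work with plain strings.

```scala
import com.harana.modules.vertx.Vertx
import zio.ZIO

// Register a consumer for ("jobs", "created") messages, then publish one.
val pubSub: ZIO[Vertx, Throwable, Unit] =
  ZIO.serviceWithZIO[Vertx] { vertx =>
    vertx.subscribe("jobs", "created", body => ZIO.debug(s"received: $body")) *>
      vertx.publishMessage("jobs", "created", """{"id": 1}""")
  }
```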
Task[Unit] = + withMap[K, V, Void](name, (map, handler) => map.clear(handler)).unit + + + def getMap[K, V](name: String): Task[AsyncMap[K, V]] = + ZIO.async { (cb: Task[AsyncMap[K, V]] => Unit) => + vertxRef.get().sharedData().getAsyncMap[K, V](name, (result: AsyncResult[AsyncMap[K, V]]) => + if (result.succeeded()) cb(ZIO.succeed(result.result())) else cb(ZIO.fail(result.cause())) + ) + } + + + def getMapKeys[K, V](name: String): Task[Set[K]] = + withMap[K, V, java.util.Set[K]](name, (map, handler) => map.keys(handler)).map(_.asScala.toSet) + + + def getMapValues[K, V](name: String): Task[List[V]] = + withMap[K, V, java.util.List[V]](name, (map, handler) => map.values(handler)).map(_.asScala.toList) + + + def getMapValue[K, V](name: String, key: K): Task[Option[V]] = + withMap[K, V, V](name, (map, handler) => map.get(key, handler)).map(Option.apply) + + + def putMapValue[K, V](name: String, key: K, value: V, ttl: Option[Long] = None): Task[Unit] = + withMap[K, V, Void](name, (map, handler) => if (ttl.nonEmpty) map.put(key, value, ttl.get, handler) else map.put(key, value, handler)).unit + + + def removeMapValue[K, V](name: String, key: K): Task[Unit] = + withMap[K, V, Void](name, (map, _) => map.remove(key)).unit + + + def putMapValueIfAbsent[K, V](name: String, key: K, value: V, ttl: Option[Long] = None): Task[V] = + withMap[K, V, V](name, (map, handler) => if (ttl.nonEmpty) map.putIfAbsent(key, value, ttl.get, handler) else map.putIfAbsent(key, value, handler)) + + + def getOrCreateContext: UIO[Context] = + ZIO.succeed(vertxRef.get().getOrCreateContext()) + + + def close: Task[Unit] = + ZIO.attempt(vertxRef.get().close()) + + + def eventBus: UIO[EventBus] = + ZIO.succeed(vertxRef.get().eventBus()) + + + def startHttpServer(domain: String, + proxyDomain: Option[String] = None, + routes: List[Route] = List(), + clustered: Boolean = false, + defaultHandler: Option[RouteHandler] = None, + proxyMapping: Option[RoutingContext => Task[Option[URI]]] = None, + webSocketProxyMapping: Option[WebSocketHeaders => Task[WSURI]] = None, + errorHandlers: Map[Int, RoutingContext => Task[Response]] = Map(), + eventBusInbound: List[String] = List(), + eventBusOutbound: List[String] = List(), + authTypes: List[AuthType] = List(), + additionalAllowedHeaders: Set[String] = Set(), + postLogin: Option[(RoutingContext, Option[UserProfile]) => Task[Response]] = None, + sessionRegexp: Option[String] = None, + jwtKeySet: Option[JsonWebKeySet] = None, + logActivity: Boolean = false): Task[HttpServer] = + for { + useSSL <- config.boolean("http.useSSL", default = false) + publicSSL <- config.boolean("http.publicSSL", default = true) + listenHost <- config.string("http.listenHost", "127.0.0.1") + listenPort <- config.int("http.listenPort", 8082) + publicHost <- config.string("http.publicHost", listenHost) + publicPort <- config.int("http.publicPort", if (publicSSL) 443 else 80) + keyStorePath <- config.optString("http.keyStorePath") + keyStorePassword <- config.optPassword("http.keyStorePassword") + proxyTimeout <- config.long("http.proxyTimeout", 24 * 60 * 60) + uploadsDirectory <- config.path("http.uploadsDirectory", Files.createTempDirectory("harana")) + + publicUrl = if (publicSSL) s"""https://$domain${if (!publicPort.equals(443)) s":$publicPort" else ""}""" else s"""http://$domain${if (!publicPort.equals(80)) s":$publicPort" else ""}""" + + vx <- vertx(clustered) + _ = vertxRef.set(vx) + + router <- ZIO.succeed(Router.router(vx)) + +// FIXME: What is this for ? 
+// _ = router.route().handler((rc: RoutingContext) => { +// rc.request().pause() +// rc.next() +// }) + + clusteredStore <- ZIO.attempt(CookieSessionStore.create(vx, "temp")) + sessionStore <- ZIO.attempt(new VertxSessionStore(clusteredStore)) + sessionHandler <- ZIO.attempt(SessionHandler.create(clusteredStore)) + templateEngine <- ZIO.attempt(HandlebarsTemplateEngine.create(vx)) + webClient <- ZIO.attempt(WebClient.create(vx, new WebClientOptions().setFollowRedirects(false).setMaxRedirects(1))) + httpClient <- ZIO.attempt(vx.createHttpClient()) + + _ <- ZIO.attempt { + // Custom Routes + routes.foreach { route => + + def handler(rc: RoutingContext): Unit = + generateResponse(vx, logger, micrometer, templateEngine, uploadsDirectory, rc, route.handler, route.secured) + + route.handler match { + case RouteHandler.Standard(_) | RouteHandler.FileUpload(_) => router.route().handler(BodyHandler.create()) + case RouteHandler.Stream(_) => router.route().handler(rc => rc.request().pause()) + } + + if (route.regex) { + if (route.blocking) + router.routeWithRegex(route.method, route.path).virtualHost(domain).blockingHandler(handler) + else + router.routeWithRegex(route.method, route.path).virtualHost(domain).handler(handler) + } + else { + val customRoute = + if (route.blocking) + router.route(route.method, route.path).virtualHost(domain).blockingHandler(handler).useNormalizedPath(route.normalisedPath) + else + router.route(route.method, route.path).virtualHost(domain).handler(handler).useNormalizedPath(route.normalisedPath) + + if (route.consumes.nonEmpty) customRoute.consumes(route.consumes.get.value) + if (route.produces.nonEmpty) customRoute.produces(route.produces.get.value) + } + } + + // Common + //router.route(HttpMethod.POST, "/eventbus").handler(BodyHandler.create()) + //router.route(HttpMethod.PUT, "/eventbus").handler(BodyHandler.create()) + router.mountSubRouter("/eventbus", Handlers.sock(vx, eventBusInbound, eventBusOutbound)) + router.get("/metrics").handler(PrometheusScrapingHandler.create()) + router.get("/health").handler(rc => { + val response = rc.response.putHeader("content-type", "text/plain") + if (GCHealthCheck.current.isHealthy) + response.setStatusCode(200).end("HEALTHY") + else + response.setStatusCode(503).end("UNHEALTHY") + }) + router.get("/ready").handler(rc => rc.response.putHeader("content-type", "text/plain").setStatusCode(200).end("READY")) + + // Public + // FIXME - Use StaticHandler in Production + router.get("/public/*").handler((rc: RoutingContext) => { + val path = s"${System.getProperty("user.dir")}/src/main/resources${rc.request().uri}" + sendFile(new File(path), vx, rc) + }) + + // CORS + router.route().handler(CorsHandler.create(".*.") + .allowCredentials(true) + .allowedHeaders((defaultAllowedHeaders ++ additionalAllowedHeaders).asJava) + .allowedMethods(defaultAllowedMethods.asJava)) + + // Auth + if (authTypes.nonEmpty) { + val clients = authTypes.map(AuthType.getClient(vx, publicUrl, _)) + val authConfig = new Pac4jConfig(new Clients(publicUrl + "/callback", clients: _*)) + authConfig.setHttpActionAdapter(new VertxHttpActionAdapter()) + + val callbackHandlerOptions = new CallbackHandlerOptions().setDefaultUrl("/postLogin").setMultiProfile(true) + val callbackHandler = new CallbackHandler(vx, sessionStore, authConfig, callbackHandlerOptions) + + if (sessionRegexp.nonEmpty) router.routeWithRegex(sessionRegexp.get).handler(sessionHandler) + router.route.handler(sessionHandler) + + if (jwtKeySet.nonEmpty) 
router.get("/jwks").handler(Handlers.jwks(jwtKeySet.get)) + router.get("/callback").handler(callbackHandler) + router.post("/callback").handler(BodyHandler.create().setMergeFormAttributes(true)) + router.post("/callback").handler(callbackHandler) + router.get("/login").handler(Handlers.loginForm(vx, authConfig, "public/login.hbs", Map())) + router.get("/forceLogin").handler(Handlers.forceLogin(authConfig, sessionStore)) + router.get("/confirm").handler(Handlers.loginForm(vx, authConfig, "public/login.hbs", Map())) + router.get("/logout").handler(new LogoutHandler(vx, sessionStore, new LogoutHandlerOptions(), authConfig)) + router.get("/centralLogout").handler(Handlers.centralLogout(vx, authConfig, sessionStore, publicUrl)) + router.get("/postLogin").handler(rc => { + val profileManager = new VertxProfileManager(new VertxWebContext(rc, sessionStore), sessionStore) + val postLoginHandler = postLogin.get.apply(_, profileManager.getProfile.asScala) + generateResponse(vx, logger, micrometer, templateEngine, uploadsDirectory, rc, RouteHandler.Standard(postLoginHandler)) + }) + } + + // Proxy + if (proxyDomain.nonEmpty && proxyMapping.nonEmpty) { + val client = new WebProxyClient(webClient, WebProxyClientOptions(iFrameAncestors = List(domain, proxyDomain.get))) + router.route().virtualHost(proxyDomain.get).blockingHandler(rc => + runEffect(proxyMapping.get(rc)) match { + case Some(uri) => client.execute(rc, "/*", uri) + case None => rc.response.end() + } + ) + } + + // Errors + router.route.failureHandler((rc: RoutingContext) => { + val response = rc.response + errorHandlers.get(response.getStatusCode) match { + case Some(r) => generateResponse(vx, logger, micrometer, templateEngine, uploadsDirectory, rc, RouteHandler.Standard(r)) + case None => if (!response.closed() && !response.ended()) response.end() + } + }) + + // Default handler + if (defaultHandler.nonEmpty) + router.route.handler(rc => generateResponse(vx, logger, micrometer, templateEngine, uploadsDirectory, rc, defaultHandler.get)) + + router + } + + options <- ZIO.succeed { + var httpServerOptions = new HttpServerOptions() + .setCompressionSupported(true) + .setDecompressionSupported(true) + .setLogActivity(logActivity) + .setHandle100ContinueAutomatically(true) + .setHost(listenHost) + .setMaxHeaderSize(1024 * 16) + .setPort(listenPort) + .setSsl(useSSL) + .setUseAlpn(getVersion >= 9) + + if (keyStorePath.nonEmpty) httpServerOptions = httpServerOptions.setKeyStoreOptions( + new JksOptions().setPath(keyStorePath.get).setPassword(keyStorePassword.get) + ) + + httpServerOptions + } + + httpServer <- ZIO.async { (cb: Task[HttpServer] => Unit) => + vx.createHttpServer(options) + .requestHandler(router) + .webSocketHandler(sourceSocket => { + if (webSocketProxyMapping.nonEmpty && !sourceSocket.uri().startsWith("/eventbus")) { + val target = runEffect(webSocketProxyMapping.get(sourceSocket.headers())) + httpClient.webSocket(target.port, target.host, sourceSocket.uri(), (connection: AsyncResult[WebSocket]) => { + if (connection.succeeded()) { + val targetSocket = connection.result() + syncSockets(sourceSocket, targetSocket) + } else { + logger.warn(s"Failed to connect to backend WS: $target") + } + }) + } + }) + .listen(listenPort, listenHost, (result: AsyncResult[HttpServer]) => + if (result.succeeded()) + cb( + ( + logger.info(s"Started HTTP server on $listenHost:$listenPort") *> + logger.info(s"Routes: ${router.getRoutes.asScala.map(_.getPath).mkString(", ")}") + ).as(result.result()) + ) + else + cb(logger.error(s"Failed to start HTTP 
server on $listenHost:$listenPort") *> ZIO.fail(result.cause())) + ) + } + + } yield httpServer + + + def startNetServer(listenHost: String, listenPort: Int, options: Option[NetServerOptions] = None): Task[NetServer] = + ZIO.async { (cb: Task[NetServer] => Unit) => + vertxRef.get().createNetServer().listen(listenPort, listenHost, (result: AsyncResult[NetServer]) => + if (result.succeeded()) cb(ZIO.attempt(result.result())) else cb(ZIO.fail(result.cause()))) + } + + + def startTcpEventBusServer(listenHost: String, listenPort: Int, inAddressRegex: String, outAddressRegex: String): Task[Unit] = + ZIO.async { (cb: Task[Unit] => Unit) => + TcpEventBusBridge.create(vertxRef.get(), new BridgeOptions() + .addInboundPermitted(new PermittedOptions().setAddressRegex(inAddressRegex)) + .addOutboundPermitted(new PermittedOptions().setAddressRegex(outAddressRegex))) + .listen(listenPort, listenHost, (result: AsyncResult[TcpEventBusBridge]) => + if (result.succeeded()) cb(ZIO.succeed(result.result())) else cb(ZIO.fail(result.cause()))) + } +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/vertx/Vertx.scala b/jvm/src/main/scala/com/harana/modules/vertx/Vertx.scala new file mode 100644 index 0000000..28f1f6c --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/vertx/Vertx.scala @@ -0,0 +1,72 @@ +package com.harana.modules.vertx + +import com.harana.modules.vertx.models._ +import com.harana.modules.vertx.proxy.WSURI +import io.vertx.core.eventbus.{EventBus, MessageConsumer} +import io.vertx.core.http.HttpServer +import io.vertx.core.net.{NetServer, NetServerOptions} +import io.vertx.core.shareddata.{AsyncMap, Counter, Lock} +import io.vertx.core.{Context, MultiMap, Vertx => VX} +import io.vertx.ext.web.RoutingContext +import io.vertx.servicediscovery.Record +import org.jose4j.jwk.JsonWebKeySet +import org.pac4j.core.profile.UserProfile +import zio.macros.accessible +import zio.{Task, UIO} + +import java.net.URI + +@accessible +trait Vertx { + + def subscribe(address: Address, `type`: String, onMessage: String => Task[Unit]): Task[MessageConsumer[String]] + def unsubscribe(consumer: MessageConsumer[String]): Task[Unit] + def publishMessage(address: Address, messageType: String, payload: String): Task[Unit] + def publishMessage(address: Address, `type`: String): Task[Unit] + def sendMessage(address: Address, `type`: String, message: String): Task[Unit] + def sendMessage(address: Address, `type`: String): Task[Unit] + + def service(name: String): Task[Option[Record]] + def services(filters: Map[String, String]): Task[List[Record]] + def registerServiceListener(name: String, onChange: Record => Unit): UIO[Unit] + def deregisterServiceListener(name: String): UIO[Unit] + + def lock(name: String): Task[Lock] + def lockWithTimeout(name: String, timeoutSeconds: String, onLock: Lock => Task[Unit]): Task[Lock] + + def getCounter(name: String): Task[Counter] + + def clearMap[K, V](name: String): Task[Unit] + def getMap[K, V](name: String): Task[AsyncMap[K, V]] + def getMapKeys[K, V](name: String): Task[Set[K]] + def getMapValues[K, V](name: String): Task[List[V]] + def getMapValue[K, V](name: String, key: K): Task[Option[V]] + def putMapValue[K, V](name: String, key: K, value: V, ttl: Option[Long] = None): Task[Unit] + def removeMapValue[K, V](name: String, key: K): Task[Unit] + def putMapValueIfAbsent[K, V](name: String, key: K, value: V, ttl: Option[Long] = None): Task[V] + + def getOrCreateContext: UIO[Context] + def close: Task[Unit] + + def eventBus: UIO[EventBus] + def 
startHttpServer(domain: String, + proxyDomain: Option[String] = None, + routes: List[Route] = List(), + clustered: Boolean = false, + defaultHandler: Option[RouteHandler] = None, + proxyMapping: Option[RoutingContext => Task[Option[URI]]] = None, + webSocketProxyMapping: Option[WebSocketHeaders => Task[WSURI]] = None, + errorHandlers: Map[Int, RoutingContext => Task[Response]] = Map(), + eventBusInbound: List[String] = List(), + eventBusOutbound: List[String] = List(), + authTypes: List[AuthType] = List(), + additionalAllowedHeaders: Set[String] = Set(), + postLogin: Option[(RoutingContext, Option[UserProfile]) => Task[Response]] = None, + sessionRegexp: Option[String] = None, + jwtKeySet: Option[JsonWebKeySet] = None, + logActivity: Boolean = false): Task[HttpServer] + def startNetServer(listenHost: String, listenPort: Int, options: Option[NetServerOptions] = None): Task[NetServer] + def startTcpEventBusServer(listenHost: String, listenPort: Int, inAddressRegex: String, outAddressRegex: String): Task[Unit] + def underlying: UIO[VX] + +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/vertx/VertxUtils.scala b/jvm/src/main/scala/com/harana/modules/vertx/VertxUtils.scala new file mode 100644 index 0000000..cbc77b9 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/vertx/VertxUtils.scala @@ -0,0 +1,37 @@ +package com.harana.modules.vertx + +import io.vertx.core.buffer.Buffer +import io.vertx.core.http.HttpHeaders +import io.vertx.core.streams.Pump +import io.vertx.ext.reactivestreams.ReactiveWriteStream +import io.vertx.ext.web.RoutingContext +import org.reactivestreams.{Subscriber, Subscription} +import zio._ + +import scala.collection.mutable.ArrayBuffer + +object VertxUtils { + + def streamToString(rc: RoutingContext, stream: ReactiveWriteStream[Buffer], streamPump: Pump) = + ZIO.async((cb: Task[String] => Unit) => stream.subscribe(new Subscriber[Buffer] { + val bytes = ArrayBuffer.empty[Byte] + var remaining = rc.request().getHeader(HttpHeaders.CONTENT_LENGTH).toLong + println("Waiting for subscription") + + def onSubscribe(s: Subscription) = { + println("Subscribed to stream .. starting pump") + streamPump.start() + s.request(remaining) + } + def onNext(t: Buffer) = { + bytes.addAll(t.getBytes) + remaining -= t.length() + if (remaining == 0) onComplete() + } + def onError(t: Throwable) = cb(ZIO.succeed(streamPump.stop()) *> ZIO.fail(t)) + def onComplete() = cb({ + println("Completed stream .. 
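A minimal sketch (not part of the diff) of starting the HTTP server through the `Vertx` trait: only the domain and the event-bus permitted-address regexes are supplied here, and they are placeholders; every other parameter falls back to its default from the signature above.

```scala
import com.harana.modules.vertx.Vertx
import io.vertx.core.http.HttpServer
import zio.ZIO

// Start a non-clustered server on the configured listen host/port, allowing
// "client.*" inbound and "server.*" outbound addresses over the SockJS bridge.
val server: ZIO[Vertx, Throwable, HttpServer] =
  ZIO.serviceWithZIO[Vertx](
    _.startHttpServer(
      domain           = "app.example.com",
      eventBusInbound  = List("client\\..*"),
      eventBusOutbound = List("server\\..*")
    )
  )
```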
") + ZIO.succeed(streamPump.stop()) *> ZIO.attempt(new String(bytes.toArray)) + }) + })) +} diff --git a/jvm/src/main/scala/com/harana/modules/vertx/gc/GCDetails.scala b/jvm/src/main/scala/com/harana/modules/vertx/gc/GCDetails.scala new file mode 100644 index 0000000..b66be06 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/vertx/gc/GCDetails.scala @@ -0,0 +1,9 @@ +package com.harana.modules.vertx.gc + +case class GCDetails(maxPercent: Int = 0, + gcTimeInPercent: Double = 0.0, + accessTimeMillis: Long = 0L) { + + def isHealthy: Boolean = + gcTimeInPercent <= maxPercent +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/vertx/gc/GCHealthCheck.scala b/jvm/src/main/scala/com/harana/modules/vertx/gc/GCHealthCheck.scala new file mode 100644 index 0000000..ceda9b1 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/vertx/gc/GCHealthCheck.scala @@ -0,0 +1,50 @@ +package com.harana.modules.vertx.gc + +import java.lang.management.ManagementFactory +import scala.jdk.CollectionConverters._ + +object GCHealthCheck { + + private val PERCENT_OF_PERCENT_FACTOR = 10000 + private val PERCENT_FACTOR = 100.0 + + private var lastTimeAccessed = 0L + private var lastCollectionTime = 0L + + private val maxPercent = 20 + + /** + * The HealthCheck does not make sense when calling healthCheck.current() within a very short interval. + * In that case, it will most likely be in failed state (100% GC). + * The checks should be executed with some delay, to get reasonable percentage values. + * + * @return The current garbage collection details + */ + def current: GCDetails = { + val collectionTime = getCollectionTimeMillis + val accessTimeMillis = System.currentTimeMillis + updateTimeStamps(collectionTime, accessTimeMillis) + + GCDetails(maxPercent, getGCTimeInPercent(collectionTime), accessTimeMillis) + } + + private def getGCTimeInPercent(collectionTime: Long): Double = { + if (lastTimeAccessed == 0) return 0 + val timeSinceLastAccessed = System.currentTimeMillis - lastTimeAccessed + if (timeSinceLastAccessed <= 0) return 0 + val thisCollectionTime = collectionTime - lastCollectionTime + val gcTimeInPercentOfPercents = thisCollectionTime * PERCENT_OF_PERCENT_FACTOR / timeSinceLastAccessed + gcTimeInPercentOfPercents / PERCENT_FACTOR + } + + private def updateTimeStamps(collectionTimeMillis: Long, accesTimeMillis: Long): Unit = { + lastCollectionTime = collectionTimeMillis + lastTimeAccessed = accesTimeMillis + } + + private def getCollectionTimeMillis = + ManagementFactory.getGarbageCollectorMXBeans.asScala + .map(_.getCollectionTime) + .filter(_ != -1) + .sum +} diff --git a/jvm/src/main/scala/com/harana/modules/vertx/models/AuthType.scala b/jvm/src/main/scala/com/harana/modules/vertx/models/AuthType.scala new file mode 100644 index 0000000..1d5c42d --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/vertx/models/AuthType.scala @@ -0,0 +1,181 @@ +package com.harana.modules.vertx.models + +import io.vertx.core.{Vertx => VX} +import org.pac4j.cas.client.CasClient +import org.pac4j.cas.config.CasConfiguration +import org.pac4j.core.client.Client +import org.pac4j.core.credentials.authenticator.Authenticator +import org.pac4j.core.profile.creator.ProfileCreator +import org.pac4j.http.client.direct.{DirectBasicAuthClient, ParameterClient} +import org.pac4j.http.client.indirect.FormClient +import org.pac4j.http.credentials.authenticator.test.SimpleTestUsernamePasswordAuthenticator +import org.pac4j.jwt.config.signature.SecretSignatureConfiguration +import 
org.pac4j.jwt.credentials.authenticator.JwtAuthenticator +import org.pac4j.oauth.client.Google2Client.Google2Scope +import org.pac4j.oauth.client.QQClient.QQScope +import org.pac4j.oauth.client.WechatClient.WechatScope +import org.pac4j.oauth.client.WeiboClient.WeiboScope +import org.pac4j.oauth.client._ +import org.pac4j.oidc.client.OidcClient +import org.pac4j.oidc.config.OidcConfiguration +import org.pac4j.saml.client.SAML2Client +import org.pac4j.saml.config.SAML2Configuration + +import java.io.File +import scala.jdk.CollectionConverters._ + +sealed trait AuthType +object AuthType { + + case class Bitbucket(key: String, secret: String) extends AuthType + case class Dropbox(key: String, secret: String) extends AuthType + case class Facebook(key: String, secret: String, scope: Option[String] = None) extends AuthType + case class Github(key: String, secret: String, scope: Option[String] = None) extends AuthType + case class Google(key: String, secret: String, scope: Option[Google2Scope] = None) extends AuthType + case class HiOrg(key: String, secret: String) extends AuthType + case class Linkedin(key: String, secret: String, scope: Option[String] = None) extends AuthType + case class Odnoklassniki(key: String, secret: String, scope: Option[String] = None) extends AuthType + case class Paypal(key: String, secret: String, scope: Option[String] = None) extends AuthType + case class QQ(key: String, secret: String, scope: List[QQScope] = List()) extends AuthType + case class Strava(key: String, secret: String, scope: Option[String] = None) extends AuthType + case class Twitter(key: String, secret: String, includeEmail: Boolean) extends AuthType + case class Vk(key: String, secret: String, scope: Option[String] = None) extends AuthType + case class Wechat(key: String, secret: String, scopes: List[WechatScope] = List()) extends AuthType + case class Weibo(key: String, secret: String, scope: Option[WeiboScope] = None) extends AuthType + case class WindowsLive(key: String, secret: String) extends AuthType + case class Wordpress(key: String, secret: String) extends AuthType + case class Yahoo(key: String, secret: String) extends AuthType + + case class Basic(authenticator: Option[Authenticator]) extends AuthType + + case class CAS(url: String) extends AuthType + + case class Form(loginFormUrl: String, + authenticator: Authenticator, + profileCreator: ProfileCreator) extends AuthType + + case class JWT(salt: String) extends AuthType + + case class OIDC(clientId: String, + secret: String, + discoveryUri: String, + customParams: Map[String, String] = Map()) extends AuthType + + case class SAML(keystore: File, + keystorePassword: String, + privateKeyPassword: String, + identityProviderMetadataResource: String, + maximumAuthenticationLifetime: Int, + serviceProviderEntityId: String, + serviceProviderMetadata: File) extends AuthType + + def getClient(vx: VX, baseUrl: String, authType: AuthType): Client = + authType match { + case AuthType.Bitbucket(key, secret) => + new BitbucketClient(key, secret) + + case AuthType.Dropbox(key, secret) => + new DropBoxClient(key, secret) + + case AuthType.Facebook(key, secret, scope) => + val client = new FacebookClient(key, secret) + client.setScope(scope.orNull) + client + + case AuthType.Github(key, secret, scope) => + val client = new GitHubClient(key, secret) + client.setScope(scope.orNull) + client + + case AuthType.Google(key, secret, scope) => + val client = new Google2Client(key, secret) + client.setScope(scope.orNull) + client + + case AuthType.HiOrg(key, 
secret) => + new HiOrgServerClient(key, secret) + + case AuthType.Linkedin(key, secret, scope) => + val client = new LinkedIn2Client(key, secret) + client.setScope(scope.orNull) + client + + case AuthType.Odnoklassniki(key, secret, publicKey) => + new OkClient(key, secret, publicKey.orNull) + + case AuthType.Paypal(key, secret, scope) => + val client = new PayPalClient(key, secret) + client.setScope(scope.orNull) + client + + case AuthType.QQ(key, secret, scopes) => + val client = new QQClient(key, secret) + client.setScopes(scopes.asJava) + client + + case AuthType.Strava(key, secret, scope) => + val client = new StravaClient(key, secret) + client.setScope(scope.orNull) + client + + case AuthType.Twitter(key, secret, includeEmail) => + new TwitterClient(key, secret, includeEmail) + + case AuthType.Vk(key, secret, scope) => + val client = new VkClient(key, secret) + client.setScope(scope.orNull) + client + + case AuthType.Wechat(key, secret, scopes) => + val client = new WechatClient(key, secret) + client.setScopes(scopes.asJava) + client + + case AuthType.Weibo(key, secret, scope) => + val client = new WeiboClient(key, secret) + client.setScope(scope.orNull) + client + + case AuthType.WindowsLive(key, secret) => + new WindowsLiveClient(key, secret) + + case AuthType.Wordpress(key, secret) => + new WordPressClient(key, secret) + + case AuthType.Yahoo(key, secret) => + new YahooClient(key, secret) + + case AuthType.Basic(authenticator) => + new DirectBasicAuthClient(authenticator.getOrElse(new SimpleTestUsernamePasswordAuthenticator())) + + case AuthType.CAS(url) => + val cfg = new CasConfiguration(url) +// cfg.setLogoutHandler(new LogoutHandler(vx, new VertxLocalMapStore[String, AnyRef](vx), false)) + new CasClient(cfg) + + case AuthType.Form(loginFormUrl, authenticator, profileCreator) => + new FormClient(loginFormUrl, authenticator, profileCreator) + + case AuthType.JWT(salt) => + val parameterClient = new ParameterClient("token", new JwtAuthenticator(new SecretSignatureConfiguration(salt))) + parameterClient.setSupportGetRequest(true) + parameterClient.setSupportPostRequest(false) + parameterClient + + case AuthType.OIDC(clientId, secret, discoveryUri, customParams) => + val cfg = new OidcConfiguration + cfg.setClientId(clientId) + cfg.setSecret(secret) + cfg.setDiscoveryURI(discoveryUri) + customParams.foreach { case (k, v) => cfg.addCustomParam(k, v)} + new OidcClient(cfg) + + case AuthType.SAML(keystore, keystorePassword, privateKeyPassword, identityProviderMetadataResource, + maximumAuthenticationLifetime, serviceProviderEntityId, serviceProviderMetadata) => + val cfg = new SAML2Configuration(keystore.getAbsolutePath, keystorePassword, privateKeyPassword, identityProviderMetadataResource) + cfg.setMaximumAuthenticationLifetime(maximumAuthenticationLifetime) + cfg.setServiceProviderEntityId(serviceProviderEntityId) + cfg.setServiceProviderMetadataPath(serviceProviderMetadata.getAbsolutePath) + new SAML2Client(cfg) + } +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/vertx/models/ContentType.scala b/jvm/src/main/scala/com/harana/modules/vertx/models/ContentType.scala new file mode 100644 index 0000000..f2209ab --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/vertx/models/ContentType.scala @@ -0,0 +1,15 @@ +package com.harana.modules.vertx.models + +final case class ContentType(value: String) extends AnyVal { + override def toString: String = value +} + +object ContentType { + val Plain = ContentType("text/plain") + val HTML = 
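For illustration, constructing one of these auth types and converting it with `getClient` could look like the following; the credentials and URL are placeholders, and this is presumably the same conversion `startHttpServer` applies to its `authTypes` parameter.

```scala
import com.harana.modules.vertx.models.AuthType
import io.vertx.core.{Vertx => VX}

// Placeholder OAuth credentials; real values would come from configuration.
val google = AuthType.Google(key = "client-id", secret = "client-secret")

// Build the corresponding pac4j client.
val client = AuthType.getClient(VX.vertx(), "https://app.example.com", google)
```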
ContentType("text/html") + val CSV = ContentType("text/csv") + val XML = ContentType("text/xml") + val JSON = ContentType("application/json") + val OctetStream = ContentType("application/octet-stream") + val Form = ContentType("application/x-www-form-urlencoded") +} diff --git a/jvm/src/main/scala/com/harana/modules/vertx/models/CrossOriginResourceSharing.scala b/jvm/src/main/scala/com/harana/modules/vertx/models/CrossOriginResourceSharing.scala new file mode 100644 index 0000000..e0b2885 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/vertx/models/CrossOriginResourceSharing.scala @@ -0,0 +1,52 @@ +package com.harana.modules.vertx.models + +import com.google.common.base.{Joiner, Splitter} +import com.harana.modules.vertx.models.CrossOriginResourceSharing._ + +import java.util.regex.Pattern +import scala.jdk.CollectionConverters._ + +case class CrossOriginResourceSharing(allowedOrigins: Set[String], + allowedMethods: Set[String], + allowedHeaders: Set[String]) { + + val allowedHeadersRaw = Joiner.on(HEADER_VALUE_SEPARATOR).join(allowedHeaders.asJava) + val allowedMethodsRaw = Joiner.on(HEADER_VALUE_SEPARATOR).join(allowedMethods.asJava) + val anyOriginAllowed = allowedOrigins.contains(ALLOW_ANY_ORIGIN) + val allowedOriginPatterns = allowedOrigins.filterNot(_.equals(ALLOW_ANY_ORIGIN)).map(Pattern.compile(_, Pattern.CASE_INSENSITIVE)) + + def getAllowedMethods = + allowedMethodsRaw + + def getAllowedOrigin(origin: String) = + if (anyOriginAllowed) ALLOW_ANY_ORIGIN else origin + + def isOriginAllowed(origin: String): Boolean = + anyOriginAllowed || allowedOriginPatterns.forall(_.matcher(origin).matches) + + def isMethodAllowed(method: String) = + allowedMethods.contains(method) + + def isEveryHeaderAllowed(headers: String) = + allowedHeadersRaw.equals(ALLOW_ANY_HEADER) || Splitter.on(HEADER_VALUE_SEPARATOR).split(headers).asScala.forall(allowedHeaders.contains) + + override def equals(obj: Any): Boolean = { + if (this == obj) return true + if (obj == null || !obj.isInstanceOf[CrossOriginResourceSharing]) return false + + val that = obj.asInstanceOf[CrossOriginResourceSharing] + this.allowedOrigins.equals(that.allowedOrigins) && + this.allowedMethodsRaw.equals(that.allowedMethodsRaw) && + this.allowedHeadersRaw.equals(that.allowedHeadersRaw) + } +} + +object CrossOriginResourceSharing { + val SUPPORTED_METHODS = Set("GET", "HEAD", "PUT", "POST") + val HEADER_VALUE_SEPARATOR = ", " + val ALLOW_ANY_ORIGIN = "*" + val ALLOW_ANY_HEADER = "*" + + def apply() = + new CrossOriginResourceSharing(Set(ALLOW_ANY_ORIGIN), SUPPORTED_METHODS, Set(ALLOW_ANY_HEADER)) +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/vertx/models/Response.scala b/jvm/src/main/scala/com/harana/modules/vertx/models/Response.scala new file mode 100644 index 0000000..b0e326d --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/vertx/models/Response.scala @@ -0,0 +1,86 @@ +package com.harana.modules.vertx.models + +import io.circe.Json +import io.vertx.core.buffer.{Buffer => VertxBuffer} +import io.vertx.core.http.Cookie +import io.vertx.core.streams.{ReadStream => VertxReadStream} + +sealed trait Response { + val contentType: Option[ContentType] + val cookies: List[Cookie] + val statusCode: Option[Int] + val headers: Map[_<: CharSequence, List[_<: CharSequence]] +} + +object Response { + + case class Buffer(buffer: VertxBuffer, + gzipped: Boolean = false, + contentType: Option[ContentType] = None, + cookies: List[Cookie] = List(), + statusCode: Option[Int] = None, + cors: Boolean = 
false, + headers: Map[_<: CharSequence, List[_<: CharSequence]] = Map()) extends Response + + case class Content(content: String, + contentType: Option[ContentType] = None, + cookies: List[Cookie] = List(), + statusCode: Option[Int] = None, + cors: Boolean = false, + headers: Map[_<: CharSequence, List[_<: CharSequence]] = Map()) extends Response + + case class Empty(contentType: Option[ContentType] = None, + cookies: List[Cookie] = List(), + statusCode: Option[Int] = None, + cors: Boolean = false, + headers: Map[_<: CharSequence, List[_<: CharSequence]] = Map()) extends Response + + case class File(filename: String, + inputStream: java.io.InputStream, + gzipped: Boolean = false, + contentSize: Option[Long] = None, + contentType: Option[ContentType] = None, + cookies: List[Cookie] = List(), + statusCode: Option[Int] = None, + cors: Boolean = false, + headers: Map[_<: CharSequence, List[_<: CharSequence]] = Map()) extends Response + + case class InputStream(inputStream: java.io.InputStream, + gzipped: Boolean = false, + contentSize: Option[Long] = None, + contentType: Option[ContentType] = None, + cookies: List[Cookie] = List(), + statusCode: Option[Int] = None, + cors: Boolean = false, + headers: Map[_<: CharSequence, List[_<: CharSequence]] = Map()) extends Response + + case class JSON(content: Json, + contentType: Option[ContentType] = Some(ContentType.JSON), + cookies: List[Cookie] = List(), + statusCode: Option[Int] = None, + cors: Boolean = false, + headers: Map[_<: CharSequence, List[_<: CharSequence]] = Map()) extends Response + + case class ReadStream(stream: VertxReadStream[VertxBuffer], + contentSize: Option[Long] = None, + contentType: Option[ContentType] = None, + cookies: List[Cookie] = List(), + statusCode: Option[Int] = None, + cors: Boolean = false, + headers: Map[_<: CharSequence, List[_<: CharSequence]] = Map()) extends Response + + case class Redirect(url: String, + contentType: Option[ContentType] = None, + cookies: List[Cookie] = List(), + statusCode: Option[Int] = None, + cors: Boolean = false, + headers: Map[_<: CharSequence, List[_<: CharSequence]] = Map()) extends Response + + case class Template(path: String, + parameters: Map[String, AnyRef] = Map(), + contentType: Option[ContentType] = None, + cookies: List[Cookie] = List(), + statusCode: Option[Int] = None, + cors: Boolean = false, + headers: Map[_<: CharSequence, List[_<: CharSequence]] = Map()) extends Response +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/vertx/models/Route.scala b/jvm/src/main/scala/com/harana/modules/vertx/models/Route.scala new file mode 100644 index 0000000..c237241 --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/vertx/models/Route.scala @@ -0,0 +1,28 @@ +package com.harana.modules.vertx.models + +import io.vertx.core.buffer.Buffer +import io.vertx.core.http.HttpMethod +import io.vertx.core.streams.Pump +import io.vertx.ext.reactivestreams.ReactiveWriteStream +import io.vertx.ext.web.{RoutingContext, FileUpload => VertxFileUpload} +import zio.Task + +import java.nio.file.Path + +case class Route(path: String, + method: HttpMethod, + handler: RouteHandler, + consumes: Option[ContentType] = None, + produces: Option[ContentType] = Some(ContentType.HTML), + multipart: Boolean = false, + secured: Boolean = false, + regex: Boolean = false, + normalisedPath: Boolean = true, + blocking: Boolean = false) + +sealed trait RouteHandler +object RouteHandler { + case class Standard(handler: RoutingContext => Task[Response]) extends RouteHandler + case 
class FileUpload(handler: (RoutingContext, Path, List[VertxFileUpload]) => Task[Response]) extends RouteHandler + case class Stream(handler: (RoutingContext, ReactiveWriteStream[Buffer], Pump) => Task[Response]) extends RouteHandler +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/vertx/models/Verticle.scala b/jvm/src/main/scala/com/harana/modules/vertx/models/Verticle.scala new file mode 100644 index 0000000..20f072b --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/vertx/models/Verticle.scala @@ -0,0 +1,27 @@ +package com.harana.modules.vertx.models + +import io.vertx.core.{AbstractVerticle, Promise} +import zio._ + +// FIXME +//trait Verticle extends AbstractVerticle { +// +// def run: ZIO[Nothing, Nothing, Int] +// +// override def start(startPromise: Promise[Void]): Unit = { +// Unsafe.unsafe { implicit unsafe => +// Runtime.default.unsafe.run( +// (for { +// fiber <- run.fork +// _ <- ZIO.succeed(java.lang.Runtime.getRuntime.addShutdownHook(new Thread { +// override def run() = { +// val _ = Runtime.default.unsafe.run(fiber.interrupt) +// } +// })) +// result <- fiber.join +// _ <- fiber.interrupt +// } yield result) +// ) +// } +// } +//} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/vertx/models/streams/AsyncFileReadStream.scala b/jvm/src/main/scala/com/harana/modules/vertx/models/streams/AsyncFileReadStream.scala new file mode 100644 index 0000000..a4cf9fc --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/vertx/models/streams/AsyncFileReadStream.scala @@ -0,0 +1,144 @@ +package com.harana.modules.vertx.models.streams + +import io.vertx.core.Handler +import io.vertx.core.buffer.Buffer +import io.vertx.core.streams.ReadStream +import one.jasyncfio.{AsyncFile, EventExecutor} +import org.apache.commons.lang3.SystemUtils + +import java.nio.ByteBuffer +import java.nio.channels.{AsynchronousFileChannel, CompletionHandler} +import java.nio.file.{Paths, StandardOpenOption} +import scala.concurrent.ExecutionContext.Implicits.global +import scala.concurrent.Promise +import scala.util.Try + +class AsyncFileReadStream(path: String, range: Option[(Long, Long)] = None) extends ReadStream[Buffer] { + + val file = + if (SystemUtils.IS_OS_LINUX) + Left(AsyncFile.open(path, EventExecutor.initDefault()).get()) + else + Right(AsynchronousFileChannel.open(Paths.get(path), StandardOpenOption.READ)) + + var closed = false + var readPos = if (range.nonEmpty) range.get._1 else 0L + val readBufferSize = 1024 + var readLength = if (range.nonEmpty) (range.get._2 - range.get._1)+1 else Long.MaxValue + + var handler: Option[Handler[Buffer]] = None + var exceptionHandler: Option[Handler[Throwable]] = None + var endHandler: Option[Handler[Void]] = None + + var queue = new InboundBuffer[Buffer](0) + queue.drainHandler(_ => doRead(ByteBuffer.allocateDirect(readBufferSize))) + + queue.handler { buff => + if (buff.length() > 0) { + if (this.handler.nonEmpty) this.handler.get.handle(buff) + } else { + if (this.endHandler.nonEmpty) this.endHandler.get.handle(null) + } + } + + def doRead(bb: ByteBuffer): Unit = { + val buff = Buffer.buffer(readBufferSize) + val readSize = Math.min(readBufferSize, readLength).toInt + bb.limit(readSize) + val promise = Promise[Buffer]() + promise.future.onComplete { (ar: Try[Buffer]) => { + if (ar.isSuccess) { + val buffer = ar.get + readPos += buffer.length() + readLength -= buffer.length() + if (buffer.length == 0) { + if (this.endHandler.nonEmpty) { + this.endHandler.get.handle(null) + } + } else + if 
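The `Route`, `RouteHandler` and `Response` pieces above compose as follows. This is a sketch with made-up paths: one JSON endpoint and one multipart upload, where the `Path` passed to `FileUpload` is assumed to be the staging directory.

```scala
import com.harana.modules.vertx.models.{ContentType, Response, Route, RouteHandler}
import io.circe.Json
import io.vertx.core.http.HttpMethod
import zio._

// JSON endpoint using the Standard handler.
val status = Route(
  path = "/api/status",
  method = HttpMethod.GET,
  handler = RouteHandler.Standard(_ => ZIO.succeed(Response.JSON(Json.obj("status" -> Json.fromString("up"))))),
  produces = Some(ContentType.JSON)
)

// Multipart upload using the FileUpload handler.
val upload = Route(
  path = "/api/upload",
  method = HttpMethod.POST,
  handler = RouteHandler.FileUpload((_, dir, files) =>
    ZIO.succeed(Response.Content(s"received ${files.size} file(s) in $dir"))
  ),
  multipart = true
)
```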
(queue.write(buffer)) doRead(bb) + } else + if (this.exceptionHandler.nonEmpty) this.exceptionHandler.get.handle(ar.failed.get) + }} + + read(buff, 0, bb, readPos, promise) + } + + def read(writeBuff: Buffer, offset: Int, buff: ByteBuffer, position: Long, promise: Promise[Buffer]): Unit = + file match { + case Left(asyncFile) => + val tempBuffer = ByteBuffer.allocateDirect(readBufferSize) + var read = position + + while (read != 1 && buff.hasRemaining) { + read = asyncFile.read(tempBuffer, read, readBufferSize).get().toLong + promise.success(Buffer.buffer(tempBuffer.array())) + } + + case Right(channel) => + channel.read(buff, position, null, new CompletionHandler[Integer, Object]() { + var pos = position + + def completed(bytesRead: Integer, attachment: Object) = + if (bytesRead == -1) + done() + else { + if (buff.hasRemaining) { + pos += bytesRead + read(writeBuff, offset, buff, pos, promise) + } else + done() + } + + def failed(t: Throwable, attachment: Object) = { + t.printStackTrace() + promise.failure(t) + } + + def done() = { + buff.flip() + writeBuff.setBytes(offset, buff) + buff.compact() + promise.success(writeBuff) + } + }) + } + + def handler(handler: Handler[Buffer]) = { + if (closed) + this + else { + this.handler = Option(handler) + if (this.handler.nonEmpty) + doRead(ByteBuffer.allocateDirect(readBufferSize)) + else + queue.clear() + } + this + } + + def pause() = { + queue.pause() + this + } + + def resume() = { + if (!closed) queue.resume() + this + } + + def fetch(amount: Long) = { + queue.fetch(amount) + this + } + + def exceptionHandler(handler: Handler[Throwable]) = { + this.exceptionHandler = Option(handler) + this + } + + def endHandler(handler: Handler[Void]) = { + this.endHandler = Some(handler) + this + } +} \ No newline at end of file diff --git a/jvm/src/main/scala/com/harana/modules/vertx/models/streams/AwsChunkedReadStream.java b/jvm/src/main/scala/com/harana/modules/vertx/models/streams/AwsChunkedReadStream.java new file mode 100644 index 0000000..793584a --- /dev/null +++ b/jvm/src/main/scala/com/harana/modules/vertx/models/streams/AwsChunkedReadStream.java @@ -0,0 +1,84 @@ +package com.harana.modules.vertx.models.streams; + +import io.vertx.core.Handler; +import io.vertx.core.buffer.Buffer; +import io.vertx.core.logging.Logger; +import io.vertx.core.logging.LoggerFactory; +import io.vertx.core.streams.ReadStream; + +import java.nio.charset.StandardCharsets; +import java.util.Arrays; + +public class AwsChunkedReadStream extends DelegateReadStreamA {@link ReadStream} that delegates to another one but decompresses all + * data with GZIP.
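As an illustration of how the streaming pieces might fit together, a ranged file read can be surfaced through the `Response.ReadStream` case defined earlier; the path and byte range below are placeholders.

```scala
import com.harana.modules.vertx.models.Response
import com.harana.modules.vertx.models.streams.AsyncFileReadStream

// Serve bytes 0-1023 of a (hypothetical) file as an HTTP 206 partial response.
val partial = Response.ReadStream(
  stream = new AsyncFileReadStream("/var/data/report.pdf", range = Some((0L, 1023L))),
  contentSize = Some(1024L),
  statusCode = Some(206)
)
```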
+ * <p>The code is loosely based on {@link java.util.zip.GZIPInputStream}</p>
+ * @author Michel Kraemer + */ +public class GzipReadStream extends DelegateReadStream