-
Notifications
You must be signed in to change notification settings - Fork 1
/
vars.tf
388 lines (359 loc) · 15.9 KB
/
vars.tf
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
# ----------------------------------------
# My IP Address
# This is used in the creation of the security group
# and will allow access to the ec2-instances on ports
# 22 (ssh), 26257 (database), 8080 (for observability)
# and 3389 (rdp)
# NOTE(review): the default "0.0.0.0" is a single placeholder address,
# not the 0.0.0.0/0 "anywhere" CIDR -- supply your real public IP in
# terraform.tfvars or the security group will not admit you.
# ----------------------------------------
variable "my_ip_address" {
description = "User IP address for access to the ec2 instances."
type = string
default = "0.0.0.0"
}
# ----------------------------------------
# Globals
# ----------------------------------------
# Base name woven into every resource name (see naming scheme in the
# description). Defaults to "demo"; must not be null.
variable "resource_name" {
  # Fix: "mulitple" -> "multiple" in the description.
  description = "resource names will usually be the concatenation of var.owner-var.resource_name-resourceType and also a count.index if there are multiple resources"
  type        = string
  default     = "demo"
  nullable    = false
}
# Required input: no default and nullable = false, so Terraform will
# prompt (or fail) if it is not supplied. Used for tagging/naming.
variable "owner" {
description = "Owner of the infrastructure"
type = string
nullable = false
}
# ----------------------------------------
# Multi-Region
# ----------------------------------------
# Please leave this variable as-is. When this configuration is used as a
# module in the multi-region setup the value is passed in; for a single
# region the default is used. (Author's note: total hack.)
variable "multi_region" {
type = bool
default = true
}
# TODO: I think I need to delete this. This is not needed here. I'll create the resource group and pass the name to the module! It will NEVER be a variable.
# This is the resource group that's created in main.tf
# NOTE(review): the default "blah blah blah" is a placeholder -- presumably
# always overridden by the caller; verify before relying on it.
variable "multi_region_resource_group_name" { #! Not in terraform.tfvars
description = "Name of the resource group"
type = string
default = "blah blah blah"
}
# ----------------------------------------
# Existing Key Info
# ----------------------------------------
# Required: the SSH key must already exist in Azure; it is referenced,
# not created, by this configuration.
variable "azure_ssh_key_name" {
description = "The name of an existing ssh key in Microsoft Azure"
type = string
}
# Required: resource group containing the existing SSH key named by
# var.azure_ssh_key_name.
variable "azure_ssh_key_resource_group" {
description = "The name of the resource group containing the existing Microsoft Azure SSH Key"
type = string
}
# ----------------------------------------
# Resource Group
# ----------------------------------------
variable "resource_group_location" {
  # Where the resource group created by this HCL lives (the metadata
  # store location). Leave the default as an empty string: for single
  # region, pass the value in terraform.tfvars; for multi-region, the
  # calling module passes it in.
  # Fix: added a description so the variable is self-documenting in
  # `terraform plan` prompts, matching every other variable in this file.
  description = "Azure location of the resource group created by this configuration. Leave the default empty; pass the value in terraform.tfvars (single region) or from the calling module (multi-region)."
  type        = string
  default     = ""
}
# ----------------------------------------
# TAGS
# ----------------------------------------
# owner will be applied to all resources that accept tags along with any other optional tags specified here.
# Optional tags
variable "resource_tags" { #! Not in terraform.tfvars
description = "Tags to set for all resources"
type = map(string)
default = {}
}
# ----------------------------------------
# CRDB Regions
# ----------------------------------------
variable "virtual_network_locations" {
  description = "list of the Azure regions for the multi-region crdb cluster"
  # Fix: bare "list" is legacy pre-0.12 syntax (deprecated); constrain the
  # element type explicitly.
  type    = list(string)
  default = ["westus2", "centralus", "eastus2"]
}
# ----------------------------------------
# Network
# ----------------------------------------
variable "virtual_network_cidr_blocks" {
  # Fix: description said "CIDR block for the VPC" -- VPC is AWS
  # terminology and this is a list, one CIDR per Azure region.
  description = "CIDR blocks for the virtual networks, one per region in var.virtual_network_locations"
  # Fix: bare "list" is legacy pre-0.12 syntax (deprecated); constrain the
  # element type explicitly.
  type    = list(string)
  default = ["192.168.4.0/24", "192.168.5.0/24", "192.168.6.0/24"]
}
# TODO: Delete this... it's not needed... that's why I have virtual_network_locationS
# NOTE(review): singular leftover superseded by virtual_network_locations
# above -- confirm nothing references it before removing.
variable "virtual_network_location" { #! Not in terraform.tfvars
type = string
default = "westeurope"
}
# ----------------------------------------
# CRDB Instance Specifications
# Azure names available here: https://azureprice.net/
# ----------------------------------------
variable "crdb_vm_size" {
  description = "The Azure instance type for the crdb instances."
  type        = string
  # Fix: the previous default "m6i.large" is an AWS EC2 instance type and
  # is not a valid Azure VM size -- any apply using the default would fail.
  # Standard_D4s_v3 is a comparable general-purpose Azure size.
  default     = "Standard_D4s_v3"
}
# Number of CockroachDB nodes created per region.
# NOTE(review): the multiple-of-3 check also accepts 0 -- confirm whether
# a zero-node region is intentional.
variable "crdb_nodes" { #! Not in terraform.tfvars (ok)
description = "Number of crdb nodes PER REGION. This should be a multiple of 3. Each node is an Azure Instance"
type = number
default = 3
validation {
condition = var.crdb_nodes%3 == 0
error_message = "The variable 'crdb_nodes' must be a multiple of 3"
}
}
# Data disk size (GB) for each CRDB VM; restricted to the four sizes below.
variable "crdb_disk_size" {
description = "Size of the disk attached to the vm"
type = number
default = 64
validation {
condition = contains([64, 128, 256, 512], var.crdb_disk_size)
error_message = "CRDB Node disk size (in GB) must be 64, 128, 256 or 512"
}
}
# Note that crdb_resize_homelv is dangerous. Only use this option if you are using the redhat source image and only if you are sure
# that sda2 contains the lv "rootvg-homelv". This procedure will add any unused space to homelv.
variable "crdb_resize_homelv" {
description = "When creating a larger disk than exists in the image you'll need to repartition the disk to use the remaining space."
type = string
default = "no"
validation {
condition = contains(["yes", "no"], var.crdb_resize_homelv)
error_message = "Valid value for variable 'crdb_resize_homelv' is : 'yes' or 'no'"
}
}
# Whether to install the ARM build of CockroachDB; requires ARM-capable
# VM sizes (see the link in the description).
variable "crdb_arm_release" {
  description = "Do you want to use the ARM version of CRDB? There are implications on the instances available for the installation. You must choose the correct instance type or this will fail. See https://learn.microsoft.com/en-us/azure/virtual-machines/dpsv5-dpdsv5-series"
  type        = string
  default     = "no"
  validation {
    condition     = contains(["yes", "no"], var.crdb_arm_release)
    # Fix: the message previously referenced a nonexistent variable 'arm'.
    error_message = "Valid value for variable 'crdb_arm_release' is : 'yes' or 'no'"
  }
}
# Whether to run the CRDB nodes on spot (preemptible) instances.
variable "crdb_enable_spot_instances" {
  description = "Do you want to use SPOT instances? There are implications on the instances available for the installation. You must choose the correct instance type or this will fail."
  type        = string
  default     = "no"
  validation {
    condition     = contains(["yes", "no"], var.crdb_enable_spot_instances)
    # Fix: the message previously said 'spot instances' instead of the
    # actual variable name.
    error_message = "Valid value for variable 'crdb_enable_spot_instances' is : 'yes' or 'no'"
  }
}
# ----------------------------------------
# CRDB Admin User - Cert Connection
# ----------------------------------------
variable "create_admin_user" {
  # Fix: grammar -- "might only makes sense" -> "might only make sense".
  description = "'yes' or 'no' to create an admin user in the database. This might only make sense when adding an app instance since the certs will be created and configured automatically for connection to the database."
  type        = string
  default     = "yes"
  validation {
    condition     = contains(["yes", "no"], var.create_admin_user)
    # Fix: the message was copy-pasted from 'include_ha_proxy'.
    error_message = "Valid value for variable 'create_admin_user' is : 'yes' or 'no'"
  }
}
# Username for the database admin created when create_admin_user = "yes";
# ignored otherwise.
variable "admin_user_name" {
  description = "An admin with this username will be created if 'create_admin_user=yes'"
  type        = string
  default     = ""
}
# ----------------------------------------
# CRDB Specifications
# ----------------------------------------
# TODO: This can be removed from multi-region. It will be created in single region. Passed back to multi-region and then handed off to the other regions.
variable "join_string" { #! Not in terraform.tfvars
description = "The CRDB join string to use at start-up. Do not supply a value"
type = string
default = ""
}
# CockroachDB release to install on the nodes.
variable "crdb_version" {
description = "CockroachDB Version"
type = string
default = "22.2.10"
}
variable "run_init" { #! not in terraform.tfvars, but I think I want to leave it in case, for whatever reason, I don't want to run the init.
description = "'yes' or 'no' to run init on the database. In a multi-region configuration, only run the init in one of the regions."
type = string
default = "yes"
validation {
condition = contains(["yes", "no"], var.run_init)
error_message = "Valid value for variable 'run_init' is : 'yes' or 'no'"
}
}
# ----------------------------------------
# Cluster Enterprise License Keys
# ----------------------------------------
variable "install_enterprise_keys" {
  # Fix: the second sentence of the description was truncated mid-thought;
  # completed it to say the env vars must be set (see the export examples
  # below this block).
  description = "Setting this to 'yes' will attempt to install enterprise license keys into the cluster. The environment variables TF_VAR_cluster_organization and TF_VAR_enterprise_license must be set."
  type        = string
  default     = "no"
  validation {
    condition     = contains(["yes", "no"], var.install_enterprise_keys)
    error_message = "Valid value for variable 'install_enterprise_keys' is : 'yes' or 'no'"
  }
}
# Be sure to do the following in your environment if you plan on installing the license keys
# export TF_VAR_cluster_organization='your cluster organization'
# export TF_VAR_enterprise_license='your enterprise license'
variable "cluster_organization" { #! Not in terraform.tfvars
  # Fix: added a description, consistent with the rest of the file.
  description = "Organization name associated with the enterprise license; supply via the TF_VAR_cluster_organization environment variable."
  type        = string
  default     = ""
}
variable "enterprise_license" { #! Not in terraform.tfvars
  # Fix: added a description, consistent with the rest of the file.
  description = "CockroachDB enterprise license key; supply via the TF_VAR_enterprise_license environment variable."
  type        = string
  default     = ""
  # Fix: the license key is a secret -- keep it out of plan/apply output.
  # (sensitive is available: the file already uses nullable, a 1.1+ feature.)
  sensitive   = true
}
# ----------------------------------------
# Cluster Location Data - For console map
# ----------------------------------------
variable "install_system_location_data" {
description = "Setting this to 'yes' will attempt to install data in the system.location table. The data will be used by the console to display cluster node locations)"
type = string
default = "no"
validation {
condition = contains(["yes", "no"], var.install_system_location_data)
error_message = "Valid value for variable 'install_system_location_data' is : 'yes' or 'no'"
}
}
# ----------------------------------------
# HA Proxy Instance Specifications
# ----------------------------------------
variable "include_ha_proxy" {
description = "'yes' or 'no' to include an HAProxy Instance"
type = string
default = "yes"
validation {
condition = contains(["yes", "no"], var.include_ha_proxy)
error_message = "Valid value for variable 'include_ha_proxy' is : 'yes' or 'no'"
}
}
variable "haproxy_vm_size" {
  # Fix: the description read "the crdb instances HA Proxy Instance"
  # (copy-paste residue); this sizes the HAProxy VM.
  description = "The Azure instance type for the HAProxy instance."
  type        = string
  # Fix: the previous default "t3a.small" is an AWS EC2 instance type and
  # is not a valid Azure VM size. Standard_B2s is a comparable small
  # burstable Azure size.
  default     = "Standard_B2s"
}
# ----------------------------------------
# APP Instance Specifications
# ----------------------------------------
variable "include_app" {
description = "'yes' or 'no' to include an APP Instance"
type = string
default = "yes"
validation {
condition = contains(["yes", "no"], var.include_app)
error_message = "Valid value for variable 'include_app' is : 'yes' or 'no'"
}
}
variable "app_vm_size" {
  # Fix: the description read "the crdb instances app Instance"
  # (copy-paste residue); this sizes the app VM.
  description = "The Azure instance type for the app instance."
  type        = string
  # Fix: the previous default "t3a.micro" is an AWS EC2 instance type and
  # is not a valid Azure VM size. Standard_B1s is a comparable tiny
  # burstable Azure size.
  default     = "Standard_B1s"
}
# Data disk size (GB) for the app VM; restricted to the four sizes below.
variable "app_disk_size" {
  description = "Size of the disk attached to the vm"
  type        = number
  default     = 64
  validation {
    condition     = contains([64, 128, 256, 512], var.app_disk_size)
    # Fix: the message said "CRDB Node disk size" but this is the app disk.
    error_message = "App node disk size (in GB) must be 64, 128, 256 or 512"
  }
}
# Note that app_resize_homelv is dangerous. Only use this option if you use the redhat source image and only if you are sure
# that sda2 contains the lv "rootvg-homelv". This procedure will add any unused space to homelv. This is dangerous and a hack.
variable "app_resize_homelv" {
  description = "When creating a larger disk than exists in the image you'll need to repartition the disk to use the remaining space."
  type        = string
  default     = "no"
  validation {
    condition     = contains(["yes", "no"], var.app_resize_homelv)
    # Fix: the message was copy-pasted from 'crdb_resize_homelv'.
    error_message = "Valid value for variable 'app_resize_homelv' is : 'yes' or 'no'"
  }
}
# ----------------------------------------
# Demo
# ----------------------------------------
variable "include_demo" { #! Not in terraform.tfvars
  # Fix: the description said "to include an HAProxy Instance" -- copy-paste
  # residue from include_ha_proxy; this flag controls the demo setup.
  description = "'yes' or 'no' to include the demo setup"
  type        = string
  default     = "no"
  validation {
    condition     = contains(["yes", "no"], var.include_demo)
    error_message = "Valid value for variable 'include_demo' is : 'yes' or 'no'"
  }
}
# ----------------------------------------
# UI Cert (so that the database console does not issue "Your connection is not private" warning)
# When accessing the database console on 8080, unless there is a certificate signed by an authority
# accepted by the browser, an error message will be displayed. If you have a domain name and can
# associate the IP of the CRDB via a DNS "A" record, then a certificate can be generated
# via Let's Encrypt / certbot.
# To generate the cert, FIRST associate the public IP of the CRDB node with the domain name
# and then run the bash function "UICERT". For certbot to generate the certs, you must
# supply the domain name and email address.
# ----------------------------------------
variable "include_uicert" {
description = "'yes' or 'no' to include the UICERT function in the .bashrc of the CRDB instances. This also opens port 80 to the world on the CRDB instances for certbot."
type = string
default = "no"
validation {
condition = contains(["yes", "no"], var.include_uicert)
error_message = "Valid value for variable 'include_uicert' is : 'yes' or 'no'"
}
}
# Required (non-empty) when include_uicert = "yes"; passed to certbot.
variable "uicert_domain_name" {
description = "The domain name that will be passed to certbot for the cert."
type = string
default = ""
}
# Required (non-empty) when include_uicert = "yes"; passed to certbot.
variable "uicert_email_address" {
description = "The email address to be associated with the cert. This is required if choosing to generate a CA cert for the ui."
type = string
default = ""
}
# ----------------------------------------
# TLS Vars -- Leave blank to have them generated
# ----------------------------------------
# For multi-region these are probably not needed, but I'm leaving them in case the user wants to pass them in.
# For single region, they are DEFINITELY needed so that they can all be generated once in the multi-region HCL
# and then passed to the single region module.
variable "tls_private_key" {
  description = "tls_private_key.crdb_ca_keys.private_key_pem -> ca.key / TLS Private Key PEM"
  type        = string
  default     = ""
  # Fix: CA private key material is a secret -- keep it out of plan/apply
  # output. (sensitive is available: the file already uses nullable, 1.1+.)
  sensitive   = true
}
# CA public key; leave empty to have it generated.
variable "tls_public_key" {
description = "tls_private_key.crdb_ca_keys.public_key_pem -> ca.pub / TLS Public Key PEM"
type = string
default = ""
}
# CA certificate; leave empty to have it generated.
variable "tls_cert" {
description = "tls_self_signed_cert.crdb_ca_cert.cert_pem -> ca.crt / TLS Cert PEM"
type = string
default = ""
}
# Intentional duplicate of tls_cert under a clearer name (per description).
variable "tls_self_signed_cert" {
description = "tls_self_signed_cert.crdb_ca_cert.cert_pem -> ca.crt / TLS Cert PEM / Duplicate of tls_cert for better naming"
type = string
default = ""
}
# Client certificate; leave empty to have it generated.
variable "tls_user_cert" {
description = "tls_locally_signed_cert.user_cert.cert_pem -> client.name.crt"
type = string
default = ""
}
# Intentional duplicate of tls_user_cert under a clearer name (per description).
variable "tls_locally_signed_cert" {
description = "tls_locally_signed_cert.user_cert.cert_pem -> client.name.crt / Duplicate of tls_user_cert for better naming"
type = string
default = ""
}
variable "tls_user_key" {
  description = "tls_private_key.client_keys.private_key_pem -> client.name.key"
  type        = string
  default     = ""
  # Fix: client private key material is a secret -- keep it out of
  # plan/apply output. (sensitive is available: the file uses nullable, 1.1+.)
  sensitive   = true
}