diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..0696b3e --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +.terraform \ No newline at end of file diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..d927695 --- /dev/null +++ b/Makefile @@ -0,0 +1,52 @@ +.DEFAULT_GOAL := help + +.PHONY: install all help nixos infra-plan infra-apply infra-destroy infra-init-backends infra-init-all-backends + +nixos-image: ## Build a new NixOS image (snapshot) on Hetzner Cloud. Check README.md to see how VERSION= and BUILD= should be set + @cd nixos; $(MAKE) $@ + +module-exists: + @test -z "$(MODULE)" && { printf "Please provide a module, e.g. MODULE=environment\n"; exit 1; }; \ + test -d "infrastructure/$(MODULE)" || { printf "The module \"%s\" does not exist!\n" "$(MODULE)"; exit 1; }; \ + +infra-plan: ## Run `terraform plan` inside of infrastructure/$(MODULE), e.g. `make infra-plan MODULE=ingress` + @$(MAKE) module-exists MODULE="$(MODULE)" || exit 1; \ + cd infrastructure/$(MODULE); \ + terraform13.5 plan . + +infra-apply: ## Run `terraform apply` inside of infrastructure/$(MODULE), e.g. `make infra-apply MODULE=ingress` + @$(MAKE) module-exists MODULE="$(MODULE)" || exit 1; \ + cd infrastructure/$(MODULE); \ + terraform13.5 apply . + +infra-destroy: ## Run `terraform destroy` inside of infrastructure/$(MODULE), e.g. `make infra-destroy MODULE=ingress` + @$(MAKE) module-exists MODULE="$(MODULE)" || exit 1; \ + cd infrastructure/$(MODULE); \ + terraform13.5 destroy . + +infra-init-backends: ## Initialize the backend of one or more modules, e.g. `make infra-init-backends MODULES="compute ingress"` + @get_value() { awk -v setting="$$1" -F"\"" '$$0 ~ setting {print $$4}' "$$2"; }; \ + environment=$$(get_value "terraform_packer_environment" config.json); \ + project=$$(get_value "terraform_gitlab_backend_project" secrets.json); \ + username=$$(get_value "terraform_gitlab_backend_username" secrets.json); \ + password=$$(get_value "terraform_gitlab_backend_password" secrets.json); \ + cd infrastructure; \ + for m in $(MODULES); do \ + gitlab_address="https://gitlab.com/api/v4/projects/$$project/terraform/state/$$environment-$$m"; \ + backend_params=""; \ + backend_params="$$backend_params -backend-config=\"address=$$gitlab_address\""; \ + backend_params="$$backend_params -backend-config=\"lock_address=$$gitlab_address/lock\""; \ + backend_params="$$backend_params -backend-config=\"unlock_address=$$gitlab_address/lock\""; \ + backend_params="$$backend_params -backend-config=\"username=$$username\""; \ + backend_params="$$backend_params -backend-config=\"password=$$password\""; \ + cd "$$m"; \ + rm -rf .terraform; eval terraform13.5 init -reconfigure "$$backend_params" . ; \ + cd ..; \ + done; \ + +infra-init-all-backends: ## Initialize backends of all modules inside of the `infrastructure/` folder + @modules=$$(ls infrastructure/modules | xargs) && $(MAKE) infra-init-backends MODULES="$$modules" + +all install help: + @printf "Available targets\n" && \ + grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' diff --git a/README.md b/README.md new file mode 100644 index 0000000..cae8a42 --- /dev/null +++ b/README.md @@ -0,0 +1,324 @@ +# Hetzner Cloud Environment +## Project Structure +``` +. 
+|-- Makefile           # Wrapper to simplify interaction
+|-- config.json        # Read by Make, Terraform, Packer
+|-- deploymentagent
+|-- infrastructure     # Terraform modules
+|   |-- compute        # Loads the compute module
+|   |-- environment    # Loads the environment module and provides outputs
+|   |-- ingress        # Loads the ingress module and provides outputs
+|   |-- storage        # Loads the storage module
+|   `-- modules        # Contains the code for all the modules
+|-- nixos              # NixOS image builder with Packer
+|-- secrets.json       # Read by Make, Terraform, Packer
+`-- vault              # Policy examples
+```
+
+## Overview
+
+### Tools and Dependencies
+- Terraform 0.13.x [https://releases.hashicorp.com/terraform/](https://releases.hashicorp.com/terraform/)
+- Packer 1.6.x [https://releases.hashicorp.com/packer/](https://releases.hashicorp.com/packer/)
+- Make & Unix command line tools
+
+Optional:
+- Vault CLI 1.6.x [https://releases.hashicorp.com/vault/](https://releases.hashicorp.com/vault/)
+
+### Configuration
+#### Configuring a Hetzner Cloud Project
+Login: [https://accounts.hetzner.com/login](https://accounts.hetzner.com/login)
+
+Visit the [projects](https://console.hetzner.cloud/projects) tab to either create a new project or to pick an existing one.
+A project contains resources (servers, snapshots, load balancers, volumes, ..) as well as a security service to manage API tokens and TLS certificates (which can be used with load balancers).
+Check the links below to see which resources are available and how to use them.
+
+- General documentation: [https://docs.hetzner.com/cloud/](https://docs.hetzner.com/cloud/)
+- API documentation: [https://docs.hetzner.cloud/](https://docs.hetzner.cloud/)
+
+To build and provision resources with Packer and Terraform, an API token is required, which can be created in the *Security* tab.
+
+##### Hetzner Cloud Limitations
+**Floating IPs**: Persistent (floating) IP addresses can currently only be assigned to cloud servers.
+This means that when you delete a load balancer, you will also lose the public IP you have been using for the services behind it.
+You will probably not delete load balancers in the production environment, but for staging and testing environments, load balancers can be scaled up and down via the Hetzner Cloud web UI or their API/Terraform if you want to save some money.
+There appear to be [plans](https://docs.hetzner.com/cloud/load-balancers/faq/#can-i-assign-a-floating-ip-to-my-load-balancer) to add support for load balancers with floating IPs.
+
+**Certificates**: Certificates stored within the security service on Hetzner Cloud cannot be updated, only replaced.
+Before a certificate can be deleted, it must be dereferenced from the [services](https://docs.hetzner.cloud/#load-balancer-actions-update-service) which were set up on load balancers.
+For this reason, Certbot needs to be wrapped by a script which takes care of certificate replacement (see `infrastructure/modules/compute/certbot.sh`).
+Unfortunately, Hetzner does not keep a public roadmap, but there seem to be [plans](https://www.reddit.com/r/hetzner/comments/hdp53j/load_balancers_are_now_on_hetzner_cloud/g16rxkt/) to add support for Let's Encrypt directly to cloud load balancers as well.
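+
+The replacement flow that script implements boils down to a handful of API calls (a condensed sketch using curl; `$API_TOKEN`, `$LB_ID`, `$CERT_ID` and the payloads are placeholders, the real values are derived in `certbot.sh`):
+```sh
+# 1. Temporarily switch the affected service back to plain http so the old certificate is no longer referenced
+curl -XPOST -H "Authorization: Bearer $API_TOKEN" -H "Content-Type: application/json" \
+  -d '{"listen_port": 443, "protocol": "http", "http": {"redirect_http": false, "certificates": []}}' \
+  "https://api.hetzner.cloud/v1/load_balancers/$LB_ID/actions/update_service"
+
+# 2. Delete the old certificate (only possible once no service references it)
+curl -X DELETE -H "Authorization: Bearer $API_TOKEN" "https://api.hetzner.cloud/v1/certificates/$CERT_ID"
+
+# 3. Upload the renewed certificate; the response contains the new certificate ID
+curl -XPOST -H "Authorization: Bearer $API_TOKEN" -H "Content-Type: application/json" \
+  -d '{"name": "guidelines-production", "certificate": "<fullchain PEM>", "private_key": "<private key PEM>"}' \
+  "https://api.hetzner.cloud/v1/certificates"
+
+# 4. Re-enable https on the service, now pointing at the new certificate ID
+curl -XPOST -H "Authorization: Bearer $API_TOKEN" -H "Content-Type: application/json" \
+  -d '{"listen_port": 443, "protocol": "https", "http": {"redirect_http": true, "certificates": [<new certificate ID>]}}' \
+  "https://api.hetzner.cloud/v1/load_balancers/$LB_ID/actions/update_service"
+```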
+
+#### `config.json`
+The `config.json` and `secrets.json` files are read by Make, Packer and Terraform.
+This way, all settings and secrets that change between environments can be stored in a central place, and the [HCL](https://github.com/hashicorp/hcl) files used by Packer and Terraform only need to be touched in case the infrastructure is intended to be "refactored".
+Due to some technical limitations in Terraform, it can be tricky to track state with [backends](https://www.terraform.io/docs/backends/index.html) in different environments.
+To avoid solutions involving templates or third party tools such as Terragrunt, a simple wrapper has been included in the `Makefile` which can set up backends automatically for different environments.
+
+### Secrets
+#### `secrets.json` (with git-secrets)
+To decrypt the `secrets.json` file, run the following command in your shell:
+```sh
+git secret reveal
+```
+
+#### Gitlab
+Secrets, such as the SSH key pair for the default system user, are stored in the [Gitlab CI/CD](https://gitlab.com/infektweb/glv5/hetzner-cloud-environment/-/settings/ci_cd) settings page of this Git project (for now), in the *Variables* section.
+
+https://gitlab.com/infektweb/glv5/hetzner-cloud-environment/-/settings/repository/#js-deploy-tokens
+
+`id_rsa_operator_pub` is baked into the image generated by Packer (see `nixos/nix/system.nix`).
+
+### NixOS
+#### Building NixOS Images (Snapshots) with Packer
+The `nixos-image` target in the `Makefile` wraps the execution of Packer to build a NixOS image from the default Ubuntu 20.04 image provided by Hetzner Cloud.
+Two arguments may be supplied: `VERSION=` to specify the desired NixOS release (see [NixOS Release Notes](https://nixos.org/manual/nixos/stable/release-notes.html)) and `BUILD=` to track versions of the images that have been created.
+
+Example:
+```sh
+$ make nixos-image VERSION=20.09 BUILD=1.0.0
+```
+After a successful build, Packer will display the ID of the created snapshot on the very last line of the output.
+When provisioning servers via Terraform, the image ID to use is read from the `nixos_snapshot_id` key in the `config.json` file.
+In case you missed the ID in the build output, you can query the Hetzner Cloud API like this to retrieve a list of created snapshots:
+```sh
+$ curl -H "Authorization: Bearer $HCLOUD_TOKEN" 'https://api.hetzner.cloud/v1/images' | jq '.images[] | select(.type == "snapshot")'
+```
+It makes sense to use the same NixOS image across all environments (testing/staging/production/..).
+
+### Infrastructure
+
+#### Working with Terraform
+Have a look at the Terraform [documentation](https://www.terraform.io/docs/cli-index.html).
+To learn more about its configuration language [HCL](https://www.terraform.io/docs/configuration/index.html), see
+- Resources
+- Variables and Outputs
+- Functions
+- State
+
+Refer to the [Provider documentation](https://registry.terraform.io/providers/hetznercloud/hcloud/latest/docs) to see how to manage resources with Terraform on Hetzner Cloud.
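+
+Besides the `make` wrappers described below, you can also drop into a module directory and use the Terraform CLI directly, for example to inspect the outputs that other modules consume via `terraform_remote_state` (a sketch; it assumes the module's backend has already been initialized as described in the following sections):
+```sh
+$ cd infrastructure/environment
+$ terraform13.5 state list   # resources tracked in this module's state
+$ terraform13.5 output       # outputs such as environment_name, network_primary_id, dc_default_id
+```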
+
+#### Provisioning Infrastructure
+
+##### Modules Overview
+Rough overview of resources and outputs across the four modules:
+```
+environment
+  - hcloud_network
+  - hcloud_network_subnet
+  - outputs
+    - dc_default_id (identifier of the datacenter in Nuremberg)
+    - environment_name (name of the environment, read from config.json)
+    - network_primary_id
+    - network_subnet_a_id
+ingress
+  - hcloud_load_balancer
+  - hcloud_load_balancer_network (attach to the network/subnet configured in the environment module)
+  - hcloud_load_balancer_service
+  - hcloud_load_balancer_target (servers are implicitly assigned to load balancers via their labels)
+storage
+  - hcloud_volume
+  - outputs
+    - volume_data1_id
+compute
+  - hcloud_server
+  - hcloud_server_network (attach servers to the networks/subnets configured in the environment module)
+  - hcloud_volume_attachment (directly attach volumes created in the storage module to servers)
+```
+
+##### Initializing State Backends for Each Module
+You will need to (re-)initialize the state backends each time you change environments via `config.json` (see later sections).
+```sh
+$ make infra-init-backends MODULES="compute"         # one module
+$ make infra-init-backends MODULES="compute ingress" # multiple modules
+$ make infra-init-all-backends                       # all modules
+```
+
+##### Applying Modules
+You will need to manually confirm with 'yes' before the changes are applied.
+```sh
+$ make infra-apply MODULE=compute
+```
+
+##### Destroying Modules
+```sh
+$ make infra-destroy MODULE=compute
+```
+
+## Operations Guide
+
+### Data
+#### Ephemeral Data
+- /opt/
+- /etc/nixos
+
+#### Persistent Data
+- /mnt/data
+
+### Setting Up a New Environment
+The following sections assume the environment to be called 'production'.
+
+#### Configure Environment in `config.json` and `secrets.json`
+Set the environment name and the desired NixOS image/snapshot ID in `config.json`.
+`config.json`:
+```json
+{
+tbd
+}
+```
+Use your personal Gitlab deploy token and Hetzner Cloud tokens.
+`secrets.json`:
+```json
+{
+  "terraform_gitlab_backend_username": "",
+  "terraform_gitlab_backend_password": "",
+  "terraform_gitlab_backend_project": "",
+  "gitlab_deploy_token_username": "",
+  "gitlab_deploy_token_password": "",
+  "aws_access_key_id": "",
+  "aws_secret_access_key": "",
+  "hcloud_token_testing": "",
+  "hcloud_token_production": "",
+  "vault_db_password_production": ""
+}
+```
+
+#### Provisioning Infrastructure with Terraform
+Just to be sure, re-initialize all the Terraform state backends for the desired environment.
+```sh
+$ make infra-init-all-backends
+```
+Roll out all the resources by applying each Terraform module.
+The environment module must be applied first, the compute module last.
+```sh
+$ make infra-apply MODULE=environment
+$ make infra-apply MODULE=ingress
+$ make infra-apply MODULE=storage
+$ make infra-apply MODULE=compute
+```
+Take note of the public IPs of the load balancer (used to access your services) and the server (used to manage the NixOS system) in the Hetzner Cloud web UI or via their API:
+```sh
+$ curl -H "Authorization: Bearer $API_TOKEN" 'https://api.hetzner.cloud/v1/servers?label_selector=environment==production' | jq '.servers[].public_net'
+```
+```sh
+$ curl -H "Authorization: Bearer $API_TOKEN" 'https://api.hetzner.cloud/v1/load_balancers?label_selector=environment==production' | jq '.load_balancers[].public_net'
+```
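+If you only need the plain IP addresses, you can narrow the `jq` filter down further (a sketch, assuming the usual `public_net.ipv4.ip` layout of the API responses):
+```sh
+$ curl -s -H "Authorization: Bearer $API_TOKEN" 'https://api.hetzner.cloud/v1/servers?label_selector=environment==production' | jq -r '.servers[].public_net.ipv4.ip'
+$ curl -s -H "Authorization: Bearer $API_TOKEN" 'https://api.hetzner.cloud/v1/load_balancers?label_selector=environment==production' | jq -r '.load_balancers[].public_net.ipv4.ip'
+```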
+You can now connect to the newly created server as the user 'operator', using the default key pair stored on [Gitlab](https://gitlab.com/infektweb/glv5/hetzner-cloud-environment/-/settings/ci_cd).
+```sh
+$ ssh operator@168.119.230.44
+```
+
+#### Changing Passwords of System Users
+As a first step you should change the passwords of the `root` and `operator` users.
+
+```sh
+$ sudo -i
+$ passwd
+$ passwd operator
+```
+
+#### Configuring Certbot
+In case you have an existing configuration for Certbot, you can simply copy it to `/mnt/data/letsencrypt`; otherwise you can set up a new configuration either locally or directly on the server itself.
+```sh
+$ export AWS_ACCESS_KEY_ID="..."
+$ export AWS_SECRET_ACCESS_KEY="..."
+$ export LETSENCRYPT_DIR=/mnt/data/letsencrypt
+$ export domains="..." # list of domain_name_production and domain_alternative_names_production in config.json, each one prefixed with the `-d` flag
+$ certbot certonly --dry-run --non-interactive --agree-tos -m webmaster@"$DOMAIN_NAME" --work-dir "$LETSENCRYPT_DIR"/lib --logs-dir "$LETSENCRYPT_DIR"/log --config-dir "$LETSENCRYPT_DIR"/etc --dns-route53 --preferred-challenges dns $domains
+```
+At this point you should test whether the configuration is working; to prevent Certbot from actually creating or renewing the certificate, you can supply the `--dry-run` flag.
+
+To find out which IAM permissions Certbot needs on Amazon Route53, refer to the [Certbot documentation](https://certbot-dns-route53.readthedocs.io/en/stable/).
+
+Now that the configuration for Certbot is available, rebuild the NixOS system and deploy the certificates to the load balancers.
+```sh
+$ systemctl start nixos-rebuild
+$ systemctl start hetzner-certbot
+$ journalctl -u hetzner-certbot
+```
+
+#### Configuring Vault
+##### Creating the Database
+Log in as the `postgres` user and execute the following SQL commands.
+```sql
+CREATE DATABASE vault;
+
+CREATE USER vault WITH ENCRYPTED PASSWORD 'change to value of vault_db_password_$ENVIRONMENT';
+
+GRANT ALL PRIVILEGES ON DATABASE vault TO vault;
+\c vault
+CREATE TABLE vault_kv_store (
+  parent_path TEXT COLLATE "C" NOT NULL,
+  path        TEXT COLLATE "C",
+  key         TEXT COLLATE "C",
+  value       BYTEA,
+  CONSTRAINT pkey PRIMARY KEY (path, key)
+);
+CREATE INDEX parent_path_idx ON vault_kv_store (parent_path);
+GRANT ALL PRIVILEGES ON TABLE vault_kv_store TO vault;
+```
+Be sure to replace the password with the value which is set for `vault_db_password_production` in `secrets.json`.
+```sh
+$ sudo -i
+$ su -l postgres
+$ psql
+[.. SQL commands ..]
+$ exit
+```
+Afterwards, restart Vault.
+```sh
+$ systemctl restart vault
+$ systemctl status vault
+```
+
+##### Initializing Vault
+You can now access Vault on port 9443 via any hostname behind the load balancer, e.g. [https://guidelines.ch:9443/](https://guidelines.ch:9443/).
+As a first step, you will need to create a master key (set) which is used to unseal Vault on each startup.
+To use just one master key, initialize Vault with "Key shares" and "Key threshold" both set to "1".
+The "initial root token" is used to authenticate as an administrator with the Vault API or web UI.
+The "key" is used to unseal Vault upon startup.
+You can now set up the key-value (KV) secrets engine which is supported by the [settings](https://gitlab.com/infektcommon/settings) package.
+Be sure to use V2 of the KV engine.
+See the [Vault documentation](https://www.vaultproject.io/docs).
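+
+With the Vault CLI, enabling the engine and storing a first secret could look roughly like this (a sketch; the hostname is only an example, the mount path `kv` matches the policies in the `vault/` folder, and `SOME_SECRET` is a placeholder):
+```sh
+$ export VAULT_ADDR=https://guidelines.ch:9443
+$ vault login                                  # paste the initial root token
+$ vault secrets enable -path=kv -version=2 kv
+$ vault kv put kv/guidelines/production/api SOME_SECRET=changeme
+```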
+
+To unseal Vault manually, you can use curl, the Vault CLI, or the prompt on the web UI.
+```sh
+$ curl -XPUT http://127.0.0.1:8200/v1/sys/unseal -d '{"key": "master key"}'
+```
+```sh
+$ vault operator unseal
+Key (will be hidden):
+```
+
+##### Unseal Vault Automatically on Startup
+You can manually write the created master key to `/mnt/data/vault-root-token`.
+If this file exists and contains a valid master key, Vault will be unsealed automatically on startup.
+
+#### Configuring Elasticsearch
+This is going to feel a bit hacky, but we need to provision the default set of built-in Elasticsearch users, and the easiest way is to use x-pack.
+Since we use a non-standard path for the Elasticsearch "home", we need to copy some files to be able to use the `elasticsearch-setup-passwords` command.
+```sh
+$ export ES_HOME=/mnt/data/elasticsearch/ # currently missing x-pack commands
+$ find / -type d -name "jre"
+[..]
+/nix/store/g67sykn6hfjmgxhvr6cqv5c7v19d6490-openjdk-headless-8u272-b10-jre/lib/openjdk/jre
+$ export JAVA_HOME=/nix/store/g67sykn6hfjmgxhvr6cqv5c7v19d6490-openjdk-headless-8u272-b10-jre/lib/openjdk/jre
+$ find / -type f -name "elasticsearch-setup-passwords"
+[..]
+/nix/store/j5s9sb7r2hbkq16afm87rjssic3czrqx-elasticsearch-7.5.1/bin/elasticsearch-setup-passwords
+$ cp /nix/store/j5s9sb7r2hbkq16afm87rjssic3czrqx-elasticsearch-7.5.1/bin/x-pack-* /mnt/data/elasticsearch/bin/
+$ cp /nix/store/j5s9sb7r2hbkq16afm87rjssic3czrqx-elasticsearch-7.5.1/bin/elasticsearch-setup-passwords /mnt/data/elasticsearch/bin/
+$ /mnt/data/elasticsearch/bin/elasticsearch-setup-passwords interactive
+```
+Maybe there are better ways to do this using nix-shell.
+If you prefer the passwords to be generated for you, use the argument `auto` instead of `interactive`.
+
+##### Credentials for Kibana
+If you would like to use Kibana (recommended), add the password you set for the 'kibana' user to `/mnt/data/kibana-elasticsearch-password` (mode 600) and rebuild NixOS with `systemctl start nixos-rebuild`.
+Kibana can be accessed on port 8443 via any hostname behind the load balancer [https://guidelines.ch:8443/](https://guidelines.ch:8443/).
(sign in with the 'elastic' user) + +#### Configuring Guidelines +``` +CREATE DATABASE guidelines; +CREATE USER guidelines WITH ENCRYPTED PASSWORD 'changeme'; +GRANT ALL PRIVILEGES ON DATABASE guidelines TO guidelines; +``` diff --git a/config.json b/config.json new file mode 100644 index 0000000..b96536c --- /dev/null +++ b/config.json @@ -0,0 +1,6 @@ +{ + "terraform_packer_environment": "production", + "nixos_snapshot_id": "27588749", + "domain_name_production": "test.guidelines.ch", + "domain_alternative_names_production": "*.glv5.guidelines.ch *.test-glv5.guidelines.ch *.test.guidelines.ch" +} diff --git a/infrastructure/compute/compute.tf b/infrastructure/compute/compute.tf new file mode 100644 index 0000000..240eabd --- /dev/null +++ b/infrastructure/compute/compute.tf @@ -0,0 +1,5 @@ +module "compute" { + source = "../modules/compute" + + image_default_id = lookup(jsondecode(file("../../config.json")), "nixos_snapshot_id", "") +} diff --git a/infrastructure/compute/provider_backend.tf b/infrastructure/compute/provider_backend.tf new file mode 100644 index 0000000..65f2378 --- /dev/null +++ b/infrastructure/compute/provider_backend.tf @@ -0,0 +1,15 @@ +locals { + environment = lookup(jsondecode(file("../../config.json")), "terraform_packer_environment", "") +} + +terraform { + backend "http" { + lock_method="POST" + unlock_method="DELETE" + retry_wait_min="5" + } +} + +provider "hcloud" { + token = lookup(jsondecode(file("../../secrets.json")), "hcloud_token_${local.environment}", "") +} diff --git a/infrastructure/compute/versions.tf b/infrastructure/compute/versions.tf new file mode 100644 index 0000000..d9b5e57 --- /dev/null +++ b/infrastructure/compute/versions.tf @@ -0,0 +1,8 @@ +terraform { + required_providers { + hcloud = { + source = "hetznercloud/hcloud" + } + } + required_version = ">= 0.13" +} diff --git a/infrastructure/environment/environment.tf b/infrastructure/environment/environment.tf new file mode 100644 index 0000000..a28a735 --- /dev/null +++ b/infrastructure/environment/environment.tf @@ -0,0 +1,5 @@ +module "environment" { + source = "../modules/environment" + + environment_name = lookup(jsondecode(file("../../config.json")), "terraform_packer_environment", "") +} diff --git a/infrastructure/environment/outputs.tf b/infrastructure/environment/outputs.tf new file mode 100644 index 0000000..b52ea59 --- /dev/null +++ b/infrastructure/environment/outputs.tf @@ -0,0 +1,15 @@ +output "environment_name" { + value = module.environment.environment_name +} + +output "dc_default_id" { + value = module.environment.dc_default_id +} + +output "network_primary_id" { + value = module.environment.network_primary_id +} + +output "network_subnet_a_id" { + value = module.environment.network_subnet_a_id +} diff --git a/infrastructure/environment/provider_backend.tf b/infrastructure/environment/provider_backend.tf new file mode 100644 index 0000000..65f2378 --- /dev/null +++ b/infrastructure/environment/provider_backend.tf @@ -0,0 +1,15 @@ +locals { + environment = lookup(jsondecode(file("../../config.json")), "terraform_packer_environment", "") +} + +terraform { + backend "http" { + lock_method="POST" + unlock_method="DELETE" + retry_wait_min="5" + } +} + +provider "hcloud" { + token = lookup(jsondecode(file("../../secrets.json")), "hcloud_token_${local.environment}", "") +} diff --git a/infrastructure/environment/versions.tf b/infrastructure/environment/versions.tf new file mode 100644 index 0000000..d9b5e57 --- /dev/null +++ b/infrastructure/environment/versions.tf @@ -0,0 +1,8 
@@ +terraform { + required_providers { + hcloud = { + source = "hetznercloud/hcloud" + } + } + required_version = ">= 0.13" +} diff --git a/infrastructure/ingress/ingress.tf b/infrastructure/ingress/ingress.tf new file mode 100644 index 0000000..2dd03a5 --- /dev/null +++ b/infrastructure/ingress/ingress.tf @@ -0,0 +1,4 @@ +module "ingress" { + source = "../modules/ingress" + +} diff --git a/infrastructure/ingress/provider_backend.tf b/infrastructure/ingress/provider_backend.tf new file mode 100644 index 0000000..65f2378 --- /dev/null +++ b/infrastructure/ingress/provider_backend.tf @@ -0,0 +1,15 @@ +locals { + environment = lookup(jsondecode(file("../../config.json")), "terraform_packer_environment", "") +} + +terraform { + backend "http" { + lock_method="POST" + unlock_method="DELETE" + retry_wait_min="5" + } +} + +provider "hcloud" { + token = lookup(jsondecode(file("../../secrets.json")), "hcloud_token_${local.environment}", "") +} diff --git a/infrastructure/ingress/versions.tf b/infrastructure/ingress/versions.tf new file mode 100644 index 0000000..d9b5e57 --- /dev/null +++ b/infrastructure/ingress/versions.tf @@ -0,0 +1,8 @@ +terraform { + required_providers { + hcloud = { + source = "hetznercloud/hcloud" + } + } + required_version = ">= 0.13" +} diff --git a/infrastructure/modules/compute/certbot.sh b/infrastructure/modules/compute/certbot.sh new file mode 100644 index 0000000..8572773 --- /dev/null +++ b/infrastructure/modules/compute/certbot.sh @@ -0,0 +1,114 @@ +#!/bin/sh + +args="$@" + +set -o nounset +set -e errexit + +certbot=/run/current-system/sw/bin/certbot +jq=/run/current-system/sw/bin/jq +curl=/run/current-system/sw/bin/curl + +cert_name="$SERVICE-$ENVIRONMENT" +domains="-d $DOMAIN_NAME" +for san in $ALTERNATIVE_NAMES; do + domains="$domains -d $san" +done +key="$(sed 's/$/\\n/' $LETSENCRYPT_DIR/etc/live/$DOMAIN_NAME/privkey.pem | tr -d '\n')" +fullchain="$(sed 's/$/\\n/' $LETSENCRYPT_DIR/etc/live/$DOMAIN_NAME/fullchain.pem | tr -d '\n')" +service_ports="$(printf "%s" "$SERVICE_PORTS" | tr ',' ' ' | xargs)" + +case "$args" in + "--renew") + printf "Running Certbot (for renewal) before deploying certificate..\n\n" + #certbot certonly --non-interactive --agree-tos -m webmaster@"$DOMAIN_NAME" --work-dir "$LETSENCRYPT_DIR"/lib --logs-dir "$LETSENCRYPT_DIR"/log --config-dir "$LETSENCRYPT_DIR"/etc --dns-route53 --preferred-challenges dns $domains + ;; + *) + printf "Deploying existing certificate without running Certbot..\n\n" + ;; +esac + + +lb_id=$($curl -s -H "Authorization: Bearer $API_TOKEN" "https://api.hetzner.cloud/v1/load_balancers?label_selector=service==$SERVICE%2Cenvironment==$ENVIRONMENT" | $jq '.load_balancers[0].id') + +case "$lb_id" in + ""|*[!0-9]*) + printf "Load balancer for %s in environment \"%s\" could not be found\n" "$SERVICE" "$ENVIRONMENT" + exit 1 + ;; +esac + +update_load_balancer() { + case "$1" in + *[0-9]*) + _proto=https + _redirect_http="true" + _certs="[$1]" + ;; + "") + printf "Temporarily disabling https for renewal\n" + _proto=http + _redirect_http="false" + _certs="[]" + ;; + *) + printf "Something went wrong, tried to process certificate with ID \"%s\"\n" "$cert_id" + exit 1 + ;; + esac + + for sp in $service_ports; do + error=$($curl -s -XPOST -H "Authorization: Bearer $API_TOKEN" -H "Content-Type: application/json" -d "{\"listen_port\": $sp, \"protocol\": \"$_proto\", \"http\":{\"redirect_http\": $_redirect_http, \"certificates\": $_certs}}" "https://api.hetzner.cloud/v1/load_balancers/$lb_id/actions/update_service" | $jq -r '.error') + 
case "$(printf "%s" "$error" | $jq -r '.code')" in + "null") + printf "Certificate with ID \"%s\" has been successfully assigned to service port %s on the load balancer with ID \"%s\"\n" "$1" "$sp" "$lb_id" + ;; + *) + printf "There has been an unexpected error: %s\n" "$error" + exit 1 + ;; + esac + done +} + +cert_id=$($curl -s -H "Authorization: Bearer $API_TOKEN" "https://api.hetzner.cloud/v1/certificates?label_selector=service=$SERVICE%2Cenvironment=$ENVIRONMENT" | $jq '.certificates[0].id') + +# +# Determine whether a certificate for the same service and environment already exists +# +case "$cert_id" in + ""|*[0-9]*) + update_load_balancer "" + printf "Deleting existing certificate with id %s so that it can be replaced by a new one\n" "$cert_id" + $curl -s -X DELETE -H "Authorization: Bearer $API_TOKEN" "https://api.hetzner.cloud/v1/certificates/$cert_id" + ;; + "null") + printf "Currently there is no certificate deployed for %s in environment \"%s\", setting up a new one\n" "$SERVICE" "$ENVIRONMENT" + ;; + *) + printf "Something went wrong: %s\n" "$cert_id" + exit 1 + ;; +esac + +# +# Deploy key and certificate to Hetzner security service +# +error=$($curl -s -XPOST -H "Authorization: Bearer $API_TOKEN" -H "Content-Type: application/json" -d "{\"name\":\"$cert_name\",\"labels\":{\"service\": \"$SERVICE\", \"environment\": \"$ENVIRONMENT\"},\"certificate\":\"$fullchain\",\"private_key\":\"$key\"}" 'https://api.hetzner.cloud/v1/certificates' | $jq '.error') + +case "$(printf "%s" "$error" | $jq -r '.code')" in + "null") + cert_id=$($curl -s -H "Authorization: Bearer $API_TOKEN" "https://api.hetzner.cloud/v1/certificates?label_selector=service=$SERVICE%2Cenvironment=$ENVIRONMENT" | $jq '.certificates[0].id') + printf "The certificate %s has been successfully added to the Hetzner security service with ID \"%s\"\n" "$cert_name" "$cert_id" + update_load_balancer "$cert_id" + ;; + "uniqueness_error") + printf "A certificate with the same fingerprint already exists! 
Exiting\n" + exit 1 + ;; + *) + printf "There has been an unexpected error: %s\n" "$error" + exit 1 + ;; +esac + diff --git a/infrastructure/modules/compute/cloudinit.tpl b/infrastructure/modules/compute/cloudinit.tpl new file mode 100644 index 0000000..a0455cb --- /dev/null +++ b/infrastructure/modules/compute/cloudinit.tpl @@ -0,0 +1,91 @@ +#cloud-config + +write_files: + - path: /opt/cloud-init-misc-data/environment + content: ${environment} + owner: root:root + permissions: '0644' + - encoding: b64 + path: /opt/certbot.sh + content: ${certbot_script} + owner: root:root + permissions: '0700' + - encoding: b64 + path: /etc/nixos/certbot.nix + content: ${nix_certbot} + owner: root:root + permissions: '0644' + - encoding: b64 + path: /etc/nixos/configuration.nix + content: ${nix_configuration} + owner: root:root + permissions: '0644' + - encoding: b64 + path: /etc/nixos/postgresql.nix + content: ${nix_postgresql} + owner: root:root + permissions: '0644' + - encoding: b64 + path: /etc/nixos/elasticsearch.nix + content: ${nix_elasticsearch} + owner: root:root + permissions: '0644' + - encoding: b64 + path: /etc/nixos/vault.nix + content: ${nix_vault} + owner: root:root + permissions: '0644' + - encoding: b64 + path: /etc/nixos/guidelines.nix + content: ${nix_guidelines} + owner: root:root + permissions: '0644' + - path: /opt/cloud-init-misc-data/domain_name + content: ${domain_name} + owner: root:root + permissions: '0644' + - encoding: b64 + path: /opt/cloud-init-misc-data/domain_alternative_names + content: ${domain_alternative_names} + owner: root:root + permissions: '0644' + - path: /opt/cloud-init-misc-data/vault_db_password + content: ${vault_db_password} + owner: root:root + permissions: '0600' + - path: /opt/cloud-init-misc-data/hcloud_token + content: ${hcloud_token} + owner: root:root + permissions: '0600' + - path: /opt/cloud-init-misc-data/aws_access_key_id + content: ${aws_access_key_id} + owner: root:root + permissions: '0600' + - path: /opt/cloud-init-misc-data/aws_secret_access_key + content: ${aws_secret_access_key} + owner: root:root + permissions: '0600' + - path: /root/.docker/config.json + content: | + { + "auths": { + "https://registry.gitlab.com": { + "auth": "${gitlab_password}", + "email": "${gitlab_username}" + } + } + } + owner: root:root + permissions: '0600' + - path: /opt/guidelines.json + content: | + { + "api": "latest", + "web": "latest", + "html2pdf": "latest", + "filestore": "latest" + } + owner: root:root + permissions: '0644' +runcmd: + - systemctl start nixos-rebuild.service diff --git a/infrastructure/modules/compute/nix/#elasticsearch.nix# b/infrastructure/modules/compute/nix/#elasticsearch.nix# new file mode 100644 index 0000000..c52bd67 --- /dev/null +++ b/infrastructure/modules/compute/nix/#elasticsearch.nix# @@ -0,0 +1,29 @@ +{ pkgs, ... 
}: +{ + nixpkgs.config.allowUnfree = true; + services.elasticsearch.enable = true; + services.elasticsearch.package = pkgs.elasticsearch7; + services.elasticsearch.dataDir = "/mnt/data/elasticsearch"; + services.elasticsearch.listenAddress = "10.0.1.51"; + services.elasticsearch.extraConf = '' + discovery.type: single-node + xpack.security.enabled: true + ''; + + services.kibana.enable = true; + services.kibana.package = pkgs.kibana7; + services.kibana.dataDir = "/mnt/data/kibana"; + services.kibana.listenAddress = "10.0.1.51"; + services.kibana.elasticsearch.hosts = [ "http://10.0.1.51:9200" ]; + services.kibana.elasticsearch.username = "kibana"; + services.kibana.elasticsearch.password = (builtins.readFile /mnt/data/kibana-elasticsearch-password); + + system.activationScripts = { + mnt = { + text = "mkdir -p /mnt/data/{elasticsearch,kibana} && chown -R elasticsearch:elasticsearch /mnt/data/elasticsearch && chown -R kibana:root /mnt/data/kibana"; + deps = []; + }; + }; + + networking.firewall.allowedTCPPorts = [ 9200 9300 5601 ]; +} diff --git a/infrastructure/modules/compute/nix/.#elasticsearch.nix b/infrastructure/modules/compute/nix/.#elasticsearch.nix new file mode 120000 index 0000000..330c640 --- /dev/null +++ b/infrastructure/modules/compute/nix/.#elasticsearch.nix @@ -0,0 +1 @@ +marco@furiosa.local.77847 \ No newline at end of file diff --git a/infrastructure/modules/compute/nix/certbot.nix b/infrastructure/modules/compute/nix/certbot.nix new file mode 100644 index 0000000..f421cb9 --- /dev/null +++ b/infrastructure/modules/compute/nix/certbot.nix @@ -0,0 +1,17 @@ +{ ... }: { + systemd.services.hetzner-certbot = { + environment = { + API_TOKEN = (builtins.readFile /opt/cloud-init-misc-data/hcloud_token); + AWS_ACCESS_KEY_ID = (builtins.readFile /opt/cloud-init-misc-data/aws_access_key_id); + AWS_SECRET_ACCESS_KEY = (builtins.readFile /opt/cloud-init-misc-data/aws_secret_access_key); + ENVIRONMENT = (builtins.readFile /opt/cloud-init-misc-data/environment); + SERVICE = "guidelines"; + DOMAIN_NAME = (builtins.readFile /opt/cloud-init-misc-data/domain_name); + ALTERNATIVE_NAMES = (builtins.readFile /opt/cloud-init-misc-data/domain_alternative_names); + LETSENCRYPT_DIR = "/mnt/data/letsencrypt"; + SERVICE_PORTS = "443,8443,9443"; # guidelines, kibana, vault + }; + serviceConfig.Type = "oneshot"; + script = "/opt/certbot.sh"; + }; +} diff --git a/infrastructure/modules/compute/nix/configuration.nix b/infrastructure/modules/compute/nix/configuration.nix new file mode 100644 index 0000000..d9523d1 --- /dev/null +++ b/infrastructure/modules/compute/nix/configuration.nix @@ -0,0 +1,18 @@ +{ pkgs, ... }: { + imports = [ + ./system.nix + ./postgresql.nix + ./elasticsearch.nix + ./vault.nix + ./guidelines.nix + ./certbot.nix + ]; + + environment.systemPackages = [ + pkgs.certbot-full + pkgs.jq + pkgs.screen + pkgs.vault + pkgs.vim + ]; +} diff --git a/infrastructure/modules/compute/nix/elasticsearch.nix b/infrastructure/modules/compute/nix/elasticsearch.nix new file mode 100644 index 0000000..62acd59 --- /dev/null +++ b/infrastructure/modules/compute/nix/elasticsearch.nix @@ -0,0 +1,29 @@ +{ pkgs, ... 
}: +{ + nixpkgs.config.allowUnfree = true; + services.elasticsearch.enable = true; + services.elasticsearch.package = pkgs.elasticsearch7; + services.elasticsearch.dataDir = "/mnt/data/elasticsearch"; + services.elasticsearch.listenAddress = "10.0.1.51"; + services.elasticsearch.extraConf = '' + discovery.type: single-node + xpack.security.enabled: true + ''; + + services.kibana.enable = true; + services.kibana.package = pkgs.kibana7; + services.kibana.dataDir = "/mnt/data/kibana"; + services.kibana.listenAddress = "10.0.1.51"; + services.kibana.elasticsearch.hosts = [ "http://10.0.1.51:9200" ]; + services.kibana.elasticsearch.username = "kibana"; + services.kibana.elasticsearch.password = (builtins.readFile /opt/cloud-init-misc-data/environment); + + system.activationScripts = { + mnt = { + text = "mkdir -p /mnt/data/{elasticsearch,kibana} && chown -R elasticsearch:elasticsearch /mnt/data/elasticsearch && chown -R kibana:root /mnt/data/kibana"; + deps = []; + }; + }; + + networking.firewall.allowedTCPPorts = [ 9200 9300 5601 ]; +} diff --git a/infrastructure/modules/compute/nix/guidelines.nix b/infrastructure/modules/compute/nix/guidelines.nix new file mode 100644 index 0000000..bb913db --- /dev/null +++ b/infrastructure/modules/compute/nix/guidelines.nix @@ -0,0 +1,73 @@ +{ pkgs, lib, ... }: +let + releaseVersion = app: (builtins.fromJSON (builtins.readFile "/mnt/data/guidelines.json")).${app}; +in +{ + virtualisation = { + podman = { + enable = true; + dockerCompat = true; + }; + + oci-containers = { + backend = "podman"; + }; + + oci-containers.containers."api" = { + image = "registry.gitlab.com/infektweb/glv5/api:${releaseVersion "api"}"; + ports = [ + "8001:8080" + ]; + extraOptions = [ + "--add-host=host:10.0.1.51" + ]; + environment = { + "PORT" = "8080"; + "BASE_CLIENT_URL" = "http://[space].test-glv5.guidelines.ch"; + "ENVIRONMENT" = (builtins.readFile /opt/cloud-init-misc-data/environment); + "VAULT_SECRET_PATH" = "kv/data/guidelines/${(builtins.readFile /opt/cloud-init-misc-data/environment)}/api"; + "VAULT_URL" = "http://host:8200"; + + }; + volumes = [ + "/mnt/data/vault-guidelines-api-token:/vault-token" + ]; + #extraDockerOptions = [ "--network=foo" ]; + }; + + oci-containers.containers."web" = { + image = "registry.gitlab.com/infektweb/glv5/web"; + ports = [ + "80:8080" + ]; + extraOptions = [ + "--add-host=host:10.0.1.51" + ]; + environment = { + "API_URL" = "http://host:8001"; + }; + }; + + #oci-containers.containers."containerapi" = { + # image = "alpine"; + # volumes = [ + # "/run/podman-containers.sock:/podman-containers.sock" + # ]; + # entrypoint = "/bin/sleep"; + # cmd = ["10000"]; + #}; + }; + + systemd.services.docker-podman-rest-api = { + serviceConfig.Type = "simple"; + serviceConfig.Restart = lib.mkForce "always"; + wantedBy = [ "multi-user.target" ]; + script = '' + /run/current-system/sw/bin/podman system service --time=0 unix:///run/podman-containers.sock + ''; + }; + + services.redis.enable = true; + services.redis.requirePass = "p15c4e6538de2061edd65a52ab216ba071d78b1532a937c1c3d5821d5c571c0cf"; + networking.firewall.allowedTCPPorts = [ 6379 ]; +} diff --git a/infrastructure/modules/compute/nix/postgresql.nix b/infrastructure/modules/compute/nix/postgresql.nix new file mode 100644 index 0000000..ae1a865 --- /dev/null +++ b/infrastructure/modules/compute/nix/postgresql.nix @@ -0,0 +1,26 @@ +{ pkgs, ... 
}: +{ + services.postgresql.enable = true; + services.postgresql.package = pkgs.postgresql_12; + services.postgresql.dataDir = "/mnt/data/postgresql"; + services.postgresql.enableTCPIP = true; + services.postgresql.authentication = '' + host all all 10.88.0.0/16 trust + ''; + + system.activationScripts = { + mnt = { + text = "chmod 755 /mnt && mkdir -p /mnt/data/postgresql && chown -R postgres:postgres /mnt/data/postgresql"; + deps = []; + }; + }; + + fileSystems."/mnt/data" = { + device = "/dev/sdb"; + fsType = "ext4"; + label = "data"; + options = [ "nofail" ]; + }; + + networking.firewall.allowedTCPPorts = [5432]; +} diff --git a/infrastructure/modules/compute/nix/vault.nix b/infrastructure/modules/compute/nix/vault.nix new file mode 100644 index 0000000..21aa0ff --- /dev/null +++ b/infrastructure/modules/compute/nix/vault.nix @@ -0,0 +1,24 @@ +{ pkgs, ... }: +{ + services.vault.enable = true; + services.vault.package = pkgs.vault-bin; + services.vault.address = "0.0.0.0:8200"; + services.vault.storageBackend = "postgresql"; + services.vault.storageConfig = " + connection_url = \"postgres://vault:" + (builtins.readFile /opt/cloud-init-misc-data/vault_db_password) + "@localhost:5432/vault?sslmode=disable\" + "; + services.vault.extraConfig = " + ui = true + "; + + systemd.services.vault-unseal = { + serviceConfig.Type = "simple"; + wantedBy = [ "multi-user.target" ]; + after = [ "vault.service" ]; + script = '' + file=/mnt/data/vault-root-token; test -f "$file" || { printf "Stopping automatic unseal, no token present at $file\n"; exit; }; count=0; while [ "$count" -le 10 ]; do count=`expr "$count" + 1`; printf "=> Trying to unseal Vault..\n"; /run/current-system/sw/bin/curl -XPUT http://127.0.0.1:8200/v1/sys/unseal -d '{"key": "'$(head -n 1 $file)'"}' && break; sleep 10; done + ''; + }; + + networking.firewall.allowedTCPPorts = [8200]; +} diff --git a/infrastructure/modules/compute/remote_state.tf b/infrastructure/modules/compute/remote_state.tf new file mode 100644 index 0000000..e6f521d --- /dev/null +++ b/infrastructure/modules/compute/remote_state.tf @@ -0,0 +1,34 @@ +locals { + environment = lookup(jsondecode(file("../../config.json")), "terraform_packer_environment", "") + project = lookup(jsondecode(file("../../secrets.json")), "terraform_gitlab_backend_project", "") + username = lookup(jsondecode(file("../../secrets.json")), "terraform_gitlab_backend_username", "") + password = lookup(jsondecode(file("../../secrets.json")), "terraform_gitlab_backend_password", "") +} + +data "terraform_remote_state" "environment" { + backend = "http" + config = { + address="https://gitlab.com/api/v4/projects/${local.project}/terraform/state/${local.environment}-environment" + lock_address="https://gitlab.com/api/v4/projects/${local.project}/terraform/state/${local.environment}-environment/lock" + unlock_address="https://gitlab.com/api/v4/projects/${local.project}/terraform/state/${local.environment}-environment/lock" + username=local.username + password=local.password + lock_method="POST" + unlock_method="DELETE" + retry_wait_min="5" + } +} + +data "terraform_remote_state" "storage" { + backend = "http" + config = { + address="https://gitlab.com/api/v4/projects/${local.project}/terraform/state/${local.environment}-storage" + lock_address="https://gitlab.com/api/v4/projects/${local.project}/terraform/state/${local.environment}-storage/lock" + unlock_address="https://gitlab.com/api/v4/projects/${local.project}/terraform/state/${local.environment}-storage/lock" + username=local.username + 
password=local.password + lock_method="POST" + unlock_method="DELETE" + retry_wait_min="5" + } +} diff --git a/infrastructure/modules/compute/servers.tf b/infrastructure/modules/compute/servers.tf new file mode 100644 index 0000000..0bd1635 --- /dev/null +++ b/infrastructure/modules/compute/servers.tf @@ -0,0 +1,54 @@ +resource "hcloud_server" "guidelines1" { + name = "guidelines1-${data.terraform_remote_state.environment.outputs.environment_name}" + + server_type = "cx21" + image = var.image_default_id + keep_disk = true + location = data.terraform_remote_state.environment.outputs.dc_default_id + user_data = data.template_cloudinit_config.guidelines.rendered + + labels = { + lb = "guidelines-${data.terraform_remote_state.environment.outputs.environment_name}" + environment = data.terraform_remote_state.environment.outputs.environment_name + } +} + +resource "hcloud_volume_attachment" "guidelines1_data1" { + server_id = hcloud_server.guidelines1.id + volume_id = data.terraform_remote_state.storage.outputs.volume_data1_id + automount = true +} + +resource "hcloud_server_network" "guidelines1_primary" { + server_id = hcloud_server.guidelines1.id + network_id = data.terraform_remote_state.environment.outputs.network_primary_id + ip = "10.0.1.51" +} + +data "template_cloudinit_config" "guidelines" { + gzip = false + base64_encode = false + + part { + filename = "init.cfg" + content_type = "text/cloud-config" + content = templatefile("${path.module}/cloudinit.tpl", { + certbot_script = filebase64("${path.module}/certbot.sh") + environment = data.terraform_remote_state.environment.outputs.environment_name + gitlab_password = lookup(jsondecode(file("../../secrets.json")), "gitlab_deploy_token_password", "not found") + gitlab_username = lookup(jsondecode(file("../../secrets.json")), "gitlab_deploy_token_username", "not found") + hcloud_token = lookup(jsondecode(file("../../secrets.json")), "hcloud_token_${data.terraform_remote_state.environment.outputs.environment_name}", "not found") + aws_access_key_id = lookup(jsondecode(file("../../secrets.json")), "aws_access_key_id", "not found") + aws_secret_access_key = lookup(jsondecode(file("../../secrets.json")), "aws_secret_access_key", "not found") + domain_name = lookup(jsondecode(file("../../config.json")), "domain_name_${data.terraform_remote_state.environment.outputs.environment_name}", "not found") + domain_alternative_names = base64encode(lookup(jsondecode(file("../../config.json")), "domain_alternative_names_${data.terraform_remote_state.environment.outputs.environment_name}", "not found")) + vault_db_password = lookup(jsondecode(file("../../secrets.json")), "vault_db_password_${data.terraform_remote_state.environment.outputs.environment_name}", "not found") + nix_certbot = filebase64("${path.module}/nix/certbot.nix") + nix_configuration = filebase64("${path.module}/nix/configuration.nix") + nix_elasticsearch = filebase64("${path.module}/nix/elasticsearch.nix") + nix_guidelines = filebase64("${path.module}/nix/guidelines.nix") + nix_postgresql = filebase64("${path.module}/nix/postgresql.nix") + nix_vault = filebase64("${path.module}/nix/vault.nix") + }) + } +} diff --git a/infrastructure/modules/compute/variables.tf b/infrastructure/modules/compute/variables.tf new file mode 100644 index 0000000..90b0890 --- /dev/null +++ b/infrastructure/modules/compute/variables.tf @@ -0,0 +1,3 @@ +variable "image_default_id" { + description = "Image (or snapshot) to use when provisoning servers" +} diff --git a/infrastructure/modules/compute/versions.tf 
b/infrastructure/modules/compute/versions.tf new file mode 100644 index 0000000..d9b5e57 --- /dev/null +++ b/infrastructure/modules/compute/versions.tf @@ -0,0 +1,8 @@ +terraform { + required_providers { + hcloud = { + source = "hetznercloud/hcloud" + } + } + required_version = ">= 0.13" +} diff --git a/infrastructure/modules/environment/network_subnets.tf b/infrastructure/modules/environment/network_subnets.tf new file mode 100644 index 0000000..39fe81b --- /dev/null +++ b/infrastructure/modules/environment/network_subnets.tf @@ -0,0 +1,6 @@ +resource "hcloud_network_subnet" "a" { + network_id = hcloud_network.primary.id + type = "cloud" + network_zone = var.eu_network_zone + ip_range = var.subnet_a +} diff --git a/infrastructure/modules/environment/networks.tf b/infrastructure/modules/environment/networks.tf new file mode 100644 index 0000000..6c8deac --- /dev/null +++ b/infrastructure/modules/environment/networks.tf @@ -0,0 +1,8 @@ +resource "hcloud_network" "primary" { + name = "primary-${var.environment_name}" + ip_range = var.primary_network + + labels = { + environment = var.environment_name + } +} diff --git a/infrastructure/modules/environment/outputs.tf b/infrastructure/modules/environment/outputs.tf new file mode 100644 index 0000000..0ba1323 --- /dev/null +++ b/infrastructure/modules/environment/outputs.tf @@ -0,0 +1,27 @@ +output "environment_name" { + value = var.environment_name +} + +output "dc_default_id" { + value = var.dc_default_id +} + +output "dc_nuremberg_id" { + value = var.dc_nuremberg_id +} + +output "dc_falkenstein_id" { + value = var.dc_falkenstein_id +} + +output "dc_helsinki_id" { + value = var.dc_helsinki_id +} + +output "network_primary_id" { + value = hcloud_network.primary.id +} + +output "network_subnet_a_id" { + value = hcloud_network_subnet.a.id +} diff --git a/infrastructure/modules/environment/variables.tf b/infrastructure/modules/environment/variables.tf new file mode 100644 index 0000000..0e236fd --- /dev/null +++ b/infrastructure/modules/environment/variables.tf @@ -0,0 +1,40 @@ +variable "environment_name" { + description = "Unique name which identifies the environment (e.g.
'production', 'staging', ..)" + default = "testing" +} + +variable "dc_default_id" { + default = "nbg1" +} + +variable "dc_nuremberg_id" { + default = "nbg1" +} + +variable "dc_falkenstein_id" { + default = "fsn1" +} + +variable "dc_helsinki_id" { + default = "hel1" +} + +variable "eu_network_zone" { + default = "eu-central" +} + +variable "primary_network" { + default = "10.0.0.0/16" +} + +variable "subnet_a" { + default = "10.0.1.0/24" +} + +variable "subnet_b" { + default = "10.0.2.0/24" +} + +variable "subnet_c" { + default = "10.0.3.0/24" +} diff --git a/infrastructure/modules/environment/versions.tf b/infrastructure/modules/environment/versions.tf new file mode 100644 index 0000000..d9b5e57 --- /dev/null +++ b/infrastructure/modules/environment/versions.tf @@ -0,0 +1,8 @@ +terraform { + required_providers { + hcloud = { + source = "hetznercloud/hcloud" + } + } + required_version = ">= 0.13" +} diff --git a/infrastructure/modules/ingress/load_balancers.tf b/infrastructure/modules/ingress/load_balancers.tf new file mode 100644 index 0000000..1765051 --- /dev/null +++ b/infrastructure/modules/ingress/load_balancers.tf @@ -0,0 +1,64 @@ +resource "hcloud_load_balancer" "guidelines" { + name = "guidelines-${data.terraform_remote_state.environment.outputs.environment_name}" + load_balancer_type = "lb11" + location = data.terraform_remote_state.environment.outputs.dc_default_id + + labels = { + environment = data.terraform_remote_state.environment.outputs.environment_name + service = "guidelines" + } +} + +resource "hcloud_load_balancer_target" "guidelines" { + type = "label_selector" + load_balancer_id = hcloud_load_balancer.guidelines.id + label_selector = "lb=guidelines-${data.terraform_remote_state.environment.outputs.environment_name}" +} + +resource "hcloud_load_balancer_network" "guidelines_primary" { + load_balancer_id = hcloud_load_balancer.guidelines.id + network_id = data.terraform_remote_state.environment.outputs.network_primary_id + ip = "10.0.1.11" +} + +resource "hcloud_load_balancer_service" "guidelines-http-to-https-with-termination" { + load_balancer_id = hcloud_load_balancer.guidelines.id + protocol = "http" + listen_port = 443 + destination_port = 80 + + http { + sticky_sessions = false + #certificates = [] + #redirect_http = true + } + // TODO: Add health check +} + +resource "hcloud_load_balancer_service" "guidelines-kibana-http-to-https-with-termination" { + load_balancer_id = hcloud_load_balancer.guidelines.id + protocol = "http" + listen_port = 8443 + destination_port = 5601 + + http { + sticky_sessions = false + #certificates = [] + #redirect_http = true + } + // TODO: Add health check +} + +resource "hcloud_load_balancer_service" "vault-http-to-https-with-termination" { + load_balancer_id = hcloud_load_balancer.guidelines.id + protocol = "http" + listen_port = 9443 + destination_port = 8200 + + http { + sticky_sessions = false + #certificates = [] + #redirect_http = true + } + // TODO: Add health check +} diff --git a/infrastructure/modules/ingress/remote_state.tf b/infrastructure/modules/ingress/remote_state.tf new file mode 100644 index 0000000..28f826e --- /dev/null +++ b/infrastructure/modules/ingress/remote_state.tf @@ -0,0 +1,20 @@ +locals { + environment = lookup(jsondecode(file("../../config.json")), "terraform_packer_environment", "") + project = lookup(jsondecode(file("../../secrets.json")), "terraform_gitlab_backend_project", "") + username = lookup(jsondecode(file("../../secrets.json")), "terraform_gitlab_backend_username", "") + password = 
lookup(jsondecode(file("../../secrets.json")), "terraform_gitlab_backend_password", "") +} + +data "terraform_remote_state" "environment" { + backend = "http" + config = { + address="https://gitlab.com/api/v4/projects/${local.project}/terraform/state/${local.environment}-environment" + lock_address="https://gitlab.com/api/v4/projects/${local.project}/terraform/state/${local.environment}-environment/lock" + unlock_address="https://gitlab.com/api/v4/projects/${local.project}/terraform/state/${local.environment}-environment/lock" + username=local.username + password=local.password + lock_method="POST" + unlock_method="DELETE" + retry_wait_min="5" + } +} diff --git a/infrastructure/modules/ingress/versions.tf b/infrastructure/modules/ingress/versions.tf new file mode 100644 index 0000000..d9b5e57 --- /dev/null +++ b/infrastructure/modules/ingress/versions.tf @@ -0,0 +1,8 @@ +terraform { + required_providers { + hcloud = { + source = "hetznercloud/hcloud" + } + } + required_version = ">= 0.13" +} diff --git a/infrastructure/modules/storage/outputs.tf b/infrastructure/modules/storage/outputs.tf new file mode 100644 index 0000000..35855f5 --- /dev/null +++ b/infrastructure/modules/storage/outputs.tf @@ -0,0 +1,3 @@ +output "volume_data1_id" { + value = hcloud_volume.data1.id +} diff --git a/infrastructure/modules/storage/remote_state.tf b/infrastructure/modules/storage/remote_state.tf new file mode 100644 index 0000000..b9da03c --- /dev/null +++ b/infrastructure/modules/storage/remote_state.tf @@ -0,0 +1,20 @@ +locals { + environment = lookup(jsondecode(file("../../config.json")), "terraform_packer_environment", "") + project = lookup(jsondecode(file("../../secrets.json")), "terraform_gitlab_backend_project", "") + username = lookup(jsondecode(file("../../secrets.json")), "terraform_gitlab_backend_username", "") + password = lookup(jsondecode(file("../../secrets.json")), "terraform_gitlab_backend_password", "") +} + +data "terraform_remote_state" "environment" { + backend = "http" + config = { + address="https://gitlab.com/api/v4/projects/22967934/terraform/state/production-environment" + lock_address="https://gitlab.com/api/v4/projects/22967934/terraform/state/production-environment/lock" + unlock_address="https://gitlab.com/api/v4/projects/22967934/terraform/state/production-environment/lock" + username=local.username + password=local.password + lock_method="POST" + unlock_method="DELETE" + retry_wait_min="5" + } +} diff --git a/infrastructure/modules/storage/versions.tf b/infrastructure/modules/storage/versions.tf new file mode 100644 index 0000000..d9b5e57 --- /dev/null +++ b/infrastructure/modules/storage/versions.tf @@ -0,0 +1,8 @@ +terraform { + required_providers { + hcloud = { + source = "hetznercloud/hcloud" + } + } + required_version = ">= 0.13" +} diff --git a/infrastructure/modules/storage/volumes.tf b/infrastructure/modules/storage/volumes.tf new file mode 100644 index 0000000..de20696 --- /dev/null +++ b/infrastructure/modules/storage/volumes.tf @@ -0,0 +1,6 @@ +resource "hcloud_volume" "data1" { + name = "data1-${data.terraform_remote_state.environment.outputs.environment_name}" + size = 10 + format = "ext4" + location = data.terraform_remote_state.environment.outputs.dc_default_id +} diff --git a/infrastructure/storage/outputs.tf b/infrastructure/storage/outputs.tf new file mode 100644 index 0000000..7fc039e --- /dev/null +++ b/infrastructure/storage/outputs.tf @@ -0,0 +1,3 @@ +output "volume_data1_id" { + value = module.storage.volume_data1_id +} diff --git 
a/infrastructure/storage/provider_backend.tf b/infrastructure/storage/provider_backend.tf new file mode 100644 index 0000000..65f2378 --- /dev/null +++ b/infrastructure/storage/provider_backend.tf @@ -0,0 +1,15 @@ +locals { + environment = lookup(jsondecode(file("../../config.json")), "terraform_packer_environment", "") +} + +terraform { + backend "http" { + lock_method="POST" + unlock_method="DELETE" + retry_wait_min="5" + } +} + +provider "hcloud" { + token = lookup(jsondecode(file("../../secrets.json")), "hcloud_token_${local.environment}", "") +} diff --git a/infrastructure/storage/storage.tf b/infrastructure/storage/storage.tf new file mode 100644 index 0000000..19674d9 --- /dev/null +++ b/infrastructure/storage/storage.tf @@ -0,0 +1,4 @@ +module "storage" { + source = "../modules/storage" + +} diff --git a/infrastructure/storage/versions.tf b/infrastructure/storage/versions.tf new file mode 100644 index 0000000..d9b5e57 --- /dev/null +++ b/infrastructure/storage/versions.tf @@ -0,0 +1,8 @@ +terraform { + required_providers { + hcloud = { + source = "hetznercloud/hcloud" + } + } + required_version = ">= 0.13" +} diff --git a/nixos/Makefile b/nixos/Makefile new file mode 100644 index 0000000..31dedba --- /dev/null +++ b/nixos/Makefile @@ -0,0 +1,11 @@ +.PHONY: nixos all help + +nixos-image: + test `which packer` || { printf "Please install Packer from https://packer.io/\n"; exit 1; }; \ + vars=""; \ + test "$(VERSION)" && vars=" -var \"nixos-version=$(VERSION)\""; \ + test "$(BUILD)" && vars="$$vars -var \"build-version=$(BUILD)\""; \ + eval packer build "$$vars" . + +all help: + @printf "Please do not call this Makefile directly, use the wrapper located at the project root\n" diff --git a/nixos/build.pkr.hcl b/nixos/build.pkr.hcl new file mode 100644 index 0000000..a91d5a2 --- /dev/null +++ b/nixos/build.pkr.hcl @@ -0,0 +1,71 @@ +locals { + environment = lookup(jsondecode(file("../config.json")), "terraform_packer_environment", "") +} + +variable "nixos-version" { + description = "NixOS version https://nixos.org/manual/nixos/stable/release-notes.html" + default = "20.09" +} + +variable "build-version" { + description = "Internal build version" + default = "0.0.1" +} + +variable "nixos-infect" { + description = "Ensure the desired version of nixos-infect is retrieved https://github.com/elitak/nixos-infect/commits/master" + type = map(string) + default = { + "commit_hash" = "30441b1" + "checksum_hash" = "daa557fa29609e8dbb0f2e9f62c08101" + } +} + +source "hcloud" "nixos-build-spec" { + token = lookup(jsondecode(file("../secrets.json")), "hcloud_token_${local.environment}", "") + image = "ubuntu-20.04" + location = "nbg1" + server_type = "cx11" + ssh_username = "root" + snapshot_name = "nixos-${var.nixos-version}-${var.build-version}" + #ssh_keys = [""] # A user 'operator' with an authorized public key in is set up via Nix (see ./nix/system.nix) +} + +build { + name = "nixos" + + sources = ["sources.hcloud.nixos-build-spec"] + + provisioner "shell" { + expect_disconnect = true + environment_vars = [ + "NIXOS_INFECT_COMMIT_HASH=${lookup(var.nixos-infect, "commit_hash", "")}", + "NIXOS_INFECT_CHECKSUM_HASH=${lookup(var.nixos-infect, "checksum_hash", "")}", + "NIXOS_VERSION=${var.nixos-version}" + ] + script = "install.sh" + } + + provisioner "shell" { + inline = [ + "mkdir /tmp/packer_transfer" + ] + } + + provisioner "file" { + source = "nix/" + destination = "/tmp/packer_transfer" + } + + provisioner "shell" { + inline = [ + "rm /etc/nixos/*.nix", + "install -m 0644 -o root -g root 
/tmp/packer_transfer/*.nix /etc/nixos/", + "nixos-rebuild build && nixos-rebuild switch" + ] + } +} + +packer { + required_version = ">= 1.6.5, < 2.0.0" +} diff --git a/nixos/install.sh b/nixos/install.sh new file mode 100755 index 0000000..68215bf --- /dev/null +++ b/nixos/install.sh @@ -0,0 +1,7 @@ +#!/bin/sh + +set -o nounset -o errexit + +curl -O https://raw.githubusercontent.com/elitak/nixos-infect/"$NIXOS_INFECT_COMMIT_HASH"/nixos-infect +test `md5sum nixos-infect | awk '{print $1}'` = "$NIXOS_INFECT_CHECKSUM_HASH" || { printf "Checksum mismatch!\n"; exit 1; } +NIX_CHANNEL=nixos-"$NIXOS_VERSION" bash nixos-infect 2>&1 | tee /tmp/infect.log diff --git a/nixos/nix/configuration.nix b/nixos/nix/configuration.nix new file mode 100644 index 0000000..110c3cc --- /dev/null +++ b/nixos/nix/configuration.nix @@ -0,0 +1,10 @@ +{ pkgs, ... }: { + imports = [ + ./system.nix + ]; + + environment.systemPackages = [ + pkgs.vim + pkgs.screen + ]; +} diff --git a/nixos/nix/system.nix b/nixos/nix/system.nix new file mode 100644 index 0000000..9992763 --- /dev/null +++ b/nixos/nix/system.nix @@ -0,0 +1,39 @@ +{ ... }: +{ + imports = [ + + ]; + + boot.loader.grub.device = "/dev/sda"; + boot.cleanTmpDir = true; + + fileSystems."/" = { device = "/dev/sda1"; fsType = "ext4"; label = "root"; }; + + networking.firewall.allowPing = true; + networking.hostName = (builtins.readFile (builtins.fetchurl "http://169.254.169.254/hetzner/v1/metadata/hostname")); + + services.openssh.enable = true; + services.cloud-init.enable = true; + + security.sudo.wheelNeedsPassword = false; + + # The created service `nixos-rebuild.service` can be used to trigger an unattended configuration change + # See https://nixos.org/manual/nixos/stable/#sec-changing-config + # + # `systemctl start nixos-rebuild` := `nixos-rebuild switch` + systemd.services.nixos-rebuild = { + serviceConfig.Type = "oneshot"; + script = '' + /run/current-system/sw/bin/nixos-rebuild switch -I nixpkgs=/nix/var/nix/profiles/per-user/root/channels/nixos/nixpkgs -I nixos-config=/etc/nixos/configuration.nix + ''; + }; + + users.extraUsers.operator = { + isNormalUser = true; + uid = 1000; + extraGroups = [ "wheel" ]; + openssh.authorizedKeys.keys = [ + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDCAOgDVmgLQ1tEiE7kXoLu14wLW0LoYbNPsKae0DlMeWJe6JcR8HkQnSZm3aFEt30SmaFDtXcw3fyur0wByrIh0cFMUsdiO4e4B+Gke/vTc4/51rfjjzsA/1zipWnD5Yf0lO6KqE6Vm2uTejJ7NIRume3c2nlLCZ/Ajt0GqYwIuMOOGZSA5o/pNKiH88GyW9C+kI0kIOwswMHHQ5bFmpWttTy8JNI0iC4FzcQrAFIMUPTsM2kphJyqTPMGoztzRX64HSfmdr43MfLEWtIWvUXcYiazXFCTfXrStUS/z1GN2kOGvmr6fcC4MX3zhJF9WETRjM0VTFHJbERAQOmw3P87oAK759l0eHGiS7bbmX2hNLz6LLOCmPpaih5TaFp3NjMnVlEd1bGzZC4mgmFqxMUtx8Uqyd3zr3Wlp+u4zHaNiNhZo0USsIzagcdmeGIuXT1deyjnpbJVesixTMcttm6rlhVd4/McO972bP+4qtPSVZcGZd6d01TgK16fXp1WybuO6SpaLUIYcnimM+/zeanJkfgtA419xkZqEHvBf80/RTqmX/NTree8vHBVFSxla2Ru4RDBpGnbDKUYpRFeP9SMSkpGtdjZK45U7ffikK+UdXr24Nl6NFeFFs/PW5gOibfPzTJwpLqeu4E8xXXyakRSHW8aa+BtuV8WKFB4e/4dSQ==" + ]; + }; +} diff --git a/vault/admin_policy.hcl b/vault/admin_policy.hcl new file mode 100644 index 0000000..db1eb7e --- /dev/null +++ b/vault/admin_policy.hcl @@ -0,0 +1,59 @@ +# Read system health check +path "sys/health" +{ + capabilities = ["read", "sudo"] +} + +# Create and manage ACL policies broadly across Vault + +# List existing policies +path "sys/policies/acl" +{ + capabilities = ["list"] +} + +# Create and manage ACL policies +path "sys/policies/acl/*" +{ + capabilities = ["create", "read", "update", "delete", "list", "sudo"] +} + +# Enable and manage authentication methods broadly across 
Vault + +# Manage auth methods broadly across Vault +path "auth/*" +{ + capabilities = ["create", "read", "update", "delete", "list", "sudo"] +} + +# Create, update, and delete auth methods +path "sys/auth/*" +{ + capabilities = ["create", "update", "delete", "sudo"] +} + +# List auth methods +path "sys/auth" +{ + capabilities = ["read"] +} + +# Enable and manage the key/value secrets engine at `secret/` path + +# List, create, update, and delete key/value secrets +path "secret/*" +{ + capabilities = ["create", "read", "update", "delete", "list", "sudo"] +} + +# Manage secrets engines +path "sys/mounts/*" +{ + capabilities = ["create", "read", "update", "delete", "list", "sudo"] +} + +# List existing secrets engines. +path "sys/mounts" +{ + capabilities = ["read"] +} diff --git a/vault/example_service_policy.hcl b/vault/example_service_policy.hcl new file mode 100644 index 0000000..81970ee --- /dev/null +++ b/vault/example_service_policy.hcl @@ -0,0 +1,8 @@ +# Read-only access to all secrets in key-value store +path "kv/data/guidelines/production/api" { + capabilities = ["read"] +} + +path "kv/data/guidelines/staging/api" { + capabilities = ["read"] +}