Merge pull request #272 from matter-labs/refactor

refactor(tdx_google): modularize tdx_google configuration
Authored by Harald Hoyer on 2025-02-20 10:04:11 +01:00; committed by GitHub.
commit e936f5079d
4 changed files with 163 additions and 111 deletions
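
The refactor splits the single tdx_google profile into three modules (metadata.nix, vector.nix, container.nix) that the main configuration now imports. A minimal sketch of the resulting layout, with the name of the importing file left unspecified:

{ modulesPath, ... }: {
  imports = [
    "${toString modulesPath}/profiles/minimal.nix"
    "${toString modulesPath}/profiles/qemu-guest.nix"
    ./metadata.nix   # fetch GCE metadata, write /run/env/env
    ./vector.nix     # ship logs via Vector (OTLP in, Kafka out)
    ./container.nix  # measure and run the application container
  ];
}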

Changed: tdx_google configuration

@@ -6,63 +6,11 @@
imports = [
"${toString modulesPath}/profiles/minimal.nix"
"${toString modulesPath}/profiles/qemu-guest.nix"
./metadata.nix
./vector.nix
./container.nix
];
services.vector.enable = true;
services.vector.settings = {
api.enabled = false;
sources = {
otlp = {
type = "opentelemetry";
grpc = { address = "127.0.0.1:4317"; };
http = {
address = "127.0.0.1:4318";
keepalive = {
max_connection_age_jitter_factor = 0.1;
max_connection_age_secs = 300;
};
};
};
};
sinks = {
console = {
inputs = [ "otlp.logs" ];
target = "stdout";
type = "console";
encoding = { codec = "json"; };
};
kafka = {
type = "kafka";
inputs = [ "otlp.logs" ];
bootstrap_servers = "\${KAFKA_URLS:-127.0.0.1:0}";
topic = "\${KAFKA_TOPIC:-tdx-google}";
encoding = {
codec = "json";
compression = "lz4";
};
};
};
};
systemd.services.vector.path = [ pkgs.curl pkgs.coreutils ];
# The `-` prefix means the file may be missing, so `ExecStartPre` can run first and create it
systemd.services.vector.serviceConfig.EnvironmentFile = "-/run/vector/env";
# The `+` prefix runs the command with full privileges, so it can write to `/run`
systemd.services.vector.serviceConfig.ExecStartPre = "+" + toString (
pkgs.writeShellScript "vector-start-pre" ''
set -eu -o pipefail
: "''${KAFKA_URLS:=$(curl --silent --fail "http://metadata.google.internal/computeMetadata/v1/instance/attributes/kafka_urls" -H "Metadata-Flavor: Google")}"
: "''${KAFKA_TOPIC:=$(curl --silent --fail "http://metadata.google.internal/computeMetadata/v1/instance/attributes/kafka_topic" -H "Metadata-Flavor: Google")}"
KAFKA_TOPIC="''${KAFKA_TOPIC:-tdx-google}"
mkdir -p /run/vector
cat >/run/vector/env <<EOF
KAFKA_URLS="''${KAFKA_URLS}"
KAFKA_TOPIC="''${KAFKA_TOPIC}"
EOF
''
);
services.journald.console = "/dev/ttyS0";
systemd.services."serial-getty@ttyS0".enable = lib.mkForce false;
@@ -83,58 +31,6 @@
# don't fill up the logs
networking.firewall.logRefusedConnections = false;
virtualisation.docker.enable = true;
systemd.services.docker_start_container = {
description = "The main application container";
wantedBy = [ "multi-user.target" ];
after = [ "network-online.target" "docker.service" "vector.service" "chronyd.service" ];
requires = [ "network-online.target" "docker.service" "vector.service" ];
serviceConfig = {
Type = "exec";
User = "root";
EnvironmentFile = "-/run/container/env";
ExecStartPre = "+" + toString (
pkgs.writeShellScript "container-start-pre" ''
set -eu -o pipefail
: "''${CONTAINER_IMAGE:=$(curl --silent --fail "http://metadata.google.internal/computeMetadata/v1/instance/attributes/container_image" -H "Metadata-Flavor: Google")}"
: "''${CONTAINER_HUB:=$(curl --silent --fail "http://metadata.google.internal/computeMetadata/v1/instance/attributes/container_hub" -H "Metadata-Flavor: Google")}"
: "''${CONTAINER_USER:=$(curl --silent --fail "http://metadata.google.internal/computeMetadata/v1/instance/attributes/container_user" -H "Metadata-Flavor: Google")}"
: "''${CONTAINER_TOKEN:=$(curl --silent --fail "http://metadata.google.internal/computeMetadata/v1/instance/attributes/container_token" -H "Metadata-Flavor: Google")}"
: "''${CONTAINER_IMAGE:?Error: Missing CONTAINER_IMAGE}"
: "''${CONTAINER_HUB:?Error: Missing CONTAINER_HUB}"
mkdir -p /run/container
cat >/run/container/env <<EOF
CONTAINER_IMAGE="''${CONTAINER_IMAGE}"
CONTAINER_HUB="''${CONTAINER_HUB}"
CONTAINER_USER="''${CONTAINER_USER}"
CONTAINER_TOKEN="''${CONTAINER_TOKEN}"
EOF
''
);
};
path = [ pkgs.curl pkgs.docker pkgs.teepot.teepot.tdx_extend pkgs.coreutils ];
script = ''
set -eu -o pipefail
if [[ $CONTAINER_USER ]] && [[ $CONTAINER_TOKEN ]]; then
docker login -u "$CONTAINER_USER" -p "$CONTAINER_TOKEN" "$CONTAINER_HUB"
fi
docker pull "''${CONTAINER_HUB}/''${CONTAINER_IMAGE}"
DIGEST=$(docker inspect --format '{{.Id}}' "''${CONTAINER_HUB}/''${CONTAINER_IMAGE}")
DIGEST=''${DIGEST#sha256:}
echo "Measuring $DIGEST" >&2
test -c /dev/tdx_guest && tdx-extend --digest "$DIGEST" --rtmr 3
exec docker run --env "GOOGLE_METADATA=1" --network=host --init --privileged "sha256:$DIGEST"
'';
postStop = lib.mkDefault ''
shutdown --reboot +5
'';
};
services.prometheus.exporters.node = {
enable = true;
port = 9100;
@@ -147,10 +43,6 @@
];
};
environment.systemPackages = with pkgs; [
teepot.teepot
];
# /var is on tmpfs anyway
services.journald.storage = "volatile";

New file: container.nix

@@ -0,0 +1,40 @@
{ lib
, modulesPath
, pkgs
, ...
}: {
virtualisation.docker.enable = true;
systemd.services.docker_start_container = {
description = "The main application container";
wantedBy = [ "multi-user.target" ];
after = [ "network-online.target" "docker.service" "vector.service" "chronyd.service" "metadata.service" ];
requires = [ "network-online.target" "docker.service" "vector.service" "metadata.service" ];
serviceConfig = {
Type = "exec";
User = "root";
EnvironmentFile = "-/run/env/env";
};
path = [ pkgs.docker pkgs.teepot.teepot.tdx_extend ];
script = ''
set -eu -o pipefail
DIGEST=''${CONTAINER_DIGEST#sha256:}
echo "Measuring $DIGEST" >&2
test -c /dev/tdx_guest && tdx-extend --digest "$DIGEST" --rtmr 3
docker run -d --rm \
--name tdx_container \
--env "GOOGLE_METADATA=1" \
--network=host \
--init \
--privileged \
"sha256:$DIGEST"
exec docker wait tdx_container
'';
postStop = lib.mkDefault ''
shutdown --reboot +5
'';
};
}
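
Because postStop is set with lib.mkDefault, a downstream configuration can replace the reboot-on-exit behaviour with a plain assignment, no lib.mkForce needed. A hypothetical override:

{ ... }: {
  # A regular definition (priority 100) wins over mkDefault (priority 1000),
  # so the machine powers off instead of rebooting after the container exits.
  systemd.services.docker_start_container.postStop = "poweroff";
}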

New file: metadata.nix

@@ -0,0 +1,53 @@
{ lib
, modulesPath
, pkgs
, ...
}: {
systemd.services.metadata = {
serviceConfig = {
Type = "oneshot";
RemainAfterExit = true;
};
enable = true;
path = [ pkgs.curl pkgs.docker pkgs.teepot.teepot.tdx_extend pkgs.coreutils ];
wantedBy = [ "default.target" ];
after = [ "network-online.target" "docker.service" ];
requires = [ "network-online.target" "docker.service" ];
script = ''
set -eu -o pipefail
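# Fetch configuration from the GCE metadata server; values already present in the environment take precedence, since := only assigns when the variable is unset or empty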
: "''${CONTAINER_HUB:=$(curl --silent --fail "http://metadata.google.internal/computeMetadata/v1/instance/attributes/container_hub" -H "Metadata-Flavor: Google")}"
: "''${CONTAINER_IMAGE:=$(curl --silent --fail "http://metadata.google.internal/computeMetadata/v1/instance/attributes/container_image" -H "Metadata-Flavor: Google")}"
: "''${CONTAINER_TOKEN:=$(curl --silent --fail "http://metadata.google.internal/computeMetadata/v1/instance/attributes/container_token" -H "Metadata-Flavor: Google")}"
: "''${CONTAINER_USER:=$(curl --silent --fail "http://metadata.google.internal/computeMetadata/v1/instance/attributes/container_user" -H "Metadata-Flavor: Google")}"
: "''${HOST_ID:=$(curl --silent --fail "http://metadata.google.internal/computeMetadata/v1/instance/id" -H "Metadata-Flavor: Google")}"
: "''${HOST_IMAGE:=$(curl --silent --fail "http://metadata.google.internal/computeMetadata/v1/instance/image" -H "Metadata-Flavor: Google")}"
: "''${HOST_NAME:=$(curl --silent --fail "http://metadata.google.internal/computeMetadata/v1/instance/hostname" -H "Metadata-Flavor: Google")}"
: "''${KAFKA_TOPIC:=$(curl --silent --fail "http://metadata.google.internal/computeMetadata/v1/instance/attributes/kafka_topic" -H "Metadata-Flavor: Google")}"
: "''${KAFKA_URLS:=$(curl --silent --fail "http://metadata.google.internal/computeMetadata/v1/instance/attributes/kafka_urls" -H "Metadata-Flavor: Google")}"
: "''${CONTAINER_IMAGE:?Error: Missing CONTAINER_IMAGE}"
: "''${CONTAINER_HUB:?Error: Missing CONTAINER_HUB}"
if [[ $CONTAINER_USER ]] && [[ $CONTAINER_TOKEN ]]; then
docker login -u "$CONTAINER_USER" -p "$CONTAINER_TOKEN" "$CONTAINER_HUB"
fi
docker pull "''${CONTAINER_HUB}/''${CONTAINER_IMAGE}"
CONTAINER_DIGEST=$(docker inspect --format '{{.Id}}' "''${CONTAINER_HUB}/''${CONTAINER_IMAGE}")
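# Persist the collected values for the other units (vector.service, docker_start_container.service) to load via EnvironmentFile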
mkdir -p /run/env
cat >/run/env/env <<EOF
CONTAINER_HUB="''${CONTAINER_HUB}"
CONTAINER_IMAGE="''${CONTAINER_IMAGE}"
CONTAINER_TOKEN="''${CONTAINER_TOKEN}"
CONTAINER_USER="''${CONTAINER_USER}"
CONTAINER_DIGEST="''${CONTAINER_DIGEST}"
HOST_ID="''${HOST_ID}"
HOST_IMAGE="''${HOST_IMAGE}"
HOST_NAME="''${HOST_NAME}"
KAFKA_TOPIC="''${KAFKA_TOPIC}"
KAFKA_URLS="''${KAFKA_URLS}"
EOF
'';
};
}
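
Other units pick up the collected values by ordering themselves after metadata.service and loading the generated environment file, which is exactly what vector.nix and container.nix do. A sketch with a hypothetical extra consumer:

{ ... }: {
  systemd.services.report_metadata = {
    wantedBy = [ "multi-user.target" ];
    after = [ "metadata.service" ];
    requires = [ "metadata.service" ];
    serviceConfig.EnvironmentFile = "-/run/env/env";
    script = ''
      # every variable written by the metadata service is available here
      echo "running ''${CONTAINER_HUB}/''${CONTAINER_IMAGE} on ''${HOST_NAME}"
    '';
  };
}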

New file: vector.nix

@@ -0,0 +1,67 @@
{ lib
, modulesPath
, pkgs
, ...
}: {
services.vector.enable = true;
services.vector.settings = {
api.enabled = false;
sources = {
otlp = {
type = "opentelemetry";
grpc = { address = "127.0.0.1:4317"; };
http = {
address = "127.0.0.1:4318";
keepalive = {
max_connection_age_jitter_factor = 0.1;
max_connection_age_secs = 300;
};
};
};
};
transforms = {
add_custom_fields = {
type = "remap";
inputs = [ "otlp.logs" ];
source = ''
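# The HOST_* and CONTAINER_* placeholders below are resolved by Vector's environment variable interpolation at startup, using the values from /run/env/env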
# Create the resources object if it doesn't exist
if !exists(.resources) {
.resources = {}
}
# https://opentelemetry.io/docs/specs/semconv/resource/host/
.resources.host.name = "''${HOST_NAME:-hostname}"
.resources.host.id = "''${HOST_ID:-hostid}"
.resources.host.image.name = "''${HOST_IMAGE:-host_image}"
# https://opentelemetry.io/docs/specs/semconv/resource/container/
.resources.container.image.name = "''${CONTAINER_HUB:-container_hub}/''${CONTAINER_IMAGE:-container_image}"
.resources.container.image.id = "''${CONTAINER_DIGEST:-container_digest}"
'';
};
};
sinks = {
console = {
inputs = [ "add_custom_fields" ];
target = "stdout";
type = "console";
encoding = { codec = "json"; };
};
kafka = {
type = "kafka";
inputs = [ "add_custom_fields" ];
bootstrap_servers = "\${KAFKA_URLS:-127.0.0.1:0}";
topic = "\${KAFKA_TOPIC:-tdx-google}";
encoding = {
codec = "json";
compression = "lz4";
};
};
};
};
systemd.services.vector = {
after = [ "network-online.target" "metadata.service" ];
requires = [ "network-online.target" "metadata.service" ];
path = [ pkgs.curl pkgs.coreutils ];
serviceConfig.EnvironmentFile = "-/run/env/env";
};
}
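
Because services.vector.settings is an attribute set merged by the module system, another module can attach an extra sink to the enriched add_custom_fields stream without editing this file. A hypothetical example (sink name and path assumed):

{ ... }: {
  services.vector.settings.sinks.debug_file = {
    type = "file";
    inputs = [ "add_custom_fields" ];
    path = "/var/log/vector-debug.log";
    encoding = { codec = "json"; };
  };
}

Since /var is volatile in this image, a file sink like this is only useful for inspecting events within a single boot.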