Merge remote-tracking branch 'origin/main' into pab/versioned-backward-compatible-offchain-proof-verifier

commit a40675497c by Patryk Bęza, 2025-01-15 17:02:34 +01:00
GPG key ID: 9AD1B44D9F6258EC (no known key found for this signature in database)
15 changed files with 742 additions and 81 deletions


@@ -28,5 +28,5 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4
-      - uses: cachix/install-nix-action@v27
+      - uses: cachix/install-nix-action@v30
       - run: nix run nixpkgs#taplo -- fmt --check


@@ -16,7 +16,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4
-      - uses: cachix/install-nix-action@v27
+      - uses: cachix/install-nix-action@v30
         with:
           extra_nix_config: |
             access-tokens = github.com=${{ github.token }}
@@ -37,7 +37,7 @@ jobs:
     runs-on: [ matterlabs-default-infra-runners ]
     steps:
       - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4
-      - uses: cachix/install-nix-action@v27
+      - uses: cachix/install-nix-action@v30
         with:
           extra_nix_config: |
             access-tokens = github.com=${{ github.token }}
@@ -75,7 +75,7 @@ jobs:
       - { nixpackage: 'container-verify-era-proof-attestation-sgx' }
     steps:
       - uses: actions/checkout@v4
-      - uses: cachix/install-nix-action@v27
+      - uses: cachix/install-nix-action@v30
         with:
          extra_nix_config: |
            access-tokens = github.com=${{ github.token }}


@@ -1,13 +1,5 @@
 # teepot
-Key Value store in a TEE with Remote Attestation for Authentication
-
-## Introduction
-
-This project is a key-value store that runs in a Trusted Execution Environment (TEE) and uses Remote Attestation for
-Authentication.
-
-The key-value store is implemented using Hashicorp Vault running in an Intel SGX enclave via the Gramine runtime.
 
 ## Parts of this project
 
 - `teepot`: The main Rust crate that abstracts TEEs and key-value stores.
@@ -22,6 +14,18 @@ The key-value store is implemented using Hashicorp Vault running in an Intel SGX
 - `verify-attestation`: A client utility that verifies the attestation of an enclave.
 - `tee-key-preexec`: A pre-exec utility that generates a p256 secret key and passes it as an environment variable to the
   enclave along with the attestation quote containing the hash of the public key.
+- `tdx_google`: A base VM running on Google Cloud TDX. It receives a container URL via the instance metadata,
+  measures the SHA-384 of the URL into RTMR3 and launches the container.
+- `tdx-extend`: A utility to extend an RTMR register with a hash value.
+- `rtmr-calc`: A utility to calculate RTMR1 and RTMR2 from a GPT disk, the Linux kernel, the Linux initrd
+  and a UKI (unified kernel image).
+- `sha384-extend`: A utility to calculate RTMR register values after extending them with a digest.
+
+## Vault
+
+Part of this project is a key-value store that runs in a Trusted Execution Environment (TEE) and uses Remote Attestation
+for Authentication. The key-value store is implemented using Hashicorp Vault running in an Intel SGX enclave via the
+Gramine runtime.
 
 ## Development
@@ -96,3 +100,9 @@ Attributes:
     isv_svn: 0
     debug_enclave: False
 ```
+
+### TDX VM testing
+
+```shell
+nixos-rebuild -L --flake .#tdxtest build-vm && ./result/bin/run-tdxtest-vm
+```
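
The RTMR utilities added to the README above are plain CLI tools. As a rough usage sketch based only on the invocations that appear elsewhere in this commit (the `tdx-extend` call in the `tdx_google` container service and the `rtmr-calc` call in the verity image build), with placeholder file names:

```shell
# Extend RTMR3 with a hex-encoded digest (e.g. a container image ID)
tdx-extend --digest "<hex-digest>" --rtmr 3

# Pre-compute RTMR1/RTMR2 for a disk image, its UKI and its kernel
rtmr-calc \
  --image disk.raw \
  --bootefi uki.efi \
  --kernel bzImage | tee rtmr.json
```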

assets/gcloud-deploy.sh (new executable file, 45 lines)

@@ -0,0 +1,45 @@
#!/usr/bin/env bash
#
# SPDX-License-Identifier: Apache-2.0
# Copyright (c) 2025 Matter Labs
#
set -ex
NO=${NO:-1}
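# Build the TDX base VM image with nix and upload the resulting VMDK to the GCS staging bucket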
nix build -L .#tdx_google
gsutil cp result/tdx_base_1.vmdk gs://tdx_vms/
gcloud migration vms image-imports create \
--location=us-central1 \
--target-project=tdx-pilot \
--project=tdx-pilot \
--skip-os-adaptation \
--source-file=gs://tdx_vms/tdx_base_1.vmdk \
tdx-img-pre-"${NO}"
gcloud compute instances stop tdx-pilot --zone us-central1-c --project tdx-pilot || :
gcloud compute instances delete tdx-pilot --zone us-central1-c --project tdx-pilot || :
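# Wait for the running image import job to finish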
while gcloud migration vms image-imports list --location=us-central1 --project=tdx-pilot | grep -F RUNNING; do
sleep 1
done
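# Create a TDX-capable GCE image from the imported image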
gcloud compute images create \
--project tdx-pilot \
--guest-os-features=UEFI_COMPATIBLE,TDX_CAPABLE,GVNIC,VIRTIO_SCSI_MULTIQUEUE \
--storage-location=us-central1 \
--source-image=tdx-img-pre-"${NO}" \
tdx-img-f-"${NO}"
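# Launch the confidential (TDX) pilot VM; the container to run is passed via instance metadata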
gcloud compute instances create tdx-pilot \
--machine-type c3-standard-4 --zone us-central1-c \
--confidential-compute-type=TDX \
--maintenance-policy=TERMINATE \
--image-project=tdx-pilot \
--project tdx-pilot \
--metadata=container_hub="docker.io",container_image="amd64/hello-world@sha256:e2fc4e5012d16e7fe466f5291c476431beaa1f9b90a5c2125b493ed28e2aba57" \
--image tdx-img-f-"${NO}"
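
A possible invocation (a sketch; `NO` only selects the numeric suffix of the image names, while the project, zone and bucket are the ones hard-coded above):

```shell
# import and deploy the next revision of the base image
NO=2 ./assets/gcloud-deploy.sh
```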


@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: Apache-2.0
-// Copyright (c) 2024 Matter Labs
+// Copyright (c) 2024-2025 Matter Labs
 // SPDX-License-Identifier: BSD-3-Clause
 
 /*
@@ -39,22 +39,14 @@
 //! This is a safe wrapper for **sgx-dcap-quoteverify-sys**.
 
 use serde::{Deserialize, Serialize};
-use std::marker::PhantomData;
-use std::mem;
-use std::ops::Deref;
-use std::slice;
+use std::{marker::PhantomData, ops::Deref, slice};
 
 use intel_tee_quote_verification_sys as qvl_sys;
-
-pub use qvl_sys::quote3_error_t;
-pub use qvl_sys::sgx_ql_qe_report_info_t;
-pub use qvl_sys::sgx_ql_qv_result_t;
-pub use qvl_sys::sgx_ql_qv_supplemental_t;
-pub use qvl_sys::sgx_ql_qve_collateral_t;
-pub use qvl_sys::sgx_ql_request_policy_t;
-pub use qvl_sys::sgx_qv_path_type_t;
-pub use qvl_sys::tdx_ql_qve_collateral_t;
-pub use qvl_sys::tee_supp_data_descriptor_t;
+pub use qvl_sys::{
+    quote3_error_t, sgx_ql_qe_report_info_t, sgx_ql_qv_result_t, sgx_ql_qv_supplemental_t,
+    sgx_ql_qve_collateral_t, sgx_ql_request_policy_t, sgx_qv_path_type_t, tdx_ql_qve_collateral_t,
+    tee_qv_free_collateral, tee_supp_data_descriptor_t,
+};
 
 /// When the Quoting Verification Library is linked to a process, it needs to know the proper enclave loading policy.
 /// The library may be linked with a long lived process, such as a service, where it can load the enclaves and leave
@@ -328,43 +320,6 @@ pub struct Collateral {
     pub qe_identity: Box<[u8]>,
 }
 
-impl TryFrom<&sgx_ql_qve_collateral_t> for Collateral {
-    type Error = ();
-
-    fn try_from(value: &sgx_ql_qve_collateral_t) -> Result<Self, Self::Error> {
-        fn to_boxed_slice(p: *mut ::std::os::raw::c_char, size: u32) -> Result<Box<[u8]>, ()> {
-            if p.is_null() {
-                return Err(());
-            }
-            Ok(Box::from(unsafe {
-                slice::from_raw_parts(p as _, size as _)
-            }))
-        }
-
-        Ok(Collateral {
-            major_version: unsafe { value.__bindgen_anon_1.__bindgen_anon_1.major_version },
-            minor_version: unsafe { value.__bindgen_anon_1.__bindgen_anon_1.minor_version },
-            tee_type: value.tee_type,
-            pck_crl_issuer_chain: to_boxed_slice(
-                value.pck_crl_issuer_chain,
-                value.pck_crl_issuer_chain_size,
-            )?,
-            root_ca_crl: to_boxed_slice(value.root_ca_crl, value.root_ca_crl_size)?,
-            pck_crl: to_boxed_slice(value.pck_crl, value.pck_crl_size)?,
-            tcb_info_issuer_chain: to_boxed_slice(
-                value.tcb_info_issuer_chain,
-                value.tcb_info_issuer_chain_size,
-            )?,
-            tcb_info: to_boxed_slice(value.tcb_info, value.tcb_info_size)?,
-            qe_identity_issuer_chain: to_boxed_slice(
-                value.qe_identity_issuer_chain,
-                value.qe_identity_issuer_chain_size,
-            )?,
-            qe_identity: to_boxed_slice(value.qe_identity, value.qe_identity_size)?,
-        })
-    }
-}
-
 // referential struct
 struct SgxQlQveCollateralT<'a> {
     inner: sgx_ql_qve_collateral_t,
@@ -432,6 +387,55 @@ impl Deref for SgxQlQveCollateralT<'_> {
 /// - *SGX_QL_ERROR_UNEXPECTED*
 ///
 pub fn tee_qv_get_collateral(quote: &[u8]) -> Result<Collateral, quote3_error_t> {
+    fn try_into_collateral(
+        buf: *const sgx_ql_qve_collateral_t,
+        buf_len: u32,
+    ) -> Result<Collateral, quote3_error_t> {
+        fn try_into_boxed_slice(
+            p: *mut ::std::os::raw::c_char,
+            size: u32,
+        ) -> Result<Box<[u8]>, quote3_error_t> {
+            if p.is_null() || !p.is_aligned() {
+                return Err(quote3_error_t::SGX_QL_ERROR_MAX);
+            }
+            Ok(Box::from(unsafe {
+                slice::from_raw_parts(p as _, size as _)
+            }))
+        }
+
+        if buf.is_null()
+            || (buf_len as usize) < size_of::<sgx_ql_qve_collateral_t>()
+            || !buf.is_aligned()
+        {
+            return Err(quote3_error_t::SGX_QL_ERROR_MAX);
+        }
+
+        // SAFETY: buf is not null, buf_len is not zero, and buf is aligned.
+        let collateral = unsafe { *buf };
+
+        Ok(Collateral {
+            major_version: unsafe { collateral.__bindgen_anon_1.__bindgen_anon_1.major_version },
+            minor_version: unsafe { collateral.__bindgen_anon_1.__bindgen_anon_1.minor_version },
+            tee_type: collateral.tee_type,
+            pck_crl_issuer_chain: try_into_boxed_slice(
+                collateral.pck_crl_issuer_chain,
+                collateral.pck_crl_issuer_chain_size,
+            )?,
+            root_ca_crl: try_into_boxed_slice(collateral.root_ca_crl, collateral.root_ca_crl_size)?,
+            pck_crl: try_into_boxed_slice(collateral.pck_crl, collateral.pck_crl_size)?,
+            tcb_info_issuer_chain: try_into_boxed_slice(
+                collateral.tcb_info_issuer_chain,
+                collateral.tcb_info_issuer_chain_size,
+            )?,
+            tcb_info: try_into_boxed_slice(collateral.tcb_info, collateral.tcb_info_size)?,
+            qe_identity_issuer_chain: try_into_boxed_slice(
+                collateral.qe_identity_issuer_chain,
+                collateral.qe_identity_issuer_chain_size,
+            )?,
+            qe_identity: try_into_boxed_slice(collateral.qe_identity, collateral.qe_identity_size)?,
+        })
+    }
+
     let mut buf = std::ptr::null_mut();
     let mut buf_len = 0u32;
@@ -439,15 +443,12 @@ pub fn tee_qv_get_collateral(quote: &[u8]) -> Result<Collateral, quote3_error_t>
         qvl_sys::tee_qv_get_collateral(quote.as_ptr(), quote.len() as u32, &mut buf, &mut buf_len)
     } {
         quote3_error_t::SGX_QL_SUCCESS => {
-            assert!(!buf.is_null());
-            assert!(buf_len > 0);
-            assert_eq!(
-                (buf as usize) % mem::align_of::<sgx_ql_qve_collateral_t>(),
-                0
-            );
-            // SAFETY: buf is not null, buf_len is not zero, and buf is aligned.
-            let orig_collateral = &unsafe { *(buf as *const sgx_ql_qve_collateral_t) };
-            Collateral::try_from(orig_collateral).map_err(|_| quote3_error_t::SGX_QL_ERROR_MAX)
+            let collateral = try_into_collateral(buf as _, buf_len);
+
+            match unsafe { tee_qv_free_collateral(buf) } {
+                quote3_error_t::SGX_QL_SUCCESS => collateral,
+                error_code => Err(error_code),
+            }
         }
         error_code => Err(error_code),
     }


@@ -25,7 +25,9 @@
   };
 
   outputs = inputs:
-    let src = ./.; in
+    let
+      src = ./.;
+    in
     inputs.snowfall-lib.mkFlake {
       inherit inputs;
       inherit src;

lib/default.nix (new file, 3 lines)

@@ -0,0 +1,3 @@
{ ... }: {
nixosGenerate = import ./nixos-generate.nix;
}

lib/nixos-generate.nix (new file, 33 lines)

@@ -0,0 +1,33 @@
{ pkgs
, nixosSystem
, formatModule
, system
, specialArgs ? { }
, modules ? [ ]
}:
let
image = nixosSystem {
inherit pkgs specialArgs;
modules =
[
formatModule
(
{ lib, ... }: {
options = {
fileExtension = lib.mkOption {
type = lib.types.str;
description = "Declare the path of the wanted file in the output directory";
default = "";
};
formatAttr = lib.mkOption {
type = lib.types.str;
description = "Declare the default attribute to build";
};
};
}
)
]
++ modules;
};
in
image.config.system.build.${image.config.formatAttr}


@@ -0,0 +1,38 @@
# SPDX-License-Identifier: Apache-2.0
# Copyright (c) 2024 Matter Labs
{ teepot
, pkgs
, bash
, coreutils
, container-name ? "teepot-key-preexec-dcap"
, tag ? null
}: let
entrypoint = "${bash}/bin/bash";
in
pkgs.lib.tee.sgxGramineContainer {
name = container-name;
inherit tag entrypoint;
packages = [ teepot.teepot.tee_key_preexec coreutils bash ];
manifest = {
loader = {
argv = [
entrypoint
"-c"
("${teepot.teepot.tee_key_preexec}/bin/tee-key-preexec -- bash -c "
+ "'echo \"SIGNING_KEY=$SIGNING_KEY\"; echo \"TEE_TYPE=$TEE_TYPE\";exec base64 \"$ATTESTATION_QUOTE_FILE_PATH\";'")
];
log_level = "error";
env = {
RUST_BACKTRACE = "1";
RUST_LOG = "trace";
};
};
sgx = {
edmm_enable = true;
max_threads = 2;
};
};
}


@@ -0,0 +1,180 @@
{ lib
, modulesPath
, pkgs
, ...
}: {
imports = [
"${toString modulesPath}/profiles/minimal.nix"
"${toString modulesPath}/profiles/qemu-guest.nix"
];
/*
# SSH login for debugging
services.sshd.enable = true;
networking.firewall.allowedTCPPorts = [ 22 ];
services.openssh.settings.PermitRootLogin = lib.mkOverride 999 "yes";
users.users.root.openssh.authorizedKeys.keys = [
"sk-ssh-ed25519@openssh.com AAAAGnNrLXNzaC1lZDI1NTE5QG9wZW5zc2guY29tAAAAIDsb/Tr69YN5MQLweWPuJaRGm+h2kOyxfD6sqKEDTIwoAAAABHNzaDo="
"sk-ecdsa-sha2-nistp256@openssh.com AAAAInNrLWVjZHNhLXNoYTItbmlzdHAyNTZAb3BlbnNzaC5jb20AAAAIbmlzdHAyNTYAAABBBACLgT81iB1iWWVuXq6PdQ5GAAGhaZhSKnveQCvcNnAOZ5WKH80bZShKHyAYzrzbp8IGwLWJcZQ7TqRK+qZdfagAAAAEc3NoOg=="
"ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBAYbUTKpy4QR3s944/hjJ1UK05asFEs/SmWeUbtS0cdA660sT4xHnRfals73FicOoz+uIucJCwn/SCM804j+wtM="
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMNsmP15vH8BVKo7bdvIiiEjiQboPGcRPqJK0+bH4jKD"
];
*/
# the container might want to listen on ports
networking.firewall.enable = true;
networking.firewall.allowedTCPPortRanges = [{ from = 1024; to = 65535; }];
networking.firewall.allowedUDPPortRanges = [{ from = 1024; to = 65535; }];
networking.useNetworkd = lib.mkDefault true;
# don't fill up the logs
networking.firewall.logRefusedConnections = false;
virtualisation.docker.enable = true;
systemd.services.docker_start_container = {
description = "The main application container";
wantedBy = [ "multi-user.target" ];
after = [ "network-online.target" "docker.service" ];
requires = [ "network-online.target" "docker.service" ];
serviceConfig = {
Type = "exec";
User = "root";
};
path = [ pkgs.curl pkgs.docker pkgs.teepot.teepot.tdx_extend pkgs.coreutils ];
script = ''
set -eu -o pipefail
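# Read the container configuration from the GCE metadata server unless it is already set in the environment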
: "''${CONTAINER_IMAGE:=$(curl --silent --fail "http://metadata.google.internal/computeMetadata/v1/instance/attributes/container_image" -H "Metadata-Flavor: Google")}"
: "''${CONTAINER_HUB:=$(curl --silent --fail "http://metadata.google.internal/computeMetadata/v1/instance/attributes/container_hub" -H "Metadata-Flavor: Google")}"
: "''${CONTAINER_USER:=$(curl --silent --fail "http://metadata.google.internal/computeMetadata/v1/instance/attributes/container_user" -H "Metadata-Flavor: Google")}"
: "''${CONTAINER_TOKEN:=$(curl --silent --fail "http://metadata.google.internal/computeMetadata/v1/instance/attributes/container_token" -H "Metadata-Flavor: Google")}"
: "''${CONTAINER_IMAGE:?Error: Missing CONTAINER_IMAGE}"
: "''${CONTAINER_HUB:?Error: Missing CONTAINER_HUB}"
if [[ $CONTAINER_USER ]] && [[ $CONTAINER_TOKEN ]]; then
docker login -u "$CONTAINER_USER" -p "$CONTAINER_TOKEN" "$CONTAINER_HUB"
fi
docker pull "''${CONTAINER_HUB}/''${CONTAINER_IMAGE}"
DIGEST=$(docker inspect --format '{{.Id}}' "''${CONTAINER_HUB}/''${CONTAINER_IMAGE}")
DIGEST=''${DIGEST#sha256:}
echo "Measuring $DIGEST" >&2
test -c /dev/tdx_guest && tdx-extend --digest "$DIGEST" --rtmr 3
exec docker run --init --privileged "sha256:$DIGEST"
'';
postStop = lib.mkDefault ''
shutdown --reboot +5
'';
};
services.prometheus.exporters.node = {
enable = true;
port = 9100;
enabledCollectors = [
"logind"
"systemd"
];
disabledCollectors = [
"textfile"
];
#openFirewall = true;
#firewallFilter = "-i br0 -p tcp -m tcp --dport 9100";
};
environment.systemPackages = with pkgs; [
teepot.teepot
];
# /var is on tmpfs anyway
services.journald.storage = "volatile";
# we can't rely on/trust the hypervisor
services.timesyncd.enable = false;
services.chrony = {
enable = true;
enableNTS = true;
servers = [
"time.cloudflare.com"
"ntppool1.time.nl"
"ntppool2.time.nl"
];
};
systemd.services."chronyd".after = [ "network-online.target" ];
boot.kernelPackages = lib.mkForce pkgs.linuxPackages_6_12;
boot.kernelPatches = [
{
name = "tdx-rtmr";
patch = pkgs.fetchurl {
url = "https://github.com/haraldh/linux/commit/12d08008a5c94175e7a7dfcee40dff33431d9033.patch";
hash = "sha256-sVDhvC3qnXpL5FRxWiQotH7Nl/oqRBQGjJGyhsKeBTA=";
};
}
];
boot.kernelParams = [
"console=ttyS0,115200n8"
"random.trust_cpu=on"
];
boot.consoleLogLevel = 7;
boot.initrd.includeDefaultModules = false;
boot.initrd.availableKernelModules = [
"tdx_guest"
"nvme"
"sd_mod"
"dm_mod"
"ata_piix"
];
boot.initrd.systemd.enable = lib.mkDefault true;
services.logind.extraConfig = ''
NAutoVTs=0
ReserveVT=0
'';
services.dbus.implementation = "broker";
boot.initrd.systemd.tpm2.enable = lib.mkForce false;
systemd.tpm2.enable = lib.mkForce false;
nix.enable = false; # it's a read-only nix store anyway
security.pam.services.su.forwardXAuth = lib.mkForce false;
users.mutableUsers = false;
users.allowNoPasswordLogin = true;
system.stateVersion = lib.version;
system.switch.enable = lib.mkForce false;
documentation.info.enable = lib.mkForce false;
documentation.nixos.enable = lib.mkForce false;
documentation.man.enable = lib.mkForce false;
documentation.enable = lib.mkForce false;
services.udisks2.enable = false; # udisks has become too bloated to have in a headless system
# Get rid of the perl ecosystem to minimize the TCB and disk size
# Remove perl from activation
system.etc.overlay.enable = lib.mkDefault true;
services.userborn.enable = lib.mkDefault true;
# Random perl remnants
system.disableInstallerTools = lib.mkForce true;
programs.less.lessopen = lib.mkDefault null;
programs.command-not-found.enable = lib.mkDefault false;
boot.enableContainers = lib.mkForce false;
boot.loader.grub.enable = lib.mkDefault false;
environment.defaultPackages = lib.mkDefault [ ];
# Check that the system does not contain a Nix store path that contains the
# string "perl".
system.forbiddenDependenciesRegexes = [ "perl" ];
}
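
The four metadata attributes read by the service above (container_hub, container_image, container_user, container_token) are supplied at VM creation time. A hedged sketch for a private registry, reusing only the flags and image name already used in assets/gcloud-deploy.sh; the registry, image and credentials are placeholders:

```shell
gcloud compute instances create tdx-pilot \
  --machine-type c3-standard-4 --zone us-central1-c \
  --confidential-compute-type=TDX \
  --maintenance-policy=TERMINATE \
  --image-project=tdx-pilot \
  --project tdx-pilot \
  --image tdx-img-f-1 \
  --metadata=container_hub="registry.example.com",container_image="myorg/myapp@sha256:<digest>",container_user="<user>",container_token="<token>"
```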


@@ -0,0 +1,15 @@
# SPDX-License-Identifier: Apache-2.0
# Copyright (c) 2024 Matter Labs
{ lib
, pkgs
, system
, ...
}: lib.teepot.nixosGenerate {
inherit (lib) nixosSystem;
inherit system pkgs;
modules = [
./configuration.nix
./google.nix
];
formatModule = ./verity.nix;
}


@@ -0,0 +1,33 @@
{ lib
, pkgs
, modulesPath
, ...
}: {
imports = [
"${toString modulesPath}/profiles/headless.nix"
];
system.image.id = "tdx_base";
boot.initrd.kernelModules = [ "virtio_scsi" ];
boot.kernelModules = [ "virtio_pci" "virtio_net" ];
# Force getting the hostname from Google Compute.
networking.hostName = lib.mkForce "";
# Configure default metadata hostnames
networking.extraHosts = ''
169.254.169.254 metadata.google.internal metadata
'';
networking.timeServers = [ "metadata.google.internal" ];
environment.etc."sysctl.d/60-gce-network-security.conf".source = "${pkgs.google-guest-configs}/etc/sysctl.d/60-gce-network-security.conf";
networking.usePredictableInterfaceNames = false;
# Google Cloud uses a 1460-byte MTU
networking.interfaces.eth0.mtu = 1460;
boot.extraModprobeConfig = lib.readFile "${pkgs.google-guest-configs}/etc/modprobe.d/gce-blacklist.conf";
}


@@ -0,0 +1,127 @@
{ config
, pkgs
, lib
, modulesPath
, ...
}:
let
inherit (config.image.repart.verityStore) partitionIds;
in
{
imports = [
"${toString modulesPath}/image/repart.nix"
];
fileSystems = {
"/" = {
fsType = "tmpfs";
options = [ "mode=0755" "noexec" ];
};
"/dev/shm" = {
fsType = "tmpfs";
options = [ "defaults" "nosuid" "noexec" "nodev" "size=2G" ];
};
"/run" = {
fsType = "tmpfs";
options = [ "defaults" "mode=0755" "nosuid" "noexec" "nodev" "size=512M" ];
};
"/usr" = {
device = "/dev/mapper/usr";
# explicitly mount it read-only otherwise systemd-remount-fs will fail
options = [ "ro" ];
fsType = config.image.repart.partitions.${partitionIds.store}.repartConfig.Format;
};
# bind-mount the store
"/nix/store" = {
device = "/usr/nix/store";
options = [ "bind" ];
};
};
image.repart = {
verityStore = {
enable = true;
ukiPath = "/EFI/BOOT/BOOTx64.EFI";
};
partitions = {
${partitionIds.esp} = {
# the UKI is injected into this partition by the verityStore module
repartConfig = {
Type = "esp";
Format = "vfat";
SizeMinBytes = "64M";
};
};
${partitionIds.store-verity}.repartConfig = {
Minimize = "best";
};
${partitionIds.store}.repartConfig = {
Minimize = "best";
Format = "squashfs";
};
};
};
boot = {
loader.grub.enable = false;
initrd.systemd.enable = true;
};
system.image = {
id = lib.mkDefault "nixos-appliance";
version = "1";
};
# don't create /usr/bin/env
# this would require some extra work on read-only /usr
# and it is not a strict necessity
system.activationScripts.usrbinenv = lib.mkForce "";
boot.kernelParams = [
"systemd.verity_usr_options=panic-on-corruption"
"panic=30"
"boot.panic_on_fail" # reboot the machine upon fatal boot issues
"lockdown=1"
];
system.build.vmdk_verity =
config.system.build.finalImage.overrideAttrs
(
finalAttrs: previousAttrs:
let
kernel = config.boot.uki.settings.UKI.Linux;
ukifile = "${config.system.build.uki}/${config.system.boot.loader.ukiFile}";
in
{
nativeBuildInputs =
previousAttrs.nativeBuildInputs
++ [
pkgs.qemu
pkgs.teepot.teepot.rtmr_calc
];
postInstall = ''
qemu-img convert -f raw -O vmdk \
$out/${config.image.repart.imageFileBasename}.raw \
$out/${config.image.repart.imageFileBasename}.vmdk
qemu-img info \
$out/${config.image.repart.imageFileBasename}.vmdk
echo "kernel: ${kernel}"
echo "uki: ${ukifile}"
rtmr-calc \
--image $out/${config.image.repart.imageFileBasename}.raw \
--bootefi "${ukifile}" \
--kernel "${kernel}" | tee $out/${config.image.repart.imageFileBasename}_rtmr.json
rm -vf $out/${config.image.repart.imageFileBasename}.raw
'';
}
);
formatAttr = lib.mkForce "vmdk_verity";
fileExtension = lib.mkForce ".raw";
}


@@ -1,16 +1,12 @@
 # SPDX-License-Identifier: Apache-2.0
 # Copyright (c) 2024 Matter Labs
 { lib
+, pkgs
 , mkShell
 , teepot
-, dive
-, taplo
-, vault
-, cargo-release
 , nixsgx
 , stdenv
 , teepotCrate
-, pkg-config
 }:
 let
   toolchain_with_src = (teepotCrate.rustVersion.override {
@@ -20,20 +16,26 @@ in
 mkShell {
   inputsFrom = [ teepot.teepot ];
 
-  nativeBuildInputs = [
+  nativeBuildInputs = with pkgs; [
     toolchain_with_src
     pkg-config
     teepotCrate.rustPlatform.bindgenHook
   ];
 
-  packages = [
+  packages = with pkgs; [
     dive
     taplo
     vault
     cargo-release
+    google-cloud-sdk-gce
+    azure-cli
+    kubectl
+    kubectx
+    k9s
   ];
 
   TEE_LD_LIBRARY_PATH = lib.makeLibraryPath [
+    pkgs.curl
     nixsgx.sgx-dcap
     nixsgx.sgx-dcap.quote_verify
     nixsgx.sgx-dcap.default_qpl


@@ -0,0 +1,172 @@
{ config
, pkgs
, lib
, ...
}: {
imports = [
./../../../packages/tdx_google/configuration.nix
];
systemd.services.docker_start_container = {
environment = {
CONTAINER_IMAGE = "amd64/hello-world@sha256:e2fc4e5012d16e7fe466f5291c476431beaa1f9b90a5c2125b493ed28e2aba57";
CONTAINER_HUB = "docker.io";
CONTAINER_USER = "";
CONTAINER_TOKEN = "";
};
postStop = ''
:
'';
};
console.enable = true;
services.getty.autologinUser = lib.mkOverride 999 "root";
networking.firewall.allowedTCPPorts = [ 22 ];
services.sshd.enable = true;
services.openssh.settings.PermitRootLogin = lib.mkOverride 999 "yes";
users.users.root.openssh.authorizedKeys.keys = [
"sk-ssh-ed25519@openssh.com AAAAGnNrLXNzaC1lZDI1NTE5QG9wZW5zc2guY29tAAAAIDsb/Tr69YN5MQLweWPuJaRGm+h2kOyxfD6sqKEDTIwoAAAABHNzaDo="
"sk-ecdsa-sha2-nistp256@openssh.com AAAAInNrLWVjZHNhLXNoYTItbmlzdHAyNTZAb3BlbnNzaC5jb20AAAAIbmlzdHAyNTYAAABBBACLgT81iB1iWWVuXq6PdQ5GAAGhaZhSKnveQCvcNnAOZ5WKH80bZShKHyAYzrzbp8IGwLWJcZQ7TqRK+qZdfagAAAAEc3NoOg=="
"ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBAYbUTKpy4QR3s944/hjJ1UK05asFEs/SmWeUbtS0cdA660sT4xHnRfals73FicOoz+uIucJCwn/SCM804j+wtM="
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMNsmP15vH8BVKo7bdvIiiEjiQboPGcRPqJK0+bH4jKD"
];
fileSystems = {
"/" = {
fsType = "ext4";
device = "/dev/disk/by-id/test";
options = [ "mode=0755" ];
};
};
boot = {
loader.grub.enable = false;
initrd.systemd.enable = true;
};
virtualisation.vmVariant = {
# the following configuration is only added when building the VM with build-vm
virtualisation = {
memorySize = 2048; # Use 2048MiB memory.
cores = 4;
};
};
/*
services.loki = {
enable = true;
configuration = {
server.http_listen_port = 3030;
auth_enabled = false;
analytics.reporting_enabled = false;
ingester = {
lifecycler = {
address = "127.0.0.1";
ring = {
kvstore = {
store = "inmemory";
};
replication_factor = 1;
};
};
chunk_idle_period = "1h";
max_chunk_age = "1h";
chunk_target_size = 999999;
chunk_retain_period = "30s";
};
schema_config = {
configs = [
{
from = "2024-04-25";
store = "tsdb";
object_store = "filesystem";
schema = "v13";
index = {
prefix = "index_";
period = "24h";
};
}
];
};
storage_config = {
tsdb_shipper = {
active_index_directory = "/var/lib/loki/tsdb-shipper-active";
cache_location = "/var/lib/loki/tsdb-shipper-cache";
cache_ttl = "24h";
};
filesystem = {
directory = "/var/lib/loki/chunks";
};
};
limits_config = {
reject_old_samples = true;
reject_old_samples_max_age = "168h";
volume_enabled = true;
};
table_manager = {
retention_deletes_enabled = false;
retention_period = "0s";
};
compactor = {
working_directory = "/var/lib/loki";
compactor_ring = {
kvstore = {
store = "inmemory";
};
};
};
};
};
services.promtail = {
enable = true;
configuration = {
server = {
http_listen_port = 3031;
grpc_listen_port = 0;
};
clients = [
{
url = "http://127.0.0.1:${toString config.services.loki.configuration.server.http_listen_port}/loki/api/v1/push";
}
];
scrape_configs = [{
job_name = "journal";
journal = {
max_age = "12h";
labels = {
job = "systemd-journal";
};
};
relabel_configs = [
{
source_labels = [ "__journal__systemd_unit" ];
target_label = "systemd_unit";
}
{
source_labels = [ "__journal__hostname" ];
target_label = "nodename";
}
{
source_labels = [ "__journal_container_id" ];
target_label = "container_id";
}
];
}];
};
# extraFlags
};
*/
}