Mirror of https://github.com/matter-labs/teepot.git (synced 2025-07-21 07:03:56 +02:00)

Merge pull request #2 from matter-labs/initial_commit

feat: initial commit

Commit: e74e9b7e86
123 changed files with 16508 additions and 0 deletions

.dockerignore (Normal file, 2 lines)
@@ -0,0 +1,2 @@
**/Dockerfile*
target/

.github/CODEOWNERS (vendored Normal file, 4 lines)
@@ -0,0 +1,4 @@
# Each line is a file pattern followed by one or more owners.
# Owners will be automatically notified about new PRs and
# an owner's approval is required to merge to protected branches.
* @matter-labs/tee

.github/renovate.json (vendored Normal file, 6 lines)
@@ -0,0 +1,6 @@
{
  "$schema": "https://docs.renovatebot.com/renovate-schema.json",
  "extends": [
    "config:base"
  ]
}

.github/workflows/container.yml (vendored Normal file, 52 lines)
@@ -0,0 +1,52 @@
name: Container
on:
  push:
    branches: [ "main" ]
  pull_request:

jobs:
  push_to_registry:
    permissions:
      packages: write
      contents: read
    name: Build and push container images to GitHub Packages
    runs-on: ubuntu-latest
    concurrency:
      group: ${{ github.workflow }}-${{ github.ref }}-${{ matrix.config.dockerfile }}
      cancel-in-progress: true
    strategy:
      fail-fast: false
      matrix:
        config:
          - { dockerfile: 'vault/Dockerfile', tag: 'vault:latest', repository: 'teepot-vault' }
          - { dockerfile: 'bin/tee-vault-unseal/Dockerfile-azure', tag: 'tvu:latest', repository: 'teepot-tvu' }
          - { dockerfile: 'bin/tee-vault-admin/Dockerfile-azure', tag: 'tva:latest', repository: 'teepot-tva' }
    steps:
      - name: Check out the repo
        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Log in to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USER }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Set up env
        run: echo "repository_owner=${GITHUB_REPOSITORY_OWNER,,}" >>${GITHUB_ENV}
      - name: Build and Push Container
        uses: docker/build-push-action@v5
        with:
          file: ${{ matrix.config.dockerfile }}
          tags: |
            ghcr.io/${{env.repository_owner}}/${{ github.event.repository.name }}-${{ matrix.config.tag }}
            matterlabsrobot/${{ matrix.config.repository }}:latest
          cache-from: type=gha
          cache-to: type=gha,mode=max,ignore-error=true
          push: ${{ github.event_name == 'push' || github.event_name == 'schedule' }}

.github/workflows/lint.yml (vendored Normal file, 47 lines)
@@ -0,0 +1,47 @@
name: lint

on:
  push:
    branches: [ "main" ]
  pull_request:
    branches: [ "main" ]

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  fmt:
    name: cargo fmt
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4
      - name: Setup Rust toolchain
        run: rustup show
      - run: cargo fmt --all -- --check

  deny:
    name: cargo deny
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4
      - uses: EmbarkStudios/cargo-deny-action@v1
        with:
          arguments: --workspace

  check-spdx-headers:
    runs-on: ubuntu-latest
    steps:
      - name: checkout
        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4
      - uses: enarx/spdx@master
        with:
          licenses: Apache-2.0 BSD-3-Clause MIT

  taplo:
    name: taplo
    runs-on: ubuntu-latest
    container: tamasfe/taplo:latest
    steps:
      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4
      - run: taplo fmt --check

.github/workflows/nix.yml (vendored Normal file, 66 lines)
@@ -0,0 +1,66 @@
name: nix

on:
  pull_request:
    branches: [ "main" ]
  push:
    branches: [ "main" ]

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  check:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4
      - uses: cachix/install-nix-action@6004951b182f8860210c8d6f0d808ec5b1a33d28 # v25
        with:
          extra_nix_config: |
            access-tokens = github.com=${{ github.token }}
      - run: nix flake check -L --show-trace --keep-going

  fmt:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4
      - uses: cachix/install-nix-action@6004951b182f8860210c8d6f0d808ec5b1a33d28 # v25
        with:
          extra_nix_config: |
            access-tokens = github.com=${{ github.token }}
      - run: nix fmt

  clippy:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4
      - uses: cachix/install-nix-action@6004951b182f8860210c8d6f0d808ec5b1a33d28 # v25
        with:
          extra_nix_config: |
            access-tokens = github.com=${{ github.token }}
      - uses: cachix/cachix-action@v14
        continue-on-error: true
        with:
          name: teepot
          authToken: '${{ secrets.CACHIX_AUTH_TOKEN }}'
          extraPullNames: nixsgx
      - name: cargo clippy
        run: nix develop -L --ignore-environment -c cargo clippy --all --locked

  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4
      - uses: cachix/install-nix-action@6004951b182f8860210c8d6f0d808ec5b1a33d28 # v25
        with:
          extra_nix_config: |
            access-tokens = github.com=${{ github.token }}
      - uses: cachix/cachix-action@v14
        continue-on-error: true
        with:
          name: teepot
          authToken: '${{ secrets.CACHIX_AUTH_TOKEN }}'
          extraPullNames: nixsgx
      - name: nix build
        run: nix run nixpkgs#nixci

.github/workflows/secrets_scanner.yaml (vendored Normal file, 18 lines)
@@ -0,0 +1,18 @@
name: Leaked Secrets Scan
on: [pull_request]
jobs:
  TruffleHog:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4
        with:
          fetch-depth: 0
      - name: TruffleHog OSS
        uses: trufflesecurity/trufflehog@4db20e29f8568502b8d69ca2be6ce47a533925d3 # v3.63.3
        with:
          path: ./
          base: ${{ github.event.repository.default_branch }}
          head: HEAD
          extra_args: --debug --only-verified

.gitignore (vendored Normal file, 18 lines)
@@ -0,0 +1,18 @@
# Generated by Cargo
# will have compiled files and executables
debug/
target/

# These are backup files generated by rustfmt
**/*.rs.bk

# MSVC Windows builds of rustc generate these, which store debugging information
*.pdb

# Intellij
/.idea
/.fleet

/.envrc
/.direnv
/result

Cargo.lock (generated Normal file, 3292 lines)
File diff suppressed because it is too large.

Cargo.toml (Normal file, 111 lines)
@@ -0,0 +1,111 @@
[package]
name = "teepot"
description = "TEE secret manager"
# no MIT license, because of copied code from:
# * https://github.com/enarx/enarx
# * https://github.com/enarx/sgx
license = "Apache-2.0"
version.workspace = true
edition.workspace = true
authors.workspace = true
repository.workspace = true

[dependencies]
actix-http.workspace = true
actix-web.workspace = true
anyhow.workspace = true
awc.workspace = true
bitflags.workspace = true
bytemuck.workspace = true
bytes.workspace = true
clap.workspace = true
const-oid.workspace = true
enumset.workspace = true
futures-core.workspace = true
getrandom.workspace = true
hex.workspace = true
intel-tee-quote-verification-rs.workspace = true
num-integer.workspace = true
num-traits.workspace = true
pgp.workspace = true
pkcs8.workspace = true
rand.workspace = true
ring.workspace = true
rsa.workspace = true
rustls.workspace = true
sec1.workspace = true
serde.workspace = true
serde_json.workspace = true
serde_with.workspace = true
sha2.workspace = true
thiserror.workspace = true
tracing.workspace = true
x509-cert.workspace = true
zeroize.workspace = true

[dev-dependencies]
anyhow.workspace = true
base64.workspace = true
hex.workspace = true
testaso.workspace = true

[workspace]
members = ["crates/*", "bin/*"]

[workspace.package]
version = "0.1.0"
edition = "2021"
authors = ["Harald Hoyer <hh@matterlabs.dev>"]
# rest of the workspace, if not specified in the package section
# has the standard Apache-2.0 OR MIT license
license = "Apache-2.0 OR MIT"
repository = "https://github.com/matter-labs/teepot"

[workspace.dependencies]
actix-http = "3"
actix-tls = "3"
actix-web = { version = "4.5", features = ["rustls-0_22"] }
anyhow = "1.0.79"
awc = { version = "3.4", features = ["rustls-0_22-webpki-roots"] }
base64 = "0.21.7"
bindgen = "0.69.4"
bitflags = "2.4"
bytemuck = { version = "1.14.2", features = ["derive", "min_const_generics", "extern_crate_std"] }
bytes = "1"
clap = { version = "4.4", features = ["std", "derive", "env", "error-context", "help", "usage", "wrap_help"], default-features = false }
const-oid = { version = "0.9", default-features = false }
der = "0.7.8"
enumset = { version = "1.1", features = ["serde", "std"] }
futures-core = { version = "0.3.30", features = ["alloc"], default-features = false }
getrandom = "0.2.12"
hex = { version = "0.4.3", features = ["std"], default-features = false }
intel-tee-quote-verification-rs = { path = "crates/intel-tee-quote-verification-rs", version = "0.2.1" }
intel-tee-quote-verification-sys = { path = "crates/intel-tee-quote-verification-sys", version = "0.2.0" }
k256 = "0.13"
log = "0.4"
mio = "0.8.10"
num-integer = "0.1.46"
num-traits = "0.2.18"
pgp = "0.10"
pkcs8 = { version = "0.10" }
rand = "0.8"
ring = { version = "0.17.7", features = ["std"], default-features = false }
rsa = { version = "0.9.6", features = ["sha2"] }
rustls = { version = "0.22" }
rustls-pemfile = "1"
sec1 = { version = "0.7.3", features = ["der"], default-features = false }
serde = { version = "1", features = ["derive", "rc"] }
serde_json = "1"
serde_with = { version = "3.6", features = ["base64", "hex"] }
sha2 = "0.10.8"
teepot = { path = "." }
testaso = "0.1.0"
thiserror = "1.0.56"
tokio = { version = "1", features = ["sync", "macros", "rt-multi-thread", "fs", "time"] }
tracing = "0.1"
tracing-actix-web = "0.7"
tracing-log = "0.2"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
x509 = { version = "0.2", package = "x509-cert", default-features = false }
x509-cert = "0.2.5"
zeroize = { version = "1.7.0", features = ["serde"] }

LICENSE-APACHE (Normal file, 176 lines)
@@ -0,0 +1,176 @@
                              Apache License
                        Version 2.0, January 2004
                     http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

LICENSE-MIT (Normal file, 21 lines)
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2019 Matter Labs

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

README.md (17 lines changed)
@@ -1,2 +1,19 @@
# teepot
Key-value store in a TEE with remote attestation for authentication

## Introduction

This project is a key-value store that runs in a Trusted Execution Environment (TEE) and uses remote attestation for authentication.
The key-value store is implemented using HashiCorp Vault running in an Intel SGX enclave via the Gramine runtime.

## Parts of this project

- `teepot`: The main Rust crate that abstracts TEEs and key-value stores.
- `tee-vault-unseal`: An enclave proxy that uses the Vault API to unseal a Vault instance.
- `vault-unseal`: A client utility that talks to `tee-vault-unseal` to unseal a Vault instance.
- `tee-vault-admin`: An enclave proxy that uses the Vault API to administer a Vault instance.
- `vault-admin`: A client utility that talks to `tee-vault-admin` to administer a Vault instance.
- `teepot-read`: A pre-exec utility that reads key-value pairs from the key-value store and passes them as environment variables to the enclave.
- `teepot-write`: A pre-exec utility that reads key-value pairs from environment variables and writes them to the key-value store.
- `verify-attestation`: A client utility that verifies the attestation of an enclave.
- `tee-key-preexec`: A pre-exec utility that generates a p256 secret key and passes it as an environment variable to the enclave, along with the attestation quote containing the hash of the public key (see the sketch below).

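The pre-exec utilities listed above share one pattern: generate or fetch secret material, expose it to the wrapped program through environment variables, then `exec()` the program so it inherits them. The condensed sketch below is modeled on `bin/tee-key-preexec/src/main.rs` later in this diff; the `teepot::quote::get_quote` call, the environment variable names, and the quote path are taken from that file, while the function shape and the omitted logging/error handling are simplifications for illustration only.

```rust
use std::os::unix::process::CommandExt; // exec() replaces the current process
use std::process::Command;

use hex::ToHex;
use k256::ecdsa::SigningKey;
use sha2::{Digest, Sha256};
use teepot::quote::get_quote; // provided by the `teepot` crate in this repository

fn wrap_and_exec(cmd: &str, args: &[String]) -> anyhow::Result<()> {
    // 1. Generate an ephemeral signing key inside the enclave.
    let signing_key = SigningKey::random(&mut rand::thread_rng());

    // 2. Bind the public key to the TEE identity: the SHA-256 of the
    //    verifying key becomes the quote's report data.
    let pubkey_hash = Sha256::digest(signing_key.verifying_key().to_sec1_bytes());
    let quote = get_quote(&pubkey_hash)?;
    std::fs::write("/tmp/tee_quote", quote)?;

    // 3. Hand everything to the real workload via the environment and exec() it.
    let err = Command::new(cmd)
        .args(args)
        .env("TEE_SIGNING_KEY", signing_key.to_bytes().encode_hex::<String>())
        .env("TEE_QUOTE_FILE", "/tmp/tee_quote")
        .exec();
    Err(err.into()) // exec() only returns on failure
}
```

Binding the hash of the verifying key into the quote's report data is what lets a remote verifier tie the ephemeral key to the enclave measurement.
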
assets/Azure-DCAP-Client.patch (Normal file, 38 lines)
@@ -0,0 +1,38 @@
diff --git a/src/dcap_provider.cpp b/src/dcap_provider.cpp
index d23c250..34c4d9d 100644
--- a/src/dcap_provider.cpp
+++ b/src/dcap_provider.cpp
@@ -1333,7 +1333,7 @@ static std::string build_tcb_info_url(
         tcb_info_url << base_url;
     }
     else
-        tcb_info_url << get_base_url();
+        tcb_info_url << "https://api.trustedservices.intel.com/sgx/certification";

     if (!version.empty())
     {
@@ -1426,7 +1426,7 @@ static std::string build_enclave_id_url(
         qe_id_url << base_url;
     }
     else
-        qe_id_url << get_base_url();
+        qe_id_url << "https://api.trustedservices.intel.com/sgx/certification/";

     // Select the correct issuer header name
     if (!version.empty())
@@ -1521,6 +1521,7 @@ static quote3_error_t get_collateral(
             "Successfully fetched %s from URL: '%s'.",
             friendly_name.c_str(),
             url.c_str());
+/*
         std::string cache_control;
         auto get_cache_header_operation = get_unescape_header(*curl_operation, headers::CACHE_CONTROL, &cache_control);
         retval = convert_to_intel_error(get_cache_header_operation);
@@ -1534,6 +1535,7 @@ static quote3_error_t get_collateral(
             local_cache_add(issuer_chain_cache_name, expiry, issuer_chain.size(), issuer_chain.c_str());
         }
     }
+*/
     }

     return retval;

assets/extract-sig.sh (Normal file, 11 lines)
@@ -0,0 +1,11 @@
#!/usr/bin/env bash
# SPDX-License-Identifier: Apache-2.0
#
# This script is used to extract the signature of a gramine docker image.
#
# Usage: ./extract-sig.sh <image-name> <tee-name> <output-file>
# Example: ./extract-sig.sh tva tee-vault-admin tee-vault-admin.sig

id=$(docker create "$1")
trap 'docker rm -v $id' EXIT
docker cp "$id:/app/$2.sig" "$3"

assets/replace-sig.sh (Normal file, 18 lines)
@@ -0,0 +1,18 @@
#!/usr/bin/env bash
# SPDX-License-Identifier: Apache-2.0
#
# This script is used to replace the signature of a gramine docker image with a new one.
#
# Usage: ./replace-sig.sh <image> <new-signature-file> <old-signature-file>
# Example: ./replace-sig.sh tva tee-vault-admin.sig /app/tee-vault-admin.sig

DOCKERFILE="Dockerfile-tmp-$$"

trap 'rm -f $DOCKERFILE' EXIT

cat > "$DOCKERFILE" <<EOF
FROM $1
COPY $2 $3
EOF

docker build -f "$DOCKERFILE" -t "$1" .

assets/sgx_default_qcnl.conf.json (Normal file, 10 lines)
@@ -0,0 +1,10 @@
{
  "pccs_url": "https://host.containers.internal:8081/sgx/certification/v4/",
  "use_secure_cert": false,
  "collateral_service": "https://api.trustedservices.intel.com/sgx/certification/v4/",
  "retry_times": 6,
  "retry_delay": 10,
  "pck_cache_expire_hours": 168,
  "verify_collateral_cache_expire_hours": 168,
  "local_cache_only": false
}

assets/vault-auth-tee.sha256 (Normal file, 1 line)
@@ -0,0 +1 @@
7e417b65b9a4fa46dfcc72feb0f0c2e5fa4391c3f6b668cb2c5ba7174b95c43b

bin/tee-key-preexec/Cargo.toml (Normal file, 20 lines)
@@ -0,0 +1,20 @@
[package]
name = "tee-key-preexec"
version.workspace = true
edition.workspace = true
authors.workspace = true
license.workspace = true
repository.workspace = true

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
anyhow.workspace = true
hex.workspace = true
k256.workspace = true
rand.workspace = true
sha2.workspace = true
teepot.workspace = true
tracing-log.workspace = true
tracing-subscriber.workspace = true
tracing.workspace = true

bin/tee-key-preexec/src/main.rs (Normal file, 75 lines)
@@ -0,0 +1,75 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright (c) 2024 Matter Labs

//! Pre-exec for binary running in a TEE needing attestation of a secret signing key

#![deny(missing_docs)]
#![deny(clippy::all)]

use anyhow::{Context, Result};
use hex::ToHex;
use k256::ecdsa::SigningKey;
use sha2::Digest;
use sha2::Sha256;
use std::env;
use std::os::unix::process::CommandExt;
use std::process::Command;
use teepot::quote::get_quote;
use tracing::error;
use tracing_log::LogTracer;
use tracing_subscriber::{fmt, prelude::*, EnvFilter, Registry};

const TEE_QUOTE_FILE: &str = "/tmp/tee_quote";

fn main_with_error() -> Result<()> {
    LogTracer::init().context("Failed to set logger")?;

    let subscriber = Registry::default()
        .with(EnvFilter::from_default_env())
        .with(fmt::layer().with_writer(std::io::stderr));
    tracing::subscriber::set_global_default(subscriber).context("Failed to set logger")?;

    let args = env::args_os().collect::<Box<_>>();

    if args.len() < 2 {
        return Err(anyhow::anyhow!(
            "Usage: {} <command> [args...]",
            args[0].to_string_lossy()
        ));
    }

    let mut rng = rand::thread_rng();
    let signing_key = SigningKey::random(&mut rng);
    let verifying_key_bytes = signing_key.verifying_key().to_sec1_bytes();
    let hash_verifying_key = Sha256::digest(verifying_key_bytes);
    let signing_key_string = signing_key.to_bytes().encode_hex::<String>();
    let tee_type = match get_quote(&hash_verifying_key) {
        Ok(quote) => {
            // save quote to file
            std::fs::write(TEE_QUOTE_FILE, quote)?;
            "sgx"
        }
        Err(e) => {
            error!("Failed to get quote: {}", e);
            std::fs::write(TEE_QUOTE_FILE, [])?;
            "none"
        }
    };

    let err = Command::new(&args[1])
        .args(&args[2..])
        .env("TEE_SIGNING_KEY", signing_key_string)
        .env("TEE_QUOTE_FILE", TEE_QUOTE_FILE)
        .env("TEE_TYPE", tee_type)
        .exec();

    Err(err).with_context(|| format!("exec of `{cmd}` failed", cmd = args[1].to_string_lossy()))
}

fn main() -> Result<()> {
    let ret = main_with_error();
    if let Err(e) = &ret {
        error!("Error: {}", e);
    }
    ret
}

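For context, the wrapped workload only sees the three environment variables set above. A minimal, hypothetical consumer (no such consumer is part of this commit) could recover the key and quote as sketched here; `TEE_SIGNING_KEY`, `TEE_QUOTE_FILE`, and `TEE_TYPE` are the names set by `tee-key-preexec`, everything else is illustrative.

```rust
use k256::ecdsa::SigningKey;

fn load_tee_key() -> anyhow::Result<(SigningKey, Vec<u8>, String)> {
    // TEE_SIGNING_KEY holds the hex-encoded secret scalar written by tee-key-preexec.
    let key_hex = std::env::var("TEE_SIGNING_KEY")?;
    let signing_key = SigningKey::from_slice(&hex::decode(key_hex)?)?;

    // TEE_QUOTE_FILE points to the raw quote; it is empty when TEE_TYPE == "none".
    let quote = std::fs::read(std::env::var("TEE_QUOTE_FILE")?)?;
    let tee_type = std::env::var("TEE_TYPE")?;

    Ok((signing_key, quote, tee_type))
}
```
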
bin/tee-self-attestation-test/Cargo.toml (Normal file, 15 lines)
@@ -0,0 +1,15 @@
[package]
name = "tee-self-attestation-test"
version.workspace = true
edition.workspace = true
authors.workspace = true
license.workspace = true
repository.workspace = true

[dependencies]
actix-web.workspace = true
anyhow.workspace = true
teepot.workspace = true
tracing-log.workspace = true
tracing-subscriber.workspace = true
tracing.workspace = true

bin/tee-self-attestation-test/Dockerfile-azure (Normal file, 85 lines)
@@ -0,0 +1,85 @@
FROM docker.io/ubuntu:20.04 AS azuredcap
WORKDIR /build
ADD https://github.com/microsoft/Azure-DCAP-Client/archive/refs/tags/1.12.0.tar.gz ./Azure-DCAP-Client.tar.gz
RUN tar -xvf Azure-DCAP-Client.tar.gz
COPY assets/Azure-DCAP-Client.patch ./Azure-DCAP-Client.patch
RUN set -eux; \
    apt-get update; \
    apt-get install -y software-properties-common; \
    add-apt-repository ppa:team-xbmc/ppa -y; \
    apt-get update; \
    apt-get install -y \
        build-essential \
        cmake \
        libssl-dev \
        libcurl4-openssl-dev \
        pkg-config \
        nlohmann-json3-dev \
        wget \
        dos2unix \
    ;

WORKDIR /build/Azure-DCAP-Client-1.12.0
RUN dos2unix src/dcap_provider.cpp && patch -p1 < ../Azure-DCAP-Client.patch
WORKDIR /build/Azure-DCAP-Client-1.12.0/src/Linux
RUN ./configure && make && make install

FROM docker.io/rust:1-bullseye AS buildtee
RUN curl -fsSLo /usr/share/keyrings/intel.asc https://download.01.org/intel-sgx/sgx_repo/ubuntu/intel-sgx-deb.key \
    && echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel.asc] https://download.01.org/intel-sgx/sgx_repo/ubuntu focal main" > /etc/apt/sources.list.d/intel-sgx.list \
    && apt-get update \
    && apt-get install -y --no-install-recommends \
        build-essential \
        cmake \
        rsync \
        pkg-config \
        libssl-dev \
        libcurl4-openssl-dev \
        libprotobuf-dev \
        protobuf-compiler \
        clang \
        libsgx-headers \
        libsgx-dcap-quote-verify-dev

WORKDIR /opt/vault/plugins

WORKDIR /build
RUN --mount=type=bind,target=/data rsync --exclude='/.git' --filter="dir-merge,- .gitignore" --exclude "Dockerfile-*" --exclude 'tee-self-attestation-test.manifest.template.toml' -av /data/ ./
RUN --mount=type=cache,target=/usr/local/cargo/registry --mount=type=cache,target=target \
    RUSTFLAGS="-C target-cpu=icelake-server --cfg mio_unsupported_force_waker_pipe" \
    cargo build --locked --target x86_64-unknown-linux-gnu --release -p tee-self-attestation-test --bin tee-self-attestation-test \
    && mv ./target/x86_64-unknown-linux-gnu/release/tee-self-attestation-test ./

FROM docker.io/gramineproject/gramine:v1.5

RUN curl -fsSLo /usr/share/keyrings/microsoft.asc https://packages.microsoft.com/keys/microsoft.asc \
    && echo "deb [arch=amd64 signed-by=/usr/share/keyrings/microsoft.asc] https://packages.microsoft.com/ubuntu/20.04/prod focal main" > /etc/apt/sources.list.d/msprod.list \
    && apt-get update \
    && apt purge -y libsgx-dcap-default-qpl \
    && apt-get install -y az-dcap-client

RUN apt purge -y libsgx-ae-qve
# libsgx-urts

RUN rm -rf /var/lib/apt/lists/*

# So we only have to use one gramine template
RUN touch /etc/sgx_default_qcnl.conf

WORKDIR /app

COPY --from=buildtee /build/tee-self-attestation-test .
COPY ./bin/tee-self-attestation-test/tee-self-attestation-test.manifest.template.toml .
COPY vault/enclave-key.pem .

# The original Azure library is still delivering expired collateral, so we have to use a patched version
COPY --from=azuredcap /usr/local/lib/libdcap_quoteprov.so /usr/lib/

RUN gramine-manifest -Darch_libdir=/lib/x86_64-linux-gnu -Dexecdir=/usr/bin -Dlog_level=warning tee-self-attestation-test.manifest.template.toml tee-self-attestation-test.manifest \
    && gramine-sgx-sign --manifest tee-self-attestation-test.manifest --output tee-self-attestation-test.manifest.sgx --key enclave-key.pem \
    && rm enclave-key.pem

EXPOSE 8443

ENTRYPOINT ["/bin/sh", "-c"]
CMD [ "/restart_aesm.sh ; exec gramine-sgx tee-self-attestation-test" ]

bin/tee-self-attestation-test/Dockerfile-intel (Normal file, 58 lines)
@@ -0,0 +1,58 @@
FROM docker.io/rust:1-bullseye AS buildtee
RUN curl -fsSLo /usr/share/keyrings/intel.asc https://download.01.org/intel-sgx/sgx_repo/ubuntu/intel-sgx-deb.key \
    && echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel.asc] https://download.01.org/intel-sgx/sgx_repo/ubuntu focal main" > /etc/apt/sources.list.d/intel-sgx.list \
    && apt-get update \
    && apt-get install -y --no-install-recommends \
        build-essential \
        cmake \
        rsync \
        pkg-config \
        libssl-dev \
        libcurl4-openssl-dev \
        libprotobuf-dev \
        protobuf-compiler \
        clang \
        libsgx-headers \
        libsgx-dcap-quote-verify-dev

WORKDIR /opt/vault/plugins

WORKDIR /build
RUN --mount=type=bind,target=/data rsync --exclude='/.git' --filter="dir-merge,- .gitignore" --exclude "Dockerfile-*" --exclude 'tee-vault-admin.manifest.template' -av /data/ ./
RUN --mount=type=cache,target=/usr/local/cargo/registry --mount=type=cache,target=target \
    RUSTFLAGS="-C target-cpu=icelake-server --cfg mio_unsupported_force_waker_pipe" \
    cargo build --locked --target x86_64-unknown-linux-gnu --release -p tee-self-attestation-test --bin tee-self-attestation-test \
    && mv ./target/x86_64-unknown-linux-gnu/release/tee-self-attestation-test ./

FROM docker.io/gramineproject/gramine:v1.5

RUN curl -fsSLo /usr/share/keyrings/intel.asc https://download.01.org/intel-sgx/sgx_repo/ubuntu/intel-sgx-deb.key \
    && echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel.asc] https://download.01.org/intel-sgx/sgx_repo/ubuntu focal main" > /etc/apt/sources.list.d/intel-sgx.list \
    && apt-get update \
    && apt-get install -y --no-install-recommends \
        libsgx-dcap-default-qpl \
        libsgx-urts \
        libsgx-enclave-common \
        libsgx-dcap-quote-verify
RUN apt purge -y libsgx-ae-qve
RUN rm -rf /var/lib/apt/lists/*

# So we only have to use one gramine template
RUN touch /lib/libdcap_quoteprov.so

WORKDIR /app

COPY --from=buildtee /build/tee-self-attestation-test .
COPY ./bin/tee-self-attestation-test/tee-self-attestation-test.template.toml .
COPY vault/enclave-key.pem .

COPY assets/sgx_default_qcnl.conf.json /etc/sgx_default_qcnl.conf

RUN gramine-manifest -Darch_libdir=/lib/x86_64-linux-gnu -Dexecdir=/usr/bin -Dlog_level=warning tee-self-attestation-test.template.toml tee-self-attestation-test.manifest \
    && gramine-sgx-sign --manifest tee-self-attestation-test.manifest --output tee-self-attestation-test.manifest.sgx --key enclave-key.pem \
    && rm enclave-key.pem

EXPOSE 8443

ENTRYPOINT ["/bin/sh", "-c"]
CMD [ "exec gramine-sgx tee-self-attestation-test" ]

bin/tee-self-attestation-test/src/main.rs (Normal file, 30 lines)
@@ -0,0 +1,30 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright (c) 2023 Matter Labs

//! Simple TEE self-attestation test

#![deny(missing_docs)]
#![deny(clippy::all)]

use anyhow::{Context, Result};
use teepot::server::attestation::get_quote_and_collateral;
use tracing::error;
use tracing_log::LogTracer;
use tracing_subscriber::{fmt, prelude::*, EnvFilter, Registry};

#[actix_web::main]
async fn main() -> Result<()> {
    LogTracer::init().context("Failed to set logger")?;

    let subscriber = Registry::default()
        .with(EnvFilter::from_default_env())
        .with(fmt::layer().with_writer(std::io::stderr));
    tracing::subscriber::set_global_default(subscriber).unwrap();

    let report_data = [0u8; 64];
    if let Err(e) = get_quote_and_collateral(None, &report_data) {
        error!("failed to get quote and collateral: {e:?}");
        return Err(e);
    }
    Ok(())
}

@@ -0,0 +1,57 @@
libos.entrypoint = "/app/tee-self-attestation-test"

[loader]
argv = ["/app/tee-self-attestation-test"]
entrypoint = "file:{{ gramine.libos }}"
env.LD_LIBRARY_PATH = "/lib:{{ arch_libdir }}:/usr{{ arch_libdir }}:/lib"
env.HOME = "/app"
env.MALLOC_ARENA_MAX = "1"
env.AZDCAP_DEBUG_LOG_LEVEL = "ignore"
env.AZDCAP_COLLATERAL_VERSION = "v4"

### DEBUG ###
env.RUST_BACKTRACE = "1"
env.RUST_LOG = "info"

[fs]
root.uri = "file:/"
start_dir = "/app"
mounts = [
    { path = "{{ execdir }}", uri = "file:{{ execdir }}" },
    { path = "/lib", uri = "file:{{ gramine.runtimedir() }}" },
    { path = "{{ arch_libdir }}", uri = "file:{{ arch_libdir }}" },
    { path = "/etc", uri = "file:/etc" },
    { type = "tmpfs", path = "/var/tmp" },
    { type = "tmpfs", path = "/tmp" },
    { type = "tmpfs", path = "/app/.dcap-qcnl" },
    { type = "tmpfs", path = "/app/.az-dcap-client" },
    { path = "/lib/libdcap_quoteprov.so", uri = "file:/lib/libdcap_quoteprov.so" },
]

[sgx]
trusted_files = [
    "file:/etc/ld.so.cache",
    "file:/app/",
    "file:{{ execdir }}/",
    "file:{{ arch_libdir }}/",
    "file:/usr/{{ arch_libdir }}/",
    "file:{{ gramine.libos }}",
    "file:{{ gramine.runtimedir() }}/",
    "file:/usr/lib/ssl/openssl.cnf",
    "file:/etc/ssl/",
    "file:/etc/sgx_default_qcnl.conf",
    "file:/lib/libdcap_quoteprov.so",
]
remote_attestation = "dcap"
max_threads = 64
edmm_enable = false
## max enclave size
enclave_size = "1G"

[sys]
enable_extra_runtime_domain_names_conf = true
enable_sigterm_injection = true

# possible tweak option, if problems with mio
# currently mio is compiled with `mio_unsupported_force_waker_pipe`
# insecure__allow_eventfd = true

bin/tee-stress-client/Cargo.toml (Normal file, 26 lines)
@@ -0,0 +1,26 @@
[package]
name = "tee-stress-client"
version.workspace = true
edition.workspace = true
authors.workspace = true
license.workspace = true
repository.workspace = true

[dependencies]
actix-web.workspace = true
anyhow.workspace = true
awc.workspace = true
base64.workspace = true
bytemuck.workspace = true
clap.workspace = true
hex.workspace = true
mio.workspace = true
rustls.workspace = true
serde.workspace = true
serde_json.workspace = true
sha2.workspace = true
teepot.workspace = true
tracing-actix-web.workspace = true
tracing-log.workspace = true
tracing-subscriber.workspace = true
tracing.workspace = true

bin/tee-stress-client/Dockerfile-azure (Normal file, 85 lines)
@@ -0,0 +1,85 @@
FROM docker.io/ubuntu:20.04 AS azuredcap
WORKDIR /build
ADD https://github.com/microsoft/Azure-DCAP-Client/archive/refs/tags/1.12.0.tar.gz ./Azure-DCAP-Client.tar.gz
RUN tar -xvf Azure-DCAP-Client.tar.gz
COPY assets/Azure-DCAP-Client.patch ./Azure-DCAP-Client.patch
RUN set -eux; \
    apt-get update; \
    apt-get install -y software-properties-common; \
    add-apt-repository ppa:team-xbmc/ppa -y; \
    apt-get update; \
    apt-get install -y \
        build-essential \
        cmake \
        libssl-dev \
        libcurl4-openssl-dev \
        pkg-config \
        nlohmann-json3-dev \
        wget \
        dos2unix \
    ;

WORKDIR /build/Azure-DCAP-Client-1.12.0
RUN dos2unix src/dcap_provider.cpp && patch -p1 < ../Azure-DCAP-Client.patch
WORKDIR /build/Azure-DCAP-Client-1.12.0/src/Linux
RUN ./configure && make && make install

FROM docker.io/rust:1-bullseye AS buildtee
RUN curl -fsSLo /usr/share/keyrings/intel.asc https://download.01.org/intel-sgx/sgx_repo/ubuntu/intel-sgx-deb.key \
    && echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel.asc] https://download.01.org/intel-sgx/sgx_repo/ubuntu focal main" > /etc/apt/sources.list.d/intel-sgx.list \
    && apt-get update \
    && apt-get install -y --no-install-recommends \
        build-essential \
        cmake \
        rsync \
        pkg-config \
        libssl-dev \
        libcurl4-openssl-dev \
        libprotobuf-dev \
        protobuf-compiler \
        clang \
        libsgx-headers \
        libsgx-dcap-quote-verify-dev

WORKDIR /opt/vault/plugins

WORKDIR /build
RUN --mount=type=bind,target=/data rsync --exclude='/.git' --filter="dir-merge,- .gitignore" --exclude "Dockerfile-*" --exclude 'tee-stress-client.manifest.template' -av /data/ ./
RUN --mount=type=cache,target=/usr/local/cargo/registry --mount=type=cache,target=target \
    RUSTFLAGS="-C target-cpu=icelake-server --cfg mio_unsupported_force_waker_pipe" \
    cargo build --locked --target x86_64-unknown-linux-gnu --release -p tee-stress-client --bin tee-stress-client \
    && mv ./target/x86_64-unknown-linux-gnu/release/tee-stress-client ./

FROM docker.io/gramineproject/gramine:v1.5

RUN curl -fsSLo /usr/share/keyrings/microsoft.asc https://packages.microsoft.com/keys/microsoft.asc \
    && echo "deb [arch=amd64 signed-by=/usr/share/keyrings/microsoft.asc] https://packages.microsoft.com/ubuntu/20.04/prod focal main" > /etc/apt/sources.list.d/msprod.list \
    && apt-get update \
    && apt purge -y libsgx-dcap-default-qpl \
    && apt-get install -y az-dcap-client

RUN apt purge -y libsgx-ae-qve
# libsgx-urts

RUN rm -rf /var/lib/apt/lists/*

# So we only have to use one gramine template
RUN touch /etc/sgx_default_qcnl.conf

WORKDIR /app

COPY --from=buildtee /build/tee-stress-client .
COPY ./bin/tee-stress-client/tee-stress-client.manifest.template .
COPY vault/enclave-key.pem .

# The original Azure library is still delivering expired collateral, so we have to use a patched version
COPY --from=azuredcap /usr/local/lib/libdcap_quoteprov.so /usr/lib/

RUN gramine-manifest -Darch_libdir=/lib/x86_64-linux-gnu -Dexecdir=/usr/bin -Dlog_level=warning tee-stress-client.manifest.template tee-stress-client.manifest \
    && gramine-sgx-sign --manifest tee-stress-client.manifest --output tee-stress-client.manifest.sgx --key enclave-key.pem \
    && rm enclave-key.pem

EXPOSE 8443

ENTRYPOINT ["/bin/sh", "-c"]
CMD [ "/restart_aesm.sh ; exec gramine-sgx tee-stress-client" ]

bin/tee-stress-client/Dockerfile-intel (Normal file, 58 lines)
@@ -0,0 +1,58 @@
FROM docker.io/rust:1-bullseye AS buildtee
RUN curl -fsSLo /usr/share/keyrings/intel.asc https://download.01.org/intel-sgx/sgx_repo/ubuntu/intel-sgx-deb.key \
    && echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel.asc] https://download.01.org/intel-sgx/sgx_repo/ubuntu focal main" > /etc/apt/sources.list.d/intel-sgx.list \
    && apt-get update \
    && apt-get install -y --no-install-recommends \
        build-essential \
        cmake \
        rsync \
        pkg-config \
        libssl-dev \
        libcurl4-openssl-dev \
        libprotobuf-dev \
        protobuf-compiler \
        clang \
        libsgx-headers \
        libsgx-dcap-quote-verify-dev

WORKDIR /opt/vault/plugins

WORKDIR /build
RUN --mount=type=bind,target=/data rsync --exclude='/.git' --filter="dir-merge,- .gitignore" --exclude "Dockerfile-*" --exclude 'tee-stress-client.manifest.template' -av /data/ ./
RUN --mount=type=cache,target=/usr/local/cargo/registry --mount=type=cache,target=target \
    RUSTFLAGS="-C target-cpu=icelake-server --cfg mio_unsupported_force_waker_pipe" \
    cargo build --locked --target x86_64-unknown-linux-gnu --release -p tee-stress-client --bin tee-stress-client \
    && mv ./target/x86_64-unknown-linux-gnu/release/tee-stress-client ./

FROM docker.io/gramineproject/gramine:v1.5

RUN curl -fsSLo /usr/share/keyrings/intel.asc https://download.01.org/intel-sgx/sgx_repo/ubuntu/intel-sgx-deb.key \
    && echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel.asc] https://download.01.org/intel-sgx/sgx_repo/ubuntu focal main" > /etc/apt/sources.list.d/intel-sgx.list \
    && apt-get update \
    && apt-get install -y --no-install-recommends \
        libsgx-dcap-default-qpl \
        libsgx-urts \
        libsgx-enclave-common \
        libsgx-dcap-quote-verify
RUN apt purge -y libsgx-ae-qve
RUN rm -rf /var/lib/apt/lists/*

# So we only have to use one gramine template
RUN touch /lib/libdcap_quoteprov.so

WORKDIR /app

COPY --from=buildtee /build/tee-stress-client .
COPY ./bin/tee-stress-client/tee-stress-client.manifest.template .
COPY vault/enclave-key.pem .

COPY assets/sgx_default_qcnl.conf.json /etc/sgx_default_qcnl.conf

RUN gramine-manifest -Darch_libdir=/lib/x86_64-linux-gnu -Dexecdir=/usr/bin -Dlog_level=warning tee-stress-client.manifest.template tee-stress-client.manifest \
    && gramine-sgx-sign --manifest tee-stress-client.manifest --output tee-stress-client.manifest.sgx --key enclave-key.pem \
    && rm enclave-key.pem

EXPOSE 8443

ENTRYPOINT ["/bin/sh", "-c"]
CMD [ "/restart_aesm.sh ; exec gramine-sgx tee-stress-client" ]

bin/tee-stress-client/src/main.rs (Normal file, 101 lines)
@@ -0,0 +1,101 @@
// SPDX-License-Identifier: Apache-2.0

//! Server to handle requests to the Vault TEE

#![deny(missing_docs)]
#![deny(clippy::all)]

use actix_web::rt::time::sleep;
use anyhow::{Context, Result};
use clap::Parser;
use serde::{Deserialize, Serialize};
use std::time::Duration;
use teepot::client::vault::VaultConnection;
use teepot::server::attestation::{get_quote_and_collateral, VaultAttestationArgs};
use teepot::server::pki::make_self_signed_cert;
use teepot::sgx::{parse_tcb_levels, EnumSet, TcbLevel};
use tracing::{error, trace};
use tracing_log::LogTracer;
use tracing_subscriber::Registry;
use tracing_subscriber::{fmt, prelude::*, EnvFilter};

#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Arguments {
    /// allowed TCB levels, comma separated
    #[arg(long, value_parser = parse_tcb_levels, env = "ALLOWED_TCB_LEVELS", default_value = "Ok")]
    my_sgx_allowed_tcb_levels: EnumSet<TcbLevel>,
    #[clap(flatten)]
    pub attestation: VaultAttestationArgs,
}

#[derive(Debug, Serialize, Deserialize)]
struct MySecret {
    val: usize,
}

#[actix_web::main]
async fn main() -> Result<()> {
    LogTracer::init().context("Failed to set logger")?;

    let subscriber = Registry::default()
        .with(EnvFilter::from_default_env())
        .with(fmt::layer().with_writer(std::io::stderr));
    tracing::subscriber::set_global_default(subscriber).unwrap();

    let args = Arguments::parse();

    let (report_data, _cert_chain, _priv_key) = make_self_signed_cert()?;
    if let Err(e) = get_quote_and_collateral(Some(args.my_sgx_allowed_tcb_levels), &report_data) {
        error!("failed to get quote and collateral: {e:?}");
        // don't return for now, we can still serve requests but we won't be able to attest
    }

    let mut vault_1 = args.attestation.clone();
    let mut vault_2 = args.attestation.clone();
    let mut vault_3 = args.attestation.clone();

    vault_1.vault_addr = "https://vault-1:8210".to_string();
    vault_2.vault_addr = "https://vault-2:8210".to_string();
    vault_3.vault_addr = "https://vault-3:8210".to_string();

    let servers = vec![vault_1.clone(), vault_2.clone(), vault_3.clone()];

    let mut val: usize = 1;

    loop {
        let mut conns = Vec::new();
        for server in &servers {
            match VaultConnection::new(&server.into(), "stress".to_string()).await {
                Ok(conn) => conns.push(conn),
                Err(e) => {
                    error!("connecting to {}: {}", server.vault_addr, e);
                    continue;
                }
            }
        }

        if conns.is_empty() {
            error!("no connections");
            sleep(Duration::from_secs(1)).await;
            continue;
        }

        let i = val % conns.len();
        trace!("storing secret");
        conns[i]
            .store_secret(MySecret { val }, "val")
            .await
            .context("storing secret")?;
        for conn in conns {
            let got: MySecret = conn
                .load_secret("val")
                .await
                .context("loading secret")?
                .context("loading secret")?;
            assert_eq!(got.val, val);
        }
        val += 1;
        sleep(Duration::from_secs(1)).await;
    }
}

bin/tee-stress-client/tee-stress-client.manifest.template (Normal file, 66 lines)
@@ -0,0 +1,66 @@
libos.entrypoint = "/app/tee-stress-client"

[loader]
argv = [ "/app/tee-stress-client" ]
entrypoint = "file:{{ gramine.libos }}"
env.LD_LIBRARY_PATH = "/lib:{{ arch_libdir }}:/usr{{ arch_libdir }}:/lib"
env.HOME = "/app"
env.MALLOC_ARENA_MAX = "1"
env.AZDCAP_DEBUG_LOG_LEVEL = "ignore"
env.AZDCAP_COLLATERAL_VERSION = "v4"

### Admin Config ###
env.PORT = { passthrough = true }

### VAULT attestation ###
env.VAULT_ADDR = { passthrough = true }
env.VAULT_SGX_MRENCLAVE = { passthrough = true }
env.VAULT_SGX_MRSIGNER = { passthrough = true }
env.VAULT_SGX_ALLOWED_TCB_LEVELS = { passthrough = true }

### DEBUG ###
env.RUST_BACKTRACE = "1"
env.RUST_LOG = "info"

[fs]
root.uri = "file:/"
start_dir = "/app"
mounts = [
    { path = "{{ execdir }}", uri = "file:{{ execdir }}" },
    { path = "/lib", uri = "file:{{ gramine.runtimedir() }}" },
    { path = "{{ arch_libdir }}", uri = "file:{{ arch_libdir }}" },
    { path = "/etc", uri = "file:/etc" },
    { type = "tmpfs", path = "/var/tmp" },
    { type = "tmpfs", path = "/tmp" },
    { type = "tmpfs", path = "/app/.dcap-qcnl" },
    { type = "tmpfs", path = "/app/.az-dcap-client" },
    { path = "/lib/libdcap_quoteprov.so", uri = "file:/lib/libdcap_quoteprov.so" },
]

[sgx]
trusted_files = [
    "file:/etc/ld.so.cache",
    "file:/app/",
    "file:{{ execdir }}/",
    "file:{{ arch_libdir }}/",
    "file:/usr/{{ arch_libdir }}/",
    "file:{{ gramine.libos }}",
    "file:{{ gramine.runtimedir() }}/",
    "file:/usr/lib/ssl/openssl.cnf",
    "file:/etc/ssl/",
    "file:/etc/sgx_default_qcnl.conf",
    "file:/lib/libdcap_quoteprov.so",
]
remote_attestation = "dcap"
max_threads = 64
edmm_enable = false
## max enclave size
enclave_size = "8G"

[sys]
enable_extra_runtime_domain_names_conf = true
enable_sigterm_injection = true

# possible tweak option, if problems with mio
# currently mio is compiled with `mio_unsupported_force_waker_pipe`
# insecure__allow_eventfd = true

26 bin/tee-vault-admin/Cargo.toml Normal file
@@ -0,0 +1,26 @@
[package]
|
||||
name = "tee-vault-admin"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
authors.workspace = true
|
||||
license.workspace = true
|
||||
repository.workspace = true
|
||||
|
||||
[dependencies]
|
||||
actix-web.workspace = true
|
||||
anyhow.workspace = true
|
||||
awc.workspace = true
|
||||
base64.workspace = true
|
||||
bytemuck.workspace = true
|
||||
clap.workspace = true
|
||||
hex.workspace = true
|
||||
mio.workspace = true
|
||||
rustls.workspace = true
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
sha2.workspace = true
|
||||
teepot.workspace = true
|
||||
tracing-actix-web.workspace = true
|
||||
tracing-log.workspace = true
|
||||
tracing-subscriber.workspace = true
|
||||
tracing.workspace = true
|
85 bin/tee-vault-admin/Dockerfile-azure Normal file
@@ -0,0 +1,85 @@
FROM docker.io/ubuntu:20.04 AS azuredcap
|
||||
WORKDIR /build
|
||||
ADD https://github.com/microsoft/Azure-DCAP-Client/archive/refs/tags/1.12.0.tar.gz ./Azure-DCAP-Client.tar.gz
|
||||
RUN tar -xvf Azure-DCAP-Client.tar.gz
|
||||
COPY assets/Azure-DCAP-Client.patch ./Azure-DCAP-Client.patch
|
||||
RUN set -eux; \
|
||||
apt-get update; \
|
||||
apt-get install -y software-properties-common; \
|
||||
add-apt-repository ppa:team-xbmc/ppa -y; \
|
||||
apt-get update; \
|
||||
apt-get install -y \
|
||||
build-essential \
|
||||
cmake \
|
||||
libssl-dev \
|
||||
libcurl4-openssl-dev \
|
||||
pkg-config \
|
||||
nlohmann-json3-dev \
|
||||
wget \
|
||||
dos2unix \
|
||||
;
|
||||
|
||||
WORKDIR /build/Azure-DCAP-Client-1.12.0
|
||||
RUN dos2unix src/dcap_provider.cpp && patch -p1 < ../Azure-DCAP-Client.patch
|
||||
WORKDIR /build/Azure-DCAP-Client-1.12.0/src/Linux
|
||||
RUN ./configure && make && make install
|
||||
|
||||
FROM docker.io/rust:1-bullseye AS buildtee
|
||||
RUN curl -fsSLo /usr/share/keyrings/intel.asc https://download.01.org/intel-sgx/sgx_repo/ubuntu/intel-sgx-deb.key \
|
||||
&& echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel.asc] https://download.01.org/intel-sgx/sgx_repo/ubuntu focal main" > /etc/apt/sources.list.d/intel-sgx.list \
|
||||
&& apt-get update \
|
||||
&& apt-get install -y --no-install-recommends \
|
||||
build-essential \
|
||||
cmake \
|
||||
rsync \
|
||||
pkg-config \
|
||||
libssl-dev \
|
||||
libcurl4-openssl-dev \
|
||||
libprotobuf-dev \
|
||||
protobuf-compiler \
|
||||
clang \
|
||||
libsgx-headers \
|
||||
libsgx-dcap-quote-verify-dev
|
||||
|
||||
WORKDIR /opt/vault/plugins
|
||||
|
||||
WORKDIR /build
|
||||
RUN --mount=type=bind,target=/data rsync --exclude='/.git' --filter="dir-merge,- .gitignore" --exclude "Dockerfile-*" --exclude 'tee-vault-admin.manifest.template' -av /data/ ./
|
||||
RUN --mount=type=cache,target=/usr/local/cargo/registry --mount=type=cache,target=target \
|
||||
RUSTFLAGS="-C target-cpu=icelake-server --cfg mio_unsupported_force_waker_pipe" \
|
||||
cargo build --locked --target x86_64-unknown-linux-gnu --release -p tee-vault-admin --bin tee-vault-admin \
|
||||
&& mv ./target/x86_64-unknown-linux-gnu/release/tee-vault-admin ./
|
||||
|
||||
FROM docker.io/gramineproject/gramine:v1.5
|
||||
|
||||
RUN curl -fsSLo /usr/share/keyrings/microsoft.asc https://packages.microsoft.com/keys/microsoft.asc \
|
||||
&& echo "deb [arch=amd64 signed-by=/usr/share/keyrings/microsoft.asc] https://packages.microsoft.com/ubuntu/20.04/prod focal main" > /etc/apt/sources.list.d/msprod.list \
|
||||
&& apt-get update \
|
||||
&& apt purge -y libsgx-dcap-default-qpl \
|
||||
&& apt-get install -y az-dcap-client
|
||||
|
||||
RUN apt purge -y libsgx-ae-qve
|
||||
# libsgx-urts
|
||||
|
||||
RUN rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# So we only have to use one gramine template
|
||||
RUN touch /etc/sgx_default_qcnl.conf
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
COPY --from=buildtee /build/tee-vault-admin .
|
||||
COPY ./bin/tee-vault-admin/tee-vault-admin.manifest.template .
|
||||
COPY vault/enclave-key.pem .
|
||||
|
||||
# The original Azure library is still delivering expired collateral, so we have to use a patched version
|
||||
COPY --from=azuredcap /usr/local/lib/libdcap_quoteprov.so /usr/lib/
|
||||
|
||||
RUN gramine-manifest -Darch_libdir=/lib/x86_64-linux-gnu -Dexecdir=/usr/bin -Dlog_level=warning tee-vault-admin.manifest.template tee-vault-admin.manifest \
|
||||
&& gramine-sgx-sign --manifest tee-vault-admin.manifest --output tee-vault-admin.manifest.sgx --key enclave-key.pem \
|
||||
&& rm enclave-key.pem
|
||||
|
||||
EXPOSE 8443
|
||||
|
||||
ENTRYPOINT ["/bin/sh", "-c"]
|
||||
CMD [ "/restart_aesm.sh ; exec gramine-sgx tee-vault-admin" ]
|
58 bin/tee-vault-admin/Dockerfile-intel Normal file
@@ -0,0 +1,58 @@
FROM docker.io/rust:1-bullseye AS buildtee
|
||||
RUN curl -fsSLo /usr/share/keyrings/intel.asc https://download.01.org/intel-sgx/sgx_repo/ubuntu/intel-sgx-deb.key \
|
||||
&& echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel.asc] https://download.01.org/intel-sgx/sgx_repo/ubuntu focal main" > /etc/apt/sources.list.d/intel-sgx.list \
|
||||
&& apt-get update \
|
||||
&& apt-get install -y --no-install-recommends \
|
||||
build-essential \
|
||||
cmake \
|
||||
rsync \
|
||||
pkg-config \
|
||||
libssl-dev \
|
||||
libcurl4-openssl-dev \
|
||||
libprotobuf-dev \
|
||||
protobuf-compiler \
|
||||
clang \
|
||||
libsgx-headers \
|
||||
libsgx-dcap-quote-verify-dev
|
||||
|
||||
WORKDIR /opt/vault/plugins
|
||||
|
||||
WORKDIR /build
|
||||
RUN --mount=type=bind,target=/data rsync --exclude='/.git' --filter="dir-merge,- .gitignore" --exclude "Dockerfile-*" --exclude 'tee-vault-admin.manifest.template' -av /data/ ./
|
||||
RUN --mount=type=cache,target=/usr/local/cargo/registry --mount=type=cache,target=target \
|
||||
RUSTFLAGS="-C target-cpu=icelake-server --cfg mio_unsupported_force_waker_pipe" \
|
||||
cargo build --locked --target x86_64-unknown-linux-gnu --release -p tee-vault-admin --bin tee-vault-admin \
|
||||
&& mv ./target/x86_64-unknown-linux-gnu/release/tee-vault-admin ./
|
||||
|
||||
FROM docker.io/gramineproject/gramine:v1.5
|
||||
|
||||
RUN curl -fsSLo /usr/share/keyrings/intel.asc https://download.01.org/intel-sgx/sgx_repo/ubuntu/intel-sgx-deb.key \
|
||||
&& echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel.asc] https://download.01.org/intel-sgx/sgx_repo/ubuntu focal main" > /etc/apt/sources.list.d/intel-sgx.list \
|
||||
&& apt-get update \
|
||||
&& apt-get install -y --no-install-recommends \
|
||||
libsgx-dcap-default-qpl \
|
||||
libsgx-urts \
|
||||
libsgx-enclave-common \
|
||||
libsgx-dcap-quote-verify
|
||||
RUN apt purge -y libsgx-ae-qve
|
||||
RUN rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# So we only have to use one gramine template
|
||||
RUN touch /lib/libdcap_quoteprov.so
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
COPY --from=buildtee /build/tee-vault-admin .
|
||||
COPY ./bin/tee-vault-admin/tee-vault-admin.manifest.template .
|
||||
COPY vault/enclave-key.pem .
|
||||
|
||||
COPY assets/sgx_default_qcnl.conf.json /etc/sgx_default_qcnl.conf
|
||||
|
||||
RUN gramine-manifest -Darch_libdir=/lib/x86_64-linux-gnu -Dexecdir=/usr/bin -Dlog_level=warning tee-vault-admin.manifest.template tee-vault-admin.manifest \
|
||||
&& gramine-sgx-sign --manifest tee-vault-admin.manifest --output tee-vault-admin.manifest.sgx --key enclave-key.pem \
|
||||
&& rm enclave-key.pem
|
||||
|
||||
EXPOSE 8443
|
||||
|
||||
ENTRYPOINT ["/bin/sh", "-c"]
|
||||
CMD [ "/restart_aesm.sh ; exec gramine-sgx tee-vault-admin" ]
|
25 bin/tee-vault-admin/src/attestation.rs Normal file
@@ -0,0 +1,25 @@
// SPDX-License-Identifier: Apache-2.0
|
||||
// Copyright (c) 2023 Matter Labs
|
||||
|
||||
//! attestation
|
||||
|
||||
use crate::ServerState;
|
||||
use actix_web::http::StatusCode;
|
||||
use actix_web::web::{Data, Json};
|
||||
use anyhow::{Context, Result};
|
||||
use std::sync::Arc;
|
||||
use teepot::json::http::AttestationResponse;
|
||||
use teepot::server::attestation::get_quote_and_collateral;
|
||||
use teepot::server::{HttpResponseError, Status};
|
||||
use tracing::instrument;
|
||||
|
||||
/// Get attestation
|
||||
#[instrument(level = "info", name = "/v1/sys/attestation", skip_all)]
|
||||
pub async fn get_attestation(
|
||||
worker: Data<Arc<ServerState>>,
|
||||
) -> Result<Json<AttestationResponse>, HttpResponseError> {
|
||||
get_quote_and_collateral(None, &worker.report_data)
|
||||
.context("Error getting attestation")
|
||||
.map(Json)
|
||||
.status(StatusCode::INTERNAL_SERVER_ERROR)
|
||||
}
|
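
For context, `report_data` here is the 64-byte SGX report-data field, populated from the hash of the server's TLS public key (see `make_self_signed_cert` in main.rs). A minimal sketch of that binding, assuming the DER-encoded public key as input:

use sha2::{Digest, Sha256};

/// Hash a DER-encoded public key into the 64-byte SGX report_data buffer:
/// the first 32 bytes carry the SHA-256 digest, the rest stay zero.
fn report_data_from_pub_key(pub_key_der: &[u8]) -> [u8; 64] {
    let hash = Sha256::digest(pub_key_der);
    let mut report_data = [0u8; 64];
    report_data[..32].copy_from_slice(&hash);
    report_data
}
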
89 bin/tee-vault-admin/src/command.rs Normal file
@@ -0,0 +1,89 @@
// SPDX-License-Identifier: Apache-2.0
|
||||
// Copyright (c) 2023 Matter Labs
|
||||
|
||||
//! post commands
|
||||
|
||||
use crate::ServerState;
|
||||
use actix_web::web;
|
||||
use anyhow::{anyhow, Context, Result};
|
||||
use awc::http::StatusCode;
|
||||
use sha2::{Digest, Sha256};
|
||||
use std::sync::Arc;
|
||||
use teepot::client::vault::VaultConnection;
|
||||
use teepot::json::http::{
|
||||
VaultCommandRequest, VaultCommandResponse, VaultCommands, VaultCommandsResponse,
|
||||
};
|
||||
use teepot::json::secrets::{AdminConfig, AdminState};
|
||||
use teepot::server::{signatures::VerifySig, HttpResponseError, Status};
|
||||
use tracing::instrument;
|
||||
|
||||
/// Post command
|
||||
#[instrument(level = "info", name = "/v1/command", skip_all)]
|
||||
pub async fn post_command(
|
||||
state: web::Data<Arc<ServerState>>,
|
||||
item: web::Json<VaultCommandRequest>,
|
||||
) -> Result<web::Json<VaultCommandsResponse>, HttpResponseError> {
|
||||
let conn = VaultConnection::new(&state.vault_attestation.clone().into(), "admin".to_string())
|
||||
.await
|
||||
.context("connecting to vault")
|
||||
.status(StatusCode::BAD_GATEWAY)?;
|
||||
|
||||
let mut admin_state: AdminState = conn
|
||||
.load_secret("state")
|
||||
.await?
|
||||
.context("empty admin state")
|
||||
.status(StatusCode::BAD_GATEWAY)?;
|
||||
|
||||
let commands: VaultCommands = serde_json::from_str(&item.commands)
|
||||
.context("parsing commands")
|
||||
.status(StatusCode::BAD_REQUEST)?;
|
||||
|
||||
if admin_state.last_digest.to_ascii_lowercase() != commands.last_digest {
|
||||
return Err(anyhow!(
|
||||
"last digest does not match {} != {}",
|
||||
admin_state.last_digest.to_ascii_lowercase(),
|
||||
commands.last_digest
|
||||
))
|
||||
.status(StatusCode::BAD_REQUEST);
|
||||
}
|
||||
|
||||
let admin_config: AdminConfig = conn
|
||||
.load_secret("config")
|
||||
.await?
|
||||
.context("empty admin config")
|
||||
.status(StatusCode::BAD_GATEWAY)?;
|
||||
admin_config.check_sigs(&item.signatures, item.commands.as_bytes())?;
|
||||
|
||||
let mut hasher = Sha256::new();
|
||||
hasher.update(item.commands.as_bytes());
|
||||
let hash = hasher.finalize();
|
||||
let digest = hex::encode(hash);
|
||||
admin_state.last_digest = digest.clone();
|
||||
conn.store_secret(admin_state, "state").await?;
|
||||
|
||||
let mut responds = VaultCommandsResponse {
|
||||
digest,
|
||||
results: vec![],
|
||||
};
|
||||
|
||||
for (pos, command) in commands.commands.iter().enumerate() {
|
||||
let resp = conn
|
||||
.vault_put(
|
||||
&format!("Executing command {pos}"),
|
||||
&command.url,
|
||||
&command.data,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let vcr = VaultCommandResponse {
|
||||
status_code: resp.0.as_u16(),
|
||||
value: resp.1,
|
||||
};
|
||||
|
||||
responds.results.push(vcr);
|
||||
}
|
||||
|
||||
let _ = conn.revoke_token().await;
|
||||
|
||||
Ok(web::Json(responds))
|
||||
}
|
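
The digest chaining above can be reproduced client-side to predict the `last_digest` that the next request must reference. A minimal sketch using the same `sha2`/`hex` crates:

use sha2::{Digest, Sha256};

/// Hex-encoded SHA-256 of the serialized command batch; this is the value
/// stored as `last_digest` after a successful /v1/command call.
fn command_digest(commands_json: &str) -> String {
    let mut hasher = Sha256::new();
    hasher.update(commands_json.as_bytes());
    hex::encode(hasher.finalize())
}
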
34 bin/tee-vault-admin/src/digest.rs Normal file
@@ -0,0 +1,34 @@
// SPDX-License-Identifier: Apache-2.0
|
||||
// Copyright (c) 2023 Matter Labs
|
||||
|
||||
//! digest
|
||||
|
||||
use crate::ServerState;
|
||||
use actix_web::{web, HttpResponse};
|
||||
use anyhow::{Context, Result};
|
||||
use awc::http::StatusCode;
|
||||
use serde_json::json;
|
||||
use std::sync::Arc;
|
||||
use teepot::client::vault::VaultConnection;
|
||||
use teepot::json::secrets::AdminState;
|
||||
use teepot::server::{HttpResponseError, Status};
|
||||
use tracing::instrument;
|
||||
|
||||
/// Get last digest
|
||||
#[instrument(level = "info", name = "/v1/digest", skip_all)]
|
||||
pub async fn get_digest(
|
||||
state: web::Data<Arc<ServerState>>,
|
||||
) -> Result<HttpResponse, HttpResponseError> {
|
||||
let conn = VaultConnection::new(&state.vault_attestation.clone().into(), "admin".to_string())
|
||||
.await
|
||||
.context("connecting to vault")
|
||||
.status(StatusCode::BAD_GATEWAY)?;
|
||||
|
||||
let admin_state: AdminState = conn
|
||||
.load_secret("state")
|
||||
.await?
|
||||
.context("empty admin state")
|
||||
.status(StatusCode::BAD_GATEWAY)?;
|
||||
|
||||
Ok(HttpResponse::Ok().json(json!({"last_digest": admin_state.last_digest })))
|
||||
}
|
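
A client can read the current chain head from `/v1/digest` before composing the next signed batch. A rough `awc` sketch, assuming a `Client` already configured with the attested TLS setup shown elsewhere (the response shape is taken from the handler above):

use awc::Client;

// Must run inside an actix runtime; TLS/attestation setup is omitted here.
async fn fetch_last_digest(client: &Client, base_url: &str) -> anyhow::Result<String> {
    let mut resp = client
        .get(format!("{}/v1/digest", base_url))
        .send()
        .await
        .map_err(|e| anyhow::anyhow!("request failed: {e}"))?;
    let body: serde_json::Value = resp
        .json()
        .await
        .map_err(|e| anyhow::anyhow!("invalid JSON: {e}"))?;
    Ok(body["last_digest"].as_str().unwrap_or_default().to_string())
}
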
148 bin/tee-vault-admin/src/main.rs Normal file
@@ -0,0 +1,148 @@
// SPDX-License-Identifier: Apache-2.0
|
||||
// Copyright (c) 2023 Matter Labs
|
||||
|
||||
//! Server to handle requests to the Vault TEE
|
||||
|
||||
#![deny(missing_docs)]
|
||||
#![deny(clippy::all)]
|
||||
|
||||
mod attestation;
|
||||
mod command;
|
||||
mod digest;
|
||||
mod sign;
|
||||
|
||||
use actix_web::web::Data;
|
||||
use actix_web::{web, App, HttpServer};
|
||||
use anyhow::{Context, Result};
|
||||
use attestation::get_attestation;
|
||||
use clap::Parser;
|
||||
use command::post_command;
|
||||
use digest::get_digest;
|
||||
use rustls::ServerConfig;
|
||||
use sign::post_sign;
|
||||
use std::net::Ipv6Addr;
|
||||
use std::sync::Arc;
|
||||
use teepot::json::http::{SignRequest, VaultCommandRequest, ATTESTATION_URL, DIGEST_URL};
|
||||
use teepot::server::attestation::{get_quote_and_collateral, VaultAttestationArgs};
|
||||
use teepot::server::new_json_cfg;
|
||||
use teepot::server::pki::make_self_signed_cert;
|
||||
use teepot::sgx::{parse_tcb_levels, EnumSet, TcbLevel};
|
||||
use tracing::{error, info};
|
||||
use tracing_actix_web::TracingLogger;
|
||||
use tracing_log::LogTracer;
|
||||
use tracing_subscriber::Registry;
|
||||
use tracing_subscriber::{fmt, prelude::*, EnvFilter};
|
||||
|
||||
/// Server state
|
||||
pub struct ServerState {
|
||||
/// Server TLS public key hash
|
||||
pub report_data: [u8; 64],
|
||||
/// Vault attestation args
|
||||
pub vault_attestation: VaultAttestationArgs,
|
||||
}
|
||||
|
||||
#[derive(Parser, Debug)]
|
||||
#[command(author, version, about, long_about = None)]
|
||||
struct Arguments {
|
||||
/// allowed TCB levels, comma separated
|
||||
#[arg(long, value_parser = parse_tcb_levels, env = "ALLOWED_TCB_LEVELS", default_value = "Ok")]
|
||||
server_sgx_allowed_tcb_levels: EnumSet<TcbLevel>,
|
||||
/// port to listen on
|
||||
#[arg(long, env = "PORT", default_value = "8444")]
|
||||
port: u16,
|
||||
#[clap(flatten)]
|
||||
pub attestation: VaultAttestationArgs,
|
||||
}
|
||||
|
||||
#[actix_web::main]
|
||||
async fn main() -> Result<()> {
|
||||
LogTracer::init().context("Failed to set logger")?;
|
||||
|
||||
let subscriber = Registry::default()
|
||||
.with(EnvFilter::from_default_env())
|
||||
.with(fmt::layer().with_writer(std::io::stderr));
|
||||
tracing::subscriber::set_global_default(subscriber).unwrap();
|
||||
|
||||
let args = Arguments::parse();
|
||||
|
||||
let (report_data, cert_chain, priv_key) = make_self_signed_cert()?;
|
||||
|
||||
if let Err(e) = get_quote_and_collateral(Some(args.server_sgx_allowed_tcb_levels), &report_data)
|
||||
{
|
||||
error!("failed to get quote and collateral: {e:?}");
|
||||
// don't return for now, we can still serve requests but we won't be able to attest
|
||||
}
|
||||
|
||||
// init server config builder with safe defaults
|
||||
let config = ServerConfig::builder()
|
||||
.with_no_client_auth()
|
||||
.with_single_cert([cert_chain].into(), priv_key)
|
||||
.context("Failed to load TLS key/cert files")?;
|
||||
|
||||
info!("Starting HTTPS server at port {}", args.port);
|
||||
|
||||
info!("Quote verified! Connection secure!");
|
||||
|
||||
let server_state = Arc::new(ServerState {
|
||||
report_data,
|
||||
vault_attestation: args.attestation,
|
||||
});
|
||||
|
||||
let server = match HttpServer::new(move || {
|
||||
App::new()
|
||||
// enable logger
|
||||
.wrap(TracingLogger::default())
|
||||
.app_data(new_json_cfg())
|
||||
.app_data(Data::new(server_state.clone()))
|
||||
.service(web::resource(ATTESTATION_URL).route(web::get().to(get_attestation)))
|
||||
.service(web::resource(VaultCommandRequest::URL).route(web::post().to(post_command)))
|
||||
.service(web::resource(SignRequest::URL).route(web::post().to(post_sign)))
|
||||
.service(web::resource(DIGEST_URL).route(web::get().to(get_digest)))
|
||||
})
|
||||
.bind_rustls_0_22((Ipv6Addr::UNSPECIFIED, args.port), config)
|
||||
{
|
||||
Ok(c) => c,
|
||||
Err(e) => {
|
||||
error!("Failed to bind to port {}: {e:?}", args.port);
|
||||
return Err(e).context(format!("Failed to bind to port {}", args.port));
|
||||
}
|
||||
};
|
||||
|
||||
if let Err(e) = server.worker_max_blocking_threads(2).workers(8).run().await {
|
||||
error!("failed to start HTTPS server: {e:?}");
|
||||
return Err(e).context("Failed to start HTTPS server");
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use serde_json::json;
|
||||
use teepot::json::http::{VaultCommand, VaultCommands};
|
||||
|
||||
const TEST_DATA: &str = include_str!("../../../tests/data/test.json");
|
||||
|
||||
#[test]
|
||||
fn test_vault_commands() {
|
||||
let cmd = VaultCommand {
|
||||
url: "/v1/auth/tee/tees/test".to_string(),
|
||||
data: json!({
|
||||
"lease": "1000",
|
||||
"name": "test",
|
||||
"types": "sgx",
|
||||
"sgx_allowed_tcb_levels": "Ok,SwHardeningNeeded",
|
||||
"sgx_mrsigner": "c5591a72b8b86e0d8814d6e8750e3efe66aea2d102b8ba2405365559b858697d",
|
||||
"token_policies": "test"
|
||||
}),
|
||||
};
|
||||
let cmds = VaultCommands {
|
||||
commands: vec![cmd],
|
||||
last_digest: "".into(),
|
||||
};
|
||||
|
||||
let test_data_cmds: VaultCommands = serde_json::from_str(TEST_DATA).unwrap();
|
||||
|
||||
assert_eq!(cmds, test_data_cmds);
|
||||
}
|
||||
}
|
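
For reference, a client submits command batches to `/v1/command` as wired up above. A rough `awc` sketch; the field names are inferred from the handler in command.rs, and the canonical request type is `teepot::json::http::VaultCommandRequest`, which may differ:

use awc::Client;
use serde_json::json;

// Must run inside an actix runtime; TLS/attestation setup is omitted here.
async fn post_commands(
    client: &Client,
    base_url: &str,
    commands_json: String,
    signatures: Vec<String>,
) -> anyhow::Result<serde_json::Value> {
    let mut resp = client
        .post(format!("{}/v1/command", base_url))
        // Assumed wire format: the serialized command batch plus detached signatures.
        .send_json(&json!({ "commands": commands_json, "signatures": signatures }))
        .await
        .map_err(|e| anyhow::anyhow!("request failed: {e}"))?;
    resp.json::<serde_json::Value>()
        .await
        .map_err(|e| anyhow::anyhow!("invalid JSON: {e}"))
}
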
149 bin/tee-vault-admin/src/sign.rs Normal file
@@ -0,0 +1,149 @@
// SPDX-License-Identifier: Apache-2.0
|
||||
// Copyright (c) 2023 Matter Labs
|
||||
|
||||
//! post signing request
|
||||
|
||||
use crate::ServerState;
|
||||
use actix_web::http::StatusCode;
|
||||
use actix_web::web;
|
||||
use anyhow::{anyhow, Context, Result};
|
||||
use sha2::{Digest, Sha256};
|
||||
use std::sync::Arc;
|
||||
use teepot::client::vault::VaultConnection;
|
||||
use teepot::json::http::{SignRequest, SignRequestData, SignResponse};
|
||||
use teepot::json::secrets::{AdminConfig, AdminState, SGXSigningKey};
|
||||
use teepot::server::signatures::VerifySig as _;
|
||||
use teepot::server::{HttpResponseError, Status};
|
||||
use teepot::sgx::sign::PrivateKey as _;
|
||||
use teepot::sgx::sign::{Author, Signature};
|
||||
use teepot::sgx::sign::{Body, RS256PrivateKey};
|
||||
use tracing::instrument;
|
||||
|
||||
/// Sign command
|
||||
#[instrument(level = "info", name = "/v1/sign", skip_all)]
|
||||
pub async fn post_sign(
|
||||
state: web::Data<Arc<ServerState>>,
|
||||
item: web::Json<SignRequest>,
|
||||
) -> Result<web::Json<SignResponse>, HttpResponseError> {
|
||||
let conn = VaultConnection::new(&state.vault_attestation.clone().into(), "admin".to_string())
|
||||
.await
|
||||
.context("connecting to vault")
|
||||
.status(StatusCode::BAD_GATEWAY)?;
|
||||
|
||||
let mut admin_state: AdminState = conn
|
||||
.load_secret("state")
|
||||
.await?
|
||||
.context("empty admin state")
|
||||
.status(StatusCode::BAD_GATEWAY)?;
|
||||
|
||||
let sign_request: SignRequestData = serde_json::from_str(&item.sign_request_data)
|
||||
.context("parsing sign request data")
|
||||
.status(StatusCode::BAD_REQUEST)?;
|
||||
|
||||
// Sanity checks
|
||||
if sign_request.tee_type != "sgx" {
|
||||
return Err(anyhow!("tee_type not supported")).status(StatusCode::BAD_REQUEST);
|
||||
}
|
||||
|
||||
let tee_name = sign_request.tee_name;
|
||||
|
||||
if !tee_name.is_ascii() {
|
||||
return Err(anyhow!("tee_name is not ascii")).status(StatusCode::BAD_REQUEST);
|
||||
}
|
||||
|
||||
// check if tee_name is alphanumeric
|
||||
if !tee_name.chars().all(|c| c.is_alphanumeric()) {
|
||||
return Err(anyhow!("tee_name is not alphanumeric")).status(StatusCode::BAD_REQUEST);
|
||||
}
|
||||
|
||||
// check if tee_name starts with an alphabetic char
|
||||
if !tee_name.chars().next().unwrap().is_alphabetic() {
|
||||
return Err(anyhow!("tee_name does not start with an alphabetic char"))
|
||||
.status(StatusCode::BAD_REQUEST);
|
||||
}
|
||||
|
||||
if admin_state.last_digest != sign_request.last_digest {
|
||||
return Err(anyhow!(
|
||||
"last digest does not match {} != {}",
|
||||
admin_state.last_digest.to_ascii_lowercase(),
|
||||
sign_request.last_digest
|
||||
))
|
||||
.status(StatusCode::BAD_REQUEST);
|
||||
}
|
||||
|
||||
let admin_config: AdminConfig = conn
|
||||
.load_secret("config")
|
||||
.await?
|
||||
.context("empty admin config")
|
||||
.status(StatusCode::BAD_GATEWAY)?;
|
||||
admin_config.check_sigs(&item.signatures, item.sign_request_data.as_bytes())?;
|
||||
|
||||
let mut hasher = Sha256::new();
|
||||
hasher.update(item.sign_request_data.as_bytes());
|
||||
let hash = hasher.finalize();
|
||||
let digest = hex::encode(hash);
|
||||
admin_state.last_digest = digest.clone();
|
||||
conn.store_secret(admin_state, "state").await?;
|
||||
|
||||
// Sign SGX enclave
|
||||
let key_path = format!("signing_keys/{}", tee_name);
|
||||
|
||||
let sgx_key = match conn
|
||||
.load_secret::<SGXSigningKey>(&key_path)
|
||||
.await
|
||||
.context("Error loading signing key")
|
||||
.status(StatusCode::INTERNAL_SERVER_ERROR)?
|
||||
{
|
||||
Some(key) => RS256PrivateKey::from_pem(&key.pem_pk)
|
||||
.context("Failed to parse private key")
|
||||
.status(StatusCode::INTERNAL_SERVER_ERROR)?,
|
||||
None => {
|
||||
let private_key = RS256PrivateKey::generate(3)
|
||||
.context("Failed to generate private key")
|
||||
.status(StatusCode::INTERNAL_SERVER_ERROR)?;
|
||||
|
||||
let pem_pk = private_key
|
||||
.to_pem()
|
||||
.context("Failed to convert private key to pem")
|
||||
.status(StatusCode::INTERNAL_SERVER_ERROR)?;
|
||||
|
||||
let key = SGXSigningKey { pem_pk };
|
||||
|
||||
conn.store_secret(key.clone(), &key_path)
|
||||
.await
|
||||
.context("Error storing generated private key")
|
||||
.status(StatusCode::INTERNAL_SERVER_ERROR)?;
|
||||
|
||||
private_key
|
||||
}
|
||||
};
|
||||
|
||||
let signed_data = sign_sgx(&sign_request.data, &sgx_key)?;
|
||||
let respond = SignResponse {
|
||||
digest,
|
||||
signed_data,
|
||||
};
|
||||
|
||||
let _ = conn.revoke_token().await;
|
||||
|
||||
Ok(web::Json(respond))
|
||||
}
|
||||
|
||||
fn sign_sgx(body_bytes: &[u8], sgx_key: &RS256PrivateKey) -> Result<Vec<u8>, HttpResponseError> {
|
||||
let body: Body = bytemuck::try_pod_read_unaligned(body_bytes)
|
||||
.context("Invalid SGX input data")
|
||||
.status(StatusCode::INTERNAL_SERVER_ERROR)?;
|
||||
|
||||
if body.can_set_debug() {
|
||||
return Err(anyhow!("Not signing SGX enclave with debug flag"))
|
||||
.status(StatusCode::BAD_REQUEST);
|
||||
}
|
||||
|
||||
// FIXME: do we need the date and sw defined value?
|
||||
let author = Author::new(0, 0);
|
||||
let sig = Signature::new(sgx_key, author, body)
|
||||
.context("Failed to create RSA signature")
|
||||
.status(StatusCode::INTERNAL_SERVER_ERROR)?;
|
||||
|
||||
Ok(bytemuck::bytes_of(&sig).to_vec())
|
||||
}
|
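
The three `tee_name` checks above amount to a small validation rule; sketched here as a standalone helper (the extra empty check also avoids the `unwrap` on `chars().next()`):

/// A TEE name is accepted when it is non-empty ASCII, purely alphanumeric,
/// and starts with an alphabetic character.
fn is_valid_tee_name(name: &str) -> bool {
    name.is_ascii()
        && !name.is_empty()
        && name.chars().all(|c| c.is_alphanumeric())
        && name.chars().next().map_or(false, |c| c.is_alphabetic())
}
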
66 bin/tee-vault-admin/tee-vault-admin.manifest.template Normal file
@@ -0,0 +1,66 @@
libos.entrypoint = "/app/tee-vault-admin"
|
||||
|
||||
[loader]
|
||||
argv = [ "/app/tee-vault-admin" ]
|
||||
entrypoint = "file:{{ gramine.libos }}"
|
||||
env.LD_LIBRARY_PATH = "/lib:{{ arch_libdir }}:/usr{{ arch_libdir }}:/lib"
|
||||
env.HOME = "/app"
|
||||
env.MALLOC_ARENA_MAX = "1"
|
||||
env.AZDCAP_DEBUG_LOG_LEVEL = "ignore"
|
||||
env.AZDCAP_COLLATERAL_VERSION = "v4"
|
||||
|
||||
### Admin Config ###
|
||||
env.PORT = { passthrough = true }
|
||||
|
||||
### VAULT attestation ###
|
||||
env.VAULT_ADDR = { passthrough = true }
|
||||
env.VAULT_SGX_MRENCLAVE = { passthrough = true }
|
||||
env.VAULT_SGX_MRSIGNER = { passthrough = true }
|
||||
env.VAULT_SGX_ALLOWED_TCB_LEVELS = { passthrough = true }
|
||||
|
||||
### DEBUG ###
|
||||
env.RUST_BACKTRACE = "1"
|
||||
env.RUST_LOG = "info,tee_vault_admin=trace,teepot=trace,vault_tee_client=trace,tee_client=trace,awc=debug"
|
||||
|
||||
[fs]
|
||||
root.uri = "file:/"
|
||||
start_dir = "/app"
|
||||
mounts = [
|
||||
{ path = "{{ execdir }}", uri = "file:{{ execdir }}" },
|
||||
{ path = "/lib", uri = "file:{{ gramine.runtimedir() }}" },
|
||||
{ path = "{{ arch_libdir }}", uri = "file:{{ arch_libdir }}" },
|
||||
{ path = "/etc", uri = "file:/etc" },
|
||||
{ type = "tmpfs", path = "/var/tmp" },
|
||||
{ type = "tmpfs", path = "/tmp" },
|
||||
{ type = "tmpfs", path = "/app/.dcap-qcnl" },
|
||||
{ type = "tmpfs", path = "/app/.az-dcap-client" },
|
||||
{ path = "/lib/libdcap_quoteprov.so", uri = "file:/lib/libdcap_quoteprov.so" },
|
||||
]
|
||||
|
||||
[sgx]
|
||||
trusted_files = [
|
||||
"file:/etc/ld.so.cache",
|
||||
"file:/app/",
|
||||
"file:{{ execdir }}/",
|
||||
"file:{{ arch_libdir }}/",
|
||||
"file:/usr/{{ arch_libdir }}/",
|
||||
"file:{{ gramine.libos }}",
|
||||
"file:{{ gramine.runtimedir() }}/",
|
||||
"file:/usr/lib/ssl/openssl.cnf",
|
||||
"file:/etc/ssl/",
|
||||
"file:/etc/sgx_default_qcnl.conf",
|
||||
"file:/lib/libdcap_quoteprov.so",
|
||||
]
|
||||
remote_attestation = "dcap"
|
||||
max_threads = 64
|
||||
edmm_enable = false
|
||||
## max enclave size
|
||||
enclave_size = "8G"
|
||||
|
||||
[sys]
|
||||
enable_extra_runtime_domain_names_conf = true
|
||||
enable_sigterm_injection = true
|
||||
|
||||
# possible tweak option, if problems with mio
|
||||
# currently mio is compiled with `mio_unsupported_force_waker_pipe`
|
||||
# insecure__allow_eventfd = true
|
28 bin/tee-vault-unseal/Cargo.toml Normal file
@@ -0,0 +1,28 @@
[package]
|
||||
name = "tee-vault-unseal"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
authors.workspace = true
|
||||
license.workspace = true
|
||||
repository.workspace = true
|
||||
|
||||
[dependencies]
|
||||
actix-tls.workspace = true
|
||||
actix-web.workspace = true
|
||||
anyhow.workspace = true
|
||||
awc.workspace = true
|
||||
clap.workspace = true
|
||||
hex.workspace = true
|
||||
mio.workspace = true
|
||||
rustls-pemfile.workspace = true
|
||||
rustls.workspace = true
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
sha2.workspace = true
|
||||
teepot.workspace = true
|
||||
thiserror.workspace = true
|
||||
tokio.workspace = true
|
||||
tracing-log.workspace = true
|
||||
tracing-subscriber.workspace = true
|
||||
tracing.workspace = true
|
||||
x509-cert.workspace = true
|
92 bin/tee-vault-unseal/Dockerfile-azure Normal file
@@ -0,0 +1,92 @@
FROM ghcr.io/matter-labs/vault-auth-tee:latest AS vault-auth-tee
|
||||
|
||||
FROM docker.io/ubuntu:20.04 AS azuredcap
|
||||
WORKDIR /build
|
||||
ADD https://github.com/microsoft/Azure-DCAP-Client/archive/refs/tags/1.12.0.tar.gz ./Azure-DCAP-Client.tar.gz
|
||||
RUN tar -xvf Azure-DCAP-Client.tar.gz
|
||||
COPY assets/Azure-DCAP-Client.patch ./Azure-DCAP-Client.patch
|
||||
RUN set -eux; \
|
||||
apt-get update; \
|
||||
apt-get install -y software-properties-common; \
|
||||
add-apt-repository ppa:team-xbmc/ppa -y; \
|
||||
apt-get update; \
|
||||
apt-get install -y \
|
||||
build-essential \
|
||||
cmake \
|
||||
libssl-dev \
|
||||
libcurl4-openssl-dev \
|
||||
pkg-config \
|
||||
nlohmann-json3-dev \
|
||||
wget \
|
||||
dos2unix \
|
||||
;
|
||||
|
||||
WORKDIR /build/Azure-DCAP-Client-1.12.0
|
||||
RUN dos2unix src/dcap_provider.cpp && patch -p1 < ../Azure-DCAP-Client.patch
|
||||
WORKDIR /build/Azure-DCAP-Client-1.12.0/src/Linux
|
||||
RUN ./configure && make && make install
|
||||
|
||||
FROM docker.io/rust:1-bullseye AS buildtee
|
||||
RUN curl -fsSLo /usr/share/keyrings/intel.asc https://download.01.org/intel-sgx/sgx_repo/ubuntu/intel-sgx-deb.key \
|
||||
&& echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel.asc] https://download.01.org/intel-sgx/sgx_repo/ubuntu focal main" > /etc/apt/sources.list.d/intel-sgx.list \
|
||||
&& apt-get update \
|
||||
&& apt-get install -y --no-install-recommends \
|
||||
build-essential \
|
||||
cmake \
|
||||
rsync \
|
||||
pkg-config \
|
||||
libssl-dev \
|
||||
libcurl4-openssl-dev \
|
||||
libprotobuf-dev \
|
||||
protobuf-compiler \
|
||||
clang \
|
||||
libsgx-headers \
|
||||
libsgx-dcap-quote-verify-dev
|
||||
|
||||
WORKDIR /opt/vault/plugins
|
||||
COPY --from=vault-auth-tee /opt/vault/plugins/vault-auth-tee ./
|
||||
|
||||
WORKDIR /build
|
||||
RUN --mount=type=bind,target=/data rsync --exclude='/.git' --filter="dir-merge,- .gitignore" --exclude "Dockerfile-*" --exclude 'tee-vault-unseal.manifest.template' -av /data/ ./
|
||||
RUN sha256sum /opt/vault/plugins/vault-auth-tee | ( read a _ ; echo -n $a ) | tee assets/vault-auth-tee.sha256
|
||||
RUN --mount=type=cache,target=/usr/local/cargo/registry --mount=type=cache,target=target \
|
||||
RUSTFLAGS="-C target-cpu=icelake-server --cfg mio_unsupported_force_waker_pipe" \
|
||||
cargo build --locked --target x86_64-unknown-linux-gnu --release -p tee-vault-unseal --bin tee-vault-unseal \
|
||||
&& mv ./target/x86_64-unknown-linux-gnu/release/tee-vault-unseal ./
|
||||
|
||||
FROM docker.io/gramineproject/gramine:v1.5
|
||||
|
||||
RUN curl -fsSLo /usr/share/keyrings/microsoft.asc https://packages.microsoft.com/keys/microsoft.asc \
|
||||
&& echo "deb [arch=amd64 signed-by=/usr/share/keyrings/microsoft.asc] https://packages.microsoft.com/ubuntu/20.04/prod focal main" > /etc/apt/sources.list.d/msprod.list \
|
||||
&& apt-get update \
|
||||
&& apt purge -y libsgx-dcap-default-qpl \
|
||||
&& apt-get install -y az-dcap-client
|
||||
|
||||
RUN apt purge -y libsgx-ae-qve
|
||||
# libsgx-urts
|
||||
|
||||
RUN rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# So we only have to use one gramine template
|
||||
RUN touch /etc/sgx_default_qcnl.conf
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
COPY --from=buildtee /build/tee-vault-unseal .
|
||||
COPY ./bin/tee-vault-unseal/tee-vault-unseal.manifest.template .
|
||||
COPY vault/enclave-key.pem .
|
||||
RUN mkdir -p /opt/vault/tls && rm -rf /opt/vault/tls/*
|
||||
|
||||
# The original Azure library is still delivering expired collateral, so we have to use a patched version
|
||||
COPY --from=azuredcap /usr/local/lib/libdcap_quoteprov.so /usr/lib/
|
||||
|
||||
RUN gramine-manifest -Darch_libdir=/lib/x86_64-linux-gnu -Dexecdir=/usr/bin -Dlog_level=warning tee-vault-unseal.manifest.template tee-vault-unseal.manifest \
|
||||
&& gramine-sgx-sign --manifest tee-vault-unseal.manifest --output tee-vault-unseal.manifest.sgx --key enclave-key.pem \
|
||||
&& rm enclave-key.pem
|
||||
|
||||
VOLUME /opt/vault/tls
|
||||
|
||||
EXPOSE 8443
|
||||
|
||||
ENTRYPOINT ["/bin/sh", "-c"]
|
||||
CMD [ "/restart_aesm.sh ; exec gramine-sgx tee-vault-unseal" ]
|
65 bin/tee-vault-unseal/Dockerfile-intel Normal file
@@ -0,0 +1,65 @@
FROM ghcr.io/matter-labs/vault-auth-tee:latest AS vault-auth-tee
|
||||
|
||||
FROM docker.io/rust:1-bullseye AS buildtee
|
||||
RUN curl -fsSLo /usr/share/keyrings/intel.asc https://download.01.org/intel-sgx/sgx_repo/ubuntu/intel-sgx-deb.key \
|
||||
&& echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel.asc] https://download.01.org/intel-sgx/sgx_repo/ubuntu focal main" > /etc/apt/sources.list.d/intel-sgx.list \
|
||||
&& apt-get update \
|
||||
&& apt-get install -y --no-install-recommends \
|
||||
build-essential \
|
||||
cmake \
|
||||
rsync \
|
||||
pkg-config \
|
||||
libssl-dev \
|
||||
libcurl4-openssl-dev \
|
||||
libprotobuf-dev \
|
||||
protobuf-compiler \
|
||||
clang \
|
||||
libsgx-headers \
|
||||
libsgx-dcap-quote-verify-dev
|
||||
|
||||
WORKDIR /opt/vault/plugins
|
||||
COPY --from=vault-auth-tee /opt/vault/plugins/vault-auth-tee ./
|
||||
|
||||
WORKDIR /build
|
||||
RUN --mount=type=bind,target=/data rsync --exclude='/.git' --filter="dir-merge,- .gitignore" --exclude "Dockerfile-*" --exclude 'tee-vault-unseal.manifest.template' -av /data/ ./
|
||||
RUN sha256sum /opt/vault/plugins/vault-auth-tee | ( read a _ ; echo -n $a ) | tee assets/vault-auth-tee.sha256
|
||||
RUN --mount=type=cache,target=/usr/local/cargo/registry --mount=type=cache,target=target \
|
||||
RUSTFLAGS="-C target-cpu=icelake-server --cfg mio_unsupported_force_waker_pipe" \
|
||||
cargo build --locked --target x86_64-unknown-linux-gnu --release -p tee-vault-unseal --bin tee-vault-unseal \
|
||||
&& mv ./target/x86_64-unknown-linux-gnu/release/tee-vault-unseal ./
|
||||
|
||||
FROM docker.io/gramineproject/gramine:v1.5
|
||||
|
||||
RUN curl -fsSLo /usr/share/keyrings/intel.asc https://download.01.org/intel-sgx/sgx_repo/ubuntu/intel-sgx-deb.key \
|
||||
&& echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel.asc] https://download.01.org/intel-sgx/sgx_repo/ubuntu focal main" > /etc/apt/sources.list.d/intel-sgx.list \
|
||||
&& apt-get update \
|
||||
&& apt-get install -y --no-install-recommends \
|
||||
libsgx-dcap-default-qpl \
|
||||
libsgx-urts \
|
||||
libsgx-enclave-common \
|
||||
libsgx-dcap-quote-verify
|
||||
RUN apt purge -y libsgx-ae-qve
|
||||
RUN rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# So we only have to use one gramine template
|
||||
RUN touch /lib/libdcap_quoteprov.so
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
COPY --from=buildtee /build/tee-vault-unseal .
|
||||
COPY ./bin/tee-vault-unseal/tee-vault-unseal.manifest.template .
|
||||
COPY vault/enclave-key.pem .
|
||||
RUN mkdir -p /opt/vault/tls && rm -rf /opt/vault/tls/*
|
||||
|
||||
COPY assets/sgx_default_qcnl.conf.json /etc/sgx_default_qcnl.conf
|
||||
|
||||
RUN gramine-manifest -Darch_libdir=/lib/x86_64-linux-gnu -Dexecdir=/usr/bin -Dlog_level=warning tee-vault-unseal.manifest.template tee-vault-unseal.manifest \
|
||||
&& gramine-sgx-sign --manifest tee-vault-unseal.manifest --output tee-vault-unseal.manifest.sgx --key enclave-key.pem \
|
||||
&& rm enclave-key.pem
|
||||
|
||||
VOLUME /opt/vault/tls
|
||||
|
||||
EXPOSE 8443
|
||||
|
||||
ENTRYPOINT ["/bin/sh", "-c"]
|
||||
CMD [ "/restart_aesm.sh ; exec gramine-sgx tee-vault-unseal" ]
|
80 bin/tee-vault-unseal/src/admin-policy.hcl Normal file
@@ -0,0 +1,80 @@
# Read system health check
|
||||
path "sys/health"
|
||||
{
|
||||
capabilities = ["read", "sudo"]
|
||||
}
|
||||
|
||||
# Create and manage ACL policies broadly across Vault
|
||||
|
||||
# List existing policies
|
||||
path "sys/policies/acl"
|
||||
{
|
||||
capabilities = ["list"]
|
||||
}
|
||||
|
||||
# Create and manage ACL policies
|
||||
path "sys/policies/acl/*"
|
||||
{
|
||||
capabilities = ["create", "read", "update", "delete", "list", "sudo"]
|
||||
}
|
||||
|
||||
# Enable and manage authentication methods broadly across Vault
|
||||
|
||||
# Manage auth methods broadly across Vault
|
||||
path "auth/*"
|
||||
{
|
||||
capabilities = ["create", "read", "update", "delete", "list", "sudo"]
|
||||
}
|
||||
|
||||
# Create, update, and delete auth methods
|
||||
path "sys/auth/*"
|
||||
{
|
||||
capabilities = ["create", "update", "delete", "sudo"]
|
||||
}
|
||||
|
||||
# List auth methods
|
||||
path "sys/auth"
|
||||
{
|
||||
capabilities = ["read"]
|
||||
}
|
||||
|
||||
# Enable and manage the key/value secrets engine at `secret/` path
|
||||
|
||||
# List, create, update, and delete key/value secrets
|
||||
path "secret/*"
|
||||
{
|
||||
capabilities = ["create", "read", "update", "delete", "list", "sudo"]
|
||||
}
|
||||
|
||||
# Manage secrets engines
|
||||
path "sys/mounts/*"
|
||||
{
|
||||
capabilities = ["create", "read", "update", "delete", "list", "sudo"]
|
||||
}
|
||||
|
||||
# List existing secrets engines.
|
||||
path "sys/mounts"
|
||||
{
|
||||
capabilities = ["read"]
|
||||
}
|
||||
|
||||
# Manage plugins
|
||||
# https://developer.hashicorp.com/vault/api-docs/system/plugins-catalog
|
||||
path "sys/plugins/catalog/*"
|
||||
{
|
||||
capabilities = ["create", "read", "update", "delete", "list", "sudo"]
|
||||
}
|
||||
|
||||
# List existing plugins
|
||||
# https://developer.hashicorp.com/vault/api-docs/system/plugins-catalog
|
||||
path "sys/plugins/catalog"
|
||||
{
|
||||
capabilities = ["list"]
|
||||
}
|
||||
|
||||
# Reload plugins
|
||||
# https://developer.hashicorp.com/vault/api-docs/system/plugins-reload-backend
|
||||
path "sys/plugins/reload/backend"
|
||||
{
|
||||
capabilities = ["create", "update", "sudo"]
|
||||
}
|
27 bin/tee-vault-unseal/src/attestation.rs Normal file
@@ -0,0 +1,27 @@
// SPDX-License-Identifier: Apache-2.0
|
||||
// Copyright (c) 2023 Matter Labs
|
||||
|
||||
use crate::Worker;
|
||||
use actix_web::http::StatusCode;
|
||||
use actix_web::web::{Data, Json};
|
||||
use anyhow::{Context, Result};
|
||||
use teepot::json::http::AttestationResponse;
|
||||
use teepot::server::attestation::get_quote_and_collateral;
|
||||
use teepot::server::{HttpResponseError, Status};
|
||||
use tracing::instrument;
|
||||
|
||||
#[instrument(level = "info", name = "/v1/sys/attestation", skip_all)]
|
||||
pub async fn get_attestation(
|
||||
worker: Data<Worker>,
|
||||
) -> Result<Json<AttestationResponse>, HttpResponseError> {
|
||||
let report_data: [u8; 64] = worker
|
||||
.config
|
||||
.report_data
|
||||
.clone()
|
||||
.try_into()
|
||||
.map_err(|_| "Error getting attestation")?;
|
||||
get_quote_and_collateral(None, &report_data)
|
||||
.context("Error getting attestation")
|
||||
.map(Json)
|
||||
.status(StatusCode::INTERNAL_SERVER_ERROR)
|
||||
}
|
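
The `try_into` above turns the configured `Vec<u8>` report data into the fixed 64-byte array SGX expects; the same conversion as a standalone sketch:

/// Convert the configured report data into the fixed-size array SGX expects;
/// returns `None` unless exactly 64 bytes are supplied.
fn to_report_data(bytes: &[u8]) -> Option<[u8; 64]> {
    bytes.try_into().ok()
}
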
135 bin/tee-vault-unseal/src/init.rs Normal file
@@ -0,0 +1,135 @@
// SPDX-License-Identifier: Apache-2.0
|
||||
// Copyright (c) 2023 Matter Labs
|
||||
|
||||
use crate::{create_https_client, get_vault_status, UnsealServerState, Worker};
|
||||
use actix_web::error::ErrorBadRequest;
|
||||
use actix_web::{web, HttpResponse};
|
||||
use anyhow::{anyhow, Context, Result};
|
||||
use awc::http::StatusCode;
|
||||
use serde_json::json;
|
||||
use teepot::json::http::{Init, InitResponse, VaultInitRequest};
|
||||
use teepot::json::secrets::AdminConfig;
|
||||
use teepot::server::{HttpResponseError, Status};
|
||||
use tracing::{debug, error, info, instrument, trace};
|
||||
|
||||
#[instrument(level = "info", name = "/v1/sys/init", skip_all)]
|
||||
pub async fn post_init(
|
||||
worker: web::Data<Worker>,
|
||||
init: web::Json<Init>,
|
||||
) -> Result<HttpResponse, HttpResponseError> {
|
||||
let Init {
|
||||
pgp_keys,
|
||||
secret_shares,
|
||||
secret_threshold,
|
||||
admin_pgp_keys,
|
||||
admin_threshold,
|
||||
admin_tee_mrenclave,
|
||||
} = init.into_inner();
|
||||
let client = create_https_client(worker.client_tls_config.clone());
|
||||
let vault_url = &worker.config.vault_url;
|
||||
|
||||
let vault_init = VaultInitRequest {
|
||||
pgp_keys,
|
||||
secret_shares,
|
||||
secret_threshold,
|
||||
};
|
||||
|
||||
if admin_threshold < 1 {
|
||||
return Ok(HttpResponse::from_error(ErrorBadRequest(
|
||||
json!({"error": "admin_threshold must be at least 1"}),
|
||||
)));
|
||||
}
|
||||
|
||||
if admin_threshold > admin_pgp_keys.len() {
|
||||
return Ok(HttpResponse::from_error(ErrorBadRequest(
|
||||
json!({"error": "admin_threshold must be less than or equal to the number of admin_pgp_keys"}),
|
||||
)));
|
||||
}
|
||||
|
||||
loop {
|
||||
let current_state = worker.state.read().unwrap().clone();
|
||||
match current_state {
|
||||
UnsealServerState::VaultUninitialized => {
|
||||
break;
|
||||
}
|
||||
UnsealServerState::VaultUnsealed => {
|
||||
return Err(anyhow!("Vault already unsealed")).status(StatusCode::BAD_REQUEST);
|
||||
}
|
||||
UnsealServerState::VaultInitialized { .. } => {
|
||||
return Err(anyhow!("Vault already initialized")).status(StatusCode::BAD_REQUEST);
|
||||
}
|
||||
UnsealServerState::VaultInitializedAndConfigured => {
|
||||
return Err(anyhow!("Vault already initialized")).status(StatusCode::BAD_REQUEST);
|
||||
}
|
||||
UnsealServerState::Undefined => {
|
||||
let state = get_vault_status(vault_url, client.clone()).await;
|
||||
*worker.state.write().unwrap() = state;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
trace!(
|
||||
"Sending init request to Vault {}",
|
||||
serde_json::to_string(&vault_init).unwrap()
|
||||
);
|
||||
let mut response = client
|
||||
.post(format!("{}/v1/sys/init", vault_url))
|
||||
.send_json(&vault_init)
|
||||
.await?;
|
||||
|
||||
let status_code = response.status();
|
||||
if !status_code.is_success() {
|
||||
error!("Vault returned server error: {}", status_code);
|
||||
return Err(HttpResponseError::from_proxy(response).await);
|
||||
}
|
||||
|
||||
let response = response
|
||||
.json::<serde_json::Value>()
|
||||
.await
|
||||
.context("Failed to convert to json")
|
||||
.status(StatusCode::INTERNAL_SERVER_ERROR)?;
|
||||
|
||||
info!("Vault initialized");
|
||||
trace!("response {}", response);
|
||||
|
||||
let root_token = response["root_token"]
|
||||
.as_str()
|
||||
.ok_or(anyhow!("No `root_token` field"))
|
||||
.status(StatusCode::BAD_GATEWAY)?
|
||||
.to_string();
|
||||
|
||||
debug!("Root token: {root_token}");
|
||||
|
||||
let unseal_keys = response["keys_base64"]
|
||||
.as_array()
|
||||
.ok_or(anyhow!("No `keys_base64` field"))
|
||||
.status(StatusCode::BAD_GATEWAY)?
|
||||
.iter()
|
||||
.map(|v| v.as_str().unwrap().to_string())
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
debug!("Unseal keys: {}", unseal_keys.join(", "));
|
||||
|
||||
/*
|
||||
FIXME: use unseal keys to create new token
|
||||
let mut output = File::create("/opt/vault/data/root_token")
|
||||
.context("Failed to create `/opt/vault/data/root_token`")?;
|
||||
output
|
||||
.write_all(root_token.as_bytes())
|
||||
.context("Failed to write root_token")?;
|
||||
*/
|
||||
|
||||
*worker.state.write().unwrap() = UnsealServerState::VaultInitialized {
|
||||
admin_config: AdminConfig {
|
||||
admin_pgp_keys,
|
||||
admin_threshold,
|
||||
},
|
||||
admin_tee_mrenclave,
|
||||
root_token,
|
||||
};
|
||||
|
||||
let response = InitResponse { unseal_keys };
|
||||
|
||||
Ok(HttpResponse::Ok().json(response)) // <- send response
|
||||
}
|
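
The two admin-threshold checks in `post_init` boil down to a single invariant; a standalone sketch:

/// The admin quorum is valid when at least one signature is required and the
/// threshold does not exceed the number of registered admin PGP keys.
fn admin_threshold_ok(admin_threshold: usize, admin_pgp_key_count: usize) -> bool {
    admin_threshold >= 1 && admin_threshold <= admin_pgp_key_count
}
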
366 bin/tee-vault-unseal/src/main.rs Normal file
@@ -0,0 +1,366 @@
// SPDX-License-Identifier: Apache-2.0
|
||||
// Copyright (c) 2023 Matter Labs
|
||||
|
||||
//! Server to initialize and unseal the Vault TEE.
|
||||
|
||||
#![deny(missing_docs)]
|
||||
#![deny(clippy::all)]
|
||||
|
||||
mod attestation;
|
||||
mod init;
|
||||
mod unseal;
|
||||
|
||||
use actix_web::http::header;
|
||||
use actix_web::rt::time::sleep;
|
||||
use actix_web::web::Data;
|
||||
use actix_web::{web, App, HttpServer};
|
||||
use anyhow::{Context, Result};
|
||||
use attestation::get_attestation;
|
||||
use awc::{Client, Connector};
|
||||
use clap::Parser;
|
||||
use init::post_init;
|
||||
use rustls::client::danger::{HandshakeSignatureValid, ServerCertVerified, ServerCertVerifier};
|
||||
use rustls::client::WebPkiServerVerifier;
|
||||
use rustls::pki_types::{CertificateDer, ServerName, UnixTime};
|
||||
use rustls::{ClientConfig, DigitallySignedStruct, Error, ServerConfig, SignatureScheme};
|
||||
use rustls_pemfile::{certs, read_one};
|
||||
use sha2::{Digest, Sha256};
|
||||
use std::fmt::Debug;
|
||||
use std::net::Ipv6Addr;
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::time::Duration;
|
||||
use std::{fs::File, io::BufReader};
|
||||
use teepot::json::http::{Init, Unseal, ATTESTATION_URL};
|
||||
use teepot::json::secrets::AdminConfig;
|
||||
use teepot::server::attestation::get_quote_and_collateral;
|
||||
use teepot::server::new_json_cfg;
|
||||
use teepot::sgx::{parse_tcb_levels, EnumSet, TcbLevel};
|
||||
use tracing::{error, info, trace};
|
||||
use tracing_log::LogTracer;
|
||||
use tracing_subscriber::{fmt, prelude::*, EnvFilter, Registry};
|
||||
use unseal::post_unseal;
|
||||
use x509_cert::der::Decode as _;
|
||||
use x509_cert::der::Encode as _;
|
||||
use x509_cert::Certificate;
|
||||
|
||||
const VAULT_AUTH_TEE_SHA256: &str = include_str!("../../../assets/vault-auth-tee.sha256");
|
||||
const VAULT_TOKEN_HEADER: &str = "X-Vault-Token";
|
||||
|
||||
/// Worker thread state and data
|
||||
pub struct Worker {
|
||||
/// TLS config for the HTTPS client
|
||||
pub client_tls_config: Arc<ClientConfig>,
|
||||
/// Server config
|
||||
pub config: Arc<UnsealServerConfig>,
|
||||
/// Server state
|
||||
pub state: Arc<RwLock<UnsealServerState>>,
|
||||
}
|
||||
|
||||
/// Global Server config
|
||||
#[derive(Debug, Default)]
|
||||
pub struct UnsealServerConfig {
|
||||
/// Vault URL
|
||||
pub vault_url: String,
|
||||
/// The expected report_data for the Vault TEE
|
||||
pub report_data: Vec<u8>,
|
||||
/// allowed TCB levels
|
||||
pub allowed_tcb_levels: Option<EnumSet<TcbLevel>>,
|
||||
}
|
||||
|
||||
/// Server state
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum UnsealServerState {
|
||||
/// Undefined
|
||||
Undefined,
|
||||
/// Vault is not yet initialized
|
||||
VaultUninitialized,
|
||||
/// Vault is initialized but not unsealed
|
||||
VaultInitialized {
|
||||
/// config for the admin TEE
|
||||
admin_config: AdminConfig,
|
||||
/// initial admin TEE mrenclave
|
||||
admin_tee_mrenclave: String,
|
||||
/// Vault root token
|
||||
root_token: String,
|
||||
},
|
||||
/// Vault is already initialized but not unsealed
|
||||
/// and should already be configured
|
||||
VaultInitializedAndConfigured,
|
||||
/// Vault is unsealed
|
||||
VaultUnsealed,
|
||||
}
|
||||
|
||||
impl UnsealServerConfig {
|
||||
/// Create a new ServerState
|
||||
pub fn new(
|
||||
vault_url: String,
|
||||
report_data: [u8; 64],
|
||||
allowed_tcb_levels: Option<EnumSet<TcbLevel>>,
|
||||
) -> Self {
|
||||
Self {
|
||||
report_data: report_data.to_vec(),
|
||||
vault_url,
|
||||
allowed_tcb_levels,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Parser, Debug)]
|
||||
#[command(author, version, about, long_about = None)]
|
||||
struct Args {
|
||||
/// allowed TCB levels, comma separated
|
||||
#[arg(long, value_parser = parse_tcb_levels, env = "ALLOWED_TCB_LEVELS", default_value = "Ok")]
|
||||
allowed_tcb_levels: EnumSet<TcbLevel>,
|
||||
/// port to listen on
|
||||
#[arg(long, env = "PORT", default_value = "8443")]
|
||||
port: u16,
|
||||
/// vault url
|
||||
#[arg(long, env = "VAULT_ADDR", default_value = "https://vault:8210")]
|
||||
vault_url: String,
|
||||
}
|
||||
|
||||
#[actix_web::main]
|
||||
async fn main() -> Result<()> {
|
||||
LogTracer::init().context("Failed to set logger")?;
|
||||
|
||||
let subscriber = Registry::default()
|
||||
.with(EnvFilter::from_default_env())
|
||||
.with(
|
||||
fmt::layer()
|
||||
.with_span_events(fmt::format::FmtSpan::NEW)
|
||||
.with_writer(std::io::stderr),
|
||||
);
|
||||
tracing::subscriber::set_global_default(subscriber).unwrap();
|
||||
|
||||
let args = Args::parse();
|
||||
|
||||
let tls_ok = std::path::Path::new("/opt/vault/tls/tls.ok");
|
||||
loop {
|
||||
info!("Waiting for TLS key/cert files to be generated");
|
||||
|
||||
// Wait for the file `data/tls.key` to exist
|
||||
if tls_ok.exists() {
|
||||
break;
|
||||
}
|
||||
sleep(Duration::from_secs(1)).await;
|
||||
}
|
||||
|
||||
info!("Starting up");
|
||||
|
||||
let (config, client_tls_config, report_data) = load_rustls_config().or_else(|e| {
|
||||
error!("failed to load rustls config: {e:?}");
|
||||
Err(e).context("Failed to load rustls config")
|
||||
})?;
|
||||
|
||||
if let Err(e) = get_quote_and_collateral(Some(args.allowed_tcb_levels), &report_data) {
|
||||
error!("failed to get quote and collateral: {e:?}");
|
||||
// don't return for now, we can still serve requests but we won't be able to attest
|
||||
}
|
||||
|
||||
let client = create_https_client(client_tls_config.clone());
|
||||
|
||||
let server_state = get_vault_status(&args.vault_url, client).await;
|
||||
|
||||
info!("Starting HTTPS server at port {}", args.port);
|
||||
let server_config = Arc::new(UnsealServerConfig::new(
|
||||
args.vault_url,
|
||||
report_data,
|
||||
Some(args.allowed_tcb_levels),
|
||||
));
|
||||
|
||||
let server_state = Arc::new(RwLock::new(server_state));
|
||||
|
||||
let server = match HttpServer::new(move || {
|
||||
let worker = Worker {
|
||||
client_tls_config: client_tls_config.clone(),
|
||||
config: server_config.clone(),
|
||||
state: server_state.clone(),
|
||||
};
|
||||
|
||||
App::new()
|
||||
// enable logger
|
||||
//.wrap(TracingLogger::default())
|
||||
.app_data(new_json_cfg())
|
||||
.app_data(Data::new(worker))
|
||||
.service(web::resource(ATTESTATION_URL).route(web::get().to(get_attestation)))
|
||||
.service(web::resource(Init::URL).route(web::post().to(post_init)))
|
||||
.service(web::resource(Unseal::URL).route(web::post().to(post_unseal)))
|
||||
})
|
||||
.bind_rustls_0_22((Ipv6Addr::UNSPECIFIED, args.port), config)
|
||||
{
|
||||
Ok(c) => c,
|
||||
Err(e) => {
|
||||
error!("Failed to bind to port {}: {e:?}", args.port);
|
||||
return Err(e).context(format!("Failed to bind to port {}", args.port));
|
||||
}
|
||||
};
|
||||
|
||||
if let Err(e) = server.worker_max_blocking_threads(2).workers(8).run().await {
|
||||
error!("failed to start HTTPS server: {e:?}");
|
||||
return Err(e).context("Failed to start HTTPS server");
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn get_vault_status(vault_url: &str, client: Client) -> UnsealServerState {
|
||||
loop {
|
||||
let r = client
|
||||
.get(format!("{}/v1/sys/health", vault_url))
|
||||
.send()
|
||||
.await;
|
||||
|
||||
if let Ok(r) = r {
|
||||
// https://developer.hashicorp.com/vault/api-docs/system/health
|
||||
match r.status().as_u16() {
|
||||
200 | 429 | 472 | 473 => {
|
||||
info!("Vault is initialized and unsealed");
|
||||
break UnsealServerState::VaultUnsealed;
|
||||
}
|
||||
501 => {
|
||||
info!("Vault is not initialized");
|
||||
break UnsealServerState::VaultUninitialized;
|
||||
}
|
||||
503 => {
|
||||
info!("Vault is initialized but not unsealed");
|
||||
break UnsealServerState::VaultInitializedAndConfigured;
|
||||
}
|
||||
s => {
|
||||
error!("Vault is not ready: status code {s}");
|
||||
}
|
||||
}
|
||||
}
|
||||
info!("Waiting for vault to be ready");
|
||||
sleep(Duration::from_secs(1)).await;
|
||||
}
|
||||
}
|
||||
|
||||
// Save the hash of the server's public key in `REPORT_DATA`, so the
// attestations can be checked against it and it does not change on reconnect.
|
||||
fn make_verifier(server_cert: Box<[u8]>) -> impl ServerCertVerifier {
|
||||
#[derive(Debug)]
|
||||
struct V {
|
||||
server_cert: Box<[u8]>,
|
||||
server_verifier: Arc<WebPkiServerVerifier>,
|
||||
}
|
||||
impl ServerCertVerifier for V {
|
||||
fn verify_server_cert(
|
||||
&self,
|
||||
end_entity: &CertificateDer,
|
||||
_intermediates: &[CertificateDer],
|
||||
_server_name: &ServerName,
|
||||
_ocsp_response: &[u8],
|
||||
_now: UnixTime,
|
||||
) -> std::result::Result<ServerCertVerified, Error> {
|
||||
let data = &self.server_cert;
|
||||
|
||||
if data.as_ref() == end_entity.as_ref() {
|
||||
info!("Server certificate matches expected certificate");
|
||||
Ok(ServerCertVerified::assertion())
|
||||
} else {
|
||||
error!("Server certificate does not match expected certificate");
|
||||
Err(rustls::Error::General(
|
||||
"Server certificate does not match expected certificate".to_string(),
|
||||
))
|
||||
}
|
||||
}
|
||||
fn verify_tls12_signature(
|
||||
&self,
|
||||
message: &[u8],
|
||||
cert: &CertificateDer<'_>,
|
||||
dss: &DigitallySignedStruct,
|
||||
) -> std::result::Result<HandshakeSignatureValid, Error> {
|
||||
self.server_verifier
|
||||
.verify_tls12_signature(message, cert, dss)
|
||||
}
|
||||
|
||||
fn verify_tls13_signature(
|
||||
&self,
|
||||
message: &[u8],
|
||||
cert: &CertificateDer<'_>,
|
||||
dss: &DigitallySignedStruct,
|
||||
) -> std::result::Result<HandshakeSignatureValid, Error> {
|
||||
self.server_verifier
|
||||
.verify_tls13_signature(message, cert, dss)
|
||||
}
|
||||
|
||||
fn supported_verify_schemes(&self) -> Vec<SignatureScheme> {
|
||||
self.server_verifier.supported_verify_schemes()
|
||||
}
|
||||
}
|
||||
let root_store = Arc::new(rustls::RootCertStore::empty());
|
||||
let server_verifier = WebPkiServerVerifier::builder(root_store).build().unwrap();
|
||||
V {
|
||||
server_cert,
|
||||
server_verifier,
|
||||
}
|
||||
}
|
||||
|
||||
/// Load TLS key/cert files
|
||||
pub fn load_rustls_config() -> Result<(ServerConfig, Arc<ClientConfig>, [u8; 64])> {
|
||||
// init server config builder with safe defaults
|
||||
let config = ServerConfig::builder().with_no_client_auth();
|
||||
|
||||
// load TLS key/cert files
|
||||
let cert_file = &mut BufReader::new(
|
||||
File::open("/opt/vault/tls/tls.crt").context("Failed to open TLS cert file")?,
|
||||
);
|
||||
let key_file = &mut BufReader::new(
|
||||
File::open("/opt/vault/tls/tls.key").context("Failed to open TLS key file")?,
|
||||
);
|
||||
|
||||
// convert files to key/cert objects
|
||||
let cert_chain: Vec<_> = certs(cert_file)
|
||||
.unwrap()
|
||||
.into_iter()
|
||||
.map(rustls::pki_types::CertificateDer::from)
|
||||
.collect();
|
||||
let priv_key: rustls::pki_types::PrivateKeyDer = match read_one(key_file).unwrap() {
|
||||
Some(rustls_pemfile::Item::RSAKey(key)) => {
|
||||
rustls::pki_types::PrivatePkcs1KeyDer::from(key).into()
|
||||
}
|
||||
Some(rustls_pemfile::Item::PKCS8Key(key)) => {
|
||||
rustls::pki_types::PrivatePkcs8KeyDer::from(key).into()
|
||||
}
|
||||
_ => panic!("no keys found"),
|
||||
};
|
||||
|
||||
let tls_config = Arc::new(
|
||||
rustls::ClientConfig::builder()
|
||||
.dangerous()
|
||||
.with_custom_certificate_verifier(Arc::new(make_verifier(
|
||||
cert_chain[0].as_ref().into(),
|
||||
)))
|
||||
.with_no_client_auth(),
|
||||
);
|
||||
|
||||
let cert = Certificate::from_der(cert_chain[0].as_ref()).unwrap();
|
||||
let pub_key = cert
|
||||
.tbs_certificate
|
||||
.subject_public_key_info
|
||||
.to_der()
|
||||
.unwrap();
|
||||
|
||||
let hash = Sha256::digest(pub_key);
|
||||
let mut report_data = [0u8; 64];
|
||||
report_data[..32].copy_from_slice(&hash[..32]);
|
||||
|
||||
let report_data_hex = hex::encode(report_data);
|
||||
trace!(report_data_hex);
|
||||
|
||||
let config = config
|
||||
.with_single_cert(cert_chain, priv_key)
|
||||
.context("Failed to load TLS key/cert files")?;
|
||||
|
||||
Ok((config, tls_config, report_data))
|
||||
}
|
||||
|
||||
/// Create an HTTPS client with the default headers and config
|
||||
pub fn create_https_client(client_tls_config: Arc<ClientConfig>) -> Client {
|
||||
Client::builder()
|
||||
.add_default_header((header::USER_AGENT, "teepot/1.0"))
|
||||
// a "connector" wraps the stream into an encrypted connection
|
||||
.connector(Connector::new().rustls_0_22(client_tls_config))
|
||||
.timeout(Duration::from_secs(12000))
|
||||
.finish()
|
||||
}
|
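
`get_vault_status` above keys off the status codes of Vault's health endpoint (https://developer.hashicorp.com/vault/api-docs/system/health). The mapping it implements, sketched as a pure function:

/// States derived from Vault's /v1/sys/health status codes.
#[derive(Debug, PartialEq, Eq)]
enum VaultHealth {
    InitializedAndUnsealed, // 200, 429, 472, 473
    Uninitialized,          // 501
    SealedButInitialized,   // 503
    NotReady,               // anything else: keep polling
}

fn classify_health(status: u16) -> VaultHealth {
    match status {
        200 | 429 | 472 | 473 => VaultHealth::InitializedAndUnsealed,
        501 => VaultHealth::Uninitialized,
        503 => VaultHealth::SealedButInitialized,
        _ => VaultHealth::NotReady,
    }
}
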
412 bin/tee-vault-unseal/src/unseal.rs Normal file
@@ -0,0 +1,412 @@
// SPDX-License-Identifier: Apache-2.0
|
||||
// Copyright (c) 2023 Matter Labs
|
||||
|
||||
use crate::{
|
||||
create_https_client, get_vault_status, UnsealServerConfig, UnsealServerState, Worker,
|
||||
VAULT_AUTH_TEE_SHA256, VAULT_TOKEN_HEADER,
|
||||
};
|
||||
use actix_web::http::StatusCode;
|
||||
use actix_web::rt::time::sleep;
|
||||
use actix_web::{web, HttpResponse};
|
||||
use anyhow::{anyhow, Context, Result};
|
||||
use awc::{Client, ClientRequest, SendClientRequest};
|
||||
use serde_json::{json, Value};
|
||||
use std::fs::File;
|
||||
use std::future::Future;
|
||||
use std::io::Read;
|
||||
use std::time::Duration;
|
||||
use teepot::client::vault::VaultConnection;
|
||||
use teepot::json::http::Unseal;
|
||||
use teepot::json::secrets::{AdminConfig, AdminState};
|
||||
use teepot::server::{HttpResponseError, Status};
|
||||
use tracing::{debug, error, info, instrument, trace};
|
||||
|
||||
#[instrument(level = "info", name = "/v1/sys/unseal", skip_all)]
|
||||
pub async fn post_unseal(
|
||||
worker: web::Data<Worker>,
|
||||
item: web::Json<Unseal>,
|
||||
) -> Result<HttpResponse, HttpResponseError> {
|
||||
let client = create_https_client(worker.client_tls_config.clone());
|
||||
let app = &worker.config;
|
||||
let vault_url = &app.vault_url;
|
||||
|
||||
loop {
|
||||
let current_state = worker.state.read().unwrap().clone();
|
||||
match current_state {
|
||||
UnsealServerState::VaultUninitialized => {
|
||||
return Err(anyhow!("Vault not yet initialized")).status(StatusCode::BAD_REQUEST);
|
||||
}
|
||||
UnsealServerState::VaultUnsealed => {
|
||||
return Err(anyhow!("Vault already unsealed")).status(StatusCode::BAD_REQUEST);
|
||||
}
|
||||
UnsealServerState::VaultInitialized { .. } => {
|
||||
break;
|
||||
}
|
||||
UnsealServerState::VaultInitializedAndConfigured => {
|
||||
break;
|
||||
}
|
||||
UnsealServerState::Undefined => {
|
||||
let state = get_vault_status(vault_url, client.clone()).await;
|
||||
*worker.state.write().unwrap() = state;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let mut response = client
|
||||
.post(format!("{}/v1/sys/unseal", vault_url))
|
||||
.send_json(&item.0)
|
||||
.await?;
|
||||
|
||||
let status_code = response.status();
|
||||
if !status_code.is_success() {
|
||||
error!("Vault returned server error: {}", status_code);
|
||||
let mut client_resp = HttpResponse::build(status_code);
|
||||
for (header_name, header_value) in response.headers().iter() {
|
||||
client_resp.insert_header((header_name.clone(), header_value.clone()));
|
||||
}
|
||||
return Ok(client_resp.streaming(response));
|
||||
}
|
||||
|
||||
let response: Value = response
|
||||
.json()
|
||||
.await
|
||||
.context("parsing unseal response")
|
||||
.status(StatusCode::INTERNAL_SERVER_ERROR)?;
|
||||
|
||||
debug!("unseal: {:?}", response);
|
||||
|
||||
if response.get("errors").is_some() {
|
||||
return Ok(HttpResponse::Ok().json(response));
|
||||
}
|
||||
|
||||
let sealed = response
|
||||
.get("sealed")
|
||||
.map(|v| v.as_bool().unwrap_or(true))
|
||||
.unwrap_or(true);
|
||||
|
||||
debug!(sealed);
|
||||
|
||||
// if unsealed
|
||||
if !sealed {
|
||||
let mut state = UnsealServerState::VaultUnsealed;
|
||||
std::mem::swap(&mut *worker.state.write().unwrap(), &mut state);
|
||||
|
||||
match state {
|
||||
UnsealServerState::VaultUninitialized => {
|
||||
return Err(anyhow!("Invalid internal state")).status(StatusCode::BAD_REQUEST);
|
||||
}
|
||||
UnsealServerState::VaultUnsealed => {
|
||||
return Err(anyhow!("Invalid internal state")).status(StatusCode::BAD_REQUEST);
|
||||
}
|
||||
UnsealServerState::VaultInitialized {
|
||||
admin_config,
|
||||
admin_tee_mrenclave,
|
||||
root_token,
|
||||
} => {
|
||||
debug!(root_token);
|
||||
info!("Vault is unsealed");
|
||||
let app = &worker.config;
|
||||
let client = create_https_client(worker.client_tls_config.clone());
|
||||
|
||||
vault_configure_unsealed(
|
||||
app,
|
||||
&admin_config,
|
||||
&root_token,
|
||||
&admin_tee_mrenclave,
|
||||
&client,
|
||||
)
|
||||
.await
|
||||
.context("Failed to configure unsealed vault")
|
||||
.status(StatusCode::BAD_GATEWAY)?;
|
||||
|
||||
// destroy root token
|
||||
let _response = client
|
||||
.post(format!("{}/v1/auth/token/revoke-self", app.vault_url))
|
||||
.insert_header((VAULT_TOKEN_HEADER, root_token.to_string()))
|
||||
.send()
|
||||
.await;
|
||||
|
||||
info!("Vault unsealed and configured!");
|
||||
}
|
||||
UnsealServerState::VaultInitializedAndConfigured => {
|
||||
info!("Vault is unsealed and hopefully configured!");
|
||||
info!("Initiating raft join");
|
||||
// load TLS cert chain
|
||||
let mut cert_file = File::open("/opt/vault/tls/cacert.pem")
|
||||
.context("Failed to open TLS cert chain")
|
||||
.status(StatusCode::INTERNAL_SERVER_ERROR)?;
|
||||
|
||||
let mut cert_buf = Vec::new();
|
||||
cert_file
|
||||
.read_to_end(&mut cert_buf)
|
||||
.context("Failed to read TLS cert chain")
|
||||
.status(StatusCode::INTERNAL_SERVER_ERROR)?;
|
||||
|
||||
let cert_chain = std::str::from_utf8(&cert_buf)
|
||||
.context("Failed to parse TLS cert chain as UTF-8")
|
||||
.status(StatusCode::INTERNAL_SERVER_ERROR)?
|
||||
.to_string();
|
||||
|
||||
let payload = json!({"leader_ca_cert": cert_chain, "retry": true });
|
||||
|
||||
let mut response = client
|
||||
.post(format!("{}/v1/sys/storage/raft/join", vault_url))
|
||||
.send_json(&payload)
|
||||
.await?;
|
||||
|
||||
let status_code = response.status();
|
||||
if !status_code.is_success() {
|
||||
error!("Vault returned server error: {}", status_code);
|
||||
let mut client_resp = HttpResponse::build(status_code);
|
||||
for (header_name, header_value) in response.headers().iter() {
|
||||
client_resp.insert_header((header_name.clone(), header_value.clone()));
|
||||
}
|
||||
return Ok(client_resp.streaming(response));
|
||||
}
|
||||
|
||||
let response: Value = response
|
||||
.json()
|
||||
.await
|
||||
.context("parsing raft join response")
|
||||
.status(StatusCode::INTERNAL_SERVER_ERROR)?;
|
||||
|
||||
debug!("raft join: {:?}", response);
|
||||
|
||||
if response.get("errors").is_some() {
|
||||
return Ok(HttpResponse::Ok().json(response));
|
||||
}
|
||||
}
|
||||
UnsealServerState::Undefined => {
|
||||
unreachable!("Invalid internal state");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(HttpResponse::Accepted().json(response)) // <- send response
|
||||
}
|
||||
|
||||
pub async fn vault_configure_unsealed(
|
||||
app: &UnsealServerConfig,
|
||||
admin_config: &AdminConfig,
|
||||
root_token: &str,
|
||||
admin_tee_mrenclave: &str,
|
||||
c: &Client,
|
||||
) -> Result<(), HttpResponseError> {
|
||||
wait_for_plugins_catalog(app, root_token, c).await;
|
||||
|
||||
if !plugin_is_already_running(app, root_token, c).await? {
|
||||
let r = vault(
|
||||
"Installing vault-auth-tee plugin",
|
||||
c.put(format!(
|
||||
"{}/v1/sys/plugins/catalog/auth/vault-auth-tee",
|
||||
app.vault_url
|
||||
)),
|
||||
root_token,
|
||||
json!({
|
||||
"sha256": VAULT_AUTH_TEE_SHA256,
|
||||
"command": "vault-auth-tee",
|
||||
"version": "0.1.0+dev"
|
||||
}),
|
||||
)
|
||||
.await
|
||||
.map_err(|e| anyhow!("{:?}", e))
|
||||
.status(StatusCode::BAD_GATEWAY)?;
|
||||
if !r.status().is_success() {
|
||||
let err = HttpResponseError::from_proxy(r).await;
|
||||
return Err(err);
|
||||
}
|
||||
} else {
|
||||
info!("vault-auth-tee plugin already installed");
|
||||
}
|
||||
|
||||
if !plugin_is_already_running(app, root_token, c).await? {
|
||||
let r = vault(
|
||||
"Activating vault-auth-tee plugin",
|
||||
c.post(format!("{}/v1/sys/auth/tee", app.vault_url)),
|
||||
root_token,
|
||||
json!({"type": "vault-auth-tee"}),
|
||||
)
|
||||
.await
|
||||
.map_err(|e| anyhow!("{:?}", e))
|
||||
.status(StatusCode::BAD_GATEWAY)?;
|
||||
if !r.status().is_success() {
|
||||
let err = HttpResponseError::from_proxy(r).await;
|
||||
return Err(err);
|
||||
}
|
||||
} else {
|
||||
info!("vault-auth-tee plugin already activated");
|
||||
}
|
||||
|
||||
if let Ok(mut r) = c
|
||||
.get(format!("{}/v1/auth/tee/tees?list=true", app.vault_url))
|
||||
.insert_header((VAULT_TOKEN_HEADER, root_token))
|
||||
.send()
|
||||
.await
|
||||
{
|
||||
let r: Value = r
|
||||
.json()
|
||||
.await
|
||||
.map_err(|e| anyhow!("{:?}", e))
|
||||
.status(StatusCode::BAD_GATEWAY)?;
|
||||
trace!("{:?}", r);
|
||||
if let Some(tees) = r.get("data").and_then(|v| v.get("keys")) {
|
||||
if let Some(tees) = tees.as_array() {
|
||||
if tees.contains(&json!("root")) {
|
||||
info!("root TEE already installed");
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
vault(
|
||||
"Installing root TEE",
|
||||
c.put(format!("{}/v1/auth/tee/tees/admin", app.vault_url)),
|
||||
root_token,
|
||||
json!({
|
||||
"lease": "1000",
|
||||
"name": "admin",
|
||||
"types": "sgx",
|
||||
"sgx_allowed_tcb_levels": "Ok,SwHardeningNeeded",
|
||||
"sgx_mrenclave": &admin_tee_mrenclave,
|
||||
"token_policies": "admin"
|
||||
}),
|
||||
)
|
||||
.await
|
||||
.map_err(|e| anyhow!("{:?}", e))
|
||||
.status(StatusCode::BAD_GATEWAY)?;
|
||||
|
||||
// Install admin policies
|
||||
let admin_policy = include_str!("admin-policy.hcl");
|
||||
vault(
|
||||
"Installing admin policy",
|
||||
c.put(format!("{}/v1/sys/policies/acl/admin", app.vault_url)),
|
||||
root_token,
|
||||
json!({ "policy": admin_policy }),
|
||||
)
|
||||
.await
|
||||
.map_err(|e| anyhow!("{:?}", e))
|
||||
.status(StatusCode::BAD_GATEWAY)?;
|
||||
|
||||
vault(
|
||||
"Enable the key/value secrets engine v1 at secret/.",
|
||||
c.put(format!("{}/v1/sys/mounts/secret", app.vault_url)),
|
||||
root_token,
|
||||
json!({ "type": "kv", "description": "K/V v1" } ),
|
||||
)
|
||||
.await
|
||||
.map_err(|e| anyhow!("{:?}", e))
|
||||
.status(StatusCode::BAD_GATEWAY)?;
|
||||
|
||||
// Create a `VaultConnection` for the `admin` tee to initialize the secrets for it.
|
||||
// Safety: the connection was already attested
|
||||
let admin_vcon = unsafe {
|
||||
VaultConnection::new_from_client_without_attestation(
|
||||
app.vault_url.clone(),
|
||||
c.clone(),
|
||||
"admin".into(),
|
||||
root_token.to_string(),
|
||||
)
|
||||
};
|
||||
|
||||
// initialize the admin config
|
||||
admin_vcon.store_secret(admin_config, "config").await?;
|
||||
admin_vcon
|
||||
.store_secret(AdminState::default(), "state")
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn wait_for_plugins_catalog(app: &UnsealServerConfig, root_token: &str, c: &Client) {
|
||||
info!("Waiting for plugins to be loaded");
|
||||
loop {
|
||||
let r = c
|
||||
.get(format!("{}/v1/sys/plugins/catalog", app.vault_url))
|
||||
.insert_header((VAULT_TOKEN_HEADER, root_token))
|
||||
.send()
|
||||
.await;
|
||||
|
||||
match r {
|
||||
Ok(r) => {
|
||||
if r.status().is_success() {
|
||||
break;
|
||||
} else {
|
||||
debug!("/v1/sys/plugins/catalog status: {:#?}", r)
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
debug!("/v1/sys/plugins/catalog error: {}", e)
|
||||
}
|
||||
}
|
||||
|
||||
info!("Waiting for plugins to be loaded");
|
||||
sleep(Duration::from_secs(1)).await;
|
||||
}
|
||||
}
|
||||
|
||||
async fn plugin_is_already_running(
|
||||
app: &UnsealServerConfig,
|
||||
root_token: &str,
|
||||
c: &Client,
|
||||
) -> std::result::Result<bool, HttpResponseError> {
|
||||
if let Ok(mut r) = c
|
||||
.get(format!("{}/v1/sys/auth", app.vault_url))
|
||||
.insert_header((VAULT_TOKEN_HEADER, root_token))
|
||||
.send()
|
||||
.await
|
||||
{
|
||||
if !r.status().is_success() {
|
||||
return Ok(false);
|
||||
}
|
||||
let r: Value = r
|
||||
.json()
|
||||
.await
|
||||
.map_err(|e| anyhow!("{:?}", e))
|
||||
.status(StatusCode::BAD_GATEWAY)?;
|
||||
trace!("{}", r.to_string());
|
||||
|
||||
let is_running = r
|
||||
.get("data")
|
||||
.and_then(|v| v.get("tee/"))
|
||||
.and_then(|v| v.get("running_sha256"))
|
||||
.and_then(|v| v.as_str())
|
||||
.and_then(|v| if v.is_empty() { None } else { Some(v) })
|
||||
.and_then(|v| {
|
||||
if v == VAULT_AUTH_TEE_SHA256 {
|
||||
Some(v)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.is_some();
|
||||
Ok(is_running)
|
||||
} else {
|
||||
Ok(false)
|
||||
}
|
||||
}
|
||||
|
||||
async fn vault(
|
||||
action: &str,
|
||||
req: ClientRequest,
|
||||
token: &str,
|
||||
json: Value,
|
||||
) -> <SendClientRequest as Future>::Output {
|
||||
info!("{}", action);
|
||||
debug!("json: {:?}", json);
|
||||
match req
|
||||
.insert_header((VAULT_TOKEN_HEADER, token))
|
||||
.send_json(&json)
|
||||
.await
|
||||
{
|
||||
Ok(r) => {
|
||||
debug!("response {:?}", r);
|
||||
Ok(r)
|
||||
}
|
||||
Err(e) => {
|
||||
error!("{}: {}", action, e);
|
||||
Err(e)
|
||||
}
|
||||
}
|
||||
}
|
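As a side note, `plugin_is_already_running` above walks a nested JSON document returned by Vault's `GET /v1/sys/auth`. A small standalone sketch of the shape it expects (the values are made up; the real code additionally compares `running_sha256` against `VAULT_AUTH_TEE_SHA256`):

```rust
// Hypothetical response shape; only the path data -> "tee/" -> running_sha256 matters here.
use serde_json::json;

fn main() {
    let r = json!({
        "data": {
            "tee/": { "type": "vault-auth-tee", "running_sha256": "deadbeef" }
        }
    });
    let is_running = r
        .get("data")
        .and_then(|v| v.get("tee/"))
        .and_then(|v| v.get("running_sha256"))
        .and_then(|v| v.as_str())
        .map(|v| !v.is_empty())
        .unwrap_or(false);
    println!("vault-auth-tee running: {is_running}");
}
```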
62
bin/tee-vault-unseal/tee-vault-unseal.manifest.template
Normal file
|
@ -0,0 +1,62 @@
|
|||
libos.entrypoint = "/app/tee-vault-unseal"
|
||||
|
||||
[loader]
|
||||
argv = [ "/app/tee-vault-unseal" ]
|
||||
entrypoint = "file:{{ gramine.libos }}"
|
||||
env.LD_LIBRARY_PATH = "/lib:{{ arch_libdir }}:/usr{{ arch_libdir }}:/lib"
|
||||
env.HOME = "/app"
|
||||
env.MALLOC_ARENA_MAX = "1"
|
||||
env.AZDCAP_DEBUG_LOG_LEVEL = "ignore"
|
||||
env.AZDCAP_COLLATERAL_VERSION = "v4"
|
||||
|
||||
### Required configuration ###
|
||||
env.ALLOWED_TCB_LEVELS = { passthrough = true }
|
||||
env.VAULT_ADDR = { passthrough = true }
|
||||
|
||||
### DEBUG ###
|
||||
env.RUST_BACKTRACE = "1"
|
||||
env.RUST_LOG="info,tee_vault_unseal=trace,teepot=trace,awc=debug"
|
||||
|
||||
[fs]
|
||||
root.uri = "file:/"
|
||||
start_dir = "/app"
|
||||
mounts = [
|
||||
{ path = "{{ execdir }}", uri = "file:{{ execdir }}" },
|
||||
{ path = "/lib", uri = "file:{{ gramine.runtimedir() }}" },
|
||||
{ path = "{{ arch_libdir }}", uri = "file:{{ arch_libdir }}" },
|
||||
{ path = "/etc", uri = "file:/etc" },
|
||||
{ type = "tmpfs", path = "/var/tmp" },
|
||||
{ type = "tmpfs", path = "/tmp" },
|
||||
{ type = "tmpfs", path = "/app/.dcap-qcnl" },
|
||||
{ type = "tmpfs", path = "/app/.az-dcap-client" },
|
||||
{ type = "encrypted", path = "/opt/vault/tls", uri = "file:/opt/vault/tls", key_name = "_sgx_mrsigner" },
|
||||
{ path = "/lib/libdcap_quoteprov.so", uri = "file:/lib/libdcap_quoteprov.so" },
|
||||
]
|
||||
|
||||
[sgx]
|
||||
trusted_files = [
|
||||
"file:/etc/ld.so.cache",
|
||||
"file:/app/",
|
||||
"file:{{ execdir }}/",
|
||||
"file:{{ arch_libdir }}/",
|
||||
"file:/usr/{{ arch_libdir }}/",
|
||||
"file:{{ gramine.libos }}",
|
||||
"file:{{ gramine.runtimedir() }}/",
|
||||
"file:/usr/lib/ssl/openssl.cnf",
|
||||
"file:/etc/ssl/",
|
||||
"file:/etc/sgx_default_qcnl.conf",
|
||||
"file:/lib/libdcap_quoteprov.so",
|
||||
]
|
||||
remote_attestation = "dcap"
|
||||
max_threads = 64
|
||||
edmm_enable = false
|
||||
## max enclave size
|
||||
enclave_size = "2G"
|
||||
|
||||
[sys]
|
||||
enable_extra_runtime_domain_names_conf = true
|
||||
enable_sigterm_injection = true
|
||||
|
||||
# possible tweak option, if there are problems with mio
|
||||
# currently mio is compiled with `mio_unsupported_force_waker_pipe`
|
||||
# insecure__allow_eventfd = true
|
22
bin/teepot-read/Cargo.toml
Normal file
|
@ -0,0 +1,22 @@
[package]
name = "teepot-read"
version.workspace = true
edition.workspace = true
authors.workspace = true
license.workspace = true
repository.workspace = true

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
actix-web.workspace = true
anyhow.workspace = true
awc.workspace = true
clap.workspace = true
serde.workspace = true
serde_json.workspace = true
teepot.workspace = true
tracing-actix-web.workspace = true
tracing-log.workspace = true
tracing-subscriber.workspace = true
tracing.workspace = true
|
111
bin/teepot-read/src/main.rs
Normal file
|
@ -0,0 +1,111 @@
|
|||
// SPDX-License-Identifier: Apache-2.0
|
||||
// Copyright (c) 2023 Matter Labs
|
||||
|
||||
//! Get the secrets from a Vault TEE and pass them as environment variables to a command
|
||||
|
||||
#![deny(missing_docs)]
|
||||
#![deny(clippy::all)]
|
||||
|
||||
use anyhow::{Context, Result};
|
||||
use clap::Parser;
|
||||
use serde_json::Value;
|
||||
use std::collections::HashMap;
|
||||
use std::os::unix::process::CommandExt;
|
||||
use std::process::Command;
|
||||
use teepot::client::vault::VaultConnection;
|
||||
use teepot::server::attestation::VaultAttestationArgs;
|
||||
use tracing::{debug, info, warn};
|
||||
use tracing_log::LogTracer;
|
||||
use tracing_subscriber::{fmt, prelude::*, EnvFilter, Registry};
|
||||
|
||||
#[derive(Parser, Debug)]
|
||||
#[command(author, version, about, long_about = None)]
|
||||
struct Arguments {
|
||||
/// turn on test mode
|
||||
#[arg(long, hide = true)]
|
||||
pub test: bool,
|
||||
/// vault token
|
||||
#[arg(long, env = "VAULT_TOKEN", hide = true)]
|
||||
pub vault_token: String,
|
||||
#[clap(flatten)]
|
||||
pub attestation: VaultAttestationArgs,
|
||||
/// name of this TEE to login to vault
|
||||
#[arg(long, required = true)]
|
||||
pub name: String,
|
||||
/// secrets to get from vault and pass as environment variables
|
||||
#[arg(long, required = true)]
|
||||
pub secrets: Vec<String>,
|
||||
/// command to run
|
||||
pub command: Vec<String>,
|
||||
}
|
||||
|
||||
#[actix_web::main]
|
||||
async fn main() -> Result<()> {
|
||||
LogTracer::init().context("Failed to set logger")?;
|
||||
|
||||
let subscriber = Registry::default()
|
||||
.with(EnvFilter::from_default_env())
|
||||
.with(fmt::layer().with_writer(std::io::stderr));
|
||||
tracing::subscriber::set_global_default(subscriber).unwrap();
|
||||
|
||||
let args = Arguments::parse();
|
||||
|
||||
// Split every `--secrets` argument on ',', then flatten and collect the pieces.
|
||||
let secrets = args
|
||||
.secrets
|
||||
.iter()
|
||||
.flat_map(|s| s.split(','))
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
info!("args: {:?}", args);
|
||||
|
||||
let conn = if args.test {
|
||||
warn!("TEST MODE");
|
||||
let client = awc::Client::builder()
|
||||
.add_default_header((actix_web::http::header::USER_AGENT, "teepot/1.0"))
|
||||
.finish();
|
||||
// SAFETY: TEST MODE
|
||||
unsafe {
|
||||
VaultConnection::new_from_client_without_attestation(
|
||||
args.attestation.vault_addr.clone(),
|
||||
client,
|
||||
args.name.clone(),
|
||||
args.vault_token.clone(),
|
||||
)
|
||||
}
|
||||
} else {
|
||||
VaultConnection::new(&args.attestation.clone().into(), args.name.clone())
|
||||
.await
|
||||
.expect("connecting to vault")
|
||||
};
|
||||
|
||||
let mut env: HashMap<String, String> = HashMap::new();
|
||||
|
||||
for secret_name in secrets {
|
||||
debug!("getting secret {secret_name}");
|
||||
let secret_val: serde_json::Value = match conn.load_secret(secret_name).await? {
|
||||
Some(val) => val,
|
||||
None => {
|
||||
debug!("secret {secret_name} not found");
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
debug!("got secret {secret_name}: {secret_val}");
|
||||
|
||||
// Plain strings are used as-is; any other JSON value is serialized to its string representation.
|
||||
let env_val = match secret_val {
|
||||
Value::String(s) => s,
|
||||
_ => secret_val.to_string(),
|
||||
};
|
||||
|
||||
env.insert(secret_name.to_string(), env_val);
|
||||
}
|
||||
|
||||
let err = Command::new(&args.command[0])
|
||||
.args(&args.command[1..])
|
||||
.envs(env)
|
||||
.exec();
|
||||
|
||||
Err(err).context("exec failed")
|
||||
}
|
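The `--secrets` handling above accepts both repeated flags and comma-separated lists; a tiny standalone illustration of the flattening step (the secret names are hypothetical):

```rust
fn main() {
    // Mirrors the flat_map(|s| s.split(',')) used in teepot-read/teepot-write.
    let args = vec!["DB_URL,DB_PASS".to_string(), "API_KEY".to_string()];
    let secrets: Vec<&str> = args.iter().flat_map(|s| s.split(',')).collect();
    assert_eq!(secrets, ["DB_URL", "DB_PASS", "API_KEY"]);
}
```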
22
bin/teepot-write/Cargo.toml
Normal file
|
@ -0,0 +1,22 @@
[package]
name = "teepot-write"
version.workspace = true
edition.workspace = true
authors.workspace = true
license.workspace = true
repository.workspace = true

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
actix-web.workspace = true
anyhow.workspace = true
awc.workspace = true
clap.workspace = true
serde.workspace = true
serde_json.workspace = true
teepot.workspace = true
tracing-actix-web.workspace = true
tracing-log.workspace = true
tracing-subscriber.workspace = true
tracing.workspace = true
|
98
bin/teepot-write/src/main.rs
Normal file
|
@ -0,0 +1,98 @@
|
|||
// SPDX-License-Identifier: Apache-2.0
|
||||
// Copyright (c) 2023 Matter Labs
|
||||
|
||||
//! Write secrets to a Vault TEE from environment variables
|
||||
|
||||
#![deny(missing_docs)]
|
||||
#![deny(clippy::all)]
|
||||
|
||||
use anyhow::{Context, Result};
|
||||
use clap::Parser;
|
||||
use serde_json::Value;
|
||||
use std::collections::HashMap;
|
||||
use std::env;
|
||||
use teepot::client::vault::VaultConnection;
|
||||
use teepot::server::attestation::VaultAttestationArgs;
|
||||
use tracing::{debug, info, warn};
|
||||
use tracing_log::LogTracer;
|
||||
use tracing_subscriber::{fmt, prelude::*, EnvFilter, Registry};
|
||||
|
||||
#[derive(Parser, Debug)]
|
||||
#[command(author, version, about, long_about = None)]
|
||||
struct Arguments {
|
||||
/// turn on test mode
|
||||
#[arg(long, hide = true)]
|
||||
pub test: bool,
|
||||
/// vault token
|
||||
#[arg(long, env = "VAULT_TOKEN", hide = true)]
|
||||
pub vault_token: String,
|
||||
#[clap(flatten)]
|
||||
pub attestation: VaultAttestationArgs,
|
||||
/// name of this TEE to login to vault
|
||||
#[arg(long, required = true)]
|
||||
pub name: String,
|
||||
/// name of the TEE to store the secrets for (defaults to the value of `--name`)
|
||||
#[arg(long)]
|
||||
pub store_name: Option<String>,
|
||||
/// secrets to write to vault with the value of the environment variables
|
||||
#[arg(long, required = true)]
|
||||
pub secrets: Vec<String>,
|
||||
}
|
||||
|
||||
#[actix_web::main]
|
||||
async fn main() -> Result<()> {
|
||||
LogTracer::init().context("Failed to set logger")?;
|
||||
|
||||
let subscriber = Registry::default()
|
||||
.with(EnvFilter::from_default_env())
|
||||
.with(fmt::layer().with_writer(std::io::stderr));
|
||||
tracing::subscriber::set_global_default(subscriber).unwrap();
|
||||
|
||||
let args = Arguments::parse();
|
||||
|
||||
// Split every `--secrets` argument on ',', then flatten and collect the pieces.
|
||||
let secrets = args
|
||||
.secrets
|
||||
.iter()
|
||||
.flat_map(|s| s.split(','))
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
info!("args: {:?}", args);
|
||||
|
||||
let conn = if args.test {
|
||||
warn!("TEST MODE");
|
||||
let client = awc::Client::builder()
|
||||
.add_default_header((actix_web::http::header::USER_AGENT, "teepot/1.0"))
|
||||
.finish();
|
||||
// SAFETY: TEST MODE
|
||||
unsafe {
|
||||
VaultConnection::new_from_client_without_attestation(
|
||||
args.attestation.vault_addr.clone(),
|
||||
client,
|
||||
args.name.clone(),
|
||||
args.vault_token.clone(),
|
||||
)
|
||||
}
|
||||
} else {
|
||||
VaultConnection::new(&args.attestation.clone().into(), args.name.clone())
|
||||
.await
|
||||
.expect("connecting to vault")
|
||||
};
|
||||
|
||||
let tee_name = args.store_name.unwrap_or(args.name.clone());
|
||||
|
||||
let env = env::vars()
|
||||
.filter(|(k, _)| secrets.contains(&k.as_str()))
|
||||
.collect::<HashMap<_, _>>();
|
||||
|
||||
for (secret_name, secret_val) in env {
|
||||
debug!("storing secret {secret_name}: {secret_val}");
|
||||
let secret_val = Value::String(secret_val);
|
||||
conn.store_secret_for_tee(&tee_name, &secret_val, &secret_name)
|
||||
.await
|
||||
.expect("storing secret");
|
||||
info!("stored secret {secret_name}");
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
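teepot-write is the counterpart of teepot-read: it picks the requested names out of the process environment and stores each value as a string secret for the chosen TEE. A minimal sketch of just the filtering step (the variable names are hypothetical):

```rust
use std::collections::HashMap;
use std::env;

fn main() {
    // Mirrors the env::vars() filter above; only the listed names would be forwarded to Vault.
    let secrets = ["DB_PASS", "API_KEY"];
    let picked: HashMap<String, String> = env::vars()
        .filter(|(k, _)| secrets.contains(&k.as_str()))
        .collect();
    println!("would store {} secret(s)", picked.len());
}
```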
23
bin/vault-admin/Cargo.toml
Normal file
|
@ -0,0 +1,23 @@
[package]
name = "vault-admin"
version.workspace = true
edition.workspace = true
authors.workspace = true
license.workspace = true
repository.workspace = true

[dependencies]
actix-web.workspace = true
anyhow.workspace = true
awc.workspace = true
bytemuck.workspace = true
clap.workspace = true
hex.workspace = true
pgp.workspace = true
serde.workspace = true
serde_json.workspace = true
sha2.workspace = true
teepot.workspace = true
tracing-log.workspace = true
tracing-subscriber.workspace = true
tracing.workspace = true
|
46
bin/vault-admin/README.md
Normal file
|
@ -0,0 +1,46 @@
|
|||
```bash
|
||||
❯ idents=( tests/data/pub*.asc )
|
||||
❯ cargo run --bin vault-admin -p vault-admin -- \
|
||||
verify \
|
||||
${idents[@]/#/-i } \
|
||||
tests/data/test.json \
|
||||
tests/data/test.json.asc
|
||||
|
||||
Verified signature for `81A312C59D679D930FA9E8B06D728F29A2DBABF8`
|
||||
|
||||
❯ RUST_LOG=info cargo run -p vault-admin -- \
|
||||
send \
|
||||
--sgx-mrsigner c5591a72b8b86e0d8814d6e8750e3efe66aea2d102b8ba2405365559b858697d \
|
||||
--sgx-allowed-tcb-levels SwHardeningNeeded \
|
||||
--server https://127.0.0.1:8444 \
|
||||
bin/tee-vault-admin/tests/data/test.json \
|
||||
bin/tee-vault-admin/tests/data/test.json.asc
|
||||
|
||||
2023-08-04T10:51:14.919941Z INFO vault_admin: Quote verified! Connection secure!
|
||||
2023-08-04T10:51:14.920430Z INFO tee_client: Getting attestation report
|
||||
2023-08-04T10:51:15.020459Z INFO tee_client: Checked or set server certificate public key hash `f6dc06b9f2a14fa16a94c076a85eab8513f99ec0091801cc62c8761e42908fc1`
|
||||
2023-08-04T10:51:15.024310Z INFO tee_client: Verifying attestation report
|
||||
2023-08-04T10:51:15.052712Z INFO tee_client: TcbLevel is allowed: SwHardeningNeeded: Software hardening is needed
|
||||
2023-08-04T10:51:15.054508Z WARN tee_client: Info: Advisory ID: INTEL-SA-00615
|
||||
2023-08-04T10:51:15.054572Z INFO tee_client: Report data matches `f6dc06b9f2a14fa16a94c076a85eab8513f99ec0091801cc62c8761e42908fc1`
|
||||
2023-08-04T10:51:15.054602Z INFO tee_client: mrsigner `c5591a72b8b86e0d8814d6e8750e3efe66aea2d102b8ba2405365559b858697d` matches
|
||||
[
|
||||
{
|
||||
"request": {
|
||||
"data": {
|
||||
"lease": "1000",
|
||||
"name": "test",
|
||||
"sgx_allowed_tcb_levels": "Ok,SwHardeningNeeded",
|
||||
"sgx_mrsigner": "c5591a72b8b86e0d8814d6e8750e3efe66aea2d102b8ba2405365559b858697d",
|
||||
"token_policies": "test",
|
||||
"types": "sgx"
|
||||
},
|
||||
"url": "/v1/auth/tee/tees/test"
|
||||
},
|
||||
"response": {
|
||||
"status_code": 204,
|
||||
"value": null
|
||||
}
|
||||
}
|
||||
]
|
||||
```
|
366
bin/vault-admin/src/main.rs
Normal file
|
@ -0,0 +1,366 @@
|
|||
// SPDX-License-Identifier: Apache-2.0
|
||||
// Copyright (c) 2023 Matter Labs
|
||||
|
||||
use anyhow::{anyhow, bail, Context, Result};
|
||||
use clap::{Args, Parser, Subcommand};
|
||||
use pgp::types::KeyTrait;
|
||||
use pgp::{Deserializable, SignedPublicKey};
|
||||
use serde_json::Value;
|
||||
use std::default::Default;
|
||||
use std::fs::{File, OpenOptions};
|
||||
use std::io::{Read, Write};
|
||||
use std::path::{Path, PathBuf};
|
||||
use teepot::client::{AttestationArgs, TeeConnection};
|
||||
use teepot::json::http::{
|
||||
SignRequest, SignRequestData, SignResponse, VaultCommandRequest, VaultCommands,
|
||||
VaultCommandsResponse, ATTESTATION_URL, DIGEST_URL,
|
||||
};
|
||||
use teepot::server::signatures::verify_sig;
|
||||
use teepot::sgx::sign::Signature;
|
||||
use tracing::{error, info};
|
||||
use tracing_log::LogTracer;
|
||||
use tracing_subscriber::Registry;
|
||||
use tracing_subscriber::{fmt, prelude::*, EnvFilter};
|
||||
|
||||
#[derive(Args, Debug)]
|
||||
struct SendArgs {
|
||||
#[clap(flatten)]
|
||||
pub attestation: AttestationArgs,
|
||||
/// Vault command file
|
||||
#[arg(required = true)]
|
||||
pub command_file: PathBuf,
|
||||
/// GPG signature files
|
||||
#[arg(required = true)]
|
||||
pub sigs: Vec<PathBuf>,
|
||||
}
|
||||
|
||||
#[derive(Args, Debug)]
|
||||
struct SignTeeArgs {
|
||||
#[clap(flatten)]
|
||||
pub attestation: AttestationArgs,
|
||||
/// output file
|
||||
#[arg(short, long, required = true)]
|
||||
pub out: PathBuf,
|
||||
/// signature request file
|
||||
#[arg(required = true)]
|
||||
pub sig_request_file: PathBuf,
|
||||
/// GPG signature files
|
||||
#[arg(required = true)]
|
||||
pub sigs: Vec<PathBuf>,
|
||||
}
|
||||
|
||||
#[derive(Args, Debug)]
|
||||
struct DigestArgs {
|
||||
#[clap(flatten)]
|
||||
pub attestation: AttestationArgs,
|
||||
}
|
||||
|
||||
#[derive(Args, Debug)]
|
||||
struct VerifyArgs {
|
||||
/// GPG identity files
|
||||
#[arg(short, long, required = true)]
|
||||
pub idents: Vec<PathBuf>,
|
||||
/// Vault command file
|
||||
#[arg(required = true)]
|
||||
pub command_file: PathBuf,
|
||||
/// GPG signature files
|
||||
#[arg(required = true)]
|
||||
pub sigs: Vec<PathBuf>,
|
||||
}
|
||||
|
||||
#[derive(Args, Debug)]
|
||||
struct CreateSignRequestArgs {
|
||||
/// Last digest
|
||||
#[arg(long)]
|
||||
pub last_digest: Option<String>,
|
||||
/// TEE name
|
||||
#[arg(long)]
|
||||
pub tee_name: Option<String>,
|
||||
/// Vault command file
|
||||
#[arg(required = true)]
|
||||
pub sig_file: PathBuf,
|
||||
}
|
||||
|
||||
#[derive(Subcommand, Debug)]
|
||||
enum SubCommands {
|
||||
/// Send the signed commands to execute to the vault
|
||||
Command(SendArgs),
|
||||
/// Verify the signature(s) for the commands to send
|
||||
Verify(VerifyArgs),
|
||||
/// Get the digest of the last executed commands
|
||||
Digest(DigestArgs),
|
||||
/// Send a signed signing request to the vault and write the signed data to a file
|
||||
SignTee(SignTeeArgs),
|
||||
/// Create a sign request
|
||||
CreateSignRequest(CreateSignRequestArgs),
|
||||
}
|
||||
|
||||
/// Admin tool for the vault
|
||||
#[derive(Parser, Debug)]
|
||||
#[command(author, version, about, long_about = None)]
|
||||
struct Arguments {
|
||||
#[clap(subcommand)]
|
||||
cmd: SubCommands,
|
||||
}
|
||||
|
||||
#[actix_web::main]
|
||||
async fn main() -> Result<()> {
|
||||
LogTracer::init().context("Failed to set logger")?;
|
||||
|
||||
let subscriber = Registry::default()
|
||||
.with(EnvFilter::from_default_env())
|
||||
.with(fmt::layer().with_writer(std::io::stderr));
|
||||
tracing::subscriber::set_global_default(subscriber).unwrap();
|
||||
|
||||
let args = Arguments::parse();
|
||||
info!("Quote verified! Connection secure!");
|
||||
|
||||
match args.cmd {
|
||||
SubCommands::Command(args) => send_commands(args).await?,
|
||||
SubCommands::SignTee(args) => send_sig_request(args).await?,
|
||||
SubCommands::Verify(args) => {
|
||||
verify(args.command_file, args.idents.iter(), args.sigs.iter())?
|
||||
}
|
||||
SubCommands::Digest(args) => digest(args).await?,
|
||||
SubCommands::CreateSignRequest(args) => create_sign_request(args)?,
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn create_sign_request(args: CreateSignRequestArgs) -> Result<()> {
|
||||
let mut sigstruct_file = File::open(&args.sig_file)?;
|
||||
let mut sigstruct_bytes = Vec::new();
|
||||
sigstruct_file.read_to_end(&mut sigstruct_bytes)?;
|
||||
|
||||
let sigstruct = bytemuck::try_from_bytes::<Signature>(&sigstruct_bytes)
|
||||
.context(format!("parsing signature file {:?}", &args.sig_file))?;
|
||||
|
||||
let body = sigstruct.body();
|
||||
let data = bytemuck::bytes_of(&body).to_vec();
|
||||
|
||||
let sign_request_data = SignRequestData {
|
||||
data,
|
||||
last_digest: args.last_digest.unwrap_or_default(),
|
||||
tee_name: args.tee_name.unwrap_or_default(),
|
||||
tee_type: "sgx".to_string(),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
println!("{}", serde_json::to_string_pretty(&sign_request_data)?);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn verify(
|
||||
msg: impl AsRef<Path>,
|
||||
idents_file_paths: impl Iterator<Item = impl AsRef<Path>>,
|
||||
sig_paths: impl Iterator<Item = impl AsRef<Path>>,
|
||||
) -> Result<()> {
|
||||
let mut cmd_file = File::open(msg.as_ref())?;
|
||||
let mut cmd_buf = Vec::new();
|
||||
cmd_file
|
||||
.read_to_end(&mut cmd_buf)
|
||||
.context(format!("reading command file {:?}", &cmd_file))?;
|
||||
|
||||
let mut idents = Vec::new();
|
||||
for ident_file_path in idents_file_paths {
|
||||
let ident_file = File::open(ident_file_path.as_ref()).context(format!(
|
||||
"reading identity file {:?}",
|
||||
ident_file_path.as_ref()
|
||||
))?;
|
||||
idents.push(
|
||||
SignedPublicKey::from_armor_single(ident_file)
|
||||
.context(format!(
|
||||
"reading identity file {:?}",
|
||||
ident_file_path.as_ref()
|
||||
))?
|
||||
.0,
|
||||
);
|
||||
}
|
||||
|
||||
for sig_path in sig_paths {
|
||||
let mut sig_file = File::open(&sig_path)
|
||||
.context(format!("reading signature file {:?}", sig_path.as_ref()))?;
|
||||
let mut sig = String::new();
|
||||
sig_file
|
||||
.read_to_string(&mut sig)
|
||||
.context(format!("reading signature file {:?}", sig_path.as_ref()))?;
|
||||
let ident_pos = verify_sig(&sig, &cmd_buf, &idents)?;
|
||||
println!(
|
||||
"Verified signature for `{}`",
|
||||
hex::encode_upper(idents.get(ident_pos).unwrap().fingerprint())
|
||||
);
|
||||
// Remove the identity from the list of identities to verify
|
||||
idents.remove(ident_pos);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn send_commands(args: SendArgs) -> Result<()> {
|
||||
// Read the command file into a string
|
||||
let mut cmd_file = File::open(&args.command_file)?;
|
||||
let mut commands = String::new();
|
||||
cmd_file.read_to_string(&mut commands)?;
|
||||
|
||||
// Check that the command file is valid JSON
|
||||
let vault_commands: VaultCommands = serde_json::from_str(&commands)
|
||||
.context(format!("parsing command file {:?}", &args.command_file))?;
|
||||
|
||||
let mut signatures = Vec::new();
|
||||
|
||||
for sig in args.sigs {
|
||||
let mut sig_file = File::open(sig)?;
|
||||
let mut sig = String::new();
|
||||
sig_file.read_to_string(&mut sig)?;
|
||||
signatures.push(sig);
|
||||
}
|
||||
|
||||
let send_req = VaultCommandRequest {
|
||||
commands,
|
||||
signatures,
|
||||
};
|
||||
|
||||
let conn = TeeConnection::new(&args.attestation, ATTESTATION_URL).await?;
|
||||
|
||||
let mut response = conn
|
||||
.client()
|
||||
.post(&format!(
|
||||
"{server}{url}",
|
||||
server = conn.server(),
|
||||
url = VaultCommandRequest::URL
|
||||
))
|
||||
.send_json(&send_req)
|
||||
.await
|
||||
.map_err(|e| anyhow!("sending command request: {}", e))?;
|
||||
|
||||
let status_code = response.status();
|
||||
if !status_code.is_success() {
|
||||
error!("sending command request: {}", status_code);
|
||||
if let Ok(r) = response.json::<Value>().await {
|
||||
eprintln!(
|
||||
"Error sending command request: {}",
|
||||
serde_json::to_string(&r).unwrap_or_default()
|
||||
);
|
||||
}
|
||||
bail!("sending command request: {}", status_code);
|
||||
}
|
||||
|
||||
let cmd_responses: VaultCommandsResponse = response
|
||||
.json()
|
||||
.await
|
||||
.context("failed parsing command response")?;
|
||||
|
||||
println!("digest: {}", &cmd_responses.digest);
|
||||
|
||||
let pairs = cmd_responses
|
||||
.results
|
||||
.iter()
|
||||
.zip(vault_commands.commands.iter())
|
||||
.map(|(resp, cmd)| {
|
||||
let mut pair = serde_json::Map::new();
|
||||
pair.insert("request".to_string(), serde_json::to_value(cmd).unwrap());
|
||||
pair.insert("response".to_string(), serde_json::to_value(resp).unwrap());
|
||||
pair
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
println!("{}", serde_json::to_string_pretty(&pairs)?);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn send_sig_request(args: SignTeeArgs) -> Result<()> {
|
||||
// Read the command file into a string
|
||||
let mut cmd_file = File::open(&args.sig_request_file)?;
|
||||
let mut sign_request_data_str = String::new();
|
||||
cmd_file.read_to_string(&mut sign_request_data_str)?;
|
||||
|
||||
// Check that the command file is valid JSON
|
||||
let _sign_request_data: SignRequestData = serde_json::from_str(&sign_request_data_str)
|
||||
.context(format!("parsing command file {:?}", &args.sig_request_file))?;
|
||||
|
||||
let mut signatures = Vec::new();
|
||||
|
||||
for sig in args.sigs {
|
||||
let mut sig_file = File::open(sig)?;
|
||||
let mut sig = String::new();
|
||||
sig_file.read_to_string(&mut sig)?;
|
||||
signatures.push(sig);
|
||||
}
|
||||
|
||||
// open out_file early to fail fast if it is not writable
|
||||
let mut out_file = OpenOptions::new()
|
||||
.create(true)
|
||||
.write(true)
|
||||
.open(&args.out)?;
|
||||
|
||||
let send_req = SignRequest {
|
||||
sign_request_data: sign_request_data_str,
|
||||
signatures,
|
||||
};
|
||||
|
||||
let conn = TeeConnection::new(&args.attestation, ATTESTATION_URL).await?;
|
||||
|
||||
let mut response = conn
|
||||
.client()
|
||||
.post(&format!(
|
||||
"{server}{url}",
|
||||
server = conn.server(),
|
||||
url = SignRequest::URL
|
||||
))
|
||||
.send_json(&send_req)
|
||||
.await
|
||||
.map_err(|e| anyhow!("sending sign request: {}", e))?;
|
||||
|
||||
let status_code = response.status();
|
||||
if !status_code.is_success() {
|
||||
error!("sending sign request: {}", status_code);
|
||||
if let Ok(r) = response.json::<Value>().await {
|
||||
eprintln!(
|
||||
"Error sending sign request: {}",
|
||||
serde_json::to_string(&r).unwrap_or_default()
|
||||
);
|
||||
}
|
||||
bail!("sending sign request: {}", status_code);
|
||||
}
|
||||
|
||||
let sign_response: SignResponse = response
|
||||
.json()
|
||||
.await
|
||||
.context("failed parsing sign response")?;
|
||||
|
||||
println!("digest: {}", &sign_response.digest);
|
||||
|
||||
out_file.write_all(&sign_response.signed_data)?;
|
||||
|
||||
println!("{{ \"digest\": \"{}\" }}", sign_response.digest);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn digest(args: DigestArgs) -> Result<()> {
|
||||
let conn = TeeConnection::new(&args.attestation, ATTESTATION_URL).await?;
|
||||
|
||||
let mut response = conn
|
||||
.client()
|
||||
.get(&format!("{server}{DIGEST_URL}", server = conn.server()))
|
||||
.send()
|
||||
.await
|
||||
.map_err(|e| anyhow!("sending digest request: {}", e))?;
|
||||
|
||||
let status_code = response.status();
|
||||
if !status_code.is_success() {
|
||||
error!("sending digest request: {}", status_code);
|
||||
if let Ok(r) = response.json::<Value>().await {
|
||||
eprintln!("Error sending digest request: {}", r);
|
||||
}
|
||||
bail!("sending digest request: {}", status_code);
|
||||
}
|
||||
|
||||
let digest_response: Value = response
|
||||
.json()
|
||||
.await
|
||||
.context("failed parsing digest response")?;
|
||||
|
||||
println!("{}", serde_json::to_string_pretty(&digest_response)?);
|
||||
Ok(())
|
||||
}
|
22
bin/vault-unseal/Cargo.toml
Normal file
|
@ -0,0 +1,22 @@
[package]
name = "vault-unseal"
version.workspace = true
edition.workspace = true
authors.workspace = true
license.workspace = true
repository.workspace = true

[dependencies]
actix-web.workspace = true
anyhow.workspace = true
awc.workspace = true
base64.workspace = true
clap.workspace = true
hex.workspace = true
serde.workspace = true
serde_json.workspace = true
sha2.workspace = true
teepot.workspace = true
tracing-log.workspace = true
tracing-subscriber.workspace = true
tracing.workspace = true
|
220
bin/vault-unseal/src/main.rs
Normal file
|
@ -0,0 +1,220 @@
|
|||
// SPDX-License-Identifier: Apache-2.0
|
||||
// Copyright (c) 2023 Matter Labs
|
||||
|
||||
use anyhow::{anyhow, bail, Context, Result};
|
||||
use base64::{engine::general_purpose, Engine as _};
|
||||
use clap::{Args, Parser, Subcommand};
|
||||
use serde_json::Value;
|
||||
use std::fs::File;
|
||||
use std::io::Read;
|
||||
use teepot::client::{AttestationArgs, TeeConnection};
|
||||
use teepot::json::http::{Init, InitResponse, Unseal, ATTESTATION_URL};
|
||||
use tracing::{error, info, trace, warn};
|
||||
use tracing_log::LogTracer;
|
||||
use tracing_subscriber::Registry;
|
||||
use tracing_subscriber::{fmt, prelude::*, EnvFilter};
|
||||
|
||||
#[derive(Args, Debug)]
|
||||
pub struct InitArgs {
|
||||
/// admin threshold
|
||||
#[arg(long)]
|
||||
admin_threshold: usize,
|
||||
/// PGP keys to sign commands for the admin tee
|
||||
#[arg(short, long)]
|
||||
admin_pgp_key_file: Vec<String>,
|
||||
/// admin TEE mrenclave
|
||||
#[arg(long)]
|
||||
admin_tee_mrenclave: String,
|
||||
/// secret threshold
|
||||
#[arg(long)]
|
||||
unseal_threshold: usize,
|
||||
/// PGP keys to encrypt the unseal keys with
|
||||
#[arg(short, long)]
|
||||
unseal_pgp_key_file: Vec<String>,
|
||||
}
|
||||
|
||||
/// subcommands and their options/arguments.
|
||||
#[derive(Subcommand, Debug)]
|
||||
enum SubCommands {
|
||||
Init(InitArgs),
|
||||
Unseal,
|
||||
}
|
||||
|
||||
#[derive(Parser, Debug)]
|
||||
#[command(author, version, about, long_about = None)]
|
||||
struct Arguments {
|
||||
#[clap(flatten)]
|
||||
pub attestation: AttestationArgs,
|
||||
/// Subcommands (with their own options)
|
||||
#[clap(subcommand)]
|
||||
cmd: SubCommands,
|
||||
}
|
||||
|
||||
#[actix_web::main]
|
||||
async fn main() -> Result<()> {
|
||||
LogTracer::init().context("Failed to set logger")?;
|
||||
|
||||
let subscriber = Registry::default()
|
||||
.with(EnvFilter::from_default_env())
|
||||
.with(fmt::layer().with_writer(std::io::stderr));
|
||||
tracing::subscriber::set_global_default(subscriber).unwrap();
|
||||
|
||||
let args = Arguments::parse();
|
||||
|
||||
match args.cmd {
|
||||
SubCommands::Init(_) => init(args).await?,
|
||||
SubCommands::Unseal => unseal(args).await?,
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn init(args: Arguments) -> Result<()> {
|
||||
let conn = TeeConnection::new(&args.attestation, ATTESTATION_URL).await?;
|
||||
|
||||
info!("Quote verified! Connection secure!");
|
||||
|
||||
let SubCommands::Init(init_args) = args.cmd else {
|
||||
unreachable!()
|
||||
};
|
||||
|
||||
if init_args.admin_threshold == 0 {
|
||||
bail!("admin threshold must be greater than 0");
|
||||
}
|
||||
|
||||
if init_args.unseal_threshold == 0 {
|
||||
bail!("unseal threshold must be greater than 0");
|
||||
}
|
||||
|
||||
if init_args.admin_threshold > init_args.admin_pgp_key_file.len() {
|
||||
bail!("admin threshold must be less than or equal to the number of admin pgp keys");
|
||||
}
|
||||
|
||||
if init_args.unseal_threshold > init_args.unseal_pgp_key_file.len() {
|
||||
bail!("unseal threshold must be less than or equal to the number of unseal pgp keys");
|
||||
}
|
||||
|
||||
let mut pgp_keys = Vec::new();
|
||||
|
||||
for filename in init_args.unseal_pgp_key_file {
|
||||
let mut file =
|
||||
File::open(&filename).context(format!("Failed to open pgp key file {}", &filename))?;
|
||||
let mut buf = Vec::new();
|
||||
file.read_to_end(&mut buf)?;
|
||||
let key = std::str::from_utf8(&buf)?.trim().to_string();
|
||||
pgp_keys.push(key);
|
||||
}
|
||||
|
||||
let mut admin_pgp_keys = Vec::new();
|
||||
|
||||
for filename in init_args.admin_pgp_key_file {
|
||||
let mut file =
|
||||
File::open(&filename).context(format!("Failed to open pgp key file {}", &filename))?;
|
||||
// read the whole key file and strip all whitespace before base64-decoding
|
||||
let mut key = String::new();
|
||||
file.read_to_string(&mut key)
|
||||
.context(format!("Failed to read pgp key file {}", &filename))?;
|
||||
key.retain(|c| !c.is_ascii_whitespace());
|
||||
|
||||
let bytes = general_purpose::STANDARD.decode(key).context(format!(
|
||||
"Failed to base64 decode pgp key file {}",
|
||||
&filename
|
||||
))?;
|
||||
admin_pgp_keys.push(bytes.into_boxed_slice());
|
||||
}
|
||||
|
||||
let init = Init {
|
||||
secret_shares: pgp_keys.len() as _,
|
||||
secret_threshold: init_args.unseal_threshold,
|
||||
admin_threshold: init_args.admin_threshold,
|
||||
admin_tee_mrenclave: init_args.admin_tee_mrenclave,
|
||||
admin_pgp_keys: admin_pgp_keys.into_boxed_slice(),
|
||||
pgp_keys,
|
||||
};
|
||||
|
||||
info!("Inititalizing vault");
|
||||
|
||||
let mut response = conn
|
||||
.client()
|
||||
.post(&format!(
|
||||
"{server}{url}",
|
||||
server = conn.server(),
|
||||
url = Init::URL
|
||||
))
|
||||
.send_json(&init)
|
||||
.await
|
||||
.map_err(|e| anyhow!("Error sending init request: {}", e))?;
|
||||
|
||||
let status_code = response.status();
|
||||
if !status_code.is_success() {
|
||||
error!("Failed to init vault: {}", status_code);
|
||||
if let Ok(r) = response.json::<Value>().await {
|
||||
eprintln!("Failed to init vault: {}", r);
|
||||
}
|
||||
bail!("failed to init vault: {}", status_code);
|
||||
}
|
||||
|
||||
let init_response: Value = response.json().await.context("failed to init vault")?;
|
||||
|
||||
info!("Got Response: {}", init_response.to_string());
|
||||
|
||||
let resp: InitResponse =
|
||||
serde_json::from_value(init_response).context("Failed to parse init response")?;
|
||||
println!("{}", serde_json::to_string(&resp).unwrap());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn unseal(args: Arguments) -> Result<()> {
|
||||
info!("Reading unencrypted key from stdin");
|
||||
|
||||
// read all bytes from stdin
|
||||
let mut stdin = std::io::stdin();
|
||||
let mut buf = Vec::new();
|
||||
stdin.read_to_end(&mut buf)?;
|
||||
let key = std::str::from_utf8(&buf)?.trim().to_string();
|
||||
|
||||
if key.is_empty() {
|
||||
bail!("Error reading key from stdin");
|
||||
}
|
||||
|
||||
let conn = TeeConnection::new(&args.attestation, ATTESTATION_URL).await?;
|
||||
|
||||
info!("Quote verified! Connection secure!");
|
||||
|
||||
info!("Unsealing vault");
|
||||
|
||||
let unseal_data = Unseal { key };
|
||||
|
||||
let mut response = conn
|
||||
.client()
|
||||
.post(&format!(
|
||||
"{server}{url}",
|
||||
server = conn.server(),
|
||||
url = Unseal::URL
|
||||
))
|
||||
.send_json(&unseal_data)
|
||||
.await
|
||||
.map_err(|e| anyhow!("Error sending unseal request: {}", e))?;
|
||||
|
||||
let status_code = response.status();
|
||||
if !status_code.is_success() {
|
||||
error!("Failed to unseal vault: {}", status_code);
|
||||
if let Ok(r) = response.json::<Value>().await {
|
||||
eprintln!("Failed to unseal vault: {}", r);
|
||||
}
|
||||
bail!("failed to unseal vault: {}", status_code);
|
||||
}
|
||||
|
||||
let unseal_response: Value = response.json().await.context("failed to unseal vault")?;
|
||||
|
||||
trace!("Got Response: {}", unseal_response.to_string());
|
||||
|
||||
if matches!(unseal_response["sealed"].as_bool(), Some(true)) {
|
||||
warn!("Vault is still sealed!");
|
||||
println!("Vault is still sealed!");
|
||||
} else {
|
||||
info!("Vault is unsealed!");
|
||||
println!("Vault is unsealed!");
|
||||
}
|
||||
Ok(())
|
||||
}
|
13
bin/verify-attestation/Cargo.toml
Normal file
|
@ -0,0 +1,13 @@
[package]
name = "verify-attestation"
version.workspace = true
edition.workspace = true
authors.workspace = true
license.workspace = true
repository.workspace = true

[dependencies]
anyhow.workspace = true
hex.workspace = true
intel-tee-quote-verification-rs.workspace = true
teepot.workspace = true
|
46
bin/verify-attestation/Dockerfile
Normal file
|
@ -0,0 +1,46 @@
|
|||
FROM docker.io/rust:1-bullseye AS buildtee
|
||||
RUN curl -fsSLo /usr/share/keyrings/intel.asc https://download.01.org/intel-sgx/sgx_repo/ubuntu/intel-sgx-deb.key \
|
||||
&& echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel.asc] https://download.01.org/intel-sgx/sgx_repo/ubuntu focal main" > /etc/apt/sources.list.d/intel-sgx.list \
|
||||
&& apt-get update \
|
||||
&& apt-get install -y --no-install-recommends \
|
||||
build-essential \
|
||||
cmake \
|
||||
rsync \
|
||||
pkg-config \
|
||||
libssl-dev \
|
||||
libcurl4-openssl-dev \
|
||||
libprotobuf-dev \
|
||||
protobuf-compiler \
|
||||
clang \
|
||||
libsgx-headers \
|
||||
libsgx-dcap-quote-verify-dev
|
||||
|
||||
WORKDIR /opt/vault/plugins
|
||||
|
||||
WORKDIR /build
|
||||
RUN --mount=type=bind,target=/data rsync --exclude='/.git' --filter="dir-merge,- .gitignore" --exclude "Dockerfile-*" --exclude 'tee-vault-admin.manifest.template' -av /data/ ./
|
||||
RUN --mount=type=cache,target=/usr/local/cargo/registry --mount=type=cache,target=target \
|
||||
RUSTFLAGS="-C target-cpu=icelake-server --cfg mio_unsupported_force_waker_pipe" \
|
||||
cargo build --locked --target x86_64-unknown-linux-gnu --release -p verify-attestation --bin verify-attestation \
|
||||
&& mv ./target/x86_64-unknown-linux-gnu/release/verify-attestation ./
|
||||
|
||||
FROM docker.io/ubuntu:20.04
|
||||
|
||||
RUN apt-get update \
|
||||
&& apt-get install -y curl
|
||||
|
||||
RUN curl -fsSLo /usr/share/keyrings/intel.asc https://download.01.org/intel-sgx/sgx_repo/ubuntu/intel-sgx-deb.key \
|
||||
&& echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel.asc] https://download.01.org/intel-sgx/sgx_repo/ubuntu focal main" > /etc/apt/sources.list.d/intel-sgx.list \
|
||||
&& apt-get update \
|
||||
&& apt-get install -y --no-install-recommends \
|
||||
libsgx-dcap-default-qpl \
|
||||
libsgx-urts \
|
||||
libsgx-enclave-common \
|
||||
libsgx-dcap-quote-verify
|
||||
RUN apt purge -y libsgx-ae-qve
|
||||
RUN rm -rf /var/lib/apt/lists/*
|
||||
|
||||
COPY --from=buildtee /build/verify-attestation /bin/verify-attestation
|
||||
|
||||
ENTRYPOINT ["/bin/sh", "-c"]
|
||||
CMD [ "verify-attestation" ]
|
58
bin/verify-attestation/src/main.rs
Normal file
|
@ -0,0 +1,58 @@
|
|||
// SPDX-License-Identifier: Apache-2.0
|
||||
// Copyright (c) 2023 Matter Labs
|
||||
|
||||
//! Simple TEE attestation verification test
|
||||
|
||||
#![deny(missing_docs)]
|
||||
#![deny(clippy::all)]
|
||||
|
||||
use anyhow::{bail, Context, Result};
|
||||
use std::io::Read;
|
||||
use std::time::UNIX_EPOCH;
|
||||
use teepot::client::TcbLevel;
|
||||
use teepot::sgx::{tee_qv_get_collateral, verify_quote_with_collateral, QuoteVerificationResult};
|
||||
|
||||
fn main() -> Result<()> {
|
||||
// read myquote from stdin
|
||||
let mut myquote = Vec::new();
|
||||
std::io::stdin()
|
||||
.read_to_end(&mut myquote)
|
||||
.context("Failed to read quote from stdin")?;
|
||||
|
||||
let collateral = tee_qv_get_collateral(&myquote).context("Failed to get collateral")?;
|
||||
|
||||
let unix_time: i64 = std::time::SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.unwrap()
|
||||
.as_secs() as _;
|
||||
|
||||
let QuoteVerificationResult {
|
||||
collateral_expired,
|
||||
result,
|
||||
|
||||
quote,
|
||||
advisories,
|
||||
..
|
||||
} = verify_quote_with_collateral(&myquote, Some(&collateral), unix_time.saturating_add(60))
|
||||
.context("Failed to verify quote with collateral")?;
|
||||
|
||||
if collateral_expired {
|
||||
bail!("Freshly fetched collateral expired");
|
||||
}
|
||||
|
||||
let tcblevel = TcbLevel::from(result);
|
||||
if tcblevel != TcbLevel::Ok {
|
||||
println!("Quote verification result: {}", tcblevel);
|
||||
}
|
||||
|
||||
for advisory in advisories {
|
||||
println!("\tInfo: Advisory ID: {advisory}");
|
||||
}
|
||||
|
||||
println!("Quote verified successfully: {}", tcblevel);
|
||||
println!("mrsigner: {}", hex::encode(quote.report_body.mrsigner));
|
||||
println!("mrenclave: {}", hex::encode(quote.report_body.mrenclave));
|
||||
println!("reportdata: {}", hex::encode(quote.report_body.reportdata));
|
||||
|
||||
Ok(())
|
||||
}
|
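The `reportdata` printed above is expected to carry what `load_rustls_config` put into the quote earlier: the SHA-256 of the server's TLS public key, zero-padded to 64 bytes. A hedged sketch of how such a binding could be checked (the function and its parameters are illustrative, not part of this crate):

```rust
use sha2::{Digest, Sha256};

/// Illustrative check: the first 32 bytes must equal sha256(spki_der), the rest must be zero.
fn report_data_matches(report_data: &[u8; 64], spki_der: &[u8]) -> bool {
    let hash = Sha256::digest(spki_der);
    report_data[..32] == hash[..] && report_data[32..].iter().all(|&b| b == 0)
}

fn main() {
    let spki = b"example subject public key info";
    let mut rd = [0u8; 64];
    rd[..32].copy_from_slice(&Sha256::digest(spki));
    assert!(report_data_matches(&rd, spki));
}
```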
11
crates/intel-tee-quote-verification-rs/Cargo.toml
Normal file
|
@ -0,0 +1,11 @@
# Fork of the original crate: https://github.com/intel/SGXDataCenterAttestationPrimitives

[package]
name = "intel-tee-quote-verification-rs"
version = "0.2.1"
edition = "2021"
license = "BSD-3-Clause"

[dependencies]
intel-tee-quote-verification-sys.workspace = true
serde.workspace = true
|
38
crates/intel-tee-quote-verification-rs/License.txt
Normal file
|
@ -0,0 +1,38 @@
|
|||
BSD License
|
||||
|
||||
Copyright (C) 2011-2021 Intel Corporation. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Intel Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived
|
||||
from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
|
||||
==============================================================
|
||||
|
||||
pce.signed.dll, qve.signed.dll,id_enclave.signed.dll and qe3.signed.dll,
|
||||
libsgx_pce.signed.so, libsgx_qve.signed.so, libsgx_id_enclave.signed.so,
|
||||
libsgx_qe3.signed.so and libsgx_tdqe.signed.so are licensed under
|
||||
3-Clause BSD License.
|
||||
|
553
crates/intel-tee-quote-verification-rs/src/lib.rs
Normal file
|
@ -0,0 +1,553 @@
|
|||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
/*
|
||||
* Copyright (C) 2011-2021 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in
|
||||
* the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Intel Corporation nor the names of its
|
||||
* contributors may be used to endorse or promote products derived
|
||||
* from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
//! Intel(R) Software Guard Extensions Data Center Attestation Primitives (Intel(R) SGX DCAP)
|
||||
//! Rust wrapper for Quote Verification Library
|
||||
//! ================================================
|
||||
//!
|
||||
//! This is a safe wrapper for **sgx-dcap-quoteverify-sys**.
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::marker::PhantomData;
|
||||
use std::mem;
|
||||
use std::ops::Deref;
|
||||
use std::slice;
|
||||
|
||||
use intel_tee_quote_verification_sys as qvl_sys;
|
||||
|
||||
pub use qvl_sys::quote3_error_t;
|
||||
pub use qvl_sys::sgx_ql_qe_report_info_t;
|
||||
pub use qvl_sys::sgx_ql_qv_result_t;
|
||||
pub use qvl_sys::sgx_ql_qv_supplemental_t;
|
||||
pub use qvl_sys::sgx_ql_qve_collateral_t;
|
||||
pub use qvl_sys::sgx_ql_request_policy_t;
|
||||
pub use qvl_sys::sgx_qv_path_type_t;
|
||||
pub use qvl_sys::tdx_ql_qve_collateral_t;
|
||||
pub use qvl_sys::tee_supp_data_descriptor_t;
|
||||
|
||||
/// When the Quoting Verification Library is linked to a process, it needs to know the proper enclave loading policy.
|
||||
/// The library may be linked with a long lived process, such as a service, where it can load the enclaves and leave
|
||||
/// them loaded (persistent). This better ensures that the enclaves will be available upon quote requests and not subject
|
||||
/// to EPC limitations if loaded on demand. However, if the Quoting library is linked with an application process, there
|
||||
/// may be many applications using the Quoting library, and a better utilization of EPC is to load and unload the quoting
|
||||
/// enclaves on demand (ephemeral). The library will be shipped with a default policy of loading enclaves and leaving
|
||||
/// them loaded until the library is unloaded (PERSISTENT). If the policy is set to EPHEMERAL, then the QE and PCE will
|
||||
/// be loaded and unloaded on-demand. If either enclave is already loaded when the policy is changed to EPHEMERAL, the
|
||||
/// enclaves will be unloaded before returning.
|
||||
///
|
||||
/// # Param
|
||||
/// - **policy**\
|
||||
/// Set the requested enclave loading policy to either *SGX_QL_PERSISTENT*, *SGX_QL_EPHEMERAL* or *SGX_QL_DEFAULT*.
|
||||
///
|
||||
/// # Return
|
||||
/// - ***SGX_QL_SUCCESS***\
|
||||
/// Successfully set the enclave loading policy for the quoting library's enclaves.\
|
||||
/// - ***SGX_QL_UNSUPPORTED_LOADING_POLICY***\
|
||||
/// The selected policy is not supported by the quoting library.\
|
||||
/// - ***SGX_QL_ERROR_UNEXPECTED***\
|
||||
/// Unexpected internal error.
|
||||
///
|
||||
/// # Examples
|
||||
/// ```
|
||||
/// use intel_tee_quote_verification_rs::*;
|
||||
///
|
||||
/// let policy = sgx_ql_request_policy_t::SGX_QL_DEFAULT;
|
||||
/// let ret = sgx_qv_set_enclave_load_policy(policy);
|
||||
///
|
||||
/// assert_eq!(ret, quote3_error_t::SGX_QL_SUCCESS);
|
||||
/// ```
|
||||
pub fn sgx_qv_set_enclave_load_policy(policy: sgx_ql_request_policy_t) -> quote3_error_t {
|
||||
unsafe { qvl_sys::sgx_qv_set_enclave_load_policy(policy) }
|
||||
}
|
||||
|
||||
/// Get SGX supplemental data required size.
|
||||
///
|
||||
/// # Return
|
||||
/// Size of the supplemental data in bytes.
|
||||
///
|
||||
/// Status code of the operation, one of:
|
||||
/// - *SGX_QL_ERROR_INVALID_PARAMETER*
|
||||
/// - *SGX_QL_ERROR_QVL_QVE_MISMATCH*
|
||||
/// - *SGX_QL_ENCLAVE_LOAD_ERROR*
|
||||
///
|
||||
/// # Examples
|
||||
/// ```
|
||||
/// use intel_tee_quote_verification_rs::*;
|
||||
///
|
||||
/// let data_size = sgx_qv_get_quote_supplemental_data_size().unwrap();
|
||||
///
|
||||
/// assert_eq!(data_size, std::mem::size_of::<sgx_ql_qv_supplemental_t>() as u32);
|
||||
/// ```
|
||||
pub fn sgx_qv_get_quote_supplemental_data_size() -> Result<u32, quote3_error_t> {
|
||||
let mut data_size = 0u32;
|
||||
unsafe {
|
||||
match qvl_sys::sgx_qv_get_quote_supplemental_data_size(&mut data_size) {
|
||||
quote3_error_t::SGX_QL_SUCCESS => Ok(data_size),
|
||||
error_code => Err(error_code),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Perform SGX ECDSA quote verification.
|
||||
///
|
||||
/// # Param
|
||||
/// - **quote**\
|
||||
/// SGX Quote, presented as u8 vector.
|
||||
/// - **quote_collateral**\
|
||||
/// Quote Certification Collateral provided by the caller.
|
||||
/// - **expiration_check_date**\
|
||||
/// This is the date that the QvE will use to determine if any of the provided collateral has expired.
|
||||
/// - **qve_report_info**\
|
||||
/// This parameter can be used in 2 ways.\
|
||||
/// - If qve_report_info is NOT None, the API will use Intel QvE to perform quote verification, and QvE will generate a report using the target_info in sgx_ql_qe_report_info_t structure.\
|
||||
/// - If qve_report_info is None, the API will use the QVL library to perform quote verification; note that the results cannot be cryptographically authenticated in this mode.
|
||||
/// - **supplemental_data_size**\
|
||||
/// Size of the supplemental data (in bytes).
|
||||
/// - **supplemental_data**\
|
||||
/// The parameter is optional. If it is None, supplemental_data_size must be 0.
|
||||
///
|
||||
/// # Return
|
||||
/// Result type of (collateral_expiration_status, verification_result).
|
||||
///
|
||||
/// Status code of the operation, one of:
|
||||
/// - *SGX_QL_ERROR_INVALID_PARAMETER*
|
||||
/// - *SGX_QL_QUOTE_FORMAT_UNSUPPORTED*
|
||||
/// - *SGX_QL_QUOTE_CERTIFICATION_DATA_UNSUPPORTED*
|
||||
/// - *SGX_QL_UNABLE_TO_GENERATE_REPORT*
|
||||
/// - *SGX_QL_CRL_UNSUPPORTED_FORMAT*
|
||||
/// - *SGX_QL_ERROR_UNEXPECTED*
|
||||
///
|
||||
pub fn sgx_qv_verify_quote(
|
||||
quote: &[u8],
|
||||
quote_collateral: Option<&Collateral>,
|
||||
expiration_check_date: i64,
|
||||
qve_report_info: Option<&mut sgx_ql_qe_report_info_t>,
|
||||
supplemental_data_size: u32,
|
||||
supplemental_data: Option<&mut sgx_ql_qv_supplemental_t>,
|
||||
) -> Result<(u32, sgx_ql_qv_result_t), quote3_error_t> {
|
||||
let mut collateral_expiration_status = 1u32;
|
||||
let mut quote_verification_result = sgx_ql_qv_result_t::SGX_QL_QV_RESULT_UNSPECIFIED;
|
||||
|
||||
let quote_collateral = quote_collateral.map(SgxQlQveCollateralT::from);
|
||||
let p_quote_collateral = quote_collateral.as_deref().map_or(std::ptr::null(), |p| p);
|
||||
|
||||
let p_qve_report_info = match qve_report_info {
|
||||
Some(p) => p,
|
||||
None => std::ptr::null_mut(),
|
||||
};
|
||||
let p_supplemental_data = match supplemental_data {
|
||||
Some(p) => p as *mut sgx_ql_qv_supplemental_t as *mut u8,
|
||||
None => std::ptr::null_mut(),
|
||||
};
|
||||
|
||||
unsafe {
|
||||
match qvl_sys::sgx_qv_verify_quote(
|
||||
quote.as_ptr(),
|
||||
quote.len() as u32,
|
||||
p_quote_collateral,
|
||||
expiration_check_date,
|
||||
&mut collateral_expiration_status,
|
||||
&mut quote_verification_result,
|
||||
p_qve_report_info,
|
||||
supplemental_data_size,
|
||||
p_supplemental_data,
|
||||
) {
|
||||
quote3_error_t::SGX_QL_SUCCESS => {
|
||||
Ok((collateral_expiration_status, quote_verification_result))
|
||||
}
|
||||
error_code => Err(error_code),
|
||||
}
|
||||
}
|
||||
}
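
A minimal usage sketch (not part of the original file) tying `sgx_qv_get_quote_supplemental_data_size` and `sgx_qv_verify_quote` together on the untrusted QVL path; the origin of the `quote` buffer, the choice of "now" as the expiration check date, and the handling of an expired-collateral status are assumptions made for illustration:

```rust
use intel_tee_quote_verification_rs::*;

/// Verify an SGX ECDSA quote on the untrusted QVL path (no QvE report).
fn verify_sgx_quote(quote: &[u8]) -> Result<sgx_ql_qv_result_t, quote3_error_t> {
    // Ask the library how large the supplemental data structure is.
    let supplemental_data_size = sgx_qv_get_quote_supplemental_data_size()?;
    // bindgen derives `Default` for the generated structs (see build.rs).
    let mut supplemental_data = sgx_ql_qv_supplemental_t::default();

    // Check collateral expiration against "now" (seconds since the Unix epoch).
    let expiration_check_date = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .expect("system clock before 1970")
        .as_secs() as i64;

    // `None` collateral lets the quote provider library fetch it internally.
    let (collateral_expired, verification_result) = sgx_qv_verify_quote(
        quote,
        None,
        expiration_check_date,
        None,
        supplemental_data_size,
        Some(&mut supplemental_data),
    )?;

    if collateral_expired != 0 {
        // Expired collateral is a policy decision; here it is treated as an error.
        return Err(quote3_error_t::SGX_QL_ERROR_UNEXPECTED);
    }
    Ok(verification_result)
}
```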
|
||||
|
||||
/// Get TDX supplemental data required size.
|
||||
///
|
||||
/// # Return
|
||||
/// Size of the supplemental data in bytes.
|
||||
///
|
||||
/// Status code of the operation, one of:
|
||||
/// - *SGX_QL_ERROR_INVALID_PARAMETER*
|
||||
/// - *SGX_QL_ERROR_QVL_QVE_MISMATCH*
|
||||
/// - *SGX_QL_ENCLAVE_LOAD_ERROR*
|
||||
///
|
||||
/// # Examples
|
||||
/// ```
|
||||
/// use intel_tee_quote_verification_rs::*;
|
||||
///
|
||||
/// let data_size = tdx_qv_get_quote_supplemental_data_size().unwrap();
|
||||
///
|
||||
/// assert_eq!(data_size, std::mem::size_of::<sgx_ql_qv_supplemental_t>() as u32);
|
||||
/// ```
|
||||
pub fn tdx_qv_get_quote_supplemental_data_size() -> Result<u32, quote3_error_t> {
|
||||
let mut data_size = 0u32;
|
||||
unsafe {
|
||||
match qvl_sys::tdx_qv_get_quote_supplemental_data_size(&mut data_size) {
|
||||
quote3_error_t::SGX_QL_SUCCESS => Ok(data_size),
|
||||
error_code => Err(error_code),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Perform TDX ECDSA quote verification.
|
||||
///
|
||||
/// # Param
|
||||
/// - **quote**\
|
||||
/// TDX Quote, presented as u8 vector.
|
||||
/// - **quote_collateral**\
|
||||
/// Quote Certification Collateral provided by the caller.
|
||||
/// - **expiration_check_date**\
|
||||
/// This is the date that the QvE will use to determine if any of the provided collateral has expired.
|
||||
/// - **qve_report_info**\
|
||||
/// This parameter can be used in 2 ways.\
|
||||
/// - If qve_report_info is NOT None, the API will use Intel QvE to perform quote verification, and QvE will generate a report using the target_info in sgx_ql_qe_report_info_t structure.\
|
||||
/// - If qve_report_info is None, the API will use the QVL library to perform quote verification; note that the results cannot be cryptographically authenticated in this mode.
|
||||
/// - **supplemental_data_size**\
|
||||
/// Size of the supplemental data (in bytes).
|
||||
/// - **supplemental_data**\
|
||||
/// The parameter is optional. If it is None, supplemental_data_size must be 0.
|
||||
///
|
||||
/// # Return
|
||||
/// Result type of (collateral_expiration_status, verification_result).
|
||||
///
|
||||
/// Status code of the operation, one of:
|
||||
/// - *SGX_QL_ERROR_INVALID_PARAMETER*
|
||||
/// - *SGX_QL_QUOTE_FORMAT_UNSUPPORTED*
|
||||
/// - *SGX_QL_QUOTE_CERTIFICATION_DATA_UNSUPPORTED*
|
||||
/// - *SGX_QL_UNABLE_TO_GENERATE_REPORT*
|
||||
/// - *SGX_QL_CRL_UNSUPPORTED_FORMAT*
|
||||
/// - *SGX_QL_ERROR_UNEXPECTED*
|
||||
///
|
||||
pub fn tdx_qv_verify_quote(
|
||||
quote: &[u8],
|
||||
quote_collateral: Option<&Collateral>,
|
||||
expiration_check_date: i64,
|
||||
qve_report_info: Option<&mut sgx_ql_qe_report_info_t>,
|
||||
supplemental_data_size: u32,
|
||||
supplemental_data: Option<&mut sgx_ql_qv_supplemental_t>,
|
||||
) -> Result<(u32, sgx_ql_qv_result_t), quote3_error_t> {
|
||||
let mut collateral_expiration_status = 1u32;
|
||||
let mut quote_verification_result = sgx_ql_qv_result_t::SGX_QL_QV_RESULT_UNSPECIFIED;
|
||||
|
||||
let quote_collateral = quote_collateral.map(SgxQlQveCollateralT::from);
|
||||
let p_quote_collateral = quote_collateral.as_deref().map_or(std::ptr::null(), |p| p);
|
||||
|
||||
let p_qve_report_info = match qve_report_info {
|
||||
Some(p) => p,
|
||||
None => std::ptr::null_mut(),
|
||||
};
|
||||
let p_supplemental_data = match supplemental_data {
|
||||
Some(p) => p as *mut sgx_ql_qv_supplemental_t as *mut u8,
|
||||
None => std::ptr::null_mut(),
|
||||
};
|
||||
|
||||
unsafe {
|
||||
match qvl_sys::tdx_qv_verify_quote(
|
||||
quote.as_ptr(),
|
||||
quote.len() as u32,
|
||||
p_quote_collateral,
|
||||
expiration_check_date,
|
||||
&mut collateral_expiration_status,
|
||||
&mut quote_verification_result,
|
||||
p_qve_report_info,
|
||||
supplemental_data_size,
|
||||
p_supplemental_data,
|
||||
) {
|
||||
quote3_error_t::SGX_QL_SUCCESS => {
|
||||
Ok((collateral_expiration_status, quote_verification_result))
|
||||
}
|
||||
error_code => Err(error_code),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Set the full path of the QVE and QPL libraries.\
|
||||
/// The function takes the enum and the corresponding full path.
|
||||
///
|
||||
/// # Param
|
||||
/// - **path_type**\
|
||||
/// The type of binary being passed in.
|
||||
/// - **path**\
|
||||
/// It should be a valid full path.
|
||||
///
|
||||
/// # Return
|
||||
/// - ***SGX_QL_SUCCESS***\
|
||||
/// Successfully set the full path.
|
||||
/// - ***SGX_QL_ERROR_INVALID_PARAMETER***\
|
||||
/// Path is not a valid full path or the path is too long.
|
||||
///
|
||||
#[cfg(target_os = "linux")]
|
||||
pub fn sgx_qv_set_path(path_type: sgx_qv_path_type_t, path: &str) -> quote3_error_t {
|
||||
match std::ffi::CString::new(path) {
|
||||
Ok(path) => unsafe { qvl_sys::sgx_qv_set_path(path_type, path.as_ptr()) },
|
||||
_ => quote3_error_t::SGX_QL_ERROR_INVALID_PARAMETER,
|
||||
}
|
||||
}
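
A short sketch of overriding the library locations; the `sgx_qv_path_type_t` variant names are taken from the Intel DCAP header, and the file paths are purely illustrative for a typical Linux installation:

```rust
use intel_tee_quote_verification_rs::*;

/// Only available on Linux, mirroring the `#[cfg(target_os = "linux")]` above.
#[cfg(target_os = "linux")]
fn configure_qvl_paths() {
    // Illustrative installation paths; adjust to the local DCAP setup.
    let ret = sgx_qv_set_path(
        sgx_qv_path_type_t::SGX_QV_QVE_PATH,
        "/usr/lib/x86_64-linux-gnu/libsgx_qve.signed.so.1",
    );
    assert_eq!(ret, quote3_error_t::SGX_QL_SUCCESS);

    let ret = sgx_qv_set_path(
        sgx_qv_path_type_t::SGX_QV_QPL_PATH,
        "/usr/lib/x86_64-linux-gnu/libdcap_quoteprov.so.1",
    );
    assert_eq!(ret, quote3_error_t::SGX_QL_SUCCESS);
}
```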
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct Collateral {
|
||||
pub major_version: u16,
|
||||
pub minor_version: u16,
|
||||
pub tee_type: u32,
|
||||
pub pck_crl_issuer_chain: Box<[u8]>,
|
||||
pub root_ca_crl: Box<[u8]>,
|
||||
pub pck_crl: Box<[u8]>,
|
||||
pub tcb_info_issuer_chain: Box<[u8]>,
|
||||
pub tcb_info: Box<[u8]>,
|
||||
pub qe_identity_issuer_chain: Box<[u8]>,
|
||||
pub qe_identity: Box<[u8]>,
|
||||
}
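
Since `Collateral` owns its buffers and derives `Serialize`/`Deserialize`, it can be cached alongside a quote and reused later; a minimal sketch, assuming `serde_json` is added as a dependency (it is not declared by this crate):

```rust
use intel_tee_quote_verification_rs::Collateral;

/// Round-trip a collateral blob through JSON, e.g. for on-disk caching.
fn roundtrip_collateral(collateral: &Collateral) -> serde_json::Result<Collateral> {
    let json = serde_json::to_string(collateral)?;
    serde_json::from_str(&json)
}
```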
|
||||
|
||||
impl TryFrom<&sgx_ql_qve_collateral_t> for Collateral {
|
||||
type Error = ();
|
||||
|
||||
fn try_from(value: &sgx_ql_qve_collateral_t) -> Result<Self, Self::Error> {
|
||||
fn to_boxed_slice(p: *mut ::std::os::raw::c_char, size: u32) -> Result<Box<[u8]>, ()> {
|
||||
if p.is_null() {
|
||||
return Err(());
|
||||
}
|
||||
Ok(Box::from(unsafe {
|
||||
slice::from_raw_parts(p as _, size as _)
|
||||
}))
|
||||
}
|
||||
|
||||
Ok(Collateral {
|
||||
major_version: unsafe { value.__bindgen_anon_1.__bindgen_anon_1.major_version },
|
||||
minor_version: unsafe { value.__bindgen_anon_1.__bindgen_anon_1.minor_version },
|
||||
tee_type: value.tee_type,
|
||||
pck_crl_issuer_chain: to_boxed_slice(
|
||||
value.pck_crl_issuer_chain,
|
||||
value.pck_crl_issuer_chain_size,
|
||||
)?,
|
||||
root_ca_crl: to_boxed_slice(value.root_ca_crl, value.root_ca_crl_size)?,
|
||||
pck_crl: to_boxed_slice(value.pck_crl, value.pck_crl_size)?,
|
||||
tcb_info_issuer_chain: to_boxed_slice(
|
||||
value.tcb_info_issuer_chain,
|
||||
value.tcb_info_issuer_chain_size,
|
||||
)?,
|
||||
tcb_info: to_boxed_slice(value.tcb_info, value.tcb_info_size)?,
|
||||
qe_identity_issuer_chain: to_boxed_slice(
|
||||
value.qe_identity_issuer_chain,
|
||||
value.qe_identity_issuer_chain_size,
|
||||
)?,
|
||||
qe_identity: to_boxed_slice(value.qe_identity, value.qe_identity_size)?,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Referential struct: an FFI view of `Collateral` whose pointers borrow from the owned buffers.
|
||||
struct SgxQlQveCollateralT<'a> {
|
||||
inner: sgx_ql_qve_collateral_t,
|
||||
_phantom: PhantomData<&'a ()>,
|
||||
}
|
||||
|
||||
// Create the referential struct without copying the underlying collateral buffers.
|
||||
impl<'a> From<&'a Collateral> for SgxQlQveCollateralT<'a> {
|
||||
fn from(data: &'a Collateral) -> Self {
|
||||
let mut this = SgxQlQveCollateralT {
|
||||
inner: sgx_ql_qve_collateral_t {
|
||||
__bindgen_anon_1: Default::default(),
|
||||
tee_type: data.tee_type,
|
||||
pck_crl_issuer_chain: data.pck_crl_issuer_chain.as_ptr() as _,
|
||||
pck_crl_issuer_chain_size: data.pck_crl_issuer_chain.len() as _,
|
||||
root_ca_crl: data.root_ca_crl.as_ptr() as _,
|
||||
root_ca_crl_size: data.root_ca_crl.len() as _,
|
||||
pck_crl: data.pck_crl.as_ptr() as _,
|
||||
pck_crl_size: data.pck_crl.len() as _,
|
||||
tcb_info_issuer_chain: data.tcb_info_issuer_chain.as_ptr() as _,
|
||||
tcb_info_issuer_chain_size: data.tcb_info_issuer_chain.len() as _,
|
||||
tcb_info: data.tcb_info.as_ptr() as _,
|
||||
tcb_info_size: data.tcb_info.len() as _,
|
||||
qe_identity_issuer_chain: data.qe_identity_issuer_chain.as_ptr() as _,
|
||||
qe_identity_issuer_chain_size: data.qe_identity_issuer_chain.len() as _,
|
||||
qe_identity: data.qe_identity.as_ptr() as _,
|
||||
qe_identity_size: data.qe_identity.len() as _,
|
||||
},
|
||||
_phantom: PhantomData,
|
||||
};
|
||||
this.inner.__bindgen_anon_1.__bindgen_anon_1.major_version = data.major_version;
|
||||
this.inner.__bindgen_anon_1.__bindgen_anon_1.minor_version = data.minor_version;
|
||||
this
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> Deref for SgxQlQveCollateralT<'a> {
|
||||
type Target = sgx_ql_qve_collateral_t;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&self.inner
|
||||
}
|
||||
}
|
||||
|
||||
/// Get quote verification collateral.
|
||||
///
|
||||
/// # Param
|
||||
/// - **quote**\
|
||||
/// SGX/TDX Quote, presented as u8 vector.
|
||||
///
|
||||
/// # Return
|
||||
/// Result type of quote_collateral.
|
||||
///
|
||||
/// - **quote_collateral**\
|
||||
/// This is the Quote Certification Collateral retrieved based on Quote.
|
||||
///
|
||||
/// Status code of the operation, one of:
|
||||
/// - *SGX_QL_ERROR_INVALID_PARAMETER*
|
||||
/// - *SGX_QL_PLATFORM_LIB_UNAVAILABLE*
|
||||
/// - *SGX_QL_PCK_CERT_CHAIN_ERROR*
|
||||
/// - *SGX_QL_PCK_CERT_UNSUPPORTED_FORMAT*
|
||||
/// - *SGX_QL_QUOTE_FORMAT_UNSUPPORTED*
|
||||
/// - *SGX_QL_OUT_OF_MEMORY*
|
||||
/// - *SGX_QL_NO_QUOTE_COLLATERAL_DATA*
|
||||
/// - *SGX_QL_ERROR_UNEXPECTED*
|
||||
///
|
||||
pub fn tee_qv_get_collateral(quote: &[u8]) -> Result<Collateral, quote3_error_t> {
|
||||
let mut buf = std::ptr::null_mut();
|
||||
let mut buf_len = 0u32;
|
||||
|
||||
match unsafe {
|
||||
qvl_sys::tee_qv_get_collateral(quote.as_ptr(), quote.len() as u32, &mut buf, &mut buf_len)
|
||||
} {
|
||||
quote3_error_t::SGX_QL_SUCCESS => {
|
||||
assert!(!buf.is_null());
|
||||
assert!(buf_len > 0);
|
||||
assert_eq!(
|
||||
(buf as usize) % mem::align_of::<sgx_ql_qve_collateral_t>(),
|
||||
0
|
||||
);
|
||||
// SAFETY: buf is not null, buf_len is not zero, and buf is aligned.
|
||||
let orig_collateral = &unsafe { *(buf as *const sgx_ql_qve_collateral_t) };
|
||||
Collateral::try_from(orig_collateral).map_err(|_| quote3_error_t::SGX_QL_ERROR_MAX)
|
||||
}
|
||||
error_code => Err(error_code),
|
||||
}
|
||||
}
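
A small sketch of fetching collateral ahead of verification; it assumes a working quote provider library/PCCS behind `tee_qv_get_collateral` and a caller-supplied `quote`:

```rust
use intel_tee_quote_verification_rs::*;

/// Fetch and inspect the certification collateral for a quote.
fn fetch_collateral(quote: &[u8]) -> Result<Collateral, quote3_error_t> {
    let collateral = tee_qv_get_collateral(quote)?;
    println!(
        "collateral v{}.{}, TEE type {:#x}, TCB info: {} bytes",
        collateral.major_version,
        collateral.minor_version,
        collateral.tee_type,
        collateral.tcb_info.len()
    );
    Ok(collateral)
}
```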
|
||||
|
||||
/// Get supplemental data latest version and required size, support both SGX and TDX.
|
||||
///
|
||||
/// # Param
|
||||
/// - **quote**\
|
||||
/// SGX/TDX Quote, presented as u8 vector.
|
||||
///
|
||||
/// # Return
|
||||
/// Result type of (version, data_size) tuple.
|
||||
///
|
||||
/// - **version**\
|
||||
/// Latest version of the supplemental data.
|
||||
/// - **data_size**\
|
||||
/// The size of the buffer in bytes required to contain all of the supplemental data.
|
||||
///
|
||||
pub fn tee_get_supplemental_data_version_and_size(
|
||||
quote: &[u8],
|
||||
) -> Result<(u32, u32), quote3_error_t> {
|
||||
let mut version = 0u32;
|
||||
let mut data_size = 0u32;
|
||||
|
||||
unsafe {
|
||||
match qvl_sys::tee_get_supplemental_data_version_and_size(
|
||||
quote.as_ptr(),
|
||||
quote.len() as u32,
|
||||
&mut version,
|
||||
&mut data_size,
|
||||
) {
|
||||
quote3_error_t::SGX_QL_SUCCESS => Ok((version, data_size)),
|
||||
error_code => Err(error_code),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Perform quote verification for SGX and TDX.\
|
||||
/// This API works the same as the old one, but takes a new parameter to describe the supplemental data (supp_data_descriptor).
|
||||
///
|
||||
/// # Param
|
||||
/// - **quote**\
|
||||
/// SGX/TDX Quote, presented as u8 vector.
|
||||
/// - **quote_collateral**\
|
||||
/// Quote Certification Collateral provided by the caller.
|
||||
/// - **expiration_check_date**\
|
||||
/// This is the date that the QvE will use to determine if any of the provided collateral has expired.
|
||||
/// - **qve_report_info**\
|
||||
/// This parameter can be used in 2 ways.\
|
||||
/// - If qve_report_info is NOT None, the API will use Intel QvE to perform quote verification, and QvE will generate a report using the target_info in sgx_ql_qe_report_info_t structure.\
|
||||
/// - If qve_report_info is None, the API will use the QVL library to perform quote verification; note that the results cannot be cryptographically authenticated in this mode.
|
||||
/// - **supp_data_descriptor**\
|
||||
/// *tee_supp_data_descriptor_t* structure.\
|
||||
/// You can specify the major version of supplemental data by setting supp_data_descriptor.major_version.\
|
||||
/// If supp_data_descriptor is None, no supplemental data is returned.\
|
||||
/// If supp_data_descriptor.major_version == 0, then return the latest version of the *sgx_ql_qv_supplemental_t* structure.\
|
||||
/// If supp_data_descriptor.major_version <= latest supported version, return the latest minor version associated with that major version.\
|
||||
/// If supp_data_descriptor.major_version > latest supported version, return an error *SGX_QL_SUPPLEMENTAL_DATA_VERSION_NOT_SUPPORTED*.
|
||||
///
|
||||
/// # Return
|
||||
/// Result type of (collateral_expiration_status, verification_result).
|
||||
///
|
||||
/// Status code of the operation, one of:
|
||||
/// - *SGX_QL_ERROR_INVALID_PARAMETER*
|
||||
/// - *SGX_QL_QUOTE_FORMAT_UNSUPPORTED*
|
||||
/// - *SGX_QL_QUOTE_CERTIFICATION_DATA_UNSUPPORTED*
|
||||
/// - *SGX_QL_UNABLE_TO_GENERATE_REPORT*
|
||||
/// - *SGX_QL_CRL_UNSUPPORTED_FORMAT*
|
||||
/// - *SGX_QL_ERROR_UNEXPECTED*
|
||||
///
|
||||
pub fn tee_verify_quote(
|
||||
quote: &[u8],
|
||||
quote_collateral: Option<&Collateral>,
|
||||
expiration_check_date: i64,
|
||||
qve_report_info: Option<&mut sgx_ql_qe_report_info_t>,
|
||||
supp_data_descriptor: Option<&mut tee_supp_data_descriptor_t>,
|
||||
) -> Result<(u32, sgx_ql_qv_result_t), quote3_error_t> {
|
||||
let mut collateral_expiration_status = 1u32;
|
||||
let mut quote_verification_result = sgx_ql_qv_result_t::SGX_QL_QV_RESULT_UNSPECIFIED;
|
||||
|
||||
let quote_collateral = quote_collateral.map(SgxQlQveCollateralT::from);
|
||||
let p_quote_collateral = quote_collateral.as_deref().map_or(std::ptr::null(), |p| p);
|
||||
|
||||
let p_qve_report_info = qve_report_info.map_or(std::ptr::null_mut(), |p| p);
|
||||
|
||||
let p_supp_data_descriptor = supp_data_descriptor.map_or(std::ptr::null_mut(), |p| p);
|
||||
|
||||
unsafe {
|
||||
match qvl_sys::tee_verify_quote(
|
||||
quote.as_ptr(),
|
||||
quote.len() as u32,
|
||||
p_quote_collateral as _,
|
||||
expiration_check_date,
|
||||
&mut collateral_expiration_status,
|
||||
&mut quote_verification_result,
|
||||
p_qve_report_info,
|
||||
p_supp_data_descriptor,
|
||||
) {
|
||||
quote3_error_t::SGX_QL_SUCCESS => {
|
||||
Ok((collateral_expiration_status, quote_verification_result))
|
||||
}
|
||||
error_code => Err(error_code),
|
||||
}
|
||||
}
|
||||
}
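
A hedged end-to-end sketch combining the helpers above: fetch collateral, size a supplemental-data buffer via `tee_get_supplemental_data_version_and_size`, and verify on the untrusted QVL path. The `tee_supp_data_descriptor_t` field names follow the bindgen-generated layout of the Intel header; the quote source and the policy applied to the returned `sgx_ql_qv_result_t` are left to the application:

```rust
use intel_tee_quote_verification_rs::*;

/// End-to-end verification of an SGX or TDX quote on the untrusted QVL path.
fn verify_tee_quote(quote: &[u8]) -> Result<sgx_ql_qv_result_t, quote3_error_t> {
    // 1. Fetch the certification collateral for this quote via the QPL/PCCS.
    let collateral = tee_qv_get_collateral(quote)?;

    // 2. Ask for the latest supplemental data size and prepare a descriptor.
    let (_version, supp_size) = tee_get_supplemental_data_version_and_size(quote)?;
    let mut supp_buf = vec![0u8; supp_size as usize];
    let mut supp_descriptor = tee_supp_data_descriptor_t::default();
    supp_descriptor.major_version = 0; // 0 selects the latest major version
    supp_descriptor.data_size = supp_size;
    supp_descriptor.p_data = supp_buf.as_mut_ptr();

    // 3. Verify, checking collateral expiration against the current time.
    let now = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .expect("system clock before 1970")
        .as_secs() as i64;

    let (collateral_expired, verification_result) = tee_verify_quote(
        quote,
        Some(&collateral),
        now,
        None, // untrusted QVL path, no QvE report requested
        Some(&mut supp_descriptor),
    )?;

    if collateral_expired != 0 {
        eprintln!("warning: collateral has expired relative to the check date");
    }
    Ok(verification_result)
}
```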
|
10
crates/intel-tee-quote-verification-sys/Cargo.toml
Normal file
|
@ -0,0 +1,10 @@
|
|||
# Fork of the original crate: https://github.com/intel/SGXDataCenterAttestationPrimitives
|
||||
|
||||
[package]
|
||||
name = "intel-tee-quote-verification-sys"
|
||||
version = "0.2.0"
|
||||
edition = "2021"
|
||||
license = "BSD-3-Clause"
|
||||
|
||||
[build-dependencies]
|
||||
bindgen.workspace = true
|
38
crates/intel-tee-quote-verification-sys/License.txt
Normal file
|
@ -0,0 +1,38 @@
|
|||
BSD License
|
||||
|
||||
Copyright (C) 2011-2021 Intel Corporation. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Intel Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived
|
||||
from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
|
||||
==============================================================
|
||||
|
||||
pce.signed.dll, qve.signed.dll, id_enclave.signed.dll and qe3.signed.dll,
|
||||
libsgx_pce.signed.so, libsgx_qve.signed.so, libsgx_id_enclave.signed.so,
|
||||
libsgx_qe3.signed.so and libsgx_tdqe.signed.so are licensed under
|
||||
3-Clause BSD License.
|
||||
|
33
crates/intel-tee-quote-verification-sys/bindings.h
Normal file
|
@ -0,0 +1,33 @@
|
|||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
/*
|
||||
* Copyright (C) 2011-2021 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in
|
||||
* the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Intel Corporation nor the names of its
|
||||
* contributors may be used to endorse or promote products derived
|
||||
* from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "sgx_dcap_quoteverify.h"
|
88
crates/intel-tee-quote-verification-sys/build.rs
Normal file
|
@ -0,0 +1,88 @@
|
|||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
/*
|
||||
* Copyright (C) 2011-2021 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in
|
||||
* the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Intel Corporation nor the names of its
|
||||
* contributors may be used to endorse or promote products derived
|
||||
* from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
|
||||
use std::env;
|
||||
use std::path::PathBuf;
|
||||
|
||||
fn main() {
|
||||
// Tell cargo to tell rustc to link the system
|
||||
// sgx-dcap-quoteverify shared library.
|
||||
println!("cargo:rustc-link-lib=sgx_dcap_quoteverify");
|
||||
|
||||
// Tell cargo to invalidate the built crate whenever the wrapper changes
|
||||
println!("cargo:rerun-if-changed=bindings.h");
|
||||
|
||||
// Add the SDK include directory to the search path if SGX_SDK is set in the environment
|
||||
let mut sdk_inc = String::from("");
|
||||
if let Ok(val) = env::var("SGX_SDK") {
|
||||
sdk_inc.push_str("-I");
|
||||
sdk_inc.push_str(&val);
|
||||
sdk_inc.push_str("/include/");
|
||||
}
|
||||
|
||||
// The bindgen::Builder is the main entry point
|
||||
// to bindgen, and lets you build up options for
|
||||
// the resulting bindings.
|
||||
let bindings = bindgen::Builder::default()
|
||||
// The input header we would like to generate
|
||||
// bindings for.
|
||||
.header("bindings.h")
|
||||
// Include search path
|
||||
.clang_arg(sdk_inc)
|
||||
// Convert C enum to Rust enum
|
||||
.rustified_enum("_quote3_error_t")
|
||||
.rustified_enum("_sgx_ql_request_policy")
|
||||
.rustified_enum("_sgx_ql_qv_result_t")
|
||||
.rustified_enum("sgx_qv_path_type_t")
|
||||
// Disable Debug trait for packed C structures
|
||||
.no_debug("_quote_t")
|
||||
.no_debug("_sgx_ql_auth_data_t")
|
||||
.no_debug("_sgx_ql_certification_data_t")
|
||||
.no_debug("_sgx_ql_ecdsa_sig_data_t")
|
||||
.no_debug("_sgx_quote3_t")
|
||||
.no_debug("_sgx_ql_att_key_id_param_t")
|
||||
// Enable Default trait
|
||||
.derive_default(true)
|
||||
// Tell cargo to invalidate the built crate whenever any of the
|
||||
// included header files changed.
|
||||
.parse_callbacks(Box::new(bindgen::CargoCallbacks::new()))
|
||||
// Finish the builder and generate the bindings.
|
||||
.generate()
|
||||
// Unwrap the Result and panic on failure.
|
||||
.expect("Unable to generate bindings");
|
||||
|
||||
// Write the bindings to the $OUT_DIR/bindings.rs file.
|
||||
let out_path = PathBuf::from(env::var("OUT_DIR").unwrap());
|
||||
bindings
|
||||
.write_to_file(out_path.join("bindings.rs"))
|
||||
.expect("Couldn't write bindings!");
|
||||
}
|
52
crates/intel-tee-quote-verification-sys/src/lib.rs
Normal file
|
@ -0,0 +1,52 @@
|
|||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
/*
|
||||
* Copyright (C) 2011-2021 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in
|
||||
* the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Intel Corporation nor the names of its
|
||||
* contributors may be used to endorse or promote products derived
|
||||
* from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
//! Intel(R) Software Guard Extensions Data Center Attestation Primitives (Intel(R) SGX DCAP)
|
||||
//! Rust raw FFI bindings for Quote Verification Library
|
||||
//! ================================================
|
||||
//!
|
||||
//! Please install the following prerequisites:
|
||||
//! * Intel(R) SGX DCAP Driver
|
||||
//! * Intel(R) SGX SDK
|
||||
//! * Intel(R) SGX DCAP Packages
|
||||
//! * Intel(R) SGX DCAP PCCS (Provisioning Certificate Caching Service)
|
||||
//!
|
||||
//! *Please refer to [SGX DCAP Linux installation guide](
|
||||
//! https://download.01.org/intel-sgx/latest/linux-latest/docs/Intel_SGX_SW_Installation_Guide_for_Linux.pdf)
|
||||
//! to install above dependencies.*
|
||||
//!
|
||||
//! *Note that you need to install **libsgx-dcap-quote-verify-dev** and **clang** for this package.*
|
||||
|
||||
#![allow(non_upper_case_globals)]
|
||||
#![allow(non_camel_case_types)]
|
||||
#![allow(non_snake_case)]
|
||||
#![allow(clippy::missing_safety_doc)]
|
||||
include!(concat!(env!("OUT_DIR"), "/bindings.rs"));
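
For contrast with the safe wrapper crate, a minimal sketch of calling one of the raw bindings directly; every call through this crate is `unsafe` and uses C-style out-parameters (it assumes `libsgx_dcap_quoteverify` is installed and linkable):

```rust
use intel_tee_quote_verification_sys as qvl_sys;

fn main() {
    let mut data_size = 0u32;
    // Raw FFI call: the caller is responsible for all pointer/size handling.
    let ret = unsafe { qvl_sys::sgx_qv_get_quote_supplemental_data_size(&mut data_size) };
    match ret {
        qvl_sys::quote3_error_t::SGX_QL_SUCCESS => {
            println!("supplemental data size: {data_size} bytes");
        }
        err => eprintln!("sgx_qv_get_quote_supplemental_data_size failed: {err:?}"),
    }
}
```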
|
76
deny.toml
Normal file
|
@ -0,0 +1,76 @@
|
|||
targets = []
|
||||
all-features = false
|
||||
no-default-features = false
|
||||
feature-depth = 1
|
||||
|
||||
[advisories]
|
||||
db-path = "~/.cargo/advisory-db"
|
||||
db-urls = ["https://github.com/rustsec/advisory-db"]
|
||||
vulnerability = "deny"
|
||||
unmaintained = "warn"
|
||||
yanked = "warn"
|
||||
notice = "warn"
|
||||
ignore = [
|
||||
# Sidechannel attack to get the private key https://rustsec.org/advisories/RUSTSEC-2023-0071
|
||||
# currently no RSA private key is used in the codebase,
|
||||
# except for signing SGX enclaves, which is only triggered with enough admin signatures
|
||||
"RUSTSEC-2023-0071",
|
||||
# ed25519-dalek 2.0.0-rc.3 already contains the fix for RUSTSEC-2022-0093
|
||||
"RUSTSEC-2022-0093",
|
||||
]
|
||||
|
||||
[licenses]
|
||||
unlicensed = "deny"
|
||||
copyleft = "deny"
|
||||
allow = [
|
||||
"MIT",
|
||||
"Apache-2.0",
|
||||
"ISC",
|
||||
"Unlicense",
|
||||
"MPL-2.0",
|
||||
"Unicode-DFS-2016",
|
||||
"CC0-1.0",
|
||||
"BSD-2-Clause",
|
||||
"BSD-3-Clause",
|
||||
"OpenSSL",
|
||||
]
|
||||
deny = []
|
||||
allow-osi-fsf-free = "neither"
|
||||
default = "deny"
|
||||
confidence-threshold = 0.8
|
||||
exceptions = []
|
||||
|
||||
[[licenses.clarify]]
|
||||
name = "ring"
|
||||
version = "*"
|
||||
expression = "MIT AND ISC AND OpenSSL"
|
||||
license-files = [{ path = "LICENSE", hash = 0xbd0eed23 }]
|
||||
|
||||
[licenses.private]
|
||||
ignore = false
|
||||
registries = []
|
||||
|
||||
[bans]
|
||||
multiple-versions = "warn"
|
||||
wildcards = "allow"
|
||||
highlight = "all"
|
||||
workspace-default-features = "allow"
|
||||
external-default-features = "allow"
|
||||
allow = []
|
||||
deny = []
|
||||
skip = []
|
||||
skip-tree = []
|
||||
|
||||
[sources]
|
||||
unknown-registry = "warn"
|
||||
unknown-git = "warn"
|
||||
allow-registry = ["https://github.com/rust-lang/crates.io-index"]
|
||||
allow-git = [
|
||||
# we need the mio override to disable eventfd for SGX
|
||||
"https://github.com/tokio-rs/mio?rev=ec0776f5af29548e4e1f48f86f5fa123a46caa07#ec0776f5af29548e4e1f48f86f5fa123a46caa07",
|
||||
]
|
||||
|
||||
[sources.allow-org]
|
||||
github = [""]
|
||||
gitlab = [""]
|
||||
bitbucket = [""]
|
241
examples/README.md
Normal file
|
@ -0,0 +1,241 @@
|
|||
# Scratch Notes for running the teepot vault setup
|
||||
|
||||
## Build and Run on SGX server
|
||||
|
||||
```bash
|
||||
$ docker compose build
|
||||
$ docker compose up
|
||||
```
|
||||
|
||||
## Build and Run on client machine
|
||||
```bash
|
||||
$ cd teepot
|
||||
$ gpg --export username@example.com | base64 > gpgkey.pub
|
||||
❯ RUST_LOG=info cargo run -p vault-unseal -- --sgx-mrsigner c5591a72b8b86e0d8814d6e8750e3efe66aea2d102b8ba2405365559b858697d --sgx-allowed-tcb-levels SwHardeningNeeded --server https://20.172.154.218:8443 init --unseal-threshold 1 -u bin/tee-vault-admin/tests/data/gpgkey.pub --admin-threshold 1 -a bin/tee-vault-admin/tests/data/gpgkey.pub --admin-tee-mrenclave 21c8c1a4dbcce04798f5119eb47203084bc74e564a3c954d1a21172c656cb801
|
||||
Finished dev [unoptimized + debuginfo] target(s) in 0.09s
|
||||
Running `target/debug/vault-unseal --sgx-mrsigner c5591a72b8b86e0d8814d6e8750e3efe66aea2d102b8ba2405365559b858697d --sgx-allowed-tcb-levels SwHardeningNeeded --server 'https://20.172.154.218:8443' init --unseal-threshold 1 -u bin/tee-vault-admin/tests/data/gpgkey.pub --admin-threshold 1 -a bin/tee-vault-admin/tests/data/gpgkey.pub --admin-tee-mrenclave 21c8c1a4dbcce04798f5119eb47203084bc74e564a3c954d1a21172c656cb801`
|
||||
2023-08-23T14:47:56.902422Z INFO tee_client: Getting attestation report
|
||||
2023-08-23T14:47:57.340877Z INFO tee_client: Checked or set server certificate public key hash `b4bf52fdb37431c8531fb310be389c2d17ad9bd41d662e10308c9147c007d0d0`
|
||||
2023-08-23T14:47:57.741599Z INFO tee_client: Verifying attestation report
|
||||
2023-08-23T14:47:57.763320Z INFO tee_client: TcbLevel is allowed: SwHardeningNeeded: Software hardening is needed
|
||||
2023-08-23T14:47:57.763356Z WARN tee_client: Info: Advisory ID: INTEL-SA-00615
|
||||
2023-08-23T14:47:57.763371Z INFO tee_client: Report data matches `b4bf52fdb37431c8531fb310be389c2d17ad9bd41d662e10308c9147c007d0d0`
|
||||
2023-08-23T14:47:57.763391Z INFO tee_client: mrsigner `c5591a72b8b86e0d8814d6e8750e3efe66aea2d102b8ba2405365559b858697d` matches
|
||||
2023-08-23T14:47:57.763412Z INFO vault_unseal: Quote verified! Connection secure!
|
||||
2023-08-23T14:47:57.763418Z INFO vault_unseal: Inititalizing vault
|
||||
2023-08-23T14:48:07.278090Z INFO vault_unseal: Got Response: {"unseal_keys":["wcDMA9FaOxXbOhL7AQv7BoGfG5K+78RHV6LGqT5k/M1e8GP3pvBHTeY1lReCo2bkLmm4k4KBxdqNLSE8lV4urN5iWTAt74jCoC+uuAeA2OSL7AidX+HcftzcAXhJp2INtkyqsL8xGaPgpZxXj77fJ/Z7HW1mUlAxJowdZudvA5DmJls6u8VK6YtY3deLGbMRVygXFG+NGabNrRQ0nnFMMMCPXZ39ETitJyfFX6x4BizVQixagN9IqkozXLiupoHD4N0LOESDIm2MuqPnGAk0X6YgyZhFZc8uCrN9W/zNkXQ7eJxIamsLysVnPGaNQ92VQlz4aFAJLKrMCvGrtrxQJk9N+P47EArGCl9bP2hXfg783arXF6Bp/YgGgpvJRFZ04nMNDlIcIFuV5QBfiJX1hNIXg0MVlqmzVeGDVHlys+2mOvOO8seIBG1p4FGRQr6YWI4KxaN6sVA5DNclvITWiH/6H50SUJqXQ5M6rfEoBajYenpzZwYXb0oGzVHrUg5AnfPSuYRT0p8dAPz3/9vE0nEBzNeNVedEwwbgHP1aSPK8J3pPgoRVMyiq7gXzJEXoG5PLJEq4poQ1QwevAVTNv5Pu/TvTacDkJfVcBL5fukB9fj/WJktxEXmznEK3GMBBmvIAVLkgCEl+dH17CxvKq2ik6AfAHVdmEPcNw0ViNCZj1Q=="]}
|
||||
{"unseal_keys":["wcDMA9FaOxXbOhL7AQv7BoGfG5K+78RHV6LGqT5k/M1e8GP3pvBHTeY1lReCo2bkLmm4k4KBxdqNLSE8lV4urN5iWTAt74jCoC+uuAeA2OSL7AidX+HcftzcAXhJp2INtkyqsL8xGaPgpZxXj77fJ/Z7HW1mUlAxJowdZudvA5DmJls6u8VK6YtY3deLGbMRVygXFG+NGabNrRQ0nnFMMMCPXZ39ETitJyfFX6x4BizVQixagN9IqkozXLiupoHD4N0LOESDIm2MuqPnGAk0X6YgyZhFZc8uCrN9W/zNkXQ7eJxIamsLysVnPGaNQ92VQlz4aFAJLKrMCvGrtrxQJk9N+P47EArGCl9bP2hXfg783arXF6Bp/YgGgpvJRFZ04nMNDlIcIFuV5QBfiJX1hNIXg0MVlqmzVeGDVHlys+2mOvOO8seIBG1p4FGRQr6YWI4KxaN6sVA5DNclvITWiH/6H50SUJqXQ5M6rfEoBajYenpzZwYXb0oGzVHrUg5AnfPSuYRT0p8dAPz3/9vE0nEBzNeNVedEwwbgHP1aSPK8J3pPgoRVMyiq7gXzJEXoG5PLJEq4poQ1QwevAVTNv5Pu/TvTacDkJfVcBL5fukB9fj/WJktxEXmznEK3GMBBmvIAVLkgCEl+dH17CxvKq2ik6AfAHVdmEPcNw0ViNCZj1Q=="]}
|
||||
|
||||
❯ echo wcDMA9FaOxXbOhL7AQv7BoGfG5K+78RHV6LGqT5k/M1e8GP3pvBHTeY1lReCo2bkLmm4k4KBxdqNLSE8lV4urN5iWTAt74jCoC+uuAeA2OSL7AidX+HcftzcAXhJp2INtkyqsL8xGaPgpZxXj77fJ/Z7HW1mUlAxJowdZudvA5DmJls6u8VK6YtY3deLGbMRVygXFG+NGabNrRQ0nnFMMMCPXZ39ETitJyfFX6x4BizVQixagN9IqkozXLiupoHD4N0LOESDIm2MuqPnGAk0X6YgyZhFZc8uCrN9W/zNkXQ7eJxIamsLysVnPGaNQ92VQlz4aFAJLKrMCvGrtrxQJk9N+P47EArGCl9bP2hXfg783arXF6Bp/YgGgpvJRFZ04nMNDlIcIFuV5QBfiJX1hNIXg0MVlqmzVeGDVHlys+2mOvOO8seIBG1p4FGRQr6YWI4KxaN6sVA5DNclvITWiH/6H50SUJqXQ5M6rfEoBajYenpzZwYXb0oGzVHrUg5AnfPSuYRT0p8dAPz3/9vE0nEBzNeNVedEwwbgHP1aSPK8J3pPgoRVMyiq7gXzJEXoG5PLJEq4poQ1QwevAVTNv5Pu/TvTacDkJfVcBL5fukB9fj/WJktxEXmznEK3GMBBmvIAVLkgCEl+dH17CxvKq2ik6AfAHVdmEPcNw0ViNCZj1Q== | base64 --decode | gpg -dq | RUST_LOG=info cargo run -p vault-unseal -- --sgx-mrsigner c5591a72b8b86e0d8814d6e8750e3efe66aea2d102b8ba2405365559b858697d --sgx-allowed-tcb-levels SwHardeningNeeded --server https://20.172.154.218:8443 unseal
|
||||
Finished dev [unoptimized + debuginfo] target(s) in 0.09s
|
||||
Running `target/debug/vault-unseal --sgx-mrsigner c5591a72b8b86e0d8814d6e8750e3efe66aea2d102b8ba2405365559b858697d --sgx-allowed-tcb-levels SwHardeningNeeded --server 'https://20.172.154.218:8443' unseal`
|
||||
2023-08-23T14:48:20.735605Z INFO tee_client: Getting attestation report
|
||||
2023-08-23T14:48:21.349424Z INFO tee_client: Checked or set server certificate public key hash `b4bf52fdb37431c8531fb310be389c2d17ad9bd41d662e10308c9147c007d0d0`
|
||||
2023-08-23T14:48:21.742086Z INFO tee_client: Verifying attestation report
|
||||
2023-08-23T14:48:21.757960Z INFO tee_client: TcbLevel is allowed: SwHardeningNeeded: Software hardening is needed
|
||||
2023-08-23T14:48:21.757996Z WARN tee_client: Info: Advisory ID: INTEL-SA-00615
|
||||
2023-08-23T14:48:21.758014Z INFO tee_client: Report data matches `b4bf52fdb37431c8531fb310be389c2d17ad9bd41d662e10308c9147c007d0d0`
|
||||
2023-08-23T14:48:21.758039Z INFO tee_client: mrsigner `c5591a72b8b86e0d8814d6e8750e3efe66aea2d102b8ba2405365559b858697d` matches
|
||||
2023-08-23T14:48:21.758060Z INFO vault_unseal: Quote verified! Connection secure!
|
||||
2023-08-23T14:48:21.758065Z INFO vault_unseal: Unsealing vault
|
||||
2023-08-23T14:49:28.144877Z INFO vault_unseal: Vault is unsealed!
|
||||
Vault is unsealed!
|
||||
|
||||
```
|
||||
|
||||
```bash
|
||||
❯ (id=$(docker create tva); docker cp $id:/app/tee-vault-admin.sig ~/tee-vault-admin.sig; docker rm -v $id)
|
||||
❯ cargo run -p vault-admin -- create-sign-request ~/tee-vault-admin.sig > ~/sign_admin_tee.json
|
||||
❯ vim sign_admin_tee.json
|
||||
❯ gpg --local-user test@example.com --detach-sign --armor ~/sign_admin_tee.json
|
||||
❯ RUST_LOG=info cargo run -p vault-admin -- \
|
||||
sign-tee \
|
||||
--sgx-mrenclave 080c3210d5b6bcf47887101a554c117c21d80e75240bb70846c3e158a713ec65 \
|
||||
--sgx-allowed-tcb-levels SwHardeningNeeded \
|
||||
--server https://127.0.0.1:8444 \
|
||||
--out new_admin.sig \
|
||||
~/sign_admin_tee.json ~/sign_admin_tee.json.asc
|
||||
|
||||
❯ gramine-sgx-sigstruct-view new_admin.sig
|
||||
Attributes:
|
||||
mr_signer: 8392a970ea57f1f37fb8985d9394b26611b18a5d5591b7d9d58d23998a116298
|
||||
mr_enclave: 080c3210d5b6bcf47887101a554c117c21d80e75240bb70846c3e158a713ec65
|
||||
isv_prod_id: 0
|
||||
isv_svn: 0
|
||||
debug_enclave: False
|
||||
|
||||
❯ RUST_LOG=info cargo run -p vault-admin -- digest --sgx-mrsigner 8392a970ea57f1f37fb8985d9394b26611b18a5d5591b7d9d58d23998a116298 --sgx-allowed-tcb-levels SwHardeningNeeded --server https://127.0.0.1:8444
|
||||
Finished dev [unoptimized + debuginfo] target(s) in 0.12s
|
||||
Running `target/debug/vault-admin digest --sgx-mrsigner 8392a970ea57f1f37fb8985d9394b26611b18a5d5591b7d9d58d23998a116298 --sgx-allowed-tcb-levels SwHardeningNeeded --server 'https://127.0.0.1:8444'`
|
||||
2023-09-01T09:13:40.502841Z INFO vault_admin: Quote verified! Connection secure!
|
||||
2023-09-01T09:13:40.503374Z INFO tee_client: Getting attestation report
|
||||
2023-09-01T09:13:40.810238Z INFO tee_client: Checked or set server certificate public key hash `6296a59283e8b70b5501cf391457bd618159df4c206a4c5b206afc5b324cdd91`
|
||||
2023-09-01T09:13:41.110855Z INFO tee_client: Verifying attestation report
|
||||
2023-09-01T09:13:41.131057Z INFO tee_client: TcbLevel is allowed: SwHardeningNeeded: Software hardening is needed
|
||||
2023-09-01T09:13:41.131099Z WARN tee_client: Info: Advisory ID: INTEL-SA-00615
|
||||
2023-09-01T09:13:41.131121Z INFO tee_client: Report data matches `6296a59283e8b70b5501cf391457bd618159df4c206a4c5b206afc5b324cdd91`
|
||||
2023-09-01T09:13:41.131143Z INFO tee_client: mrsigner `8392a970ea57f1f37fb8985d9394b26611b18a5d5591b7d9d58d23998a116298` matches
|
||||
{
|
||||
"last_digest": "c9929fef9c87b5c7bb7c47b563c83c4609741245847f173de0bedb2b3a00daa8"
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
```bash
|
||||
❯ docker compose build && (docker compose rm; docker volume rm teepot_vault-storage teepot_ha-raft-1 teepot_shared-1 teepot_ha-raft-2 teepot_shared-2 teepot_ha-raft-3 teepot_shared-3; docker compose up --remove-orphans vault-1 tvu-1)
|
||||
❯ (id=$(docker create teepot-admin); docker cp $id:/app/tee-vault-admin.sig ~/tee-vault-admin.sig; docker rm -v $id)
|
||||
❯ gramine-sgx-sigstruct-view ~/tee-vault-admin.sig
|
||||
Attributes:
|
||||
mr_signer: c5591a72b8b86e0d8814d6e8750e3efe66aea2d102b8ba2405365559b858697d
|
||||
mr_enclave: 265ca491bf13e2486fd67d12038fcce02f133c5d91277e42f58c0ab464d5b46b
|
||||
isv_prod_id: 0
|
||||
isv_svn: 0
|
||||
debug_enclave: False
|
||||
❯ RUST_LOG=info cargo run -p vault-unseal -- --sgx-mrsigner c5591a72b8b86e0d8814d6e8750e3efe66aea2d102b8ba2405365559b858697d --sgx-allowed-tcb-levels SwHardeningNeeded --server https://127.0.0.1:8413 init --unseal-threshold 1 -u tests/data/gpgkey.pub --admin-threshold 1 -a tests/data/gpgkey.pub --admin-tee-mrenclave 265ca491bf13e2486fd67d12038fcce02f133c5d91277e42f58c0ab464d5b46b
|
||||
❯ export GPG_TTY=$(tty)
|
||||
❯ gpg-connect-agent updatestartuptty /bye >/dev/null
|
||||
❯ gpg-connect-agent reloadagent /bye
|
||||
OK
|
||||
❯ echo wcDMA9FaOxXbOhL7AQwAgMxP/gTv/3RY/lMGPyEAfmIgIRdvfkWf8Sl07blUXmMKfIYyTkksMZLNc0Kiqx1oUR1qbT85WjWDhwhWADEbIhNFnTGdZ/CI24Bl4Nc8Dv7EnvJ0hmJw5AydE5YHACktSYTVgqXR9W8j5BO5K/+LyudJaMvcZFJH44MwYL8hMDKZbIdvIVFFEg2O/cBQgZc+UHljZEX+ptmR1q4BJM0dK6Ol5+v+zQ8FiByf6wgXJ2SQCERkhkiAaKkcIpyW1q8zgqVy29e46B6hfalYe0wD7U9L4QPiAr7Ik8rHEXB5iQucyDuWj65CVJXPVZ2Y+Q1Fk+OPrtYe7yDqZwJs3SlgzI7GNL4x7UqWALhroYzbiWETNwlhF4UZLOQRP5gkCQlAP3LkJJAFtUAbeJy8IgMRCz4F4f8nUCVLf6MDelr9ZXukmuc9U0tkmidNO8R2QAQUMLCCLUCkNnNa/hZz+81EUcNrI24kGqTlZfJxBpc+nr3MJxqSQ+btvqt8eJWlP9UJ0nEBdm74wj7nsekgwwttyq77Z8lciHgTLsjtSwk4tMse6uedWcPEGXxDKGzLd3dyaQD96NCUYt/GbGXVYTkH5mZci59+fkbGFEsJZYGffFmt7pcL69aoctgEKwBUxVR+BESo+UV1qUKAfO92QTYeXCA4/A== | base64 --decode | gpg -dq | RUST_LOG=info cargo run -p vault-unseal -- --sgx-mrsigner c5591a72b8b86e0d8814d6e8750e3efe66aea2d102b8ba2405365559b858697d --sgx-allowed-tcb-levels SwHardeningNeeded --server https://127.0.0.1:8413 unseal Please enter the passphrase to unlock the OpenPGP secret key:
|
||||
"test <test@example.com>"
|
||||
3072-bit RSA key, ID D15A3B15DB3A12FB,
|
||||
created 2023-08-08 (main key ID 6D728F29A2DBABF8).
|
||||
|
||||
Passphrase:
|
||||
❯ (id=$(docker create teepot-stress); docker cp $id:/app/tee-stress-client.sig ~/tee-stress-client.sig; docker rm -v $id)
|
||||
|
||||
|
||||
```
|
||||
|
||||
## Kubernetes
|
||||
|
||||
Find out the `mr_enclave` value of the tee-vault-admin enclave and extract the sigstruct file:
|
||||
|
||||
```bash
|
||||
❯ docker run -v .:/mnt --pull always -it matterlabsrobot/teepot-tva:latest 'gramine-sgx-sigstruct-view tee-vault-admin.sig; cp tee-vault-admin.sig /mnt'
|
||||
[...]
|
||||
Attributes:
|
||||
mr_signer: c5591a72b8b86e0d8814d6e8750e3efe66aea2d102b8ba2405365559b858697d
|
||||
mr_enclave: 98a540dd7056584e2009c7cf7374f932fbb8e30a4c66cc815c9809620653f751
|
||||
isv_prod_id: 0
|
||||
isv_svn: 0
|
||||
debug_enclave: False
|
||||
❯ ls -l ~/tee-vault-admin.sig
|
||||
-rw-r--r--. 1 harald harald 1808 2. Nov 10:46 tee-vault-admin.sig
|
||||
```
|
||||
|
||||
Start the vault service and pod, and forward the port:
|
||||
|
||||
```bash
|
||||
❯ kubectl apply \
|
||||
-f examples/k8s/data-1-persistentvolumeclaim.yaml \
|
||||
-f examples/k8s/shared-1-persistentvolumeclaim.yaml \
|
||||
-f examples/k8s/vault-1-pod.yaml \
|
||||
-f examples/k8s/vault-1-service.yaml
|
||||
❯ kubectl port-forward pods/vault-1 8443
|
||||
```
|
||||
|
||||
Initialize the instance.
|
||||
This can take up to 6 minutes, depending on the `performance_multiplier` setting in vault.
|
||||
Adjust the `--admin-tee-mrenclave` parameter to match the `mr_enclave` value of the tee-vault-admin container.
|
||||
|
||||
```bash
|
||||
❯ RUST_LOG=info cargo run -p vault-unseal -- \
|
||||
--sgx-mrsigner c5591a72b8b86e0d8814d6e8750e3efe66aea2d102b8ba2405365559b858697d \
|
||||
--sgx-allowed-tcb-levels SwHardeningNeeded \
|
||||
--server https://127.0.0.1:8443 \
|
||||
init \
|
||||
--unseal-threshold 1 \
|
||||
--unseal-pgp-key-file ./tests/data/gpgkey.pub \
|
||||
--admin-threshold 1 \
|
||||
--admin-pgp-key-file ./tests/data/gpgkey.pub \
|
||||
--admin-tee-mrenclave 98a540dd7056584e2009c7cf7374f932fbb8e30a4c66cc815c9809620653f751
|
||||
```
|
||||
|
||||
Unseal the instance
|
||||
|
||||
```bash
|
||||
❯ echo <one of the unseal secrets from the init output> \
|
||||
| base64 --decode \
|
||||
| gpg -dq \
|
||||
| RUST_LOG=info cargo run -p vault-unseal -- \
|
||||
--sgx-mrsigner c5591a72b8b86e0d8814d6e8750e3efe66aea2d102b8ba2405365559b858697d \
|
||||
--sgx-allowed-tcb-levels SwHardeningNeeded \
|
||||
--server https://127.0.0.1:8443 \
|
||||
unseal
|
||||
```
|
||||
|
||||
End the port forwarding of vault-1 and start the rest of the nodes:
|
||||
|
||||
```bash
|
||||
❯ kubectl apply -f examples/k8s
|
||||
```
|
||||
|
||||
Unseal the other vault instances:
|
||||
|
||||
Every unseal-secret holder has to repeat this until the threshold is reached.
|
||||
|
||||
```bash
|
||||
❯ kubectl port-forward pods/vault-$NUM 8443
|
||||
❯ echo <one of the unseal secrets from the init output> \
|
||||
| base64 --decode \
|
||||
| gpg -dq \
|
||||
| RUST_LOG=info cargo run -p vault-unseal -- \
|
||||
--sgx-mrsigner c5591a72b8b86e0d8814d6e8750e3efe66aea2d102b8ba2405365559b858697d \
|
||||
--sgx-allowed-tcb-levels SwHardeningNeeded \
|
||||
--server https://127.0.0.1:8443 \
|
||||
unseal
|
||||
❯ kubectl port-forward pods/vault-3 8443
|
||||
❯ echo <one of the unseal secrets from the init output> \
|
||||
| base64 --decode \
|
||||
| gpg -dq \
|
||||
| RUST_LOG=info cargo run -p vault-unseal -- \
|
||||
--sgx-mrsigner c5591a72b8b86e0d8814d6e8750e3efe66aea2d102b8ba2405365559b858697d \
|
||||
--sgx-allowed-tcb-levels SwHardeningNeeded \
|
||||
--server https://127.0.0.1:8443 \
|
||||
unseal
|
||||
```
|
||||
|
||||
The vault cluster should now settle to be completely unsealed and synced.
|
||||
|
||||
Start the vault-admin pod and forward the port:
|
||||
|
||||
```bash
|
||||
❯ kubectl port-forward pods/tee-vault-admin 8444
|
||||
```
|
||||
|
||||
Next, sign the admin TEE with the vault-admin tool:
|
||||
|
||||
```bash
|
||||
❯ cargo run -p vault-admin -- create-sign-request --tee-name admin ~/tee-vault-admin.sig > ~/tee-vault-admin.json
|
||||
❯ gpg --local-user test@example.com --detach-sign --armor ~/tee-vault-admin.json
|
||||
❯ cargo run -p vault-admin -- command \
|
||||
--server https://127.0.0.1:8444 \
|
||||
--sgx-allowed-tcb-levels SwHardeningNeeded \
|
||||
--out ~/tee-vault-admin-new.sig \
|
||||
~/tee-vault-admin.json ~/tee-vault-admin.json.asc
|
||||
```
|
||||
|
||||
Then replace `tee-vault-admin.sig` with `tee-vault-admin-new.sig` in the container image `matterlabsrobot/teepot-tva:latest` with this Dockerfile:
|
||||
|
||||
```Dockerfile
|
||||
FROM matterlabsrobot/teepot-tva:latest
|
||||
COPY tee-vault-admin-new.sig /app/tee-vault-admin.sig
|
||||
```
|
||||
|
||||
Build and push the new image:
|
||||
|
||||
```bash
|
||||
❯ docker build -t matterlabsrobot/teepot-tva-signed:latest .
|
||||
❯ docker push matterlabsrobot/teepot-tva-signed:latest
|
||||
```
|
||||
|
||||
Delete the old vault-admin pod and start the new one:
|
||||
|
||||
```bash
|
||||
❯ kubectl delete pod/tee-vault-admin
|
||||
❯ kubectl apply -f examples/k8s/vault-admin-signed-pod.yaml
|
||||
```
|
||||
|
||||
The new signed admin TEE can now be used.
|
146
examples/docker-compose.yml
Normal file
|
@ -0,0 +1,146 @@
|
|||
# From the main directory run:
|
||||
# ❯ docker compose -f examples/docker-compose.yml --project-directory $PWD up
|
||||
services:
|
||||
tvu-1:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: bin/tee-vault-unseal/Dockerfile-azure
|
||||
image: tee-vault-unseal
|
||||
restart: "no"
|
||||
ports:
|
||||
- 8413:8443
|
||||
environment:
|
||||
VAULT_ADDR: "https://vault-1:8210"
|
||||
ALLOWED_TCB_LEVELS: "SwHardeningNeeded"
|
||||
privileged: true
|
||||
init: true
|
||||
volumes:
|
||||
- /run/aesmd:/run/aesmd
|
||||
- /dev/sgx_enclave:/dev/sgx_enclave
|
||||
- shared-1:/opt/vault/tls
|
||||
vault-1:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: vault/Dockerfile
|
||||
image: vault
|
||||
restart: "no"
|
||||
ports:
|
||||
- 8210:8210
|
||||
# - 8211:8211
|
||||
environment:
|
||||
VAULT_API_ADDR: "https://vault-1:8210"
|
||||
VAULT_CLUSTER_ADDR: "https://vault-1:8211"
|
||||
VAULT_RAFT_NODE_ID: "vault-1"
|
||||
privileged: true
|
||||
init: true
|
||||
volumes:
|
||||
- /run/aesmd:/run/aesmd
|
||||
- /dev/sgx_enclave:/dev/sgx_enclave
|
||||
- shared-1:/opt/vault/tls
|
||||
- data-1:/opt/vault/data
|
||||
|
||||
tvu-2:
|
||||
image: tee-vault-unseal
|
||||
restart: "no"
|
||||
ports:
|
||||
- 8423:8443
|
||||
environment:
|
||||
VAULT_ADDR: "https://vault-2:8210"
|
||||
ALLOWED_TCB_LEVELS: "SwHardeningNeeded"
|
||||
privileged: true
|
||||
init: true
|
||||
volumes:
|
||||
- /run/aesmd:/run/aesmd
|
||||
- /dev/sgx_enclave:/dev/sgx_enclave
|
||||
- shared-2:/opt/vault/tls
|
||||
vault-2:
|
||||
image: vault
|
||||
restart: "no"
|
||||
ports:
|
||||
- 8220:8210
|
||||
# - 8221:8211
|
||||
environment:
|
||||
VAULT_API_ADDR: "https://vault-2:8210"
|
||||
VAULT_CLUSTER_ADDR: "https://vault-2:8211"
|
||||
VAULT_RAFT_NODE_ID: "vault-2"
|
||||
privileged: true
|
||||
init: true
|
||||
volumes:
|
||||
- /run/aesmd:/run/aesmd
|
||||
- /dev/sgx_enclave:/dev/sgx_enclave
|
||||
- shared-2:/opt/vault/tls
|
||||
- data-2:/opt/vault/data
|
||||
|
||||
tvu-3:
|
||||
image: tee-vault-unseal
|
||||
restart: "no"
|
||||
ports:
|
||||
- 8433:8443
|
||||
environment:
|
||||
VAULT_ADDR: "https://vault-3:8210"
|
||||
ALLOWED_TCB_LEVELS: "SwHardeningNeeded"
|
||||
privileged: true
|
||||
init: true
|
||||
volumes:
|
||||
- /run/aesmd:/run/aesmd
|
||||
- /dev/sgx_enclave:/dev/sgx_enclave
|
||||
- shared-3:/opt/vault/tls
|
||||
vault-3:
|
||||
image: vault
|
||||
restart: "no"
|
||||
ports:
|
||||
- 8230:8210
|
||||
# - 8231:8211
|
||||
environment:
|
||||
VAULT_API_ADDR: "https://vault-3:8210"
|
||||
VAULT_CLUSTER_ADDR: "https://vault-3:8211"
|
||||
VAULT_RAFT_NODE_ID: "vault-3"
|
||||
privileged: true
|
||||
init: true
|
||||
volumes:
|
||||
- /run/aesmd:/run/aesmd
|
||||
- /dev/sgx_enclave:/dev/sgx_enclave
|
||||
- shared-3:/opt/vault/tls
|
||||
- data-3:/opt/vault/data
|
||||
|
||||
admin:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: bin/tee-vault-admin/Dockerfile-azure
|
||||
restart: "no"
|
||||
ports:
|
||||
- 8444:8444
|
||||
environment:
|
||||
VAULT_ADDR: "https://vault-1:8210"
|
||||
VAULT_SGX_MRSIGNER: "c5591a72b8b86e0d8814d6e8750e3efe66aea2d102b8ba2405365559b858697d"
|
||||
VAULT_SGX_ALLOWED_TCB_LEVELS: "SwHardeningNeeded"
|
||||
ALLOWED_TCB_LEVELS: "SwHardeningNeeded"
|
||||
privileged: true
|
||||
init: true
|
||||
volumes:
|
||||
- /run/aesmd:/run/aesmd
|
||||
- /dev/sgx_enclave:/dev/sgx_enclave
|
||||
|
||||
stress:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: bin/tee-stress-client/Dockerfile-azure
|
||||
restart: "no"
|
||||
environment:
|
||||
VAULT_ADDR: "https://vault-1:8210"
|
||||
VAULT_SGX_MRSIGNER: "c5591a72b8b86e0d8814d6e8750e3efe66aea2d102b8ba2405365559b858697d"
|
||||
VAULT_SGX_ALLOWED_TCB_LEVELS: "SwHardeningNeeded"
|
||||
ALLOWED_TCB_LEVELS: "SwHardeningNeeded"
|
||||
privileged: true
|
||||
init: true
|
||||
volumes:
|
||||
- /run/aesmd:/run/aesmd
|
||||
- /dev/sgx_enclave:/dev/sgx_enclave
|
||||
|
||||
volumes:
|
||||
shared-1:
|
||||
data-1:
|
||||
shared-2:
|
||||
data-2:
|
||||
shared-3:
|
||||
data-3:
|
14
examples/k8s/data-1-persistentvolumeclaim.yaml
Normal file
|
@ -0,0 +1,14 @@
|
|||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
labels:
|
||||
io.kompose.service: data-1
|
||||
name: data-1
|
||||
namespace: default
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 100Mi
|
||||
status: {}
|
14
examples/k8s/data-2-persistentvolumeclaim.yaml
Normal file
|
@ -0,0 +1,14 @@
|
|||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
labels:
|
||||
io.kompose.service: data-2
|
||||
name: data-2
|
||||
namespace: default
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 100Mi
|
||||
status: {}
|
14
examples/k8s/data-3-persistentvolumeclaim.yaml
Normal file
|
@ -0,0 +1,14 @@
|
|||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
labels:
|
||||
io.kompose.service: data-3
|
||||
name: data-3
|
||||
namespace: default
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 100Mi
|
||||
status: {}
|
14
examples/k8s/shared-1-persistentvolumeclaim.yaml
Normal file
@@ -0,0 +1,14 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  labels:
    io.kompose.service: shared-1
  name: shared-1
  namespace: default
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 100Mi
status: {}
14
examples/k8s/shared-2-persistentvolumeclaim.yaml
Normal file
@@ -0,0 +1,14 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  labels:
    io.kompose.service: shared-2
  name: shared-2
  namespace: default
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 100Mi
status: {}
14
examples/k8s/shared-3-persistentvolumeclaim.yaml
Normal file
@@ -0,0 +1,14 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  labels:
    io.kompose.service: shared-3
  name: shared-3
  namespace: default
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 100Mi
status: {}
97
examples/k8s/vault-1-pod.yaml
Normal file
@@ -0,0 +1,97 @@
apiVersion: v1
kind: Pod
metadata:
  annotations:
    kompose.cmd: kompose convert
  labels:
    io.kompose.network/teepot-default: "true"
    io.kompose.service: vault-1
    app: vault
  name: vault-1
  namespace: default
spec:
  tolerations:
    - key: sgx.intel.com/provision
      operator: Exists
      effect: NoSchedule
  affinity:
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - topologyKey: kubernetes.io/hostname
          labelSelector:
            matchExpressions:
              - key: app
                operator: In
                values:
                  - vault
  imagePullSecrets:
    - name: docker-regcred
  containers:
    - image: matterlabsrobot/teepot-vault:latest
      name: vault
      imagePullPolicy: Always
      env:
        - name: VAULT_API_ADDR
          value: "https://vault-1:8210"
        - name: VAULT_CLUSTER_ADDR
          value: "https://vault-1:8211"
        - name: VAULT_RAFT_NODE_ID
          value: "vault-1"
      readinessProbe:
        exec:
          command:
            - curl
            - -k
            - https://localhost:8210/v1/sys/health
        initialDelaySeconds: 5
        periodSeconds: 5
      ports:
        - containerPort: 8210
          hostPort: 8210
          protocol: TCP
        - containerPort: 8211
          hostPort: 8211
          protocol: TCP
      resources:
        limits:
          sgx.intel.com/epc: "10Mi"
        requests:
          sgx.intel.com/epc: "10Mi"
      securityContext:
        privileged: true
      volumeMounts:
        - mountPath: /opt/vault/tls
          name: shared-1
        - mountPath: /opt/vault/data
          name: data-1
    - image: matterlabsrobot/teepot-tvu:latest
      name: vault-unseal
      imagePullPolicy: Always
      env:
        - name: VAULT_ADDR
          value: "https://vault-1:8210"
        - name: ALLOWED_TCB_LEVELS
          value: "SwHardeningNeeded"
      ports:
        - containerPort: 8443
          hostPort: 8443
          protocol: TCP
      resources:
        limits:
          sgx.intel.com/epc: "10Mi"
        requests:
          sgx.intel.com/epc: "10Mi"
      securityContext:
        privileged: true
      volumeMounts:
        - mountPath: /opt/vault/tls
          name: shared-1
  restartPolicy: Never
  volumes:
    - name: shared-1
      persistentVolumeClaim:
        claimName: shared-1
    - name: data-1
      persistentVolumeClaim:
        claimName: data-1
status: {}
20
examples/k8s/vault-1-service.yaml
Normal file
@@ -0,0 +1,20 @@
apiVersion: v1
kind: Service
metadata:
  annotations:
    kompose.cmd: kompose convert

  labels:
    io.kompose.service: vault-1
  name: vault-1
  namespace: default
spec:
  ports:
    - name: "8210"
      port: 8210
      targetPort: 8210
    - name: "8211"
      port: 8211
      targetPort: 8211
  selector:
    io.kompose.service: vault-1
97
examples/k8s/vault-2-pod.yaml
Normal file
@@ -0,0 +1,97 @@
apiVersion: v1
kind: Pod
metadata:
  annotations:
    kompose.cmd: kompose convert
  labels:
    io.kompose.network/teepot-default: "true"
    io.kompose.service: vault-2
    app: vault
  name: vault-2
  namespace: default
spec:
  tolerations:
    - key: sgx.intel.com/provision
      operator: Exists
      effect: NoSchedule
  affinity:
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - topologyKey: kubernetes.io/hostname
          labelSelector:
            matchExpressions:
              - key: app
                operator: In
                values:
                  - vault
  imagePullSecrets:
    - name: docker-regcred
  containers:
    - image: matterlabsrobot/teepot-vault:latest
      name: vault
      imagePullPolicy: Always
      env:
        - name: VAULT_API_ADDR
          value: "https://vault-2:8210"
        - name: VAULT_CLUSTER_ADDR
          value: "https://vault-2:8211"
        - name: VAULT_RAFT_NODE_ID
          value: "vault-2"
      readinessProbe:
        exec:
          command:
            - curl
            - -k
            - https://localhost:8210/v1/sys/health
        initialDelaySeconds: 5
        periodSeconds: 5
      ports:
        - containerPort: 8210
          hostPort: 8210
          protocol: TCP
        - containerPort: 8211
          hostPort: 8211
          protocol: TCP
      resources:
        limits:
          sgx.intel.com/epc: "10Mi"
        requests:
          sgx.intel.com/epc: "10Mi"
      securityContext:
        privileged: true
      volumeMounts:
        - mountPath: /opt/vault/tls
          name: shared-2
        - mountPath: /opt/vault/data
          name: data-2
    - image: matterlabsrobot/teepot-tvu:latest
      name: vault-unseal
      imagePullPolicy: Always
      env:
        - name: VAULT_ADDR
          value: "https://vault-2:8210"
        - name: ALLOWED_TCB_LEVELS
          value: "SwHardeningNeeded"
      ports:
        - containerPort: 8443
          hostPort: 8443
          protocol: TCP
      resources:
        limits:
          sgx.intel.com/epc: "10Mi"
        requests:
          sgx.intel.com/epc: "10Mi"
      securityContext:
        privileged: true
      volumeMounts:
        - mountPath: /opt/vault/tls
          name: shared-2
  restartPolicy: Never
  volumes:
    - name: shared-2
      persistentVolumeClaim:
        claimName: shared-2
    - name: data-2
      persistentVolumeClaim:
        claimName: data-2
status: {}
20
examples/k8s/vault-2-service.yaml
Normal file
@@ -0,0 +1,20 @@
apiVersion: v1
kind: Service
metadata:
  annotations:
    kompose.cmd: kompose convert

  labels:
    io.kompose.service: vault-2
  name: vault-2
  namespace: default
spec:
  ports:
    - name: "8210"
      port: 8210
      targetPort: 8210
    - name: "8211"
      port: 8211
      targetPort: 8211
  selector:
    io.kompose.service: vault-2
97
examples/k8s/vault-3-pod.yaml
Normal file
@@ -0,0 +1,97 @@
apiVersion: v1
kind: Pod
metadata:
  annotations:
    kompose.cmd: kompose convert
  labels:
    io.kompose.network/teepot-default: "true"
    io.kompose.service: vault-3
    app: vault
  name: vault-3
  namespace: default
spec:
  tolerations:
    - key: sgx.intel.com/provision
      operator: Exists
      effect: NoSchedule
  affinity:
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - topologyKey: kubernetes.io/hostname
          labelSelector:
            matchExpressions:
              - key: app
                operator: In
                values:
                  - vault
  imagePullSecrets:
    - name: docker-regcred
  containers:
    - image: matterlabsrobot/teepot-vault:latest
      name: vault
      imagePullPolicy: Always
      env:
        - name: VAULT_API_ADDR
          value: "https://vault-3:8210"
        - name: VAULT_CLUSTER_ADDR
          value: "https://vault-3:8211"
        - name: VAULT_RAFT_NODE_ID
          value: "vault-3"
      readinessProbe:
        exec:
          command:
            - curl
            - -k
            - https://localhost:8210/v1/sys/health
        initialDelaySeconds: 5
        periodSeconds: 5
      ports:
        - containerPort: 8210
          hostPort: 8210
          protocol: TCP
        - containerPort: 8211
          hostPort: 8211
          protocol: TCP
      resources:
        limits:
          sgx.intel.com/epc: "10Mi"
        requests:
          sgx.intel.com/epc: "10Mi"
      securityContext:
        privileged: true
      volumeMounts:
        - mountPath: /opt/vault/tls
          name: shared-3
        - mountPath: /opt/vault/data
          name: data-3
    - image: matterlabsrobot/teepot-tvu:latest
      name: vault-unseal
      imagePullPolicy: Always
      env:
        - name: VAULT_ADDR
          value: "https://vault-3:8210"
        - name: ALLOWED_TCB_LEVELS
          value: "SwHardeningNeeded"
      ports:
        - containerPort: 8443
          hostPort: 8443
          protocol: TCP
      resources:
        limits:
          sgx.intel.com/epc: "10Mi"
        requests:
          sgx.intel.com/epc: "10Mi"
      securityContext:
        privileged: true
      volumeMounts:
        - mountPath: /opt/vault/tls
          name: shared-3
  restartPolicy: Never
  volumes:
    - name: shared-3
      persistentVolumeClaim:
        claimName: shared-3
    - name: data-3
      persistentVolumeClaim:
        claimName: data-3
status: {}
20
examples/k8s/vault-3-service.yaml
Normal file
@@ -0,0 +1,20 @@
apiVersion: v1
kind: Service
metadata:
  annotations:
    kompose.cmd: kompose convert

  labels:
    io.kompose.service: vault-3
  name: vault-3
  namespace: default
spec:
  ports:
    - name: "8210"
      port: 8210
      targetPort: 8210
    - name: "8211"
      port: 8211
      targetPort: 8211
  selector:
    io.kompose.service: vault-3
19
examples/k8s/vault-ha-serice.yaml
Normal file
@@ -0,0 +1,19 @@
apiVersion: v1
kind: Service
metadata:
  annotations:
    kompose.cmd: kompose convert
  labels:
    io.kompose.service: vault-ha
  name: vault-ha
  namespace: default
spec:
  ports:
    - name: "8210"
      port: 8210
      targetPort: 8210
    - name: "8211"
      port: 8211
      targetPort: 8211
  selector:
    app: vault
258
flake.lock
generated
Normal file
|
@ -0,0 +1,258 @@
|
|||
{
|
||||
"nodes": {
|
||||
"flake-compat": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1650374568,
|
||||
"narHash": "sha256-Z+s0J8/r907g149rllvwhb4pKi8Wam5ij0st8PwAh+E=",
|
||||
"owner": "edolstra",
|
||||
"repo": "flake-compat",
|
||||
"rev": "b4a34015c698c7793d592d66adbab377907a2be8",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "edolstra",
|
||||
"repo": "flake-compat",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"flake-utils": {
|
||||
"inputs": {
|
||||
"systems": "systems"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1705309234,
|
||||
"narHash": "sha256-uNRRNRKmJyCRC/8y1RqBkqWBLM034y4qN7EprSdmgyA=",
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"rev": "1ef2e671c3b0c19053962c07dbda38332dcebf26",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"flake-utils-plus": {
|
||||
"inputs": {
|
||||
"flake-utils": "flake-utils_2"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1696331477,
|
||||
"narHash": "sha256-YkbRa/1wQWdWkVJ01JvV+75KIdM37UErqKgTf0L54Fk=",
|
||||
"owner": "gytis-ivaskevicius",
|
||||
"repo": "flake-utils-plus",
|
||||
"rev": "bfc53579db89de750b25b0c5e7af299e0c06d7d3",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "gytis-ivaskevicius",
|
||||
"repo": "flake-utils-plus",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"flake-utils_2": {
|
||||
"inputs": {
|
||||
"systems": "systems_2"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1694529238,
|
||||
"narHash": "sha256-zsNZZGTGnMOf9YpHKJqMSsa0dXbfmxeoJ7xHlrt+xmY=",
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"rev": "ff7b65b44d01cf9ba6a71320833626af21126384",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"flake-utils_3": {
|
||||
"inputs": {
|
||||
"systems": "systems_3"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1705309234,
|
||||
"narHash": "sha256-uNRRNRKmJyCRC/8y1RqBkqWBLM034y4qN7EprSdmgyA=",
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"rev": "1ef2e671c3b0c19053962c07dbda38332dcebf26",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nix-filter": {
|
||||
"locked": {
|
||||
"lastModified": 1705332318,
|
||||
"narHash": "sha256-kcw1yFeJe9N4PjQji9ZeX47jg0p9A0DuU4djKvg1a7I=",
|
||||
"owner": "numtide",
|
||||
"repo": "nix-filter",
|
||||
"rev": "3449dc925982ad46246cfc36469baf66e1b64f17",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "numtide",
|
||||
"repo": "nix-filter",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1706515015,
|
||||
"narHash": "sha256-eFfY5A7wlYy3jD/75lx6IJRueg4noE+jowl0a8lIlVo=",
|
||||
"owner": "nixos",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "f4a8d6d5324c327dcc2d863eb7f3cc06ad630df4",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nixos",
|
||||
"ref": "nixos-23.11",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs_2": {
|
||||
"locked": {
|
||||
"lastModified": 1706487304,
|
||||
"narHash": "sha256-LE8lVX28MV2jWJsidW13D2qrHU/RUUONendL2Q/WlJg=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "90f456026d284c22b3e3497be980b2e47d0b28ac",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"ref": "nixpkgs-unstable",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixsgx-flake": {
|
||||
"inputs": {
|
||||
"nixpkgs": [
|
||||
"nixpkgs"
|
||||
],
|
||||
"snowfall-lib": "snowfall-lib"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1706717028,
|
||||
"narHash": "sha256-paQ5fBXWRpYdQMXphi8/gahl3/ptej4kncqxsMFguH4=",
|
||||
"owner": "haraldh",
|
||||
"repo": "nixsgx",
|
||||
"rev": "ff39bbbbbf7e88a28eeace784784839f1bf7e3b0",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "haraldh",
|
||||
"repo": "nixsgx",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"root": {
|
||||
"inputs": {
|
||||
"flake-utils": "flake-utils",
|
||||
"nix-filter": "nix-filter",
|
||||
"nixpkgs": "nixpkgs",
|
||||
"nixsgx-flake": "nixsgx-flake",
|
||||
"rust-overlay": "rust-overlay"
|
||||
}
|
||||
},
|
||||
"rust-overlay": {
|
||||
"inputs": {
|
||||
"flake-utils": "flake-utils_3",
|
||||
"nixpkgs": "nixpkgs_2"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1706753617,
|
||||
"narHash": "sha256-ZKqTFzhFwSWFEpQTJ0uXnfJBs5Y/po9/8TK4bzssdbs=",
|
||||
"owner": "oxalica",
|
||||
"repo": "rust-overlay",
|
||||
"rev": "58be43ae223034217ea1bd58c73210644031b687",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "oxalica",
|
||||
"repo": "rust-overlay",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"snowfall-lib": {
|
||||
"inputs": {
|
||||
"flake-compat": "flake-compat",
|
||||
"flake-utils-plus": "flake-utils-plus",
|
||||
"nixpkgs": [
|
||||
"nixsgx-flake",
|
||||
"nixpkgs"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1696432959,
|
||||
"narHash": "sha256-oJQZv2MYyJaVyVJY5IeevzqpGvMGKu5pZcCCJvb+xjc=",
|
||||
"owner": "snowfallorg",
|
||||
"repo": "lib",
|
||||
"rev": "92803a029b5314d4436a8d9311d8707b71d9f0b6",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "snowfallorg",
|
||||
"repo": "lib",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"systems": {
|
||||
"locked": {
|
||||
"lastModified": 1681028828,
|
||||
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"systems_2": {
|
||||
"locked": {
|
||||
"lastModified": 1681028828,
|
||||
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"systems_3": {
|
||||
"locked": {
|
||||
"lastModified": 1681028828,
|
||||
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"type": "github"
|
||||
}
|
||||
}
|
||||
},
|
||||
"root": "root",
|
||||
"version": 7
|
||||
}
|
115
flake.nix
Normal file
@@ -0,0 +1,115 @@
{
  description = "teepot";

  inputs = {
    nixpkgs.url = "github:nixos/nixpkgs/nixos-23.11";

    nix-filter.url = "github:numtide/nix-filter";
    flake-utils.url = "github:numtide/flake-utils";

    nixsgx-flake = {
      url = "github:matter-labs/nixsgx";
      inputs.nixpkgs.follows = "nixpkgs";
    };

    rust-overlay.url = "github:oxalica/rust-overlay";
  };

  outputs = { self, nixpkgs, flake-utils, nix-filter, nixsgx-flake, rust-overlay }:
    flake-utils.lib.eachDefaultSystem (system:
      let
        pkgs = import nixpkgs { inherit system; overlays = [ (import rust-overlay) nixsgx-flake.overlays.default ]; };
        rustVersion = pkgs.rust-bin.fromRustupToolchainFile ./rust-toolchain.toml;
        makeRustPlatform = pkgs.makeRustPlatform.override {
          stdenv = pkgs.stdenvAdapters.useMoldLinker pkgs.gccStdenv;
        };
        rustPlatform = makeRustPlatform {
          cargo = rustVersion;
          rustc = rustVersion;
        };

        filter = nix-filter.lib;

        bin = rustPlatform.buildRustPackage {
          pname = "teepot";
          version = "0.1.0";

          nativeBuildInputs = with pkgs; [
            pkg-config
            rustPlatform.bindgenHook
          ];

          buildInputs = with pkgs; [
            nixsgx.sgx-sdk
            nixsgx.sgx-dcap
            nixsgx.sgx-dcap.quote_verify
          ];

          src = filter {
            root = ./.;
            exclude = [
              ".github"
              ".gitignore"
              "flake.lock"
              "flake.nix"
              "LICENSE-APACHE"
              "LICENSE-MIT"
              "README.md"
              "renovate.json"
              "deny.toml"
              (filter.inDirectory "examples")
              (filter.inDirectory "vault")
            ];
          };
          RUSTFLAGS = "--cfg mio_unsupported_force_waker_pipe";
          cargoBuildFlags = "--all";
          checkType = "debug";
          cargoLock = {
            lockFile = ./Cargo.lock;
          };

          outputs = [
            "out"
            "tee_key_preexec"
            "tee_self_attestation_test"
            "tee_stress_client"
            "tee_vault_admin"
            "tee_vault_unseal"
            "teepot_read"
            "teepot_write"
            "vault_admin"
            "vault_unseal"
            "verify_attestation"
          ];

          postInstall = ''
            mkdir -p $out/nix-support
            for i in $outputs; do
              [[ $i == "out" ]] && continue
              mkdir -p "''${!i}/bin"
              echo "''${!i}" >> $out/nix-support/propagated-user-env-packages
              binname=''${i//_/-}
              mv "$out/bin/$binname" "''${!i}/bin/"
            done
          '';
        };
      in
      {
        formatter = pkgs.nixpkgs-fmt;

        packages = rec {
          teepot = bin;
          default = teepot;
        };

        devShells = {
          default = pkgs.mkShell {
            inputsFrom = [ bin ];
            nativeBuildInputs = with pkgs; [
              rustup
              rustVersion
            ];
          };
        };
      });
}
3
rust-toolchain.toml
Normal file
@@ -0,0 +1,3 @@
[toolchain]
channel = "1.75"
components = ["rustfmt", "clippy", "rust-src"]
305
src/client/mod.rs
Normal file
|
@ -0,0 +1,305 @@
|
|||
// SPDX-License-Identifier: Apache-2.0
|
||||
// Copyright (c) 2023-2024 Matter Labs
|
||||
|
||||
//! Helper functions for CLI clients to verify Intel SGX enclaves and other TEEs.
|
||||
|
||||
#![deny(missing_docs)]
|
||||
#![deny(clippy::all)]
|
||||
|
||||
pub mod vault;
|
||||
|
||||
use crate::json::http::AttestationResponse;
|
||||
use crate::sgx::Collateral;
|
||||
pub use crate::sgx::{
|
||||
parse_tcb_levels, sgx_ql_qv_result_t, verify_quote_with_collateral, EnumSet,
|
||||
QuoteVerificationResult, TcbLevel,
|
||||
};
|
||||
use actix_web::http::header;
|
||||
use anyhow::{anyhow, bail, Context, Result};
|
||||
use awc::{Client, Connector};
|
||||
use clap::Args;
|
||||
use rustls::client::danger::{HandshakeSignatureValid, ServerCertVerifier};
|
||||
use rustls::client::WebPkiServerVerifier;
|
||||
use rustls::pki_types::{CertificateDer, ServerName, UnixTime};
|
||||
use rustls::{ClientConfig, DigitallySignedStruct, Error, SignatureScheme};
|
||||
use serde_json::Value;
|
||||
use sha2::{Digest, Sha256};
|
||||
use std::sync::{Arc, OnceLock};
|
||||
use std::time;
|
||||
use std::time::Duration;
|
||||
use tracing::{error, info, warn};
|
||||
use x509_cert::der::{Decode as _, Encode as _};
|
||||
use x509_cert::Certificate;
|
||||
|
||||
/// Options and arguments needed to attest a TEE
|
||||
#[derive(Args, Debug, Clone)]
|
||||
pub struct AttestationArgs {
|
||||
/// hex encoded SGX mrsigner of the enclave to attest
|
||||
#[arg(long)]
|
||||
pub sgx_mrsigner: Option<String>,
|
||||
/// hex encoded SGX mrenclave of the enclave to attest
|
||||
#[arg(long)]
|
||||
pub sgx_mrenclave: Option<String>,
|
||||
/// URL of the server
|
||||
#[arg(long, required = true)]
|
||||
pub server: String,
|
||||
/// allowed TCB levels, comma separated:
|
||||
/// Ok, ConfigNeeded, ConfigAndSwHardeningNeeded, SwHardeningNeeded, OutOfDate, OutOfDateConfigNeeded
|
||||
#[arg(long, value_parser = parse_tcb_levels)]
|
||||
pub sgx_allowed_tcb_levels: Option<EnumSet<TcbLevel>>,
|
||||
}
|
||||
|
||||
/// A connection to a TEE, which implements the `teepot` attestation API
|
||||
pub struct TeeConnection {
|
||||
/// Options and arguments needed to attest a TEE
|
||||
server: String,
|
||||
client: Client,
|
||||
}
|
||||
|
||||
impl TeeConnection {
|
||||
/// Create a new connection to a TEE
|
||||
///
|
||||
/// This will verify the attestation report and check that the enclave
|
||||
/// is running the expected code.
|
||||
pub async fn new(args: &AttestationArgs, attestation_url: &str) -> Result<Self> {
|
||||
let pk_hash = Arc::new(OnceLock::new());
|
||||
|
||||
let tls_config = Arc::new(
|
||||
ClientConfig::builder()
|
||||
.dangerous()
|
||||
.with_custom_certificate_verifier(Arc::new(Self::make_verifier(pk_hash.clone())))
|
||||
.with_no_client_auth(),
|
||||
);
|
||||
|
||||
let agent = Client::builder()
|
||||
.add_default_header((header::USER_AGENT, "teepot/1.0"))
|
||||
// a "connector" wraps the stream into an encrypted connection
|
||||
.connector(Connector::new().rustls_0_22(tls_config))
|
||||
.timeout(Duration::from_secs(12000))
|
||||
.finish();
|
||||
|
||||
let this = Self {
|
||||
server: args.server.clone(),
|
||||
client: agent,
|
||||
};
|
||||
|
||||
this.check_attestation(args, attestation_url, pk_hash)
|
||||
.await?;
|
||||
|
||||
Ok(this)
|
||||
}
|
||||
|
||||
/// Create a new connection to a TEE
|
||||
///
|
||||
/// # Safety
|
||||
/// This function is unsafe, because it does not verify the attestation report.
|
||||
pub unsafe fn new_from_client_without_attestation(server: String, client: Client) -> Self {
|
||||
Self { server, client }
|
||||
}
|
||||
|
||||
/// Get a reference to the agent, which can be used to make requests to the TEE
|
||||
///
|
||||
/// Note, that it will refuse to connect to any other TLS server than the one
|
||||
/// specified in the `AttestationArgs` of the `Self::new` function.
|
||||
pub fn client(&self) -> &Client {
|
||||
&self.client
|
||||
}
|
||||
|
||||
/// Get a reference to the server URL
|
||||
pub fn server(&self) -> &str {
|
||||
&self.server
|
||||
}
|
||||
|
||||
async fn check_attestation(
|
||||
&self,
|
||||
args: &AttestationArgs,
|
||||
attestation_url: &str,
|
||||
pk_hash: Arc<OnceLock<[u8; 32]>>,
|
||||
) -> Result<()> {
|
||||
info!("Getting attestation report");
|
||||
|
||||
let mut response = self
|
||||
.client
|
||||
.get(&format!("{}{attestation_url}", args.server))
|
||||
.send()
|
||||
.await
|
||||
.map_err(|e| anyhow!("Error sending attestation request: {}", e))?;
|
||||
|
||||
let status_code = response.status();
|
||||
if !status_code.is_success() {
|
||||
error!("Failed to get attestation: {}", status_code);
|
||||
if let Ok(r) = response.json::<Value>().await {
|
||||
eprintln!("Failed to get attestation: {}", r);
|
||||
}
|
||||
bail!("failed to get attestation: {}", status_code);
|
||||
}
|
||||
|
||||
let attestation: AttestationResponse =
|
||||
response.json().await.context("failed to get attestation")?;
|
||||
|
||||
let current_time: i64 = time::SystemTime::now()
|
||||
.duration_since(time::UNIX_EPOCH)
|
||||
.unwrap()
|
||||
.as_secs() as _;
|
||||
|
||||
info!("Verifying attestation report");
|
||||
|
||||
let quote: &[u8] = &attestation.quote;
|
||||
let collateral: Option<&Collateral> = Some(&attestation.collateral);
|
||||
let pk_hash = pk_hash.get().unwrap();
|
||||
|
||||
Self::check_attestation_args(args, current_time, quote, collateral, pk_hash)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Check the attestation report against `AttestationArgs`
|
||||
pub fn check_attestation_args(
|
||||
args: &AttestationArgs,
|
||||
current_time: i64,
|
||||
quote: &[u8],
|
||||
collateral: Option<&Collateral>,
|
||||
pk_hash: &[u8; 32],
|
||||
) -> Result<()> {
|
||||
let QuoteVerificationResult {
|
||||
collateral_expired,
|
||||
result,
|
||||
quote,
|
||||
advisories,
|
||||
..
|
||||
} = verify_quote_with_collateral(quote, collateral, current_time).unwrap();
|
||||
|
||||
if collateral_expired || !matches!(result, sgx_ql_qv_result_t::SGX_QL_QV_RESULT_OK) {
|
||||
if collateral_expired {
|
||||
error!("Collateral is out of date!");
|
||||
bail!("Collateral is out of date!");
|
||||
}
|
||||
|
||||
let tcblevel = TcbLevel::from(result);
|
||||
if args
|
||||
.sgx_allowed_tcb_levels
|
||||
.map_or(true, |levels| !levels.contains(tcblevel))
|
||||
{
|
||||
error!("Quote verification result: {}", tcblevel);
|
||||
bail!("Quote verification result: {}", tcblevel);
|
||||
}
|
||||
|
||||
info!("TcbLevel is allowed: {}", tcblevel);
|
||||
}
|
||||
|
||||
for advisory in advisories {
|
||||
warn!("Info: Advisory ID: {advisory}");
|
||||
}
|
||||
|
||||
if &quote.report_body.reportdata[..32] != pk_hash {
|
||||
error!("Report data mismatch");
|
||||
bail!("Report data mismatch");
|
||||
} else {
|
||||
info!(
|
||||
"Report data matches `{}`",
|
||||
hex::encode(&quote.report_body.reportdata[..32])
|
||||
);
|
||||
}
|
||||
|
||||
if let Some(mrsigner) = &args.sgx_mrsigner {
|
||||
let mrsigner_bytes = hex::decode(mrsigner).context("Failed to decode mrsigner")?;
|
||||
if quote.report_body.mrsigner[..] != mrsigner_bytes {
|
||||
bail!(
|
||||
"mrsigner mismatch: got {}, expected {}",
|
||||
hex::encode(quote.report_body.mrsigner),
|
||||
&mrsigner
|
||||
);
|
||||
} else {
|
||||
info!("mrsigner `{mrsigner}` matches");
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(mrenclave) = &args.sgx_mrenclave {
|
||||
let mrenclave_bytes = hex::decode(mrenclave).context("Failed to decode mrenclave")?;
|
||||
if quote.report_body.mrenclave[..] != mrenclave_bytes {
|
||||
bail!(
|
||||
"mrenclave mismatch: got {}, expected {}",
|
||||
hex::encode(quote.report_body.mrenclave),
|
||||
&mrenclave
|
||||
);
|
||||
} else {
|
||||
info!("mrenclave `{mrenclave}` matches");
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Save the hash of the public server key to `REPORT_DATA` to check
|
||||
/// the attestations against it and it does not change on reconnect.
|
||||
pub fn make_verifier(pk_hash: Arc<OnceLock<[u8; 32]>>) -> impl ServerCertVerifier {
|
||||
#[derive(Debug)]
|
||||
struct V {
|
||||
pk_hash: Arc<OnceLock<[u8; 32]>>,
|
||||
server_verifier: Arc<WebPkiServerVerifier>,
|
||||
}
|
||||
impl ServerCertVerifier for V {
|
||||
fn verify_server_cert(
|
||||
&self,
|
||||
end_entity: &CertificateDer,
|
||||
_intermediates: &[CertificateDer],
|
||||
_server_name: &ServerName,
|
||||
_ocsp_response: &[u8],
|
||||
_now: UnixTime,
|
||||
) -> Result<rustls::client::danger::ServerCertVerified, rustls::Error> {
|
||||
let cert = Certificate::from_der(end_entity.as_ref())
|
||||
.map_err(|e| Error::General(format!("Failed get certificate {e:?}")))?;
|
||||
let pub_key = cert
|
||||
.tbs_certificate
|
||||
.subject_public_key_info
|
||||
.to_der()
|
||||
.unwrap();
|
||||
|
||||
let hash = Sha256::digest(pub_key);
|
||||
let data = self.pk_hash.get_or_init(|| hash[..32].try_into().unwrap());
|
||||
|
||||
if data == &hash[..32] {
|
||||
info!(
|
||||
"Checked or set server certificate public key hash `{}`",
|
||||
hex::encode(&hash[..32])
|
||||
);
|
||||
Ok(rustls::client::danger::ServerCertVerified::assertion())
|
||||
} else {
|
||||
error!("Server certificate does not match expected certificate");
|
||||
Err(rustls::Error::General(
|
||||
"Server certificate does not match expected certificate".to_string(),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
fn verify_tls12_signature(
|
||||
&self,
|
||||
message: &[u8],
|
||||
cert: &CertificateDer<'_>,
|
||||
dss: &DigitallySignedStruct,
|
||||
) -> std::result::Result<HandshakeSignatureValid, Error> {
|
||||
self.server_verifier
|
||||
.verify_tls12_signature(message, cert, dss)
|
||||
}
|
||||
|
||||
fn verify_tls13_signature(
|
||||
&self,
|
||||
message: &[u8],
|
||||
cert: &CertificateDer<'_>,
|
||||
dss: &DigitallySignedStruct,
|
||||
) -> std::result::Result<HandshakeSignatureValid, Error> {
|
||||
self.server_verifier
|
||||
.verify_tls13_signature(message, cert, dss)
|
||||
}
|
||||
|
||||
fn supported_verify_schemes(&self) -> Vec<SignatureScheme> {
|
||||
self.server_verifier.supported_verify_schemes()
|
||||
}
|
||||
}
|
||||
let root_store = Arc::new(rustls::RootCertStore::empty());
|
||||
let server_verifier = WebPkiServerVerifier::builder(root_store).build().unwrap();
|
||||
V {
|
||||
pk_hash,
|
||||
server_verifier,
|
||||
}
|
||||
}
|
||||
}
|
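The `TeeConnection` type in src/client/mod.rs above is the entry point for CLI clients that want to attest a teepot service before trusting its TLS key. A minimal sketch of how it could be driven, assuming the crate is used as `teepot` together with `anyhow` and `actix-web`; the server URL is a placeholder and the MRSIGNER value is the one used in the example docker-compose environment:

// Sketch only: attest a teepot service and keep the verified connection.
// Assumes the `teepot`, `anyhow` and `actix-web` crates; values are placeholders.
use anyhow::Result;
use teepot::client::{parse_tcb_levels, AttestationArgs, TeeConnection};
use teepot::json::http::ATTESTATION_URL;

#[actix_web::main]
async fn main() -> Result<()> {
    let args = AttestationArgs {
        // MRSIGNER taken from the example docker-compose environment above
        sgx_mrsigner: Some(
            "c5591a72b8b86e0d8814d6e8750e3efe66aea2d102b8ba2405365559b858697d".into(),
        ),
        sgx_mrenclave: None,
        // placeholder endpoint of the service to attest
        server: "https://127.0.0.1:8443".into(),
        // accept the same TCB level the examples allow
        sgx_allowed_tcb_levels: parse_tcb_levels("SwHardeningNeeded").ok(),
    };

    // Fails if the quote, collateral, MRSIGNER or TCB level do not check out.
    let conn = TeeConnection::new(&args, ATTESTATION_URL).await?;
    println!("attested TEE at {}", conn.server());
    Ok(())
}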
393
src/client/vault.rs
Normal file
|
@ -0,0 +1,393 @@
|
|||
// SPDX-License-Identifier: Apache-2.0
|
||||
// Copyright (c) 2023 Matter Labs
|
||||
|
||||
//! Helper functions for CLI clients to verify Intel SGX enclaves and other TEEs.
|
||||
|
||||
#![deny(missing_docs)]
|
||||
#![deny(clippy::all)]
|
||||
|
||||
use super::{AttestationArgs, TeeConnection};
|
||||
use crate::json::http::{AuthRequest, AuthResponse};
|
||||
use crate::server::pki::make_self_signed_cert;
|
||||
use crate::server::{AnyHowResponseError, HttpResponseError, Status};
|
||||
pub use crate::sgx::{
|
||||
parse_tcb_levels, sgx_gramine_get_quote, sgx_ql_qv_result_t, tee_qv_get_collateral,
|
||||
verify_quote_with_collateral, Collateral, EnumSet, QuoteVerificationResult, TcbLevel,
|
||||
};
|
||||
use actix_http::error::PayloadError;
|
||||
use actix_web::http::header;
|
||||
use actix_web::ResponseError;
|
||||
use anyhow::{anyhow, bail, Context, Result};
|
||||
use awc::error::{SendRequestError, StatusCode};
|
||||
use awc::{Client, ClientResponse, Connector};
|
||||
use bytes::Bytes;
|
||||
use futures_core::Stream;
|
||||
use getrandom::getrandom;
|
||||
use rustls::ClientConfig;
|
||||
use serde_json::{json, Value};
|
||||
use std::fmt::{Display, Formatter};
|
||||
use std::sync::{Arc, OnceLock};
|
||||
use std::time;
|
||||
use tracing::{debug, error, info, trace};
|
||||
|
||||
const VAULT_TOKEN_HEADER: &str = "X-Vault-Token";
|
||||
|
||||
/// Error returned when sending a request to Vault
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub enum VaultSendError {
|
||||
/// Error sending the request
|
||||
SendRequest(String),
|
||||
/// Error returned by the Vault API
|
||||
#[error(transparent)]
|
||||
Vault(#[from] HttpResponseError),
|
||||
}
|
||||
|
||||
impl Display for VaultSendError {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
VaultSendError::SendRequest(e) => write!(f, "VaultSendError: {}", e),
|
||||
VaultSendError::Vault(e) => write!(f, "VaultSendError: {}", e),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const _: () = {
|
||||
fn assert_send<T: Send>() {}
|
||||
let _ = assert_send::<VaultSendError>;
|
||||
};
|
||||
|
||||
impl From<VaultSendError> for HttpResponseError {
|
||||
fn from(value: VaultSendError) -> Self {
|
||||
match value {
|
||||
VaultSendError::SendRequest(e) => HttpResponseError::Anyhow(AnyHowResponseError {
|
||||
status_code: StatusCode::BAD_GATEWAY,
|
||||
error: anyhow!(e),
|
||||
}),
|
||||
VaultSendError::Vault(e) => e,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A connection to a Vault TEE, which implements the `teepot` attestation API
|
||||
/// called by a TEE itself. This authenticates the TEE to Vault and gets a token,
|
||||
/// which can be used to access the Vault API.
|
||||
pub struct VaultConnection {
|
||||
/// Options and arguments needed to attest Vault
|
||||
pub conn: TeeConnection,
|
||||
key_hash: [u8; 64],
|
||||
client_token: String,
|
||||
name: String,
|
||||
}
|
||||
|
||||
impl VaultConnection {
|
||||
/// Create a new connection to Vault
|
||||
///
|
||||
/// This will verify the attestation report and check that the enclave
|
||||
/// is running the expected code.
|
||||
pub async fn new(args: &AttestationArgs, name: String) -> Result<Self> {
|
||||
let pk_hash = Arc::new(OnceLock::new());
|
||||
|
||||
let (key_hash, rustls_certificate, rustls_pk) = make_self_signed_cert()?;
|
||||
|
||||
let tls_config = Arc::new(
|
||||
ClientConfig::builder()
|
||||
.dangerous()
|
||||
.with_custom_certificate_verifier(Arc::new(TeeConnection::make_verifier(
|
||||
pk_hash.clone(),
|
||||
)))
|
||||
.with_client_auth_cert(vec![rustls_certificate], rustls_pk)?,
|
||||
);
|
||||
|
||||
let client = Client::builder()
|
||||
.add_default_header((header::USER_AGENT, "teepot/1.0"))
|
||||
// a "connector" wraps the stream into an encrypted connection
|
||||
.connector(Connector::new().rustls_0_22(tls_config))
|
||||
.timeout(time::Duration::from_secs(12000))
|
||||
.finish();
|
||||
|
||||
let mut this = Self {
|
||||
name,
|
||||
key_hash,
|
||||
conn: unsafe {
|
||||
TeeConnection::new_from_client_without_attestation(args.server.clone(), client)
|
||||
},
|
||||
client_token: Default::default(),
|
||||
};
|
||||
|
||||
this.client_token = this.auth(args).await?.auth.client_token;
|
||||
|
||||
trace!("Got Token: {:#?}", &this.client_token);
|
||||
|
||||
Ok(this)
|
||||
}
|
||||
|
||||
/// create a new [`VaultConnection`] to Vault from an existing connection
|
||||
///
|
||||
/// # Safety
|
||||
/// This function is unsafe, because it does not verify the attestation report.
|
||||
pub unsafe fn new_from_client_without_attestation(
|
||||
server: String,
|
||||
client: Client,
|
||||
name: String,
|
||||
client_token: String,
|
||||
) -> Self {
|
||||
Self {
|
||||
name,
|
||||
client_token,
|
||||
conn: unsafe { TeeConnection::new_from_client_without_attestation(server, client) },
|
||||
key_hash: [0u8; 64],
|
||||
}
|
||||
}
|
||||
|
||||
/// Get a reference to the agent, which can be used to make requests to the TEE
|
||||
///
|
||||
/// Note, that it will refuse to connect to any other TLS server than the one
|
||||
/// specified in the `AttestationArgs` of the `Self::new` function.
|
||||
pub fn agent(&self) -> &Client {
|
||||
self.conn.client()
|
||||
}
|
||||
|
||||
async fn auth(&self, args: &AttestationArgs) -> Result<AuthResponse> {
|
||||
info!("Getting attestation report");
|
||||
let attestation_url = AuthRequest::URL;
|
||||
let quote = sgx_gramine_get_quote(&self.key_hash).context("Failed to get own quote")?;
|
||||
let collateral = tee_qv_get_collateral(&quote).context("Failed to get own collateral")?;
|
||||
|
||||
let mut challenge_bytes = [0u8; 32];
|
||||
getrandom(&mut challenge_bytes)?;
|
||||
let challenge = hex::encode(challenge_bytes);
|
||||
info!("Challenging Vault with: {}", challenge);
|
||||
let challenge = Some(challenge_bytes);
|
||||
|
||||
let auth_req = AuthRequest {
|
||||
name: self.name.clone(),
|
||||
tee_type: "sgx".to_string(),
|
||||
quote,
|
||||
collateral: serde_json::to_string(&collateral)?,
|
||||
challenge,
|
||||
};
|
||||
|
||||
let mut response = self
|
||||
.agent()
|
||||
.post(&format!(
|
||||
"{server}{attestation_url}",
|
||||
server = self.conn.server,
|
||||
))
|
||||
.send_json(&auth_req)
|
||||
.await
|
||||
.map_err(|e| anyhow!("Error sending attestation request: {}", e))?;
|
||||
|
||||
let status_code = response.status();
|
||||
if !status_code.is_success() {
|
||||
error!("Failed to login to vault: {}", status_code);
|
||||
if let Ok(r) = response.json::<Value>().await {
|
||||
eprintln!("Failed to login to vault: {}", r);
|
||||
}
|
||||
bail!("failed to login to vault: {}", status_code);
|
||||
}
|
||||
|
||||
let auth_response: Value = response.json().await.context("failed to login to vault")?;
|
||||
trace!(
|
||||
"Got AuthResponse: {:?}",
|
||||
serde_json::to_string(&auth_response)
|
||||
);
|
||||
|
||||
let auth_response: AuthResponse =
|
||||
serde_json::from_value(auth_response).context("Failed to parse AuthResponse")?;
|
||||
|
||||
trace!("Got AuthResponse: {:#?}", &auth_response);
|
||||
|
||||
let current_time: i64 = time::SystemTime::now()
|
||||
.duration_since(time::UNIX_EPOCH)
|
||||
.unwrap()
|
||||
.as_secs() as _;
|
||||
|
||||
info!("Verifying attestation report");
|
||||
|
||||
let collateral: Option<Collateral> =
|
||||
serde_json::from_str(&auth_response.data.collateral).ok();
|
||||
let collateral = collateral.as_ref();
|
||||
|
||||
TeeConnection::check_attestation_args(
|
||||
args,
|
||||
current_time,
|
||||
&auth_response.data.quote,
|
||||
collateral,
|
||||
&challenge_bytes,
|
||||
)
|
||||
.context("Failed to verify Vault attestation report")?;
|
||||
|
||||
Ok(auth_response)
|
||||
}
|
||||
|
||||
/// Send a put request to the vault
|
||||
pub async fn vault_put(
|
||||
&self,
|
||||
action: &str,
|
||||
url: &str,
|
||||
json: &Value,
|
||||
) -> std::result::Result<(StatusCode, Option<Value>), VaultSendError> {
|
||||
let full_url = format!("{}{url}", self.conn.server);
|
||||
info!("{action} via put {full_url}");
|
||||
debug!(
|
||||
"sending json: {:?}",
|
||||
serde_json::to_string(json).unwrap_or_default()
|
||||
);
|
||||
let res = self
|
||||
.agent()
|
||||
.put(full_url)
|
||||
.insert_header((VAULT_TOKEN_HEADER, self.client_token.clone()))
|
||||
.send_json(json)
|
||||
.await;
|
||||
Self::handle_client_response(action, res).await
|
||||
}
|
||||
|
||||
/// Send a get request to the vault
|
||||
pub async fn vault_get(
|
||||
&self,
|
||||
action: &str,
|
||||
url: &str,
|
||||
) -> std::result::Result<(StatusCode, Option<Value>), VaultSendError> {
|
||||
let full_url = format!("{}{url}", self.conn.server);
|
||||
info!("{action} via get {full_url}");
|
||||
let res = self
|
||||
.agent()
|
||||
.get(full_url)
|
||||
.insert_header((VAULT_TOKEN_HEADER, self.client_token.clone()))
|
||||
.send()
|
||||
.await;
|
||||
Self::handle_client_response(action, res).await
|
||||
}
|
||||
|
||||
async fn handle_client_response<S>(
|
||||
action: &str,
|
||||
res: std::result::Result<ClientResponse<S>, SendRequestError>,
|
||||
) -> std::result::Result<(StatusCode, Option<Value>), VaultSendError>
|
||||
where
|
||||
S: Stream<Item = Result<Bytes, PayloadError>>,
|
||||
{
|
||||
match res {
|
||||
Ok(mut r) => {
|
||||
let status_code = r.status();
|
||||
if status_code.is_success() {
|
||||
let msg = r.json().await.ok();
|
||||
debug!(
|
||||
"{action}: status code: {status_code} {:?}",
|
||||
serde_json::to_string(&msg)
|
||||
);
|
||||
Ok((status_code, msg))
|
||||
} else {
|
||||
let err = HttpResponseError::from_proxy(r).await;
|
||||
error!("{action}: {err:?}");
|
||||
Err(err.into())
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
error!("{}: {}", action, e);
|
||||
Err(VaultSendError::SendRequest(e.to_string()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Revoke the token
|
||||
pub async fn revoke_token(&self) -> std::result::Result<(), VaultSendError> {
|
||||
self.vault_put(
|
||||
"Revoke the token",
|
||||
"/v1/auth/token/revoke-self",
|
||||
&Value::default(),
|
||||
)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn check_rel_path(rel_path: &str) -> Result<(), HttpResponseError> {
|
||||
if !rel_path.is_ascii() {
|
||||
return Err(anyhow!("path is not ascii")).status(StatusCode::BAD_REQUEST);
|
||||
}
|
||||
|
||||
// check if rel_path is alphanumeric
|
||||
if !rel_path.chars().all(|c| c.is_alphanumeric() || c == '_') {
|
||||
return Err(anyhow!("path is not alphanumeric")).status(StatusCode::BAD_REQUEST);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// set a secret in the vault
|
||||
pub async fn store_secret<'de, T: serde::Serialize>(
|
||||
&self,
|
||||
val: T,
|
||||
rel_path: &str,
|
||||
) -> Result<(), HttpResponseError> {
|
||||
self.store_secret_for_tee(&self.name, val, rel_path).await
|
||||
}
|
||||
|
||||
/// set a secret in the vault for a different TEE
|
||||
pub async fn store_secret_for_tee<'de, T: serde::Serialize>(
|
||||
&self,
|
||||
tee_name: &str,
|
||||
val: T,
|
||||
rel_path: &str,
|
||||
) -> Result<(), HttpResponseError> {
|
||||
Self::check_rel_path(rel_path)?;
|
||||
|
||||
let value = serde_json::to_value(val)
|
||||
.context("converting value to json")
|
||||
.status(StatusCode::INTERNAL_SERVER_ERROR)?;
|
||||
|
||||
let value = json!({ "data" : { "_" : value } });
|
||||
|
||||
self.vault_put(
|
||||
"Setting secret",
|
||||
&format!("/v1/secret/data/tee/{}/{}", tee_name, rel_path),
|
||||
&value,
|
||||
)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// get a secret from the vault
|
||||
pub async fn load_secret<'de, T: serde::de::DeserializeOwned>(
|
||||
&self,
|
||||
rel_path: &str,
|
||||
) -> Result<Option<T>, HttpResponseError> {
|
||||
self.load_secret_for_tee(&self.name, rel_path).await
|
||||
}
|
||||
|
||||
/// get a secret from the vault for a specific TEE
|
||||
pub async fn load_secret_for_tee<'de, T: serde::de::DeserializeOwned>(
|
||||
&self,
|
||||
tee_name: &str,
|
||||
rel_path: &str,
|
||||
) -> Result<Option<T>, HttpResponseError> {
|
||||
Self::check_rel_path(rel_path)?;
|
||||
let v = self
|
||||
.vault_get(
|
||||
"Getting secret",
|
||||
&format!("/v1/secret/data/tee/{}/{}", tee_name, rel_path),
|
||||
)
|
||||
.await
|
||||
.or_else(|e| match e {
|
||||
VaultSendError::Vault(ref se) => {
|
||||
if se.status_code() == StatusCode::NOT_FOUND {
|
||||
debug!("Secret not found: {}", rel_path);
|
||||
Ok((StatusCode::OK, None))
|
||||
} else {
|
||||
Err(e)
|
||||
}
|
||||
}
|
||||
VaultSendError::SendRequest(_) => Err(e),
|
||||
})?
|
||||
.1
|
||||
.as_ref()
|
||||
.and_then(|v| v.get("data"))
|
||||
.and_then(|v| v.get("data"))
|
||||
.and_then(|v| v.get("_"))
|
||||
.and_then(|v| serde_json::from_value(v.clone()).transpose())
|
||||
.transpose()
|
||||
.context("Error getting value from vault")
|
||||
.status(StatusCode::INTERNAL_SERVER_ERROR)?
|
||||
.flatten();
|
||||
Ok(v)
|
||||
}
|
||||
}
|
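`VaultConnection` in src/client/vault.rs is the TEE-side counterpart: it attests Vault, proves the caller's own identity with an SGX quote via `/v1/auth/tee/login`, and then reads and writes per-TEE secrets with the obtained token. A rough sketch of that flow, assuming the code runs under Gramine (so the quote can be produced) and that `teepot`, `anyhow` and `serde` are available; the TEE name, path and secret type are illustrative only:

// Sketch only: storing and loading a per-TEE secret through VaultConnection.
use anyhow::{anyhow, Result};
use serde::{Deserialize, Serialize};
use teepot::client::vault::VaultConnection;
use teepot::client::AttestationArgs;

#[derive(Debug, Serialize, Deserialize, PartialEq)]
struct MySecret {
    api_key: String,
}

async fn demo(args: &AttestationArgs) -> Result<()> {
    // Attests Vault, authenticates this enclave and obtains a client token.
    let vault = VaultConnection::new(args, "my_tee".to_string()).await?;

    let secret = MySecret { api_key: "hypothetical".into() };
    // Ends up under /v1/secret/data/tee/my_tee/config
    vault
        .store_secret(&secret, "config")
        .await
        .map_err(|e| anyhow!("{e:?}"))?;

    let loaded: Option<MySecret> = vault
        .load_secret("config")
        .await
        .map_err(|e| anyhow!("{e:?}"))?;
    assert_eq!(loaded, Some(secret));
    Ok(())
}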
280
src/json/http.rs
Normal file
|
@ -0,0 +1,280 @@
|
|||
// SPDX-License-Identifier: Apache-2.0
|
||||
// Copyright (c) 2023 Matter Labs
|
||||
|
||||
//! Common types for the teepot http JSON API
|
||||
|
||||
use crate::sgx::Collateral;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::Value;
|
||||
use serde_with::base64::Base64;
|
||||
use serde_with::serde_as;
|
||||
use std::fmt::Display;
|
||||
use std::sync::Arc;
|
||||
|
||||
/// The unseal request data
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct Unseal {
|
||||
/// The unseal key
|
||||
pub key: String,
|
||||
}
|
||||
|
||||
impl Unseal {
|
||||
/// The unseal URL
|
||||
pub const URL: &'static str = "/v1/sys/unseal";
|
||||
}
|
||||
|
||||
/// The attestation URL
|
||||
pub const ATTESTATION_URL: &str = "/v1/sys/attestation";
|
||||
|
||||
/// The attestation response
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct AttestationResponse {
|
||||
/// The quote
|
||||
pub quote: Arc<[u8]>,
|
||||
/// The collateral
|
||||
pub collateral: Arc<Collateral>,
|
||||
}
|
||||
|
||||
/// The init request data
|
||||
#[serde_as]
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct Init {
|
||||
/// PGP keys to encrypt the unseal keys with
|
||||
pub pgp_keys: Vec<String>,
|
||||
/// number of secret shares
|
||||
pub secret_shares: usize,
|
||||
/// secret threshold
|
||||
pub secret_threshold: usize,
|
||||
/// PGP keys to sign commands for the admin tee
|
||||
#[serde_as(as = "Box<[Base64]>")]
|
||||
pub admin_pgp_keys: Box<[Box<[u8]>]>,
|
||||
/// admin threshold
|
||||
pub admin_threshold: usize,
|
||||
/// admin TEE mrenclave
|
||||
pub admin_tee_mrenclave: String,
|
||||
}
|
||||
|
||||
impl Init {
|
||||
/// The init URL
|
||||
pub const URL: &'static str = "/v1/sys/init";
|
||||
}
|
||||
|
||||
/// The init request data
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct VaultInitRequest {
|
||||
/// PGP keys to encrypt the unseal keys with
|
||||
pub pgp_keys: Vec<String>,
|
||||
/// number of secret shares
|
||||
pub secret_shares: usize,
|
||||
/// secret threshold
|
||||
pub secret_threshold: usize,
|
||||
}
|
||||
|
||||
/// The init response data
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct InitResponse {
|
||||
/// The unseal keys (gpg encrypted)
|
||||
pub unseal_keys: Vec<String>,
|
||||
}
|
||||
|
||||
/// The Vault TEE auth request data
|
||||
#[serde_as]
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct AuthRequest {
|
||||
/// The name of the TEE
|
||||
pub name: String,
|
||||
/// The type of the TEE
|
||||
#[serde(rename = "type")]
|
||||
pub tee_type: String,
|
||||
/// The attestation report data base64 encoded
|
||||
#[serde_as(as = "Base64")]
|
||||
pub quote: Box<[u8]>,
|
||||
/// The attestation collateral json encoded
|
||||
pub collateral: String,
|
||||
/// The vault attestation challenge (hex encoded)
|
||||
#[serde_as(as = "Option<serde_with::hex::Hex>")]
|
||||
#[serde(skip_serializing_if = "Option::is_none", default = "Option::default")]
|
||||
pub challenge: Option<[u8; 32]>,
|
||||
}
|
||||
|
||||
impl AuthRequest {
|
||||
/// The auth URL
|
||||
pub const URL: &'static str = "/v1/auth/tee/login";
|
||||
}
|
||||
|
||||
/// Vault auth metadata
|
||||
#[derive(Debug, Default, Serialize, Deserialize)]
|
||||
pub struct AuthMetadataField {
|
||||
collateral_expiration_date: String,
|
||||
tee_name: String,
|
||||
}
|
||||
|
||||
/// Vault auth data
|
||||
#[serde_as]
|
||||
#[derive(Debug, Default, Serialize, Deserialize)]
|
||||
pub struct AuthDataField {
|
||||
/// The attestation report data base64 encoded
|
||||
#[serde_as(as = "Base64")]
|
||||
#[serde(default)]
|
||||
pub quote: Box<[u8]>,
|
||||
/// The attestation collateral json encoded
|
||||
#[serde(default)]
|
||||
pub collateral: String,
|
||||
}
|
||||
|
||||
/// Vault auth
|
||||
#[derive(Debug, Default, Serialize, Deserialize)]
|
||||
pub struct AuthField {
|
||||
/// TODO
|
||||
pub renewable: bool,
|
||||
/// TODO
|
||||
pub lease_duration: isize,
|
||||
/// TODO
|
||||
pub policies: Vec<String>,
|
||||
/// TODO
|
||||
pub accessor: String,
|
||||
/// TODO
|
||||
pub client_token: String,
|
||||
/// TODO
|
||||
pub metadata: AuthMetadataField,
|
||||
}
|
||||
|
||||
/// The Vault TEE auth response data
|
||||
#[derive(Debug, Default, Serialize, Deserialize)]
|
||||
pub struct AuthResponse {
|
||||
/// vault auth
|
||||
pub auth: AuthField,
|
||||
///
|
||||
pub data: AuthDataField,
|
||||
}
|
||||
|
||||
/// One command datum
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
pub struct VaultCommand {
|
||||
/// The command to execute
|
||||
pub url: String,
|
||||
/// The command to execute
|
||||
pub data: Value,
|
||||
}
|
||||
|
||||
impl Display for VaultCommand {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
if f.alternate() {
|
||||
f.write_str(
|
||||
serde_json::to_string_pretty(self)
|
||||
.unwrap_or("{}".into())
|
||||
.as_str(),
|
||||
)
|
||||
} else {
|
||||
f.write_str(serde_json::to_string(self).unwrap_or("{}".into()).as_str())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Multiple command data
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
pub struct VaultCommands {
|
||||
/// The sha-256 hash of the last command hex encoded
|
||||
pub last_digest: String,
|
||||
/// The actual commands
|
||||
pub commands: Vec<VaultCommand>,
|
||||
}
|
||||
|
||||
/// The command request data
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct VaultCommandRequest {
|
||||
/// The commands to execute
|
||||
///
|
||||
/// The commands are json serialized `VaultCommands`,
|
||||
/// because they are signed with multiple signatures.
|
||||
///
|
||||
/// The commands are executed in order.
|
||||
pub commands: String,
|
||||
/// The signatures of the commands
|
||||
pub signatures: Vec<String>,
|
||||
}
|
||||
|
||||
impl VaultCommandRequest {
|
||||
/// The command request URL
|
||||
pub const URL: &'static str = "/v1/command";
|
||||
}
|
||||
|
||||
/// The command response
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct VaultCommandResponse {
|
||||
/// The status code
|
||||
pub status_code: u16,
|
||||
/// The response body
|
||||
pub value: Option<Value>,
|
||||
}
|
||||
|
||||
/// The command response
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct VaultCommandsResponse {
|
||||
/// The stored digest for the execution
|
||||
pub digest: String,
|
||||
/// The results of the individual commands
|
||||
pub results: Vec<VaultCommandResponse>,
|
||||
}
|
||||
|
||||
impl Display for VaultCommandResponse {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
if f.alternate() {
|
||||
f.write_str(
|
||||
serde_json::to_string_pretty(self)
|
||||
.unwrap_or("{}".into())
|
||||
.as_str(),
|
||||
)
|
||||
} else {
|
||||
f.write_str(serde_json::to_string(self).unwrap_or("{}".into()).as_str())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// The command request URL
|
||||
pub const DIGEST_URL: &str = "/v1/digest";
|
||||
|
||||
/// The signing request
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct SignRequest {
|
||||
/// json serialized `SignRequestData`, because it is signed with multiple signatures.
|
||||
pub sign_request_data: String,
|
||||
/// The signatures of the SignRequestData
|
||||
pub signatures: Vec<String>,
|
||||
}
|
||||
|
||||
impl SignRequest {
|
||||
/// The sign request URL
|
||||
pub const URL: &'static str = "/v1/sign";
|
||||
}
|
||||
|
||||
/// The signing request data
|
||||
#[serde_as]
|
||||
#[derive(Debug, Default, Serialize, Deserialize)]
|
||||
pub struct SignRequestData {
|
||||
/// The sha-256 hash of the last command hex encoded
|
||||
pub last_digest: String,
|
||||
/// The name of the TEE
|
||||
pub tee_name: String,
|
||||
/// The type of the TEE
|
||||
#[serde(rename = "type")]
|
||||
pub tee_type: String,
|
||||
/// The TEE security version number
|
||||
pub tee_svn: u16,
|
||||
/// The data to be signed.
|
||||
///
|
||||
/// In case of `tee_type == "sgx"`, it's the SGX Sigstruct Body
|
||||
#[serde_as(as = "Base64")]
|
||||
pub data: Vec<u8>,
|
||||
}
|
||||
|
||||
/// The signing request
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct SignResponse {
|
||||
/// The stored digest for the execution
|
||||
pub digest: String,
|
||||
/// The signed data for the tee.
|
||||
///
|
||||
/// In case of `tee_type == "sgx"`, it's the SGX Sigstruct
|
||||
pub signed_data: Vec<u8>,
|
||||
}
|
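The command types in src/json/http.rs above are what the admin tooling signs and ships to the admin TEE: a `VaultCommands` batch is serialized to a string, detached signatures are collected over exactly that string, and both go into a `VaultCommandRequest`. A small sketch under those assumptions; the command URL, data and signature are placeholders:

// Sketch: assembling a signed command batch for the admin TEE.
use serde_json::json;
use teepot::json::http::{VaultCommand, VaultCommandRequest, VaultCommands};

fn main() -> serde_json::Result<()> {
    let commands = VaultCommands {
        // hex digest of the previously executed batch ("" for the very first one)
        last_digest: "".into(),
        commands: vec![VaultCommand {
            url: "/v1/some/vault/endpoint".into(),
            data: json!({ "example": true }),
        }],
    };

    // The detached signatures must be made over this exact serialization.
    let commands_json = serde_json::to_string(&commands)?;
    let request = VaultCommandRequest {
        commands: commands_json,
        signatures: vec!["<detached pgp signature>".into()],
    };
    println!("{}", serde_json::to_string_pretty(&request)?);
    Ok(())
}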
10
src/json/mod.rs
Normal file
@@ -0,0 +1,10 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright (c) 2023 Matter Labs

//! Common types for the teepot JSON API

#![deny(missing_docs)]
#![deny(clippy::all)]

pub mod http;
pub mod secrets;
34
src/json/secrets.rs
Normal file
@@ -0,0 +1,34 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright (c) 2023 Matter Labs

//! Common types for the teepot secrets JSON API

use crate::sgx::sign::Zeroizing;
use serde::{Deserialize, Serialize};
use serde_with::base64::Base64;
use serde_with::serde_as;

/// Configuration for the admin tee
#[serde_as]
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct AdminConfig {
    /// PGP keys to sign commands for the admin tee
    #[serde_as(as = "Box<[Base64]>")]
    pub admin_pgp_keys: Box<[Box<[u8]>]>,
    /// admin threshold
    pub admin_threshold: usize,
}

/// Configuration for the admin tee
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct AdminState {
    /// last digest of executed commands
    pub last_digest: String,
}

/// Configuration for the admin tee
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct SGXSigningKey {
    /// private key in PEM format
    pub pem_pk: Zeroizing<String>,
}
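Since `AdminConfig` base64-encodes the PGP key blobs via `serde_as`, it round-trips cleanly through JSON; a tiny sketch, with dummy key bytes standing in for real key material:

// Sketch: JSON round trip of AdminConfig with base64-encoded key material.
use teepot::json::secrets::AdminConfig;

fn main() -> serde_json::Result<()> {
    let cfg = AdminConfig {
        admin_pgp_keys: vec![b"dummy pgp key".to_vec().into_boxed_slice()].into(),
        admin_threshold: 1,
    };
    let encoded = serde_json::to_string_pretty(&cfg)?;
    println!("{encoded}");
    let decoded: AdminConfig = serde_json::from_str(&encoded)?;
    assert_eq!(decoded.admin_threshold, cfg.admin_threshold);
    Ok(())
}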
14
src/lib.rs
Normal file
@@ -0,0 +1,14 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright (c) 2023 Matter Labs

//! Helper functions to verify Intel SGX enclaves and other TEEs.

#![deny(missing_docs)]
#![deny(clippy::all)]

pub mod client;
pub mod json;
pub mod server;
pub mod sgx;

pub mod quote;
40
src/quote/mod.rs
Normal file
40
src/quote/mod.rs
Normal file
|
@ -0,0 +1,40 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright (c) 2023-2024 Matter Labs

//! Get a quote from a TEE

use crate::sgx::sgx_gramine_get_quote;
use std::io;

#[derive(Debug, thiserror::Error)]
#[allow(missing_docs)]
#[error("{msg}")]
pub struct GetQuoteError {
    pub(crate) msg: Box<str>,
    #[source] // optional if field name is `source`
    pub(crate) source: io::Error,
}

/// Get the attestation quote from a TEE
pub fn get_quote(report_data: &[u8]) -> Result<Box<[u8]>, GetQuoteError> {
    // check if we are running in a TEE
    if std::fs::metadata("/dev/attestation").is_ok() {
        if report_data.len() > 64 {
            return Err(GetQuoteError {
                msg: "Report data too long".into(),
                source: io::Error::new(io::ErrorKind::Other, "Report data too long"),
            });
        }

        let mut report_data_fixed = [0u8; 64];
        report_data_fixed[..report_data.len()].copy_from_slice(report_data);

        sgx_gramine_get_quote(&report_data_fixed)
    } else {
        // if not, return an error
        Err(GetQuoteError {
            msg: "Not running in a TEE".into(),
            source: io::Error::new(io::ErrorKind::Other, "Not running in a TEE"),
        })
    }
}
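Illustrative sketch (not part of this diff) of calling `get_quote`; it assumes the code runs inside a Gramine SGX enclave where `/dev/attestation` exists, and that the crate is imported as `teepot`.

use teepot::quote::get_quote;

fn quote_example() -> Result<(), Box<dyn std::error::Error>> {
    // Up to 64 bytes of caller-chosen data get bound into the quote,
    // e.g. the hash of a TLS public key.
    let report_data = [0u8; 64];
    let quote = get_quote(&report_data)?;
    println!("got a {} byte quote", quote.len());
    Ok(())
}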
147
src/server/attestation.rs
Normal file
147
src/server/attestation.rs
Normal file
|
@ -0,0 +1,147 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright (c) 2023 Matter Labs

//! Common attestation API for all TEEs

use crate::client::AttestationArgs;
use crate::json::http::AttestationResponse;
use crate::sgx::{
    parse_tcb_levels, sgx_gramine_get_quote, tee_qv_get_collateral, verify_quote_with_collateral,
    Collateral, EnumSet, QuoteVerificationResult, TcbLevel,
};
use anyhow::{bail, Context, Result};
use clap::Args;
use serde::{Deserialize, Serialize};
use std::sync::{Arc, RwLock};
use std::time::{Duration, UNIX_EPOCH};
use tracing::{debug, error, info, trace, warn};

struct Attestation {
    quote: Arc<[u8]>,
    collateral: Arc<Collateral>,
    report_data: [u8; 64],
    earliest_expiration_date: i64,
}

/// Returns the quote and collateral for the current TEE.
///
/// If `allowed_tcb_levels` is `None`, then any TCB level is accepted.
/// Otherwise, the quote is verified and its TCB level must be
/// within the allowed TCB levels.
pub fn get_quote_and_collateral(
    allowed_tcb_levels: Option<EnumSet<TcbLevel>>,
    report_data: &[u8; 64],
) -> Result<AttestationResponse> {
    static ATTESTATION: RwLock<Option<Attestation>> = RwLock::new(None);

    let unix_time: i64 = std::time::SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap()
        .as_secs() as _;

    if let Some(attestation) = ATTESTATION.read().unwrap().as_ref() {
        trace!(attestation.earliest_expiration_date);

        if attestation.earliest_expiration_date > unix_time.saturating_add(60)
            && report_data.eq(&attestation.report_data)
        {
            debug!("return cached attestation quote and collateral");
            return Ok(AttestationResponse {
                quote: attestation.quote.clone(),
                collateral: attestation.collateral.clone(),
            });
        }
    }

    let myquote = sgx_gramine_get_quote(report_data).context("Failed to get own quote")?;
    let collateral = tee_qv_get_collateral(&myquote).context("Failed to get own collateral")?;

    let QuoteVerificationResult {
        collateral_expired,
        result,
        earliest_expiration_date,
        tcb_level_date_tag,
        quote,
        advisories,
    } = verify_quote_with_collateral(&myquote, Some(&collateral), unix_time.saturating_add(60))
        .context("Failed to verify own quote with collateral")?;

    debug!(tcb_level_date_tag);

    if collateral_expired {
        bail!("Freshly fetched collateral expired");
    }

    let tcblevel = TcbLevel::from(result);
    if tcblevel != TcbLevel::Ok
        && allowed_tcb_levels.map_or(false, |levels| !levels.contains(tcblevel))
    {
        error!("Quote verification result: {}", tcblevel);
        bail!("Quote verification result: {}", tcblevel);
    }

    for advisory in advisories {
        warn!("\tInfo: Advisory ID: {advisory}");
    }

    info!("Own quote verified successfully: {}", tcblevel);
    info!(
        "Earliest expiration in {:?}",
        Duration::from_secs((earliest_expiration_date - unix_time) as _)
    );
    info!("mrsigner: {}", hex::encode(quote.report_body.mrsigner));
    info!("mrenclave: {}", hex::encode(quote.report_body.mrenclave));

    let quote: Arc<[u8]> = Arc::from(myquote);
    let collateral = Arc::from(collateral);

    let mut attestation = ATTESTATION.write().unwrap();
    *attestation = Some(Attestation {
        quote: quote.clone(),
        collateral: collateral.clone(),
        report_data: *report_data,
        earliest_expiration_date,
    });

    Ok(AttestationResponse { quote, collateral })
}

/// Options and arguments needed to attest a TEE
#[derive(Args, Debug, Clone, Serialize, Deserialize, Default)]
pub struct VaultAttestationArgs {
    /// hex encoded SGX mrsigner of the enclave to attest
    #[arg(long, env = "VAULT_SGX_MRSIGNER")]
    pub vault_sgx_mrsigner: Option<String>,
    /// hex encoded SGX mrenclave of the enclave to attest
    #[arg(long, env = "VAULT_SGX_MRENCLAVE")]
    pub vault_sgx_mrenclave: Option<String>,
    /// URL of the server
    #[arg(long, required = true, env = "VAULT_ADDR")]
    pub vault_addr: String,
    /// allowed TCB levels, comma separated:
    /// Ok, ConfigNeeded, ConfigAndSwHardeningNeeded, SwHardeningNeeded, OutOfDate, OutOfDateConfigNeeded
    #[arg(long, value_parser = parse_tcb_levels, env = "VAULT_SGX_ALLOWED_TCB_LEVELS")]
    pub vault_sgx_allowed_tcb_levels: Option<EnumSet<TcbLevel>>,
}

impl From<VaultAttestationArgs> for AttestationArgs {
    fn from(value: VaultAttestationArgs) -> Self {
        AttestationArgs {
            sgx_mrsigner: value.vault_sgx_mrsigner,
            sgx_mrenclave: value.vault_sgx_mrenclave,
            server: value.vault_addr,
            sgx_allowed_tcb_levels: value.vault_sgx_allowed_tcb_levels,
        }
    }
}

impl From<&VaultAttestationArgs> for AttestationArgs {
    fn from(value: &VaultAttestationArgs) -> Self {
        AttestationArgs {
            sgx_mrsigner: value.vault_sgx_mrsigner.clone(),
            sgx_mrenclave: value.vault_sgx_mrenclave.clone(),
            server: value.vault_addr.clone(),
            sgx_allowed_tcb_levels: value.vault_sgx_allowed_tcb_levels,
        }
    }
}
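Illustrative sketch (not part of this diff): building `VaultAttestationArgs` in code instead of from CLI/env and converting it into the generic `AttestationArgs`; the vault URL is a placeholder and the `teepot` crate path is an assumption.

use teepot::client::AttestationArgs;
use teepot::server::attestation::VaultAttestationArgs;

fn vault_args_example() -> AttestationArgs {
    let vault_args = VaultAttestationArgs {
        vault_sgx_mrsigner: None,
        vault_sgx_mrenclave: None,
        vault_addr: "https://vault.example.com:8210".to_string(),
        vault_sgx_allowed_tcb_levels: None,
    };
    // The borrowed `From<&VaultAttestationArgs>` impl keeps the original around.
    AttestationArgs::from(&vault_args)
}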
181
src/server/mod.rs
Normal file
181
src/server/mod.rs
Normal file
|
@ -0,0 +1,181 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright (c) 2023 Matter Labs

//! # tee-server

#![deny(missing_docs)]
#![deny(clippy::all)]

pub mod attestation;
pub mod pki;
pub mod signatures;

use actix_web::http::StatusCode;
use actix_web::web::Bytes;
use actix_web::{error, HttpRequest, HttpResponse};
use actix_web::{HttpMessage, ResponseError};
use anyhow::anyhow;
use awc::error::{PayloadError, SendRequestError};
use awc::ClientResponse;
use futures_core::Stream;
use std::fmt::{Debug, Display, Formatter};
use tracing::error;

/// Anyhow error with an HTTP status code
pub struct AnyHowResponseError {
    /// error message
    pub error: anyhow::Error,
    /// HTTP status code
    pub status_code: StatusCode,
}

/// Proxy response error
pub struct ProxyResponseError {
    /// HTTP status code
    pub status_code: StatusCode,
    /// HTTP body
    pub body: Option<Bytes>,
    /// HTTP content type
    pub content_type: String,
}

/// custom HTTP response error
pub enum HttpResponseError {
    /// Anyhow error
    Anyhow(AnyHowResponseError),
    /// Proxy error
    Proxy(ProxyResponseError),
}

impl std::error::Error for HttpResponseError {}

/// Attach an HTTP status code to an anyhow error turning it into an HttpResponseError
pub trait Status {
    /// The Ok type
    type Ok;
    /// Attach an HTTP status code to an anyhow error turning it into an HttpResponseError
    fn status(self, status: StatusCode) -> Result<Self::Ok, HttpResponseError>;
}

impl<T> Status for Result<T, anyhow::Error> {
    type Ok = T;
    fn status(self, status: StatusCode) -> Result<T, HttpResponseError> {
        match self {
            Ok(value) => Ok(value),
            Err(error) => Err(HttpResponseError::new(error, status)),
        }
    }
}

impl HttpResponseError {
    fn new(error: anyhow::Error, status_code: StatusCode) -> Self {
        Self::Anyhow(AnyHowResponseError { error, status_code })
    }

    /// Create a new HTTP response error from a proxy response
    pub async fn from_proxy<S>(mut response: ClientResponse<S>) -> Self
    where
        S: Stream<Item = Result<Bytes, PayloadError>>,
    {
        let status_code = response.status();
        error!("Vault returned server error: {}", status_code);
        let body = response.body().await.ok();
        let content_type = response.content_type().to_string();
        Self::Proxy(ProxyResponseError {
            status_code,
            body,
            content_type,
        })
    }
}

impl From<&str> for HttpResponseError {
    fn from(value: &str) -> Self {
        error!("{}", value);
        HttpResponseError::new(
            anyhow!(value.to_string()),
            StatusCode::INTERNAL_SERVER_ERROR,
        )
    }
}

impl From<SendRequestError> for HttpResponseError {
    fn from(error: SendRequestError) -> Self {
        error!("Error sending request: {:?}", error);
        HttpResponseError::new(
            anyhow!(error.to_string()),
            StatusCode::INTERNAL_SERVER_ERROR,
        )
    }
}

impl Debug for HttpResponseError {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        if let Self::Anyhow(e) = self {
            if f.alternate() {
                write!(f, "{:#?}", e.error)
            } else {
                write!(f, "{:?}", e.error)
            }
        } else {
            write!(f, "HttpResponseError")
        }
    }
}

impl Display for HttpResponseError {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        if let Self::Anyhow(e) = self {
            if f.alternate() {
                write!(f, "{:#}", e.error)
            } else {
                write!(f, "{}", e.error)
            }
        } else {
            write!(f, "HttpResponseError")
        }
    }
}

impl ResponseError for HttpResponseError {
    fn status_code(&self) -> StatusCode {
        match self {
            HttpResponseError::Anyhow(e) => e.status_code,
            HttpResponseError::Proxy(e) => e.status_code,
        }
    }

    fn error_response(&self) -> HttpResponse {
        match self {
            HttpResponseError::Anyhow(e) => HttpResponse::build(self.status_code())
                .content_type("application/json")
                .body(format!(r#"{{"error":"{}"}}"#, e.error)),
            HttpResponseError::Proxy(e) => {
                if let Some(ref body) = e.body {
                    HttpResponse::build(self.status_code())
                        .content_type(e.content_type.clone())
                        .body(body.clone())
                } else {
                    HttpResponse::new(self.status_code())
                }
            }
        }
    }
}

/// Create a new json config
pub fn new_json_cfg() -> actix_web::web::JsonConfig {
    actix_web::web::JsonConfig::default()
        .limit(1024 * 1024)
        .error_handler(json_error_handler)
}

fn json_error_handler(err: error::JsonPayloadError, _: &HttpRequest) -> actix_web::Error {
    error::InternalError::from_response(
        "",
        HttpResponse::BadRequest()
            .content_type("application/json")
            .body(format!(r#"{{"error":"json error: {}"}}"#, err)),
    )
    .into()
}
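Illustrative sketch (not part of this diff): how the `Status` helper might be used inside an actix-web handler to attach an HTTP status to an `anyhow` error; the handler and config path are made up.

use actix_web::http::StatusCode;
use teepot::server::{HttpResponseError, Status as _};

fn load_config() -> anyhow::Result<String> {
    Ok(std::fs::read_to_string("/etc/example/config.json")?)
}

async fn config_handler() -> Result<String, HttpResponseError> {
    // Any anyhow error is converted into an HttpResponseError with a 500 status.
    load_config().status(StatusCode::INTERNAL_SERVER_ERROR)
}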
215
src/server/pki.rs
Normal file
215
src/server/pki.rs
Normal file
|
@ -0,0 +1,215 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright (c) 2023-2024 Matter Labs

//! Some cryptographic utilities

pub use crate::sgx::{
    parse_tcb_levels, sgx_ql_qv_result_t, verify_quote_with_collateral, EnumSet,
    QuoteVerificationResult, TcbLevel,
};
use anyhow::{anyhow, Context, Result};
use const_oid::db::rfc5280::{
    ID_CE_BASIC_CONSTRAINTS, ID_CE_EXT_KEY_USAGE, ID_CE_KEY_USAGE, ID_KP_CLIENT_AUTH,
    ID_KP_SERVER_AUTH,
};
use const_oid::db::rfc5912::SECP_256_R_1;
use getrandom::getrandom;
use pkcs8::der::asn1::OctetString;
use pkcs8::der::referenced::OwnedToRef;
use pkcs8::der::referenced::RefToOwned;
use pkcs8::{
    AlgorithmIdentifierRef, ObjectIdentifier, PrivateKeyInfo, SubjectPublicKeyInfo,
    SubjectPublicKeyInfoRef,
};
use rustls::pki_types::PrivatePkcs8KeyDer;
use sec1::EcPrivateKey;
use sha2::{Digest, Sha256};
use std::str::FromStr;
use std::time::Duration;
use x509_cert::der::asn1::BitString;
use x509_cert::der::{Decode as _, Encode as _};
use x509_cert::ext::pkix::{BasicConstraints, ExtendedKeyUsage, KeyUsage, KeyUsages};
use x509_cert::name::RdnSequence;
use x509_cert::serial_number::SerialNumber;
use x509_cert::time::Validity;
use x509_cert::{Certificate, TbsCertificate};
use zeroize::Zeroizing;

use const_oid::db::rfc5912::{
    ECDSA_WITH_SHA_256, ECDSA_WITH_SHA_384, ID_EC_PUBLIC_KEY as ECPK, SECP_256_R_1 as P256,
    SECP_384_R_1 as P384,
};
use pkcs8::der::asn1::BitStringRef;

const ES256: AlgorithmIdentifierRef<'static> = AlgorithmIdentifierRef {
    oid: ECDSA_WITH_SHA_256,
    parameters: None,
};

const ES384: AlgorithmIdentifierRef<'static> = AlgorithmIdentifierRef {
    oid: ECDSA_WITH_SHA_384,
    parameters: None,
};

/// Utility trait for signing with a private key
pub trait PrivateKeyInfoExt {
    /// Generates a keypair
    ///
    /// Returns the DER encoding of the `PrivateKeyInfo` type.
    fn generate(oid: ObjectIdentifier) -> Result<Zeroizing<Vec<u8>>>;

    /// Get the public key
    ///
    /// This function creates a `SubjectPublicKeyInfo` which corresponds with
    /// this private key. Note that this function does not do any cryptographic
    /// calculations. It expects that the `PrivateKeyInfo` already contains the
    /// public key.
    fn public_key(&self) -> Result<SubjectPublicKeyInfoRef<'_>>;

    /// Get the default signing algorithm for this `SubjectPublicKeyInfo`
    fn signs_with(&self) -> Result<AlgorithmIdentifierRef<'_>>;

    /// Signs the body with the specified algorithm
    ///
    /// Note that the signature is returned in its encoded form as it will
    /// appear in an X.509 certificate or PKCS#10 certification request.
    fn sign(&self, body: &[u8], algo: AlgorithmIdentifierRef<'_>) -> Result<Vec<u8>>;
}

impl<'a> PrivateKeyInfoExt for PrivateKeyInfo<'a> {
    fn generate(oid: ObjectIdentifier) -> Result<Zeroizing<Vec<u8>>> {
        let rand = ring::rand::SystemRandom::new();

        let doc = match oid {
            P256 => {
                use ring::signature::{EcdsaKeyPair, ECDSA_P256_SHA256_ASN1_SIGNING as ALG};
                EcdsaKeyPair::generate_pkcs8(&ALG, &rand)?
            }

            P384 => {
                use ring::signature::{EcdsaKeyPair, ECDSA_P384_SHA384_ASN1_SIGNING as ALG};
                EcdsaKeyPair::generate_pkcs8(&ALG, &rand)?
            }

            _ => return Err(anyhow!("unsupported")),
        };

        Ok(doc.as_ref().to_vec().into())
    }

    fn public_key(&self) -> Result<SubjectPublicKeyInfoRef<'_>> {
        match self.algorithm.oids()? {
            (ECPK, ..) => {
                let ec = EcPrivateKey::from_der(self.private_key)?;
                let pk = ec.public_key.ok_or_else(|| anyhow!("missing public key"))?;
                Ok(SubjectPublicKeyInfo {
                    algorithm: self.algorithm,
                    subject_public_key: BitStringRef::new(0, pk)?,
                })
            }
            _ => Err(anyhow!("unsupported")),
        }
    }

    fn signs_with(&self) -> Result<AlgorithmIdentifierRef<'_>> {
        match self.algorithm.oids()? {
            (ECPK, Some(P256)) => Ok(ES256),
            (ECPK, Some(P384)) => Ok(ES384),
            _ => Err(anyhow!("unsupported")),
        }
    }

    fn sign(&self, body: &[u8], algo: AlgorithmIdentifierRef<'_>) -> Result<Vec<u8>> {
        let rng = ring::rand::SystemRandom::new();
        match (self.algorithm.oids()?, algo) {
            ((ECPK, Some(P256)), ES256) => {
                use ring::signature::{EcdsaKeyPair, ECDSA_P256_SHA256_ASN1_SIGNING as ALG};
                let kp = EcdsaKeyPair::from_pkcs8(&ALG, &self.to_der()?, &rng)?;
                Ok(kp.sign(&rng, body)?.as_ref().to_vec())
            }

            ((ECPK, Some(P384)), ES384) => {
                use ring::signature::{EcdsaKeyPair, ECDSA_P384_SHA384_ASN1_SIGNING as ALG};
                let kp = EcdsaKeyPair::from_pkcs8(&ALG, &self.to_der()?, &rng)?;
                Ok(kp.sign(&rng, body)?.as_ref().to_vec())
            }

            _ => Err(anyhow!("unsupported")),
        }
    }
}

/// Create a private key and a self-signed certificate
pub fn make_self_signed_cert() -> Result<(
    [u8; 64],
    rustls::pki_types::CertificateDer<'static>,
    rustls::pki_types::PrivateKeyDer<'static>,
)> {
    // Generate a keypair.
    let raw = PrivateKeyInfo::generate(SECP_256_R_1).context("failed to generate a private key")?;
    let pki = PrivateKeyInfo::from_der(raw.as_ref())
        .context("failed to parse DER-encoded private key")?;
    let der = pki.public_key().unwrap().to_der().unwrap();

    let mut key_hash = [0u8; 64];
    let hash = Sha256::digest(der);
    key_hash[..32].copy_from_slice(&hash);

    // Create a relative distinguished name.
    let rdns = RdnSequence::from_str("CN=localhost")?;

    // Create the extensions.
    let ku = KeyUsage(KeyUsages::DigitalSignature | KeyUsages::KeyEncipherment).to_der()?;
    let eu = ExtendedKeyUsage(vec![ID_KP_SERVER_AUTH, ID_KP_CLIENT_AUTH]).to_der()?;
    let bc = BasicConstraints {
        ca: false,
        path_len_constraint: None,
    }
    .to_der()?;

    let mut serial = [0u8; 16];
    getrandom(&mut serial)?;

    // Create the certificate body.
    let tbs = TbsCertificate {
        version: x509_cert::Version::V3,
        serial_number: SerialNumber::new(&serial)?,
        signature: pki.signs_with()?.ref_to_owned(),
        issuer: rdns.clone(),
        validity: Validity::from_now(Duration::from_secs(60 * 60 * 24 * 365))?,
        subject: rdns,
        subject_public_key_info: pki.public_key()?.ref_to_owned(),
        issuer_unique_id: None,
        subject_unique_id: None,
        extensions: Some(vec![
            x509_cert::ext::Extension {
                extn_id: ID_CE_KEY_USAGE,
                critical: true,
                extn_value: OctetString::new(ku)?,
            },
            x509_cert::ext::Extension {
                extn_id: ID_CE_BASIC_CONSTRAINTS,
                critical: true,
                extn_value: OctetString::new(bc)?,
            },
            x509_cert::ext::Extension {
                extn_id: ID_CE_EXT_KEY_USAGE,
                critical: false,
                extn_value: OctetString::new(eu)?,
            },
        ]),
    };

    // Self-sign the certificate.
    let alg = tbs.signature.clone();
    let sig = pki.sign(&tbs.to_der()?, alg.owned_to_ref())?;
    let crt = Certificate {
        tbs_certificate: tbs,
        signature_algorithm: alg,
        signature: BitString::from_bytes(&sig)?,
    };

    let rustls_certificate = rustls::pki_types::CertificateDer::from(crt.to_der()?);
    let rustls_pk = rustls::pki_types::PrivateKeyDer::from(PrivatePkcs8KeyDer::from(pki.to_der()?));
    Ok((key_hash, rustls_certificate, rustls_pk))
}
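Illustrative sketch (not part of this diff): combining `make_self_signed_cert` with `get_quote`, so that the SHA-256 hash of the TLS public key (stored in the 64-byte `key_hash`) is bound into the enclave quote as report data; the `teepot` crate path is an assumption.

use teepot::quote::get_quote;
use teepot::server::pki::make_self_signed_cert;

fn tls_key_bound_quote() -> anyhow::Result<()> {
    let (key_hash, _cert, _key) = make_self_signed_cert()?;
    // A verifier can now check that the quote's report data matches the
    // hash of the certificate's public key.
    let quote = get_quote(&key_hash)?;
    println!("quote with bound TLS key: {} bytes", quote.len());
    Ok(())
}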
120
src/server/signatures.rs
Normal file
120
src/server/signatures.rs
Normal file
|
@ -0,0 +1,120 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright (c) 2023 Matter Labs

//! Signature checking utilities

use crate::json::secrets::AdminConfig;
use crate::server::{HttpResponseError, Status as _};
use actix_web::http::StatusCode;
use anyhow::{anyhow, bail, Context, Result};
use pgp::types::KeyTrait;
use pgp::{Deserializable, SignedPublicKey, StandaloneSignature};
use tracing::debug;

/// Verify a pgp signature for some message given some public keys
pub fn verify_sig(sig: &str, msg: &[u8], keys: &[SignedPublicKey]) -> anyhow::Result<usize> {
    let (signatures, _) =
        StandaloneSignature::from_string_many(sig).context(format!("reading signature {}", sig))?;

    for signature in signatures {
        let signature = match signature {
            Ok(s) => s,
            Err(e) => {
                debug!("Failed to parse signature: {}", e);
                continue;
            }
        };

        for (pos, key) in keys.iter().enumerate() {
            let actual_key = &key.primary_key;
            if actual_key.is_signing_key() && signature.verify(&actual_key, msg).is_ok() {
                return Ok(pos);
            }
            for sub_key in &key.public_subkeys {
                if sub_key.is_signing_key() && signature.verify(sub_key, msg).is_ok() {
                    return Ok(pos);
                }
            }
        }
    }
    eprintln!("Failed to verify signature for `{sig}`");
    bail!("Failed to verify signature for `{sig}`");
}

/// Verify pgp signatures for a message with some threshold
pub fn check_sigs(
    pgp_keys: &[Box<[u8]>],
    threshold: usize,
    signatures: &[String],
    msg: &[u8],
) -> Result<(), HttpResponseError> {
    let mut keys = Vec::new();

    for bytes in pgp_keys {
        let key = SignedPublicKey::from_bytes(bytes.as_ref())
            .context("parsing public key")
            .status(StatusCode::INTERNAL_SERVER_ERROR)?;
        keys.push(key);
    }

    let mut verified: usize = 0;

    for sig in signatures {
        if let Ok(pos) = verify_sig(sig, msg, &keys) {
            keys.remove(pos);
            verified += 1;
        }
        if verified >= threshold {
            break;
        }
    }

    if verified < threshold {
        return Err(anyhow!("not enough valid signatures")).status(StatusCode::BAD_REQUEST);
    }
    Ok(())
}

/// Verify pgp signatures for a message
pub trait VerifySig {
    /// Verify pgp signatures for a message
    fn check_sigs(&self, signatures: &[String], msg: &[u8]) -> Result<(), HttpResponseError>;
}

impl VerifySig for AdminConfig {
    fn check_sigs(&self, signatures: &[String], msg: &[u8]) -> Result<(), HttpResponseError> {
        check_sigs(&self.admin_pgp_keys, self.admin_threshold, signatures, msg)
    }
}

#[cfg(test)]
mod tests {
    use super::verify_sig;
    use base64::{engine::general_purpose, Engine as _};
    use pgp::{Deserializable, SignedPublicKey};

    const TEST_DATA: &str = include_str!("../../tests/data/test.json");

    // gpg --armor --local-user test@example.com --detach-sign bin/tee-vault-admin/tests/data/test.json
    const TEST_SIG: &str = include_str!("../../tests/data/test.json.asc");

    // gpg --armor --export 81A312C59D679D930FA9E8B06D728F29A2DBABF8 > bin/tee-vault-admin/tests/data/pub-81A312C59D679D930FA9E8B06D728F29A2DBABF8.asc
    const TEST_KEY: &str =
        include_str!("../../tests/data/pub-81A312C59D679D930FA9E8B06D728F29A2DBABF8.asc");

    const TEST_KEY_BASE64: &str =
        include_str!("../../tests/data/pub-81A312C59D679D930FA9E8B06D728F29A2DBABF8.b64");

    #[test]
    fn test_sig() {
        let test_key = SignedPublicKey::from_string(TEST_KEY).unwrap().0;
        verify_sig(TEST_SIG, TEST_DATA.as_bytes(), &[test_key]).unwrap();
    }

    #[test]
    fn test_key_import() {
        let str = TEST_KEY_BASE64.lines().collect::<String>();
        let bytes = general_purpose::STANDARD.decode(str).unwrap();
        let _ = SignedPublicKey::from_bytes(bytes.as_slice()).unwrap();
    }
}
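Illustrative sketch (not part of this diff): enforcing the admin threshold via the `VerifySig` trait on `AdminConfig`; the `teepot` crate path is an assumption.

use teepot::json::secrets::AdminConfig;
use teepot::server::signatures::VerifySig as _;

/// Returns true if `sigs` contains at least `admin_threshold` valid
/// detached ASCII-armored signatures over `command` (illustrative helper).
fn authorize_command(config: &AdminConfig, command: &[u8], sigs: &[String]) -> bool {
    config.check_sigs(sigs, command).is_ok()
}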
50
src/sgx/error.rs
Normal file
50
src/sgx/error.rs
Normal file
|
@ -0,0 +1,50 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright (c) 2023 Matter Labs

//! Intel SGX Enclave error wrapper

use bytemuck::PodCastError;
use intel_tee_quote_verification_rs::quote3_error_t;
use std::fmt::Formatter;

/// Wrapper for the quote verification Error
#[derive(Copy, Clone)]
pub struct Quote3Error {
    /// error message
    pub msg: &'static str,
    /// raw error code
    pub inner: quote3_error_t,
}

impl std::fmt::Display for Quote3Error {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}: {:?}", self.msg, self.inner)
    }
}

impl std::fmt::Debug for Quote3Error {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}: {:?}", self.msg, self.inner)
    }
}

impl std::error::Error for Quote3Error {}

impl From<quote3_error_t> for Quote3Error {
    fn from(inner: quote3_error_t) -> Self {
        Self {
            msg: "Generic",
            inner,
        }
    }
}

#[derive(Debug, thiserror::Error)]
#[allow(missing_docs)]
pub enum QuoteFromError {
    #[error(transparent)]
    PodCastError(#[from] PodCastError),

    #[error("Quote version is invalid")]
    InvalidVersion,
}
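Illustrative sketch (not part of this diff): attaching a call-site specific message to a raw `quote3_error_t`, the same way src/sgx/mod.rs constructs `Quote3Error` values; the `teepot` crate path is an assumption.

use intel_tee_quote_verification_rs::quote3_error_t;
use teepot::sgx::Quote3Error;

/// Wrap a raw DCAP error code with a context string (illustrative helper).
fn wrap_dcap_error(inner: quote3_error_t) -> Quote3Error {
    Quote3Error {
        msg: "tee_verify_quote",
        inner,
    }
}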
249
src/sgx/mod.rs
Normal file
249
src/sgx/mod.rs
Normal file
|
@ -0,0 +1,249 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright (c) 2023 Matter Labs

// Copyright (c) The Enarx Project Developers https://github.com/enarx/sgx

//! Intel SGX Enclave report structures.

pub mod error;
pub mod sign;
pub mod tcblevel;

use bytemuck::{cast_slice, try_from_bytes, AnyBitPattern, PodCastError};
use intel_tee_quote_verification_rs::{
    quote3_error_t, sgx_ql_qv_supplemental_t, tee_get_supplemental_data_version_and_size,
    tee_supp_data_descriptor_t, tee_verify_quote,
};
use std::ffi::CStr;
use std::fs::OpenOptions;
use std::io::{Read, Write};
use std::mem;
use tracing::{trace, warn};

use crate::quote::GetQuoteError;
pub use error::{Quote3Error, QuoteFromError};
pub use intel_tee_quote_verification_rs::{sgx_ql_qv_result_t, Collateral};
pub use tcblevel::{parse_tcb_levels, EnumSet, TcbLevel};

/// Structure of a quote
#[derive(Copy, Clone, Debug, AnyBitPattern)]
#[repr(C)]
pub struct Quote {
    version: [u8; 2],
    key_type: [u8; 2],
    reserved: [u8; 4],
    qe_svn: [u8; 2],
    pce_svn: [u8; 2],
    qe_vendor_id: [u8; 16],
    /// The user data that was passed, when creating the enclave
    pub user_data: [u8; 20],
    /// The report body
    pub report_body: ReportBody,
}

impl Quote {
    /// Creates a quote from a byte slice
    pub fn try_from_bytes(bytes: &[u8]) -> Result<&Self, QuoteFromError> {
        if bytes.len() < mem::size_of::<Self>() {
            return Err(PodCastError::SizeMismatch.into());
        }
        let this: &Self = try_from_bytes(&bytes[..mem::size_of::<Self>()])?;
        if this.version() != 3 {
            return Err(QuoteFromError::InvalidVersion);
        }
        Ok(this)
    }

    /// Version of the `Quote` structure
    pub fn version(&self) -> u16 {
        u16::from_le_bytes(self.version)
    }
}

/// The enclave report body.
///
/// For more information see the following documents:
///
/// [Intel® Software Guard Extensions (Intel® SGX) Data Center Attestation Primitives: ECDSA Quote Library API](https://download.01.org/intel-sgx/dcap-1.0/docs/SGX_ECDSA_QuoteGenReference_DCAP_API_Linux_1.0.pdf)
///
/// Table 5, A.4. Quote Format
///
/// [Intel® 64 and IA-32 Architectures Software Developer's Manual Volume 3 (3A, 3B, 3C & 3D): System Programming Guide](https://www.intel.com/content/www/us/en/architecture-and-technology/64-ia-32-architectures-software-developer-vol-3d-part-4-manual.html)
///
/// Table 38-21. Layout of REPORT
#[derive(Copy, Clone, Debug, AnyBitPattern)]
#[repr(C)]
pub struct ReportBody {
    /// The security version number of the enclave.
    pub cpusvn: [u8; 16],
    /// The Misc section of the StateSaveArea of the enclave
    pub miscselect: [u8; 4],
    reserved1: [u8; 28],
    /// The allowed Features of the enclave.
    pub features: [u8; 8],
    /// The allowed XCr0Flags of the enclave.
    pub xfrm: [u8; 8],
    /// The measurement of the enclave
    pub mrenclave: [u8; 32],
    reserved2: [u8; 32],
    /// The hash of the public key, that signed the enclave
    pub mrsigner: [u8; 32],
    reserved3: [u8; 96],
    /// ISV assigned Product ID of the enclave.
    pub isv_prodid: [u8; 2],
    /// ISV assigned SVN (security version number) of the enclave.
    pub isv_svn: [u8; 2],
    reserved4: [u8; 60],
    /// The enclave report data, injected when requesting the quote, that is used for attestation.
    pub reportdata: [u8; 64],
}

/// The result of the quote verification
pub struct QuoteVerificationResult<'a> {
    /// the raw result
    pub result: sgx_ql_qv_result_t,
    /// indicates if the collateral is expired
    pub collateral_expired: bool,
    /// the earliest expiration date of the collateral
    pub earliest_expiration_date: i64,
    /// Date of the TCB level
    pub tcb_level_date_tag: i64,
    /// the advisory string
    pub advisories: Vec<String>,
    /// the quote
    pub quote: &'a Quote,
}

/// Verifies a quote with collateral material
pub fn verify_quote_with_collateral<'a>(
    quote: &'a [u8],
    collateral: Option<&Collateral>,
    current_time: i64,
) -> Result<QuoteVerificationResult<'a>, Quote3Error> {
    let mut supp_data: mem::MaybeUninit<sgx_ql_qv_supplemental_t> = mem::MaybeUninit::zeroed();
    let mut supp_data_desc = tee_supp_data_descriptor_t {
        major_version: 0,
        data_size: 0,
        p_data: supp_data.as_mut_ptr() as *mut u8,
    };
    trace!("tee_get_supplemental_data_version_and_size");
    let (_, supp_size) =
        tee_get_supplemental_data_version_and_size(quote).map_err(|e| Quote3Error {
            msg: "tee_get_supplemental_data_version_and_size",
            inner: e,
        })?;

    trace!(
        "tee_get_supplemental_data_version_and_size supp_size: {}",
        supp_size
    );

    if supp_size == mem::size_of::<sgx_ql_qv_supplemental_t>() as u32 {
        supp_data_desc.data_size = supp_size;
    } else {
        supp_data_desc.data_size = 0;
        trace!(
            "tee_get_supplemental_data_version_and_size supp_size: {}",
            supp_size
        );
        trace!(
            "mem::size_of::<sgx_ql_qv_supplemental_t>(): {}",
            mem::size_of::<sgx_ql_qv_supplemental_t>()
        );
        warn!("Quote supplemental data size is different between DCAP QVL and QvE, please make sure you installed DCAP QVL and QvE from same release.")
    }

    let p_supplemental_data = match supp_data_desc.data_size {
        0 => None,
        _ => Some(&mut supp_data_desc),
    };

    let has_sup = p_supplemental_data.is_some();

    trace!("tee_verify_quote");

    let (collateral_expiration_status, result) =
        tee_verify_quote(quote, collateral, current_time, None, p_supplemental_data).map_err(
            |e| Quote3Error {
                msg: "tee_verify_quote",
                inner: e,
            },
        )?;

    // check supplemental data if necessary
    let (advisories, earliest_expiration_date, tcb_level_date_tag) = if has_sup {
        unsafe {
            let supp_data = supp_data.assume_init();
            // convert to valid UTF-8 string
            let ads = CStr::from_bytes_until_nul(cast_slice(&supp_data.sa_list[..]))
                .ok()
                .and_then(|s| CStr::to_str(s).ok())
                .into_iter()
                .flat_map(|s| s.split(',').map(str::trim).map(String::from))
                .filter(|s| !s.is_empty())
                .collect();
            (
                ads,
                supp_data.earliest_expiration_date,
                supp_data.tcb_level_date_tag,
            )
        }
    } else {
        (vec![], 0, 0)
    };

    let quote = Quote::try_from_bytes(quote).map_err(|_| Quote3Error {
        msg: "Quote::try_from_bytes",
        inner: quote3_error_t::SGX_QL_QUOTE_FORMAT_UNSUPPORTED,
    })?;

    let res = QuoteVerificationResult {
        collateral_expired: collateral_expiration_status != 0,
        earliest_expiration_date,
        tcb_level_date_tag,
        result,
        quote,
        advisories,
    };

    Ok(res)
}

/// Get the attestation report in a Gramine enclave
pub fn sgx_gramine_get_quote(report_data: &[u8; 64]) -> Result<Box<[u8]>, GetQuoteError> {
    let mut file = OpenOptions::new()
        .write(true)
        .open("/dev/attestation/user_report_data")
        .map_err(|e| GetQuoteError {
            msg: "Failed to open `/dev/attestation/user_report_data`".into(),
            source: e,
        })?;

    file.write(report_data).map_err(|e| GetQuoteError {
        msg: "Failed to write `/dev/attestation/user_report_data`".into(),
        source: e,
    })?;

    drop(file);

    let mut file = OpenOptions::new()
        .read(true)
        .open("/dev/attestation/quote")
        .map_err(|e| GetQuoteError {
            msg: "Failed to open `/dev/attestation/quote`".into(),
            source: e,
        })?;

    let mut quote = Vec::new();
    file.read_to_end(&mut quote).map_err(|e| GetQuoteError {
        msg: "Failed to read `/dev/attestation/quote`".into(),
        source: e,
    })?;
    Ok(quote.into_boxed_slice())
}

/// Wrapper func for error
/// TODO: move to intel_tee_quote_verification_rs
pub fn tee_qv_get_collateral(quote: &[u8]) -> Result<Collateral, Quote3Error> {
    intel_tee_quote_verification_rs::tee_qv_get_collateral(quote).map_err(Into::into)
}
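Illustrative end-to-end sketch (not part of this diff): inside a Gramine SGX enclave, produce a quote, fetch its collateral, verify it, and require an up-to-date TCB, mirroring the flow in src/server/attestation.rs; the `teepot` crate path is an assumption.

use std::time::{SystemTime, UNIX_EPOCH};
use teepot::sgx::{
    sgx_gramine_get_quote, tee_qv_get_collateral, verify_quote_with_collateral, TcbLevel,
};

fn attest_self() -> anyhow::Result<()> {
    let report_data = [0u8; 64];
    let quote = sgx_gramine_get_quote(&report_data)?;
    let collateral = tee_qv_get_collateral(&quote)?;
    let now = SystemTime::now().duration_since(UNIX_EPOCH)?.as_secs() as i64;
    let verified = verify_quote_with_collateral(&quote, Some(&collateral), now)?;
    anyhow::ensure!(!verified.collateral_expired, "collateral expired");
    anyhow::ensure!(
        TcbLevel::from(verified.result) == TcbLevel::Ok,
        "TCB level not OK"
    );
    Ok(())
}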
Some files were not shown because too many files have changed in this diff.