refactor(verify-era-proof-attestation): modularize and restructure proof verification logic

- Split `verify-era-proof-attestation` into modular subcomponents for maintainability.
- Moved client, proof handling, and core types into dedicated modules.
This commit is contained in:
Harald Hoyer 2025-04-02 16:03:01 +02:00
parent 1e853f653a
commit 2605e2ae3a
Signed by: harald
GPG key ID: F519A1143B3FBE32
34 changed files with 2918 additions and 2304 deletions

1856
Cargo.lock generated

File diff suppressed because it is too large Load diff

View file

@ -32,7 +32,6 @@ gpt = "4.0.0"
hex = { version = "0.4.3", features = ["std"], default-features = false } hex = { version = "0.4.3", features = ["std"], default-features = false }
intel-tee-quote-verification-rs = { package = "teepot-tee-quote-verification-rs", path = "crates/teepot-tee-quote-verification-rs", version = "0.3.0" } intel-tee-quote-verification-rs = { package = "teepot-tee-quote-verification-rs", path = "crates/teepot-tee-quote-verification-rs", version = "0.3.0" }
intel-tee-quote-verification-sys = { version = "0.2.1" } intel-tee-quote-verification-sys = { version = "0.2.1" }
jsonrpsee-types = { version = "0.24", default-features = false }
num-integer = "0.1.46" num-integer = "0.1.46"
num-traits = "0.2.18" num-traits = "0.2.18"
opentelemetry = { version = "0.28.0", features = ["default", "logs"] } opentelemetry = { version = "0.28.0", features = ["default", "logs"] }

View file

@ -8,19 +8,23 @@ repository.workspace = true
version.workspace = true version.workspace = true
[dependencies] [dependencies]
anyhow.workspace = true bytes.workspace = true
clap.workspace = true clap.workspace = true
enumset.workspace = true
hex.workspace = true hex.workspace = true
jsonrpsee-types.workspace = true jsonrpsee-types = "0.24"
reqwest.workspace = true reqwest.workspace = true
secp256k1.workspace = true secp256k1.workspace = true
serde.workspace = true serde.workspace = true
serde_json.workspace = true
serde_with = { workspace = true, features = ["hex"] } serde_with = { workspace = true, features = ["hex"] }
serde_yaml = "0.9.33"
teepot.workspace = true teepot.workspace = true
thiserror.workspace = true
tokio.workspace = true tokio.workspace = true
tracing.workspace = true tracing.workspace = true
tracing-subscriber.workspace = true tracing-subscriber.workspace = true
url.workspace = true url.workspace = true
zksync_basic_types = "=0.1.0" zksync_basic_types = "27.0.0-non-semver-compat"
zksync_types = "=0.1.0" zksync_types = "27.0.0-non-semver-compat"
zksync_web3_decl = "=0.1.0" zksync_web3_decl = "27.0.0-non-semver-compat"

View file

@ -0,0 +1,76 @@
# Era Proof Attestation Verifier
This tool verifies the SGX/TDX attestations and signatures for zkSync Era L1 batches.
## Usage
Basic usage with attestation policy provided from a YAML file:
```bash
verify-era-proof-attestation --rpc https://mainnet.era.zksync.io \
--continuous 493220 \
--attestation-policy-file examples/attestation_policy.yaml \
--log-level info
```
## Attestation Policy Configuration
You can specify the attestation policy either through command-line arguments or by providing a YAML configuration file.
### Command-line Arguments
The following command-line arguments are available:
- `--batch`, `-n <BATCH>`: The batch number or range of batch numbers to verify the attestation and signature (e.g., "42" or "42-45"). Mutually exclusive with `--continuous`.
- `--continuous <FIRST_BATCH>`: Continuous mode: keep verifying new batches starting from the specified batch number
until interrupted. Mutually exclusive with `--batch`.
- `--rpc <URL>`: URL of the RPC server to query for the batch attestation and signature.
- `--chain <CHAIN_ID>`: Chain ID of the network to query (default: L2ChainId::default()).
- `--rate-limit <MILLISECONDS>`: Rate limit between requests in milliseconds (default: 0).
- `--log-level <LEVEL>`: Log level for the log output. Valid values are: `off`, `error`, `warn`, `info`, `debug`,
`trace` (default: `warn`).
- `--attestation-policy-file <PATH>`: Path to a YAML file containing attestation policy configuration. This overrides
any attestation policy settings provided via command line options.
Either `--batch` or `--continuous` mode must be specified.
### YAML Configuration File
The attestation policy is loaded from a YAML file using the `--attestation-policy-file` option.
Example YAML configuration file:
```yaml
sgx:
mrenclaves:
- a2caa7055e333f69c3e46ca7ba65b135a86c90adfde2afb356e05075b7818b3c
- 36eeb64cc816f80a1cf5818b26710f360714b987d3799e757cbefba7697b9589
- 4a8b79e5123f4dbf23453d583cb8e5dcf4d19a6191a0be6dd85b7b3052c32faf
- 1498845b3f23667356cc49c38cae7b4ac234621a5b85fdd5c52b5f5d12703ec9
- 1b2374631bb2572a0e05b3be8b5cdd23c42e9d7551e1ef200351cae67c515a65
- 6fb19e47d72a381a9f3235c450f8c40f01428ce19a941f689389be3eac24f42a
- b610fd1d749775cc3de88beb84afe8bb79f55a19100db12d76f6a62ac576e35d
- a0b1b069b01bdcf3c1517ef8d4543794a27ed4103e464be7c4afdc6136b42d66
- 71e2a11a74b705082a7286b2008f812f340c0e4de19f8b151baa347eda32d057
- d5a0bf8932d9a3d7af6d9405d4c6de7dcb7b720bb5510666b4396fc58ee58bb2
allowed_tcb_levels:
- Ok
- SwHardeningNeeded
allowed_advisory_ids:
- INTEL-SA-00615
tdx:
mrs:
- - 2a90c8fa38672cafd791d994beb6836b99383b2563736858632284f0f760a6446efd1e7ec457cf08b629ea630f7b4525
- 3300980705adf09d28b707b79699d9874892164280832be2c386a715b6e204e0897fb564a064f810659207ba862b304f
- c08ab64725566bcc8a6fb1c79e2e64744fcff1594b8f1f02d716fb66592ecd5de94933b2bc54ffbbc43a52aab7eb1146
- 092a4866a9e6a1672d7439a5d106fbc6eb57b738d5bfea5276d41afa2551824365fdd66700c1ce9c0b20542b9f9d5945
- 971fb52f90ec98a234301ca9b8fc30b613c33e3dd9c0cc42dcb8003d4a95d8fb218b75baf028b70a3cabcb947e1ca453
- - 2a90c8fa38672cafd791d994beb6836b99383b2563736858632284f0f760a6446efd1e7ec457cf08b629ea630f7b4525
- 3300980705adf09d28b707b79699d9874892164280832be2c386a715b6e204e0897fb564a064f810659207ba862b304f
- c08ab64725566bcc8a6fb1c79e2e64744fcff1594b8f1f02d716fb66592ecd5de94933b2bc54ffbbc43a52aab7eb1146
- 092a4866a9e6a1672d7439a5d106fbc6eb57b738d5bfea5276d41afa2551824365fdd66700c1ce9c0b20542b9f9d5945
- f57bb7ed82c6ae4a29e6c9879338c592c7d42a39135583e8ccbe3940f2344b0eb6eb8503db0ffd6a39ddd00cd07d8317
allowed_tcb_levels:
- Ok
```

View file

@ -0,0 +1,31 @@
sgx:
mrenclaves:
- a2caa7055e333f69c3e46ca7ba65b135a86c90adfde2afb356e05075b7818b3c
- 36eeb64cc816f80a1cf5818b26710f360714b987d3799e757cbefba7697b9589
- 4a8b79e5123f4dbf23453d583cb8e5dcf4d19a6191a0be6dd85b7b3052c32faf
- 1498845b3f23667356cc49c38cae7b4ac234621a5b85fdd5c52b5f5d12703ec9
- 1b2374631bb2572a0e05b3be8b5cdd23c42e9d7551e1ef200351cae67c515a65
- 6fb19e47d72a381a9f3235c450f8c40f01428ce19a941f689389be3eac24f42a
- b610fd1d749775cc3de88beb84afe8bb79f55a19100db12d76f6a62ac576e35d
- a0b1b069b01bdcf3c1517ef8d4543794a27ed4103e464be7c4afdc6136b42d66
- 71e2a11a74b705082a7286b2008f812f340c0e4de19f8b151baa347eda32d057
- d5a0bf8932d9a3d7af6d9405d4c6de7dcb7b720bb5510666b4396fc58ee58bb2
allowed_tcb_levels:
- Ok
- SwHardeningNeeded
allowed_advisory_ids:
- INTEL-SA-00615
tdx:
mrs:
- - 2a90c8fa38672cafd791d994beb6836b99383b2563736858632284f0f760a6446efd1e7ec457cf08b629ea630f7b4525
- 3300980705adf09d28b707b79699d9874892164280832be2c386a715b6e204e0897fb564a064f810659207ba862b304f
- c08ab64725566bcc8a6fb1c79e2e64744fcff1594b8f1f02d716fb66592ecd5de94933b2bc54ffbbc43a52aab7eb1146
- 092a4866a9e6a1672d7439a5d106fbc6eb57b738d5bfea5276d41afa2551824365fdd66700c1ce9c0b20542b9f9d5945
- 971fb52f90ec98a234301ca9b8fc30b613c33e3dd9c0cc42dcb8003d4a95d8fb218b75baf028b70a3cabcb947e1ca453
- - 2a90c8fa38672cafd791d994beb6836b99383b2563736858632284f0f760a6446efd1e7ec457cf08b629ea630f7b4525
- 3300980705adf09d28b707b79699d9874892164280832be2c386a715b6e204e0897fb564a064f810659207ba862b304f
- c08ab64725566bcc8a6fb1c79e2e64744fcff1594b8f1f02d716fb66592ecd5de94933b2bc54ffbbc43a52aab7eb1146
- 092a4866a9e6a1672d7439a5d106fbc6eb57b738d5bfea5276d41afa2551824365fdd66700c1ce9c0b20542b9f9d5945
- f57bb7ed82c6ae4a29e6c9879338c592c7d42a39135583e8ccbe3940f2344b0eb6eb8503db0ffd6a39ddd00cd07d8317
allowed_tcb_levels:
- Ok

View file

@ -1,95 +0,0 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright (c) 2023-2024 Matter Labs
use anyhow::{anyhow, Result};
use clap::{ArgGroup, Args, Parser};
use std::time::Duration;
use teepot::log::LogLevelParser;
use teepot::sgx::{parse_tcb_levels, EnumSet, TcbLevel};
use tracing_subscriber::filter::LevelFilter;
use url::Url;
use zksync_basic_types::L1BatchNumber;
use zksync_types::L2ChainId;
/// Command-line arguments for the verifier.
///
/// Exactly one of `--batch` (`batch_range`) or `--continuous` must be given;
/// the clap `ArgGroup` below enforces this mutual exclusion.
#[derive(Parser, Debug, Clone)]
#[command(author = "Matter Labs", version, about = "SGX attestation and batch signature verifier", long_about = None)]
#[clap(group(
    ArgGroup::new("mode")
        .required(true)
        .args(&["batch_range", "continuous"]),
))]
pub struct Arguments {
    /// Log level for the log output.
    /// Valid values are: `off`, `error`, `warn`, `info`, `debug`, `trace`
    #[clap(long, default_value_t = LevelFilter::WARN, value_parser = LogLevelParser)]
    pub log_level: LevelFilter,
    /// The batch number or range of batch numbers to verify the attestation and signature (e.g.,
    /// "42" or "42-45"). This option is mutually exclusive with the `--continuous` mode.
    // Parsed by `parse_batch_range` below; a single number N becomes (N, N).
    #[clap(short = 'n', long = "batch", value_parser = parse_batch_range)]
    pub batch_range: Option<(L1BatchNumber, L1BatchNumber)>,
    /// Continuous mode: keep verifying new batches until interrupted. This option is mutually
    /// exclusive with the `--batch` option.
    #[clap(long, value_name = "FIRST_BATCH")]
    pub continuous: Option<L1BatchNumber>,
    /// URL of the RPC server to query for the batch attestation and signature.
    #[clap(long = "rpc")]
    pub rpc_url: Url,
    /// Chain ID of the network to query.
    #[clap(long = "chain", default_value_t = L2ChainId::default().as_u64())]
    pub chain_id: u64,
    /// Rate limit between requests in milliseconds.
    // Parsed by `parse_duration` below from a millisecond count.
    #[clap(long, default_value = "0", value_parser = parse_duration)]
    pub rate_limit: Duration,
    /// Criteria for valid attestation policy. Invalid proofs will be rejected.
    #[clap(flatten)]
    pub attestation_policy: AttestationPolicyArgs,
}
/// Attestation policy implemented as a set of criteria that must be met by SGX attestation.
#[derive(Args, Debug, Clone)]
pub struct AttestationPolicyArgs {
    /// Comma-separated list of allowed hex-encoded SGX mrsigners. Batch attestation must consist of
    /// one of these mrsigners. If the list is empty, the mrsigner check is skipped.
    #[arg(long = "mrsigners")]
    pub sgx_mrsigners: Option<String>,
    /// Comma-separated list of allowed hex-encoded SGX mrenclaves. Batch attestation must consist
    /// of one of these mrenclaves. If the list is empty, the mrenclave check is skipped.
    #[arg(long = "mrenclaves")]
    pub sgx_mrenclaves: Option<String>,
    /// Comma-separated list of allowed TCB levels. If the list is empty, the TCB level check is
    /// skipped. Allowed values: Ok, ConfigNeeded, ConfigAndSwHardeningNeeded, SwHardeningNeeded,
    /// OutOfDate, OutOfDateConfigNeeded.
    // Parsed by `teepot::sgx::parse_tcb_levels`; defaults to just `Ok`.
    #[arg(long, value_parser = parse_tcb_levels, default_value = "Ok")]
    pub sgx_allowed_tcb_levels: EnumSet<TcbLevel>,
}
/// Parse a batch specifier such as `"42"` or `"42-45"` into an inclusive
/// `(start, end)` pair. A single number yields `(n, n)`; a range must have
/// `start <= end`.
fn parse_batch_range(s: &str) -> Result<(L1BatchNumber, L1BatchNumber)> {
    // Parse one side of the range as a u32 batch number.
    let parse_one = |text: &str| -> Result<L1BatchNumber> {
        text.parse::<u32>()
            .map(L1BatchNumber::from)
            .map_err(|e| anyhow!(e))
    };
    match s.split_once('-') {
        None => {
            // No dash: a single batch number stands for the degenerate range.
            let single = parse_one(s)?;
            Ok((single, single))
        }
        Some((start_str, end_str)) => {
            let start = parse_one(start_str)?;
            let end = parse_one(end_str)?;
            if start > end {
                return Err(anyhow!(
                    "Start batch number ({}) must be less than or equal to end batch number ({})",
                    start,
                    end
                ));
            }
            Ok((start, end))
        }
    }
}
fn parse_duration(s: &str) -> Result<Duration> {
let millis = s.parse()?;
Ok(Duration::from_millis(millis))
}

View file

@ -1,45 +0,0 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright (c) 2023-2024 Matter Labs
use anyhow::{anyhow, Context, Result};
use url::Url;
use zksync_basic_types::{L1BatchNumber, H256};
use zksync_types::L2ChainId;
use zksync_web3_decl::{
client::{Client as NodeClient, L2},
error::ClientRpcContext,
namespaces::ZksNamespaceClient,
};
/// Minimal JSON-RPC interface used by the verifier.
pub trait JsonRpcClient {
    /// Fetch the state root hash for the given L1 batch.
    async fn get_root_hash(&self, batch_number: L1BatchNumber) -> Result<H256>;
    // TODO implement get_tee_proofs(batch_number, tee_type) once https://crates.io/crates/zksync_web3_decl crate is updated
}

/// JSON-RPC client for the zkSync Era main node.
pub struct MainNodeClient(NodeClient<L2>);

impl MainNodeClient {
    /// Create a client for `rpc_url`, scoped to the L2 network `chain_id`.
    ///
    /// Fails if `chain_id` is not a valid `L2ChainId` or the HTTP client
    /// cannot be constructed.
    pub fn new(rpc_url: Url, chain_id: u64) -> Result<Self> {
        let node_client = NodeClient::http(rpc_url.into())
            .context("failed creating JSON-RPC client for main node")?
            .for_network(
                L2ChainId::try_from(chain_id)
                    .map_err(anyhow::Error::msg)?
                    .into(),
            )
            .build();
        Ok(MainNodeClient(node_client))
    }
}

impl JsonRpcClient for MainNodeClient {
    async fn get_root_hash(&self, batch_number: L1BatchNumber) -> Result<H256> {
        // A missing batch and a batch without a root hash both collapse into
        // the same error via the `and_then`/`ok_or_else` chain below.
        self.0
            .get_l1_batch_details(batch_number)
            .rpc_context("get_l1_batch_details")
            .await?
            .and_then(|res| res.base.root_hash)
            .ok_or_else(|| anyhow!("No root hash found for batch #{}", batch_number))
    }
}

View file

@ -0,0 +1,66 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright (c) 2023-2025 Matter Labs
//! HTTP client for making requests to external services
use reqwest::Client;
use serde::{de::DeserializeOwned, Serialize};
use std::time::Duration;
use url::Url;
use crate::{
core::DEFAULT_HTTP_REQUEST_TIMEOUT,
error::{Error, Result},
};
/// Client for making HTTP requests
///
/// Thin wrapper around [`reqwest::Client`] with a fixed request timeout of
/// [`DEFAULT_HTTP_REQUEST_TIMEOUT`] seconds. Cloning is cheap (the inner
/// `reqwest::Client` is reference-counted).
#[derive(Clone)]
pub struct HttpClient {
    client: Client,
}

// `new()` takes no arguments, so provide the conventional `Default` impl
// (clippy: `new_without_default`).
impl Default for HttpClient {
    fn default() -> Self {
        Self::new()
    }
}

impl HttpClient {
    /// Create a new HTTP client with default configuration
    ///
    /// # Panics
    ///
    /// Panics if the underlying `reqwest` client cannot be built; this can
    /// only happen at startup, before any request is made.
    pub fn new() -> Self {
        let client = Client::builder()
            .timeout(Duration::from_secs(DEFAULT_HTTP_REQUEST_TIMEOUT))
            .build()
            .expect("Failed to create HTTP client");
        Self { client }
    }

    /// Make a POST request to the specified URL with the provided body
    ///
    /// The body is serialized as JSON. Returns the raw response body on
    /// success (2xx), or [`Error::Http`] with status and body otherwise.
    pub async fn post<T: Serialize>(&self, url: &Url, body: T) -> Result<String> {
        let response = self.client.post(url.clone()).json(&body).send().await?;
        self.handle_response(response).await
    }

    /// Send a JSON request and parse the response
    ///
    /// Convenience wrapper over [`Self::post`] that deserializes the response
    /// body into `R`, mapping parse failures to
    /// [`Error::JsonRpcInvalidResponse`].
    pub async fn send_json<T: Serialize, R: DeserializeOwned>(
        &self,
        url: &Url,
        body: T,
    ) -> Result<R> {
        let response_text = self.post(url, body).await?;
        let response: R = serde_json::from_str(&response_text)
            .map_err(|e| Error::JsonRpcInvalidResponse(e.to_string()))?;
        Ok(response)
    }

    /// Handle the HTTP response
    ///
    /// Reads the full body, then maps non-success status codes to
    /// [`Error::Http`] carrying the status and body text.
    async fn handle_response(&self, response: reqwest::Response) -> Result<String> {
        let status = response.status();
        let body = response.text().await?;
        if status.is_success() {
            Ok(body)
        } else {
            Err(Error::Http {
                status_code: status.as_u16(),
                message: body,
            })
        }
    }
}

View file

@ -0,0 +1,58 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright (c) 2023-2025 Matter Labs
use url::Url;
use zksync_basic_types::{L1BatchNumber, H256};
use zksync_types::L2ChainId;
use zksync_web3_decl::{
client::{Client as NodeClient, L2},
error::ClientRpcContext,
namespaces::ZksNamespaceClient,
};
use crate::error;
/// Trait for interacting with the JSON-RPC API
pub trait JsonRpcClient {
    /// Get the root hash for a specific batch
    async fn get_root_hash(&self, batch_number: L1BatchNumber) -> error::Result<H256>;
    // TODO implement get_tee_proofs(batch_number, tee_type) once https://crates.io/crates/zksync_web3_decl crate is updated
}

/// Client for interacting with the main node
pub struct MainNodeClient(NodeClient<L2>);

impl MainNodeClient {
    /// Create a new client for the main node
    ///
    /// # Errors
    ///
    /// Returns [`error::Error::Internal`] if `chain_id` is not a valid
    /// `L2ChainId` or if the underlying HTTP client cannot be built.
    pub fn new(rpc_url: Url, chain_id: u64) -> error::Result<Self> {
        let chain_id = L2ChainId::try_from(chain_id)
            .map_err(|e| error::Error::Internal(format!("Invalid chain ID: {}", e)))?;
        let node_client = NodeClient::http(rpc_url.into())
            .map_err(|e| {
                error::Error::Internal(format!("Failed to create JSON-RPC client: {}", e))
            })?
            .for_network(chain_id.into())
            .build();
        Ok(MainNodeClient(node_client))
    }
}

impl JsonRpcClient for MainNodeClient {
    async fn get_root_hash(&self, batch_number: L1BatchNumber) -> error::Result<H256> {
        // Three failure modes get distinct error messages: the RPC call
        // failing, the batch not existing, and the batch existing but having
        // no root hash.
        let batch_details = self
            .0
            .get_l1_batch_details(batch_number)
            .rpc_context("get_l1_batch_details")
            .await
            .map_err(|e| error::Error::JsonRpc(format!("Failed to get batch details: {}", e)))?
            .ok_or_else(|| {
                error::Error::JsonRpc(format!("No details found for batch #{}", batch_number))
            })?;
        batch_details.base.root_hash.ok_or_else(|| {
            error::Error::JsonRpc(format!("No root hash found for batch #{}", batch_number))
        })
    }
}

View file

@ -0,0 +1,12 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright (c) 2023-2025 Matter Labs
//! Client modules for external API communication
mod http;
mod json_rpc;
mod retry;
pub use http::HttpClient;
pub use json_rpc::{JsonRpcClient, MainNodeClient};
pub use retry::{RetryConfig, RetryHelper};

View file

@ -0,0 +1,107 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright (c) 2023-2025 Matter Labs
//! Retry mechanism for handling transient failures
use std::time::Duration;
use tokio::time::sleep;
use crate::{
core::{DEFAULT_RETRY_DELAY_MS, MAX_PROOF_FETCH_RETRIES},
error::{Error, Result},
};
/// Configuration for retry behavior
#[derive(Debug, Clone)]
pub struct RetryConfig {
    /// Maximum number of retry attempts
    pub max_attempts: u32,
    /// Delay between retry attempts
    // Base delay; when `use_exponential_backoff` is set it doubles per attempt
    // (see `RetryHelper::execute`).
    pub delay: Duration,
    /// Whether to use exponential backoff
    pub use_exponential_backoff: bool,
}

impl Default for RetryConfig {
    /// Defaults come from the crate-wide constants:
    /// [`MAX_PROOF_FETCH_RETRIES`] attempts, [`DEFAULT_RETRY_DELAY_MS`] base
    /// delay, exponential backoff enabled.
    fn default() -> Self {
        Self {
            max_attempts: MAX_PROOF_FETCH_RETRIES,
            delay: Duration::from_millis(DEFAULT_RETRY_DELAY_MS),
            use_exponential_backoff: true,
        }
    }
}
/// Helper for executing operations with retries
pub struct RetryHelper {
    config: RetryConfig,
}

impl RetryHelper {
    /// Create a new retry helper with the given configuration
    pub fn new(config: RetryConfig) -> Self {
        Self { config }
    }

    /// Execute an operation with retries
    ///
    /// Runs `operation` until it succeeds, it returns
    /// [`Error::Interrupted`] (which aborts immediately), or
    /// `config.max_attempts` attempts have failed. Between attempts the
    /// helper sleeps for the configured delay, doubled per attempt when
    /// exponential backoff is enabled. The operation always runs at least
    /// once.
    pub async fn execute<T, F, Fut>(&self, operation_name: &str, operation: F) -> Result<T>
    where
        F: Fn() -> Fut,
        Fut: std::future::Future<Output = Result<T>>,
    {
        let mut attempt_no = 0;
        loop {
            attempt_no += 1;
            tracing::debug!(
                "Executing operation '{}' (attempt {}/{})",
                operation_name,
                attempt_no,
                self.config.max_attempts
            );
            match operation().await {
                Ok(value) => {
                    tracing::debug!(
                        "Operation '{}' succeeded on attempt {}",
                        operation_name,
                        attempt_no
                    );
                    return Ok(value);
                }
                // Interruption is never retried.
                Err(Error::Interrupted) => return Err(Error::Interrupted),
                // Out of attempts: surface the last failure.
                Err(failure) if attempt_no >= self.config.max_attempts => {
                    tracing::warn!(
                        "Operation '{}' failed after {} attempts. Giving up.",
                        operation_name,
                        attempt_no
                    );
                    return Err(failure);
                }
                Err(failure) => {
                    // Exponential backoff doubles the base delay per attempt:
                    // delay * 2^(attempt - 1).
                    let backoff = if self.config.use_exponential_backoff {
                        self.config.delay.mul_f32(2.0_f32.powi(attempt_no as i32 - 1))
                    } else {
                        self.config.delay
                    };
                    tracing::warn!(
                        "Operation '{}' failed on attempt {}: {}. Retrying in {:?}...",
                        operation_name,
                        attempt_no,
                        failure,
                        backoff
                    );
                    sleep(backoff).await;
                }
            }
        }
    }
}

View file

@ -0,0 +1,455 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright (c) 2023-2025 Matter Labs
//! Configuration settings for the verification process
use crate::{
core::{SGX_HASH_SIZE, TDX_HASH_SIZE},
error,
};
use bytes::{Bytes, BytesMut};
use clap::{ArgGroup, Parser};
use enumset::EnumSet;
use serde::{Deserialize, Serialize};
use std::{collections::HashSet, fs, ops::Deref, path::PathBuf, str::FromStr, time::Duration};
use teepot::{log::LogLevelParser, quote::tcblevel::TcbLevel};
use tracing_subscriber::filter::LevelFilter;
use url::Url;
use zksync_basic_types::{tee_types::TeeType, L1BatchNumber};
use zksync_types::L2ChainId;
/// Primary configuration for the verification process
///
/// Exactly one of `batch_range` (`--batch`) or `continuous` must be given;
/// the clap `ArgGroup` below enforces the mutual exclusion.
#[derive(Parser, Debug, Clone)]
#[command(author = "Matter Labs", version, about = "SGX attestation and batch signature verifier", long_about = None
)]
#[clap(group(
    ArgGroup::new("mode")
        .required(true)
        .args(&["batch_range", "continuous"]),
))]
pub struct VerifierConfigArgs {
    /// Log level for the log output.
    /// Valid values are: `off`, `error`, `warn`, `info`, `debug`, `trace`
    #[clap(long, default_value_t = LevelFilter::WARN, value_parser = LogLevelParser)]
    pub log_level: LevelFilter,
    /// The batch number or range of batch numbers to verify the attestation and signature (e.g.,
    /// "42" or "42-45"). This option is mutually exclusive with the `--continuous` mode.
    // Parsed by `parse_batch_range` below; a single number N becomes (N, N).
    #[clap(short = 'n', long = "batch", value_parser = parse_batch_range)]
    pub batch_range: Option<(L1BatchNumber, L1BatchNumber)>,
    /// Continuous mode: keep verifying new batches until interrupted. This option is mutually
    /// exclusive with the `--batch` option.
    #[clap(long, value_name = "FIRST_BATCH")]
    pub continuous: Option<L1BatchNumber>,
    /// URL of the RPC server to query for the batch attestation and signature.
    #[clap(long = "rpc")]
    pub rpc_url: Url,
    /// Chain ID of the network to query.
    #[clap(long = "chain", default_value_t = L2ChainId::default().as_u64())]
    pub chain_id: u64,
    /// Rate limit between requests in milliseconds.
    // Parsed by `parse_duration` below from a millisecond count.
    #[clap(long, default_value = "0", value_parser = parse_duration)]
    pub rate_limit: Duration,
    /// Path to a YAML file containing attestation policy configuration.
    /// This overrides any attestation policy settings provided via command line options.
    // Deserialized into `AttestationPolicyConfig` by `VerifierConfig::new`.
    #[clap(long = "attestation-policy-file")]
    pub attestation_policy_file: Option<PathBuf>,
    /// Comma separated list of Tee types to process
    #[clap(long)]
    pub tee_types: TeeTypes,
}
/// Attestation policy implemented as a set of criteria that must be met by SGX attestation.
#[derive(Default, Debug, Clone, Serialize, Deserialize)]
pub struct SgxAttestationPolicyConfig {
    /// List of allowed hex-encoded SGX mrsigners. Batch attestation must consist of
    /// one of these mrsigners. If the list is empty, the mrsigner check is skipped.
    #[serde(default)]
    pub mrsigners: Option<Vec<String>>,
    /// List of allowed hex-encoded SGX mrenclaves. Batch attestation must consist
    /// of one of these mrenclaves. If the list is empty, the mrenclave check is skipped.
    #[serde(default)]
    pub mrenclaves: Option<Vec<String>>,
    /// List of allowed SGX TCB levels. If the list is empty, the TCB level check is
    /// skipped. Allowed values: Ok, ConfigNeeded, ConfigAndSwHardeningNeeded, SwHardeningNeeded,
    /// OutOfDate, OutOfDateConfigNeeded.
    // When the YAML omits this key, only `Ok` is allowed (see `default_tcb_levels`).
    #[serde(default = "default_tcb_levels")]
    pub allowed_tcb_levels: EnumSet<TcbLevel>,
    /// List of allowed SGX advisories. If the list is empty, the advisories check is skipped.
    #[serde(default)]
    pub allowed_advisory_ids: Option<Vec<String>>,
}
/// Attestation policy implemented as a set of criteria that must be met by TDX attestation.
#[derive(Default, Debug, Clone, Serialize, Deserialize)]
pub struct TdxAttestationPolicyConfig {
    /// List of allowed hex-encoded TDX mrs. Batch attestation must consist
    /// of one of these mrs. If the list is empty, the mrs check is skipped.
    #[serde(default)]
    pub mrs: Option<Vec<[String; 5]>>,
    /// List of allowed TDX TCB levels. If the list is empty, the TCB level check is
    /// skipped. Allowed values: Ok, ConfigNeeded, ConfigAndSwHardeningNeeded, SwHardeningNeeded,
    /// OutOfDate, OutOfDateConfigNeeded.
    // When the YAML omits this key, only `Ok` is allowed (see `default_tcb_levels`).
    #[serde(default = "default_tcb_levels")]
    pub allowed_tcb_levels: EnumSet<TcbLevel>,
    /// List of allowed TDX advisories. If the list is empty, the advisories check is skipped.
    #[serde(default)]
    pub allowed_advisory_ids: Option<Vec<String>>,
}
/// Attestation policy implemented as a set of criteria that must be met by SGX or TDX attestation.
#[derive(Default, Debug, Clone, Serialize, Deserialize)]
pub struct AttestationPolicyConfig {
    /// SGX attestation policy
    pub sgx: SgxAttestationPolicyConfig,
    /// TDX attestation policy
    pub tdx: TdxAttestationPolicyConfig,
}

/// Decoded, ready-to-use attestation policy.
///
/// Runtime form of [`AttestationPolicyConfig`]: hex strings from the
/// YAML layer have been decoded into raw byte buffers (see
/// `VerifierConfig::new`).
#[derive(Debug, Clone)]
pub struct AttestationPolicy {
    /// Allowed SGX mrsigner values as raw bytes (`None` = none configured)
    pub sgx_mrsigners: Option<Vec<Bytes>>,
    /// Allowed SGX mrenclave values as raw bytes (`None` = none configured)
    pub sgx_mrenclaves: Option<Vec<Bytes>>,
    /// Allowed SGX TCB levels
    pub sgx_allowed_tcb_levels: EnumSet<TcbLevel>,
    /// Allowed SGX advisory IDs (`None` = none configured)
    pub sgx_allowed_advisory_ids: Option<Vec<String>>,
    /// Allowed TDX TCB levels
    pub tdx_allowed_tcb_levels: EnumSet<TcbLevel>,
    /// Allowed TDX measurement sets, each 5 registers concatenated into one
    /// buffer (`None` = none configured)
    pub tdx_mrs: Option<Vec<Bytes>>,
    /// Allowed TDX advisory IDs (`None` = none configured)
    pub tdx_allowed_advisory_ids: Option<Vec<String>>,
}
/// Default TCB levels used for Serde deserialization
///
/// When a policy file omits `allowed_tcb_levels`, only `Ok` is accepted.
fn default_tcb_levels() -> EnumSet<TcbLevel> {
    EnumSet::only(TcbLevel::Ok)
}
// TODO:
// When moving this binary to the `zksync-era` repo, we
// should be using `EnumSet<TeeType>` but this requires
// #[derive(EnumSetType, Debug, Serialize, Deserialize)]
// #[enumset(serialize_repr = "list")]
// for `TeeType`
#[derive(Clone, Debug)]
pub struct TeeTypes(HashSet<TeeType>);
impl FromStr for TeeTypes {
type Err = error::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let mut hs = HashSet::new();
let tee_strs: Vec<&str> = s.split(',').collect();
for tee_str in tee_strs {
match tee_str.to_ascii_lowercase().as_str() {
"sgx" => {
hs.insert(TeeType::Sgx);
}
"tdx" => {
hs.insert(TeeType::Tdx);
}
_ => {
return Err(error::Error::internal("Unknown TEE type"));
}
}
}
Ok(Self(hs))
}
}
impl Default for TeeTypes {
fn default() -> Self {
Self(HashSet::from([TeeType::Sgx, TeeType::Tdx]))
}
}
impl Deref for TeeTypes {
type Target = HashSet<TeeType>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
/// Fully resolved verifier configuration: raw CLI arguments plus the decoded
/// attestation policy derived from them.
#[derive(Debug, Clone)]
pub struct VerifierConfig {
    /// Parsed command-line arguments
    pub args: VerifierConfigArgs,
    /// Decoded attestation policy (from the policy file, or defaults)
    pub policy: AttestationPolicy,
}
impl VerifierConfig {
    /// Build a [`VerifierConfig`] from parsed CLI arguments.
    ///
    /// If `attestation_policy_file` is set, the policy is deserialized from
    /// that YAML file; otherwise an empty default policy is used. Hex-encoded
    /// measurement strings are decoded to raw bytes here, so a malformed
    /// policy file fails fast at startup rather than during verification.
    pub fn new(args: VerifierConfigArgs) -> error::Result<Self> {
        let policy = if let Some(path) = &args.attestation_policy_file {
            let policy_content = fs::read_to_string(path).map_err(|e| {
                error::Error::internal(format!("Failed to read attestation policy file: {}", e))
            })?;
            let policy_config: AttestationPolicyConfig = serde_yaml::from_str(&policy_content)
                .map_err(|e| {
                    error::Error::internal(format!(
                        "Failed to parse attestation policy file: {}",
                        e
                    ))
                })?;
            tracing::info!("Loaded attestation policy from file: {:?}", path);
            policy_config
        } else {
            AttestationPolicyConfig::default()
        };
        let policy = AttestationPolicy {
            sgx_mrsigners: decode_hex_vec_option(policy.sgx.mrsigners, SGX_HASH_SIZE)?,
            sgx_mrenclaves: decode_hex_vec_option(policy.sgx.mrenclaves, SGX_HASH_SIZE)?,
            sgx_allowed_tcb_levels: policy.sgx.allowed_tcb_levels,
            sgx_allowed_advisory_ids: policy.sgx.allowed_advisory_ids,
            tdx_allowed_tcb_levels: policy.tdx.allowed_tcb_levels,
            // Each TDX entry is 5 registers of TDX_HASH_SIZE bytes,
            // concatenated into one buffer by `decode_tdx_mrs`.
            tdx_mrs: decode_tdx_mrs(policy.tdx.mrs, TDX_HASH_SIZE)?,
            tdx_allowed_advisory_ids: policy.tdx.allowed_advisory_ids,
        };
        // A policy without code-identity restrictions still loads, but is
        // loudly flagged: any enclave could then produce an accepted proof.
        if policy.sgx_mrsigners.is_none() && policy.sgx_mrenclaves.is_none() {
            tracing::error!(
                "Neither `--sgx-mrenclaves` nor `--sgx-mrsigners` specified. Any code could have produced the SGX proof."
            );
        }
        if policy.tdx_mrs.is_none() {
            // NOTE(review): message mentions a `--tdxmrs` flag, but TDX MRs
            // appear to be configurable only via the policy file — confirm.
            tracing::error!(
                "`--tdxmrs` not specified. Any code could have produced the TDX proof."
            );
        }
        Ok(Self { args, policy })
    }
}
/// Decode an optional list of hex strings into raw byte buffers.
///
/// Each string must decode to exactly `bytes_length` bytes; wrong length,
/// odd length, or non-hex characters are rejected. `None` passes through
/// unchanged.
fn decode_hex_vec_option(
    hex_strings: Option<Vec<String>>,
    bytes_length: usize,
) -> Result<Option<Vec<Bytes>>, hex::FromHexError> {
    hex_strings
        .map(|strings| {
            strings
                .into_iter()
                .map(|s| {
                    // Require the exact encoded length. The previous `>` check
                    // let shorter (truncated) measurement strings pass
                    // validation silently.
                    if s.len() != bytes_length * 2 {
                        return Err(hex::FromHexError::InvalidStringLength);
                    }
                    hex::decode(s).map(Bytes::from)
                })
                .collect::<Result<Vec<_>, _>>()
        })
        .transpose()
}
/// Decode an optional list of TDX measurement-register sets.
///
/// Each entry is an array of 5 hex strings that are decoded and concatenated
/// into a single byte buffer by [`decode_and_combine_mrs`].
fn decode_tdx_mrs(
    tdx_mrs_opt: Option<Vec<[String; 5]>>,
    bytes_length: usize,
) -> Result<Option<Vec<Bytes>>, hex::FromHexError> {
    // Mirrors the Option<Vec<_>> handling of `decode_hex_vec_option`:
    // map fallibly over the inner vector, then transpose Option/Result.
    tdx_mrs_opt
        .map(|mrs_array| {
            mrs_array
                .into_iter()
                .map(|strings| decode_and_combine_mrs(strings, bytes_length))
                .collect::<Result<Vec<_>, _>>()
        })
        .transpose()
}
/// Decode 5 hex-encoded measurement registers and concatenate them.
///
/// Each register must decode to exactly `bytes_length` bytes, yielding one
/// contiguous buffer of `bytes_length * 5` bytes.
fn decode_and_combine_mrs(
    strings: [String; 5],
    bytes_length: usize,
) -> Result<Bytes, hex::FromHexError> {
    let mut buffer = BytesMut::with_capacity(bytes_length * 5);
    for s in &strings {
        // Require the exact encoded length (consistent with
        // `decode_hex_vec_option`); a short register would shift every
        // subsequent register inside the combined buffer.
        if s.len() != bytes_length * 2 {
            return Err(hex::FromHexError::InvalidStringLength);
        }
        buffer.extend(hex::decode(s)?);
    }
    Ok(buffer.freeze())
}
/// Parse a batch range from a string like "42" or "42-45"
fn parse_batch_range(s: &str) -> error::Result<(L1BatchNumber, L1BatchNumber)> {
let parse = |s: &str| {
s.parse::<u32>()
.map(L1BatchNumber::from)
.map_err(|e| error::Error::internal(format!("Can't convert batch {s} to number: {e}")))
};
match s.split_once('-') {
Some((start, end)) => {
let (start, end) = (parse(start)?, parse(end)?);
if start > end {
Err(error::Error::InvalidBatchRange(s.into()))
} else {
Ok((start, end))
}
}
None => {
let batch_number = parse(s)?;
Ok((batch_number, batch_number))
}
}
}
/// Parse a duration from a millisecond string
fn parse_duration(s: &str) -> error::Result<Duration> {
let millis = s
.parse()
.map_err(|e| error::Error::internal(format!("Can't convert {s} to duration: {e}")))?;
Ok(Duration::from_millis(millis))
}
#[cfg(test)]
mod test {
    use super::*;
    use std::{env, fs, path::PathBuf};
    use teepot::quote::tcblevel::TcbLevel;

    // Round-trip: write a valid policy YAML, load it through
    // `VerifierConfig::new`, and check the decoded byte values.
    #[test]
    fn test_load_attestation_policy_from_yaml() {
        // Create a temporary directory for the test
        let temp_dir = env::temp_dir().join("test_attestation_policy");
        fs::create_dir_all(&temp_dir).expect("Failed to create temp directory");

        // Create a temporary YAML file
        let yaml_path = temp_dir.join("policy.yaml");
        let yaml_content = r#"
sgx:
  mrenclaves:
    - a2caa7055e333f69c3e46ca7ba65b135a86c90adfde2afb356e05075b7818b3c
    - 36eeb64cc816f80a1cf5818b26710f360714b987d3799e757cbefba7697b9589
  allowed_tcb_levels:
    - Ok
    - SwHardeningNeeded
tdx:
  mrs:
    - - 2a90c8fa38672cafd791d994beb6836b99383b2563736858632284f0f760a6446efd1e7ec457cf08b629ea630f7b4525
      - 3300980705adf09d28b707b79699d9874892164280832be2c386a715b6e204e0897fb564a064f810659207ba862b304f
      - c08ab64725566bcc8a6fb1c79e2e64744fcff1594b8f1f02d716fb66592ecd5de94933b2bc54ffbbc43a52aab7eb1146
      - 092a4866a9e6a1672d7439a5d106fbc6eb57b738d5bfea5276d41afa2551824365fdd66700c1ce9c0b20542b9f9d5945
      - 971fb52f90ec98a234301ca9b8fc30b613c33e3dd9c0cc42dcb8003d4a95d8fb218b75baf028b70a3cabcb947e1ca453
"#;
        fs::write(&yaml_path, yaml_content).expect("Failed to write YAML file");

        // Create a minimal config
        let config = VerifierConfig::new(VerifierConfigArgs {
            log_level: LevelFilter::INFO,
            batch_range: Some((L1BatchNumber(1), L1BatchNumber(10))),
            continuous: None,
            rpc_url: Url::parse("http://localhost:8545").unwrap(),
            chain_id: 270,
            rate_limit: Duration::from_millis(0),
            attestation_policy_file: Some(yaml_path.clone()),
            tee_types: Default::default(),
        })
        .expect("Failed to load attestation policy");

        // Verify that the attestation policy was loaded correctly
        assert_eq!(config.policy.sgx_mrsigners, None);
        assert_eq!(
            config.policy.sgx_mrenclaves,
            Some(vec![
                Bytes::from(
                    hex::decode("a2caa7055e333f69c3e46ca7ba65b135a86c90adfde2afb356e05075b7818b3c")
                        .unwrap(),
                ),
                Bytes::from(
                    hex::decode("36eeb64cc816f80a1cf5818b26710f360714b987d3799e757cbefba7697b9589")
                        .unwrap(),
                ),
            ])
        );
        assert!(config.policy.sgx_allowed_tcb_levels.contains(TcbLevel::Ok));
        assert!(config
            .policy
            .sgx_allowed_tcb_levels
            .contains(TcbLevel::SwHardeningNeeded));
        // The 5 TDX registers must be concatenated into one buffer.
        assert_eq!(
            config.policy.tdx_mrs,
            Some(vec![Bytes::from(
                hex::decode(concat!(
                    "2a90c8fa38672cafd791d994beb6836b99383b2563736858632284f0f760a6446efd1e7ec457cf08b629ea630f7b4525",
                    "3300980705adf09d28b707b79699d9874892164280832be2c386a715b6e204e0897fb564a064f810659207ba862b304f",
                    "c08ab64725566bcc8a6fb1c79e2e64744fcff1594b8f1f02d716fb66592ecd5de94933b2bc54ffbbc43a52aab7eb1146",
                    "092a4866a9e6a1672d7439a5d106fbc6eb57b738d5bfea5276d41afa2551824365fdd66700c1ce9c0b20542b9f9d5945",
                    "971fb52f90ec98a234301ca9b8fc30b613c33e3dd9c0cc42dcb8003d4a95d8fb218b75baf028b70a3cabcb947e1ca453"
                )).unwrap()),
            ])
        );

        // Clean up
        fs::remove_file(yaml_path).expect("Failed to remove temp YAML file");
        fs::remove_dir_all(temp_dir).expect("Failed to remove temp directory");
    }

    // A missing policy file must be a startup error, not a silent default.
    #[test]
    fn test_invalid_yaml_file_path() {
        // Create a minimal config with a non-existent YAML file path
        let result = VerifierConfig::new(VerifierConfigArgs {
            log_level: LevelFilter::INFO,
            batch_range: Some((L1BatchNumber(1), L1BatchNumber(10))),
            continuous: None,
            rpc_url: Url::parse("http://localhost:8545").unwrap(),
            chain_id: 270,
            rate_limit: Duration::from_millis(0),
            attestation_policy_file: Some(PathBuf::from("/non/existent/path.yaml")),
            tee_types: Default::default(),
        });
        assert!(result.is_err());
    }

    // Structurally wrong YAML must be rejected at load time.
    #[test]
    fn test_invalid_yaml_content() {
        // Create a temporary directory for the test
        let temp_dir = env::temp_dir().join("test_invalid_yaml");
        fs::create_dir_all(&temp_dir).expect("Failed to create temp directory");

        // Create a temporary YAML file with invalid content
        let yaml_path = temp_dir.join("invalid_policy.yaml");
        let yaml_content = r#"
sgx_mrsigners: 1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef
invalid_key: "some value"
allowed_tcb_levels:
  - Invalid
  - ConfigNeeded
"#;
        fs::write(&yaml_path, yaml_content).expect("Failed to write YAML file");

        // Create a minimal config
        let result = VerifierConfig::new(VerifierConfigArgs {
            log_level: LevelFilter::INFO,
            batch_range: Some((L1BatchNumber(1), L1BatchNumber(10))),
            continuous: None,
            rpc_url: Url::parse("http://localhost:8545").unwrap(),
            chain_id: 270,
            rate_limit: Duration::from_millis(0),
            attestation_policy_file: Some(yaml_path.clone()),
            tee_types: Default::default(),
        });
        assert!(result.is_err());

        // Clean up
        fs::remove_file(yaml_path).expect("Failed to remove temp YAML file");
        fs::remove_dir_all(temp_dir).expect("Failed to remove temp directory");
    }
}

View file

@ -0,0 +1,19 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright (c) 2023-2025 Matter Labs
//! Constants used throughout the application
/// Maximum number of retry attempts for fetching proofs
pub const MAX_PROOF_FETCH_RETRIES: u32 = 3;

/// Default delay between retries (in milliseconds)
pub const DEFAULT_RETRY_DELAY_MS: u64 = 1000;

/// Default timeout for HTTP requests (in seconds)
pub const DEFAULT_HTTP_REQUEST_TIMEOUT: u64 = 30;

/// SGX hash size in bytes (SGX measurement registers hold 32-byte values)
pub const SGX_HASH_SIZE: usize = 32;

/// TDX hash size in bytes (TDX measurement registers hold 48-byte values)
pub const TDX_HASH_SIZE: usize = 48;

View file

@ -0,0 +1,12 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright (c) 2023-2025 Matter Labs
//! Core components for Era proof attestation verification
mod config;
mod constants;
mod types;
pub use config::*;
pub use constants::*;
pub use types::*;

View file

@ -0,0 +1,101 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright (c) 2023-2025 Matter Labs
//! Common type definitions used throughout the application
use std::fmt;
use zksync_basic_types::L1BatchNumber;
/// Represents the operating mode of the verifier
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum VerifierMode {
    /// Run on a single batch or range of batches and then exit
    OneShot {
        /// Starting batch number (inclusive)
        start_batch: L1BatchNumber,
        /// Ending batch number (inclusive)
        end_batch: L1BatchNumber,
    },
    /// Run continuously starting from a specific batch, until interrupted
    Continuous {
        /// Starting batch number
        start_batch: L1BatchNumber,
    },
}
impl fmt::Display for VerifierMode {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
VerifierMode::OneShot {
start_batch,
end_batch,
} => {
if start_batch == end_batch {
write!(f, "one-shot mode (batch {})", start_batch)
} else {
write!(f, "one-shot mode (batches {}-{})", start_batch, end_batch)
}
}
VerifierMode::Continuous { start_batch } => {
write!(f, "continuous mode (starting from batch {})", start_batch)
}
}
}
}
/// Result of proof verification for a single batch
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum VerificationResult {
/// All proofs for the batch were verified successfully
Success,
/// Some proofs for the batch failed verification
PartialSuccess {
/// Number of successfully verified proofs
verified_count: u32,
/// Number of proofs that failed verification
unverified_count: u32,
},
/// No proofs for the batch were verified successfully
Failure,
/// Verification was interrupted before completion
Interrupted,
/// No proofs were found for the batch
NoProofsFound,
}
impl VerificationResult {
/// Check if the majority of the proofs was verified successfully
pub fn is_successful(&self) -> bool {
match self {
VerificationResult::Success => true,
VerificationResult::PartialSuccess {
verified_count,
unverified_count,
} => verified_count > unverified_count,
VerificationResult::Failure => false,
VerificationResult::Interrupted => false,
VerificationResult::NoProofsFound => false,
}
}
}
impl fmt::Display for VerificationResult {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
VerificationResult::Success => write!(f, "Success"),
VerificationResult::PartialSuccess {
verified_count,
unverified_count,
} => {
write!(
f,
"Partial Success ({} verified, {} failed)",
verified_count, unverified_count
)
}
VerificationResult::Failure => write!(f, "Failure"),
VerificationResult::Interrupted => write!(f, "Interrupted"),
VerificationResult::NoProofsFound => write!(f, "No Proofs Found"),
}
}
}

View file

@ -0,0 +1,103 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright (c) 2023-2025 Matter Labs
//! Error types for the verification process
use teepot::sgx::QuoteError;
use thiserror::Error;
use zksync_basic_types::L1BatchNumber;
/// Result type used throughout the application, fixed to this crate's [`Error`]
pub type Result<T> = std::result::Result<T, Error>;
/// Error types that can occur during verification
#[derive(Error, Debug)]
pub enum Error {
    /// Error fetching proof
    #[error("Failed to fetch proof for batch {batch_number}: {reason}")]
    ProofFetch {
        /// Batch number that caused the error
        batch_number: L1BatchNumber,
        /// Reason for the error
        reason: String,
    },
    /// Error communicating with the HTTP server
    #[error("HTTP request failed with status {status_code}: {message}")]
    Http {
        /// HTTP status code (0 when the request failed before any response arrived)
        status_code: u16,
        /// Error message
        message: String,
    },
    /// Error communicating with the JSON-RPC server
    #[error("JSON-RPC error: {0}")]
    JsonRpc(String),
    /// JSON-RPC response has an invalid format
    #[error("JSON-RPC response has an invalid format")]
    JsonRpcInvalidResponse(String),
    /// Invalid batch range
    #[error("Invalid batch range: {0}")]
    InvalidBatchRange(String),
    /// Error verifying attestation
    #[error(transparent)]
    AttestationVerification(#[from] QuoteError),
    /// Error verifying signature
    #[error("Signature verification failed: {0}")]
    SignatureVerification(String),
    /// Attestation policy violation
    #[error("Attestation policy violation: {0}")]
    PolicyViolation(String),
    /// Operation interrupted
    #[error("Operation interrupted")]
    Interrupted,
    /// Error decoding hex-encoded data
    #[error(transparent)]
    FromHex(#[from] hex::FromHexError),
    /// Internal error
    #[error("Internal error: {0}")]
    Internal(String),
}
/// Utility functions for working with errors
///
/// Convenience constructors accepting anything `Into<String>` for the reason.
impl Error {
    /// Create a new proof fetch error
    pub fn proof_fetch(batch_number: L1BatchNumber, reason: impl Into<String>) -> Self {
        Self::ProofFetch {
            batch_number,
            reason: reason.into(),
        }
    }

    /// Create a new policy violation error
    pub fn policy_violation(reason: impl Into<String>) -> Self {
        Self::PolicyViolation(reason.into())
    }

    /// Create a new signature verification error
    pub fn signature_verification(reason: impl Into<String>) -> Self {
        Self::SignatureVerification(reason.into())
    }

    /// Create a new internal error
    pub fn internal(reason: impl Into<String>) -> Self {
        Self::Internal(reason.into())
    }
}
impl From<reqwest::Error> for Error {
    /// Map a transport-level `reqwest` failure onto the HTTP error variant,
    /// using status code 0 when no HTTP status was received at all.
    fn from(value: reqwest::Error) -> Self {
        let status_code = value.status().map_or(0, |status| status.as_u16());
        Self::Http {
            status_code,
            message: value.to_string(),
        }
    }
}

View file

@ -1,221 +1,91 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// Copyright (c) 2023-2024 Matter Labs // Copyright (c) 2023-2025 Matter Labs
//! Tool for SGX attestation and batch signature verification, both continuous and one-shot //! Tool for SGX attestation and batch signature verification, both continuous and one-shot
mod args;
mod client; mod client;
mod core;
mod error;
mod processor;
mod proof; mod proof;
mod verification; mod verification;
use crate::verification::{
log_quote_verification_summary, verify_attestation_quote, verify_batch_proof,
};
use anyhow::Result;
use args::{Arguments, AttestationPolicyArgs};
use clap::Parser; use clap::Parser;
use client::MainNodeClient; use error::Result;
use proof::get_proofs;
use reqwest::Client;
use teepot::log::setup_logging;
use tokio::{signal, sync::watch}; use tokio::{signal, sync::watch};
use tracing::{debug, error, info, trace, warn};
use url::Url; use crate::{
use zksync_basic_types::L1BatchNumber; core::{VerifierConfig, VerifierConfigArgs},
error::Error,
processor::ProcessorFactory,
};
#[tokio::main] #[tokio::main]
async fn main() -> Result<()> { async fn main() -> Result<()> {
let args = Arguments::parse(); // Parse command-line arguments
tracing::subscriber::set_global_default(setup_logging( let config = VerifierConfig::new(VerifierConfigArgs::parse())?;
env!("CARGO_CRATE_NAME"),
&args.log_level,
)?)?;
validate_arguments(&args)?; // Initialize logging
tracing::subscriber::set_global_default(
teepot::log::setup_logging(env!("CARGO_CRATE_NAME"), &config.args.log_level)
.map_err(|e| Error::internal(e.to_string()))?,
)
.map_err(|e| Error::internal(e.to_string()))?;
// Create processor based on config
let (processor, mode) = ProcessorFactory::create(config.clone())?;
// Set up stop channel
let (stop_sender, stop_receiver) = watch::channel(false); let (stop_sender, stop_receiver) = watch::channel(false);
let mut process_handle = tokio::spawn(verify_batches_proofs(stop_receiver, args));
// Log startup information
tracing::info!("Starting verification in {}", mode);
// Spawn processing task
let mut process_handle = tokio::spawn(async move { processor.run(stop_receiver).await });
// Wait for processing to complete or for stop signal
tokio::select! { tokio::select! {
ret = &mut process_handle => { return ret?; }, result = &mut process_handle => {
match result {
Ok(Ok(verification_results)) => {
tracing::info!("Verification completed successfully");
let total_batches = verification_results.len();
let successful_batches = verification_results.iter()
.filter(|(_, result)| result.is_successful())
.count();
tracing::info!(
"Verified {} batches: {} succeeded, {} failed",
total_batches,
successful_batches,
total_batches - successful_batches
);
Ok(())
},
Ok(Err(e)) => {
tracing::error!("Verification failed: {}", e);
Err(e)
},
Err(e) => {
tracing::error!("Task panicked: {}", e);
Err(Error::internal(format!("Task panicked: {}", e)))
}
}
},
_ = signal::ctrl_c() => { _ = signal::ctrl_c() => {
tracing::info!("Stop signal received, shutting down"); tracing::info!("Stop signal received, shutting down gracefully...");
stop_sender.send(true).ok(); stop_sender.send(true).ok();
// Wait for process_batches to complete gracefully
process_handle.await??; // Wait for processor to complete gracefully
} match process_handle.await {
Ok(_) => tracing::info!("Processor stopped gracefully"),
Err(e) => tracing::error!("Error stopping processor: {}", e),
} }
Ok(()) Ok(())
} }
/// Warn (but do not fail) when neither an SGX mrsigner nor an mrenclave
/// allow-list was supplied: verification would then accept a proof produced
/// by any enclave code. Always returns `Ok(())`.
fn validate_arguments(args: &Arguments) -> Result<()> {
    if args.attestation_policy.sgx_mrsigners.is_none()
        && args.attestation_policy.sgx_mrenclaves.is_none()
    {
        // Intentionally only logged: the tool still runs, the operator is
        // just told that the proof's producer is unconstrained.
        error!("Neither `--sgx-mrenclaves` nor `--sgx-mrsigners` specified. Any code could have produced the proof.");
    }
    Ok(())
}
/// Verify all TEE proofs for all batches starting from the given batch number up to the specified
/// batch number, if a range is provided. Otherwise, continue verifying batches until the stop
/// signal is received.
async fn verify_batches_proofs(
    mut stop_receiver: watch::Receiver<bool>,
    args: Arguments,
) -> Result<()> {
    let node_client = MainNodeClient::new(args.rpc_url.clone(), args.chain_id)?;
    let http_client = Client::new();
    // Start at the beginning of the requested range, or at the batch given to
    // `--continuous`; clap's ArgGroup guarantees exactly one of them is set.
    let first_batch_number = match args.batch_range {
        Some((first_batch_number, _)) => first_batch_number,
        None => args
            .continuous
            .expect("clap::ArgGroup should guarantee batch range or continuous option is set"),
    };
    // Continuous mode has no upper bound, so iterate up to u32::MAX.
    let end_batch_number = args
        .batch_range
        .map_or(u32::MAX, |(_, end_batch_number)| end_batch_number.0);
    let mut unverified_batches_count: u32 = 0;
    let mut last_processed_batch_number = first_batch_number.0;
    for current_batch_number in first_batch_number.0..=end_batch_number {
        // Check the stop flag between batches for graceful shutdown.
        if *stop_receiver.borrow() {
            tracing::warn!("Stop signal received, shutting down");
            break;
        }
        trace!("Verifying TEE proofs for batch #{}", current_batch_number);
        let all_verified = verify_batch_proofs(
            &mut stop_receiver,
            current_batch_number.into(),
            &args.rpc_url,
            &http_client,
            &node_client,
            &args.attestation_policy,
        )
        .await?;
        if !all_verified {
            unverified_batches_count += 1;
        }
        // Rate-limit between batches; `changed()` wakes early on stop.
        if current_batch_number < end_batch_number {
            tokio::time::timeout(args.rate_limit, stop_receiver.changed())
                .await
                .ok();
        }
        last_processed_batch_number = current_batch_number;
    }
    // Batches processed = last - first + 1; subtract failures to get successes.
    let verified_batches_count =
        last_processed_batch_number + 1 - first_batch_number.0 - unverified_batches_count;
    if unverified_batches_count > 0 {
        if verified_batches_count == 0 {
            error!(
                "All {} batches failed verification!",
                unverified_batches_count
            );
        } else {
            error!(
                "Some batches failed verification! Unverified batches: {}. Verified batches: {}.",
                unverified_batches_count, verified_batches_count
            );
        }
    } else {
        info!(
            "All {} batches verified successfully!",
            verified_batches_count
        );
    }
    Ok(())
}
/// Verify all TEE proofs for the given batch number. Note that each batch number can potentially
/// have multiple proofs of the same TEE type.
///
/// Returns `Ok(true)` when at least one proof for the batch verified
/// successfully.
async fn verify_batch_proofs(
    stop_receiver: &mut watch::Receiver<bool>,
    batch_number: L1BatchNumber,
    rpc_url: &Url,
    http_client: &Client,
    node_client: &MainNodeClient,
    attestation_policy: &AttestationPolicyArgs,
) -> Result<bool> {
    let proofs = get_proofs(stop_receiver, batch_number, http_client, rpc_url).await?;
    let batch_no = batch_number.0;
    let mut total_proofs_count: u32 = 0;
    let mut unverified_proofs_count: u32 = 0;
    for proof in proofs
        .into_iter()
        // only support SGX proofs for now
        .filter(|proof| proof.tee_type.eq_ignore_ascii_case("sgx"))
    {
        let batch_no = proof.l1_batch_number;
        total_proofs_count += 1;
        let tee_type = proof.tee_type.to_uppercase();
        // Proofs marked permanently ignored are skipped but still counted in
        // the total.
        if proof
            .status
            .map_or(false, |s| s.eq_ignore_ascii_case("permanently_ignored"))
        {
            trace!(
                batch_no,
                tee_type,
                "Proof is marked as permanently ignored. Skipping."
            );
            continue;
        }
        trace!(batch_no, tee_type, proof.proved_at, "Verifying proof.");
        let attestation = proof.attestation.unwrap_or_default();
        debug!(batch_no, "Verifying quote ({} bytes)...", attestation.len());
        // First validate the attestation quote itself, then the batch
        // signature against the attested key and the policy.
        let quote_verification_result = verify_attestation_quote(&attestation)?;
        let verified_successfully = verify_batch_proof(
            &quote_verification_result,
            attestation_policy,
            node_client,
            &proof.signature.unwrap_or_default(),
            L1BatchNumber(proof.l1_batch_number),
        )
        .await?;
        log_quote_verification_summary(&quote_verification_result);
        if verified_successfully {
            info!(
                batch_no,
                proof.proved_at, tee_type, "Verification succeeded.",
            );
        } else {
            unverified_proofs_count += 1;
            warn!(batch_no, proof.proved_at, tee_type, "Verification failed!",);
        }
    }
    let verified_proofs_count = total_proofs_count - unverified_proofs_count;
    if unverified_proofs_count > 0 {
        if verified_proofs_count == 0 {
            error!(
                batch_no,
                "All {} proofs failed verification!", unverified_proofs_count
            );
        } else {
            warn!(
                batch_no,
                "Some proofs failed verification. Unverified proofs: {}. Verified proofs: {}.",
                unverified_proofs_count,
                verified_proofs_count
            );
        }
    }
    // if at least one proof is verified, consider the batch verified
    let is_batch_verified = verified_proofs_count > 0;
    Ok(is_batch_verified)
}

View file

@ -0,0 +1,118 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright (c) 2023-2025 Matter Labs
//! Core functionality for processing individual batches
use crate::error;
use tokio::sync::watch;
use zksync_basic_types::L1BatchNumber;
use crate::{
client::{HttpClient, MainNodeClient, RetryConfig},
core::{VerificationResult, VerifierConfig},
proof::ProofFetcher,
verification::{BatchVerifier, VerificationReporter},
};
/// Responsible for processing individual batches
pub struct BatchProcessor {
    /// Verifier settings (RPC endpoint, TEE types, rate limit, policy, ...).
    config: VerifierConfig,
    /// Fetches TEE proofs for a batch from the RPC endpoint, with retries.
    proof_fetcher: ProofFetcher,
    /// Verifies the fetched proofs against the attestation policy.
    batch_verifier: BatchVerifier<MainNodeClient>,
}
impl BatchProcessor {
    /// Create a new batch processor with the given configuration
    pub fn new(config: VerifierConfig) -> error::Result<Self> {
        // Initialize clients and fetchers
        let node_client = MainNodeClient::new(config.args.rpc_url.clone(), config.args.chain_id)?;
        let http_client = HttpClient::new();
        let retry_config = RetryConfig::default();
        let proof_fetcher =
            ProofFetcher::new(http_client, config.args.rpc_url.clone(), retry_config);
        let batch_verifier = BatchVerifier::new(node_client, config.policy.clone());
        Ok(Self {
            config,
            proof_fetcher,
            batch_verifier,
        })
    }

    /// Process a single batch and return the verification result
    ///
    /// Fetches proofs for every configured TEE type, verifies them against
    /// the attestation policy, and classifies the outcome. Honors the stop
    /// signal on entry and applies the configured rate limit before returning.
    pub async fn process_batch(
        &self,
        stop_receiver: &mut watch::Receiver<bool>,
        batch_number: L1BatchNumber,
    ) -> error::Result<VerificationResult> {
        if *stop_receiver.borrow() {
            tracing::info!("Stop signal received, shutting down");
            return Ok(VerificationResult::Interrupted);
        }
        tracing::trace!("Verifying TEE proofs for batch #{}", batch_number.0);
        // Fetch proofs for the current batch across different TEE types
        let mut proofs = Vec::new();
        for tee_type in self.config.args.tee_types.iter() {
            match self
                .proof_fetcher
                .get_proofs(stop_receiver, batch_number, tee_type)
                .await
            {
                Ok(batch_proofs) => proofs.extend(batch_proofs),
                // Interruption aborts the whole batch, not just this TEE type.
                Err(error::Error::Interrupted) => return Err(error::Error::Interrupted),
                // Any other fetch failure is logged and the remaining TEE
                // types are still tried (best effort per batch).
                Err(e) => {
                    tracing::error!(
                        "Failed to fetch proofs for TEE type {:?} at batch {}: {:#}",
                        tee_type,
                        batch_number.0,
                        e
                    );
                    continue;
                }
            }
        }
        if proofs.is_empty() {
            tracing::warn!("No proofs found for batch #{}", batch_number.0);
            return Ok(VerificationResult::NoProofsFound);
        }
        // Verify proofs for the current batch
        let verification_result = self
            .batch_verifier
            .verify_batch_proofs(stop_receiver, batch_number, proofs)
            .await?;
        // Collapse the per-proof counts into a summary result for this batch.
        let result = if verification_result.total_count == 0 {
            VerificationResult::NoProofsFound
        } else if verification_result.verified_count == verification_result.total_count {
            VerificationResult::Success
        } else if verification_result.verified_count > 0 {
            VerificationResult::PartialSuccess {
                verified_count: verification_result.verified_count,
                unverified_count: verification_result.unverified_count,
            }
        } else {
            VerificationResult::Failure
        };
        tracing::debug!("Batch #{} verification result: {}", batch_number.0, result);
        // Apply rate limiting between batches if needed.
        // `changed()` doubles as a stop-aware sleep: it wakes early if the
        // stop flag flips before the rate-limit window elapses.
        if !matches!(result, VerificationResult::Interrupted)
            && self.config.args.rate_limit.as_millis() > 0
        {
            tokio::time::timeout(self.config.args.rate_limit, stop_receiver.changed())
                .await
                .ok();
        }
        Ok(result)
    }

    /// Log the overall verification results
    pub fn log_overall_results(success_count: u32, failure_count: u32) {
        VerificationReporter::log_overall_verification_results(success_count, failure_count);
    }
}

View file

@ -0,0 +1,95 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright (c) 2023-2025 Matter Labs
//! Continuous batch processor for ongoing verification of new batches
use tokio::sync::watch;
use zksync_basic_types::L1BatchNumber;
use crate::{
core::{VerificationResult, VerifierConfig},
error,
processor::BatchProcessor,
};
/// Processes batches continuously until stopped
pub struct ContinuousProcessor {
    /// Shared per-batch fetch-and-verify logic.
    batch_processor: BatchProcessor,
    /// First batch to verify; processing advances one batch at a time from here.
    start_batch: L1BatchNumber,
}
impl ContinuousProcessor {
    /// Create a new continuous processor that starts from the given batch
    pub fn new(config: VerifierConfig, start_batch: L1BatchNumber) -> error::Result<Self> {
        let batch_processor = BatchProcessor::new(config)?;
        Ok(Self {
            batch_processor,
            start_batch,
        })
    }

    /// Run the processor until stopped
    ///
    /// Returns the `(batch number, result)` pairs collected up to the stop
    /// point.
    pub async fn run(
        &self,
        mut stop_receiver: watch::Receiver<bool>,
    ) -> error::Result<Vec<(u32, VerificationResult)>> {
        tracing::info!(
            "Starting continuous verification from batch {}",
            self.start_batch.0
        );
        let mut results = Vec::new();
        let mut success_count = 0;
        let mut failure_count = 0;
        let mut current_batch = self.start_batch.0;
        // Continue processing batches until stopped or reaching maximum batch number
        while !*stop_receiver.borrow() {
            let batch = L1BatchNumber(current_batch);
            match self
                .batch_processor
                .process_batch(&mut stop_receiver, batch)
                .await
            {
                Ok(result) => {
                    match result {
                        VerificationResult::Success => success_count += 1,
                        // NOTE(review): PartialSuccess is always counted as a
                        // success here, even when `is_successful()` would be
                        // false (more failed than verified) — confirm intended.
                        VerificationResult::PartialSuccess { .. } => success_count += 1,
                        VerificationResult::Failure => failure_count += 1,
                        VerificationResult::Interrupted => {
                            results.push((current_batch, result));
                            break;
                        }
                        VerificationResult::NoProofsFound => {
                            // In continuous mode, we might hit batches that don't have proofs yet
                            // Wait a bit longer before retrying
                            // NOTE(review): this sleep ignores the stop signal
                            // for up to 5s; a stop-aware wait would shut down
                            // faster.
                            if !*stop_receiver.borrow() {
                                tokio::time::sleep(tokio::time::Duration::from_secs(5)).await;
                                // Don't increment batch number, try again
                                continue;
                            }
                        }
                    }
                    results.push((current_batch, result));
                }
                Err(e) => {
                    tracing::error!("Error processing batch {}: {}", current_batch, e);
                    results.push((current_batch, VerificationResult::Failure));
                    failure_count += 1;
                }
            }
            // Move to the next batch
            current_batch = current_batch
                .checked_add(1)
                .ok_or(error::Error::internal("Maximum batch number reached"))?;
        }
        // Log overall results
        BatchProcessor::log_overall_results(success_count, failure_count);
        Ok(results)
    }
}

View file

@ -0,0 +1,65 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright (c) 2023-2025 Matter Labs
//! Processing logic for batch verification
mod batch_processor;
mod continuous_processor;
mod one_shot_processor;
pub use batch_processor::BatchProcessor;
pub use continuous_processor::ContinuousProcessor;
pub use one_shot_processor::OneShotProcessor;
use crate::{
core::{VerificationResult, VerifierConfig, VerifierMode},
error::Result,
};
use tokio::sync::watch;
// Using an enum instead of a trait because async functions in traits can't be used in trait objects
/// Processor variants for different verification modes
pub enum ProcessorType {
    /// One-shot processor for processing a specific range of batches and then exiting
    OneShot(OneShotProcessor),
    /// Continuous processor for monitoring new batches until interrupted
    Continuous(ContinuousProcessor),
}
impl ProcessorType {
    /// Run the processor until completion or interruption
    ///
    /// Delegates to the wrapped processor; returns one
    /// `(batch number, result)` pair per processed batch.
    pub async fn run(
        &self,
        stop_receiver: watch::Receiver<bool>,
    ) -> Result<Vec<(u32, VerificationResult)>> {
        match self {
            ProcessorType::OneShot(processor) => processor.run(stop_receiver).await,
            ProcessorType::Continuous(processor) => processor.run(stop_receiver).await,
        }
    }
}
/// Factory for creating the appropriate processor based on configuration
pub struct ProcessorFactory;

impl ProcessorFactory {
    /// Create a new processor based on the provided configuration
    ///
    /// Returns the processor together with a description of the mode it will
    /// run in (one-shot over a fixed range, or continuous from a start batch).
    pub fn create(config: VerifierConfig) -> Result<(ProcessorType, VerifierMode)> {
        if let Some((start, end)) = config.args.batch_range {
            let mode = VerifierMode::OneShot {
                start_batch: start,
                end_batch: end,
            };
            let processor = OneShotProcessor::new(config, start, end)?;
            Ok((ProcessorType::OneShot(processor), mode))
        } else if let Some(start) = config.args.continuous {
            let mode = VerifierMode::Continuous { start_batch: start };
            let processor = ContinuousProcessor::new(config, start)?;
            Ok((ProcessorType::Continuous(processor), mode))
        } else {
            unreachable!("Clap ArgGroup should ensure either batch_range or continuous is set")
        }
    }
}

View file

@ -0,0 +1,79 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright (c) 2023-2025 Matter Labs
//! One-shot batch processor for verifying a single batch or a range of batches
use crate::error;
use tokio::sync::watch;
use zksync_basic_types::L1BatchNumber;
use crate::{
core::{VerificationResult, VerifierConfig},
processor::BatchProcessor,
};
/// Processes a specific range of batches and then exits
pub struct OneShotProcessor {
    /// Shared per-batch fetch-and-verify logic.
    batch_processor: BatchProcessor,
    /// First batch in the (inclusive) range to verify.
    start_batch: L1BatchNumber,
    /// Last batch in the (inclusive) range to verify.
    end_batch: L1BatchNumber,
}
impl OneShotProcessor {
    /// Create a new one-shot processor for the given batch range
    pub fn new(
        config: VerifierConfig,
        start_batch: L1BatchNumber,
        end_batch: L1BatchNumber,
    ) -> error::Result<Self> {
        let batch_processor = BatchProcessor::new(config)?;
        Ok(Self {
            batch_processor,
            start_batch,
            end_batch,
        })
    }

    /// Run the processor until completion or interruption
    ///
    /// Iterates over `start_batch..=end_batch`, collecting one
    /// `(batch number, result)` pair per processed batch.
    pub async fn run(
        &self,
        mut stop_receiver: watch::Receiver<bool>,
    ) -> error::Result<Vec<(u32, VerificationResult)>> {
        tracing::info!(
            "Starting one-shot verification of batches {} to {}",
            self.start_batch.0,
            self.end_batch.0
        );
        let mut results = Vec::new();
        let mut success_count = 0;
        let mut failure_count = 0;
        for batch_number in self.start_batch.0..=self.end_batch.0 {
            let batch = L1BatchNumber(batch_number);
            let result = self
                .batch_processor
                .process_batch(&mut stop_receiver, batch)
                .await?;
            match result {
                VerificationResult::Success => success_count += 1,
                // NOTE(review): PartialSuccess always counts as a success
                // here, even when `is_successful()` would be false — confirm
                // intended.
                VerificationResult::PartialSuccess { .. } => success_count += 1,
                VerificationResult::Failure => failure_count += 1,
                VerificationResult::Interrupted => {
                    // Record the interruption and stop early; `break` skips
                    // the unconditional push below, so no duplicate entry.
                    results.push((batch_number, result));
                    break;
                }
                // Batches without proofs are recorded but counted neither as
                // success nor failure.
                VerificationResult::NoProofsFound => {}
            }
            results.push((batch_number, result));
        }
        // Log overall results
        BatchProcessor::log_overall_results(success_count, failure_count);
        Ok(results)
    }
}

View file

@ -1,172 +0,0 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright (c) 2023-2024 Matter Labs
use anyhow::{bail, Result};
use jsonrpsee_types::error::ErrorObject;
use reqwest::Client;
use serde::{Deserialize, Serialize};
use serde_with::{hex::Hex, serde_as};
use std::time::Duration;
use tokio::sync::watch;
use tracing::{error, warn};
use url::Url;
use zksync_basic_types::L1BatchNumber;
/// JSON-RPC request body for the `unstable_getTeeProofs` method.
#[derive(Debug, Serialize, Deserialize)]
pub struct GetProofsRequest {
    /// JSON-RPC protocol version, always "2.0".
    pub jsonrpc: String,
    /// Request id echoed back by the server.
    pub id: u32,
    /// RPC method name ("unstable_getTeeProofs").
    pub method: String,
    /// (batch number, TEE type string). The TEE type starts as "sgx" and may
    /// be rewritten to "Sgx" for older servers (see `send`).
    pub params: (L1BatchNumber, String),
}
/// Poll the RPC endpoint for TEE proofs of `batch_number`, retrying with
/// exponential backoff (1s doubling up to 128s) until at least one proof is
/// returned that is neither "failed" nor "picked_by_prover". Returns an empty
/// vector if the stop signal arrives first.
pub async fn get_proofs(
    stop_receiver: &mut watch::Receiver<bool>,
    batch_number: L1BatchNumber,
    http_client: &Client,
    rpc_url: &Url,
) -> Result<Vec<Proof>> {
    let mut proofs_request = GetProofsRequest::new(batch_number);
    let mut retries = 0;
    let mut backoff = Duration::from_secs(1);
    let max_backoff = Duration::from_secs(128);
    let retry_backoff_multiplier: f32 = 2.0;
    while !*stop_receiver.borrow() {
        let proofs = proofs_request
            .send(stop_receiver, http_client, rpc_url)
            .await?;
        // Accept the response only if every returned proof is in a final,
        // usable state; otherwise keep polling.
        if !proofs.is_empty()
            && proofs.iter().all(|proof| {
                !proof.status.as_ref().map_or(false, |s| {
                    s.eq_ignore_ascii_case("failed") | s.eq_ignore_ascii_case("picked_by_prover")
                })
            })
        {
            return Ok(proofs);
        }
        retries += 1;
        warn!(
            batch_no = batch_number.0, retries,
            "No TEE proofs found for batch #{}. They may not be ready yet. Retrying in {} milliseconds.",
            batch_number, backoff.as_millis(),
        );
        // Interruptible sleep: `changed()` wakes early when the stop flag flips.
        tokio::time::timeout(backoff, stop_receiver.changed())
            .await
            .ok();
        backoff = std::cmp::min(backoff.mul_f32(retry_backoff_multiplier), max_backoff);
    }
    Ok(vec![])
}
impl GetProofsRequest {
    /// Build a request for the proofs of `batch_number` (TEE type "sgx").
    pub fn new(batch_number: L1BatchNumber) -> Self {
        GetProofsRequest {
            jsonrpc: "2.0".to_string(),
            id: 1,
            method: "unstable_getTeeProofs".to_string(),
            params: (batch_number, "sgx".to_string()),
        }
    }

    /// POST this request to `rpc_url`, retrying transport/decode failures up
    /// to 5 times with exponential backoff (1s doubling up to 128s). Returns
    /// the decoded proofs, or an empty vector when interrupted by the stop
    /// signal before a response arrived.
    pub async fn send(
        &mut self,
        stop_receiver: &mut watch::Receiver<bool>,
        http_client: &Client,
        rpc_url: &Url,
    ) -> Result<Vec<Proof>> {
        let mut retries = 0;
        let max_retries = 5;
        let mut backoff = Duration::from_secs(1);
        let max_backoff = Duration::from_secs(128);
        let retry_backoff_multiplier: f32 = 2.0;
        let mut response = None;
        while !*stop_receiver.borrow() {
            let result = http_client
                .post(rpc_url.clone())
                .json(self)
                .send()
                .await?
                .error_for_status()?
                .json::<GetProofsResponse>()
                .await;
            match result {
                Ok(res) => match res.error {
                    None => {
                        response = Some(res);
                        break;
                    }
                    Some(error) => {
                        // Handle corner case, where the old RPC interface expects 'Sgx'
                        if let Some(data) = error.data() {
                            if data.get().contains("unknown variant `sgx`, expected `Sgx`") {
                                self.params.1 = "Sgx".to_string();
                                continue;
                            }
                        }
                        // Any other JSON-RPC error is fatal for this request.
                        error!(?error, "received JSONRPC error {error:?}");
                        bail!("JSONRPC error {error:?}");
                    }
                },
                Err(err) => {
                    // Transport or decode failure: back off and retry, giving
                    // up after `max_retries` attempts.
                    retries += 1;
                    if retries >= max_retries {
                        return Err(anyhow::anyhow!(
                            "Failed to send request to {} after {} retries: {}. Request details: {:?}",
                            rpc_url,
                            max_retries,
                            err,
                            self
                        ));
                    }
                    warn!(
                        %err,
                        "Failed to send request to {rpc_url}. {retries}/{max_retries}, retrying in {} milliseconds. Request details: {:?}",
                        backoff.as_millis(),
                        self
                    );
                    // Interruptible backoff sleep.
                    tokio::time::timeout(backoff, stop_receiver.changed())
                        .await
                        .ok();
                    backoff = std::cmp::min(backoff.mul_f32(retry_backoff_multiplier), max_backoff);
                }
            };
        }
        Ok(response.map_or_else(Vec::new, |res| res.result.unwrap_or_default()))
    }
}
/// JSON-RPC response envelope for `unstable_getTeeProofs`.
#[derive(Debug, Serialize, Deserialize)]
pub struct GetProofsResponse {
    /// JSON-RPC protocol version echoed by the server.
    pub jsonrpc: String,
    /// The proofs, when the call succeeded.
    pub result: Option<Vec<Proof>>,
    /// Request id echoed by the server.
    pub id: u32,
    /// JSON-RPC error object, when the call failed.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub error: Option<ErrorObject<'static>>,
}
/// A single TEE proof entry as returned by `unstable_getTeeProofs`.
#[serde_as]
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Proof {
    /// Batch this proof belongs to.
    pub l1_batch_number: u32,
    /// TEE type string, e.g. "sgx".
    pub tee_type: String,
    /// Hex-encoded public key of the proving enclave, if present.
    #[serde_as(as = "Option<Hex>")]
    pub pubkey: Option<Vec<u8>>,
    /// Hex-encoded signature over the batch, if present.
    #[serde_as(as = "Option<Hex>")]
    pub signature: Option<Vec<u8>>,
    /// Hex-encoded proof payload, if present.
    #[serde_as(as = "Option<Hex>")]
    pub proof: Option<Vec<u8>>,
    /// Timestamp string of when the proof was produced.
    pub proved_at: String,
    /// Proof status, e.g. "failed", "picked_by_prover" or "permanently_ignored".
    pub status: Option<String>,
    /// Hex-encoded attestation quote, if present.
    #[serde_as(as = "Option<Hex>")]
    pub attestation: Option<Vec<u8>>,
}

View file

@ -0,0 +1,139 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright (c) 2023-2025 Matter Labs
use crate::{
client::{HttpClient, RetryConfig, RetryHelper},
error::{Error, Result},
proof::{
parsing::ProofResponseParser,
types::{GetProofsRequest, GetProofsResponse, Proof},
},
};
use std::time::Duration;
use tokio::sync::watch;
use url::Url;
use zksync_basic_types::{tee_types::TeeType, L1BatchNumber};
/// Handles fetching proofs from the server with retry logic
pub struct ProofFetcher {
    /// HTTP client used for the JSON-RPC POSTs.
    http_client: HttpClient,
    /// JSON-RPC endpoint to query.
    rpc_url: Url,
    /// Retry parameters applied to each individual request.
    retry_config: RetryConfig,
}
impl ProofFetcher {
    /// Create a new proof fetcher
    pub fn new(http_client: HttpClient, rpc_url: Url, retry_config: RetryConfig) -> Self {
        Self {
            http_client,
            rpc_url,
            retry_config,
        }
    }

    /// Get proofs for a batch number with retry logic
    ///
    /// Polls the RPC endpoint until at least one valid (not failed /
    /// picked-by-prover) proof is returned, backing off exponentially
    /// (1s doubling up to 128s) between polls. Returns an empty vector when a
    /// stop signal is received before any valid proof shows up.
    pub async fn get_proofs(
        &self,
        stop_receiver: &mut watch::Receiver<bool>,
        batch_number: L1BatchNumber,
        tee_type: &TeeType,
    ) -> Result<Vec<Proof>> {
        let mut proofs_request = GetProofsRequest::new(batch_number, tee_type);
        let mut backoff = Duration::from_secs(1);
        let max_backoff = Duration::from_secs(128);
        let retry_backoff_multiplier: f32 = 2.0;

        while !*stop_receiver.borrow() {
            // Transport/retry errors propagate immediately via `?`; the loop
            // here only handles "proofs not ready yet".
            let response = self.send_request(&proofs_request, stop_receiver).await?;

            match ProofResponseParser::parse_response(response) {
                Ok(proofs) => {
                    // Drop proofs that are failed or still being proven.
                    let valid_proofs = ProofResponseParser::filter_valid_proofs(&proofs);
                    if !valid_proofs.is_empty() {
                        return Ok(valid_proofs);
                    }
                    tracing::warn!(
                        batch_no = batch_number.0,
                        "No valid TEE proofs found for batch #{}. They may not be ready yet. Retrying in {} milliseconds.",
                        batch_number.0,
                        backoff.as_millis()
                    );
                }
                Err(e) => {
                    // Older RPC servers expect the legacy `Sgx` spelling;
                    // switch the request parameter and retry immediately
                    // (no backoff for this one-time protocol adjustment).
                    if let Error::JsonRpc(msg) = &e {
                        if msg.contains("RPC requires 'Sgx' variant") {
                            tracing::debug!("Switching to 'Sgx' variant for RPC");
                            proofs_request.params.1 = "Sgx".to_string();
                            continue;
                        }
                    }
                    return Err(e);
                }
            }

            // Interruptible sleep: `changed()` wakes early if the stop flag
            // flips before the backoff elapses.
            tokio::time::timeout(backoff, stop_receiver.changed())
                .await
                .ok();
            backoff = std::cmp::min(backoff.mul_f32(retry_backoff_multiplier), max_backoff);
        }

        // The loop above only exits when the stop signal was set, so report a
        // graceful (empty) result rather than an error.
        Ok(vec![])
    }

    /// Send a single `get_proofs` JSON-RPC request, retried per
    /// `retry_config` by `RetryHelper`.
    async fn send_request(
        &self,
        request: &GetProofsRequest,
        stop_receiver: &mut watch::Receiver<bool>,
    ) -> Result<GetProofsResponse> {
        let retry_helper = RetryHelper::new(self.retry_config.clone());
        let request_clone = request.clone();
        let http_client = self.http_client.clone();
        let rpc_url = self.rpc_url.clone();

        retry_helper
            .execute(&format!("get_proofs_{}", request.params.0), || async {
                let result = http_client
                    .send_json::<_, GetProofsResponse>(&rpc_url, &request_clone)
                    .await;

                // Abort the retry sequence as soon as a stop is signalled,
                // even if the HTTP call itself succeeded.
                if *stop_receiver.borrow() {
                    return Err(Error::Interrupted);
                }

                result
            })
            .await
    }
}

View file

@ -0,0 +1,9 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright (c) 2023-2025 Matter Labs
mod fetcher;
mod parsing;
mod types;
pub use fetcher::ProofFetcher;
pub use types::Proof;

View file

@ -0,0 +1,277 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright (c) 2023-2025 Matter Labs
use super::types::{GetProofsResponse, Proof};
use crate::error;
/// Handles parsing of proof responses and error handling
pub struct ProofResponseParser;
impl ProofResponseParser {
/// Parse a response and extract the proofs
pub fn parse_response(response: GetProofsResponse) -> error::Result<Vec<Proof>> {
// Handle JSON-RPC errors
if let Some(error) = response.error {
// Special case for handling the old RPC interface
if let Some(data) = error.data() {
if data.get().contains("unknown variant `sgx`, expected `Sgx`") {
return Err(error::Error::JsonRpc(
"RPC requires 'Sgx' variant instead of 'sgx'".to_string(),
));
}
}
return Err(error::Error::JsonRpc(format!("JSONRPC error: {:?}", error)));
}
// Extract proofs from the result
Ok(response.result.unwrap_or_default())
}
/// Filter proofs to find valid ones
pub fn filter_valid_proofs(proofs: &[Proof]) -> Vec<Proof> {
proofs
.iter()
.filter(|proof| !proof.is_failed_or_picked())
.cloned()
.collect()
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use jsonrpsee_types::error::ErrorObject;

    /// Build a `Proof` for the given batch number and status; all other
    /// fields are fixed placeholder values.
    fn proof_for_batch(l1_batch_number: u32, status: Option<&str>) -> Proof {
        Proof {
            l1_batch_number,
            tee_type: "TDX".to_string(),
            pubkey: None,
            signature: None,
            proof: None,
            proved_at: "2023-01-01T00:00:00Z".to_string(),
            status: status.map(str::to_string),
            attestation: None,
        }
    }

    /// Build a `Proof` for batch 123 with the given status.
    fn proof_with_status(status: Option<&str>) -> Proof {
        proof_for_batch(123, status)
    }

    #[test]
    fn test_proof_is_permanently_ignored() {
        // Matching is case-insensitive.
        assert!(proof_with_status(Some("permanently_ignored")).is_permanently_ignored());
        assert!(proof_with_status(Some("PERMANENTLY_IGNORED")).is_permanently_ignored());
        // Other or missing statuses are not permanently ignored.
        assert!(!proof_with_status(Some("other")).is_permanently_ignored());
        assert!(!proof_with_status(None).is_permanently_ignored());
    }

    #[test]
    fn test_proof_is_failed_or_picked() {
        assert!(proof_with_status(Some("failed")).is_failed_or_picked());
        assert!(proof_with_status(Some("picked_by_prover")).is_failed_or_picked());
        // Matching is case-insensitive.
        assert!(proof_with_status(Some("FAILED")).is_failed_or_picked());
        // Other or missing statuses are neither failed nor picked.
        assert!(!proof_with_status(Some("other")).is_failed_or_picked());
        assert!(!proof_with_status(None).is_failed_or_picked());
    }

    #[test]
    fn test_parse_response_success() {
        let response = GetProofsResponse {
            jsonrpc: "2.0".to_string(),
            result: Some(vec![proof_with_status(None)]),
            id: 1,
            error: None,
        };
        let proofs = ProofResponseParser::parse_response(response).unwrap();
        assert_eq!(proofs.len(), 1);
        assert_eq!(proofs[0].l1_batch_number, 123);
    }

    #[test]
    fn test_parse_response_error() {
        let response = GetProofsResponse {
            jsonrpc: "2.0".to_string(),
            result: None,
            id: 1,
            error: Some(ErrorObject::owned(1, "Error", None::<()>)),
        };
        let error = ProofResponseParser::parse_response(response).unwrap_err();
        match error {
            error::Error::JsonRpc(msg) => assert!(msg.contains("JSONRPC error")),
            _ => panic!("Expected JsonRpc error"),
        }
    }

    #[test]
    fn test_parse_response_sgx_variant_error() {
        // The legacy-variant message must be detected inside the error's
        // `data` payload and mapped to the dedicated retry hint.
        let error_obj = ErrorObject::owned(
            1,
            "Error",
            Some(
                serde_json::to_value("unknown variant `sgx`, expected `Sgx`")
                    .unwrap()
                    .to_string(),
            ),
        );
        let response = GetProofsResponse {
            jsonrpc: "2.0".to_string(),
            result: None,
            id: 1,
            error: Some(error_obj),
        };
        let error = ProofResponseParser::parse_response(response).unwrap_err();
        match error {
            error::Error::JsonRpc(msg) => assert!(msg.contains("RPC requires 'Sgx' variant")),
            _ => panic!("Expected JsonRpc error about Sgx variant"),
        }
    }

    #[test]
    fn test_filter_valid_proofs() {
        let proofs = vec![
            proof_for_batch(123, None),
            proof_for_batch(124, Some("failed")),
            proof_for_batch(125, Some("picked_by_prover")),
        ];
        let valid_proofs = ProofResponseParser::filter_valid_proofs(&proofs);
        assert_eq!(valid_proofs.len(), 1);
        assert_eq!(valid_proofs[0].l1_batch_number, 123);
    }
}

View file

@ -0,0 +1,83 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright (c) 2023-2025 Matter Labs
use jsonrpsee_types::error::ErrorObject;
use serde::{Deserialize, Serialize};
use serde_with::{hex::Hex, serde_as};
use zksync_basic_types::{tee_types::TeeType, L1BatchNumber};
/// Request structure for fetching proofs
///
/// Serializes to a JSON-RPC 2.0 request envelope for the
/// `unstable_getTeeProofs` method.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct GetProofsRequest {
    /// JSON-RPC protocol version ("2.0" when built via `new`)
    pub jsonrpc: String,
    /// Request identifier echoed back in the response
    pub id: u32,
    /// JSON-RPC method name
    pub method: String,
    /// Positional parameters: the batch number and the TEE type string
    pub params: (L1BatchNumber, String),
}
impl GetProofsRequest {
/// Create a new request for the given batch number
pub fn new(batch_number: L1BatchNumber, tee_type: &TeeType) -> Self {
GetProofsRequest {
jsonrpc: "2.0".to_string(),
id: 1,
method: "unstable_getTeeProofs".to_string(),
params: (batch_number, tee_type.to_string()),
}
}
}
/// Response structure for proof requests
#[derive(Debug, Serialize, Deserialize)]
pub struct GetProofsResponse {
    /// JSON-RPC protocol version
    pub jsonrpc: String,
    /// Proof list on success; `None` when the server returned no result
    pub result: Option<Vec<Proof>>,
    /// Identifier of the request this response answers
    pub id: u32,
    /// JSON-RPC error object, if the call failed
    #[serde(skip_serializing_if = "Option::is_none")]
    pub error: Option<ErrorObject<'static>>,
}
/// Proof structure containing attestation and signature data
#[serde_as]
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct Proof {
    /// L1 batch this proof refers to
    pub l1_batch_number: u32,
    /// TEE type label, e.g. "TDX" (compared case-insensitively elsewhere)
    pub tee_type: String,
    /// Hex-encoded public key of the prover, if present
    #[serde_as(as = "Option<Hex>")]
    pub pubkey: Option<Vec<u8>>,
    /// Hex-encoded signature, if present
    #[serde_as(as = "Option<Hex>")]
    pub signature: Option<Vec<u8>>,
    /// Hex-encoded proof payload, if present
    #[serde_as(as = "Option<Hex>")]
    pub proof: Option<Vec<u8>>,
    /// Timestamp string reported by the server
    pub proved_at: String,
    /// Proof status, e.g. "failed", "picked_by_prover",
    /// "permanently_ignored"; absent when the server reports none
    pub status: Option<String>,
    /// Hex-encoded attestation quote, if present
    #[serde_as(as = "Option<Hex>")]
    pub attestation: Option<Vec<u8>>,
}
impl Proof {
    /// Check if the proof is marked as permanently ignored
    /// (case-insensitive match on the status field).
    pub fn is_permanently_ignored(&self) -> bool {
        // `is_some_and` replaces the clippy-flagged `map_or(false, …)`.
        self.status
            .as_deref()
            .is_some_and(|s| s.eq_ignore_ascii_case("permanently_ignored"))
    }

    /// Check if the proof is failed or picked by a prover
    /// (case-insensitive match on the status field).
    pub fn is_failed_or_picked(&self) -> bool {
        self.status.as_deref().is_some_and(|s| {
            s.eq_ignore_ascii_case("failed") || s.eq_ignore_ascii_case("picked_by_prover")
        })
    }

    /// Get the attestation bytes or an empty vector if not present
    pub fn attestation_bytes(&self) -> Vec<u8> {
        self.attestation.clone().unwrap_or_default()
    }

    /// Get the signature bytes or an empty vector if not present
    pub fn signature_bytes(&self) -> Vec<u8> {
        self.signature.clone().unwrap_or_default()
    }
}

View file

@ -1,208 +0,0 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright (c) 2023-2025 Matter Labs
use crate::{args::AttestationPolicyArgs, client::JsonRpcClient};
use anyhow::{anyhow, Context, Result};
use hex::encode;
use secp256k1::{
ecdsa::{RecoverableSignature, RecoveryId, Signature},
Message, SECP256K1,
};
use teepot::{
ethereum::{public_key_to_ethereum_address, recover_signer},
prover::reportdata::ReportData,
quote::{
error::QuoteContext, tee_qv_get_collateral, verify_quote_with_collateral,
QuoteVerificationResult, Report,
},
sgx::TcbLevel,
};
use tracing::{debug, info, trace, warn};
use zksync_basic_types::{L1BatchNumber, H256};
/// A TEE proof bundle: parsed report data, the batch root hash it should
/// commit to, and the signature to check against it.
struct TeeProof {
    // Report data extracted from the attestation quote
    report: ReportData,
    // Root hash of the L1 batch being proven
    root_hash: H256,
    // Raw signature bytes (64-byte compact, or 65 with recovery id for V1)
    signature: Vec<u8>,
}
impl TeeProof {
    /// Bundle report data, batch root hash and signature for verification.
    pub fn new(report: ReportData, root_hash: H256, signature: Vec<u8>) -> Self {
        Self {
            report,
            root_hash,
            signature,
        }
    }

    /// Verify the signature over the root hash against the identity
    /// (public key or Ethereum address) embedded in the report data.
    ///
    /// Returns `Ok(false)` on mismatch and for unknown report versions;
    /// `Err` only for malformed inputs.
    pub fn verify(&self) -> Result<bool> {
        match &self.report {
            // V0: compact ECDSA signature checked directly against the
            // public key from the report.
            ReportData::V0(report) => {
                debug!("ReportData::V0");
                let signature = Signature::from_compact(&self.signature)?;
                let root_hash_msg = Message::from_digest_slice(&self.root_hash.0)?;
                Ok(signature.verify(&root_hash_msg, &report.pubkey).is_ok())
            }
            // V1: recover the signer's Ethereum address from the signature
            // and compare it with the address from the report.
            ReportData::V1(report) => {
                debug!("ReportData::V1");
                let ethereum_address_from_report = report.ethereum_address;
                let root_hash_msg = Message::from_digest_slice(self.root_hash.as_bytes())?;
                trace!("sig len = {}", self.signature.len());
                let sig_vec = self.signature.clone();
                if self.signature.len() == 64 {
                    info!("Signature is missing RecoveryId!");
                    // Fallback for missing RecoveryId: brute-force all four
                    // possible recovery IDs and accept the first that
                    // recovers the expected address.
                    for rec_id in [
                        RecoveryId::Zero,
                        RecoveryId::One,
                        RecoveryId::Two,
                        RecoveryId::Three,
                    ] {
                        let Ok(sig) = RecoverableSignature::from_compact(&sig_vec, rec_id) else {
                            continue;
                        };
                        let Ok(public) = SECP256K1.recover_ecdsa(&root_hash_msg, &sig) else {
                            continue;
                        };
                        let ethereum_address_from_signature =
                            public_key_to_ethereum_address(&public);
                        debug!(
                            "Root hash: {}. Ethereum address from the attestation quote: {}. Ethereum address from the signature: {}.",
                            self.root_hash,
                            encode(ethereum_address_from_report),
                            encode(ethereum_address_from_signature),
                        );
                        if ethereum_address_from_signature == ethereum_address_from_report {
                            info!("Had to use RecoveryId::{rec_id:?}");
                            return Ok(true);
                        }
                    }
                    return Ok(false);
                }
                // Otherwise expect a 65-byte signature with the recovery id
                // appended; any other length errors out in the conversion.
                let signature_bytes: [u8; 65] = sig_vec
                    .try_into()
                    .map_err(|e| anyhow!("{:?}", e))
                    .context("invalid length of signature bytes")?;
                let ethereum_address_from_signature =
                    recover_signer(&signature_bytes, &root_hash_msg)?;
                debug!(
                    "Root hash: {}. Ethereum address from the attestation quote: {}. Ethereum address from the signature: {}.",
                    self.root_hash,
                    encode(ethereum_address_from_report),
                    encode(ethereum_address_from_signature),
                );
                Ok(ethereum_address_from_signature == ethereum_address_from_report)
            }
            // Unknown report versions never verify.
            ReportData::Unknown(_) => Ok(false),
        }
    }
}
/// Verify a single batch proof end-to-end.
///
/// Steps: (1) check the quote against the attestation policy, (2) fetch
/// the batch root hash from the node, (3) check the proof signature over
/// that root hash against the identity in the quote's report data.
///
/// Returns `Ok(false)` when the policy or the signature check fails.
pub async fn verify_batch_proof(
    quote_verification_result: &QuoteVerificationResult,
    attestation_policy: &AttestationPolicyArgs,
    node_client: &impl JsonRpcClient,
    signature: &[u8],
    batch_number: L1BatchNumber,
) -> Result<bool> {
    // Reject quotes that do not satisfy the configured policy.
    if !is_quote_matching_policy(attestation_policy, quote_verification_result) {
        return Ok(false);
    }
    // The signed message is the root hash of the batch.
    let root_hash = node_client.get_root_hash(batch_number).await?;
    let report_data_bytes = quote_verification_result.quote.get_report_data();
    let report_data = ReportData::try_from(report_data_bytes)?;
    let tee_proof = TeeProof::new(report_data, root_hash, signature.to_vec());
    tee_proof.verify()
}
/// Verify an attestation quote against freshly fetched collateral at the
/// current system time.
///
/// # Errors
/// Fails when collateral cannot be fetched, the clock is before the Unix
/// epoch, or quote verification itself errors out.
pub fn verify_attestation_quote(attestation_quote_bytes: &[u8]) -> Result<QuoteVerificationResult> {
    let collateral = QuoteContext::context(
        tee_qv_get_collateral(attestation_quote_bytes),
        "Failed to get collateral!",
    )?;
    // Quote verification is time-dependent; use the current Unix timestamp.
    let unix_time: i64 = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)?
        .as_secs() as _;
    verify_quote_with_collateral(attestation_quote_bytes, Some(&collateral), unix_time)
        .context("Failed to verify quote with collateral!")
}
/// Log a one-line human-readable summary of a quote verification result:
/// TCB level, report summary and any advisory IDs.
pub fn log_quote_verification_summary(quote_verification_result: &QuoteVerificationResult) {
    let QuoteVerificationResult {
        collateral_expired,
        result,
        quote,
        advisories,
        ..
    } = quote_verification_result;

    if *collateral_expired {
        warn!("Freshly fetched collateral expired!");
    }

    let tcblevel = TcbLevel::from(*result);
    // Render the advisory list up front; "None" when there are none.
    let advisory_ids: Vec<String> = advisories.iter().map(ToString::to_string).collect();
    let advisories = if advisory_ids.is_empty() {
        "None".to_string()
    } else {
        advisory_ids.join(", ")
    };

    info!(
        "Quote verification result: {tcblevel}. {report}. Advisory IDs: {advisories}.",
        report = &quote.report
    );
}
/// Check whether a verified quote satisfies the attestation policy:
/// allowed TCB level plus, for SGX enclave reports, the mrsigner and
/// mrenclave allow-lists. Non-SGX reports are rejected.
fn is_quote_matching_policy(
    attestation_policy: &AttestationPolicyArgs,
    quote_verification_result: &QuoteVerificationResult,
) -> bool {
    let quote = &quote_verification_result.quote;
    let tcblevel = TcbLevel::from(quote_verification_result.result);
    // TCB level must be in the allow-set regardless of report type.
    if !attestation_policy.sgx_allowed_tcb_levels.contains(tcblevel) {
        warn!(
            "Quote verification failed: TCB level mismatch (expected one of: {:?}, actual: {})",
            attestation_policy.sgx_allowed_tcb_levels, tcblevel
        );
        return false;
    }
    match &quote.report {
        Report::SgxEnclave(report_body) => {
            // Both allow-lists must pass (an unset policy always passes).
            check_policy(
                attestation_policy.sgx_mrsigners.as_deref(),
                &report_body.mr_signer,
                "mrsigner",
            ) && check_policy(
                attestation_policy.sgx_mrenclaves.as_deref(),
                &report_body.mr_enclave,
                "mrenclave",
            )
        }
        // Only SGX enclave reports are supported by this policy check.
        _ => false,
    }
}
/// Return `true` when `actual_value` (hex-encoded) appears in the
/// comma-separated `policy` list, or when no policy is configured.
fn check_policy(policy: Option<&str>, actual_value: &[u8], field_name: &str) -> bool {
    let Some(valid_values) = policy else {
        // No policy configured for this field: accept anything.
        return true;
    };
    let valid_values: Vec<&str> = valid_values.split(',').collect();
    let actual_value = hex::encode(actual_value);
    if !valid_values.contains(&actual_value.as_str()) {
        warn!(
            "Quote verification failed: {} mismatch (expected one of: {:?}, actual: {})",
            field_name, valid_values, actual_value
        );
        return false;
    }
    debug!(field_name, actual_value, "Attestation policy check passed");
    true
}

View file

@ -0,0 +1,35 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright (c) 2023-2025 Matter Labs
use teepot::quote::{
error::QuoteContext, tee_qv_get_collateral, verify_quote_with_collateral,
QuoteVerificationResult,
};
use crate::error;
/// Handles verification of attestation quotes
pub struct AttestationVerifier;

impl AttestationVerifier {
    /// Verify an attestation quote against freshly fetched collateral at
    /// the current system time.
    ///
    /// # Errors
    /// Fails when collateral cannot be fetched, the system time is before
    /// the Unix epoch, or quote verification itself errors out.
    pub fn verify_quote(attestation_quote_bytes: &[u8]) -> error::Result<QuoteVerificationResult> {
        // Fetch the collateral matching this quote.
        let collateral = QuoteContext::context(
            tee_qv_get_collateral(attestation_quote_bytes),
            "Failed to get collateral!",
        )?;

        // Quote verification is time-dependent: compute the Unix timestamp.
        let now = std::time::SystemTime::now();
        let unix_time = now
            .duration_since(std::time::UNIX_EPOCH)
            .map_err(|e| error::Error::internal(format!("Failed to get system time: {}", e)))?
            .as_secs() as i64;

        // Run the actual verification against the collateral.
        Ok(verify_quote_with_collateral(
            attestation_quote_bytes,
            Some(&collateral),
            unix_time,
        )?)
    }
}

View file

@ -0,0 +1,141 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright (c) 2025 Matter Labs
use crate::{
client::JsonRpcClient,
core::AttestationPolicy,
error,
proof::Proof,
verification::{AttestationVerifier, PolicyEnforcer, SignatureVerifier, VerificationReporter},
};
use tokio::sync::watch;
use zksync_basic_types::L1BatchNumber;
/// Result of a batch verification
///
/// Invariant: `verified_count + unverified_count == total_count`.
#[derive(Debug, Clone, Copy)]
pub struct BatchVerificationResult {
    /// Total number of proofs processed
    pub total_count: u32,
    /// Number of proofs that were verified successfully
    pub verified_count: u32,
    /// Number of proofs that failed verification
    pub unverified_count: u32,
}
/// Handles the batch verification process
pub struct BatchVerifier<C: JsonRpcClient> {
    // RPC client used to fetch batch root hashes
    node_client: C,
    // Policy that attestation quotes must satisfy
    attestation_policy: AttestationPolicy,
}
impl<C: JsonRpcClient> BatchVerifier<C> {
    /// Create a new batch verifier
    pub fn new(node_client: C, attestation_policy: AttestationPolicy) -> Self {
        Self {
            node_client,
            attestation_policy,
        }
    }

    /// Verify proofs for a batch
    ///
    /// For every proof: verify its attestation quote, check the quote
    /// against the attestation policy, then verify the proof signature
    /// against the batch root hash fetched from the node. Proofs marked
    /// permanently ignored are skipped (but still counted in
    /// `total_count`); policy or signature failures leave the proof
    /// unverified without aborting the batch.
    ///
    /// Returns early with the counts collected so far when a stop signal
    /// is observed between proofs.
    ///
    /// # Errors
    /// Propagates attestation-verification and RPC errors; per-proof
    /// policy violations are only logged.
    pub async fn verify_batch_proofs(
        &self,
        stop_receiver: &mut watch::Receiver<bool>,
        batch_number: L1BatchNumber,
        proofs: Vec<Proof>,
    ) -> error::Result<BatchVerificationResult> {
        let batch_no = batch_number.0;
        let mut total_proofs_count: u32 = 0;
        let mut verified_proofs_count: u32 = 0;
        for proof in proofs.into_iter() {
            // Bail out between proofs when a shutdown was requested.
            if *stop_receiver.borrow() {
                tracing::warn!("Stop signal received during batch verification");
                return Ok(BatchVerificationResult {
                    total_count: total_proofs_count,
                    verified_count: verified_proofs_count,
                    unverified_count: total_proofs_count - verified_proofs_count,
                });
            }
            total_proofs_count += 1;
            let tee_type = proof.tee_type.to_uppercase();
            if proof.is_permanently_ignored() {
                tracing::debug!(
                    batch_no,
                    tee_type,
                    "Proof is marked as permanently ignored. Skipping."
                );
                continue;
            }
            tracing::debug!(batch_no, tee_type, proof.proved_at, "Verifying proof.");
            let attestation_bytes = proof.attestation_bytes();
            let signature_bytes = proof.signature_bytes();
            tracing::debug!(
                batch_no,
                "Verifying quote ({} bytes)...",
                attestation_bytes.len()
            );
            // Verify attestation
            let quote_verification_result = AttestationVerifier::verify_quote(&attestation_bytes)?;
            // Log verification results
            VerificationReporter::log_quote_verification_summary(&quote_verification_result);
            // Check if attestation matches policy; a mismatch only skips
            // this proof, it does not fail the whole batch.
            let policy_matches = PolicyEnforcer::validate_policy(
                &self.attestation_policy,
                &quote_verification_result,
            );
            if let Err(e) = policy_matches {
                tracing::error!(batch_no, tee_type, "Attestation policy check failed: {e}");
                continue;
            }
            // Verify signature over the root hash of the proof's own batch.
            let root_hash = self
                .node_client
                .get_root_hash(L1BatchNumber(proof.l1_batch_number))
                .await?;
            let signature_verified = SignatureVerifier::verify_batch_proof(
                &quote_verification_result,
                root_hash,
                &signature_bytes,
            )?;
            if signature_verified {
                tracing::info!(
                    batch_no,
                    proof.proved_at,
                    tee_type,
                    "Verification succeeded.",
                );
                verified_proofs_count += 1;
            } else {
                tracing::warn!(batch_no, proof.proved_at, tee_type, "Verification failed!",);
            }
        }
        let unverified_proofs_count = total_proofs_count.saturating_sub(verified_proofs_count);
        // Log batch verification results
        VerificationReporter::log_batch_verification_results(
            batch_no,
            verified_proofs_count,
            unverified_proofs_count,
        );
        Ok(BatchVerificationResult {
            total_count: total_proofs_count,
            verified_count: verified_proofs_count,
            unverified_count: unverified_proofs_count,
        })
    }
}

View file

@ -0,0 +1,14 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright (c) 2023-2025 Matter Labs
mod attestation;
mod batch;
mod policy;
mod reporting;
mod signature;
pub use attestation::AttestationVerifier;
pub use batch::BatchVerifier;
pub use policy::PolicyEnforcer;
pub use reporting::VerificationReporter;
pub use signature::SignatureVerifier;

View file

@ -0,0 +1,212 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright (c) 2023-2025 Matter Labs
use crate::{
core::AttestationPolicy,
error::{Error, Result},
};
use bytes::Bytes;
use enumset::EnumSet;
use teepot::quote::{tcblevel::TcbLevel, QuoteVerificationResult, Report};
/// Enforces policy requirements on attestation quotes
pub struct PolicyEnforcer;
impl PolicyEnforcer {
/// Check if a quote matches the attestation policy
pub fn validate_policy(
attestation_policy: &AttestationPolicy,
quote_verification_result: &QuoteVerificationResult,
) -> Result<()> {
let quote = &quote_verification_result.quote;
let tcblevel = TcbLevel::from(quote_verification_result.result);
match &quote.report {
Report::SgxEnclave(report_body) => {
// Validate TCB level
Self::validate_tcb_level(&attestation_policy.sgx_allowed_tcb_levels, tcblevel)?;
// Validate SGX Advisories
for advisory in &quote_verification_result.advisories {
Self::check_policy(
attestation_policy.sgx_allowed_advisory_ids.as_deref(),
advisory,
"advisories",
)?;
}
// Validate SGX policies
Self::check_policy_hash(
attestation_policy.sgx_mrsigners.as_deref(),
&report_body.mr_signer,
"mrsigner",
)?;
Self::check_policy_hash(
attestation_policy.sgx_mrenclaves.as_deref(),
&report_body.mr_enclave,
"mrenclave",
)
}
Report::TD10(report_body) => {
// Validate TCB level
Self::validate_tcb_level(&attestation_policy.tdx_allowed_tcb_levels, tcblevel)?;
// Validate TDX Advisories
for advisory in &quote_verification_result.advisories {
Self::check_policy(
attestation_policy.tdx_allowed_advisory_ids.as_deref(),
advisory,
"mrsigner",
)?;
}
// Build combined TDX MR and validate
let tdx_mr = Self::build_tdx_mr([
&report_body.mr_td,
&report_body.rt_mr0,
&report_body.rt_mr1,
&report_body.rt_mr2,
&report_body.rt_mr3,
]);
Self::check_policy_hash(attestation_policy.tdx_mrs.as_deref(), &tdx_mr, "tdxmr")
}
Report::TD15(report_body) => {
// Validate TCB level
Self::validate_tcb_level(&attestation_policy.tdx_allowed_tcb_levels, tcblevel)?;
// Validate TDX Advisories
for advisory in &quote_verification_result.advisories {
Self::check_policy(
attestation_policy.tdx_allowed_advisory_ids.as_deref(),
advisory,
"advisories",
)?;
}
// Build combined TDX MR and validate
let tdx_mr = Self::build_tdx_mr([
&report_body.base.mr_td,
&report_body.base.rt_mr0,
&report_body.base.rt_mr1,
&report_body.base.rt_mr2,
&report_body.base.rt_mr3,
]);
Self::check_policy_hash(attestation_policy.tdx_mrs.as_deref(), &tdx_mr, "tdxmr")
}
_ => Err(Error::policy_violation("Unknown quote report format")),
}
}
/// Helper method to validate TCB levels
fn validate_tcb_level(
allowed_levels: &EnumSet<TcbLevel>,
actual_level: TcbLevel,
) -> Result<()> {
if !allowed_levels.contains(actual_level) {
let error_msg = format!(
"Quote verification failed: TCB level mismatch (expected one of: {:?}, actual: {})",
allowed_levels, actual_level
);
return Err(Error::policy_violation(error_msg));
}
Ok(())
}
/// Helper method to build combined TDX measurement register
fn build_tdx_mr<const N: usize>(parts: [&[u8]; N]) -> Vec<u8> {
parts.into_iter().flatten().cloned().collect()
}
/// Check if a policy value matches the actual value
fn check_policy(policy: Option<&[String]>, actual_value: &str, field_name: &str) -> Result<()> {
if let Some(valid_values) = policy {
if !valid_values.iter().any(|value| value == actual_value) {
let error_msg =
format!(
"Quote verification failed: {} mismatch (expected one of: [ {} ], actual: {})",
field_name, valid_values.join(", "), actual_value
);
return Err(Error::policy_violation(error_msg));
}
tracing::debug!(field_name, actual_value, "Attestation policy check passed");
}
Ok(())
}
fn check_policy_hash(
policy: Option<&[Bytes]>,
actual_value: &[u8],
field_name: &str,
) -> Result<()> {
if let Some(valid_values) = policy {
let actual_value = Bytes::copy_from_slice(actual_value);
if !valid_values.contains(&actual_value) {
let valid_values = valid_values
.iter()
.map(hex::encode)
.collect::<Vec<_>>()
.join(", ");
let error_msg = format!(
"Quote verification failed: {} mismatch (expected one of: [ {} ], actual: {:x})",
field_name, valid_values, actual_value
);
return Err(Error::policy_violation(error_msg));
}
tracing::debug!(
field_name,
actual_value = format!("{actual_value:x}"),
"Attestation policy check passed"
);
}
Ok(())
}
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_check_policy() {
        // Test with no policy (should pass)
        PolicyEnforcer::check_policy_hash(None, &[1, 2, 3], "test").unwrap();
        // Test with matching policy
        let actual_value: Bytes = hex::decode("01020304").unwrap().into();
        PolicyEnforcer::check_policy_hash(
            Some(vec![actual_value.clone()]).as_deref(),
            &actual_value,
            "test",
        )
        .unwrap();
        // Test with matching policy (multiple values)
        PolicyEnforcer::check_policy_hash(
            Some(vec![
                "aabbcc".into(),
                "01020304".into(),
                "ddeeff".into(),
                actual_value.clone(),
            ])
            .as_deref(),
            &actual_value,
            "test",
        )
        .unwrap();
        // Test with non-matching policy (must return an error)
        PolicyEnforcer::check_policy_hash(
            Some(vec!["aabbcc".into(), "ddeeff".into()]).as_deref(),
            &actual_value,
            "test",
        )
        .unwrap_err();
    }
}

View file

@ -0,0 +1,93 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright (c) 2023-2025 Matter Labs
use teepot::quote::{tcblevel::TcbLevel, QuoteVerificationResult};
/// Handles reporting and logging of verification results
pub struct VerificationReporter;
impl VerificationReporter {
/// Log summary of a quote verification
pub fn log_quote_verification_summary(quote_verification_result: &QuoteVerificationResult) {
let QuoteVerificationResult {
collateral_expired,
result,
quote,
advisories,
..
} = quote_verification_result;
if *collateral_expired {
tracing::warn!("Freshly fetched collateral expired!");
}
let tcblevel = TcbLevel::from(*result);
let advisories = if advisories.is_empty() {
"None".to_string()
} else {
advisories
.iter()
.map(ToString::to_string)
.collect::<Vec<_>>()
.join(", ")
};
tracing::debug!(
"Quote verification result: {tcblevel}. {report}. Advisory IDs: {advisories}.",
report = &quote.report
);
}
/// Log the results of batch verification
pub fn log_batch_verification_results(
batch_no: u32,
verified_proofs_count: u32,
unverified_proofs_count: u32,
) {
if unverified_proofs_count > 0 {
if verified_proofs_count == 0 {
tracing::error!(
batch_no,
"All {} proofs failed verification!",
unverified_proofs_count
);
} else {
tracing::warn!(
batch_no,
"Some proofs failed verification. Unverified proofs: {}. Verified proofs: {}.",
unverified_proofs_count,
verified_proofs_count
);
}
} else if verified_proofs_count > 0 {
tracing::info!(
batch_no,
"All {} proofs verified successfully!",
verified_proofs_count
);
}
}
/// Log overall verification results for multiple batches
pub fn log_overall_verification_results(
verified_batches_count: u32,
unverified_batches_count: u32,
) {
if unverified_batches_count > 0 {
if verified_batches_count == 0 {
tracing::error!(
"All {} batches failed verification!",
unverified_batches_count
);
} else {
tracing::error!(
"Some batches failed verification! Unverified batches: {}. Verified batches: {}.",
unverified_batches_count,
verified_batches_count
);
}
} else {
tracing::info!("{} batches verified successfully!", verified_batches_count);
}
}
}

View file

@ -0,0 +1,157 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright (c) 2023-2025 Matter Labs
use secp256k1::{
ecdsa::{RecoverableSignature, RecoveryId, Signature},
Message, SECP256K1,
};
use teepot::{
ethereum::{public_key_to_ethereum_address, recover_signer},
prover::reportdata::ReportData,
quote::QuoteVerificationResult,
};
use zksync_basic_types::H256;
use crate::error;
/// Length of an ECDSA signature carrying an explicit recovery id (r || s || v).
const SIGNATURE_LENGTH_WITH_RECOVERY_ID: usize = 65;
/// Length of a compact ECDSA signature without a recovery id (r || s).
const SIGNATURE_LENGTH_WITHOUT_RECOVERY_ID: usize = 64;

/// Handles verification of signatures in proofs
pub struct SignatureVerifier;

impl SignatureVerifier {
    /// Verify a batch proof signature
    ///
    /// Extracts the report data embedded in the verified quote and checks
    /// the signature over `root_hash` against it.
    ///
    /// # Errors
    /// Fails when the report data cannot be parsed or the signature is
    /// malformed; an identity mismatch is reported as `Ok(false)`.
    pub fn verify_batch_proof(
        quote_verification_result: &QuoteVerificationResult,
        root_hash: H256,
        signature: &[u8],
    ) -> error::Result<bool> {
        let report_data_bytes = quote_verification_result.quote.get_report_data();
        tracing::trace!(?report_data_bytes);
        let report_data = ReportData::try_from(report_data_bytes).map_err(|e| {
            error::Error::internal(format!("Could not convert to ReportData: {}", e))
        })?;
        Self::verify(&report_data, &root_hash, signature)
    }

    /// Verify signature against report data and root hash
    ///
    /// Dispatches on the report-data version; unknown versions never verify.
    pub fn verify(
        report_data: &ReportData,
        root_hash: &H256,
        signature: &[u8],
    ) -> error::Result<bool> {
        match report_data {
            ReportData::V0(report) => Self::verify_v0(report, root_hash, signature),
            ReportData::V1(report) => Self::verify_v1(report, root_hash, signature),
            ReportData::Unknown(_) => Ok(false),
        }
    }

    /// Verify a V0 report: a compact ECDSA signature checked directly
    /// against the public key embedded in the report.
    fn verify_v0(
        report: &teepot::prover::reportdata::ReportDataV0,
        root_hash: &H256,
        signature: &[u8],
    ) -> error::Result<bool> {
        tracing::debug!("ReportData::V0");
        let signature = Signature::from_compact(signature)
            .map_err(|e| error::Error::signature_verification(e.to_string()))?;
        let root_hash_msg = Message::from_digest(root_hash.0);
        Ok(signature.verify(&root_hash_msg, &report.pubkey).is_ok())
    }

    /// Verify a V1 report: recover the signer's Ethereum address from the
    /// signature and compare it with the address in the report.
    fn verify_v1(
        report: &teepot::prover::reportdata::ReportDataV1,
        root_hash: &H256,
        signature: &[u8],
    ) -> error::Result<bool> {
        tracing::debug!("ReportData::V1");
        let ethereum_address_from_report = report.ethereum_address;
        // `H256.0` is already the 32-byte digest array (as used in
        // `verify_v0`), so no fallible slice conversion is needed here.
        let root_hash_msg = Message::from_digest(root_hash.0);

        tracing::trace!("sig len = {}", signature.len());

        // Try to recover Ethereum address from signature
        let ethereum_address_from_signature = match signature.len() {
            // Handle 64-byte signature case (missing recovery ID)
            SIGNATURE_LENGTH_WITHOUT_RECOVERY_ID => {
                Self::recover_address_with_missing_recovery_id(signature, &root_hash_msg)?
            }
            // Standard 65-byte signature case
            SIGNATURE_LENGTH_WITH_RECOVERY_ID => {
                let signature_bytes: [u8; SIGNATURE_LENGTH_WITH_RECOVERY_ID] =
                    signature.try_into().map_err(|_| {
                        error::Error::signature_verification(
                            "Expected 65-byte signature but got a different length",
                        )
                    })?;
                recover_signer(&signature_bytes, &root_hash_msg).map_err(|e| {
                    error::Error::signature_verification(format!("Failed to recover signer: {}", e))
                })?
            }
            // Any other length is invalid
            len => {
                return Err(error::Error::signature_verification(format!(
                    "Invalid signature length: {len} bytes"
                )))
            }
        };

        // Log verification details
        tracing::debug!(
            "Root hash: {}. Ethereum address from the attestation quote: {}. Ethereum address from the signature: {}.",
            root_hash,
            hex::encode(ethereum_address_from_report),
            hex::encode(ethereum_address_from_signature),
        );

        Ok(ethereum_address_from_signature == ethereum_address_from_report)
    }

    /// Helper function to recover Ethereum address when recovery ID is missing
    ///
    /// Brute-forces all four possible recovery IDs and returns the address
    /// derived from the first public key that can be recovered.
    fn recover_address_with_missing_recovery_id(
        signature: &[u8],
        message: &Message,
    ) -> error::Result<[u8; 20]> {
        tracing::info!("Signature is missing RecoveryId!");
        // Try all possible recovery IDs
        for rec_id in [
            RecoveryId::Zero,
            RecoveryId::One,
            RecoveryId::Two,
            RecoveryId::Three,
        ] {
            let Ok(rec_sig) = RecoverableSignature::from_compact(signature, rec_id) else {
                continue;
            };
            let Ok(public) = SECP256K1.recover_ecdsa(message, &rec_sig) else {
                continue;
            };
            let ethereum_address = public_key_to_ethereum_address(&public);
            tracing::info!("Had to use RecoveryId::{rec_id:?}");
            return Ok(ethereum_address);
        }
        // No valid recovery ID found
        Err(error::Error::signature_verification(
            "Could not find valid recovery ID",
        ))
    }
}

View file

@ -14,7 +14,9 @@ use std::{
pub use enumset::EnumSet; pub use enumset::EnumSet;
/// TCB level /// TCB level
#[derive(EnumSetType, Debug)] #[derive(EnumSetType, Debug, Serialize, Deserialize)]
#[enumset(serialize_repr = "list")]
#[non_exhaustive]
pub enum TcbLevel { pub enum TcbLevel {
/// TCB is up to date /// TCB is up to date
Ok, Ok,