Merge pull request #305 from matter-labs/small-quality

refactor: many small code quality improvements
Lucille Blumire 2025-04-17 17:43:22 +01:00 committed by GitHub
commit 9bd0e9c36e
30 changed files with 99 additions and 124 deletions
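The dominant change across the diffs below is inlining format arguments into the format string (the pattern clippy's `uninlined_format_args` lint suggests). For reference, a minimal before/after sketch — standalone, not from this repository:

```rust
fn main() {
    let batch_number = 42;
    // Before: positional argument, named outside the string.
    println!("No details found for batch #{}", batch_number);
    // After: the identifier is captured directly in the format string.
    println!("No details found for batch #{batch_number}");
}
```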

View file

@@ -65,6 +65,8 @@ impl Display for Rtmr {
     }
 }
+const CHUNK_SIZE: u64 = 1024 * 128;
 fn main() -> Result<()> {
     let args = Arguments::parse();
     tracing::subscriber::set_global_default(setup_logging(
@@ -127,7 +129,7 @@ fn main() -> Result<()> {
     let _ = device.seek(SeekFrom::Start(pstart))?;
     assert_eq!(header.part_size, 128);
-    assert!(header.num_parts < u8::MAX as _);
+    assert!(header.num_parts < u32::from(u8::MAX));
     let empty_bytes = [0u8; 128];
@@ -158,7 +160,7 @@ fn main() -> Result<()> {
     let section_table = pe.get_section_table()?;
-    for section in section_table.iter() {
+    for section in &section_table {
         debug!(section_name = ?section.name()?);
     }
@@ -175,14 +177,13 @@ fn main() -> Result<()> {
             .find(|s| s.name().unwrap().eq(sect))
             .ok_or(anyhow!("Failed to find section `{sect}`"))?;
-        let mut start = s.pointer_to_raw_data as u64;
-        let end = start + s.virtual_size as u64;
+        let mut start = u64::from(s.pointer_to_raw_data);
+        let end = start + u64::from(s.virtual_size);
         debug!(sect, start, end, len = (s.virtual_size));
         let mut hasher = Sha384::new();
-        const CHUNK_SIZE: u64 = 1024 * 128;
         loop {
             if start >= end {
                 break;
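The `u32::from`/`u64::from` rewrites above swap `as` casts for lossless `From` conversions (clippy's `cast_lossless`). A standalone sketch with made-up values:

```rust
fn main() {
    let num_parts: u32 = 7;
    // Before: `as` compiles for any numeric cast, including lossy ones.
    assert!(num_parts < u8::MAX as u32);
    // After: `From` exists only for widening conversions, so an
    // accidentally lossy cast becomes a compile error instead.
    assert!(num_parts < u32::from(u8::MAX));

    let pointer_to_raw_data: u32 = 0x1000;
    let start: u64 = u64::from(pointer_to_raw_data); // widening u32 -> u64
    assert_eq!(start, 0x1000);
}
```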

View file

@@ -61,13 +61,11 @@ pub fn extend_sha384(base: &str, extend: &str) -> Result<String> {
     let mut hasher = sha2::Sha384::new();
     hasher.update(pad::<48>(&hex::decode(base).context(format!(
-        "Failed to decode base digest '{}' - expected hex string",
-        base
+        "Failed to decode base digest '{base}' - expected hex string",
     ))?)?);
     hasher.update(pad::<48>(&hex::decode(extend).context(format!(
-        "Failed to decode extend digest '{}' - expected hex string",
-        extend
+        "Failed to decode extend digest '{extend}' - expected hex string",
     ))?)?);
     Ok(hex::encode(hasher.finalize()))

View file

@@ -26,7 +26,7 @@ async fn main() -> Result<()> {
         .context("failed to get quote and collateral")?;
     let base64_string = general_purpose::STANDARD.encode(report.quote.as_ref());
-    print!("{}", base64_string);
+    print!("{base64_string}");
     Ok(())
 }

View file

@@ -81,7 +81,7 @@ fn print_quote_verification_summary(quote_verification_result: &QuoteVerificationResult) {
     for advisory in advisories {
         println!("\tInfo: Advisory ID: {advisory}");
     }
-    println!("Quote verification result: {}", tcblevel);
+    println!("Quote verification result: {tcblevel}");
     println!("{:#}", &quote.report);
 }

View file

@@ -26,12 +26,10 @@ impl MainNodeClient {
     /// Create a new client for the main node
     pub fn new(rpc_url: Url, chain_id: u64) -> error::Result<Self> {
         let chain_id = L2ChainId::try_from(chain_id)
-            .map_err(|e| error::Error::Internal(format!("Invalid chain ID: {}", e)))?;
+            .map_err(|e| error::Error::Internal(format!("Invalid chain ID: {e}")))?;
         let node_client = NodeClient::http(rpc_url.into())
-            .map_err(|e| {
-                error::Error::Internal(format!("Failed to create JSON-RPC client: {}", e))
-            })?
+            .map_err(|e| error::Error::Internal(format!("Failed to create JSON-RPC client: {e}")))?
             .for_network(chain_id.into())
             .build();
@@ -46,13 +44,13 @@ impl JsonRpcClient for MainNodeClient {
             .get_l1_batch_details(batch_number)
             .rpc_context("get_l1_batch_details")
             .await
-            .map_err(|e| error::Error::JsonRpc(format!("Failed to get batch details: {}", e)))?
+            .map_err(|e| error::Error::JsonRpc(format!("Failed to get batch details: {e}")))?
            .ok_or_else(|| {
-                error::Error::JsonRpc(format!("No details found for batch #{}", batch_number))
+                error::Error::JsonRpc(format!("No details found for batch #{batch_number}"))
            })?;
         batch_details.base.root_hash.ok_or_else(|| {
-            error::Error::JsonRpc(format!("No root hash found for batch #{}", batch_number))
+            error::Error::JsonRpc(format!("No root hash found for batch #{batch_number}"))
         })
     }
 }

View file

@@ -190,15 +190,12 @@ impl VerifierConfig {
     pub fn new(args: VerifierConfigArgs) -> error::Result<Self> {
         let policy = if let Some(path) = &args.attestation_policy_file {
             let policy_content = fs::read_to_string(path).map_err(|e| {
-                error::Error::internal(format!("Failed to read attestation policy file: {}", e))
+                error::Error::internal(format!("Failed to read attestation policy file: {e}"))
             })?;
             let policy_config: AttestationPolicyConfig = serde_yaml::from_str(&policy_content)
                 .map_err(|e| {
-                    error::Error::internal(format!(
-                        "Failed to parse attestation policy file: {}",
-                        e
-                    ))
+                    error::Error::internal(format!("Failed to parse attestation policy file: {e}"))
                 })?;
             tracing::info!("Loaded attestation policy from file: {:?}", path);
@@ -263,7 +260,7 @@ fn decode_tdx_mrs(
         Some(mrs_array) => {
             let result = mrs_array
                 .into_iter()
-                .map(|strings| decode_and_combine_mrs(strings, bytes_length))
+                .map(|strings| decode_and_combine_mrs(&strings, bytes_length))
                 .collect::<Result<Vec<_>, _>>()?;
             Ok(Some(result))
         }
@@ -272,12 +269,12 @@ fn decode_tdx_mrs(
 // Helper function to decode and combine MRs
 fn decode_and_combine_mrs(
-    strings: [String; 5],
+    strings: &[String; 5],
     bytes_length: usize,
 ) -> Result<Bytes, hex::FromHexError> {
     let mut buffer = BytesMut::with_capacity(bytes_length * 5);
-    for s in &strings {
+    for s in strings {
         if s.len() > (bytes_length * 2) {
             return Err(hex::FromHexError::InvalidStringLength);
         }
@@ -295,19 +292,16 @@ fn parse_batch_range(s: &str) -> error::Result<(L1BatchNumber, L1BatchNumber)> {
             .map(L1BatchNumber::from)
             .map_err(|e| error::Error::internal(format!("Can't convert batch {s} to number: {e}")))
     };
-    match s.split_once('-') {
-        Some((start, end)) => {
-            let (start, end) = (parse(start)?, parse(end)?);
-            if start > end {
-                Err(error::Error::InvalidBatchRange(s.into()))
-            } else {
-                Ok((start, end))
-            }
-        }
-        None => {
-            let batch_number = parse(s)?;
-            Ok((batch_number, batch_number))
-        }
+    if let Some((start, end)) = s.split_once('-') {
+        let (start, end) = (parse(start)?, parse(end)?);
+        if start > end {
+            Err(error::Error::InvalidBatchRange(s.into()))
+        } else {
+            Ok((start, end))
+        }
+    } else {
+        let batch_number = parse(s)?;
+        Ok((batch_number, batch_number))
     }
 }
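The `parse_batch_range` rewrite above replaces a two-arm `match` on `Option` with `if let ... else`, flattening one level of nesting (the shape clippy's `single_match_else` nudges toward). A self-contained sketch of the same structure, using plain `u32` instead of `L1BatchNumber`:

```rust
fn parse_batch_range(s: &str) -> Result<(u32, u32), String> {
    let parse = |s: &str| {
        s.parse::<u32>()
            .map_err(|e| format!("Can't convert batch {s} to number: {e}"))
    };
    if let Some((start, end)) = s.split_once('-') {
        let (start, end) = (parse(start)?, parse(end)?);
        if start > end {
            Err(format!("invalid batch range: {s}"))
        } else {
            Ok((start, end))
        }
    } else {
        let batch_number = parse(s)?;
        Ok((batch_number, batch_number))
    }
}

fn main() {
    assert_eq!(parse_batch_range("3-5"), Ok((3, 5)));
    assert_eq!(parse_batch_range("7"), Ok((7, 7)));
    assert!(parse_batch_range("9-2").is_err());
}
```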

View file

@@ -31,13 +31,13 @@ impl fmt::Display for VerifierMode {
                 end_batch,
             } => {
                 if start_batch == end_batch {
-                    write!(f, "one-shot mode (batch {})", start_batch)
+                    write!(f, "one-shot mode (batch {start_batch})")
                 } else {
-                    write!(f, "one-shot mode (batches {}-{})", start_batch, end_batch)
+                    write!(f, "one-shot mode (batches {start_batch}-{end_batch})")
                 }
             }
             VerifierMode::Continuous { start_batch } => {
-                write!(f, "continuous mode (starting from batch {})", start_batch)
+                write!(f, "continuous mode (starting from batch {start_batch})")
             }
         }
     }
@@ -72,9 +72,9 @@ impl VerificationResult {
                 verified_count,
                 unverified_count,
             } => verified_count > unverified_count,
-            VerificationResult::Failure => false,
-            VerificationResult::Interrupted => false,
-            VerificationResult::NoProofsFound => false,
+            VerificationResult::Failure
+            | VerificationResult::Interrupted
+            | VerificationResult::NoProofsFound => false,
         }
     }
 }
@@ -89,8 +89,7 @@ impl fmt::Display for VerificationResult {
             } => {
                 write!(
                     f,
-                    "Partial Success ({} verified, {} failed)",
-                    verified_count, unverified_count
+                    "Partial Success ({verified_count} verified, {unverified_count} failed)"
                 )
             }
             VerificationResult::Failure => write!(f, "Failure"),
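The arm merges above use or-patterns so that arms with identical bodies are written once (clippy's `match_same_arms`). A reduced sketch with a stand-in enum — it shows the pattern only, not this crate's actual success logic:

```rust
#[allow(dead_code)]
enum VerificationResult {
    Success,
    PartialSuccess { verified_count: u32 },
    Failure,
    Interrupted,
    NoProofsFound,
}

fn is_success(r: &VerificationResult) -> bool {
    match r {
        VerificationResult::Success | VerificationResult::PartialSuccess { .. } => true,
        // One arm instead of three identical `=> false` arms.
        VerificationResult::Failure
        | VerificationResult::Interrupted
        | VerificationResult::NoProofsFound => false,
    }
}

fn main() {
    assert!(is_success(&VerificationResult::PartialSuccess { verified_count: 3 }));
    assert!(!is_success(&VerificationResult::NoProofsFound));
}
```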

View file

@@ -96,7 +96,7 @@ impl Error {
 impl From<reqwest::Error> for Error {
     fn from(value: reqwest::Error) -> Self {
         Self::Http {
-            status_code: value.status().map(|v| v.as_u16()).unwrap_or(0),
+            status_code: value.status().map_or(0, |v| v.as_u16()),
             message: value.to_string(),
         }
     }
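`map_or` folds the `.map(...).unwrap_or(...)` chain above into one call (clippy's `map_unwrap_or`); note the default comes first. A standalone sketch:

```rust
fn main() {
    let status: Option<u16> = None;
    // Before: two combinators.
    let before = status.map(|v| v + 1).unwrap_or(0);
    // After: one combinator. The default (0) is evaluated eagerly;
    // prefer map_or_else when computing the default is expensive.
    let after = status.map_or(0, |v| v + 1);
    assert_eq!(before, after);
}
```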

View file

@@ -74,7 +74,7 @@ async fn main() -> Result<()> {
             },
             Err(e) => {
                 tracing::error!("Task panicked: {}", e);
-                Err(Error::internal(format!("Task panicked: {}", e)))
+                Err(Error::internal(format!("Task panicked: {e}")))
             }
         }
     },

View file

@@ -53,7 +53,7 @@ impl BatchProcessor {
         // Fetch proofs for the current batch across different TEE types
         let mut proofs = Vec::new();
-        for tee_type in self.config.args.tee_types.iter() {
+        for tee_type in self.config.args.tee_types.iter().copied() {
             match self
                 .proof_fetcher
                 .get_proofs(token, batch_number, tee_type)
@@ -68,7 +68,6 @@ impl BatchProcessor {
                         batch_number.0,
                         e
                     );
-                    continue;
                 }
             }
         }

View file

@@ -50,8 +50,9 @@ impl ContinuousProcessor {
                 match self.batch_processor.process_batch(token, batch).await {
                     Ok(result) => {
                         match result {
-                            VerificationResult::Success => success_count += 1,
-                            VerificationResult::PartialSuccess { .. } => success_count += 1,
+                            VerificationResult::Success | VerificationResult::PartialSuccess { .. } => {
+                                success_count += 1;
+                            }
                             VerificationResult::Failure => failure_count += 1,
                             VerificationResult::Interrupted => {
                                 results.push((current_batch, result));

View file

@@ -43,14 +43,14 @@ impl ProcessorFactory {
     /// Create a new processor based on the provided configuration
     pub fn create(config: VerifierConfig) -> Result<(ProcessorType, VerifierMode)> {
         let mode = if let Some((start, end)) = config.args.batch_range {
-            let processor = OneShotProcessor::new(config.clone(), start, end)?;
+            let processor = OneShotProcessor::new(config, start, end)?;
             let mode = VerifierMode::OneShot {
                 start_batch: start,
                 end_batch: end,
             };
             (ProcessorType::OneShot(processor), mode)
         } else if let Some(start) = config.args.continuous {
-            let processor = ContinuousProcessor::new(config.clone(), start)?;
+            let processor = ContinuousProcessor::new(config, start)?;
             let mode = VerifierMode::Continuous { start_batch: start };
             (ProcessorType::Continuous(processor), mode)
         } else {
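Dropping `.clone()` above is sound because `config` is consumed at most once: only one `if`/`else if` branch runs, so each branch may move the value (clippy's `redundant_clone`). A reduced sketch of the ownership argument, with hypothetical types:

```rust
struct Config {
    name: String,
}

struct Processor {
    config: Config,
}

fn create(config: Config, continuous: bool) -> Processor {
    if continuous {
        // Before: Processor { config: config.clone() } — a needless copy,
        // since `config` is never touched again after this branch.
        Processor { config }
    } else {
        // Only one branch executes, so this move does not conflict
        // with the move in the branch above.
        Processor { config }
    }
}

fn main() {
    let p = create(Config { name: "verifier".into() }, true);
    println!("{}", p.config.name);
}
```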

View file

@@ -55,8 +55,9 @@ impl OneShotProcessor {
            let result = self.batch_processor.process_batch(token, batch).await?;
            match result {
-                VerificationResult::Success => success_count += 1,
-                VerificationResult::PartialSuccess { .. } => success_count += 1,
+                VerificationResult::Success | VerificationResult::PartialSuccess { .. } => {
+                    success_count += 1;
+                }
                VerificationResult::Failure => failure_count += 1,
                VerificationResult::Interrupted => {
                    results.push((batch_number, result));

View file

@@ -36,7 +36,7 @@ impl ProofFetcher {
         &self,
         token: &CancellationToken,
         batch_number: L1BatchNumber,
-        tee_type: &TeeType,
+        tee_type: TeeType,
     ) -> Result<Vec<Proof>> {
         let mut proofs_request = GetProofsRequest::new(batch_number, tee_type);
         let mut backoff = Duration::from_secs(1);
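`tee_type` changes from `&TeeType` to `TeeType` here and in the callers above: for a small `Copy` enum, passing by value is at least as cheap as a reference (clippy's `trivially_copy_pass_by_ref`), and `.iter().copied()` at the loop yields owned values. A sketch with a stand-in enum:

```rust
#[derive(Clone, Copy, Debug)]
enum TeeKind {
    Sgx,
    Tdx,
}

// Before: `kind: &TeeKind` — a pointer to a value no bigger than the pointer.
fn describe(kind: TeeKind) -> &'static str {
    match kind {
        TeeKind::Sgx => "sgx",
        TeeKind::Tdx => "tdx",
    }
}

fn main() {
    let kinds = [TeeKind::Sgx, TeeKind::Tdx];
    // `.copied()` turns an iterator over &TeeKind into one over TeeKind.
    for kind in kinds.iter().copied() {
        println!("{kind:?} -> {}", describe(kind));
    }
}
```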

View file

@@ -21,7 +21,7 @@ impl ProofResponseParser {
            }
        }
-        return Err(error::Error::JsonRpc(format!("JSONRPC error: {:?}", error)));
+        return Err(error::Error::JsonRpc(format!("JSONRPC error: {error:?}")));
    }
    // Extract proofs from the result
// Extract proofs from the result

View file

@@ -17,7 +17,7 @@ pub struct GetProofsRequest {
 impl GetProofsRequest {
     /// Create a new request for the given batch number
-    pub fn new(batch_number: L1BatchNumber, tee_type: &TeeType) -> Self {
+    pub fn new(batch_number: L1BatchNumber, tee_type: TeeType) -> Self {
         GetProofsRequest {
             jsonrpc: "2.0".to_string(),
             id: 1,

View file

@@ -17,7 +17,7 @@ impl AttestationVerifier {
         // Get current time for verification
         let unix_time: i64 = std::time::SystemTime::now()
             .duration_since(std::time::UNIX_EPOCH)
-            .map_err(|e| error::Error::internal(format!("Failed to get system time: {}", e)))?
+            .map_err(|e| error::Error::internal(format!("Failed to get system time: {e}")))?
             .as_secs() as _;
         // Verify the quote with the collateral

View file

@@ -48,7 +48,7 @@ impl<C: JsonRpcClient> BatchVerifier<C> {
         let mut total_proofs_count: u32 = 0;
         let mut verified_proofs_count: u32 = 0;
-        for proof in proofs.into_iter() {
+        for proof in proofs {
             if token.is_cancelled() {
                 tracing::warn!("Stop signal received during batch verification");
                 return Ok(BatchVerificationResult {
@@ -119,7 +119,7 @@ impl<C: JsonRpcClient> BatchVerifier<C> {
                 );
                 verified_proofs_count += 1;
             } else {
-                tracing::warn!(batch_no, proof.proved_at, tee_type, "Verification failed!",);
+                tracing::warn!(batch_no, proof.proved_at, tee_type, "Verification failed!");
             }
         }
}

View file

@@ -24,7 +24,7 @@ impl PolicyEnforcer {
         match &quote.report {
             Report::SgxEnclave(report_body) => {
                 // Validate TCB level
-                Self::validate_tcb_level(&attestation_policy.sgx_allowed_tcb_levels, tcblevel)?;
+                Self::validate_tcb_level(attestation_policy.sgx_allowed_tcb_levels, tcblevel)?;
                 // Validate SGX Advisories
                 for advisory in &quote_verification_result.advisories {
@@ -50,7 +50,7 @@ impl PolicyEnforcer {
             }
             Report::TD10(report_body) => {
                 // Validate TCB level
-                Self::validate_tcb_level(&attestation_policy.tdx_allowed_tcb_levels, tcblevel)?;
+                Self::validate_tcb_level(attestation_policy.tdx_allowed_tcb_levels, tcblevel)?;
                 // Validate TDX Advisories
                 for advisory in &quote_verification_result.advisories {
@@ -74,7 +74,7 @@ impl PolicyEnforcer {
             }
             Report::TD15(report_body) => {
                 // Validate TCB level
-                Self::validate_tcb_level(&attestation_policy.tdx_allowed_tcb_levels, tcblevel)?;
+                Self::validate_tcb_level(attestation_policy.tdx_allowed_tcb_levels, tcblevel)?;
                 // Validate TDX Advisories
                 for advisory in &quote_verification_result.advisories {
@@ -101,14 +101,10 @@ impl PolicyEnforcer {
     }
     /// Helper method to validate TCB levels
-    fn validate_tcb_level(
-        allowed_levels: &EnumSet<TcbLevel>,
-        actual_level: TcbLevel,
-    ) -> Result<()> {
+    fn validate_tcb_level(allowed_levels: EnumSet<TcbLevel>, actual_level: TcbLevel) -> Result<()> {
         if !allowed_levels.contains(actual_level) {
             let error_msg = format!(
-                "Quote verification failed: TCB level mismatch (expected one of: {:?}, actual: {})",
-                allowed_levels, actual_level
+                "Quote verification failed: TCB level mismatch (expected one of: {allowed_levels:?}, actual: {actual_level})",
             );
             return Err(Error::policy_violation(error_msg));
         }
@@ -117,7 +113,7 @@ impl PolicyEnforcer {
     /// Helper method to build combined TDX measurement register
     fn build_tdx_mr<const N: usize>(parts: [&[u8]; N]) -> Vec<u8> {
-        parts.into_iter().flatten().cloned().collect()
+        parts.into_iter().flatten().copied().collect()
     }
     /// Check if a policy value matches the actual value
@@ -152,8 +148,7 @@ impl PolicyEnforcer {
                .collect::<Vec<_>>()
                .join(", ");
            let error_msg = format!(
-                "Quote verification failed: {} mismatch (expected one of: [ {} ], actual: {:x})",
-                field_name, valid_values, actual_value
+                "Quote verification failed: {field_name} mismatch (expected one of: [ {valid_values} ], actual: {actual_value:x})"
            );
            return Err(Error::policy_violation(error_msg));
        }

View file

@@ -30,9 +30,8 @@ impl SignatureVerifier {
         let report_data_bytes = quote_verification_result.quote.get_report_data();
         tracing::trace!(?report_data_bytes);
-        let report_data = ReportData::try_from(report_data_bytes).map_err(|e| {
-            error::Error::internal(format!("Could not convert to ReportData: {}", e))
-        })?;
+        let report_data = ReportData::try_from(report_data_bytes)
+            .map_err(|e| error::Error::internal(format!("Could not convert to ReportData: {e}")))?;
         Self::verify(&report_data, &root_hash, signature)
     }
@@ -100,7 +99,7 @@ impl SignatureVerifier {
             })?;
             recover_signer(&signature_bytes, &root_hash_msg).map_err(|e| {
-                error::Error::signature_verification(format!("Failed to recover signer: {}", e))
+                error::Error::signature_verification(format!("Failed to recover signer: {e}"))
             })?
         }
         // Any other length is invalid

View file

@@ -25,20 +25,18 @@ impl ApiClient {
         if technology == "tdx" && self.api_version == ApiVersion::V3 {
             return Err(IntelApiError::UnsupportedApiVersion(format!(
-                "TDX endpoint /{}/{}/{} requires API v4",
-                service, endpoint, technology
+                "TDX endpoint /{service}/{endpoint}/{technology} requires API v4",
             )));
         }
         if technology == "sgx" && service == "registration" {
             // Registration paths are fixed at v1 regardless of client's api_version
-            return Ok(format!("/sgx/registration/v1/{}", endpoint).replace("//", "/"));
+            return Ok(format!("/sgx/registration/v1/{endpoint}").replace("//", "/"));
         }
-        Ok(format!(
-            "/{}/certification/{}/{}/{}",
-            technology, api_segment, service, endpoint
+        Ok(
+            format!("/{technology}/certification/{api_segment}/{service}/{endpoint}")
+                .replace("//", "/"),
         )
-        .replace("//", "/"))
     }
@@ -187,25 +185,22 @@ impl ApiClient {
     /// Ensures the client is configured for API v4, otherwise returns an error.
     pub(super) fn ensure_v4_api(&self, function_name: &str) -> Result<(), IntelApiError> {
         if self.api_version != ApiVersion::V4 {
-            Err(IntelApiError::UnsupportedApiVersion(format!(
-                "{} requires API v4",
-                function_name
-            )))
-        } else {
-            Ok(())
+            return Err(IntelApiError::UnsupportedApiVersion(format!(
+                "{function_name} requires API v4",
+            )));
         }
+        Ok(())
     }
     /// Checks if a V4-only parameter is provided with a V3 API version.
-    pub(super) fn check_v4_only_param<T>(
+    pub(super) fn check_v4_only_param<T: Copy>(
         &self,
         param_value: Option<T>,
         param_name: &str,
     ) -> Result<(), IntelApiError> {
         if self.api_version == ApiVersion::V3 && param_value.is_some() {
             Err(IntelApiError::UnsupportedApiVersion(format!(
-                "'{}' parameter requires API v4",
-                param_name
+                "'{param_name}' parameter requires API v4",
             )))
         } else {
             Ok(())
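The `ensure_v4_api` change above also swaps if/else for a guard clause: return the error early and leave `Ok(())` as the unindented happy path. A minimal standalone sketch, with the version type simplified to an integer:

```rust
fn ensure_v4_api(api_version: u32, function_name: &str) -> Result<(), String> {
    // Guard clause: bail out early on the error case.
    if api_version != 4 {
        return Err(format!("{function_name} requires API v4"));
    }
    Ok(())
}

fn main() {
    assert!(ensure_v4_api(3, "tdx_endpoint").is_err());
    assert!(ensure_v4_api(4, "tdx_endpoint").is_ok());
}
```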

View file

@@ -312,13 +312,13 @@ fn init_telemetry(
     if config.logging.console {
         // Optionally configure JSON logging
         if config.logging.json {
-            subscriber.with(fmt_layer.json()).init()
+            subscriber.with(fmt_layer.json()).init();
         } else {
-            subscriber.with(fmt_layer.pretty()).init()
+            subscriber.with(fmt_layer.pretty()).init();
         }
     } else {
-        subscriber.init()
-    };
+        subscriber.init();
+    }
     Ok(())
 }

View file

@@ -15,7 +15,7 @@ use sha3::{Digest, Keccak256};
 pub fn recover_signer(sig: &[u8; 65], root_hash: &Message) -> Result<[u8; 20]> {
     let sig = RecoverableSignature::from_compact(
         &sig[0..64],
-        RecoveryId::try_from(sig[64] as i32 - 27)?,
+        RecoveryId::try_from(i32::from(sig[64]) - 27)?,
     )?;
     let public = SECP256K1.recover_ecdsa(root_hash, &sig)?;
     Ok(public_key_to_ethereum_address(&public))

View file

@@ -53,10 +53,7 @@ pub fn setup_logging(
         .try_from_env()
         .unwrap_or(match *log_level {
             LevelFilter::OFF => EnvFilter::new("off"),
-            _ => EnvFilter::new(format!(
-                "warn,{crate_name}={log_level},teepot={log_level}",
-                log_level = log_level
-            )),
+            _ => EnvFilter::new(format!("warn,{crate_name}={log_level},teepot={log_level}")),
         });
     let fmt_layer = tracing_subscriber::fmt::layer()

View file

@@ -37,8 +37,7 @@ pub enum ReportData {
 fn report_data_to_bytes(data: &[u8], version: u8) -> [u8; REPORT_DATA_LENGTH] {
     debug_assert!(
         data.len() < REPORT_DATA_LENGTH, // Ensure there is space for the version byte
-        "Data length exceeds maximum of {} bytes",
-        REPORT_DATA_LENGTH
+        "Data length exceeds maximum of {REPORT_DATA_LENGTH} bytes",
     );
     let mut bytes = [0u8; REPORT_DATA_LENGTH];
     bytes[..data.len()].copy_from_slice(data);

View file

@@ -104,7 +104,7 @@ pub trait QuoteContextErr {
 impl<T, E: std::fmt::Display> QuoteContextErr for Result<T, E> {
     type Ok = T;
     fn str_context<I: std::fmt::Display>(self, msg: I) -> Result<T, QuoteError> {
-        self.map_err(|e| QuoteError::Unexpected(format!("{}: {}", msg, e)))
+        self.map_err(|e| QuoteError::Unexpected(format!("{msg}: {e}")))
     }
 }

View file

@@ -31,9 +31,9 @@ use std::{
 use tracing::trace;
 #[allow(missing_docs)]
-pub const TEE_TYPE_SGX: u32 = 0x00000000;
+pub const TEE_TYPE_SGX: u32 = 0x0000_0000;
 #[allow(missing_docs)]
-pub const TEE_TYPE_TDX: u32 = 0x00000081;
+pub const TEE_TYPE_TDX: u32 = 0x0000_0081;
 #[allow(missing_docs)]
 pub const BODY_SGX_ENCLAVE_REPORT_TYPE: u16 = 1;
@@ -583,7 +583,7 @@ impl Display for TEEType {
             TEEType::TDX => "tdx",
             TEEType::SNP => "snp",
         };
-        write!(f, "{}", str)
+        write!(f, "{str}")
     }
 }
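The underscores added above are digit separators (clippy's `unreadable_literal`): they group the hex constants for the reader and do not change the value. A quick check:

```rust
pub const TEE_TYPE_TDX: u32 = 0x0000_0081;

fn main() {
    // The compiler ignores underscores in numeric literals.
    assert_eq!(TEE_TYPE_TDX, 0x00000081);
    assert_eq!(1_024 * 128, 1024 * 128);
}
```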

View file

@@ -24,7 +24,7 @@ fn extract_header_value(
        .ok_or_else(|| QuoteError::Unexpected(format!("Missing required header: {header_name}")))?
        .to_str()
        .map_err(|e| QuoteError::Unexpected(format!("Invalid header value: {e}")))
-        .map(|val| val.to_string())
+        .map(str::to_string)
 }
 /// Fetch collateral data from Intel's Provisioning Certification Service
@@ -74,14 +74,14 @@ pub(crate) fn get_collateral(quote: &[u8]) -> Result<Collateral, QuoteError> {
     let (collateral, pck_crl, pck_issuer_chain) = result;
     // Convert QuoteCollateralV3 to Collateral
-    convert_to_collateral(collateral, pck_crl, pck_issuer_chain)
+    convert_to_collateral(collateral, &pck_crl, &pck_issuer_chain)
 }
 // Helper function to convert QuoteCollateralV3 to Collateral
 fn convert_to_collateral(
     collateral: QuoteCollateralV3,
-    pck_crl: String,
-    pck_issuer_chain: Bytes,
+    pck_crl: &str,
+    pck_issuer_chain: &[u8],
 ) -> Result<Collateral, QuoteError> {
     let QuoteCollateralV3 {
         tcb_info_issuer_chain,
@@ -119,7 +119,7 @@ fn convert_to_collateral(
         root_ca_crl: Box::new([]),
         // Converted values
-        pck_crl_issuer_chain: pck_issuer_chain.as_ref().into(),
+        pck_crl_issuer_chain: pck_issuer_chain.into(),
         pck_crl: pck_crl.as_bytes().into(),
         tcb_info_issuer_chain: to_bytes_with_nul(tcb_info_issuer_chain, "tcb_info_issuer_chain")?,
         tcb_info: to_bytes_with_nul(tcb_info_json, "tcb_info")?,
@@ -134,14 +134,14 @@ fn convert_to_collateral(
 /// Split the last zero byte
 fn get_str_from_bytes(bytes: &[u8], context: &str) -> Result<String, QuoteError> {
     let c_str = CStr::from_bytes_until_nul(bytes)
-        .str_context(format!("Failed to extract CString: {}", context))?;
+        .str_context(format!("Failed to extract CString: {context}"))?;
     Ok(c_str.to_string_lossy().into_owned())
 }
 /// Parse JSON field from collateral data
 fn parse_json_field(data: &[u8], context: &str) -> Result<serde_json::Value, QuoteError> {
     serde_json::from_str(&get_str_from_bytes(data, context)?)
-        .str_context(format!("Failed to parse JSON: {}", context))
+        .str_context(format!("Failed to parse JSON: {context}"))
 }
 /// Convert Collateral to QuoteCollateralV3
/// Convert Collateral to QuoteCollateralV3

View file

@@ -38,15 +38,14 @@ impl FromStr for TcbLevel {
     fn from_str(s: &str) -> Result<Self, Self::Err> {
         match s.to_ascii_lowercase().as_str() {
-            "ok" => Ok(TcbLevel::Ok),
-            "uptodate" => Ok(TcbLevel::Ok),
+            "ok" | "uptodate" => Ok(TcbLevel::Ok),
             "configneeded" => Ok(TcbLevel::ConfigNeeded),
             "configandswhardeningneeded" => Ok(TcbLevel::ConfigAndSwHardeningNeeded),
             "swhardeningneeded" => Ok(TcbLevel::SwHardeningNeeded),
             "outofdate" => Ok(TcbLevel::OutOfDate),
             "outofdateconfigneeded" => Ok(TcbLevel::OutOfDateConfigNeeded),
             "invalid" => Ok(TcbLevel::Invalid),
-            _ => Err(format!("Invalid TCB level: {}", s)),
+            _ => Err(format!("Invalid TCB level: {s}")),
         }
     }
 }
@@ -72,8 +71,8 @@ pub fn parse_tcb_levels(
     let mut set = EnumSet::new();
     for level_str in s.split(',') {
         let level_str = level_str.trim();
-        let level = TcbLevel::from_str(level_str)
-            .map_err(|_| format!("Invalid TCB level: {}", level_str))?;
+        let level =
+            TcbLevel::from_str(level_str).map_err(|_| format!("Invalid TCB level: {level_str}"))?;
         set.insert(level);
     }
     Ok(set)

View file

@@ -50,8 +50,8 @@ pub struct Author {
 unsafe impl Zeroable for Author {}
 impl Author {
-    const HEADER1: [u8; 16] = 0x06000000E10000000000010000000000u128.to_be_bytes();
-    const HEADER2: [u8; 16] = 0x01010000600000006000000001000000u128.to_be_bytes();
+    const HEADER1: [u8; 16] = 0x0600_0000_E100_0000_0000_0100_0000_0000u128.to_be_bytes();
+    const HEADER2: [u8; 16] = 0x0101_0000_6000_0000_6000_0000_0100_0000u128.to_be_bytes();
     #[allow(clippy::unreadable_literal)]
     /// Creates a new Author from a date and software defined value.
@@ -245,7 +245,7 @@ impl Digest for S256Digest {
     #[inline]
     fn update(&mut self, bytes: &[u8]) {
-        self.0.update(bytes)
+        self.0.update(bytes);
     }
     #[inline]
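The trailing semicolon added to `update` (and to the `init()` calls in the telemetry file earlier) matches clippy's `semicolon_if_nothing_returned`: when a function returns `()`, ending the last call with `;` makes the unit return a statement rather than an implicit tail expression. A tiny sketch:

```rust
struct Hasher(Vec<u8>);

impl Hasher {
    fn update(&mut self, bytes: &[u8]) {
        // Statement, not tail expression: the `;` makes the `()` return explicit.
        self.0.extend_from_slice(bytes);
    }
}

fn main() {
    let mut h = Hasher(Vec::new());
    h.update(b"abc");
    assert_eq!(h.0.len(), 3);
}
```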