diff --git a/src/client.rs b/src/client.rs index 913caef2..befd214d 100644 --- a/src/client.rs +++ b/src/client.rs @@ -13,7 +13,7 @@ //! # use tuf::metadata::{RootMetadata, SignedMetadata, Role, MetadataPath, //! # MetadataVersion}; //! # use tuf::interchange::Json; -//! # use tuf::repository::{Repository, FileSystemRepository, HttpRepositoryBuilder}; +//! # use tuf::repository::{FileSystemRepository, HttpRepositoryBuilder}; //! //! static TRUSTED_ROOT_KEY_IDS: &'static [&str] = &[ //! "4750eaf6878740780d6f97b12dbad079fb012bec88c78de2c380add56d3f51db", @@ -62,10 +62,10 @@ use crate::crypto::{self, HashAlgorithm, HashValue, KeyId, PublicKey}; use crate::error::Error; use crate::interchange::DataInterchange; use crate::metadata::{ - Metadata, MetadataPath, MetadataVersion, Role, RootMetadata, SignedMetadata, SnapshotMetadata, - TargetDescription, TargetPath, TargetsMetadata, VirtualTargetPath, + Metadata, MetadataPath, MetadataVersion, RawSignedMetadata, Role, RootMetadata, SignedMetadata, + SnapshotMetadata, TargetDescription, TargetPath, TargetsMetadata, VirtualTargetPath, }; -use crate::repository::Repository; +use crate::repository::{Repository, RepositoryProvider, RepositoryStorage}; use crate::tuf::Tuf; use crate::Result; @@ -119,21 +119,21 @@ impl PathTranslator for DefaultTranslator { pub struct Client where D: DataInterchange + Sync, - L: Repository, - R: Repository, + L: RepositoryProvider + RepositoryStorage, + R: RepositoryProvider, T: PathTranslator, { tuf: Tuf, config: Config, - local: L, - remote: R, + local: Repository, + remote: Repository, } impl Client where D: DataInterchange + Sync, - L: Repository, - R: Repository, + L: RepositoryProvider + RepositoryStorage, + R: RepositoryProvider, T: PathTranslator, { /// Create a new TUF client. 
It will attempt to load the latest root metadata from the local @@ -155,7 +155,7 @@ where /// # client::{Client, Config}, /// # crypto::{KeyType, PrivateKey, SignatureScheme}, /// # metadata::{MetadataPath, MetadataVersion, Role, RootMetadataBuilder}, - /// # repository::{EphemeralRepository, Repository}, + /// # repository::{EphemeralRepository, RepositoryStorage}, /// # }; /// # fn main() -> Result<(), Error> { /// # block_on(async { @@ -164,8 +164,8 @@ where /// # SignatureScheme::Ed25519, /// # )?; /// # let public_key = private_key.public().clone(); - /// let local = EphemeralRepository::new(); - /// let remote = EphemeralRepository::new(); + /// let local = EphemeralRepository::::new(); + /// let remote = EphemeralRepository::::new(); /// /// let root_version = 1; /// let root = RootMetadataBuilder::new() @@ -180,7 +180,11 @@ where /// let root_path = MetadataPath::from_role(&Role::Root); /// let root_version = MetadataVersion::Number(root_version); /// - /// local.store_metadata(&root_path, &root_version, &root).await?; + /// local.store_metadata( + /// &root_path, + /// &root_version, + /// root.to_raw().unwrap().as_bytes() + /// ).await?; /// /// let client = Client::with_trusted_local( /// Config::default(), @@ -192,12 +196,13 @@ where /// # } /// ``` pub async fn with_trusted_local(config: Config, local: L, remote: R) -> Result { + let (local, remote) = (Repository::new(local), Repository::new(remote)); let root_path = MetadataPath::from_role(&Role::Root); // FIXME should this be MetadataVersion::None so we bootstrap with the latest version? 
let root_version = MetadataVersion::Number(1); - let root = local + let (_, root) = local .fetch_metadata(&root_path, &root_version, config.max_root_length, None) .await?; @@ -224,7 +229,7 @@ where /// # client::{Client, Config}, /// # crypto::{KeyType, PrivateKey, SignatureScheme}, /// # metadata::{MetadataPath, MetadataVersion, Role, RootMetadataBuilder}, - /// # repository::{EphemeralRepository, Repository}, + /// # repository::{EphemeralRepository}, /// # }; /// # fn main() -> Result<(), Error> { /// # block_on(async { @@ -233,8 +238,8 @@ where /// # SignatureScheme::Ed25519, /// # )?; /// # let public_key = private_key.public().clone(); - /// let local = EphemeralRepository::new(); - /// let remote = EphemeralRepository::new(); + /// let local = EphemeralRepository::::new(); + /// let remote = EphemeralRepository::::new(); /// /// let root_version = 1; /// let root_threshold = 1; @@ -265,6 +270,7 @@ where local: L, remote: R, ) -> Result { + let (local, remote) = (Repository::new(local), Repository::new(remote)); let tuf = Tuf::from_trusted_root(trusted_root.clone())?; Ok(Client { @@ -292,7 +298,7 @@ where /// # client::{Client, Config}, /// # crypto::{KeyType, PrivateKey, SignatureScheme}, /// # metadata::{MetadataPath, MetadataVersion, Role, RootMetadataBuilder}, - /// # repository::{EphemeralRepository, Repository}, + /// # repository::{EphemeralRepository, RepositoryStorage}, /// # }; /// # fn main() -> Result<(), Error> { /// # block_on(async { @@ -301,8 +307,8 @@ where /// # SignatureScheme::Ed25519, /// # )?; /// # let public_key = private_key.public().clone(); - /// let local = EphemeralRepository::new(); - /// let remote = EphemeralRepository::new(); + /// let local = EphemeralRepository::::new(); + /// let remote = EphemeralRepository::::new(); /// /// let root_version = 1; /// let root_threshold = 1; @@ -319,7 +325,11 @@ where /// let root_path = MetadataPath::from_role(&Role::Root); /// let root_version = MetadataVersion::Number(root_version); /// 
- /// remote.store_metadata(&root_path, &root_version, &root).await?; + /// remote.store_metadata( + /// &root_path, + /// &root_version, + /// root.to_raw().unwrap().as_bytes() + /// ).await?; /// /// let client = Client::with_trusted_root_keyids( /// Config::default(), @@ -344,9 +354,10 @@ where where I: IntoIterator, { - let root_path = MetadataPath::from_role(&Role::Root); + let (local, remote) = (Repository::new(local), Repository::new(remote)); - let (fetched, trusted_root) = fetch_metadata_from_local_or_else_remote( + let root_path = MetadataPath::from_role(&Role::Root); + let (fetched, raw_trusted_root, trusted_root) = fetch_metadata_from_local_or_else_remote( &root_path, &root_version, config.max_root_length, @@ -357,6 +368,7 @@ where .await?; // FIXME(#253) verify the trusted root version matches the provided version. + let root_version = MetadataVersion::Number(trusted_root.version()); let tuf = { let root: &RootMetadata = trusted_root.as_ref(); @@ -382,9 +394,8 @@ where // Only store the metadata after we have validated it. if fetched { - let root_version = MetadataVersion::Number(trusted_root.version()); client - .store_metadata(&root_path, &root_version, &trusted_root) + .store_metadata(&root_path, &root_version, &raw_trusted_root) .await; // FIXME: should we also store the root as `MetadataVersion::None`? 
@@ -408,7 +419,7 @@ where /// # client::{Client, Config}, /// # crypto::{KeyType, PrivateKey, SignatureScheme}, /// # metadata::{MetadataPath, MetadataVersion, Role, RootMetadataBuilder}, - /// # repository::{EphemeralRepository, Repository}, + /// # repository::{EphemeralRepository, RepositoryStorage}, /// # }; /// # fn main() -> Result<(), Error> { /// # block_on(async { @@ -417,8 +428,8 @@ where /// # SignatureScheme::Ed25519, /// # )?; /// # let public_key = private_key.public().clone(); - /// let local = EphemeralRepository::new(); - /// let remote = EphemeralRepository::new(); + /// let local = EphemeralRepository::::new(); + /// let remote = EphemeralRepository::::new(); /// /// let root_version = 1; /// let root_threshold = 1; @@ -435,7 +446,11 @@ where /// let root_path = MetadataPath::from_role(&Role::Root); /// let root_version = MetadataVersion::Number(root_version); /// - /// remote.store_metadata(&root_path, &root_version, &root).await?; + /// remote.store_metadata( + /// &root_path, + /// &root_version, + /// root.to_raw().unwrap().as_bytes() + /// ).await?; /// /// let client = Client::with_trusted_root_keys( /// Config::default(), @@ -460,9 +475,10 @@ where where I: IntoIterator, { - let root_path = MetadataPath::from_role(&Role::Root); + let (local, remote) = (Repository::new(local), Repository::new(remote)); - let (fetched, root) = fetch_metadata_from_local_or_else_remote( + let root_path = MetadataPath::from_role(&Role::Root); + let (fetched, raw_root, root) = fetch_metadata_from_local_or_else_remote( &root_path, root_version, config.max_root_length, @@ -473,9 +489,9 @@ where .await?; // FIXME(#253) verify the trusted root version matches the provided version. 
+ let root_version = MetadataVersion::Number(root.version()); - let tuf = - Tuf::from_root_with_trusted_keys(root.clone(), root_threshold, trusted_root_keys)?; + let tuf = Tuf::from_root_with_trusted_keys(root, root_threshold, trusted_root_keys)?; let mut client = Client { tuf, @@ -487,7 +503,7 @@ where // Only store the metadata after we have validated it. if fetched { client - .store_metadata(&root_path, &root_version, &root) + .store_metadata(&root_path, &root_version, &raw_root) .await; // FIXME: should we also store the root as `MetadataVersion::None`? @@ -514,16 +530,15 @@ where &'a mut self, path: &'a MetadataPath, version: &'a MetadataVersion, - metadata: &'a SignedMetadata, + metadata: &'a RawSignedMetadata, ) where - M: Metadata + Sync + 'static, + M: Metadata + Sync, { match self.local.store_metadata(path, version, metadata).await { Ok(()) => {} Err(err) => { warn!( - "failed to store {} metadata version {:?} to {}: {}", - M::ROLE.name(), + "failed to store metadata version {:?} to {}: {}", version, path.to_string(), err, @@ -536,7 +551,7 @@ where async fn update_root(&mut self) -> Result { let root_path = MetadataPath::from_role(&Role::Root); - let latest_root = self + let (raw_latest_root, latest_root) = self .remote .fetch_metadata( &root_path, @@ -563,30 +578,30 @@ where for i in (self.tuf.root().version() + 1)..latest_version { let version = MetadataVersion::Number(i); - let signed_root = self + let (raw_signed_root, signed_root) = self .remote .fetch_metadata(&root_path, &version, self.config.max_root_length, None) .await?; - if !self.tuf.update_root(signed_root.clone())? { + if !self.tuf.update_root(signed_root)? { error!("{}", err_msg); return Err(Error::Programming(err_msg.into())); } - self.store_metadata(&root_path, &version, &signed_root) + self.store_metadata(&root_path, &version, &raw_signed_root) .await; } - if !self.tuf.update_root(latest_root.clone())? { + if !self.tuf.update_root(latest_root)? 
{ error!("{}", err_msg); return Err(Error::Programming(err_msg.into())); } let latest_version = MetadataVersion::Number(latest_version); - self.store_metadata(&root_path, &latest_version, &latest_root) + self.store_metadata(&root_path, &latest_version, &raw_latest_root) .await; - self.store_metadata(&root_path, &MetadataVersion::None, &latest_root) + self.store_metadata(&root_path, &MetadataVersion::None, &raw_latest_root) .await; if self.tuf.root().expires() <= &Utc::now() { @@ -601,7 +616,7 @@ where async fn update_timestamp(&mut self) -> Result { let timestamp_path = MetadataPath::from_role(&Role::Timestamp); - let signed_timestamp = self + let (raw_signed_timestamp, signed_timestamp) = self .remote .fetch_metadata( ×tamp_path, @@ -610,12 +625,11 @@ where None, ) .await?; + let latest_version = signed_timestamp.version(); + let latest_version = MetadataVersion::Number(latest_version); - if self.tuf.update_timestamp(signed_timestamp.clone())? { - let latest_version = signed_timestamp.version(); - let latest_version = MetadataVersion::Number(latest_version); - - self.store_metadata(×tamp_path, &latest_version, &signed_timestamp) + if self.tuf.update_timestamp(signed_timestamp)? { + self.store_metadata(×tamp_path, &latest_version, &raw_signed_timestamp) .await; Ok(true) @@ -650,7 +664,7 @@ where let snapshot_path = MetadataPath::from_role(&Role::Snapshot); let snapshot_length = Some(snapshot_description.length()); - let signed_snapshot = self + let (raw_signed_snapshot, signed_snapshot) = self .remote .fetch_metadata( &snapshot_path, @@ -660,8 +674,8 @@ where ) .await?; - if self.tuf.update_snapshot(signed_snapshot.clone())? { - self.store_metadata(&snapshot_path, &version, &signed_snapshot) + if self.tuf.update_snapshot(signed_snapshot)? 
{ + self.store_metadata(&snapshot_path, &version, &raw_signed_snapshot) .await; Ok(true) @@ -700,7 +714,7 @@ where let targets_path = MetadataPath::from_role(&Role::Targets); let targets_length = Some(targets_description.length()); - let signed_targets = self + let (raw_signed_targets, signed_targets) = self .remote .fetch_metadata( &targets_path, @@ -710,8 +724,8 @@ where ) .await?; - if self.tuf.update_targets(signed_targets.clone())? { - self.store_metadata(&targets_path, &version, &signed_targets) + if self.tuf.update_targets(signed_targets)? { + self.store_metadata(&targets_path, &version, &raw_signed_targets) .await; Ok(true) @@ -766,7 +780,7 @@ where async fn _fetch_target<'a>( &'a mut self, target: &'a TargetPath, - ) -> Result> { + ) -> Result { let target_description = self.fetch_target_description(target).await?; // According to TUF section 5.5.2, when consistent snapshot is enabled, target files should @@ -847,9 +861,9 @@ where }; let role_length = Some(role_meta.length()); - let signed_meta = self + let raw_signed_meta = self .local - .fetch_metadata::( + .fetch_metadata( delegation.role(), &MetadataVersion::None, role_length, @@ -857,12 +871,12 @@ where ) .await; - let signed_meta = match signed_meta { - Ok(signed_meta) => signed_meta, + let (raw_signed_meta, signed_meta) = match raw_signed_meta { + Ok(m) => m, Err(_) => { match self .remote - .fetch_metadata::( + .fetch_metadata( delegation.role(), &version, role_length, @@ -885,12 +899,12 @@ where match self .tuf - .update_delegation(&targets_role, delegation.role(), signed_meta.clone()) + .update_delegation(&targets_role, delegation.role(), signed_meta) { Ok(_) => { match self .local - .store_metadata(delegation.role(), &MetadataVersion::None, &signed_meta) + .store_metadata(delegation.role(), &MetadataVersion::None, &raw_signed_meta) .await { Ok(_) => (), @@ -933,31 +947,31 @@ where } /// Helper function that first tries to fetch the metadata from the local store, and if it doesn't -/// exist, try 
fetching it from the remote store. +/// exist or does and fails to parse, try fetching it from the remote store. async fn fetch_metadata_from_local_or_else_remote<'a, D, L, R, M>( path: &'a MetadataPath, version: &'a MetadataVersion, max_length: Option, hash_data: Option<(&'static HashAlgorithm, HashValue)>, - local: &'a L, - remote: &'a R, -) -> Result<(bool, SignedMetadata)> + local: &'a Repository, + remote: &'a Repository, +) -> Result<(bool, RawSignedMetadata, SignedMetadata)> where D: DataInterchange + Sync, - L: Repository, - R: Repository, + L: RepositoryProvider + RepositoryStorage, + R: RepositoryProvider, M: Metadata + 'static, { match local .fetch_metadata(path, version, max_length, hash_data.clone()) .await { - Ok(meta) => Ok((false, meta)), + Ok((raw_meta, meta)) => Ok((false, raw_meta, meta)), Err(Error::NotFound) => { - let meta = remote + let (raw_meta, meta) = remote .fetch_metadata(path, version, max_length, hash_data) .await?; - Ok((true, meta)) + Ok((true, raw_meta, meta)) } Err(err) => Err(err), } @@ -1151,7 +1165,7 @@ mod test { let public_key = private_key.public().clone(); assert_matches!( - Client::with_trusted_local(Config::default(), &local, &remote,).await, + Client::with_trusted_local(Config::default(), &local, &remote).await, Err(Error::NotFound) ); @@ -1186,8 +1200,9 @@ mod test { #[test] fn client_constructors_err_with_invalid_keys() { block_on(async { - let local = EphemeralRepository::::new(); - let remote = EphemeralRepository::::new(); + let local = EphemeralRepository::new(); + let remote = EphemeralRepository::new(); + let mut repo = Repository::<_, Json>::new(&remote); let good_private_key = PrivateKey::from_pkcs8( &PrivateKey::new(KeyType::Ed25519).unwrap(), @@ -1210,8 +1225,7 @@ mod test { let root_path = MetadataPath::from_role(&Role::Root); let root_version = MetadataVersion::Number(root_version); - remote - .store_metadata(&root_path, &root_version, &root) + repo.store_metadata(&root_path, &root_version, 
&root.to_raw().unwrap()) .await .unwrap(); @@ -1253,7 +1267,8 @@ mod test { #[test] fn root_chain_update() { block_on(async { - let repo = EphemeralRepository::new(); + let repo = EphemeralRepository::::new(); + let mut remote = Repository::new(&repo); //// First, create the root metadata. let root1 = RootMetadataBuilder::new() @@ -1327,35 +1342,71 @@ mod test { let snapshot_path = MetadataPath::from_role(&Role::Snapshot); let timestamp_path = MetadataPath::from_role(&Role::Timestamp); - repo.store_metadata(&root_path, &MetadataVersion::Number(1), &root1) + remote + .store_metadata( + &root_path, + &MetadataVersion::Number(1), + &root1.to_raw().unwrap(), + ) .await .unwrap(); - repo.store_metadata(&root_path, &MetadataVersion::None, &root1) + remote + .store_metadata(&root_path, &MetadataVersion::None, &root1.to_raw().unwrap()) .await .unwrap(); - repo.store_metadata(&targets_path, &MetadataVersion::Number(1), &targets) + remote + .store_metadata( + &targets_path, + &MetadataVersion::Number(1), + &targets.to_raw().unwrap(), + ) .await .unwrap(); - repo.store_metadata(&targets_path, &MetadataVersion::None, &targets) + remote + .store_metadata( + &targets_path, + &MetadataVersion::None, + &targets.to_raw().unwrap(), + ) .await .unwrap(); - repo.store_metadata(&snapshot_path, &MetadataVersion::Number(1), &snapshot) + remote + .store_metadata( + &snapshot_path, + &MetadataVersion::Number(1), + &snapshot.to_raw().unwrap(), + ) .await .unwrap(); - repo.store_metadata(&snapshot_path, &MetadataVersion::None, &snapshot) + remote + .store_metadata( + &snapshot_path, + &MetadataVersion::None, + &snapshot.to_raw().unwrap(), + ) .await .unwrap(); - repo.store_metadata(×tamp_path, &MetadataVersion::Number(1), ×tamp) + remote + .store_metadata( + ×tamp_path, + &MetadataVersion::Number(1), + ×tamp.to_raw().unwrap(), + ) .await .unwrap(); - repo.store_metadata(×tamp_path, &MetadataVersion::None, ×tamp) + remote + .store_metadata( + ×tamp_path, + &MetadataVersion::None, + 
×tamp.to_raw().unwrap(), + ) .await .unwrap(); @@ -1377,7 +1428,7 @@ mod test { assert_eq!(client.tuf.root().version(), 1); assert_eq!( - root1, + root1.to_raw().unwrap(), client .local .fetch_metadata::( @@ -1387,7 +1438,8 @@ mod test { None ) .await - .unwrap(), + .unwrap() + .0 ); //// @@ -1395,25 +1447,33 @@ mod test { client .remote - .store_metadata(&root_path, &MetadataVersion::Number(2), &root2) + .store_metadata( + &root_path, + &MetadataVersion::Number(2), + &root2.to_raw().unwrap(), + ) .await .unwrap(); client .remote - .store_metadata(&root_path, &MetadataVersion::None, &root2) + .store_metadata(&root_path, &MetadataVersion::None, &root2.to_raw().unwrap()) .await .unwrap(); client .remote - .store_metadata(&root_path, &MetadataVersion::Number(3), &root3) + .store_metadata( + &root_path, + &MetadataVersion::Number(3), + &root3.to_raw().unwrap(), + ) .await .unwrap(); client .remote - .store_metadata(&root_path, &MetadataVersion::None, &root3) + .store_metadata(&root_path, &MetadataVersion::None, &root3.to_raw().unwrap()) .await .unwrap(); @@ -1424,7 +1484,7 @@ mod test { assert_eq!(client.tuf.root().version(), 3); assert_eq!( - root3, + root3.to_raw().unwrap(), client .local .fetch_metadata::( @@ -1434,7 +1494,8 @@ mod test { None ) .await - .unwrap(), + .unwrap() + .0 ); }); } @@ -1450,7 +1511,8 @@ mod test { } async fn test_versioned_init(consistent_snapshot: bool) { - let repo = EphemeralRepository::new(); + let repo = EphemeralRepository::::new(); + let mut remote = Repository::new(&repo); //// First, create the root metadata. 
let root1 = RootMetadataBuilder::new() @@ -1507,11 +1569,17 @@ mod test { let snapshot_path = MetadataPath::from_role(&Role::Snapshot); let timestamp_path = MetadataPath::from_role(&Role::Timestamp); - repo.store_metadata(&root_path, &MetadataVersion::Number(1), &root1) + remote + .store_metadata( + &root_path, + &MetadataVersion::Number(1), + &root1.to_raw().unwrap(), + ) .await .unwrap(); - repo.store_metadata(&root_path, &MetadataVersion::None, &root1) + remote + .store_metadata(&root_path, &MetadataVersion::None, &root1.to_raw().unwrap()) .await .unwrap(); @@ -1521,23 +1589,40 @@ mod test { MetadataVersion::None }; - repo.store_metadata(&targets_path, &metadata_version, &targets) + remote + .store_metadata(&targets_path, &metadata_version, &targets.to_raw().unwrap()) .await .unwrap(); - repo.store_metadata(&snapshot_path, &metadata_version, &snapshot) + remote + .store_metadata( + &snapshot_path, + &metadata_version, + &snapshot.to_raw().unwrap(), + ) .await .unwrap(); - repo.store_metadata(×tamp_path, &MetadataVersion::None, ×tamp) + remote + .store_metadata( + ×tamp_path, + &MetadataVersion::None, + ×tamp.to_raw().unwrap(), + ) .await .unwrap(); - repo.store_metadata(&root_path, &MetadataVersion::Number(2), &root2) + remote + .store_metadata( + &root_path, + &MetadataVersion::Number(2), + &root2.to_raw().unwrap(), + ) .await .unwrap(); - repo.store_metadata(&root_path, &MetadataVersion::None, &root2) + remote + .store_metadata(&root_path, &MetadataVersion::None, &root2.to_raw().unwrap()) .await .unwrap(); @@ -1564,12 +1649,13 @@ mod test { assert_eq!(client.tuf.root().version(), 2); assert_eq!( - root2, + root2.to_raw().unwrap(), client .local .fetch_metadata::(&root_path, &MetadataVersion::Number(2), None, None) .await - .unwrap(), + .unwrap() + .0 ); } @@ -1623,7 +1709,8 @@ mod test { async fn test_fetch_target_description(path: String, expected_description: TargetDescription) { // Generate an ephemeral repository with a single target. 
- let repo = EphemeralRepository::new(); + let repo = EphemeralRepository::::new(); + let mut remote = Repository::new(&repo); let root = RootMetadataBuilder::new() .root_key(KEYS[0].public().clone()) @@ -1659,23 +1746,44 @@ mod test { let snapshot_path = MetadataPath::from_role(&Role::Snapshot); let timestamp_path = MetadataPath::from_role(&Role::Timestamp); - repo.store_metadata(&root_path, &MetadataVersion::Number(1), &root) + remote + .store_metadata( + &root_path, + &MetadataVersion::Number(1), + &root.to_raw().unwrap(), + ) .await .unwrap(); - repo.store_metadata(&root_path, &MetadataVersion::None, &root) + remote + .store_metadata(&root_path, &MetadataVersion::None, &root.to_raw().unwrap()) .await .unwrap(); - repo.store_metadata(&targets_path, &MetadataVersion::None, &targets) + remote + .store_metadata( + &targets_path, + &MetadataVersion::None, + &targets.to_raw().unwrap(), + ) .await .unwrap(); - repo.store_metadata(&snapshot_path, &MetadataVersion::None, &snapshot) + remote + .store_metadata( + &snapshot_path, + &MetadataVersion::None, + &snapshot.to_raw().unwrap(), + ) .await .unwrap(); - repo.store_metadata(×tamp_path, &MetadataVersion::None, ×tamp) + remote + .store_metadata( + ×tamp_path, + &MetadataVersion::None, + ×tamp.to_raw().unwrap(), + ) .await .unwrap(); diff --git a/src/crypto.rs b/src/crypto.rs index 0c46cd53..a7822be5 100644 --- a/src/crypto.rs +++ b/src/crypto.rs @@ -75,6 +75,13 @@ pub fn hash_preference<'a>( Err(Error::NoSupportedHashAlgorithm) } +#[cfg(test)] +pub(crate) fn calculate_hash(data: &[u8], hash_alg: HashAlgorithm) -> HashValue { + let mut context = hash_alg.digest_context().unwrap(); + context.update(data); + HashValue::new(context.finish().as_ref().to_vec()) +} + /// Calculate the size and hash digest from a given `Read`. 
pub fn calculate_hashes( mut read: R, diff --git a/src/metadata.rs b/src/metadata.rs index 5dcb1efa..61b22def 100644 --- a/src/metadata.rs +++ b/src/metadata.rs @@ -237,7 +237,39 @@ pub trait Metadata: Debug + PartialEq + Serialize + DeserializeOwned { fn expires(&self) -> &DateTime; } -/// A piece of raw metadata with attached signatures. +/// Unverified raw metadata with attached signatures and type information identifying the +/// metadata's type and serialization format. +#[derive(Debug, Clone, PartialEq)] +pub struct RawSignedMetadata { + bytes: Vec, + _marker: PhantomData<(D, M)>, +} + +impl RawSignedMetadata +where + D: DataInterchange, + M: Metadata, +{ + /// Create a new [`RawSignedMetadata`] using the provided `bytes`. + pub fn new(bytes: Vec) -> Self { + Self { + bytes, + _marker: PhantomData, + } + } + + /// Access this metadata's inner raw bytes. + pub fn as_bytes(&self) -> &[u8] { + &self.bytes + } + + /// Parse this metadata. + pub fn parse(&self) -> Result> { + D::from_slice(&self.bytes) + } +} + +/// A piece of metadata with attached signatures. #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct SignedMetadata { signatures: Vec, @@ -280,6 +312,23 @@ where }) } + /// Serialize this metadata to canonical bytes suitable for serialization. Note that this + /// method is only intended to serialize signed metadata generated by this crate, not to + /// re-serialize metadata that was originally obtained from a remote source. + /// + /// TUF metadata hashes are on the raw bytes of the metadata, so it is not guaranteed that the + /// hash of the returned bytes will match a hash included in, for example, a snapshot metadata + /// file, as: + /// * Parsing metadata removes unknown fields, which would not be included in the returned + /// bytes, + /// * DataInterchange implementations only guarantee the bytes are canonical for the purpose of + /// a signature. 
Metadata obtained from a remote source may have included different whitespace + /// or ordered fields in a way that is not preserved when parsing that metadata. + pub fn to_raw(&self) -> Result> { + let bytes = D::canonicalize(&D::serialize(self)?)?; + Ok(RawSignedMetadata::new(bytes)) + } + /// Append a signature to this signed metadata. Will overwrite signature by keys with the same /// ID. /// diff --git a/src/repository.rs b/src/repository.rs index eaeaa41a..9acf40fd 100644 --- a/src/repository.rs +++ b/src/repository.rs @@ -1,14 +1,18 @@ //! Interfaces for interacting with different types of TUF repositories. -use crate::crypto::{HashAlgorithm, HashValue}; -use crate::error::Error; +use crate::crypto::{self, HashAlgorithm, HashValue}; use crate::interchange::DataInterchange; use crate::metadata::{ - Metadata, MetadataPath, MetadataVersion, SignedMetadata, TargetDescription, TargetPath, + Metadata, MetadataPath, MetadataVersion, RawSignedMetadata, SignedMetadata, TargetDescription, + TargetPath, }; -use crate::Result; +use crate::util::SafeAsyncRead; +use crate::{Error, Result}; + use futures_io::AsyncRead; use futures_util::future::BoxFuture; +use futures_util::io::AsyncReadExt; +use std::marker::PhantomData; mod file_system; pub use self::file_system::{FileSystemRepository, FileSystemRepositoryBuilder}; @@ -19,53 +23,143 @@ pub use self::http::{HttpRepository, HttpRepositoryBuilder}; mod ephemeral; pub use self::ephemeral::EphemeralRepository; -/// Top-level trait that represents a TUF repository and contains all the ways it can be interacted -/// with. -pub trait Repository +/// A readable TUF repository. +pub trait RepositoryProvider where D: DataInterchange + Sync, { - /// Store signed metadata. + /// Fetch signed metadata identified by `meta_path`, `version`, and + /// [`D::extension()`][extension]. + /// + /// Implementations may ignore `max_length` and `hash_data` as [`Repository`] will verify these + /// constraints itself. 
However, it may be more efficient for an implementation to detect + /// invalid metadata and fail the fetch operation before streaming all of the bytes of the + /// metadata. /// - /// Note: This **MUST** canonicalize the bytes before storing them as a read will expect the - /// hashes of the metadata to match. - fn store_metadata<'a, M>( + /// [extension]: crate::interchange::DataInterchange::extension + fn fetch_metadata<'a>( &'a self, meta_path: &'a MetadataPath, version: &'a MetadataVersion, - metadata: &'a SignedMetadata, - ) -> BoxFuture<'a, Result<()>> - where - M: Metadata + Sync + 'static; + max_length: Option, + hash_data: Option<(&'static HashAlgorithm, HashValue)>, + ) -> BoxFuture<'a, Result>>; + + /// Fetch the given target. + /// + /// Implementations may ignore the `length` and `hashes` fields in `target_description` as + /// [`Repository`] will verify these constraints itself. However, it may be more efficient for + /// an implementation to detect invalid targets and fail the fetch operation before streaming + /// all of the bytes. + fn fetch_target<'a>( + &'a self, + target_path: &'a TargetPath, + target_description: &'a TargetDescription, + ) -> BoxFuture<'a, Result>>; +} - /// Fetch signed metadata. - fn fetch_metadata<'a, M>( +/// A writable TUF repository. Most implementors of this trait should also implement +/// `RepositoryProvider`. +pub trait RepositoryStorage +where + D: DataInterchange + Sync, +{ + /// Store the provided `metadata` in a location identified by `meta_path`, `version`, and + /// [`D::extension()`][extension], overwriting any existing metadata at that location. 
+ /// + /// [extension]: crate::interchange::DataInterchange::extension + fn store_metadata<'a, R>( &'a self, meta_path: &'a MetadataPath, version: &'a MetadataVersion, - max_length: Option, - hash_data: Option<(&'static HashAlgorithm, HashValue)>, - ) -> BoxFuture<'a, Result>> + metadata: R, + ) -> BoxFuture<'a, Result<()>> where - M: Metadata + 'static; + R: AsyncRead + Send + Unpin + 'a; - /// Store the given target. + /// Store the provided `target` in a location identified by `target_path`, overwriting any + /// existing target at that location. fn store_target<'a, R>( &'a self, - read: R, + target: R, target_path: &'a TargetPath, ) -> BoxFuture<'a, Result<()>> where R: AsyncRead + Send + Unpin + 'a; +} + +impl RepositoryProvider for &T +where + T: RepositoryProvider, + D: DataInterchange + Sync, +{ + fn fetch_metadata<'a>( + &'a self, + meta_path: &'a MetadataPath, + version: &'a MetadataVersion, + max_length: Option, + hash_data: Option<(&'static HashAlgorithm, HashValue)>, + ) -> BoxFuture<'a, Result>> { + (**self).fetch_metadata(meta_path, version, max_length, hash_data) + } - /// Fetch the given target. 
fn fetch_target<'a>( &'a self, target_path: &'a TargetPath, target_description: &'a TargetDescription, - ) -> BoxFuture<'a, Result>>; + ) -> BoxFuture<'a, Result>> { + (**self).fetch_target(target_path, target_description) + } +} + +impl RepositoryStorage for &T +where + T: RepositoryStorage, + D: DataInterchange + Sync, +{ + fn store_metadata<'a, R>( + &'a self, + meta_path: &'a MetadataPath, + version: &'a MetadataVersion, + metadata: R, + ) -> BoxFuture<'a, Result<()>> + where + R: AsyncRead + Send + Unpin + 'a, + { + (**self).store_metadata(meta_path, version, metadata) + } + + fn store_target<'a, R>( + &'a self, + target: R, + target_path: &'a TargetPath, + ) -> BoxFuture<'a, Result<()>> + where + R: AsyncRead + Send + Unpin + 'a, + { + (**self).store_target(target, target_path) + } +} - /// Perform a sanity check that `M`, `Role`, and `MetadataPath` all desrcribe the same entity. +/// A wrapper around an implementation of [`RepositoryProvider`] and/or [`RepositoryStorage`] tied +/// to a specific [`DataInterchange`](crate::interchange::DataInterchange) that will enforce +/// provided length limits and hash checks. +#[derive(Debug, Clone)] +pub(crate) struct Repository { + repository: R, + _interchange: PhantomData, +} + +impl Repository { + /// Creates a new [`Repository`] wrapping `repository`. + pub(crate) fn new(repository: R) -> Self { + Self { + repository, + _interchange: PhantomData, + } + } + + /// Perform a sanity check that `M`, `Role`, and `MetadataPath` all describe the same entity. fn check(meta_path: &MetadataPath) -> Result<()> where M: Metadata, @@ -82,57 +176,317 @@ where } } -impl Repository for &T +impl Repository where - T: Repository, + R: RepositoryProvider, D: DataInterchange + Sync, { - fn store_metadata<'a, M>( + /// Fetch and parse metadata identified by `meta_path`, `version`, and + /// [`D::extension()`][extension]. 
+ ///
+ /// If `max_length` is provided, this method will return an error if the metadata exceeds
+ `max_length` bytes. If `hash_data` is provided, this method will return an error if the
+ hashed bytes of the metadata do not match `hash_data`.
+ ///
+ /// [extension]: crate::interchange::DataInterchange::extension
+ pub(crate) async fn fetch_metadata<'a, M>(
 &'a self,
 meta_path: &'a MetadataPath,
 version: &'a MetadataVersion,
- metadata: &'a SignedMetadata,
- ) -> BoxFuture<'a, Result<()>>
+ max_length: Option,
+ hash_data: Option<(&'static HashAlgorithm, HashValue)>,
+ ) -> Result<(RawSignedMetadata, SignedMetadata)>
 where
- M: Metadata + Sync + 'static,
+ M: Metadata,
 {
- (**self).store_metadata(meta_path, version, metadata)
+ let raw_signed_meta = self
+ .fetch_raw_metadata(meta_path, version, max_length, hash_data)
+ .await?;
+ let signed_meta = raw_signed_meta.parse()?;
+
+ Ok((raw_signed_meta, signed_meta))
 }
- /// Fetch signed metadata.
- fn fetch_metadata<'a, M>(
+ /// Fetch metadata identified by `meta_path`, `version`, and [`D::extension()`][extension].
+ ///
+ /// If `max_length` is provided, this method will return an error if the metadata exceeds
+ /// `max_length` bytes. If `hash_data` is provided, this method will return an error if the
+ /// hashed bytes of the metadata do not match `hash_data`.
+ ///
+ /// [extension]: crate::interchange::DataInterchange::extension
+ async fn fetch_raw_metadata<'a, M>(
 &'a self,
 meta_path: &'a MetadataPath,
 version: &'a MetadataVersion,
 max_length: Option,
 hash_data: Option<(&'static HashAlgorithm, HashValue)>,
- ) -> BoxFuture<'a, Result>>
+ ) -> Result>
 where
- M: Metadata + 'static,
+ M: Metadata,
 {
- (**self).fetch_metadata(meta_path, version, max_length, hash_data)
+ Self::check::(meta_path)?;
+
+ // Fetch the metadata, verifying max_length and hash_data if provided, as the repository
+ // implementation should only be trusted to use those as hints to fail early.
+ let mut reader = self + .repository + .fetch_metadata(meta_path, version, max_length, hash_data.clone()) + .await? + .check_length_and_hash(max_length.unwrap_or(::std::usize::MAX) as u64, hash_data)?; + + let mut buf = Vec::new(); + reader.read_to_end(&mut buf).await?; + + let raw_signed_meta = RawSignedMetadata::new(buf); + + Ok(raw_signed_meta) } - /// Store the given target. - fn store_target<'a, R>( + /// Fetch the target identified by `target_path` through the returned `AsyncRead`, verifying + /// that the target matches the preferred hash specified in `target_description` and that it is + /// the expected length. Such verification errors will be provided by a read failure on the + /// provided `AsyncRead`. + /// + /// It is **critical** that none of the bytes from the returned `AsyncRead` are used until it + /// has been fully consumed as the data is untrusted. + pub(crate) async fn fetch_target<'a>( &'a self, - read: R, target_path: &'a TargetPath, - ) -> BoxFuture<'a, Result<()>> + target_description: &'a TargetDescription, + ) -> Result { + let (hash_alg, value) = crypto::hash_preference(target_description.hashes())?; + + self.repository + .fetch_target(target_path, target_description) + .await? + .check_length_and_hash(target_description.length(), Some((hash_alg, value.clone()))) + } +} + +impl Repository +where + R: RepositoryStorage, + D: DataInterchange + Sync, +{ + /// Store the provided `metadata` in a location identified by `meta_path`, `version`, and + /// [`D::extension()`][extension], overwriting any existing metadata at that location. 
+ /// + /// [extension]: crate::interchange::DataInterchange::extension + pub async fn store_metadata<'a, M>( + &'a mut self, + path: &'a MetadataPath, + version: &'a MetadataVersion, + metadata: &'a RawSignedMetadata, + ) -> Result<()> where - R: AsyncRead + Send + Unpin + 'a, + M: Metadata + Sync, { - (**self).store_target(read, target_path) + Self::check::(path)?; + + self.repository + .store_metadata(path, version, metadata.as_bytes()) + .await } - /// Fetch the given target. - fn fetch_target<'a>( - &'a self, + /// Store the provided `target` in a location identified by `target_path`. + pub async fn store_target<'a, S>( + &'a mut self, + target: S, target_path: &'a TargetPath, - target_description: &'a TargetDescription, - ) -> BoxFuture<'a, Result>> { - { - (**self).fetch_target(target_path, target_description) - } + ) -> Result<()> + where + S: AsyncRead + Send + Unpin + 'a, + { + self.repository.store_target(target, target_path).await + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::interchange::Json; + use crate::metadata::{MetadataPath, MetadataVersion, Role, RootMetadata, SnapshotMetadata}; + use crate::repository::EphemeralRepository; + use futures_executor::block_on; + use matches::assert_matches; + + #[test] + fn repository_forwards_not_found_error() { + block_on(async { + let repo = Repository::<_, Json>::new(EphemeralRepository::new()); + + assert_eq!( + repo.fetch_metadata::( + &MetadataPath::from_role(&Role::Root), + &MetadataVersion::None, + None, + None + ) + .await, + Err(Error::NotFound) + ); + }); + } + + #[test] + fn repository_rejects_mismatched_path() { + block_on(async { + let mut repo = Repository::<_, Json>::new(EphemeralRepository::new()); + let fake_metadata = RawSignedMetadata::::new(vec![]); + + repo.store_metadata( + &MetadataPath::from_role(&Role::Root), + &MetadataVersion::None, + &fake_metadata, + ) + .await + .unwrap(); + + assert_matches!( + repo.store_metadata( + &MetadataPath::from_role(&Role::Snapshot), + 
&MetadataVersion::None, + &fake_metadata, + ) + .await, + Err(Error::IllegalArgument(_)) + ); + + assert_matches!( + repo.fetch_metadata::( + &MetadataPath::from_role(&Role::Root), + &MetadataVersion::None, + None, + None + ) + .await, + Err(Error::IllegalArgument(_)) + ); + }); + } + + #[test] + fn repository_verifies_metadata_hash() { + block_on(async { + let path = MetadataPath::from_role(&Role::Root); + let version = MetadataVersion::None; + let data: &[u8] = b"valid metadata"; + let data_hash = crypto::calculate_hash(data, HashAlgorithm::Sha256); + + let repo = EphemeralRepository::new(); + repo.store_metadata(&path, &version, data).await.unwrap(); + + let client = Repository::<_, Json>::new(repo); + + assert_eq!( + client + .fetch_raw_metadata::( + &path, + &version, + None, + Some((&HashAlgorithm::Sha256, data_hash)) + ) + .await, + Ok(RawSignedMetadata::new(data.to_vec())) + ); + }) + } + + #[test] + fn repository_rejects_corrupt_metadata() { + block_on(async { + let path = MetadataPath::from_role(&Role::Root); + let version = MetadataVersion::None; + let data: &[u8] = b"corrupt metadata"; + + let repo = EphemeralRepository::new(); + repo.store_metadata(&path, &version, data).await.unwrap(); + + let client = Repository::<_, Json>::new(repo); + + assert_matches!( + client + .fetch_metadata::( + &path, + &version, + None, + Some((&HashAlgorithm::Sha256, HashValue::new(vec![]))) + ) + .await, + Err(_) + ); + }) + } + + #[test] + fn repository_verifies_metadata_size() { + block_on(async { + let path = MetadataPath::from_role(&Role::Root); + let version = MetadataVersion::None; + let data: &[u8] = b"reasonably sized metadata"; + + let repo = EphemeralRepository::new(); + repo.store_metadata(&path, &version, data).await.unwrap(); + + let client = Repository::<_, Json>::new(repo); + + assert_eq!( + client + .fetch_raw_metadata::(&path, &version, Some(100), None) + .await, + Ok(RawSignedMetadata::new(data.to_vec())) + ); + }) + } + + #[test] + fn 
repository_rejects_oversized_metadata() { + block_on(async { + let path = MetadataPath::from_role(&Role::Root); + let version = MetadataVersion::None; + let data: &[u8] = b"very big metadata"; + + let repo = EphemeralRepository::new(); + repo.store_metadata(&path, &version, data).await.unwrap(); + + let client = Repository::<_, Json>::new(repo); + + assert_matches!( + client + .fetch_metadata::(&path, &version, Some(4), None) + .await, + Err(_) + ); + }) + } + + #[test] + fn repository_rejects_corrupt_targets() { + block_on(async { + let repo = EphemeralRepository::new(); + let mut client = Repository::<_, Json>::new(repo); + + let data: &[u8] = b"like tears in the rain"; + let target_description = + TargetDescription::from_reader(data, &[HashAlgorithm::Sha256]).unwrap(); + let path = TargetPath::new("batty".into()).unwrap(); + client.store_target(data, &path).await.unwrap(); + + let mut read = client + .fetch_target(&path, &target_description) + .await + .unwrap(); + let mut buf = Vec::new(); + read.read_to_end(&mut buf).await.unwrap(); + assert_eq!(buf.as_slice(), data); + + let bad_data: &[u8] = b"you're in a desert"; + client.store_target(bad_data, &path).await.unwrap(); + let mut read = client + .fetch_target(&path, &target_description) + .await + .unwrap(); + assert!(read.read_to_end(&mut buf).await.is_err()); + }) } } diff --git a/src/repository/ephemeral.rs b/src/repository/ephemeral.rs index a4272a54..781f0ab0 100644 --- a/src/repository/ephemeral.rs +++ b/src/repository/ephemeral.rs @@ -8,27 +8,21 @@ use std::collections::HashMap; use std::marker::PhantomData; use std::sync::Arc; -use crate::crypto::{self, HashAlgorithm, HashValue}; +use crate::crypto::{HashAlgorithm, HashValue}; use crate::error::Error; use crate::interchange::DataInterchange; -use crate::metadata::{ - Metadata, MetadataPath, MetadataVersion, SignedMetadata, TargetDescription, TargetPath, -}; -use crate::repository::Repository; -use crate::util::SafeAsyncRead; +use 
crate::metadata::{MetadataPath, MetadataVersion, TargetDescription, TargetPath}; +use crate::repository::{RepositoryProvider, RepositoryStorage}; use crate::Result; type ArcHashMap = Arc>>; /// An ephemeral repository contained solely in memory. #[derive(Debug)] -pub struct EphemeralRepository -where - D: DataInterchange, -{ +pub struct EphemeralRepository { metadata: ArcHashMap<(MetadataPath, MetadataVersion), Arc<[u8]>>, targets: ArcHashMap>, - interchange: PhantomData, + _interchange: PhantomData, } impl EphemeralRepository @@ -37,10 +31,10 @@ where { /// Create a new ephemercal repository. pub fn new() -> Self { - EphemeralRepository { + Self { metadata: Arc::new(RwLock::new(HashMap::new())), targets: Arc::new(RwLock::new(HashMap::new())), - interchange: PhantomData, + _interchange: PhantomData, } } } @@ -54,44 +48,18 @@ where } } -impl Repository for EphemeralRepository +impl RepositoryProvider for EphemeralRepository where D: DataInterchange + Sync, { - fn store_metadata<'a, M>( - &'a self, - meta_path: &'a MetadataPath, - version: &'a MetadataVersion, - metadata: &'a SignedMetadata, - ) -> BoxFuture<'a, Result<()>> - where - M: Metadata + Sync + 'static, - { - async move { - Self::check::(meta_path)?; - let mut buf = Vec::new(); - D::to_writer(&mut buf, metadata)?; - self.metadata - .write() - .insert((meta_path.clone(), version.clone()), Arc::from(buf)); - Ok(()) - } - .boxed() - } - - fn fetch_metadata<'a, M>( + fn fetch_metadata<'a>( &'a self, meta_path: &'a MetadataPath, version: &'a MetadataVersion, - max_length: Option, - hash_data: Option<(&'static HashAlgorithm, HashValue)>, - ) -> BoxFuture<'a, Result>> - where - M: Metadata + 'static, - { + _max_length: Option, + _hash_data: Option<(&'static HashAlgorithm, HashValue)>, + ) -> BoxFuture<'a, Result>> { async move { - Self::check::(meta_path)?; - let bytes = match self .metadata .read() @@ -103,57 +71,71 @@ where } }; - let mut reader = Cursor::new(bytes) - 
.check_length_and_hash(max_length.unwrap_or(::std::usize::MAX) as u64, hash_data)?; + let reader: Box = Box::new(Cursor::new(bytes)); + Ok(reader) + } + .boxed() + } - let mut buf = Vec::with_capacity(max_length.unwrap_or(0)); - reader.read_to_end(&mut buf).await?; + fn fetch_target<'a>( + &'a self, + target_path: &'a TargetPath, + _target_description: &'a TargetDescription, + ) -> BoxFuture<'a, Result>> { + async move { + let bytes = match self.targets.read().get(target_path) { + Some(bytes) => Arc::clone(&bytes), + None => { + return Err(Error::NotFound); + } + }; - D::from_slice(&buf) + let reader: Box = Box::new(Cursor::new(bytes)); + Ok(reader) } .boxed() } +} - fn store_target<'a, R>( +impl RepositoryStorage for EphemeralRepository +where + D: DataInterchange + Sync, +{ + fn store_metadata<'a, R>( &'a self, - mut read: R, - target_path: &'a TargetPath, + meta_path: &'a MetadataPath, + version: &'a MetadataVersion, + mut metadata: R, ) -> BoxFuture<'a, Result<()>> where R: AsyncRead + Send + Unpin + 'a, { async move { let mut buf = Vec::new(); - read.read_to_end(&mut buf).await?; - self.targets + metadata.read_to_end(&mut buf).await?; + self.metadata .write() - .insert(target_path.clone(), Arc::from(buf)); + .insert((meta_path.clone(), version.clone()), Arc::from(buf)); Ok(()) } .boxed() } - fn fetch_target<'a>( + fn store_target<'a, R>( &'a self, + mut read: R, target_path: &'a TargetPath, - target_description: &'a TargetDescription, - ) -> BoxFuture<'a, Result>> { + ) -> BoxFuture<'a, Result<()>> + where + R: AsyncRead + Send + Unpin + 'a, + { async move { - let bytes = match self.targets.read().get(target_path) { - Some(bytes) => Arc::clone(&bytes), - None => { - return Err(Error::NotFound); - } - }; - - let (alg, value) = crypto::hash_preference(target_description.hashes())?; - - let reader: Box = - Box::new(Cursor::new(bytes).check_length_and_hash( - target_description.length(), - Some((alg, value.clone())), - )?); - Ok(reader) + let mut buf = Vec::new(); 
+ read.read_to_end(&mut buf).await?; + self.targets + .write() + .insert(target_path.clone(), Arc::from(buf)); + Ok(()) } .boxed() } @@ -181,10 +163,13 @@ mod test { read.read_to_end(&mut buf).await.unwrap(); assert_eq!(buf.as_slice(), data); + // RepositoryProvider implementations do not guarantee data is not corrupt. let bad_data: &[u8] = b"you're in a desert"; repo.store_target(bad_data, &path).await.unwrap(); let mut read = repo.fetch_target(&path, &target_description).await.unwrap(); - assert!(read.read_to_end(&mut buf).await.is_err()); + buf.clear(); + read.read_to_end(&mut buf).await.unwrap(); + assert_eq!(buf.as_slice(), bad_data); }) } } diff --git a/src/repository/file_system.rs b/src/repository/file_system.rs index fe5ee328..b7a238ab 100644 --- a/src/repository/file_system.rs +++ b/src/repository/file_system.rs @@ -2,37 +2,39 @@ use futures_io::AsyncRead; use futures_util::future::{BoxFuture, FutureExt}; -use futures_util::io::{copy, AllowStdIo, AsyncReadExt}; +use futures_util::io::{copy, AllowStdIo}; use log::debug; use std::fs::{DirBuilder, File}; use std::marker::PhantomData; use std::path::{Path, PathBuf}; use tempfile::NamedTempFile; -use crate::crypto::{self, HashAlgorithm, HashValue}; +use crate::crypto::{HashAlgorithm, HashValue}; use crate::error::Error; use crate::interchange::DataInterchange; -use crate::metadata::{ - Metadata, MetadataPath, MetadataVersion, SignedMetadata, TargetDescription, TargetPath, -}; -use crate::repository::Repository; -use crate::util::SafeAsyncRead; +use crate::metadata::{MetadataPath, MetadataVersion, TargetDescription, TargetPath}; +use crate::repository::{RepositoryProvider, RepositoryStorage}; use crate::Result; /// A builder to create a repository contained on the local file system. 
-pub struct FileSystemRepositoryBuilder { +pub struct FileSystemRepositoryBuilder { local_path: PathBuf, metadata_prefix: Option, targets_prefix: Option, + _interchange: PhantomData, } -impl FileSystemRepositoryBuilder { +impl FileSystemRepositoryBuilder +where + D: DataInterchange, +{ /// Create a new repository with the given `local_path` prefix. pub fn new>(local_path: P) -> Self { FileSystemRepositoryBuilder { local_path: local_path.into(), metadata_prefix: None, targets_prefix: None, + _interchange: PhantomData, } } @@ -57,10 +59,7 @@ impl FileSystemRepositoryBuilder { } /// Build a `FileSystemRepository`. - pub fn build(self) -> Result> - where - D: DataInterchange, - { + pub fn build(self) -> Result> { let metadata_path = if let Some(metadata_prefix) = self.metadata_prefix { self.local_path.join(metadata_prefix) } else { @@ -78,7 +77,7 @@ impl FileSystemRepositoryBuilder { Ok(FileSystemRepository { metadata_path, targets_path, - interchange: PhantomData, + _interchange: PhantomData, }) } } @@ -90,7 +89,7 @@ where { metadata_path: PathBuf, targets_path: PathBuf, - interchange: PhantomData, + _interchange: PhantomData, } impl FileSystemRepository @@ -106,62 +105,75 @@ where } } -impl Repository for FileSystemRepository +impl RepositoryProvider for FileSystemRepository where D: DataInterchange + Sync, { - fn store_metadata<'a, M>( + fn fetch_metadata<'a>( &'a self, meta_path: &'a MetadataPath, version: &'a MetadataVersion, - metadata: &'a SignedMetadata, - ) -> BoxFuture<'a, Result<()>> - where - M: Metadata + Sync + 'static, - { + _max_length: Option, + _hash_data: Option<(&'static HashAlgorithm, HashValue)>, + ) -> BoxFuture<'a, Result>> { async move { - Self::check::(meta_path)?; - let mut path = self.metadata_path.clone(); - path.extend(meta_path.components::(version)); + path.extend(meta_path.components::(&version)); - if path.exists() { - debug!("Metadata path exists. 
Overwriting: {:?}", path); - } + let reader: Box = + Box::new(AllowStdIo::new(File::open(&path)?)); + Ok(reader) + } + .boxed() + } - let mut temp_file = create_temp_file(&path)?; - D::to_writer(&mut temp_file, metadata)?; - temp_file.persist(&path)?; + fn fetch_target<'a>( + &'a self, + target_path: &'a TargetPath, + _target_description: &'a TargetDescription, + ) -> BoxFuture<'a, Result>> { + async move { + let mut path = self.targets_path.clone(); + path.extend(target_path.components()); - Ok(()) + if !path.exists() { + return Err(Error::NotFound); + } + + let reader: Box = + Box::new(AllowStdIo::new(File::open(&path)?)); + Ok(reader) } .boxed() } +} - /// Fetch signed metadata. - fn fetch_metadata<'a, M>( +impl RepositoryStorage for FileSystemRepository +where + D: DataInterchange + Sync, +{ + fn store_metadata<'a, R>( &'a self, meta_path: &'a MetadataPath, version: &'a MetadataVersion, - max_length: Option, - hash_data: Option<(&'static HashAlgorithm, HashValue)>, - ) -> BoxFuture<'a, Result>> + metadata: R, + ) -> BoxFuture<'a, Result<()>> where - M: Metadata + 'static, + R: AsyncRead + Send + Unpin + 'a, { async move { - Self::check::(&meta_path)?; - let mut path = self.metadata_path.clone(); - path.extend(meta_path.components::(&version)); + path.extend(meta_path.components::(version)); - let mut reader = AllowStdIo::new(File::open(&path)?) - .check_length_and_hash(max_length.unwrap_or(::std::usize::MAX) as u64, hash_data)?; + if path.exists() { + debug!("Metadata path exists. Overwriting: {:?}", path); + } - let mut buf = Vec::with_capacity(max_length.unwrap_or(0)); - reader.read_to_end(&mut buf).await?; + let mut temp_file = AllowStdIo::new(create_temp_file(&path)?); + copy(metadata, &mut temp_file).await?; + temp_file.into_inner().persist(&path)?; - Ok(D::from_slice(&buf)?) 
+ Ok(()) } .boxed() } @@ -190,32 +202,6 @@ where } .boxed() } - - fn fetch_target<'a>( - &'a self, - target_path: &'a TargetPath, - target_description: &'a TargetDescription, - ) -> BoxFuture<'a, Result>> { - async move { - let mut path = self.targets_path.clone(); - path.extend(target_path.components()); - - if !path.exists() { - return Err(Error::NotFound); - } - - let (alg, value) = crypto::hash_preference(target_description.hashes())?; - - let reader: Box = - Box::new(AllowStdIo::new(File::open(&path)?).check_length_and_hash( - target_description.length(), - Some((alg, value.clone())), - )?); - - Ok(reader) - } - .boxed() - } } fn create_temp_file(path: &Path) -> Result { @@ -237,7 +223,9 @@ mod test { use super::*; use crate::interchange::Json; use crate::metadata::{Role, RootMetadata}; + use crate::repository::Repository; use futures_executor::block_on; + use futures_util::io::AsyncReadExt; use tempfile; #[test] @@ -248,17 +236,18 @@ mod test { .tempdir() .unwrap(); let repo = FileSystemRepositoryBuilder::new(temp_dir.path()) - .build::() + .build() .unwrap(); assert_eq!( - repo.fetch_metadata::( - &MetadataPath::from_role(&Role::Root), - &MetadataVersion::None, - None, - None - ) - .await, + Repository::<_, Json>::new(repo) + .fetch_metadata::( + &MetadataPath::from_role(&Role::Root), + &MetadataVersion::None, + None, + None + ) + .await, Err(Error::NotFound) ); }) @@ -271,10 +260,10 @@ mod test { .prefix("rust-tuf") .tempdir() .unwrap(); - let repo = FileSystemRepositoryBuilder::new(temp_dir.path().to_path_buf()) + let repo = FileSystemRepositoryBuilder::::new(temp_dir.path().to_path_buf()) .metadata_prefix("meta") .targets_prefix("targs") - .build::() + .build() .unwrap(); // test that init worked @@ -305,10 +294,13 @@ mod test { assert_eq!(buf.as_slice(), data); } + // RepositoryProvider implementations do not guarantee data is not corrupt. 
let bad_data: &[u8] = b"you're in a desert"; repo.store_target(bad_data, &path).await.unwrap(); let mut read = repo.fetch_target(&path, &target_description).await.unwrap(); - assert!(read.read_to_end(&mut buf).await.is_err()); + buf.clear(); + read.read_to_end(&mut buf).await.unwrap(); + assert_eq!(buf.as_slice(), bad_data); }) } } diff --git a/src/repository/http.rs b/src/repository/http.rs index 1af334f6..b8cb402e 100644 --- a/src/repository/http.rs +++ b/src/repository/http.rs @@ -3,7 +3,6 @@ use futures_io::AsyncRead; use futures_util::compat::{Future01CompatExt, Stream01CompatExt}; use futures_util::future::{BoxFuture, FutureExt}; -use futures_util::io::AsyncReadExt; use futures_util::stream::TryStreamExt; use http::{Response, StatusCode, Uri}; use hyper::body::Body; @@ -15,13 +14,11 @@ use std::io; use std::marker::PhantomData; use url::Url; -use crate::crypto::{self, HashAlgorithm, HashValue}; +use crate::crypto::{HashAlgorithm, HashValue}; use crate::error::Error; use crate::interchange::DataInterchange; -use crate::metadata::{ - Metadata, MetadataPath, MetadataVersion, SignedMetadata, TargetDescription, TargetPath, -}; -use crate::repository::Repository; +use crate::metadata::{MetadataPath, MetadataVersion, TargetDescription, TargetPath}; +use crate::repository::RepositoryProvider; use crate::util::SafeAsyncRead; use crate::Result; @@ -33,11 +30,11 @@ where { uri: Uri, client: Client, - interchange: PhantomData, user_agent: Option, metadata_prefix: Option>, targets_prefix: Option>, min_bytes_per_second: u32, + _interchange: PhantomData, } impl HttpRepositoryBuilder @@ -50,11 +47,11 @@ where HttpRepositoryBuilder { uri: url.to_string().parse::().unwrap(), // This is dangerous, but will only exist for a short time as we migrate APIs. 
client: client, - interchange: PhantomData, user_agent: None, metadata_prefix: None, targets_prefix: None, min_bytes_per_second: 4096, + _interchange: PhantomData, } } @@ -63,11 +60,11 @@ where HttpRepositoryBuilder { uri: uri, client: client, - interchange: PhantomData, user_agent: None, metadata_prefix: None, targets_prefix: None, min_bytes_per_second: 4096, + _interchange: PhantomData, } } @@ -117,11 +114,11 @@ where HttpRepository { uri: self.uri, client: self.client, - interchange: self.interchange, user_agent: user_agent, metadata_prefix: self.metadata_prefix, targets_prefix: self.targets_prefix, min_bytes_per_second: self.min_bytes_per_second, + _interchange: PhantomData, } } } @@ -138,7 +135,7 @@ where metadata_prefix: Option>, targets_prefix: Option>, min_bytes_per_second: u32, - interchange: PhantomData, + _interchange: PhantomData, } // Configuration for urlencoding URI path elements. @@ -243,86 +240,54 @@ where } } -impl Repository for HttpRepository +impl RepositoryProvider for HttpRepository where C: Connect + Sync + 'static, D: DataInterchange + Send + Sync, { - /// This always returns `Err` as storing over HTTP is not yet supported. 
- fn store_metadata<'a, M>( - &'a self, - _: &'a MetadataPath, - _: &'a MetadataVersion, - _: &'a SignedMetadata, - ) -> BoxFuture<'a, Result<()>> - where - M: Metadata + 'static, - { - async { - Err(Error::Opaque( - "Http repo store metadata not implemented".to_string(), - )) - } - .boxed() - } - - fn fetch_metadata<'a, M>( + fn fetch_metadata<'a>( &'a self, meta_path: &'a MetadataPath, version: &'a MetadataVersion, - max_length: Option, - hash_data: Option<(&'static HashAlgorithm, HashValue)>, - ) -> BoxFuture<'a, Result>> - where - M: Metadata + 'static, - { + _max_length: Option, + _hash_data: Option<(&'static HashAlgorithm, HashValue)>, + ) -> BoxFuture<'a, Result>> { + let components = meta_path.components::(&version); async move { - Self::check::(meta_path)?; - - let components = meta_path.components::(&version); let resp = self.get(&self.metadata_prefix, &components).await?; - let mut reader = resp + // TODO(#278) check content length if known and fail early if the payload is too large. + + let reader = resp .into_body() .compat() .map_err(|err| io::Error::new(io::ErrorKind::Other, err)) .into_async_read() - .enforce_minimum_bitrate(self.min_bytes_per_second) - .check_length_and_hash(max_length.unwrap_or(::std::usize::MAX) as u64, hash_data)?; - - let mut buf = Vec::new(); - reader.read_to_end(&mut buf).await?; + .enforce_minimum_bitrate(self.min_bytes_per_second); - Ok(D::from_slice(&buf)?) + let reader: Box = Box::new(reader); + Ok(reader) } .boxed() } - /// This always returns `Err` as storing over HTTP is not yet supported. 
- fn store_target<'a, R>(&'a self, _: R, _: &'a TargetPath) -> BoxFuture<'a, Result<()>> - where - R: AsyncRead + 'a, - { - async { Err(Error::Opaque("Http repo store not implemented".to_string())) }.boxed() - } - fn fetch_target<'a>( &'a self, target_path: &'a TargetPath, - target_description: &'a TargetDescription, + _target_description: &'a TargetDescription, ) -> BoxFuture<'a, Result>> { async move { - let (alg, value) = crypto::hash_preference(target_description.hashes())?; let components = target_path.components(); let resp = self.get(&self.targets_prefix, &components).await?; + // TODO(#278) check content length if known and fail early if the payload is too large. + let reader = resp .into_body() .compat() .map_err(|err| io::Error::new(io::ErrorKind::Other, err)) .into_async_read() - .enforce_minimum_bitrate(self.min_bytes_per_second) - .check_length_and_hash(target_description.length(), Some((alg, value.clone())))?; + .enforce_minimum_bitrate(self.min_bytes_per_second); Ok(Box::new(reader) as Box) } diff --git a/tests/interop/main.rs b/tests/interop/main.rs index cc02ace0..e3e96a7a 100644 --- a/tests/interop/main.rs +++ b/tests/interop/main.rs @@ -35,16 +35,17 @@ //! download targets at each step of the test. 
use futures_executor::block_on; +use futures_util::io::AsyncReadExt; use std::collections::BTreeMap; use std::path::{Path, PathBuf}; use tuf::client::{Client, Config}; use tuf::crypto::KeyId; use tuf::interchange::Json; use tuf::metadata::{ - MetadataPath, MetadataVersion, Role, RootMetadata, SignedMetadata, TargetPath, + MetadataPath, MetadataVersion, RawSignedMetadata, Role, RootMetadata, TargetPath, }; use tuf::repository::{ - EphemeralRepository, FileSystemRepository, FileSystemRepositoryBuilder, Repository, + EphemeralRepository, FileSystemRepository, FileSystemRepositoryBuilder, RepositoryProvider, }; use tuf::Result; @@ -144,7 +145,7 @@ impl TestKeyRotation { TestKeyRotation { test_steps, - local: EphemeralRepository::::new(), + local: EphemeralRepository::new(), expected_targets: BTreeMap::new(), } } @@ -206,10 +207,16 @@ async fn extract_keys(dir: &Path) -> Vec { let remote = init_remote(dir).unwrap(); let root_path = MetadataPath::from_role(&Role::Root); - let metadata: SignedMetadata<_, RootMetadata> = remote + + let mut buf = Vec::new(); + let mut reader = remote .fetch_metadata(&root_path, &MetadataVersion::Number(1), None, None) .await .unwrap(); + reader.read_to_end(&mut buf).await.unwrap(); + let metadata = RawSignedMetadata::::new(buf) + .parse() + .unwrap(); metadata.as_ref().root().key_ids().iter().cloned().collect() } diff --git a/tests/metadata/generate.rs b/tests/metadata/generate.rs index 220bf7cd..89e56eff 100644 --- a/tests/metadata/generate.rs +++ b/tests/metadata/generate.rs @@ -12,7 +12,7 @@ use tuf::metadata::{ MetadataPath, MetadataVersion, Role, RootMetadataBuilder, SnapshotMetadataBuilder, TargetPath, TargetsMetadataBuilder, TimestampMetadataBuilder, VirtualTargetPath, }; -use tuf::repository::{FileSystemRepository, FileSystemRepositoryBuilder, Repository}; +use tuf::repository::{FileSystemRepository, FileSystemRepositoryBuilder, RepositoryStorage}; const KEYS_PATH: &str = "./keys.json"; // These structs and functions are necessary 
to parse keys.json, which contains the keys @@ -113,12 +113,20 @@ async fn update_root( }; let root_path = MetadataPath::from_role(&Role::Root); - repo.store_metadata(&root_path, &MetadataVersion::Number(version), &root) - .await - .unwrap(); - repo.store_metadata(&root_path, &MetadataVersion::None, &root) - .await - .unwrap(); + repo.store_metadata( + &root_path, + &MetadataVersion::Number(version), + root.to_raw().unwrap().as_bytes(), + ) + .await + .unwrap(); + repo.store_metadata( + &root_path, + &MetadataVersion::None, + root.to_raw().unwrap().as_bytes(), + ) + .await + .unwrap(); } // adds a target and updates the non-root metadata files. @@ -178,9 +186,13 @@ async fn add_target( MetadataVersion::None }; - repo.store_metadata(&targets_path, &version_prefix, &targets) - .await - .unwrap(); + repo.store_metadata( + &targets_path, + &version_prefix, + targets.to_raw().unwrap().as_bytes(), + ) + .await + .unwrap(); let snapshot_path = MetadataPath::from_role(&Role::Snapshot); let snapshot = SnapshotMetadataBuilder::new() @@ -191,9 +203,13 @@ async fn add_target( .signed::(&keys.get("snapshot").unwrap()) .unwrap(); - repo.store_metadata(&snapshot_path, &version_prefix, &snapshot) - .await - .unwrap(); + repo.store_metadata( + &snapshot_path, + &version_prefix, + snapshot.to_raw().unwrap().as_bytes(), + ) + .await + .unwrap(); let timestamp_path = MetadataPath::from_role(&Role::Timestamp); let timestamp = TimestampMetadataBuilder::from_snapshot(&snapshot, &[HashAlgorithm::Sha256]) @@ -204,9 +220,13 @@ async fn add_target( .unwrap(); // Timestamp doesn't require a version prefix even in consistent_snapshot. 
- repo.store_metadata(×tamp_path, &MetadataVersion::None, ×tamp) - .await - .unwrap(); + repo.store_metadata( + ×tamp_path, + &MetadataVersion::None, + timestamp.to_raw().unwrap().as_bytes(), + ) + .await + .unwrap(); } async fn generate_repos(dir: &str, consistent_snapshot: bool) -> tuf::Result<()> { diff --git a/tests/simple_example.rs b/tests/simple_example.rs index 0a2954ba..75f12dee 100644 --- a/tests/simple_example.rs +++ b/tests/simple_example.rs @@ -6,7 +6,7 @@ use tuf::metadata::{ MetadataPath, MetadataVersion, RootMetadataBuilder, SnapshotMetadataBuilder, TargetDescription, TargetPath, TargetsMetadataBuilder, TimestampMetadataBuilder, VirtualTargetPath, }; -use tuf::repository::{EphemeralRepository, Repository}; +use tuf::repository::{EphemeralRepository, RepositoryStorage}; use tuf::Result; // Ironically, this is far from simple, but it's as simple as it can be made. @@ -74,8 +74,8 @@ async fn run_tests(config: Config, consistent_snapshots: bool) where T: PathTranslator, { - let mut remote = EphemeralRepository::::new(); - let root_key_ids = init_server(&mut remote, &config, consistent_snapshots) + let remote = EphemeralRepository::new(); + let root_key_ids = init_server(&remote, &config, consistent_snapshots) .await .unwrap(); init_client(&root_key_ids, remote, config).await.unwrap(); @@ -89,7 +89,7 @@ async fn init_client( where T: PathTranslator, { - let local = EphemeralRepository::::new(); + let local = EphemeralRepository::new(); let mut client = Client::with_trusted_root_keyids( config, &MetadataVersion::Number(1), @@ -105,7 +105,7 @@ where } async fn init_server<'a, T>( - remote: &'a mut EphemeralRepository, + remote: &'a EphemeralRepository, config: &'a Config, consistent_snapshot: bool, ) -> Result> @@ -130,10 +130,18 @@ where let root_path = MetadataPath::new("root")?; remote - .store_metadata(&root_path, &MetadataVersion::Number(1), &signed) + .store_metadata( + &root_path, + &MetadataVersion::Number(1), + signed.to_raw().unwrap().as_bytes(), 
+ ) .await?; remote - .store_metadata(&root_path, &MetadataVersion::None, &signed) + .store_metadata( + &root_path, + &MetadataVersion::None, + signed.to_raw().unwrap().as_bytes(), + ) .await?; //// build the targets //// @@ -162,10 +170,18 @@ where let targets_path = &MetadataPath::new("targets")?; remote - .store_metadata(&targets_path, &MetadataVersion::Number(1), &targets) + .store_metadata( + &targets_path, + &MetadataVersion::Number(1), + targets.to_raw().unwrap().as_bytes(), + ) .await?; remote - .store_metadata(&targets_path, &MetadataVersion::None, &targets) + .store_metadata( + &targets_path, + &MetadataVersion::None, + targets.to_raw().unwrap().as_bytes(), + ) .await?; //// build the snapshot //// @@ -176,10 +192,18 @@ where let snapshot_path = MetadataPath::new("snapshot")?; remote - .store_metadata(&snapshot_path, &MetadataVersion::Number(1), &snapshot) + .store_metadata( + &snapshot_path, + &MetadataVersion::Number(1), + snapshot.to_raw().unwrap().as_bytes(), + ) .await?; remote - .store_metadata(&snapshot_path, &MetadataVersion::None, &snapshot) + .store_metadata( + &snapshot_path, + &MetadataVersion::None, + snapshot.to_raw().unwrap().as_bytes(), + ) .await?; //// build the timestamp //// @@ -189,10 +213,18 @@ where let timestamp_path = MetadataPath::new("timestamp")?; remote - .store_metadata(×tamp_path, &MetadataVersion::Number(1), ×tamp) + .store_metadata( + ×tamp_path, + &MetadataVersion::Number(1), + timestamp.to_raw().unwrap().as_bytes(), + ) .await?; remote - .store_metadata(×tamp_path, &MetadataVersion::None, ×tamp) + .store_metadata( + ×tamp_path, + &MetadataVersion::None, + timestamp.to_raw().unwrap().as_bytes(), + ) .await?; Ok(vec![root_key.key_id().clone()])