migrator: end-to-end test #956

Merged (2 commits) on Jul 2, 2020
8 changes: 5 additions & 3 deletions sources/Cargo.lock

Some generated files are not rendered by default.

6 changes: 5 additions & 1 deletion sources/api/migration/migrator/Cargo.toml
@@ -13,7 +13,7 @@ lazy_static = "1.2"
 log = "0.4"
 lz4 = "1.23.1"
 nix = "0.17"
-pentacle = "0.1.1"
+pentacle = "0.2.0"
 rand = { version = "0.7", default-features = false, features = ["std"] }
 regex = "1.1"
 semver = "0.9"
@@ -27,6 +27,10 @@ url = "2.1.1"
 [build-dependencies]
 cargo-readme = "3.1"
 
+[dev-dependencies]
+chrono = "0.4.11"
+storewolf = { path = "../../storewolf" }
+
 [[bin]]
 name = "migrator"
 path = "src/main.rs"
6 changes: 4 additions & 2 deletions sources/api/migration/migrator/src/main.rs
@@ -46,6 +46,8 @@ use update_metadata::{load_manifest, MIGRATION_FILENAME_RE};
 mod args;
 mod direction;
 mod error;
+#[cfg(test)]
+mod test;
 
 lazy_static! {
     /// This is the last version of Bottlerocket that supports *only* unsigned migrations.
@@ -131,7 +133,7 @@ where
     Version::parse(version_str).context(error::InvalidDataStoreVersion { path: &patch })
 }
 
-fn run(args: &Args) -> Result<()> {
+pub(crate) fn run(args: &Args) -> Result<()> {
     // Get the directory we're working in.
     let datastore_dir = args
         .datastore_path
@@ -782,7 +784,7 @@ where
 // =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^=
 
 #[cfg(test)]
-mod test {
+mod main_test {
     use super::*;
 
     #[test]
248 changes: 248 additions & 0 deletions sources/api/migration/migrator/src/test.rs
@@ -0,0 +1,248 @@
//! Provides an end-to-end test of `migrator` via the `run` function. This module is conditionally
//! compiled for cfg(test) only.
use crate::args::Args;
use crate::run;
use chrono::{DateTime, Utc};
use semver::Version;
use std::fs;
use std::fs::File;
use std::io::Write;
use std::path::{Path, PathBuf};
use tempfile::TempDir;

/// Provides the path to a folder where test data files reside.
fn test_data() -> PathBuf {
let mut p = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
p.pop();
p.join("migrator").join("tests").join("data")
}

/// Returns the filepath to a `root.json` file stored in-tree for testing. This file declares
/// an expiration date of `1970-01-01` to ensure that migrator succeeds even when the TUF
/// repository is expired.
fn root() -> PathBuf {
test_data()
.join("expired-root.json")
.canonicalize()
.unwrap()
}

/// Returns the filepath to a private key, stored in-tree and used only for testing.
fn pem() -> PathBuf {
    test_data().join("snakeoil.pem").canonicalize().unwrap()
}
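// Note (assumption, stated for clarity): `snakeoil.pem` is presumed to be the private half of
// the RSA public key declared in `expired-root.json`; the signing step below can only succeed
// if the two match.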

/// The name of the first test migration. The `b-` prefix ensures that the listed order of
/// migrations differs from their alphabetical order.
const FIRST_MIGRATION: &str = "b-first-migration";

/// The name of the second test migration. The `a-` prefix ensures that the listed order of
/// migrations differs from their alphabetical order.
const SECOND_MIGRATION: &str = "a-second-migration";

/// Creates a script that will serve as a migration during testing. The script writes its
/// migration name to a file named `result.txt` in the parent directory of the datastore.
/// `pentacle` does not retain the name of the executing binary or script, so we take the
/// `migration_name` as input and 'hardcode' it into the script.
fn create_test_migration<S: AsRef<str>>(migration_name: S) -> String {
format!(
r#"#!/usr/bin/env bash
set -eo pipefail
migration_name="{}"
datastore_parent_dir="$(dirname "${{3}}")"
outfile="${{datastore_parent_dir}}/result.txt"
echo "${{migration_name}}:" "${{@}}" >> "${{outfile}}"
"#,
migration_name.as_ref()
)
}
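// Hedged illustration (not part of this change): judging from the script's use of "${3}" and
// the assertions in the tests below, migrator presumably invokes each migration roughly as
// `<migration> --forward --source-datastore <path> ...`, so each run appends a line like
//
//     b-first-migration: --forward --source-datastore /tmp/<dir>/... <more args>
//
// to `result.txt`.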

/// Holds the lifetime of a `TempDir` inside which a datastore directory and links are held for
/// testing.
struct TestDatastore {
tmp: TempDir,
datastore: PathBuf,
}

impl TestDatastore {
/// Creates a `TempDir`, sets up the datastore links needed to represent the `from_version`
/// and returns a `TestDatastore` populated with this information.
fn new(from_version: Version) -> Self {
let tmp = TempDir::new().unwrap();
let datastore = storewolf::create_new_datastore(tmp.path(), Some(from_version)).unwrap();
TestDatastore { tmp, datastore }
}
}
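// Note (assumption, for orientation): `storewolf::create_new_datastore` is expected to build
// the versioned symlink chain that migrator follows (roughly `current` -> `v0.99` -> `v0.99.0`
// -> the concrete datastore directory) inside the `TempDir`.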

/// Represents a TUF repository, which is held in a tempdir.
struct TestRepo {
/// This field preserves the lifetime of the TempDir even though we never read it. When
/// `TestRepo` goes out of scope, `TempDir` will remove the temporary directory.
_tuf_dir: TempDir,
metadata_path: PathBuf,
targets_path: PathBuf,
}

/// LZ4-compresses `source` bytes into a new file at `destination`.
fn compress(source: &[u8], destination: &Path) {
let output_file = File::create(destination).unwrap();
let mut encoder = lz4::EncoderBuilder::new()
.level(4)
.build(output_file)
.unwrap();
encoder.write_all(source).unwrap();
let (_output, result) = encoder.finish();
result.unwrap()
}
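// A minimal round-trip sketch (illustrative only, not part of this change) showing that a file
// written by `compress` can be read back with the same `lz4` crate:
//
//     use std::io::Read;
//     let dir = TempDir::new().unwrap();
//     let path = dir.path().join("script.lz4");
//     compress(b"#!/usr/bin/env bash\n", &path);
//     let mut decoder = lz4::Decoder::new(File::open(&path).unwrap()).unwrap();
//     let mut bytes = Vec::new();
//     decoder.read_to_end(&mut bytes).unwrap();
//     assert_eq!(bytes, b"#!/usr/bin/env bash\n");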

/// Creates a test repository with a couple of versions defined in the manifest and a couple of
/// migrations. See the test description for more info.
fn create_test_repo() -> TestRepo {
// This is where the signed TUF repo will exist when we are done. It is the root directory
// of the `TestRepo` that we return.
let test_repo_dir = TempDir::new().unwrap();
let metadata_path = test_repo_dir.path().join("metadata");
let targets_path = test_repo_dir.path().join("targets");

// This is where we will stage the TUF repository targets prior to signing them. The signed
// repo's `targets` directory is populated with symlinks back to these files (see
// `link_targets` below), so we keep both in the same `TempDir`.
let tuf_indir = test_repo_dir.path();

// Create a Manifest and save it to `tuf_indir` for signing.
let mut manifest = update_metadata::Manifest::default();
// Insert the following migrations into the manifest. Note that the first migration sorts
// *later* than the second migration alphabetically. This helps ensure that migrations run in
// their listed order (rather than in sorted order, as in previous implementations).
manifest.migrations.insert(
(Version::new(0, 99, 0), Version::new(0, 99, 1)),
vec![FIRST_MIGRATION.into(), SECOND_MIGRATION.into()],
);
update_metadata::write_file(tuf_indir.join("manifest.json").as_path(), &manifest).unwrap();

// Create a script that we can use as the 'migration' that migrator will run. This script will
// write its name and arguments to a file named result.txt in the directory that is the parent
// of --source-datastore. result.txt can then be used to see which migrations ran, and in what
// order. Note that the tests are sensitive to the order and number of arguments passed: if
// --source-datastore is given at a different position, the tests will fail and the script
// will need to be updated.
let migration_a = create_test_migration(FIRST_MIGRATION);
let migration_b = create_test_migration(SECOND_MIGRATION);

// Save lz4-compressed copies of the migration scripts into `tuf_indir`.
compress(migration_a.as_bytes(), &tuf_indir.join(FIRST_MIGRATION));
compress(migration_b.as_bytes(), &tuf_indir.join(SECOND_MIGRATION));

// Create and sign the TUF repository.
let mut editor = tough::editor::RepositoryEditor::new(root()).unwrap();
let long_ago: DateTime<Utc> = DateTime::parse_from_rfc3339("1970-01-01T00:00:00Z")
.unwrap()
.into();
let one = std::num::NonZeroU64::new(1).unwrap();
editor
.targets_version(one)
.targets_expires(long_ago)
.snapshot_version(one)
.snapshot_expires(long_ago)
.timestamp_version(one)
.timestamp_expires(long_ago);

fs::read_dir(tuf_indir)
.unwrap()
.filter(|dir_entry_result| {
if let Ok(dir_entry) = dir_entry_result {
return dir_entry.path().is_file();
}
false
})
.for_each(|dir_entry_result| {
let dir_entry = dir_entry_result.unwrap();
editor.add_target(
dir_entry.file_name().to_str().unwrap().into(),
tough::schema::Target::from_path(dir_entry.path()).unwrap(),
);
});
let signed_repo = editor
.sign(&[Box::new(tough::key_source::LocalKeySource { path: pem() })])
.unwrap();
signed_repo.link_targets(tuf_indir, &targets_path).unwrap();
signed_repo.write(&metadata_path).unwrap();

TestRepo {
_tuf_dir: test_repo_dir,
metadata_path,
targets_path,
}
}
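// Note (assumption): because `expired-root.json` sets `consistent_snapshot: true`, the target
// files that `link_targets` creates under `targets_path` are expected to carry digest-prefixed
// names (e.g. `<sha256>.b-first-migration`); migrator finds them via the signed metadata rather
// than by raw filename.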

/// Tests the migrator program end-to-end using the `run` function. Creates a TUF repo in a
/// tempdir which includes a `manifest.json` with a couple of migrations:
/// ```
/// "(0.99.0, 0.99.1)": [
/// "b-first-migration",
/// "a-second-migration"
/// ]
/// ```
///
/// The two 'migrations' are instances of the same bash script (see `create_test_repo`), which
/// writes its name (i.e. the migration name) and its arguments to a file at `./result.txt`.
/// Since migrations run in the context of the datastore directory, `result.txt` is written one
/// directory above the datastore. We can then inspect the contents of `result.txt` to see that
/// the expected migrations ran in the correct order.
#[test]
fn migrate_forward() {
let from_version = Version::parse("0.99.0").unwrap();
let to_version = Version::parse("0.99.1").unwrap();
let test_datastore = TestDatastore::new(from_version);
let test_repo = create_test_repo();
let args = Args {
datastore_path: test_datastore.datastore.clone(),
log_level: log::LevelFilter::Info,
migration_directory: test_repo.targets_path.clone(),
migrate_to_version: to_version,
root_path: root(),
metadata_directory: test_repo.metadata_path.clone(),
};
run(&args).unwrap();
// The migrations should write to a file named result.txt.
let output_file = test_datastore.tmp.path().join("result.txt");
let contents = std::fs::read_to_string(&output_file).unwrap();
let lines: Vec<&str> = contents.split('\n').collect();
// Expect two migration lines plus an empty element from the trailing newline.
assert_eq!(lines.len(), 3);
let first_line = *lines.get(0).unwrap();
let want = format!("{}: --forward", FIRST_MIGRATION);
let got: String = first_line.chars().take(want.len()).collect();
assert_eq!(got, want);
let second_line = *lines.get(1).unwrap();
let want = format!("{}: --forward", SECOND_MIGRATION);
let got: String = second_line.chars().take(want.len()).collect();
assert_eq!(got, want);
}

/// This test ensures that migrations run in reverse order, with the `--backward` flag, when
/// migrating from a newer to an older version. See `migrate_forward` for a description of how
/// these tests work.
#[test]
fn migrate_backward() {
let from_version = Version::parse("0.99.1").unwrap();
let to_version = Version::parse("0.99.0").unwrap();
let test_datastore = TestDatastore::new(from_version);
let test_repo = create_test_repo();
let args = Args {
datastore_path: test_datastore.datastore.clone(),
log_level: log::LevelFilter::Info,
migration_directory: test_repo.targets_path.clone(),
migrate_to_version: to_version,
root_path: root(),
metadata_directory: test_repo.metadata_path.clone(),
};
run(&args).unwrap();
let output_file = test_datastore.tmp.path().join("result.txt");
let contents = std::fs::read_to_string(&output_file).unwrap();
let lines: Vec<&str> = contents.split('\n').collect();
// Expect two migration lines plus an empty element from the trailing newline.
assert_eq!(lines.len(), 3);
let first_line = *lines.get(0).unwrap();
let want = format!("{}: --backward", SECOND_MIGRATION);
let got: String = first_line.chars().take(want.len()).collect();
assert_eq!(got, want);
let second_line = *lines.get(1).unwrap();
let want = format!("{}: --backward", FIRST_MIGRATION);
let got: String = second_line.chars().take(want.len()).collect();
assert_eq!(got, want);
}
50 changes: 50 additions & 0 deletions sources/api/migration/migrator/tests/data/expired-root.json
@@ -0,0 +1,50 @@
{
"signed": {
"_type": "root",
"spec_version": "1.0.0",
"consistent_snapshot": true,
"version": 1,
"expires": "1970-01-01T00:00:00Z",
"keys": {
"febb06e5853878c3b2447c5100d327ebcf0807832c942f5e93ab28e0e4644684": {
"keytype": "rsa",
"keyval": {
"public": "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA7LU2gyhDDc7jglt2h3+q\n3+pHUprpe5hX2W4yE8NlM3U7EQRjiyd9doyXGAanBMd8IyqS3Q2ehuo2TZ5aVUFh\n+s/ZboEj+VMNPwPYhRv4QnNT79/kFsA5z0jMDFxCr3+IT2NFJv9GV+83PFVrvTZX\nNeqIZiAT/EJDENn7wS8p8G+eC/XkUcyA5kWxHXDdBgs+Xd+nXkh2v/8/lFKDJ+A4\nZlF9cIuAiWB7vNRMg29bhsLreD3F73O7iJCaFfg3I9EpofVUWWNZg4VM6Mmjksav\nFneTgjXTN9wPnNTjCBrUGwChLklBtInm+9C5iIfEoysqKZSwjeF9gchOANBlu7PD\nxwIDAQAB\n-----END PUBLIC KEY-----"
},
"scheme": "rsassa-pss-sha256"
}
},
"roles": {
"targets": {
"keyids": [
"febb06e5853878c3b2447c5100d327ebcf0807832c942f5e93ab28e0e4644684"
],
"threshold": 1
},
"snapshot": {
"keyids": [
"febb06e5853878c3b2447c5100d327ebcf0807832c942f5e93ab28e0e4644684"
],
"threshold": 1
},
"timestamp": {
"keyids": [
"febb06e5853878c3b2447c5100d327ebcf0807832c942f5e93ab28e0e4644684"
],
"threshold": 1
},
"root": {
"keyids": [
"febb06e5853878c3b2447c5100d327ebcf0807832c942f5e93ab28e0e4644684"
],
"threshold": 1
}
}
},
"signatures": [
{
"keyid": "febb06e5853878c3b2447c5100d327ebcf0807832c942f5e93ab28e0e4644684",
"sig": "4ed06d6bd1b8cc145c2a872e6705f37038f52534c01d4f52c7bd0e520aa46bfb83ee1987fdbbeb415b3d42ed6f85abed640d9cb4e403a20f56a3d6661b00a174411b927cb064e214632f0bb5d7b1b2319d8064cedb58ae1abc68908ad8e6ce2c451b0d3aafbff3700d6cd74517ccf10f5f00ee0eb16eb4272afc3a9021ff9be8b4e00a69b24607039a8230803eb537293ce6b244d77cd58db512af7ee0a976612a7498f1b31c7e5918925ca3846e5d7f419e9d5825af16290a36eb1b8465de73b8bc1bbaf2e1ae0f7eeb6999fa06f09bee19cd30d8c6848c08d33970e66d8f49704e41c4f2c933be3a77a8a949309cdcdbd7c2262ca89243aff0b5e450e45d64"
}
]
}