From 752d49715120061b7c75dd19429384480c0f1f8e Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Wed, 19 Feb 2020 10:39:03 -0800 Subject: [PATCH 1/3] Fix type in RemoveSettingMigration so we don't need to copy strings --- .../api/migration/migration-helpers/src/common_migrations.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/workspaces/api/migration/migration-helpers/src/common_migrations.rs b/workspaces/api/migration/migration-helpers/src/common_migrations.rs index 9681f62d743..3675c5ead3c 100644 --- a/workspaces/api/migration/migration-helpers/src/common_migrations.rs +++ b/workspaces/api/migration/migration-helpers/src/common_migrations.rs @@ -33,14 +33,14 @@ impl Migration for AddSettingMigration { /// We use this migration when we remove a setting from the model, so the new version doesn't see /// it and error. -pub struct RemoveSettingMigration(String); +pub struct RemoveSettingMigration(pub &'static str); impl Migration for RemoveSettingMigration { /// Newer versions don't know about the setting; we remove it so that new versions don't see /// it and fail deserialization. (The setting must be defaulted or generated in old versions, /// and safe to remove.) 
fn forward(&mut self, mut input: MigrationData) -> Result { - if let Some(data) = input.data.remove(&self.0) { + if let Some(data) = input.data.remove(self.0) { println!("Removed {}, which was set to '{}'", self.0, data); } else { println!("Found no {} to remove", self.0); From cac0bc9dd422a6097b99893becee243bf94e0d4b Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Wed, 19 Feb 2020 16:38:05 -0800 Subject: [PATCH 2/3] apiserver: don't fail list_transactions if '/pending' doesn't exist --- .../api/apiserver/src/datastore/filesystem.rs | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/workspaces/api/apiserver/src/datastore/filesystem.rs b/workspaces/api/apiserver/src/datastore/filesystem.rs index d07273f016e..ab6ed80822c 100644 --- a/workspaces/api/apiserver/src/datastore/filesystem.rs +++ b/workspaces/api/apiserver/src/datastore/filesystem.rs @@ -550,7 +550,19 @@ impl DataStore for FilesystemDataStore { ); for entry in walker { - let entry = entry.context(error::ListKeys)?; + let entry = match entry { + Ok(entry) => entry, + Err(e) => { + if let Some(io_error) = e.io_error() { + // If there's no pending directory, that's OK, just return empty set. + if io_error.kind() == io::ErrorKind::NotFound { + break; + } + } + return Err(e).context(error::ListKeys); + } + }; + if entry.file_type().is_dir() { // The directory name should be valid UTF-8, encoded by encode_path_component, // or the data store has been corrupted. From f8a95cfe71cb1db54a7424b6145c8cd381a0b39e Mon Sep 17 00:00:00 2001 From: Tom Kirchner Date: Fri, 14 Feb 2020 14:15:21 -0800 Subject: [PATCH 3/3] Remove data store version, use OS version instead We found confusion between data store versions and OS versions, so this simplifies the system to only use OS version. If there's been no change in the data store, which is now determined by there being no migrations, then the new data store will simply be a link to the last version that did have changes. 
These are the changes at a high level: * Remove the data_store_version library and its type, instead using semver::Version * Cascade this change through migrator, storewolf, update_metadata, and updog * Remove OS version -> data store version mapping in updog * Remove the data-store-version file, instead using OS version from bottlerocket-release * In migrator, just symlink to previous data store if there are no migrations * In migrator, flip symlinks for current, major, minor, and patch, since any could change * In storewolf, also create the new patch-level symlink One change deserves more description. It's an extension of the problem from #644 that only showed up while testing this change, and was made easier to fix by these changes, so it was done together. Background: In migrator, we were giving each migration the same source and target data store. Migrations make whatever changes they want to a MigrationData, each key of which is then written to the data store. Problem: Let's say the system has settings A and B. One migration adds setting C. This is written to the target data store. Another migration tries to remove setting B. It does so in memory, but B has already been written to the target by the first migration, so it's to no avail. Solution: Chain the input and output of migrations in order. The first migration receives the source data store and adds setting C, writing out to a *new* data store path it was given. The second migration is given that new data store as a source; it removes setting B and writes out to another new data store path it was given. At the end of the chain, the final data store is kept and the intermediate ones are removed. 
--- Release.toml | 1 - packages/workspaces/data-store-version | 1 - packages/workspaces/migrator.service | 2 +- packages/workspaces/workspaces.spec | 3 - workspaces/Cargo.lock | 22 +- workspaces/Cargo.toml | 1 - workspaces/api/README.md | 2 +- workspaces/api/apiserver/README.md | 2 +- workspaces/api/data_store_version/Cargo.toml | 19 -- workspaces/api/data_store_version/README.md | 14 - workspaces/api/data_store_version/README.tpl | 9 - workspaces/api/data_store_version/build.rs | 32 -- workspaces/api/data_store_version/src/lib.rs | 236 ------------- workspaces/api/migration/README.md | 175 +++------- workspaces/api/migration/migrator/Cargo.toml | 3 +- workspaces/api/migration/migrator/README.md | 8 +- workspaces/api/migration/migrator/src/args.rs | 23 +- .../api/migration/migrator/src/direction.rs | 22 +- .../api/migration/migrator/src/error.rs | 22 +- workspaces/api/migration/migrator/src/lib.rs | 12 +- workspaces/api/migration/migrator/src/main.rs | 315 +++++++++++++----- workspaces/api/storewolf/Cargo.toml | 3 +- workspaces/api/storewolf/README.md | 2 +- workspaces/api/storewolf/src/main.rs | 56 ++-- workspaces/bottlerocket-release/src/lib.rs | 2 +- workspaces/updater/update_metadata/Cargo.toml | 1 - workspaces/updater/update_metadata/src/de.rs | 14 +- .../updater/update_metadata/src/error.rs | 18 +- workspaces/updater/update_metadata/src/lib.rs | 43 ++- workspaces/updater/update_metadata/src/se.rs | 4 +- workspaces/updater/updog/Cargo.toml | 2 +- workspaces/updater/updog/src/bin/updata.rs | 165 +-------- workspaces/updater/updog/src/error.rs | 40 +-- workspaces/updater/updog/src/main.rs | 208 ++++-------- .../updater/updog/tests/data/bad-bound.json | 3 +- .../updog/tests/data/duplicate-bound.json | 3 +- .../updater/updog/tests/data/example.json | 9 +- .../updater/updog/tests/data/example_2.json | 4 - .../updater/updog/tests/data/example_3.json | 7 +- .../updater/updog/tests/data/migrations.json | 14 +- .../updater/updog/tests/data/multiple.json | 3 +- 
.../updater/updog/tests/data/regret.json | 3 +- .../updater/updog/tests/data/release.toml | 6 +- 43 files changed, 500 insertions(+), 1034 deletions(-) delete mode 100644 packages/workspaces/data-store-version delete mode 100644 workspaces/api/data_store_version/Cargo.toml delete mode 100644 workspaces/api/data_store_version/README.md delete mode 100644 workspaces/api/data_store_version/README.tpl delete mode 100644 workspaces/api/data_store_version/build.rs delete mode 100644 workspaces/api/data_store_version/src/lib.rs diff --git a/Release.toml b/Release.toml index 998aad2dbf1..6e331a7c480 100644 --- a/Release.toml +++ b/Release.toml @@ -1,5 +1,4 @@ version = "0.2.2" -datastore_version = "0.3" [migrations] # Happy path - skip over 0.1, which had an issue with compressed migrations. Normal new migrations go here. diff --git a/packages/workspaces/data-store-version b/packages/workspaces/data-store-version deleted file mode 100644 index 3b04cfb60da..00000000000 --- a/packages/workspaces/data-store-version +++ /dev/null @@ -1 +0,0 @@ -0.2 diff --git a/packages/workspaces/migrator.service b/packages/workspaces/migrator.service index 5703da25371..8fd0ea22812 100644 --- a/packages/workspaces/migrator.service +++ b/packages/workspaces/migrator.service @@ -3,7 +3,7 @@ Description=Bottlerocket data store migrator [Service] Type=oneshot -ExecStart=/usr/bin/migrator --datastore-path /var/lib/bottlerocket/datastore/current --migration-directories /var/lib/bottlerocket/datastore/migrations --migrate-to-version-from-file /usr/share/bottlerocket/data-store-version +ExecStart=/usr/bin/migrator --datastore-path /var/lib/bottlerocket/datastore/current --migration-directories /var/lib/bottlerocket/datastore/migrations --migrate-to-version-from-os-release RemainAfterExit=true StandardError=journal+console diff --git a/packages/workspaces/workspaces.spec b/packages/workspaces/workspaces.spec index 595adfeec1a..f2837114cfb 100644 --- a/packages/workspaces/workspaces.spec +++ 
b/packages/workspaces/workspaces.spec @@ -9,7 +9,6 @@ Summary: Bottlerocket's first-party code License: Apache-2.0 OR MIT # sources < 100: misc -Source1: data-store-version Source2: api-sysusers.conf # Taken from https://github.com/awslabs/amazon-eks-ami/blob/master/files/eni-max-pods.txt Source3: eni-max-pods @@ -217,7 +216,6 @@ done install -d %{buildroot}%{migration_dir} install -d %{buildroot}%{_cross_datadir}/bottlerocket -install -p -m 0644 %{S:1} %{buildroot}%{_cross_datadir}/bottlerocket install -d %{buildroot}%{_cross_sysusersdir} install -p -m 0644 %{S:2} %{buildroot}%{_cross_sysusersdir}/api.conf @@ -251,7 +249,6 @@ install -p -m 0644 %{S:201} %{buildroot}%{_cross_tmpfilesdir}/host-containers.co %{_cross_bindir}/apiserver %{_cross_unitdir}/apiserver.service %{_cross_unitdir}/migrator.service -%{_cross_datadir}/bottlerocket/data-store-version %{_cross_sysusersdir}/api.conf %files -n %{_cross_os}apiclient diff --git a/workspaces/Cargo.lock b/workspaces/Cargo.lock index 61176379581..4dc5bb81f2b 100644 --- a/workspaces/Cargo.lock +++ b/workspaces/Cargo.lock @@ -726,19 +726,6 @@ dependencies = [ "syn 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "data_store_version" -version = "0.1.0" -dependencies = [ - "cargo-readme 3.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_plain 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "snafu 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "derive_more" version = "0.15.0" @@ -1579,13 +1566,14 @@ dependencies = [ name = "migrator" version = "0.1.0" dependencies = [ + 
"bottlerocket-release 0.1.0", "cargo-readme 3.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "data_store_version 0.1.0", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "nix 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", "regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "simplelog 0.7.4 (registry+https://github.com/rust-lang/crates.io-index)", "snafu 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2643,11 +2631,12 @@ name = "storewolf" version = "0.1.0" dependencies = [ "apiserver 0.1.0", + "bottlerocket-release 0.1.0", "cargo-readme 3.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "data_store_version 0.1.0", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "models 0.1.0", "rand 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", + "semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "simplelog 0.7.4 (registry+https://github.com/rust-lang/crates.io-index)", "snafu 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", "toml 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3226,7 +3215,6 @@ name = "update_metadata" version = "0.1.0" dependencies = [ "chrono 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", - "data_store_version 0.1.0", "migrator 0.1.0", "rand 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", "regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3241,8 +3229,8 @@ dependencies = [ name = "updog" version = "0.1.0" dependencies = [ + "bottlerocket-release 0.1.0", "chrono 0.4.10 
(registry+https://github.com/rust-lang/crates.io-index)", - "data_store_version 0.1.0", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "lz4 1.23.1 (registry+https://github.com/rust-lang/crates.io-index)", "migrator 0.1.0", diff --git a/workspaces/Cargo.toml b/workspaces/Cargo.toml index 171c4c3fe80..3e661c099e9 100644 --- a/workspaces/Cargo.toml +++ b/workspaces/Cargo.toml @@ -3,7 +3,6 @@ members = [ "api/apiserver", "api/apiclient", "api/bork", - "api/data_store_version", "api/moondog", "api/netdog", "api/sundog", diff --git a/workspaces/api/README.md b/workspaces/api/README.md index c10942418ab..53a3e4ee2c2 100644 --- a/workspaces/api/README.md +++ b/workspaces/api/README.md @@ -39,7 +39,7 @@ Further docs: * [Migration system](migration/) The migrator ensures the data store is up to date by running any applicable data store migrations. -The existing data store format version is found by looking at the symlink naming in `/var/lib/bottlerocket/datastore`, and the incoming data store format version is found by looking at `/usr/share/bottlerocket/data-store-version` in the booting image. +The existing data store format version is found by looking at the symlink naming in `/var/lib/bottlerocket/datastore`, and the incoming data store format version is found by looking at `/etc/os-release` in the booting image. On first boot, [storewolf](#storewolf) hasn’t run yet, so there’s no data store, so the migrator has nothing to do. diff --git a/workspaces/api/apiserver/README.md b/workspaces/api/apiserver/README.md index 1f137174cca..39434bd477c 100644 --- a/workspaces/api/apiserver/README.md +++ b/workspaces/api/apiserver/README.md @@ -79,4 +79,4 @@ See `../../apiclient/README.md` for client examples. ## Colophon -This text was generated using [cargo-readme](https://crates.io/crates/cargo-readme), and includes the rustdoc from `src/lib.rs`. 
+This text was generated using [cargo-readme](https://crates.io/crates/cargo-readme), and includes the rustdoc from `src/lib.rs`. \ No newline at end of file diff --git a/workspaces/api/data_store_version/Cargo.toml b/workspaces/api/data_store_version/Cargo.toml deleted file mode 100644 index 19a30a335ad..00000000000 --- a/workspaces/api/data_store_version/Cargo.toml +++ /dev/null @@ -1,19 +0,0 @@ -[package] -name = "data_store_version" -version = "0.1.0" -authors = ["mrowicki "] -license = "Apache-2.0 OR MIT" -edition = "2018" -publish = false - -[dependencies] -lazy_static = "1.2" -log = "0.4" -regex = "1.1" -serde = { version = "1.0", features = ["derive"] } -serde_plain = "0.3.0" - -snafu = "0.6" - -[build-dependencies] -cargo-readme = "3.1" diff --git a/workspaces/api/data_store_version/README.md b/workspaces/api/data_store_version/README.md deleted file mode 100644 index 6b697ed656f..00000000000 --- a/workspaces/api/data_store_version/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# data_store_version - -Current version: 0.1.0 - -## Background - -This library handles versioning of data stores - primarily the detection and creation of -Version objects from various inputs. - -It is especially helpful during data store migrations, and is also used for data store creation. - -## Colophon - -This text was generated from `README.tpl` using [cargo-readme](https://crates.io/crates/cargo-readme), and includes the rustdoc from `src/lib.rs`. \ No newline at end of file diff --git a/workspaces/api/data_store_version/README.tpl b/workspaces/api/data_store_version/README.tpl deleted file mode 100644 index 91fb62910c8..00000000000 --- a/workspaces/api/data_store_version/README.tpl +++ /dev/null @@ -1,9 +0,0 @@ -# {{crate}} - -Current version: {{version}} - -{{readme}} - -## Colophon - -This text was generated from `README.tpl` using [cargo-readme](https://crates.io/crates/cargo-readme), and includes the rustdoc from `src/lib.rs`. 
diff --git a/workspaces/api/data_store_version/build.rs b/workspaces/api/data_store_version/build.rs deleted file mode 100644 index 86c7bbc026e..00000000000 --- a/workspaces/api/data_store_version/build.rs +++ /dev/null @@ -1,32 +0,0 @@ -// Automatically generate README.md from rustdoc. - -use std::env; -use std::fs::File; -use std::io::Write; -use std::path::PathBuf; - -fn main() { - // Check for environment variable "SKIP_README". If it is set, - // skip README generation - if env::var_os("SKIP_README").is_some() { - return; - } - - let mut source = File::open("src/lib.rs").unwrap(); - let mut template = File::open("README.tpl").unwrap(); - - let content = cargo_readme::generate_readme( - &PathBuf::from("."), // root - &mut source, // source - Some(&mut template), // template - // The "add x" arguments don't apply when using a template. - true, // add title - false, // add badges - false, // add license - true, // indent headings - ) - .unwrap(); - - let mut readme = File::create("README.md").unwrap(); - readme.write_all(content.as_bytes()).unwrap(); -} diff --git a/workspaces/api/data_store_version/src/lib.rs b/workspaces/api/data_store_version/src/lib.rs deleted file mode 100644 index 3f57c4dc098..00000000000 --- a/workspaces/api/data_store_version/src/lib.rs +++ /dev/null @@ -1,236 +0,0 @@ -/*! -# Background - -This library handles versioning of data stores - primarily the detection and creation of -Version objects from various inputs. - -It is especially helpful during data store migrations, and is also used for data store creation. -*/ - -#![deny(rust_2018_idioms)] - -#[macro_use] -extern crate log; -#[macro_use] -extern crate serde_plain; - -use lazy_static::lazy_static; -use regex::Regex; -use serde::{Serialize, Serializer}; -use snafu::{OptionExt, ResultExt}; -use std::path::Path; -use std::path::PathBuf; -use std::str::FromStr; -use std::{fmt, fs}; - -/// VersionComponent represents each integer segment of a version string. 
-pub type VersionComponent = u32; - -lazy_static! { - /// Regular expression that captures the entire version string (1.2 or v1.2) along with the - /// major (1) and minor (2) separately. - #[doc(hidden)] - pub static ref VERSION_RE: Regex = - Regex::new(r"(?Pv?(?P[0-9]+)\.(?P[0-9]+))").unwrap(); - - /// Regular expression that captures the version and ID from the name of a data store - /// directory, e.g. matching "v1.5_0123456789abcdef" will let you retrieve version (v1.5), - /// major (1), minor (5), and id (0123456789abcdef). - pub(crate) static ref DATA_STORE_DIRECTORY_RE: Regex = - Regex::new(&format!(r"^{}_(?P.*)$", *VERSION_RE)).unwrap(); -} - -pub mod error { - use std::io; - use std::num::ParseIntError; - use std::path::PathBuf; - - use snafu::Snafu; - - #[derive(Debug, Snafu)] - #[snafu(visibility = "pub(super)")] - pub enum Error { - #[snafu(display("Internal error: {}", msg))] - Internal { msg: String }, - - #[snafu(display("Given string '{}' not a version, must match re: {}", given, re))] - InvalidVersion { given: String, re: String }, - - #[snafu(display("Version component '{}' not an integer: {}", component, source))] - InvalidVersionComponent { - component: String, - source: ParseIntError, - }, - - #[snafu(display("Data store link '{}' points to /", path.display()))] - DataStoreLinkToRoot { path: PathBuf }, - - #[snafu(display("Data store path '{}' contains invalid UTF-8", path.display()))] - DataStorePathNotUTF8 { path: PathBuf }, - - #[snafu(display("Unable to read from version file path '{}': {}", path.display(), source))] - VersionPathRead { path: PathBuf, source: io::Error }, - } -} - -type Result = std::result::Result; - -/// Version represents the version identifiers of our data store. -// Deriving Ord will check the fields in order, so as long as the more important fields (e.g. -// 'major') are listed first, it will compare versions as expected. 
-#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)] -pub struct Version { - /// The data store format version, or major for short. - pub major: VersionComponent, - /// The content format version, or minor for short. - pub minor: VersionComponent, -} - -impl FromStr for Version { - type Err = error::Error; - - /// Parse a version string like "1.0" or "v1.0" into a Version. - fn from_str(input: &str) -> Result { - Self::from_str_with_re(input, &VERSION_RE) - } -} - -derive_deserialize_from_str!(Version, "Valid data-store version"); - -impl fmt::Display for Version { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "v{}.{}", self.major, self.minor) - } -} - -impl Serialize for Version { - fn serialize(&self, serializer: S) -> std::result::Result - where - S: Serializer, - { - serializer.serialize_str(&format!("{}.{}", self.major, self.minor)) - } -} - -impl Version { - #[allow(dead_code)] - pub fn new(major: VersionComponent, minor: VersionComponent) -> Self { - Self { major, minor } - } - - /// Parses the input string into a Version, with a given Regex that is expected to provide - /// "major" and "minor" captures. Used to implement the pub(crate) entry points. 
- fn from_str_with_re(input: &str, re: &Regex) -> Result { - trace!("Parsing version from string: {}", input); - - let captures = re.captures(&input).context(error::InvalidVersion { - given: input, - re: re.as_str(), - })?; - - let major_str = captures.name("major").context(error::Internal { - msg: "Version matched regex but we don't have a 'major' capture", - })?; - let minor_str = captures.name("minor").context(error::Internal { - msg: "Version matched regex but we don't have a 'minor' capture", - })?; - - let major = major_str - .as_str() - .parse::() - .with_context(|| error::InvalidVersionComponent { - component: major_str.as_str(), - })?; - let minor = minor_str - .as_str() - .parse::() - .with_context(|| error::InvalidVersionComponent { - component: minor_str.as_str(), - })?; - - trace!("Parsed major '{}' and minor '{}'", major, minor); - Ok(Self { major, minor }) - } - - /// This pulls the version number out of the given datastore path. - /// - /// Returns Err if the given path isn't named like a versioned data store. - /// - /// Background: The data store path uses symlinks to represent versions and allow for easy - /// version flips. This function expects the target directory path. - /// - /// An example setup for version 1.5: - /// /path/to/datastore/current - /// -> /path/to/datastore/v1 - /// -> /path/to/datastore/v1.5 - /// -> /path/to/datastore/v1.5_0123456789abcdef - pub fn from_datastore_path>(path: P) -> Result { - let path = path.into(); - trace!("Getting version from datastore path: {}", path.display()); - - // Pull out the basename of the path, which contains the version - let version_os_str = path - .file_name() - .context(error::DataStoreLinkToRoot { path: &path })?; - let version_str = version_os_str - .to_str() - .context(error::DataStorePathNotUTF8 { path: &path })?; - - // Parse and return the version - Self::from_str_with_re(version_str, &DATA_STORE_DIRECTORY_RE) - } - - /// This reads the version number from a given file. 
- pub fn from_file>(path: P) -> Result { - let version_str = fs::read_to_string(path.as_ref()).context(error::VersionPathRead { - path: path.as_ref(), - })?; - Version::from_str(&version_str) - } -} - -#[cfg(test)] -mod test { - use super::Version; - use std::str::FromStr; - - #[test] - fn version_eq() { - assert_eq!(Version::new(0, 0), Version::new(0, 0)); - assert_eq!(Version::new(1, 0), Version::new(1, 0)); - assert_eq!(Version::new(1, 1), Version::new(1, 1)); - - assert_ne!(Version::new(0, 0), Version::new(0, 1)); - assert_ne!(Version::new(0, 1), Version::new(1, 0)); - assert_ne!(Version::new(1, 0), Version::new(0, 1)); - } - - #[test] - fn version_ord() { - assert!(Version::new(0, 1) > Version::new(0, 0)); - assert!(Version::new(1, 0) > Version::new(0, 99)); - assert!(Version::new(1, 1) > Version::new(1, 0)); - - assert!(Version::new(0, 0) < Version::new(0, 1)); - assert!(Version::new(0, 99) < Version::new(1, 0)); - assert!(Version::new(1, 0) < Version::new(1, 1)); - } - - #[test] - fn from_str() { - assert_eq!(Version::from_str("0.1").unwrap(), Version::new(0, 1)); - assert_eq!(Version::from_str("1.0").unwrap(), Version::new(1, 0)); - assert_eq!(Version::from_str("2.3").unwrap(), Version::new(2, 3)); - - assert_eq!(Version::from_str("v0.1").unwrap(), Version::new(0, 1)); - assert_eq!(Version::from_str("v1.0").unwrap(), Version::new(1, 0)); - assert_eq!(Version::from_str("v2.3").unwrap(), Version::new(2, 3)); - } - - #[test] - fn fmt() { - assert_eq!("v0.1", format!("{}", Version::new(0, 1))); - assert_eq!("v1.0", format!("{}", Version::new(1, 0))); - assert_eq!("v2.3", format!("{}", Version::new(2, 3))); - } -} diff --git a/workspaces/api/migration/README.md b/workspaces/api/migration/README.md index 3046422d0d7..a4ff4034df2 100644 --- a/workspaces/api/migration/README.md +++ b/workspaces/api/migration/README.md @@ -7,63 +7,14 @@ To achieve both, we need the ability to migrate configuration between versions. 
The mechanism proposed is reminiscent of database migrations - individual bits of code that manipulate specific data, and that are labeled to indicate what version they apply to. -## What exactly is versioned - -This document covers versioning of the data store specifically. -Two things will be versioned: the data store format itself, and the content format within the data store. - -### data store format - -This refers to the major structure of the data store, i.e. the fact that we're using the filesystem to store data, how the data is laid out on the filesystem, etc. - -This will start out at “v1”. -If we were to make a major change, e.g. moving to SQLite, we would move to v2. - -The data store format is not expected to change frequently. -Most of this document covers content format changes; see [Handling data store format changes](#handling-data-store-format-changes) below for discussion of handling data store format changes. - -### content format - -This refers to the available keys and the format of data within those keys. - -This will start out at 0. -When combined with the data store format, that means we will start at “v1.0”. -A content format update would put us at “v1.1”. -A data store format update would then put us at “v2.0”. - -The content format is expected to change relatively frequently, because we will likely get requests for adding customization, and will be updating and adding applications occasionally. - -Note: adding new keys will usually not require a migration; default settings will be populated for new keys automatically. - -### Rejected options - -#### Versioning prefixes: - -"prefix" means the initial, common substring of a set of keys. -For example, `settings.a.b.c` and `settings.a.x.y` would both be matched by the prefix `settings.a`. - -We could attach a version number to prefixes and migrate them separately. -This would provide the benefit of scoping migrations to a specific subset of data, e.g. 
only “settings.docker” to handle a Docker upgrade. -However, this would be confusing because parent and child nodes could have different versions. -It would be difficult to understand what migrations should run, which have been run, etc. - -### For reference: Other versioned artifacts - -Other documents should cover: - -* Versioning of the API, and how that versioning relates to data store versioning -* Versioning of specific components like Docker or the Linux kernel; this is more complex because we may want to support multiple versions - ## When to migrate data -We will run migrations during the boot process when we detect that the on-disk data store doesn't match the format of the booting image. - -This can slow boot, but the impact can be partially mitigated by having recent migrations stored in the image, and having the system download any missing, appropriate migrations before rebooting. (The image can be mounted read-only after being written to disk so we can check what’s missing.) +We will run migrations during the boot process when we detect that the on-disk data store doesn't match the version of the booting image. +The system will download any missing, appropriate migrations before rebooting. -Another downside is that migration failure would require another reboot to flip back to a matching version. +One downside of this approach is that migration failure would require another reboot to flip back to a matching version. In return, we get a consistent process where we always check and migrate the data store before it's used, and we have a simpler pre-reboot process. - We can also handle some cases of unexpected version mismatches at boot, for example if something goes wrong during an update. ### Offline migration @@ -75,20 +26,6 @@ When downloading an OS update, we necessarily download the TUF metadata describi This metadata also describes the migrations necessary between various updates. 
During this update preparation process, we will also download all necessary migrations, and refuse to start an update otherwise. -This means we can be confident in running the migrations we have, even if there may be newer versions online. -If there are any fixed migrations, there will necessarily be a new content format version to handle fixing any known issues with migrations to the prior version. -This means we can be confident that running cached migrations offline won’t leave a machine stranded; it will just need to update again. - -### Migrations location - -We expect most migrations to be stored in the incoming image in a known location. -Since we’re running migrations at boot, we can easily access and run these from the current partition. - -Not all migrations will be in the image, though, because of the case described above in [Offline Migration](#offline-migration) where fixed migrations are listed in the metadata and had to be downloaded. -At the time of download, we’re still in the old image, and the new image must be considered read-only or verified boot would fail. - -Therefore, we must also look for migrations in a known location on persistent storage where they were downloaded. - ### Rejected options * Before reboot - This would optimistically prepare the data store before rebooting into a new version. @@ -101,45 +38,37 @@ Therefore, we must also look for migrations in a known location on persistent st As mentioned above in [Offline Migration](#offline-migration), update metadata will list the migrations needed to move between versions. This metadata will be made available for the migrator to use later to verify the migrations. -As mentioned above in [When to migrate data](#when-to-migrate-data), expected migrations will be stored directly in the image so most updates can be fulfilled without further downloads. -However, there will be times when we discover issues with migrations after release and need to replace them. 
+There may be times when we discover issues with migrations after release and need to replace them. These fixed migrations can be listed in the update metadata and downloaded along with the image. -Since this metadata is the source of truth for the migration list, any old migrations in the image will simply be ignored. +Since this metadata is the source of truth for the migration list, any old migrations will simply be ignored. Note: The tool downloading migrations does not have to be the same as the one downloading images, nor does it have to be part of the migrator. Its job is closest to the update downloader, though, and so should be described in further detail in the update system design. ## How to update and flip the data store -To start, the migration system would copy the data store, update it by running migrations, and (when ready) flip a link to the new format. +To start, the migration system copies the data store, updates it by running migrations, and (when ready) flips symlinks to the new version. This duplicates the data, but at the start, we aren't expecting to store large quantities of data in the data store. -We could mitigate this downside by deduplicating data through clever inner linking or other techniques. +Plus, new OS versions that have no data store changes are simply a new symlink rather than a copy. ### Data store symlink structure Applications access the data store at `/var/lib/bottlerocket/datastore/current` so they don't have to understand the versioning scheme. -`current` is a link to the data store format (major) version, e.g. `v1`. -If we change data store format versions, it's this symlink we'd flip; see [Handling data store format changes](#handling-data-store-format-changes) below for details. - -`v1` is a link to the content format (minor) version, e.g. `v1.5`. -Any time we change content format versions, it's this symlink we'd flip. - -`v1.5` is a link to the real data store directory. 
-This has a random identifier appended so that we can run migrations on a copy without affecting live data. -The link is created by the migrator. - +`current` is a link to the major version, which is a link to the minor version, etc. Here's a full example setup for version 1.5: ``` /var/lib/bottlerocket/datastore/current -> v1 -> v1.5 - -> v1.5_0123456789abcdef + -> v1.5.2 + -> v1.5.2_0123456789abcdef ``` +The final level has a random identifier appended so we can track migration attempts and help prevent timing issues. -The old version's directory can be kept for quick rollbacks; automated cleanup will be necessary to prevent filling the disk. +Old versions can be kept for quick rollbacks, but automated cleanup will be necessary to prevent filling the disk. -Note that the version applies to both the live and pending trees, which live inside the directory described above - we have to migrate both live and pending data or we could lose customer information. +Note that the version applies to both the `live` and `pending` trees, which both live inside the directory described above - we have to migrate both live and pending data or we could lose customer information. ## How to run migrations @@ -148,29 +77,30 @@ The migration system will follow these steps. ### Find outgoing and incoming versions The outgoing version is represented by the link described above in [How to update and flip the data store](#how-to-update-and-flip-the-data-store). -The incoming version is listed in the file `/usr/share/bottlerocket/data-store-version` in the incoming image. +The incoming version is listed in the file `/etc/os-release` in the incoming image. ### Find migrations Next we find the migration binaries that are applicable when moving from the outgoing version to the incoming version. Migration names include the version where they first apply, so we take any migrations applicable after the outgoing version, up to and including the incoming version. 
-Migrations from recent versions will be in a known location (e.g. `/var/lib/bottlerocket/datastore/migrations`) in the incoming image, and all migrations will also be available on the update server for download. +Migrations will be in a known location (e.g. `/var/lib/bottlerocket/datastore/migrations`) and all migrations will also be available on the update server for download. Migration lists (metadata) will be available from both sources to ensure we're not missing any migrations. ### Run migrations The migration system then runs the migrations in order. -This can use a relatively simple numerical ordering of the migration binary filenames, because the names include the applicable version and optional integer for ordering multiple migrations within a version. +This can use a relatively simple numerical ordering of the migration binary filenames, because the names include the applicable version and a name for ordering multiple migrations within a version. (See [Structure](#structure).) -As mentioned above in [How to update and flip the data store](#how-to-update-and-flip-the-data-store), migrations are run on a copy of the current data store, and are run on the live and pending trees within the copy. +As mentioned above in [How to update and flip the data store](#how-to-update-and-flip-the-data-store), migrations are run on an in-memory copy of the current data store, and are run on the live and pending trees within the copy. ### Handling failure Upon failure of a migration, we don't need to run any rollbacks because we were operating on a copy of the data store. -However, if migrating at boot, we need to flip the partition table back to the version that supports our current data store, and we should reboot back into the supported version. +If the migrator fails, boot services will be marked as failed, which means the current partition set won't be marked as successful. 
+A reboot will automatically switch back to the other partition set containing the version that supports our current data store.

## How to write migrations

@@ -184,24 +114,21 @@ Each method will give data and metadata maps as input, and require data and meta
(These structures will be nested, rather than using dotted keys, to make it easier to handle sub-trees and reduce dependency on existing data store code.)

Migration code should not assume that any given keys exist, because migrations will be run on live data (where all keys will likely exist) and on pending data (where none, some, or all keys may exist).
+Plus, different variants of Bottlerocket may not have the same keys.

The migration system could deserialize the function outputs into the incoming model types to confirm that the structure is valid; we should prototype this idea because it would add safety.

-To write a migration, start a Rust project at `/migrations/<version>/<name>/Cargo.toml`
+To write a migration, start a Rust project at `/migrations/<version>/migrate-<name>/Cargo.toml`
The filename of the migration will be used by the migration system to order migrations.
-The name will take the format `migrate_v<version>_<name>`, similar to the directory name it's stored in.
-Cargo does not allow naming binaries this way, so the migration build process will rename them appropriately when installing them into the image.
-Migration authors can simply name their crate with the `<name>` referenced above.
+The name will take the format `migrate_v<version>_<name>`.
+Cargo does not allow naming binaries this way, so the migration build process renames them appropriately when installing them into the image.

### Helpers

-We will have a standard structure for migration code that handles common things like argument parsing, so that we can have a common CLI interface for the migration system to run migrations.
+We have a standard structure for migration code that handles common things like argument parsing, so that we can have a common CLI interface for the migration system to run migrations. -An expected common use case is to discard some existing data and use the new defaults. -This is expected to be common for system-level data in the data store, e.g. services and configuration files, which aren't modifiable by the user. -We'll make it easy to generate default values from the incoming image, so the new defaults can be used as the migration output. -(This is not needed for entirely new keys, which will be taken from defaults automatically.) +We also have a Rust module that handles common migration types, such as adding, removing, and replacing settings. ### Rejected options @@ -209,7 +136,6 @@ Regarding ordering: * We could use metadata in the migration project, and in the resulting binary, to represent the applicable version and ordering. It's unclear how to do this, and filenames are obvious; we can figure out a metadata-based system if our needs become more complex. - Regarding migration function parameters: * We could use dotted keys instead of nested maps, but nested maps make it easier to handle sub-trees and reduce dependency on existing data store code. @@ -223,47 +149,28 @@ Therefore, rollbacks can largely be treated the same way, just running the backw If we're confident no user configuration has changed, or if the user wants to discard changes, we can do a quick rollback by flipping the symlink(s) back, as mentioned in [How to update and flip the data store](#how-to-update-and-flip-the-data-store). -## Handling data store format changes - -Not all of the above design applies to handling data store format changes because they're necessarily bigger changes that we can't predict. -If we move everything to version 2, there could be a dramatically different storage system that requires entirely different migration types, for example. 
- -### How to migrate data store format - -We still have clear version numbers changing in a clear way, so we can use the basic plan from [How to run migrations](#how-to-run-migrations) above. - -We would be migrating at the same time as in [When to migrate data](#when-to-migrate-data) above. - -We would likely still flip symlinks as in [How to update and flip the data store](#how-to-update-and-flip-the-data-store) above, but we have to leave open the possibility that the data would move entirely and the symlinks are no longer appropriate. -Therefore, managing this would be left to the individual migration code. - -The primary changes are to [How to write migrations](#how-to-write-migrations). We would still want the capability to migrate forward and backward, but we can't guarantee that the map-based parameters are relevant. -The migrations would have a looser interface (“void”) and would manipulate the existing and new data stores however they deem appropriate. -(We'd probably still want the same argument handling so the migration system can run them consistently.) - -### Rejected options - -We could use the OS version as a surrogate for data store format version, but because the data store format version will change rarely, there will be many times when we have a new OS version but no data store format change. -It would be harder to determine when there was actually a change, and harder to find appropriate migrations. -Because we expect to change data store format rarely, it's better to stay simple until we have more specific needs. - ## Example use cases ### New application Say we add a new open-source application, like rngd. We can add data regarding settings, services, and configuration-files of the application to our data model. -In this case, we wouldn't need to write migrations because data store keys that are entirely new will be automatically populated from defaults by storewolf. 
-### Docker version upgrade +In this case, we can use the existing helper `AddSettingMigration`. +It doesn't need to do anything on upgrade, because the new key will be populated by its default value. +On downgrade, it removes the setting, so that the old data store model doesn't see an unexpected key and reject the data. + +### Application version upgrade -If we upgrade Docker, its available and required settings may change. +If we upgrade an important application, its available and required settings may change. This means we'd have to update the data model to include any new or changed settings, and we'd write migrations to transform data from the old settings to the new. +This can likely be handled by existing helpers `AddSettingMigration`, `RemoveSettingMigration`, `ReplaceStringMigration`, and `ReplaceTemplateMigration`. ### Data store implementation change If we find that the filesystem data store implementation is insufficient, we may, for example, want to move to a database like SQLite, or to an improved filesystem data store implementation. -In this case, we'd use data store format migration(s) to insert the data into the new system and (if still applicable) flip symlinks so we know to use it. +If it can use the same symlink-flip system, we can handle it with a migration: make a migration that doesn't use the standard migration-helpers library, but uses custom code to affect the larger change. +If it needs an entirely new structure, we need to add capabilities to the migrator first. ## Open questions @@ -279,14 +186,10 @@ We can't do this unless we saved the value somewhere. 
The system does not currently have a provision for this, but we can imagine a few options: * The data store could have versioned keys, storing all previous values * There could be a snapshot of the data store from before migrations +* There could be a "recycle bin" outside of the data store that stores any removed data in a separate versioned store, making it available for rollbacks -### Multiple versions of an application - -We expect that some customers will want to run different versions of Docker or of the Linux kernel, for example. -If we want to support multiple versions of an application in our source tree, what does that mean for data migration? If a user wants to switch from Linux 4.14 to 4.18, how do we migrate settings? - -The main issue is that this presents another dimension for migrations. -Consider the case where the user is updating an instance and also wants change their kernel from 4.14 to 4.18. -We have to know to run version migrations, and also to run kernel-application-related migrations; how do we choose an order? To prevent broken migrations, you'd need a total ordering across both dimensions. +### Variants -We also don't currently have a concrete plan for configurable builds and feature selection, so it's unclear how to tie this in. +Bottlerocket variants allow the creation of Bottlerocket images with varying system components. +Do we want to support moving between variants? +If so, and we want to maintain user settings through the transition, we need to understand how to define and order migrations between variants. 
diff --git a/workspaces/api/migration/migrator/Cargo.toml b/workspaces/api/migration/migrator/Cargo.toml index d5b480d7b60..2d834a91668 100644 --- a/workspaces/api/migration/migrator/Cargo.toml +++ b/workspaces/api/migration/migrator/Cargo.toml @@ -8,12 +8,13 @@ publish = false build = "build.rs" [dependencies] -data_store_version = { path = "../../data_store_version" } +bottlerocket-release = { path = "../../../bottlerocket-release" } lazy_static = "1.2" log = "0.4" nix = "0.16" rand = { version = "0.7", default-features = false, features = ["std"] } regex = "1.1" +semver = "0.9" simplelog = "0.7" snafu = "0.6" diff --git a/workspaces/api/migration/migrator/README.md b/workspaces/api/migration/migrator/README.md index 9da19e097fc..1e1e8e9a6e6 100644 --- a/workspaces/api/migration/migrator/README.md +++ b/workspaces/api/migration/migrator/README.md @@ -13,9 +13,11 @@ Given those, it will: * confirm that the given data store has the appropriate versioned symlink structure * find the version of the given data store * find migrations between the two versions -* copy the data store -* run the migrations on the copy -* do a symlink-flip so the copy takes the place of the original +* if there are migrations: + * run the migrations; the transformed data becomes the new data store +* if there are *no* migrations: + * just symlink to the old data store +* do symlink flips so the new version takes the place of the original To understand motivation and more about the overall process, look at the migration system documentation, one level up. diff --git a/workspaces/api/migration/migrator/src/args.rs b/workspaces/api/migration/migrator/src/args.rs index 06788803fe9..63cf90b6b7c 100644 --- a/workspaces/api/migration/migrator/src/args.rs +++ b/workspaces/api/migration/migrator/src/args.rs @@ -1,6 +1,7 @@ //! This module handles argument parsing for the migrator binary. 
-use data_store_version::Version; +use bottlerocket_release::BottlerocketRelease; +use semver::Version; use simplelog::LevelFilter; use std::env; use std::fs; @@ -15,7 +16,7 @@ fn usage() -> ! { r"Usage: {} --datastore-path PATH --migration-directories PATH[:PATH:PATH...] - (--migrate-to-version x.y | --migrate-to-version-from-file PATH) + (--migrate-to-version x.y | --migrate-to-version-from-os-release) [ --no-color ] [ --log-level trace|debug|info|warn|error ]", program_name @@ -106,21 +107,11 @@ impl Args { migrate_to_version = Some(version) } - "--migrate-to-version-from-file" => { - let path_str = iter.next().unwrap_or_else(|| { - usage_msg("Did not give argument to --migrate-to-version-from-file") + "--migrate-to-version-from-os-release" => { + let br = BottlerocketRelease::new().unwrap_or_else(|e| { + usage_msg(format!("Unable to get version from os-release: {}", e)) }); - trace!("Given --migrate-to-version-from-file: {}", path_str); - - let version_str = fs::read_to_string(&path_str).unwrap_or_else(|e| { - usage_msg(format!("Could not read version from {}: {}", path_str, e)) - }); - trace!("Read version string from file: {}", version_str); - - let version = Version::from_str(&version_str).unwrap_or_else(|e| { - usage_msg(format!("Invalid version in {}: {}", path_str, e)) - }); - migrate_to_version = Some(version) + migrate_to_version = Some(br.version_id) } _ => usage(), diff --git a/workspaces/api/migration/migrator/src/direction.rs b/workspaces/api/migration/migrator/src/direction.rs index 4eaf239703c..ef250cc9679 100644 --- a/workspaces/api/migration/migrator/src/direction.rs +++ b/workspaces/api/migration/migrator/src/direction.rs @@ -1,7 +1,7 @@ //! This module owns the Direction type used by the migrator to determine whether a migration //! is moving forward to a new version or rolling back to a previous version. 
-use data_store_version::Version;
+use semver::Version;
use std::cmp::Ordering;
use std::fmt;

@@ -25,7 +25,7 @@ impl fmt::Display for Direction {
impl Direction {
    /// Determines the migration direction, given the outgoing ("from') and incoming ("to")
    /// versions.
-    pub(crate) fn from_versions(from: Version, to: Version) -> Option<Direction> {
+    pub(crate) fn from_versions(from: &Version, to: &Version) -> Option<Direction> {
        match from.cmp(&to) {
            Ordering::Less => Some(Direction::Forward),
            Ordering::Greater => Some(Direction::Backward),
@@ -37,24 +37,24 @@ impl Direction {
#[cfg(test)]
mod test {
    use super::Direction;
-    use data_store_version::Version;
+    use semver::Version;

    #[test]
    fn direction() {
-        let v01 = Version::new(0, 1);
-        let v02 = Version::new(0, 2);
-        let v10 = Version::new(1, 0);
+        let v01 = Version::new(0, 0, 1);
+        let v02 = Version::new(0, 0, 2);
+        let v10 = Version::new(0, 1, 0);

-        assert_eq!(Direction::from_versions(v01, v02), Some(Direction::Forward));
+        assert_eq!(Direction::from_versions(&v01, &v02), Some(Direction::Forward));
        assert_eq!(
-            Direction::from_versions(v02, v01),
+            Direction::from_versions(&v02, &v01),
            Some(Direction::Backward)
        );
-        assert_eq!(Direction::from_versions(v01, v01), None);
+        assert_eq!(Direction::from_versions(&v01, &v01), None);

-        assert_eq!(Direction::from_versions(v02, v10), Some(Direction::Forward));
+        assert_eq!(Direction::from_versions(&v02, &v10), Some(Direction::Forward));
        assert_eq!(
-            Direction::from_versions(v10, v02),
+            Direction::from_versions(&v10, &v02),
            Some(Direction::Backward)
        );
    }
diff --git a/workspaces/api/migration/migrator/src/error.rs b/workspaces/api/migration/migrator/src/error.rs
index 9563f7b8236..be150fad68f 100644
--- a/workspaces/api/migration/migrator/src/error.rs
+++ b/workspaces/api/migration/migrator/src/error.rs
@@ -1,7 +1,6 @@
//! This module owns the error type used by the migrator.
-use data_store_version::error::Error as VersionError; -use data_store_version::{Version, VersionComponent}; +use semver::Version; use snafu::Snafu; use std::io; use std::path::PathBuf; @@ -14,14 +13,8 @@ pub(crate) enum Error { #[snafu(display("Internal error: {}", msg))] Internal { msg: String }, - #[snafu(display("Unable to create version from data store path '{}': {}", path.display(), source))] - VersionFromDataStorePath { path: PathBuf, source: VersionError }, - - #[snafu(display("Can only migrate minor versions; major version '{}' requested, major version of given data store is '{}'", given, found))] - MajorVersionMismatch { - given: VersionComponent, - found: VersionComponent, - }, + #[snafu(display("Data store path '{}' contains invalid UTF-8", path.display()))] + DataStorePathNotUTF8 { path: PathBuf }, #[snafu(display("Unable to open data store directory '{}': {}", path.display(), source))] DataStoreDirOpen { path: PathBuf, source: nix::Error }, @@ -32,15 +25,13 @@ pub(crate) enum Error { #[snafu(display("Data store path '{}' contains invalid version: {}", path.display(), source))] InvalidDataStoreVersion { path: PathBuf, - #[snafu(source(from(Error, Box::new)))] - source: Box, + source: semver::SemVerError, }, #[snafu(display("Migration '{}' contains invalid version: {}", path.display(), source))] InvalidMigrationVersion { path: PathBuf, - #[snafu(source(from(VersionError, Box::new)))] - source: Box, + source: semver::SemVerError, }, #[snafu(display("Data store for new version {} already exists at {}", version, path.display()))] @@ -62,6 +53,9 @@ pub(crate) enum Error { #[snafu(display("Failed to swap symlink at {} to new version: {}", link.display(), source))] LinkSwap { link: PathBuf, source: io::Error }, + #[snafu(display("Failed to read symlink at {} to find version: {}", link.display(), source))] + LinkRead { link: PathBuf, source: io::Error }, + #[snafu(display("Failed listing migration directory '{}': {}", dir.display(), source))] ListMigrations { 
dir: PathBuf, source: io::Error },
diff --git a/workspaces/api/migration/migrator/src/lib.rs b/workspaces/api/migration/migrator/src/lib.rs
index 06f722cac09..c9b48166bae 100644
--- a/workspaces/api/migration/migrator/src/lib.rs
+++ b/workspaces/api/migration/migrator/src/lib.rs
@@ -1,12 +1,20 @@
#![deny(rust_2018_idioms)]

-use data_store_version::VERSION_RE;
use lazy_static::lazy_static;
use regex::Regex;

lazy_static! {
    /// Regular expression that will match migration file names and allow retrieving the
    /// version and name components.
+    // Note: the version component is a simplified semver regex; we don't use any of the
+    // extensions, just a simple x.y.z, so this isn't as strict as it could be.
    pub static ref MIGRATION_FILENAME_RE: Regex =
-        Regex::new(&format!(r"^migrate_{}_(?P<name>[a-zA-Z0-9-]+)$", *VERSION_RE)).unwrap();
+        Regex::new(r"(?x)^
+                   migrate
+                   _
+                   v?  # optional 'v' prefix for humans
+                   (?P<version>[0-9]+\.[0-9]+\.[0-9]+[0-9a-zA-Z+-]*)
+                   _
+                   (?P<name>[a-zA-Z0-9-]+)
+                   $").unwrap();
}
diff --git a/workspaces/api/migration/migrator/src/main.rs b/workspaces/api/migration/migrator/src/main.rs
index bfb1b2e1a09..5b1ca25844a 100644
--- a/workspaces/api/migration/migrator/src/main.rs
+++ b/workspaces/api/migration/migrator/src/main.rs
@@ -9,9 +9,11 @@
//! * confirm that the given data store has the appropriate versioned symlink structure
//! * find the version of the given data store
//! * find migrations between the two versions
-//! * copy the data store
-//! * run the migrations on the copy
-//! * do a symlink-flip so the copy takes the place of the original
+//! * if there are migrations:
+//!   * run the migrations; the transformed data becomes the new data store
+//! * if there are *no* migrations:
+//!   * just symlink to the old data store
+//! * do symlink flips so the new version takes the place of the original
//!
//! To understand motivation and more about the overall process, look at the migration system
//! documentation, one level up.
@@ -21,18 +23,18 @@ #[macro_use] extern crate log; -use data_store_version::Version; use nix::{dir::Dir, fcntl::OFlag, sys::stat::Mode, unistd::fsync}; use rand::{distributions::Alphanumeric, thread_rng, Rng}; +use semver::Version; use simplelog::{Config as LogConfig, TermLogger, TerminalMode}; use snafu::{ensure, OptionExt, ResultExt}; +use std::collections::HashSet; use std::env; use std::fs::{self, Permissions}; use std::os::unix::fs::{symlink, PermissionsExt}; use std::os::unix::io::AsRawFd; use std::path::{Path, PathBuf}; use std::process::{self, Command}; -use std::str::FromStr; use migrator::MIGRATION_FILENAME_RE; mod args; @@ -60,23 +62,17 @@ fn run() -> Result<()> { TermLogger::init(args.log_level, LogConfig::default(), TerminalMode::Mixed) .context(error::Logger)?; - // We don't handle data store format (major version) migrations because they could change - // anything about our storage; they're handled by more free-form binaries run by a separate - // startup service. - let current_version = Version::from_datastore_path(&args.datastore_path).context( - error::VersionFromDataStorePath { + // Get the directory we're working in. 
+ let datastore_dir = args + .datastore_path + .parent() + .context(error::DataStoreLinkToRoot { path: &args.datastore_path, - }, - )?; - if current_version.major != args.migrate_to_version.major { - return error::MajorVersionMismatch { - given: args.migrate_to_version.major, - found: current_version.major, - } - .fail(); - } + })?; - let direction = Direction::from_versions(current_version, args.migrate_to_version) + let current_version = get_current_version(&datastore_dir)?; + + let direction = Direction::from_versions(¤t_version, &args.migrate_to_version) .unwrap_or_else(|| { info!( "Requested version {} matches version of given datastore at '{}'; nothing to do", @@ -88,19 +84,60 @@ fn run() -> Result<()> { let migrations = find_migrations( &args.migration_directories, - current_version, - args.migrate_to_version, + ¤t_version, + &args.migrate_to_version, )?; - let (copy_path, copy_id) = copy_datastore(&args.datastore_path, args.migrate_to_version)?; - run_migrations(direction, &migrations, &args.datastore_path, ©_path)?; - flip_to_new_minor_version(args.migrate_to_version, ©_path, ©_id)?; + if migrations.is_empty() { + // Not all new OS versions need to change the data store format. If there's been no + // change, we can just link to the last version rather than making a copy. + // (Note: we link to the fully resolved directory, args.datastore_path, so we don't + // have a chain of symlinks that could go past the maximum depth.) + flip_to_new_version(&args.migrate_to_version, &args.datastore_path)?; + } else { + let copy_path = run_migrations( + direction, + &migrations, + &args.datastore_path, + &args.migrate_to_version, + )?; + flip_to_new_version(&args.migrate_to_version, ©_path)?; + } Ok(()) } // =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= =^..^= +fn get_current_version

<P>(datastore_dir: P) -> Result<Version>
where
    P: AsRef<Path>,
{
    let datastore_dir = datastore_dir.as_ref();

    // Find the current patch version link, which contains our full version number
    let current = datastore_dir.join("current");
    let major =
        datastore_dir.join(fs::read_link(&current).context(error::LinkRead { link: current })?);
    let minor = datastore_dir.join(fs::read_link(&major).context(error::LinkRead { link: major })?);
    let patch = datastore_dir.join(fs::read_link(&minor).context(error::LinkRead { link: minor })?);

    // Pull out the basename of the path, which contains the version
    let version_os_str = patch
        .file_name()
        .context(error::DataStoreLinkToRoot { path: &patch })?;
    let mut version_str = version_os_str
        .to_str()
        .context(error::DataStorePathNotUTF8 { path: &patch })?;

    // Allow 'v' at the start so the links have clearer names for humans
    if version_str.starts_with('v') {
        version_str = &version_str[1..];
    }

    Version::parse(version_str).context(error::InvalidDataStoreVersion { path: &patch })
}

/// Returns a list of all migrations found on disk.
///
/// TODO: This does not yet handle migrations that have been replaced by newer versions - we only
@@ -142,8 +179,8 @@ where
/// Returns the sublist of the given migrations that should be run, in the returned order, to move
/// from the 'from' version to the 'to' version.
fn select_migrations<P: AsRef<Path>>(
-    from: Version,
-    to: Version,
+    from: &Version,
+    to: &Version,
    paths: &[P],
) -> Result<Vec<PathBuf>> {
    // Intermediate result where we also store the version and name, needed for sorting
@@ -174,7 +211,7 @@ fn select_migrations<P: AsRef<Path>>(
        let version_match = captures.name("version").context(error::Internal {
            msg: "Migration name matched regex but we don't have a 'version' capture",
        })?;
-        let version = Version::from_str(version_match.as_str())
+        let version = Version::parse(version_match.as_str())
            .context(error::InvalidMigrationVersion { path: &path })?;

        let name_match = captures.name("name").context(error::Internal {
@@ -187,13 +224,13 @@ fn select_migrations<P: AsRef<Path>>(
        // how to undo its changes and take you to the lower version. For example, the v2
        // migration knows what changes it made to go from v1 to v2 and therefore how to go
        // back from v2 to v1. See tests.
-        let applicable = if to > from && version > from && version <= to {
+        let applicable = if to > from && version > *from && version <= *to {
            info!(
                "Found applicable forward migration '{}': {} < ({}) <= {}",
                file_name, from, version, to
            );
            true
-        } else if to < from && version > to && version <= from {
+        } else if to < from && version > *to && version <= *from {
            info!(
                "Found applicable backward migration '{}': {} >= ({}) > {}",
                file_name, from, version, to
@@ -245,7 +282,7 @@ fn select_migrations<P: AsRef<Path>>(
/// Given the versions we're migrating from and to, this will return an ordered list of paths to
/// migration binaries we should run to complete the migration on a data store.
// This separation allows for easy testing of select_migrations.
-fn find_migrations

<P>(paths: &[P], from: Version, to: Version) -> Result<Vec<PathBuf>>
+fn find_migrations

<P>(paths: &[P], from: &Version, to: &Version) -> Result<Vec<PathBuf>>
where
    P: AsRef<Path>,
{
@@ -256,38 +293,61 @@ where
    select_migrations(from, to, &candidates)
}

-/// Copies the data store at the given path to a new directory in the same parent direction, with
-/// the new copy being named appropriately for the given new version.
-fn copy_datastore<P: AsRef<Path>>(from: P, new_version: Version) -> Result<(PathBuf, String)> {
-    // First, we need a random ID to append; this helps us avoid timing issues, and lets us
-    // leave failed migrations for inspection, while being able to try again in a new path.
-    // Note: we use this rather than mktemp::new_path_in because that has a delete destructor.
-    // Note: consider using this as a more general migration ID?
-    let copy_id = thread_rng().sample_iter(&Alphanumeric).take(16).collect();
+/// Generates a random ID, affectionately known as a 'rando', that can be used to avoid timing
+/// issues and identify unique migration attempts.
+fn rando() -> String {
+    thread_rng().sample_iter(&Alphanumeric).take(16).collect()
+}

+/// Generates a path for a new data store, given the path of the existing data store,
+/// the new version number, and a random "copy id" to append.
+fn new_datastore_location

<P>(from: P, new_version: &Version) -> Result<PathBuf>
+where
+    P: AsRef<Path>,
+{
    let to = from
        .as_ref()
-        .with_file_name(format!("{}_{}", new_version, copy_id));
+        .with_file_name(format!("v{}_{}", new_version, rando()));
    ensure!(
        !to.exists(),
        error::NewVersionAlreadyExists {
-            version: new_version,
+            version: new_version.clone(),
            path: to
        }
    );
-    info!("New data store is being built at work location {}", to.display());
-    Ok((to, copy_id))
+    info!(
+        "New data store is being built at work location {}",
+        to.display()
+    );
+    Ok(to)
}

-/// Runs the given migrations in their given order on the given data store. The given direction
-/// is passed to each migration so it knows which direction we're migrating.
-fn run_migrations<P1, P2, P3>(direction: Direction, migrations: &[P1], source_datastore: P2, target_datastore: P3) -> Result<()>
+/// Runs the given migrations in their given order. The given direction is passed to each
+/// migration so it knows which direction we're migrating.
+///
+/// The given data store is used as a starting point; each migration is given the output of the
+/// previous migration, and the final output becomes the new data store.
+fn run_migrations<P1, P2>(
+    direction: Direction,
+    migrations: &[P1],
+    source_datastore: P2,
+    new_version: &Version,
+) -> Result<PathBuf>
where
    P1: AsRef<Path>,
    P2: AsRef<Path>,
-    P3: AsRef<Path>,
{
+    // We start with the given source_datastore, updating this after each migration to point to the
+    // output of the previous one.
+    let mut source_datastore = source_datastore.as_ref();
+    // We create a new data store (below) to serve as the target of each migration. (Start at
+    // source just to have the right type; we know we have migrations at this point.)
+    let mut target_datastore = source_datastore.to_owned();
+    // Any data stores we create that aren't the final one, i.e. intermediate data stores, will be
+    // removed at the end. (If we fail and return early, they're left for debugging purposes.)
+ let mut intermediate_datastores = HashSet::new(); + for migration in migrations { // Ensure the migration is executable. fs::set_permissions(migration.as_ref(), Permissions::from_mode(0o755)).context( @@ -302,11 +362,16 @@ where command.arg(direction.to_string()); command.args(&[ "--source-datastore".to_string(), - source_datastore.as_ref().display().to_string(), + source_datastore.display().to_string(), ]); + + // Create a new output location for this migration. + target_datastore = new_datastore_location(&source_datastore, &new_version)?; + intermediate_datastores.insert(target_datastore.clone()); + command.args(&[ "--target-datastore".to_string(), - target_datastore.as_ref().display().to_string(), + target_datastore.display().to_string(), ]); info!("Running migration command: {:?}", command); @@ -325,22 +390,40 @@ where ); ensure!(output.status.success(), error::MigrationFailure { output }); + + source_datastore = &target_datastore; } - Ok(()) + + // Remove the intermediate data stores + intermediate_datastores.remove(&target_datastore); + for intermediate_datastore in intermediate_datastores { + // Even if we fail to remove an intermediate data store, we've still migrated + // successfully, and we don't want to fail the upgrade - just let someone know for + // later cleanup. + trace!("Removing intermediate data store at {}", intermediate_datastore.display()); + if let Err(e) = fs::remove_dir_all(&intermediate_datastore) { + error!( + "Failed to remove intermediate data store at '{}': {}", + intermediate_datastore.display(), + e + ); + } + } + + Ok(target_datastore) } /// Atomically flips version symlinks to point to the given "to" datastore so that it becomes live. /// -/// This includes pointing the new minor version to the given `to_datastore` (which includes -/// `copy_id` in its name), then pointing the major version to the new minor version, and finally -/// fsyncing the directory to disk. 
-/// -/// `copy_id` is the identifier for this migration attempt, as created by copy_datastore, which we -/// use internally in this function for consistency. -fn flip_to_new_minor_version(version: Version, to_datastore: P, copy_id: S) -> Result<()> +/// This includes: +/// * pointing the new patch version to the given `to_datastore` +/// * pointing the minor version to the patch version +/// * pointing the major version to the minor version +/// * pointing the 'current' link to the major version +/// * fsyncing the directory to disk +fn flip_to_new_version

(version: &Version, to_datastore: P) -> Result<()> where P: AsRef, - S: AsRef, { // Get the directory we're working in. let to_dir = to_datastore @@ -360,18 +443,27 @@ where .context(error::DataStoreDirOpen { path: &to_dir })?; // Get a unique temporary path in the directory; we need this to atomically swap. - // We use the same copy_id so that mistakes/errors here will be obviously related to a - // given copy attempt. - let temp_link = to_dir.join(copy_id.as_ref()); + let temp_link = to_dir.join(rando()); + // Build the path to the 'current' link; this is what we're atomically swapping from + // pointing at the old major version to pointing at the new major version. + // Example: /path/to/datastore/current + let current_version_link = to_dir.join("current"); // Build the path to the major version link; this is what we're atomically swapping from // pointing at the old minor version to pointing at the new minor version. // Example: /path/to/datastore/v1 - // FIXME: duplicating knowledge of formatting here let major_version_link = to_dir.join(format!("v{}", version.major)); - // Build the path to the minor version link. If this already exists, it's because we've - // previously tried to migrate to this version. We point it at the full `to_datastore` path. + // Build the path to the minor version link; this is what we're atomically swapping from + // pointing at the old patch version to pointing at the new patch version. // Example: /path/to/datastore/v1.5 - let minor_version_link = to_dir.join(format!("{}", version)); + let minor_version_link = to_dir.join(format!("v{}.{}", version.major, version.minor)); + // Build the path to the patch version link. If this already exists, it's because we've + // previously tried to migrate to this version. We point it at the full `to_datastore` + // path. 
+ // Example: /path/to/datastore/v1.5.2 + let patch_version_link = to_dir.join(format!( + "v{}.{}.{}", + version.major, version.minor, version.patch + )); // Get the final component of the paths we're linking to, so we can use relative links instead // of absolute, for understandability. @@ -381,28 +473,59 @@ where .context(error::DataStoreLinkToRoot { path: to_datastore.as_ref(), })?; + let patch_target = patch_version_link + .file_name() + .context(error::DataStoreLinkToRoot { + path: to_datastore.as_ref(), + })?; let minor_target = minor_version_link .file_name() .context(error::DataStoreLinkToRoot { path: to_datastore.as_ref(), })?; + let major_target = major_version_link + .file_name() + .context(error::DataStoreLinkToRoot { + path: to_datastore.as_ref(), + })?; + + // =^..^= =^..^= =^..^= =^..^= info!( "Flipping {} to point to {}", - minor_version_link.display(), + patch_version_link.display(), to_target.to_string_lossy(), ); - // Create a symlink from the minor version to the new data store. We create it at a temporary + // Create a symlink from the patch version to the new data store. We create it at a temporary // path so we can atomically swap it into the real path with a rename call. - // This will point at, for example, /path/to/datastore/v1.5_0123456789abcdef + // This will point at, for example, /path/to/datastore/v1.5.2_0123456789abcdef symlink(&to_target, &temp_link).context(error::LinkCreate { path: &temp_link })?; - // Atomically swap the link into place, so that the minor version link points to the new data + // Atomically swap the link into place, so that the patch version link points to the new data // store copy. + fs::rename(&temp_link, &patch_version_link).context(error::LinkSwap { + link: &patch_version_link, + })?; + + // =^..^= =^..^= =^..^= =^..^= + + info!( + "Flipping {} to point to {}", + minor_version_link.display(), + patch_target.to_string_lossy(), + ); + + // Create a symlink from the minor version to the new patch version. 
+ // This will point at, for example, /path/to/datastore/v1.5.2 + symlink(&patch_target, &temp_link).context(error::LinkCreate { path: &temp_link })?; + // Atomically swap the link into place, so that the minor version link points to the new patch + // version. fs::rename(&temp_link, &minor_version_link).context(error::LinkSwap { link: &minor_version_link, })?; + // =^..^= =^..^= =^..^= =^..^= + info!( "Flipping {} to point to {}", major_version_link.display(), @@ -418,6 +541,24 @@ where link: &major_version_link, })?; + // =^..^= =^..^= =^..^= =^..^= + + info!( + "Flipping {} to point to {}", + current_version_link.display(), + major_target.to_string_lossy(), + ); + + // Create a symlink from 'current' to the new major version. + // This will point at, for example, /path/to/datastore/v1 + symlink(&major_target, &temp_link).context(error::LinkCreate { path: &temp_link })?; + // Atomically swap the link into place, so that 'current' points to the new major version. + fs::rename(&temp_link, ¤t_version_link).context(error::LinkSwap { + link: ¤t_version_link, + })?; + + // =^..^= =^..^= =^..^= =^..^= + // fsync the directory so the links point to the new version even if we crash right after // this. If fsync fails, warn but continue, because we likely can't swap the links back // without hitting the same failure. 
@@ -442,49 +583,49 @@ mod test { #[allow(unused_variables)] fn select_migrations_works() { // Migration paths for use in testing - let m00_1 = Path::new("migrate_v0.0_001"); - let m01_1 = Path::new("migrate_v0.1_001"); - let m01_2 = Path::new("migrate_v0.1_002"); - let m02_1 = Path::new("migrate_v0.2_001"); - let m03_1 = Path::new("migrate_v0.3_001"); - let m04_1 = Path::new("migrate_v0.4_001"); - let m04_2 = Path::new("migrate_v0.4_002"); + let m00_1 = Path::new("migrate_v0.0.0_001"); + let m01_1 = Path::new("migrate_v0.0.1_001"); + let m01_2 = Path::new("migrate_v0.0.1_002"); + let m02_1 = Path::new("migrate_v0.0.2_001"); + let m03_1 = Path::new("migrate_v0.0.3_001"); + let m04_1 = Path::new("migrate_v0.0.4_001"); + let m04_2 = Path::new("migrate_v0.0.4_002"); let all_migrations = vec![&m00_1, &m01_1, &m01_2, &m02_1, &m03_1, &m04_1, &m04_2]; // Versions for use in testing - let v00 = Version::new(0, 0); - let v01 = Version::new(0, 1); - let v02 = Version::new(0, 2); - let v03 = Version::new(0, 3); - let v04 = Version::new(0, 4); - let v05 = Version::new(0, 5); + let v00 = Version::new(0, 0, 0); + let v01 = Version::new(0, 0, 1); + let v02 = Version::new(0, 0, 2); + let v03 = Version::new(0, 0, 3); + let v04 = Version::new(0, 0, 4); + let v05 = Version::new(0, 0, 5); // Test going forward one minor version assert_eq!( - select_migrations(v01, v02, &all_migrations).unwrap(), + select_migrations(&v01, &v02, &all_migrations).unwrap(), vec![m02_1] ); // Test going backward one minor version assert_eq!( - select_migrations(v02, v01, &all_migrations).unwrap(), + select_migrations(&v02, &v01, &all_migrations).unwrap(), vec![m02_1] ); // Test going forward a few minor versions assert_eq!( - select_migrations(v01, v04, &all_migrations).unwrap(), + select_migrations(&v01, &v04, &all_migrations).unwrap(), vec![m02_1, m03_1, m04_1, m04_2] ); // Test going backward a few minor versions assert_eq!( - select_migrations(v04, v01, &all_migrations).unwrap(), + 
select_migrations(&v04, &v01, &all_migrations).unwrap(), vec![m04_2, m04_1, m03_1, m02_1] ); // Test no matching migrations - assert!(select_migrations(v04, v05, &all_migrations) + assert!(select_migrations(&v04, &v05, &all_migrations) .unwrap() .is_empty()); } diff --git a/workspaces/api/storewolf/Cargo.toml b/workspaces/api/storewolf/Cargo.toml index 1ee8efb24b6..30e85ef76ca 100644 --- a/workspaces/api/storewolf/Cargo.toml +++ b/workspaces/api/storewolf/Cargo.toml @@ -9,10 +9,11 @@ build = "build.rs" [dependencies] apiserver = { path = "../apiserver" } -data_store_version = { path = "../data_store_version" } +bottlerocket-release = { path = "../../bottlerocket-release" } log = "0.4" models = { path = "../../models" } rand = { version = "0.7", default-features = false, features = ["std"] } +semver = "0.9" simplelog = "0.7" snafu = "0.6" toml = "0.5" diff --git a/workspaces/api/storewolf/README.md b/workspaces/api/storewolf/README.md index 2ffa2ad6ca0..146472d13e9 100644 --- a/workspaces/api/storewolf/README.md +++ b/workspaces/api/storewolf/README.md @@ -4,7 +4,7 @@ Current version: 0.1.0 ## Introduction -storewolf is a small program to create the filesystem datastore. +storewolf creates the filesystem datastore used by the API system. It creates the datastore at a provided path and populates any default settings given in the defaults.toml file, unless they already exist. diff --git a/workspaces/api/storewolf/src/main.rs b/workspaces/api/storewolf/src/main.rs index 55406f04e86..ae3e6d140df 100644 --- a/workspaces/api/storewolf/src/main.rs +++ b/workspaces/api/storewolf/src/main.rs @@ -1,7 +1,7 @@ /*! # Introduction -storewolf is a small program to create the filesystem datastore. +storewolf creates the filesystem datastore used by the API system. It creates the datastore at a provided path and populates any default settings given in the defaults.toml file, unless they already exist. 
@@ -12,6 +12,7 @@ settings given in the defaults.toml file, unless they already exist. extern crate log; use rand::{distributions::Alphanumeric, thread_rng, Rng}; +use semver::Version; use simplelog::{Config as LogConfig, LevelFilter, TermLogger, TerminalMode}; use snafu::{ensure, OptionExt, ResultExt}; use std::collections::{HashMap, HashSet}; @@ -26,11 +27,10 @@ use toml::{map::Entry, Value}; use apiserver::datastore::key::{Key, KeyType}; use apiserver::datastore::serialization::{to_pairs, to_pairs_with_prefix}; use apiserver::datastore::{self, DataStore, FilesystemDataStore, ScalarError}; -use data_store_version::Version; +use bottlerocket_release::BottlerocketRelease; use model::modeled_types::SingleLineString; // FIXME Get these from configuration in the future -const DATASTORE_VERSION_FILE: &str = "/usr/share/bottlerocket/data-store-version"; // Shared transaction used by boot-time services. const TRANSACTION: &str = "bottlerocket-launch"; @@ -40,7 +40,6 @@ mod error { use apiserver::datastore::key::KeyType; use apiserver::datastore::{self, serialization, ScalarError}; - use data_store_version::error::Error as DataStoreVersionError; use model::modeled_types::error::Error as ModeledTypesError; use snafu::Snafu; @@ -54,10 +53,9 @@ mod error { #[snafu(display("Unable to create datastore at '{}': {}", path.display(), source))] DatastoreCreation { path: PathBuf, source: io::Error }, - #[snafu(display("Unable to read datastore version from '{}': {}", path.display(), source))] - DatastoreVersion { - path: PathBuf, - source: DataStoreVersionError, + #[snafu(display("Unable to get OS version: {}", source))] + ReleaseVersion { + source: bottlerocket_release::Error, }, #[snafu(display("{} is not valid TOML: {}", file, source))] @@ -133,38 +131,44 @@ type Result = std::result::Result; /// Given a base path, create a brand new datastore with the appropriate /// symlink structure for the desired datastore version. 
/// -/// If `version` is given, uses it, otherwise pulls version from DATASTORE_VERSION_FILE. +/// If `version` is given, uses it, otherwise pulls version from /etc/os-release. /// /// An example setup for theoretical version 1.5: /// /path/to/datastore/current /// -> /path/to/datastore/v1 /// -> /path/to/datastore/v1.5 -/// -> /path/to/datastore/v1.5_0123456789abcdef +/// -> /path/to/datastore/v1.5.2 +/// -> /path/to/datastore/v1.5.2_0123456789abcdef fn create_new_datastore>(base_path: P, version: Option) -> Result<()> { - // Get the datastore version from the version file - let datastore_version = match version { + let version = match version { Some(v) => v, - None => Version::from_file(&DATASTORE_VERSION_FILE).context(error::DatastoreVersion { - path: &DATASTORE_VERSION_FILE, - })?, + None => { + let br = BottlerocketRelease::new().context(error::ReleaseVersion)?; + br.version_id + }, }; + // Create random string to append to the end of the new datastore path let random_id: String = thread_rng().sample_iter(&Alphanumeric).take(16).collect(); // Build the various paths to which we'll symlink - // /path/to/datastore/v1.5_0123456789abcdef - let data_store_filename = format!("{}_{}", datastore_version, random_id); - let data_store_path = base_path.as_ref().join(&data_store_filename); - // /path/to/datastore/v1 - let major_version_filename = format!("v{}", datastore_version.major); + let major_version_filename = format!("v{}", version.major); let major_version_path = base_path.as_ref().join(&major_version_filename); // /path/to/datastore/v1.5 - let minor_version_filename = format!("{}", datastore_version); + let minor_version_filename = format!("v{}.{}", version.major, version.minor); let minor_version_path = base_path.as_ref().join(&minor_version_filename); + // /path/to/datastore/v1.5.2 + let patch_version_filename = format!("v{}.{}.{}", version.major, version.minor, version.patch); + let patch_version_path = base_path.as_ref().join(&patch_version_filename); + + // 
/path/to/datastore/v1.5.2_0123456789abcdef + let data_store_filename = format!("v{}.{}.{}_{}", version.major, version.minor, version.patch, random_id); + let data_store_path = base_path.as_ref().join(&data_store_filename); + // /path/to/datastore/current let current_path = base_path.as_ref().join("current"); @@ -174,8 +178,12 @@ fn create_new_datastore>(base_path: P, version: Option) })?; // Build our symlink chain (See example in docstring above) - // /path/to/datastore/v1.5 -> v1.5_0123456789abcdef - symlink(&data_store_filename, &minor_version_path).context(error::LinkCreate { + // /path/to/datastore/v1.5.2 -> v1.5.2_0123456789abcdef + symlink(&data_store_filename, &patch_version_path).context(error::LinkCreate { + path: &patch_version_path, + })?; + // /path/to/datastore/v1.5 -> v1.5.2 + symlink(&patch_version_filename, &minor_version_path).context(error::LinkCreate { path: &minor_version_path, })?; // /path/to/datastore/v1 -> v1.5 @@ -533,7 +541,7 @@ fn usage() -> ! { [ --version X.Y ] [ --log-level trace|debug|info|warn|error ] - If --version is not given, the version will be pulled from /usr/share/bottlerocket/data-store-version. + If --version is not given, the version will be pulled from /etc/os-release. This is used to set up versioned symlinks in the data store base path.
", program_name diff --git a/workspaces/bottlerocket-release/src/lib.rs b/workspaces/bottlerocket-release/src/lib.rs index 5ffef29a91d..270ba43d7ee 100644 --- a/workspaces/bottlerocket-release/src/lib.rs +++ b/workspaces/bottlerocket-release/src/lib.rs @@ -70,7 +70,6 @@ impl BottlerocketRelease { Some(part) => part, None => return None, }; - debug!("Found os-release value {}={}", key, value); // If the value was quoted (unnecessary in this file) then remove the quotes if value.starts_with('"') { @@ -80,6 +79,7 @@ impl BottlerocketRelease { value = &value[..value.len() - 1]; } + debug!("Found os-release value {}={}", key, value); Some((key.to_owned(), value.to_owned())) }) .collect(); diff --git a/workspaces/updater/update_metadata/Cargo.toml b/workspaces/updater/update_metadata/Cargo.toml index f396db6f967..57dd8298308 100644 --- a/workspaces/updater/update_metadata/Cargo.toml +++ b/workspaces/updater/update_metadata/Cargo.toml @@ -8,7 +8,6 @@ publish = false [dependencies] chrono = { version = "0.4.9", features = ["serde"] } -data_store_version = { path = "../../api/data_store_version" } rand = "0.7.0" regex = "1.1" semver = { version = "0.9.0", features = ["serde"] } diff --git a/workspaces/updater/update_metadata/src/de.rs b/workspaces/updater/update_metadata/src/de.rs index 7721786309b..ef35ff6d475 100644 --- a/workspaces/updater/update_metadata/src/de.rs +++ b/workspaces/updater/update_metadata/src/de.rs @@ -1,7 +1,7 @@ use crate::error; use chrono::{DateTime, Utc}; -use data_store_version::Version as DataVersion; use regex::Regex; +use semver::Version; use serde::{de::Error as _, Deserializer}; use snafu::{ensure, ResultExt}; use std::collections::BTreeMap; @@ -54,15 +54,17 @@ where deserializer.deserialize_map(Visitor) } -/// Converts the tuple keys to a `DataVersion` before insertion and catches duplicates +/// Converts the tuple keys to a `Version` before insertion and catches duplicates pub(crate) fn deserialize_migration<'de, D>( deserializer: D, -) -> 
Result>, D::Error> +) -> Result>, D::Error> where D: Deserializer<'de>, { fn parse_versions(key: &str) -> Result<(&str, &str), error::Error> { - let r = Regex::new(r"\((?P[0-9.]+),[ ]*(?P[0-9.]+)\)"); + // We don't need to be too strict about the version syntax here, it's parsed after we + // return these strings. We just want to make sure we have "(X, Y)" syntax and split it. + let r = Regex::new(r"\((?P[^ ,]+),[ ]*(?P[^ ,]+)\)"); if let Ok(regex) = r { if let Some(captures) = regex.captures(&key) { @@ -78,7 +80,7 @@ where fn parse_tuple_key( key: String, list: Vec, - map: &mut BTreeMap<(DataVersion, DataVersion), Vec>, + map: &mut BTreeMap<(Version, Version), Vec>, ) -> Result<(), error::Error> { let (from, to) = parse_versions(&key)?; @@ -101,7 +103,7 @@ where struct Visitor; impl<'de> serde::de::Visitor<'de> for Visitor { - type Value = BTreeMap<(DataVersion, DataVersion), Vec>; + type Value = BTreeMap<(Version, Version), Vec>; fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { formatter.write_str("a map") diff --git a/workspaces/updater/update_metadata/src/error.rs b/workspaces/updater/update_metadata/src/error.rs index 51a269176c7..07dfd93ded6 100644 --- a/workspaces/updater/update_metadata/src/error.rs +++ b/workspaces/updater/update_metadata/src/error.rs @@ -1,6 +1,6 @@ #![allow(clippy::default_trait_access)] -use data_store_version::Version as DataVersion; +use semver::Version; use snafu::{Backtrace, Snafu}; use std::path::PathBuf; @@ -23,14 +23,14 @@ pub enum Error { backtrace: Backtrace, }, - #[snafu(display("Could not parse datastore version: {}", key))] - BadDataVersion { + #[snafu(display("Could not parse OS version: {}", key))] + BadVersion { backtrace: Backtrace, key: String, - source: data_store_version::error::Error, + source: semver::SemVerError, }, - #[snafu(display("Could not parse datastore versions: {}", key))] + #[snafu(display("Could not parse OS versions: {}", key))] BadDataVersionsFromTo { backtrace: Backtrace, key: 
String }, #[snafu(display("Could not parse image version: {} - {}", key, value))] @@ -81,8 +81,8 @@ pub enum Error { MigrationInvalidTarget { backtrace: Backtrace, name: String, - to: DataVersion, - version: DataVersion, + to: Version, + version: Version, }, #[snafu(display( @@ -93,8 +93,8 @@ pub enum Error { #[snafu(display("Unable to get mutable reference to ({},{}) migrations", from, to))] MigrationMutable { backtrace: Backtrace, - from: DataVersion, - to: DataVersion, + from: Version, + to: Version, }, #[snafu(display("Failed to serialize update information: {}", source))] diff --git a/workspaces/updater/update_metadata/src/lib.rs b/workspaces/updater/update_metadata/src/lib.rs index 088dd70adc3..b060688ae6b 100644 --- a/workspaces/updater/update_metadata/src/lib.rs +++ b/workspaces/updater/update_metadata/src/lib.rs @@ -5,10 +5,9 @@ pub mod error; mod se; use chrono::{DateTime, Duration, Utc}; -use data_store_version::Version as DataVersion; use migrator::MIGRATION_FILENAME_RE; use rand::{thread_rng, Rng}; -use semver::Version as SemVer; +use semver::Version; use serde::{Deserialize, Serialize}; use snafu::{ensure, OptionExt, ResultExt}; use std::collections::BTreeMap; @@ -64,8 +63,8 @@ pub struct Images { pub struct Update { pub variant: String, pub arch: String, - pub version: SemVer, - pub max_version: SemVer, + pub version: Version, + pub max_version: Version, #[serde(deserialize_with = "de::deserialize_bound")] pub waves: BTreeMap>, pub images: Images, @@ -76,19 +75,18 @@ pub struct Manifest { pub updates: Vec, #[serde(deserialize_with = "de::deserialize_migration")] #[serde(serialize_with = "se::serialize_migration")] - pub migrations: BTreeMap<(DataVersion, DataVersion), Vec>, - pub datastore_versions: BTreeMap, + pub migrations: BTreeMap<(Version, Version), Vec>, } -#[derive(Debug, Default, Serialize, Deserialize)] +#[derive(Debug, Serialize, Deserialize)] pub struct Release { - pub version: String, + pub version: Version, /// For now, this matches the 
Manifest struct, but having a separate struct gives us the /// flexibility to have a different, human-oriented representation in the release TOML compared /// to the machine-oriented representation in the manifest. #[serde(deserialize_with = "de::deserialize_migration")] #[serde(serialize_with = "se::serialize_migration")] - pub migrations: BTreeMap<(DataVersion, DataVersion), Vec>, + pub migrations: BTreeMap<(Version, Version), Vec>, } pub fn load_file(path: &Path) -> Result { @@ -106,8 +104,8 @@ impl Manifest { pub fn add_migration( &mut self, append: bool, - from: DataVersion, - to: DataVersion, + from: Version, + to: Version, migration_list: Vec, ) -> Result<()> { // Check each migration matches the filename conventions used by the migrator @@ -119,8 +117,8 @@ impl Manifest { let version_match = captures .name("version") .context(error::BadRegexVersion { name })?; - let version = DataVersion::from_str(version_match.as_str()) - .context(error::BadDataVersion { key: name })?; + let version = Version::from_str(version_match.as_str()) + .context(error::BadVersion { key: name })?; ensure!( version == to, error::MigrationInvalidTarget { name, to, version } @@ -132,11 +130,11 @@ impl Manifest { } // If append is true, append the new migrations to the existing vec. 
- if append && self.migrations.contains_key(&(from, to)) { + if append && self.migrations.contains_key(&(from.clone(), to.clone())) { let migrations = self .migrations - .get_mut(&(from, to)) - .context(error::MigrationMutable { from: from, to: to })?; + .get_mut(&(from.clone(), to.clone())) + .context(error::MigrationMutable { from, to })?; migrations.extend_from_slice(&migration_list); // Otherwise just overwrite the existing migrations } else { @@ -147,9 +145,8 @@ impl Manifest { pub fn add_update( &mut self, - image_version: SemVer, - max_version: Option, - datastore_version: DataVersion, + image_version: Version, + max_version: Option, arch: String, variant: String, images: Images, @@ -172,8 +169,6 @@ impl Manifest { images, waves: BTreeMap::new(), }; - self.datastore_versions - .insert(image_version, datastore_version); self.update_max_version( &update.max_version, Some(&update.arch), @@ -187,7 +182,7 @@ impl Manifest { /// architecture and variant of some new update. pub fn update_max_version( &mut self, - version: &SemVer, + version: &Version, arch: Option<&str>, variant: Option<&str>, ) { @@ -229,7 +224,7 @@ impl Manifest { &mut self, variant: String, arch: String, - image_version: SemVer, + image_version: Version, bound: u32, start: DateTime, ) -> Result { @@ -253,7 +248,7 @@ impl Manifest { &mut self, variant: String, arch: String, - image_version: SemVer, + image_version: Version, bound: u32, ) -> Result<()> { let matching: Vec<&mut Update> = self diff --git a/workspaces/updater/update_metadata/src/se.rs b/workspaces/updater/update_metadata/src/se.rs index da3b80d72ae..1ac26624c80 100644 --- a/workspaces/updater/update_metadata/src/se.rs +++ b/workspaces/updater/update_metadata/src/se.rs @@ -1,10 +1,10 @@ -use data_store_version::Version as DataVersion; +use semver::Version; use serde::ser::Error as _; use serde::{Serialize, Serializer}; use std::collections::BTreeMap; pub(crate) fn serialize_migration( - value: &BTreeMap<(DataVersion, DataVersion), 
Vec>, + value: &BTreeMap<(Version, Version), Vec>, serializer: S, ) -> Result where diff --git a/workspaces/updater/updog/Cargo.toml b/workspaces/updater/updog/Cargo.toml index 01c538de219..fe3598f1369 100644 --- a/workspaces/updater/updog/Cargo.toml +++ b/workspaces/updater/updog/Cargo.toml @@ -7,8 +7,8 @@ edition = "2018" publish = false [dependencies] +bottlerocket-release = { path = "../../bottlerocket-release" } chrono = "0.4.9" -data_store_version = { path = "../../api/data_store_version" } log = "0.4" lz4 = "1.23.1" rand = "0.7.0" diff --git a/workspaces/updater/updog/src/bin/updata.rs b/workspaces/updater/updog/src/bin/updata.rs index 76e49d6cc61..8e82fee4632 100644 --- a/workspaces/updater/updog/src/bin/updata.rs +++ b/workspaces/updater/updog/src/bin/updata.rs @@ -9,14 +9,13 @@ extern crate log; use crate::error::Result; use chrono::{DateTime, Utc}; -use data_store_version::Version as DataVersion; -use semver::Version as SemVer; +use semver::Version; use simplelog::{Config as LogConfig, LevelFilter, TermLogger, TerminalMode}; use snafu::{ErrorCompat, OptionExt, ResultExt}; use std::fs; use std::path::PathBuf; use structopt::StructOpt; -use update_metadata::{Images, Manifest, Release, Update}; +use update_metadata::{Images, Manifest, Release}; #[derive(Debug, StructOpt)] struct GeneralArgs { @@ -35,19 +34,15 @@ struct AddUpdateArgs { // image version #[structopt(short = "v", long = "version")] - image_version: SemVer, + image_version: Version, // architecture image is built for #[structopt(short = "a", long = "arch")] arch: String, - // corresponding datastore version for this image - #[structopt(short = "d", long = "data-version")] - datastore_version: DataVersion, - // maximum valid version #[structopt(short = "m", long = "max-version")] - max_version: Option, + max_version: Option, // root image target name #[structopt(short = "r", long = "root")] @@ -72,7 +67,6 @@ impl AddUpdateArgs { manifest.add_update( self.image_version, self.max_version, - 
self.datastore_version, self.arch, self.variant, Images { @@ -97,19 +91,11 @@ struct RemoveUpdateArgs { // image version #[structopt(short = "v", long = "version")] - image_version: SemVer, + image_version: Version, // architecture image is built for #[structopt(short = "a", long = "arch")] arch: String, - - // Whether to clean up datastore mappings that no longer reference an - // existing update. Migration paths for such datastore versions are - // preserved. - // This should _only_ be used if there are no existing users of the - // specified Bottlerocket image version. - #[structopt(short, long)] - cleanup: bool, } impl RemoveUpdateArgs { @@ -121,22 +107,6 @@ impl RemoveUpdateArgs { || update.variant != self.variant || update.version != self.image_version }); - if self.cleanup { - let remaining: Vec<&Update> = manifest - .updates - .iter() - .filter(|update| update.version == self.image_version) - .collect(); - if remaining.is_empty() { - manifest.datastore_versions.remove(&self.image_version); - } else { - info!( - "Cleanup skipped; {} {} updates remain", - remaining.len(), - self.image_version - ); - } - } // Note: We don't revert the maximum version on removal update_metadata::write_file(&self.file, &manifest)?; if let Some(current) = manifest.updates.first() { @@ -165,7 +135,7 @@ struct WaveArgs { // image version #[structopt(short = "v", long = "version")] - image_version: SemVer, + image_version: Version, // architecture image is built for #[structopt(short = "a", long = "arch")] @@ -243,7 +213,7 @@ struct MaxVersionArgs { // maximum valid version #[structopt(short, long)] - max_version: SemVer, + max_version: Version, } impl MaxVersionArgs { @@ -255,36 +225,6 @@ impl MaxVersionArgs { } } -#[derive(Debug, StructOpt)] -struct MappingArgs { - // metadata file to create/modify - file: PathBuf, - - #[structopt(short, long)] - image_version: SemVer, - - #[structopt(short, long)] - data_version: DataVersion, -} - -impl MappingArgs { - fn run(self) -> Result<()> 
{ - let mut manifest: Manifest = update_metadata::load_file(&self.file)?; - let version = self.image_version.clone(); - let old = manifest - .datastore_versions - .insert(self.image_version, self.data_version); - if let Some(old) = old { - warn!( - "Warning: New mapping ({},{}) replaced old mapping ({},{})", - version, self.data_version, version, old - ); - } - update_metadata::write_file(&self.file, &manifest)?; - Ok(()) - } -} - #[derive(Debug, StructOpt)] #[structopt(rename_all = "kebab-case")] enum Command { @@ -294,8 +234,6 @@ enum Command { AddUpdate(AddUpdateArgs), /// Add a (bound_id, time) wave to an existing update AddWave(WaveArgs), - /// Add a image_version:data_store_version mapping to the manifest - AddVersionMapping(MappingArgs), /// Set the global maximum image version SetMaxVersion(MaxVersionArgs), /// Remove an update from the manifest, including wave information @@ -322,7 +260,6 @@ fn main_inner() -> Result<()> { } Command::AddUpdate(args) => args.run(), Command::AddWave(args) => args.add(), - Command::AddVersionMapping(args) => args.run(), Command::SetMaxVersion(args) => args.run(), Command::RemoveUpdate(args) => args.run(), Command::RemoveWave(args) => args.remove(), @@ -356,7 +293,6 @@ mod tests { use super::*; use chrono::Duration; use std::path::Path; - use std::str::FromStr; use tempfile::NamedTempFile; #[test] @@ -420,9 +356,8 @@ mod tests { file: PathBuf::from(tmpfd.path()), variant: String::from("yum"), arch: String::from("x86_64"), - image_version: SemVer::parse("1.2.3").unwrap(), - max_version: Some(SemVer::parse("1.2.3").unwrap()), - datastore_version: DataVersion::from_str("1.0").unwrap(), + image_version: Version::parse("1.2.3").unwrap(), + max_version: Some(Version::parse("1.2.3").unwrap()), boot: String::from("boot"), root: String::from("root"), hash: String::from("hash"), @@ -433,9 +368,8 @@ mod tests { file: PathBuf::from(tmpfd.path()), variant: String::from("yum"), arch: String::from("x86_64"), - image_version: 
SemVer::parse("1.2.5").unwrap(), - max_version: Some(SemVer::parse("1.2.3").unwrap()), - datastore_version: DataVersion::from_str("1.0").unwrap(), + image_version: Version::parse("1.2.5").unwrap(), + max_version: Some(Version::parse("1.2.3").unwrap()), boot: String::from("boot"), root: String::from("root"), hash: String::from("hash"), @@ -446,9 +380,8 @@ mod tests { file: PathBuf::from(tmpfd.path()), variant: String::from("yum"), arch: String::from("x86_64"), - image_version: SemVer::parse("1.2.4").unwrap(), - max_version: Some(SemVer::parse("1.2.4").unwrap()), - datastore_version: DataVersion::from_str("1.0").unwrap(), + image_version: Version::parse("1.2.4").unwrap(), + max_version: Some(Version::parse("1.2.4").unwrap()), boot: String::from("boot"), root: String::from("root"), hash: String::from("hash"), @@ -458,72 +391,11 @@ mod tests { let m: Manifest = update_metadata::load_file(tmpfd.path())?; for u in m.updates { - assert!(u.max_version == SemVer::parse("1.2.4").unwrap()); + assert!(u.max_version == Version::parse("1.2.4").unwrap()); } Ok(()) } - #[test] - fn datastore_mapping() -> Result<()> { - let tmpfd = NamedTempFile::new().context(error::TmpFileCreate)?; - AddUpdateArgs { - file: PathBuf::from(tmpfd.path()), - variant: String::from("yum"), - arch: String::from("x86_64"), - image_version: SemVer::parse("1.2.3").unwrap(), - max_version: Some(SemVer::parse("1.2.3").unwrap()), - datastore_version: DataVersion::from_str("1.0").unwrap(), - boot: String::from("boot"), - root: String::from("root"), - hash: String::from("hash"), - } - .run() - .unwrap(); - AddUpdateArgs { - file: PathBuf::from(tmpfd.path()), - variant: String::from("yum"), - arch: String::from("x86_64"), - image_version: SemVer::parse("1.2.5").unwrap(), - max_version: Some(SemVer::parse("1.2.3").unwrap()), - datastore_version: DataVersion::from_str("1.1").unwrap(), - boot: String::from("boot"), - root: String::from("root"), - hash: String::from("hash"), - } - .run() - .unwrap(); - AddUpdateArgs 
{ - file: PathBuf::from(tmpfd.path()), - variant: String::from("yum"), - arch: String::from("x86_64"), - image_version: SemVer::parse("1.2.4").unwrap(), - max_version: Some(SemVer::parse("1.2.4").unwrap()), - datastore_version: DataVersion::from_str("1.0").unwrap(), - boot: String::from("boot"), - root: String::from("root"), - hash: String::from("hash"), - } - .run() - .unwrap(); - - // TODO this needs to test against arch and variant not being considered - RemoveUpdateArgs { - file: PathBuf::from(tmpfd.path()), - variant: String::from("yum"), - arch: String::from("x86_64"), - image_version: SemVer::parse("1.2.4").unwrap(), - cleanup: true, - } - .run() - .unwrap(); - - let m: Manifest = update_metadata::load_file(tmpfd.path())?; - assert!(m - .datastore_versions - .contains_key(&SemVer::parse("1.2.3").unwrap())); - Ok(()) - } - #[test] fn ordered_waves() -> Result<()> { let tmpfd = NamedTempFile::new().context(error::TmpFileCreate)?; @@ -531,9 +403,8 @@ mod tests { file: PathBuf::from(tmpfd.path()), variant: String::from("yum"), arch: String::from("x86_64"), - image_version: SemVer::parse("1.2.3").unwrap(), - max_version: Some(SemVer::parse("1.2.3").unwrap()), - datastore_version: DataVersion::from_str("1.0").unwrap(), + image_version: Version::parse("1.2.3").unwrap(), + max_version: Some(Version::parse("1.2.3").unwrap()), boot: String::from("boot"), root: String::from("root"), hash: String::from("hash"), @@ -545,7 +416,7 @@ mod tests { file: PathBuf::from(tmpfd.path()), variant: String::from("yum"), arch: String::from("x86_64"), - image_version: SemVer::parse("1.2.3").unwrap(), + image_version: Version::parse("1.2.3").unwrap(), bound: 1024, start: Some(Utc::now()), } @@ -556,7 +427,7 @@ mod tests { file: PathBuf::from(tmpfd.path()), variant: String::from("yum"), arch: String::from("x86_64"), - image_version: SemVer::parse("1.2.3").unwrap(), + image_version: Version::parse("1.2.3").unwrap(), bound: 1536, start: Some(Utc::now() - Duration::hours(1)), } diff --git 
a/workspaces/updater/updog/src/error.rs b/workspaces/updater/updog/src/error.rs index 6325771e541..a30702e63e3 100644 --- a/workspaces/updater/updog/src/error.rs +++ b/workspaces/updater/updog/src/error.rs @@ -1,7 +1,6 @@ #![allow(clippy::default_trait_access)] -use data_store_version::Version as DataVersion; -use semver::Version as SemVer; +use semver::Version; use snafu::{Backtrace, Snafu}; use std::path::PathBuf; use update_metadata::error::Error as update_metadata_error; @@ -104,14 +103,8 @@ pub(crate) enum Error { #[snafu(display("Migration ({},{}) not present in manifest", from, to))] MigrationNotPresent { backtrace: Backtrace, - from: DataVersion, - to: DataVersion, - }, - - #[snafu(display("Missing datastore version in metadata: {:?}", version))] - MissingDataVersion { - backtrace: Backtrace, - version: DataVersion, + from: Version, + to: Version, }, #[snafu(display( @@ -121,8 +114,8 @@ pub(crate) enum Error { ))] MissingMigration { backtrace: Backtrace, - current: DataVersion, - target: DataVersion, + current: Version, + target: Version, }, #[snafu(display("Missing version in metadata: {}", version))] @@ -183,6 +176,11 @@ pub(crate) enum Error { backtrace: Backtrace, }, + #[snafu(display("Unable to get OS version: {}", source))] + ReleaseVersion { + source: bottlerocket_release::Error, + }, + #[snafu(display("Failed setting permissions of '{}': {}", path.display(), source))] SetPermissions { path: PathBuf, @@ -213,7 +211,7 @@ pub(crate) enum Error { #[snafu(display("Update {} exists but wave in the future", version))] UpdateNotReady { backtrace: Backtrace, - version: SemVer, + version: Version, }, #[snafu(display("Failed to serialize update information: {}", source))] @@ -231,22 +229,6 @@ pub(crate) enum Error { backtrace: Backtrace, }, - #[snafu(display("Failed to determine VERSION_ID from /etc/os-release"))] - VersionIdNotFound { backtrace: Backtrace }, - - #[snafu(display("Failed to parse VERSION_ID from /etc/os-release: {}", line))] - VersionIdParse { 
- source: semver::SemVerError, - backtrace: Backtrace, - line: String, - }, - - #[snafu(display("Failed to read /etc/os-release: {}", source))] - VersionIdRead { - source: std::io::Error, - backtrace: Backtrace, - }, - #[snafu(display("--start-time