Merge pull request #2 from 00xc/utils/immut
ImmutAfterInit: misuse runtime detection
joergroedel committed Jun 19, 2023
2 parents 631ae3b + 10b7610 commit 411e12b
Showing 7 changed files with 109 additions and 52 deletions.
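
The pattern applied at every call site below: init() and reinit() were
previously unsafe fns whose single-initialization contract callers had to
uphold; after this commit, init() returns an ImmutAfterInitResult that
reports misuse at runtime in debug builds. A minimal before/after sketch
(the cell and value names are illustrative, not from the tree):

    static CELL: ImmutAfterInitCell<u32> = ImmutAfterInitCell::uninit();

    // Before: the caller alone guarantees this is the only initialization.
    // unsafe { CELL.init(&42) };

    // After: a second initialization surfaces as an Err in debug builds.
    CELL.init(&42).expect("Already initialized");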
src/console.rs (6 changes: 4 additions & 2 deletions)

@@ -44,7 +44,7 @@ pub static WRITER: SpinLock<Console> = SpinLock::new(unsafe {
 static CONSOLE_INITIALIZED: ImmutAfterInitCell<bool> = ImmutAfterInitCell::new(false);
 
 pub fn init_console() {
-    unsafe { CONSOLE_INITIALIZED.reinit(&true) };
+    CONSOLE_INITIALIZED.reinit(&true);
 }
 
 #[doc(hidden)]
@@ -121,7 +121,9 @@ static CONSOLE_LOGGER: ImmutAfterInitCell<ConsoleLogger> = ImmutAfterInitCell::u
 
 pub fn install_console_logger(component: &'static str) {
     let logger = ConsoleLogger::new(component);
-    unsafe { CONSOLE_LOGGER.init(&logger) };
+    CONSOLE_LOGGER
+        .init(&logger)
+        .expect("Already initialized console logger");
 
     if let Err(e) = log::set_logger(&*CONSOLE_LOGGER) {
         // Failed to install the ConsoleLogger, presumably because something had
src/cpu/cpuid.rs (4 changes: 3 additions & 1 deletion)

@@ -35,7 +35,9 @@ pub struct SnpCpuidTable {
 }
 
 pub fn register_cpuid_table(table: &'static SnpCpuidTable) {
-    unsafe { CPUID_PAGE.init_from_ref(table) };
+    CPUID_PAGE
+        .init_from_ref(table)
+        .expect("Could not initialize CPUID page");
 }
 
 pub struct CpuidResult {
src/mm/address_space.rs (19 changes: 4 additions & 15 deletions)

@@ -15,28 +15,17 @@ struct KernelMapping {
     phys_start: PhysAddr,
 }
 
-impl KernelMapping {
-    pub const fn new() -> Self {
-        KernelMapping {
-            virt_start: VirtAddr::null(),
-            virt_end: VirtAddr::null(),
-            phys_start: PhysAddr::null(),
-        }
-    }
-}
-
-static KERNEL_MAPPING: ImmutAfterInitCell<KernelMapping> =
-    ImmutAfterInitCell::new(KernelMapping::new());
+static KERNEL_MAPPING: ImmutAfterInitCell<KernelMapping> = ImmutAfterInitCell::uninit();
 
 pub fn init_kernel_mapping_info(vstart: VirtAddr, vend: VirtAddr, pstart: PhysAddr) {
     let km = KernelMapping {
         virt_start: vstart,
         virt_end: vend,
         phys_start: pstart,
     };
-    unsafe {
-        KERNEL_MAPPING.init(&km);
-    }
+    KERNEL_MAPPING
+        .init(&km)
+        .expect("Already initialized kernel mapping info");
 }
 
 #[cfg(not(test))]
src/mm/pagetable.rs (14 changes: 5 additions & 9 deletions)

@@ -32,7 +32,7 @@ pub fn paging_init_early() {
     let mut feature_mask = PTEntryFlags::all();
     feature_mask.remove(PTEntryFlags::NX);
     feature_mask.remove(PTEntryFlags::GLOBAL);
-    unsafe { FEATURE_MASK.reinit(&feature_mask) };
+    FEATURE_MASK.reinit(&feature_mask);
 }
 
 pub fn paging_init() {
@@ -45,15 +45,15 @@ pub fn paging_init() {
     if !cpu_has_pge() {
         feature_mask.remove(PTEntryFlags::GLOBAL);
     }
-    unsafe { FEATURE_MASK.reinit(&feature_mask) };
+    FEATURE_MASK.reinit(&feature_mask);
 }
 
 fn init_encrypt_mask() {
     // Find C bit position
     let res = cpuid_table(0x8000001f).expect("Can not get C-Bit position from CPUID table");
     let c_bit = res.ebx & 0x3f;
     let mask = 1u64 << c_bit;
-    unsafe { ENCRYPT_MASK.reinit(&(mask as usize)) };
+    ENCRYPT_MASK.reinit(&(mask as usize));
 
     // Find physical address size.
     let res = cpuid_table(0x80000008).expect("Can not get physical address size from CPUID table");
@@ -74,9 +74,7 @@ fn init_encrypt_mask() {
     let effective_phys_addr_size = cmp::min(c_bit, phys_addr_size);
 
     let max_addr = 1 << effective_phys_addr_size;
-    unsafe {
-        MAX_PHYS_ADDR.reinit(&max_addr);
-    }
+    MAX_PHYS_ADDR.reinit(&max_addr);
 
     // KVM currently sets all bits when executing the launch update for the
     // launch vCPU's VMSA, but the firmware will truncate the gPA to the limit
@@ -85,9 +83,7 @@ fn init_encrypt_mask() {
     // future. This can be removed once the patches for that land.
     let launch_vmsa_addr = (1u64 << phys_addr_size) - 0x1000;
     let launch_vmsa_addr = PhysAddr::from(launch_vmsa_addr);
-    unsafe {
-        LAUNCH_VMSA_ADDR.reinit(&launch_vmsa_addr);
-    }
+    LAUNCH_VMSA_ADDR.reinit(&launch_vmsa_addr);
 }
 
 fn encrypt_mask() -> usize {
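
For context on the unchanged arithmetic in init_encrypt_mask() above: CPUID
leaf 0x8000001f reports the C-bit (encryption bit) position in the low six
bits of EBX. A worked sketch, assuming an illustrative C-bit position of 51
(the real value comes from the firmware-provided CPUID table):

    // Illustrative EBX value; the real code reads the SNP CPUID page.
    let ebx: u32 = 51;
    let c_bit = ebx & 0x3f;   // 51
    let mask = 1u64 << c_bit; // 0x0008_0000_0000_0000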
src/sev/status.rs (4 changes: 3 additions & 1 deletion)

@@ -139,7 +139,9 @@ fn sev_flags() -> SEVStatusFlags {
 
 pub fn sev_status_init() {
     let status: SEVStatusFlags = read_sev_status();
-    unsafe { SEV_FLAGS.init(&status) };
+    SEV_FLAGS
+        .init(&status)
+        .expect("Already initialized SEV flags");
 }
 
 pub fn sev_es_enabled() -> bool {
src/svsm.rs (12 changes: 8 additions & 4 deletions)

@@ -327,12 +327,16 @@ pub extern "C" fn svsm_start(li: &KernelLaunchInfo, vb_addr: VirtAddr) {
     load_gdt();
     early_idt_init();
 
-    unsafe {
-        LAUNCH_INFO.init(li);
-    }
+    LAUNCH_INFO
+        .init(li)
+        .expect("Already initialized launch info");
 
     let cpuid_table_virt = VirtAddr::from(launch_info.cpuid_page);
-    unsafe { CPUID_PAGE.init(&*(cpuid_table_virt.as_ptr::<SnpCpuidTable>())) };
+    unsafe {
+        CPUID_PAGE
+            .init(&*(cpuid_table_virt.as_ptr::<SnpCpuidTable>()))
+            .expect("Already initialized CPUID page")
+    };
     register_cpuid_table(&CPUID_PAGE);
     dump_cpuid_table();
 
src/utils/immut_after_init.rs (102 changes: 82 additions & 20 deletions)

@@ -9,6 +9,21 @@ use core::marker::Copy;
 use core::marker::PhantomData;
 use core::mem::MaybeUninit;
 use core::ops::Deref;
+#[cfg(debug_assertions)]
+use core::sync::atomic::{AtomicBool, Ordering};
+
+#[cfg(not(debug_assertions))]
+pub type ImmutAfterInitResult<T> = Result<T, core::convert::Infallible>;
+
+#[cfg(debug_assertions)]
+pub type ImmutAfterInitResult<T> = Result<T, ImmutAfterInitError>;
+
+#[cfg(debug_assertions)]
+#[derive(Clone, Copy, Debug)]
+pub enum ImmutAfterInitError {
+    AlreadyInit,
+    Uninitialized,
+}
 
 /// A memory location which is effectively immutable after initalization code
 /// has run.
@@ -52,62 +67,110 @@ use core::ops::Deref;
 /// }
 /// ```
 ///
-#[repr(transparent)]
-pub struct ImmutAfterInitCell<T: Copy> {
+pub struct ImmutAfterInitCell<T> {
     #[doc(hidden)]
     data: UnsafeCell<MaybeUninit<T>>,
+    // Used to keep track of the initialization state. Even though this
+    // is atomic, the data structure does not guarantee thread safety.
+    #[cfg(debug_assertions)]
+    init: AtomicBool,
 }
 
-impl<T: Copy> ImmutAfterInitCell<T> {
+impl<T> ImmutAfterInitCell<T> {
     /// Create an unitialized `ImmutAfterInitCell` instance. The value must get
     /// initialized by means of [`Self::init()`] before first usage.
     pub const fn uninit() -> Self {
-        ImmutAfterInitCell {
+        Self {
             data: UnsafeCell::new(MaybeUninit::uninit()),
+            #[cfg(debug_assertions)]
+            init: AtomicBool::new(false),
         }
     }
 
+    fn set_init(&self) {
+        #[cfg(debug_assertions)]
+        self.init.store(true, Ordering::Relaxed);
+    }
+
+    fn check_init(&self) -> ImmutAfterInitResult<()> {
+        #[cfg(debug_assertions)]
+        if !self.init.load(Ordering::Relaxed) {
+            return Err(ImmutAfterInitError::Uninitialized);
+        }
+        Ok(())
+    }
+
+    fn check_uninit(&self) -> ImmutAfterInitResult<()> {
+        #[cfg(debug_assertions)]
+        if self.init.load(Ordering::Relaxed) {
+            return Err(ImmutAfterInitError::AlreadyInit);
+        }
+        Ok(())
+    }
+
+    // The caller must check the initialization status to avoid double init bugs
+    unsafe fn set_inner(&self, v: &T) {
+        self.set_init();
+        (*self.data.get())
+            .as_mut_ptr()
+            .copy_from_nonoverlapping(v as *const T, 1)
+    }
+
+    // The caller must ensure that the cell is initialized
+    unsafe fn get_inner(&self) -> &T {
+        (*self.data.get()).assume_init_ref()
+    }
+
+    fn try_get_inner(&self) -> ImmutAfterInitResult<&T> {
+        self.check_init()?;
+        unsafe { Ok(self.get_inner()) }
+    }
+
     /// Initialize an uninitialized `ImmutAfterInitCell` instance from a value.
     ///
     /// Must **not** get called on an already initialized instance!
     ///
     /// * `v` - Initialization value.
-    pub unsafe fn init(&self, v: &T) {
-        core::ptr::copy_nonoverlapping(v as *const T, (*self.data.get()).as_mut_ptr(), 1);
+    pub fn init(&self, v: &T) -> ImmutAfterInitResult<()> {
+        self.check_uninit()?;
+        unsafe { self.set_inner(v) };
+        Ok(())
     }
 
     /// Reinitialize an initialized `ImmutAfterInitCell` instance from a value.
     ///
     /// Must **not** get called while any borrow via [`Self::deref()`] or
-    /// [`ImmutAfterInitRef::deref()`]is alive!
+    /// [`ImmutAfterInitRef::deref()`] is alive!
     ///
     /// * `v` - Initialization value.
-    pub unsafe fn reinit(&self, v: &T) {
-        self.init(v);
+    pub fn reinit(&self, v: &T) {
+        unsafe { self.set_inner(v) }
    }
 
     /// Create an initialized `ImmutAfterInitCell` instance from a value.
     ///
     /// * `v` - Initialization value.
     pub const fn new(v: T) -> Self {
-        ImmutAfterInitCell {
+        Self {
             data: UnsafeCell::new(MaybeUninit::new(v)),
+            #[cfg(debug_assertions)]
+            init: AtomicBool::new(true),
         }
     }
 }
 
-impl<T: Copy> Deref for ImmutAfterInitCell<T> {
+impl<T> Deref for ImmutAfterInitCell<T> {
     type Target = T;
 
     /// Dereference the wrapped value. Must **only ever** get called on an
     /// initialized instance!
     fn deref(&self) -> &T {
-        unsafe { (&*self.data.get()).assume_init_ref() }
+        self.try_get_inner().unwrap()
     }
 }
 
-unsafe impl<T: Copy> Send for ImmutAfterInitCell<T> {}
-unsafe impl<T: Copy> Sync for ImmutAfterInitCell<T> {}
+unsafe impl<T> Send for ImmutAfterInitCell<T> {}
+unsafe impl<T> Sync for ImmutAfterInitCell<T> {}
 
 /// A reference to a memory location which is effectively immutable after
 /// initalization code has run.
@@ -178,7 +241,6 @@ unsafe impl<T: Copy> Sync for ImmutAfterInitCell<T> {}
 /// }
 /// ```
 ///
-#[repr(transparent)]
 pub struct ImmutAfterInitRef<'a, T: 'a> {
     #[doc(hidden)]
     ptr: ImmutAfterInitCell<*const T>,
@@ -206,11 +268,11 @@ impl<'a, T> ImmutAfterInitRef<'a, T> {
     /// * `r` - Reference to the value to make the `ImmutAfterInitRef` to refer
     ///         to. By convention, the referenced value must have been
     ///         initialized already.
-    pub unsafe fn init_from_ref<'b>(&self, r: &'b T)
+    pub fn init_from_ref<'b>(&self, r: &'b T) -> ImmutAfterInitResult<()>
     where
         'b: 'a,
     {
-        self.ptr.init(&(r as *const T));
+        self.ptr.init(&(r as *const T))
     }
 
     /// Create an initialized `ImmutAfterInitRef` instance pointing to a value
@@ -234,7 +296,7 @@ impl<'a, T> ImmutAfterInitRef<'a, T> {
     }
 }
 
-impl<'a, T: Copy> ImmutAfterInitRef<'a, T> {
+impl<'a, T> ImmutAfterInitRef<'a, T> {
     /// Initialize an uninitialized `ImmutAfterInitRef` instance to point to
     /// value wrapped in a [`ImmutAfterInitCell`].
     ///
@@ -243,11 +305,11 @@ impl<'a, T: Copy> ImmutAfterInitRef<'a, T> {
     /// * `cell` - The value to make the `ImmutAfterInitRef` to refer to. By
     ///            convention, the referenced value must have been initialized
     ///            already.
-    pub unsafe fn init_from_cell<'b>(&self, cell: &'b ImmutAfterInitCell<T>)
+    pub fn init_from_cell<'b>(&self, cell: &'b ImmutAfterInitCell<T>) -> ImmutAfterInitResult<()>
    where
         'b: 'a,
     {
-        self.ptr.init(&(*cell.data.get()).as_ptr());
+        self.ptr.init(&(cell.try_get_inner()? as *const T))
     }
 }
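
A usage sketch of the reworked cell, assuming a build with debug_assertions
enabled so the AtomicBool misuse tracking is active (names are illustrative;
in release builds the checks compile away):

    static VALUE: ImmutAfterInitCell<u32> = ImmutAfterInitCell::uninit();

    fn setup() {
        // The first init() succeeds and flips the tracking flag.
        VALUE.init(&42).expect("Already initialized");
        // A second init() returns Err(ImmutAfterInitError::AlreadyInit)
        // instead of silently overwriting as the old unsafe API would.
        assert!(VALUE.init(&43).is_err());
        // Deref goes through try_get_inner() and panics on an
        // uninitialized cell rather than reading uninitialized memory.
        assert_eq!(*VALUE, 42);
    }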
