stage2: Use low memory for the stage2 heap
Moving the stage2 heap into low memory, away from the stage2 binary,
means that the stage2 binary can grow arbitrarily large without
affecting the amount of memory available for heap usage.  The area
below 640 KB is never populated with initial data for the guest OS, so
it is free for stage2 to use, and it is returned to a non-validated
state by the time the guest OS begins running.

Signed-off-by: Jon Lange <jlange@microsoft.com>
msft-jlange committed Aug 14, 2024
1 parent 8baf808 commit 6b5b181
Showing 7 changed files with 111 additions and 60 deletions.
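For orientation, the guest-physical layout that results from this change can be sketched as a few Rust constants. This is an illustrative summary assembled from the diffs below; the constant names are ours, not the project's:

    // Illustrative sketch only; the authoritative values are in the diffs below.
    const STAGE2_HEAP_START: u64 = 0x1_0000; // 64 KB: heap now starts in low memory
    const STAGE2_HEAP_END: u64 = 0xA_0000; // 640 KB: top of the always-free low-memory area
    const STAGE2_STACK: u64 = 0x80_5000;
    const SECRETS_PAGE: u64 = 0x80_6000;
    const CPUID_PAGE: u64 = 0x80_7000;
    const STAGE2_IMAGE_BASE: u64 = 0x80_8000;
    // The kernel ELF now follows the stage2 image, rounded up to a 4 KB
    // boundary, instead of sitting at the fixed address 0x8A0000.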
8 changes: 3 additions & 5 deletions igvmbuilder/src/gpa_map.rs
@@ -55,7 +55,6 @@ pub struct GpaMap {
     pub stage1_image: GpaRange,
     pub stage2_stack: GpaRange,
     pub stage2_image: GpaRange,
-    pub stage2_free: GpaRange,
     pub secrets_page: GpaRange,
     pub cpuid_page: GpaRange,
     pub kernel_elf: GpaRange,
@@ -108,9 +107,9 @@ impl GpaMap {
 
         let stage2_image = GpaRange::new(0x808000, stage2_len as u64)?;
 
-        // The kernel image is loaded beyond the end of the stage2 heap,
-        // at 0x8A0000.
-        let kernel_address = 0x8A0000;
+        // The kernel image is loaded beyond the end of the stage2 image,
+        // rounded up to a 4 KB boundary.
+        let kernel_address = (stage2_image.get_end() + 0xFFF) & !0xFFF;
         let kernel_elf = GpaRange::new(kernel_address, kernel_elf_len as u64)?;
         let kernel_fs = GpaRange::new(kernel_elf.get_end(), kernel_fs_len as u64)?;

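The new kernel_address computation is the usual power-of-two align-up idiom. A quick standalone sketch of the arithmetic (the function name is ours, not the project's):

    // Round a byte address up to the next 4 KB (0x1000) boundary.
    fn align_up_4k(addr: u64) -> u64 {
        (addr + 0xFFF) & !0xFFF
    }

    fn main() {
        // An image ending at 0x80_9234 places the kernel at 0x80_A000;
        // an already-aligned end address is left unchanged.
        assert_eq!(align_up_4k(0x80_9234), 0x80_A000);
        assert_eq!(align_up_4k(0x80_A000), 0x80_A000);
    }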
@@ -153,7 +152,6 @@ impl GpaMap {
             stage1_image,
             stage2_stack: GpaRange::new_page(0x805000)?,
             stage2_image,
-            stage2_free: GpaRange::new(stage2_image.get_end(), 0x8a0000 - &stage2_image.get_end())?,
             secrets_page: GpaRange::new_page(0x806000)?,
             cpuid_page: GpaRange::new_page(0x807000)?,
             kernel_elf,
8 changes: 0 additions & 8 deletions igvmbuilder/src/igvm_builder.rs
@@ -419,14 +419,6 @@ impl IgvmBuilder {
             IgvmPageDataType::NORMAL,
         )?;
 
-        // Populate the empty region above the stage 2 binary.
-        self.add_empty_pages(
-            self.gpa_map.stage2_free.get_start(),
-            self.gpa_map.stage2_free.get_size(),
-            COMPATIBILITY_MASK.get(),
-            IgvmPageDataType::NORMAL,
-        )?;
-
         // Populate the stage 2 binary.
         self.add_data_pages_from_file(
             &self.options.stage2.clone(),
Binary file added kernel/src/a.out
Binary file not shown.
117 changes: 85 additions & 32 deletions kernel/src/mm/address_space.rs
@@ -7,48 +7,97 @@
 use crate::address::{PhysAddr, VirtAddr};
 use crate::utils::immut_after_init::ImmutAfterInitCell;
 
-#[derive(Copy, Clone)]
+#[derive(Debug, Copy, Clone)]
 #[allow(dead_code)]
-struct KernelMapping {
+pub struct FixedAddressMappingRange {
     virt_start: VirtAddr,
     virt_end: VirtAddr,
     phys_start: PhysAddr,
 }
 
-static KERNEL_MAPPING: ImmutAfterInitCell<KernelMapping> = ImmutAfterInitCell::uninit();
+impl FixedAddressMappingRange {
+    pub fn new(virt_start: VirtAddr, virt_end: VirtAddr, phys_start: PhysAddr) -> Self {
+        Self {
+            virt_start,
+            virt_end,
+            phys_start,
+        }
+    }
+}
+
+#[derive(Debug, Copy, Clone)]
+#[cfg_attr(not(target_os = "none"), allow(dead_code))]
+pub struct FixedAddressMapping {
+    kernel_mapping: FixedAddressMappingRange,
+    heap_mapping: Option<FixedAddressMappingRange>,
+}
+
+static FIXED_MAPPING: ImmutAfterInitCell<FixedAddressMapping> = ImmutAfterInitCell::uninit();
 
-pub fn init_kernel_mapping_info(vstart: VirtAddr, vend: VirtAddr, pstart: PhysAddr) {
-    let km = KernelMapping {
-        virt_start: vstart,
-        virt_end: vend,
-        phys_start: pstart,
+pub fn init_kernel_mapping_info(
+    kernel_mapping: FixedAddressMappingRange,
+    heap_mapping: Option<FixedAddressMappingRange>,
+) {
+    let mapping = FixedAddressMapping {
+        kernel_mapping,
+        heap_mapping,
     };
-    KERNEL_MAPPING
-        .init(&km)
-        .expect("Already initialized kernel mapping info");
+    FIXED_MAPPING
+        .init(&mapping)
+        .expect("Already initialized fixed mapping info");
 }
 
 #[cfg(target_os = "none")]
+fn virt_to_phys_mapping(vaddr: VirtAddr, mapping: &FixedAddressMappingRange) -> Option<PhysAddr> {
+    if (vaddr < mapping.virt_start) || (vaddr >= mapping.virt_end) {
+        None
+    } else {
+        let offset: usize = vaddr - mapping.virt_start;
+        Some(mapping.phys_start + offset)
+    }
+}
+
+#[cfg(target_os = "none")]
 pub fn virt_to_phys(vaddr: VirtAddr) -> PhysAddr {
-    if vaddr < KERNEL_MAPPING.virt_start || vaddr >= KERNEL_MAPPING.virt_end {
-        panic!("Invalid physical address {:#018x}", vaddr);
+    if let Some(addr) = virt_to_phys_mapping(vaddr, &FIXED_MAPPING.kernel_mapping) {
+        return addr;
     }
+    if let Some(ref mapping) = &FIXED_MAPPING.heap_mapping {
+        if let Some(addr) = virt_to_phys_mapping(vaddr, mapping) {
+            return addr;
+        }
+    }
 
-    let offset: usize = vaddr - KERNEL_MAPPING.virt_start;
-
-    KERNEL_MAPPING.phys_start + offset
+    panic!("Invalid virtual address {:#018x}", vaddr);
 }
 
 #[cfg(target_os = "none")]
+fn phys_to_virt_mapping(paddr: PhysAddr, mapping: &FixedAddressMappingRange) -> Option<VirtAddr> {
+    if paddr < mapping.phys_start {
+        None
+    } else {
+        let size: usize = mapping.virt_end - mapping.virt_start;
+        if paddr >= mapping.phys_start + size {
+            None
+        } else {
+            let offset: usize = paddr - mapping.phys_start;
+            Some(mapping.virt_start + offset)
+        }
+    }
+}
+
+#[cfg(target_os = "none")]
 pub fn phys_to_virt(paddr: PhysAddr) -> VirtAddr {
-    let size: usize = KERNEL_MAPPING.virt_end - KERNEL_MAPPING.virt_start;
-    if paddr < KERNEL_MAPPING.phys_start || paddr >= KERNEL_MAPPING.phys_start + size {
-        panic!("Invalid physical address {:#018x}", paddr);
+    if let Some(addr) = phys_to_virt_mapping(paddr, &FIXED_MAPPING.kernel_mapping) {
+        return addr;
+    }
+    if let Some(ref mapping) = &FIXED_MAPPING.heap_mapping {
+        if let Some(addr) = phys_to_virt_mapping(paddr, mapping) {
+            return addr;
+        }
     }
 
-    let offset: usize = paddr - KERNEL_MAPPING.phys_start;
-
-    KERNEL_MAPPING.virt_start + offset
+    panic!("Invalid physical address {:#018x}", paddr);
 }
 
 #[cfg(not(target_os = "none"))]
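The lookup logic above now falls through from the kernel range to the optional heap range before panicking. A minimal self-contained sketch of the same control flow, with plain integers standing in for the crate's VirtAddr/PhysAddr types:

    // Simplified stand-ins to keep the sketch runnable on its own.
    struct Range { virt_start: usize, virt_end: usize, phys_start: usize }

    fn translate(vaddr: usize, r: &Range) -> Option<usize> {
        if vaddr < r.virt_start || vaddr >= r.virt_end {
            None
        } else {
            Some(r.phys_start + (vaddr - r.virt_start))
        }
    }

    fn virt_to_phys(vaddr: usize, kernel: &Range, heap: Option<&Range>) -> usize {
        translate(vaddr, kernel)
            .or_else(|| heap.and_then(|h| translate(vaddr, h)))
            .unwrap_or_else(|| panic!("Invalid virtual address {:#018x}", vaddr))
    }

    fn main() {
        let kernel = Range { virt_start: 0x808000, virt_end: 0x8A0000, phys_start: 0x808000 };
        let heap = Range { virt_start: 0x10000, virt_end: 0xA0000, phys_start: 0x10000 };
        // Falls through the kernel range and resolves in the heap range.
        assert_eq!(virt_to_phys(0x20000, &kernel, Some(&heap)), 0x20000);
    }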
@@ -169,7 +218,8 @@ mod tests {
     use super::*;
     use crate::locking::SpinLock;
 
-    static KERNEL_MAPPING_TEST: ImmutAfterInitCell<KernelMapping> = ImmutAfterInitCell::uninit();
+    static KERNEL_MAPPING_TEST: ImmutAfterInitCell<FixedAddressMapping> =
+        ImmutAfterInitCell::uninit();
     static INITIALIZED: SpinLock<bool> = SpinLock::new(false);
 
     #[test]
@@ -179,13 +229,16 @@
         if *initialized {
            return;
         }
-        KERNEL_MAPPING_TEST
-            .init(&KernelMapping {
-                virt_start: VirtAddr::new(0x1000),
-                virt_end: VirtAddr::new(0x2000),
-                phys_start: PhysAddr::new(0x3000),
-            })
-            .unwrap();
+        let kernel_mapping = FixedAddressMappingRange::new(
+            VirtAddr::new(0x1000),
+            VirtAddr::new(0x2000),
+            PhysAddr::new(0x3000),
+        );
+        let mapping = FixedAddressMapping {
+            kernel_mapping,
+            heap_mapping: None,
+        };
+        KERNEL_MAPPING_TEST.init(&mapping).unwrap();
         *initialized = true;
     }

@@ -196,9 +249,9 @@
 
         let km = &KERNEL_MAPPING_TEST;
 
-        assert_eq!(km.virt_start, VirtAddr::new(0x1000));
-        assert_eq!(km.virt_end, VirtAddr::new(0x2000));
-        assert_eq!(km.phys_start, PhysAddr::new(0x3000));
+        assert_eq!(km.kernel_mapping.virt_start, VirtAddr::new(0x1000));
+        assert_eq!(km.kernel_mapping.virt_end, VirtAddr::new(0x2000));
+        assert_eq!(km.kernel_mapping.phys_start, PhysAddr::new(0x3000));
     }
 
     #[test]
12 changes: 7 additions & 5 deletions kernel/src/stage2.lds
@@ -24,18 +24,20 @@ SECTIONS
 	}
 	. = ALIGN(16);
 	.data : { *(.data) }
-	. = ALIGN(16);
-	.rodata : { *(.rodata) }
 	edata = .;
-	. = ALIGN(4096);
+	. = ALIGN(16);
 	.bss : {
 		_bss = .;
 		*(.bss) *(.bss.[0-9a-zA-Z_]*)
 		. = ALIGN(16);
 		_ebss = .;
 	}
-	. = ALIGN(4096);
-	heap_start = .;
+	/* Move rodata to follow bss so that the in-memory image has the same
+	 * length as the ELF image. This is required so that the IGVM
+	 * builder does not have to parse the ELF file to know how much space
+	 * to reserve for BSS. */
+	. = ALIGN(16);
+	.rodata : { *(.rodata) }
 }
 
 ENTRY(startup_32)
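The point of the section shuffle: placing a sections-with-contents output section (.rodata) after .bss forces the zero-initialized region to be written out in the image, so the file length matches the in-memory length. Under that assumption, the builder can size the stage2 region from file metadata alone, roughly along these lines (a hypothetical sketch, not the igvmbuilder's actual code):

    // Because the stage2 image is now as long on disk as in memory, its
    // file length alone tells the builder how much guest memory to reserve,
    // with no ELF parsing required.
    fn stage2_region_len(path: &str) -> std::io::Result<u64> {
        Ok(std::fs::metadata(path)?.len())
    }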
21 changes: 13 additions & 8 deletions kernel/src/stage2.rs
@@ -13,7 +13,7 @@ use bootlib::kernel_launch::{KernelLaunchInfo, Stage2LaunchInfo};
 use bootlib::platform::SvsmPlatformType;
 use core::arch::asm;
 use core::panic::PanicInfo;
-use core::ptr::{addr_of, addr_of_mut};
+use core::ptr::addr_of_mut;
 use core::slice;
 use cpuarch::snp_cpuid::SnpCpuidTable;
 use elf::ElfError;
@@ -28,25 +28,24 @@ use svsm::error::SvsmError;
 use svsm::fw_cfg::FwCfg;
 use svsm::igvm_params::IgvmParams;
 use svsm::mm::alloc::{memory_info, print_memory_info, root_mem_init};
-use svsm::mm::init_kernel_mapping_info;
 use svsm::mm::pagetable::{
     get_init_pgtable_locked, paging_init_early, set_init_pgtable, PTEntryFlags, PageTable,
     PageTableRef,
 };
 use svsm::mm::validate::{
     init_valid_bitmap_alloc, valid_bitmap_addr, valid_bitmap_set_valid_range,
 };
+use svsm::mm::{init_kernel_mapping_info, FixedAddressMappingRange};
 use svsm::platform::{PageStateChangeOp, SvsmPlatform, SvsmPlatformCell};
 use svsm::types::{PageSize, PAGE_SIZE, PAGE_SIZE_2M};
 use svsm::utils::{halt, is_aligned, MemoryRegion};
 
 extern "C" {
-    pub static heap_start: u8;
     pub static mut pgtable: PageTable;
 }
 
-fn setup_stage2_allocator(heap_end: u64) {
-    let vstart = unsafe { VirtAddr::from(addr_of!(heap_start)).page_align_up() };
+fn setup_stage2_allocator(heap_start: u64, heap_end: u64) {
+    let vstart = VirtAddr::from(heap_start);
     let vend = VirtAddr::from(heap_end);
     let pstart = PhysAddr::from(vstart.bits()); // Identity mapping
     let nr_pages = (vend - vstart) / PAGE_SIZE;
@@ -90,11 +89,16 @@ fn setup_env(
         .validate_page_range(region)
         .expect("failed to validate low 640 KB");
 
-    init_kernel_mapping_info(
+    // Supply the heap bounds as the kernel range, since the only virtual-to
+    // physical translations required will be on heap memory.
+    let kernel_mapping = FixedAddressMappingRange::new(
         VirtAddr::from(0x808000u64),
         VirtAddr::from(0x8A0000u64),
         PhysAddr::from(0x808000u64),
     );
+    let heap_mapping =
+        FixedAddressMappingRange::new(region.start(), region.end(), PhysAddr::from(0u64));
+    init_kernel_mapping_info(kernel_mapping, Some(heap_mapping));
 
     let cpuid_page = unsafe {
         let ptr = VirtAddr::from(launch_info.cpuid_page as u64).as_ptr::<SnpCpuidTable>();
@@ -106,8 +110,9 @@
 
     set_init_pgtable(PageTableRef::shared(unsafe { addr_of_mut!(pgtable) }));
 
-    // The end of the heap is the base of the kernel image.
-    setup_stage2_allocator(launch_info.kernel_elf_start as u64);
+    // Configure the heap to exist from 64 KB to 640 KB.
+    setup_stage2_allocator(0x10000, 0xA0000);
 
     init_percpu(platform).expect("Failed to initialize per-cpu area");
 
     // Init IDT again with handlers requiring GHCB (eg. #VC handler)
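With the fixed 64 KB..640 KB window, the stage2 heap size is now a constant, independent of how large the stage2 binary grows. A quick check of the arithmetic:

    fn main() {
        // 64 KB .. 640 KB => 0x90000 bytes = 576 KB, i.e. 144 4-KB pages.
        let heap_bytes: u64 = 0xA_0000 - 0x1_0000;
        assert_eq!(heap_bytes, 0x9_0000);
        assert_eq!(heap_bytes / 1024, 576);
        assert_eq!(heap_bytes / 4096, 144);
    }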
5 changes: 3 additions & 2 deletions kernel/src/svsm.rs
@@ -40,7 +40,7 @@ use svsm::mm::alloc::{memory_info, print_memory_info, root_mem_init};
 use svsm::mm::memory::{init_memory_map, write_guest_memory_map};
 use svsm::mm::pagetable::paging_init;
 use svsm::mm::virtualrange::virt_log_usage;
-use svsm::mm::{init_kernel_mapping_info, PerCPUPageMappingGuard};
+use svsm::mm::{init_kernel_mapping_info, FixedAddressMappingRange, PerCPUPageMappingGuard};
 use svsm::platform::{SvsmPlatformCell, SVSM_PLATFORM};
 use svsm::requests::{request_loop, request_processing_main, update_mappings};
 use svsm::sev::utils::{rmp_adjust, RMPFlags};
@@ -243,11 +243,12 @@ pub fn boot_stack_info() {
 }
 
 fn mapping_info_init(launch_info: &KernelLaunchInfo) {
-    init_kernel_mapping_info(
+    let kernel_mapping = FixedAddressMappingRange::new(
         VirtAddr::from(launch_info.heap_area_virt_start),
         VirtAddr::from(launch_info.heap_area_virt_end()),
         PhysAddr::from(launch_info.heap_area_phys_start),
     );
+    init_kernel_mapping_info(kernel_mapping, None);
 }
 
 /// # Panics
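Taken together, the two call sites of the reworked API contrast as follows (a sketch condensed from the stage2.rs and svsm.rs hunks above, not additional code from the commit):

    // stage2: registers the kernel-image range plus the low-memory heap window.
    init_kernel_mapping_info(kernel_mapping, Some(heap_mapping));

    // SVSM kernel: registers only its fixed heap-area range.
    init_kernel_mapping_info(kernel_mapping, None);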
