Skip to content

Commit

Permalink
Merge pull request #49 from 00xc/address/arith
Browse files Browse the repository at this point in the history
Address: remove offset() and sub() methods
  • Loading branch information
joergroedel committed Jun 19, 2023
2 parents 575a817 + 073653c commit 39b92fb
Show file tree
Hide file tree
Showing 20 changed files with 87 additions and 100 deletions.
8 changes: 0 additions & 8 deletions src/address.rs
Original file line number Diff line number Diff line change
Expand Up @@ -44,18 +44,10 @@ pub trait Address:
self.is_aligned(PAGE_SIZE)
}

/// Returns the address advanced by `off` bytes.
///
/// Panics on overflow in debug builds (plain `+` on the inner
/// integer); use [`checked_offset`] for a fallible variant.
fn offset(&self, off: InnerAddr) -> Self {
    let advanced = self.bits() + off;
    Self::from(advanced)
}

fn checked_offset(&self, off: InnerAddr) -> Option<Self> {
self.bits().checked_add(off).map(|addr| addr.into())
}

/// Returns the address moved back by `off` bytes.
///
/// Panics on underflow in debug builds (plain `-` on the inner
/// integer); use [`checked_sub`] for a fallible variant.
fn sub(&self, off: InnerAddr) -> Self {
    let lowered = self.bits() - off;
    Self::from(lowered)
}

fn checked_sub(&self, off: InnerAddr) -> Option<Self> {
self.bits().checked_sub(off).map(|addr| addr.into())
}
Expand Down
4 changes: 2 additions & 2 deletions src/cpu/extable.rs
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ fn check_exception_table(rip: VirtAddr) -> VirtAddr {
return end;
}

current = current.offset(mem::size_of::<ExceptionTableEntry>());
current = current + mem::size_of::<ExceptionTableEntry>();
if current >= ex_table_end {
break;
}
Expand All @@ -59,7 +59,7 @@ pub fn dump_exception_table() {

log::info!("Extable Entry {:#018x}-{:#018x}", start, end);

current = current.offset(mem::size_of::<ExceptionTableEntry>());
current = current + mem::size_of::<ExceptionTableEntry>();
if current >= ex_table_end {
break;
}
Expand Down
4 changes: 2 additions & 2 deletions src/cpu/idt.rs
Original file line number Diff line number Diff line change
Expand Up @@ -143,12 +143,12 @@ fn init_idt(idt: &mut Idt) {
// Set IDT handlers
let handlers = unsafe { VirtAddr::from(&idt_handler_array as *const u8) };
for (i, entry) in idt.iter_mut().enumerate() {
*entry = IdtEntry::entry(handlers.offset(32 * i));
*entry = IdtEntry::entry(handlers + (32 * i));
}
}

unsafe fn init_ist_vectors(idt: &mut Idt) {
let handler = VirtAddr::from(&idt_handler_array as *const u8).offset(32 * DF_VECTOR);
let handler = VirtAddr::from(&idt_handler_array as *const u8) + (32 * DF_VECTOR);
idt[DF_VECTOR] = IdtEntry::ist_entry(handler, IST_DF.try_into().unwrap());
}

Expand Down
2 changes: 1 addition & 1 deletion src/cpu/percpu.rs
Original file line number Diff line number Diff line change
Expand Up @@ -466,7 +466,7 @@ impl PerCpu {
let caa_phys = locked.caa_phys()?;
let offset = caa_phys.page_offset();

Some(VirtAddr::from(SVSM_PERCPU_CAA_BASE).offset(offset))
Some(VirtAddr::from(SVSM_PERCPU_CAA_BASE) + offset)
}

fn vmsa_tr_segment(&self) -> VMSASegment {
Expand Down
4 changes: 2 additions & 2 deletions src/debug/stacktrace.rs
Original file line number Diff line number Diff line change
Expand Up @@ -134,9 +134,9 @@ impl StackUnwinder {
}

let rbp = unsafe { rsp.as_ptr::<VirtAddr>().read_unaligned() };
let rsp = rsp.offset(mem::size_of::<VirtAddr>());
let rsp = rsp + mem::size_of::<VirtAddr>();
let rip = unsafe { rsp.as_ptr::<VirtAddr>().read_unaligned() };
let rsp = rsp.offset(mem::size_of::<VirtAddr>());
let rsp = rsp + mem::size_of::<VirtAddr>();

Self::check_unwound_frame(rbp, rsp, rip, stacks)
}
Expand Down
2 changes: 1 addition & 1 deletion src/fs/init.rs
Original file line number Diff line number Diff line change
Expand Up @@ -185,7 +185,7 @@ pub fn populate_ram_fs(kernel_fs_start: u64, kernel_fs_end: u64) -> Result<(), S
log::info!("Unpacking FS archive...");

let guard = PerCPUPageMappingGuard::create(pstart.page_align(), pend.page_align_up(), 0)?;
let vstart = guard.virt_addr().offset(pstart.page_offset());
let vstart = guard.virt_addr() + pstart.page_offset();

let data: &[u8] = unsafe { slice::from_raw_parts(vstart.as_ptr(), size) };
let archive = FsArchive::load(data)?;
Expand Down
22 changes: 11 additions & 11 deletions src/fw_meta.rs
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ impl SevPreValidMem {

// Returns the exclusive end address of this pre-validated memory range
// (base + length).
//
// NOTE(review): this scraped diff shows BOTH the pre-change line
// (`self.base.offset(self.length)`) and the post-change line
// (`self.base + self.length`); only one of them exists in the actual
// committed file — confirm against the repository before reusing this
// text as source.
#[inline]
fn end(&self) -> PhysAddr {
self.base.offset(self.length)
self.base + self.length
}

fn overlap(&self, other: &Self) -> bool {
Expand Down Expand Up @@ -172,15 +172,15 @@ const SVSM_INFO_GUID: &str = "a789a612-0597-4c4b-a49f-cbb1fe9d1ddd";

unsafe fn find_table(uuid: &Uuid, start: VirtAddr, len: usize) -> Option<(VirtAddr, usize)> {
let mut curr = start;
let end = start.sub(len);
let end = start - len;

while curr >= end {
curr = curr.sub(mem::size_of::<Uuid>());
curr = curr - mem::size_of::<Uuid>();

let ptr = curr.as_ptr::<u8>();
let curr_uuid = Uuid::from_mem(ptr);

curr = curr.sub(mem::size_of::<u16>());
curr = curr - mem::size_of::<u16>();
if curr < end {
break;
}
Expand All @@ -193,7 +193,7 @@ unsafe fn find_table(uuid: &Uuid, start: VirtAddr, len: usize) -> Option<(VirtAd
}
let len = orig_len - (mem::size_of::<Uuid>() + mem::size_of::<u16>());

curr = curr.sub(len);
curr = curr - len;

if *uuid == curr_uuid {
return Some((curr, len));
Expand Down Expand Up @@ -230,13 +230,13 @@ pub fn parse_fw_meta_data() -> Result<SevFWMetaData, SvsmError> {
// Map meta-data location, it starts at 32 bytes below 4GiB
let guard = PerCPUPageMappingGuard::create_4k(pstart)?;
let vstart = guard.virt_addr();
let vend = vstart.offset(PAGE_SIZE);
let vend = vstart + PAGE_SIZE;

let mut curr = vend.sub(32);
let mut curr = vend - 32;

let meta_uuid = Uuid::from_str(OVMF_TABLE_FOOTER_GUID).map_err(|()| SvsmError::Firmware)?;

curr = curr.sub(mem::size_of::<Uuid>());
curr = curr - mem::size_of::<Uuid>();
let ptr = curr.as_ptr::<u8>();

unsafe {
Expand All @@ -246,7 +246,7 @@ pub fn parse_fw_meta_data() -> Result<SevFWMetaData, SvsmError> {
return Err(SvsmError::Firmware);
}

curr = curr.sub(mem::size_of::<u16>());
curr = curr - mem::size_of::<u16>();
let ptr = curr.as_ptr::<u16>();

let full_len = ptr.read() as usize;
Expand Down Expand Up @@ -280,7 +280,7 @@ pub fn parse_fw_meta_data() -> Result<SevFWMetaData, SvsmError> {
let off_ptr = base.as_ptr::<u32>();
let offset = off_ptr.read_unaligned() as usize;

let meta_ptr = vend.sub(offset).as_ptr::<SevMetaDataHeader>();
let meta_ptr = (vend - offset).as_ptr::<SevMetaDataHeader>();
//let len = meta_ptr.read().len;
let num_descs = meta_ptr.read().num_desc as isize;
let desc_ptr = meta_ptr.offset(1).cast::<SevMetaDataDesc>();
Expand Down Expand Up @@ -342,7 +342,7 @@ fn validate_fw_mem_region(region: SevPreValidMem) -> Result<(), SvsmError> {
// Make page accessible to guest VMPL
rmp_adjust(vaddr, RMPFlags::GUEST_VMPL | RMPFlags::RWX, false)?;

zero_mem_region(vaddr, vaddr.offset(PAGE_SIZE));
zero_mem_region(vaddr, vaddr + PAGE_SIZE);
}

Ok(())
Expand Down
10 changes: 6 additions & 4 deletions src/mm/address_space.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,9 @@
//
// Author: Joerg Roedel <jroedel@suse.de>

use crate::address::{Address, PhysAddr, VirtAddr};
#[cfg(test)]
use crate::address::Address;
use crate::address::{PhysAddr, VirtAddr};
use crate::utils::immut_after_init::ImmutAfterInitCell;

#[derive(Copy, Clone)]
Expand Down Expand Up @@ -36,19 +38,19 @@ pub fn virt_to_phys(vaddr: VirtAddr) -> PhysAddr {

let offset: usize = vaddr - KERNEL_MAPPING.virt_start;

KERNEL_MAPPING.phys_start.offset(offset)
KERNEL_MAPPING.phys_start + offset
}

#[cfg(not(test))]
pub fn phys_to_virt(paddr: PhysAddr) -> VirtAddr {
let size: usize = KERNEL_MAPPING.virt_end - KERNEL_MAPPING.virt_start;
if paddr < KERNEL_MAPPING.phys_start || paddr >= KERNEL_MAPPING.phys_start.offset(size) {
if paddr < KERNEL_MAPPING.phys_start || paddr >= KERNEL_MAPPING.phys_start + size {
panic!("Invalid physical address {:#018x}", paddr);
}

let offset: usize = paddr - KERNEL_MAPPING.phys_start;

KERNEL_MAPPING.virt_start.offset(offset)
KERNEL_MAPPING.virt_start + offset
}

#[cfg(test)]
Expand Down
24 changes: 12 additions & 12 deletions src/mm/alloc.rs
Original file line number Diff line number Diff line change
Expand Up @@ -251,7 +251,7 @@ impl MemoryRegion {

#[allow(dead_code)]
pub fn phys_to_virt(&self, paddr: PhysAddr) -> Option<VirtAddr> {
let end_phys = self.start_phys.offset(self.page_count * PAGE_SIZE);
let end_phys = self.start_phys + (self.page_count * PAGE_SIZE);

if paddr < self.start_phys || paddr >= end_phys {
// For the initial stage2 identity mapping, the root page table
Expand All @@ -264,26 +264,26 @@ impl MemoryRegion {

let offset = paddr - self.start_phys;

Some(self.start_virt.offset(offset))
Some(self.start_virt + offset)
}

#[allow(dead_code)]
pub fn virt_to_phys(&self, vaddr: VirtAddr) -> Option<PhysAddr> {
let end_virt = self.start_virt.offset(self.page_count * PAGE_SIZE);
let end_virt = self.start_virt + (self.page_count * PAGE_SIZE);

if vaddr < self.start_virt || vaddr >= end_virt {
return None;
}

let offset = vaddr - self.start_virt;

Some(self.start_phys.offset(offset))
Some(self.start_phys + offset)
}

fn page_info_virt_addr(&self, pfn: usize) -> VirtAddr {
let size = size_of::<PageStorageType>();
let virt = self.start_virt;
virt.offset(pfn * size)
virt + (pfn * size)
}

fn check_pfn(&self, pfn: usize) {
Expand All @@ -294,7 +294,7 @@ impl MemoryRegion {

fn check_virt_addr(&self, vaddr: VirtAddr) -> bool {
let start = self.start_virt;
let end = self.start_virt.offset(self.page_count * PAGE_SIZE);
let end = self.start_virt + (self.page_count * PAGE_SIZE);

vaddr >= start && vaddr < end
}
Expand Down Expand Up @@ -409,7 +409,7 @@ impl MemoryRegion {
let pfn = self.get_next_page(order)?;
let pg = Page::Allocated(AllocatedInfo { order });
self.write_page_info(pfn, pg);
Ok(self.start_virt.offset(pfn * PAGE_SIZE))
Ok(self.start_virt + (pfn * PAGE_SIZE))
}

pub fn allocate_page(&mut self) -> Result<VirtAddr, SvsmError> {
Expand All @@ -419,7 +419,7 @@ impl MemoryRegion {
pub fn allocate_zeroed_page(&mut self) -> Result<VirtAddr, SvsmError> {
let vaddr = self.allocate_page()?;

zero_mem_region(vaddr, vaddr.offset(PAGE_SIZE));
zero_mem_region(vaddr, vaddr + PAGE_SIZE);

Ok(vaddr)
}
Expand All @@ -432,15 +432,15 @@ impl MemoryRegion {
assert_eq!(slab_vaddr.bits() & (PAGE_TYPE_MASK as usize), 0);
let pg = Page::SlabPage(SlabPageInfo { slab: slab_vaddr });
self.write_page_info(pfn, pg);
Ok(self.start_virt.offset(pfn * PAGE_SIZE))
Ok(self.start_virt + (pfn * PAGE_SIZE))
}

pub fn allocate_file_page(&mut self) -> Result<VirtAddr, SvsmError> {
self.refill_page_list(0)?;
let pfn = self.get_next_page(0)?;
let pg = Page::FilePage(FileInfo::new(1));
self.write_page_info(pfn, pg);
Ok(self.start_virt.offset(pfn * PAGE_SIZE))
Ok(self.start_virt + (pfn * PAGE_SIZE))
}

pub fn get_file_page(&mut self, vaddr: VirtAddr) -> Result<(), SvsmError> {
Expand Down Expand Up @@ -871,15 +871,15 @@ impl SlabPage {
if self.used_bitmap[idx] & mask == 0 {
self.used_bitmap[idx] |= mask;
self.free -= 1;
return Ok(self.vaddr.offset((self.item_size * i) as usize));
return Ok(self.vaddr + ((self.item_size * i) as usize));
}
}

Err(SvsmError::Mem)
}

pub fn free(&mut self, vaddr: VirtAddr) -> Result<(), SvsmError> {
if vaddr < self.vaddr || vaddr >= self.vaddr.offset(PAGE_SIZE) {
if vaddr < self.vaddr || vaddr >= self.vaddr + PAGE_SIZE {
return Err(SvsmError::Mem);
}

Expand Down
24 changes: 12 additions & 12 deletions src/mm/pagetable.rs
Original file line number Diff line number Diff line change
Expand Up @@ -405,7 +405,7 @@ impl PageTable {

// Prepare PTE leaf page
for i in 0..512 {
let addr_4k = addr_2m.offset(i * PAGE_SIZE);
let addr_4k = addr_2m + (i * PAGE_SIZE);
unsafe {
(*page).entries[i].clear();
(*page).entries[i].set(set_c_bit(addr_4k), flags);
Expand Down Expand Up @@ -544,7 +544,7 @@ impl PageTable {
if !entry.flags().contains(PTEntryFlags::PRESENT) {
return Err(SvsmError::Mem);
}
Ok(entry.address().offset(offset))
Ok(entry.address() + offset)
}
Mapping::Level1(entry) => {
let offset = vaddr.bits() & (PAGE_SIZE_2M - 1);
Expand All @@ -554,7 +554,7 @@ impl PageTable {
return Err(SvsmError::Mem);
}

Ok(entry.address().offset(offset))
Ok(entry.address() + offset)
}
Mapping::Level2(_entry) => Err(SvsmError::Mem),
Mapping::Level3(_entry) => Err(SvsmError::Mem),
Expand All @@ -573,7 +573,7 @@ impl PageTable {
.map(VirtAddr::from)
{
let offset = addr - start;
self.map_4k(addr, phys.offset(offset), flags)?;
self.map_4k(addr, phys + offset, flags)?;
}
Ok(())
}
Expand All @@ -599,7 +599,7 @@ impl PageTable {
.map(VirtAddr::from)
{
let offset = addr - start;
self.map_2m(addr, phys.offset(offset), flags)?;
self.map_2m(addr, phys + offset, flags)?;
}
Ok(())
}
Expand All @@ -626,17 +626,17 @@ impl PageTable {
while vaddr < end {
if vaddr.is_aligned(PAGE_SIZE_2M)
&& paddr.is_aligned(PAGE_SIZE_2M)
&& vaddr.offset(PAGE_SIZE_2M) <= end
&& vaddr + PAGE_SIZE_2M <= end
&& self.map_2m(vaddr, paddr, flags).is_ok()
{
vaddr = vaddr.offset(PAGE_SIZE_2M);
paddr = paddr.offset(PAGE_SIZE_2M);
vaddr = vaddr + PAGE_SIZE_2M;
paddr = paddr + PAGE_SIZE_2M;
continue;
}

self.map_4k(vaddr, paddr, flags)?;
vaddr = vaddr.offset(PAGE_SIZE);
paddr = paddr.offset(PAGE_SIZE);
vaddr = vaddr + PAGE_SIZE;
paddr = paddr + PAGE_SIZE;
}

Ok(())
Expand All @@ -651,11 +651,11 @@ impl PageTable {
match mapping {
Mapping::Level0(entry) => {
entry.clear();
vaddr = vaddr.offset(PAGE_SIZE);
vaddr = vaddr + PAGE_SIZE;
}
Mapping::Level1(entry) => {
entry.clear();
vaddr = vaddr.offset(PAGE_SIZE_2M);
vaddr = vaddr + PAGE_SIZE_2M;
}
_ => {
log::debug!("Can't unmap - address not mapped {:#x}", vaddr);
Expand Down
Loading

0 comments on commit 39b92fb

Please sign in to comment.