Skip to content

Commit

Permalink
Add Atomic{I,U}*::{fetch_not,not}
Browse files Browse the repository at this point in the history
Note: `AtomicBool` already has `fetch_not` and `not` methods.
  • Loading branch information
taiki-e committed Dec 18, 2022
1 parent 7ab9936 commit da034b0
Show file tree
Hide file tree
Showing 11 changed files with 351 additions and 27 deletions.
8 changes: 8 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,14 @@ Note: In this file, do not use the hard wrap in the middle of a sentence for com

Currently, optimizations by these methods (`neg`) are only guaranteed for x86.

- Add `Atomic{I,U}*::{fetch_not,not}` methods.

`Atomic{I,U}*::not` are equivalent to the corresponding `fetch_*` methods, but do not return the previous value. They are intended as an optimization on platforms that have atomic instructions for the corresponding operation, such as x86's `lock not` and MSP430's `inv`.

Currently, optimizations by these methods (`not`) are only guaranteed for x86 and MSP430.

(Note: `AtomicBool` already has `fetch_not` and `not` methods.)

## [0.3.18] - 2022-12-15

- Fix build error when not using `portable_atomic_unsafe_assume_single_core` cfg on AVR and MSP430 custom targets. ([#50](https://github.com/taiki-e/portable-atomic/pull/50))
Expand Down
28 changes: 19 additions & 9 deletions src/imp/atomic128/intrinsics.rs
Original file line number Diff line number Diff line change
Expand Up @@ -582,19 +582,16 @@ macro_rules! atomic128 {
// pointer passed in is valid because we got it from a reference.
unsafe { $atomic_min(self.v.get(), val, order) }
}
}
};
(int, $atomic_type:ident, $int_type:ident, $atomic_max:ident, $atomic_min:ident) => {
atomic128!(uint, $atomic_type, $int_type, $atomic_max, $atomic_min);
impl $atomic_type {

#[inline]
pub(crate) fn fetch_neg(&self, order: Ordering) -> $int_type {
self.fetch_update_(order, |x| x.wrapping_neg())
pub(crate) fn fetch_not(&self, order: Ordering) -> $int_type {
self.fetch_update_(order, |x| !x)
}
#[inline]
pub(crate) fn neg(&self, order: Ordering) {
self.fetch_neg(order);
pub(crate) fn not(&self, order: Ordering) {
self.fetch_not(order);
}

#[inline]
fn fetch_update_<F>(&self, set_order: Ordering, mut f: F) -> $int_type
where
Expand All @@ -612,6 +609,19 @@ macro_rules! atomic128 {
}
}
};
// `int` arm: signed 128-bit atomics. Generates everything the `uint` arm
// does, then adds negation, which is only provided for the signed types.
(int, $atomic_type:ident, $int_type:ident, $atomic_max:ident, $atomic_min:ident) => {
atomic128!(uint, $atomic_type, $int_type, $atomic_max, $atomic_min);
impl $atomic_type {
// Atomically replaces the value with its two's-complement (wrapping)
// negation, returning the previous value, via the `fetch_update_`
// helper defined in the `uint` arm.
#[inline]
pub(crate) fn fetch_neg(&self, order: Ordering) -> $int_type {
self.fetch_update_(order, |x| x.wrapping_neg())
}
// Like `fetch_neg`, but discards the previous value.
#[inline]
pub(crate) fn neg(&self, order: Ordering) {
self.fetch_neg(order);
}
}
};
}

atomic128!(int, AtomicI128, i128, atomic_max, atomic_min);
Expand Down
46 changes: 36 additions & 10 deletions src/imp/atomic128/macros.rs
Original file line number Diff line number Diff line change
Expand Up @@ -201,6 +201,21 @@ macro_rules! atomic128 {
}) as $int_type
}
}

// Atomically replaces the value with its bitwise NOT, returning the
// previous value. The ordering is validated by `assert_swap_ordering`
// before use.
#[inline]
pub(crate) fn fetch_not(&self, order: Ordering) -> $int_type {
crate::utils::assert_swap_ordering(order);
// SAFETY: any data races are prevented by atomic intrinsics and the raw
// pointer passed in is valid because we got it from a reference.
unsafe {
atomic_update(self.v.get().cast(), order, |x| !(x as $int_type) as u128)
as $int_type
}
}
// Like `fetch_not`, but discards the previous value.
#[inline]
pub(crate) fn not(&self, order: Ordering) {
self.fetch_not(order);
}
}
};
(int, $atomic_type:ident, $int_type:ident) => {
Expand Down Expand Up @@ -406,20 +421,17 @@ macro_rules! atomic128 {
// pointer passed in is valid because we got it from a reference.
unsafe { $atomic_min(self.v.get(), val, order) }
}
}
};
(int, $atomic_type:ident, $int_type:ident, $atomic_max:ident, $atomic_min:ident) => {
atomic128!(uint, $atomic_type, $int_type, $atomic_max, $atomic_min);
impl $atomic_type {

#[inline]
pub(crate) fn fetch_neg(&self, order: Ordering) -> $int_type {
// TODO: define atomic_neg function and use it
self.fetch_update_(order, |x| x.wrapping_neg())
pub(crate) fn fetch_not(&self, order: Ordering) -> $int_type {
// TODO: define atomic_not function and use it
self.fetch_update_(order, |x| !x)
}
#[inline]
pub(crate) fn neg(&self, order: Ordering) {
self.fetch_neg(order);
pub(crate) fn not(&self, order: Ordering) {
self.fetch_not(order);
}

#[inline]
fn fetch_update_<F>(&self, set_order: Ordering, mut f: F) -> $int_type
where
Expand All @@ -437,4 +449,18 @@ macro_rules! atomic128 {
}
}
};
// `int` arm: signed 128-bit atomics. Generates everything the `uint` arm
// does, then adds negation, which is only provided for the signed types.
(int, $atomic_type:ident, $int_type:ident, $atomic_max:ident, $atomic_min:ident) => {
atomic128!(uint, $atomic_type, $int_type, $atomic_max, $atomic_min);
impl $atomic_type {
// Atomically replaces the value with its two's-complement (wrapping)
// negation, returning the previous value.
#[inline]
pub(crate) fn fetch_neg(&self, order: Ordering) -> $int_type {
// TODO: define atomic_neg function and use it
self.fetch_update_(order, |x| x.wrapping_neg())
}
// Like `fetch_neg`, but discards the previous value.
#[inline]
pub(crate) fn neg(&self, order: Ordering) {
self.fetch_neg(order);
}
}
};
}
17 changes: 16 additions & 1 deletion src/imp/core_atomic.rs
Original file line number Diff line number Diff line change
Expand Up @@ -244,7 +244,6 @@ macro_rules! atomic_int {
let success = crate::utils::upgrade_success_ordering(success, failure);
self.inner.compare_exchange_weak(current, new, success, failure)
}
#[cfg_attr(not(portable_atomic_no_atomic_min_max), allow(dead_code))]
#[inline]
fn fetch_update_<F>(&self, set_order: Ordering, mut f: F) -> $int_type
where
Expand Down Expand Up @@ -348,6 +347,22 @@ macro_rules! atomic_int {
self.fetch_update_(order, |x| core::cmp::min(x, val))
}
}
// Atomically replaces the value with its bitwise NOT, returning the
// previous value, via the `fetch_update_` helper.
#[inline]
pub(crate) fn fetch_not(&self, order: Ordering) -> $int_type {
self.fetch_update_(order, |x| !x)
}
// Like `fetch_not`, but discards the previous value.
// NOTE(review): this wrapper is compiled out on x86/x86_64 when inline asm
// is available — an asm-based `not` (x86 `lock not`) appears to be provided
// elsewhere for that configuration; confirm this cfg stays in sync with it.
#[cfg(not(all(
not(all(
any(miri, portable_atomic_sanitize_thread),
portable_atomic_new_atomic_intrinsics,
)),
any(not(portable_atomic_no_asm), portable_atomic_unstable_asm),
any(target_arch = "x86", target_arch = "x86_64")
)))]
#[inline]
pub(crate) fn not(&self, order: Ordering) {
self.fetch_not(order);
}
}
impl core::ops::Deref for $atomic_type {
type Target = core::sync::atomic::$atomic_type;
Expand Down
14 changes: 14 additions & 0 deletions src/imp/fallback/imp.rs
Original file line number Diff line number Diff line change
Expand Up @@ -305,6 +305,20 @@ macro_rules! atomic {
self.write(core::cmp::min(result, val), &guard);
result
}

// Atomically replaces the value with its bitwise NOT, returning the
// previous value. The ordering argument is unused: the write guard taken
// from the lock for this location serializes every access, regardless of
// the requested ordering.
#[cfg(any(test, not(portable_atomic_cmpxchg16b_dynamic)))]
#[inline]
pub(crate) fn fetch_not(&self, _order: Ordering) -> $int_type {
let guard = lock(self.v.get() as usize).write();
let result = self.read(&guard);
self.write(!result, &guard);
result
}
// Like `fetch_not`, but discards the previous value.
#[cfg(any(test, not(portable_atomic_cmpxchg16b_dynamic)))]
#[inline]
pub(crate) fn not(&self, order: Ordering) {
self.fetch_not(order);
}
}
};
(int, $atomic_type:ident, $int_type:ident, $align:expr) => {
Expand Down
31 changes: 31 additions & 0 deletions src/imp/interrupt/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -493,6 +493,13 @@ macro_rules! atomic_int {

#[cfg(not(target_arch = "msp430"))]
no_fetch_ops_impl!($atomic_type, $int_type);
// MSP430 gets a dedicated `not` implementation (see the msp430 cfg below);
// every other target simply delegates to `fetch_not` and drops the result.
#[cfg(not(target_arch = "msp430"))]
impl $atomic_type {
#[inline]
pub(crate) fn not(&self, order: Ordering) {
self.fetch_not(order);
}
}
#[cfg(target_arch = "msp430")]
impl $atomic_type {
#[inline]
Expand Down Expand Up @@ -530,6 +537,13 @@ macro_rules! atomic_int {
(*(self as *const Self as *const atomic::$atomic_type)).xor(val, order);
}
}
// Atomically inverts all bits in place without returning the previous
// value, by delegating to the target's native atomic type.
#[inline]
pub(crate) fn not(&self, order: Ordering) {
// SAFETY: Self and atomic::$atomic_type have the same layout,
// so the pointer cast between them is sound.
unsafe {
(*(self as *const Self as *const atomic::$atomic_type)).not(order);
}
}
}
};
(
Expand Down Expand Up @@ -558,6 +572,11 @@ macro_rules! atomic_int {
// from a reference.
with(|| unsafe { self.v.get().write(val) });
}

// Like `fetch_not`, but discards the previous value.
#[inline]
pub(crate) fn not(&self, order: Ordering) {
self.fetch_not(order);
}
}
};
(uint, cas, $atomic_type:ident, $int_type:ident) => {
Expand Down Expand Up @@ -701,6 +720,18 @@ macro_rules! atomic_int {
result
})
}

// Atomically replaces the value with its bitwise NOT, returning the
// previous value. The ordering argument is unused: the plain read and
// write below are made atomic by running with interrupts disabled.
#[inline]
pub(crate) fn fetch_not(&self, _order: Ordering) -> $int_type {
// SAFETY: any data races are prevented by disabling interrupts (see
// module-level comments) and the raw pointer is valid because we got it
// from a reference.
with(|| unsafe {
let result = self.v.get().read();
self.v.get().write(!result);
result
})
}
}
};
(int, cas, $atomic_type:ident, $int_type:ident) => {
Expand Down
28 changes: 28 additions & 0 deletions src/imp/msp430.rs
Original file line number Diff line number Diff line change
Expand Up @@ -310,6 +310,15 @@ macro_rules! atomic_int {
$int_type::atomic_xor(self.v.get(), val);
}
}

// Atomically inverts all bits in place without returning the previous
// value. The ordering argument is unused here — presumably MSP430's
// atomic operations need no ordering parameter (see module-level docs
// to confirm).
#[inline]
pub(crate) fn not(&self, _order: Ordering) {
// SAFETY: any data races are prevented by atomic intrinsics and the raw
// pointer passed in is valid because we got it from a reference.
unsafe {
$int_type::atomic_not(self.v.get());
}
}
}

impl AtomicOperations for $int_type {
Expand Down Expand Up @@ -447,6 +456,24 @@ macro_rules! atomic_int {
);
}
}

// Atomically inverts all bits of `*dst` in place using MSP430's `inv`
// instruction (`$asm_suffix` presumably selects the operand-size form,
// e.g. byte vs. word — confirm against the macro invocations).
//
// SAFETY (contract for callers): `dst` must be valid for reads and writes;
// see the `AtomicOperations` trait.
#[inline]
unsafe fn atomic_not(dst: *mut Self) {
// SAFETY: the caller must uphold the safety contract for `atomic_not`.
unsafe {
#[cfg(not(portable_atomic_no_asm))]
asm!(
concat!("inv", $asm_suffix, " 0({dst})"),
dst = in(reg) dst,
options(nostack),
);
// Fallback for toolchains without the stabilized `asm!` macro.
#[cfg(portable_atomic_no_asm)]
llvm_asm!(
concat!("inv", $asm_suffix, " $0")
:: "*m"(dst) : "memory" : "volatile"
);
}
}
}
}
}
Expand All @@ -466,4 +493,5 @@ trait AtomicOperations: Sized {
unsafe fn atomic_and(dst: *mut Self, val: Self);
unsafe fn atomic_or(dst: *mut Self, val: Self);
unsafe fn atomic_xor(dst: *mut Self, val: Self);
unsafe fn atomic_not(dst: *mut Self);
}
59 changes: 52 additions & 7 deletions src/imp/x86.rs
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,32 @@ macro_rules! ptr_modifier {
}

macro_rules! atomic_int {
($int_type:ident, $atomic_type:ident, $val_reg:tt, $val_modifier:tt, $ptr_size:tt) => {
// `uint` arm: methods shared by signed and unsigned types; currently just
// the asm-optimized in-place bitwise NOT.
(uint, $int_type:ident, $atomic_type:ident, $val_reg:tt, $val_modifier:tt, $ptr_size:tt) => {
impl imp::$atomic_type {
// Atomically inverts all bits in place with a single `lock not`
// instruction, without returning the previous value.
#[inline]
pub(crate) fn not(&self, _order: Ordering) {
// SAFETY: imp::$atomic_type is internally UnsafeCell<$int_type>
// so has the same size and greater or equal alignment to UnsafeCell<$int_type>.
let dst = unsafe {
(*(self as *const imp::$atomic_type as *const UnsafeCell<$int_type>)).get()
};
// SAFETY: any data races are prevented by atomic intrinsics and the raw
// pointer passed in is valid because we got it from a reference.
//
// https://www.felixcloutier.com/x86/not
unsafe {
// atomic RMW is always SeqCst.
// Per the reference above, NOT does not modify any flags, hence
// `preserves_flags`.
// NOTE(review): the asm does not modify the pointer register, so
// plain `in(reg)` would likely suffice instead of a discarded
// `inout` — confirm.
asm!(
concat!("lock not ", $ptr_size, " ptr [{dst", ptr_modifier!(), "}]"),
dst = inout(reg) dst => _,
options(nostack, preserves_flags),
);
}
}
}
};
(int, $int_type:ident, $atomic_type:ident, $val_reg:tt, $val_modifier:tt, $ptr_size:tt) => {
atomic_int!(uint, $int_type, $atomic_type, $val_reg, $val_modifier, $ptr_size);
impl imp::$atomic_type {
#[inline]
pub(crate) fn neg(&self, _order: Ordering) {
Expand All @@ -47,20 +72,40 @@ macro_rules! atomic_int {
};
}

atomic_int!(i8, AtomicI8, reg_byte, "", "byte");
atomic_int!(i16, AtomicI16, reg, ":x", "word");
atomic_int!(i32, AtomicI32, reg, ":e", "dword");
atomic_int!(int, i8, AtomicI8, reg_byte, "", "byte");
atomic_int!(uint, u8, AtomicU8, reg_byte, "", "byte");
atomic_int!(int, i16, AtomicI16, reg, ":x", "word");
atomic_int!(uint, u16, AtomicU16, reg, ":x", "word");
atomic_int!(int, i32, AtomicI32, reg, ":e", "dword");
atomic_int!(uint, u32, AtomicU32, reg, ":e", "dword");
#[cfg(target_arch = "x86_64")]
atomic_int!(int, i64, AtomicI64, reg, "", "qword");
#[cfg(target_arch = "x86_64")]
atomic_int!(i64, AtomicI64, reg, "", "qword");
atomic_int!(uint, u64, AtomicU64, reg, "", "qword");
#[cfg(target_pointer_width = "32")]
atomic_int!(int, isize, AtomicIsize, reg, ":e", "dword");
#[cfg(target_pointer_width = "32")]
atomic_int!(isize, AtomicIsize, reg, ":e", "dword");
atomic_int!(uint, usize, AtomicUsize, reg, ":e", "dword");
#[cfg(target_pointer_width = "64")]
atomic_int!(isize, AtomicIsize, reg, "", "qword");
atomic_int!(int, isize, AtomicIsize, reg, "", "qword");
#[cfg(target_pointer_width = "64")]
atomic_int!(uint, usize, AtomicUsize, reg, "", "qword");

// On 32-bit x86 the asm macro above is only instantiated for 8/16/32-bit
// and pointer-sized types, so the 64-bit atomics fall back to plain
// wrappers around the corresponding `fetch_*` methods.
#[cfg(target_arch = "x86")]
impl imp::AtomicI64 {
#[inline]
pub(crate) fn not(&self, order: Ordering) {
self.fetch_not(order);
}
#[inline]
pub(crate) fn neg(&self, order: Ordering) {
self.fetch_neg(order);
}
}
// On 32-bit x86 the asm macro above is not instantiated for 64-bit types,
// so `not` falls back to a plain wrapper around `fetch_not`.
#[cfg(target_arch = "x86")]
impl imp::AtomicU64 {
#[inline]
pub(crate) fn not(&self, order: Ordering) {
self.fetch_not(order);
}
}
Loading

0 comments on commit da034b0

Please sign in to comment.