From 6d4270138678342dfff997793602897d0ad6dbce Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Mon, 27 Feb 2023 00:56:16 +0900 Subject: [PATCH] Add bit_{set,clear,toggle} to Atomic{I,U}* and AtomicPtr --- .github/.cspell/project-dictionary.txt | 1 + .github/workflows/ci.yml | 2 +- bench/benches/imp/spinlock_fallback.rs | 1 + src/imp/atomic128/intrinsics.rs | 1 + src/imp/atomic128/macros.rs | 2 + src/imp/core_atomic.rs | 11 + src/imp/fallback/imp.rs | 2 + src/imp/interrupt/mod.rs | 2 + src/imp/x86.rs | 119 +++++++++- src/lib.rs | 310 +++++++++++++++++++++++++ src/tests/helper.rs | 32 +++ src/utils.rs | 21 ++ 12 files changed, 498 insertions(+), 6 deletions(-) diff --git a/.github/.cspell/project-dictionary.txt b/.github/.cspell/project-dictionary.txt index abdf8f506..deddc1690 100644 --- a/.github/.cspell/project-dictionary.txt +++ b/.github/.cspell/project-dictionary.txt @@ -90,6 +90,7 @@ rcpc reentrancy sbcs seqlock +setb sete sifive SIGILL diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index a169d174a..9d1ccccba 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -283,7 +283,7 @@ jobs: MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-symbolic-alignment-check -Zmiri-retag-fields -Zmiri-disable-isolation RUSTDOCFLAGS: ${{ env.RUSTDOCFLAGS }} -Z randomize-layout RUSTFLAGS: ${{ env.RUSTFLAGS }} -Z randomize-layout - QUICKCHECK_TESTS: 20 + QUICKCHECK_TESTS: 10 san: strategy: diff --git a/bench/benches/imp/spinlock_fallback.rs b/bench/benches/imp/spinlock_fallback.rs index f4899013e..1a9e44bfa 100644 --- a/bench/benches/imp/spinlock_fallback.rs +++ b/bench/benches/imp/spinlock_fallback.rs @@ -85,6 +85,7 @@ macro_rules! atomic_int { unsafe impl Sync for $atomic_type {} no_fetch_ops_impl!($atomic_type, $int_type); + bit_opts_fetch_impl!($atomic_type, $int_type); impl $atomic_type { #[inline] pub(crate) const fn new(v: $int_type) -> Self { diff --git a/src/imp/atomic128/intrinsics.rs b/src/imp/atomic128/intrinsics.rs index 189bb9d8d..5e6ab6fa0 100644 --- a/src/imp/atomic128/intrinsics.rs +++ b/src/imp/atomic128/intrinsics.rs @@ -421,6 +421,7 @@ macro_rules! atomic128 { unsafe impl Sync for $atomic_type {} no_fetch_ops_impl!($atomic_type, $int_type); + bit_opts_fetch_impl!($atomic_type, $int_type); impl $atomic_type { #[inline] pub(crate) const fn new(v: $int_type) -> Self { diff --git a/src/imp/atomic128/macros.rs b/src/imp/atomic128/macros.rs index bc27776fc..601621ba3 100644 --- a/src/imp/atomic128/macros.rs +++ b/src/imp/atomic128/macros.rs @@ -11,6 +11,7 @@ macro_rules! atomic128 { unsafe impl Sync for $atomic_type {} no_fetch_ops_impl!($atomic_type, $int_type); + bit_opts_fetch_impl!($atomic_type, $int_type); impl $atomic_type { #[inline] pub(crate) const fn new(v: $int_type) -> Self { @@ -258,6 +259,7 @@ macro_rules! atomic128 { unsafe impl Sync for $atomic_type {} no_fetch_ops_impl!($atomic_type, $int_type); + bit_opts_fetch_impl!($atomic_type, $int_type); impl $atomic_type { #[inline] pub(crate) const fn new(v: $int_type) -> Self { diff --git a/src/imp/core_atomic.rs b/src/imp/core_atomic.rs index bdbf68b0c..02866791f 100644 --- a/src/imp/core_atomic.rs +++ b/src/imp/core_atomic.rs @@ -198,6 +198,17 @@ macro_rules! 
atomic_int { )] #[cfg_attr(not(portable_atomic_no_cfg_target_has_atomic), cfg(target_has_atomic = "ptr"))] no_fetch_ops_impl!($atomic_type, $int_type); + #[cfg(not(all( + not(any(miri, portable_atomic_sanitize_thread)), + any(not(portable_atomic_no_asm), portable_atomic_unstable_asm), + any(target_arch = "x86", target_arch = "x86_64"), + )))] + #[cfg_attr( + portable_atomic_no_cfg_target_has_atomic, + cfg(not(portable_atomic_no_atomic_cas)) + )] + #[cfg_attr(not(portable_atomic_no_cfg_target_has_atomic), cfg(target_has_atomic = "ptr"))] + bit_opts_fetch_impl!($atomic_type, $int_type); impl $atomic_type { #[inline] pub(crate) const fn new(v: $int_type) -> Self { diff --git a/src/imp/fallback/imp.rs b/src/imp/fallback/imp.rs index 39e398f1c..0e0ff023e 100644 --- a/src/imp/fallback/imp.rs +++ b/src/imp/fallback/imp.rs @@ -122,6 +122,8 @@ macro_rules! atomic { #[cfg(any(test, not(portable_atomic_unstable_cmpxchg16b_target_feature)))] no_fetch_ops_impl!($atomic_type, $int_type); + #[cfg(any(test, not(portable_atomic_unstable_cmpxchg16b_target_feature)))] + bit_opts_fetch_impl!($atomic_type, $int_type); impl $atomic_type { #[cfg(any(test, not(portable_atomic_unstable_cmpxchg16b_target_feature)))] #[inline] diff --git a/src/imp/interrupt/mod.rs b/src/imp/interrupt/mod.rs index 0af879963..9a3f613b5 100644 --- a/src/imp/interrupt/mod.rs +++ b/src/imp/interrupt/mod.rs @@ -515,6 +515,7 @@ macro_rules! atomic_int { #[cfg(not(all(target_arch = "msp430", not(feature = "critical-section"))))] no_fetch_ops_impl!($atomic_type, $int_type); + bit_opts_fetch_impl!($atomic_type, $int_type); #[cfg(not(all(target_arch = "msp430", not(feature = "critical-section"))))] impl $atomic_type { #[inline] @@ -578,6 +579,7 @@ macro_rules! atomic_int { atomic_int!(base, $atomic_type, $int_type, $align); atomic_int!($kind, cas, $atomic_type, $int_type); no_fetch_ops_impl!($atomic_type, $int_type); + bit_opts_fetch_impl!($atomic_type, $int_type); impl $atomic_type { #[inline] #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)] diff --git a/src/imp/x86.rs b/src/imp/x86.rs index 5843177f8..d777b5beb 100644 --- a/src/imp/x86.rs +++ b/src/imp/x86.rs @@ -7,7 +7,10 @@ use core::arch::asm; use core::sync::atomic::Ordering; -use super::core_atomic as imp; +use super::core_atomic::{ + AtomicI16, AtomicI32, AtomicI64, AtomicI8, AtomicIsize, AtomicU16, AtomicU32, AtomicU64, + AtomicU8, AtomicUsize, +}; #[cfg(target_pointer_width = "32")] macro_rules! ptr_modifier { @@ -24,7 +27,7 @@ macro_rules! ptr_modifier { macro_rules! atomic_int { (uint, $atomic_type:ident, $int_type:ident, $ptr_size:tt) => { - impl imp::$atomic_type { + impl $atomic_type { #[inline] pub(crate) fn not(&self, _order: Ordering) { let dst = self.as_ptr(); @@ -45,7 +48,7 @@ macro_rules! 
atomic_int { }; (int, $atomic_type:ident, $int_type:ident, $ptr_size:tt) => { atomic_int!(uint, $atomic_type, $int_type, $ptr_size); - impl imp::$atomic_type { + impl $atomic_type { #[inline] pub(crate) fn neg(&self, _order: Ordering) { let dst = self.as_ptr(); @@ -87,7 +90,7 @@ atomic_int!(int, AtomicIsize, isize, "qword"); atomic_int!(uint, AtomicUsize, usize, "qword"); #[cfg(target_arch = "x86")] -impl imp::AtomicI64 { +impl AtomicI64 { #[inline] pub(crate) fn not(&self, order: Ordering) { self.fetch_not(order); @@ -98,9 +101,115 @@ impl imp::AtomicI64 { } } #[cfg(target_arch = "x86")] -impl imp::AtomicU64 { +impl AtomicU64 { #[inline] pub(crate) fn not(&self, order: Ordering) { self.fetch_not(order); } } + +// Note: As of LLVM 15, LLVM only supports generating `lock bt{s,r,c}` for immediate bit offsets. +// https://godbolt.org/z/aM5rGPYn8 +macro_rules! atomic_bit_opts { + ($atomic_type:ident, $int_type:ident, $val_modifier:tt, $ptr_size:tt) => { + impl $atomic_type { + // `::BITS` is not available on old nightly. + const BITS: u32 = (core::mem::size_of::<$int_type>() * 8) as u32; + #[inline] + pub(crate) fn bit_set(&self, bit: u32, _order: Ordering) -> bool { + let dst = self.as_ptr(); + // SAFETY: any data races are prevented by atomic intrinsics and the raw + // pointer passed in is valid because we got it from a reference. + // the masking by the bit size of the type ensures that we do not shift + // out of bounds. + // + // https://www.felixcloutier.com/x86/bts + unsafe { + let out: u8; + // atomic RMW is always SeqCst. + asm!( + concat!("lock bts ", $ptr_size, " ptr [{dst", ptr_modifier!(), "}], {bit", $val_modifier, "}"), + "setb {out}", + dst = in(reg) dst, + bit = in(reg) (bit & (Self::BITS - 1)) as $int_type, + out = out(reg_byte) out, + // Do not use `preserves_flags` because BTS modifies the CF flag. + options(nostack), + ); + out != 0 + } + } + #[inline] + pub(crate) fn bit_clear(&self, bit: u32, _order: Ordering) -> bool { + let dst = self.as_ptr(); + // SAFETY: any data races are prevented by atomic intrinsics and the raw + // pointer passed in is valid because we got it from a reference. + // the masking by the bit size of the type ensures that we do not shift + // out of bounds. + // + // https://www.felixcloutier.com/x86/btr + unsafe { + let out: u8; + // atomic RMW is always SeqCst. + asm!( + concat!("lock btr ", $ptr_size, " ptr [{dst", ptr_modifier!(), "}], {bit", $val_modifier, "}"), + "setb {out}", + dst = in(reg) dst, + bit = in(reg) (bit & (Self::BITS - 1)) as $int_type, + out = out(reg_byte) out, + // Do not use `preserves_flags` because BTR modifies the CF flag. + options(nostack), + ); + out != 0 + } + } + #[inline] + pub(crate) fn bit_toggle(&self, bit: u32, _order: Ordering) -> bool { + let dst = self.as_ptr(); + // SAFETY: any data races are prevented by atomic intrinsics and the raw + // pointer passed in is valid because we got it from a reference. + // the masking by the bit size of the type ensures that we do not shift + // out of bounds. + // + // https://www.felixcloutier.com/x86/btc + unsafe { + let out: u8; + // atomic RMW is always SeqCst. + asm!( + concat!("lock btc ", $ptr_size, " ptr [{dst", ptr_modifier!(), "}], {bit", $val_modifier, "}"), + "setb {out}", + dst = in(reg) dst, + bit = in(reg) (bit & (Self::BITS - 1)) as $int_type, + out = out(reg_byte) out, + // Do not use `preserves_flags` because BTC modifies the CF flag. 
+ options(nostack),
+ );
+ out != 0
+ }
+ }
+ }
+ };
+}
+
+bit_opts_fetch_impl!(AtomicI8, i8);
+bit_opts_fetch_impl!(AtomicU8, u8);
+atomic_bit_opts!(AtomicI16, i16, ":x", "word");
+atomic_bit_opts!(AtomicU16, u16, ":x", "word");
+atomic_bit_opts!(AtomicI32, i32, ":e", "dword");
+atomic_bit_opts!(AtomicU32, u32, ":e", "dword");
+#[cfg(target_arch = "x86_64")]
+atomic_bit_opts!(AtomicI64, i64, "", "qword");
+#[cfg(target_arch = "x86_64")]
+atomic_bit_opts!(AtomicU64, u64, "", "qword");
+#[cfg(target_arch = "x86")]
+bit_opts_fetch_impl!(AtomicI64, i64);
+#[cfg(target_arch = "x86")]
+bit_opts_fetch_impl!(AtomicU64, u64);
+#[cfg(target_pointer_width = "32")]
+atomic_bit_opts!(AtomicIsize, isize, ":e", "dword");
+#[cfg(target_pointer_width = "32")]
+atomic_bit_opts!(AtomicUsize, usize, ":e", "dword");
+#[cfg(target_pointer_width = "64")]
+atomic_bit_opts!(AtomicIsize, isize, "", "qword");
+#[cfg(target_pointer_width = "64")]
+atomic_bit_opts!(AtomicUsize, usize, "", "qword");
diff --git a/src/lib.rs b/src/lib.rs
index bda288c76..587faedd4 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -2407,6 +2407,177 @@ impl<T> AtomicPtr<T> {
 }
 }
+ /// Sets the bit in the bit string at the bit-position specified to 1.
+ ///
+ /// Returns `true` if the specified bit was set to 1 in the previous value.
+ ///
+ /// `bit_set` takes an [`Ordering`] argument which describes the memory ordering
+ /// of this operation. All ordering modes are possible. Note that using
+ /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
+ /// using [`Release`] makes the load part [`Relaxed`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #![allow(unstable_name_collisions)]
+ /// use portable_atomic::{AtomicPtr, Ordering};
+ /// use sptr::Strict; // stable polyfill for strict provenance
+ ///
+ /// let pointer = &mut 3i64 as *mut i64;
+ ///
+ /// let atom = AtomicPtr::<i64>::new(pointer);
+ /// // Tag the bottom bit of the pointer.
+ /// assert!(!atom.bit_set(0, Ordering::Relaxed));
+ /// // Extract and untag.
+ /// let tagged = atom.load(Ordering::Relaxed);
+ /// assert_eq!(tagged.addr() & 1, 1);
+ /// assert_eq!(tagged.map_addr(|p| p & !1), pointer);
+ /// ```
+ #[cfg_attr(
+ portable_atomic_no_cfg_target_has_atomic,
+ cfg(any(
+ not(portable_atomic_no_atomic_cas),
+ portable_atomic_unsafe_assume_single_core,
+ feature = "critical-section",
+ target_arch = "avr",
+ target_arch = "msp430",
+ ))
+ )]
+ #[cfg_attr(
+ not(portable_atomic_no_cfg_target_has_atomic),
+ cfg(any(
+ target_has_atomic = "ptr",
+ portable_atomic_unsafe_assume_single_core,
+ feature = "critical-section",
+ target_arch = "avr",
+ target_arch = "msp430",
+ ))
+ )]
+ #[inline]
+ pub fn bit_set(&self, bit: u32, order: Ordering) -> bool {
+ #[cfg(miri)]
+ {
+ let mask = 1_usize.wrapping_shl(bit);
+ strict::addr(self.fetch_or(mask, order)) & mask != 0
+ }
+ #[cfg(not(miri))]
+ {
+ self.as_atomic_usize().bit_set(bit, order)
+ }
+ }
+
+ /// Clears the bit in the bit string at the bit-position specified.
+ ///
+ /// Returns `true` if the specified bit was set to 1 in the previous value.
+ ///
+ /// `bit_clear` takes an [`Ordering`] argument which describes the memory ordering
+ /// of this operation. All ordering modes are possible. Note that using
+ /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
+ /// using [`Release`] makes the load part [`Relaxed`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #![allow(unstable_name_collisions)]
+ /// use portable_atomic::{AtomicPtr, Ordering};
+ /// use sptr::Strict; // stable polyfill for strict provenance
+ ///
+ /// let pointer = &mut 3i64 as *mut i64;
+ /// // A tagged pointer
+ /// let atom = AtomicPtr::<i64>::new(pointer.map_addr(|a| a | 1));
+ /// assert!(atom.bit_set(0, Ordering::Relaxed));
+ /// // Untag
+ /// assert!(atom.bit_clear(0, Ordering::Relaxed));
+ /// ```
+ #[cfg_attr(
+ portable_atomic_no_cfg_target_has_atomic,
+ cfg(any(
+ not(portable_atomic_no_atomic_cas),
+ portable_atomic_unsafe_assume_single_core,
+ feature = "critical-section",
+ target_arch = "avr",
+ target_arch = "msp430",
+ ))
+ )]
+ #[cfg_attr(
+ not(portable_atomic_no_cfg_target_has_atomic),
+ cfg(any(
+ target_has_atomic = "ptr",
+ portable_atomic_unsafe_assume_single_core,
+ feature = "critical-section",
+ target_arch = "avr",
+ target_arch = "msp430",
+ ))
+ )]
+ #[inline]
+ pub fn bit_clear(&self, bit: u32, order: Ordering) -> bool {
+ #[cfg(miri)]
+ {
+ let mask = 1_usize.wrapping_shl(bit);
+ strict::addr(self.fetch_and(!mask, order)) & mask != 0
+ }
+ #[cfg(not(miri))]
+ {
+ self.as_atomic_usize().bit_clear(bit, order)
+ }
+ }
+
+ /// Toggles the bit in the bit string at the bit-position specified.
+ ///
+ /// Returns `true` if the specified bit was set to 1 in the previous value.
+ ///
+ /// `bit_toggle` takes an [`Ordering`] argument which describes the memory ordering
+ /// of this operation. All ordering modes are possible. Note that using
+ /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
+ /// using [`Release`] makes the load part [`Relaxed`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #![allow(unstable_name_collisions)]
+ /// use portable_atomic::{AtomicPtr, Ordering};
+ /// use sptr::Strict; // stable polyfill for strict provenance
+ ///
+ /// let pointer = &mut 3i64 as *mut i64;
+ /// let atom = AtomicPtr::<i64>::new(pointer);
+ ///
+ /// // Toggle a tag bit on the pointer.
+ /// atom.bit_toggle(0, Ordering::Relaxed);
+ /// assert_eq!(atom.load(Ordering::Relaxed).addr() & 1, 1);
+ /// ```
+ #[cfg_attr(
+ portable_atomic_no_cfg_target_has_atomic,
+ cfg(any(
+ not(portable_atomic_no_atomic_cas),
+ portable_atomic_unsafe_assume_single_core,
+ feature = "critical-section",
+ target_arch = "avr",
+ target_arch = "msp430",
+ ))
+ )]
+ #[cfg_attr(
+ not(portable_atomic_no_cfg_target_has_atomic),
+ cfg(any(
+ target_has_atomic = "ptr",
+ portable_atomic_unsafe_assume_single_core,
+ feature = "critical-section",
+ target_arch = "avr",
+ target_arch = "msp430",
+ ))
+ )]
+ #[inline]
+ pub fn bit_toggle(&self, bit: u32, order: Ordering) -> bool {
+ #[cfg(miri)]
+ {
+ let mask = 1_usize.wrapping_shl(bit);
+ strict::addr(self.fetch_xor(mask, order)) & mask != 0
+ }
+ #[cfg(not(miri))]
+ {
+ self.as_atomic_usize().bit_toggle(bit, order)
+ }
+ }
+
 #[cfg(not(miri))]
 #[inline]
 #[cfg_attr(
@@ -3669,6 +3840,145 @@ assert_eq!(min_foo, 12);
 }
 }
+ doc_comment! {
+ concat!("Sets the bit in the bit string at the bit-position specified to 1.
+
+Returns `true` if the specified bit was set to 1 in the previous value.
+
+`bit_set` takes an [`Ordering`] argument which describes the memory ordering
+of this operation. All ordering modes are possible. Note that using
+[`Acquire`] makes the store part of this operation [`Relaxed`], and
+using [`Release`] makes the load part [`Relaxed`].
+ +# Examples + +``` +use portable_atomic::{", stringify!($atomic_type), ", Ordering}; + +let foo = ", stringify!($atomic_type), "::new(0b0000); +assert!(!foo.bit_set(0, Ordering::Relaxed)); +assert_eq!(foo.load(Ordering::Relaxed), 0b0001); +assert!(foo.bit_set(0, Ordering::Relaxed)); +assert_eq!(foo.load(Ordering::Relaxed), 0b0001); +```"), + #[cfg_attr( + portable_atomic_no_cfg_target_has_atomic, + cfg(any( + not(portable_atomic_no_atomic_cas), + portable_atomic_unsafe_assume_single_core, + feature = "critical-section", + target_arch = "avr", + target_arch = "msp430", + )) + )] + #[cfg_attr( + not(portable_atomic_no_cfg_target_has_atomic), + cfg(any( + target_has_atomic = "ptr", + portable_atomic_unsafe_assume_single_core, + feature = "critical-section", + target_arch = "avr", + target_arch = "msp430", + )) + )] + #[inline] + pub fn bit_set(&self, bit: u32, order: Ordering) -> bool { + self.inner.bit_set(bit, order) + } + } + + doc_comment! { + concat!("Clears the bit in the bit string at the bit-position specified. + +Returns `true` if the specified bit was set to 1 in the previous value. + +`bit_clear` takes an [`Ordering`] argument which describes the memory ordering +of this operation. All ordering modes are possible. Note that using +[`Acquire`] makes the store part of this operation [`Relaxed`], and +using [`Release`] makes the load part [`Relaxed`]. + +# Examples + +``` +use portable_atomic::{", stringify!($atomic_type), ", Ordering}; + +let foo = ", stringify!($atomic_type), "::new(0b0001); +assert!(foo.bit_clear(0, Ordering::Relaxed)); +assert_eq!(foo.load(Ordering::Relaxed), 0b0000); +```"), + #[cfg_attr( + portable_atomic_no_cfg_target_has_atomic, + cfg(any( + not(portable_atomic_no_atomic_cas), + portable_atomic_unsafe_assume_single_core, + feature = "critical-section", + target_arch = "avr", + target_arch = "msp430", + )) + )] + #[cfg_attr( + not(portable_atomic_no_cfg_target_has_atomic), + cfg(any( + target_has_atomic = "ptr", + portable_atomic_unsafe_assume_single_core, + feature = "critical-section", + target_arch = "avr", + target_arch = "msp430", + )) + )] + #[inline] + pub fn bit_clear(&self, bit: u32, order: Ordering) -> bool { + self.inner.bit_clear(bit, order) + } + } + + doc_comment! { + concat!("Toggles the bit in the bit string at the bit-position specified. + +Returns `true` if the specified bit was set to 1 in the previous value. + +`bit_toggle` takes an [`Ordering`] argument which describes the memory ordering +of this operation. All ordering modes are possible. Note that using +[`Acquire`] makes the store part of this operation [`Relaxed`], and +using [`Release`] makes the load part [`Relaxed`]. 
+ +# Examples + +``` +use portable_atomic::{", stringify!($atomic_type), ", Ordering}; + +let foo = ", stringify!($atomic_type), "::new(0b0000); +assert!(!foo.bit_toggle(0, Ordering::Relaxed)); +assert_eq!(foo.load(Ordering::Relaxed), 0b0001); +assert!(foo.bit_toggle(0, Ordering::Relaxed)); +assert_eq!(foo.load(Ordering::Relaxed), 0b0000); +```"), + #[cfg_attr( + portable_atomic_no_cfg_target_has_atomic, + cfg(any( + not(portable_atomic_no_atomic_cas), + portable_atomic_unsafe_assume_single_core, + feature = "critical-section", + target_arch = "avr", + target_arch = "msp430", + )) + )] + #[cfg_attr( + not(portable_atomic_no_cfg_target_has_atomic), + cfg(any( + target_has_atomic = "ptr", + portable_atomic_unsafe_assume_single_core, + feature = "critical-section", + target_arch = "avr", + target_arch = "msp430", + )) + )] + #[inline] + pub fn bit_toggle(&self, bit: u32, order: Ordering) -> bool { + self.inner.bit_toggle(bit, order) + } + } + doc_comment! { concat!("Logical negates the current value, and sets the new value to the result. diff --git a/src/tests/helper.rs b/src/tests/helper.rs index b23e964a0..8460013e5 100644 --- a/src/tests/helper.rs +++ b/src/tests/helper.rs @@ -468,6 +468,7 @@ macro_rules! __test_atomic_int { assert_eq!(a.load(Ordering::Relaxed), !1); } } + // TODO: normal tests for bit_set/bit_clear/bit_toggle ::quickcheck::quickcheck! { fn quickcheck_swap(x: $int_type, y: $int_type) -> bool { for &order in &SWAP_ORDERINGS { @@ -668,6 +669,36 @@ macro_rules! __test_atomic_int { } true } + fn quickcheck_bit_set(x: $int_type, bit: u32) -> bool { + for &order in &SWAP_ORDERINGS { + let a = <$atomic_type>::new(x); + let b = a.bit_set(bit, order); + let mask = (1 as $int_type).wrapping_shl(bit); + assert_eq!(a.load(Ordering::Relaxed), x | mask); + assert_eq!(b, (x & mask) != 0); + } + true + } + fn quickcheck_bit_clear(x: $int_type, bit: u32) -> bool { + for &order in &SWAP_ORDERINGS { + let a = <$atomic_type>::new(x); + let b = a.bit_clear(bit, order); + let mask = (1 as $int_type).wrapping_shl(bit); + assert_eq!(a.load(Ordering::Relaxed), x & !mask); + assert_eq!(b, (x & mask) != 0); + } + true + } + fn quickcheck_bit_toggle(x: $int_type, bit: u32) -> bool { + for &order in &SWAP_ORDERINGS { + let a = <$atomic_type>::new(x); + let b = a.bit_toggle(bit, order); + let mask = (1 as $int_type).wrapping_shl(bit); + assert_eq!(a.load(Ordering::Relaxed), x ^ mask); + assert_eq!(b, (x & mask) != 0); + } + true + } } }; (int, $atomic_type:ty, $int_type:ident, single_thread) => { @@ -1551,6 +1582,7 @@ macro_rules! __test_atomic_ptr_pub { assert_eq!(atom.fetch_and(MASK_PTR, Ordering::SeqCst), ptr.map_addr(|a| a | 0b1001)); assert_eq!(atom.load(Ordering::SeqCst), ptr); } + // TODO: bit_set/bit_clear/bit_toggle }; } diff --git a/src/utils.rs b/src/utils.rs index 4c2e15f06..a5471ed1a 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -241,6 +241,27 @@ macro_rules! no_fetch_ops_impl { } }; } +macro_rules! 
bit_opts_fetch_impl { + ($atomic_type:ident, $int_type:ident) => { + impl $atomic_type { + #[inline] + pub(crate) fn bit_set(&self, bit: u32, order: Ordering) -> bool { + let mask = (1 as $int_type).wrapping_shl(bit); + self.fetch_or(mask, order) & mask != 0 + } + #[inline] + pub(crate) fn bit_clear(&self, bit: u32, order: Ordering) -> bool { + let mask = (1 as $int_type).wrapping_shl(bit); + self.fetch_and(!mask, order) & mask != 0 + } + #[inline] + pub(crate) fn bit_toggle(&self, bit: u32, order: Ordering) -> bool { + let mask = (1 as $int_type).wrapping_shl(bit); + self.fetch_xor(mask, order) & mask != 0 + } + } + }; +} pub(crate) struct NoRefUnwindSafe(UnsafeCell<()>); // SAFETY: this is a marker type and we'll never access the value.
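
For reference, a minimal usage sketch of the API added by this patch (not part of the diff itself): each method returns whether the targeted bit was 1 before the operation, and bit positions wrap modulo the type's bit width, matching the `wrapping_shl` fallback above and the `bit & (Self::BITS - 1)` masking in the x86 asm path.

    use portable_atomic::{AtomicU32, Ordering};

    fn main() {
        let flags = AtomicU32::new(0b0100);
        // Bit 0 was 0 before, so `bit_set` returns false; value becomes 0b0101.
        assert!(!flags.bit_set(0, Ordering::SeqCst));
        // Bit 2 was 1 before, so `bit_clear` returns true; value becomes 0b0001.
        assert!(flags.bit_clear(2, Ordering::SeqCst));
        // Bit 0 was 1 before, so `bit_toggle` returns true; value becomes 0b0000.
        assert!(flags.bit_toggle(0, Ordering::SeqCst));
        // Positions wrap modulo 32 for AtomicU32, so bit 32 targets bit 0 again.
        assert!(!flags.bit_set(32, Ordering::SeqCst));
        assert_eq!(flags.load(Ordering::SeqCst), 0b0001);
    }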