Skip to content

Commit

Permalink
feat: remove loom implementation from the revert PR
Browse files Browse the repository at this point in the history
  • Loading branch information
jbr authored and notgull committed Apr 16, 2024
1 parent ed02521 commit 70afc24
Show file tree
Hide file tree
Showing 7 changed files with 60 additions and 360 deletions.
10 changes: 0 additions & 10 deletions .github/workflows/ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -100,13 +100,3 @@ jobs:
with:
token: ${{ secrets.GITHUB_TOKEN }}

loom:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install Rust
run: rustup update stable
- name: Loom tests
run: RUSTFLAGS="--cfg=loom" cargo test --release --test loom --features loom
- name: Loom tests for lock-free
run: RUSTFLAGS="--cfg=loom" cargo test --no-default-features --release --test loom --features loom
3 changes: 0 additions & 3 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -28,9 +28,6 @@ futures-lite = "2.3.0"
criterion = { version = "0.3.4", default-features = false, features = ["cargo_bench_support"] }
waker-fn = "1"

[target.'cfg(loom)'.dependencies]
loom = { version = "0.7", optional = true }

[[bench]]
name = "bench"
harness = false
Expand Down
138 changes: 41 additions & 97 deletions src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -78,18 +78,12 @@
#![no_std]
#![warn(missing_docs, missing_debug_implementations, rust_2018_idioms)]

#[cfg(all(
not(all(feature = "std", loom)),
any(not(feature = "std"), not(feature = "portable-atomic"))
))]
#[cfg(any(not(feature = "std"), not(feature = "portable-atomic")))]
extern crate alloc;

#[cfg(feature = "std")]
extern crate std;

use loom::atomic::{self, AtomicPtr, AtomicUsize, Ordering};
use loom::Arc;
use notify::{Internal, NotificationPrivate};

use core::fmt;
use core::future::Future;
use core::mem::ManuallyDrop;
Expand All @@ -98,6 +92,10 @@ use core::pin::Pin;
use core::ptr;
use core::task::{Context, Poll};
use core::usize;
use notify::{Internal, NotificationPrivate};

use crate::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
use crate::sync::Arc;

#[cfg(all(feature = "std", not(target_family = "wasm")))]
use std::time::{Duration, Instant};
Expand All @@ -108,22 +106,6 @@ mod notify;
pub(crate) use linked_list::Inner;
pub use notify::{IntoNotification, Notification};

/// Make the given function const if the given condition is true.
///
/// The macro emits the annotated function twice behind complementary
/// `cfg` attributes: once as written (`const fn`) when the predicate
/// holds, and once as a plain `fn` when it does not. In this file it is
/// invoked with `const_if: #[cfg(not(loom))]`, so constructors are
/// `const` in normal builds but lose `const`-ness under loom.
macro_rules! const_fn {
    (
        const_if: #[cfg($($cfg:tt)+)];
        $(#[$($attr:tt)*])*
        $vis:vis const fn $($rest:tt)*
    ) => {
        // Predicate true: keep the `const` qualifier.
        #[cfg($($cfg)+)]
        $(#[$($attr)*])*
        $vis const fn $($rest)*
        // Predicate false: same item, same attributes, but non-const.
        #[cfg(not($($cfg)+))]
        $(#[$($attr)*])*
        $vis fn $($rest)*
    };
}

/// Create a stack-based event listener for an [`Event`].
///
/// [`EventListener`] allocates the listener on the heap. While this works for most use cases, in
Expand Down Expand Up @@ -258,21 +240,18 @@ impl<T> UnwindSafe for Event<T> {}
impl<T> RefUnwindSafe for Event<T> {}

impl Event {
const_fn! {
const_if: #[cfg(not(loom))];
/// Creates a new [`Event`].
///
/// # Examples
///
/// ```
/// use event_listener::Event;
///
/// let event = Event::new();
/// ```
#[inline]
pub const fn new() -> Event {
Self::with_tag()
}
/// Creates a new [`Event`].
///
/// # Examples
///
/// ```
/// use event_listener::Event;
///
/// let event = Event::new();
/// ```
#[inline]
pub const fn new() -> Event {
Self::with_tag()
}

/// Notifies a number of active listeners without emitting a `SeqCst` fence.
Expand Down Expand Up @@ -390,22 +369,19 @@ impl Event {
}

impl<T> Event<T> {
const_fn! {
const_if: #[cfg(not(loom))];
/// Creates a new [`Event`] with a tag.
///
/// # Examples
///
/// ```
/// use event_listener::Event;
///
/// let event = Event::<usize>::with_tag();
/// ```
#[inline]
pub const fn with_tag() -> Event<T> {
Event {
inner: AtomicPtr::new(ptr::null_mut()),
}
/// Creates a new [`Event`] with a tag.
///
/// # Examples
///
/// ```
/// use event_listener::Event;
///
/// let event = Event::<usize>::with_tag();
/// ```
#[inline]
pub const fn with_tag() -> Event<T> {
Event {
inner: AtomicPtr::new(ptr::null_mut()),
}
}

Expand Down Expand Up @@ -590,16 +566,13 @@ impl<T> Event<T> {
impl<T> Drop for Event<T> {
#[inline]
fn drop(&mut self) {
loom::ptr_with_mut(&mut self.inner, |inner| {
let inner: *mut Inner<T> = *inner;

// If the state pointer has been initialized, deallocate it.
if !inner.is_null() {
unsafe {
drop(Arc::from_raw(inner));
}
let inner = self.inner.get_mut();
// If the state pointer has been initialized, deallocate it.
if !inner.is_null() {
unsafe {
drop(Arc::from_raw(*inner));
}
});
}
}
}

Expand Down Expand Up @@ -835,19 +808,16 @@ fn full_fence() {
// The ideal solution here would be to use inline assembly, but we're instead creating a
// temporary atomic variable and compare-and-exchanging its value. No sane compiler to
// x86 platforms is going to optimize this away.
atomic::compiler_fence(Ordering::SeqCst);
sync::atomic::compiler_fence(Ordering::SeqCst);
let a = AtomicUsize::new(0);
let _ = a.compare_exchange(0, 1, Ordering::SeqCst, Ordering::SeqCst);
atomic::compiler_fence(Ordering::SeqCst);
sync::atomic::compiler_fence(Ordering::SeqCst);
} else {
atomic::fence(Ordering::SeqCst);
sync::atomic::fence(Ordering::SeqCst);
}
}

#[cfg(not(loom))]
mod loom {
pub(crate) use core::cell;

pub(crate) mod sync {
#[cfg(not(feature = "portable-atomic"))]
pub(crate) use core::sync::atomic;

Expand All @@ -859,32 +829,6 @@ mod loom {

#[cfg(feature = "portable-atomic")]
pub(crate) use portable_atomic_util::Arc;

/// Equivalent to `loom::AtomicPtr::with_mut`
pub(crate) fn ptr_with_mut<T, R>(
ptr: &mut atomic::AtomicPtr<T>,
f: impl FnOnce(&mut *mut T) -> R,
) -> R {
f(ptr.get_mut())
}
}

/// Shim module active only under `--cfg loom`: re-exports loom's
/// primitives under the same paths as the non-loom shim, so the rest of
/// the crate can use `crate::loom::…` uniformly.
#[cfg(loom)]
mod loom {
    pub(crate) use loom::cell;
    pub(crate) use loom::sync::Arc;

    /// Atomics come from loom, except `compiler_fence`, which is taken
    /// from `core` (loom does not provide its own).
    pub(crate) mod atomic {
        pub(crate) use core::sync::atomic::compiler_fence;
        pub(crate) use loom::sync::atomic::*;
    }

    /// Mutable access to the pointer inside an `AtomicPtr`, routed
    /// through loom's `with_mut` accessor (mirrors the non-loom shim's
    /// `ptr_with_mut`, which uses `get_mut` directly).
    pub(crate) fn ptr_with_mut<T, R>(
        ptr: &mut atomic::AtomicPtr<T>,
        f: impl FnOnce(&mut *mut T) -> R,
    ) -> R {
        ptr.with_mut(f)
    }
}

mod __sealed {
Expand Down
40 changes: 12 additions & 28 deletions src/linked_list/lock_free.rs
Original file line number Diff line number Diff line change
@@ -1,16 +1,10 @@
//! Implementation of the linked list using lock-free primitives.

use crate::loom::atomic::{AtomicBool, AtomicPtr, AtomicUsize, Ordering};
use crate::loom::cell::{Cell, UnsafeCell};
use crate::notify::{GenericNotify, Internal, Notification};

#[cfg(not(loom))]
use core::hint::spin_loop;
#[cfg(loom)]
use loom::hint::spin_loop;

use core::cell::{Cell, UnsafeCell};
use core::cmp::Reverse;
use core::fmt;
use core::hint::spin_loop;
use core::iter;
use core::marker::PhantomData;
use core::mem::{self, MaybeUninit};
Expand All @@ -22,6 +16,8 @@ use core::task::{Context, Poll, Waker};
use alloc::boxed::Box;
use alloc::collections::BinaryHeap;

use crate::sync::atomic::{AtomicBool, AtomicPtr, AtomicUsize, Ordering};

/// The total number of buckets stored in each thread local.
/// All buckets combined can hold up to `usize::MAX - 1` entries.
const BUCKETS: usize = (usize::BITS - 1) as usize;
Expand Down Expand Up @@ -666,16 +662,14 @@ impl<T> Drop for Slots<T> {
fn drop(&mut self) {
// Free every bucket.
for (i, bucket) in self.buckets.iter_mut().enumerate() {
crate::loom::ptr_with_mut(bucket, |bucket| {
let bucket = *bucket;
if bucket.is_null() {
return;
}
let bucket = bucket.get_mut();
if bucket.is_null() {
return;
}

// Drop the bucket.
let size = bucket_index_to_size(i);
drop(unsafe { Box::from_raw(slice::from_raw_parts_mut(bucket, size)) });
});
// Drop the bucket.
let size = bucket_index_to_size(i);
drop(unsafe { Box::from_raw(slice::from_raw_parts_mut(*bucket, size)) });
}
}
}
Expand Down Expand Up @@ -743,23 +737,13 @@ impl<T> Lock<T> {
let _drop = CallOnDrop(|| self.is_locked.store(false, Ordering::Release));

// SAFETY: We have exclusive access.
Some(cell_with_mut(&self.data, |ptr| f(unsafe { &mut *ptr })))
Some(f(unsafe { &mut *self.data.get() }))
} else {
None
}
}
}

/// Invoke `f` with a raw pointer to the cell's contents.
///
/// Non-loom counterpart of the loom variant below: simply forwards the
/// pointer obtained from `UnsafeCell::get`.
#[cfg(not(loom))]
fn cell_with_mut<T, R>(cell: &UnsafeCell<T>, f: impl FnOnce(*mut T) -> R) -> R {
    let data = cell.get();
    f(data)
}

/// Invoke `f` with a raw pointer to the cell's contents.
///
/// Loom variant: routes the access through loom's
/// `UnsafeCell::with_mut` instead of a bare `get`, so the access goes
/// through loom's tracked cell API.
#[cfg(loom)]
fn cell_with_mut<T, R>(cell: &UnsafeCell<T>, f: impl FnOnce(*mut T) -> R) -> R {
    cell.with_mut(f)
}

#[inline]
fn bucket_index_to_size(i: usize) -> usize {
1 << i
Expand Down
13 changes: 5 additions & 8 deletions src/linked_list/mutex.rs
Original file line number Diff line number Diff line change
@@ -1,10 +1,9 @@
//! Implementation of the linked list using standard library mutexes.

use crate::loom::atomic::{AtomicUsize, Ordering};
use crate::loom::cell::Cell;
use crate::notify::{GenericNotify, Internal, Notification};

use crate::sync::atomic::{AtomicUsize, Ordering};
use std::boxed::Box;
use std::cell::{Cell, UnsafeCell};
use std::fmt;
use std::mem;
use std::ops::{Deref, DerefMut};
Expand All @@ -13,7 +12,6 @@ use std::sync::{Mutex, MutexGuard, TryLockError};
use std::task::{Context, Poll, Waker};
use std::thread::{self, Thread};
use std::time::Instant;
use std::usize;

/// Inner state of [`Event`].
pub(crate) struct Inner<T> {
Expand All @@ -26,24 +24,23 @@ pub(crate) struct Inner<T> {
list: Mutex<List<T>>,

/// A single cached list entry to avoid allocations on the fast path of the insertion.
// TODO: Add ability to use loom::cell::UnsafeCell
cache: std::cell::UnsafeCell<Entry<T>>,
cache: UnsafeCell<Entry<T>>,
}

impl<T> Inner<T> {
/// Create a new linked list.
pub(crate) fn new() -> Self {
Inner {
notified: AtomicUsize::new(usize::MAX),
list: std::sync::Mutex::new(List::<T> {
list: Mutex::new(List::<T> {
head: None,
tail: None,
start: None,
len: 0,
notified: 0,
cache_used: false,
}),
cache: std::cell::UnsafeCell::new(Entry {
cache: UnsafeCell::new(Entry {
state: Cell::new(State::Created),
prev: Cell::new(None),
next: Cell::new(None),
Expand Down
4 changes: 2 additions & 2 deletions src/notify.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
#[cfg(feature = "std")]
use core::fmt;

use crate::loom::atomic::{self, Ordering};
use crate::sync::atomic::{self, Ordering};

pub(crate) use __private::Internal;

Expand Down Expand Up @@ -568,7 +568,7 @@ impl_for_numeric_types! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
/// Equivalent to `atomic::fence(Ordering::SeqCst)`, but in some cases faster.
#[inline]
pub(super) fn full_fence() {
#[cfg(all(any(target_arch = "x86", target_arch = "x86_64"), not(miri), not(loom)))]
#[cfg(all(any(target_arch = "x86", target_arch = "x86_64"), not(miri)))]
{
use core::{arch::asm, cell::UnsafeCell};
// HACK(stjepang): On x86 architectures there are two different ways of executing
Expand Down
Loading

0 comments on commit 70afc24

Please sign in to comment.