We've made NonZero
generic instead of having multiple distinct NonZero*
types. We could do a similar thing to unify the Atomic*
types. I drafted out what that would look like; does this seem like a reasonable API shape for std to refactor towards? [playground]
// Private marker used as a supertrait so downstream crates cannot
// implement `AtomicPrimitive` themselves (the "sealed trait" pattern).
trait Sealed {}
// Marker trait for primitives that have an `Atomic<T>` form, mirroring
// how generic `NonZero<T>` is bounded. `#[allow(private_bounds)]` lets
// the public trait name the private `Sealed` supertrait.
// NOTE(review): marked `unsafe` presumably because implementors vouch
// that `AlignedForAtomic` has layout/alignment suitable for atomic
// access — worth stating the exact invariant in a `# Safety` section.
#[allow(private_bounds)]
#[unstable(feature = "atomic_internals", issue = "none")]
pub unsafe trait AtomicPrimitive: Sized + Copy + Sealed {
// The storage type actually placed in the cell: `Self` on targets where
// the primitive's natural alignment already satisfies the atomic
// requirement, otherwise an over-aligned wrapper (see the macro below).
type AlignedForAtomic;
}
// Implements `AtomicPrimitive` for `$Primitive`, gated on the target
// supporting atomic load/store at `$size` bits. `$Inner` names an
// over-aligned wrapper struct, emitted only on targets where the
// primitive's natural alignment is below the required atomic alignment.
// The optional `<$T>` arm handles the generic raw-pointer case.
macro impl_atomic_primitive(
$Inner:ident $(<$T:ident>)? ($Primitive:ty),
size($size:literal),
align($align:literal) $(,)?
) {
// Sealing is unconditional (outside the cfg gate), so the set of
// sealed types does not vary by target.
impl $(<$T>)? Sealed for $Primitive {}
#[cfg(target_has_atomic_load_store = $size)]
const _: () = {
unsafe impl $(<$T>)? AtomicPrimitive for $Primitive {
// Natural alignment already matches: store the primitive directly,
// which is what enables `from_mut` (see the bound there).
#[cfg(target_has_atomic_equal_alignment = $size)]
type AlignedForAtomic = $Primitive;
// Otherwise route through the over-aligned wrapper.
#[cfg(not(target_has_atomic_equal_alignment = $size))]
type AlignedForAtomic = $Inner $(<$T>)?;
}
// Over-aligned newtype wrapper. Declared inside the anonymous
// `const _` block, so despite `pub` it is not nameable from
// outside — it only exists to be the associated type above.
#[repr(C, align($align))]
#[cfg(not(target_has_atomic_equal_alignment = $size))]
pub struct $Inner $(<$T>)? ($Primitive);
};
}
// Instantiations for the concrete primitives; `size` feeds the
// `target_has_atomic*` cfg keys, `align` the required atomic alignment.
impl_atomic_primitive!(AlignedBool(bool), size("8"), align(1));
impl_atomic_primitive!(AlignedI32(i32), size("32"), align(4));
impl_atomic_primitive!(AlignedU32(u32), size("32"), align(4));
// ... snip ...
// Pointer case is generic over the pointee; the 32-bit-target
// counterpart (align(4)) is presumably part of the snipped section.
#[cfg(target_pointer_width = "64")]
impl_atomic_primitive!(AlignedPtr<T>(*mut T), size("ptr"), align(8));
// The unified atomic type: an interior-mutability cell over the
// (possibly over-aligned) storage type selected by `T::AlignedForAtomic`.
// `repr(C)` pins the layout to that single field.
#[repr(C)]
pub struct Atomic<T: AtomicPrimitive>(UnsafeCell<T::AlignedForAtomic>);
// Formatting impls, bounded on `T` implementing the same trait.
// NOTE(review): the trailing `;` elides the impl body the same way fn
// bodies are elided elsewhere in this sketch — not valid Rust as
// written; a real impl would supply `fn fmt`.
macro impl_atomic_fmt($Trait:ident) {
impl<T> fmt::$Trait for Atomic<T>
where T: AtomicPrimitive + fmt::$Trait;
}
impl_atomic_fmt!(Debug);
// `Pointer` is only satisfied by the raw-pointer instantiations, so
// this blanket form stays inert for the integer/bool atomics.
impl_atomic_fmt!(Pointer);
// Two rules: one for marker traits whose impl must be `unsafe`
// (Send/Sync), one for the safe markers.
macro impl_atomic_auto_trait {
(unsafe $Trait:ident) => {
unsafe impl<T> $Trait for Atomic<T> where T: AtomicPrimitive {}
},
($Trait:ident) => {
impl<T> $Trait for Atomic<T> where T: AtomicPrimitive {}
},
}
// Implement auto-traits manually based on `T` to avoid docs showing
// the `AtomicPrimitive::AlignedForAtomic` implementation detail.
impl_atomic_auto_trait!(unsafe Send);
impl_atomic_auto_trait!(unsafe Sync);
impl_atomic_auto_trait!(Unpin);
impl_atomic_auto_trait!(UnwindSafe);
impl_atomic_auto_trait!(RefUnwindSafe);
// Operations available for every atomic primitive: construction,
// exclusive-access conversions, and plain load/store. These need only
// `target_has_atomic_load_store` support, which is already implied by
// `T: AtomicPrimitive` existing at all (see the impl macro's gate).
impl<T> Atomic<T>
where
T: AtomicPrimitive,
{
pub const fn new(val: T) -> Self;
pub const fn into_inner(self) -> T;
// Pointer to the contained value; usable for FFI with C atomics.
pub const fn as_ptr(&self) -> *mut T;
// NOTE(review): the `# Safety` contract (valid, suitably aligned,
// no concurrent non-atomic access) should be spelled out here.
pub const unsafe fn from_ptr<'a>(ptr: *mut T) -> &'a Self;
// `&mut self` proves unique access, so no atomic ops are needed.
pub fn get_mut(&mut self) -> &mut T;
pub fn get_mut_slice(this: &mut [Self]) -> &mut [T];
pub fn load(&self, order: Ordering) -> T;
pub fn store(&self, val: T, order: Ordering);
}
// In-place reinterpretation of `&mut T` as `&mut Atomic<T>` is only
// sound when the storage type is `T` itself rather than the
// over-aligned wrapper — hence the `AlignedForAtomic = T` bound, which
// the impl macro only satisfies under `target_has_atomic_equal_alignment`.
impl<T> Atomic<T>
where
T: AtomicPrimitive<AlignedForAtomic = T>,
{
pub fn from_mut(val: &mut T) -> &mut Self;
pub fn from_mut_slice(val: &mut [T]) -> &mut [Self];
}
// Per-integer alias plus its read-modify-write ops, gated separately:
// the alias only needs load/store support, while the RMW ops need full
// atomic (CAS-capable) support at `$size` — matching how std gates the
// existing `Atomic*` integer types today.
macro atomic_int($Atom:ident($Int:ty), size($size:literal)) {
#[cfg(target_has_atomic_load_store = $size)]
pub type $Atom = Atomic<$Int>;
// Note these are inherent impls on the *alias target* `Atomic<$Int>`,
// one concrete impl block per integer type.
#[cfg(target_has_atomic = $size)]
impl $Atom {
pub fn swap(&self, val: $Int, order: Ordering) -> $Int;
pub fn fetch_add(&self, val: $Int, order: Ordering) -> $Int;
pub fn fetch_sub(&self, val: $Int, order: Ordering) -> $Int;
// et al
}
}
atomic_int!(AtomicI32(i32), size("32"));
atomic_int!(AtomicU32(u32), size("32"));
// `bool` is handled outside `atomic_int!` because its RMW surface
// differs from the integers (logical ops rather than arithmetic);
// the alias/ops cfg-gating split mirrors the macro above.
#[cfg(target_has_atomic_load_store = "8")]
pub type AtomicBool = Atomic<bool>;
#[cfg(target_has_atomic = "8")]
impl AtomicBool {
pub fn swap(&self, val: bool, order: Ordering) -> bool;
// et al
}
// The pointer alias keeps its own generic parameter: `AtomicPtr<T>` is
// `Atomic<*mut T>`, relying on the generic `AlignedPtr<T>` instantiation
// of `impl_atomic_primitive!` above.
#[cfg(target_has_atomic_load_store = "ptr")]
pub type AtomicPtr<T> = Atomic<*mut T>;
// Pointer-specific RMW ops (element- and byte-offset arithmetic),
// again gated on full atomic support rather than just load/store.
#[cfg(target_has_atomic = "ptr")]
impl<T> AtomicPtr<T> {
pub fn swap(&self, val: *mut T, order: Ordering) -> *mut T;
pub fn fetch_ptr_add(&self, val: usize, order: Ordering) -> *mut T;
pub fn fetch_ptr_sub(&self, val: usize, order: Ordering) -> *mut T;
pub fn fetch_byte_add(&self, val: usize, order: Ordering) -> *mut T;
pub fn fetch_byte_sub(&self, val: usize, order: Ordering) -> *mut T;
// et al
}
I think this sketch moves everything that's currently just duplicated between all of the Atomic*
types onto the single generic type. fetch_update
might be possible to provide only once on the generic, but I wasn't able to figure out a good way to both keep it gated like it is today and delegate to a non-generic compare_exchange
(required due to AtomicBool
being a special case).