Implement a lot of atomic intrinsics

bjorn3 2018-10-06 10:24:09 +02:00
parent 42b80c21af
commit f8e2ce6b4b
3 changed files with 112 additions and 31 deletions


@@ -0,0 +1,32 @@
From 6a5c292f9f9e28de4319f52b05744ed7d8863f76 Mon Sep 17 00:00:00 2001
From: bjorn3 <bjorn3@users.noreply.github.com>
Date: Sat, 6 Oct 2018 10:22:16 +0200
Subject: [PATCH] Disable inline assembly in spin_loop_hint
---
src/libcore/sync/atomic.rs | 2 ++
1 file changed, 2 insertions(+)
diff --git a/src/libcore/sync/atomic.rs b/src/libcore/sync/atomic.rs
index f130dbf..31db7a2 100644
--- a/src/libcore/sync/atomic.rs
+++ b/src/libcore/sync/atomic.rs
@@ -106,6 +106,7 @@ use fmt;
#[inline]
#[stable(feature = "spin_loop_hint", since = "1.24.0")]
pub fn spin_loop_hint() {
+ /*
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
unsafe {
asm!("pause" ::: "memory" : "volatile");
@@ -115,6 +116,7 @@ pub fn spin_loop_hint() {
unsafe {
asm!("yield" ::: "memory" : "volatile");
}
+ */
}
/// A boolean type which can be safely shared between threads.
--
2.17.1 (Apple Git-112)
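The patch above comments out the pause/yield inline assembly, presumably because the Cranelift backend cannot compile asm! blocks yet, leaving spin_loop_hint as a no-op. A hypothetical caller (not part of this commit) that still compiles with the patch applied, and whose atomic load goes through the intrinsic handlers below:

use core::sync::atomic::{spin_loop_hint, AtomicBool, Ordering};

// Spin until another thread sets the flag. With the patch applied,
// spin_loop_hint() is an empty function; the load is lowered through
// the "atomic_load" intrinsic arm implemented further down.
pub fn spin_until_set(flag: &AtomicBool) {
    while !flag.load(Ordering::Acquire) {
        spin_loop_hint();
    }
}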


@@ -1,23 +0,0 @@
From 950bfa9eb7a0eb441601cbe0adc1aefcfab8e031 Mon Sep 17 00:00:00 2001
From: bjorn3 <bjorn3@users.noreply.github.com>
Date: Fri, 27 Jul 2018 19:07:01 +0200
Subject: [PATCH] Remove atomics from libcore
---
src/libcore/lib.rs | 1 -
1 file changed, 1 deletion(-)
diff --git a/src/libcore/lib.rs b/src/libcore/lib.rs
index 3aa79087db..71fcff2e8b 100644
--- a/src/libcore/lib.rs
+++ b/src/libcore/lib.rs
@@ -185,7 +185,6 @@ pub mod borrow;
pub mod any;
pub mod array;
pub mod ascii;
-pub mod sync;
pub mod cell;
pub mod char;
pub mod panic;
2.11.0
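The deleted patch above was a workaround that stripped `pub mod sync;` (and with it the atomic types) out of libcore; with the intrinsic handlers added below it is no longer needed. A rough, hypothetical sketch (not from this commit) of how the safe atomic API reaches those handlers:

use core::sync::atomic::{AtomicUsize, Ordering::SeqCst};

// Each method call ends up at one of the intrinsic families matched below.
// The SeqCst variants have no suffix and other orderings add one, which is
// why the arms use starts_with.
pub fn demo(a: &AtomicUsize) -> usize {
    a.fetch_add(1, SeqCst);           // "atomic_xadd" arm
    a.swap(7, SeqCst);                // "atomic_xchg" arm
    a.compare_and_swap(7, 8, SeqCst); // "atomic_cxchg" arm
    a.fetch_or(1, SeqCst);            // "atomic_or" arm
    a.load(SeqCst)                    // "atomic_load" arm
}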


@@ -58,6 +58,34 @@ macro_rules! intrinsic_match {
};
}
macro_rules! atomic_binop_return_old {
($fx:expr, $op:ident<$T:ident>($ptr:ident, $src:ident) -> $ret:ident) => {
let clif_ty = $fx.cton_type($T).unwrap();
let old = $fx.bcx.ins().load(clif_ty, MemFlags::new(), $ptr, 0);
let new = $fx.bcx.ins().$op(old, $src);
$fx.bcx.ins().store(MemFlags::new(), new, $ptr, 0);
$ret.write_cvalue($fx, CValue::ByVal(old, $fx.layout_of($T)));
};
}
macro_rules! atomic_minmax {
($fx:expr, $cc:expr, <$T:ident> ($ptr:ident, $src:ident) -> $ret:ident) => {
// Read old
let clif_ty = $fx.cton_type($T).unwrap();
let old = $fx.bcx.ins().load(clif_ty, MemFlags::new(), $ptr, 0);
// Compare
let is_eq = $fx.bcx.ins().icmp($cc, old, $src);
let new = $fx.bcx.ins().select(is_eq, old, $src);
// Write new
$fx.bcx.ins().store(MemFlags::new(), new, $ptr, 0);
let ret_val = CValue::ByVal(old, $ret.layout());
$ret.write_cvalue($fx, ret_val);
};
}
pub fn codegen_intrinsic_call<'a, 'tcx: 'a>(
fx: &mut FunctionCx<'a, 'tcx, impl Backend>,
def_id: DefId,
@@ -317,6 +345,7 @@ pub fn codegen_intrinsic_call<'a, 'tcx: 'a>(
let needs_drop = CValue::const_val(fx, fx.tcx.types.bool, needs_drop);
ret.write_cvalue(fx, needs_drop);
};
_ if intrinsic.starts_with("atomic_fence"), () {};
_ if intrinsic.starts_with("atomic_singlethreadfence"), () {};
_ if intrinsic.starts_with("atomic_load"), (c ptr) {
@@ -329,19 +358,62 @@ pub fn codegen_intrinsic_call<'a, 'tcx: 'a>(
let dest = CPlace::Addr(ptr, None, val.layout());
dest.write_cvalue(fx, val);
};
_ if intrinsic.starts_with("atomic_xadd"), <T> (v ptr, v amount) {
_ if intrinsic.starts_with("atomic_xchg"), <T> (v ptr, c src) {
// Read old
let clif_ty = fx.cton_type(T).unwrap();
let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);
let new = fx.bcx.ins().iadd(old, amount);
fx.bcx.ins().store(MemFlags::new(), new, ptr, 0);
ret.write_cvalue(fx, CValue::ByVal(old, fx.layout_of(T)));
// Write new
let dest = CPlace::Addr(ptr, None, src.layout());
dest.write_cvalue(fx, src);
};
_ if intrinsic.starts_with("atomic_cxchg"), <T> (v ptr, v test_old, v new) { // both atomic_cxchg_* and atomic_cxchgweak_*
// Read old
let clif_ty = fx.cton_type(T).unwrap();
let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);
// Compare
let is_eq = fx.bcx.ins().icmp(IntCC::Equal, old, test_old);
let new = fx.bcx.ins().select(is_eq, new, old); // Keep old if not equal to test_old
// Write new
fx.bcx.ins().store(MemFlags::new(), new, ptr, 0);
let ret_val = CValue::ByValPair(old, fx.bcx.ins().bint(types::I8, is_eq), ret.layout());
ret.write_cvalue(fx, ret_val);
};
_ if intrinsic.starts_with("atomic_xadd"), <T> (v ptr, v amount) {
atomic_binop_return_old! (fx, iadd<T>(ptr, amount) -> ret);
};
_ if intrinsic.starts_with("atomic_xsub"), <T> (v ptr, v amount) {
- let clif_ty = fx.cton_type(T).unwrap();
- let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);
- let new = fx.bcx.ins().isub(old, amount);
- fx.bcx.ins().store(MemFlags::new(), new, ptr, 0);
- ret.write_cvalue(fx, CValue::ByVal(old, fx.layout_of(T)));
+ atomic_binop_return_old! (fx, isub<T>(ptr, amount) -> ret);
};
_ if intrinsic.starts_with("atomic_and"), <T> (v ptr, v src) {
atomic_binop_return_old! (fx, band<T>(ptr, src) -> ret);
};
_ if intrinsic.starts_with("atomic_nand"), <T> (v ptr, v src) {
atomic_binop_return_old! (fx, bnand<T>(ptr, src) -> ret);
};
_ if intrinsic.starts_with("atomic_or"), <T> (v ptr, v src) {
atomic_binop_return_old! (fx, bor<T>(ptr, src) -> ret);
};
_ if intrinsic.starts_with("atomic_xor"), <T> (v ptr, v src) {
atomic_binop_return_old! (fx, bxor<T>(ptr, src) -> ret);
};
_ if intrinsic.starts_with("atomic_max"), <T> (v ptr, v src) {
atomic_minmax!(fx, IntCC::SignedGreaterThan, <T> (ptr, src) -> ret);
};
_ if intrinsic.starts_with("atomic_umax"), <T> (v ptr, v src) {
atomic_minmax!(fx, IntCC::UnsignedGreaterThan, <T> (ptr, src) -> ret);
};
_ if intrinsic.starts_with("atomic_min"), <T> (v ptr, v src) {
atomic_minmax!(fx, IntCC::SignedLessThan, <T> (ptr, src) -> ret);
};
_ if intrinsic.starts_with("atomic_umin"), <T> (v ptr, v src) {
atomic_minmax!(fx, IntCC::UnsignedLessThan, <T> (ptr, src) -> ret);
};
}
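For reference, a plain-Rust sketch (hypothetical helpers, not code from this commit) of what the handlers above emit: an ordinary load, an ALU op, and a store, with the old value as the result, and for cxchg a pair of the old value and a success flag.

// Equivalent semantics of the Cranelift instruction sequences above,
// written as ordinary (non-atomic) pointer operations.
unsafe fn xadd_like(ptr: *mut usize, amount: usize) -> usize {
    let old = *ptr;                       // load
    *ptr = old.wrapping_add(amount);      // iadd + store
    old                                   // previous value is returned
}

unsafe fn cxchg_like(ptr: *mut usize, test_old: usize, new: usize) -> (usize, bool) {
    let old = *ptr;                       // load
    let is_eq = old == test_old;          // icmp IntCC::Equal
    *ptr = if is_eq { new } else { old }; // select + store
    (old, is_eq)                          // ByValPair(old, bint(is_eq))
}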