Skip to content

Commit 3494e9c

Browse files
committed
atomicrmw on pointers: move integer-pointer cast hacks into backend
1 parent ace4ac0 commit 3494e9c

File tree

11 files changed

+165
-109
lines changed

11 files changed

+165
-109
lines changed

compiler/rustc_codegen_gcc/src/builder.rs

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1656,6 +1656,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
16561656
dst: RValue<'gcc>,
16571657
src: RValue<'gcc>,
16581658
order: AtomicOrdering,
1659+
ret_ptr: bool,
16591660
) -> RValue<'gcc> {
16601661
let size = get_maybe_pointer_size(src);
16611662
let name = match op {
@@ -1683,14 +1684,18 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
16831684
let atomic_function = self.context.get_builtin_function(name);
16841685
let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
16851686

1687+
// FIXME: If `ret_ptr` is true and `src` is an integer, we should really tell GCC
1688+
// that this is a pointer operation that needs to preserve provenance -- but like LLVM,
1689+
// GCC does not currently seem to support that.
16861690
let void_ptr_type = self.context.new_type::<*mut ()>();
16871691
let volatile_void_ptr_type = void_ptr_type.make_volatile();
16881692
let dst = self.context.new_cast(self.location, dst, volatile_void_ptr_type);
16891693
// FIXME(antoyo): not sure why, but we have the wrong type here.
16901694
let new_src_type = atomic_function.get_param(1).to_rvalue().get_type();
16911695
let src = self.context.new_bitcast(self.location, src, new_src_type);
16921696
let res = self.context.new_call(self.location, atomic_function, &[dst, src, order]);
1693-
self.context.new_cast(self.location, res, src.get_type())
1697+
let res_type = if ret_ptr { void_ptr_type } else { src.get_type() };
1698+
self.context.new_cast(self.location, res, res_type)
16941699
}
16951700

16961701
fn atomic_fence(&mut self, order: AtomicOrdering, scope: SynchronizationScope) {

compiler/rustc_codegen_llvm/src/builder.rs

Lines changed: 6 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1257,15 +1257,13 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
12571257
&mut self,
12581258
op: rustc_codegen_ssa::common::AtomicRmwBinOp,
12591259
dst: &'ll Value,
1260-
mut src: &'ll Value,
1260+
src: &'ll Value,
12611261
order: rustc_middle::ty::AtomicOrdering,
1262+
ret_ptr: bool,
12621263
) -> &'ll Value {
1263-
// The only RMW operation that LLVM supports on pointers is compare-exchange.
1264-
let requires_cast_to_int = self.val_ty(src) == self.type_ptr()
1265-
&& op != rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicXchg;
1266-
if requires_cast_to_int {
1267-
src = self.ptrtoint(src, self.type_isize());
1268-
}
1264+
// FIXME: If `ret_ptr` is true and `src` is not a pointer, we *should* tell LLVM that the
1265+
// LHS is a pointer and the operation should be provenance-preserving, but LLVM does not
1266+
// currently support that (https://github.com/llvm/llvm-project/issues/120837).
12691267
let mut res = unsafe {
12701268
llvm::LLVMBuildAtomicRMW(
12711269
self.llbuilder,
@@ -1276,7 +1274,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
12761274
llvm::False, // SingleThreaded
12771275
)
12781276
};
1279-
if requires_cast_to_int {
1277+
if ret_ptr && self.val_ty(res) != self.type_ptr() {
12801278
res = self.inttoptr(res, self.type_ptr());
12811279
}
12821280
res

compiler/rustc_codegen_ssa/messages.ftl

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -97,6 +97,8 @@ codegen_ssa_invalid_monomorphization_basic_float_type = invalid monomorphization
9797
9898
codegen_ssa_invalid_monomorphization_basic_integer_type = invalid monomorphization of `{$name}` intrinsic: expected basic integer type, found `{$ty}`
9999
100+
codegen_ssa_invalid_monomorphization_basic_integer_or_ptr_type = invalid monomorphization of `{$name}` intrinsic: expected basic integer or pointer type, found `{$ty}`
101+
100102
codegen_ssa_invalid_monomorphization_cannot_return = invalid monomorphization of `{$name}` intrinsic: cannot return `{$ret_ty}`, expected `u{$expected_int_bits}` or `[u8; {$expected_bytes}]`
101103
102104
codegen_ssa_invalid_monomorphization_cast_wide_pointer = invalid monomorphization of `{$name}` intrinsic: cannot cast wide pointer `{$ty}`

compiler/rustc_codegen_ssa/src/errors.rs

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -764,6 +764,14 @@ pub enum InvalidMonomorphization<'tcx> {
764764
ty: Ty<'tcx>,
765765
},
766766

767+
#[diag(codegen_ssa_invalid_monomorphization_basic_integer_or_ptr_type, code = E0511)]
768+
BasicIntegerOrPtrType {
769+
#[primary_span]
770+
span: Span,
771+
name: Symbol,
772+
ty: Ty<'tcx>,
773+
},
774+
767775
#[diag(codegen_ssa_invalid_monomorphization_basic_float_type, code = E0511)]
768776
BasicFloatType {
769777
#[primary_span]

compiler/rustc_codegen_ssa/src/mir/intrinsic.rs

Lines changed: 66 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -92,6 +92,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
9292
let invalid_monomorphization_int_type = |ty| {
9393
bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerType { span, name, ty });
9494
};
95+
let invalid_monomorphization_int_or_ptr_type = |ty| {
96+
bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerOrPtrType {
97+
span,
98+
name,
99+
ty,
100+
});
101+
};
95102

96103
let parse_atomic_ordering = |ord: ty::Value<'tcx>| {
97104
let discr = ord.valtree.unwrap_branch()[0].unwrap_leaf();
@@ -351,7 +358,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
351358
sym::atomic_load => {
352359
let ty = fn_args.type_at(0);
353360
if !(int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr()) {
354-
invalid_monomorphization_int_type(ty);
361+
invalid_monomorphization_int_or_ptr_type(ty);
355362
return Ok(());
356363
}
357364
let ordering = fn_args.const_at(1).to_value();
@@ -367,7 +374,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
367374
sym::atomic_store => {
368375
let ty = fn_args.type_at(0);
369376
if !(int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr()) {
370-
invalid_monomorphization_int_type(ty);
377+
invalid_monomorphization_int_or_ptr_type(ty);
371378
return Ok(());
372379
}
373380
let ordering = fn_args.const_at(1).to_value();
@@ -377,10 +384,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
377384
bx.atomic_store(val, ptr, parse_atomic_ordering(ordering), size);
378385
return Ok(());
379386
}
387+
// These are all AtomicRMW ops
380388
sym::atomic_cxchg | sym::atomic_cxchgweak => {
381389
let ty = fn_args.type_at(0);
382390
if !(int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr()) {
383-
invalid_monomorphization_int_type(ty);
391+
invalid_monomorphization_int_or_ptr_type(ty);
384392
return Ok(());
385393
}
386394
let succ_ordering = fn_args.const_at(1).to_value();
@@ -407,7 +415,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
407415

408416
return Ok(());
409417
}
410-
// These are all AtomicRMW ops
411418
sym::atomic_max | sym::atomic_min => {
412419
let atom_op = if name == sym::atomic_max {
413420
AtomicRmwBinOp::AtomicMax
@@ -420,7 +427,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
420427
let ordering = fn_args.const_at(1).to_value();
421428
let ptr = args[0].immediate();
422429
let val = args[1].immediate();
423-
bx.atomic_rmw(atom_op, ptr, val, parse_atomic_ordering(ordering))
430+
bx.atomic_rmw(
431+
atom_op,
432+
ptr,
433+
val,
434+
parse_atomic_ordering(ordering),
435+
/* ret_ptr */ false,
436+
)
424437
} else {
425438
invalid_monomorphization_int_type(ty);
426439
return Ok(());
@@ -438,21 +451,44 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
438451
let ordering = fn_args.const_at(1).to_value();
439452
let ptr = args[0].immediate();
440453
let val = args[1].immediate();
441-
bx.atomic_rmw(atom_op, ptr, val, parse_atomic_ordering(ordering))
454+
bx.atomic_rmw(
455+
atom_op,
456+
ptr,
457+
val,
458+
parse_atomic_ordering(ordering),
459+
/* ret_ptr */ false,
460+
)
442461
} else {
443462
invalid_monomorphization_int_type(ty);
444463
return Ok(());
445464
}
446465
}
447-
sym::atomic_xchg
448-
| sym::atomic_xadd
466+
sym::atomic_xchg => {
467+
let ty = fn_args.type_at(0);
468+
let ordering = fn_args.const_at(fn_args.len() - 1).to_value();
469+
let ptr = args[0].immediate();
470+
let val = args[1].immediate();
471+
if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr() {
472+
let atomic_op = AtomicRmwBinOp::AtomicXchg;
473+
bx.atomic_rmw(
474+
atomic_op,
475+
ptr,
476+
val,
477+
parse_atomic_ordering(ordering),
478+
/* ret_ptr */ ty.is_raw_ptr(),
479+
)
480+
} else {
481+
invalid_monomorphization_int_or_ptr_type(ty);
482+
return Ok(());
483+
}
484+
}
485+
sym::atomic_xadd
449486
| sym::atomic_xsub
450487
| sym::atomic_and
451488
| sym::atomic_nand
452489
| sym::atomic_or
453490
| sym::atomic_xor => {
454491
let atom_op = match name {
455-
sym::atomic_xchg => AtomicRmwBinOp::AtomicXchg,
456492
sym::atomic_xadd => AtomicRmwBinOp::AtomicAdd,
457493
sym::atomic_xsub => AtomicRmwBinOp::AtomicSub,
458494
sym::atomic_and => AtomicRmwBinOp::AtomicAnd,
@@ -462,14 +498,28 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
462498
_ => unreachable!(),
463499
};
464500

465-
let ty = fn_args.type_at(0);
466-
if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr() {
467-
let ordering = fn_args.const_at(1).to_value();
468-
let ptr = args[0].immediate();
469-
let val = args[1].immediate();
470-
bx.atomic_rmw(atom_op, ptr, val, parse_atomic_ordering(ordering))
501+
// The type of the in-memory data.
502+
let ty_mem = fn_args.type_at(0);
503+
// The type of the 2nd operand, given by-value.
504+
let ty_op = fn_args.type_at(1);
505+
506+
let ordering = fn_args.const_at(2).to_value();
507+
let ptr = args[0].immediate(); // of type "pointer to `ty_mem`"
508+
let val = args[1].immediate(); // of type `ty_op`
509+
// We require either both arguments to have the same integer type, or the first to
510+
// be a pointer and the second to be `usize`.
511+
if (int_type_width_signed(ty_mem, bx.tcx()).is_some() && ty_op == ty_mem)
512+
|| (ty_mem.is_raw_ptr() && ty_op == bx.tcx().types.usize)
513+
{
514+
bx.atomic_rmw(
515+
atom_op,
516+
ptr,
517+
val,
518+
parse_atomic_ordering(ordering),
519+
/* ret_ptr */ ty_mem.is_raw_ptr(),
520+
)
471521
} else {
472-
invalid_monomorphization_int_type(ty);
522+
invalid_monomorphization_int_or_ptr_type(ty_mem);
473523
return Ok(());
474524
}
475525
}

compiler/rustc_codegen_ssa/src/traits/builder.rs

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -548,12 +548,15 @@ pub trait BuilderMethods<'a, 'tcx>:
548548
failure_order: AtomicOrdering,
549549
weak: bool,
550550
) -> (Self::Value, Self::Value);
551+
/// `ret_ptr` indicates whether the return type (which is also the type `dst` points to)
552+
/// is a pointer or the same type as `src`.
551553
fn atomic_rmw(
552554
&mut self,
553555
op: AtomicRmwBinOp,
554556
dst: Self::Value,
555557
src: Self::Value,
556558
order: AtomicOrdering,
559+
ret_ptr: bool,
557560
) -> Self::Value;
558561
fn atomic_fence(&mut self, order: AtomicOrdering, scope: SynchronizationScope);
559562
fn set_invariant_load(&mut self, load: Self::Value);

compiler/rustc_hir_analysis/src/check/intrinsic.rs

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -652,16 +652,16 @@ pub(crate) fn check_intrinsic_type(
652652
sym::atomic_store => (1, 1, vec![Ty::new_mut_ptr(tcx, param(0)), param(0)], tcx.types.unit),
653653

654654
sym::atomic_xchg
655-
| sym::atomic_xadd
656-
| sym::atomic_xsub
657-
| sym::atomic_and
658-
| sym::atomic_nand
659-
| sym::atomic_or
660-
| sym::atomic_xor
661655
| sym::atomic_max
662656
| sym::atomic_min
663657
| sym::atomic_umax
664658
| sym::atomic_umin => (1, 1, vec![Ty::new_mut_ptr(tcx, param(0)), param(0)], param(0)),
659+
sym::atomic_xadd
660+
| sym::atomic_xsub
661+
| sym::atomic_and
662+
| sym::atomic_nand
663+
| sym::atomic_or
664+
| sym::atomic_xor => (2, 1, vec![Ty::new_mut_ptr(tcx, param(0)), param(1)], param(0)),
665665
sym::atomic_fence | sym::atomic_singlethreadfence => (0, 1, Vec::new(), tcx.types.unit),
666666

667667
other => {

library/core/src/intrinsics/mod.rs

Lines changed: 12 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -150,69 +150,63 @@ pub unsafe fn atomic_xchg<T: Copy, const ORD: AtomicOrdering>(dst: *mut T, src:
150150

151151
/// Adds to the current value, returning the previous value.
152152
/// `T` must be an integer or pointer type.
153-
/// If `T` is a pointer type, the provenance of `src` is ignored: both the return value and the new
154-
/// value stored at `*dst` will have the provenance of the old value stored there.
153+
/// `U` must be the same as `T` if that is an integer type, or `usize` if `T` is a pointer type.
155154
///
156155
/// The stabilized version of this intrinsic is available on the
157156
/// [`atomic`] types via the `fetch_add` method. For example, [`AtomicIsize::fetch_add`].
158157
#[rustc_intrinsic]
159158
#[rustc_nounwind]
160-
pub unsafe fn atomic_xadd<T: Copy, const ORD: AtomicOrdering>(dst: *mut T, src: T) -> T;
159+
pub unsafe fn atomic_xadd<T: Copy, U: Copy, const ORD: AtomicOrdering>(dst: *mut T, src: U) -> T;
161160

162161
/// Subtract from the current value, returning the previous value.
163162
/// `T` must be an integer or pointer type.
164-
/// If `T` is a pointer type, the provenance of `src` is ignored: both the return value and the new
165-
/// value stored at `*dst` will have the provenance of the old value stored there.
163+
/// `U` must be the same as `T` if that is an integer type, or `usize` if `T` is a pointer type.
166164
///
167165
/// The stabilized version of this intrinsic is available on the
168166
/// [`atomic`] types via the `fetch_sub` method. For example, [`AtomicIsize::fetch_sub`].
169167
#[rustc_intrinsic]
170168
#[rustc_nounwind]
171-
pub unsafe fn atomic_xsub<T: Copy, const ORD: AtomicOrdering>(dst: *mut T, src: T) -> T;
169+
pub unsafe fn atomic_xsub<T: Copy, U: Copy, const ORD: AtomicOrdering>(dst: *mut T, src: U) -> T;
172170

173171
/// Bitwise and with the current value, returning the previous value.
174172
/// `T` must be an integer or pointer type.
175-
/// If `T` is a pointer type, the provenance of `src` is ignored: both the return value and the new
176-
/// value stored at `*dst` will have the provenance of the old value stored there.
173+
/// `U` must be the same as `T` if that is an integer type, or `usize` if `T` is a pointer type.
177174
///
178175
/// The stabilized version of this intrinsic is available on the
179176
/// [`atomic`] types via the `fetch_and` method. For example, [`AtomicBool::fetch_and`].
180177
#[rustc_intrinsic]
181178
#[rustc_nounwind]
182-
pub unsafe fn atomic_and<T: Copy, const ORD: AtomicOrdering>(dst: *mut T, src: T) -> T;
179+
pub unsafe fn atomic_and<T: Copy, U: Copy, const ORD: AtomicOrdering>(dst: *mut T, src: U) -> T;
183180

184181
/// Bitwise nand with the current value, returning the previous value.
185182
/// `T` must be an integer or pointer type.
186-
/// If `T` is a pointer type, the provenance of `src` is ignored: both the return value and the new
187-
/// value stored at `*dst` will have the provenance of the old value stored there.
183+
/// `U` must be the same as `T` if that is an integer type, or `usize` if `T` is a pointer type.
188184
///
189185
/// The stabilized version of this intrinsic is available on the
190186
/// [`AtomicBool`] type via the `fetch_nand` method. For example, [`AtomicBool::fetch_nand`].
191187
#[rustc_intrinsic]
192188
#[rustc_nounwind]
193-
pub unsafe fn atomic_nand<T: Copy, const ORD: AtomicOrdering>(dst: *mut T, src: T) -> T;
189+
pub unsafe fn atomic_nand<T: Copy, U: Copy, const ORD: AtomicOrdering>(dst: *mut T, src: U) -> T;
194190

195191
/// Bitwise or with the current value, returning the previous value.
196192
/// `T` must be an integer or pointer type.
197-
/// If `T` is a pointer type, the provenance of `src` is ignored: both the return value and the new
198-
/// value stored at `*dst` will have the provenance of the old value stored there.
193+
/// `U` must be the same as `T` if that is an integer type, or `usize` if `T` is a pointer type.
199194
///
200195
/// The stabilized version of this intrinsic is available on the
201196
/// [`atomic`] types via the `fetch_or` method. For example, [`AtomicBool::fetch_or`].
202197
#[rustc_intrinsic]
203198
#[rustc_nounwind]
204-
pub unsafe fn atomic_or<T: Copy, const ORD: AtomicOrdering>(dst: *mut T, src: T) -> T;
199+
pub unsafe fn atomic_or<T: Copy, U: Copy, const ORD: AtomicOrdering>(dst: *mut T, src: U) -> T;
205200

206201
/// Bitwise xor with the current value, returning the previous value.
207202
/// `T` must be an integer or pointer type.
208-
/// If `T` is a pointer type, the provenance of `src` is ignored: both the return value and the new
209-
/// value stored at `*dst` will have the provenance of the old value stored there.
203+
/// `U` must be the same as `T` if that is an integer type, or `usize` if `T` is a pointer type.
210204
///
211205
/// The stabilized version of this intrinsic is available on the
212206
/// [`atomic`] types via the `fetch_xor` method. For example, [`AtomicBool::fetch_xor`].
213207
#[rustc_intrinsic]
214208
#[rustc_nounwind]
215-
pub unsafe fn atomic_xor<T: Copy, const ORD: AtomicOrdering>(dst: *mut T, src: T) -> T;
209+
pub unsafe fn atomic_xor<T: Copy, U: Copy, const ORD: AtomicOrdering>(dst: *mut T, src: U) -> T;
216210

217211
/// Maximum with the current value using a signed comparison.
218212
/// `T` must be a signed integer type.

0 commit comments

Comments
 (0)