diff --git a/compiler-rt/lib/tsan/rtl/tsan_defs.h b/compiler-rt/lib/tsan/rtl/tsan_defs.h
index 1ffa3d6aec40b..270d441dc90b7 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_defs.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_defs.h
@@ -30,7 +30,7 @@
 #  define __MM_MALLOC_H
 #  include <emmintrin.h>
 #  include <smmintrin.h>
-#  define VECTOR_ALIGNED ALIGNED(16)
+#  define VECTOR_ALIGNED alignas(16)
 typedef __m128i m128;
 #else
 #  define VECTOR_ALIGNED
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp b/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
index 034ae3d322b56..9cab2a3727128 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
@@ -208,7 +208,7 @@ struct AtExitCtx {
 struct InterceptorContext {
   // The object is 64-byte aligned, because we want hot data to be located
   // in a single cache line if possible (it's accessed in every interceptor).
-  ALIGNED(64) LibIgnore libignore;
+  alignas(64) LibIgnore libignore;
   __sanitizer_sigaction sigactions[kSigCount];
 #if !SANITIZER_APPLE && !SANITIZER_NETBSD
   unsigned finalize_key;
@@ -220,7 +220,7 @@ struct InterceptorContext {
   InterceptorContext() : libignore(LINKER_INITIALIZED), atexit_mu(MutexTypeAtExit), AtExitStack() {}
 };
 
-static ALIGNED(64) char interceptor_placeholder[sizeof(InterceptorContext)];
+alignas(64) static char interceptor_placeholder[sizeof(InterceptorContext)];
 InterceptorContext *interceptor_ctx() {
   return reinterpret_cast<InterceptorContext*>(&interceptor_placeholder[0]);
 }
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp b/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp
index 5154662034c56..befd6a369026d 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp
@@ -76,7 +76,7 @@ struct DynamicAnnContext {
 };
 
 static DynamicAnnContext *dyn_ann_ctx;
-static char dyn_ann_ctx_placeholder[sizeof(DynamicAnnContext)] ALIGNED(64);
+alignas(64) static char dyn_ann_ctx_placeholder[sizeof(DynamicAnnContext)];
 
 static void AddExpectRace(ExpectRace *list,
     char *f, int l, uptr addr, uptr size, char *desc) {
diff --git a/compiler-rt/lib/tsan/rtl/tsan_mman.cpp b/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
index e129e9af272f5..0705365d77427 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
@@ -54,7 +54,7 @@ struct MapUnmapCallback {
   }
 };
 
-static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64);
+alignas(64) static char allocator_placeholder[sizeof(Allocator)];
 Allocator *allocator() {
   return reinterpret_cast<Allocator*>(&allocator_placeholder);
 }
@@ -75,7 +75,7 @@ struct GlobalProc {
         internal_alloc_mtx(MutexTypeInternalAlloc) {}
 };
 
-static char global_proc_placeholder[sizeof(GlobalProc)] ALIGNED(64);
+alignas(64) static char global_proc_placeholder[sizeof(GlobalProc)];
 GlobalProc *global_proc() {
   return reinterpret_cast<GlobalProc*>(&global_proc_placeholder);
 }
diff --git a/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp b/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp
index 07d83e1a9a9ff..c8a66e60a69f1 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp
@@ -46,7 +46,7 @@ namespace __tsan {
 
 #if !SANITIZER_GO
-static char main_thread_state[sizeof(ThreadState)] ALIGNED(
-    SANITIZER_CACHE_LINE_SIZE);
+alignas(SANITIZER_CACHE_LINE_SIZE) static char main_thread_state[sizeof(
+    ThreadState)];
 static ThreadState *dead_thread_state;
 static pthread_key_t thread_state_key;
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
index e5ebb65754b32..bf29aa316f680 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
@@ -48,11 +48,10 @@ int (*on_finalize)(int);
 #endif
 
 #if !SANITIZER_GO && !SANITIZER_APPLE
-__attribute__((tls_model("initial-exec")))
-THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(
-    SANITIZER_CACHE_LINE_SIZE);
+alignas(SANITIZER_CACHE_LINE_SIZE) THREADLOCAL __attribute__((tls_model(
+    "initial-exec"))) char cur_thread_placeholder[sizeof(ThreadState)];
 #endif
-static char ctx_placeholder[sizeof(Context)] ALIGNED(SANITIZER_CACHE_LINE_SIZE);
+alignas(SANITIZER_CACHE_LINE_SIZE) static char ctx_placeholder[sizeof(Context)];
 Context *ctx;
 
 // Can be overriden by a front-end.
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.h b/compiler-rt/lib/tsan/rtl/tsan_rtl.h
index de4ea0bb5f487..f48be8e0a4fe0 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.h
@@ -136,7 +136,7 @@ struct TidEpoch {
   Epoch epoch;
 };
 
-struct TidSlot {
+struct alignas(SANITIZER_CACHE_LINE_SIZE) TidSlot {
   Mutex mtx;
   Sid sid;
   atomic_uint32_t raw_epoch;
@@ -153,10 +153,10 @@ struct TidSlot {
   }
 
   TidSlot();
-} ALIGNED(SANITIZER_CACHE_LINE_SIZE);
+};
 
 // This struct is stored in TLS.
-struct ThreadState {
+struct alignas(SANITIZER_CACHE_LINE_SIZE) ThreadState {
   FastState fast_state;
   int ignore_sync;
 #if !SANITIZER_GO
@@ -234,7 +234,7 @@ struct ThreadState {
   const ReportDesc *current_report;
 
   explicit ThreadState(Tid tid);
-} ALIGNED(SANITIZER_CACHE_LINE_SIZE);
+};
 
 #if !SANITIZER_GO
 #if SANITIZER_APPLE || SANITIZER_ANDROID
diff --git a/compiler-rt/lib/tsan/rtl/tsan_suppressions.cpp b/compiler-rt/lib/tsan/rtl/tsan_suppressions.cpp
index 70642124990d7..0559df06e7e2e 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_suppressions.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_suppressions.cpp
@@ -42,7 +42,7 @@ const char *__tsan_default_suppressions() {
 
 namespace __tsan {
 
-ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
+alignas(64) static char suppression_placeholder[sizeof(SuppressionContext)];
 static SuppressionContext *suppression_ctx = nullptr;
 static const char *kSuppressionTypes[] = {
     kSuppressionRace, kSuppressionRaceTop, kSuppressionMutex,
diff --git a/compiler-rt/lib/tsan/rtl/tsan_vector_clock.h b/compiler-rt/lib/tsan/rtl/tsan_vector_clock.h
index 63b206302190d..51d98113d8e78 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_vector_clock.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_vector_clock.h
@@ -34,7 +34,7 @@ class VectorClock {
   VectorClock& operator=(const VectorClock& other);
 
  private:
-  Epoch clk_[kThreadSlotCount] VECTOR_ALIGNED;
+  VECTOR_ALIGNED Epoch clk_[kThreadSlotCount];
 };
 
 ALWAYS_INLINE Epoch VectorClock::Get(Sid sid) const {
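
Note on the placement changes above: the sanitizer ALIGNED(N) macro wraps the GNU __attribute__((aligned(N))) (or its MSVC equivalent), which is allowed to trail a declarator or a closing "}". Standard alignas(N) is not: it has to sit with the declaration specifiers, or between the class-key and the tag name when it aligns a whole type. That is why every hunk moves the specifier to the front of the declaration (or into the struct head) instead of swapping it in place. The following sketch is not TSan code; Widget, kCacheLineSize, widget_storage, and widget() are made-up stand-ins, and a 64-byte cache line is assumed. It shows the two alignas placements the diff relies on, applied to the same cache-line-aligned placeholder-plus-reinterpret_cast pattern most of these hunks touch.

// Standalone illustration of the alignas placements used in the diff.
// Hypothetical names only: Widget, kCacheLineSize, widget_storage, widget().
#include <cstddef>
#include <cstdio>
#include <new>

constexpr std::size_t kCacheLineSize = 64;  // assumed cache-line size

// Old GNU style could trail the declarator:
//   static char widget_storage[sizeof(Widget)] __attribute__((aligned(64)));
// Standard alignas must lead the declaration, and for a class type it goes
// between the class-key and the name:
struct alignas(kCacheLineSize) Widget {
  int hot_counter = 0;
  char payload[40] = {};
};

// Cache-line-aligned raw storage for a lazily constructed singleton,
// mirroring the placeholder buffers in the TSan hunks.
alignas(kCacheLineSize) static char widget_storage[sizeof(Widget)];

static Widget *widget() {
  return reinterpret_cast<Widget *>(&widget_storage[0]);
}

int main() {
  new (widget()) Widget();  // placement-new into the aligned buffer
  std::printf("alignof(Widget) = %zu, storage at %p\n", alignof(Widget),
              static_cast<void *>(widget()));
  return 0;
}

alignas on the buffer only guarantees suitably aligned storage; the bytes still have to be turned into a live object, which the TSan runtime does by placement-constructing into these placeholders during initialization. The sketch imitates that with new (widget()) Widget() before the first use. Whether alignas(N) is written before or after static is purely stylistic; the diff consistently puts it first.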