File Manager
Current Path:
/usr/src/contrib/llvm-project/compiler-rt/lib/scudo/standalone
usr
/
src
/
contrib
/
llvm-project
/
compiler-rt
/
lib
/
scudo
/
standalone
/
📁
..
📄
allocator_config.h
(2.57 KB)
📄
atomic_helpers.h
(4.16 KB)
📄
bytemap.h
(992 B)
📄
checksum.cpp
(2.52 KB)
📄
checksum.h
(1.67 KB)
📄
chunk.h
(5.83 KB)
📄
combined.h
(40.99 KB)
📄
common.cpp
(874 B)
📄
common.h
(5.29 KB)
📄
crc32_hw.cpp
(656 B)
📄
flags.cpp
(2.09 KB)
📄
flags.h
(975 B)
📄
flags.inc
(2.41 KB)
📄
flags_parser.cpp
(4.38 KB)
📄
flags_parser.h
(1.23 KB)
📄
fuchsia.cpp
(6.04 KB)
📄
fuchsia.h
(695 B)
📁
fuzz
📁
include
📄
internal_defs.h
(4.61 KB)
📄
linux.cpp
(6.1 KB)
📄
linux.h
(2.77 KB)
📄
list.h
(4.95 KB)
📄
local_cache.h
(5.21 KB)
📄
memtag.h
(7.21 KB)
📄
mutex.h
(1.69 KB)
📄
platform.h
(2.03 KB)
📄
primary32.h
(18.05 KB)
📄
primary64.h
(17.3 KB)
📄
quarantine.h
(9.64 KB)
📄
release.cpp
(522 B)
📄
release.h
(10.27 KB)
📄
report.cpp
(6.62 KB)
📄
report.h
(1.97 KB)
📄
secondary.h
(13.22 KB)
📄
size_class_map.h
(9.9 KB)
📄
stack_depot.h
(4.47 KB)
📄
stats.h
(2.77 KB)
📄
string_utils.cpp
(7.93 KB)
📄
string_utils.h
(1.05 KB)
📄
tsd.h
(1.84 KB)
📄
tsd_exclusive.h
(4.03 KB)
📄
tsd_shared.h
(5.53 KB)
📄
vector.h
(3.07 KB)
📄
wrappers_c.cpp
(1.19 KB)
📄
wrappers_c.h
(1.46 KB)
📄
wrappers_c.inc
(8.6 KB)
📄
wrappers_c_bionic.cpp
(2.23 KB)
📄
wrappers_c_checks.h
(1.99 KB)
📄
wrappers_cpp.cpp
(4.53 KB)
Editing: atomic_helpers.h
//===-- atomic_helpers.h ----------------------------------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// #ifndef SCUDO_ATOMIC_H_ #define SCUDO_ATOMIC_H_ #include "internal_defs.h" namespace scudo { enum memory_order { memory_order_relaxed = 0, memory_order_consume = 1, memory_order_acquire = 2, memory_order_release = 3, memory_order_acq_rel = 4, memory_order_seq_cst = 5 }; static_assert(memory_order_relaxed == __ATOMIC_RELAXED, ""); static_assert(memory_order_consume == __ATOMIC_CONSUME, ""); static_assert(memory_order_acquire == __ATOMIC_ACQUIRE, ""); static_assert(memory_order_release == __ATOMIC_RELEASE, ""); static_assert(memory_order_acq_rel == __ATOMIC_ACQ_REL, ""); static_assert(memory_order_seq_cst == __ATOMIC_SEQ_CST, ""); struct atomic_u8 { typedef u8 Type; volatile Type ValDoNotUse; }; struct atomic_u16 { typedef u16 Type; volatile Type ValDoNotUse; }; struct atomic_s32 { typedef s32 Type; volatile Type ValDoNotUse; }; struct atomic_u32 { typedef u32 Type; volatile Type ValDoNotUse; }; struct atomic_u64 { typedef u64 Type; // On 32-bit platforms u64 is not necessarily aligned on 8 bytes. 
alignas(8) volatile Type ValDoNotUse; }; struct atomic_uptr { typedef uptr Type; volatile Type ValDoNotUse; }; template <typename T> inline typename T::Type atomic_load(const volatile T *A, memory_order MO) { DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A))); typename T::Type V; __atomic_load(&A->ValDoNotUse, &V, MO); return V; } template <typename T> inline void atomic_store(volatile T *A, typename T::Type V, memory_order MO) { DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A))); __atomic_store(&A->ValDoNotUse, &V, MO); } inline void atomic_thread_fence(memory_order) { __sync_synchronize(); } template <typename T> inline typename T::Type atomic_fetch_add(volatile T *A, typename T::Type V, memory_order MO) { DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A))); return __atomic_fetch_add(&A->ValDoNotUse, V, MO); } template <typename T> inline typename T::Type atomic_fetch_sub(volatile T *A, typename T::Type V, memory_order MO) { DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A))); return __atomic_fetch_sub(&A->ValDoNotUse, V, MO); } template <typename T> inline typename T::Type atomic_exchange(volatile T *A, typename T::Type V, memory_order MO) { DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A))); typename T::Type R; __atomic_exchange(&A->ValDoNotUse, &V, &R, MO); return R; } template <typename T> inline bool atomic_compare_exchange_strong(volatile T *A, typename T::Type *Cmp, typename T::Type Xchg, memory_order MO) { return __atomic_compare_exchange(&A->ValDoNotUse, Cmp, &Xchg, false, MO, __ATOMIC_RELAXED); } template <typename T> inline bool atomic_compare_exchange_weak(volatile T *A, typename T::Type *Cmp, typename T::Type Xchg, memory_order MO) { return __atomic_compare_exchange(&A->ValDoNotUse, Cmp, &Xchg, true, MO, __ATOMIC_RELAXED); } // Clutter-reducing helpers. 
template <typename T> inline typename T::Type atomic_load_relaxed(const volatile T *A) { return atomic_load(A, memory_order_relaxed); } template <typename T> inline void atomic_store_relaxed(volatile T *A, typename T::Type V) { atomic_store(A, V, memory_order_relaxed); } template <typename T> inline typename T::Type atomic_compare_exchange(volatile T *A, typename T::Type Cmp, typename T::Type Xchg) { atomic_compare_exchange_strong(A, &Cmp, Xchg, memory_order_acquire); return Cmp; } } // namespace scudo #endif // SCUDO_ATOMIC_H_
Upload File
Create Folder