003 File Manager
Current Path:
/usr/src/contrib/llvm-project/compiler-rt/lib/builtins
usr
/
src
/
contrib
/
llvm-project
/
compiler-rt
/
lib
/
builtins
/
📁
..
📄
README.txt
(14.85 KB)
📁
aarch64
📄
absvdi2.c
(815 B)
📄
absvsi2.c
(815 B)
📄
absvti2.c
(864 B)
📄
adddf3.c
(859 B)
📄
addsf3.c
(853 B)
📄
addtf3.c
(730 B)
📄
addvdi3.c
(819 B)
📄
addvsi3.c
(819 B)
📄
addvti3.c
(868 B)
📄
apple_versioning.c
(13.1 KB)
📁
arm
📄
ashldi3.c
(1.17 KB)
📄
ashlti3.c
(1.15 KB)
📄
ashrdi3.c
(1.27 KB)
📄
ashrti3.c
(1.25 KB)
📄
assembly.h
(7.11 KB)
📄
atomic.c
(16.86 KB)
📄
atomic_flag_clear.c
(791 B)
📄
atomic_flag_clear_explicit.c
(859 B)
📄
atomic_flag_test_and_set.c
(823 B)
📄
atomic_flag_test_and_set_explicit.c
(898 B)
📄
atomic_signal_fence.c
(761 B)
📄
atomic_thread_fence.c
(761 B)
📄
bswapdi2.c
(958 B)
📄
bswapsi2.c
(743 B)
📄
clear_cache.c
(6.13 KB)
📄
clzdi2.c
(1.27 KB)
📄
clzsi2.c
(1.48 KB)
📄
clzti2.c
(884 B)
📄
cmpdi2.c
(1.12 KB)
📄
cmpti2.c
(974 B)
📄
comparedf2.c
(4.29 KB)
📄
comparesf2.c
(4.28 KB)
📄
comparetf2.c
(3.79 KB)
📄
cpu_model.c
(21.28 KB)
📄
ctzdi2.c
(1.27 KB)
📄
ctzsi2.c
(1.61 KB)
📄
ctzti2.c
(884 B)
📄
divdc3.c
(2.18 KB)
📄
divdf3.c
(7.55 KB)
📄
divdi3.c
(1.13 KB)
📄
divmoddi4.c
(712 B)
📄
divmodsi4.c
(715 B)
📄
divsc3.c
(2.14 KB)
📄
divsf3.c
(6.96 KB)
📄
divsi3.c
(1.32 KB)
📄
divtc3.c
(2.23 KB)
📄
divtf3.c
(8.01 KB)
📄
divti3.c
(1.18 KB)
📄
divxc3.c
(2.17 KB)
📄
emutls.c
(12.39 KB)
📄
enable_execute_stack.c
(2.08 KB)
📄
eprintf.c
(953 B)
📄
extenddftf2.c
(623 B)
📄
extendhfsf2.c
(946 B)
📄
extendsfdf2.c
(716 B)
📄
extendsftf2.c
(622 B)
📄
ffsdi2.c
(900 B)
📄
ffssi2.c
(776 B)
📄
ffsti2.c
(969 B)
📄
fixdfdi.c
(1.22 KB)
📄
fixdfsi.c
(755 B)
📄
fixdfti.c
(633 B)
📄
fixsfdi.c
(1.22 KB)
📄
fixsfsi.c
(755 B)
📄
fixsfti.c
(633 B)
📄
fixtfdi.c
(624 B)
📄
fixtfsi.c
(624 B)
📄
fixtfti.c
(624 B)
📄
fixunsdfdi.c
(1.29 KB)
📄
fixunsdfsi.c
(743 B)
📄
fixunsdfti.c
(611 B)
📄
fixunssfdi.c
(1.3 KB)
📄
fixunssfsi.c
(896 B)
📄
fixunssfti.c
(731 B)
📄
fixunstfdi.c
(604 B)
📄
fixunstfsi.c
(604 B)
📄
fixunstfti.c
(604 B)
📄
fixunsxfdi.c
(1.65 KB)
📄
fixunsxfsi.c
(1.62 KB)
📄
fixunsxfti.c
(1.41 KB)
📄
fixxfdi.c
(1.75 KB)
📄
fixxfti.c
(1.48 KB)
📄
floatdidf.c
(3.37 KB)
📄
floatdisf.c
(2.49 KB)
📄
floatditf.c
(1.5 KB)
📄
floatdixf.c
(1.37 KB)
📄
floatsidf.c
(1.7 KB)
📄
floatsisf.c
(1.89 KB)
📄
floatsitf.c
(1.5 KB)
📄
floattidf.c
(2.49 KB)
📄
floattisf.c
(2.33 KB)
📄
floattitf.c
(2.72 KB)
📄
floattixf.c
(2.53 KB)
📄
floatundidf.c
(3.43 KB)
📄
floatundisf.c
(2.4 KB)
📄
floatunditf.c
(1.27 KB)
📄
floatundixf.c
(1.24 KB)
📄
floatunsidf.c
(1.43 KB)
📄
floatunsisf.c
(1.75 KB)
📄
floatunsitf.c
(1.29 KB)
📄
floatuntidf.c
(2.38 KB)
📄
floatuntisf.c
(2.24 KB)
📄
floatuntitf.c
(2.6 KB)
📄
floatuntixf.c
(2.39 KB)
📄
fp_add_impl.inc
(5.4 KB)
📄
fp_extend.h
(2.36 KB)
📄
fp_extend_impl.inc
(4.53 KB)
📄
fp_fixint_impl.inc
(1.52 KB)
📄
fp_fixuint_impl.inc
(1.42 KB)
📄
fp_lib.h
(10.12 KB)
📄
fp_mode.c
(812 B)
📄
fp_mode.h
(824 B)
📄
fp_mul_impl.inc
(4.37 KB)
📄
fp_trunc.h
(2.01 KB)
📄
fp_trunc_impl.inc
(5.57 KB)
📄
gcc_personality_v0.c
(8.3 KB)
📁
hexagon
📁
i386
📄
int_div_impl.inc
(2.19 KB)
📄
int_endianness.h
(2.75 KB)
📄
int_lib.h
(4.57 KB)
📄
int_math.h
(3.6 KB)
📄
int_types.h
(4.16 KB)
📄
int_util.c
(1.95 KB)
📄
int_util.h
(1.32 KB)
📄
lshrdi3.c
(1.18 KB)
📄
lshrti3.c
(1.17 KB)
📄
mingw_fixfloat.c
(1.29 KB)
📄
moddi3.c
(1004 B)
📄
modsi3.c
(649 B)
📄
modti3.c
(1.03 KB)
📄
muldc3.c
(2.14 KB)
📄
muldf3.c
(918 B)
📄
muldi3.c
(1.55 KB)
📄
mulodi4.c
(1.31 KB)
📄
mulosi4.c
(1.31 KB)
📄
muloti4.c
(1.36 KB)
📄
mulsc3.c
(2.11 KB)
📄
mulsf3.c
(918 B)
📄
multc3.c
(2.02 KB)
📄
multf3.c
(799 B)
📄
multi3.c
(1.53 KB)
📄
mulvdi3.c
(1.26 KB)
📄
mulvsi3.c
(1.26 KB)
📄
mulvti3.c
(1.31 KB)
📄
mulxc3.c
(2.22 KB)
📄
negdf2.c
(832 B)
📄
negdi2.c
(719 B)
📄
negsf2.c
(832 B)
📄
negti2.c
(768 B)
📄
negvdi2.c
(768 B)
📄
negvsi2.c
(768 B)
📄
negvti2.c
(817 B)
📄
os_version_check.c
(8.07 KB)
📄
paritydi2.c
(712 B)
📄
paritysi2.c
(751 B)
📄
parityti2.c
(761 B)
📄
popcountdi2.c
(1.33 KB)
📄
popcountsi2.c
(1.13 KB)
📄
popcountti2.c
(1.69 KB)
📄
powidf2.c
(786 B)
📄
powisf2.c
(783 B)
📄
powitf2.c
(888 B)
📄
powixf2.c
(825 B)
📁
ppc
📁
riscv
📁
sparc64
📄
subdf3.c
(917 B)
📄
subsf3.c
(917 B)
📄
subtf3.c
(825 B)
📄
subvdi3.c
(819 B)
📄
subvsi3.c
(819 B)
📄
subvti3.c
(868 B)
📄
trampoline_setup.c
(1.75 KB)
📄
truncdfhf2.c
(715 B)
📄
truncdfsf2.c
(711 B)
📄
truncsfhf2.c
(940 B)
📄
trunctfdf2.c
(625 B)
📄
trunctfsf2.c
(624 B)
📄
ucmpdi2.c
(1.13 KB)
📄
ucmpti2.c
(978 B)
📄
udivdi3.c
(724 B)
📄
udivmoddi4.c
(5.4 KB)
📄
udivmodsi4.c
(715 B)
📄
udivmodti4.c
(4.87 KB)
📄
udivsi3.c
(802 B)
📄
udivti3.c
(699 B)
📄
umoddi3.c
(724 B)
📄
umodsi3.c
(724 B)
📄
umodti3.c
(717 B)
📄
unwind-ehabi-helpers.h
(1.86 KB)
📁
ve
📁
x86_64
Editing: atomic.c
//===-- atomic.c - Implement support functions for atomic operations.------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // atomic.c defines a set of functions for performing atomic accesses on // arbitrary-sized memory locations. This design uses locks that should // be fast in the uncontended case, for two reasons: // // 1) This code must work with C programs that do not link to anything // (including pthreads) and so it should not depend on any pthread // functions. // 2) Atomic operations, rather than explicit mutexes, are most commonly used // on code where contended operations are rate. // // To avoid needing a per-object lock, this code allocates an array of // locks and hashes the object pointers to find the one that it should use. // For operations that must be atomic on two locations, the lower lock is // always acquired first, to avoid deadlock. // //===----------------------------------------------------------------------===// #include <stdbool.h> #include <stdint.h> #include <string.h> #include "assembly.h" // Clang objects if you redefine a builtin. This little hack allows us to // define a function with the same name as an intrinsic. #pragma redefine_extname __atomic_load_c SYMBOL_NAME(__atomic_load) #pragma redefine_extname __atomic_store_c SYMBOL_NAME(__atomic_store) #pragma redefine_extname __atomic_exchange_c SYMBOL_NAME(__atomic_exchange) #pragma redefine_extname __atomic_compare_exchange_c SYMBOL_NAME( \ __atomic_compare_exchange) /// Number of locks. This allocates one page on 32-bit platforms, two on /// 64-bit. This can be specified externally if a different trade between /// memory usage and contention probability is required for a given platform. 
#ifndef SPINLOCK_COUNT
#define SPINLOCK_COUNT (1 << 10)
#endif
// Mask used to reduce a pointer hash to a valid index into `locks`.
static const long SPINLOCK_MASK = SPINLOCK_COUNT - 1;

////////////////////////////////////////////////////////////////////////////////
// Platform-specific lock implementation.  Falls back to spinlocks if none is
// defined.  Each platform should define the Lock type, and corresponding
// lock() and unlock() functions.
////////////////////////////////////////////////////////////////////////////////
#ifdef __FreeBSD__
#include <errno.h>
// clang-format off
#include <sys/types.h>
#include <machine/atomic.h>
#include <sys/umtx.h>
// clang-format on
// On FreeBSD the lock is a kernel-assisted semaphore: contended waiters
// sleep in the kernel via _umtx_op() instead of spinning.
typedef struct _usem Lock;
__inline static void unlock(Lock *l) {
  // Release the semaphore (count 0 -> 1), then wake a kernel waiter if the
  // kernel has recorded any.
  __c11_atomic_store((_Atomic(uint32_t) *)&l->_count, 1, __ATOMIC_RELEASE);
  __c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
  if (l->_has_waiters)
    _umtx_op(l, UMTX_OP_SEM_WAKE, 1, 0, 0);
}
__inline static void lock(Lock *l) {
  uint32_t old = 1;
  // Try to take the semaphore (count 1 -> 0); on failure, sleep in the
  // kernel until woken, then retry.
  while (!__c11_atomic_compare_exchange_weak((_Atomic(uint32_t) *)&l->_count,
                                             &old, 0, __ATOMIC_ACQUIRE,
                                             __ATOMIC_RELAXED)) {
    _umtx_op(l, UMTX_OP_SEM_WAIT, 0, 0, 0);
    // The CAS failure clobbered `old`; reset it to the expected value.
    old = 1;
  }
}
/// locks for atomic operations
static Lock locks[SPINLOCK_COUNT] = {[0 ... SPINLOCK_COUNT - 1] = {0, 1, 0}};

#elif defined(__APPLE__)
#include <libkern/OSAtomic.h>
typedef OSSpinLock Lock;
/// Unlock a lock.  This is a release operation.
__inline static void unlock(Lock *l) { OSSpinLockUnlock(l); }
/// Locks a lock.  In the current implementation, this is potentially
/// unbounded in the contended case.
__inline static void lock(Lock *l) { OSSpinLockLock(l); }
static Lock locks[SPINLOCK_COUNT]; // initialized to OS_SPINLOCK_INIT which is 0

#else
// Generic fallback: a pure userspace spinlock built on a C11 atomic word
// (0 = unlocked, 1 = locked).
typedef _Atomic(uintptr_t) Lock;
/// Unlock a lock.  This is a release operation.
__inline static void unlock(Lock *l) {
  __c11_atomic_store(l, 0, __ATOMIC_RELEASE);
}
/// Locks a lock.  In the current implementation, this is potentially
/// unbounded in the contended case.
__inline static void lock(Lock *l) {
  uintptr_t old = 0;
  // Spin until the 0 -> 1 transition succeeds; the failed CAS writes the
  // observed value into `old`, so reset it before retrying.
  while (!__c11_atomic_compare_exchange_weak(l, &old, 1, __ATOMIC_ACQUIRE,
                                             __ATOMIC_RELAXED))
    old = 0;
}
/// locks for atomic operations
static Lock locks[SPINLOCK_COUNT];
#endif

/// Returns a lock to use for a given pointer.
static __inline Lock *lock_for_pointer(void *ptr) {
  intptr_t hash = (intptr_t)ptr;
  // Disregard the lowest 4 bits.  We want all values that may be part of the
  // same memory operation to hash to the same value and therefore use the same
  // lock.
  hash >>= 4;
  // Use the next bits as the basis for the hash
  intptr_t low = hash & SPINLOCK_MASK;
  // Now use the high(er) set of bits to perturb the hash, so that we don't
  // get collisions from atomic fields in a single object
  hash >>= 16;
  hash ^= low;
  // Return a pointer to the word to use
  return locks + (hash & SPINLOCK_MASK);
}

/// Macros for determining whether a size is lock free.
#define IS_LOCK_FREE_1 __c11_atomic_is_lock_free(1)
#define IS_LOCK_FREE_2 __c11_atomic_is_lock_free(2)
#define IS_LOCK_FREE_4 __c11_atomic_is_lock_free(4)

/// 32 bit MIPS and PowerPC don't support 8-byte lock_free atomics
#if defined(__mips__) || (!defined(__powerpc64__) && defined(__powerpc__))
#define IS_LOCK_FREE_8 0
#else
#define IS_LOCK_FREE_8 __c11_atomic_is_lock_free(8)
#endif

/// Clang can not yet codegen __atomic_is_lock_free(16), so for now we assume
/// 16-byte values are not lock free.
#define IS_LOCK_FREE_16 0

/// Macro that calls the compiler-generated lock-free versions of functions
/// when they exist.  The caller defines LOCK_FREE_ACTION(type) before
/// expanding this, and the expansion dispatches on the runtime `size`
/// parameter of the enclosing function.
#define LOCK_FREE_CASES()                                                      \
  do {                                                                         \
    switch (size) {                                                            \
    case 1:                                                                    \
      if (IS_LOCK_FREE_1) {                                                    \
        LOCK_FREE_ACTION(uint8_t);                                             \
      }                                                                        \
      break;                                                                   \
    case 2:                                                                    \
      if (IS_LOCK_FREE_2) {                                                    \
        LOCK_FREE_ACTION(uint16_t);                                            \
      }                                                                        \
      break;                                                                   \
    case 4:                                                                    \
      if (IS_LOCK_FREE_4) {                                                    \
        LOCK_FREE_ACTION(uint32_t);                                            \
      }                                                                        \
      break;                                                                   \
    case 8:                                                                    \
      if (IS_LOCK_FREE_8) {                                                    \
        LOCK_FREE_ACTION(uint64_t);                                            \
      }                                                                        \
      break;                                                                   \
    case 16:                                                                   \
      if (IS_LOCK_FREE_16) {                                                   \
        /* FIXME: __uint128_t isn't available on 32 bit platforms.             \
           LOCK_FREE_ACTION(__uint128_t);*/                                    \
      }                                                                        \
      break;                                                                   \
    }                                                                          \
  } while (0)

/// An atomic load operation.  This is atomic with respect to the source
/// pointer only.
void __atomic_load_c(int size, void *src, void *dest, int model) {
#define LOCK_FREE_ACTION(type)                                                 \
  *((type *)dest) = __c11_atomic_load((_Atomic(type) *)src, model);            \
  return;
  LOCK_FREE_CASES();
#undef LOCK_FREE_ACTION
  // Locked fallback: the lock supplies the ordering, so `model` is not
  // consulted on this path.
  Lock *l = lock_for_pointer(src);
  lock(l);
  memcpy(dest, src, size);
  unlock(l);
}

/// An atomic store operation.  This is atomic with respect to the destination
/// pointer only.
void __atomic_store_c(int size, void *dest, void *src, int model) {
#define LOCK_FREE_ACTION(type)                                                 \
  __c11_atomic_store((_Atomic(type) *)dest, *(type *)src, model);              \
  return;
  LOCK_FREE_CASES();
#undef LOCK_FREE_ACTION
  // Locked fallback: `model` is not consulted on this path.
  Lock *l = lock_for_pointer(dest);
  lock(l);
  memcpy(dest, src, size);
  unlock(l);
}

/// Atomic compare and exchange operation.  If the value at *ptr is identical
/// to the value at *expected, then this copies value at *desired to *ptr.  If
/// they are not, then this stores the current value from *ptr in *expected.
///
/// This function returns 1 if the exchange takes place or 0 if it fails.
int __atomic_compare_exchange_c(int size, void *ptr, void *expected,
                                void *desired, int success, int failure) {
#define LOCK_FREE_ACTION(type)                                                 \
  return __c11_atomic_compare_exchange_strong(                                 \
      (_Atomic(type) *)ptr, (type *)expected, *(type *)desired, success,       \
      failure)
  LOCK_FREE_CASES();
#undef LOCK_FREE_ACTION
  // Locked fallback: compare/copy bytewise under the lock; the success and
  // failure memory orders are not consulted on this path.
  Lock *l = lock_for_pointer(ptr);
  lock(l);
  if (memcmp(ptr, expected, size) == 0) {
    memcpy(ptr, desired, size);
    unlock(l);
    return 1;
  }
  memcpy(expected, ptr, size);
  unlock(l);
  return 0;
}

/// Performs an atomic exchange operation between two pointers.  This is atomic
/// with respect to the target address.
void __atomic_exchange_c(int size, void *ptr, void *val, void *old, int model) {
#define LOCK_FREE_ACTION(type)                                                 \
  *(type *)old =                                                               \
      __c11_atomic_exchange((_Atomic(type) *)ptr, *(type *)val, model);        \
  return;
  LOCK_FREE_CASES();
#undef LOCK_FREE_ACTION
  // Locked fallback: snapshot the old value, then store the new one.
  // `model` is not consulted on this path; the lock supplies the ordering.
  Lock *l = lock_for_pointer(ptr);
  lock(l);
  memcpy(old, ptr, size);
  memcpy(ptr, val, size);
  unlock(l);
}

////////////////////////////////////////////////////////////////////////////////
// Where the size is known at compile time, the compiler may emit calls to
// specialised versions of the above functions.
////////////////////////////////////////////////////////////////////////////////
// OPTIMISED_CASES expands OPTIMISED_CASE(n, lockfree, type) once per
// supported size; the 16-byte case is only generated when the compiler
// provides __uint128_t.
#ifdef __SIZEOF_INT128__
#define OPTIMISED_CASES                                                        \
  OPTIMISED_CASE(1, IS_LOCK_FREE_1, uint8_t)                                   \
  OPTIMISED_CASE(2, IS_LOCK_FREE_2, uint16_t)                                  \
  OPTIMISED_CASE(4, IS_LOCK_FREE_4, uint32_t)                                  \
  OPTIMISED_CASE(8, IS_LOCK_FREE_8, uint64_t)                                  \
  OPTIMISED_CASE(16, IS_LOCK_FREE_16, __uint128_t)
#else
#define OPTIMISED_CASES                                                        \
  OPTIMISED_CASE(1, IS_LOCK_FREE_1, uint8_t)                                   \
  OPTIMISED_CASE(2, IS_LOCK_FREE_2, uint16_t)                                  \
  OPTIMISED_CASE(4, IS_LOCK_FREE_4, uint32_t)                                  \
  OPTIMISED_CASE(8, IS_LOCK_FREE_8, uint64_t)
#endif

// Size-specialised atomic load: __atomic_load_N.
#define OPTIMISED_CASE(n, lockfree, type)                                      \
  type __atomic_load_##n(type *src, int model) {                               \
    if (lockfree)                                                              \
      return __c11_atomic_load((_Atomic(type) *)src, model);                   \
    Lock *l = lock_for_pointer(src);                                           \
    lock(l);                                                                   \
    type val = *src;                                                           \
    unlock(l);                                                                 \
    return val;                                                                \
  }
OPTIMISED_CASES
#undef OPTIMISED_CASE

// Size-specialised atomic store: __atomic_store_N.
#define OPTIMISED_CASE(n, lockfree, type)                                      \
  void __atomic_store_##n(type *dest, type val, int model) {                   \
    if (lockfree) {                                                            \
      __c11_atomic_store((_Atomic(type) *)dest, val, model);                   \
      return;                                                                  \
    }                                                                          \
    Lock *l = lock_for_pointer(dest);                                          \
    lock(l);                                                                   \
    *dest = val;                                                               \
    unlock(l);                                                                 \
    return;                                                                    \
  }
OPTIMISED_CASES
#undef OPTIMISED_CASE

// Size-specialised atomic exchange: __atomic_exchange_N returns the old
// value.
#define OPTIMISED_CASE(n, lockfree, type)                                      \
  type __atomic_exchange_##n(type *dest, type val, int model) {                \
    if (lockfree)                                                              \
      return __c11_atomic_exchange((_Atomic(type) *)dest, val, model);         \
    Lock *l = lock_for_pointer(dest);                                          \
    lock(l);                                                                   \
    type tmp = *dest;                                                          \
    *dest = val;                                                               \
    unlock(l);                                                                 \
    return tmp;                                                                \
  }
OPTIMISED_CASES
#undef OPTIMISED_CASE

// Size-specialised compare-and-exchange: __atomic_compare_exchange_N returns
// true on success; on failure the current value is written to *expected.
#define OPTIMISED_CASE(n, lockfree, type)                                      \
  bool __atomic_compare_exchange_##n(type *ptr, type *expected, type desired,  \
                                     int success, int failure) {               \
    if (lockfree)                                                              \
      return __c11_atomic_compare_exchange_strong(                             \
          (_Atomic(type) *)ptr, expected, desired, success, failure);          \
    Lock *l = lock_for_pointer(ptr);                                           \
    lock(l);                                                                   \
    if (*ptr == *expected) {                                                   \
      *ptr = desired;                                                          \
      unlock(l);                                                               \
      return true;                                                             \
    }                                                                          \
    *expected = *ptr;                                                          \
    unlock(l);                                                                 \
    return false;                                                              \
  }
OPTIMISED_CASES
#undef OPTIMISED_CASE

////////////////////////////////////////////////////////////////////////////////
// Atomic read-modify-write operations for integers of various sizes.
////////////////////////////////////////////////////////////////////////////////
// ATOMIC_RMW generates __atomic_fetch_<opname>_N, which applies the binary
// operator `op` and returns the value the object held before the operation.
#define ATOMIC_RMW(n, lockfree, type, opname, op)                              \
  type __atomic_fetch_##opname##_##n(type *ptr, type val, int model) {         \
    if (lockfree)                                                              \
      return __c11_atomic_fetch_##opname((_Atomic(type) *)ptr, val, model);    \
    Lock *l = lock_for_pointer(ptr);                                           \
    lock(l);                                                                   \
    type tmp = *ptr;                                                           \
    *ptr = tmp op val;                                                         \
    unlock(l);                                                                 \
    return tmp;                                                                \
  }

#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, add, +)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, sub, -)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, and, &)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, or, |)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, xor, ^)
OPTIMISED_CASES
#undef OPTIMISED_CASE
Upload File
Create Folder