File Manager
Current Path: /usr/src/contrib/llvm-project/compiler-rt/lib/scudo/standalone
📁 ..
📄 allocator_config.h (2.57 KB)
📄 atomic_helpers.h (4.16 KB)
📄 bytemap.h (992 B)
📄 checksum.cpp (2.52 KB)
📄 checksum.h (1.67 KB)
📄 chunk.h (5.83 KB)
📄 combined.h (40.99 KB)
📄 common.cpp (874 B)
📄 common.h (5.29 KB)
📄 crc32_hw.cpp (656 B)
📄 flags.cpp (2.09 KB)
📄 flags.h (975 B)
📄 flags.inc (2.41 KB)
📄 flags_parser.cpp (4.38 KB)
📄 flags_parser.h (1.23 KB)
📄 fuchsia.cpp (6.04 KB)
📄 fuchsia.h (695 B)
📁 fuzz
📁 include
📄 internal_defs.h (4.61 KB)
📄 linux.cpp (6.1 KB)
📄 linux.h (2.77 KB)
📄 list.h (4.95 KB)
📄 local_cache.h (5.21 KB)
📄 memtag.h (7.21 KB)
📄 mutex.h (1.69 KB)
📄 platform.h (2.03 KB)
📄 primary32.h (18.05 KB)
📄 primary64.h (17.3 KB)
📄 quarantine.h (9.64 KB)
📄 release.cpp (522 B)
📄 release.h (10.27 KB)
📄 report.cpp (6.62 KB)
📄 report.h (1.97 KB)
📄 secondary.h (13.22 KB)
📄 size_class_map.h (9.9 KB)
📄 stack_depot.h (4.47 KB)
📄 stats.h (2.77 KB)
📄 string_utils.cpp (7.93 KB)
📄 string_utils.h (1.05 KB)
📄 tsd.h (1.84 KB)
📄 tsd_exclusive.h (4.03 KB)
📄 tsd_shared.h (5.53 KB)
📄 vector.h (3.07 KB)
📄 wrappers_c.cpp (1.19 KB)
📄 wrappers_c.h (1.46 KB)
📄 wrappers_c.inc (8.6 KB)
📄 wrappers_c_bionic.cpp (2.23 KB)
📄 wrappers_c_checks.h (1.99 KB)
📄 wrappers_cpp.cpp (4.53 KB)
Editing: local_cache.h
//===-- local_cache.h -------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_LOCAL_CACHE_H_
#define SCUDO_LOCAL_CACHE_H_

#include "internal_defs.h"
#include "report.h"
#include "stats.h"

namespace scudo {

template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
  typedef typename SizeClassAllocator::SizeClassMap SizeClassMap;

  struct TransferBatch {
    static const u32 MaxNumCached = SizeClassMap::MaxNumCachedHint;
    void setFromArray(void **Array, u32 N) {
      DCHECK_LE(N, MaxNumCached);
      Count = N;
      memcpy(Batch, Array, sizeof(void *) * Count);
    }
    void clear() { Count = 0; }
    void add(void *P) {
      DCHECK_LT(Count, MaxNumCached);
      Batch[Count++] = P;
    }
    void copyToArray(void **Array) const {
      memcpy(Array, Batch, sizeof(void *) * Count);
    }
    u32 getCount() const { return Count; }
    void *get(u32 I) const {
      DCHECK_LE(I, Count);
      return Batch[I];
    }
    static u32 getMaxCached(uptr Size) {
      return Min(MaxNumCached, SizeClassMap::getMaxCachedHint(Size));
    }
    TransferBatch *Next;

  private:
    u32 Count;
    void *Batch[MaxNumCached];
  };

  void initLinkerInitialized(GlobalStats *S, SizeClassAllocator *A) {
    Stats.initLinkerInitialized();
    if (LIKELY(S))
      S->link(&Stats);
    Allocator = A;
  }

  void init(GlobalStats *S, SizeClassAllocator *A) {
    memset(this, 0, sizeof(*this));
    initLinkerInitialized(S, A);
  }

  void destroy(GlobalStats *S) {
    drain();
    if (LIKELY(S))
      S->unlink(&Stats);
  }

  void *allocate(uptr ClassId) {
    DCHECK_LT(ClassId, NumClasses);
    PerClass *C = &PerClassArray[ClassId];
    if (C->Count == 0) {
      if (UNLIKELY(!refill(C, ClassId)))
        return nullptr;
      DCHECK_GT(C->Count, 0);
    }
    // We read ClassSize first before accessing Chunks because it's adjacent to
    // Count, while Chunks might be further off (depending on Count). That keeps
    // the memory accesses in close quarters.
    const uptr ClassSize = C->ClassSize;
    void *P = C->Chunks[--C->Count];
    // The jury is still out as to whether any kind of PREFETCH here increases
    // performance. It definitely decreases performance on Android though.
    // if (!SCUDO_ANDROID) PREFETCH(P);
    Stats.add(StatAllocated, ClassSize);
    Stats.sub(StatFree, ClassSize);
    return P;
  }

  void deallocate(uptr ClassId, void *P) {
    CHECK_LT(ClassId, NumClasses);
    PerClass *C = &PerClassArray[ClassId];
    // We still have to initialize the cache in the event that the first heap
    // operation in a thread is a deallocation.
    initCacheMaybe(C);
    if (C->Count == C->MaxCount)
      drain(C, ClassId);
    // See comment in allocate() about memory accesses.
    const uptr ClassSize = C->ClassSize;
    C->Chunks[C->Count++] = P;
    Stats.sub(StatAllocated, ClassSize);
    Stats.add(StatFree, ClassSize);
  }

  void drain() {
    for (uptr I = 0; I < NumClasses; I++) {
      PerClass *C = &PerClassArray[I];
      while (C->Count > 0)
        drain(C, I);
    }
  }

  TransferBatch *createBatch(uptr ClassId, void *B) {
    if (ClassId != SizeClassMap::BatchClassId)
      B = allocate(SizeClassMap::BatchClassId);
    return reinterpret_cast<TransferBatch *>(B);
  }

  LocalStats &getStats() { return Stats; }

private:
  static const uptr NumClasses = SizeClassMap::NumClasses;
  struct PerClass {
    u32 Count;
    u32 MaxCount;
    uptr ClassSize;
    void *Chunks[2 * TransferBatch::MaxNumCached];
  };
  PerClass PerClassArray[NumClasses];
  LocalStats Stats;
  SizeClassAllocator *Allocator;

  ALWAYS_INLINE void initCacheMaybe(PerClass *C) {
    if (LIKELY(C->MaxCount))
      return;
    initCache();
    DCHECK_NE(C->MaxCount, 0U);
  }

  NOINLINE void initCache() {
    for (uptr I = 0; I < NumClasses; I++) {
      PerClass *P = &PerClassArray[I];
      const uptr Size = SizeClassAllocator::getSizeByClassId(I);
      P->MaxCount = 2 * TransferBatch::getMaxCached(Size);
      P->ClassSize = Size;
    }
  }

  void destroyBatch(uptr ClassId, void *B) {
    if (ClassId != SizeClassMap::BatchClassId)
      deallocate(SizeClassMap::BatchClassId, B);
  }

  NOINLINE bool refill(PerClass *C, uptr ClassId) {
    initCacheMaybe(C);
    TransferBatch *B = Allocator->popBatch(this, ClassId);
    if (UNLIKELY(!B))
      return false;
    DCHECK_GT(B->getCount(), 0);
    C->Count = B->getCount();
    B->copyToArray(C->Chunks);
    destroyBatch(ClassId, B);
    return true;
  }

  NOINLINE void drain(PerClass *C, uptr ClassId) {
    const u32 Count = Min(C->MaxCount / 2, C->Count);
    TransferBatch *B = createBatch(ClassId, C->Chunks[0]);
    if (UNLIKELY(!B))
      reportOutOfMemory(
          SizeClassAllocator::getSizeByClassId(SizeClassMap::BatchClassId));
    B->setFromArray(&C->Chunks[0], Count);
    C->Count -= Count;
    for (uptr I = 0; I < C->Count; I++)
      C->Chunks[I] = C->Chunks[I + Count];
    Allocator->pushBatch(ClassId, B);
  }
};

} // namespace scudo

#endif // SCUDO_LOCAL_CACHE_H_
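For orientation, below is a minimal, self-contained sketch of the pattern local_cache.h implements: each thread keeps a small per-size-class array of free chunks, refills it from a shared backend in batches when it runs dry, and drains half of it back when it overflows (popBatch()/pushBatch() on the primary allocator in the real code). Everything in the sketch is a hypothetical simplification for illustration, not the scudo API; names, sizes, and the Backend type are invented here.

// A simplified model of the batch refill/drain pattern in
// SizeClassAllocatorLocalCache (names and sizes are illustrative only).
#include <cstddef>
#include <cstdio>
#include <vector>

// Stand-in for the shared primary allocator; in scudo this role is played by
// popBatch()/pushBatch() on the SizeClassAllocator template parameter.
struct Backend {
  std::vector<void *> Pool;
  // Hand out up to MaxCount chunks at once (a "transfer batch").
  size_t popBatch(void **Out, size_t MaxCount) {
    size_t N = 0;
    while (N < MaxCount && !Pool.empty()) {
      Out[N++] = Pool.back();
      Pool.pop_back();
    }
    return N;
  }
  void pushBatch(void **In, size_t Count) {
    for (size_t I = 0; I < Count; I++)
      Pool.push_back(In[I]);
  }
};

// Stand-in for one PerClass entry of the thread-local cache.
struct LocalCache {
  static const size_t MaxCount = 8; // mirrors PerClass::MaxCount (2 * batch)
  void *Chunks[MaxCount];
  size_t Count = 0;
  Backend *B = nullptr;

  void *allocate() {
    if (Count == 0) {
      // Refill: fetch a batch of chunks from the backend in one call.
      Count = B->popBatch(Chunks, MaxCount / 2);
      if (Count == 0)
        return nullptr; // backend exhausted
    }
    return Chunks[--Count]; // fast path: pop from the local array
  }

  void deallocate(void *P) {
    if (Count == MaxCount) {
      // Drain: return half of the cached chunks to the backend and shift the
      // rest down, mirroring the drain(PerClass *, uptr) logic above.
      B->pushBatch(Chunks, MaxCount / 2);
      Count -= MaxCount / 2;
      for (size_t I = 0; I < Count; I++)
        Chunks[I] = Chunks[I + MaxCount / 2];
    }
    Chunks[Count++] = P; // fast path: push onto the local array
  }
};

int main() {
  static char Storage[16][32]; // fake chunk memory for the backend pool
  Backend Back;
  for (auto &S : Storage)
    Back.Pool.push_back(S);

  LocalCache Cache;
  Cache.B = &Back;

  void *P = Cache.allocate(); // first call triggers one popBatch()
  void *Q = Cache.allocate(); // served from the local array, no backend call
  Cache.deallocate(Q);
  Cache.deallocate(P);
  std::printf("backend pool now holds %zu chunks\n", Back.Pool.size());
  return 0;
}

The point of the batching is that the shared backend, which is typically lock-protected, is only touched once per batch of operations; the common allocate/deallocate case stays entirely within the thread-local array.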