003 File Manager
Current Path:
/usr/src/contrib/llvm-project/compiler-rt/lib/sanitizer_common
usr
/
src
/
contrib
/
llvm-project
/
compiler-rt
/
lib
/
sanitizer_common
/
📁
..
📄
sancov_flags.cpp
(1.64 KB)
📄
sancov_flags.h
(1.08 KB)
📄
sancov_flags.inc
(777 B)
📄
sanitizer_addrhashmap.h
(9.19 KB)
📄
sanitizer_allocator.cpp
(8.73 KB)
📄
sanitizer_allocator.h
(2.69 KB)
📄
sanitizer_allocator_bytemap.h
(3.12 KB)
📄
sanitizer_allocator_checks.cpp
(686 B)
📄
sanitizer_allocator_checks.h
(2.67 KB)
📄
sanitizer_allocator_combined.h
(6.29 KB)
📄
sanitizer_allocator_interface.h
(1.83 KB)
📄
sanitizer_allocator_internal.h
(2.03 KB)
📄
sanitizer_allocator_local_cache.h
(8.95 KB)
📄
sanitizer_allocator_primary32.h
(12.64 KB)
📄
sanitizer_allocator_primary64.h
(32.71 KB)
📄
sanitizer_allocator_report.cpp
(4.68 KB)
📄
sanitizer_allocator_report.h
(1.72 KB)
📄
sanitizer_allocator_secondary.h
(10.78 KB)
📄
sanitizer_allocator_size_class_map.h
(10.12 KB)
📄
sanitizer_allocator_stats.h
(2.76 KB)
📄
sanitizer_asm.h
(2.57 KB)
📄
sanitizer_atomic.h
(1.97 KB)
📄
sanitizer_atomic_clang.h
(3.42 KB)
📄
sanitizer_atomic_clang_mips.h
(3.77 KB)
📄
sanitizer_atomic_clang_other.h
(3.02 KB)
📄
sanitizer_atomic_clang_x86.h
(3.77 KB)
📄
sanitizer_atomic_msvc.h
(8.53 KB)
📄
sanitizer_bitvector.h
(9.43 KB)
📄
sanitizer_bvgraph.h
(4.62 KB)
📄
sanitizer_common.cpp
(10.13 KB)
📄
sanitizer_common.h
(30.64 KB)
📄
sanitizer_common_interceptors.inc
(348.66 KB)
📄
sanitizer_common_interceptors_format.inc
(16.84 KB)
📄
sanitizer_common_interceptors_ioctl.inc
(22.13 KB)
📄
sanitizer_common_interceptors_netbsd_compat.inc
(4.07 KB)
📄
sanitizer_common_interceptors_vfork_aarch64.inc.S
(1.23 KB)
📄
sanitizer_common_interceptors_vfork_arm.inc.S
(1.24 KB)
📄
sanitizer_common_interceptors_vfork_i386.inc.S
(1.66 KB)
📄
sanitizer_common_interceptors_vfork_x86_64.inc.S
(1.03 KB)
📄
sanitizer_common_interface.inc
(2.05 KB)
📄
sanitizer_common_interface_posix.inc
(732 B)
📄
sanitizer_common_libcdep.cpp
(5.4 KB)
📄
sanitizer_common_nolibc.cpp
(1.12 KB)
📄
sanitizer_common_syscalls.inc
(85 KB)
📄
sanitizer_coverage_fuchsia.cpp
(10.1 KB)
📄
sanitizer_coverage_interface.inc
(1.73 KB)
📄
sanitizer_coverage_libcdep_new.cpp
(7.39 KB)
📄
sanitizer_coverage_win_dll_thunk.cpp
(1.03 KB)
📄
sanitizer_coverage_win_dynamic_runtime_thunk.cpp
(1.16 KB)
📄
sanitizer_coverage_win_sections.cpp
(3.36 KB)
📄
sanitizer_coverage_win_weak_interception.cpp
(1.17 KB)
📄
sanitizer_dbghelp.h
(1.44 KB)
📄
sanitizer_deadlock_detector.h
(13.28 KB)
📄
sanitizer_deadlock_detector1.cpp
(5.6 KB)
📄
sanitizer_deadlock_detector2.cpp
(10.84 KB)
📄
sanitizer_deadlock_detector_interface.h
(2.66 KB)
📄
sanitizer_errno.cpp
(1.09 KB)
📄
sanitizer_errno.h
(1.3 KB)
📄
sanitizer_errno_codes.h
(1.1 KB)
📄
sanitizer_file.cpp
(6.24 KB)
📄
sanitizer_file.h
(3.67 KB)
📄
sanitizer_flag_parser.cpp
(5.55 KB)
📄
sanitizer_flag_parser.h
(5.71 KB)
📄
sanitizer_flags.cpp
(3.95 KB)
📄
sanitizer_flags.h
(2.13 KB)
📄
sanitizer_flags.inc
(12.73 KB)
📄
sanitizer_freebsd.h
(3.31 KB)
📄
sanitizer_fuchsia.cpp
(16.58 KB)
📄
sanitizer_fuchsia.h
(1.03 KB)
📄
sanitizer_getauxval.h
(1.73 KB)
📄
sanitizer_glibc_version.h
(748 B)
📄
sanitizer_hash.h
(1.04 KB)
📄
sanitizer_interceptors_ioctl_netbsd.inc
(68.81 KB)
📄
sanitizer_interface_internal.h
(5.28 KB)
📄
sanitizer_internal_defs.h
(14.28 KB)
📄
sanitizer_lfstack.h
(2.08 KB)
📄
sanitizer_libc.cpp
(7.21 KB)
📄
sanitizer_libc.h
(3.23 KB)
📄
sanitizer_libignore.cpp
(4.24 KB)
📄
sanitizer_libignore.h
(3.5 KB)
📄
sanitizer_linux.cpp
(71.58 KB)
📄
sanitizer_linux.h
(5.47 KB)
📄
sanitizer_linux_libcdep.cpp
(25.67 KB)
📄
sanitizer_linux_s390.cpp
(6.91 KB)
📄
sanitizer_list.h
(3.81 KB)
📄
sanitizer_local_address_space_view.h
(3.67 KB)
📄
sanitizer_mac.cpp
(38.67 KB)
📄
sanitizer_mac.h
(2.49 KB)
📄
sanitizer_mac_libcdep.cpp
(968 B)
📄
sanitizer_malloc_mac.inc
(13.06 KB)
📄
sanitizer_mutex.h
(5.17 KB)
📄
sanitizer_netbsd.cpp
(9.95 KB)
📄
sanitizer_openbsd.cpp
(3.17 KB)
📄
sanitizer_persistent_allocator.cpp
(687 B)
📄
sanitizer_persistent_allocator.h
(2.31 KB)
📄
sanitizer_placement_new.h
(885 B)
📄
sanitizer_platform.h
(10.11 KB)
📄
sanitizer_platform_interceptors.h
(24.93 KB)
📄
sanitizer_platform_limits_freebsd.cpp
(19.79 KB)
📄
sanitizer_platform_limits_freebsd.h
(18.41 KB)
📄
sanitizer_platform_limits_linux.cpp
(3.32 KB)
📄
sanitizer_platform_limits_netbsd.cpp
(109.73 KB)
📄
sanitizer_platform_limits_netbsd.h
(81.54 KB)
📄
sanitizer_platform_limits_openbsd.cpp
(8.69 KB)
📄
sanitizer_platform_limits_openbsd.h
(8.68 KB)
📄
sanitizer_platform_limits_posix.cpp
(48.89 KB)
📄
sanitizer_platform_limits_posix.h
(40.65 KB)
📄
sanitizer_platform_limits_solaris.cpp
(11.93 KB)
📄
sanitizer_platform_limits_solaris.h
(12.26 KB)
📄
sanitizer_posix.cpp
(11.93 KB)
📄
sanitizer_posix.h
(5.14 KB)
📄
sanitizer_posix_libcdep.cpp
(16.21 KB)
📄
sanitizer_printf.cpp
(12.07 KB)
📄
sanitizer_procmaps.h
(2.93 KB)
📄
sanitizer_procmaps_bsd.cpp
(3.81 KB)
📄
sanitizer_procmaps_common.cpp
(5.27 KB)
📄
sanitizer_procmaps_fuchsia.cpp
(2.51 KB)
📄
sanitizer_procmaps_linux.cpp
(2.9 KB)
📄
sanitizer_procmaps_mac.cpp
(13.7 KB)
📄
sanitizer_procmaps_solaris.cpp
(2.18 KB)
📄
sanitizer_ptrauth.h
(720 B)
📄
sanitizer_quarantine.h
(9.57 KB)
📄
sanitizer_report_decorator.h
(1.89 KB)
📄
sanitizer_ring_buffer.h
(4.7 KB)
📄
sanitizer_rtems.cpp
(7.61 KB)
📄
sanitizer_rtems.h
(772 B)
📄
sanitizer_signal_interceptors.inc
(2.89 KB)
📄
sanitizer_solaris.cpp
(6.6 KB)
📄
sanitizer_stackdepot.cpp
(4.54 KB)
📄
sanitizer_stackdepot.h
(2.13 KB)
📄
sanitizer_stackdepotbase.h
(5.58 KB)
📄
sanitizer_stacktrace.cpp
(4.58 KB)
📄
sanitizer_stacktrace.h
(5.65 KB)
📄
sanitizer_stacktrace_libcdep.cpp
(5.54 KB)
📄
sanitizer_stacktrace_printer.cpp
(9.08 KB)
📄
sanitizer_stacktrace_printer.h
(3.08 KB)
📄
sanitizer_stacktrace_sparc.cpp
(3.05 KB)
📄
sanitizer_stoptheworld.h
(2.34 KB)
📄
sanitizer_stoptheworld_fuchsia.cpp
(1.28 KB)
📄
sanitizer_stoptheworld_linux_libcdep.cpp
(20.8 KB)
📄
sanitizer_stoptheworld_mac.cpp
(5.29 KB)
📄
sanitizer_stoptheworld_netbsd_libcdep.cpp
(11.02 KB)
📄
sanitizer_suppressions.cpp
(5.67 KB)
📄
sanitizer_suppressions.h
(1.69 KB)
📄
sanitizer_symbolizer.cpp
(3.65 KB)
📄
sanitizer_symbolizer.h
(6.62 KB)
📄
sanitizer_symbolizer_fuchsia.h
(1.56 KB)
📄
sanitizer_symbolizer_internal.h
(5.8 KB)
📄
sanitizer_symbolizer_libbacktrace.cpp
(6.1 KB)
📄
sanitizer_symbolizer_libbacktrace.h
(1.49 KB)
📄
sanitizer_symbolizer_libcdep.cpp
(17.45 KB)
📄
sanitizer_symbolizer_mac.cpp
(8.48 KB)
📄
sanitizer_symbolizer_mac.h
(1.37 KB)
📄
sanitizer_symbolizer_markup.cpp
(5.39 KB)
📄
sanitizer_symbolizer_posix_libcdep.cpp
(16.5 KB)
📄
sanitizer_symbolizer_report.cpp
(10.01 KB)
📄
sanitizer_symbolizer_rtems.h
(1.56 KB)
📄
sanitizer_symbolizer_win.cpp
(11.59 KB)
📄
sanitizer_syscall_generic.inc
(1.06 KB)
📄
sanitizer_syscall_linux_aarch64.inc
(4.58 KB)
📄
sanitizer_syscall_linux_arm.inc
(4.58 KB)
📄
sanitizer_syscall_linux_x86_64.inc
(3.07 KB)
📄
sanitizer_syscalls_netbsd.inc
(120.97 KB)
📄
sanitizer_termination.cpp
(2.86 KB)
📄
sanitizer_thread_registry.cpp
(10.31 KB)
📄
sanitizer_thread_registry.h
(5.44 KB)
📄
sanitizer_tls_get_addr.cpp
(5.29 KB)
📄
sanitizer_tls_get_addr.h
(2.21 KB)
📄
sanitizer_type_traits.cpp
(708 B)
📄
sanitizer_type_traits.h
(1.49 KB)
📄
sanitizer_unwind_linux_libcdep.cpp
(6.13 KB)
📄
sanitizer_unwind_win.cpp
(2.41 KB)
📄
sanitizer_vector.h
(2.57 KB)
📄
sanitizer_win.cpp
(33.3 KB)
📄
sanitizer_win.h
(851 B)
📄
sanitizer_win_defs.h
(7.23 KB)
📄
sanitizer_win_dll_thunk.cpp
(3.41 KB)
📄
sanitizer_win_dll_thunk.h
(11.13 KB)
📄
sanitizer_win_dynamic_runtime_thunk.cpp
(1.16 KB)
📄
sanitizer_win_weak_interception.cpp
(3.49 KB)
📄
sanitizer_win_weak_interception.h
(1.64 KB)
📁
symbolizer
📄
weak_symbols.txt
(230 B)
Editing: sanitizer_deadlock_detector.h
//===-- sanitizer_deadlock_detector.h ---------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of Sanitizer runtime.
// The deadlock detector maintains a directed graph of lock acquisitions.
// When a lock event happens, the detector checks if the locks already held by
// the current thread are reachable from the newly acquired lock.
//
// The detector can handle only a fixed amount of simultaneously live locks
// (a lock is alive if it has been locked at least once and has not been
// destroyed). When the maximal number of locks is reached the entire graph
// is flushed and the new lock epoch is started. The node ids from the old
// epochs can not be used with any of the detector methods except for
// nodeBelongsToCurrentEpoch().
//
// FIXME: this is work in progress, nothing really works yet.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_DEADLOCK_DETECTOR_H
#define SANITIZER_DEADLOCK_DETECTOR_H

#include "sanitizer_bvgraph.h"
#include "sanitizer_common.h"

namespace __sanitizer {

// Thread-local state for DeadlockDetector.
// It contains the locks currently held by the owning thread.
template <class BV>
class DeadlockDetectorTLS {
 public:
  // No CTOR.

  // Resets the object to its initial state: no held locks, epoch 0.
  void clear() {
    bv_.clear();
    epoch_ = 0;
    n_recursive_locks = 0;
    n_all_locks_ = 0;
  }

  // True iff this thread currently holds no locks.
  bool empty() const { return bv_.empty(); }

  // If the global epoch has advanced past epoch_, every lock id recorded
  // here is stale (it belongs to a flushed graph), so drop all per-thread
  // state and adopt the new epoch. No-op when epochs already match.
  void ensureCurrentEpoch(uptr current_epoch) {
    if (epoch_ == current_epoch) return;
    bv_.clear();
    epoch_ = current_epoch;
    n_recursive_locks = 0;
    n_all_locks_ = 0;
  }

  // Epoch the recorded lock ids belong to.
  uptr getEpoch() const { return epoch_; }

  // Returns true if this is the first (non-recursive) acquisition of this lock.
  // Recursive re-acquisitions are pushed onto recursive_locks[] instead of
  // being recorded in the bit vector a second time.
  bool addLock(uptr lock_id, uptr current_epoch, u32 stk) {
    CHECK_EQ(epoch_, current_epoch);
    if (!bv_.setBit(lock_id)) {
      // The lock is already held by this thread, it must be recursive.
      CHECK_LT(n_recursive_locks, ARRAY_SIZE(recursive_locks));
      recursive_locks[n_recursive_locks++] = lock_id;
      return false;
    }
    CHECK_LT(n_all_locks_, ARRAY_SIZE(all_locks_with_contexts_));
    // lock_id < BV::kSize, can cast to a smaller int.
    u32 lock_id_short = static_cast<u32>(lock_id);
    LockWithContext l = {lock_id_short, stk};
    all_locks_with_contexts_[n_all_locks_++] = l;
    return true;
  }

  // Removes one acquisition of lock_id. A recursive acquisition is undone
  // first; only the last (outermost) release clears the bit and drops the
  // recorded context. Swap-with-last keeps both arrays compact, so order of
  // entries is not preserved.
  void removeLock(uptr lock_id) {
    if (n_recursive_locks) {
      for (sptr i = n_recursive_locks - 1; i >= 0; i--) {
        if (recursive_locks[i] == lock_id) {
          n_recursive_locks--;
          Swap(recursive_locks[i], recursive_locks[n_recursive_locks]);
          return;
        }
      }
    }
    if (!bv_.clearBit(lock_id))
      return;  // probably addLock happened before flush
    if (n_all_locks_) {
      for (sptr i = n_all_locks_ - 1; i >= 0; i--) {
        if (all_locks_with_contexts_[i].lock == static_cast<u32>(lock_id)) {
          Swap(all_locks_with_contexts_[i],
               all_locks_with_contexts_[n_all_locks_ - 1]);
          n_all_locks_--;
          break;
        }
      }
    }
  }

  // Returns the stack id recorded when lock_id was first acquired,
  // or 0 if the lock is not in the held set.
  u32 findLockContext(uptr lock_id) {
    for (uptr i = 0; i < n_all_locks_; i++)
      if (all_locks_with_contexts_[i].lock == static_cast<u32>(lock_id))
        return all_locks_with_contexts_[i].stk;
    return 0;
  }

  // Bit vector of currently held locks; CHECKs the caller's epoch matches.
  const BV &getLocks(uptr current_epoch) const {
    CHECK_EQ(epoch_, current_epoch);
    return bv_;
  }

  // Number of distinct (non-recursive) locks currently held.
  uptr getNumLocks() const { return n_all_locks_; }
  // Lock id at position idx (0 <= idx < getNumLocks()); unchecked access.
  uptr getLock(uptr idx) const { return all_locks_with_contexts_[idx].lock; }

 private:
  BV bv_;       // One bit per lock id: the set of locks held by this thread.
  uptr epoch_;  // Epoch the ids in bv_ belong to.
  // Lock ids acquired recursively (i.e. while already held); capacity 64.
  uptr recursive_locks[64];
  uptr n_recursive_locks;
  // (lock id, stack id) pair captured at the first acquisition of a lock.
  struct LockWithContext {
    u32 lock;
    u32 stk;
  };
  LockWithContext all_locks_with_contexts_[64];
  uptr n_all_locks_;
};

// DeadlockDetector.
// For deadlock detection to work we need one global DeadlockDetector object
// and one DeadlockDetectorTLS object per every thread.
// This class is not thread safe, all concurrent accesses should be guarded
// by an external lock.
// Most of the methods of this class are not thread-safe (i.e. should
// be protected by an external lock) unless explicitly told otherwise.
template <class BV>
class DeadlockDetector {
 public:
  typedef BV BitVector;

  // Maximal number of simultaneously live locks (graph capacity).
  uptr size() const { return g_.size(); }

  // No CTOR.

  // Resets the detector: epoch 0, empty graph, no nodes allocated.
  void clear() {
    current_epoch_ = 0;
    available_nodes_.clear();
    recycled_nodes_.clear();
    g_.clear();
    n_edges_ = 0;
  }

  // Allocate new deadlock detector node.
  // If we are out of available nodes first try to recycle some.
  // If there is nothing to recycle, flush the graph and increment the epoch.
  // Associate 'data' (opaque user's object) with the new node.
  uptr newNode(uptr data) {
    if (!available_nodes_.empty())
      return getAvailableNode(data);
    if (!recycled_nodes_.empty()) {
      // Drop recorded edge contexts that touch a recycled node
      // (swap-with-last keeps edges_[] compact).
      for (sptr i = n_edges_ - 1; i >= 0; i--) {
        if (recycled_nodes_.getBit(edges_[i].from) ||
            recycled_nodes_.getBit(edges_[i].to)) {
          Swap(edges_[i], edges_[n_edges_ - 1]);
          n_edges_--;
        }
      }
      CHECK(available_nodes_.empty());
      // removeEdgesFrom was called in removeNode.
      g_.removeEdgesTo(recycled_nodes_);
      available_nodes_.setUnion(recycled_nodes_);
      recycled_nodes_.clear();
      return getAvailableNode(data);
    }
    // We are out of vacant nodes. Flush and increment the current_epoch_.
    // The epoch advances in multiples of size(), see nodeToEpoch().
    current_epoch_ += size();
    recycled_nodes_.clear();
    available_nodes_.setAll();
    g_.clear();
    n_edges_ = 0;
    return getAvailableNode(data);
  }

  // Get data associated with the node created by newNode().
  uptr getData(uptr node) const { return data_[nodeToIndex(node)]; }

  // True iff 'node' was allocated in the current epoch (node ids encode
  // their epoch: node == index + epoch, see indexToNode()).
  bool nodeBelongsToCurrentEpoch(uptr node) {
    return node && (node / size() * size()) == current_epoch_;
  }

  // Marks the node for recycling. Its outgoing edges are removed now;
  // incoming edges are removed lazily by newNode().
  void removeNode(uptr node) {
    uptr idx = nodeToIndex(node);
    CHECK(!available_nodes_.getBit(idx));
    CHECK(recycled_nodes_.setBit(idx));
    g_.removeEdgesFrom(idx);
  }

  // Synchronizes the thread-local state with the detector's current epoch.
  void ensureCurrentEpoch(DeadlockDetectorTLS<BV> *dtls) {
    dtls->ensureCurrentEpoch(current_epoch_);
  }

  // Returns true if there is a cycle in the graph after this lock event.
  // Ideally should be called before the lock is acquired so that we can
  // report a deadlock before a real deadlock happens.
  bool onLockBefore(DeadlockDetectorTLS<BV> *dtls, uptr cur_node) {
    ensureCurrentEpoch(dtls);
    uptr cur_idx = nodeToIndex(cur_node);
    return g_.isReachable(cur_idx, dtls->getLocks(current_epoch_));
  }

  // Stack id recorded by dtls when 'node' was first acquired (0 if unknown).
  u32 findLockContext(DeadlockDetectorTLS<BV> *dtls, uptr node) {
    return dtls->findLockContext(nodeToIndex(node));
  }

  // Add cur_node to the set of locks held currently by dtls.
  void onLockAfter(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk = 0) {
    ensureCurrentEpoch(dtls);
    uptr cur_idx = nodeToIndex(cur_node);
    dtls->addLock(cur_idx, current_epoch_, stk);
  }

  // Experimental *racy* fast path function.
  // Returns true if all edges from the currently held locks to cur_node exist.
  bool hasAllEdges(DeadlockDetectorTLS<BV> *dtls, uptr cur_node) {
    uptr local_epoch = dtls->getEpoch();
    // Read from current_epoch_ is racy.
    if (cur_node && local_epoch == current_epoch_ &&
        local_epoch == nodeToEpoch(cur_node)) {
      uptr cur_idx = nodeToIndexUnchecked(cur_node);
      for (uptr i = 0, n = dtls->getNumLocks(); i < n; i++) {
        if (!g_.hasEdge(dtls->getLock(i), cur_idx))
          return false;
      }
      return true;
    }
    return false;
  }

  // Adds edges from currently held locks to cur_node,
  // returns the number of added edges, and puts the sources of added edges
  // into added_edges[].
  // Should be called before onLockAfter.
  uptr addEdges(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk,
                int unique_tid) {
    ensureCurrentEpoch(dtls);
    uptr cur_idx = nodeToIndex(cur_node);
    uptr added_edges[40];
    uptr n_added_edges = g_.addEdges(dtls->getLocks(current_epoch_), cur_idx,
                                     added_edges, ARRAY_SIZE(added_edges));
    // Record the context (stacks, tid) of each new edge while there is room;
    // extra edges still exist in the graph, just without a recorded context.
    for (uptr i = 0; i < n_added_edges; i++) {
      if (n_edges_ < ARRAY_SIZE(edges_)) {
        Edge e = {(u16)added_edges[i], (u16)cur_idx,
                  dtls->findLockContext(added_edges[i]), stk, unique_tid};
        edges_[n_edges_++] = e;
      }
    }
    return n_added_edges;
  }

  // Looks up the recorded context of edge (from_node -> to_node); on success
  // fills in the stack ids and tid captured when the edge was added.
  bool findEdge(uptr from_node, uptr to_node, u32 *stk_from, u32 *stk_to,
                int *unique_tid) {
    uptr from_idx = nodeToIndex(from_node);
    uptr to_idx = nodeToIndex(to_node);
    for (uptr i = 0; i < n_edges_; i++) {
      if (edges_[i].from == from_idx && edges_[i].to == to_idx) {
        *stk_from = edges_[i].stk_from;
        *stk_to = edges_[i].stk_to;
        *unique_tid = edges_[i].unique_tid;
        return true;
      }
    }
    return false;
  }

  // Test-only function. Handles the before/after lock events,
  // returns true if there is a cycle.
  bool onLock(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk = 0) {
    ensureCurrentEpoch(dtls);
    bool is_reachable = !isHeld(dtls, cur_node) && onLockBefore(dtls, cur_node);
    addEdges(dtls, cur_node, stk, 0);
    onLockAfter(dtls, cur_node, stk);
    return is_reachable;
  }

  // Handles the try_lock event, returns false.
  // When a try_lock event happens (i.e. a try_lock call succeeds) we need
  // to add this lock to the currently held locks, but we should not try to
  // change the lock graph or to detect a cycle. We may want to investigate
  // whether a more aggressive strategy is possible for try_lock.
  bool onTryLock(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk = 0) {
    ensureCurrentEpoch(dtls);
    uptr cur_idx = nodeToIndex(cur_node);
    dtls->addLock(cur_idx, current_epoch_, stk);
    return false;
  }

  // Returns true iff dtls is empty (no locks are currently held) and we can
  // add the node to the currently held locks w/o changing the global state.
  // This operation is thread-safe as it only touches the dtls.
  bool onFirstLock(DeadlockDetectorTLS<BV> *dtls, uptr node, u32 stk = 0) {
    if (!dtls->empty()) return false;
    if (dtls->getEpoch() && dtls->getEpoch() == nodeToEpoch(node)) {
      dtls->addLock(nodeToIndexUnchecked(node), nodeToEpoch(node), stk);
      return true;
    }
    return false;
  }

  // Finds a path between the lock 'cur_node' (currently not held in dtls)
  // and some currently held lock, returns the length of the path
  // or 0 on failure.
  uptr findPathToLock(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, uptr *path,
                      uptr path_size) {
    tmp_bv_.copyFrom(dtls->getLocks(current_epoch_));
    uptr idx = nodeToIndex(cur_node);
    CHECK(!tmp_bv_.getBit(idx));
    uptr res = g_.findShortestPath(idx, tmp_bv_, path, path_size);
    // Translate raw graph indices back into epoch-tagged node ids.
    for (uptr i = 0; i < res; i++) path[i] = indexToNode(path[i]);
    if (res) CHECK_EQ(path[0], cur_node);
    return res;
  }

  // Handle the unlock event.
  // This operation is thread-safe as it only touches the dtls.
  // Silently ignores nodes from a stale epoch.
  void onUnlock(DeadlockDetectorTLS<BV> *dtls, uptr node) {
    if (dtls->getEpoch() == nodeToEpoch(node))
      dtls->removeLock(nodeToIndexUnchecked(node));
  }

  // Tries to handle the lock event w/o writing to global state.
  // Returns true on success.
  // This operation is thread-safe as it only touches the dtls
  // (modulo racy nature of hasAllEdges).
  bool onLockFast(DeadlockDetectorTLS<BV> *dtls, uptr node, u32 stk = 0) {
    if (hasAllEdges(dtls, node)) {
      dtls->addLock(nodeToIndexUnchecked(node), nodeToEpoch(node), stk);
      return true;
    }
    return false;
  }

  // True iff dtls currently holds 'node'.
  bool isHeld(DeadlockDetectorTLS<BV> *dtls, uptr node) const {
    return dtls->getLocks(current_epoch_).getBit(nodeToIndex(node));
  }

  uptr testOnlyGetEpoch() const { return current_epoch_; }

  bool testOnlyHasEdge(uptr l1, uptr l2) {
    return g_.hasEdge(nodeToIndex(l1), nodeToIndex(l2));
  }

  // idx1 and idx2 are raw indices to g_, not lock IDs.
  bool testOnlyHasEdgeRaw(uptr idx1, uptr idx2) {
    return g_.hasEdge(idx1, idx2);
  }

  // Debugging helper: dumps all graph edges as raw index pairs.
  void Print() {
    for (uptr from = 0; from < size(); from++)
      for (uptr to = 0; to < size(); to++)
        if (g_.hasEdge(from, to))
          Printf(" %zx => %zx\n", from, to);
  }

 private:
  void check_idx(uptr idx) const { CHECK_LT(idx, size()); }

  // A valid node id is >= size() and belongs to the current epoch.
  void check_node(uptr node) const {
    CHECK_GE(node, size());
    CHECK_EQ(current_epoch_, nodeToEpoch(node));
  }

  // Node id = raw graph index + current epoch (epoch is a multiple of size()).
  uptr indexToNode(uptr idx) const {
    check_idx(idx);
    return idx + current_epoch_;
  }

  uptr nodeToIndexUnchecked(uptr node) const { return node % size(); }

  uptr nodeToIndex(uptr node) const {
    check_node(node);
    return nodeToIndexUnchecked(node);
  }

  // Epoch a node id belongs to: node rounded down to a multiple of size().
  uptr nodeToEpoch(uptr node) const { return node / size() * size(); }

  // Pops the first free index, stores the user's data, returns the node id.
  uptr getAvailableNode(uptr data) {
    uptr idx = available_nodes_.getAndClearFirstOne();
    data_[idx] = data;
    return indexToNode(idx);
  }

  // Context recorded for an edge added by addEdges().
  struct Edge {
    u16 from;
    u16 to;
    u32 stk_from;
    u32 stk_to;
    int unique_tid;
  };

  uptr current_epoch_;   // Always a multiple of size(); advanced on flush.
  BV available_nodes_;   // Indices never handed out (or reclaimed).
  BV recycled_nodes_;    // Indices removed but not yet reclaimed.
  BV tmp_bv_;            // Scratch space for findPathToLock().
  BVGraph<BV> g_;        // The lock-acquisition graph over raw indices.
  uptr data_[BV::kSize]; // Per-index opaque user data.
  Edge edges_[BV::kSize * 32];  // Contexts of recorded edges.
  uptr n_edges_;
};

}  // namespace __sanitizer

#endif  // SANITIZER_DEADLOCK_DETECTOR_H
Upload File
Create Folder