# 1 "../../../../gcc/libsanitizer/tsan/tsan_rtl.cc" # 1 "" 1 # 1 "" 3 # 346 "" 3 # 1 "" 1 # 1 "" 2 # 1 "../../../../gcc/libsanitizer/tsan/tsan_rtl.cc" 2 # 13 "../../../../gcc/libsanitizer/tsan/tsan_rtl.cc" # 1 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_atomic.h" 1 # 15 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_atomic.h" # 1 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_internal_defs.h" 1 # 14 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_internal_defs.h" # 1 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_platform.h" 1 # 15 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_internal_defs.h" 2 # 56 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_internal_defs.h" namespace __sanitizer { typedef unsigned long uptr; typedef signed long sptr; typedef unsigned long long uhwptr; typedef unsigned char u8; typedef unsigned short u16; typedef unsigned int u32; typedef unsigned long long u64; typedef signed char s8; typedef signed short s16; typedef signed int s32; typedef signed long long s64; typedef int fd_t; typedef int error_t; typedef int pid_t; typedef u64 OFF_T; typedef u64 OFF64_T; typedef uptr operator_new_size_type; # 175 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_internal_defs.h" typedef __attribute__((aligned(1))) u16 uu16; typedef __attribute__((aligned(1))) u32 uu32; typedef __attribute__((aligned(1))) u64 uu64; typedef __attribute__((aligned(1))) s16 us16; typedef __attribute__((aligned(1))) s32 us32; typedef __attribute__((aligned(1))) s64 us64; # 189 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_internal_defs.h" typedef void* thread_return_t; typedef thread_return_t ( *thread_callback_t)(void* arg); void __attribute__((noreturn)) Die(); __attribute__((visibility("default"))) void __attribute__((noreturn)) CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2); # 287 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_internal_defs.h" enum LinkerInitialized { LINKER_INITIALIZED = 0 }; inline void Trap() { __builtin_trap(); } # 327 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_internal_defs.h" } namespace __asan { using namespace __sanitizer; } namespace __dsan { using namespace __sanitizer; } namespace __dfsan { using namespace __sanitizer; } namespace __esan { using namespace __sanitizer; } namespace __lsan { using namespace __sanitizer; } namespace __msan { using namespace __sanitizer; } namespace __tsan { using namespace __sanitizer; } namespace __scudo { using namespace __sanitizer; } namespace __ubsan { using namespace __sanitizer; } namespace __xray { using namespace __sanitizer; } namespace __interception { using namespace __sanitizer; } # 16 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_atomic.h" 2 namespace __sanitizer { enum memory_order { memory_order_relaxed = 1 << 0, memory_order_consume = 1 << 1, memory_order_acquire = 1 << 2, memory_order_release = 1 << 3, memory_order_acq_rel = 1 << 4, memory_order_seq_cst = 1 << 5 }; struct atomic_uint8_t { typedef u8 Type; volatile Type val_dont_use; }; struct atomic_uint16_t { typedef u16 Type; volatile Type val_dont_use; }; struct atomic_uint32_t { typedef u32 Type; volatile Type val_dont_use; }; struct atomic_uint64_t { typedef u64 Type; volatile __attribute__((aligned(8))) Type val_dont_use; }; struct atomic_uintptr_t { typedef uptr Type; volatile Type val_dont_use; }; } # 1 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_atomic_clang.h" 1 # 17 
"../../../../gcc/libsanitizer/sanitizer_common/sanitizer_atomic_clang.h" # 1 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_atomic_clang_x86.h" 1 # 16 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_atomic_clang_x86.h" namespace __sanitizer { inline void proc_yield(int cnt) { __asm__ __volatile__("" ::: "memory"); for (int i = 0; i < cnt; i++) __asm__ __volatile__("pause"); __asm__ __volatile__("" ::: "memory"); } template inline typename T::Type atomic_load( const volatile T *a, memory_order mo) { ; ; typename T::Type v; if (sizeof(*a) < 8 || sizeof(void*) == 8) { if (mo == memory_order_relaxed) { v = a->val_dont_use; } else if (mo == memory_order_consume) { __asm__ __volatile__("" ::: "memory"); v = a->val_dont_use; __asm__ __volatile__("" ::: "memory"); } else if (mo == memory_order_acquire) { __asm__ __volatile__("" ::: "memory"); v = a->val_dont_use; __asm__ __volatile__("" ::: "memory"); } else { __asm__ __volatile__("" ::: "memory"); v = a->val_dont_use; __asm__ __volatile__("" ::: "memory"); } } else { __asm__ __volatile__( "movq %1, %%mm0;" "movq %%mm0, %0;" "emms;" : "=m" (v) : "m" (a->val_dont_use) : "st", "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", "st(7)", "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7", "memory"); } return v; } template inline void atomic_store(volatile T *a, typename T::Type v, memory_order mo) { ; ; if (sizeof(*a) < 8 || sizeof(void*) == 8) { if (mo == memory_order_relaxed) { a->val_dont_use = v; } else if (mo == memory_order_release) { __asm__ __volatile__("" ::: "memory"); a->val_dont_use = v; __asm__ __volatile__("" ::: "memory"); } else { __asm__ __volatile__("" ::: "memory"); a->val_dont_use = v; __sync_synchronize(); } } else { __asm__ __volatile__( "movq %1, %%mm0;" "movq %%mm0, %0;" "emms;" : "=m" (a->val_dont_use) : "m" (v) : "st", "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", "st(7)", "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7", "memory"); if (mo == memory_order_seq_cst) __sync_synchronize(); } } } # 18 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_atomic_clang.h" 2 namespace __sanitizer { # 36 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_atomic_clang.h" inline void atomic_signal_fence(memory_order) { __asm__ __volatile__("" ::: "memory"); } inline void atomic_thread_fence(memory_order) { __sync_synchronize(); } template inline typename T::Type atomic_fetch_add(volatile T *a, typename T::Type v, memory_order mo) { (void)mo; ; return __sync_fetch_and_add(&a->val_dont_use, v); } template inline typename T::Type atomic_fetch_sub(volatile T *a, typename T::Type v, memory_order mo) { (void)mo; ; return __sync_fetch_and_add(&a->val_dont_use, -v); } template inline typename T::Type atomic_exchange(volatile T *a, typename T::Type v, memory_order mo) { ; if (mo & (memory_order_release | memory_order_acq_rel | memory_order_seq_cst)) __sync_synchronize(); v = __sync_lock_test_and_set(&a->val_dont_use, v); if (mo == memory_order_seq_cst) __sync_synchronize(); return v; } template inline bool atomic_compare_exchange_strong(volatile T *a, typename T::Type *cmp, typename T::Type xchg, memory_order mo) { typedef typename T::Type Type; Type cmpv = *cmp; Type prev = __sync_val_compare_and_swap(&a->val_dont_use, cmpv, xchg); if (prev == cmpv) return true; *cmp = prev; return false; } template inline bool atomic_compare_exchange_weak(volatile T *a, typename T::Type *cmp, typename T::Type xchg, memory_order mo) { return atomic_compare_exchange_strong(a, cmp, xchg, mo); } } # 58 
"../../../../gcc/libsanitizer/sanitizer_common/sanitizer_atomic.h" 2 namespace __sanitizer { template inline typename T::Type atomic_load_relaxed(const volatile T *a) { return atomic_load(a, memory_order_relaxed); } template inline void atomic_store_relaxed(volatile T *a, typename T::Type v) { atomic_store(a, v, memory_order_relaxed); } } # 14 "../../../../gcc/libsanitizer/tsan/tsan_rtl.cc" 2 # 1 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_common.h" 1 # 17 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_common.h" # 1 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_flags.h" 1 # 17 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_flags.h" namespace __sanitizer { struct CommonFlags { # 1 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_flags.inc" 1 # 21 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_flags.inc" bool symbolize; const char * external_symbolizer_path; bool allow_addr2line; const char * strip_path_prefix; bool fast_unwind_on_check; bool fast_unwind_on_fatal; bool fast_unwind_on_malloc; bool handle_ioctl; int malloc_context_size; const char * log_path; bool log_exe_name; bool log_to_syslog; int verbosity; bool detect_leaks; bool leak_check_at_exit; bool allocator_may_return_null; bool print_summary; bool check_printf; bool handle_segv; bool handle_abort; bool handle_sigill; bool handle_sigfpe; bool allow_user_segv_handler; bool use_sigaltstack; bool detect_deadlocks; uptr clear_shadow_mmap_threshold; const char * color; bool legacy_pthread_cond; bool intercept_tls_get_addr; bool help; uptr mmap_limit_mb; uptr hard_rss_limit_mb; uptr soft_rss_limit_mb; # 119 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_flags.inc" bool heap_profile; bool allocator_release_to_os; bool can_use_proc_maps_statm; bool coverage; bool coverage_pcs; bool coverage_order_pcs; bool coverage_bitset; bool coverage_counters; bool coverage_direct; const char * coverage_dir; bool full_address_space; bool print_suppressions; bool disable_coredump; bool use_madv_dontdump; bool symbolize_inline_frames; bool symbolize_vs_style; int dedup_token_length; const char * stack_trace_format; bool no_huge_pages_for_shadow; bool strict_string_checks; bool intercept_strstr; bool intercept_strspn; bool intercept_strpbrk; bool intercept_strlen; bool intercept_strchr; bool intercept_memcmp; bool strict_memcmp; bool intercept_memmem; bool intercept_intrin; bool intercept_stat; bool intercept_send; bool decorate_proc_maps; int exitcode; bool abort_on_error; bool suppress_equal_pcs; bool print_cmdline; bool html_cov_report; const char * sancov_path; # 22 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_flags.h" 2 void SetDefaults(); void CopyFrom(const CommonFlags &other); }; extern CommonFlags common_flags_dont_use; inline const CommonFlags *common_flags() { return &common_flags_dont_use; } inline void SetCommonFlagsDefaults() { common_flags_dont_use.SetDefaults(); } inline void OverrideCommonFlags(const CommonFlags &cf) { common_flags_dont_use.CopyFrom(cf); } void SubstituteForFlagValue(const char *s, char *out, uptr out_size); class FlagParser; void RegisterCommonFlags(FlagParser *parser, CommonFlags *cf = &common_flags_dont_use); void RegisterIncludeFlags(FlagParser *parser, CommonFlags *cf); void InitializeCommonFlags(CommonFlags *cf = &common_flags_dont_use); } # 18 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_common.h" 2 # 1 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_interface_internal.h" 1 # 21 
"../../../../gcc/libsanitizer/sanitizer_common/sanitizer_interface_internal.h" extern "C" { __attribute__((visibility("default"))) void __sanitizer_set_report_path(const char *path); __attribute__((visibility("default"))) void __sanitizer_set_report_fd(void *fd); typedef struct { int coverage_sandboxed; __sanitizer::sptr coverage_fd; unsigned int coverage_max_block_size; } __sanitizer_sandbox_arguments; __attribute__((visibility("default"))) __attribute__((weak)) void __sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args); __attribute__((visibility("default"))) __attribute__((weak)) void __sanitizer_report_error_summary(const char *error_summary); __attribute__((visibility("default"))) void __sanitizer_cov_dump(); __attribute__((visibility("default"))) void __sanitizer_cov_init(); __attribute__((visibility("default"))) void __sanitizer_cov(__sanitizer::u32 *guard); __attribute__((visibility("default"))) void __sanitizer_annotate_contiguous_container(const void *beg, const void *end, const void *old_mid, const void *new_mid); __attribute__((visibility("default"))) int __sanitizer_verify_contiguous_container(const void *beg, const void *mid, const void *end); __attribute__((visibility("default"))) const void *__sanitizer_contiguous_container_find_bad_address( const void *beg, const void *mid, const void *end); } # 19 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_common.h" 2 # 1 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_libc.h" 1 # 22 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_libc.h" namespace __sanitizer { s64 internal_atoll(const char *nptr); void *internal_memchr(const void *s, int c, uptr n); void *internal_memrchr(const void *s, int c, uptr n); int internal_memcmp(const void* s1, const void* s2, uptr n); void *internal_memcpy(void *dest, const void *src, uptr n); void *internal_memmove(void *dest, const void *src, uptr n); void internal_bzero_aligned16(void *s, uptr n); void *internal_memset(void *s, int c, uptr n); char* internal_strchr(const char *s, int c); char *internal_strchrnul(const char *s, int c); int internal_strcmp(const char *s1, const char *s2); uptr internal_strcspn(const char *s, const char *reject); char *internal_strdup(const char *s); char *internal_strndup(const char *s, uptr n); uptr internal_strlen(const char *s); uptr internal_strlcat(char *dst, const char *src, uptr maxlen); char *internal_strncat(char *dst, const char *src, uptr n); int internal_strncmp(const char *s1, const char *s2, uptr n); uptr internal_strlcpy(char *dst, const char *src, uptr maxlen); char *internal_strncpy(char *dst, const char *src, uptr n); uptr internal_strnlen(const char *s, uptr maxlen); char *internal_strrchr(const char *s, int c); uptr internal_wcslen(const wchar_t *s); char *internal_strstr(const char *haystack, const char *needle); s64 internal_simple_strtoll(const char *nptr, char **endptr, int base); int internal_snprintf(char *buffer, uptr length, const char *format, ...); bool mem_is_zero(const char *mem, uptr size); # 70 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_libc.h" uptr internal_ftruncate(fd_t fd, uptr size); void __attribute__((noreturn)) internal__exit(int exitcode); unsigned int internal_sleep(unsigned int seconds); uptr internal_getpid(); uptr internal_getppid(); uptr internal_sched_yield(); bool internal_iserror(uptr retval, int *rverrno = nullptr); } # 21 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_common.h" 2 # 1 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_list.h" 1 # 18 
"../../../../gcc/libsanitizer/sanitizer_common/sanitizer_list.h" namespace __sanitizer { template struct IntrusiveList { friend class Iterator; void clear() { first_ = last_ = nullptr; size_ = 0; } bool empty() const { return size_ == 0; } uptr size() const { return size_; } void push_back(Item *x) { if (empty()) { x->next = nullptr; first_ = last_ = x; size_ = 1; } else { x->next = nullptr; last_->next = x; last_ = x; size_++; } } void push_front(Item *x) { if (empty()) { x->next = nullptr; first_ = last_ = x; size_ = 1; } else { x->next = first_; first_ = x; size_++; } } void pop_front() { do { __sanitizer::u64 v1 = (u64)((!empty())); __sanitizer::u64 v2 = (u64)(0); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_list.h", 64, "(" "(!empty())" ") " "!=" " (" "0" ")", v1, v2); } while (false); first_ = first_->next; if (!first_) last_ = nullptr; size_--; } Item *front() { return first_; } const Item *front() const { return first_; } Item *back() { return last_; } const Item *back() const { return last_; } void append_front(IntrusiveList *l) { do { __sanitizer::u64 v1 = (u64)((this)); __sanitizer::u64 v2 = (u64)((l)); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_list.h", 77, "(" "(this)" ") " "!=" " (" "(l)" ")", v1, v2); } while (false); if (l->empty()) return; if (empty()) { *this = *l; } else if (!l->empty()) { l->last_->next = first_; first_ = l->first_; size_ += l->size(); } l->clear(); } void append_back(IntrusiveList *l) { do { __sanitizer::u64 v1 = (u64)((this)); __sanitizer::u64 v2 = (u64)((l)); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_list.h", 91, "(" "(this)" ") " "!=" " (" "(l)" ")", v1, v2); } while (false); if (l->empty()) return; if (empty()) { *this = *l; } else { last_->next = l->first_; last_ = l->last_; size_ += l->size(); } l->clear(); } void CheckConsistency() { if (size_ == 0) { do { __sanitizer::u64 v1 = (u64)((first_)); __sanitizer::u64 v2 = (u64)((0)); if (__builtin_expect(!!(!(v1 == v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_list.h", 106, "(" "(first_)" ") " "==" " (" "(0)" ")", v1, v2); } while (false); do { __sanitizer::u64 v1 = (u64)((last_)); __sanitizer::u64 v2 = (u64)((0)); if (__builtin_expect(!!(!(v1 == v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_list.h", 107, "(" "(last_)" ") " "==" " (" "(0)" ")", v1, v2); } while (false); } else { uptr count = 0; for (Item *i = first_; ; i = i->next) { count++; if (i == last_) break; } do { __sanitizer::u64 v1 = (u64)((size())); __sanitizer::u64 v2 = (u64)((count)); if (__builtin_expect(!!(!(v1 == v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_list.h", 114, "(" "(size())" ") " "==" " (" "(count)" ")", v1, v2); } while (false); do { __sanitizer::u64 v1 = (u64)((last_->next)); __sanitizer::u64 v2 = (u64)((0)); if (__builtin_expect(!!(!(v1 == v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_list.h", 115, "(" "(last_->next)" ") " "==" " (" "(0)" ")", v1, v2); } while (false); } } template class IteratorBase { public: explicit IteratorBase(ItemTy *current) : current_(current) {} IteratorBase &operator++() { current_ = current_->next; return *this; } bool operator!=(IteratorBase other) const { return current_ != 
other.current_; } ItemTy &operator*() { return *current_; } private: ItemTy *current_; }; typedef IteratorBase Iterator; typedef IteratorBase ConstIterator; Iterator begin() { return Iterator(first_); } Iterator end() { return Iterator(0); } ConstIterator begin() const { return ConstIterator(first_); } ConstIterator end() const { return ConstIterator(0); } uptr size_; Item *first_; Item *last_; }; } # 22 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_common.h" 2 # 1 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_mutex.h" 1 # 19 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_mutex.h" namespace __sanitizer { class StaticSpinMutex { public: void Init() { atomic_store(&state_, 0, memory_order_relaxed); } void Lock() { if (TryLock()) return; LockSlow(); } bool TryLock() { return atomic_exchange(&state_, 1, memory_order_acquire) == 0; } void Unlock() { atomic_store(&state_, 0, memory_order_release); } void CheckLocked() { do { __sanitizer::u64 v1 = (u64)((atomic_load(&state_, memory_order_relaxed))); __sanitizer::u64 v2 = (u64)((1)); if (__builtin_expect(!!(!(v1 == v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_mutex.h", 42, "(" "(atomic_load(&state_, memory_order_relaxed))" ") " "==" " (" "(1)" ")", v1, v2); } while (false); } private: atomic_uint8_t state_; void __attribute__((noinline)) LockSlow() { for (int i = 0;; i++) { if (i < 10) proc_yield(10); else internal_sched_yield(); if (atomic_load(&state_, memory_order_relaxed) == 0 && atomic_exchange(&state_, 1, memory_order_acquire) == 0) return; } } }; class SpinMutex : public StaticSpinMutex { public: SpinMutex() { Init(); } private: SpinMutex(const SpinMutex&); void operator=(const SpinMutex&); }; class BlockingMutex { public: explicit constexpr BlockingMutex(LinkerInitialized) : opaque_storage_ {0, }, owner_(0) {} BlockingMutex(); void Lock(); void Unlock(); void CheckLocked(); private: uptr opaque_storage_[10]; uptr owner_; }; class RWMutex { public: RWMutex() { atomic_store(&state_, kUnlocked, memory_order_relaxed); } ~RWMutex() { do { __sanitizer::u64 v1 = (u64)((atomic_load(&state_, memory_order_relaxed))); __sanitizer::u64 v2 = (u64)((kUnlocked)); if (__builtin_expect(!!(!(v1 == v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_mutex.h", 98, "(" "(atomic_load(&state_, memory_order_relaxed))" ") " "==" " (" "(kUnlocked)" ")", v1, v2); } while (false); } void Lock() { u32 cmp = kUnlocked; if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock, memory_order_acquire)) return; LockSlow(); } void Unlock() { u32 prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release); ; (void)prev; } void ReadLock() { u32 prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire); if ((prev & kWriteLock) == 0) return; ReadLockSlow(); } void ReadUnlock() { u32 prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release); ; ; (void)prev; } void CheckLocked() { do { __sanitizer::u64 v1 = (u64)((atomic_load(&state_, memory_order_relaxed))); __sanitizer::u64 v2 = (u64)((kUnlocked)); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_mutex.h", 130, "(" "(atomic_load(&state_, memory_order_relaxed))" ") " "!=" " (" "(kUnlocked)" ")", v1, v2); } while (false); } private: atomic_uint32_t state_; enum { kUnlocked = 0, kWriteLock = 1, kReadLock = 2 }; void __attribute__((noinline)) LockSlow() { for (int i = 0;; i++) { if (i < 10) proc_yield(10); else 
internal_sched_yield(); u32 cmp = atomic_load(&state_, memory_order_relaxed); if (cmp == kUnlocked && atomic_compare_exchange_weak(&state_, &cmp, kWriteLock, memory_order_acquire)) return; } } void __attribute__((noinline)) ReadLockSlow() { for (int i = 0;; i++) { if (i < 10) proc_yield(10); else internal_sched_yield(); u32 prev = atomic_load(&state_, memory_order_acquire); if ((prev & kWriteLock) == 0) return; } } RWMutex(const RWMutex&); void operator = (const RWMutex&); }; template class GenericScopedLock { public: explicit GenericScopedLock(MutexType *mu) : mu_(mu) { mu_->Lock(); } ~GenericScopedLock() { mu_->Unlock(); } private: MutexType *mu_; GenericScopedLock(const GenericScopedLock&); void operator=(const GenericScopedLock&); }; template class GenericScopedReadLock { public: explicit GenericScopedReadLock(MutexType *mu) : mu_(mu) { mu_->ReadLock(); } ~GenericScopedReadLock() { mu_->ReadUnlock(); } private: MutexType *mu_; GenericScopedReadLock(const GenericScopedReadLock&); void operator=(const GenericScopedReadLock&); }; typedef GenericScopedLock SpinMutexLock; typedef GenericScopedLock BlockingMutexLock; typedef GenericScopedLock RWMutexLock; typedef GenericScopedReadLock RWMutexReadLock; } # 23 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_common.h" 2 namespace __sanitizer { struct StackTrace; struct AddressInfo; const uptr kWordSize = 64 / 8; const uptr kWordSizeInBits = 8 * kWordSize; const uptr kCacheLineSize = 64; const uptr kMaxPathLength = 4096; const uptr kMaxThreadStackSize = 1 << 30; static const uptr kErrorMessageBufferSize = 1 << 16; const u64 kExternalPCBit = 1ULL << 60; extern const char *SanitizerToolName; extern atomic_uint32_t current_verbosity; inline void SetVerbosity(int verbosity) { atomic_store(¤t_verbosity, verbosity, memory_order_relaxed); } inline int Verbosity() { return atomic_load(¤t_verbosity, memory_order_relaxed); } uptr GetPageSize(); extern uptr PageSizeCached; inline uptr GetPageSizeCached() { if (!PageSizeCached) PageSizeCached = GetPageSize(); return PageSizeCached; } uptr GetMmapGranularity(); uptr GetMaxVirtualAddress(); uptr GetTid(); uptr GetThreadSelf(); void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top, uptr *stack_bottom); void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size, uptr *tls_addr, uptr *tls_size); void *MmapOrDie(uptr size, const char *mem_type, bool raw_report = false); inline void *MmapOrDieQuietly(uptr size, const char *mem_type) { return MmapOrDie(size, mem_type, true); } void UnmapOrDie(void *addr, uptr size); void *MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name = nullptr); void *MmapNoReserveOrDie(uptr size, const char *mem_type); void *MmapFixedOrDie(uptr fixed_addr, uptr size); void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr); void *MmapNoAccess(uptr size); void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type); bool MprotectNoAccess(uptr addr, uptr size); bool MprotectReadOnly(uptr addr, uptr size); uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding); bool MemoryRangeIsAvailable(uptr range_start, uptr range_end); void ReleaseMemoryToOS(uptr addr, uptr size); void IncreaseTotalMmap(uptr size); void DecreaseTotalMmap(uptr size); uptr GetRSS(); void NoHugePagesInRegion(uptr addr, uptr length); void DontDumpShadowMemory(uptr addr, uptr length); void CheckVMASize(); void RunMallocHooks(const void *ptr, uptr size); void RunFreeHooks(const void *ptr); template class InternalScopedBuffer { 
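// Note on the RWMutex encoding above (an editorial sketch, not original
// commentary): state_ packs the whole lock into one u32. Bit 0 is the writer
// flag (kWriteLock == 1) and every reader adds kReadLock == 2, so e.g. three
// concurrent readers make state_ == 6. That is why ReadLock() can detect a
// pending writer simply by testing prev & kWriteLock after its fetch-add,
// and why Unlock()/ReadUnlock() are plain fetch-subs with release ordering.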
template<typename T>
class InternalScopedBuffer {
 public:
  explicit InternalScopedBuffer(uptr cnt) {
    cnt_ = cnt;
    ptr_ = (T *)MmapOrDie(cnt * sizeof(T), "InternalScopedBuffer");
  }
  ~InternalScopedBuffer() {
    UnmapOrDie(ptr_, cnt_ * sizeof(T));
  }
  T &operator[](uptr i) { return ptr_[i]; }
  T *data() { return ptr_; }
  uptr size() { return cnt_ * sizeof(T); }

 private:
  T *ptr_;
  uptr cnt_;
  InternalScopedBuffer(const InternalScopedBuffer &) = delete;
  InternalScopedBuffer &operator=(const InternalScopedBuffer &) = delete;
  InternalScopedBuffer(InternalScopedBuffer &&) = delete;
  InternalScopedBuffer &operator=(InternalScopedBuffer &&) = delete;
};

class InternalScopedString : public InternalScopedBuffer<char> {
 public:
  explicit InternalScopedString(uptr max_length)
      : InternalScopedBuffer<char>(max_length), length_(0) {
    (*this)[0] = '\0';
  }
  uptr length() { return length_; }
  void clear() {
    (*this)[0] = '\0';
    length_ = 0;
  }
  void append(const char *format, ...);

 private:
  uptr length_;
};

class LowLevelAllocator {
 public:
  void *Allocate(uptr size);
 private:
  char *allocated_end_;
  char *allocated_current_;
};
typedef void (*LowLevelAllocateCallback)(uptr ptr, uptr size);
void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback);

void RawWrite(const char *buffer);
bool ColorizeReports();
void RemoveANSIEscapeSequencesFromString(char *buffer);
void Printf(const char *format, ...);
void Report(const char *format, ...);
void SetPrintfAndReportCallback(void (*callback)(const char *));
# 191 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_common.h"
extern StaticSpinMutex CommonSanitizerReportMutex;

struct ReportFile {
  void Write(const char *buffer, uptr length);
  bool SupportsColors();
  void SetReportPath(const char *path);

  StaticSpinMutex *mu;
  fd_t fd;
  char path_prefix[kMaxPathLength];
  char full_path[kMaxPathLength];
  uptr fd_pid;

 private:
  void ReopenIfNecessary();
};
extern ReportFile report_file;

extern uptr stoptheworld_tracer_pid;
extern uptr stoptheworld_tracer_ppid;

enum FileAccessMode {
  RdOnly,
  WrOnly,
  RdWr
};

fd_t OpenFile(const char *filename, FileAccessMode mode, error_t *errno_p = nullptr);
void CloseFile(fd_t);
bool ReadFromFile(fd_t fd, void *buff, uptr buff_size, uptr *bytes_read = nullptr, error_t *error_p = nullptr);
bool WriteToFile(fd_t fd, const void *buff, uptr buff_size, uptr *bytes_written = nullptr, error_t *error_p = nullptr);
bool RenameFile(const char *oldpath, const char *newpath, error_t *error_p = nullptr);

struct FileCloser {
  explicit FileCloser(fd_t fd) : fd(fd) {}
  ~FileCloser() { CloseFile(fd); }
  fd_t fd;
};

bool SupportsColoredOutput(fd_t fd);
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size, uptr *read_len, uptr max_len = 1 << 26, error_t *errno_p = nullptr);
void *MapFileToMemory(const char *file_name, uptr *buff_size);
void *MapWritableFileToMemory(void *addr, uptr size, fd_t fd, OFF_T offset);

bool IsAccessibleMemoryRange(uptr beg, uptr size);

const char *StripPathPrefix(const char *filepath, const char *strip_file_prefix);
const char *StripModuleName(const char *module);

uptr ReadBinaryName(char *buf, uptr buf_len);
uptr ReadBinaryNameCached(char *buf, uptr buf_len);
uptr ReadLongProcessName(char *buf, uptr buf_len);
const char *GetProcessName();
void UpdateProcessName();
void CacheBinaryName();
void DisableCoreDumperIfNecessary();
void DumpProcessMap();
bool FileExists(const char *filename);
const char *GetEnv(const char *name);
bool SetEnv(const char *name, const char *value);
const char *GetPwd();
char *FindPathToBinary(const char *name);
bool IsPathSeparator(const char c);
bool IsAbsolutePath(const char *path);
pid_t StartSubprocess(const char *filename, const char *const argv[], fd_t stdin_fd = ((fd_t)-1), fd_t stdout_fd = ((fd_t)-1), fd_t stderr_fd = ((fd_t)-1));
bool IsProcessRunning(pid_t pid);
int WaitForProcess(pid_t pid);

u32 GetUid();
void ReExec();
char **GetArgv();
void PrintCmdline();
bool StackSizeIsUnlimited();
uptr GetStackSizeLimitInBytes();
void SetStackSizeLimitInBytes(uptr limit);
bool AddressSpaceIsUnlimited();
void SetAddressSpaceUnlimited();
void AdjustStackSize(void *attr);
void PrepareForSandboxing(__sanitizer_sandbox_arguments *args);
void CovPrepareForSandboxing(__sanitizer_sandbox_arguments *args);
void SetSandboxingCallback(void (*f)());

void CoverageUpdateMapping();
void CovBeforeFork();
void CovAfterFork(int child_pid);

void InitializeCoverage(bool enabled, const char *coverage_dir);
void ReInitializeCoverage(bool enabled, const char *coverage_dir);

void InitTlsSize();
uptr GetTlsSize();

void SleepForSeconds(int seconds);
void SleepForMillis(int millis);
u64 NanoTime();
int Atexit(void (*function)(void));
void SortArray(uptr *array, uptr size);
void SortArray(u32 *array, uptr size);
bool TemplateMatch(const char *templ, const char *str);

void __attribute__((noreturn)) Abort();
void __attribute__((noreturn)) Die();
void __attribute__((noreturn)) CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2);
void __attribute__((noreturn)) ReportMmapFailureAndDie(uptr size, const char *mem_type, const char *mmap_type, error_t err, bool raw_report = false);

bool SanitizerSetThreadName(const char *name);
bool SanitizerGetThreadName(char *name, int max_len);

typedef void (*DieCallbackType)(void);
bool AddDieCallback(DieCallbackType callback);
bool RemoveDieCallback(DieCallbackType callback);
void SetUserDieCallback(DieCallbackType callback);

typedef void (*CheckFailedCallbackType)(const char *, int, const char *, u64, u64);
void SetCheckFailedCallback(CheckFailedCallbackType callback);

void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded));

typedef void (*AllocatorReleaseToOSCallback)();
void SetAllocatorReleaseToOSCallback(AllocatorReleaseToOSCallback Callback);

typedef void (*SignalHandlerType)(int, void *, void *);
bool IsHandledDeadlySignal(int signum);
void InstallDeadlySignalHandlers(SignalHandlerType handler);
void SetAlternateSignalStack();
void UnsetAlternateSignalStack();

const int kMaxSummaryLength = 1024;
void ReportErrorSummary(const char *error_message);
void ReportErrorSummary(const char *error_type, const AddressInfo &info);
void ReportErrorSummary(const char *error_type, const StackTrace *trace);
# 414 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_common.h"
inline uptr MostSignificantSetBitIndex(uptr x) {
  do { __sanitizer::u64 v1 = (u64)((x)); __sanitizer::u64 v2 = (u64)((0U)); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_common.h", 415, "(" "(x)" ") " "!=" " (" "(0U)" ")", v1, v2); } while (false);
  unsigned long up;
  up = 64 - 1 - __builtin_clzl(x);
  return up;
}

inline uptr LeastSignificantSetBitIndex(uptr x) {
  do { __sanitizer::u64 v1 = (u64)((x)); __sanitizer::u64 v2 = (u64)((0U)); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_common.h", 432, "(" "(x)" ") " "!=" " (" "(0U)" ")", v1, v2); } while (false);
  unsigned long up;
  up = __builtin_ctzl(x);
  return up;
}

inline bool IsPowerOfTwo(uptr x) {
  return (x & (x - 1)) == 0;
}

inline uptr RoundUpToPowerOfTwo(uptr size) {
  do { __sanitizer::u64 v1 = (u64)((size)); __sanitizer::u64 v2 = (u64)(0); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_common.h", 453, "(" "(size)" ") " "!=" " (" "0" ")", v1, v2); } while (false);
  if (IsPowerOfTwo(size)) return size;
  uptr up = MostSignificantSetBitIndex(size);
  do { __sanitizer::u64 v1 = (u64)((size)); __sanitizer::u64 v2 = (u64)(((1ULL << (up + 1)))); if (__builtin_expect(!!(!(v1 < v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_common.h", 457, "(" "(size)" ") " "<" " (" "((1ULL << (up + 1)))" ")", v1, v2); } while (false);
  do { __sanitizer::u64 v1 = (u64)((size)); __sanitizer::u64 v2 = (u64)(((1ULL << up))); if (__builtin_expect(!!(!(v1 > v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_common.h", 458, "(" "(size)" ") " ">" " (" "((1ULL << up))" ")", v1, v2); } while (false);
  return 1ULL << (up + 1);
}

inline uptr RoundUpTo(uptr size, uptr boundary) {
  do { if (__builtin_expect(!!(!(IsPowerOfTwo(boundary))), 0)) { RawWrite("IsPowerOfTwo(boundary)"); Die(); } } while (0);
  return (size + boundary - 1) & ~(boundary - 1);
}

inline uptr RoundDownTo(uptr x, uptr boundary) {
  return x & ~(boundary - 1);
}

inline bool IsAligned(uptr a, uptr alignment) {
  return (a & (alignment - 1)) == 0;
}

inline uptr Log2(uptr x) {
  do { __sanitizer::u64 v1 = (u64)((IsPowerOfTwo(x))); __sanitizer::u64 v2 = (u64)(0); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_common.h", 476, "(" "(IsPowerOfTwo(x))" ") " "!=" " (" "0" ")", v1, v2); } while (false);
  return LeastSignificantSetBitIndex(x);
}

template<class T> T Min(T a, T b) { return a < b ? a : b; }
template<class T> T Max(T a, T b) { return a > b ? a : b; }

template<class T> void Swap(T& a, T& b) {
  T tmp = a;
  a = b;
  b = tmp;
}

inline bool IsSpace(int c) {
  return (c == ' ') || (c == '\n') || (c == '\t') || (c == '\f') || (c == '\r') || (c == '\v');
}
inline bool IsDigit(int c) {
  return (c >= '0') && (c <= '9');
}
inline int ToLower(int c) {
  return (c >= 'A' && c <= 'Z') ? (c + 'a' - 'A') : c;
}
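// Worked examples for the bit tricks above (editorial sketch): with
// boundary == 8, RoundUpTo(13, 8) == (13 + 7) & ~7 == 16 and
// RoundDownTo(13, 8) == 8. RoundUpToPowerOfTwo(33) finds the most
// significant set bit (index 5) and returns 1 << 6 == 64, while
// Log2(64) == LeastSignificantSetBitIndex(64) == 6. IsPowerOfTwo relies on
// x & (x - 1) clearing exactly the lowest set bit, which leaves 0 only for
// powers of two (and for x == 0 itself, which callers exclude via CHECKs).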
template<typename T>
class InternalMmapVectorNoCtor {
 public:
  void Initialize(uptr initial_capacity) {
    capacity_ = Max(initial_capacity, (uptr)1);
    size_ = 0;
    data_ = (T *)MmapOrDie(capacity_ * sizeof(T), "InternalMmapVectorNoCtor");
  }
  void Destroy() {
    UnmapOrDie(data_, capacity_ * sizeof(T));
  }
  T &operator[](uptr i) {
    do { __sanitizer::u64 v1 = (u64)((i)); __sanitizer::u64 v2 = (u64)((size_)); if (__builtin_expect(!!(!(v1 < v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_common.h", 517, "(" "(i)" ") " "<" " (" "(size_)" ")", v1, v2); } while (false);
    return data_[i];
  }
  const T &operator[](uptr i) const {
    do { __sanitizer::u64 v1 = (u64)((i)); __sanitizer::u64 v2 = (u64)((size_)); if (__builtin_expect(!!(!(v1 < v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_common.h", 521, "(" "(i)" ") " "<" " (" "(size_)" ")", v1, v2); } while (false);
    return data_[i];
  }
  void push_back(const T &element) {
    do { __sanitizer::u64 v1 = (u64)((size_)); __sanitizer::u64 v2 = (u64)((capacity_)); if (__builtin_expect(!!(!(v1 <= v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_common.h", 525, "(" "(size_)" ") " "<=" " (" "(capacity_)" ")", v1, v2); } while (false);
    if (size_ == capacity_) {
      uptr new_capacity = RoundUpToPowerOfTwo(size_ + 1);
      Resize(new_capacity);
    }
    internal_memcpy(&data_[size_++], &element, sizeof(T));
  }
  T &back() {
    do { __sanitizer::u64 v1 = (u64)((size_)); __sanitizer::u64 v2 = (u64)((0)); if (__builtin_expect(!!(!(v1 > v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_common.h", 533, "(" "(size_)" ") " ">" " (" "(0)" ")", v1, v2); } while (false);
    return data_[size_ - 1];
  }
  void pop_back() {
    do { __sanitizer::u64 v1 = (u64)((size_)); __sanitizer::u64 v2 = (u64)((0)); if (__builtin_expect(!!(!(v1 > v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_common.h", 537, "(" "(size_)" ") " ">" " (" "(0)" ")", v1, v2); } while (false);
    size_--;
  }
  uptr size() const { return size_; }
  const T *data() const { return data_; }
  T *data() { return data_; }
  uptr capacity() const { return capacity_; }
  void clear() { size_ = 0; }
  bool empty() const { return size() == 0; }
  const T *begin() const { return data(); }
  T *begin() { return data(); }
  const T *end() const { return data() + size(); }
  T *end() { return data() + size(); }

 private:
  void Resize(uptr new_capacity) {
    do { __sanitizer::u64 v1 = (u64)((new_capacity)); __sanitizer::u64 v2 = (u64)((0)); if (__builtin_expect(!!(!(v1 > v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_common.h", 571, "(" "(new_capacity)" ") " ">" " (" "(0)" ")", v1, v2); } while (false);
    do { __sanitizer::u64 v1 = (u64)((size_)); __sanitizer::u64 v2 = (u64)((new_capacity)); if (__builtin_expect(!!(!(v1 <= v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_common.h", 572, "(" "(size_)" ") " "<=" " (" "(new_capacity)" ")", v1, v2); } while (false);
    T *new_data = (T *)MmapOrDie(new_capacity * sizeof(T), "InternalMmapVector");
    internal_memcpy(new_data, data_, size_ * sizeof(T));
    T *old_data = data_;
    data_ = new_data;
    UnmapOrDie(old_data, capacity_ * sizeof(T));
    capacity_ = new_capacity;
  }

  T *data_;
  uptr capacity_;
  uptr size_;
};

template<typename T>
class InternalMmapVector : public InternalMmapVectorNoCtor<T> {
 public:
  explicit InternalMmapVector(uptr initial_capacity) {
    InternalMmapVectorNoCtor<T>::Initialize(initial_capacity);
  }
  ~InternalMmapVector() {
    InternalMmapVectorNoCtor<T>::Destroy();
  }
  InternalMmapVector(const InternalMmapVector&);
  void operator=(const InternalMmapVector&);
};

template<class Container, class Compare>
void InternalSort(Container *v, uptr size, Compare comp) {
  if (size < 2)
    return;
  for (uptr i = 1; i < size; i++) {
    uptr j, p;
    for (j = i; j > 0; j = p) {
      p = (j - 1) / 2;
      if (comp((*v)[p], (*v)[j]))
        Swap((*v)[j], (*v)[p]);
      else
        break;
    }
  }
  for (uptr i = size - 1; i > 0; i--) {
    Swap((*v)[0], (*v)[i]);
    uptr j, max_ind;
    for (j = 0; j < i; j = max_ind) {
      uptr left = 2 * j + 1;
      uptr right = 2 * j + 2;
      max_ind = j;
      if (left < i && comp((*v)[max_ind], (*v)[left]))
        max_ind = left;
      if (right < i && comp((*v)[max_ind], (*v)[right]))
        max_ind = right;
      if (max_ind != j)
        Swap((*v)[j], (*v)[max_ind]);
      else
        break;
    }
  }
}

template<class Container, class Value, class Compare>
uptr InternalBinarySearch(const Container &v, uptr first, uptr last, const Value &val, Compare comp) {
  uptr not_found = last + 1;
  while (last >= first) {
    uptr mid = (first + last) / 2;
    if (comp(v[mid], val))
      first = mid + 1;
    else if (comp(val, v[mid]))
      last = mid - 1;
    else
      return mid;
  }
  return not_found;
}

class LoadedModule {
 public:
  LoadedModule() : full_name_(nullptr), base_address_(0) { ranges_.clear(); }
  void set(const char *module_name, uptr base_address);
  void clear();
  void addAddressRange(uptr beg, uptr end, bool executable);
  bool containsAddress(uptr address) const;

  const char *full_name() const { return full_name_; }
  uptr base_address() const { return base_address_; }

  struct AddressRange {
    AddressRange *next;
    uptr beg;
    uptr end;
    bool executable;

    AddressRange(uptr beg, uptr end, bool executable)
        : next(nullptr), beg(beg), end(end), executable(executable) {}
  };

  const IntrusiveList<AddressRange> &ranges() const { return ranges_; }

 private:
  char *full_name_;
  uptr base_address_;
  IntrusiveList<AddressRange> ranges_;
};

class ListOfModules {
 public:
  ListOfModules() : modules_(kInitialCapacity) {}
  ~ListOfModules() { clear(); }
  void init();
  const LoadedModule *begin() const { return modules_.begin(); }
  LoadedModule *begin() { return modules_.begin(); }
  const LoadedModule *end() const { return modules_.end(); }
  LoadedModule *end() { return modules_.end(); }
  uptr size() const { return modules_.size(); }
  const LoadedModule &operator[](uptr i) const {
    do { __sanitizer::u64 v1 = (u64)((i)); __sanitizer::u64 v2 = (u64)((modules_.size())); if (__builtin_expect(!!(!(v1 < v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_common.h", 696, "(" "(i)" ") " "<" " (" "(modules_.size())" ")", v1, v2); } while (false);
    return modules_[i];
  }

 private:
  void clear() {
    for (auto &module : modules_) module.clear();
    modules_.clear();
  }

  InternalMmapVector<LoadedModule> modules_;
  static const uptr kInitialCapacity = 1 << 14;
};

typedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg);

enum AndroidApiLevel {
  ANDROID_NOT_ANDROID = 0,
  ANDROID_KITKAT = 19,
  ANDROID_LOLLIPOP_MR1 = 22,
  ANDROID_POST_LOLLIPOP = 23
};

void WriteToSyslog(const char *buffer);

inline void LogFullErrorReport(const char *buffer) {}

void WriteOneLineToSyslog(const char *s);
void LogMessageOnPrintf(const char *str);

void AndroidLogInit();
# 748 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_common.h"
inline void AndroidLogWrite(const char *buffer_unused) {}
inline void SanitizerInitializeUnwinder() {}
inline AndroidApiLevel AndroidGetApiLevel() { return ANDROID_NOT_ANDROID; }

inline uptr GetPthreadDestructorIterations() {
  return 4;
}

void *internal_start_thread(void(*func)(void*), void *arg);
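// Editorial sketch: InternalSort above is an in-place heapsort (sift-up to
// build a max-heap, then repeated swap-root-and-sift-down), which needs no
// extra memory and no libc. The helpers below only illustrate how it is
// invoked; their names are hypothetical and not part of the original
// translation unit. (This text sits inside namespace __sanitizer, so the
// types resolve without qualification.)
static inline bool example_uptr_less(uptr a, uptr b) { return a < b; }
static inline void example_sort_ascending(InternalMmapVectorNoCtor<uptr> *v) {
  // After this call (*v)[0] <= (*v)[1] <= ... <= (*v)[v->size() - 1].
  InternalSort(v, v->size(), example_uptr_less);
}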
void internal_join_thread(void *th);
void MaybeStartBackgroudThread();

static inline void SanitizerBreakOptimization(void *arg) {
  __asm__ __volatile__("" : : "r" (arg) : "memory");
}

struct SignalContext {
  void *context;
  uptr addr;
  uptr pc;
  uptr sp;
  uptr bp;
  bool is_memory_access;

  enum WriteFlag { UNKNOWN, READ, WRITE } write_flag;

  SignalContext(void *context, uptr addr, uptr pc, uptr sp, uptr bp,
                bool is_memory_access, WriteFlag write_flag)
      : context(context), addr(addr), pc(pc), sp(sp), bp(bp),
        is_memory_access(is_memory_access), write_flag(write_flag) {}

  static SignalContext Create(void *siginfo, void *context);

  static WriteFlag GetWriteFlag(void *context);
};

void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp);

void MaybeReexec();

template<typename Fn>
class RunOnDestruction {
 public:
  explicit RunOnDestruction(Fn fn) : fn_(fn) {}
  ~RunOnDestruction() { fn_(); }
 private:
  Fn fn_;
};

template<typename Fn>
RunOnDestruction<Fn> at_scope_exit(Fn fn) {
  return RunOnDestruction<Fn>(fn);
}
# 835 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_common.h"
inline void AvoidCVE_2016_2143() {}

struct StackDepotStats {
  uptr n_uniq_ids;
  uptr allocated;
};

}  // namespace __sanitizer

inline void *operator new(__sanitizer::operator_new_size_type size, __sanitizer::LowLevelAllocator &alloc) {
  return alloc.Allocate(size);
}
# 15 "../../../../gcc/libsanitizer/tsan/tsan_rtl.cc" 2
# 1 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_stackdepot.h" 1
# 17 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_stackdepot.h"
# 1 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_stacktrace.h" 1
# 16 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_stacktrace.h"
namespace __sanitizer {

static const u32 kStackTraceMax = 256;
# 37 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_stacktrace.h"
struct StackTrace {
  const uptr *trace;
  u32 size;
  u32 tag;

  static const int TAG_UNKNOWN = 0;
  static const int TAG_ALLOC = 1;
  static const int TAG_DEALLOC = 2;
  static const int TAG_CUSTOM = 100;

  StackTrace() : trace(nullptr), size(0), tag(0) {}
  StackTrace(const uptr *trace, u32 size) : trace(trace), size(size), tag(0) {}
  StackTrace(const uptr *trace, u32 size, u32 tag)
      : trace(trace), size(size), tag(tag) {}

  void Print() const;

  static bool WillUseFastUnwind(bool request_fast_unwind) {
    if (!1)
      return false;
    else if (!1)
      return true;
    return request_fast_unwind;
  }

  static uptr GetCurrentPc();
  static inline uptr GetPreviousInstructionPc(uptr pc);
  static uptr GetNextInstructionPc(uptr pc);
  typedef bool (*SymbolizeCallback)(const void *pc, char *out_buffer, int out_size);
};

inline __attribute__((always_inline)) uptr StackTrace::GetPreviousInstructionPc(uptr pc) {
# 83 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_stacktrace.h"
  return pc - 1;
}

struct BufferedStackTrace : public StackTrace {
  uptr trace_buffer[kStackTraceMax];
  uptr top_frame_bp;

  BufferedStackTrace() : StackTrace(trace_buffer, 0), top_frame_bp(0) {}

  void Init(const uptr *pcs, uptr cnt, uptr extra_top_pc = 0);
  void Unwind(u32 max_depth, uptr pc, uptr bp, void *context, uptr stack_top,
              uptr stack_bottom, bool request_fast_unwind);

 private:
  void FastUnwindStack(uptr pc, uptr bp, uptr stack_top, uptr stack_bottom, u32 max_depth);
  void SlowUnwindStack(uptr pc, u32 max_depth);
  void SlowUnwindStackWithContext(uptr pc, void *context, u32 max_depth);
  void PopStackFrames(uptr count);
  uptr LocatePcInTrace(uptr pc);

  BufferedStackTrace(const BufferedStackTrace &);
  void operator=(const BufferedStackTrace &);
};

static inline bool IsValidFrame(uptr frame, uptr stack_top, uptr stack_bottom) {
  return frame > stack_bottom && frame < stack_top - 2 * sizeof (uhwptr);
}

}  // namespace __sanitizer
# 18 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_stackdepot.h" 2
namespace __sanitizer {

struct StackDepotNode;
struct StackDepotHandle {
  StackDepotNode *node_;
  StackDepotHandle() : node_(nullptr) {}
  explicit StackDepotHandle(StackDepotNode *node) : node_(node) {}
  bool valid() { return node_; }
  u32 id();
  int use_count();
  void inc_use_count_unsafe();
};

const int kStackDepotMaxUseCount = 1U << 20;

StackDepotStats *StackDepotGetStats();
u32 StackDepotPut(StackTrace stack);
StackDepotHandle StackDepotPut_WithHandle(StackTrace stack);
StackTrace StackDepotGet(u32 id);

void StackDepotLockAll();
void StackDepotUnlockAll();

class StackDepotReverseMap {
 public:
  StackDepotReverseMap();
  StackTrace Get(u32 id);

 private:
  struct IdDescPair {
    u32 id;
    StackDepotNode *desc;

    static bool IdComparator(const IdDescPair &a, const IdDescPair &b);
  };

  InternalMmapVector<IdDescPair> map_;

  StackDepotReverseMap(const StackDepotReverseMap&);
  void operator=(const StackDepotReverseMap&);
};

}  // namespace __sanitizer
# 17 "../../../../gcc/libsanitizer/tsan/tsan_rtl.cc" 2
# 1 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_placement_new.h" 1
# 19 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_placement_new.h"
inline void *operator new(__sanitizer::operator_new_size_type sz, void *p) {
  return p;
}
# 18 "../../../../gcc/libsanitizer/tsan/tsan_rtl.cc" 2
# 1 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_symbolizer.h" 1
# 23 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_symbolizer.h"
namespace __sanitizer {

struct AddressInfo {
  uptr address;

  char *module;
  uptr module_offset;

  static const uptr kUnknown = ~(uptr)0;

  char *function;
  uptr function_offset;

  char *file;
  int line;
  int column;

  AddressInfo();
  void Clear();
  void FillModuleInfo(const char *mod_name, uptr mod_offset);
};

struct SymbolizedStack {
  SymbolizedStack *next;
  AddressInfo info;
  static SymbolizedStack *New(uptr addr);
  void ClearAll();

 private:
  SymbolizedStack();
};

struct DataInfo {
  char *module;
  uptr module_offset;
  char *file;
  uptr line;
  char *name;
  uptr start;
  uptr size;

  DataInfo();
  void Clear();
};

class SymbolizerTool;

class Symbolizer final {
 public:
  static Symbolizer *GetOrInit();
  static void LateInitialize();
  SymbolizedStack *SymbolizePC(uptr address);
  bool SymbolizeData(uptr address, DataInfo *info);

  bool GetModuleNameAndOffsetForPC(uptr pc, const char **module_name, uptr *module_address);
  const char *GetModuleNameForPc(uptr pc) {
    const char *module_name = nullptr;
    uptr unused;
    if (GetModuleNameAndOffsetForPC(pc, &module_name, &unused))
      return module_name;
    return nullptr;
  }

  void Flush();
  const char *Demangle(const char *name);
  void PrepareForSandboxing();

  typedef void (*StartSymbolizationHook)();
  typedef void (*EndSymbolizationHook)();

  void AddHooks(StartSymbolizationHook start_hook, EndSymbolizationHook end_hook);

  const LoadedModule *FindModuleForAddress(uptr address);

 private:
  class ModuleNameOwner {
   public:
    explicit ModuleNameOwner(BlockingMutex *synchronized_by)
        : storage_(kInitialCapacity), last_match_(nullptr), mu_(synchronized_by) {}
    const char *GetOwnedCopy(const char *str);

   private:
    static const uptr kInitialCapacity = 1000;
    InternalMmapVector<const char*> storage_;
    const char *last_match_;

    BlockingMutex *mu_;
  } module_names_;

  static Symbolizer *PlatformInit();

  bool FindModuleNameAndOffsetForAddress(uptr address, const char **module_name, uptr *module_offset);
  ListOfModules modules_;
  bool modules_fresh_;

  const char *PlatformDemangle(const char *name);
  void PlatformPrepareForSandboxing();

  static Symbolizer *symbolizer_;
  static StaticSpinMutex init_mu_;

  BlockingMutex mu_;

  IntrusiveList<SymbolizerTool> tools_;

  explicit Symbolizer(IntrusiveList<SymbolizerTool> tools);

  static LowLevelAllocator symbolizer_allocator_;

  StartSymbolizationHook start_hook_;
  EndSymbolizationHook end_hook_;
  class SymbolizerScope {
   public:
    explicit SymbolizerScope(const Symbolizer *sym);
    ~SymbolizerScope();
   private:
    const Symbolizer *sym_;
  };
};

void InitializeDbgHelpIfNeeded();

}  // namespace __sanitizer
# 19 "../../../../gcc/libsanitizer/tsan/tsan_rtl.cc" 2
# 1 "../../../../gcc/libsanitizer/tsan/tsan_defs.h" 1
# 17 "../../../../gcc/libsanitizer/tsan/tsan_defs.h"
# 1 "../../../../gcc/libsanitizer/tsan/tsan_stat.h" 1
# 15 "../../../../gcc/libsanitizer/tsan/tsan_stat.h"
namespace __tsan {

enum StatType {
  StatMop, StatMopRead, StatMopWrite,
  StatMop1, StatMop2, StatMop4, StatMop8,
  StatMopSame, StatMopIgnored, StatMopRange,
  StatMopRodata, StatMopRangeRodata,
  StatShadowProcessed, StatShadowZero, StatShadowNonZero,
  StatShadowSameSize, StatShadowIntersect, StatShadowNotIntersect,
  StatShadowSameThread, StatShadowAnotherThread, StatShadowReplace,
  StatFuncEnter, StatFuncExit,
  StatEvents,
  StatThreadCreate, StatThreadFinish, StatThreadReuse,
  StatThreadMaxTid, StatThreadMaxAlive,
  StatMutexCreate, StatMutexDestroy, StatMutexLock, StatMutexUnlock,
  StatMutexRecLock, StatMutexRecUnlock, StatMutexReadLock, StatMutexReadUnlock,
  StatSyncCreated, StatSyncDestroyed, StatSyncAcquire, StatSyncRelease,
  StatClockAcquire, StatClockAcquireEmpty, StatClockAcquireFastRelease,
  StatClockAcquireLarge, StatClockAcquireRepeat, StatClockAcquireFull,
  StatClockAcquiredSomething,
  StatClockRelease, StatClockReleaseResize,
  StatClockReleaseFast1, StatClockReleaseFast2, StatClockReleaseSlow,
  StatClockReleaseFull, StatClockReleaseAcquired, StatClockReleaseClearTail,
  StatClockStore, StatClockStoreResize, StatClockStoreFast,
  StatClockStoreFull, StatClockStoreTail,
  StatClockAcquireRelease,
  StatAtomic, StatAtomicLoad, StatAtomicStore, StatAtomicExchange,
  StatAtomicFetchAdd, StatAtomicFetchSub, StatAtomicFetchAnd,
  StatAtomicFetchOr, StatAtomicFetchXor, StatAtomicFetchNand,
  StatAtomicCAS, StatAtomicFence,
  StatAtomicRelaxed, StatAtomicConsume, StatAtomicAcquire,
  StatAtomicRelease, StatAtomicAcq_Rel, StatAtomicSeq_Cst,
  StatAtomic1, StatAtomic2, StatAtomic4, StatAtomic8, StatAtomic16,
  StatAnnotation,
  StatAnnotateHappensBefore, StatAnnotateHappensAfter,
  StatAnnotateCondVarSignal, StatAnnotateCondVarSignalAll,
  StatAnnotateMutexIsNotPHB, StatAnnotateCondVarWait,
  StatAnnotateRWLockCreate, StatAnnotateRWLockCreateStatic,
  StatAnnotateRWLockDestroy, StatAnnotateRWLockAcquired,
  StatAnnotateRWLockReleased,
  StatAnnotateTraceMemory, StatAnnotateFlushState,
  StatAnnotateNewMemory, StatAnnotateNoOp,
  StatAnnotateFlushExpectedRaces, StatAnnotateEnableRaceDetection,
  StatAnnotateMutexIsUsedAsCondVar,
  StatAnnotatePCQGet, StatAnnotatePCQPut, StatAnnotatePCQDestroy,
  StatAnnotatePCQCreate,
  StatAnnotateExpectRace, StatAnnotateBenignRaceSized, StatAnnotateBenignRace,
  StatAnnotateIgnoreReadsBegin, StatAnnotateIgnoreReadsEnd,
  StatAnnotateIgnoreWritesBegin, StatAnnotateIgnoreWritesEnd,
  StatAnnotateIgnoreSyncBegin, StatAnnotateIgnoreSyncEnd,
  StatAnnotatePublishMemoryRange, StatAnnotateUnpublishMemoryRange,
  StatAnnotateThreadName,
  StatMtxTotal, StatMtxTrace, StatMtxThreads, StatMtxReport,
  StatMtxSyncVar, StatMtxSyncTab, StatMtxSlab, StatMtxAnnotations,
  StatMtxAtExit, StatMtxMBlock, StatMtxDeadlockDetector,
  StatMtxFired, StatMtxRacy, StatMtxFD, StatMtxGlobalProc,
  StatCnt
};

}  // namespace __tsan
# 18 "../../../../gcc/libsanitizer/tsan/tsan_defs.h" 2
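// Editorial sketch for the stack depot API declared in
// sanitizer_stackdepot.h above: the depot deduplicates call stacks behind a
// u32 handle, so hot structures store 4 bytes instead of a PC array.
// StackDepotPut/StackDepotGet are the real entry points declared above; the
// example_* wrappers are hypothetical, not part of the original unit.
namespace __sanitizer {
static inline u32 example_remember_stack(const uptr *pcs, u32 n) {
  return StackDepotPut(StackTrace(pcs, n));  // Returns a stable id; 0 = none.
}
static inline StackTrace example_recall_stack(u32 id) {
  return StackDepotGet(id);  // Empty StackTrace if the id is unknown.
}
}  // namespace __sanitizer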
"../../../../gcc/libsanitizer/tsan/tsan_defs.h" 2 # 1 "../../../../gcc/libsanitizer/ubsan/ubsan_platform.h" 1 # 19 "../../../../gcc/libsanitizer/tsan/tsan_defs.h" 2 # 37 "../../../../gcc/libsanitizer/tsan/tsan_defs.h" namespace __tsan { const int kTidBits = 13; const unsigned kMaxTid = 1 << kTidBits; const unsigned kMaxTidInClock = kMaxTid * 2; const int kClkBits = 42; const unsigned kMaxTidReuse = (1 << (64 - kClkBits)) - 1; const uptr kShadowStackSize = 64 * 1024; const uptr kShadowCnt = 4; const uptr kShadowCell = 8; const uptr kShadowSize = 8; const uptr kShadowMultiplier = kShadowSize * kShadowCnt / kShadowCell; const uptr kMetaShadowCell = 8; const uptr kMetaShadowSize = 4; const bool kCollectHistory = true; const unsigned kInvalidTid = (unsigned)-1; void build_consistency_release(); void build_consistency_nostats(); static inline void __attribute__((used)) build_consistency() { build_consistency_release(); build_consistency_nostats(); } template T min(T a, T b) { return a < b ? a : b; } template T max(T a, T b) { return a > b ? a : b; } template T RoundUp(T p, u64 align) { ; return (T)(((u64)p + align - 1) & ~(align - 1)); } template T RoundDown(T p, u64 align) { ; return (T)((u64)p & ~(align - 1)); } template T GetLsb(T v, int bits) { return (T)((u64)v & ((1ull << bits) - 1)); } struct MD5Hash { u64 hash[2]; bool operator==(const MD5Hash &other) const; }; MD5Hash md5_hash(const void *data, uptr size); struct Processor; struct ThreadState; class ThreadContext; struct Context; struct ReportStack; class ReportDesc; class RegionAlloc; struct MBlock { u64 siz; u32 stk; u16 tid; }; typedef char assertion_failed__155[2*(int)(sizeof(MBlock) == 16)-1]; } # 20 "../../../../gcc/libsanitizer/tsan/tsan_rtl.cc" 2 # 1 "../../../../gcc/libsanitizer/tsan/tsan_platform.h" 1 # 21 "../../../../gcc/libsanitizer/tsan/tsan_platform.h" # 1 "../../../../gcc/libsanitizer/tsan/tsan_trace.h" 1 # 15 "../../../../gcc/libsanitizer/tsan/tsan_trace.h" # 1 "../../../../gcc/libsanitizer/tsan/tsan_mutex.h" 1 # 18 "../../../../gcc/libsanitizer/tsan/tsan_mutex.h" namespace __tsan { enum MutexType { MutexTypeInvalid, MutexTypeTrace, MutexTypeThreads, MutexTypeReport, MutexTypeSyncVar, MutexTypeSyncTab, MutexTypeSlab, MutexTypeAnnotations, MutexTypeAtExit, MutexTypeMBlock, MutexTypeJavaMBlock, MutexTypeDDetector, MutexTypeFired, MutexTypeRacy, MutexTypeGlobalProc, MutexTypeCount }; class Mutex { public: explicit Mutex(MutexType type, StatType stat_type); ~Mutex(); void Lock(); void Unlock(); void ReadLock(); void ReadUnlock(); void CheckLocked(); private: atomic_uintptr_t state_; Mutex(const Mutex&); void operator = (const Mutex&); }; typedef GenericScopedLock Lock; typedef GenericScopedReadLock ReadLock; class InternalDeadlockDetector { public: InternalDeadlockDetector(); void Lock(MutexType t); void Unlock(MutexType t); void CheckNoLocks(); private: u64 seq_; u64 locked_[MutexTypeCount]; }; void InitializeMutex(); void CheckNoLocks(ThreadState *thr); } # 16 "../../../../gcc/libsanitizer/tsan/tsan_trace.h" 2 # 1 "../../../../gcc/libsanitizer/tsan/tsan_stack_trace.h" 1 # 17 "../../../../gcc/libsanitizer/tsan/tsan_stack_trace.h" namespace __tsan { struct VarSizeStackTrace : public StackTrace { uptr *trace_buffer; VarSizeStackTrace(); ~VarSizeStackTrace(); void Init(const uptr *pcs, uptr cnt, uptr extra_top_pc = 0); private: void ResizeBuffer(uptr new_size); VarSizeStackTrace(const VarSizeStackTrace &); void operator=(const VarSizeStackTrace &); }; } # 17 "../../../../gcc/libsanitizer/tsan/tsan_trace.h" 2 # 1 
"../../../../gcc/libsanitizer/tsan/tsan_mutexset.h" 1 # 17 "../../../../gcc/libsanitizer/tsan/tsan_mutexset.h" namespace __tsan { class MutexSet { public: static const uptr kMaxSize = 16; struct Desc { u64 id; u64 epoch; int count; bool write; }; MutexSet(); void Add(u64 id, bool write, u64 epoch); void Del(u64 id, bool write); void Remove(u64 id); uptr Size() const; Desc Get(uptr i) const; void operator=(const MutexSet &other) { internal_memcpy(this, &other, sizeof(*this)); } private: uptr size_; Desc descs_[kMaxSize]; void RemovePos(uptr i); MutexSet(const MutexSet&); }; # 66 "../../../../gcc/libsanitizer/tsan/tsan_mutexset.h" } # 18 "../../../../gcc/libsanitizer/tsan/tsan_trace.h" 2 namespace __tsan { const int kTracePartSizeBits = 13; const int kTracePartSize = 1 << kTracePartSizeBits; const int kTraceParts = 2 * 1024 * 1024 / kTracePartSize; const int kTraceSize = kTracePartSize * kTraceParts; enum EventType { EventTypeMop, EventTypeFuncEnter, EventTypeFuncExit, EventTypeLock, EventTypeUnlock, EventTypeRLock, EventTypeRUnlock }; typedef u64 Event; struct TraceHeader { BufferedStackTrace stack0; u64 epoch0; MutexSet mset0; TraceHeader() : stack0(), epoch0() {} }; struct Trace { Mutex mtx; uptr shadow_stack[kShadowStackSize]; TraceHeader headers[kTraceParts]; Trace() : mtx(MutexTypeTrace, StatMtxTrace) { } }; } # 22 "../../../../gcc/libsanitizer/tsan/tsan_platform.h" 2 namespace __tsan { # 44 "../../../../gcc/libsanitizer/tsan/tsan_platform.h" struct Mapping { static const uptr kMetaShadowBeg = 0x300000000000ull; static const uptr kMetaShadowEnd = 0x340000000000ull; static const uptr kTraceMemBeg = 0x600000000000ull; static const uptr kTraceMemEnd = 0x620000000000ull; static const uptr kShadowBeg = 0x010000000000ull; static const uptr kShadowEnd = 0x200000000000ull; static const uptr kHeapMemBeg = 0x7b0000000000ull; static const uptr kHeapMemEnd = 0x7c0000000000ull; static const uptr kLoAppMemBeg = 0x000000001000ull; static const uptr kLoAppMemEnd = 0x008000000000ull; static const uptr kMidAppMemBeg = 0x550000000000ull; static const uptr kMidAppMemEnd = 0x568000000000ull; static const uptr kHiAppMemBeg = 0x7e8000000000ull; static const uptr kHiAppMemEnd = 0x800000000000ull; static const uptr kAppMemMsk = 0x780000000000ull; static const uptr kAppMemXor = 0x040000000000ull; static const uptr kVdsoBeg = 0xf000000000000000ull; }; # 338 "../../../../gcc/libsanitizer/tsan/tsan_platform.h" enum MappingType { MAPPING_LO_APP_BEG, MAPPING_LO_APP_END, MAPPING_HI_APP_BEG, MAPPING_HI_APP_END, MAPPING_MID_APP_BEG, MAPPING_MID_APP_END, MAPPING_HEAP_BEG, MAPPING_HEAP_END, MAPPING_APP_BEG, MAPPING_APP_END, MAPPING_SHADOW_BEG, MAPPING_SHADOW_END, MAPPING_META_SHADOW_BEG, MAPPING_META_SHADOW_END, MAPPING_TRACE_BEG, MAPPING_TRACE_END, MAPPING_VDSO_BEG, }; template uptr MappingImpl(void) { switch (Type) { case MAPPING_LO_APP_BEG: return Mapping::kLoAppMemBeg; case MAPPING_LO_APP_END: return Mapping::kLoAppMemEnd; case MAPPING_MID_APP_BEG: return Mapping::kMidAppMemBeg; case MAPPING_MID_APP_END: return Mapping::kMidAppMemEnd; case MAPPING_HI_APP_BEG: return Mapping::kHiAppMemBeg; case MAPPING_HI_APP_END: return Mapping::kHiAppMemEnd; case MAPPING_HEAP_BEG: return Mapping::kHeapMemBeg; case MAPPING_HEAP_END: return Mapping::kHeapMemEnd; case MAPPING_VDSO_BEG: return Mapping::kVdsoBeg; case MAPPING_SHADOW_BEG: return Mapping::kShadowBeg; case MAPPING_SHADOW_END: return Mapping::kShadowEnd; case MAPPING_META_SHADOW_BEG: return Mapping::kMetaShadowBeg; case MAPPING_META_SHADOW_END: return 
Mapping::kMetaShadowEnd; case MAPPING_TRACE_BEG: return Mapping::kTraceMemBeg; case MAPPING_TRACE_END: return Mapping::kTraceMemEnd; } } template<int Type> uptr MappingArchImpl(void) { # 405 "../../../../gcc/libsanitizer/tsan/tsan_platform.h" return MappingImpl<Mapping, Type>(); } inline __attribute__((always_inline)) uptr LoAppMemBeg(void) { return MappingArchImpl<MAPPING_LO_APP_BEG>(); } inline __attribute__((always_inline)) uptr LoAppMemEnd(void) { return MappingArchImpl<MAPPING_LO_APP_END>(); } inline __attribute__((always_inline)) uptr MidAppMemBeg(void) { return MappingArchImpl<MAPPING_MID_APP_BEG>(); } inline __attribute__((always_inline)) uptr MidAppMemEnd(void) { return MappingArchImpl<MAPPING_MID_APP_END>(); } inline __attribute__((always_inline)) uptr HeapMemBeg(void) { return MappingArchImpl<MAPPING_HEAP_BEG>(); } inline __attribute__((always_inline)) uptr HeapMemEnd(void) { return MappingArchImpl<MAPPING_HEAP_END>(); } inline __attribute__((always_inline)) uptr HiAppMemBeg(void) { return MappingArchImpl<MAPPING_HI_APP_BEG>(); } inline __attribute__((always_inline)) uptr HiAppMemEnd(void) { return MappingArchImpl<MAPPING_HI_APP_END>(); } inline __attribute__((always_inline)) uptr VdsoBeg(void) { return MappingArchImpl<MAPPING_VDSO_BEG>(); } # 466 "../../../../gcc/libsanitizer/tsan/tsan_platform.h" static inline bool GetUserRegion(int i, uptr *start, uptr *end) { switch (i) { default: return false; case 0: *start = LoAppMemBeg(); *end = LoAppMemEnd(); return true; case 1: *start = HiAppMemBeg(); *end = HiAppMemEnd(); return true; case 2: *start = HeapMemBeg(); *end = HeapMemEnd(); return true; case 3: *start = MidAppMemBeg(); *end = MidAppMemEnd(); return true; } } inline __attribute__((always_inline)) uptr ShadowBeg(void) { return MappingArchImpl<MAPPING_SHADOW_BEG>(); } inline __attribute__((always_inline)) uptr ShadowEnd(void) { return MappingArchImpl<MAPPING_SHADOW_END>(); } inline __attribute__((always_inline)) uptr MetaShadowBeg(void) { return MappingArchImpl<MAPPING_META_SHADOW_BEG>(); } inline __attribute__((always_inline)) uptr MetaShadowEnd(void) { return MappingArchImpl<MAPPING_META_SHADOW_END>(); } inline __attribute__((always_inline)) uptr TraceMemBeg(void) { return MappingArchImpl<MAPPING_TRACE_BEG>(); } inline __attribute__((always_inline)) uptr TraceMemEnd(void) { return MappingArchImpl<MAPPING_TRACE_END>(); } template<typename Mapping> bool IsAppMemImpl(uptr mem) { return (mem >= Mapping::kHeapMemBeg && mem < Mapping::kHeapMemEnd) || (mem >= Mapping::kMidAppMemBeg && mem < Mapping::kMidAppMemEnd) || (mem >= Mapping::kLoAppMemBeg && mem < Mapping::kLoAppMemEnd) || (mem >= Mapping::kHiAppMemBeg && mem < Mapping::kHiAppMemEnd); } inline __attribute__((always_inline)) bool IsAppMem(uptr mem) { # 558 "../../../../gcc/libsanitizer/tsan/tsan_platform.h" return IsAppMemImpl<Mapping>(mem); } template<typename Mapping> bool IsShadowMemImpl(uptr mem) { return mem >= Mapping::kShadowBeg && mem <= Mapping::kShadowEnd; } inline __attribute__((always_inline)) bool IsShadowMem(uptr mem) { # 585 "../../../../gcc/libsanitizer/tsan/tsan_platform.h" return IsShadowMemImpl<Mapping>(mem); } template<typename Mapping> bool IsMetaMemImpl(uptr mem) { return mem >= Mapping::kMetaShadowBeg && mem <= Mapping::kMetaShadowEnd; } inline __attribute__((always_inline)) bool IsMetaMem(uptr mem) { # 612 "../../../../gcc/libsanitizer/tsan/tsan_platform.h" return IsMetaMemImpl<Mapping>(mem); } template<typename Mapping> uptr MemToShadowImpl(uptr x) { ; return (((x) & ~(Mapping::kAppMemMsk | (kShadowCell - 1))) ^ Mapping::kAppMemXor) * kShadowCnt; } inline __attribute__((always_inline)) uptr MemToShadow(uptr x) { # 649 "../../../../gcc/libsanitizer/tsan/tsan_platform.h" return MemToShadowImpl<Mapping>(x); } template<typename Mapping> u32 *MemToMetaImpl(uptr x) { ; return (u32*)(((((x) & ~(Mapping::kAppMemMsk | (kMetaShadowCell - 1)))) / kMetaShadowCell * kMetaShadowSize) | Mapping::kMetaShadowBeg); # 669
"../../../../gcc/libsanitizer/tsan/tsan_platform.h" } inline __attribute__((always_inline)) u32 *MemToMeta(uptr x) { # 688 "../../../../gcc/libsanitizer/tsan/tsan_platform.h" return MemToMetaImpl(x); } template uptr ShadowToMemImpl(uptr s) { ; uptr p = (s / kShadowCnt) ^ Mapping::kAppMemXor; if (p >= Mapping::kLoAppMemBeg && p < Mapping::kLoAppMemEnd && MemToShadow(p) == s) return p; p = ((s / kShadowCnt) ^ Mapping::kAppMemXor) + (Mapping::kMidAppMemBeg & Mapping::kAppMemMsk); if (p >= Mapping::kMidAppMemBeg && p < Mapping::kMidAppMemEnd && MemToShadow(p) == s) return p; return ((s / kShadowCnt) ^ Mapping::kAppMemXor) | Mapping::kAppMemMsk; } inline __attribute__((always_inline)) uptr ShadowToMem(uptr s) { # 740 "../../../../gcc/libsanitizer/tsan/tsan_platform.h" return ShadowToMemImpl(s); } const uptr kTotalTraceSize = (kTraceSize * sizeof(Event) + sizeof(Trace) + (64 << 10) + (64 << 10) - 1) & ~((64 << 10) - 1); template uptr GetThreadTraceImpl(int tid) { uptr p = Mapping::kTraceMemBeg + (uptr)tid * kTotalTraceSize; ; return p; } inline __attribute__((always_inline)) uptr GetThreadTrace(int tid) { # 775 "../../../../gcc/libsanitizer/tsan/tsan_platform.h" return GetThreadTraceImpl(tid); } template uptr GetThreadTraceHeaderImpl(int tid) { uptr p = Mapping::kTraceMemBeg + (uptr)tid * kTotalTraceSize + kTraceSize * sizeof(Event); ; return p; } inline __attribute__((always_inline)) uptr GetThreadTraceHeader(int tid) { # 805 "../../../../gcc/libsanitizer/tsan/tsan_platform.h" return GetThreadTraceHeaderImpl(tid); } void InitializePlatform(); void InitializePlatformEarly(); void CheckAndProtect(); void InitializeShadowMemoryPlatform(); void FlushShadowMemory(); void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive); int ExtractResolvFDs(void *state, int *fds, int nfd); int ExtractRecvmsgFDs(void *msg, int *fds, int nfd); int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m, void *abstime), void *c, void *m, void *abstime, void(*cleanup)(void *arg), void *arg); void DestroyThreadState(); } # 21 "../../../../gcc/libsanitizer/tsan/tsan_rtl.cc" 2 # 1 "../../../../gcc/libsanitizer/tsan/tsan_rtl.h" 1 # 27 "../../../../gcc/libsanitizer/tsan/tsan_rtl.h" # 1 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator.h" 1 # 20 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator.h" # 1 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_lfstack.h" 1 # 22 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_lfstack.h" namespace __sanitizer { template struct LFStack { void Clear() { atomic_store(&head_, 0, memory_order_relaxed); } bool Empty() const { return (atomic_load(&head_, memory_order_relaxed) & kPtrMask) == 0; } void Push(T *p) { u64 cmp = atomic_load(&head_, memory_order_relaxed); for (;;) { u64 cnt = (cmp & kCounterMask) + kCounterInc; u64 xch = (u64)(uptr)p | cnt; p->next = (T*)(uptr)(cmp & kPtrMask); if (atomic_compare_exchange_weak(&head_, &cmp, xch, memory_order_release)) break; } } T *Pop() { u64 cmp = atomic_load(&head_, memory_order_acquire); for (;;) { T *cur = (T*)(uptr)(cmp & kPtrMask); if (!cur) return nullptr; T *nxt = cur->next; u64 cnt = (cmp & kCounterMask); u64 xch = (u64)(uptr)nxt | cnt; if (atomic_compare_exchange_weak(&head_, &cmp, xch, memory_order_acquire)) return cur; } } static const int kCounterBits = (17); static const u64 kPtrMask = ((u64)-1) >> kCounterBits; static const u64 kCounterMask = ~kPtrMask; static const u64 kCounterInc = kPtrMask + 1; atomic_uint64_t head_; }; } # 21 
"../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator.h" 2 # 1 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_procmaps.h" 1 # 19 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_procmaps.h" namespace __sanitizer { struct ProcSelfMapsBuff { char *data; uptr mmaped_size; uptr len; }; void ReadProcMaps(ProcSelfMapsBuff *proc_maps); class MemoryMappingLayout { public: explicit MemoryMappingLayout(bool cache_enabled); ~MemoryMappingLayout(); bool Next(uptr *start, uptr *end, uptr *offset, char filename[], uptr filename_size, uptr *protection); void Reset(); static void CacheMemoryMappings(); void DumpListOfModules(InternalMmapVector *modules); static const uptr kProtectionRead = 1; static const uptr kProtectionWrite = 2; static const uptr kProtectionExecute = 4; static const uptr kProtectionShared = 8; private: void LoadFromCache(); ProcSelfMapsBuff proc_self_maps_; const char *current_; static ProcSelfMapsBuff cached_proc_self_maps_; static StaticSpinMutex cache_lock_; # 76 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_procmaps.h" }; typedef void (*fill_profile_f)(uptr start, uptr rss, bool file, uptr *stats, uptr stats_size); void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size); bool GetCodeRangeForFile(const char *module, uptr *start, uptr *end); bool IsDecimal(char c); uptr ParseDecimal(const char **p); bool IsHex(char c); uptr ParseHex(const char **p); } # 22 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator.h" 2 namespace __sanitizer { bool IsReportingOOM(); void __attribute__((noreturn)) ReportAllocatorCannotReturnNull(bool out_of_memory); struct NoOpMapUnmapCallback { void OnMap(uptr p, uptr size) const { } void OnUnmap(uptr p, uptr size) const { } }; typedef void (*ForEachChunkCallback)(uptr chunk, void *arg); bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n); # 1 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_size_class_map.h" 1 # 118 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_size_class_map.h" template class SizeClassMap { static const uptr kMinSize = 1 << kMinSizeLog; static const uptr kMidSize = 1 << kMidSizeLog; static const uptr kMidClass = kMidSize / kMinSize; static const uptr S = kNumBits - 1; static const uptr M = (1 << S) - 1; public: static const uptr kMaxNumCachedHint = kMaxNumCachedHintT; typedef char assertion_failed__131[2*(int)((kMaxNumCachedHint & (kMaxNumCachedHint - 1)) == 0)-1]; static const uptr kMaxSize = 1UL << kMaxSizeLog; static const uptr kNumClasses = kMidClass + ((kMaxSizeLog - kMidSizeLog) << S) + 1; static const uptr kLargestClassID = kNumClasses - 2; typedef char assertion_failed__137[2*(int)(kNumClasses >= 16 && kNumClasses <= 256)-1]; static const uptr kNumClassesRounded = kNumClasses <= 32 ? 32 : kNumClasses <= 64 ? 64 : kNumClasses <= 128 ? 
128 : 256; static uptr Size(uptr class_id) { if (class_id <= kMidClass) return kMinSize * class_id; class_id -= kMidClass; uptr t = kMidSize << (class_id >> S); return t + (t >> S) * (class_id & M); } static uptr ClassID(uptr size) { if (size <= kMidSize) return (size + kMinSize - 1) >> kMinSizeLog; if (size > kMaxSize) return 0; uptr l = MostSignificantSetBitIndex(size); uptr hbits = (size >> (l - S)) & M; uptr lbits = size & ((1 << (l - S)) - 1); uptr l1 = l - kMidSizeLog; return kMidClass + (l1 << S) + hbits + (lbits > 0); } static uptr MaxCachedHint(uptr class_id) { if (class_id == 0) return 0; uptr n = (1UL << kMaxBytesCachedLog) / Size(class_id); return Max(1, Min(kMaxNumCachedHint, n)); } static void Print() { uptr prev_s = 0; uptr total_cached = 0; for (uptr i = 0; i < kNumClasses; i++) { uptr s = Size(i); if (s >= kMidSize / 2 && (s & (s - 1)) == 0) Printf("\n"); uptr d = s - prev_s; uptr p = prev_s ? (d * 100 / prev_s) : 0; uptr l = s ? MostSignificantSetBitIndex(s) : 0; uptr cached = MaxCachedHint(i) * s; Printf("c%02zd => s: %zd diff: +%zd %02zd%% l %zd " "cached: %zd %zd; id %zd\n", i, Size(i), d, p, l, MaxCachedHint(i), cached, ClassID(s)); total_cached += cached; prev_s = s; } Printf("Total cached: %zd\n", total_cached); } static void Validate() { for (uptr c = 1; c < kNumClasses; c++) { uptr s = Size(c); do { __sanitizer::u64 v1 = (u64)((s)); __sanitizer::u64 v2 = (u64)((0U)); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_size_class_map.h", 192, "(" "(s)" ") " "!=" " (" "(0U)" ")", v1, v2); } while (false); do { __sanitizer::u64 v1 = (u64)((ClassID(s))); __sanitizer::u64 v2 = (u64)((c)); if (__builtin_expect(!!(!(v1 == v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_size_class_map.h", 193, "(" "(ClassID(s))" ") " "==" " (" "(c)" ")", v1, v2); } while (false); if (c != kNumClasses - 1) do { __sanitizer::u64 v1 = (u64)((ClassID(s + 1))); __sanitizer::u64 v2 = (u64)((c + 1)); if (__builtin_expect(!!(!(v1 == v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_size_class_map.h", 195, "(" "(ClassID(s + 1))" ") " "==" " (" "(c + 1)" ")", v1, v2); } while (false); do { __sanitizer::u64 v1 = (u64)((ClassID(s - 1))); __sanitizer::u64 v2 = (u64)((c)); if (__builtin_expect(!!(!(v1 == v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_size_class_map.h", 196, "(" "(ClassID(s - 1))" ") " "==" " (" "(c)" ")", v1, v2); } while (false); if (c) do { __sanitizer::u64 v1 = (u64)((Size(c))); __sanitizer::u64 v2 = (u64)((Size(c-1))); if (__builtin_expect(!!(!(v1 > v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_size_class_map.h", 198, "(" "(Size(c))" ") " ">" " (" "(Size(c-1))" ")", v1, v2); } while (false); } do { __sanitizer::u64 v1 = (u64)((ClassID(kMaxSize + 1))); __sanitizer::u64 v2 = (u64)((0)); if (__builtin_expect(!!(!(v1 == v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_size_class_map.h", 200, "(" "(ClassID(kMaxSize + 1))" ") " "==" " (" "(0)" ")", v1, v2); } while (false); for (uptr s = 1; s <= kMaxSize; s++) { uptr c = ClassID(s); do { __sanitizer::u64 v1 = (u64)((c)); __sanitizer::u64 v2 = (u64)((kNumClasses)); if (__builtin_expect(!!(!(v1 < v2)), 0)) 
__sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_size_class_map.h", 205, "(" "(c)" ") " "<" " (" "(kNumClasses)" ")", v1, v2); } while (false); do { __sanitizer::u64 v1 = (u64)((Size(c))); __sanitizer::u64 v2 = (u64)((s)); if (__builtin_expect(!!(!(v1 >= v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_size_class_map.h", 206, "(" "(Size(c))" ") " ">=" " (" "(s)" ")", v1, v2); } while (false); if (c > 0) do { __sanitizer::u64 v1 = (u64)((Size(c-1))); __sanitizer::u64 v2 = (u64)((s)); if (__builtin_expect(!!(!(v1 < v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_size_class_map.h", 208, "(" "(Size(c-1))" ") " "<" " (" "(s)" ")", v1, v2); } while (false); } } }; typedef SizeClassMap<3, 4, 8, 17, 128, 16> DefaultSizeClassMap; typedef SizeClassMap<3, 4, 8, 17, 64, 14> CompactSizeClassMap; typedef SizeClassMap<2, 5, 9, 16, 64, 14> VeryCompactSizeClassMap; # 45 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator.h" 2 # 1 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_stats.h" 1 # 16 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_stats.h" enum AllocatorStat { AllocatorStatAllocated, AllocatorStatMapped, AllocatorStatCount }; typedef uptr AllocatorStatCounters[AllocatorStatCount]; class AllocatorStats { public: void Init() { internal_memset(this, 0, sizeof(*this)); } void InitLinkerInitialized() {} void Add(AllocatorStat i, uptr v) { v += atomic_load(&stats_[i], memory_order_relaxed); atomic_store(&stats_[i], v, memory_order_relaxed); } void Sub(AllocatorStat i, uptr v) { v = atomic_load(&stats_[i], memory_order_relaxed) - v; atomic_store(&stats_[i], v, memory_order_relaxed); } void Set(AllocatorStat i, uptr v) { atomic_store(&stats_[i], v, memory_order_relaxed); } uptr Get(AllocatorStat i) const { return atomic_load(&stats_[i], memory_order_relaxed); } private: friend class AllocatorGlobalStats; AllocatorStats *next_; AllocatorStats *prev_; atomic_uintptr_t stats_[AllocatorStatCount]; }; class AllocatorGlobalStats : public AllocatorStats { public: void InitLinkerInitialized() { next_ = this; prev_ = this; } void Init() { internal_memset(this, 0, sizeof(*this)); InitLinkerInitialized(); } void Register(AllocatorStats *s) { SpinMutexLock l(&mu_); s->next_ = next_; s->prev_ = this; next_->prev_ = s; next_ = s; } void Unregister(AllocatorStats *s) { SpinMutexLock l(&mu_); s->prev_->next_ = s->next_; s->next_->prev_ = s->prev_; for (int i = 0; i < AllocatorStatCount; i++) Add(AllocatorStat(i), s->Get(AllocatorStat(i))); } void Get(AllocatorStatCounters s) const { internal_memset(s, 0, AllocatorStatCount * sizeof(uptr)); SpinMutexLock l(&mu_); const AllocatorStats *stats = this; for (;;) { for (int i = 0; i < AllocatorStatCount; i++) s[i] += stats->Get(AllocatorStat(i)); stats = stats->next_; if (stats == this) break; } for (int i = 0; i < AllocatorStatCount; i++) s[i] = ((sptr)s[i]) >= 0 ? 
s[i] : 0; } private: mutable SpinMutex mu_; }; # 46 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator.h" 2 # 1 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_primary64.h" 1 # 15 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_primary64.h" template struct SizeClassAllocator64LocalCache; # 38 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_primary64.h" struct SizeClassAllocator64FlagMasks { enum { kRandomShuffleChunks = 1, }; }; template class SizeClassAllocator64 { public: static const uptr kSpaceBeg = Params::kSpaceBeg; static const uptr kSpaceSize = Params::kSpaceSize; static const uptr kMetadataSize = Params::kMetadataSize; typedef typename Params::SizeClassMap SizeClassMap; typedef typename Params::MapUnmapCallback MapUnmapCallback; static const bool kRandomShuffleChunks = Params::kFlags & SizeClassAllocator64FlagMasks::kRandomShuffleChunks; typedef SizeClassAllocator64 ThisT; typedef SizeClassAllocator64LocalCache AllocatorCache; typedef u32 CompactPtrT; static const uptr kCompactPtrScale = 4; CompactPtrT PointerToCompactPtr(uptr base, uptr ptr) { return static_cast((ptr - base) >> kCompactPtrScale); } uptr CompactPtrToPointer(uptr base, CompactPtrT ptr32) { return base + (static_cast(ptr32) << kCompactPtrScale); } void Init() { uptr TotalSpaceSize = kSpaceSize + AdditionalSize(); if (kUsingConstantSpaceBeg) { do { __sanitizer::u64 v1 = (u64)((kSpaceBeg)); __sanitizer::u64 v2 = (u64)((reinterpret_cast( MmapFixedNoAccess(kSpaceBeg, TotalSpaceSize)))); if (__builtin_expect(!!(!(v1 == v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_primary64.h", 74, "(" "(kSpaceBeg)" ") " "==" " (" "(reinterpret_cast( MmapFixedNoAccess(kSpaceBeg, TotalSpaceSize)))" ")", v1, v2); } while (false); } else { NonConstSpaceBeg = reinterpret_cast(MmapNoAccess(TotalSpaceSize)); do { __sanitizer::u64 v1 = (u64)((NonConstSpaceBeg)); __sanitizer::u64 v2 = (u64)((~(uptr)0)); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_primary64.h", 78, "(" "(NonConstSpaceBeg)" ") " "!=" " (" "(~(uptr)0)" ")", v1, v2); } while (false); } MapWithCallback(SpaceEnd(), AdditionalSize()); } void MapWithCallback(uptr beg, uptr size) { do { __sanitizer::u64 v1 = (u64)((beg)); __sanitizer::u64 v2 = (u64)((reinterpret_cast(MmapFixedOrDie(beg, size)))); if (__builtin_expect(!!(!(v1 == v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_primary64.h", 84, "(" "(beg)" ") " "==" " (" "(reinterpret_cast(MmapFixedOrDie(beg, size)))" ")", v1, v2); } while (false); MapUnmapCallback().OnMap(beg, size); } void UnmapWithCallback(uptr beg, uptr size) { MapUnmapCallback().OnUnmap(beg, size); UnmapOrDie(reinterpret_cast(beg), size); } static bool CanAllocate(uptr size, uptr alignment) { return size <= SizeClassMap::kMaxSize && alignment <= SizeClassMap::kMaxSize; } __attribute__((noinline)) void ReturnToAllocator(AllocatorStats *stat, uptr class_id, const CompactPtrT *chunks, uptr n_chunks) { RegionInfo *region = GetRegionInfo(class_id); uptr region_beg = GetRegionBeginBySizeClass(class_id); CompactPtrT *free_array = GetFreeArray(region_beg); BlockingMutexLock l(®ion->mutex); uptr old_num_chunks = region->num_freed_chunks; uptr new_num_freed_chunks = old_num_chunks + n_chunks; EnsureFreeArraySpace(region, region_beg, new_num_freed_chunks); for (uptr i = 0; i < n_chunks; i++) 
free_array[old_num_chunks + i] = chunks[i]; region->num_freed_chunks = new_num_freed_chunks; region->n_freed += n_chunks; } __attribute__((noinline)) void GetFromAllocator(AllocatorStats *stat, uptr class_id, CompactPtrT *chunks, uptr n_chunks) { RegionInfo *region = GetRegionInfo(class_id); uptr region_beg = GetRegionBeginBySizeClass(class_id); CompactPtrT *free_array = GetFreeArray(region_beg); BlockingMutexLock l(®ion->mutex); if (__builtin_expect(!!(region->num_freed_chunks < n_chunks), 0)) { PopulateFreeArray(stat, class_id, region, n_chunks - region->num_freed_chunks); do { __sanitizer::u64 v1 = (u64)((region->num_freed_chunks)); __sanitizer::u64 v2 = (u64)((n_chunks)); if (__builtin_expect(!!(!(v1 >= v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_primary64.h", 124, "(" "(region->num_freed_chunks)" ") " ">=" " (" "(n_chunks)" ")", v1, v2); } while (false); } region->num_freed_chunks -= n_chunks; uptr base_idx = region->num_freed_chunks; for (uptr i = 0; i < n_chunks; i++) chunks[i] = free_array[base_idx + i]; region->n_allocated += n_chunks; } bool PointerIsMine(const void *p) { uptr P = reinterpret_cast(p); if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0) return P / kSpaceSize == kSpaceBeg / kSpaceSize; return P >= SpaceBeg() && P < SpaceEnd(); } uptr GetRegionBegin(const void *p) { if (kUsingConstantSpaceBeg) return reinterpret_cast(p) & ~(kRegionSize - 1); uptr space_beg = SpaceBeg(); return ((reinterpret_cast(p) - space_beg) & ~(kRegionSize - 1)) + space_beg; } uptr GetRegionBeginBySizeClass(uptr class_id) { return SpaceBeg() + kRegionSize * class_id; } uptr GetSizeClass(const void *p) { if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0) return ((reinterpret_cast(p)) / kRegionSize) % kNumClassesRounded; return ((reinterpret_cast(p) - SpaceBeg()) / kRegionSize) % kNumClassesRounded; } void *GetBlockBegin(const void *p) { uptr class_id = GetSizeClass(p); uptr size = ClassIdToSize(class_id); if (!size) return nullptr; uptr chunk_idx = GetChunkIdx((uptr)p, size); uptr reg_beg = GetRegionBegin(p); uptr beg = chunk_idx * size; uptr next_beg = beg + size; if (class_id >= kNumClasses) return nullptr; RegionInfo *region = GetRegionInfo(class_id); if (region->mapped_user >= next_beg) return reinterpret_cast(reg_beg + beg); return nullptr; } uptr GetActuallyAllocatedSize(void *p) { do { __sanitizer::u64 v1 = (u64)((PointerIsMine(p))); __sanitizer::u64 v2 = (u64)(0); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_primary64.h", 176, "(" "(PointerIsMine(p))" ") " "!=" " (" "0" ")", v1, v2); } while (false); return ClassIdToSize(GetSizeClass(p)); } uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); } void *GetMetaData(const void *p) { uptr class_id = GetSizeClass(p); uptr size = ClassIdToSize(class_id); uptr chunk_idx = GetChunkIdx(reinterpret_cast(p), size); uptr region_beg = GetRegionBeginBySizeClass(class_id); return reinterpret_cast(GetMetadataEnd(region_beg) - (1 + chunk_idx) * kMetadataSize); } uptr TotalMemoryUsed() { uptr res = 0; for (uptr i = 0; i < kNumClasses; i++) res += GetRegionInfo(i)->allocated_user; return res; } void TestOnlyUnmap() { UnmapWithCallback(SpaceBeg(), kSpaceSize + AdditionalSize()); } static void FillMemoryProfile(uptr start, uptr rss, bool file, uptr *stats, uptr stats_size) { for (uptr class_id = 0; class_id < stats_size; class_id++) if (stats[class_id] == start) stats[class_id] = rss; } 
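// [Editor's sketch] The primary 64-bit allocator above relies on purely
// arithmetic pointer-to-class mapping: the space is cut into
// kNumClassesRounded equal, power-of-two-sized regions, one per size class,
// so GetSizeClass()/GetRegionBegin() need no lookup tables. Below is a
// minimal stand-alone illustration of that addressing scheme; the constant
// names and values are hypothetical, chosen only to mirror the expressions
// in the member functions above, not taken from any particular build:
//
//   const uptr kSketchSpaceBeg = 0x600000000000ull;      // assumed base
//   const uptr kSketchSpaceSize = 1ull << 40;            // assumed 1T space
//   const uptr kSketchClasses = 64;                      // kNumClassesRounded
//   const uptr kSketchRegionSize = kSketchSpaceSize / kSketchClasses;
//
//   uptr SketchGetSizeClass(uptr p) {                    // cf. GetSizeClass()
//     return ((p - kSketchSpaceBeg) / kSketchRegionSize) % kSketchClasses;
//   }
//   uptr SketchGetRegionBegin(uptr p) {                  // cf. GetRegionBegin()
//     return ((p - kSketchSpaceBeg) & ~(kSketchRegionSize - 1)) + kSketchSpaceBeg;
//   }
//
// E.g. p = kSketchSpaceBeg + 5 * kSketchRegionSize + 123 lands in class 5,
// and SketchGetRegionBegin(p) yields the start of class 5's region; the
// metadata and the per-region free array then sit at fixed offsets from that
// region start, which is what GetMetaData()/GetFreeArray() above compute.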
void PrintStats(uptr class_id, uptr rss) { RegionInfo *region = GetRegionInfo(class_id); if (region->mapped_user == 0) return; uptr in_use = region->n_allocated - region->n_freed; uptr avail_chunks = region->allocated_user / ClassIdToSize(class_id); Printf( " %02zd (%zd): mapped: %zdK allocs: %zd frees: %zd inuse: %zd " "num_freed_chunks %zd" " avail: %zd rss: %zdK releases: %zd\n", class_id, ClassIdToSize(class_id), region->mapped_user >> 10, region->n_allocated, region->n_freed, in_use, region->num_freed_chunks, avail_chunks, rss >> 10, region->rtoi.num_releases); } void PrintStats() { uptr total_mapped = 0; uptr n_allocated = 0; uptr n_freed = 0; for (uptr class_id = 1; class_id < kNumClasses; class_id++) { RegionInfo *region = GetRegionInfo(class_id); total_mapped += region->mapped_user; n_allocated += region->n_allocated; n_freed += region->n_freed; } Printf("Stats: SizeClassAllocator64: %zdM mapped in %zd allocations; " "remains %zd\n", total_mapped >> 20, n_allocated, n_allocated - n_freed); uptr rss_stats[kNumClasses]; for (uptr class_id = 0; class_id < kNumClasses; class_id++) rss_stats[class_id] = SpaceBeg() + kRegionSize * class_id; GetMemoryProfile(FillMemoryProfile, rss_stats, kNumClasses); for (uptr class_id = 1; class_id < kNumClasses; class_id++) PrintStats(class_id, rss_stats[class_id]); } void ForceLock() { for (uptr i = 0; i < kNumClasses; i++) { GetRegionInfo(i)->mutex.Lock(); } } void ForceUnlock() { for (int i = (int)kNumClasses - 1; i >= 0; i--) { GetRegionInfo(i)->mutex.Unlock(); } } void ForEachChunk(ForEachChunkCallback callback, void *arg) { for (uptr class_id = 1; class_id < kNumClasses; class_id++) { RegionInfo *region = GetRegionInfo(class_id); uptr chunk_size = ClassIdToSize(class_id); uptr region_beg = SpaceBeg() + class_id * kRegionSize; for (uptr chunk = region_beg; chunk < region_beg + region->allocated_user; chunk += chunk_size) { callback(chunk, arg); } } } static uptr ClassIdToSize(uptr class_id) { return SizeClassMap::Size(class_id); } static uptr AdditionalSize() { return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded, GetPageSizeCached()); } void ReleaseToOS() { for (uptr class_id = 1; class_id < kNumClasses; class_id++) ReleaseToOS(class_id); } typedef SizeClassMap SizeClassMapT; static const uptr kNumClasses = SizeClassMap::kNumClasses; static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded; private: static const uptr kRegionSize = kSpaceSize / kNumClassesRounded; static const uptr kFreeArraySize = kRegionSize / 8; static const bool kUsingConstantSpaceBeg = kSpaceBeg != ~(uptr)0; uptr NonConstSpaceBeg; uptr SpaceBeg() const { return kUsingConstantSpaceBeg ? 
kSpaceBeg : NonConstSpaceBeg; } uptr SpaceEnd() const { return SpaceBeg() + kSpaceSize; } typedef char assertion_failed__309[2*(int)((kRegionSize) >= (1ULL << (64 / 2)))-1]; typedef char assertion_failed__311[2*(int)((kRegionSize) <= (1ULL << (64 / 2 + 4)))-1]; static const uptr kUserMapSize = 1 << 16; static const uptr kMetaMapSize = 1 << 16; static const uptr kFreeArrayMapSize = 1 << 16; static const uptr kReleaseToOsGranularity = 1 << 12; struct ReleaseToOsInfo { uptr n_freed_at_last_release; uptr num_releases; }; struct RegionInfo { BlockingMutex mutex; uptr num_freed_chunks; uptr mapped_free_array; uptr allocated_user; uptr allocated_meta; uptr mapped_user; uptr mapped_meta; u32 rand_state; uptr n_allocated, n_freed; ReleaseToOsInfo rtoi; }; typedef char assertion_failed__338[2*(int)(sizeof(RegionInfo) >= kCacheLineSize)-1]; u32 Rand(u32 *state) { return (*state = *state * 1103515245 + 12345) >> 16; } u32 RandN(u32 *state, u32 n) { return Rand(state) % n; } void RandomShuffle(u32 *a, u32 n, u32 *rand_state) { if (n <= 1) return; for (u32 i = n - 1; i > 0; i--) Swap(a[i], a[RandN(rand_state, i + 1)]); } RegionInfo *GetRegionInfo(uptr class_id) { do { __sanitizer::u64 v1 = (u64)((class_id)); __sanitizer::u64 v2 = (u64)((kNumClasses)); if (__builtin_expect(!!(!(v1 < v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_primary64.h", 353, "(" "(class_id)" ") " "<" " (" "(kNumClasses)" ")", v1, v2); } while (false); RegionInfo *regions = reinterpret_cast(SpaceBeg() + kSpaceSize); return ®ions[class_id]; } uptr GetMetadataEnd(uptr region_beg) { return region_beg + kRegionSize - kFreeArraySize; } uptr GetChunkIdx(uptr chunk, uptr size) { if (!kUsingConstantSpaceBeg) chunk -= SpaceBeg(); uptr offset = chunk % kRegionSize; if (offset >> (64 / 2)) return offset / size; return (u32)offset / (u32)size; } CompactPtrT *GetFreeArray(uptr region_beg) { return reinterpret_cast(region_beg + kRegionSize - kFreeArraySize); } void EnsureFreeArraySpace(RegionInfo *region, uptr region_beg, uptr num_freed_chunks) { uptr needed_space = num_freed_chunks * sizeof(CompactPtrT); if (region->mapped_free_array < needed_space) { do { __sanitizer::u64 v1 = (u64)((needed_space)); __sanitizer::u64 v2 = (u64)((kFreeArraySize)); if (__builtin_expect(!!(!(v1 <= v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_primary64.h", 384, "(" "(needed_space)" ") " "<=" " (" "(kFreeArraySize)" ")", v1, v2); } while (false); uptr new_mapped_free_array = RoundUpTo(needed_space, kFreeArrayMapSize); uptr current_map_end = reinterpret_cast(GetFreeArray(region_beg)) + region->mapped_free_array; uptr new_map_size = new_mapped_free_array - region->mapped_free_array; MapWithCallback(current_map_end, new_map_size); region->mapped_free_array = new_mapped_free_array; } } __attribute__((noinline)) void PopulateFreeArray(AllocatorStats *stat, uptr class_id, RegionInfo *region, uptr requested_count) { uptr size = ClassIdToSize(class_id); uptr beg_idx = region->allocated_user; uptr end_idx = beg_idx + requested_count * size; uptr region_beg = GetRegionBeginBySizeClass(class_id); if (end_idx > region->mapped_user) { if (!kUsingConstantSpaceBeg && region->mapped_user == 0) region->rand_state = static_cast(region_beg >> 12); uptr map_size = kUserMapSize; while (end_idx > region->mapped_user + map_size) map_size += kUserMapSize; do { __sanitizer::u64 v1 = (u64)((region->mapped_user + map_size)); __sanitizer::u64 v2 = (u64)((end_idx)); if 
(__builtin_expect(!!(!(v1 >= v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_primary64.h", 409, "(" "(region->mapped_user + map_size)" ") " ">=" " (" "(end_idx)" ")", v1, v2); } while (false); MapWithCallback(region_beg + region->mapped_user, map_size); stat->Add(AllocatorStatMapped, map_size); region->mapped_user += map_size; } CompactPtrT *free_array = GetFreeArray(region_beg); uptr total_count = (region->mapped_user - beg_idx) / size; uptr num_freed_chunks = region->num_freed_chunks; EnsureFreeArraySpace(region, region_beg, num_freed_chunks + total_count); for (uptr i = 0; i < total_count; i++) { uptr chunk = beg_idx + i * size; free_array[num_freed_chunks + total_count - 1 - i] = PointerToCompactPtr(0, chunk); } if (kRandomShuffleChunks) RandomShuffle(&free_array[num_freed_chunks], total_count, ®ion->rand_state); region->num_freed_chunks += total_count; region->allocated_user += total_count * size; do { __sanitizer::u64 v1 = (u64)((region->allocated_user)); __sanitizer::u64 v2 = (u64)((region->mapped_user)); if (__builtin_expect(!!(!(v1 <= v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_primary64.h", 428, "(" "(region->allocated_user)" ") " "<=" " (" "(region->mapped_user)" ")", v1, v2); } while (false); region->allocated_meta += total_count * kMetadataSize; if (region->allocated_meta > region->mapped_meta) { uptr map_size = kMetaMapSize; while (region->allocated_meta > region->mapped_meta + map_size) map_size += kMetaMapSize; do { __sanitizer::u64 v1 = (u64)((region->mapped_meta + map_size)); __sanitizer::u64 v2 = (u64)((region->allocated_meta)); if (__builtin_expect(!!(!(v1 >= v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_primary64.h", 436, "(" "(region->mapped_meta + map_size)" ") " ">=" " (" "(region->allocated_meta)" ")", v1, v2); } while (false); MapWithCallback(GetMetadataEnd(region_beg) - region->mapped_meta - map_size, map_size); region->mapped_meta += map_size; } do { __sanitizer::u64 v1 = (u64)((region->allocated_meta)); __sanitizer::u64 v2 = (u64)((region->mapped_meta)); if (__builtin_expect(!!(!(v1 <= v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_primary64.h", 441, "(" "(region->allocated_meta)" ") " "<=" " (" "(region->mapped_meta)" ")", v1, v2); } while (false); if (region->mapped_user + region->mapped_meta > kRegionSize - kFreeArraySize) { Printf("%s: Out of memory. Dying. 
", SanitizerToolName); Printf("The process has exhausted %zuMB for size class %zu.\n", kRegionSize / 1024 / 1024, size); Die(); } } bool MaybeReleaseChunkRange(uptr region_beg, uptr chunk_size, CompactPtrT first, CompactPtrT last) { uptr beg_ptr = CompactPtrToPointer(region_beg, first); uptr end_ptr = CompactPtrToPointer(region_beg, last) + chunk_size; do { __sanitizer::u64 v1 = (u64)((end_ptr - beg_ptr)); __sanitizer::u64 v2 = (u64)((kReleaseToOsGranularity)); if (__builtin_expect(!!(!(v1 >= v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_primary64.h", 455, "(" "(end_ptr - beg_ptr)" ") " ">=" " (" "(kReleaseToOsGranularity)" ")", v1, v2); } while (false); beg_ptr = RoundUpTo(beg_ptr, kReleaseToOsGranularity); end_ptr = RoundDownTo(end_ptr, kReleaseToOsGranularity); if (end_ptr == beg_ptr) return false; ReleaseMemoryToOS(beg_ptr, end_ptr - beg_ptr); return true; } # 471 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_primary64.h" void ReleaseToOS(uptr class_id) { RegionInfo *region = GetRegionInfo(class_id); uptr region_beg = GetRegionBeginBySizeClass(class_id); CompactPtrT *free_array = GetFreeArray(region_beg); uptr chunk_size = ClassIdToSize(class_id); uptr scaled_chunk_size = chunk_size >> kCompactPtrScale; const uptr kScaledGranularity = kReleaseToOsGranularity >> kCompactPtrScale; BlockingMutexLock l(®ion->mutex); uptr n = region->num_freed_chunks; if (n * chunk_size < kReleaseToOsGranularity) return; if ((region->rtoi.n_freed_at_last_release - region->n_freed) * chunk_size < kReleaseToOsGranularity) return; SortArray(free_array, n); uptr beg = free_array[0]; uptr prev = free_array[0]; for (uptr i = 1; i < n; i++) { uptr chunk = free_array[i]; do { __sanitizer::u64 v1 = (u64)((chunk)); __sanitizer::u64 v2 = (u64)((prev)); if (__builtin_expect(!!(!(v1 > v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_primary64.h", 490, "(" "(chunk)" ") " ">" " (" "(prev)" ")", v1, v2); } while (false); if (chunk - prev != scaled_chunk_size) { do { __sanitizer::u64 v1 = (u64)((chunk - prev)); __sanitizer::u64 v2 = (u64)((scaled_chunk_size)); if (__builtin_expect(!!(!(v1 > v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_primary64.h", 492, "(" "(chunk - prev)" ") " ">" " (" "(scaled_chunk_size)" ")", v1, v2); } while (false); if (prev + scaled_chunk_size - beg >= kScaledGranularity) { MaybeReleaseChunkRange(region_beg, chunk_size, beg, prev); region->rtoi.n_freed_at_last_release = region->n_freed; region->rtoi.num_releases++; } beg = chunk; } prev = chunk; } } }; # 47 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator.h" 2 # 1 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_bytemap.h" 1 # 16 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_bytemap.h" template class FlatByteMap { public: void TestOnlyInit() { internal_memset(map_, 0, sizeof(map_)); } void set(uptr idx, u8 val) { do { __sanitizer::u64 v1 = (u64)((idx)); __sanitizer::u64 v2 = (u64)((kSize)); if (__builtin_expect(!!(!(v1 < v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_bytemap.h", 24, "(" "(idx)" ") " "<" " (" "(kSize)" ")", v1, v2); } while (false); do { __sanitizer::u64 v1 = (u64)((0U)); __sanitizer::u64 v2 = (u64)((map_[idx])); if (__builtin_expect(!!(!(v1 == v2)), 0)) 
__sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_bytemap.h", 25, "(" "(0U)" ") " "==" " (" "(map_[idx])" ")", v1, v2); } while (false); map_[idx] = val; } u8 operator[] (uptr idx) { do { __sanitizer::u64 v1 = (u64)((idx)); __sanitizer::u64 v2 = (u64)((kSize)); if (__builtin_expect(!!(!(v1 < v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_bytemap.h", 29, "(" "(idx)" ") " "<" " (" "(kSize)" ")", v1, v2); } while (false); return map_[idx]; } private: u8 map_[kSize]; }; template class TwoLevelByteMap { public: void TestOnlyInit() { internal_memset(map1_, 0, sizeof(map1_)); mu_.Init(); } void TestOnlyUnmap() { for (uptr i = 0; i < kSize1; i++) { u8 *p = Get(i); if (!p) continue; MapUnmapCallback().OnUnmap(reinterpret_cast(p), kSize2); UnmapOrDie(p, kSize2); } } uptr size() const { return kSize1 * kSize2; } uptr size1() const { return kSize1; } uptr size2() const { return kSize2; } void set(uptr idx, u8 val) { do { __sanitizer::u64 v1 = (u64)((idx)); __sanitizer::u64 v2 = (u64)((kSize1 * kSize2)); if (__builtin_expect(!!(!(v1 < v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_bytemap.h", 64, "(" "(idx)" ") " "<" " (" "(kSize1 * kSize2)" ")", v1, v2); } while (false); u8 *map2 = GetOrCreate(idx / kSize2); do { __sanitizer::u64 v1 = (u64)((0U)); __sanitizer::u64 v2 = (u64)((map2[idx % kSize2])); if (__builtin_expect(!!(!(v1 == v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_bytemap.h", 66, "(" "(0U)" ") " "==" " (" "(map2[idx % kSize2])" ")", v1, v2); } while (false); map2[idx % kSize2] = val; } u8 operator[] (uptr idx) const { do { __sanitizer::u64 v1 = (u64)((idx)); __sanitizer::u64 v2 = (u64)((kSize1 * kSize2)); if (__builtin_expect(!!(!(v1 < v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_bytemap.h", 71, "(" "(idx)" ") " "<" " (" "(kSize1 * kSize2)" ")", v1, v2); } while (false); u8 *map2 = Get(idx / kSize2); if (!map2) return 0; return map2[idx % kSize2]; } private: u8 *Get(uptr idx) const { do { __sanitizer::u64 v1 = (u64)((idx)); __sanitizer::u64 v2 = (u64)((kSize1)); if (__builtin_expect(!!(!(v1 < v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_bytemap.h", 79, "(" "(idx)" ") " "<" " (" "(kSize1)" ")", v1, v2); } while (false); return reinterpret_cast( atomic_load(&map1_[idx], memory_order_acquire)); } u8 *GetOrCreate(uptr idx) { u8 *res = Get(idx); if (!res) { SpinMutexLock l(&mu_); if (!(res = Get(idx))) { res = (u8*)MmapOrDie(kSize2, "TwoLevelByteMap"); MapUnmapCallback().OnMap(reinterpret_cast(res), kSize2); atomic_store(&map1_[idx], reinterpret_cast(res), memory_order_release); } } return res; } atomic_uintptr_t map1_[kSize1]; StaticSpinMutex mu_; }; # 48 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator.h" 2 # 1 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_primary32.h" 1 # 15 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_primary32.h" template struct SizeClassAllocator32LocalCache; # 37 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_primary32.h" template class SizeClassAllocator32 { public: struct TransferBatch { static const uptr kMaxNumCached = SizeClassMap::kMaxNumCachedHint - 2; void SetFromArray(uptr region_beg_unused, void *batch[], uptr count) { count_ = count; do { __sanitizer::u64 v1 = 
(u64)((count_)); __sanitizer::u64 v2 = (u64)((kMaxNumCached)); if (__builtin_expect(!!(!(v1 <= v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_primary32.h", 48, "(" "(count_)" ") " "<=" " (" "(kMaxNumCached)" ")", v1, v2); } while (false); for (uptr i = 0; i < count; i++) batch_[i] = batch[i]; } uptr Count() const { return count_; } void Clear() { count_ = 0; } void Add(void *ptr) { batch_[count_++] = ptr; do { __sanitizer::u64 v1 = (u64)((count_)); __sanitizer::u64 v2 = (u64)((kMaxNumCached)); if (__builtin_expect(!!(!(v1 <= v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_primary32.h", 56, "(" "(count_)" ") " "<=" " (" "(kMaxNumCached)" ")", v1, v2); } while (false); } void CopyToArray(void *to_batch[]) { for (uptr i = 0, n = Count(); i < n; i++) to_batch[i] = batch_[i]; } static uptr AllocationSizeRequiredForNElements(uptr n) { return sizeof(uptr) * 2 + sizeof(void *) * n; } static uptr MaxCached(uptr class_id) { return Min(kMaxNumCached, SizeClassMap::MaxCachedHint(class_id)); } TransferBatch *next; private: uptr count_; void *batch_[kMaxNumCached]; }; static const uptr kBatchSize = sizeof(TransferBatch); typedef char assertion_failed__79[2*(int)((kBatchSize & (kBatchSize - 1)) == 0)-1]; typedef char assertion_failed__81[2*(int)(sizeof(TransferBatch) == SizeClassMap::kMaxNumCachedHint * sizeof(uptr))-1]; static uptr ClassIdToSize(uptr class_id) { return SizeClassMap::Size(class_id); } typedef SizeClassAllocator32 ThisT; typedef SizeClassAllocator32LocalCache AllocatorCache; void Init() { possible_regions.TestOnlyInit(); internal_memset(size_class_info_array, 0, sizeof(size_class_info_array)); } void *MapWithCallback(uptr size) { size = RoundUpTo(size, GetPageSizeCached()); void *res = MmapOrDie(size, "SizeClassAllocator32"); MapUnmapCallback().OnMap((uptr)res, size); return res; } void UnmapWithCallback(uptr beg, uptr size) { MapUnmapCallback().OnUnmap(beg, size); UnmapOrDie(reinterpret_cast(beg), size); } static bool CanAllocate(uptr size, uptr alignment) { return size <= SizeClassMap::kMaxSize && alignment <= SizeClassMap::kMaxSize; } void *GetMetaData(const void *p) { do { __sanitizer::u64 v1 = (u64)((PointerIsMine(p))); __sanitizer::u64 v2 = (u64)(0); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_primary32.h", 114, "(" "(PointerIsMine(p))" ") " "!=" " (" "0" ")", v1, v2); } while (false); uptr mem = reinterpret_cast(p); uptr beg = ComputeRegionBeg(mem); uptr size = ClassIdToSize(GetSizeClass(p)); u32 offset = mem - beg; uptr n = offset / (u32)size; uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize; return reinterpret_cast(meta); } __attribute__((noinline)) TransferBatch *AllocateBatch(AllocatorStats *stat, AllocatorCache *c, uptr class_id) { do { __sanitizer::u64 v1 = (u64)((class_id)); __sanitizer::u64 v2 = (u64)((kNumClasses)); if (__builtin_expect(!!(!(v1 < v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_primary32.h", 126, "(" "(class_id)" ") " "<" " (" "(kNumClasses)" ")", v1, v2); } while (false); SizeClassInfo *sci = GetSizeClassInfo(class_id); SpinMutexLock l(&sci->mutex); if (sci->free_list.empty()) PopulateFreeList(stat, c, sci, class_id); do { __sanitizer::u64 v1 = (u64)((!sci->free_list.empty())); __sanitizer::u64 v2 = (u64)(0); if (__builtin_expect(!!(!(v1 != v2)), 0)) 
__sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_primary32.h", 131, "(" "(!sci->free_list.empty())" ") " "!=" " (" "0" ")", v1, v2); } while (false); TransferBatch *b = sci->free_list.front(); sci->free_list.pop_front(); return b; } __attribute__((noinline)) void DeallocateBatch(AllocatorStats *stat, uptr class_id, TransferBatch *b) { do { __sanitizer::u64 v1 = (u64)((class_id)); __sanitizer::u64 v2 = (u64)((kNumClasses)); if (__builtin_expect(!!(!(v1 < v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_primary32.h", 139, "(" "(class_id)" ") " "<" " (" "(kNumClasses)" ")", v1, v2); } while (false); SizeClassInfo *sci = GetSizeClassInfo(class_id); SpinMutexLock l(&sci->mutex); do { __sanitizer::u64 v1 = (u64)((b->Count())); __sanitizer::u64 v2 = (u64)((0)); if (__builtin_expect(!!(!(v1 > v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_primary32.h", 142, "(" "(b->Count())" ") " ">" " (" "(0)" ")", v1, v2); } while (false); sci->free_list.push_front(b); } uptr GetRegionBeginBySizeClass(uptr class_id) { return 0; } bool PointerIsMine(const void *p) { uptr mem = reinterpret_cast(p); if (mem < kSpaceBeg || mem >= kSpaceBeg + kSpaceSize) return false; return GetSizeClass(p) != 0; } uptr GetSizeClass(const void *p) { return possible_regions[ComputeRegionId(reinterpret_cast(p))]; } void *GetBlockBegin(const void *p) { do { __sanitizer::u64 v1 = (u64)((PointerIsMine(p))); __sanitizer::u64 v2 = (u64)(0); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_primary32.h", 160, "(" "(PointerIsMine(p))" ") " "!=" " (" "0" ")", v1, v2); } while (false); uptr mem = reinterpret_cast(p); uptr beg = ComputeRegionBeg(mem); uptr size = ClassIdToSize(GetSizeClass(p)); u32 offset = mem - beg; u32 n = offset / (u32)size; uptr res = beg + (n * (u32)size); return reinterpret_cast(res); } uptr GetActuallyAllocatedSize(void *p) { do { __sanitizer::u64 v1 = (u64)((PointerIsMine(p))); __sanitizer::u64 v2 = (u64)(0); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_primary32.h", 171, "(" "(PointerIsMine(p))" ") " "!=" " (" "0" ")", v1, v2); } while (false); return ClassIdToSize(GetSizeClass(p)); } uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); } uptr TotalMemoryUsed() { uptr res = 0; for (uptr i = 0; i < kNumPossibleRegions; i++) if (possible_regions[i]) res += kRegionSize; return res; } void TestOnlyUnmap() { for (uptr i = 0; i < kNumPossibleRegions; i++) if (possible_regions[i]) UnmapWithCallback((i * kRegionSize), kRegionSize); } void ForceLock() { for (uptr i = 0; i < kNumClasses; i++) { GetSizeClassInfo(i)->mutex.Lock(); } } void ForceUnlock() { for (int i = kNumClasses - 1; i >= 0; i--) { GetSizeClassInfo(i)->mutex.Unlock(); } } void ForEachChunk(ForEachChunkCallback callback, void *arg) { for (uptr region = 0; region < kNumPossibleRegions; region++) if (possible_regions[region]) { uptr chunk_size = ClassIdToSize(possible_regions[region]); uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize); uptr region_beg = region * kRegionSize; for (uptr chunk = region_beg; chunk < region_beg + max_chunks_in_region * chunk_size; chunk += chunk_size) { callback(chunk, arg); } } } void PrintStats() { } static uptr AdditionalSize() { return 0; } void ReleaseToOS() { } typedef SizeClassMap 
SizeClassMapT; static const uptr kNumClasses = SizeClassMap::kNumClasses; private: static const uptr kRegionSize = 1 << kRegionSizeLog; static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize; struct SizeClassInfo { SpinMutex mutex; IntrusiveList free_list; char padding[kCacheLineSize - sizeof(uptr) - sizeof(IntrusiveList)]; }; typedef char assertion_failed__247[2*(int)(sizeof(SizeClassInfo) == kCacheLineSize)-1]; uptr ComputeRegionId(uptr mem) { uptr res = mem >> kRegionSizeLog; do { __sanitizer::u64 v1 = (u64)((res)); __sanitizer::u64 v2 = (u64)((kNumPossibleRegions)); if (__builtin_expect(!!(!(v1 < v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_primary32.h", 251, "(" "(res)" ") " "<" " (" "(kNumPossibleRegions)" ")", v1, v2); } while (false); return res; } uptr ComputeRegionBeg(uptr mem) { return mem & ~(kRegionSize - 1); } uptr AllocateRegion(AllocatorStats *stat, uptr class_id) { do { __sanitizer::u64 v1 = (u64)((class_id)); __sanitizer::u64 v2 = (u64)((kNumClasses)); if (__builtin_expect(!!(!(v1 < v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_primary32.h", 260, "(" "(class_id)" ") " "<" " (" "(kNumClasses)" ")", v1, v2); } while (false); uptr res = reinterpret_cast(MmapAlignedOrDie(kRegionSize, kRegionSize, "SizeClassAllocator32")); MapUnmapCallback().OnMap(res, kRegionSize); stat->Add(AllocatorStatMapped, kRegionSize); do { __sanitizer::u64 v1 = (u64)((0U)); __sanitizer::u64 v2 = (u64)(((res & (kRegionSize - 1)))); if (__builtin_expect(!!(!(v1 == v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_primary32.h", 265, "(" "(0U)" ") " "==" " (" "((res & (kRegionSize - 1)))" ")", v1, v2); } while (false); possible_regions.set(ComputeRegionId(res), static_cast(class_id)); return res; } SizeClassInfo *GetSizeClassInfo(uptr class_id) { do { __sanitizer::u64 v1 = (u64)((class_id)); __sanitizer::u64 v2 = (u64)((kNumClasses)); if (__builtin_expect(!!(!(v1 < v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_primary32.h", 271, "(" "(class_id)" ") " "<" " (" "(kNumClasses)" ")", v1, v2); } while (false); return &size_class_info_array[class_id]; } void PopulateFreeList(AllocatorStats *stat, AllocatorCache *c, SizeClassInfo *sci, uptr class_id) { uptr size = ClassIdToSize(class_id); uptr reg = AllocateRegion(stat, class_id); uptr n_chunks = kRegionSize / (size + kMetadataSize); uptr max_count = TransferBatch::MaxCached(class_id); TransferBatch *b = nullptr; for (uptr i = reg; i < reg + n_chunks * size; i += size) { if (!b) { b = c->CreateBatch(class_id, this, (TransferBatch*)i); b->Clear(); } b->Add((void*)i); if (b->Count() == max_count) { do { __sanitizer::u64 v1 = (u64)((b->Count())); __sanitizer::u64 v2 = (u64)((0)); if (__builtin_expect(!!(!(v1 > v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_primary32.h", 289, "(" "(b->Count())" ") " ">" " (" "(0)" ")", v1, v2); } while (false); sci->free_list.push_back(b); b = nullptr; } } if (b) { do { __sanitizer::u64 v1 = (u64)((b->Count())); __sanitizer::u64 v2 = (u64)((0)); if (__builtin_expect(!!(!(v1 > v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_primary32.h", 295, "(" "(b->Count())" ") " ">" " (" "(0)" ")", v1, v2); } while (false); sci->free_list.push_back(b); } } ByteMap possible_regions; SizeClassInfo 
size_class_info_array[kNumClasses]; }; # 49 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator.h" 2 # 1 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_local_cache.h" 1 # 18 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_local_cache.h" template struct SizeClassAllocatorLocalCache : SizeClassAllocator::AllocatorCache { }; template struct SizeClassAllocator64LocalCache { typedef SizeClassAllocator Allocator; static const uptr kNumClasses = SizeClassAllocator::kNumClasses; typedef typename Allocator::SizeClassMapT SizeClassMap; typedef typename Allocator::CompactPtrT CompactPtrT; void Init(AllocatorGlobalStats *s) { stats_.Init(); if (s) s->Register(&stats_); } void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) { Drain(allocator); if (s) s->Unregister(&stats_); } void *Allocate(SizeClassAllocator *allocator, uptr class_id) { do { __sanitizer::u64 v1 = (u64)((class_id)); __sanitizer::u64 v2 = (u64)((0UL)); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_local_cache.h", 44, "(" "(class_id)" ") " "!=" " (" "(0UL)" ")", v1, v2); } while (false); do { __sanitizer::u64 v1 = (u64)((class_id)); __sanitizer::u64 v2 = (u64)((kNumClasses)); if (__builtin_expect(!!(!(v1 < v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_local_cache.h", 45, "(" "(class_id)" ") " "<" " (" "(kNumClasses)" ")", v1, v2); } while (false); stats_.Add(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id)); PerClass *c = &per_class_[class_id]; if (__builtin_expect(!!(c->count == 0), 0)) Refill(c, allocator, class_id); do { __sanitizer::u64 v1 = (u64)((c->count)); __sanitizer::u64 v2 = (u64)((0)); if (__builtin_expect(!!(!(v1 > v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_local_cache.h", 50, "(" "(c->count)" ") " ">" " (" "(0)" ")", v1, v2); } while (false); CompactPtrT chunk = c->chunks[--c->count]; void *res = reinterpret_cast(allocator->CompactPtrToPointer( allocator->GetRegionBeginBySizeClass(class_id), chunk)); return res; } void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) { do { __sanitizer::u64 v1 = (u64)((class_id)); __sanitizer::u64 v2 = (u64)((0UL)); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_local_cache.h", 58, "(" "(class_id)" ") " "!=" " (" "(0UL)" ")", v1, v2); } while (false); do { __sanitizer::u64 v1 = (u64)((class_id)); __sanitizer::u64 v2 = (u64)((kNumClasses)); if (__builtin_expect(!!(!(v1 < v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_local_cache.h", 59, "(" "(class_id)" ") " "<" " (" "(kNumClasses)" ")", v1, v2); } while (false); InitCache(); stats_.Sub(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id)); PerClass *c = &per_class_[class_id]; do { __sanitizer::u64 v1 = (u64)((c->max_count)); __sanitizer::u64 v2 = (u64)((0UL)); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_local_cache.h", 65, "(" "(c->max_count)" ") " "!=" " (" "(0UL)" ")", v1, v2); } while (false); if (__builtin_expect(!!(c->count == c->max_count), 0)) Drain(c, allocator, class_id, c->max_count / 2); CompactPtrT chunk = allocator->PointerToCompactPtr( allocator->GetRegionBeginBySizeClass(class_id), 
reinterpret_cast(p)); c->chunks[c->count++] = chunk; } void Drain(SizeClassAllocator *allocator) { for (uptr class_id = 0; class_id < kNumClasses; class_id++) { PerClass *c = &per_class_[class_id]; while (c->count > 0) Drain(c, allocator, class_id, c->count); } } struct PerClass { u32 count; u32 max_count; CompactPtrT chunks[2 * SizeClassMap::kMaxNumCachedHint]; }; PerClass per_class_[kNumClasses]; AllocatorStats stats_; void InitCache() { if (per_class_[1].max_count) return; for (uptr i = 0; i < kNumClasses; i++) { PerClass *c = &per_class_[i]; c->max_count = 2 * SizeClassMap::MaxCachedHint(i); } } __attribute__((noinline)) void Refill(PerClass *c, SizeClassAllocator *allocator, uptr class_id) { InitCache(); uptr num_requested_chunks = SizeClassMap::MaxCachedHint(class_id); allocator->GetFromAllocator(&stats_, class_id, c->chunks, num_requested_chunks); c->count = num_requested_chunks; } __attribute__((noinline)) void Drain(PerClass *c, SizeClassAllocator *allocator, uptr class_id, uptr count) { InitCache(); do { __sanitizer::u64 v1 = (u64)((c->count)); __sanitizer::u64 v2 = (u64)((count)); if (__builtin_expect(!!(!(v1 >= v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_local_cache.h", 112, "(" "(c->count)" ") " ">=" " (" "(count)" ")", v1, v2); } while (false); uptr first_idx_to_drain = c->count - count; c->count -= count; allocator->ReturnToAllocator(&stats_, class_id, &c->chunks[first_idx_to_drain], count); } }; template struct SizeClassAllocator32LocalCache { typedef SizeClassAllocator Allocator; typedef typename Allocator::TransferBatch TransferBatch; static const uptr kNumClasses = SizeClassAllocator::kNumClasses; void Init(AllocatorGlobalStats *s) { stats_.Init(); if (s) s->Register(&stats_); } void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) { Drain(allocator); if (s) s->Unregister(&stats_); } void *Allocate(SizeClassAllocator *allocator, uptr class_id) { do { __sanitizer::u64 v1 = (u64)((class_id)); __sanitizer::u64 v2 = (u64)((0UL)); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_local_cache.h", 140, "(" "(class_id)" ") " "!=" " (" "(0UL)" ")", v1, v2); } while (false); do { __sanitizer::u64 v1 = (u64)((class_id)); __sanitizer::u64 v2 = (u64)((kNumClasses)); if (__builtin_expect(!!(!(v1 < v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_local_cache.h", 141, "(" "(class_id)" ") " "<" " (" "(kNumClasses)" ")", v1, v2); } while (false); stats_.Add(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id)); PerClass *c = &per_class_[class_id]; if (__builtin_expect(!!(c->count == 0), 0)) Refill(allocator, class_id); void *res = c->batch[--c->count]; __asm__("prefetchnta (%0)" : : "r" (c->batch[c->count - 1])); return res; } void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) { do { __sanitizer::u64 v1 = (u64)((class_id)); __sanitizer::u64 v2 = (u64)((0UL)); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_local_cache.h", 152, "(" "(class_id)" ") " "!=" " (" "(0UL)" ")", v1, v2); } while (false); do { __sanitizer::u64 v1 = (u64)((class_id)); __sanitizer::u64 v2 = (u64)((kNumClasses)); if (__builtin_expect(!!(!(v1 < v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_local_cache.h", 153, "(" "(class_id)" ") " "<" " (" 
"(kNumClasses)" ")", v1, v2); } while (false); InitCache(); stats_.Sub(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id)); PerClass *c = &per_class_[class_id]; do { __sanitizer::u64 v1 = (u64)((c->max_count)); __sanitizer::u64 v2 = (u64)((0UL)); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_local_cache.h", 159, "(" "(c->max_count)" ") " "!=" " (" "(0UL)" ")", v1, v2); } while (false); if (__builtin_expect(!!(c->count == c->max_count), 0)) Drain(allocator, class_id); c->batch[c->count++] = p; } void Drain(SizeClassAllocator *allocator) { for (uptr class_id = 0; class_id < kNumClasses; class_id++) { PerClass *c = &per_class_[class_id]; while (c->count > 0) Drain(allocator, class_id); } } typedef typename SizeClassAllocator::SizeClassMapT SizeClassMap; struct PerClass { uptr count; uptr max_count; void *batch[2 * TransferBatch::kMaxNumCached]; }; PerClass per_class_[kNumClasses]; AllocatorStats stats_; void InitCache() { if (per_class_[1].max_count) return; for (uptr i = 0; i < kNumClasses; i++) { PerClass *c = &per_class_[i]; c->max_count = 2 * TransferBatch::MaxCached(i); } } static uptr SizeClassForTransferBatch(uptr class_id) { if (Allocator::ClassIdToSize(class_id) < TransferBatch::AllocationSizeRequiredForNElements( TransferBatch::MaxCached(class_id))) return SizeClassMap::ClassID(sizeof(TransferBatch)); return 0; } TransferBatch *CreateBatch(uptr class_id, SizeClassAllocator *allocator, TransferBatch *b) { if (uptr batch_class_id = SizeClassForTransferBatch(class_id)) return (TransferBatch*)Allocate(allocator, batch_class_id); return b; } void DestroyBatch(uptr class_id, SizeClassAllocator *allocator, TransferBatch *b) { if (uptr batch_class_id = SizeClassForTransferBatch(class_id)) Deallocate(allocator, batch_class_id, b); } __attribute__((noinline)) void Refill(SizeClassAllocator *allocator, uptr class_id) { InitCache(); PerClass *c = &per_class_[class_id]; TransferBatch *b = allocator->AllocateBatch(&stats_, this, class_id); do { __sanitizer::u64 v1 = (u64)((b->Count())); __sanitizer::u64 v2 = (u64)((0)); if (__builtin_expect(!!(!(v1 > v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_local_cache.h", 228, "(" "(b->Count())" ") " ">" " (" "(0)" ")", v1, v2); } while (false); b->CopyToArray(c->batch); c->count = b->Count(); DestroyBatch(class_id, allocator, b); } __attribute__((noinline)) void Drain(SizeClassAllocator *allocator, uptr class_id) { InitCache(); PerClass *c = &per_class_[class_id]; uptr cnt = Min(c->max_count / 2, c->count); uptr first_idx_to_drain = c->count - cnt; TransferBatch *b = CreateBatch( class_id, allocator, (TransferBatch *)c->batch[first_idx_to_drain]); b->SetFromArray(allocator->GetRegionBeginBySizeClass(class_id), &c->batch[first_idx_to_drain], cnt); c->count -= cnt; allocator->DeallocateBatch(&stats_, class_id, b); } }; # 50 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator.h" 2 # 1 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_secondary.h" 1 # 18 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_secondary.h" template class LargeMmapAllocator { public: void InitLinkerInitialized(bool may_return_null) { page_size_ = GetPageSizeCached(); atomic_store(&may_return_null_, may_return_null, memory_order_relaxed); } void Init(bool may_return_null) { internal_memset(this, 0, sizeof(*this)); InitLinkerInitialized(may_return_null); } void *Allocate(AllocatorStats 
*stat, uptr size, uptr alignment) { do { __sanitizer::u64 v1 = (u64)((IsPowerOfTwo(alignment))); __sanitizer::u64 v2 = (u64)(0); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_secondary.h", 32, "(" "(IsPowerOfTwo(alignment))" ") " "!=" " (" "0" ")", v1, v2); } while (false); uptr map_size = RoundUpMapSize(size); if (alignment > page_size_) map_size += alignment; if (map_size < size) return ReturnNullOrDieOnBadRequest(); uptr map_beg = reinterpret_cast( MmapOrDie(map_size, "LargeMmapAllocator")); do { __sanitizer::u64 v1 = (u64)((IsAligned(map_beg, page_size_))); __sanitizer::u64 v2 = (u64)(0); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_secondary.h", 40, "(" "(IsAligned(map_beg, page_size_))" ") " "!=" " (" "0" ")", v1, v2); } while (false); MapUnmapCallback().OnMap(map_beg, map_size); uptr map_end = map_beg + map_size; uptr res = map_beg + page_size_; if (res & (alignment - 1)) res += alignment - (res & (alignment - 1)); do { __sanitizer::u64 v1 = (u64)((IsAligned(res, alignment))); __sanitizer::u64 v2 = (u64)(0); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_secondary.h", 46, "(" "(IsAligned(res, alignment))" ") " "!=" " (" "0" ")", v1, v2); } while (false); do { __sanitizer::u64 v1 = (u64)((IsAligned(res, page_size_))); __sanitizer::u64 v2 = (u64)(0); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_secondary.h", 47, "(" "(IsAligned(res, page_size_))" ") " "!=" " (" "0" ")", v1, v2); } while (false); do { __sanitizer::u64 v1 = (u64)((res + size)); __sanitizer::u64 v2 = (u64)((map_beg)); if (__builtin_expect(!!(!(v1 >= v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_secondary.h", 48, "(" "(res + size)" ") " ">=" " (" "(map_beg)" ")", v1, v2); } while (false); do { __sanitizer::u64 v1 = (u64)((res + size)); __sanitizer::u64 v2 = (u64)((map_end)); if (__builtin_expect(!!(!(v1 <= v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_secondary.h", 49, "(" "(res + size)" ") " "<=" " (" "(map_end)" ")", v1, v2); } while (false); Header *h = GetHeader(res); h->size = size; h->map_beg = map_beg; h->map_size = map_size; uptr size_log = MostSignificantSetBitIndex(map_size); do { __sanitizer::u64 v1 = (u64)((size_log)); __sanitizer::u64 v2 = (u64)(((sizeof(stats.by_size_log)/sizeof((stats.by_size_log)[0])))); if (__builtin_expect(!!(!(v1 < v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_secondary.h", 55, "(" "(size_log)" ") " "<" " (" "((sizeof(stats.by_size_log)/sizeof((stats.by_size_log)[0])))" ")", v1, v2); } while (false); { SpinMutexLock l(&mutex_); uptr idx = n_chunks_++; chunks_sorted_ = false; do { __sanitizer::u64 v1 = (u64)((idx)); __sanitizer::u64 v2 = (u64)((kMaxNumChunks)); if (__builtin_expect(!!(!(v1 < v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_secondary.h", 60, "(" "(idx)" ") " "<" " (" "(kMaxNumChunks)" ")", v1, v2); } while (false); h->chunk_idx = idx; chunks_[idx] = h; stats.n_allocs++; stats.currently_allocated += map_size; stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated); 
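/* Editor's note (annotation, not original runtime code): a sketch of the
   mapping layout LargeMmapAllocator::Allocate maintains, reconstructed from
   the arithmetic above. RoundUpMapSize(size), defined near the end of this
   class, is RoundUpTo(size, page_size_) + page_size_, so every chunk carries
   one extra page in front of the user block:

     map_beg                       res = map_beg + page_size_
     |<--- header page --->|<-- RoundUpTo(size, page_size_) user bytes -->|

   With a hypothetical page_size_ of 4096 and size = 10000:
     map_size = RoundUpTo(10000, 4096) + 4096 = 12288 + 4096 = 16384 bytes,
   the Header lives at res - page_size_ (see GetHeader below), and when
   alignment > page_size_ the extra `alignment` bytes added to map_size above
   leave room to slide `res` forward to the next aligned address. */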
stats.by_size_log[size_log]++; stat->Add(AllocatorStatAllocated, map_size); stat->Add(AllocatorStatMapped, map_size); } return reinterpret_cast(res); } bool MayReturnNull() const { return atomic_load(&may_return_null_, memory_order_acquire); } void *ReturnNullOrDieOnBadRequest() { if (MayReturnNull()) return nullptr; ReportAllocatorCannotReturnNull(false); } void *ReturnNullOrDieOnOOM() { if (MayReturnNull()) return nullptr; ReportAllocatorCannotReturnNull(true); } void SetMayReturnNull(bool may_return_null) { atomic_store(&may_return_null_, may_return_null, memory_order_release); } void Deallocate(AllocatorStats *stat, void *p) { Header *h = GetHeader(p); { SpinMutexLock l(&mutex_); uptr idx = h->chunk_idx; do { __sanitizer::u64 v1 = (u64)((chunks_[idx])); __sanitizer::u64 v2 = (u64)((h)); if (__builtin_expect(!!(!(v1 == v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_secondary.h", 96, "(" "(chunks_[idx])" ") " "==" " (" "(h)" ")", v1, v2); } while (false); do { __sanitizer::u64 v1 = (u64)((idx)); __sanitizer::u64 v2 = (u64)((n_chunks_)); if (__builtin_expect(!!(!(v1 < v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_secondary.h", 97, "(" "(idx)" ") " "<" " (" "(n_chunks_)" ")", v1, v2); } while (false); chunks_[idx] = chunks_[n_chunks_ - 1]; chunks_[idx]->chunk_idx = idx; n_chunks_--; chunks_sorted_ = false; stats.n_frees++; stats.currently_allocated -= h->map_size; stat->Sub(AllocatorStatAllocated, h->map_size); stat->Sub(AllocatorStatMapped, h->map_size); } MapUnmapCallback().OnUnmap(h->map_beg, h->map_size); UnmapOrDie(reinterpret_cast(h->map_beg), h->map_size); } uptr TotalMemoryUsed() { SpinMutexLock l(&mutex_); uptr res = 0; for (uptr i = 0; i < n_chunks_; i++) { Header *h = chunks_[i]; do { __sanitizer::u64 v1 = (u64)((h->chunk_idx)); __sanitizer::u64 v2 = (u64)((i)); if (__builtin_expect(!!(!(v1 == v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_secondary.h", 116, "(" "(h->chunk_idx)" ") " "==" " (" "(i)" ")", v1, v2); } while (false); res += RoundUpMapSize(h->size); } return res; } bool PointerIsMine(const void *p) { return GetBlockBegin(p) != nullptr; } uptr GetActuallyAllocatedSize(void *p) { return RoundUpTo(GetHeader(p)->size, page_size_); } void *GetMetaData(const void *p) { if (!IsAligned(reinterpret_cast(p), page_size_)) { Printf("%s: bad pointer %p\n", SanitizerToolName, p); do { __sanitizer::u64 v1 = (u64)((IsAligned(reinterpret_cast(p), page_size_))); __sanitizer::u64 v2 = (u64)(0); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_secondary.h", 135, "(" "(IsAligned(reinterpret_cast(p), page_size_))" ") " "!=" " (" "0" ")", v1, v2); } while (false); } return GetHeader(p) + 1; } void *GetBlockBegin(const void *ptr) { uptr p = reinterpret_cast(ptr); SpinMutexLock l(&mutex_); uptr nearest_chunk = 0; for (uptr i = 0; i < n_chunks_; i++) { uptr ch = reinterpret_cast(chunks_[i]); if (p < ch) continue; if (p - ch < p - nearest_chunk) nearest_chunk = ch; } if (!nearest_chunk) return nullptr; Header *h = reinterpret_cast
(nearest_chunk); do { __sanitizer::u64 v1 = (u64)((nearest_chunk)); __sanitizer::u64 v2 = (u64)((h->map_beg)); if (__builtin_expect(!!(!(v1 >= v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_secondary.h", 154, "(" "(nearest_chunk)" ") " ">=" " (" "(h->map_beg)" ")", v1, v2); } while (false); do { __sanitizer::u64 v1 = (u64)((nearest_chunk)); __sanitizer::u64 v2 = (u64)((h->map_beg + h->map_size)); if (__builtin_expect(!!(!(v1 < v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_secondary.h", 155, "(" "(nearest_chunk)" ") " "<" " (" "(h->map_beg + h->map_size)" ")", v1, v2); } while (false); do { __sanitizer::u64 v1 = (u64)((nearest_chunk)); __sanitizer::u64 v2 = (u64)((p)); if (__builtin_expect(!!(!(v1 <= v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_secondary.h", 156, "(" "(nearest_chunk)" ") " "<=" " (" "(p)" ")", v1, v2); } while (false); if (h->map_beg + h->map_size <= p) return nullptr; return GetUser(h); } void *GetBlockBeginFastLocked(void *ptr) { mutex_.CheckLocked(); uptr p = reinterpret_cast(ptr); uptr n = n_chunks_; if (!n) return nullptr; if (!chunks_sorted_) { SortArray(reinterpret_cast(chunks_), n); for (uptr i = 0; i < n; i++) chunks_[i]->chunk_idx = i; chunks_sorted_ = true; min_mmap_ = reinterpret_cast(chunks_[0]); max_mmap_ = reinterpret_cast(chunks_[n - 1]) + chunks_[n - 1]->map_size; } if (p < min_mmap_ || p >= max_mmap_) return nullptr; uptr beg = 0, end = n - 1; while (end - beg >= 2) { uptr mid = (beg + end) / 2; if (p < reinterpret_cast(chunks_[mid])) end = mid - 1; else beg = mid; } if (beg < end) { do { __sanitizer::u64 v1 = (u64)((beg + 1)); __sanitizer::u64 v2 = (u64)((end)); if (__builtin_expect(!!(!(v1 == v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_secondary.h", 193, "(" "(beg + 1)" ") " "==" " (" "(end)" ")", v1, v2); } while (false); if (p >= reinterpret_cast(chunks_[end])) beg = end; } Header *h = chunks_[beg]; if (h->map_beg + h->map_size <= p || p < h->map_beg) return nullptr; return GetUser(h); } void PrintStats() { Printf("Stats: LargeMmapAllocator: allocated %zd times, " "remains %zd (%zd K) max %zd M; by size logs: ", stats.n_allocs, stats.n_allocs - stats.n_frees, stats.currently_allocated >> 10, stats.max_allocated >> 20); for (uptr i = 0; i < (sizeof(stats.by_size_log)/sizeof((stats.by_size_log)[0])); i++) { uptr c = stats.by_size_log[i]; if (!c) continue; Printf("%zd:%zd; ", i, c); } Printf("\n"); } void ForceLock() { mutex_.Lock(); } void ForceUnlock() { mutex_.Unlock(); } void ForEachChunk(ForEachChunkCallback callback, void *arg) { for (uptr i = 0; i < n_chunks_; i++) callback(reinterpret_cast(GetUser(chunks_[i])), arg); } private: static const int kMaxNumChunks = 1 << (18); struct Header { uptr map_beg; uptr map_size; uptr size; uptr chunk_idx; }; Header *GetHeader(uptr p) { do { __sanitizer::u64 v1 = (u64)((IsAligned(p, page_size_))); __sanitizer::u64 v2 = (u64)(0); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_secondary.h", 245, "(" "(IsAligned(p, page_size_))" ") " "!=" " (" "0" ")", v1, v2); } while (false); return reinterpret_cast(p - page_size_); } Header *GetHeader(const void *p) { return GetHeader(reinterpret_cast(p)); } void *GetUser(Header *h) { do { __sanitizer::u64 v1 = (u64)((IsAligned((uptr)h, page_size_))); __sanitizer::u64 v2 
= (u64)(0); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_secondary.h", 253, "(" "(IsAligned((uptr)h, page_size_))" ") " "!=" " (" "0" ")", v1, v2); } while (false); return reinterpret_cast(reinterpret_cast(h) + page_size_); } uptr RoundUpMapSize(uptr size) { return RoundUpTo(size, page_size_) + page_size_; } uptr page_size_; Header *chunks_[kMaxNumChunks]; uptr n_chunks_; uptr min_mmap_, max_mmap_; bool chunks_sorted_; struct Stats { uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64]; } stats; atomic_uint8_t may_return_null_; SpinMutex mutex_; }; # 51 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator.h" 2 # 1 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_combined.h" 1 # 21 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_combined.h" template class CombinedAllocator { public: void InitCommon(bool may_return_null) { primary_.Init(); atomic_store(&may_return_null_, may_return_null, memory_order_relaxed); } void InitLinkerInitialized(bool may_return_null) { secondary_.InitLinkerInitialized(may_return_null); stats_.InitLinkerInitialized(); InitCommon(may_return_null); } void Init(bool may_return_null) { secondary_.Init(may_return_null); stats_.Init(); InitCommon(may_return_null); } void *Allocate(AllocatorCache *cache, uptr size, uptr alignment, bool cleared = false, bool check_rss_limit = false) { if (size == 0) size = 1; if (size + alignment < size) return ReturnNullOrDieOnBadRequest(); if (check_rss_limit && RssLimitIsExceeded()) return ReturnNullOrDieOnOOM(); if (alignment > 8) size = RoundUpTo(size, alignment); void *res; bool from_primary = primary_.CanAllocate(size, alignment); if (from_primary) res = cache->Allocate(&primary_, primary_.ClassID(size)); else res = secondary_.Allocate(&stats_, size, alignment); if (alignment > 8) do { __sanitizer::u64 v1 = (u64)((reinterpret_cast(res) & (alignment - 1))); __sanitizer::u64 v2 = (u64)((0)); if (__builtin_expect(!!(!(v1 == v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_combined.h", 58, "(" "(reinterpret_cast(res) & (alignment - 1))" ") " "==" " (" "(0)" ")", v1, v2); } while (false); if (cleared && res && from_primary) internal_bzero_aligned16(res, RoundUpTo(size, 16)); return res; } bool MayReturnNull() const { return atomic_load(&may_return_null_, memory_order_acquire); } void *ReturnNullOrDieOnBadRequest() { if (MayReturnNull()) return nullptr; ReportAllocatorCannotReturnNull(false); } void *ReturnNullOrDieOnOOM() { if (MayReturnNull()) return nullptr; ReportAllocatorCannotReturnNull(true); } void SetMayReturnNull(bool may_return_null) { secondary_.SetMayReturnNull(may_return_null); atomic_store(&may_return_null_, may_return_null, memory_order_release); } bool RssLimitIsExceeded() { return atomic_load(&rss_limit_is_exceeded_, memory_order_acquire); } void SetRssLimitIsExceeded(bool rss_limit_is_exceeded) { atomic_store(&rss_limit_is_exceeded_, rss_limit_is_exceeded, memory_order_release); } void Deallocate(AllocatorCache *cache, void *p) { if (!p) return; if (primary_.PointerIsMine(p)) cache->Deallocate(&primary_, primary_.GetSizeClass(p), p); else secondary_.Deallocate(&stats_, p); } void *Reallocate(AllocatorCache *cache, void *p, uptr new_size, uptr alignment) { if (!p) return Allocate(cache, new_size, alignment); if (!new_size) { Deallocate(cache, p); return nullptr; } do { __sanitizer::u64 v1 = 
(u64)((PointerIsMine(p))); __sanitizer::u64 v2 = (u64)(0); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_combined.h", 109, "(" "(PointerIsMine(p))" ") " "!=" " (" "0" ")", v1, v2); } while (false); uptr old_size = GetActuallyAllocatedSize(p); uptr memcpy_size = Min(new_size, old_size); void *new_p = Allocate(cache, new_size, alignment); if (new_p) internal_memcpy(new_p, p, memcpy_size); Deallocate(cache, p); return new_p; } bool PointerIsMine(void *p) { if (primary_.PointerIsMine(p)) return true; return secondary_.PointerIsMine(p); } bool FromPrimary(void *p) { return primary_.PointerIsMine(p); } void *GetMetaData(const void *p) { if (primary_.PointerIsMine(p)) return primary_.GetMetaData(p); return secondary_.GetMetaData(p); } void *GetBlockBegin(const void *p) { if (primary_.PointerIsMine(p)) return primary_.GetBlockBegin(p); return secondary_.GetBlockBegin(p); } void *GetBlockBeginFastLocked(void *p) { if (primary_.PointerIsMine(p)) return primary_.GetBlockBegin(p); return secondary_.GetBlockBeginFastLocked(p); } uptr GetActuallyAllocatedSize(void *p) { if (primary_.PointerIsMine(p)) return primary_.GetActuallyAllocatedSize(p); return secondary_.GetActuallyAllocatedSize(p); } uptr TotalMemoryUsed() { return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed(); } void TestOnlyUnmap() { primary_.TestOnlyUnmap(); } void InitCache(AllocatorCache *cache) { cache->Init(&stats_); } void DestroyCache(AllocatorCache *cache) { cache->Destroy(&primary_, &stats_); } void SwallowCache(AllocatorCache *cache) { cache->Drain(&primary_); } void GetStats(AllocatorStatCounters s) const { stats_.Get(s); } void PrintStats() { primary_.PrintStats(); secondary_.PrintStats(); } void ForceLock() { primary_.ForceLock(); secondary_.ForceLock(); } void ForceUnlock() { secondary_.ForceUnlock(); primary_.ForceUnlock(); } void ReleaseToOS() { primary_.ReleaseToOS(); } void ForEachChunk(ForEachChunkCallback callback, void *arg) { primary_.ForEachChunk(callback, arg); secondary_.ForEachChunk(callback, arg); } private: PrimaryAllocator primary_; SecondaryAllocator secondary_; AllocatorGlobalStats stats_; atomic_uint8_t may_return_null_; atomic_uint8_t rss_limit_is_exceeded_; }; # 52 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator.h" 2 } # 28 "../../../../gcc/libsanitizer/tsan/tsan_rtl.h" 2 # 1 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_internal.h" 1 # 18 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_allocator_internal.h" namespace __sanitizer { typedef CompactSizeClassMap InternalSizeClassMap; static const uptr kInternalAllocatorSpace = 0; static const u64 kInternalAllocatorSize = (1ULL << 47); static const uptr kInternalAllocatorRegionSizeLog = 20; static const uptr kInternalAllocatorNumRegions = kInternalAllocatorSize >> kInternalAllocatorRegionSizeLog; typedef TwoLevelByteMap<(kInternalAllocatorNumRegions >> 12), 1 << 12> ByteMap; typedef SizeClassAllocator32< kInternalAllocatorSpace, kInternalAllocatorSize, 0, InternalSizeClassMap, kInternalAllocatorRegionSizeLog, ByteMap> PrimaryInternalAllocator; typedef SizeClassAllocatorLocalCache InternalAllocatorCache; typedef CombinedAllocator > InternalAllocator; void *InternalAlloc(uptr size, InternalAllocatorCache *cache = nullptr, uptr alignment = 0); void *InternalRealloc(void *p, uptr size, InternalAllocatorCache *cache = nullptr); void *InternalCalloc(uptr countr, uptr size, InternalAllocatorCache *cache = nullptr); void 
InternalFree(void *p, InternalAllocatorCache *cache = nullptr); InternalAllocator *internal_allocator(); enum InternalAllocEnum { INTERNAL_ALLOC }; } inline void *operator new(__sanitizer::operator_new_size_type size, __sanitizer::InternalAllocEnum) { return __sanitizer::InternalAlloc(size); } # 29 "../../../../gcc/libsanitizer/tsan/tsan_rtl.h" 2 # 1 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_asm.h" 1 # 30 "../../../../gcc/libsanitizer/tsan/tsan_rtl.h" 2 # 1 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_deadlock_detector_interface.h" 1 # 24 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_deadlock_detector_interface.h" namespace __sanitizer { struct DDPhysicalThread; struct DDLogicalThread; struct DDMutex { uptr id; u32 stk; u64 ctx; }; struct DDFlags { bool second_deadlock_stack; }; struct DDReport { enum { kMaxLoopSize = 20 }; int n; struct { u64 thr_ctx; u64 mtx_ctx0; u64 mtx_ctx1; u32 stk[2]; } loop[kMaxLoopSize]; }; struct DDCallback { DDPhysicalThread *pt; DDLogicalThread *lt; virtual u32 Unwind() { return 0; } virtual int UniqueTid() { return 0; } }; struct DDetector { static DDetector *Create(const DDFlags *flags); virtual DDPhysicalThread* CreatePhysicalThread() { return nullptr; } virtual void DestroyPhysicalThread(DDPhysicalThread *pt) {} virtual DDLogicalThread* CreateLogicalThread(u64 ctx) { return nullptr; } virtual void DestroyLogicalThread(DDLogicalThread *lt) {} virtual void MutexInit(DDCallback *cb, DDMutex *m) {} virtual void MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) {} virtual void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock, bool trylock) {} virtual void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) {} virtual void MutexDestroy(DDCallback *cb, DDMutex *m) {} virtual DDReport *GetReport(DDCallback *cb) { return nullptr; } }; } # 32 "../../../../gcc/libsanitizer/tsan/tsan_rtl.h" 2 # 1 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_libignore.h" 1 # 23 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_libignore.h" namespace __sanitizer { class LibIgnore { public: explicit LibIgnore(LinkerInitialized); void AddIgnoredLibrary(const char *name_templ); void OnLibraryLoaded(const char *name); void OnLibraryUnloaded(); bool IsIgnored(uptr pc) const; private: struct Lib { char *templ; char *name; char *real_name; bool loaded; }; struct LibCodeRange { uptr begin; uptr end; }; static const uptr kMaxLibs = 128; atomic_uintptr_t loaded_count_; LibCodeRange code_ranges_[kMaxLibs]; BlockingMutex mutex_; uptr count_; Lib libs_[kMaxLibs]; LibIgnore(const LibIgnore&); void operator = (const LibIgnore&); }; inline bool LibIgnore::IsIgnored(uptr pc) const { const uptr n = atomic_load(&loaded_count_, memory_order_acquire); for (uptr i = 0; i < n; i++) { if (pc >= code_ranges_[i].begin && pc < code_ranges_[i].end) return true; } return false; } } # 33 "../../../../gcc/libsanitizer/tsan/tsan_rtl.h" 2 # 1 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_suppressions.h" 1 # 18 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_suppressions.h" namespace __sanitizer { struct Suppression { Suppression() { internal_memset(this, 0, sizeof(*this)); } const char *type; char *templ; atomic_uint32_t hit_count; uptr weight; }; class SuppressionContext { public: SuppressionContext(const char *supprression_types[], int suppression_types_num); void ParseFromFile(const char *filename); void Parse(const char *str); bool Match(const char *str, const char *type, Suppression **s); uptr SuppressionCount() const; 
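/* Editor's note (illustrative sketch, not from the original source): how a
   tool is expected to drive this class. It constructs one context over the
   suppression types it understands, parses user-supplied rules, and consults
   Match() before emitting a report. Names such as kTypes and MyLib are
   hypothetical; the signatures match the declarations in this class:

     static const char *kTypes[] = {"race", "thread", "mutex", "signal"};
     SuppressionContext supp(kTypes, 4);
     supp.Parse("race:MyLib::*\nthread:*worker*\n");  // one type:pattern rule per line
     Suppression *s = nullptr;
     if (supp.Match("MyLib::PushJob", "race", &s))    // a rule template matched
       atomic_fetch_add(&s->hit_count, 1, memory_order_relaxed);
*/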
bool HasSuppressionType(const char *type) const; const Suppression *SuppressionAt(uptr i) const; void GetMatched(InternalMmapVector *matched); private: static const int kMaxSuppressionTypes = 32; const char **const suppression_types_; const int suppression_types_num_; InternalMmapVector suppressions_; bool has_suppression_type_[kMaxSuppressionTypes]; bool can_parse_; }; } # 34 "../../../../gcc/libsanitizer/tsan/tsan_rtl.h" 2 # 1 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_thread_registry.h" 1 # 20 "../../../../gcc/libsanitizer/sanitizer_common/sanitizer_thread_registry.h" namespace __sanitizer { enum ThreadStatus { ThreadStatusInvalid, ThreadStatusCreated, ThreadStatusRunning, ThreadStatusFinished, ThreadStatusDead }; class ThreadContextBase { public: explicit ThreadContextBase(u32 tid); ~ThreadContextBase(); const u32 tid; u64 unique_id; u32 reuse_count; uptr os_id; uptr user_id; char name[64]; ThreadStatus status; bool detached; u32 parent_tid; ThreadContextBase *next; void SetName(const char *new_name); void SetDead(); void SetJoined(void *arg); void SetFinished(); void SetStarted(uptr _os_id, void *arg); void SetCreated(uptr _user_id, u64 _unique_id, bool _detached, u32 _parent_tid, void *arg); void Reset(); virtual void OnDead() {} virtual void OnJoined(void *arg) {} virtual void OnFinished() {} virtual void OnStarted(void *arg) {} virtual void OnCreated(void *arg) {} virtual void OnReset() {} virtual void OnDetached(void *arg) {} }; typedef ThreadContextBase* (*ThreadContextFactory)(u32 tid); class ThreadRegistry { public: static const u32 kUnknownTid; ThreadRegistry(ThreadContextFactory factory, u32 max_threads, u32 thread_quarantine_size, u32 max_reuse = 0); void GetNumberOfThreads(uptr *total = nullptr, uptr *running = nullptr, uptr *alive = nullptr); uptr GetMaxAliveThreads(); void Lock() { mtx_.Lock(); } void CheckLocked() { mtx_.CheckLocked(); } void Unlock() { mtx_.Unlock(); } ThreadContextBase *GetThreadLocked(u32 tid) { ; return threads_[tid]; } u32 CreateThread(uptr user_id, bool detached, u32 parent_tid, void *arg); typedef void (*ThreadCallback)(ThreadContextBase *tctx, void *arg); void RunCallbackForEachThreadLocked(ThreadCallback cb, void *arg); typedef bool (*FindThreadCallback)(ThreadContextBase *tctx, void *arg); u32 FindThread(FindThreadCallback cb, void *arg); ThreadContextBase *FindThreadContextLocked(FindThreadCallback cb, void *arg); ThreadContextBase *FindThreadContextByOsIDLocked(uptr os_id); void SetThreadName(u32 tid, const char *name); void SetThreadNameByUserId(uptr user_id, const char *name); void DetachThread(u32 tid, void *arg); void JoinThread(u32 tid, void *arg); void FinishThread(u32 tid); void StartThread(u32 tid, uptr os_id, void *arg); private: const ThreadContextFactory context_factory_; const u32 max_threads_; const u32 thread_quarantine_size_; const u32 max_reuse_; BlockingMutex mtx_; u32 n_contexts_; u64 total_threads_; uptr alive_threads_; uptr max_alive_threads_; uptr running_threads_; ThreadContextBase **threads_; IntrusiveList dead_threads_; IntrusiveList invalid_threads_; void QuarantinePush(ThreadContextBase *tctx); ThreadContextBase *QuarantinePop(); }; typedef GenericScopedLock ThreadRegistryLock; } # 35 "../../../../gcc/libsanitizer/tsan/tsan_rtl.h" 2 # 1 "../../../../gcc/libsanitizer/tsan/tsan_clock.h" 1 # 15 "../../../../gcc/libsanitizer/tsan/tsan_clock.h" # 1 "../../../../gcc/libsanitizer/tsan/tsan_dense_alloc.h" 1 # 24 "../../../../gcc/libsanitizer/tsan/tsan_dense_alloc.h" namespace __tsan { class DenseSlabAllocCache 
{ static const uptr kSize = 128; typedef u32 IndexT; uptr pos; IndexT cache[kSize]; template friend class DenseSlabAlloc; }; template class DenseSlabAlloc { public: typedef DenseSlabAllocCache Cache; typedef typename Cache::IndexT IndexT; DenseSlabAlloc() { do { __sanitizer::u64 v1 = (u64)((kL1Size & (kL1Size - 1))); __sanitizer::u64 v2 = (u64)((0)); if (__builtin_expect(!!(!(v1 == v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/tsan/tsan_dense_alloc.h", 42, "(" "(kL1Size & (kL1Size - 1))" ") " "==" " (" "(0)" ")", v1, v2); } while (false); do { __sanitizer::u64 v1 = (u64)((kL2Size & (kL2Size - 1))); __sanitizer::u64 v2 = (u64)((0)); if (__builtin_expect(!!(!(v1 == v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/tsan/tsan_dense_alloc.h", 43, "(" "(kL2Size & (kL2Size - 1))" ") " "==" " (" "(0)" ")", v1, v2); } while (false); do { __sanitizer::u64 v1 = (u64)((1ull << (sizeof(IndexT) * 8))); __sanitizer::u64 v2 = (u64)((kL1Size * kL2Size)); if (__builtin_expect(!!(!(v1 >= v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/tsan/tsan_dense_alloc.h", 44, "(" "(1ull << (sizeof(IndexT) * 8))" ") " ">=" " (" "(kL1Size * kL2Size)" ")", v1, v2); } while (false); do { __sanitizer::u64 v1 = (u64)((sizeof(T))); __sanitizer::u64 v2 = (u64)((sizeof(IndexT))); if (__builtin_expect(!!(!(v1 >= v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/tsan/tsan_dense_alloc.h", 46, "(" "(sizeof(T))" ") " ">=" " (" "(sizeof(IndexT))" ")", v1, v2); } while (false); internal_memset(map_, 0, sizeof(map_)); freelist_ = 0; fillpos_ = 0; } ~DenseSlabAlloc() { for (uptr i = 0; i < kL1Size; i++) { if (map_[i] != 0) UnmapOrDie(map_[i], kL2Size * sizeof(T)); } } IndexT Alloc(Cache *c) { if (c->pos == 0) Refill(c); return c->cache[--c->pos]; } void Free(Cache *c, IndexT idx) { ; if (c->pos == Cache::kSize) Drain(c); c->cache[c->pos++] = idx; } T *Map(IndexT idx) { ; ; return &map_[idx / kL2Size][idx % kL2Size]; } void FlushCache(Cache *c) { SpinMutexLock lock(&mtx_); while (c->pos) { IndexT idx = c->cache[--c->pos]; *(IndexT*)Map(idx) = freelist_; freelist_ = idx; } } void InitCache(Cache *c) { c->pos = 0; internal_memset(c->cache, 0, sizeof(c->cache)); } private: T *map_[kL1Size]; SpinMutex mtx_; IndexT freelist_; uptr fillpos_; void Refill(Cache *c) { SpinMutexLock lock(&mtx_); if (freelist_ == 0) { if (fillpos_ == kL1Size) { Printf("ThreadSanitizer: DenseSlabAllocator overflow. Dying.\n"); Die(); } T *batch = (T*)MmapOrDie(kL2Size * sizeof(T), "DenseSlabAllocator"); IndexT start = fillpos_ == 0 ? 
1 : 0; for (IndexT i = start; i < kL2Size; i++) { new(batch + i) T; *(IndexT*)(batch + i) = i + 1 + fillpos_ * kL2Size; } *(IndexT*)(batch + kL2Size - 1) = 0; freelist_ = fillpos_ * kL2Size + start; map_[fillpos_++] = batch; } for (uptr i = 0; i < Cache::kSize / 2 && freelist_ != 0; i++) { IndexT idx = freelist_; c->cache[c->pos++] = idx; freelist_ = *(IndexT*)Map(idx); } } void Drain(Cache *c) { SpinMutexLock lock(&mtx_); for (uptr i = 0; i < Cache::kSize / 2; i++) { IndexT idx = c->cache[--c->pos]; *(IndexT*)Map(idx) = freelist_; freelist_ = idx; } } }; } # 16 "../../../../gcc/libsanitizer/tsan/tsan_clock.h" 2 namespace __tsan { struct ClockElem { u64 epoch : kClkBits; u64 reused : 64 - kClkBits; }; struct ClockBlock { static const uptr kSize = 512; static const uptr kTableSize = kSize / sizeof(u32); static const uptr kClockCount = kSize / sizeof(ClockElem); union { u32 table[kTableSize]; ClockElem clock[kClockCount]; }; ClockBlock() { } }; typedef DenseSlabAlloc ClockAlloc; typedef DenseSlabAllocCache ClockCache; class SyncClock { public: SyncClock(); ~SyncClock(); uptr size() const { return size_; } u64 get(unsigned tid) const { return elem(tid).epoch; } void Resize(ClockCache *c, uptr nclk); void Reset(ClockCache *c); void DebugDump(int(*printf)(const char *s, ...)); private: friend struct ThreadClock; static const uptr kDirtyTids = 2; unsigned release_store_tid_; unsigned release_store_reused_; unsigned dirty_tids_[kDirtyTids]; ClockBlock *tab_; u32 tab_idx_; u32 size_; ClockElem &elem(unsigned tid) const; }; struct ThreadClock { public: typedef DenseSlabAllocCache Cache; explicit ThreadClock(unsigned tid, unsigned reused = 0); u64 get(unsigned tid) const { ; return clk_[tid].epoch; } void set(unsigned tid, u64 v); void set(u64 v) { ; clk_[tid_].epoch = v; } void tick() { clk_[tid_].epoch++; } uptr size() const { return nclk_; } void acquire(ClockCache *c, const SyncClock *src); void release(ClockCache *c, SyncClock *dst) const; void acq_rel(ClockCache *c, SyncClock *dst); void ReleaseStore(ClockCache *c, SyncClock *dst) const; void DebugReset(); void DebugDump(int(*printf)(const char *s, ...)); private: static const uptr kDirtyTids = SyncClock::kDirtyTids; const unsigned tid_; const unsigned reused_; u64 last_acquire_; uptr nclk_; ClockElem clk_[kMaxTidInClock]; bool IsAlreadyAcquired(const SyncClock *src) const; void UpdateCurrentThread(SyncClock *dst) const; }; } # 36 "../../../../gcc/libsanitizer/tsan/tsan_rtl.h" 2 # 1 "../../../../gcc/libsanitizer/tsan/tsan_flags.h" 1 # 18 "../../../../gcc/libsanitizer/tsan/tsan_flags.h" namespace __tsan { struct Flags : DDFlags { # 1 "../../../../gcc/libsanitizer/tsan/tsan_flags.inc" 1 # 18 "../../../../gcc/libsanitizer/tsan/tsan_flags.inc" bool enable_annotations; bool suppress_equal_stacks; bool suppress_equal_addresses; bool report_bugs; bool report_thread_leaks; bool report_destroy_locked; bool report_mutex_bugs; bool report_signal_unsafe; bool report_atomic_races; bool force_seq_cst_atomics; bool print_benign; bool halt_on_error; int atexit_sleep_ms; const char * profile_memory; int flush_memory_ms; int flush_symbolizer_ms; int memory_limit_mb; bool stop_on_start; bool running_on_valgrind; int history_size; int io_sync; bool die_after_fork; const char * suppressions; bool ignore_interceptors_accesses; bool shared_ptr_interceptor; # 23 "../../../../gcc/libsanitizer/tsan/tsan_flags.h" 2 void SetDefaults(); void ParseFromString(const char *str); }; Flags *flags(); void InitializeFlags(Flags *flags, const char *env); } # 38 
"../../../../gcc/libsanitizer/tsan/tsan_rtl.h" 2 # 1 "../../../../gcc/libsanitizer/tsan/tsan_sync.h" 1 # 22 "../../../../gcc/libsanitizer/tsan/tsan_sync.h" namespace __tsan { struct SyncVar { SyncVar(); static const int kInvalidTid = -1; uptr addr; Mutex mtx; u64 uid; u32 creation_stack_id; int owner_tid; u64 last_lock; int recursion; bool is_rw; bool is_recursive; bool is_broken; bool is_linker_init; u32 next; DDMutex dd; SyncClock read_clock; SyncClock clock; void Init(ThreadState *thr, uptr pc, uptr addr, u64 uid); void Reset(Processor *proc); u64 GetId() const { return GetLsb((u64)addr | (uid << 48), 60); } bool CheckId(u64 uid) const { do { __sanitizer::u64 v1 = (u64)((uid)); __sanitizer::u64 v2 = (u64)((GetLsb(uid, 14))); if (__builtin_expect(!!(!(v1 == v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/tsan/tsan_sync.h", 55, "(" "(uid)" ") " "==" " (" "(GetLsb(uid, 14))" ")", v1, v2); } while (false); return GetLsb(this->uid, 14) == uid; } static uptr SplitId(u64 id, u64 *uid) { *uid = id >> 48; return (uptr)GetLsb(id, 48); } }; class MetaMap { public: MetaMap(); void AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz); uptr FreeBlock(Processor *proc, uptr p); bool FreeRange(Processor *proc, uptr p, uptr sz); void ResetRange(Processor *proc, uptr p, uptr sz); MBlock* GetBlock(uptr p); SyncVar* GetOrCreateAndLock(ThreadState *thr, uptr pc, uptr addr, bool write_lock); SyncVar* GetIfExistsAndLock(uptr addr, bool write_lock); void MoveMemory(uptr src, uptr dst, uptr sz); void OnProcIdle(Processor *proc); private: static const u32 kFlagMask = 3u << 30; static const u32 kFlagBlock = 1u << 30; static const u32 kFlagSync = 2u << 30; typedef DenseSlabAlloc BlockAlloc; typedef DenseSlabAlloc SyncAlloc; BlockAlloc block_alloc_; SyncAlloc sync_alloc_; atomic_uint64_t uid_gen_; SyncVar* GetAndLock(ThreadState *thr, uptr pc, uptr addr, bool write_lock, bool create); }; } # 39 "../../../../gcc/libsanitizer/tsan/tsan_rtl.h" 2 # 1 "../../../../gcc/libsanitizer/tsan/tsan_vector.h" 1 # 18 "../../../../gcc/libsanitizer/tsan/tsan_vector.h" # 1 "../../../../gcc/libsanitizer/tsan/tsan_mman.h" 1 # 16 "../../../../gcc/libsanitizer/tsan/tsan_mman.h" namespace __tsan { const uptr kDefaultAlignment = 16; void InitializeAllocator(); void InitializeAllocatorLate(); void ReplaceSystemMalloc(); void AllocatorProcStart(Processor *proc); void AllocatorProcFinish(Processor *proc); void AllocatorPrintStats(); void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align = kDefaultAlignment, bool signal = true); void *user_calloc(ThreadState *thr, uptr pc, uptr sz, uptr n); void user_free(ThreadState *thr, uptr pc, void *p, bool signal = true); void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz); void *user_alloc_aligned(ThreadState *thr, uptr pc, uptr sz, uptr align); uptr user_alloc_usable_size(const void *p); void invoke_malloc_hook(void *ptr, uptr size); void invoke_free_hook(void *ptr); enum MBlockType { MBlockScopedBuf, MBlockString, MBlockStackTrace, MBlockShadowStack, MBlockSync, MBlockClock, MBlockThreadContex, MBlockDeadInfo, MBlockRacyStacks, MBlockRacyAddresses, MBlockAtExit, MBlockFlag, MBlockReport, MBlockReportMop, MBlockReportThread, MBlockReportMutex, MBlockReportLoc, MBlockReportStack, MBlockSuppression, MBlockExpectRace, MBlockSignal, MBlockJmpBuf, MBlockTypeCount }; void *internal_alloc(MBlockType typ, uptr sz); void internal_free(void *p); template void DestroyAndFree(T *&p) { p->~T(); internal_free(p); p = 0; } } # 19 
"../../../../gcc/libsanitizer/tsan/tsan_vector.h" 2 namespace __tsan { template class Vector { public: explicit Vector(MBlockType typ) : typ_(typ) , begin_() , end_() , last_() { } ~Vector() { if (begin_) internal_free(begin_); } void Reset() { if (begin_) internal_free(begin_); begin_ = 0; end_ = 0; last_ = 0; } uptr Size() const { return end_ - begin_; } T &operator[](uptr i) { ; return begin_[i]; } const T &operator[](uptr i) const { ; return begin_[i]; } T *PushBack() { EnsureSize(Size() + 1); T *p = &end_[-1]; internal_memset(p, 0, sizeof(*p)); return p; } T *PushBack(const T& v) { EnsureSize(Size() + 1); T *p = &end_[-1]; internal_memcpy(p, &v, sizeof(*p)); return p; } void PopBack() { ; end_--; } void Resize(uptr size) { if (size == 0) { end_ = begin_; return; } uptr old_size = Size(); EnsureSize(size); if (old_size < size) { for (uptr i = old_size; i < size; i++) internal_memset(&begin_[i], 0, sizeof(begin_[i])); } } private: const MBlockType typ_; T *begin_; T *end_; T *last_; void EnsureSize(uptr size) { if (size <= Size()) return; if (size <= (uptr)(last_ - begin_)) { end_ = begin_ + size; return; } uptr cap0 = last_ - begin_; uptr cap = cap0 * 5 / 4; if (cap == 0) cap = 16; if (cap < size) cap = size; T *p = (T*)internal_alloc(typ_, cap * sizeof(T)); if (cap0) { internal_memcpy(p, begin_, cap0 * sizeof(T)); internal_free(begin_); } begin_ = p; end_ = begin_ + size; last_ = begin_ + cap; } Vector(const Vector&); void operator=(const Vector&); }; } # 41 "../../../../gcc/libsanitizer/tsan/tsan_rtl.h" 2 # 1 "../../../../gcc/libsanitizer/tsan/tsan_report.h" 1 # 18 "../../../../gcc/libsanitizer/tsan/tsan_report.h" namespace __tsan { enum ReportType { ReportTypeRace, ReportTypeVptrRace, ReportTypeUseAfterFree, ReportTypeVptrUseAfterFree, ReportTypeThreadLeak, ReportTypeMutexDestroyLocked, ReportTypeMutexDoubleLock, ReportTypeMutexInvalidAccess, ReportTypeMutexBadUnlock, ReportTypeMutexBadReadLock, ReportTypeMutexBadReadUnlock, ReportTypeSignalUnsafe, ReportTypeErrnoInSignal, ReportTypeDeadlock }; struct ReportStack { SymbolizedStack *frames; bool suppressable; static ReportStack *New(); private: ReportStack(); }; struct ReportMopMutex { u64 id; bool write; }; struct ReportMop { int tid; uptr addr; int size; bool write; bool atomic; Vector mset; ReportStack *stack; ReportMop(); }; enum ReportLocationType { ReportLocationGlobal, ReportLocationHeap, ReportLocationStack, ReportLocationTLS, ReportLocationFD }; struct ReportLocation { ReportLocationType type; DataInfo global; uptr heap_chunk_start; uptr heap_chunk_size; int tid; int fd; bool suppressable; ReportStack *stack; static ReportLocation *New(ReportLocationType type); private: explicit ReportLocation(ReportLocationType type); }; struct ReportThread { int id; uptr os_id; bool running; char *name; int parent_tid; ReportStack *stack; }; struct ReportMutex { u64 id; uptr addr; bool destroyed; ReportStack *stack; }; class ReportDesc { public: ReportType typ; Vector stacks; Vector mops; Vector locs; Vector mutexes; Vector threads; Vector unique_tids; ReportStack *sleep; int count; ReportDesc(); ~ReportDesc(); private: ReportDesc(const ReportDesc&); void operator = (const ReportDesc&); }; void PrintReport(const ReportDesc *rep); void PrintStack(const ReportStack *stack); } # 42 "../../../../gcc/libsanitizer/tsan/tsan_rtl.h" 2 # 1 "../../../../gcc/libsanitizer/tsan/tsan_ignoreset.h" 1 # 17 "../../../../gcc/libsanitizer/tsan/tsan_ignoreset.h" namespace __tsan { class IgnoreSet { public: static const uptr kMaxSize = 16; IgnoreSet(); void 
Add(u32 stack_id); void Reset(); uptr Size() const; u32 At(uptr i) const; private: uptr size_; u32 stacks_[kMaxSize]; }; } # 45 "../../../../gcc/libsanitizer/tsan/tsan_rtl.h" 2 namespace __tsan { struct MapUnmapCallback; # 67 "../../../../gcc/libsanitizer/tsan/tsan_rtl.h" struct AP64 { static const uptr kSpaceBeg = Mapping::kHeapMemBeg; static const uptr kSpaceSize = Mapping::kHeapMemEnd - Mapping::kHeapMemBeg; static const uptr kMetadataSize = 0; typedef DefaultSizeClassMap SizeClassMap; typedef __tsan::MapUnmapCallback MapUnmapCallback; static const uptr kFlags = 0; }; typedef SizeClassAllocator64 PrimaryAllocator; typedef SizeClassAllocatorLocalCache AllocatorCache; typedef LargeMmapAllocator SecondaryAllocator; typedef CombinedAllocator Allocator; Allocator *allocator(); void TsanCheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2); const u64 kShadowRodata = (u64)-1; class FastState { public: FastState(u64 tid, u64 epoch) { x_ = tid << kTidShift; x_ |= epoch; ; ; ; } explicit FastState(u64 x) : x_(x) { } u64 raw() const { return x_; } u64 tid() const { u64 res = (x_ & ~kIgnoreBit) >> kTidShift; return res; } u64 TidWithIgnore() const { u64 res = x_ >> kTidShift; return res; } u64 epoch() const { u64 res = x_ & ((1ull << kClkBits) - 1); return res; } void IncrementEpoch() { u64 old_epoch = epoch(); x_ += 1; ; (void)old_epoch; } void SetIgnoreBit() { x_ |= kIgnoreBit; } void ClearIgnoreBit() { x_ &= ~kIgnoreBit; } bool GetIgnoreBit() const { return (s64)x_ < 0; } void SetHistorySize(int hs) { do { __sanitizer::u64 v1 = (u64)((hs)); __sanitizer::u64 v2 = (u64)((0)); if (__builtin_expect(!!(!(v1 >= v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/tsan/tsan_rtl.h", 140, "(" "(hs)" ") " ">=" " (" "(0)" ")", v1, v2); } while (false); do { __sanitizer::u64 v1 = (u64)((hs)); __sanitizer::u64 v2 = (u64)((7)); if (__builtin_expect(!!(!(v1 <= v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/tsan/tsan_rtl.h", 141, "(" "(hs)" ") " "<=" " (" "(7)" ")", v1, v2); } while (false); x_ = (x_ & ~(kHistoryMask << kHistoryShift)) | (u64(hs) << kHistoryShift); } inline __attribute__((always_inline)) int GetHistorySize() const { return (int)((x_ >> kHistoryShift) & kHistoryMask); } void ClearHistorySize() { SetHistorySize(0); } inline __attribute__((always_inline)) u64 GetTracePos() const { const int hs = GetHistorySize(); const u64 mask = (1ull << (kTracePartSizeBits + hs + 1)) - 1; return epoch() & mask; } private: friend class Shadow; static const int kTidShift = 64 - kTidBits - 1; static const u64 kIgnoreBit = 1ull << 63; static const u64 kFreedBit = 1ull << 63; static const u64 kHistoryShift = kClkBits; static const u64 kHistoryMask = 7; u64 x_; }; # 180 "../../../../gcc/libsanitizer/tsan/tsan_rtl.h" class Shadow : public FastState { public: explicit Shadow(u64 x) : FastState(x) { } explicit Shadow(const FastState &s) : FastState(s.x_) { ClearHistorySize(); } void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) { ; ; ; x_ |= ((kAccessSizeLog << 3) | addr0) << kClkBits; ; ; } void SetWrite(unsigned kAccessIsWrite) { ; if (!kAccessIsWrite) x_ |= kReadBit; ; } void SetAtomic(bool kIsAtomic) { ; if (kIsAtomic) x_ |= kAtomicBit; ; } bool IsAtomic() const { return x_ & kAtomicBit; } bool IsZero() const { return x_ == 0; } static inline bool TidsAreEqual(const Shadow s1, const Shadow s2) { u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift; ; return shifted_xor == 0; } static inline __attribute__((always_inline)) bool Addr0AndSizeAreEqual(const Shadow s1, 
const Shadow s2) { u64 masked_xor = ((s1.x_ ^ s2.x_) >> kClkBits) & 31; return masked_xor == 0; } static inline __attribute__((always_inline)) bool TwoRangesIntersect(Shadow s1, Shadow s2, unsigned kS2AccessSize) { bool res = false; u64 diff = s1.addr0() - s2.addr0(); if ((s64)diff < 0) { if (s1.size() > -diff) res = true; } else { if (kS2AccessSize > diff) res = true; } ; ; return res; } u64 inline __attribute__((always_inline)) addr0() const { return (x_ >> kClkBits) & 7; } u64 inline __attribute__((always_inline)) size() const { return 1ull << size_log(); } bool inline __attribute__((always_inline)) IsWrite() const { return !IsRead(); } bool inline __attribute__((always_inline)) IsRead() const { return x_ & kReadBit; } # 266 "../../../../gcc/libsanitizer/tsan/tsan_rtl.h" void MarkAsFreed() { x_ |= kFreedBit; } bool IsFreed() const { return x_ & kFreedBit; } bool GetFreedAndReset() { bool res = x_ & kFreedBit; x_ &= ~kFreedBit; return res; } bool inline __attribute__((always_inline)) IsBothReadsOrAtomic(bool kIsWrite, bool kIsAtomic) const { bool v = x_ & ((u64(kIsWrite ^ 1) << kReadShift) | (u64(kIsAtomic) << kAtomicShift)); ; return v; } bool inline __attribute__((always_inline)) IsRWNotWeaker(bool kIsWrite, bool kIsAtomic) const { bool v = ((x_ >> kReadShift) & 3) <= u64((kIsWrite ^ 1) | (kIsAtomic << 1)); ; return v; } bool inline __attribute__((always_inline)) IsRWWeakerOrEqual(bool kIsWrite, bool kIsAtomic) const { bool v = ((x_ >> kReadShift) & 3) >= u64((kIsWrite ^ 1) | (kIsAtomic << 1)); ; return v; } private: static const u64 kReadShift = 5 + kClkBits; static const u64 kReadBit = 1ull << kReadShift; static const u64 kAtomicShift = 6 + kClkBits; static const u64 kAtomicBit = 1ull << kAtomicShift; u64 size_log() const { return (x_ >> (3 + kClkBits)) & 3; } static bool TwoRangesIntersectSlow(const Shadow s1, const Shadow s2) { if (s1.addr0() == s2.addr0()) return true; if (s1.addr0() < s2.addr0() && s1.addr0() + s1.size() > s2.addr0()) return true; if (s2.addr0() < s1.addr0() && s2.addr0() + s2.size() > s1.addr0()) return true; return false; } }; struct ThreadSignalContext; struct JmpBuf { uptr sp; uptr mangled_sp; int int_signal_send; bool in_blocking_func; uptr in_signal_handler; uptr *shadow_stack_pos; }; # 340 "../../../../gcc/libsanitizer/tsan/tsan_rtl.h" struct Processor { ThreadState *thr; AllocatorCache alloc_cache; InternalAllocatorCache internal_alloc_cache; DenseSlabAllocCache block_cache; DenseSlabAllocCache sync_cache; DenseSlabAllocCache clock_cache; DDPhysicalThread *dd_pt; }; struct ScopedGlobalProcessor { ScopedGlobalProcessor(); ~ScopedGlobalProcessor(); }; struct ThreadState { FastState fast_state; # 376 "../../../../gcc/libsanitizer/tsan/tsan_rtl.h" u64 fast_synch_epoch; int ignore_reads_and_writes; int ignore_sync; IgnoreSet mop_ignore_set; IgnoreSet sync_ignore_set; uptr *shadow_stack; uptr *shadow_stack_end; uptr *shadow_stack_pos; u64 *racy_shadow_addr; u64 racy_state[2]; MutexSet mset; ThreadClock clock; Vector jmp_bufs; int ignore_interceptors; const int tid; const int unique_id; bool in_symbolizer; bool in_ignored_lib; bool is_inited; bool is_dead; bool is_freeing; bool is_vptr_access; const uptr stk_addr; const uptr stk_size; const uptr tls_addr; const uptr tls_size; ThreadContext *tctx; DDLogicalThread *dd_lt; Processor *proc1; Processor *proc() { return proc1; } atomic_uintptr_t in_signal_handler; ThreadSignalContext *signal_ctx; u32 last_sleep_stack_id; ThreadClock last_sleep_clock; int nomalloc; const ReportDesc *current_report; explicit 
ThreadState(Context *ctx, int tid, int unique_id, u64 epoch, unsigned reuse_count, uptr stk_addr, uptr stk_size, uptr tls_addr, uptr tls_size); }; __attribute__((tls_model("initial-exec"))) extern __thread char cur_thread_placeholder[]; inline ThreadState *cur_thread() { return reinterpret_cast(&cur_thread_placeholder); } inline void cur_thread_finalize() { } class ThreadContext : public ThreadContextBase { public: explicit ThreadContext(int tid); ~ThreadContext(); ThreadState *thr; u32 creation_stack_id; SyncClock sync; u64 epoch0; u64 epoch1; void OnDead() override; void OnJoined(void *arg) override; void OnFinished() override; void OnStarted(void *arg) override; void OnCreated(void *arg) override; void OnReset() override; void OnDetached(void *arg) override; }; struct RacyStacks { MD5Hash hash[2]; bool operator==(const RacyStacks &other) const { if (hash[0] == other.hash[0] && hash[1] == other.hash[1]) return true; if (hash[0] == other.hash[1] && hash[1] == other.hash[0]) return true; return false; } }; struct RacyAddress { uptr addr_min; uptr addr_max; }; struct FiredSuppression { ReportType type; uptr pc_or_addr; Suppression *supp; }; struct Context { Context(); bool initialized; bool after_multithreaded_fork; MetaMap metamap; Mutex report_mtx; int nreported; int nmissed_expected; atomic_uint64_t last_symbolize_time_ns; void *background_thread; atomic_uint32_t stop_background_thread; ThreadRegistry *thread_registry; Mutex racy_mtx; Vector racy_stacks; Vector racy_addresses; Mutex fired_suppressions_mtx; InternalMmapVector fired_suppressions; DDetector *dd; ClockAlloc clock_alloc; Flags flags; u64 stat[StatCnt]; u64 int_alloc_cnt[MBlockTypeCount]; u64 int_alloc_siz[MBlockTypeCount]; }; extern Context *ctx; struct ScopedIgnoreInterceptors { ScopedIgnoreInterceptors() { cur_thread()->ignore_interceptors++; } ~ScopedIgnoreInterceptors() { cur_thread()->ignore_interceptors--; } }; class ScopedReport { public: explicit ScopedReport(ReportType typ); ~ScopedReport(); void AddMemoryAccess(uptr addr, Shadow s, StackTrace stack, const MutexSet *mset); void AddStack(StackTrace stack, bool suppressable = false); void AddThread(const ThreadContext *tctx, bool suppressable = false); void AddThread(int unique_tid, bool suppressable = false); void AddUniqueTid(int unique_tid); void AddMutex(const SyncVar *s); u64 AddMutex(u64 id); void AddLocation(uptr addr, uptr size); void AddSleep(u32 stack_id); void SetCount(int count); const ReportDesc *GetReport() const; private: ReportDesc *rep_; ScopedIgnoreInterceptors ignore_interceptors_; void AddDeadMutex(u64 id); ScopedReport(const ScopedReport&); void operator = (const ScopedReport&); }; void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk, MutexSet *mset); template void ObtainCurrentStack(ThreadState *thr, uptr toppc, StackTraceTy *stack) { uptr size = thr->shadow_stack_pos - thr->shadow_stack; uptr start = 0; if (size + !!toppc > kStackTraceMax) { start = size + !!toppc - kStackTraceMax; size = kStackTraceMax - !!toppc; } stack->Init(&thr->shadow_stack[start], size, toppc); } void inline __attribute__((always_inline)) StatInc(ThreadState *thr, StatType typ, u64 n = 1) { } void inline __attribute__((always_inline)) StatSet(ThreadState *thr, StatType typ, u64 n) { } void MapShadow(uptr addr, uptr size); void MapThreadTrace(uptr addr, uptr size, const char *name); void DontNeedShadowFor(uptr addr, uptr size); void InitializeShadowMemory(); void InitializeInterceptors(); void InitializeLibIgnore(); void InitializeDynamicAnnotations(); void 
ForkBefore(ThreadState *thr, uptr pc); void ForkParentAfter(ThreadState *thr, uptr pc); void ForkChildAfter(ThreadState *thr, uptr pc); void ReportRace(ThreadState *thr); bool OutputReport(ThreadState *thr, const ScopedReport &srep); bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace); bool IsExpectedReport(uptr addr, uptr size); void PrintMatchedBenignRaces(); # 652 "../../../../gcc/libsanitizer/tsan/tsan_rtl.h" u32 CurrentStackId(ThreadState *thr, uptr pc); ReportStack *SymbolizeStackId(u32 stack_id); void PrintCurrentStack(ThreadState *thr, uptr pc); void PrintCurrentStackSlow(uptr pc); void Initialize(ThreadState *thr); int Finalize(ThreadState *thr); void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write); void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write); void MemoryAccess(ThreadState *thr, uptr pc, uptr addr, int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic); void MemoryAccessImpl(ThreadState *thr, uptr addr, int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic, u64 *shadow_mem, Shadow cur); void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size, bool is_write); void MemoryAccessRangeStep(ThreadState *thr, uptr pc, uptr addr, uptr size, uptr step, bool is_write); void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr, int size, bool kAccessIsWrite, bool kIsAtomic); const int kSizeLog1 = 0; const int kSizeLog2 = 1; const int kSizeLog4 = 2; const int kSizeLog8 = 3; void inline __attribute__((always_inline)) MemoryRead(ThreadState *thr, uptr pc, uptr addr, int kAccessSizeLog) { MemoryAccess(thr, pc, addr, kAccessSizeLog, false, false); } void inline __attribute__((always_inline)) MemoryWrite(ThreadState *thr, uptr pc, uptr addr, int kAccessSizeLog) { MemoryAccess(thr, pc, addr, kAccessSizeLog, true, false); } void inline __attribute__((always_inline)) MemoryReadAtomic(ThreadState *thr, uptr pc, uptr addr, int kAccessSizeLog) { MemoryAccess(thr, pc, addr, kAccessSizeLog, false, true); } void inline __attribute__((always_inline)) MemoryWriteAtomic(ThreadState *thr, uptr pc, uptr addr, int kAccessSizeLog) { MemoryAccess(thr, pc, addr, kAccessSizeLog, true, true); } void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size); void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size); void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size); void ThreadIgnoreBegin(ThreadState *thr, uptr pc); void ThreadIgnoreEnd(ThreadState *thr, uptr pc); void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc); void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc); void FuncEntry(ThreadState *thr, uptr pc); void FuncExit(ThreadState *thr); int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached); void ThreadStart(ThreadState *thr, int tid, uptr os_id); void ThreadFinish(ThreadState *thr); int ThreadTid(ThreadState *thr, uptr pc, uptr uid); void ThreadJoin(ThreadState *thr, uptr pc, int tid); void ThreadDetach(ThreadState *thr, uptr pc, int tid); void ThreadFinalize(ThreadState *thr); void ThreadSetName(ThreadState *thr, const char *name); int ThreadCount(ThreadState *thr); void ProcessPendingSignals(ThreadState *thr); Processor *ProcCreate(); void ProcDestroy(Processor *proc); void ProcWire(Processor *proc, ThreadState *thr); void ProcUnwire(Processor *proc, ThreadState *thr); void MutexCreate(ThreadState *thr, uptr pc, uptr addr, bool rw, bool recursive, bool linker_init); void MutexDestroy(ThreadState *thr, uptr pc, uptr addr); void MutexLock(ThreadState *thr, uptr 
pc, uptr addr, int rec = 1, bool try_lock = false); int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all = false); void MutexReadLock(ThreadState *thr, uptr pc, uptr addr, bool try_lock = false); void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr); void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr); void MutexRepair(ThreadState *thr, uptr pc, uptr addr); void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr); void Acquire(ThreadState *thr, uptr pc, uptr addr); void AcquireGlobal(ThreadState *thr, uptr pc); void Release(ThreadState *thr, uptr pc, uptr addr); void ReleaseStore(ThreadState *thr, uptr pc, uptr addr); void AfterSleep(ThreadState *thr, uptr pc); void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c); void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c); void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c); void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c); # 777 "../../../../gcc/libsanitizer/tsan/tsan_rtl.h" void TraceSwitch(ThreadState *thr); uptr TraceTopPC(ThreadState *thr); uptr TraceSize(); uptr TraceParts(); Trace *ThreadTrace(int tid); extern "C" void __tsan_trace_switch(); void inline __attribute__((always_inline)) TraceAddEvent(ThreadState *thr, FastState fs, EventType typ, u64 addr) { if (!kCollectHistory) return; ; ; ; StatInc(thr, StatEvents); u64 pos = fs.GetTracePos(); if (__builtin_expect(!!((pos % kTracePartSize) == 0), 0)) { __asm__ __volatile__("sub $1024, %%rsp;" ".cfi_adjust_cfa_offset " "1024" ";" ".hidden " "__tsan_trace_switch" "_thunk;" "call " "__tsan_trace_switch" "_thunk;" "add $1024, %%rsp;" ".cfi_adjust_cfa_offset " "-1024" ";" ::: "memory", "cc");; } Event *trace = (Event*)GetThreadTrace(fs.tid()); Event *evp = &trace[pos]; Event ev = (u64)addr | ((u64)typ << 61); *evp = ev; } uptr inline __attribute__((always_inline)) HeapEnd() { return HeapMemEnd() + PrimaryAllocator::AdditionalSize(); } } # 22 "../../../../gcc/libsanitizer/tsan/tsan_rtl.cc" 2 # 1 "../../../../gcc/libsanitizer/tsan/tsan_suppressions.h" 1 # 17 "../../../../gcc/libsanitizer/tsan/tsan_suppressions.h" namespace __tsan { const char kSuppressionNone[] = "none"; const char kSuppressionRace[] = "race"; const char kSuppressionRaceTop[] = "race_top"; const char kSuppressionMutex[] = "mutex"; const char kSuppressionThread[] = "thread"; const char kSuppressionSignal[] = "signal"; const char kSuppressionLib[] = "called_from_lib"; const char kSuppressionDeadlock[] = "deadlock"; void InitializeSuppressions(); SuppressionContext *Suppressions(); void PrintMatchedSuppressions(); uptr IsSuppressed(ReportType typ, const ReportStack *stack, Suppression **sp); uptr IsSuppressed(ReportType typ, const ReportLocation *loc, Suppression **sp); } # 24 "../../../../gcc/libsanitizer/tsan/tsan_rtl.cc" 2 # 1 "../../../../gcc/libsanitizer/tsan/tsan_symbolize.h" 1 # 17 "../../../../gcc/libsanitizer/tsan/tsan_symbolize.h" namespace __tsan { void EnterSymbolizer(); void ExitSymbolizer(); SymbolizedStack *SymbolizeCode(uptr addr); ReportLocation *SymbolizeData(uptr addr); void SymbolizeFlush(); ReportStack *NewReportStackEntry(uptr addr); } # 25 "../../../../gcc/libsanitizer/tsan/tsan_rtl.cc" 2 # 1 "../../../../gcc/libsanitizer/ubsan/ubsan_init.h" 1 # 14 "../../../../gcc/libsanitizer/ubsan/ubsan_init.h" namespace __ubsan { void InitAsStandalone(); void InitAsStandaloneIfNecessary(); void InitAsPlugin(); } # 26 "../../../../gcc/libsanitizer/tsan/tsan_rtl.cc" 2 # 37 "../../../../gcc/libsanitizer/tsan/tsan_rtl.cc" volatile int __tsan_resumed = 0; 
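// Editor's note (annotation, not original code): __tsan_resumed pairs with
// the stop_on_start flag declared in tsan_flags.inc above. When
// stop_on_start=1 the runtime is expected to spin at startup until this flag
// becomes nonzero, which an attached debugger can trigger on the stopped
// process, e.g. from gdb:
//
//   (gdb) call __tsan_resume()
//
// The flag is a plain volatile int rather than an atomic, presumably because
// it is only polled in a loop and flipped once from outside the runtime.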
extern "C" void __tsan_resume() { __tsan_resumed = 1; } namespace __tsan { __thread char cur_thread_placeholder[sizeof(ThreadState)] __attribute__((aligned(64))); static char ctx_placeholder[sizeof(Context)] __attribute__((aligned(64))); Context *ctx; extern "C++" __attribute__((visibility("default"))) __attribute__((weak)) __attribute__((noinline)) bool OnFinalize(bool failed) { return failed; } extern "C++" __attribute__((visibility("default"))) __attribute__((weak)) __attribute__((noinline)) void OnInitialize() {} static char thread_registry_placeholder[sizeof(ThreadRegistry)]; static ThreadContextBase *CreateThreadContext(u32 tid) { char name[50]; internal_snprintf(name, sizeof(name), "trace %u", tid); MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event), name); const uptr hdr = GetThreadTraceHeader(tid); internal_snprintf(name, sizeof(name), "trace header %u", tid); MapThreadTrace(hdr, sizeof(Trace), name); new((void*)hdr) Trace(); uptr hdr_end = hdr + sizeof(Trace); hdr_end -= sizeof(TraceHeader) * (kTraceParts - TraceParts()); hdr_end = RoundUp(hdr_end, GetPageSizeCached()); if (hdr_end < hdr + sizeof(Trace)) UnmapOrDie((void*)hdr_end, hdr + sizeof(Trace) - hdr_end); void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext)); return new(mem) ThreadContext(tid); } static const u32 kThreadQuarantineSize = 16; Context::Context() : initialized() , report_mtx(MutexTypeReport, StatMtxReport) , nreported() , nmissed_expected() , thread_registry(new(thread_registry_placeholder) ThreadRegistry( CreateThreadContext, kMaxTid, kThreadQuarantineSize, kMaxTidReuse)) , racy_mtx(MutexTypeRacy, StatMtxRacy) , racy_stacks(MBlockRacyStacks) , racy_addresses(MBlockRacyAddresses) , fired_suppressions_mtx(MutexTypeFired, StatMtxFired) , fired_suppressions(8) { } ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch, unsigned reuse_count, uptr stk_addr, uptr stk_size, uptr tls_addr, uptr tls_size) : fast_state(tid, epoch) , clock(tid, reuse_count) , jmp_bufs(MBlockJmpBuf) , tid(tid) , unique_id(unique_id) , stk_addr(stk_addr) , stk_size(stk_size) , tls_addr(tls_addr) , tls_size(tls_size) , last_sleep_clock(tid) { } static void MemoryProfiler(Context *ctx, fd_t fd, int i) { uptr n_threads; uptr n_running_threads; ctx->thread_registry->GetNumberOfThreads(&n_threads, &n_running_threads); InternalScopedBuffer buf(4096); WriteMemoryProfile(buf.data(), buf.size(), n_threads, n_running_threads); WriteToFile(fd, buf.data(), internal_strlen(buf.data())); } static void BackgroundThread(void *arg) { cur_thread()->ignore_interceptors++; const u64 kMs2Ns = 1000 * 1000; fd_t mprof_fd = ((fd_t)-1); if (flags()->profile_memory && flags()->profile_memory[0]) { if (internal_strcmp(flags()->profile_memory, "stdout") == 0) { mprof_fd = 1; } else if (internal_strcmp(flags()->profile_memory, "stderr") == 0) { mprof_fd = 2; } else { InternalScopedString filename(kMaxPathLength); filename.append("%s.%d", flags()->profile_memory, (int)internal_getpid()); fd_t fd = OpenFile(filename.data(), WrOnly); if (fd == ((fd_t)-1)) { Printf("ThreadSanitizer: failed to open memory profile file '%s'\n", &filename[0]); } else { mprof_fd = fd; } } } u64 last_flush = NanoTime(); uptr last_rss = 0; for (int i = 0; atomic_load(&ctx->stop_background_thread, memory_order_relaxed) == 0; i++) { SleepForMillis(100); u64 now = NanoTime(); if (flags()->flush_memory_ms > 0) { if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) { do { if ((uptr)Verbosity() >= (1)) Printf("ThreadSanitizer: periodic memory 
flush\n"); } while (0); FlushShadowMemory(); last_flush = NanoTime(); } } if (flags()->memory_limit_mb > 0) { uptr rss = GetRSS(); uptr limit = uptr(flags()->memory_limit_mb) << 20; do { if ((uptr)Verbosity() >= (1)) Printf("ThreadSanitizer: memory flush check" " RSS=%llu LAST=%llu LIMIT=%llu\n", (u64)rss >> 20, (u64)last_rss >> 20, (u64)limit >> 20); } while (0); if (2 * rss > limit + last_rss) { do { if ((uptr)Verbosity() >= (1)) Printf("ThreadSanitizer: flushing memory due to RSS\n"); } while (0); FlushShadowMemory(); rss = GetRSS(); do { if ((uptr)Verbosity() >= (1)) Printf("ThreadSanitizer: memory flushed RSS=%llu\n", (u64)rss>>20); } while (0); } last_rss = rss; } if (mprof_fd != ((fd_t)-1)) MemoryProfiler(ctx, mprof_fd, i); if (flags()->flush_symbolizer_ms > 0) { u64 last = atomic_load(&ctx->last_symbolize_time_ns, memory_order_relaxed); if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) { Lock l(&ctx->report_mtx); SpinMutexLock l2(&CommonSanitizerReportMutex); SymbolizeFlush(); atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed); } } } } static void StartBackgroundThread() { ctx->background_thread = internal_start_thread(&BackgroundThread, 0); } static void StopBackgroundThread() { atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed); internal_join_thread(ctx->background_thread); ctx->background_thread = 0; } void DontNeedShadowFor(uptr addr, uptr size) { uptr shadow_beg = MemToShadow(addr); uptr shadow_end = MemToShadow(addr + size); ReleaseMemoryToOS(shadow_beg, shadow_end - shadow_beg); } void MapShadow(uptr addr, uptr size) { const uptr kPageSize = GetPageSizeCached(); uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), kPageSize); uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), kPageSize); MmapFixedNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow"); static bool data_mapped = false; static uptr mapped_meta_end = 0; uptr meta_begin = (uptr)MemToMeta(addr); uptr meta_end = (uptr)MemToMeta(addr + size); meta_begin = RoundDownTo(meta_begin, 64 << 10); meta_end = RoundUpTo(meta_end, 64 << 10); if (!data_mapped) { data_mapped = true; MmapFixedNoReserve(meta_begin, meta_end - meta_begin, "meta shadow"); } else { meta_begin = RoundDownTo(meta_begin, 64 << 10); meta_end = RoundUpTo(meta_end, 64 << 10); if (meta_end <= mapped_meta_end) return; if (meta_begin < mapped_meta_end) meta_begin = mapped_meta_end; MmapFixedNoReserve(meta_begin, meta_end - meta_begin, "meta shadow"); mapped_meta_end = meta_end; } do { if ((uptr)Verbosity() >= (2)) Printf("mapped meta shadow for (%p-%p) at (%p-%p)\n", addr, addr+size, meta_begin, meta_end); } while (0); } void MapThreadTrace(uptr addr, uptr size, const char *name) { ; do { __sanitizer::u64 v1 = (u64)((addr)); __sanitizer::u64 v2 = (u64)((TraceMemBeg())); if (__builtin_expect(!!(!(v1 >= v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/tsan/tsan_rtl.cc", 277, "(" "(addr)" ") " ">=" " (" "(TraceMemBeg())" ")", v1, v2); } while (false); do { __sanitizer::u64 v1 = (u64)((addr + size)); __sanitizer::u64 v2 = (u64)((TraceMemEnd())); if (__builtin_expect(!!(!(v1 <= v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/tsan/tsan_rtl.cc", 278, "(" "(addr + size)" ") " "<=" " (" "(TraceMemEnd())" ")", v1, v2); } while (false); do { __sanitizer::u64 v1 = (u64)((addr)); __sanitizer::u64 v2 = (u64)((addr & ~((64 << 10) - 1))); if (__builtin_expect(!!(!(v1 == v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/tsan/tsan_rtl.cc", 279, "(" "(addr)" 
") " "==" " (" "(addr & ~((64 << 10) - 1))" ")", v1, v2); } while (false); uptr addr1 = (uptr)MmapFixedNoReserve(addr, size, name); if (addr1 != addr) { Printf("FATAL: ThreadSanitizer can not mmap thread trace (%p/%p->%p)\n", addr, size, addr1); Die(); } } static void CheckShadowMapping() { uptr beg, end; for (int i = 0; GetUserRegion(i, &beg, &end); i++) { if (beg == end) continue; do { if ((uptr)Verbosity() >= (3)) Printf("checking shadow region %p-%p\n", beg, end); } while (0); uptr prev = 0; for (uptr p0 = beg; p0 <= end; p0 += (end - beg) / 4) { for (int x = -(int)kShadowCell; x <= (int)kShadowCell; x += kShadowCell) { const uptr p = RoundDown(p0 + x, kShadowCell); if (p < beg || p >= end) continue; const uptr s = MemToShadow(p); const uptr m = (uptr)MemToMeta(p); do { if ((uptr)Verbosity() >= (3)) Printf(" checking pointer %p: shadow=%p meta=%p\n", p, s, m); } while (0); do { __sanitizer::u64 v1 = (u64)((IsAppMem(p))); __sanitizer::u64 v2 = (u64)(0); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/tsan/tsan_rtl.cc", 305, "(" "(IsAppMem(p))" ") " "!=" " (" "0" ")", v1, v2); } while (false); do { __sanitizer::u64 v1 = (u64)((IsShadowMem(s))); __sanitizer::u64 v2 = (u64)(0); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/tsan/tsan_rtl.cc", 306, "(" "(IsShadowMem(s))" ") " "!=" " (" "0" ")", v1, v2); } while (false); do { __sanitizer::u64 v1 = (u64)((p)); __sanitizer::u64 v2 = (u64)((ShadowToMem(s))); if (__builtin_expect(!!(!(v1 == v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/tsan/tsan_rtl.cc", 307, "(" "(p)" ") " "==" " (" "(ShadowToMem(s))" ")", v1, v2); } while (false); do { __sanitizer::u64 v1 = (u64)((IsMetaMem(m))); __sanitizer::u64 v2 = (u64)(0); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/tsan/tsan_rtl.cc", 308, "(" "(IsMetaMem(m))" ") " "!=" " (" "0" ")", v1, v2); } while (false); if (prev) { const uptr prev_s = MemToShadow(prev); const uptr prev_m = (uptr)MemToMeta(prev); do { __sanitizer::u64 v1 = (u64)((s - prev_s)); __sanitizer::u64 v2 = (u64)(((p - prev) * kShadowMultiplier)); if (__builtin_expect(!!(!(v1 == v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/tsan/tsan_rtl.cc", 314, "(" "(s - prev_s)" ") " "==" " (" "((p - prev) * kShadowMultiplier)" ")", v1, v2); } while (false); do { __sanitizer::u64 v1 = (u64)(((m - prev_m) / kMetaShadowSize)); __sanitizer::u64 v2 = (u64)(((p - prev) / kMetaShadowCell)); if (__builtin_expect(!!(!(v1 == v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/tsan/tsan_rtl.cc", 316, "(" "((m - prev_m) / kMetaShadowSize)" ") " "==" " (" "((p - prev) / kMetaShadowCell)" ")", v1, v2); } while (false); } prev = p; } } } } void Initialize(ThreadState *thr) { static bool is_initialized = false; if (is_initialized) return; is_initialized = true; ScopedIgnoreInterceptors ignore; SanitizerToolName = "ThreadSanitizer"; SetCheckFailedCallback(TsanCheckFailed); ctx = new(ctx_placeholder) Context; const char *options = GetEnv(0 ? 
"GORACE" : "TSAN_OPTIONS"); CacheBinaryName(); InitializeFlags(&ctx->flags, options); AvoidCVE_2016_2143(); InitializePlatformEarly(); MaybeReexec(); InitializeAllocator(); ReplaceSystemMalloc(); if (common_flags()->detect_deadlocks) ctx->dd = DDetector::Create(flags()); Processor *proc = ProcCreate(); ProcWire(proc, thr); InitializeInterceptors(); CheckShadowMapping(); InitializePlatform(); InitializeMutex(); InitializeDynamicAnnotations(); InitializeShadowMemory(); InitializeAllocatorLate(); __sanitizer_set_report_path(common_flags()->log_path); InitializeSuppressions(); InitializeLibIgnore(); Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer); StartBackgroundThread(); SetSandboxingCallback(StopBackgroundThread); do { if ((uptr)Verbosity() >= (1)) Printf("***** Running under ThreadSanitizer v2 (pid %d) *****\n", (int)internal_getpid()); } while (0); int tid = ThreadCreate(thr, 0, 0, true); do { __sanitizer::u64 v1 = (u64)((tid)); __sanitizer::u64 v2 = (u64)((0)); if (__builtin_expect(!!(!(v1 == v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/tsan/tsan_rtl.cc", 382, "(" "(tid)" ") " "==" " (" "(0)" ")", v1, v2); } while (false); ThreadStart(thr, tid, internal_getpid()); ctx->initialized = true; Symbolizer::LateInitialize(); if (flags()->stop_on_start) { Printf("ThreadSanitizer is suspended at startup (pid %d)." " Call __tsan_resume().\n", (int)internal_getpid()); while (__tsan_resumed == 0) {} } OnInitialize(); } int Finalize(ThreadState *thr) { bool failed = false; if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1) SleepForMillis(flags()->atexit_sleep_ms); ctx->report_mtx.Lock(); CommonSanitizerReportMutex.Lock(); CommonSanitizerReportMutex.Unlock(); ctx->report_mtx.Unlock(); if (Verbosity()) AllocatorPrintStats(); ThreadFinalize(thr); if (ctx->nreported) { failed = true; Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported); } if (ctx->nmissed_expected) { failed = true; Printf("ThreadSanitizer: missed %d expected races\n", ctx->nmissed_expected); } if (common_flags()->print_suppressions) PrintMatchedSuppressions(); if (flags()->print_benign) PrintMatchedBenignRaces(); failed = OnFinalize(failed); return failed ? 
common_flags()->exitcode : 0; } void ForkBefore(ThreadState *thr, uptr pc) { ctx->thread_registry->Lock(); ctx->report_mtx.Lock(); } void ForkParentAfter(ThreadState *thr, uptr pc) { ctx->report_mtx.Unlock(); ctx->thread_registry->Unlock(); } void ForkChildAfter(ThreadState *thr, uptr pc) { ctx->report_mtx.Unlock(); ctx->thread_registry->Unlock(); uptr nthread = 0; ctx->thread_registry->GetNumberOfThreads(0, 0, &nthread ); do { if ((uptr)Verbosity() >= (1)) Printf("ThreadSanitizer: forked new process with pid %d," " parent had %d threads\n", (int)internal_getpid(), (int)nthread); } while (0); if (nthread == 1) { StartBackgroundThread(); } else { ctx->after_multithreaded_fork = true; thr->ignore_interceptors++; ThreadIgnoreBegin(thr, pc); ThreadIgnoreSyncBegin(thr, pc); } } # 501 "../../../../gcc/libsanitizer/tsan/tsan_rtl.cc" u32 CurrentStackId(ThreadState *thr, uptr pc) { if (!thr->is_inited) return 0; if (pc != 0) { ; thr->shadow_stack_pos[0] = pc; thr->shadow_stack_pos++; } u32 id = StackDepotPut( StackTrace(thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack)); if (pc != 0) thr->shadow_stack_pos--; return id; } void TraceSwitch(ThreadState *thr) { thr->nomalloc++; Trace *thr_trace = ThreadTrace(thr->tid); Lock l(&thr_trace->mtx); unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts(); TraceHeader *hdr = &thr_trace->headers[trace]; hdr->epoch0 = thr->fast_state.epoch(); ObtainCurrentStack(thr, 0, &hdr->stack0); hdr->mset0 = thr->mset; thr->nomalloc--; } Trace *ThreadTrace(int tid) { return (Trace*)GetThreadTraceHeader(tid); } uptr TraceTopPC(ThreadState *thr) { Event *events = (Event*)GetThreadTrace(thr->tid); uptr pc = events[thr->fast_state.GetTracePos()]; return pc; } uptr TraceSize() { return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1)); } uptr TraceParts() { return TraceSize() / kTracePartSize; } extern "C" void __tsan_trace_switch() { TraceSwitch(cur_thread()); } extern "C" void __tsan_report_race() { ReportRace(cur_thread()); } inline __attribute__((always_inline)) Shadow LoadShadow(u64 *p) { u64 raw = atomic_load((atomic_uint64_t*)p, memory_order_relaxed); return Shadow(raw); } inline __attribute__((always_inline)) void StoreShadow(u64 *sp, u64 s) { atomic_store((atomic_uint64_t*)sp, s, memory_order_relaxed); } inline __attribute__((always_inline)) void StoreIfNotYetStored(u64 *sp, u64 *s) { StoreShadow(sp, *s); *s = 0; } inline __attribute__((always_inline)) void HandleRace(ThreadState *thr, u64 *shadow_mem, Shadow cur, Shadow old) { thr->racy_state[0] = cur.raw(); thr->racy_state[1] = old.raw(); thr->racy_shadow_addr = shadow_mem; __asm__ __volatile__("sub $1024, %%rsp;" ".cfi_adjust_cfa_offset " "1024" ";" ".hidden " "__tsan_report_race" "_thunk;" "call " "__tsan_report_race" "_thunk;" "add $1024, %%rsp;" ".cfi_adjust_cfa_offset " "-1024" ";" ::: "memory", "cc");; } static inline bool HappensBefore(Shadow old, ThreadState *thr) { return thr->clock.get(old.TidWithIgnore()) >= old.epoch(); } inline __attribute__((always_inline)) void MemoryAccessImpl1(ThreadState *thr, uptr addr, int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic, u64 *shadow_mem, Shadow cur) { StatInc(thr, StatMop); StatInc(thr, kAccessIsWrite ? 
inline __attribute__((always_inline)) void MemoryAccessImpl1(ThreadState *thr, uptr addr, int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic, u64 *shadow_mem, Shadow cur) {
  StatInc(thr, StatMop);
  StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
  StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
  u64 store_word = cur.raw();
# 617 "../../../../gcc/libsanitizer/tsan/tsan_rtl.cc"
  Shadow old(0);
# 629 "../../../../gcc/libsanitizer/tsan/tsan_rtl.cc"
  int idx = 0;
# 1 "../../../../gcc/libsanitizer/tsan/tsan_update_shadow_word_inl.h" 1
# 14 "../../../../gcc/libsanitizer/tsan/tsan_update_shadow_word_inl.h"
  do {
    StatInc(thr, StatShadowProcessed);
    const unsigned kAccessSize = 1 << kAccessSizeLog;
    u64 *sp = &shadow_mem[idx];
    old = LoadShadow(sp);
    if (old.IsZero()) { StatInc(thr, StatShadowZero); if (store_word) StoreIfNotYetStored(sp, &store_word); break; }
    if (Shadow::Addr0AndSizeAreEqual(cur, old)) {
      StatInc(thr, StatShadowSameSize);
      if (Shadow::TidsAreEqual(old, cur)) { StatInc(thr, StatShadowSameThread); if (old.IsRWWeakerOrEqual(kAccessIsWrite, kIsAtomic)) StoreIfNotYetStored(sp, &store_word); break; }
      StatInc(thr, StatShadowAnotherThread);
      if (HappensBefore(old, thr)) { if (old.IsRWWeakerOrEqual(kAccessIsWrite, kIsAtomic)) StoreIfNotYetStored(sp, &store_word); break; }
      if (old.IsBothReadsOrAtomic(kAccessIsWrite, kIsAtomic)) break;
      goto RACE;
    }
    if (Shadow::TwoRangesIntersect(old, cur, kAccessSize)) {
      StatInc(thr, StatShadowIntersect);
      if (Shadow::TidsAreEqual(old, cur)) { StatInc(thr, StatShadowSameThread); break; }
      StatInc(thr, StatShadowAnotherThread);
      if (old.IsBothReadsOrAtomic(kAccessIsWrite, kIsAtomic)) break;
      if (HappensBefore(old, thr)) break;
      goto RACE;
    }
    StatInc(thr, StatShadowNotIntersect);
    break;
  } while (0);
# 631 "../../../../gcc/libsanitizer/tsan/tsan_rtl.cc" 2
  idx = 1;
# 1 "../../../../gcc/libsanitizer/tsan/tsan_update_shadow_word_inl.h" 1
# 14 "../../../../gcc/libsanitizer/tsan/tsan_update_shadow_word_inl.h"
  do {
    StatInc(thr, StatShadowProcessed);
    const unsigned kAccessSize = 1 << kAccessSizeLog;
    u64 *sp = &shadow_mem[idx];
    old = LoadShadow(sp);
    if (old.IsZero()) { StatInc(thr, StatShadowZero); if (store_word) StoreIfNotYetStored(sp, &store_word); break; }
    if (Shadow::Addr0AndSizeAreEqual(cur, old)) {
      StatInc(thr, StatShadowSameSize);
      if (Shadow::TidsAreEqual(old, cur)) { StatInc(thr, StatShadowSameThread); if (old.IsRWWeakerOrEqual(kAccessIsWrite, kIsAtomic)) StoreIfNotYetStored(sp, &store_word); break; }
      StatInc(thr, StatShadowAnotherThread);
      if (HappensBefore(old, thr)) { if (old.IsRWWeakerOrEqual(kAccessIsWrite, kIsAtomic)) StoreIfNotYetStored(sp, &store_word); break; }
      if (old.IsBothReadsOrAtomic(kAccessIsWrite, kIsAtomic)) break;
      goto RACE;
    }
    if (Shadow::TwoRangesIntersect(old, cur, kAccessSize)) {
      StatInc(thr, StatShadowIntersect);
      if (Shadow::TidsAreEqual(old, cur)) { StatInc(thr, StatShadowSameThread); break; }
      StatInc(thr, StatShadowAnotherThread);
      if (old.IsBothReadsOrAtomic(kAccessIsWrite, kIsAtomic)) break;
      if (HappensBefore(old, thr)) break;
      goto RACE;
    }
    StatInc(thr, StatShadowNotIntersect);
    break;
  } while (0);
# 633 "../../../../gcc/libsanitizer/tsan/tsan_rtl.cc" 2
  idx = 2;
# 1 "../../../../gcc/libsanitizer/tsan/tsan_update_shadow_word_inl.h" 1
# 14 "../../../../gcc/libsanitizer/tsan/tsan_update_shadow_word_inl.h"
  do {
    StatInc(thr, StatShadowProcessed);
    const unsigned kAccessSize = 1 << kAccessSizeLog;
    u64 *sp = &shadow_mem[idx];
    old = LoadShadow(sp);
    if (old.IsZero()) { StatInc(thr, StatShadowZero); if (store_word) StoreIfNotYetStored(sp, &store_word); break; }
    if (Shadow::Addr0AndSizeAreEqual(cur, old)) {
      StatInc(thr, StatShadowSameSize);
      if (Shadow::TidsAreEqual(old, cur)) { StatInc(thr, StatShadowSameThread); if (old.IsRWWeakerOrEqual(kAccessIsWrite, kIsAtomic)) StoreIfNotYetStored(sp, &store_word); break; }
      StatInc(thr, StatShadowAnotherThread);
      if (HappensBefore(old, thr)) { if (old.IsRWWeakerOrEqual(kAccessIsWrite, kIsAtomic)) StoreIfNotYetStored(sp, &store_word); break; }
      if (old.IsBothReadsOrAtomic(kAccessIsWrite, kIsAtomic)) break;
      goto RACE;
    }
    if (Shadow::TwoRangesIntersect(old, cur, kAccessSize)) {
      StatInc(thr, StatShadowIntersect);
      if (Shadow::TidsAreEqual(old, cur)) { StatInc(thr, StatShadowSameThread); break; }
      StatInc(thr, StatShadowAnotherThread);
      if (old.IsBothReadsOrAtomic(kAccessIsWrite, kIsAtomic)) break;
      if (HappensBefore(old, thr)) break;
      goto RACE;
    }
    StatInc(thr, StatShadowNotIntersect);
    break;
  } while (0);
# 635 "../../../../gcc/libsanitizer/tsan/tsan_rtl.cc" 2
  idx = 3;
# 1 "../../../../gcc/libsanitizer/tsan/tsan_update_shadow_word_inl.h" 1
# 14 "../../../../gcc/libsanitizer/tsan/tsan_update_shadow_word_inl.h"
  do {
    StatInc(thr, StatShadowProcessed);
    const unsigned kAccessSize = 1 << kAccessSizeLog;
    u64 *sp = &shadow_mem[idx];
    old = LoadShadow(sp);
    if (old.IsZero()) { StatInc(thr, StatShadowZero); if (store_word) StoreIfNotYetStored(sp, &store_word); break; }
    if (Shadow::Addr0AndSizeAreEqual(cur, old)) {
      StatInc(thr, StatShadowSameSize);
      if (Shadow::TidsAreEqual(old, cur)) { StatInc(thr, StatShadowSameThread); if (old.IsRWWeakerOrEqual(kAccessIsWrite, kIsAtomic)) StoreIfNotYetStored(sp, &store_word); break; }
      StatInc(thr, StatShadowAnotherThread);
      if (HappensBefore(old, thr)) { if (old.IsRWWeakerOrEqual(kAccessIsWrite, kIsAtomic)) StoreIfNotYetStored(sp, &store_word); break; }
      if (old.IsBothReadsOrAtomic(kAccessIsWrite, kIsAtomic)) break;
      goto RACE;
    }
    if (Shadow::TwoRangesIntersect(old, cur, kAccessSize)) {
      StatInc(thr, StatShadowIntersect);
      if (Shadow::TidsAreEqual(old, cur)) { StatInc(thr, StatShadowSameThread); break; }
      StatInc(thr, StatShadowAnotherThread);
      if (old.IsBothReadsOrAtomic(kAccessIsWrite, kIsAtomic)) break;
      if (HappensBefore(old, thr)) break;
      goto RACE;
    }
    StatInc(thr, StatShadowNotIntersect);
    break;
  } while (0);
# 637 "../../../../gcc/libsanitizer/tsan/tsan_rtl.cc" 2
  if (__builtin_expect(!!(store_word == 0), 1))
    return;
  StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
  StatInc(thr, StatShadowReplace);
  return;
RACE:
  HandleRace(thr, shadow_mem, cur, old);
  return;
}

void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr, int size, bool kAccessIsWrite, bool kIsAtomic) {
  while (size) {
    int size1 = 1;
    int kAccessSizeLog = kSizeLog1;
    if (size >= 8 && (addr & ~7) == ((addr + 7) & ~7)) {
      size1 = 8;
      kAccessSizeLog = kSizeLog8;
    } else if (size >= 4 && (addr & ~7) == ((addr + 3) & ~7)) {
      size1 = 4;
      kAccessSizeLog = kSizeLog4;
    } else if (size >= 2 && (addr & ~7) == ((addr + 1) & ~7)) {
      size1 = 2;
      kAccessSizeLog = kSizeLog2;
    }
    MemoryAccess(thr, pc, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic);
    addr += size1;
    size -= size1;
  }
}

inline __attribute__((always_inline)) bool ContainsSameAccessSlow(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
  Shadow cur(a);
  for (uptr i = 0; i < kShadowCnt; i++) {
    Shadow old(LoadShadow(&s[i]));
    if (Shadow::Addr0AndSizeAreEqual(cur, old) &&
        old.TidWithIgnore() == cur.TidWithIgnore() &&
        old.epoch() > sync_epoch &&
        old.IsAtomic() == cur.IsAtomic() &&
        old.IsRead() <= cur.IsRead())
      return true;
  }
  return false;
}
# 746 "../../../../gcc/libsanitizer/tsan/tsan_rtl.cc"
inline __attribute__((always_inline)) bool ContainsSameAccess(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
# 756 "../../../../gcc/libsanitizer/tsan/tsan_rtl.cc"
  return ContainsSameAccessSlow(s, a, sync_epoch, is_write);
}
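// --- Editorial note (not part of the preprocessed TU) -----------------------
// Each application cell keeps kShadowCnt shadow words; the four inlined
// copies of tsan_update_shadow_word_inl.h above (idx = 0..3) scan them in
// turn. If no slot absorbed the new access, the tail of MemoryAccessImpl1
// evicts a pseudo-randomly chosen slot, cur.epoch() % kShadowCnt, and stores
// the new shadow word there. The gap in the line markers inside
// ContainsSameAccess (# 746 -> # 756) suggests a vectorized fast path was
// compiled out here, leaving only the scalar ContainsSameAccessSlow.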
inline __attribute__((always_inline)) __attribute__((used)) void MemoryAccess(ThreadState *thr, uptr pc, uptr addr, int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic) {
  u64 *shadow_mem = (u64*)MemToShadow(addr);
  ;
# 781 "../../../../gcc/libsanitizer/tsan/tsan_rtl.cc"
  if (!0 && *shadow_mem == kShadowRodata) {
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopRodata);
    return;
  }
  FastState fast_state = thr->fast_state;
  if (fast_state.GetIgnoreBit()) {
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopIgnored);
    return;
  }
  Shadow cur(fast_state);
  cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
  cur.SetWrite(kAccessIsWrite);
  cur.SetAtomic(kIsAtomic);
  if (__builtin_expect(!!(ContainsSameAccess(shadow_mem, cur.raw(), thr->fast_synch_epoch, kAccessIsWrite)), 1)) {
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopSame);
    return;
  }
  if (kCollectHistory) {
    fast_state.IncrementEpoch();
    thr->fast_state = fast_state;
    TraceAddEvent(thr, fast_state, EventTypeMop, pc);
    cur.IncrementEpoch();
  }
  MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic, shadow_mem, cur);
}
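// --- Editorial note (not part of the preprocessed TU) -----------------------
// MemoryAccess is the instrumentation hot path, ordered cheapest-first:
// (1) accesses to .rodata (shadow == kShadowRodata) are dismissed outright,
// (2) the per-thread ignore bit short-circuits everything,
// (3) ContainsSameAccess skips repeats of an access this thread already
//     published since its last synchronization (the expected common case,
//     hence the __builtin_expect hint), and only then
// (4) the epoch is advanced, the event traced, and the full shadow-word
//     state machine in MemoryAccessImpl1 runs.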
inline __attribute__((always_inline)) __attribute__((used)) void MemoryAccessImpl(ThreadState *thr, uptr addr, int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic, u64 *shadow_mem, Shadow cur) {
  if (__builtin_expect(!!(ContainsSameAccess(shadow_mem, cur.raw(), thr->fast_synch_epoch, kAccessIsWrite)), 1)) {
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopSame);
    return;
  }
  MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic, shadow_mem, cur);
}

static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size, u64 val) {
  (void)thr;
  (void)pc;
  if (size == 0)
    return;
  uptr offset = addr % kShadowCell;
  if (offset) {
    offset = kShadowCell - offset;
    if (size <= offset)
      return;
    addr += offset;
    size -= offset;
  }
  ;
  if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
    return;
  size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
  if (0 || size < common_flags()->clear_shadow_mmap_threshold) {
    u64 *p = (u64*)MemToShadow(addr);
    do { __sanitizer::u64 v1 = (u64)((IsShadowMem((uptr)p))); __sanitizer::u64 v2 = (u64)(0); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/tsan/tsan_rtl.cc", 870, "(" "(IsShadowMem((uptr)p))" ") " "!=" " (" "0" ")", v1, v2); } while (false);
    do { __sanitizer::u64 v1 = (u64)((IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)))); __sanitizer::u64 v2 = (u64)(0); if (__builtin_expect(!!(!(v1 != v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/tsan/tsan_rtl.cc", 871, "(" "(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)))" ") " "!=" " (" "0" ")", v1, v2); } while (false);
    for (uptr i = 0; i < size / kShadowCell * kShadowCnt;) {
      p[i++] = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        p[i++] = 0;
    }
  } else {
    const uptr kPageSize = GetPageSizeCached();
    u64 *begin = (u64*)MemToShadow(addr);
    u64 *end = begin + size / kShadowCell * kShadowCnt;
    u64 *p = begin;
    while ((p < begin + kPageSize / kShadowSize / 2) || ((uptr)p % kPageSize)) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
    u64 *p1 = p;
    p = RoundDown(end, kPageSize);
    UnmapOrDie((void*)p1, (uptr)p - (uptr)p1);
    MmapFixedNoReserve((uptr)p1, (uptr)p - (uptr)p1);
    while (p < end) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
  }
}

void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  MemoryRangeSet(thr, pc, addr, size, 0);
}

void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  if (size > 1024)
    size = 1024;
  do { __sanitizer::u64 v1 = (u64)((thr->is_freeing)); __sanitizer::u64 v2 = (u64)((false)); if (__builtin_expect(!!(!(v1 == v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/tsan/tsan_rtl.cc", 914, "(" "(thr->is_freeing)" ") " "==" " (" "(false)" ")", v1, v2); } while (false);
  thr->is_freeing = true;
  MemoryAccessRange(thr, pc, addr, size, true);
  thr->is_freeing = false;
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
  }
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.MarkAsFreed();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
  }
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}
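// --- Editorial note (not part of the preprocessed TU) -----------------------
// For ranges at or above clear_shadow_mmap_threshold, MemoryRangeSet avoids
// touching every shadow word: it fills by hand up to a page boundary, then
// unmaps and re-mmaps the page-aligned middle (fresh anonymous pages read
// back as zero shadow, i.e. "no history"), and fills the tail by hand. Note
// that on this path only the unaligned head and tail actually receive val;
// the middle is effectively reset to zero.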
inline __attribute__((always_inline)) __attribute__((used)) void FuncEntry(ThreadState *thr, uptr pc) {
  StatInc(thr, StatFuncEnter);
  ;
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);
  }
  ; ;
  thr->shadow_stack_pos[0] = pc;
  thr->shadow_stack_pos++;
}

inline __attribute__((always_inline)) __attribute__((used)) void FuncExit(ThreadState *thr) {
  StatInc(thr, StatFuncExit);
  ;
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);
  }
  ; ;
  thr->shadow_stack_pos--;
}

void ThreadIgnoreBegin(ThreadState *thr, uptr pc) {
  ;
  thr->ignore_reads_and_writes++;
  do { __sanitizer::u64 v1 = (u64)((thr->ignore_reads_and_writes)); __sanitizer::u64 v2 = (u64)((0)); if (__builtin_expect(!!(!(v1 > v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/tsan/tsan_rtl.cc", 983, "(" "(thr->ignore_reads_and_writes)" ") " ">" " (" "(0)" ")", v1, v2); } while (false);
  thr->fast_state.SetIgnoreBit();
  if (!ctx->after_multithreaded_fork)
    thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
}

void ThreadIgnoreEnd(ThreadState *thr, uptr pc) {
  ;
  thr->ignore_reads_and_writes--;
  do { __sanitizer::u64 v1 = (u64)((thr->ignore_reads_and_writes)); __sanitizer::u64 v2 = (u64)((0)); if (__builtin_expect(!!(!(v1 >= v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/tsan/tsan_rtl.cc", 994, "(" "(thr->ignore_reads_and_writes)" ") " ">=" " (" "(0)" ")", v1, v2); } while (false);
  if (thr->ignore_reads_and_writes == 0) {
    thr->fast_state.ClearIgnoreBit();
    thr->mop_ignore_set.Reset();
  }
}

void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc) {
  ;
  thr->ignore_sync++;
  do { __sanitizer::u64 v1 = (u64)((thr->ignore_sync)); __sanitizer::u64 v2 = (u64)((0)); if (__builtin_expect(!!(!(v1 > v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/tsan/tsan_rtl.cc", 1006, "(" "(thr->ignore_sync)" ") " ">" " (" "(0)" ")", v1, v2); } while (false);
  if (!ctx->after_multithreaded_fork)
    thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
}

void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc) {
  ;
  thr->ignore_sync--;
  do { __sanitizer::u64 v1 = (u64)((thr->ignore_sync)); __sanitizer::u64 v2 = (u64)((0)); if (__builtin_expect(!!(!(v1 >= v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/tsan/tsan_rtl.cc", 1016, "(" "(thr->ignore_sync)" ") " ">=" " (" "(0)" ")", v1, v2); } while (false);
  if (thr->ignore_sync == 0)
    thr->sync_ignore_set.Reset();
}

bool MD5Hash::operator==(const MD5Hash &other) const {
  return hash[0] == other.hash[0] && hash[1] == other.hash[1];
}

void build_consistency_release() {}
void build_consistency_nostats() {}
}
# 1 "../../../../gcc/libsanitizer/tsan/tsan_interface_inl.h" 1
# 12 "../../../../gcc/libsanitizer/tsan/tsan_interface_inl.h"
# 1 "../../../../gcc/libsanitizer/tsan/tsan_interface.h" 1
# 18 "../../../../gcc/libsanitizer/tsan/tsan_interface.h"
using __sanitizer::uptr;
extern "C" {
__attribute__((visibility("default"))) void __tsan_init();
__attribute__((visibility("default"))) void __tsan_read1(void *addr);
__attribute__((visibility("default"))) void __tsan_read2(void *addr);
__attribute__((visibility("default"))) void __tsan_read4(void *addr);
__attribute__((visibility("default"))) void __tsan_read8(void *addr);
__attribute__((visibility("default"))) void __tsan_read16(void *addr);
__attribute__((visibility("default"))) void __tsan_write1(void *addr);
__attribute__((visibility("default"))) void __tsan_write2(void *addr);
__attribute__((visibility("default"))) void __tsan_write4(void *addr);
__attribute__((visibility("default"))) void __tsan_write8(void *addr);
__attribute__((visibility("default"))) void __tsan_write16(void *addr);
__attribute__((visibility("default"))) void __tsan_unaligned_read2(const void *addr);
__attribute__((visibility("default"))) void __tsan_unaligned_read4(const void *addr);
__attribute__((visibility("default"))) void __tsan_unaligned_read8(const void *addr);
__attribute__((visibility("default"))) void __tsan_unaligned_read16(const void *addr);
__attribute__((visibility("default"))) void __tsan_unaligned_write2(void *addr);
__attribute__((visibility("default"))) void __tsan_unaligned_write4(void *addr);
__attribute__((visibility("default"))) void __tsan_unaligned_write8(void *addr);
__attribute__((visibility("default"))) void __tsan_unaligned_write16(void *addr);
__attribute__((visibility("default"))) void __tsan_read1_pc(void *addr, void *pc);
__attribute__((visibility("default"))) void __tsan_read2_pc(void *addr, void *pc);
__attribute__((visibility("default"))) void __tsan_read4_pc(void *addr, void *pc);
__attribute__((visibility("default"))) void __tsan_read8_pc(void *addr, void *pc);
__attribute__((visibility("default"))) void __tsan_read16_pc(void *addr, void *pc);
__attribute__((visibility("default"))) void __tsan_write1_pc(void *addr, void *pc);
__attribute__((visibility("default"))) void __tsan_write2_pc(void *addr, void *pc);
__attribute__((visibility("default"))) void __tsan_write4_pc(void *addr, void *pc);
__attribute__((visibility("default"))) void __tsan_write8_pc(void *addr, void *pc);
__attribute__((visibility("default"))) void __tsan_write16_pc(void *addr, void *pc);
__attribute__((visibility("default"))) void __tsan_vptr_read(void **vptr_p);
__attribute__((visibility("default"))) void __tsan_vptr_update(void **vptr_p, void *new_val);
__attribute__((visibility("default"))) void __tsan_func_entry(void *call_pc);
__attribute__((visibility("default"))) void __tsan_func_exit();
__attribute__((visibility("default"))) void __tsan_read_range(void *addr, unsigned long size);
__attribute__((visibility("default"))) void __tsan_write_range(void *addr, unsigned long size);
__attribute__((visibility("default"))) void __tsan_on_report(void *report);
__attribute__((visibility("default"))) void *__tsan_get_current_report();
__attribute__((visibility("default"))) int __tsan_get_report_data(void *report, const char **description, int *count, int *stack_count, int *mop_count, int *loc_count, int *mutex_count, int *thread_count, int *unique_tid_count, void **sleep_trace, uptr trace_size);
__attribute__((visibility("default"))) int __tsan_get_report_stack(void *report, uptr idx, void **trace, uptr trace_size);
__attribute__((visibility("default"))) int __tsan_get_report_mop(void *report, uptr idx, int *tid, void **addr, int *size, int *write, int *atomic, void **trace, uptr trace_size);
__attribute__((visibility("default"))) int __tsan_get_report_loc(void *report, uptr idx, const char **type, void **addr, uptr *start, uptr *size, int *tid, int *fd, int *suppressable, void **trace, uptr trace_size);
__attribute__((visibility("default"))) int __tsan_get_report_mutex(void *report, uptr idx, uptr *mutex_id, void **addr, int *destroyed, void **trace, uptr trace_size);
__attribute__((visibility("default"))) int __tsan_get_report_thread(void *report, uptr idx, int *tid, uptr *os_id, int *running, const char **name, int *parent_tid, void **trace, uptr trace_size);
__attribute__((visibility("default"))) int __tsan_get_report_unique_tid(void *report, uptr idx, int *tid);
}
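// --- Editorial note (not part of the preprocessed TU) -----------------------
// For orientation: the compiler instruments application code with calls to
// the flat C ABI declared above. For a 4-byte store such as `*x = 42;`, the
// emitted code behaves roughly like the sketch below (illustrative only and
// guarded by #if 0; the real call sequence is produced by the compiler's
// tsan instrumentation pass, not written by hand):
#if 0
void instrumented_store_sketch(int *x) {
  __tsan_func_entry(__builtin_return_address(0));  // on function entry
  __tsan_write4(x);                                // before the 4-byte store
  *x = 42;
  __tsan_func_exit();                              // on function exit
}
#endif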
"C" { __attribute__((visibility("default"))) a8 __tsan_atomic8_load(const volatile a8 *a, morder mo); __attribute__((visibility("default"))) a16 __tsan_atomic16_load(const volatile a16 *a, morder mo); __attribute__((visibility("default"))) a32 __tsan_atomic32_load(const volatile a32 *a, morder mo); __attribute__((visibility("default"))) a64 __tsan_atomic64_load(const volatile a64 *a, morder mo); __attribute__((visibility("default"))) a128 __tsan_atomic128_load(const volatile a128 *a, morder mo); __attribute__((visibility("default"))) void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo); __attribute__((visibility("default"))) void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo); __attribute__((visibility("default"))) void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo); __attribute__((visibility("default"))) void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo); __attribute__((visibility("default"))) void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo); __attribute__((visibility("default"))) a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo); __attribute__((visibility("default"))) a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo); __attribute__((visibility("default"))) a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo); __attribute__((visibility("default"))) a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo); __attribute__((visibility("default"))) a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo); __attribute__((visibility("default"))) a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo); __attribute__((visibility("default"))) a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo); __attribute__((visibility("default"))) a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo); __attribute__((visibility("default"))) a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo); __attribute__((visibility("default"))) a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo); __attribute__((visibility("default"))) a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo); __attribute__((visibility("default"))) a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo); __attribute__((visibility("default"))) a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo); __attribute__((visibility("default"))) a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo); __attribute__((visibility("default"))) a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo); __attribute__((visibility("default"))) a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo); __attribute__((visibility("default"))) a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo); __attribute__((visibility("default"))) a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo); __attribute__((visibility("default"))) a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo); __attribute__((visibility("default"))) a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo); __attribute__((visibility("default"))) a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo); __attribute__((visibility("default"))) a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo); __attribute__((visibility("default"))) a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo); __attribute__((visibility("default"))) a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo); 
__attribute__((visibility("default"))) a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo); __attribute__((visibility("default"))) a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo); __attribute__((visibility("default"))) a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo); __attribute__((visibility("default"))) a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo); __attribute__((visibility("default"))) a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo); __attribute__((visibility("default"))) a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo); __attribute__((visibility("default"))) a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo); __attribute__((visibility("default"))) a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo); __attribute__((visibility("default"))) a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo); __attribute__((visibility("default"))) a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo); __attribute__((visibility("default"))) a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo); __attribute__((visibility("default"))) int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v, morder mo, morder fmo); __attribute__((visibility("default"))) int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v, morder mo, morder fmo); __attribute__((visibility("default"))) int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v, morder mo, morder fmo); __attribute__((visibility("default"))) int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v, morder mo, morder fmo); __attribute__((visibility("default"))) int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v, morder mo, morder fmo); __attribute__((visibility("default"))) int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v, morder mo, morder fmo); __attribute__((visibility("default"))) int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v, morder mo, morder fmo); __attribute__((visibility("default"))) int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v, morder mo, morder fmo); __attribute__((visibility("default"))) int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v, morder mo, morder fmo); __attribute__((visibility("default"))) int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v, morder mo, morder fmo); __attribute__((visibility("default"))) a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v, morder mo, morder fmo); __attribute__((visibility("default"))) a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v, morder mo, morder fmo); __attribute__((visibility("default"))) a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v, morder mo, morder fmo); __attribute__((visibility("default"))) a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v, morder mo, morder fmo); __attribute__((visibility("default"))) a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v, morder mo, morder fmo); __attribute__((visibility("default"))) void __tsan_atomic_thread_fence(morder mo); __attribute__((visibility("default"))) void __tsan_atomic_signal_fence(morder mo); __attribute__((visibility("default"))) void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a); __attribute__((visibility("default"))) void 
__attribute__((visibility("default"))) void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
__attribute__((visibility("default"))) void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
__attribute__((visibility("default"))) void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
__attribute__((visibility("default"))) void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
__attribute__((visibility("default"))) void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
__attribute__((visibility("default"))) void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
__attribute__((visibility("default"))) void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
__attribute__((visibility("default"))) void __tsan_go_atomic32_compare_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
__attribute__((visibility("default"))) void __tsan_go_atomic64_compare_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
}
}
# 13 "../../../../gcc/libsanitizer/tsan/tsan_interface_inl.h" 2
using namespace __tsan;

void __tsan_read1(void *addr) { MemoryRead(cur_thread(), ((uptr)__builtin_return_address(0)), (uptr)addr, kSizeLog1); }
void __tsan_read2(void *addr) { MemoryRead(cur_thread(), ((uptr)__builtin_return_address(0)), (uptr)addr, kSizeLog2); }
void __tsan_read4(void *addr) { MemoryRead(cur_thread(), ((uptr)__builtin_return_address(0)), (uptr)addr, kSizeLog4); }
void __tsan_read8(void *addr) { MemoryRead(cur_thread(), ((uptr)__builtin_return_address(0)), (uptr)addr, kSizeLog8); }
void __tsan_write1(void *addr) { MemoryWrite(cur_thread(), ((uptr)__builtin_return_address(0)), (uptr)addr, kSizeLog1); }
void __tsan_write2(void *addr) { MemoryWrite(cur_thread(), ((uptr)__builtin_return_address(0)), (uptr)addr, kSizeLog2); }
void __tsan_write4(void *addr) { MemoryWrite(cur_thread(), ((uptr)__builtin_return_address(0)), (uptr)addr, kSizeLog4); }
void __tsan_write8(void *addr) { MemoryWrite(cur_thread(), ((uptr)__builtin_return_address(0)), (uptr)addr, kSizeLog8); }

void __tsan_read1_pc(void *addr, void *pc) { MemoryRead(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog1); }
void __tsan_read2_pc(void *addr, void *pc) { MemoryRead(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog2); }
void __tsan_read4_pc(void *addr, void *pc) { MemoryRead(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog4); }
void __tsan_read8_pc(void *addr, void *pc) { MemoryRead(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog8); }
void __tsan_write1_pc(void *addr, void *pc) { MemoryWrite(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog1); }
void __tsan_write2_pc(void *addr, void *pc) { MemoryWrite(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog2); }
void __tsan_write4_pc(void *addr, void *pc) { MemoryWrite(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog4); }
void __tsan_write8_pc(void *addr, void *pc) { MemoryWrite(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog8); }

void __tsan_vptr_update(void **vptr_p, void *new_val) {
  do { __sanitizer::u64 v1 = (u64)((sizeof(vptr_p))); __sanitizer::u64 v2 = (u64)((8)); if (__builtin_expect(!!(!(v1 == v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/tsan/tsan_interface_inl.h", 84, "(" "(sizeof(vptr_p))" ") " "==" " (" "(8)" ")", v1, v2); } while (false);
  if (*vptr_p != new_val) {
    ThreadState *thr = cur_thread();
    thr->is_vptr_access = true;
    MemoryWrite(thr, ((uptr)__builtin_return_address(0)), (uptr)vptr_p, kSizeLog8);
    thr->is_vptr_access = false;
  }
}
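// --- Editorial note (not part of the preprocessed TU) -----------------------
// The vptr hooks flag the access with thr->is_vptr_access around the plain
// MemoryWrite/MemoryRead so the reporting path can classify races on an
// object's vtable pointer separately (the classic "race on vptr during
// construction/destruction" report). __tsan_vptr_update also skips the
// write entirely when the stored vptr is unchanged.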
void __tsan_vptr_read(void **vptr_p) {
  do { __sanitizer::u64 v1 = (u64)((sizeof(vptr_p))); __sanitizer::u64 v2 = (u64)((8)); if (__builtin_expect(!!(!(v1 == v2)), 0)) __sanitizer::CheckFailed("../../../../gcc/libsanitizer/tsan/tsan_interface_inl.h", 94, "(" "(sizeof(vptr_p))" ") " "==" " (" "(8)" ")", v1, v2); } while (false);
  ThreadState *thr = cur_thread();
  thr->is_vptr_access = true;
  MemoryRead(thr, ((uptr)__builtin_return_address(0)), (uptr)vptr_p, kSizeLog8);
  thr->is_vptr_access = false;
}

void __tsan_func_entry(void *pc) {
  FuncEntry(cur_thread(), (uptr)pc);
}

void __tsan_func_exit() {
  FuncExit(cur_thread());
}

void __tsan_read_range(void *addr, uptr size) {
  MemoryAccessRange(cur_thread(), ((uptr)__builtin_return_address(0)), (uptr)addr, size, false);
}

void __tsan_write_range(void *addr, uptr size) {
  MemoryAccessRange(cur_thread(), ((uptr)__builtin_return_address(0)), (uptr)addr, size, true);
}
# 1044 "../../../../gcc/libsanitizer/tsan/tsan_rtl.cc" 2