Date: Thu, 20 Apr 2017 21:20:59 +0000 (UTC)
From: Dimitry Andric <dim@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-vendor@freebsd.org
Subject: svn commit: r317222 - in vendor/compiler-rt/dist: include/sanitizer lib/asan lib/asan/tests lib/dfsan lib/lsan lib/sanitizer_common lib/scudo lib/tsan/rtl lib/ubsan lib/xray test/asan/TestCases tes...
Message-ID: <201704202120.v3KLKxsw002764@repo.freebsd.org>
Author: dim
Date: Thu Apr 20 21:20:59 2017
New Revision: 317222
URL: https://svnweb.freebsd.org/changeset/base/317222

Log:
  Vendor import of compiler-rt trunk r300890:
  https://llvm.org/svn/llvm-project/compiler-rt/trunk@300890

Added:
  vendor/compiler-rt/dist/test/asan/TestCases/Posix/strchr.c   (contents, props changed)
Deleted:
  vendor/compiler-rt/dist/test/asan/TestCases/Posix/coverage-caller-callee.cc
  vendor/compiler-rt/dist/test/asan/TestCases/coverage-caller-callee-total-count.cc
  vendor/compiler-rt/dist/test/asan/TestCases/coverage-reset.cc
  vendor/compiler-rt/dist/test/asan/TestCases/coverage-tracing.cc
Modified:
  vendor/compiler-rt/dist/include/sanitizer/coverage_interface.h
  vendor/compiler-rt/dist/lib/asan/asan_thread.cc
  vendor/compiler-rt/dist/lib/asan/asan_thread.h
  vendor/compiler-rt/dist/lib/asan/tests/asan_test_main.cc
  vendor/compiler-rt/dist/lib/dfsan/done_abilist.txt
  vendor/compiler-rt/dist/lib/lsan/lsan_common.cc
  vendor/compiler-rt/dist/lib/lsan/lsan_common.h
  vendor/compiler-rt/dist/lib/lsan/lsan_common_linux.cc
  vendor/compiler-rt/dist/lib/lsan/lsan_common_mac.cc
  vendor/compiler-rt/dist/lib/lsan/lsan_thread.cc
  vendor/compiler-rt/dist/lib/lsan/lsan_thread.h
  vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_common.cc
  vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_common.h
  vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_common_interceptors.inc
  vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_coverage_interface.inc
  vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_coverage_libcdep.cc
  vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_internal_defs.h
  vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_linux.cc
  vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_linux_libcdep.cc
  vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_mac.cc
  vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_platform.h
  vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_procmaps_common.cc
  vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_procmaps_mac.cc
  vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_stoptheworld.h
  vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc
  vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_stoptheworld_mac.cc
  vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_thread_registry.cc
  vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_thread_registry.h
  vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_win.cc
  vendor/compiler-rt/dist/lib/scudo/scudo_allocator.cpp
  vendor/compiler-rt/dist/lib/scudo/scudo_allocator.h
  vendor/compiler-rt/dist/lib/scudo/scudo_allocator_secondary.h
  vendor/compiler-rt/dist/lib/scudo/scudo_utils.cpp
  vendor/compiler-rt/dist/lib/scudo/scudo_utils.h
  vendor/compiler-rt/dist/lib/tsan/rtl/tsan_debugging.cc
  vendor/compiler-rt/dist/lib/tsan/rtl/tsan_interface.h
  vendor/compiler-rt/dist/lib/tsan/rtl/tsan_report.h
  vendor/compiler-rt/dist/lib/tsan/rtl/tsan_rtl.h
  vendor/compiler-rt/dist/lib/tsan/rtl/tsan_rtl_thread.cc
  vendor/compiler-rt/dist/lib/ubsan/ubsan_flags.cc
  vendor/compiler-rt/dist/lib/xray/xray_fdr_logging.cc
  vendor/compiler-rt/dist/lib/xray/xray_inmemory_log.cc
  vendor/compiler-rt/dist/lib/xray/xray_trampoline_x86_64.S
  vendor/compiler-rt/dist/lib/xray/xray_x86_64.cc
  vendor/compiler-rt/dist/test/asan/TestCases/coverage-levels.cc
  vendor/compiler-rt/dist/test/lsan/TestCases/Linux/cleanup_in_tsd_destructor.c
  vendor/compiler-rt/dist/test/lsan/TestCases/Linux/disabler_in_tsd_destructor.c
  vendor/compiler-rt/dist/test/lsan/TestCases/Linux/use_tls_dynamic.cc
  vendor/compiler-rt/dist/test/lsan/TestCases/Linux/use_tls_pthread_specific_dynamic.cc
  vendor/compiler-rt/dist/test/lsan/TestCases/Linux/use_tls_pthread_specific_static.cc
  vendor/compiler-rt/dist/test/lsan/TestCases/Linux/use_tls_static.cc
  vendor/compiler-rt/dist/test/lsan/TestCases/disabler.c
  vendor/compiler-rt/dist/test/lsan/TestCases/disabler.cc
  vendor/compiler-rt/dist/test/lsan/TestCases/do_leak_check_override.cc
  vendor/compiler-rt/dist/test/lsan/TestCases/high_allocator_contention.cc
  vendor/compiler-rt/dist/test/lsan/TestCases/ignore_object.c
  vendor/compiler-rt/dist/test/lsan/TestCases/ignore_object_errors.cc
  vendor/compiler-rt/dist/test/lsan/TestCases/large_allocation_leak.cc
  vendor/compiler-rt/dist/test/lsan/TestCases/leak_check_at_exit.cc
  vendor/compiler-rt/dist/test/lsan/TestCases/leak_check_before_thread_started.cc
  vendor/compiler-rt/dist/test/lsan/TestCases/link_turned_off.cc
  vendor/compiler-rt/dist/test/lsan/TestCases/pointer_to_self.cc
  vendor/compiler-rt/dist/test/lsan/TestCases/print_suppressions.cc
  vendor/compiler-rt/dist/test/lsan/TestCases/recoverable_leak_check.cc
  vendor/compiler-rt/dist/test/lsan/TestCases/register_root_region.cc
  vendor/compiler-rt/dist/test/lsan/TestCases/stale_stack_leak.cc
  vendor/compiler-rt/dist/test/lsan/TestCases/suppressions_default.cc
  vendor/compiler-rt/dist/test/lsan/TestCases/suppressions_file.cc
  vendor/compiler-rt/dist/test/lsan/TestCases/swapcontext.cc
  vendor/compiler-rt/dist/test/lsan/TestCases/use_after_return.cc
  vendor/compiler-rt/dist/test/lsan/TestCases/use_globals_initialized.cc
  vendor/compiler-rt/dist/test/lsan/TestCases/use_globals_uninitialized.cc
  vendor/compiler-rt/dist/test/lsan/TestCases/use_poisoned_asan.cc
  vendor/compiler-rt/dist/test/lsan/TestCases/use_registers.cc
  vendor/compiler-rt/dist/test/lsan/TestCases/use_stacks.cc
  vendor/compiler-rt/dist/test/lsan/TestCases/use_stacks_threaded.cc
  vendor/compiler-rt/dist/test/lsan/TestCases/use_unaligned.cc
  vendor/compiler-rt/dist/test/lsan/lit.common.cfg
  vendor/compiler-rt/dist/test/sanitizer_common/lit.common.cfg
  vendor/compiler-rt/dist/test/tsan/Darwin/main_tid.mm
  vendor/compiler-rt/dist/test/tsan/debug_alloc_stack.cc
  vendor/compiler-rt/dist/test/tsan/debugging.cc
  vendor/compiler-rt/dist/test/xray/TestCases/Linux/fdr-mode.cc
  vendor/compiler-rt/dist/test/xray/TestCases/Linux/fdr-thread-order.cc
  vendor/compiler-rt/dist/test/xray/lit.site.cfg.in

Modified: vendor/compiler-rt/dist/include/sanitizer/coverage_interface.h
==============================================================================
--- vendor/compiler-rt/dist/include/sanitizer/coverage_interface.h  Thu Apr 20 21:20:56 2017  (r317221)
+++ vendor/compiler-rt/dist/include/sanitizer/coverage_interface.h  Thu Apr 20 21:20:59 2017  (r317222)
@@ -35,35 +35,6 @@ extern "C" {
   // Get the number of unique covered blocks (or edges).
   // This can be useful for coverage-directed in-process fuzzers.
   uintptr_t __sanitizer_get_total_unique_coverage();
-  // Get the number of unique indirect caller-callee pairs.
-  uintptr_t __sanitizer_get_total_unique_caller_callee_pairs();
-
-  // Reset the basic-block (edge) coverage to the initial state.
-  // Useful for in-process fuzzing to start collecting coverage from scratch.
-  // Experimental, will likely not work for multi-threaded process.
-  void __sanitizer_reset_coverage();
-  // Set *data to the array of covered PCs and return the size of that array.
-  // Some of the entries in *data will be zero.
-  uintptr_t __sanitizer_get_coverage_guards(uintptr_t **data);
-
-  // The coverage instrumentation may optionally provide imprecise counters.
-  // Rather than exposing the counter values to the user we instead map
-  // the counters to a bitset.
-  // Every counter is associated with 8 bits in the bitset.
-  // We define 8 value ranges: 1, 2, 3, 4-7, 8-15, 16-31, 32-127, 128+
-  // The i-th bit is set to 1 if the counter value is in the i-th range.
-  // This counter-based coverage implementation is *not* thread-safe.
-
-  // Returns the number of registered coverage counters.
-  uintptr_t __sanitizer_get_number_of_counters();
-  // Updates the counter 'bitset', clears the counters and returns the number of
-  // new bits in 'bitset'.
-  // If 'bitset' is nullptr, only clears the counters.
-  // Otherwise 'bitset' should be at least
-  // __sanitizer_get_number_of_counters bytes long and 8-aligned.
-  uintptr_t
-  __sanitizer_update_counter_bitset_and_clear_counters(uint8_t *bitset);
-
 #ifdef __cplusplus
 }  // extern "C"
 #endif

Modified: vendor/compiler-rt/dist/lib/asan/asan_thread.cc
==============================================================================
--- vendor/compiler-rt/dist/lib/asan/asan_thread.cc  Thu Apr 20 21:20:56 2017  (r317221)
+++ vendor/compiler-rt/dist/lib/asan/asan_thread.cc  Thu Apr 20 21:20:59 2017  (r317222)
@@ -237,7 +237,7 @@ void AsanThread::Init() {
 }
 
 thread_return_t AsanThread::ThreadStart(
-    uptr os_id, atomic_uintptr_t *signal_thread_is_registered) {
+    tid_t os_id, atomic_uintptr_t *signal_thread_is_registered) {
   Init();
   asanThreadRegistry().StartThread(tid(), os_id, /*workerthread*/ false,
                                    nullptr);
@@ -395,7 +395,7 @@ void EnsureMainThreadIDIsCorrect() {
     context->os_id = GetTid();
 }
 
-__asan::AsanThread *GetAsanThreadByOsIDLocked(uptr os_id) {
+__asan::AsanThread *GetAsanThreadByOsIDLocked(tid_t os_id) {
   __asan::AsanThreadContext *context = static_cast<__asan::AsanThreadContext *>(
       __asan::asanThreadRegistry().FindThreadContextByOsIDLocked(os_id));
   if (!context) return nullptr;
@@ -405,7 +405,7 @@ __asan::AsanThread *GetAsanThreadByOsIDL
 // --- Implementation of LSan-specific functions --- {{{1
 namespace __lsan {
 
-bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end,
+bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
                            uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
                            uptr *cache_end, DTLS **dtls) {
   __asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
@@ -421,7 +421,7 @@ bool GetThreadRangesLocked(uptr os_id, u
   return true;
 }
 
-void ForEachExtraStackRange(uptr os_id, RangeIteratorCallback callback,
+void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
                             void *arg) {
   __asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
   if (t && t->has_fake_stack())

Modified: vendor/compiler-rt/dist/lib/asan/asan_thread.h
==============================================================================
--- vendor/compiler-rt/dist/lib/asan/asan_thread.h  Thu Apr 20 21:20:56 2017  (r317221)
+++ vendor/compiler-rt/dist/lib/asan/asan_thread.h  Thu Apr 20 21:20:59 2017  (r317222)
@@ -63,7 +63,7 @@ class AsanThread {
   void Destroy();
 
   void Init();  // Should be called from the thread itself.
-  thread_return_t ThreadStart(uptr os_id,
+  thread_return_t ThreadStart(tid_t os_id,
                               atomic_uintptr_t *signal_thread_is_registered);
 
   uptr stack_top();

Modified: vendor/compiler-rt/dist/lib/asan/tests/asan_test_main.cc
==============================================================================
--- vendor/compiler-rt/dist/lib/asan/tests/asan_test_main.cc  Thu Apr 20 21:20:56 2017  (r317221)
+++ vendor/compiler-rt/dist/lib/asan/tests/asan_test_main.cc  Thu Apr 20 21:20:59 2017  (r317222)
@@ -13,15 +13,23 @@
 #include "asan_test_utils.h"
 #include "sanitizer_common/sanitizer_platform.h"
 
-// Default ASAN_OPTIONS for the unit tests. Let's turn symbolication off to
-// speed up testing (unit tests don't use it anyway).
+// Default ASAN_OPTIONS for the unit tests.
 extern "C" const char* __asan_default_options() {
 #if SANITIZER_MAC
   // On Darwin, we default to `abort_on_error=1`, which would make tests run
-  // much slower. Let's override this and run lit tests with 'abort_on_error=0'.
-  // Also, make sure we do not overwhelm the syslog while testing.
+  // much slower. Let's override this and run lit tests with 'abort_on_error=0'
+  // and make sure we do not overwhelm the syslog while testing. Also, let's
+  // turn symbolization off to speed up testing, especially when not running
+  // with llvm-symbolizer but with atos.
   return "symbolize=false:abort_on_error=0:log_to_syslog=0";
+#elif SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
+  // On PowerPC and ARM Thumb, a couple tests involving pthread_exit fail due to
+  // leaks detected by LSan. Symbolized leak report is required to apply a
+  // suppression for this known problem.
+  return "";
 #else
+  // Let's turn symbolization off to speed up testing (more than 3 times speedup
+  // observed).
   return "symbolize=false";
 #endif
 }

Modified: vendor/compiler-rt/dist/lib/dfsan/done_abilist.txt
==============================================================================
--- vendor/compiler-rt/dist/lib/dfsan/done_abilist.txt  Thu Apr 20 21:20:56 2017  (r317221)
+++ vendor/compiler-rt/dist/lib/dfsan/done_abilist.txt  Thu Apr 20 21:20:59 2017  (r317222)
@@ -285,22 +285,8 @@ fun:__sanitizer_cov_module_init=uninstru
 fun:__sanitizer_cov_module_init=discard
 fun:__sanitizer_cov_with_check=uninstrumented
 fun:__sanitizer_cov_with_check=discard
-fun:__sanitizer_cov_indir_call16=uninstrumented
-fun:__sanitizer_cov_indir_call16=discard
-fun:__sanitizer_cov_indir_call16=uninstrumented
-fun:__sanitizer_cov_indir_call16=discard
-fun:__sanitizer_reset_coverage=uninstrumented
-fun:__sanitizer_reset_coverage=discard
 fun:__sanitizer_set_death_callback=uninstrumented
 fun:__sanitizer_set_death_callback=discard
-fun:__sanitizer_get_coverage_guards=uninstrumented
-fun:__sanitizer_get_coverage_guards=discard
-fun:__sanitizer_get_number_of_counters=uninstrumented
-fun:__sanitizer_get_number_of_counters=discard
-fun:__sanitizer_update_counter_bitset_and_clear_counters=uninstrumented
-fun:__sanitizer_update_counter_bitset_and_clear_counters=discard
-fun:__sanitizer_get_total_unique_coverage=uninstrumented
-fun:__sanitizer_get_total_unique_coverage=discard
 fun:__sanitizer_get_total_unique_coverage=uninstrumented
 fun:__sanitizer_get_total_unique_coverage=discard
 fun:__sanitizer_update_counter_bitset_and_clear_counters=uninstrumented

Modified: vendor/compiler-rt/dist/lib/lsan/lsan_common.cc
==============================================================================
--- vendor/compiler-rt/dist/lib/lsan/lsan_common.cc  Thu Apr 20 21:20:56 2017  (r317221)
+++ vendor/compiler-rt/dist/lib/lsan/lsan_common.cc  Thu Apr 20 21:20:59 2017  (r317222)
@@ -68,6 +68,14 @@ ALIGNED(64) static char suppression_plac
 static SuppressionContext *suppression_ctx = nullptr;
 static const char kSuppressionLeak[] = "leak";
 static const char *kSuppressionTypes[] = { kSuppressionLeak };
+static const char kStdSuppressions[] =
+#if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
+  // The actual string allocation happens here (for more details refer to the
+  // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT definition).
+  "leak:*_dl_map_object_deps*";
+#else
+  "";
+#endif  // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
 
 void InitializeSuppressions() {
   CHECK_EQ(nullptr, suppression_ctx);
@@ -76,6 +84,7 @@ void InitializeSuppressions() {
     suppression_ctx->ParseFromFile(flags()->suppressions);
   if (&__lsan_default_suppressions)
     suppression_ctx->Parse(__lsan_default_suppressions());
+  suppression_ctx->Parse(kStdSuppressions);
 }
 
 static SuppressionContext *GetSuppressionContext() {
@@ -83,12 +92,9 @@ static SuppressionContext *GetSuppressio
   return suppression_ctx;
 }
 
-struct RootRegion {
-  const void *begin;
-  uptr size;
-};
+static InternalMmapVector<RootRegion> *root_regions;
 
-InternalMmapVector<RootRegion> *root_regions;
+InternalMmapVector<RootRegion> const *GetRootRegions() { return root_regions; }
 
 void InitializeRootRegions() {
   CHECK(!root_regions);
@@ -200,11 +206,11 @@ void ForEachExtraStackRangeCb(uptr begin
 // Scans thread data (stacks and TLS) for heap pointers.
 static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                            Frontier *frontier) {
-  InternalScopedBuffer<uptr> registers(SuspendedThreadsList::RegisterCount());
+  InternalScopedBuffer<uptr> registers(suspended_threads.RegisterCount());
   uptr registers_begin = reinterpret_cast<uptr>(registers.data());
   uptr registers_end = registers_begin + registers.size();
-  for (uptr i = 0; i < suspended_threads.thread_count(); i++) {
-    uptr os_id = static_cast<uptr>(suspended_threads.GetThreadID(i));
+  for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
+    tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i));
     LOG_THREADS("Processing thread %d.\n", os_id);
     uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
     DTLS *dtls;
@@ -291,23 +297,29 @@ static void ProcessThreads(SuspendedThre
   }
 }
 
-static void ProcessRootRegion(Frontier *frontier, uptr root_begin,
-                              uptr root_end) {
-  MemoryMappingLayout proc_maps(/*cache_enabled*/true);
+void ScanRootRegion(Frontier *frontier, const RootRegion &root_region,
+                    uptr region_begin, uptr region_end, uptr prot) {
+  uptr intersection_begin = Max(root_region.begin, region_begin);
+  uptr intersection_end = Min(region_end, root_region.begin + root_region.size);
+  if (intersection_begin >= intersection_end) return;
+  bool is_readable = prot & MemoryMappingLayout::kProtectionRead;
+  LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
+               root_region.begin, root_region.begin + root_region.size,
+               region_begin, region_end,
+               is_readable ? "readable" : "unreadable");
+  if (is_readable)
+    ScanRangeForPointers(intersection_begin, intersection_end, frontier, "ROOT",
+                         kReachable);
+}
+
+static void ProcessRootRegion(Frontier *frontier,
+                              const RootRegion &root_region) {
+  MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
   uptr begin, end, prot;
   while (proc_maps.Next(&begin, &end,
                         /*offset*/ nullptr, /*filename*/ nullptr,
                         /*filename_size*/ 0, &prot)) {
-    uptr intersection_begin = Max(root_begin, begin);
-    uptr intersection_end = Min(end, root_end);
-    if (intersection_begin >= intersection_end) continue;
-    bool is_readable = prot & MemoryMappingLayout::kProtectionRead;
-    LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
-                 root_begin, root_end, begin, end,
-                 is_readable ? "readable" : "unreadable");
-    if (is_readable)
-      ScanRangeForPointers(intersection_begin, intersection_end, frontier,
-                           "ROOT", kReachable);
+    ScanRootRegion(frontier, root_region, begin, end, prot);
   }
 }
 
@@ -316,9 +328,7 @@ static void ProcessRootRegions(Frontier
   if (!flags()->use_root_regions) return;
   CHECK(root_regions);
   for (uptr i = 0; i < root_regions->size(); i++) {
-    RootRegion region = (*root_regions)[i];
-    uptr begin_addr = reinterpret_cast<uptr>(region.begin);
-    ProcessRootRegion(frontier, begin_addr, begin_addr + region.size);
+    ProcessRootRegion(frontier, (*root_regions)[i]);
   }
 }
 
@@ -356,6 +366,72 @@ static void CollectIgnoredCb(uptr chunk,
   }
 }
 
+static uptr GetCallerPC(u32 stack_id, StackDepotReverseMap *map) {
+  CHECK(stack_id);
+  StackTrace stack = map->Get(stack_id);
+  // The top frame is our malloc/calloc/etc. The next frame is the caller.
+  if (stack.size >= 2)
+    return stack.trace[1];
+  return 0;
+}
+
+struct InvalidPCParam {
+  Frontier *frontier;
+  StackDepotReverseMap *stack_depot_reverse_map;
+  bool skip_linker_allocations;
+};
+
+// ForEachChunk callback. If the caller pc is invalid or is within the linker,
+// mark as reachable. Called by ProcessPlatformSpecificAllocations.
+static void MarkInvalidPCCb(uptr chunk, void *arg) {
+  CHECK(arg);
+  InvalidPCParam *param = reinterpret_cast<InvalidPCParam *>(arg);
+  chunk = GetUserBegin(chunk);
+  LsanMetadata m(chunk);
+  if (m.allocated() && m.tag() != kReachable && m.tag() != kIgnored) {
+    u32 stack_id = m.stack_trace_id();
+    uptr caller_pc = 0;
+    if (stack_id > 0)
+      caller_pc = GetCallerPC(stack_id, param->stack_depot_reverse_map);
+    // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
+    // it as reachable, as we can't properly report its allocation stack anyway.
+    if (caller_pc == 0 || (param->skip_linker_allocations &&
+                           GetLinker()->containsAddress(caller_pc))) {
+      m.set_tag(kReachable);
+      param->frontier->push_back(chunk);
+    }
+  }
+}
+
+// On Linux, handles dynamically allocated TLS blocks by treating all chunks
+// allocated from ld-linux.so as reachable.
+// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
+// They are allocated with a __libc_memalign() call in allocate_and_init()
+// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
+// blocks, but we can make sure they come from our own allocator by intercepting
+// __libc_memalign(). On top of that, there is no easy way to reach them. Their
+// addresses are stored in a dynamically allocated array (the DTV) which is
+// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
+// being reachable from the static TLS, and the dynamic TLS being reachable from
+// the DTV. This is because the initial DTV is allocated before our interception
+// mechanism kicks in, and thus we don't recognize it as allocated memory. We
+// can't special-case it either, since we don't know its size.
+// Our solution is to include in the root set all allocations made from
+// ld-linux.so (which is where allocate_and_init() is implemented). This is
+// guaranteed to include all dynamic TLS blocks (and possibly other allocations
+// which we don't care about).
+// On all other platforms, this simply checks to ensure that the caller pc is
+// valid before reporting chunks as leaked.
+void ProcessPC(Frontier *frontier) {
+  StackDepotReverseMap stack_depot_reverse_map;
+  InvalidPCParam arg;
+  arg.frontier = frontier;
+  arg.stack_depot_reverse_map = &stack_depot_reverse_map;
+  arg.skip_linker_allocations =
+      flags()->use_tls && flags()->use_ld_allocations && GetLinker() != nullptr;
+  ForEachChunk(MarkInvalidPCCb, &arg);
+}
+
 // Sets the appropriate tag on each chunk.
 static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
   // Holds the flood fill frontier.
@@ -367,11 +443,13 @@ static void ClassifyAllChunks(SuspendedT
   ProcessRootRegions(&frontier);
   FloodFillTag(&frontier, kReachable);
 
+  CHECK_EQ(0, frontier.size());
+  ProcessPC(&frontier);
+
   // The check here is relatively expensive, so we do this in a separate flood
   // fill. That way we can skip the check for chunks that are reachable
   // otherwise.
   LOG_POINTERS("Processing platform-specific allocations.\n");
-  CHECK_EQ(0, frontier.size());
   ProcessPlatformSpecificAllocations(&frontier);
   FloodFillTag(&frontier, kReachable);
@@ -707,7 +785,7 @@ void __lsan_register_root_region(const v
 #if CAN_SANITIZE_LEAKS
   BlockingMutexLock l(&global_mutex);
   CHECK(root_regions);
-  RootRegion region = {begin, size};
+  RootRegion region = {reinterpret_cast<uptr>(begin), size};
   root_regions->push_back(region);
   VReport(1, "Registered root region at %p of size %llu\n", begin, size);
 #endif  // CAN_SANITIZE_LEAKS
@@ -721,7 +799,7 @@ void __lsan_unregister_root_region(const
   bool removed = false;
   for (uptr i = 0; i < root_regions->size(); i++) {
     RootRegion region = (*root_regions)[i];
-    if (region.begin == begin && region.size == size) {
+    if (region.begin == reinterpret_cast<uptr>(begin) && region.size == size) {
       removed = true;
       uptr last_index = root_regions->size() - 1;
       (*root_regions)[i] = (*root_regions)[last_index];

Modified: vendor/compiler-rt/dist/lib/lsan/lsan_common.h
==============================================================================
--- vendor/compiler-rt/dist/lib/lsan/lsan_common.h  Thu Apr 20 21:20:56 2017  (r317221)
+++ vendor/compiler-rt/dist/lib/lsan/lsan_common.h  Thu Apr 20 21:20:59 2017  (r317222)
@@ -118,6 +118,15 @@ typedef InternalMmapVector<uptr> Frontie
 void InitializePlatformSpecificModules();
 void ProcessGlobalRegions(Frontier *frontier);
 void ProcessPlatformSpecificAllocations(Frontier *frontier);
+
+struct RootRegion {
+  uptr begin;
+  uptr size;
+};
+
+InternalMmapVector<RootRegion> const *GetRootRegions();
+void ScanRootRegion(Frontier *frontier, RootRegion const &region,
+                    uptr region_begin, uptr region_end, uptr prot);
+
 // Run stoptheworld while holding any platform-specific locks.
 void DoStopTheWorld(StopTheWorldCallback callback, void* argument);
 
@@ -193,10 +202,10 @@ bool WordIsPoisoned(uptr addr);
 // Wrappers for ThreadRegistry access.
 void LockThreadRegistry();
 void UnlockThreadRegistry();
-bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end,
+bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
                            uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
                            uptr *cache_end, DTLS **dtls);
-void ForEachExtraStackRange(uptr os_id, RangeIteratorCallback callback,
+void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
                             void *arg);
 // If called from the main thread, updates the main thread's TID in the thread
 // registry. We need this to handle processes that fork() without a subsequent
@@ -212,6 +221,10 @@ uptr PointsIntoChunk(void *p);
 uptr GetUserBegin(uptr chunk);
 // Helper for __lsan_ignore_object().
 IgnoreObjectResult IgnoreObjectLocked(const void *p);
+
+// Return the linker module, if valid for the platform.
+LoadedModule *GetLinker();
+
 // Wrapper for chunk metadata operations.
 class LsanMetadata {
  public:

Modified: vendor/compiler-rt/dist/lib/lsan/lsan_common_linux.cc
==============================================================================
--- vendor/compiler-rt/dist/lib/lsan/lsan_common_linux.cc  Thu Apr 20 21:20:56 2017  (r317221)
+++ vendor/compiler-rt/dist/lib/lsan/lsan_common_linux.cc  Thu Apr 20 21:20:59 2017  (r317222)
@@ -89,70 +89,9 @@ void ProcessGlobalRegions(Frontier *fron
   dl_iterate_phdr(ProcessGlobalRegionsCallback, frontier);
 }
 
-static uptr GetCallerPC(u32 stack_id, StackDepotReverseMap *map) {
-  CHECK(stack_id);
-  StackTrace stack = map->Get(stack_id);
-  // The top frame is our malloc/calloc/etc. The next frame is the caller.
-  if (stack.size >= 2)
-    return stack.trace[1];
-  return 0;
-}
-
-struct ProcessPlatformAllocParam {
-  Frontier *frontier;
-  StackDepotReverseMap *stack_depot_reverse_map;
-  bool skip_linker_allocations;
-};
+LoadedModule *GetLinker() { return linker; }
 
-// ForEachChunk callback. Identifies unreachable chunks which must be treated as
-// reachable. Marks them as reachable and adds them to the frontier.
-static void ProcessPlatformSpecificAllocationsCb(uptr chunk, void *arg) {
-  CHECK(arg);
-  ProcessPlatformAllocParam *param =
-      reinterpret_cast<ProcessPlatformAllocParam *>(arg);
-  chunk = GetUserBegin(chunk);
-  LsanMetadata m(chunk);
-  if (m.allocated() && m.tag() != kReachable && m.tag() != kIgnored) {
-    u32 stack_id = m.stack_trace_id();
-    uptr caller_pc = 0;
-    if (stack_id > 0)
-      caller_pc = GetCallerPC(stack_id, param->stack_depot_reverse_map);
-    // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
-    // it as reachable, as we can't properly report its allocation stack anyway.
-    if (caller_pc == 0 || (param->skip_linker_allocations &&
-                           linker->containsAddress(caller_pc))) {
-      m.set_tag(kReachable);
-      param->frontier->push_back(chunk);
-    }
-  }
-}
-
-// Handles dynamically allocated TLS blocks by treating all chunks allocated
-// from ld-linux.so as reachable.
-// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
-// They are allocated with a __libc_memalign() call in allocate_and_init()
-// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
-// blocks, but we can make sure they come from our own allocator by intercepting
-// __libc_memalign(). On top of that, there is no easy way to reach them. Their
-// addresses are stored in a dynamically allocated array (the DTV) which is
-// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
-// being reachable from the static TLS, and the dynamic TLS being reachable from
-// the DTV. This is because the initial DTV is allocated before our interception
-// mechanism kicks in, and thus we don't recognize it as allocated memory. We
-// can't special-case it either, since we don't know its size.
-// Our solution is to include in the root set all allocations made from
-// ld-linux.so (which is where allocate_and_init() is implemented). This is
-// guaranteed to include all dynamic TLS blocks (and possibly other allocations
-// which we don't care about).
-void ProcessPlatformSpecificAllocations(Frontier *frontier) {
-  StackDepotReverseMap stack_depot_reverse_map;
-  ProcessPlatformAllocParam arg;
-  arg.frontier = frontier;
-  arg.stack_depot_reverse_map = &stack_depot_reverse_map;
-  arg.skip_linker_allocations =
-      flags()->use_tls && flags()->use_ld_allocations && linker != nullptr;
-  ForEachChunk(ProcessPlatformSpecificAllocationsCb, &arg);
-}
+void ProcessPlatformSpecificAllocations(Frontier *frontier) {}
 
 struct DoStopTheWorldParam {
   StopTheWorldCallback callback;

Modified: vendor/compiler-rt/dist/lib/lsan/lsan_common_mac.cc
==============================================================================
--- vendor/compiler-rt/dist/lib/lsan/lsan_common_mac.cc  Thu Apr 20 21:20:56 2017  (r317221)
+++ vendor/compiler-rt/dist/lib/lsan/lsan_common_mac.cc  Thu Apr 20 21:20:59 2017  (r317222)
@@ -22,6 +22,8 @@
 
 #include <pthread.h>
 
+#include <mach/mach.h>
+
 namespace __lsan {
 
 typedef struct {
@@ -85,6 +87,8 @@ void SetCurrentThread(u32 tid) { get_tls
 
 AllocatorCache *GetAllocatorCache() { return &get_tls_val(true)->cache; }
 
+LoadedModule *GetLinker() { return nullptr; }
+
 // Required on Linux for initialization of TLS behavior, but should not be
 // required on Darwin.
 void InitializePlatformSpecificModules() {
@@ -106,7 +110,7 @@ void ProcessGlobalRegions(Frontier *fron
 
     for (const __sanitizer::LoadedModule::AddressRange &range :
          modules[i].ranges()) {
-      if (range.executable) continue;
+      if (range.executable || !range.readable) continue;
 
       ScanGlobalRange(range.beg, range.end, frontier);
     }
@@ -114,11 +118,54 @@ void ProcessGlobalRegions(Frontier *fron
 }
 
 void ProcessPlatformSpecificAllocations(Frontier *frontier) {
-  CHECK(0 && "unimplemented");
+  mach_port_name_t port;
+  if (task_for_pid(mach_task_self(), internal_getpid(), &port)
+      != KERN_SUCCESS) {
+    return;
+  }
+
+  unsigned depth = 1;
+  vm_size_t size = 0;
+  vm_address_t address = 0;
+  kern_return_t err = KERN_SUCCESS;
+  mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
+
+  InternalMmapVector<RootRegion> const *root_regions = GetRootRegions();
+
+  while (err == KERN_SUCCESS) {
+    struct vm_region_submap_info_64 info;
+    err = vm_region_recurse_64(port, &address, &size, &depth,
+                               (vm_region_info_t)&info, &count);
+
+    uptr end_address = address + size;
+
+    // libxpc stashes some pointers in the Kernel Alloc Once page,
+    // make sure not to report those as leaks.
+    if (info.user_tag == VM_MEMORY_OS_ALLOC_ONCE) {
+      ScanRangeForPointers(address, end_address, frontier, "GLOBAL",
+                           kReachable);
+    }
+
+    // This additional root region scan is required on Darwin in order to
+    // detect root regions contained within mmap'd memory regions, because
+    // the Darwin implementation of sanitizer_procmaps traverses images
+    // as loaded by dyld, and not the complete set of all memory regions.
+    //
+    // TODO(fjricci) - remove this once sanitizer_procmaps_mac has the same
+    // behavior as sanitizer_procmaps_linux and traverses all memory regions
+    if (flags()->use_root_regions) {
+      for (uptr i = 0; i < root_regions->size(); i++) {
+        ScanRootRegion(frontier, (*root_regions)[i], address, end_address,
+                       info.protection);
+      }
+    }
+
+    address = end_address;
+  }
 }
 
 void DoStopTheWorld(StopTheWorldCallback callback, void *argument) {
-  CHECK(0 && "unimplemented");
+  StopTheWorld(callback, argument);
 }
 
 }  // namespace __lsan

Modified: vendor/compiler-rt/dist/lib/lsan/lsan_thread.cc
==============================================================================
--- vendor/compiler-rt/dist/lib/lsan/lsan_thread.cc  Thu Apr 20 21:20:56 2017  (r317221)
+++ vendor/compiler-rt/dist/lib/lsan/lsan_thread.cc  Thu Apr 20 21:20:59 2017  (r317222)
@@ -77,7 +77,7 @@ u32 ThreadCreate(u32 parent_tid, uptr us
                        /* arg */ nullptr);
 }
 
-void ThreadStart(u32 tid, uptr os_id) {
+void ThreadStart(u32 tid, tid_t os_id) {
   OnStartedArgs args;
   uptr stack_size = 0;
   uptr tls_size = 0;
@@ -127,7 +127,7 @@ void EnsureMainThreadIDIsCorrect() {
 
 ///// Interface to the common LSan module. /////
 
-bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end,
+bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
                            uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
                            uptr *cache_end, DTLS **dtls) {
   ThreadContext *context = static_cast<ThreadContext *>(
@@ -143,7 +143,7 @@ bool GetThreadRangesLocked(uptr os_id, u
   return true;
 }
 
-void ForEachExtraStackRange(uptr os_id, RangeIteratorCallback callback,
+void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
                             void *arg) {
 }

Modified: vendor/compiler-rt/dist/lib/lsan/lsan_thread.h
==============================================================================
--- vendor/compiler-rt/dist/lib/lsan/lsan_thread.h  Thu Apr 20 21:20:56 2017  (r317221)
+++ vendor/compiler-rt/dist/lib/lsan/lsan_thread.h  Thu Apr 20 21:20:59 2017  (r317222)
@@ -45,7 +45,7 @@ class ThreadContext : public ThreadConte
 
 void InitializeThreadRegistry();
 
-void ThreadStart(u32 tid, uptr os_id);
+void ThreadStart(u32 tid, tid_t os_id);
 void ThreadFinish();
 u32 ThreadCreate(u32 tid, uptr uid, bool detached);
 void ThreadJoin(u32 tid);

Modified: vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_common.cc
==============================================================================
--- vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_common.cc  Thu Apr 20 21:20:56 2017  (r317221)
+++ vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_common.cc  Thu Apr 20 21:20:59 2017  (r317222)
@@ -284,9 +284,10 @@ void LoadedModule::clear() {
   }
 }
 
-void LoadedModule::addAddressRange(uptr beg, uptr end, bool executable) {
+void LoadedModule::addAddressRange(uptr beg, uptr end, bool executable,
+                                   bool readable) {
   void *mem = InternalAlloc(sizeof(AddressRange));
-  AddressRange *r = new(mem) AddressRange(beg, end, executable);
+  AddressRange *r = new(mem) AddressRange(beg, end, executable, readable);
   ranges_.push_back(r);
   if (executable && end > max_executable_address_)
     max_executable_address_ = end;

Modified: vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_common.h
==============================================================================
--- vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_common.h  Thu Apr 20 21:20:56 2017  (r317221)
+++ vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_common.h  Thu Apr 20 21:20:59 2017  (r317222)
@@ -72,7 +72,7 @@ INLINE uptr GetPageSizeCached() {
 uptr GetMmapGranularity();
 uptr GetMaxVirtualAddress();
 // Threads
-uptr GetTid();
+tid_t GetTid();
 uptr GetThreadSelf();
 void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                 uptr *stack_bottom);
@@ -717,7 +717,7 @@ class LoadedModule {
   void set(const char *module_name, uptr base_address, ModuleArch arch,
            u8 uuid[kModuleUUIDSize], bool instrumented);
   void clear();
-  void addAddressRange(uptr beg, uptr end, bool executable);
+  void addAddressRange(uptr beg, uptr end, bool executable, bool readable);
   bool containsAddress(uptr address) const;
 
   const char *full_name() const { return full_name_; }
@@ -732,9 +732,14 @@ class LoadedModule {
     uptr beg;
     uptr end;
     bool executable;
+    bool readable;
 
-    AddressRange(uptr beg, uptr end, bool executable)
-        : next(nullptr), beg(beg), end(end), executable(executable) {}
+    AddressRange(uptr beg, uptr end, bool executable, bool readable)
+        : next(nullptr),
+          beg(beg),
+          end(end),
+          executable(executable),
+          readable(readable) {}
   };
 
   const IntrusiveList<AddressRange> &ranges() const { return ranges_; }

Modified: vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_common_interceptors.inc
==============================================================================
--- vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_common_interceptors.inc  Thu Apr 20 21:20:56 2017  (r317221)
+++ vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_common_interceptors.inc  Thu Apr 20 21:20:59 2017  (r317222)
@@ -139,12 +139,9 @@ bool PlatformHasDifferentMemcpyAndMemmov
 #define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (0)
 #endif
 
-#define COMMON_INTERCEPTOR_READ_STRING_OF_LEN(ctx, s, len, n)       \
-  COMMON_INTERCEPTOR_READ_RANGE((ctx), (s),                         \
-    common_flags()->strict_string_checks ? (len) + 1 : (n) )
-
 #define COMMON_INTERCEPTOR_READ_STRING(ctx, s, n)                   \
-  COMMON_INTERCEPTOR_READ_STRING_OF_LEN((ctx), (s), REAL(strlen)(s), (n))
+  COMMON_INTERCEPTOR_READ_RANGE((ctx), (s),                         \
+    common_flags()->strict_string_checks ? (REAL(strlen)(s)) + 1 : (n) )
 
 #ifndef COMMON_INTERCEPTOR_ON_DLOPEN
 #define COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag) \
@@ -450,8 +447,7 @@ static inline void StrstrCheck(void *ctx
                                const char *s2) {
     uptr len1 = REAL(strlen)(s1);
     uptr len2 = REAL(strlen)(s2);
-    COMMON_INTERCEPTOR_READ_STRING_OF_LEN(ctx, s1, len1,
-                                          r ? r - s1 + len2 : len1 + 1);
+    COMMON_INTERCEPTOR_READ_STRING(ctx, s1, r ? r - s1 + len2 : len1 + 1);
     COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, len2 + 1);
 }
 #endif
@@ -577,10 +573,11 @@ INTERCEPTOR(char*, strchr, const char *s
     return internal_strchr(s, c);
   COMMON_INTERCEPTOR_ENTER(ctx, strchr, s, c);
   char *result = REAL(strchr)(s, c);
-  uptr len = internal_strlen(s);
-  uptr n = result ? result - s + 1 : len + 1;
-  if (common_flags()->intercept_strchr)
-    COMMON_INTERCEPTOR_READ_STRING_OF_LEN(ctx, s, len, n);
+  if (common_flags()->intercept_strchr) {
+    // Keep strlen as macro argument, as macro may ignore it.
+    COMMON_INTERCEPTOR_READ_STRING(ctx, s,
+      (result ? result - s : REAL(strlen)(s)) + 1);
+  }
   return result;
 }
 #define INIT_STRCHR COMMON_INTERCEPT_FUNCTION(strchr)
@@ -609,9 +606,8 @@ INTERCEPTOR(char*, strrchr, const char *
   if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
    return internal_strrchr(s, c);
  COMMON_INTERCEPTOR_ENTER(ctx, strrchr, s, c);
-  uptr len = internal_strlen(s);
   if (common_flags()->intercept_strchr)
-    COMMON_INTERCEPTOR_READ_STRING_OF_LEN(ctx, s, len, len + 1);
+    COMMON_INTERCEPTOR_READ_RANGE(ctx, s, REAL(strlen)(s) + 1);
   return REAL(strrchr)(s, c);
 }
 #define INIT_STRRCHR COMMON_INTERCEPT_FUNCTION(strrchr)

Modified: vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_coverage_interface.inc
==============================================================================
--- vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_coverage_interface.inc  Thu Apr 20 21:20:56 2017  (r317221)
+++ vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_coverage_interface.inc  Thu Apr 20 21:20:59 2017  (r317222)
@@ -10,21 +10,13 @@
 //===----------------------------------------------------------------------===//
 INTERFACE_FUNCTION(__sanitizer_cov)
 INTERFACE_FUNCTION(__sanitizer_cov_dump)
-INTERFACE_FUNCTION(__sanitizer_cov_indir_call16)
 INTERFACE_FUNCTION(__sanitizer_cov_init)
 INTERFACE_FUNCTION(__sanitizer_cov_module_init)
-INTERFACE_FUNCTION(__sanitizer_cov_trace_basic_block)
-INTERFACE_FUNCTION(__sanitizer_cov_trace_func_enter)
 INTERFACE_FUNCTION(__sanitizer_cov_with_check)
 INTERFACE_FUNCTION(__sanitizer_dump_coverage)
 INTERFACE_FUNCTION(__sanitizer_dump_trace_pc_guard_coverage)
-INTERFACE_FUNCTION(__sanitizer_get_coverage_guards)
-INTERFACE_FUNCTION(__sanitizer_get_number_of_counters)
-INTERFACE_FUNCTION(__sanitizer_get_total_unique_caller_callee_pairs)
 INTERFACE_FUNCTION(__sanitizer_get_total_unique_coverage)
 INTERFACE_FUNCTION(__sanitizer_maybe_open_cov_file)
-INTERFACE_FUNCTION(__sanitizer_reset_coverage)
-INTERFACE_FUNCTION(__sanitizer_update_counter_bitset_and_clear_counters)
 INTERFACE_WEAK_FUNCTION(__sancov_default_options)
 INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_cmp)
 INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_cmp1)

Modified: vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_coverage_libcdep.cc
==============================================================================
--- vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_coverage_libcdep.cc  Thu Apr 20 21:20:56 2017  (r317221)
+++ vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_coverage_libcdep.cc  Thu Apr 20 21:20:59 2017  (r317222)
@@ -57,12 +57,6 @@ static const u64 kMagic = SANITIZER_WORD
 static atomic_uint32_t dump_once_guard;  // Ensure that CovDump runs only once.
 
 static atomic_uintptr_t coverage_counter;
-static atomic_uintptr_t caller_callee_counter;
-
-static void ResetGlobalCounters() {
-  return atomic_store(&coverage_counter, 0, memory_order_relaxed);
-  return atomic_store(&caller_callee_counter, 0, memory_order_relaxed);
-}
 
 // pc_array is the array containing the covered PCs.
 // To make the pc_array thread- and async-signal-safe it has to be large enough.
@@ -90,25 +84,14 @@ class CoverageData {
   void AfterFork(int child_pid);
   void Extend(uptr npcs);
   void Add(uptr pc, u32 *guard);
-  void IndirCall(uptr caller, uptr callee, uptr callee_cache[],
-                 uptr cache_size);
-  void DumpCallerCalleePairs();
-  void DumpTrace();
   void DumpAsBitSet();
-  void DumpCounters();
   void DumpOffsets();
   void DumpAll();
 
-  ALWAYS_INLINE
-  void TraceBasicBlock(u32 *id);
-
   void InitializeGuardArray(s32 *guards);
   void InitializeGuards(s32 *guards, uptr n, const char *module_name,
                         uptr caller_pc);
-  void InitializeCounters(u8 *counters, uptr n);
   void ReinitializeGuards();
-  uptr GetNumberOf8bitCounters();
-  uptr Update8bitCounterBitsetAndClearCounters(u8 *bitset);
 
   uptr *data();
   uptr size() const;
@@ -150,37 +133,6 @@ class CoverageData {
   InternalMmapVectorNoCtor<NamedPcRange> comp_unit_name_vec;
   InternalMmapVectorNoCtor<NamedPcRange> module_name_vec;
 
-  struct CounterAndSize {
-    u8 *counters;
-    uptr n;
-  };
-
-  InternalMmapVectorNoCtor<CounterAndSize> counters_vec;
-  uptr num_8bit_counters;
-
-  // Caller-Callee (cc) array, size and current index.
-  static const uptr kCcArrayMaxSize = FIRST_32_SECOND_64(1 << 18, 1 << 24);
-  uptr **cc_array;
-  atomic_uintptr_t cc_array_index;
-  atomic_uintptr_t cc_array_size;
-
-  // Tracing event array, size and current pointer.
-  // We record all events (basic block entries) in a global buffer of u32
-  // values. Each such value is the index in pc_array.
-  // So far the tracing is highly experimental:
-  //  - not thread-safe;
-  //  - does not support long traces;
-  //  - not tuned for performance.
-  // Windows doesn't do overcommit (committed virtual memory costs swap), so
-  // programs can't reliably map such large amounts of virtual memory.
-  // TODO(etienneb): Find a way to support coverage of larger executable
-  static const uptr kTrEventArrayMaxSize =
-      (SANITIZER_WORDSIZE == 32 || SANITIZER_WINDOWS) ? 1 << 22 : 1 << 30;
-  u32 *tr_event_array;
-  uptr tr_event_array_size;
-  u32 *tr_event_pointer;
-  static const uptr kTrPcArrayMaxSize = FIRST_32_SECOND_64(1 << 22, 1 << 27);
-
   StaticSpinMutex mu;
 };
@@ -217,23 +169,6 @@ void CoverageData::Enable() {
   } else {
     atomic_store(&pc_array_size, kPcArrayMaxSize, memory_order_relaxed);
   }
-
-  cc_array = reinterpret_cast<uptr **>(MmapNoReserveOrDie(
-      sizeof(uptr *) * kCcArrayMaxSize, "CovInit::cc_array"));
-  atomic_store(&cc_array_size, kCcArrayMaxSize, memory_order_relaxed);
-  atomic_store(&cc_array_index, 0, memory_order_relaxed);
-
-  // Allocate tr_event_array with a guard page at the end.
-  tr_event_array = reinterpret_cast<u32 *>(MmapNoReserveOrDie(
-      sizeof(tr_event_array[0]) * kTrEventArrayMaxSize + GetMmapGranularity(),
-      "CovInit::tr_event_array"));
-  MprotectNoAccess(
-      reinterpret_cast<uptr>(&tr_event_array[kTrEventArrayMaxSize]),
-      GetMmapGranularity());
-  tr_event_array_size = kTrEventArrayMaxSize;
-  tr_event_pointer = tr_event_array;
-
-  num_8bit_counters = 0;
 }
 
 void CoverageData::InitializeGuardArray(s32 *guards) {
@@ -251,17 +186,6 @@ void CoverageData::Disable() {
     UnmapOrDie(pc_array, sizeof(uptr) * kPcArrayMaxSize);
     pc_array = nullptr;
   }
-  if (cc_array) {
-    UnmapOrDie(cc_array, sizeof(uptr *) * kCcArrayMaxSize);
-    cc_array = nullptr;
-  }
-  if (tr_event_array) {
-    UnmapOrDie(tr_event_array,
-               sizeof(tr_event_array[0]) * kTrEventArrayMaxSize +
-                   GetMmapGranularity());
-    tr_event_array = nullptr;
-    tr_event_pointer = nullptr;
-  }
   if (pc_fd != kInvalidFd) {
     CloseFile(pc_fd);
     pc_fd = kInvalidFd;
@@ -341,15 +265,6 @@ void CoverageData::Extend(uptr npcs) {
   atomic_store(&pc_array_size, size, memory_order_release);
 }
 
-void CoverageData::InitializeCounters(u8 *counters, uptr n) {
-  if (!counters) return;
-  CHECK_EQ(reinterpret_cast<uptr>(counters) % 16, 0);
-  n = RoundUpTo(n, 16); // The compiler must ensure that counters is 16-aligned.
-  SpinMutexLock l(&mu);
-  counters_vec.push_back({counters, n});
-  num_8bit_counters += n;
-}
-
 void CoverageData::UpdateModuleNameVec(uptr caller_pc, uptr range_beg,
                                        uptr range_end) {
   auto sym = Symbolizer::GetOrInit();
@@ -424,98 +339,6 @@ void CoverageData::Add(uptr pc, u32 *gua
   pc_array[idx] = BundlePcAndCounter(pc, counter);
 }
 
-// Registers a pair caller=>callee.
-// When a given caller is seen for the first time, the callee_cache is added
-// to the global array cc_array, callee_cache[0] is set to caller and
-// callee_cache[1] is set to cache_size.
-// Then we are trying to add callee to callee_cache [2,cache_size) if it is
-// not there yet.
-// If the cache is full we drop the callee (may want to fix this later).
-void CoverageData::IndirCall(uptr caller, uptr callee, uptr callee_cache[],
-                             uptr cache_size) {
-  if (!cc_array) return;
-  atomic_uintptr_t *atomic_callee_cache =
-      reinterpret_cast<atomic_uintptr_t *>(callee_cache);
-  uptr zero = 0;
-  if (atomic_compare_exchange_strong(&atomic_callee_cache[0], &zero, caller,
-                                     memory_order_seq_cst)) {
-    uptr idx = atomic_fetch_add(&cc_array_index, 1, memory_order_relaxed);
-    CHECK_LT(idx * sizeof(uptr),
-             atomic_load(&cc_array_size, memory_order_acquire));
-    callee_cache[1] = cache_size;
-    cc_array[idx] = callee_cache;
-  }
-  CHECK_EQ(atomic_load(&atomic_callee_cache[0], memory_order_relaxed), caller);
-  for (uptr i = 2; i < cache_size; i++) {
-    uptr was = 0;
-    if (atomic_compare_exchange_strong(&atomic_callee_cache[i], &was, callee,
-                                       memory_order_seq_cst)) {
-      atomic_fetch_add(&caller_callee_counter, 1, memory_order_relaxed);
-      return;
-    }
-    if (was == callee)  // Already have this callee.
-      return;
-  }
-}
-
-uptr CoverageData::GetNumberOf8bitCounters() {
-  return num_8bit_counters;
-}
-
-// Map every 8bit counter to a 8-bit bitset and clear the counter.
-uptr CoverageData::Update8bitCounterBitsetAndClearCounters(u8 *bitset) {
-  uptr num_new_bits = 0;
-  uptr cur = 0;
-  // For better speed we map 8 counters to 8 bytes of bitset at once.
-  static const uptr kBatchSize = 8;
-  CHECK_EQ(reinterpret_cast<uptr>(bitset) % kBatchSize, 0);
-  for (uptr i = 0, len = counters_vec.size(); i < len; i++) {
-    u8 *c = counters_vec[i].counters;
-    uptr n = counters_vec[i].n;
-    CHECK_EQ(n % 16, 0);
-    CHECK_EQ(cur % kBatchSize, 0);
-    CHECK_EQ(reinterpret_cast<uptr>(c) % kBatchSize, 0);
-    if (!bitset) {
-      internal_bzero_aligned16(c, n);
-      cur += n;
-      continue;
-    }
-    for (uptr j = 0; j < n; j += kBatchSize, cur += kBatchSize) {
-      CHECK_LT(cur, num_8bit_counters);
-      u64 *pc64 = reinterpret_cast<u64*>(c + j);
-      u64 *pb64 = reinterpret_cast<u64*>(bitset + cur);
-      u64 c64 = *pc64;
-      u64 old_bits_64 = *pb64;
-      u64 new_bits_64 = old_bits_64;
-      if (c64) {
-        *pc64 = 0;
-        for (uptr k = 0; k < kBatchSize; k++) {
-          u64 x = (c64 >> (8 * k)) & 0xff;
-          if (x) {
-            u64 bit = 0;
-            /**/ if (x >= 128) bit = 128;
-            else if (x >= 32) bit = 64;
-            else if (x >= 16) bit = 32;
-            else if (x >= 8) bit = 16;
-            else if (x >= 4) bit = 8;
-            else if (x >= 3) bit = 4;
-            else if (x >= 2) bit = 2;
-            else if (x >= 1) bit = 1;
-            u64 mask = bit << (8 * k);
-            if (!(new_bits_64 & mask)) {
-              num_new_bits++;
-              new_bits_64 |= mask;
-            }
-          }
-        }
-        *pb64 = new_bits_64;
-      }
-    }
-  }
-  CHECK_EQ(cur, num_8bit_counters);
-  return num_new_bits;
-}
-
 uptr *CoverageData::data() {
   return pc_array;
 }
@@ -596,132 +419,6 @@ static fd_t CovOpenFile(InternalScopedSt
   return fd;
 }
 
-// Dump trace PCs and trace events into two separate files.
-void CoverageData::DumpTrace() {
-  uptr max_idx = tr_event_pointer - tr_event_array;
-  if (!max_idx) return;
-  auto sym = Symbolizer::GetOrInit();
-  if (!sym)
-    return;
-  InternalScopedString out(32 << 20);
-  for (uptr i = 0, n = size(); i < n; i++) {
-    const char *module_name = "<unknown>";
-    uptr module_address = 0;
-    sym->GetModuleNameAndOffsetForPC(UnbundlePc(pc_array[i]), &module_name,
-                                     &module_address);
-    out.append("%s 0x%zx\n", module_name, module_address);
-  }
-  InternalScopedString path(kMaxPathLength);
-  fd_t fd = CovOpenFile(&path, false, "trace-points");
-  if (fd == kInvalidFd) return;
-  WriteToFile(fd, out.data(), out.length());
-  CloseFile(fd);
-
-  fd = CovOpenFile(&path, false, "trace-compunits");
-  if (fd == kInvalidFd) return;
-  out.clear();
-  for (uptr i = 0; i < comp_unit_name_vec.size(); i++)
-    out.append("%s\n", comp_unit_name_vec[i].copied_module_name);
-  WriteToFile(fd, out.data(), out.length());
-  CloseFile(fd);
-
-  fd = CovOpenFile(&path, false, "trace-events");

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
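The asan_test_main.cc change above works through ASan's __asan_default_options() hook, which any instrumented binary can define, not just the test suite. A minimal sketch of that mechanism (the flag values shown here are illustrative, not the test suite's exact defaults):

  // Weak hook consulted by the ASan runtime at startup, before the
  // ASAN_OPTIONS environment variable is parsed; flags given in the
  // environment override whatever is returned here.
  extern "C" const char *__asan_default_options() {
    return "abort_on_error=0:log_to_syslog=0";
  }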
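The new kStdSuppressions string in lsan_common.cc is parsed through the same path as __lsan_default_suppressions(), the weak hook a program may define to ship built-in leak suppressions. A hedged sketch, assuming leak reports are symbolized so the pattern can match the allocation stack (the pattern shown is the one the commit adds):

  // Matched against symbolized frames of each leak's allocation stack;
  // patterns from LSAN_OPTIONS=suppressions=<file> are added on top.
  extern "C" const char *__lsan_default_suppressions() {
    return "leak:*_dl_map_object_deps*";
  }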
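The RootRegion rework above changes how regions are stored (begin is now an uptr) and scanned, but the public entry points __lsan_register_root_region() and __lsan_unregister_root_region() keep taking a pointer and a size. A usage sketch; the buffer and its size are illustrative:

  #include <sanitizer/lsan_interface.h>
  #include <stdlib.h>

  int main() {
    // Heap pointers reachable only through this buffer would otherwise be
    // reported as leaks; registering it makes LSan scan it as a root.
    void *region = malloc(4096);
    __lsan_register_root_region(region, 4096);
    // ... stash heap pointers inside the region ...
    __lsan_unregister_root_region(region, 4096);
    free(region);
    return 0;
  }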
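The strchr/strrchr interceptor changes above alter how many bytes the runtime validates on each call. A standalone sketch of the new length computation (the helper name is mine, not part of the runtime):

  #include <cstring>

  // Bytes the strchr interceptor reports as read after this change: the
  // whole string plus NUL under strict_string_checks, otherwise only up to
  // and including the match, or up to the NUL when there is no match.
  static size_t StrchrCheckedBytes(const char *s, int c, bool strict_checks) {
    const char *result = std::strchr(s, c);
    size_t n = (result ? static_cast<size_t>(result - s) : std::strlen(s)) + 1;
    return strict_checks ? std::strlen(s) + 1 : n;
  }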
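The 8-bit-counter coverage API removed above bucketed each counter value into one of eight ranges (1, 2, 3, 4-7, 8-15, 16-31, 32-127, 128+) and set the matching bit in a per-counter byte. A sketch of just that mapping, lifted from the if-chain in the deleted Update8bitCounterBitsetAndClearCounters() (the helper name is mine):

  #include <cstdint>

  // Returns the single range bit for an 8-bit execution counter; bit i is
  // set when the counter value falls into the i-th range, 0 if the counter
  // never fired.
  static std::uint8_t CounterToRangeBit(std::uint8_t x) {
    if (x >= 128) return 128;  // range 128+
    if (x >= 32)  return 64;   // range 32-127
    if (x >= 16)  return 32;   // range 16-31
    if (x >= 8)   return 16;   // range 8-15
    if (x >= 4)   return 8;    // range 4-7
    if (x >= 3)   return 4;    // range 3
    if (x >= 2)   return 2;    // range 2
    return x ? 1 : 0;          // range 1, or counter never fired
  }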