From owner-svn-src-vendor@freebsd.org Mon Jun 26 20:33:26 2017
From: Dimitry Andric <dim@FreeBSD.org>
Date: Mon, 26 Jun 2017 20:33:22 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-vendor@freebsd.org
Subject: svn commit: r320378 - in vendor/compiler-rt/dist: . cmake include/xray lib/asan lib/asan/scripts lib/interception lib/interception/tests lib/lsan lib/msan lib/sanitizer_common lib/sanitizer_common/...
Message-Id: <201706262033.v5QKXMpm098882@repo.freebsd.org>
List-Id: SVN commit messages for the vendor work area tree

Author: dim
Date: Mon Jun 26 20:33:22 2017
New Revision: 320378
URL: https://svnweb.freebsd.org/changeset/base/320378

Log:
  Vendor import of compiler-rt trunk r306325:
  https://llvm.org/svn/llvm-project/compiler-rt/trunk@306325

Added:
  vendor/compiler-rt/dist/lib/lsan/lsan_mac.cc   (contents, props changed)
  vendor/compiler-rt/dist/test/asan/TestCases/pr33372.cc   (contents, props changed)
  vendor/compiler-rt/dist/test/lsan/TestCases/Darwin/
  vendor/compiler-rt/dist/test/lsan/TestCases/Darwin/dispatch.mm
  vendor/compiler-rt/dist/test/lsan/TestCases/Darwin/lit.local.cfg
  vendor/compiler-rt/dist/test/profile/Linux/counter_promo_for.c   (contents, props changed)
  vendor/compiler-rt/dist/test/profile/Linux/counter_promo_while.c   (contents, props changed)
  vendor/compiler-rt/dist/test/xray/TestCases/Linux/arg1-arg0-logging.cc   (contents, props changed)
Modified:
  vendor/compiler-rt/dist/CMakeLists.txt
  vendor/compiler-rt/dist/cmake/config-ix.cmake
  vendor/compiler-rt/dist/include/xray/xray_interface.h
  vendor/compiler-rt/dist/lib/asan/asan_allocator.cc
  vendor/compiler-rt/dist/lib/asan/asan_report.cc
  vendor/compiler-rt/dist/lib/asan/scripts/asan_device_setup
  vendor/compiler-rt/dist/lib/asan/weak_symbols.txt
  vendor/compiler-rt/dist/lib/interception/interception_win.cc
  vendor/compiler-rt/dist/lib/interception/tests/interception_win_test.cc
  vendor/compiler-rt/dist/lib/lsan/CMakeLists.txt
  vendor/compiler-rt/dist/lib/lsan/lsan.h
  vendor/compiler-rt/dist/lib/lsan/lsan_allocator.cc
  vendor/compiler-rt/dist/lib/lsan/lsan_common_mac.cc
  vendor/compiler-rt/dist/lib/lsan/lsan_thread.cc
  vendor/compiler-rt/dist/lib/lsan/lsan_thread.h
  vendor/compiler-rt/dist/lib/msan/msan_allocator.cc
  vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_allocator.cc
  vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_allocator.h
  vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_allocator_combined.h
  vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_allocator_internal.h
  vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_allocator_local_cache.h
  vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_allocator_primary32.h
  vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_allocator_secondary.h
  vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_atomic_clang.h
  vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_atomic_clang_other.h
  vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_common.h
  vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_linux.cc
  vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_linux_libcdep.cc
  vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_mac.cc
  vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_posix.cc
  vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_posix_libcdep.cc
  vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc
  vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_win.cc
  vendor/compiler-rt/dist/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
  vendor/compiler-rt/dist/lib/sanitizer_common/tests/sanitizer_common_test.cc
  vendor/compiler-rt/dist/lib/scudo/scudo_allocator.cpp
  vendor/compiler-rt/dist/lib/scudo/scudo_allocator_combined.h
  vendor/compiler-rt/dist/lib/scudo/scudo_allocator_secondary.h
  vendor/compiler-rt/dist/lib/tsan/rtl/tsan_mman.cc
  vendor/compiler-rt/dist/lib/ubsan/ubsan_handlers.cc
  vendor/compiler-rt/dist/lib/ubsan/ubsan_handlers.h
  vendor/compiler-rt/dist/lib/ubsan/ubsan_interface.inc
  vendor/compiler-rt/dist/lib/xray/xray_interface.cc
  vendor/compiler-rt/dist/test/asan/TestCases/Linux/allocator_oom_test.cc
  vendor/compiler-rt/dist/test/asan/TestCases/Linux/preinstalled_signal.cc
  vendor/compiler-rt/dist/test/asan/TestCases/Windows/oom.cc
  vendor/compiler-rt/dist/test/asan/lit.cfg
  vendor/compiler-rt/dist/test/esan/TestCases/workingset-midreport.cpp
  vendor/compiler-rt/dist/test/esan/TestCases/workingset-samples.cpp
  vendor/compiler-rt/dist/test/esan/TestCases/workingset-simple.cpp
  vendor/compiler-rt/dist/test/lsan/lit.common.cfg
  vendor/compiler-rt/dist/test/sanitizer_common/TestCases/sanitizer_coverage_inline8bit_counter.cc
  vendor/compiler-rt/dist/test/scudo/random_shuffle.cpp
  vendor/compiler-rt/dist/test/ubsan/TestCases/Misc/nonnull.cpp
  vendor/compiler-rt/dist/test/ubsan/TestCases/Misc/nullability.c

Modified: vendor/compiler-rt/dist/CMakeLists.txt
==============================================================================
--- vendor/compiler-rt/dist/CMakeLists.txt	Mon Jun 26 20:33:18 2017	(r320377)
+++ vendor/compiler-rt/dist/CMakeLists.txt	Mon Jun 26 20:33:22 2017	(r320378)
@@ -7,13 +7,13 @@
 # An important constraint of the build is that it only produces libraries
 # based on the ability of the host toolchain to target various platforms.
 
+cmake_minimum_required(VERSION 3.4.3)
+
 # Check if compiler-rt is built as a standalone project.
 if (CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR OR COMPILER_RT_STANDALONE_BUILD)
   project(CompilerRT C CXX ASM)
   set(COMPILER_RT_STANDALONE_BUILD TRUE)
 endif()
-
-cmake_minimum_required(VERSION 3.4.3)
 
 # Add path for custom compiler-rt modules.
 list(INSERT CMAKE_MODULE_PATH 0

Modified: vendor/compiler-rt/dist/cmake/config-ix.cmake
==============================================================================
--- vendor/compiler-rt/dist/cmake/config-ix.cmake	Mon Jun 26 20:33:18 2017	(r320377)
+++ vendor/compiler-rt/dist/cmake/config-ix.cmake	Mon Jun 26 20:33:22 2017	(r320378)
@@ -179,7 +179,7 @@ set(ALL_UBSAN_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32}
 set(ALL_SAFESTACK_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM64} ${MIPS32} ${MIPS64})
 set(ALL_CFI_SUPPORTED_ARCH ${X86} ${X86_64} ${MIPS64})
 set(ALL_ESAN_SUPPORTED_ARCH ${X86_64} ${MIPS64})
-set(ALL_SCUDO_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64})
+set(ALL_SCUDO_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64} ${MIPS32} ${MIPS64})
 set(ALL_XRAY_SUPPORTED_ARCH ${X86_64} ${ARM32} ${ARM64} ${MIPS32} ${MIPS64} powerpc64le)
 
 if(APPLE)

Modified: vendor/compiler-rt/dist/include/xray/xray_interface.h
==============================================================================
--- vendor/compiler-rt/dist/include/xray/xray_interface.h	Mon Jun 26 20:33:18 2017	(r320377)
+++ vendor/compiler-rt/dist/include/xray/xray_interface.h	Mon Jun 26 20:33:22 2017	(r320378)
@@ -60,7 +60,8 @@ extern int __xray_remove_handler();
 /// start logging their subsequent affected function calls (if patched).
 ///
 /// Returns 1 on success, 0 on error.
-extern int __xray_set_handler_arg1(void (*)(int32_t, XRayEntryType, uint64_t));
+extern int __xray_set_handler_arg1(void (*entry)(int32_t, XRayEntryType,
+                                                 uint64_t));
 
 /// Disables the XRay handler used to log first arguments of function calls.
 /// Returns 1 on success, 0 on error.

Modified: vendor/compiler-rt/dist/lib/asan/asan_allocator.cc
==============================================================================
--- vendor/compiler-rt/dist/lib/asan/asan_allocator.cc	Mon Jun 26 20:33:18 2017	(r320377)
+++ vendor/compiler-rt/dist/lib/asan/asan_allocator.cc	Mon Jun 26 20:33:22 2017	(r320378)
@@ -160,7 +160,7 @@ struct QuarantineCallback {
   }
 
   void *Allocate(uptr size) {
-    return get_allocator().Allocate(cache_, size, 1, false);
+    return get_allocator().Allocate(cache_, size, 1);
   }
 
   void Deallocate(void *p) {
@@ -266,7 +266,8 @@ struct Allocator {
   }
 
   void Initialize(const AllocatorOptions &options) {
-    allocator.Init(options.may_return_null, options.release_to_os_interval_ms);
+    SetAllocatorMayReturnNull(options.may_return_null);
+    allocator.Init(options.release_to_os_interval_ms);
     SharedInitCode(options);
   }
 
@@ -302,7 +303,7 @@ struct Allocator {
   }
 
   void ReInitialize(const AllocatorOptions &options) {
-    allocator.SetMayReturnNull(options.may_return_null);
+    SetAllocatorMayReturnNull(options.may_return_null);
    allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms);
     SharedInitCode(options);
 
@@ -323,7 +324,7 @@ struct Allocator {
     options->thread_local_quarantine_size_kb = quarantine.GetCacheSize() >> 10;
     options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
     options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
-    options->may_return_null = allocator.MayReturnNull();
+    options->may_return_null = AllocatorMayReturnNull();
     options->alloc_dealloc_mismatch =
         atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
     options->release_to_os_interval_ms = allocator.ReleaseToOSIntervalMs();
@@ -374,7 +375,7 @@ struct Allocator {
     if (UNLIKELY(!asan_inited))
       AsanInitFromRtl();
     if (RssLimitExceeded())
-      return allocator.ReturnNullOrDieOnOOM();
+      return AsanAllocator::FailureHandler::OnOOM();
     Flags &fl = *flags();
     CHECK(stack);
     const uptr min_alignment = SHADOW_GRANULARITY;
@@ -407,24 +408,22 @@ struct Allocator {
     if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
       Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
              (void*)size);
-      return allocator.ReturnNullOrDieOnBadRequest();
+      return AsanAllocator::FailureHandler::OnBadRequest();
     }
 
     AsanThread *t = GetCurrentThread();
     void *allocated;
     if (t) {
       AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
-      allocated =
-          allocator.Allocate(cache, needed_size, 8, false);
+      allocated = allocator.Allocate(cache, needed_size, 8);
     } else {
       SpinMutexLock l(&fallback_mutex);
       AllocatorCache *cache = &fallback_allocator_cache;
-      allocated =
-          allocator.Allocate(cache, needed_size, 8, false);
+      allocated = allocator.Allocate(cache, needed_size, 8);
     }
+    if (!allocated)
+      return nullptr;
 
-    if (!allocated) return allocator.ReturnNullOrDieOnOOM();
-
     if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
       // Heap poisoning is enabled, but the allocator provides an unpoisoned
       // chunk. This is possible if CanPoisonMemory() was false for some
@@ -634,7 +633,7 @@ struct Allocator {
 
   void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
     if (CallocShouldReturnNullDueToOverflow(size, nmemb))
-      return allocator.ReturnNullOrDieOnBadRequest();
+      return AsanAllocator::FailureHandler::OnBadRequest();
     void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
     // If the memory comes from the secondary allocator no need to clear it
     // as it comes directly from mmap.

Modified: vendor/compiler-rt/dist/lib/asan/asan_report.cc
==============================================================================
--- vendor/compiler-rt/dist/lib/asan/asan_report.cc	Mon Jun 26 20:33:18 2017	(r320377)
+++ vendor/compiler-rt/dist/lib/asan/asan_report.cc	Mon Jun 26 20:33:22 2017	(r320378)
@@ -204,6 +204,14 @@ class ScopedInErrorReport {
       error_report_callback(buffer_copy.data());
     }
 
+    if (halt_on_error_ && common_flags()->abort_on_error) {
+      // On Android the message is truncated to 512 characters.
+      // FIXME: implement "compact" error format, possibly without, or with
+      // highly compressed stack traces?
+      // FIXME: or just use the summary line as abort message?
+      SetAbortMessage(buffer_copy.data());
+    }
+
     // In halt_on_error = false mode, reset the current error object (before
     // unlocking).
     if (!halt_on_error_)

Modified: vendor/compiler-rt/dist/lib/asan/scripts/asan_device_setup
==============================================================================
--- vendor/compiler-rt/dist/lib/asan/scripts/asan_device_setup	Mon Jun 26 20:33:18 2017	(r320377)
+++ vendor/compiler-rt/dist/lib/asan/scripts/asan_device_setup	Mon Jun 26 20:33:22 2017	(r320378)
@@ -410,15 +410,15 @@ if ! ( cd "$TMPDIRBASE" && diff -qr old/ new/ ) ; then
( cd "$TMPDIRBASE" && diff -qr old/ new/ ) ; then install "$TMPDIR/asanwrapper" /system/bin 755 install "$TMPDIR/asanwrapper64" /system/bin 755 - adb_shell ln -s $ASAN_RT /system/lib/$ASAN_RT_SYMLINK - adb_shell ln -s $ASAN_RT64 /system/lib64/$ASAN_RT_SYMLINK + adb_shell ln -sf $ASAN_RT /system/lib/$ASAN_RT_SYMLINK + adb_shell ln -sf $ASAN_RT64 /system/lib64/$ASAN_RT_SYMLINK else install "$TMPDIR/$ASAN_RT" /system/lib 644 install "$TMPDIR/app_process32" /system/bin 755 $CTX install "$TMPDIR/app_process.wrap" /system/bin 755 $CTX install "$TMPDIR/asanwrapper" /system/bin 755 $CTX - adb_shell ln -s $ASAN_RT /system/lib/$ASAN_RT_SYMLINK + adb_shell ln -sf $ASAN_RT /system/lib/$ASAN_RT_SYMLINK adb_shell rm /system/bin/app_process adb_shell ln -s /system/bin/app_process.wrap /system/bin/app_process Modified: vendor/compiler-rt/dist/lib/asan/weak_symbols.txt ============================================================================== --- vendor/compiler-rt/dist/lib/asan/weak_symbols.txt Mon Jun 26 20:33:18 2017 (r320377) +++ vendor/compiler-rt/dist/lib/asan/weak_symbols.txt Mon Jun 26 20:33:22 2017 (r320378) @@ -1,3 +1,12 @@ ___asan_default_options ___asan_default_suppressions ___asan_on_error +___asan_set_shadow_00 +___asan_set_shadow_f1 +___asan_set_shadow_f2 +___asan_set_shadow_f3 +___asan_set_shadow_f4 +___asan_set_shadow_f5 +___asan_set_shadow_f6 +___asan_set_shadow_f7 +___asan_set_shadow_f8 Modified: vendor/compiler-rt/dist/lib/interception/interception_win.cc ============================================================================== --- vendor/compiler-rt/dist/lib/interception/interception_win.cc Mon Jun 26 20:33:18 2017 (r320377) +++ vendor/compiler-rt/dist/lib/interception/interception_win.cc Mon Jun 26 20:33:22 2017 (r320378) @@ -477,7 +477,7 @@ static size_t GetInstructionSize(uptr address, size_t* switch (*(u8*)address) { case 0xA1: // A1 XX XX XX XX XX XX XX XX : // movabs eax, dword ptr ds:[XXXXXXXX] - return 8; + return 9; } switch (*(u16*)address) { @@ -495,6 +495,11 @@ static size_t GetInstructionSize(uptr address, size_t* case 0x5741: // push r15 case 0x9066: // Two-byte NOP return 2; + + case 0x058B: // 8B 05 XX XX XX XX : mov eax, dword ptr [XX XX XX XX] + if (rel_offset) + *rel_offset = 2; + return 6; } switch (0x00FFFFFF & *(u32*)address) { Modified: vendor/compiler-rt/dist/lib/interception/tests/interception_win_test.cc ============================================================================== --- vendor/compiler-rt/dist/lib/interception/tests/interception_win_test.cc Mon Jun 26 20:33:18 2017 (r320377) +++ vendor/compiler-rt/dist/lib/interception/tests/interception_win_test.cc Mon Jun 26 20:33:22 2017 (r320378) @@ -170,6 +170,13 @@ const u8 kPatchableCode5[] = { 0x54, // push esp }; +#if SANITIZER_WINDOWS64 +u8 kLoadGlobalCode[] = { + 0x8B, 0x05, 0x00, 0x00, 0x00, 0x00, // mov eax [rip + global] + 0xC3, // ret +}; +#endif + const u8 kUnpatchableCode1[] = { 0xC3, // ret }; @@ -501,6 +508,10 @@ TEST(Interception, PatchableFunction) { #endif EXPECT_TRUE(TestFunctionPatching(kPatchableCode4, override)); EXPECT_TRUE(TestFunctionPatching(kPatchableCode5, override)); + +#if SANITIZER_WINDOWS64 + EXPECT_TRUE(TestFunctionPatching(kLoadGlobalCode, override)); +#endif EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode1, override)); EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode2, override)); Modified: vendor/compiler-rt/dist/lib/lsan/CMakeLists.txt ============================================================================== --- 
--- vendor/compiler-rt/dist/lib/lsan/CMakeLists.txt	Mon Jun 26 20:33:18 2017	(r320377)
+++ vendor/compiler-rt/dist/lib/lsan/CMakeLists.txt	Mon Jun 26 20:33:22 2017	(r320378)
@@ -13,6 +13,7 @@ set(LSAN_SOURCES
   lsan_allocator.cc
   lsan_linux.cc
   lsan_interceptors.cc
+  lsan_mac.cc
   lsan_malloc_mac.cc
   lsan_preinit.cc
   lsan_thread.cc)

Modified: vendor/compiler-rt/dist/lib/lsan/lsan.h
==============================================================================
--- vendor/compiler-rt/dist/lib/lsan/lsan.h	Mon Jun 26 20:33:18 2017	(r320377)
+++ vendor/compiler-rt/dist/lib/lsan/lsan.h	Mon Jun 26 20:33:22 2017	(r320378)
@@ -38,6 +38,8 @@
   GET_STACK_TRACE(__sanitizer::common_flags()->malloc_context_size, \
                   common_flags()->fast_unwind_on_malloc)
 
+#define GET_STACK_TRACE_THREAD GET_STACK_TRACE(kStackTraceMax, true)
+
 namespace __lsan {
 
 void InitializeInterceptors();

Modified: vendor/compiler-rt/dist/lib/lsan/lsan_allocator.cc
==============================================================================
--- vendor/compiler-rt/dist/lib/lsan/lsan_allocator.cc	Mon Jun 26 20:33:18 2017	(r320377)
+++ vendor/compiler-rt/dist/lib/lsan/lsan_allocator.cc	Mon Jun 26 20:33:22 2017	(r320378)
@@ -38,8 +38,8 @@ typedef CombinedAllocator<PrimaryAllocator, Allocator
 static Allocator allocator;
 
 void InitializeAllocator() {
+  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
   allocator.InitLinkerInitialized(
-      common_flags()->allocator_may_return_null,
       common_flags()->allocator_release_to_os_interval_ms);
 }
 
@@ -76,7 +76,7 @@ void *Allocate(const StackTrace &stack, uptr size, upt
     Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size);
     return nullptr;
   }
-  void *p = allocator.Allocate(GetAllocatorCache(), size, alignment, false);
+  void *p = allocator.Allocate(GetAllocatorCache(), size, alignment);
   // Do not rely on the allocator to clear the memory (it's slow).
   if (cleared && allocator.FromPrimary(p))
     memset(p, 0, size);

Modified: vendor/compiler-rt/dist/lib/lsan/lsan_common_mac.cc
==============================================================================
--- vendor/compiler-rt/dist/lib/lsan/lsan_common_mac.cc	Mon Jun 26 20:33:18 2017	(r320377)
+++ vendor/compiler-rt/dist/lib/lsan/lsan_common_mac.cc	Mon Jun 26 20:33:22 2017	(r320378)
@@ -79,8 +79,7 @@ void EnableInThisThread() {
 
 u32 GetCurrentThread() {
   thread_local_data_t *data = get_tls_val(false);
-  CHECK(data);
-  return data->current_thread_id;
+  return data ? data->current_thread_id : kInvalidTid;
 }
 
 void SetCurrentThread(u32 tid) { get_tls_val(true)->current_thread_id = tid; }

Added: vendor/compiler-rt/dist/lib/lsan/lsan_mac.cc
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ vendor/compiler-rt/dist/lib/lsan/lsan_mac.cc	Mon Jun 26 20:33:22 2017	(r320378)
@@ -0,0 +1,192 @@
+//===-- lsan_mac.cc -------------------------------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer, a memory leak checker.
+//
+// Mac-specific details.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_MAC
+
+#include "interception/interception.h"
+#include "lsan.h"
+#include "lsan_allocator.h"
+#include "lsan_thread.h"
+
+#include <pthread.h>
+
+namespace __lsan {
+// Support for the following functions from libdispatch on Mac OS:
+//   dispatch_async_f()
+//   dispatch_async()
+//   dispatch_sync_f()
+//   dispatch_sync()
+//   dispatch_after_f()
+//   dispatch_after()
+//   dispatch_group_async_f()
+//   dispatch_group_async()
+// TODO(glider): libdispatch API contains other functions that we don't support
+// yet.
+//
+// dispatch_sync() and dispatch_sync_f() are synchronous, although chances are
+// they can cause jobs to run on a thread different from the current one.
+// TODO(glider): if so, we need a test for this (otherwise we should remove
+// them).
+//
+// The following functions use dispatch_barrier_async_f() (which isn't a library
+// function but is exported) and are thus supported:
+//   dispatch_source_set_cancel_handler_f()
+//   dispatch_source_set_cancel_handler()
+//   dispatch_source_set_event_handler_f()
+//   dispatch_source_set_event_handler()
+//
+// The reference manual for Grand Central Dispatch is available at
+//   http://developer.apple.com/library/mac/#documentation/Performance/Reference/GCD_libdispatch_Ref/Reference/reference.html
+// The implementation details are at
+//   http://libdispatch.macosforge.org/trac/browser/trunk/src/queue.c
+
+typedef void *dispatch_group_t;
+typedef void *dispatch_queue_t;
+typedef void *dispatch_source_t;
+typedef u64 dispatch_time_t;
+typedef void (*dispatch_function_t)(void *block);
+typedef void *(*worker_t)(void *block);
+
+// A wrapper for the ObjC blocks used to support libdispatch.
+typedef struct {
+  void *block;
+  dispatch_function_t func;
+  u32 parent_tid;
+} lsan_block_context_t;
+
+ALWAYS_INLINE
+void lsan_register_worker_thread(int parent_tid) {
+  if (GetCurrentThread() == kInvalidTid) {
+    u32 tid = ThreadCreate(parent_tid, 0, true);
+    ThreadStart(tid, GetTid());
+    SetCurrentThread(tid);
+  }
+}
+
+// For use by only those functions that allocated the context via
+// alloc_lsan_context().
+extern "C" void lsan_dispatch_call_block_and_release(void *block) {
+  lsan_block_context_t *context = (lsan_block_context_t *)block;
+  VReport(2,
+          "lsan_dispatch_call_block_and_release(): "
+          "context: %p, pthread_self: %p\n",
+          block, pthread_self());
+  lsan_register_worker_thread(context->parent_tid);
+  // Call the original dispatcher for the block.
+  context->func(context->block);
+  lsan_free(context);
+}
+
+}  // namespace __lsan
+
+using namespace __lsan;  // NOLINT
+
+// Wrap |ctxt| and |func| into an lsan_block_context_t.
+// The caller retains control of the allocated context.
+extern "C" lsan_block_context_t *alloc_lsan_context(void *ctxt,
+                                                    dispatch_function_t func) {
+  GET_STACK_TRACE_THREAD;
+  lsan_block_context_t *lsan_ctxt =
+      (lsan_block_context_t *)lsan_malloc(sizeof(lsan_block_context_t), stack);
+  lsan_ctxt->block = ctxt;
+  lsan_ctxt->func = func;
+  lsan_ctxt->parent_tid = GetCurrentThread();
+  return lsan_ctxt;
+}
+
+// Define interceptor for dispatch_*_f function with the three most common
+// parameters: dispatch_queue_t, context, dispatch_function_t.
+#define INTERCEPT_DISPATCH_X_F_3(dispatch_x_f)                        \
+  INTERCEPTOR(void, dispatch_x_f, dispatch_queue_t dq, void *ctxt,    \
+              dispatch_function_t func) {                             \
+    lsan_block_context_t *lsan_ctxt = alloc_lsan_context(ctxt, func); \
+    return REAL(dispatch_x_f)(dq, (void *)lsan_ctxt,                  \
+                              lsan_dispatch_call_block_and_release);  \
+  }
+
+INTERCEPT_DISPATCH_X_F_3(dispatch_async_f)
+INTERCEPT_DISPATCH_X_F_3(dispatch_sync_f)
+INTERCEPT_DISPATCH_X_F_3(dispatch_barrier_async_f)
+
+INTERCEPTOR(void, dispatch_after_f, dispatch_time_t when, dispatch_queue_t dq,
+            void *ctxt, dispatch_function_t func) {
+  lsan_block_context_t *lsan_ctxt = alloc_lsan_context(ctxt, func);
+  return REAL(dispatch_after_f)(when, dq, (void *)lsan_ctxt,
+                                lsan_dispatch_call_block_and_release);
+}
+
+INTERCEPTOR(void, dispatch_group_async_f, dispatch_group_t group,
+            dispatch_queue_t dq, void *ctxt, dispatch_function_t func) {
+  lsan_block_context_t *lsan_ctxt = alloc_lsan_context(ctxt, func);
+  REAL(dispatch_group_async_f)
+  (group, dq, (void *)lsan_ctxt, lsan_dispatch_call_block_and_release);
+}
+
+#if !defined(MISSING_BLOCKS_SUPPORT)
+extern "C" {
+void dispatch_async(dispatch_queue_t dq, void (^work)(void));
+void dispatch_group_async(dispatch_group_t dg, dispatch_queue_t dq,
+                          void (^work)(void));
+void dispatch_after(dispatch_time_t when, dispatch_queue_t queue,
+                    void (^work)(void));
+void dispatch_source_set_cancel_handler(dispatch_source_t ds,
+                                        void (^work)(void));
+void dispatch_source_set_event_handler(dispatch_source_t ds,
+                                       void (^work)(void));
+}
+
+#define GET_LSAN_BLOCK(work)                 \
+  void (^lsan_block)(void);                  \
+  int parent_tid = GetCurrentThread();       \
+  lsan_block = ^(void) {                     \
+    lsan_register_worker_thread(parent_tid); \
+    work();                                  \
+  }
+
+INTERCEPTOR(void, dispatch_async, dispatch_queue_t dq, void (^work)(void)) {
+  GET_LSAN_BLOCK(work);
+  REAL(dispatch_async)(dq, lsan_block);
+}
+
+INTERCEPTOR(void, dispatch_group_async, dispatch_group_t dg,
+            dispatch_queue_t dq, void (^work)(void)) {
+  GET_LSAN_BLOCK(work);
+  REAL(dispatch_group_async)(dg, dq, lsan_block);
+}
+
+INTERCEPTOR(void, dispatch_after, dispatch_time_t when, dispatch_queue_t queue,
+            void (^work)(void)) {
+  GET_LSAN_BLOCK(work);
+  REAL(dispatch_after)(when, queue, lsan_block);
+}
+
+INTERCEPTOR(void, dispatch_source_set_cancel_handler, dispatch_source_t ds,
+            void (^work)(void)) {
+  if (!work) {
+    REAL(dispatch_source_set_cancel_handler)(ds, work);
+    return;
+  }
+  GET_LSAN_BLOCK(work);
+  REAL(dispatch_source_set_cancel_handler)(ds, lsan_block);
+}
+
+INTERCEPTOR(void, dispatch_source_set_event_handler, dispatch_source_t ds,
+            void (^work)(void)) {
+  GET_LSAN_BLOCK(work);
+  REAL(dispatch_source_set_event_handler)(ds, lsan_block);
+}
+#endif
+
+#endif  // SANITIZER_MAC

Modified: vendor/compiler-rt/dist/lib/lsan/lsan_thread.cc
==============================================================================
--- vendor/compiler-rt/dist/lib/lsan/lsan_thread.cc	Mon Jun 26 20:33:18 2017	(r320377)
+++ vendor/compiler-rt/dist/lib/lsan/lsan_thread.cc	Mon Jun 26 20:33:22 2017	(r320378)
@@ -77,7 +77,7 @@ u32 ThreadCreate(u32 parent_tid, uptr user_id, bool de
                                        /* arg */ nullptr);
 }
 
-void ThreadStart(u32 tid, tid_t os_id) {
+void ThreadStart(u32 tid, tid_t os_id, bool workerthread) {
   OnStartedArgs args;
   uptr stack_size = 0;
   uptr tls_size = 0;
@@ -87,7 +87,7 @@ void ThreadStart(u32 tid, tid_t os_id) {
   args.tls_end = args.tls_begin + tls_size;
   GetAllocatorCacheRange(&args.cache_begin, &args.cache_end);
   args.dtls = DTLS_Get();
-  thread_registry->StartThread(tid, os_id, /*workerthread*/ false, &args);
+  thread_registry->StartThread(tid, os_id, workerthread, &args);
 }
 
 void ThreadFinish() {

Modified: vendor/compiler-rt/dist/lib/lsan/lsan_thread.h
==============================================================================
--- vendor/compiler-rt/dist/lib/lsan/lsan_thread.h	Mon Jun 26 20:33:18 2017	(r320377)
+++ vendor/compiler-rt/dist/lib/lsan/lsan_thread.h	Mon Jun 26 20:33:22 2017	(r320378)
@@ -45,7 +45,7 @@ class ThreadContext : public ThreadContextBase {
 
 void InitializeThreadRegistry();
 
-void ThreadStart(u32 tid, tid_t os_id);
+void ThreadStart(u32 tid, tid_t os_id, bool workerthread = false);
 void ThreadFinish();
 u32 ThreadCreate(u32 tid, uptr uid, bool detached);
 void ThreadJoin(u32 tid);

Modified: vendor/compiler-rt/dist/lib/msan/msan_allocator.cc
==============================================================================
--- vendor/compiler-rt/dist/lib/msan/msan_allocator.cc	Mon Jun 26 20:33:18 2017	(r320377)
+++ vendor/compiler-rt/dist/lib/msan/msan_allocator.cc	Mon Jun 26 20:33:22 2017	(r320378)
@@ -119,9 +119,8 @@ static AllocatorCache fallback_allocator_cache;
 static SpinMutex fallback_mutex;
 
 void MsanAllocatorInit() {
-  allocator.Init(
-      common_flags()->allocator_may_return_null,
-      common_flags()->allocator_release_to_os_interval_ms);
+  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
+  allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
 }
 
 AllocatorCache *GetAllocatorCache(MsanThreadLocalMallocStorage *ms) {
@@ -139,17 +138,17 @@ static void *MsanAllocate(StackTrace *stack, uptr size
   if (size > kMaxAllowedMallocSize) {
     Report("WARNING: MemorySanitizer failed to allocate %p bytes\n",
            (void *)size);
-    return allocator.ReturnNullOrDieOnBadRequest();
+    return Allocator::FailureHandler::OnBadRequest();
   }
   MsanThread *t = GetCurrentThread();
   void *allocated;
   if (t) {
     AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
-    allocated = allocator.Allocate(cache, size, alignment, false);
+    allocated = allocator.Allocate(cache, size, alignment);
   } else {
     SpinMutexLock l(&fallback_mutex);
     AllocatorCache *cache = &fallback_allocator_cache;
-    allocated = allocator.Allocate(cache, size, alignment, false);
+    allocated = allocator.Allocate(cache, size, alignment);
   }
   Metadata *meta =
       reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
@@ -197,7 +196,7 @@ void MsanDeallocate(StackTrace *stack, void *p) {
 
 void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
   if (CallocShouldReturnNullDueToOverflow(size, nmemb))
-    return allocator.ReturnNullOrDieOnBadRequest();
+    return Allocator::FailureHandler::OnBadRequest();
   return MsanReallocate(stack, nullptr, nmemb * size, sizeof(u64), true);
 }

Modified: vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_allocator.cc
==============================================================================
--- vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_allocator.cc	Mon Jun 26 20:33:18 2017	(r320377)
+++ vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_allocator.cc	Mon Jun 26 20:33:22 2017	(r320378)
@@ -94,8 +94,7 @@ InternalAllocator *internal_allocator() {
     SpinMutexLock l(&internal_alloc_init_mu);
     if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
         0) {
-      internal_allocator_instance->Init(
-          /* may_return_null */ false, kReleaseToOSIntervalNever);
+      internal_allocator_instance->Init(kReleaseToOSIntervalNever);
       atomic_store(&internal_allocator_initialized, 1, memory_order_release);
     }
   }
@@ -108,9 +107,9 @@ static void *RawInternalAlloc(uptr size, InternalAlloc
   if (cache == 0) {
     SpinMutexLock l(&internal_allocator_cache_mu);
     return internal_allocator()->Allocate(&internal_allocator_cache, size,
-                                          alignment, false);
+                                          alignment);
   }
-  return internal_allocator()->Allocate(cache, size, alignment, false);
+  return internal_allocator()->Allocate(cache, size, alignment);
 }
 
 static void *RawInternalRealloc(void *ptr, uptr size,
@@ -162,7 +161,7 @@ void *InternalRealloc(void *addr, uptr size, InternalA
 
 void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
   if (CallocShouldReturnNullDueToOverflow(count, size))
-    return internal_allocator()->ReturnNullOrDieOnBadRequest();
+    return InternalAllocator::FailureHandler::OnBadRequest();
   void *p = InternalAlloc(count * size, cache);
   if (p) internal_memset(p, 0, count * size);
   return p;
@@ -209,17 +208,51 @@ bool CallocShouldReturnNullDueToOverflow(uptr size, up
   return (max / size) < n;
 }
 
-static atomic_uint8_t reporting_out_of_memory = {0};
+static atomic_uint8_t allocator_out_of_memory = {0};
+static atomic_uint8_t allocator_may_return_null = {0};
 
-bool IsReportingOOM() { return atomic_load_relaxed(&reporting_out_of_memory); }
+bool IsAllocatorOutOfMemory() {
+  return atomic_load_relaxed(&allocator_out_of_memory);
+}
 
-void NORETURN ReportAllocatorCannotReturnNull(bool out_of_memory) {
-  if (out_of_memory) atomic_store_relaxed(&reporting_out_of_memory, 1);
+// Prints error message and kills the program.
+void NORETURN ReportAllocatorCannotReturnNull() {
   Report("%s's allocator is terminating the process instead of returning 0\n",
          SanitizerToolName);
   Report("If you don't like this behavior set allocator_may_return_null=1\n");
   CHECK(0);
   Die();
+}
+
+bool AllocatorMayReturnNull() {
+  return atomic_load(&allocator_may_return_null, memory_order_relaxed);
+}
+
+void SetAllocatorMayReturnNull(bool may_return_null) {
+  atomic_store(&allocator_may_return_null, may_return_null,
+               memory_order_relaxed);
+}
+
+void *ReturnNullOrDieOnFailure::OnBadRequest() {
+  if (AllocatorMayReturnNull())
+    return nullptr;
+  ReportAllocatorCannotReturnNull();
+}
+
+void *ReturnNullOrDieOnFailure::OnOOM() {
+  atomic_store_relaxed(&allocator_out_of_memory, 1);
+  if (AllocatorMayReturnNull())
+    return nullptr;
+  ReportAllocatorCannotReturnNull();
+}
+
+void *DieOnFailure::OnBadRequest() {
+  ReportAllocatorCannotReturnNull();
+}
+
+void *DieOnFailure::OnOOM() {
+  atomic_store_relaxed(&allocator_out_of_memory, 1);
+  ReportAllocatorCannotReturnNull();
 }
 
 }  // namespace __sanitizer

Modified: vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_allocator.h
==============================================================================
--- vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_allocator.h	Mon Jun 26 20:33:18 2017	(r320377)
+++ vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_allocator.h	Mon Jun 26 20:33:22 2017	(r320378)
@@ -24,12 +24,28 @@
 
 namespace __sanitizer {
 
-// Returns true if ReportAllocatorCannotReturnNull(true) was called.
-// Can be use to avoid memory hungry operations.
-bool IsReportingOOM();
+// Since flags are immutable and allocator behavior can be changed at runtime
+// (unit tests or ASan on Android are some examples), allocator_may_return_null
+// flag value is cached here and can be altered later.
+bool AllocatorMayReturnNull();
+void SetAllocatorMayReturnNull(bool may_return_null);
 
-// Prints error message and kills the program.
-void NORETURN ReportAllocatorCannotReturnNull(bool out_of_memory);
+// Allocator failure handling policies:
+// Implements AllocatorMayReturnNull policy, returns null when the flag is set,
+// dies otherwise.
+struct ReturnNullOrDieOnFailure {
+  static void *OnBadRequest();
+  static void *OnOOM();
+};
+// Always dies on the failure.
+struct DieOnFailure {
+  static void *OnBadRequest();
+  static void *OnOOM();
+};
+
+// Returns true if allocator detected OOM condition. Can be used to avoid memory
+// hungry operations. Set when AllocatorReturnNullOrDieOnOOM() is called.
+bool IsAllocatorOutOfMemory();
 
 // Allocators call these callbacks on mmap/munmap.
 struct NoOpMapUnmapCallback {

Modified: vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_allocator_combined.h
==============================================================================
--- vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_allocator_combined.h	Mon Jun 26 20:33:18 2017	(r320377)
+++ vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_allocator_combined.h	Mon Jun 26 20:33:22 2017	(r320378)
@@ -24,31 +24,26 @@ template <class PrimaryAllocator, class AllocatorCache
           class SecondaryAllocator>  // NOLINT
 class CombinedAllocator {
  public:
-  void InitCommon(bool may_return_null, s32 release_to_os_interval_ms) {
-    primary_.Init(release_to_os_interval_ms);
-    atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
-  }
+  typedef typename SecondaryAllocator::FailureHandler FailureHandler;
 
-  void InitLinkerInitialized(
-      bool may_return_null, s32 release_to_os_interval_ms) {
-    secondary_.InitLinkerInitialized(may_return_null);
+  void InitLinkerInitialized(s32 release_to_os_interval_ms) {
+    primary_.Init(release_to_os_interval_ms);
+    secondary_.InitLinkerInitialized();
     stats_.InitLinkerInitialized();
-    InitCommon(may_return_null, release_to_os_interval_ms);
   }
 
-  void Init(bool may_return_null, s32 release_to_os_interval_ms) {
-    secondary_.Init(may_return_null);
+  void Init(s32 release_to_os_interval_ms) {
+    primary_.Init(release_to_os_interval_ms);
+    secondary_.Init();
     stats_.Init();
-    InitCommon(may_return_null, release_to_os_interval_ms);
   }
 
-  void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
-                 bool cleared = false) {
+  void *Allocate(AllocatorCache *cache, uptr size, uptr alignment) {
     // Returning 0 on malloc(0) may break a lot of code.
     if (size == 0)
       size = 1;
     if (size + alignment < size)
-      return ReturnNullOrDieOnBadRequest();
+      return FailureHandler::OnBadRequest();
     uptr original_size = size;
     // If alignment requirements are to be fulfilled by the frontend allocator
     // rather than by the primary or secondary, passing an alignment lower than
     // or equal to 8 will prevent any further rounding up, as well as the later
     // alignment check.
@@ -56,49 +51,24 @@ class CombinedAllocator {
     if (alignment > 8)
       size = RoundUpTo(size, alignment);
-    void *res;
-    bool from_primary = primary_.CanAllocate(size, alignment);
     // The primary allocator should return a 2^x aligned allocation when
     // requested 2^x bytes, hence using the rounded up 'size' when being
     // serviced by the primary (this is no longer true when the primary is
     // using a non-fixed base address). The secondary takes care of the
     // alignment without such requirement, and allocating 'size' would use
     // extraneous memory, so we employ 'original_size'.
-    if (from_primary)
+    void *res;
+    if (primary_.CanAllocate(size, alignment))
       res = cache->Allocate(&primary_, primary_.ClassID(size));
     else
       res = secondary_.Allocate(&stats_, original_size, alignment);
+    if (!res)
+      return FailureHandler::OnOOM();
     if (alignment > 8)
       CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
-    // When serviced by the secondary, the chunk comes from a mmap allocation
-    // and will be zero'd out anyway. We only need to clear our the chunk if
-    // it was serviced by the primary, hence using the rounded up 'size'.
-    if (cleared && res && from_primary)
-      internal_bzero_aligned16(res, RoundUpTo(size, 16));
     return res;
   }
 
-  bool MayReturnNull() const {
-    return atomic_load(&may_return_null_, memory_order_acquire);
-  }
-
-  void *ReturnNullOrDieOnBadRequest() {
-    if (MayReturnNull())
-      return nullptr;
-    ReportAllocatorCannotReturnNull(false);
-  }
-
-  void *ReturnNullOrDieOnOOM() {
-    if (MayReturnNull())
-      return nullptr;
-    ReportAllocatorCannotReturnNull(true);
-  }
-
-  void SetMayReturnNull(bool may_return_null) {
-    secondary_.SetMayReturnNull(may_return_null);
-    atomic_store(&may_return_null_, may_return_null, memory_order_release);
-  }
-
   s32 ReleaseToOSIntervalMs() const {
     return primary_.ReleaseToOSIntervalMs();
   }
@@ -219,6 +189,5 @@ class CombinedAllocator {
   PrimaryAllocator primary_;
   SecondaryAllocator secondary_;
   AllocatorGlobalStats stats_;
-  atomic_uint8_t may_return_null_;
 };

Modified: vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_allocator_internal.h
==============================================================================
--- vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_allocator_internal.h	Mon Jun 26 20:33:18 2017	(r320377)
+++ vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_allocator_internal.h	Mon Jun 26 20:33:22 2017	(r320378)
@@ -47,7 +47,8 @@ typedef SizeClassAllocatorLocalCache<PrimaryInternalAl
     InternalAllocatorCache;
 
 typedef CombinedAllocator<PrimaryInternalAllocator, InternalAllocatorCache,
-                          LargeMmapAllocator<NoOpMapUnmapCallback>
+                          LargeMmapAllocator<NoOpMapUnmapCallback,
+                                             DieOnFailure>
                           > InternalAllocator;
 
 void *InternalAlloc(uptr size, InternalAllocatorCache *cache = nullptr,
                     uptr alignment = 0);

Modified: vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_allocator_local_cache.h
==============================================================================
--- vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_allocator_local_cache.h	Mon Jun 26 20:33:18 2017	(r320377)
+++ vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_allocator_local_cache.h	Mon Jun 26 20:33:22 2017	(r320378)
@@ -144,8 +144,10 @@ struct SizeClassAllocator32LocalCache {
     CHECK_NE(class_id, 0UL);
     CHECK_LT(class_id, kNumClasses);
     PerClass *c = &per_class_[class_id];
-    if (UNLIKELY(c->count == 0))
-      Refill(allocator, class_id);
+    if (UNLIKELY(c->count == 0)) {
+      if (UNLIKELY(!Refill(allocator, class_id)))
+        return nullptr;
+    }
     stats_.Add(AllocatorStatAllocated, c->class_size);
     void *res = c->batch[--c->count];
     PREFETCH(c->batch[c->count - 1]);
@@ -227,14 +229,17 @@ struct SizeClassAllocator32LocalCache {
     Deallocate(allocator, batch_class_id, b);
   }
 
-  NOINLINE void Refill(SizeClassAllocator *allocator, uptr class_id) {
+  NOINLINE bool Refill(SizeClassAllocator *allocator, uptr class_id) {
     InitCache();
     PerClass *c = &per_class_[class_id];
     TransferBatch *b = allocator->AllocateBatch(&stats_, this, class_id);
+    if (UNLIKELY(!b))
+      return false;
     CHECK_GT(b->Count(), 0);
     b->CopyToArray(c->batch);
     c->count = b->Count();
     DestroyBatch(class_id, allocator, b);
+    return true;
   }
 
   NOINLINE void Drain(SizeClassAllocator *allocator, uptr class_id) {
@@ -244,6 +249,10 @@ struct SizeClassAllocator32LocalCache {
     uptr first_idx_to_drain = c->count - cnt;
     TransferBatch *b = CreateBatch(
         class_id, allocator, (TransferBatch *)c->batch[first_idx_to_drain]);
+    // Failure to allocate a batch while releasing memory is non recoverable.
+    // TODO(alekseys): Figure out how to do it without allocating a new batch.
+    if (UNLIKELY(!b))
+      DieOnFailure::OnOOM();
     b->SetFromArray(allocator->GetRegionBeginBySizeClass(class_id),
                     &c->batch[first_idx_to_drain], cnt);
     c->count -= cnt;

Modified: vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_allocator_primary32.h
==============================================================================
--- vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_allocator_primary32.h	Mon Jun 26 20:33:18 2017	(r320377)
+++ vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_allocator_primary32.h	Mon Jun 26 20:33:22 2017	(r320378)
@@ -24,7 +24,8 @@ template <class SizeClassAllocator> struct SizeClassAll
 //      be returned by MmapOrDie().
 //
 // Region:
-//   a result of a single call to MmapAlignedOrDie(kRegionSize, kRegionSize).
+//   a result of a single call to MmapAlignedOrDieOnFatalError(kRegionSize,
+//   kRegionSize).
 // Since the regions are aligned by kRegionSize, there are exactly
 // kNumPossibleRegions possible regions in the address space and so we keep
 // a ByteMap possible_regions to store the size classes of each Region.
@@ -149,8 +150,9 @@ class SizeClassAllocator32 {
     CHECK_LT(class_id, kNumClasses);
     SizeClassInfo *sci = GetSizeClassInfo(class_id);
     SpinMutexLock l(&sci->mutex);
-    if (sci->free_list.empty())
-      PopulateFreeList(stat, c, sci, class_id);
+    if (sci->free_list.empty() &&
+        UNLIKELY(!PopulateFreeList(stat, c, sci, class_id)))
+      return nullptr;
     CHECK(!sci->free_list.empty());
     TransferBatch *b = sci->free_list.front();
     sci->free_list.pop_front();
@@ -277,8 +279,10 @@ class SizeClassAllocator32 {
 
   uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
     CHECK_LT(class_id, kNumClasses);
-    uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize,
-                                      "SizeClassAllocator32"));
+    uptr res = reinterpret_cast<uptr>(MmapAlignedOrDieOnFatalError(
+        kRegionSize, kRegionSize, "SizeClassAllocator32"));
+    if (UNLIKELY(!res))
+      return 0;
     MapUnmapCallback().OnMap(res, kRegionSize);
     stat->Add(AllocatorStatMapped, kRegionSize);
     CHECK_EQ(0U, (res & (kRegionSize - 1)));
@@ -291,16 +295,20 @@ class SizeClassAllocator32 {
     return &size_class_info_array[class_id];
   }
 
-  void PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
+  bool PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
                         SizeClassInfo *sci, uptr class_id) {
     uptr size = ClassIdToSize(class_id);
     uptr reg = AllocateRegion(stat, class_id);
+    if (UNLIKELY(!reg))
+      return false;
     uptr n_chunks = kRegionSize / (size + kMetadataSize);
     uptr max_count = TransferBatch::MaxCached(class_id);
     TransferBatch *b = nullptr;
     for (uptr i = reg; i < reg + n_chunks * size; i += size) {
       if (!b) {
         b = c->CreateBatch(class_id, this, (TransferBatch*)i);
+        if (!b)
+          return false;
         b->Clear();
       }
       b->Add((void*)i);
@@ -314,6 +322,7 @@ class SizeClassAllocator32 {
       CHECK_GT(b->Count(), 0);
       sci->free_list.push_back(b);
     }
+    return true;
   }
 
   ByteMap possible_regions;

Modified: vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_allocator_secondary.h
==============================================================================
--- vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_allocator_secondary.h	Mon Jun 26 20:33:18 2017	(r320377)
+++ vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_allocator_secondary.h	Mon Jun 26 20:33:22 2017	(r320378)
@@ -17,17 +17,19 @@
 // This class can (de)allocate only large chunks of memory using mmap/unmap.
 // The main purpose of this allocator is to cover large and rare allocation
 // sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
-template <class MapUnmapCallback = NoOpMapUnmapCallback>
+template <class MapUnmapCallback = NoOpMapUnmapCallback,
+          class FailureHandlerT = ReturnNullOrDieOnFailure>
 class LargeMmapAllocator {
  public:
-  void InitLinkerInitialized(bool may_return_null) {
+  typedef FailureHandlerT FailureHandler;
+
+  void InitLinkerInitialized() {
     page_size_ = GetPageSizeCached();
-    atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
   }
 
-  void Init(bool may_return_null) {
+  void Init() {
     internal_memset(this, 0, sizeof(*this));
-    InitLinkerInitialized(may_return_null);
+    InitLinkerInitialized();
   }
 
   void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
@@ -37,11 +39,11 @@ class LargeMmapAllocator {
     map_size += alignment;
     // Overflow.
     if (map_size < size)
-      return ReturnNullOrDieOnBadRequest();
+      return FailureHandler::OnBadRequest();
     uptr map_beg = reinterpret_cast<uptr>(
         MmapOrDieOnFatalError(map_size, "LargeMmapAllocator"));
     if (!map_beg)
-      return ReturnNullOrDieOnOOM();
+      return FailureHandler::OnOOM();
     CHECK(IsAligned(map_beg, page_size_));
     MapUnmapCallback().OnMap(map_beg, map_size);
     uptr map_end = map_beg + map_size;
@@ -75,24 +77,6 @@ class LargeMmapAllocator {
     return reinterpret_cast<void*>(res);
   }
 
-  bool MayReturnNull() const {
-    return atomic_load(&may_return_null_, memory_order_acquire);
-  }
-
-  void *ReturnNullOrDieOnBadRequest() {
-    if (MayReturnNull()) return nullptr;
-    ReportAllocatorCannotReturnNull(false);
-  }
-
-  void *ReturnNullOrDieOnOOM() {
-    if (MayReturnNull()) return nullptr;
-    ReportAllocatorCannotReturnNull(true);
-  }
-
-  void SetMayReturnNull(bool may_return_null) {
-    atomic_store(&may_return_null_, may_return_null, memory_order_release);
-  }
-
   void Deallocate(AllocatorStats *stat, void *p) {
     Header *h = GetHeader(p);
     {
@@ -278,7 +262,6 @@ class LargeMmapAllocator {
   struct Stats {
     uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
   } stats;
-  atomic_uint8_t may_return_null_;
   SpinMutex mutex_;
 };

Modified: vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_atomic_clang.h
==============================================================================
--- vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_atomic_clang.h	Mon Jun 26 20:33:18 2017	(r320377)
+++ vendor/compiler-rt/dist/lib/sanitizer_common/sanitizer_atomic_clang.h	Mon Jun 26 20:33:22 2017	(r320378)
@@ -71,16 +71,25 @@ INLINE typename T::Type atomic_exchange(volatile T *a,
   return v;
 }
 
-template<typename T>
-INLINE bool atomic_compare_exchange_strong(volatile T *a,
-                                           typename T::Type *cmp,
+template <typename T>
+INLINE bool atomic_compare_exchange_strong(volatile T *a, typename T::Type *cmp,
                                            typename T::Type xchg,
                                            memory_order mo) {
   typedef typename T::Type Type;
   Type cmpv = *cmp;
-  Type prev = __sync_val_compare_and_swap(&a->val_dont_use, cmpv, xchg);
-  if (prev == cmpv)
-    return true;

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
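[Editorial note, not part of the commit: the recurring change across the allocator diffs above is the replacement of per-allocator may_return_null state (MayReturnNull(), ReturnNullOrDieOnBadRequest(), ReturnNullOrDieOnOOM()) with a compile-time failure-handler policy: LargeMmapAllocator gains a FailureHandlerT template parameter, CombinedAllocator forwards it as FailureHandler, and callers invoke FailureHandler::OnBadRequest() or FailureHandler::OnOOM(). Below is a minimal, self-contained sketch of that policy pattern; ToyAllocator, ReturnNull, and AbortOnFailure are hypothetical names invented for illustration, and only the shape of the policy interface mirrors compiler-rt's ReturnNullOrDieOnFailure / DieOnFailure (the real ones also set an allocator-out-of-memory flag and report before dying).]

#include <cstdio>
#include <cstdlib>

// Policy that returns null to the caller on failure, roughly what
// ReturnNullOrDieOnFailure does when allocator_may_return_null=1.
struct ReturnNull {
  static void *OnBadRequest() { return nullptr; }
  static void *OnOOM() { return nullptr; }
};

// Policy that kills the process on failure, in the spirit of DieOnFailure.
struct AbortOnFailure {
  static void *OnBadRequest() { std::abort(); }
  static void *OnOOM() { std::abort(); }
};

// A toy allocator parameterized by the policy, the way LargeMmapAllocator
// gains a FailureHandlerT parameter in this import.
template <class FailureHandler>
struct ToyAllocator {
  void *Allocate(std::size_t size, std::size_t alignment) {
    if (size + alignment < size)            // unsigned overflow: a bad request
      return FailureHandler::OnBadRequest();
    void *p = std::malloc(size);
    if (!p)                                 // genuine out-of-memory condition
      return FailureHandler::OnOOM();
    return p;
  }
};

int main() {
  ToyAllocator<ReturnNull> lenient;         // like allocator_may_return_null=1
  void *p = lenient.Allocate(64, 8);
  std::printf("allocated: %p\n", p);
  std::free(p);
}

Making the policy a template parameter lets each allocator pick its failure behavior at compile time (the internal allocator dies, user-facing allocators honor the cached allocator_may_return_null flag) instead of carrying per-instance atomic state, which is exactly what the removal of may_return_null_ from CombinedAllocator and LargeMmapAllocator accomplishes.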