Date:      Mon, 21 Oct 2013 05:10:46 +0000 (UTC)
From:      Jason Evans <jasone@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r256823 - in head/contrib/jemalloc: . doc include/jemalloc include/jemalloc/internal src
Message-ID: <201310210510.r9L5Ak40035450@svn.freebsd.org>
Author: jasone
Date: Mon Oct 21 05:10:46 2013
New Revision: 256823
URL: http://svnweb.freebsd.org/changeset/base/256823

Log:
  Update jemalloc to version 3.4.1.

Modified:
  head/contrib/jemalloc/ChangeLog
  head/contrib/jemalloc/FREEBSD-diffs
  head/contrib/jemalloc/FREEBSD-upgrade
  head/contrib/jemalloc/VERSION
  head/contrib/jemalloc/doc/jemalloc.3
  head/contrib/jemalloc/include/jemalloc/internal/arena.h
  head/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h
  head/contrib/jemalloc/include/jemalloc/internal/private_namespace.h
  head/contrib/jemalloc/include/jemalloc/internal/tcache.h
  head/contrib/jemalloc/include/jemalloc/jemalloc.h
  head/contrib/jemalloc/src/arena.c
  head/contrib/jemalloc/src/chunk.c
  head/contrib/jemalloc/src/ctl.c
  head/contrib/jemalloc/src/jemalloc.c

Modified: head/contrib/jemalloc/ChangeLog
==============================================================================
--- head/contrib/jemalloc/ChangeLog Mon Oct 21 04:15:55 2013 (r256822)
+++ head/contrib/jemalloc/ChangeLog Mon Oct 21 05:10:46 2013 (r256823)
@@ -6,6 +6,21 @@ found in the git revision history:
 http://www.canonware.com/cgi-bin/gitweb.cgi?p=jemalloc.git
 git://canonware.com/jemalloc.git
 
+* 3.4.1 (October 20, 2013)
+
+  Bug fixes:
+  - Fix a race in the "arenas.extend" mallctl that could cause memory corruption
+    of internal data structures and subsequent crashes.
+  - Fix Valgrind integration flaws that caused Valgrind warnings about reads of
+    uninitialized memory in:
+    + arena chunk headers
+    + internal zero-initialized data structures (relevant to tcache and prof
+      code)
+  - Preserve errno during the first allocation.  A readlink(2) call during
+    initialization fails unless /etc/malloc.conf exists, so errno was typically
+    set during the first allocation prior to this fix.
+  - Fix compilation warnings reported by gcc 4.8.1.
+
 * 3.4.0 (June 2, 2013)
 
   This version is essentially a small bugfix release, but the addition of
@@ -60,7 +75,7 @@ found in the git revision history:
 
   Bug fixes:
   - Fix "arenas.extend" mallctl to output the number of arenas.
-  - Fix chunk_recycyle() to unconditionally inform Valgrind that returned memory
+  - Fix chunk_recycle() to unconditionally inform Valgrind that returned memory
     is undefined.
  - Fix build break on FreeBSD related to alloca.h.
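
The errno item above boils down to saving errno before the readlink(2) probe
of /etc/malloc.conf and restoring it when the probe fails, so the first
allocation no longer leaks a spurious ENOENT to the caller.  The following is
a minimal standalone sketch of that pattern, not the patched jemalloc code
itself (the real change in src/jemalloc.c below uses jemalloc's set_errno()
wrapper and its option-parsing buffer; the function name here is illustrative):

	#include <errno.h>
	#include <unistd.h>

	static void
	read_malloc_conf(char *buf, size_t buflen)
	{
		ssize_t linklen;
		int saved_errno = errno;

		/* Probe the /etc/malloc.conf symlink for option text. */
		linklen = readlink("/etc/malloc.conf", buf, buflen - 1);
		if (linklen == -1) {
			/* No configuration; restore the caller-visible errno. */
			linklen = 0;
			errno = saved_errno;
		}
		buf[linklen] = '\0';
	}
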
Modified: head/contrib/jemalloc/FREEBSD-diffs ============================================================================== --- head/contrib/jemalloc/FREEBSD-diffs Mon Oct 21 04:15:55 2013 (r256822) +++ head/contrib/jemalloc/FREEBSD-diffs Mon Oct 21 05:10:46 2013 (r256823) @@ -45,7 +45,7 @@ index abd5e6f..1d7491a 100644 + </refsect1> </refentry> diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in -index e46ac54..527449d 100644 +index 53c135c..c547339 100644 --- a/include/jemalloc/internal/jemalloc_internal.h.in +++ b/include/jemalloc/internal/jemalloc_internal.h.in @@ -1,5 +1,8 @@ @@ -97,10 +97,10 @@ index de44e14..564d604 100644 bool malloc_mutex_init(malloc_mutex_t *mutex); diff --git a/include/jemalloc/internal/private_namespace.h b/include/jemalloc/internal/private_namespace.h -index 65de316..366676b 100644 +index cdb0b0e..2a98d1f 100644 --- a/include/jemalloc/internal/private_namespace.h +++ b/include/jemalloc/internal/private_namespace.h -@@ -216,7 +216,6 @@ +@@ -218,7 +218,6 @@ #define iralloc JEMALLOC_N(iralloc) #define irallocx JEMALLOC_N(irallocx) #define isalloc JEMALLOC_N(isalloc) @@ -244,7 +244,7 @@ index 0000000..e6c8407 +#endif + diff --git a/src/jemalloc.c b/src/jemalloc.c -index bc350ed..352c98e 100644 +index ae56db6..1412a5e 100644 --- a/src/jemalloc.c +++ b/src/jemalloc.c @@ -8,6 +8,10 @@ malloc_tsd_data(, arenas, arena_t *, NULL) Modified: head/contrib/jemalloc/FREEBSD-upgrade ============================================================================== --- head/contrib/jemalloc/FREEBSD-upgrade Mon Oct 21 04:15:55 2013 (r256822) +++ head/contrib/jemalloc/FREEBSD-upgrade Mon Oct 21 05:10:46 2013 (r256823) @@ -60,7 +60,7 @@ do_extract() { local rev=$1 # Clone. rm -rf ${work} - git clone git://canonware.com/jemalloc.git ${work} + git clone https://github.com/jemalloc/jemalloc.git ${work} ( cd ${work} if [ "x${rev}" != "x" ] ; then Modified: head/contrib/jemalloc/VERSION ============================================================================== --- head/contrib/jemalloc/VERSION Mon Oct 21 04:15:55 2013 (r256822) +++ head/contrib/jemalloc/VERSION Mon Oct 21 05:10:46 2013 (r256823) @@ -1 +1 @@ -3.4.0-0-g0ed518e5dab789ad2171bb38977a8927e2a26775 +3.4.1-0-g0135fb806e4137dc9cdf152541926a2bc95e33f0 Modified: head/contrib/jemalloc/doc/jemalloc.3 ============================================================================== --- head/contrib/jemalloc/doc/jemalloc.3 Mon Oct 21 04:15:55 2013 (r256822) +++ head/contrib/jemalloc/doc/jemalloc.3 Mon Oct 21 05:10:46 2013 (r256823) @@ -2,12 +2,12 @@ .\" Title: JEMALLOC .\" Author: Jason Evans .\" Generator: DocBook XSL Stylesheets v1.76.1 <http://docbook.sf.net/> -.\" Date: 06/02/2013 +.\" Date: 10/20/2013 .\" Manual: User Manual -.\" Source: jemalloc 3.4.0-0-g0ed518e5dab789ad2171bb38977a8927e2a26775 +.\" Source: jemalloc 3.4.1-0-g0135fb806e4137dc9cdf152541926a2bc95e33f0 .\" Language: English .\" -.TH "JEMALLOC" "3" "06/02/2013" "jemalloc 3.4.0-0-g0ed518e5dab7" "User Manual" +.TH "JEMALLOC" "3" "10/20/2013" "jemalloc 3.4.1-0-g0135fb806e41" "User Manual" .\" ----------------------------------------------------------------- .\" * Define some portability stuff .\" ----------------------------------------------------------------- @@ -31,7 +31,7 @@ jemalloc \- general purpose memory allocation functions .SH "LIBRARY" .PP -This manual describes jemalloc 3\&.4\&.0\-0\-g0ed518e5dab789ad2171bb38977a8927e2a26775\&. 
More information can be found at the +This manual describes jemalloc 3\&.4\&.1\-0\-g0135fb806e4137dc9cdf152541926a2bc95e33f0\&. More information can be found at the \m[blue]\fBjemalloc website\fR\m[]\&\s-2\u[1]\d\s+2\&. .PP The following configuration options are enabled in libc\*(Aqs built\-in jemalloc: Modified: head/contrib/jemalloc/include/jemalloc/internal/arena.h ============================================================================== --- head/contrib/jemalloc/include/jemalloc/internal/arena.h Mon Oct 21 04:15:55 2013 (r256822) +++ head/contrib/jemalloc/include/jemalloc/internal/arena.h Mon Oct 21 05:10:46 2013 (r256823) @@ -441,6 +441,7 @@ void arena_postfork_child(arena_t *arena #ifndef JEMALLOC_ENABLE_INLINE arena_chunk_map_t *arena_mapp_get(arena_chunk_t *chunk, size_t pageind); size_t *arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind); +size_t arena_mapbitsp_read(size_t *mapbitsp); size_t arena_mapbits_get(arena_chunk_t *chunk, size_t pageind); size_t arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind); @@ -451,6 +452,7 @@ size_t arena_mapbits_dirty_get(arena_chu size_t arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind); size_t arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind); size_t arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind); +void arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits); void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size, size_t flags); void arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind, @@ -498,10 +500,17 @@ arena_mapbitsp_get(arena_chunk_t *chunk, } JEMALLOC_ALWAYS_INLINE size_t +arena_mapbitsp_read(size_t *mapbitsp) +{ + + return (*mapbitsp); +} + +JEMALLOC_ALWAYS_INLINE size_t arena_mapbits_get(arena_chunk_t *chunk, size_t pageind) { - return (*arena_mapbitsp_get(chunk, pageind)); + return (arena_mapbitsp_read(arena_mapbitsp_get(chunk, pageind))); } JEMALLOC_ALWAYS_INLINE size_t @@ -585,82 +594,89 @@ arena_mapbits_allocated_get(arena_chunk_ } JEMALLOC_ALWAYS_INLINE void +arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits) +{ + + *mapbitsp = mapbits; +} + +JEMALLOC_ALWAYS_INLINE void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size, size_t flags) { - size_t *mapbitsp; + size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); - mapbitsp = arena_mapbitsp_get(chunk, pageind); assert((size & PAGE_MASK) == 0); assert((flags & ~CHUNK_MAP_FLAGS_MASK) == 0); assert((flags & (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == flags); - *mapbitsp = size | CHUNK_MAP_BININD_INVALID | flags; + arena_mapbitsp_write(mapbitsp, size | CHUNK_MAP_BININD_INVALID | flags); } JEMALLOC_ALWAYS_INLINE void arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind, size_t size) { - size_t *mapbitsp; + size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); + size_t mapbits = arena_mapbitsp_read(mapbitsp); - mapbitsp = arena_mapbitsp_get(chunk, pageind); assert((size & PAGE_MASK) == 0); - assert((*mapbitsp & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0); - *mapbitsp = size | (*mapbitsp & PAGE_MASK); + assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0); + arena_mapbitsp_write(mapbitsp, size | (mapbits & PAGE_MASK)); } JEMALLOC_ALWAYS_INLINE void arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size, size_t flags) { - size_t *mapbitsp; + size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); + size_t mapbits = arena_mapbitsp_read(mapbitsp); size_t unzeroed; - mapbitsp = 
arena_mapbitsp_get(chunk, pageind); assert((size & PAGE_MASK) == 0); assert((flags & CHUNK_MAP_DIRTY) == flags); - unzeroed = *mapbitsp & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */ - *mapbitsp = size | CHUNK_MAP_BININD_INVALID | flags | unzeroed | - CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED; + unzeroed = mapbits & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */ + arena_mapbitsp_write(mapbitsp, size | CHUNK_MAP_BININD_INVALID | flags + | unzeroed | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED); } JEMALLOC_ALWAYS_INLINE void arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind, size_t binind) { - size_t *mapbitsp; + size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); + size_t mapbits = arena_mapbitsp_read(mapbitsp); assert(binind <= BININD_INVALID); - mapbitsp = arena_mapbitsp_get(chunk, pageind); assert(arena_mapbits_large_size_get(chunk, pageind) == PAGE); - *mapbitsp = (*mapbitsp & ~CHUNK_MAP_BININD_MASK) | (binind << - CHUNK_MAP_BININD_SHIFT); + arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_BININD_MASK) | + (binind << CHUNK_MAP_BININD_SHIFT)); } JEMALLOC_ALWAYS_INLINE void arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind, size_t binind, size_t flags) { - size_t *mapbitsp; + size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); + size_t mapbits = arena_mapbitsp_read(mapbitsp); size_t unzeroed; assert(binind < BININD_INVALID); - mapbitsp = arena_mapbitsp_get(chunk, pageind); assert(pageind - runind >= map_bias); assert((flags & CHUNK_MAP_DIRTY) == flags); - unzeroed = *mapbitsp & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */ - *mapbitsp = (runind << LG_PAGE) | (binind << CHUNK_MAP_BININD_SHIFT) | - flags | unzeroed | CHUNK_MAP_ALLOCATED; + unzeroed = mapbits & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */ + arena_mapbitsp_write(mapbitsp, (runind << LG_PAGE) | (binind << + CHUNK_MAP_BININD_SHIFT) | flags | unzeroed | CHUNK_MAP_ALLOCATED); } JEMALLOC_ALWAYS_INLINE void arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind, size_t unzeroed) { - size_t *mapbitsp; + size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); + size_t mapbits = arena_mapbitsp_read(mapbitsp); - mapbitsp = arena_mapbitsp_get(chunk, pageind); - *mapbitsp = (*mapbitsp & ~CHUNK_MAP_UNZEROED) | unzeroed; + arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_UNZEROED) | + unzeroed); } JEMALLOC_INLINE bool Modified: head/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h ============================================================================== --- head/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h Mon Oct 21 04:15:55 2013 (r256822) +++ head/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h Mon Oct 21 05:10:46 2013 (r256823) @@ -232,9 +232,18 @@ static const bool config_ivsalloc = # define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var)) #endif +/* + * JEMALLOC_ALWAYS_INLINE is used within header files for functions that are + * static inline functions if inlining is enabled, and single-definition + * library-private functions if inlining is disabled. + * + * JEMALLOC_ALWAYS_INLINE_C is for use in .c files, in which case the denoted + * functions are always static, regardless of whether inlining is enabled. + */ #ifdef JEMALLOC_DEBUG /* Disable inlining to make debugging easier. 
*/ # define JEMALLOC_ALWAYS_INLINE +# define JEMALLOC_ALWAYS_INLINE_C static # define JEMALLOC_INLINE # define inline #else @@ -242,8 +251,11 @@ static const bool config_ivsalloc = # ifdef JEMALLOC_HAVE_ATTR # define JEMALLOC_ALWAYS_INLINE \ static inline JEMALLOC_ATTR(unused) JEMALLOC_ATTR(always_inline) +# define JEMALLOC_ALWAYS_INLINE_C \ + static inline JEMALLOC_ATTR(always_inline) # else # define JEMALLOC_ALWAYS_INLINE static inline +# define JEMALLOC_ALWAYS_INLINE_C static inline # endif # define JEMALLOC_INLINE static inline # ifdef _MSC_VER Modified: head/contrib/jemalloc/include/jemalloc/internal/private_namespace.h ============================================================================== --- head/contrib/jemalloc/include/jemalloc/internal/private_namespace.h Mon Oct 21 04:15:55 2013 (r256822) +++ head/contrib/jemalloc/include/jemalloc/internal/private_namespace.h Mon Oct 21 05:10:46 2013 (r256823) @@ -33,6 +33,8 @@ #define arena_mapbits_unzeroed_get JEMALLOC_N(arena_mapbits_unzeroed_get) #define arena_mapbits_unzeroed_set JEMALLOC_N(arena_mapbits_unzeroed_set) #define arena_mapbitsp_get JEMALLOC_N(arena_mapbitsp_get) +#define arena_mapbitsp_read JEMALLOC_N(arena_mapbitsp_read) +#define arena_mapbitsp_write JEMALLOC_N(arena_mapbitsp_write) #define arena_mapp_get JEMALLOC_N(arena_mapp_get) #define arena_maxclass JEMALLOC_N(arena_maxclass) #define arena_new JEMALLOC_N(arena_new) Modified: head/contrib/jemalloc/include/jemalloc/internal/tcache.h ============================================================================== --- head/contrib/jemalloc/include/jemalloc/internal/tcache.h Mon Oct 21 04:15:55 2013 (r256822) +++ head/contrib/jemalloc/include/jemalloc/internal/tcache.h Mon Oct 21 05:10:46 2013 (r256823) @@ -313,6 +313,7 @@ tcache_alloc_small(tcache_t *tcache, siz } else if (opt_zero) memset(ret, 0, size); } + VALGRIND_MAKE_MEM_UNDEFINED(ret, size); } else { if (config_fill && opt_junk) { arena_alloc_junk_small(ret, &arena_bin_info[binind], @@ -321,7 +322,6 @@ tcache_alloc_small(tcache_t *tcache, siz VALGRIND_MAKE_MEM_UNDEFINED(ret, size); memset(ret, 0, size); } - VALGRIND_MAKE_MEM_UNDEFINED(ret, size); if (config_stats) tbin->tstats.nrequests++; @@ -368,11 +368,11 @@ tcache_alloc_large(tcache_t *tcache, siz else if (opt_zero) memset(ret, 0, size); } + VALGRIND_MAKE_MEM_UNDEFINED(ret, size); } else { VALGRIND_MAKE_MEM_UNDEFINED(ret, size); memset(ret, 0, size); } - VALGRIND_MAKE_MEM_UNDEFINED(ret, size); if (config_stats) tbin->tstats.nrequests++; Modified: head/contrib/jemalloc/include/jemalloc/jemalloc.h ============================================================================== --- head/contrib/jemalloc/include/jemalloc/jemalloc.h Mon Oct 21 04:15:55 2013 (r256822) +++ head/contrib/jemalloc/include/jemalloc/jemalloc.h Mon Oct 21 05:10:46 2013 (r256823) @@ -7,12 +7,12 @@ extern "C" { #include <limits.h> #include <strings.h> -#define JEMALLOC_VERSION "3.4.0-0-g0ed518e5dab789ad2171bb38977a8927e2a26775" +#define JEMALLOC_VERSION "3.4.1-0-g0135fb806e4137dc9cdf152541926a2bc95e33f0" #define JEMALLOC_VERSION_MAJOR 3 #define JEMALLOC_VERSION_MINOR 4 -#define JEMALLOC_VERSION_BUGFIX 0 +#define JEMALLOC_VERSION_BUGFIX 1 #define JEMALLOC_VERSION_NREV 0 -#define JEMALLOC_VERSION_GID "0ed518e5dab789ad2171bb38977a8927e2a26775" +#define JEMALLOC_VERSION_GID "0135fb806e4137dc9cdf152541926a2bc95e33f0" #include "jemalloc_defs.h" #include "jemalloc_FreeBSD.h" Modified: head/contrib/jemalloc/src/arena.c ============================================================================== --- 
head/contrib/jemalloc/src/arena.c Mon Oct 21 04:15:55 2013 (r256822) +++ head/contrib/jemalloc/src/arena.c Mon Oct 21 05:10:46 2013 (r256823) @@ -369,13 +369,20 @@ arena_run_zero(arena_chunk_t *chunk, siz } static inline void +arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind) +{ + + VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind << + LG_PAGE)), PAGE); +} + +static inline void arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind) { size_t i; UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE)); - VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind << - LG_PAGE)), PAGE); + arena_run_page_mark_zeroed(chunk, run_ind); for (i = 0; i < PAGE / sizeof(size_t); i++) assert(p[i] == 0); } @@ -458,6 +465,9 @@ arena_run_split(arena_t *arena, arena_ru } else if (config_debug) { arena_run_page_validate_zeroed( chunk, run_ind+i); + } else { + arena_run_page_mark_zeroed( + chunk, run_ind+i); } } } else { @@ -467,6 +477,9 @@ arena_run_split(arena_t *arena, arena_ru */ arena_run_zero(chunk, run_ind, need_pages); } + } else { + VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + + (run_ind << LG_PAGE)), (need_pages << LG_PAGE)); } /* @@ -508,9 +521,9 @@ arena_run_split(arena_t *arena, arena_ru arena_run_page_validate_zeroed(chunk, run_ind+need_pages-1); } + VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + + (run_ind << LG_PAGE)), (need_pages << LG_PAGE)); } - VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind << - LG_PAGE)), (need_pages << LG_PAGE)); } static arena_chunk_t * @@ -569,17 +582,24 @@ arena_chunk_alloc(arena_t *arena) * unless the chunk is not zeroed. */ if (zero == false) { + VALGRIND_MAKE_MEM_UNDEFINED( + (void *)arena_mapp_get(chunk, map_bias+1), + (size_t)((uintptr_t) arena_mapp_get(chunk, + chunk_npages-1) - (uintptr_t)arena_mapp_get(chunk, + map_bias+1))); for (i = map_bias+1; i < chunk_npages-1; i++) arena_mapbits_unzeroed_set(chunk, i, unzeroed); - } else if (config_debug) { + } else { VALGRIND_MAKE_MEM_DEFINED( (void *)arena_mapp_get(chunk, map_bias+1), - (void *)((uintptr_t) - arena_mapp_get(chunk, chunk_npages-1) - - (uintptr_t)arena_mapp_get(chunk, map_bias+1))); - for (i = map_bias+1; i < chunk_npages-1; i++) { - assert(arena_mapbits_unzeroed_get(chunk, i) == - unzeroed); + (size_t)((uintptr_t) arena_mapp_get(chunk, + chunk_npages-1) - (uintptr_t)arena_mapp_get(chunk, + map_bias+1))); + if (config_debug) { + for (i = map_bias+1; i < chunk_npages-1; i++) { + assert(arena_mapbits_unzeroed_get(chunk, + i) == unzeroed); + } } } arena_mapbits_unallocated_set(chunk, chunk_npages-1, @@ -1458,6 +1478,7 @@ arena_malloc_small(arena_t *arena, size_ } else if (opt_zero) memset(ret, 0, size); } + VALGRIND_MAKE_MEM_UNDEFINED(ret, size); } else { if (config_fill && opt_junk) { arena_alloc_junk_small(ret, &arena_bin_info[binind], @@ -1466,7 +1487,6 @@ arena_malloc_small(arena_t *arena, size_ VALGRIND_MAKE_MEM_UNDEFINED(ret, size); memset(ret, 0, size); } - VALGRIND_MAKE_MEM_UNDEFINED(ret, size); return (ret); } Modified: head/contrib/jemalloc/src/chunk.c ============================================================================== --- head/contrib/jemalloc/src/chunk.c Mon Oct 21 04:15:55 2013 (r256822) +++ head/contrib/jemalloc/src/chunk.c Mon Oct 21 05:10:46 2013 (r256823) @@ -294,7 +294,7 @@ label_return: if (xnode != NULL) base_node_dealloc(xnode); if (xprev != NULL) - base_node_dealloc(prev); + base_node_dealloc(xprev); } void Modified: head/contrib/jemalloc/src/ctl.c 
============================================================================== --- head/contrib/jemalloc/src/ctl.c Mon Oct 21 04:15:55 2013 (r256822) +++ head/contrib/jemalloc/src/ctl.c Mon Oct 21 05:10:46 2013 (r256823) @@ -546,43 +546,30 @@ ctl_arena_refresh(arena_t *arena, unsign static bool ctl_grow(void) { - size_t astats_size; ctl_arena_stats_t *astats; arena_t **tarenas; - /* Extend arena stats and arenas arrays. */ - astats_size = (ctl_stats.narenas + 2) * sizeof(ctl_arena_stats_t); - if (ctl_stats.narenas == narenas_auto) { - /* ctl_stats.arenas and arenas came from base_alloc(). */ - astats = (ctl_arena_stats_t *)imalloc(astats_size); - if (astats == NULL) - return (true); - memcpy(astats, ctl_stats.arenas, (ctl_stats.narenas + 1) * - sizeof(ctl_arena_stats_t)); - - tarenas = (arena_t **)imalloc((ctl_stats.narenas + 1) * - sizeof(arena_t *)); - if (tarenas == NULL) { - idalloc(astats); - return (true); - } - memcpy(tarenas, arenas, ctl_stats.narenas * sizeof(arena_t *)); - } else { - astats = (ctl_arena_stats_t *)iralloc(ctl_stats.arenas, - astats_size, 0, 0, false, false); - if (astats == NULL) - return (true); - - tarenas = (arena_t **)iralloc(arenas, (ctl_stats.narenas + 1) * - sizeof(arena_t *), 0, 0, false, false); - if (tarenas == NULL) - return (true); + /* Allocate extended arena stats and arenas arrays. */ + astats = (ctl_arena_stats_t *)imalloc((ctl_stats.narenas + 2) * + sizeof(ctl_arena_stats_t)); + if (astats == NULL) + return (true); + tarenas = (arena_t **)imalloc((ctl_stats.narenas + 1) * + sizeof(arena_t *)); + if (tarenas == NULL) { + idalloc(astats); + return (true); } - /* Initialize the new astats and arenas elements. */ + + /* Initialize the new astats element. */ + memcpy(astats, ctl_stats.arenas, (ctl_stats.narenas + 1) * + sizeof(ctl_arena_stats_t)); memset(&astats[ctl_stats.narenas + 1], 0, sizeof(ctl_arena_stats_t)); - if (ctl_arena_init(&astats[ctl_stats.narenas + 1])) + if (ctl_arena_init(&astats[ctl_stats.narenas + 1])) { + idalloc(tarenas); + idalloc(astats); return (true); - tarenas[ctl_stats.narenas] = NULL; + } /* Swap merged stats to their new location. */ { ctl_arena_stats_t tstats; @@ -593,13 +580,34 @@ ctl_grow(void) memcpy(&astats[ctl_stats.narenas + 1], &tstats, sizeof(ctl_arena_stats_t)); } + /* Initialize the new arenas element. */ + tarenas[ctl_stats.narenas] = NULL; + { + arena_t **arenas_old = arenas; + /* + * Swap extended arenas array into place. Although ctl_mtx + * protects this function from other threads extending the + * array, it does not protect from other threads mutating it + * (i.e. initializing arenas and setting array elements to + * point to them). Therefore, array copying must happen under + * the protection of arenas_lock. + */ + malloc_mutex_lock(&arenas_lock); + arenas = tarenas; + memcpy(arenas, arenas_old, ctl_stats.narenas * + sizeof(arena_t *)); + narenas_total++; + arenas_extend(narenas_total - 1); + malloc_mutex_unlock(&arenas_lock); + /* + * Deallocate arenas_old only if it came from imalloc() (not + * base_alloc()). 
+ */ + if (ctl_stats.narenas != narenas_auto) + idalloc(arenas_old); + } ctl_stats.arenas = astats; ctl_stats.narenas++; - malloc_mutex_lock(&arenas_lock); - arenas = tarenas; - narenas_total++; - arenas_extend(narenas_total - 1); - malloc_mutex_unlock(&arenas_lock); return (false); } @@ -1109,7 +1117,7 @@ epoch_ctl(const size_t *mib, size_t mibl void *newp, size_t newlen) { int ret; - uint64_t newval; + UNUSED uint64_t newval; malloc_mutex_lock(&ctl_mtx); WRITE(newval, uint64_t); Modified: head/contrib/jemalloc/src/jemalloc.c ============================================================================== --- head/contrib/jemalloc/src/jemalloc.c Mon Oct 21 04:15:55 2013 (r256822) +++ head/contrib/jemalloc/src/jemalloc.c Mon Oct 21 05:10:46 2013 (r256823) @@ -286,7 +286,7 @@ arenas_cleanup(void *arg) malloc_mutex_unlock(&arenas_lock); } -static JEMALLOC_ATTR(always_inline) void +JEMALLOC_ALWAYS_INLINE_C void malloc_thread_init(void) { @@ -303,7 +303,7 @@ malloc_thread_init(void) quarantine_alloc_hook(); } -static JEMALLOC_ATTR(always_inline) bool +JEMALLOC_ALWAYS_INLINE_C bool malloc_init(void) { @@ -440,8 +440,9 @@ malloc_conf_init(void) } break; case 1: { + int linklen = 0; #ifndef _WIN32 - int linklen; + int saved_errno = errno; const char *linkname = # ifdef JEMALLOC_PREFIX "/etc/"JEMALLOC_PREFIX"malloc.conf" @@ -450,21 +451,20 @@ malloc_conf_init(void) # endif ; - if ((linklen = readlink(linkname, buf, - sizeof(buf) - 1)) != -1) { - /* - * Use the contents of the "/etc/malloc.conf" - * symbolic link's name. - */ - buf[linklen] = '\0'; - opts = buf; - } else -#endif - { + /* + * Try to use the contents of the "/etc/malloc.conf" + * symbolic link's name. + */ + linklen = readlink(linkname, buf, sizeof(buf) - 1); + if (linklen == -1) { /* No configuration specified. */ - buf[0] = '\0'; - opts = buf; + linklen = 0; + /* restore errno */ + set_errno(saved_errno); } +#endif + buf[linklen] = '\0'; + opts = buf; break; } case 2: { const char *envname = @@ -1407,7 +1407,7 @@ je_mallctlbymib(const size_t *mib, size_ */ #ifdef JEMALLOC_EXPERIMENTAL -static JEMALLOC_ATTR(always_inline) void * +JEMALLOC_ALWAYS_INLINE_C void * iallocm(size_t usize, size_t alignment, bool zero, bool try_tcache, arena_t *arena) {
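
The ctl.c change above is the "arenas.extend" race fix from the ChangeLog: the
extended arenas array is now published and copied while arenas_lock is held,
because (per the new comment) ctl_mtx only serializes concurrent growers, not
other threads that mutate the array.  A simplified standalone sketch of that
grow-and-swap pattern follows; names are hypothetical, pthreads stands in for
jemalloc's malloc_mutex_t, and the old array is freed unconditionally rather
than only when it came from imalloc():

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdlib.h>
	#include <string.h>

	static pthread_mutex_t slots_lock = PTHREAD_MUTEX_INITIALIZER;
	static void **slots;		/* Shared; mutated only under slots_lock. */
	static size_t nslots;

	/* Grow the shared array by one slot; returns true on error. */
	static bool
	slots_grow(void)
	{
		void **tslots, **slots_old;

		/* Allocate the larger array outside the critical section. */
		tslots = malloc((nslots + 1) * sizeof(void *));
		if (tslots == NULL)
			return (true);
		tslots[nslots] = NULL;

		pthread_mutex_lock(&slots_lock);
		slots_old = slots;
		slots = tslots;
		/*
		 * Copy under the lock: other threads may still be writing
		 * elements of the old array, and they do so under slots_lock.
		 */
		if (slots_old != NULL)
			memcpy(slots, slots_old, nslots * sizeof(void *));
		nslots++;
		pthread_mutex_unlock(&slots_lock);

		free(slots_old);
		return (false);
	}
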