diff --git a/runtime/src/mem/jemalloc/mem-jemalloc.c b/runtime/src/mem/jemalloc/mem-jemalloc.c index 844ae0457142..1603b3e51c61 100644 --- a/runtime/src/mem/jemalloc/mem-jemalloc.c +++ b/runtime/src/mem/jemalloc/mem-jemalloc.c @@ -66,11 +66,11 @@ static void* chunk_alloc(void *chunk, size_t size, size_t alignment, bool *zero, // compute our current aligned pointer into the shared heap // - // jemalloc 4.3.1 man: "The alignment parameter is always a power of two at + // jemalloc 4.4.0 man: "The alignment parameter is always a power of two at // least as large as the chunk size." cur_chunk_base = alignHelper(heap.base, heap.cur_offset, alignment); - // jemalloc 4.3.1 man: "If chunk is not NULL, the returned pointer must be + // jemalloc 4.4.0 man: "If chunk is not NULL, the returned pointer must be // chunk on success or NULL on error" if (chunk && chunk != cur_chunk_base) { pthread_mutex_unlock(&heap.alloc_lock); @@ -91,7 +91,7 @@ static void* chunk_alloc(void *chunk, size_t size, size_t alignment, bool *zero, // now that cur_heap_offset is updated, we can unlock pthread_mutex_unlock(&heap.alloc_lock); - // jemalloc 4.3.1 man: "Zeroing is mandatory if *zero is true upon entry." + // jemalloc 4.4.0 man: "Zeroing is mandatory if *zero is true upon entry." if (*zero) { memset(cur_chunk_base, 0, size); } @@ -159,7 +159,7 @@ static void initialize_arenas(void) { // for each non-zero arena, set the current thread to use it (this // initializes each arena). arena 0 is automatically initialized. // - // jemalloc 4.3.1 man: "If the specified arena was not initialized + // jemalloc 4.4.0 man: "If the specified arena was not initialized // beforehand, it will be automatically initialized as a side effect of // calling this interface." narenas = get_num_arenas(); @@ -249,7 +249,7 @@ static bool addressNotInHeap(void* ptr) { // grab (and leak) whatever memory jemalloc got on it's own, that's not in // our shared heap // -// jemalloc 4.3.1 man: "arenas may have already created chunks prior to the +// jemalloc 4.4.0 man: "arenas may have already created chunks prior to the // application having an opportunity to take over chunk allocation." // // jemalloc grabs "chunks" from the system in order to store metadata and some @@ -307,7 +307,7 @@ void chpl_mem_layerInit(void) { // of initializing jemalloc. If we're not using a shared heap, do a first // allocation to allow jemalloc to set up: // - // jemalloc 4.3.1 man: "Once, when the first call is made to one of the + // jemalloc 4.4.0 man: "Once, when the first call is made to one of the // memory allocation routines, the allocator initializes its internals" if (heap_base != NULL) { heap.base = heap_base; diff --git a/third-party/jemalloc/README b/third-party/jemalloc/README index 0e34104286cd..65e1392cd5e2 100644 --- a/third-party/jemalloc/README +++ b/third-party/jemalloc/README @@ -2,7 +2,7 @@ jemalloc README for Chapel ========================== -This copy of jemalloc 4.3.1 is being released with Chapel for +This copy of jemalloc 4.4.0 is being released with Chapel for convenience and was obtained from: https://github.com/jemalloc/jemalloc @@ -18,9 +18,9 @@ The directory $CHPL_HOME/third-party/jemalloc/jemalloc-src contains the un-tarballed jemalloc package contents. Version updates should be done as follows, assuming the CWD is $CHPL_HOME/third-party/jemalloc/: -1. download and untar the latest jemalloc version: e.g. jemalloc-4.3.1 +1. download and untar the latest jemalloc version: e.g. jemalloc-4.4.0 2. `rm -rf jemalloc-src` -3. 
`mv jemalloc-4.3.1 jemalloc-src` +3. `mv jemalloc-4.4.0 jemalloc-src` 4. `git add --force jemalloc-src` (--force to ignore our .gitignore) 5. update the version number mentioned above 6. verify the references to jemalloc's man page in the runtime shim are diff --git a/third-party/jemalloc/jemalloc-src/ChangeLog b/third-party/jemalloc/jemalloc-src/ChangeLog index 587685d02971..f75edd933ad3 100644 --- a/third-party/jemalloc/jemalloc-src/ChangeLog +++ b/third-party/jemalloc/jemalloc-src/ChangeLog @@ -4,6 +4,33 @@ brevity. Much more detail can be found in the git revision history: https://github.com/jemalloc/jemalloc +* 4.4.0 (December 3, 2016) + + New features: + - Add configure support for *-*-linux-android. (@cferris1000, @jasone) + - Add the --disable-syscall configure option, for use on systems that place + security-motivated limitations on syscall(2). (@jasone) + - Add support for Debian GNU/kFreeBSD. (@thesam) + + Optimizations: + - Add extent serial numbers and use them where appropriate as a sort key that + is higher priority than address, so that the allocation policy prefers older + extents. This tends to improve locality (decrease fragmentation) when + memory grows downward. (@jasone) + - Refactor madvise(2) configuration so that MADV_FREE is detected and utilized + on Linux 4.5 and newer. (@jasone) + - Mark partially purged arena chunks as non-huge-page. This improves + interaction with Linux's transparent huge page functionality. (@jasone) + + Bug fixes: + - Fix size class computations for edge conditions involving extremely large + allocations. This regression was first released in 4.0.0. (@jasone, + @ingvarha) + - Remove overly restrictive assertions related to the cactive statistic. This + regression was first released in 4.1.0. (@jasone) + - Implement a more reliable detection scheme for os_unfair_lock on macOS. + (@jszakmeister) + * 4.3.1 (November 7, 2016) Bug fixes: diff --git a/third-party/jemalloc/jemalloc-src/INSTALL b/third-party/jemalloc/jemalloc-src/INSTALL index 68787165352e..cce3ed711b54 100644 --- a/third-party/jemalloc/jemalloc-src/INSTALL +++ b/third-party/jemalloc/jemalloc-src/INSTALL @@ -206,6 +206,11 @@ any of the following arguments (not a definitive list) to 'configure': most extreme case increases physical memory usage for the 16 KiB size class to 20 KiB. +--disable-syscall + Disable use of syscall(2) rather than {open,read,write,close}(2). This is + intended as a workaround for systems that place security limitations on + syscall(2). + --with-xslroot= Specify where to find DocBook XSL stylesheets when building the documentation. @@ -327,6 +332,15 @@ LDFLAGS="?" PATH="?" 'configure' uses this to find programs. +In some cases it may be necessary to work around configuration results that do +not match reality. For example, Linux 4.5 added support for the MADV_FREE flag +to madvise(2), which can cause problems if building on a host with MADV_FREE +support and deploying to a target without. 
To work around this, use a cache +file to override the relevant configuration variable defined in configure.ac, +e.g.: + + echo "je_cv_madv_free=no" > config.cache && ./configure -C + === Advanced compilation ======================================================= To build only parts of jemalloc, use the following targets: diff --git a/third-party/jemalloc/jemalloc-src/Makefile.in b/third-party/jemalloc/jemalloc-src/Makefile.in index d13c7f108552..c70536391866 100644 --- a/third-party/jemalloc/jemalloc-src/Makefile.in +++ b/third-party/jemalloc/jemalloc-src/Makefile.in @@ -166,6 +166,8 @@ TESTS_UNIT := \ $(srcroot)test/unit/math.c \ $(srcroot)test/unit/mq.c \ $(srcroot)test/unit/mtx.c \ + $(srcroot)test/unit/pack.c \ + $(srcroot)test/unit/pages.c \ $(srcroot)test/unit/ph.c \ $(srcroot)test/unit/prng.c \ $(srcroot)test/unit/prof_accum.c \ diff --git a/third-party/jemalloc/jemalloc-src/VERSION b/third-party/jemalloc/jemalloc-src/VERSION index c108ecb84aad..810bd6d4c9a7 100644 --- a/third-party/jemalloc/jemalloc-src/VERSION +++ b/third-party/jemalloc/jemalloc-src/VERSION @@ -1 +1 @@ -4.3.1-0-g0110fa8451af905affd77c3bea0d545fee2251b2 +4.4.0-0-gf1f76357313e7dcad7262f17a48ff0a2e005fcdc diff --git a/third-party/jemalloc/jemalloc-src/build-aux/config.guess b/third-party/jemalloc/jemalloc-src/build-aux/config.guess index 1f5c50c0d152..2e9ad7fe8189 100755 --- a/third-party/jemalloc/jemalloc-src/build-aux/config.guess +++ b/third-party/jemalloc/jemalloc-src/build-aux/config.guess @@ -1,8 +1,8 @@ #! /bin/sh # Attempt to guess a canonical system name. -# Copyright 1992-2014 Free Software Foundation, Inc. +# Copyright 1992-2016 Free Software Foundation, Inc. -timestamp='2014-03-23' +timestamp='2016-10-02' # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by @@ -24,12 +24,12 @@ timestamp='2014-03-23' # program. This Exception is an additional permission under section 7 # of the GNU General Public License, version 3 ("GPLv3"). # -# Originally written by Per Bothner. +# Originally written by Per Bothner; maintained since 2000 by Ben Elliston. # # You can get the latest version of this script from: -# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD +# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess # -# Please send patches with a ChangeLog entry to config-patches@gnu.org. +# Please send patches to . me=`echo "$0" | sed -e 's,.*/,,'` @@ -50,7 +50,7 @@ version="\ GNU config.guess ($timestamp) Originally written by Per Bothner. -Copyright 1992-2014 Free Software Foundation, Inc. +Copyright 1992-2016 Free Software Foundation, Inc. This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." @@ -168,19 +168,29 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in # Note: NetBSD doesn't particularly care about the vendor # portion of the name. We always set it to "unknown". 
sysctl="sysctl -n hw.machine_arch" - UNAME_MACHINE_ARCH=`(/sbin/$sysctl 2>/dev/null || \ - /usr/sbin/$sysctl 2>/dev/null || echo unknown)` + UNAME_MACHINE_ARCH=`(uname -p 2>/dev/null || \ + /sbin/$sysctl 2>/dev/null || \ + /usr/sbin/$sysctl 2>/dev/null || \ + echo unknown)` case "${UNAME_MACHINE_ARCH}" in armeb) machine=armeb-unknown ;; arm*) machine=arm-unknown ;; sh3el) machine=shl-unknown ;; sh3eb) machine=sh-unknown ;; sh5el) machine=sh5le-unknown ;; + earmv*) + arch=`echo ${UNAME_MACHINE_ARCH} | sed -e 's,^e\(armv[0-9]\).*$,\1,'` + endian=`echo ${UNAME_MACHINE_ARCH} | sed -ne 's,^.*\(eb\)$,\1,p'` + machine=${arch}${endian}-unknown + ;; *) machine=${UNAME_MACHINE_ARCH}-unknown ;; esac # The Operating System including object format, if it has switched - # to ELF recently, or will in the future. + # to ELF recently (or will in the future) and ABI. case "${UNAME_MACHINE_ARCH}" in + earm*) + os=netbsdelf + ;; arm*|i386|m68k|ns32k|sh3*|sparc|vax) eval $set_cc_for_build if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \ @@ -197,6 +207,13 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in os=netbsd ;; esac + # Determine ABI tags. + case "${UNAME_MACHINE_ARCH}" in + earm*) + expr='s/^earmv[0-9]/-eabi/;s/eb$//' + abi=`echo ${UNAME_MACHINE_ARCH} | sed -e "$expr"` + ;; + esac # The OS release # Debian GNU/NetBSD machines have a different userland, and # thus, need a distinct triplet. However, they do not need @@ -207,13 +224,13 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in release='-gnu' ;; *) - release=`echo ${UNAME_RELEASE}|sed -e 's/[-_].*/\./'` + release=`echo ${UNAME_RELEASE} | sed -e 's/[-_].*//' | cut -d. -f1,2` ;; esac # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM: # contains redundant information, the shorter form: # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used. 
- echo "${machine}-${os}${release}" + echo "${machine}-${os}${release}${abi}" exit ;; *:Bitrig:*:*) UNAME_MACHINE_ARCH=`arch | sed 's/Bitrig.//'` @@ -223,6 +240,10 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'` echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE} exit ;; + *:LibertyBSD:*:*) + UNAME_MACHINE_ARCH=`arch | sed 's/^.*BSD\.//'` + echo ${UNAME_MACHINE_ARCH}-unknown-libertybsd${UNAME_RELEASE} + exit ;; *:ekkoBSD:*:*) echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE} exit ;; @@ -235,6 +256,9 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in *:MirBSD:*:*) echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE} exit ;; + *:Sortix:*:*) + echo ${UNAME_MACHINE}-unknown-sortix + exit ;; alpha:OSF1:*:*) case $UNAME_RELEASE in *4.0) @@ -251,42 +275,42 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1` case "$ALPHA_CPU_TYPE" in "EV4 (21064)") - UNAME_MACHINE="alpha" ;; + UNAME_MACHINE=alpha ;; "EV4.5 (21064)") - UNAME_MACHINE="alpha" ;; + UNAME_MACHINE=alpha ;; "LCA4 (21066/21068)") - UNAME_MACHINE="alpha" ;; + UNAME_MACHINE=alpha ;; "EV5 (21164)") - UNAME_MACHINE="alphaev5" ;; + UNAME_MACHINE=alphaev5 ;; "EV5.6 (21164A)") - UNAME_MACHINE="alphaev56" ;; + UNAME_MACHINE=alphaev56 ;; "EV5.6 (21164PC)") - UNAME_MACHINE="alphapca56" ;; + UNAME_MACHINE=alphapca56 ;; "EV5.7 (21164PC)") - UNAME_MACHINE="alphapca57" ;; + UNAME_MACHINE=alphapca57 ;; "EV6 (21264)") - UNAME_MACHINE="alphaev6" ;; + UNAME_MACHINE=alphaev6 ;; "EV6.7 (21264A)") - UNAME_MACHINE="alphaev67" ;; + UNAME_MACHINE=alphaev67 ;; "EV6.8CB (21264C)") - UNAME_MACHINE="alphaev68" ;; + UNAME_MACHINE=alphaev68 ;; "EV6.8AL (21264B)") - UNAME_MACHINE="alphaev68" ;; + UNAME_MACHINE=alphaev68 ;; "EV6.8CX (21264D)") - UNAME_MACHINE="alphaev68" ;; + UNAME_MACHINE=alphaev68 ;; "EV6.9A (21264/EV69A)") - UNAME_MACHINE="alphaev69" ;; + UNAME_MACHINE=alphaev69 ;; "EV7 (21364)") - UNAME_MACHINE="alphaev7" ;; + UNAME_MACHINE=alphaev7 ;; "EV7.9 (21364A)") - UNAME_MACHINE="alphaev79" ;; + UNAME_MACHINE=alphaev79 ;; esac # A Pn.n version is a patched version. # A Vn.n version is a released version. # A Tn.n version is a released field test version. # A Xn.n version is an unreleased experimental baselevel. # 1.2 uses "1.2" for uname -r. - echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` + echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz` # Reset EXIT trap before exiting to avoid spurious non-zero exit code. exitcode=$? trap '' 0 @@ -359,16 +383,16 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in exit ;; i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*) eval $set_cc_for_build - SUN_ARCH="i386" + SUN_ARCH=i386 # If there is a compiler, see if it is configured for 64-bit objects. # Note that the Sun cc does not turn __LP64__ into 1 like gcc does. # This test works for both compilers. 
- if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then + if [ "$CC_FOR_BUILD" != no_compiler_found ]; then if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \ - (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \ + (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ grep IS_64BIT_ARCH >/dev/null then - SUN_ARCH="x86_64" + SUN_ARCH=x86_64 fi fi echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` @@ -393,7 +417,7 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in exit ;; sun*:*:4.2BSD:*) UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null` - test "x${UNAME_RELEASE}" = "x" && UNAME_RELEASE=3 + test "x${UNAME_RELEASE}" = x && UNAME_RELEASE=3 case "`/bin/arch`" in sun3) echo m68k-sun-sunos${UNAME_RELEASE} @@ -579,8 +603,9 @@ EOF else IBM_ARCH=powerpc fi - if [ -x /usr/bin/oslevel ] ; then - IBM_REV=`/usr/bin/oslevel` + if [ -x /usr/bin/lslpp ] ; then + IBM_REV=`/usr/bin/lslpp -Lqc bos.rte.libc | + awk -F: '{ print $3 }' | sed s/[0-9]*$/0/` else IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} fi @@ -617,13 +642,13 @@ EOF sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null` sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null` case "${sc_cpu_version}" in - 523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0 - 528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1 + 523) HP_ARCH=hppa1.0 ;; # CPU_PA_RISC1_0 + 528) HP_ARCH=hppa1.1 ;; # CPU_PA_RISC1_1 532) # CPU_PA_RISC2_0 case "${sc_kernel_bits}" in - 32) HP_ARCH="hppa2.0n" ;; - 64) HP_ARCH="hppa2.0w" ;; - '') HP_ARCH="hppa2.0" ;; # HP-UX 10.20 + 32) HP_ARCH=hppa2.0n ;; + 64) HP_ARCH=hppa2.0w ;; + '') HP_ARCH=hppa2.0 ;; # HP-UX 10.20 esac ;; esac fi @@ -662,11 +687,11 @@ EOF exit (0); } EOF - (CCOPTS= $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy` + (CCOPTS="" $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy` test -z "$HP_ARCH" && HP_ARCH=hppa fi ;; esac - if [ ${HP_ARCH} = "hppa2.0w" ] + if [ ${HP_ARCH} = hppa2.0w ] then eval $set_cc_for_build @@ -679,12 +704,12 @@ EOF # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess # => hppa64-hp-hpux11.23 - if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | + if echo __LP64__ | (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | grep -q __LP64__ then - HP_ARCH="hppa2.0w" + HP_ARCH=hppa2.0w else - HP_ARCH="hppa64" + HP_ARCH=hppa64 fi fi echo ${HP_ARCH}-hp-hpux${HPUX_REV} @@ -789,14 +814,14 @@ EOF echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' exit ;; F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*) - FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` - FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'` + FUJITSU_PROC=`uname -m | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz` + FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'` FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'` echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" exit ;; 5000:UNIX_System_V:4.*:*) - FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'` - FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'` + FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'` + FUJITSU_REL=`echo ${UNAME_RELEASE} | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/ /_/'` echo 
"sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" exit ;; i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*) @@ -878,7 +903,7 @@ EOF exit ;; *:GNU/*:*:*) # other systems with GNU libc and userland - echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-${LIBC} + echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr "[:upper:]" "[:lower:]"``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-${LIBC} exit ;; i*86:Minix:*:*) echo ${UNAME_MACHINE}-pc-minix @@ -901,7 +926,7 @@ EOF EV68*) UNAME_MACHINE=alphaev68 ;; esac objdump --private-headers /bin/sh | grep -q ld.so.1 - if test "$?" = 0 ; then LIBC="gnulibc1" ; fi + if test "$?" = 0 ; then LIBC=gnulibc1 ; fi echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; arc:Linux:*:* | arceb:Linux:*:*) @@ -932,6 +957,9 @@ EOF crisv32:Linux:*:*) echo ${UNAME_MACHINE}-axis-linux-${LIBC} exit ;; + e2k:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; frv:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; @@ -944,6 +972,9 @@ EOF ia64:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; + k1om:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; m32r*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; @@ -969,6 +1000,9 @@ EOF eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'` test x"${CPU}" != x && { echo "${CPU}-unknown-linux-${LIBC}"; exit; } ;; + mips64el:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; openrisc*:Linux:*:*) echo or1k-unknown-linux-${LIBC} exit ;; @@ -1001,6 +1035,9 @@ EOF ppcle:Linux:*:*) echo powerpcle-unknown-linux-${LIBC} exit ;; + riscv32:Linux:*:* | riscv64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; s390:Linux:*:* | s390x:Linux:*:*) echo ${UNAME_MACHINE}-ibm-linux-${LIBC} exit ;; @@ -1020,7 +1057,7 @@ EOF echo ${UNAME_MACHINE}-dec-linux-${LIBC} exit ;; x86_64:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + echo ${UNAME_MACHINE}-pc-linux-${LIBC} exit ;; xtensa*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} @@ -1099,7 +1136,7 @@ EOF # uname -m prints for DJGPP always 'pc', but it prints nothing about # the processor, so we play safe by assuming i586. # Note: whatever this is, it MUST be the same as what config.sub - # prints for the "djgpp" host, or else GDB configury will decide that + # prints for the "djgpp" host, or else GDB configure will decide that # this is a cross-build. echo i586-pc-msdosdjgpp exit ;; @@ -1248,6 +1285,9 @@ EOF SX-8R:SUPER-UX:*:*) echo sx8r-nec-superux${UNAME_RELEASE} exit ;; + SX-ACE:SUPER-UX:*:*) + echo sxace-nec-superux${UNAME_RELEASE} + exit ;; Power*:Rhapsody:*:*) echo powerpc-apple-rhapsody${UNAME_RELEASE} exit ;; @@ -1261,9 +1301,9 @@ EOF UNAME_PROCESSOR=powerpc fi if test `echo "$UNAME_RELEASE" | sed -e 's/\..*//'` -le 10 ; then - if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then + if [ "$CC_FOR_BUILD" != no_compiler_found ]; then if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \ - (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \ + (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ grep IS_64BIT_ARCH >/dev/null then case $UNAME_PROCESSOR in @@ -1285,7 +1325,7 @@ EOF exit ;; *:procnto*:*:* | *:QNX:[0123456789]*:*) UNAME_PROCESSOR=`uname -p` - if test "$UNAME_PROCESSOR" = "x86"; then + if test "$UNAME_PROCESSOR" = x86; then UNAME_PROCESSOR=i386 UNAME_MACHINE=pc fi @@ -1316,7 +1356,7 @@ EOF # "uname -m" is not consistent, so use $cputype instead. 
386 # is converted to i386 for consistency with other x86 # operating systems. - if test "$cputype" = "386"; then + if test "$cputype" = 386; then UNAME_MACHINE=i386 else UNAME_MACHINE="$cputype" @@ -1358,7 +1398,7 @@ EOF echo i386-pc-xenix exit ;; i*86:skyos:*:*) - echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE}` | sed -e 's/ .*$//' + echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE} | sed -e 's/ .*$//'` exit ;; i*86:rdos:*:*) echo ${UNAME_MACHINE}-pc-rdos @@ -1369,23 +1409,25 @@ EOF x86_64:VMkernel:*:*) echo ${UNAME_MACHINE}-unknown-esx exit ;; + amd64:Isilon\ OneFS:*:*) + echo x86_64-unknown-onefs + exit ;; esac cat >&2 < in order to provide the needed -information to handle your system. +If $0 has already been updated, send the following data and any +information you think might be pertinent to config-patches@gnu.org to +provide the necessary information to handle your system. config.guess timestamp = $timestamp diff --git a/third-party/jemalloc/jemalloc-src/build-aux/config.sub b/third-party/jemalloc/jemalloc-src/build-aux/config.sub index 0ccff7706810..dd2ca93c6fbe 100755 --- a/third-party/jemalloc/jemalloc-src/build-aux/config.sub +++ b/third-party/jemalloc/jemalloc-src/build-aux/config.sub @@ -1,8 +1,8 @@ #! /bin/sh # Configuration validation subroutine script. -# Copyright 1992-2014 Free Software Foundation, Inc. +# Copyright 1992-2016 Free Software Foundation, Inc. -timestamp='2014-05-01' +timestamp='2016-11-04' # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by @@ -25,7 +25,7 @@ timestamp='2014-05-01' # of the GNU General Public License, version 3 ("GPLv3"). -# Please send patches with a ChangeLog entry to config-patches@gnu.org. +# Please send patches to . # # Configuration subroutine to validate and canonicalize a configuration type. # Supply the specified configuration type as an argument. @@ -33,7 +33,7 @@ timestamp='2014-05-01' # Otherwise, we print the canonical config type on stdout and succeed. # You can get the latest version of this script from: -# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD +# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub # This file is supposed to be the same for all GNU packages # and recognize all the CPU types, system types and aliases @@ -53,8 +53,7 @@ timestamp='2014-05-01' me=`echo "$0" | sed -e 's,.*/,,'` usage="\ -Usage: $0 [OPTION] CPU-MFR-OPSYS - $0 [OPTION] ALIAS +Usage: $0 [OPTION] CPU-MFR-OPSYS or ALIAS Canonicalize a configuration name. @@ -68,7 +67,7 @@ Report bugs and patches to ." version="\ GNU config.sub ($timestamp) -Copyright 1992-2014 Free Software Foundation, Inc. +Copyright 1992-2016 Free Software Foundation, Inc. This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." 
@@ -117,8 +116,8 @@ maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'` case $maybe_os in nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \ linux-musl* | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \ - knetbsd*-gnu* | netbsd*-gnu* | \ - kopensolaris*-gnu* | \ + knetbsd*-gnu* | netbsd*-gnu* | netbsd*-eabi* | \ + kopensolaris*-gnu* | cloudabi*-eabi* | \ storm-chaos* | os2-emx* | rtmk-nova*) os=-$maybe_os basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'` @@ -255,12 +254,13 @@ case $basic_machine in | arc | arceb \ | arm | arm[bl]e | arme[lb] | armv[2-8] | armv[3-8][lb] | armv7[arm] \ | avr | avr32 \ + | ba \ | be32 | be64 \ | bfin \ | c4x | c8051 | clipper \ | d10v | d30v | dlx | dsp16xx \ - | epiphany \ - | fido | fr30 | frv \ + | e2k | epiphany \ + | fido | fr30 | frv | ft32 \ | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \ | hexagon \ | i370 | i860 | i960 | ia64 \ @@ -301,10 +301,12 @@ case $basic_machine in | open8 | or1k | or1knd | or32 \ | pdp10 | pdp11 | pj | pjl \ | powerpc | powerpc64 | powerpc64le | powerpcle \ + | pru \ | pyramid \ + | riscv32 | riscv64 \ | rl78 | rx \ | score \ - | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[34]eb | sheb | shbe | shle | sh[1234]le | sh3ele \ + | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[234]eb | sheb | shbe | shle | sh[1234]le | sh3ele \ | sh64 | sh64le \ | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \ | sparcv8 | sparcv9 | sparcv9b | sparcv9v \ @@ -312,6 +314,7 @@ case $basic_machine in | tahoe | tic4x | tic54x | tic55x | tic6x | tic80 | tron \ | ubicom32 \ | v850 | v850e | v850e1 | v850e2 | v850es | v850e2v3 \ + | visium \ | we32k \ | x86 | xc16x | xstormy16 | xtensa \ | z8k | z80) @@ -326,6 +329,9 @@ case $basic_machine in c6x) basic_machine=tic6x-unknown ;; + leon|leon[3-9]) + basic_machine=sparc-$basic_machine + ;; m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | nvptx | picochip) basic_machine=$basic_machine-unknown os=-none @@ -371,12 +377,13 @@ case $basic_machine in | alphapca5[67]-* | alpha64pca5[67]-* | arc-* | arceb-* \ | arm-* | armbe-* | armle-* | armeb-* | armv*-* \ | avr-* | avr32-* \ + | ba-* \ | be32-* | be64-* \ | bfin-* | bs2000-* \ | c[123]* | c30-* | [cjt]90-* | c4x-* \ | c8051-* | clipper-* | craynv-* | cydra-* \ | d10v-* | d30v-* | dlx-* \ - | elxsi-* \ + | e2k-* | elxsi-* \ | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \ | h8300-* | h8500-* \ | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \ @@ -422,13 +429,15 @@ case $basic_machine in | orion-* \ | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \ | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* \ + | pru-* \ | pyramid-* \ + | riscv32-* | riscv64-* \ | rl78-* | romp-* | rs6000-* | rx-* \ | sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \ | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \ | sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \ | sparclite-* \ - | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx?-* \ + | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx*-* \ | tahoe-* \ | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \ | tile*-* \ @@ -436,6 +445,7 @@ case $basic_machine in | ubicom32-* \ | v850-* | v850e-* | v850e1-* | v850es-* | v850e2-* | v850e2v3-* \ | vax-* \ + | visium-* \ | we32k-* \ | x86-* | x86_64-* | xc16x-* | xps100-* \ | xstormy16-* | xtensa*-* \ @@ -512,6 +522,9 @@ case 
$basic_machine in basic_machine=i386-pc os=-aros ;; + asmjs) + basic_machine=asmjs-unknown + ;; aux) basic_machine=m68k-apple os=-aux @@ -632,6 +645,14 @@ case $basic_machine in basic_machine=m68k-bull os=-sysv3 ;; + e500v[12]) + basic_machine=powerpc-unknown + os=$os"spe" + ;; + e500v[12]-*) + basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'` + os=$os"spe" + ;; ebmon29k) basic_machine=a29k-amd os=-ebmon @@ -773,6 +794,9 @@ case $basic_machine in basic_machine=m68k-isi os=-sysv ;; + leon-*|leon[3-9]-*) + basic_machine=sparc-`echo $basic_machine | sed 's/-.*//'` + ;; m68knommu) basic_machine=m68k-unknown os=-linux @@ -828,6 +852,10 @@ case $basic_machine in basic_machine=powerpc-unknown os=-morphos ;; + moxiebox) + basic_machine=moxie-unknown + os=-moxiebox + ;; msdos) basic_machine=i386-pc os=-msdos @@ -1004,7 +1032,7 @@ case $basic_machine in ppc-* | ppcbe-*) basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'` ;; - ppcle | powerpclittle | ppc-le | powerpc-little) + ppcle | powerpclittle) basic_machine=powerpcle-unknown ;; ppcle-* | powerpclittle-*) @@ -1014,7 +1042,7 @@ case $basic_machine in ;; ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'` ;; - ppc64le | powerpc64little | ppc64-le | powerpc64-little) + ppc64le | powerpc64little) basic_machine=powerpc64le-unknown ;; ppc64le-* | powerpc64little-*) @@ -1360,27 +1388,28 @@ case $os in | -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \ | -sym* | -kopensolaris* | -plan9* \ | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \ - | -aos* | -aros* \ + | -aos* | -aros* | -cloudabi* | -sortix* \ | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \ | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \ | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \ - | -bitrig* | -openbsd* | -solidbsd* \ + | -bitrig* | -openbsd* | -solidbsd* | -libertybsd* \ | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \ | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \ | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \ | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \ | -chorusos* | -chorusrdb* | -cegcc* \ | -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \ - | -mingw32* | -mingw64* | -linux-gnu* | -linux-android* \ + | -midipix* | -mingw32* | -mingw64* | -linux-gnu* | -linux-android* \ | -linux-newlib* | -linux-musl* | -linux-uclibc* \ - | -uxpv* | -beos* | -mpeix* | -udk* \ + | -uxpv* | -beos* | -mpeix* | -udk* | -moxiebox* \ | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \ | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \ | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \ | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \ | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \ | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \ - | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es* | -tirtos*) + | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es* \ + | -onefs* | -tirtos* | -phoenix* | -fuchsia*) # Remember, each alternative MUST END IN *, to match a version number. 
;; -qnx*) @@ -1404,9 +1433,6 @@ case $os in -mac*) os=`echo $os | sed -e 's|mac|macos|'` ;; - # Apple iOS - -ios*) - ;; -linux-dietlibc) os=-linux-dietlibc ;; @@ -1515,6 +1541,8 @@ case $os in ;; -nacl*) ;; + -ios) + ;; -none) ;; *) diff --git a/third-party/jemalloc/jemalloc-src/configure b/third-party/jemalloc/jemalloc-src/configure index 2ad9b5a87a01..6a427b8a051d 100755 --- a/third-party/jemalloc/jemalloc-src/configure +++ b/third-party/jemalloc/jemalloc-src/configure @@ -787,6 +787,7 @@ with_lg_page with_lg_page_sizes with_lg_size_class_group with_version +enable_syscall enable_lazy_lock enable_tls enable_zone_allocator @@ -1442,6 +1443,7 @@ Optional Features: --disable-cache-oblivious Disable support for cache-oblivious allocation alignment + --disable-syscall Disable use of syscall(2) --enable-lazy-lock Enable lazy locking (only lock when multi-threaded) --disable-tls Disable thread-local storage (__thread keyword) --disable-zone-allocator @@ -5311,8 +5313,6 @@ maps_coalesce="1" case "${host}" in *-*-darwin* | *-*-ios*) abi="macho" - $as_echo "#define JEMALLOC_PURGE_MADVISE_FREE " >>confdefs.h - RPATH="" LD_PRELOAD_VAR="DYLD_INSERT_LIBRARIES" so="dylib" @@ -5326,34 +5326,38 @@ case "${host}" in abi="elf" $as_echo "#define JEMALLOC_SYSCTL_VM_OVERCOMMIT " >>confdefs.h - $as_echo "#define JEMALLOC_PURGE_MADVISE_FREE " >>confdefs.h - force_lazy_lock="1" ;; *-*-dragonfly*) abi="elf" - $as_echo "#define JEMALLOC_PURGE_MADVISE_FREE " >>confdefs.h - ;; *-*-openbsd*) abi="elf" - $as_echo "#define JEMALLOC_PURGE_MADVISE_FREE " >>confdefs.h - force_tls="0" ;; *-*-bitrig*) abi="elf" - $as_echo "#define JEMALLOC_PURGE_MADVISE_FREE " >>confdefs.h - ;; - *-*-linux*) + *-*-linux-android) CPPFLAGS="$CPPFLAGS -D_GNU_SOURCE" abi="elf" $as_echo "#define JEMALLOC_HAS_ALLOCA_H 1" >>confdefs.h $as_echo "#define JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY " >>confdefs.h - $as_echo "#define JEMALLOC_PURGE_MADVISE_DONTNEED " >>confdefs.h + $as_echo "#define JEMALLOC_THREADED_INIT " >>confdefs.h + + $as_echo "#define JEMALLOC_C11ATOMICS 1" >>confdefs.h + + force_tls="0" + default_munmap="0" + ;; + *-*-linux* | *-*-kfreebsd*) + CPPFLAGS="$CPPFLAGS -D_GNU_SOURCE" + abi="elf" + $as_echo "#define JEMALLOC_HAS_ALLOCA_H 1" >>confdefs.h + + $as_echo "#define JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY " >>confdefs.h $as_echo "#define JEMALLOC_THREADED_INIT " >>confdefs.h @@ -5388,13 +5392,9 @@ fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $abi" >&5 $as_echo "$abi" >&6; } - $as_echo "#define JEMALLOC_PURGE_MADVISE_FREE " >>confdefs.h - ;; *-*-solaris2*) abi="elf" - $as_echo "#define JEMALLOC_PURGE_MADVISE_FREE " >>confdefs.h - RPATH='-Wl,-R,$(1)' CPPFLAGS="$CPPFLAGS -D_POSIX_PTHREAD_SEMANTICS" LIBS="$LIBS -lposix4 -lsocket -lnsl" @@ -7831,6 +7831,42 @@ fi fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pthread_atfork(3) is compilable" >&5 +$as_echo_n "checking whether pthread_atfork(3) is compilable... " >&6; } +if ${je_cv_pthread_atfork+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +#include + +int +main () +{ + + pthread_atfork((void *)0, (void *)0, (void *)0); + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + je_cv_pthread_atfork=yes +else + je_cv_pthread_atfork=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_pthread_atfork" >&5 +$as_echo "$je_cv_pthread_atfork" >&6; } + + if test "x${je_cv_pthread_atfork}" = "xyes" ; then + $as_echo "#define JEMALLOC_HAVE_PTHREAD_ATFORK " >>confdefs.h + + fi fi CPPFLAGS="$CPPFLAGS -D_REENTRANT" @@ -8113,7 +8149,21 @@ if test "x${je_cv_mach_absolute_time}" = "xyes" ; then fi -SAVED_CFLAGS="${CFLAGS}" +# Check whether --enable-syscall was given. +if test "${enable_syscall+set}" = set; then : + enableval=$enable_syscall; if test "x$enable_syscall" = "xno" ; then + enable_syscall="0" +else + enable_syscall="1" +fi + +else + enable_syscall="1" + +fi + +if test "x$enable_syscall" = "x1" ; then + SAVED_CFLAGS="${CFLAGS}" { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Werror" >&5 $as_echo_n "checking whether compiler supports -Werror... " >&6; } @@ -8183,10 +8233,11 @@ fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_syscall" >&5 $as_echo "$je_cv_syscall" >&6; } -CFLAGS="${SAVED_CFLAGS}" -if test "x$je_cv_syscall" = "xyes" ; then - $as_echo "#define JEMALLOC_HAVE_SYSCALL " >>confdefs.h + CFLAGS="${SAVED_CFLAGS}" + if test "x$je_cv_syscall" = "xyes" ; then + $as_echo "#define JEMALLOC_USE_SYSCALL " >>confdefs.h + fi fi ac_fn_c_check_func "$LINENO" "secure_getenv" "ac_cv_func_secure_getenv" @@ -8573,9 +8624,7 @@ int main () { - { - madvise((void *)0, 0, 0); - } + madvise((void *)0, 0, 0); ; return 0; @@ -8595,6 +8644,118 @@ $as_echo "$je_cv_madvise" >&6; } if test "x${je_cv_madvise}" = "xyes" ; then $as_echo "#define JEMALLOC_HAVE_MADVISE " >>confdefs.h + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether madvise(..., MADV_FREE) is compilable" >&5 +$as_echo_n "checking whether madvise(..., MADV_FREE) is compilable... " >&6; } +if ${je_cv_madv_free+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +#include + +int +main () +{ + + madvise((void *)0, 0, MADV_FREE); + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + je_cv_madv_free=yes +else + je_cv_madv_free=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_madv_free" >&5 +$as_echo "$je_cv_madv_free" >&6; } + + if test "x${je_cv_madv_free}" = "xyes" ; then + $as_echo "#define JEMALLOC_PURGE_MADVISE_FREE " >>confdefs.h + + fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether madvise(..., MADV_DONTNEED) is compilable" >&5 +$as_echo_n "checking whether madvise(..., MADV_DONTNEED) is compilable... " >&6; } +if ${je_cv_madv_dontneed+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +#include + +int +main () +{ + + madvise((void *)0, 0, MADV_DONTNEED); + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + je_cv_madv_dontneed=yes +else + je_cv_madv_dontneed=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_madv_dontneed" >&5 +$as_echo "$je_cv_madv_dontneed" >&6; } + + if test "x${je_cv_madv_dontneed}" = "xyes" ; then + $as_echo "#define JEMALLOC_PURGE_MADVISE_DONTNEED " >>confdefs.h + + fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether madvise(..., MADV_[NO]HUGEPAGE) is compilable" >&5 +$as_echo_n "checking whether madvise(..., MADV_[NO]HUGEPAGE) is compilable... " >&6; } +if ${je_cv_thp+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +#include + +int +main () +{ + + madvise((void *)0, 0, MADV_HUGEPAGE); + madvise((void *)0, 0, MADV_NOHUGEPAGE); + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + je_cv_thp=yes +else + je_cv_thp=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_thp" >&5 +$as_echo "$je_cv_thp" >&6; } + + if test "x${je_cv_thp}" = "xyes" ; then + $as_echo "#define JEMALLOC_THP " >>confdefs.h + + fi fi @@ -8746,14 +8907,19 @@ else /* end confdefs.h. */ #include +#include int main () { + #if MAC_OS_X_VERSION_MIN_REQUIRED < 101200 + #error "os_unfair_lock is not supported" + #else os_unfair_lock lock = OS_UNFAIR_LOCK_INIT; os_unfair_lock_lock(&lock); os_unfair_lock_unlock(&lock); + #endif ; return 0; diff --git a/third-party/jemalloc/jemalloc-src/configure.ac b/third-party/jemalloc/jemalloc-src/configure.ac index 104fd994d892..9573c3020ea7 100644 --- a/third-party/jemalloc/jemalloc-src/configure.ac +++ b/third-party/jemalloc/jemalloc-src/configure.ac @@ -171,7 +171,6 @@ fi if test "x$CFLAGS" = "x" ; then no_CFLAGS="yes" if test "x$GCC" = "xyes" ; then -dnl JE_CFLAGS_APPEND([-std=gnu99]) JE_CFLAGS_APPEND([-std=gnu11]) if test "x$je_cv_cflags_appended" = "x-std=gnu11" ; then AC_DEFINE_UNQUOTED([JEMALLOC_HAS_RESTRICT]) @@ -355,7 +354,6 @@ maps_coalesce="1" case "${host}" in *-*-darwin* | *-*-ios*) abi="macho" - AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ]) RPATH="" LD_PRELOAD_VAR="DYLD_INSERT_LIBRARIES" so="dylib" @@ -368,29 +366,35 @@ case "${host}" in *-*-freebsd*) abi="elf" AC_DEFINE([JEMALLOC_SYSCTL_VM_OVERCOMMIT], [ ]) - AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ]) force_lazy_lock="1" ;; *-*-dragonfly*) abi="elf" - AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ]) ;; *-*-openbsd*) abi="elf" - AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ]) force_tls="0" ;; *-*-bitrig*) abi="elf" - AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ]) ;; - *-*-linux*) + *-*-linux-android) + dnl syscall(2) and secure_getenv(3) are exposed by _GNU_SOURCE. + CPPFLAGS="$CPPFLAGS -D_GNU_SOURCE" + abi="elf" + AC_DEFINE([JEMALLOC_HAS_ALLOCA_H]) + AC_DEFINE([JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY], [ ]) + AC_DEFINE([JEMALLOC_THREADED_INIT], [ ]) + AC_DEFINE([JEMALLOC_C11ATOMICS]) + force_tls="0" + default_munmap="0" + ;; + *-*-linux* | *-*-kfreebsd*) dnl syscall(2) and secure_getenv(3) are exposed by _GNU_SOURCE. 
CPPFLAGS="$CPPFLAGS -D_GNU_SOURCE" abi="elf" AC_DEFINE([JEMALLOC_HAS_ALLOCA_H]) AC_DEFINE([JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY], [ ]) - AC_DEFINE([JEMALLOC_PURGE_MADVISE_DONTNEED], [ ]) AC_DEFINE([JEMALLOC_THREADED_INIT], [ ]) AC_DEFINE([JEMALLOC_USE_CXX_THROW], [ ]) default_munmap="0" @@ -407,11 +411,9 @@ case "${host}" in [abi="elf"], [abi="aout"]) AC_MSG_RESULT([$abi]) - AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ]) ;; *-*-solaris2*) abi="elf" - AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ]) RPATH='-Wl,-R,$(1)' dnl Solaris needs this for sigwait(). CPPFLAGS="$CPPFLAGS -D_POSIX_PTHREAD_SEMANTICS" @@ -1327,6 +1329,14 @@ if test "x$abi" != "xpecoff" ; then AC_CHECK_LIB([pthread], [pthread_create], [LIBS="$LIBS -lpthread"], [AC_SEARCH_LIBS([pthread_create], , , AC_MSG_ERROR([libpthread is missing]))]) + JE_COMPILABLE([pthread_atfork(3)], [ +#include +], [ + pthread_atfork((void *)0, (void *)0, (void *)0); +], [je_cv_pthread_atfork]) + if test "x${je_cv_pthread_atfork}" = "xyes" ; then + AC_DEFINE([JEMALLOC_HAVE_PTHREAD_ATFORK], [ ]) + fi fi CPPFLAGS="$CPPFLAGS -D_REENTRANT" @@ -1386,20 +1396,33 @@ if test "x${je_cv_mach_absolute_time}" = "xyes" ; then AC_DEFINE([JEMALLOC_HAVE_MACH_ABSOLUTE_TIME]) fi -dnl Check if syscall(2) is usable. Treat warnings as errors, so that e.g. OS X -dnl 10.12's deprecation warning prevents use. -SAVED_CFLAGS="${CFLAGS}" -JE_CFLAGS_APPEND([-Werror]) -JE_COMPILABLE([syscall(2)], [ +dnl Use syscall(2) (if available) by default. +AC_ARG_ENABLE([syscall], + [AS_HELP_STRING([--disable-syscall], [Disable use of syscall(2)])], +[if test "x$enable_syscall" = "xno" ; then + enable_syscall="0" +else + enable_syscall="1" +fi +], +[enable_syscall="1"] +) +if test "x$enable_syscall" = "x1" ; then + dnl Check if syscall(2) is usable. Treat warnings as errors, so that e.g. OS + dnl X 10.12's deprecation warning prevents use. + SAVED_CFLAGS="${CFLAGS}" + JE_CFLAGS_APPEND([-Werror]) + JE_COMPILABLE([syscall(2)], [ #include #include ], [ syscall(SYS_write, 2, "hello", 5); ], - [je_cv_syscall]) -CFLAGS="${SAVED_CFLAGS}" -if test "x$je_cv_syscall" = "xyes" ; then - AC_DEFINE([JEMALLOC_HAVE_SYSCALL], [ ]) + [je_cv_syscall]) + CFLAGS="${SAVED_CFLAGS}" + if test "x$je_cv_syscall" = "xyes" ; then + AC_DEFINE([JEMALLOC_USE_SYSCALL], [ ]) + fi fi dnl Check if the GNU-specific secure_getenv function exists. @@ -1599,12 +1622,41 @@ dnl Check for madvise(2). JE_COMPILABLE([madvise(2)], [ #include ], [ - { - madvise((void *)0, 0, 0); - } + madvise((void *)0, 0, 0); ], [je_cv_madvise]) if test "x${je_cv_madvise}" = "xyes" ; then AC_DEFINE([JEMALLOC_HAVE_MADVISE], [ ]) + + dnl Check for madvise(..., MADV_FREE). + JE_COMPILABLE([madvise(..., MADV_FREE)], [ +#include +], [ + madvise((void *)0, 0, MADV_FREE); +], [je_cv_madv_free]) + if test "x${je_cv_madv_free}" = "xyes" ; then + AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ]) + fi + + dnl Check for madvise(..., MADV_DONTNEED). + JE_COMPILABLE([madvise(..., MADV_DONTNEED)], [ +#include +], [ + madvise((void *)0, 0, MADV_DONTNEED); +], [je_cv_madv_dontneed]) + if test "x${je_cv_madv_dontneed}" = "xyes" ; then + AC_DEFINE([JEMALLOC_PURGE_MADVISE_DONTNEED], [ ]) + fi + + dnl Check for madvise(..., MADV_[NO]HUGEPAGE). 
+ JE_COMPILABLE([madvise(..., MADV_[[NO]]HUGEPAGE)], [ +#include +], [ + madvise((void *)0, 0, MADV_HUGEPAGE); + madvise((void *)0, 0, MADV_NOHUGEPAGE); +], [je_cv_thp]) + if test "x${je_cv_thp}" = "xyes" ; then + AC_DEFINE([JEMALLOC_THP], [ ]) + fi fi dnl ============================================================================ @@ -1669,10 +1721,15 @@ dnl Check for os_unfair_lock operations as provided on Darwin. JE_COMPILABLE([Darwin os_unfair_lock_*()], [ #include +#include ], [ + #if MAC_OS_X_VERSION_MIN_REQUIRED < 101200 + #error "os_unfair_lock is not supported" + #else os_unfair_lock lock = OS_UNFAIR_LOCK_INIT; os_unfair_lock_lock(&lock); os_unfair_lock_unlock(&lock); + #endif ], [je_cv_os_unfair_lock]) if test "x${je_cv_os_unfair_lock}" = "xyes" ; then AC_DEFINE([JEMALLOC_OS_UNFAIR_LOCK], [ ]) diff --git a/third-party/jemalloc/jemalloc-src/doc/jemalloc.3 b/third-party/jemalloc/jemalloc-src/doc/jemalloc.3 index c2d7118ba4fb..3709f66923c1 100644 --- a/third-party/jemalloc/jemalloc-src/doc/jemalloc.3 +++ b/third-party/jemalloc/jemalloc-src/doc/jemalloc.3 @@ -2,12 +2,12 @@ .\" Title: JEMALLOC .\" Author: Jason Evans .\" Generator: DocBook XSL Stylesheets v1.79.1 -.\" Date: 11/07/2016 +.\" Date: 12/03/2016 .\" Manual: User Manual -.\" Source: jemalloc 4.3.1-0-g0110fa8451af905affd77c3bea0d545fee2251b2 +.\" Source: jemalloc 4.4.0-0-gf1f76357313e7dcad7262f17a48ff0a2e005fcdc .\" Language: English .\" -.TH "JEMALLOC" "3" "11/07/2016" "jemalloc 4.3.1-0-g0110fa8451af" "User Manual" +.TH "JEMALLOC" "3" "12/03/2016" "jemalloc 4.4.0-0-gf1f76357313e" "User Manual" .\" ----------------------------------------------------------------- .\" * Define some portability stuff .\" ----------------------------------------------------------------- @@ -31,7 +31,7 @@ jemalloc \- general purpose memory allocation functions .SH "LIBRARY" .PP -This manual describes jemalloc 4\&.3\&.1\-0\-g0110fa8451af905affd77c3bea0d545fee2251b2\&. More information can be found at the +This manual describes jemalloc 4\&.4\&.0\-0\-gf1f76357313e7dcad7262f17a48ff0a2e005fcdc\&. More information can be found at the \m[blue]\fBjemalloc website\fR\m[]\&\s-2\u[1]\d\s+2\&. .SH "SYNOPSIS" .sp @@ -350,7 +350,7 @@ for (i = 0; i < nbins; i++) { mib[2] = i; len = sizeof(bin_size); - mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0); + mallctlbymib(mib, miblen, (void *)&bin_size, &len, NULL, 0); /* Do something with bin_size\&.\&.\&. */ } .fi diff --git a/third-party/jemalloc/jemalloc-src/doc/jemalloc.html b/third-party/jemalloc/jemalloc-src/doc/jemalloc.html index 58a35d5eca1c..db2504f6ec61 100644 --- a/third-party/jemalloc/jemalloc-src/doc/jemalloc.html +++ b/third-party/jemalloc/jemalloc-src/doc/jemalloc.html @@ -1,8 +1,8 @@ -JEMALLOC

[doc/jemalloc.html hunk not reproduced: the old and new lines are the rendered HTML manual, and its extracted page text (section headings and API prototypes such as mallocx/rallocx/mallctl) adds nothing beyond the jemalloc.3 and jemalloc.xml.in changes shown in this patch.]
diff --git a/third-party/jemalloc/jemalloc-src/doc/jemalloc.xml.in b/third-party/jemalloc/jemalloc-src/doc/jemalloc.xml.in index 3d2e721d3904..d9c83452de2a 100644 --- a/third-party/jemalloc/jemalloc-src/doc/jemalloc.xml.in +++ b/third-party/jemalloc/jemalloc-src/doc/jemalloc.xml.in @@ -406,7 +406,7 @@ for (i = 0; i < nbins; i++) { mib[2] = i; len = sizeof(bin_size); - mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0); + mallctlbymib(mib, miblen, (void *)&bin_size, &len, NULL, 0); /* Do something with bin_size... */ }]]> diff --git a/third-party/jemalloc/jemalloc-src/include/jemalloc/internal/arena.h b/third-party/jemalloc/jemalloc-src/include/jemalloc/internal/arena.h index f39ce54b5ca4..ce4e6029e2fc 100644 --- a/third-party/jemalloc/jemalloc-src/include/jemalloc/internal/arena.h +++ b/third-party/jemalloc/jemalloc-src/include/jemalloc/internal/arena.h @@ -190,6 +190,14 @@ struct arena_chunk_s { */ extent_node_t node; + /* + * True if memory could be backed by transparent huge pages. This is + * only directly relevant to Linux, since it is the only supported + * platform on which jemalloc interacts with explicit transparent huge + * page controls. + */ + bool hugepage; + /* * Map of pages within chunk that keeps track of free/large/small. The * first map_bias entries are omitted, since the chunk header does not @@ -374,10 +382,12 @@ struct arena_s { dss_prec_t dss_prec; - /* Extant arena chunks. */ ql_head(extent_node_t) achunks; + /* Extent serial number generator state. */ + size_t extent_sn_next; + /* * In order to avoid rapid chunk allocation/deallocation when an arena * oscillates right on the cusp of needing a new chunk, cache the most @@ -453,9 +463,9 @@ struct arena_s { * orderings are needed, which is why there are two trees with the same * contents.
*/ - extent_tree_t chunks_szad_cached; + extent_tree_t chunks_szsnad_cached; extent_tree_t chunks_ad_cached; - extent_tree_t chunks_szad_retained; + extent_tree_t chunks_szsnad_retained; extent_tree_t chunks_ad_retained; malloc_mutex_t chunks_mtx; @@ -522,13 +532,13 @@ void arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, extent_node_t *arena_node_alloc(tsdn_t *tsdn, arena_t *arena); void arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node); void *arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize, - size_t alignment, bool *zero); + size_t alignment, size_t *sn, bool *zero); void arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, - size_t usize); + size_t usize, size_t sn); void arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t oldsize, size_t usize); void arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, - void *chunk, size_t oldsize, size_t usize); + void *chunk, size_t oldsize, size_t usize, size_t sn); bool arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t oldsize, size_t usize, bool *zero); ssize_t arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena); @@ -601,6 +611,7 @@ void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, unsigned arena_nthreads_get(arena_t *arena, bool internal); void arena_nthreads_inc(arena_t *arena, bool internal); void arena_nthreads_dec(arena_t *arena, bool internal); +size_t arena_extent_sn_next(arena_t *arena); arena_t *arena_new(tsdn_t *tsdn, unsigned ind); void arena_boot(void); void arena_prefork0(tsdn_t *tsdn, arena_t *arena); diff --git a/third-party/jemalloc/jemalloc-src/include/jemalloc/internal/chunk.h b/third-party/jemalloc/jemalloc-src/include/jemalloc/internal/chunk.h index 38c9a012da11..50b9904b04ec 100644 --- a/third-party/jemalloc/jemalloc-src/include/jemalloc/internal/chunk.h +++ b/third-party/jemalloc/jemalloc-src/include/jemalloc/internal/chunk.h @@ -58,15 +58,16 @@ void chunk_deregister(const void *chunk, const extent_node_t *node); void *chunk_alloc_base(size_t size); void *chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment, - bool *zero, bool *commit, bool dalloc_node); + size_t *sn, bool *zero, bool *commit, bool dalloc_node); void *chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment, - bool *zero, bool *commit); + size_t *sn, bool *zero, bool *commit); void chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, - chunk_hooks_t *chunk_hooks, void *chunk, size_t size, bool committed); -void chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, - chunk_hooks_t *chunk_hooks, void *chunk, size_t size, bool zeroed, + chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t sn, bool committed); +void chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, + chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t sn, + bool zeroed, bool committed); bool chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t offset, size_t length); diff --git a/third-party/jemalloc/jemalloc-src/include/jemalloc/internal/extent.h b/third-party/jemalloc/jemalloc-src/include/jemalloc/internal/extent.h index 49d76a57f503..168ffe64389d 100644 --- a/third-party/jemalloc/jemalloc-src/include/jemalloc/internal/extent.h +++ b/third-party/jemalloc/jemalloc-src/include/jemalloc/internal/extent.h @@ -18,6 +18,20 @@ struct 
extent_node_s { /* Total region size. */ size_t en_size; + /* + * Serial number (potentially non-unique). + * + * In principle serial numbers can wrap around on 32-bit systems if + * JEMALLOC_MUNMAP is defined, but as long as comparison functions fall + * back on address comparison for equal serial numbers, stable (if + * imperfect) ordering is maintained. + * + * Serial numbers may not be unique even in the absence of wrap-around, + * e.g. when splitting an extent and assigning the same serial number to + * both resulting adjacent extents. + */ + size_t en_sn; + /* * The zeroed flag is used by chunk recycling code to track whether * memory is zero-filled. @@ -45,8 +59,8 @@ struct extent_node_s { qr(extent_node_t) cc_link; union { - /* Linkage for the size/address-ordered tree. */ - rb_node(extent_node_t) szad_link; + /* Linkage for the size/sn/address-ordered tree. */ + rb_node(extent_node_t) szsnad_link; /* Linkage for arena's achunks, huge, and node_cache lists. */ ql_elm(extent_node_t) ql_link; @@ -61,7 +75,7 @@ typedef rb_tree(extent_node_t) extent_tree_t; /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS -rb_proto(, extent_tree_szad_, extent_tree_t, extent_node_t) +rb_proto(, extent_tree_szsnad_, extent_tree_t, extent_node_t) rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t) @@ -73,6 +87,7 @@ rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t) arena_t *extent_node_arena_get(const extent_node_t *node); void *extent_node_addr_get(const extent_node_t *node); size_t extent_node_size_get(const extent_node_t *node); +size_t extent_node_sn_get(const extent_node_t *node); bool extent_node_zeroed_get(const extent_node_t *node); bool extent_node_committed_get(const extent_node_t *node); bool extent_node_achunk_get(const extent_node_t *node); @@ -80,12 +95,13 @@ prof_tctx_t *extent_node_prof_tctx_get(const extent_node_t *node); void extent_node_arena_set(extent_node_t *node, arena_t *arena); void extent_node_addr_set(extent_node_t *node, void *addr); void extent_node_size_set(extent_node_t *node, size_t size); +void extent_node_sn_set(extent_node_t *node, size_t sn); void extent_node_zeroed_set(extent_node_t *node, bool zeroed); void extent_node_committed_set(extent_node_t *node, bool committed); void extent_node_achunk_set(extent_node_t *node, bool achunk); void extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx); void extent_node_init(extent_node_t *node, arena_t *arena, void *addr, - size_t size, bool zeroed, bool committed); + size_t size, size_t sn, bool zeroed, bool committed); void extent_node_dirty_linkage_init(extent_node_t *node); void extent_node_dirty_insert(extent_node_t *node, arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty); @@ -114,6 +130,13 @@ extent_node_size_get(const extent_node_t *node) return (node->en_size); } +JEMALLOC_INLINE size_t +extent_node_sn_get(const extent_node_t *node) +{ + + return (node->en_sn); +} + JEMALLOC_INLINE bool extent_node_zeroed_get(const extent_node_t *node) { @@ -164,6 +187,13 @@ extent_node_size_set(extent_node_t *node, size_t size) node->en_size = size; } +JEMALLOC_INLINE void +extent_node_sn_set(extent_node_t *node, size_t sn) +{ + + node->en_sn = sn; +} + JEMALLOC_INLINE void extent_node_zeroed_set(extent_node_t *node, bool zeroed) { @@ -194,12 +224,13 @@ extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx) JEMALLOC_INLINE void extent_node_init(extent_node_t *node, arena_t *arena, void *addr, size_t size, - bool zeroed, bool 
committed) + size_t sn, bool zeroed, bool committed) { extent_node_arena_set(node, arena); extent_node_addr_set(node, addr); extent_node_size_set(node, size); + extent_node_sn_set(node, sn); extent_node_zeroed_set(node, zeroed); extent_node_committed_set(node, committed); extent_node_achunk_set(node, false); diff --git a/third-party/jemalloc/jemalloc-src/include/jemalloc/internal/jemalloc_internal.h.in b/third-party/jemalloc/jemalloc-src/include/jemalloc/internal/jemalloc_internal.h.in index fdc8fef9d445..e7ace7d8cf89 100644 --- a/third-party/jemalloc/jemalloc-src/include/jemalloc/internal/jemalloc_internal.h.in +++ b/third-party/jemalloc/jemalloc-src/include/jemalloc/internal/jemalloc_internal.h.in @@ -337,7 +337,7 @@ typedef unsigned szind_t; /* Return the nearest aligned address at or below a. */ #define ALIGNMENT_ADDR2BASE(a, alignment) \ - ((void *)((uintptr_t)(a) & (-(alignment)))) + ((void *)((uintptr_t)(a) & ((~(alignment)) + 1))) /* Return the offset between a and the nearest aligned address at or below a. */ #define ALIGNMENT_ADDR2OFFSET(a, alignment) \ @@ -345,7 +345,7 @@ typedef unsigned szind_t; /* Return the smallest alignment multiple that is >= s. */ #define ALIGNMENT_CEILING(s, alignment) \ - (((s) + (alignment - 1)) & (-(alignment))) + (((s) + (alignment - 1)) & ((~(alignment)) + 1)) /* Declare a variable-length array. */ #if __STDC_VERSION__ < 199901L diff --git a/third-party/jemalloc/jemalloc-src/include/jemalloc/internal/jemalloc_internal_defs.h.in b/third-party/jemalloc/jemalloc-src/include/jemalloc/internal/jemalloc_internal_defs.h.in index 9b3dca5044a3..def4ba5503a9 100644 --- a/third-party/jemalloc/jemalloc-src/include/jemalloc/internal/jemalloc_internal_defs.h.in +++ b/third-party/jemalloc/jemalloc-src/include/jemalloc/internal/jemalloc_internal_defs.h.in @@ -55,11 +55,6 @@ */ #undef JEMALLOC_HAVE_BUILTIN_CLZ -/* - * Defined if madvise(2) is available. - */ -#undef JEMALLOC_HAVE_MADVISE - /* * Defined if os_unfair_lock_*() functions are available, as provided by Darwin. */ @@ -71,8 +66,8 @@ */ #undef JEMALLOC_OSSPIN -/* Defined if syscall(2) is available. */ -#undef JEMALLOC_HAVE_SYSCALL +/* Defined if syscall(2) is usable. */ +#undef JEMALLOC_USE_SYSCALL /* * Defined if secure_getenv(3) is available. @@ -84,6 +79,9 @@ */ #undef JEMALLOC_HAVE_ISSETUGID +/* Defined if pthread_atfork(3) is available. */ +#undef JEMALLOC_HAVE_PTHREAD_ATFORK + /* * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available. */ @@ -252,18 +250,26 @@ #undef JEMALLOC_SYSCTL_VM_OVERCOMMIT #undef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY +/* Defined if madvise(2) is available. */ +#undef JEMALLOC_HAVE_MADVISE + /* * Methods for purging unused pages differ between operating systems. * - * madvise(..., MADV_DONTNEED) : On Linux, this immediately discards pages, - * such that new pages will be demand-zeroed if - * the address region is later touched. - * madvise(..., MADV_FREE) : On FreeBSD and Darwin, this marks pages as being - * unused, such that they will be discarded rather - * than swapped out. + * madvise(..., MADV_FREE) : This marks pages as being unused, such that they + * will be discarded rather than swapped out. + * madvise(..., MADV_DONTNEED) : This immediately discards pages, such that + * new pages will be demand-zeroed if the + * address region is later touched. 
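For a power-of-two alignment, ((~(alignment)) + 1) is just the two's-complement negation of alignment, so the rewritten macros produce the same mask as before while avoiding a unary minus on an unsigned operand (presumably the reason for the change). A small standalone check of the arithmetic:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Same shape as ALIGNMENT_CEILING above, written as a function; the
 * alignment must be a power of two, as jemalloc requires. */
static size_t
align_ceiling(size_t s, size_t alignment)
{
	/* (~alignment) + 1 == -alignment: a mask whose low lg(alignment)
	 * bits are clear. */
	return (s + alignment - 1) & ((~alignment) + 1);
}

int
main(void)
{
	assert(((~(size_t)16) + 1) == (size_t)0 - 16);
	printf("%zu\n", align_ceiling(25, 16));	/* 32 */
	printf("%zu\n", align_ceiling(32, 16));	/* 32: already aligned */
	return 0;
}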
*/ -#undef JEMALLOC_PURGE_MADVISE_DONTNEED #undef JEMALLOC_PURGE_MADVISE_FREE +#undef JEMALLOC_PURGE_MADVISE_DONTNEED + +/* + * Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE + * arguments to madvise(2). + */ +#undef JEMALLOC_THP /* Define if operating system has alloca.h header. */ #undef JEMALLOC_HAS_ALLOCA_H diff --git a/third-party/jemalloc/jemalloc-src/include/jemalloc/internal/pages.h b/third-party/jemalloc/jemalloc-src/include/jemalloc/internal/pages.h index e21effd14f77..4ae9f156a7b3 100644 --- a/third-party/jemalloc/jemalloc-src/include/jemalloc/internal/pages.h +++ b/third-party/jemalloc/jemalloc-src/include/jemalloc/internal/pages.h @@ -16,6 +16,8 @@ void *pages_trim(void *addr, size_t alloc_size, size_t leadsize, bool pages_commit(void *addr, size_t size); bool pages_decommit(void *addr, size_t size); bool pages_purge(void *addr, size_t size); +bool pages_huge(void *addr, size_t size); +bool pages_nohuge(void *addr, size_t size); void pages_boot(void); #endif /* JEMALLOC_H_EXTERNS */ diff --git a/third-party/jemalloc/jemalloc-src/include/jemalloc/internal/private_symbols.txt b/third-party/jemalloc/jemalloc-src/include/jemalloc/internal/private_symbols.txt index 87c8c9b710e9..c1c6c4090248 100644 --- a/third-party/jemalloc/jemalloc-src/include/jemalloc/internal/private_symbols.txt +++ b/third-party/jemalloc/jemalloc-src/include/jemalloc/internal/private_symbols.txt @@ -36,6 +36,7 @@ arena_decay_time_get arena_decay_time_set arena_dss_prec_get arena_dss_prec_set +arena_extent_sn_next arena_get arena_ichoose arena_init @@ -218,6 +219,8 @@ extent_node_prof_tctx_get extent_node_prof_tctx_set extent_node_size_get extent_node_size_set +extent_node_sn_get +extent_node_sn_set extent_node_zeroed_get extent_node_zeroed_set extent_tree_ad_destroy @@ -239,25 +242,25 @@ extent_tree_ad_reverse_iter extent_tree_ad_reverse_iter_recurse extent_tree_ad_reverse_iter_start extent_tree_ad_search -extent_tree_szad_destroy -extent_tree_szad_destroy_recurse -extent_tree_szad_empty -extent_tree_szad_first -extent_tree_szad_insert -extent_tree_szad_iter -extent_tree_szad_iter_recurse -extent_tree_szad_iter_start -extent_tree_szad_last -extent_tree_szad_new -extent_tree_szad_next -extent_tree_szad_nsearch -extent_tree_szad_prev -extent_tree_szad_psearch -extent_tree_szad_remove -extent_tree_szad_reverse_iter -extent_tree_szad_reverse_iter_recurse -extent_tree_szad_reverse_iter_start -extent_tree_szad_search +extent_tree_szsnad_destroy +extent_tree_szsnad_destroy_recurse +extent_tree_szsnad_empty +extent_tree_szsnad_first +extent_tree_szsnad_insert +extent_tree_szsnad_iter +extent_tree_szsnad_iter_recurse +extent_tree_szsnad_iter_start +extent_tree_szsnad_last +extent_tree_szsnad_new +extent_tree_szsnad_next +extent_tree_szsnad_nsearch +extent_tree_szsnad_prev +extent_tree_szsnad_psearch +extent_tree_szsnad_remove +extent_tree_szsnad_reverse_iter +extent_tree_szsnad_reverse_iter_recurse +extent_tree_szsnad_reverse_iter_start +extent_tree_szsnad_search ffs_llu ffs_lu ffs_u @@ -394,7 +397,9 @@ p2rz pages_boot pages_commit pages_decommit +pages_huge pages_map +pages_nohuge pages_purge pages_trim pages_unmap diff --git a/third-party/jemalloc/jemalloc-src/include/jemalloc/internal/stats.h b/third-party/jemalloc/jemalloc-src/include/jemalloc/internal/stats.h index b62181783eab..04e7dae14c7e 100644 --- a/third-party/jemalloc/jemalloc-src/include/jemalloc/internal/stats.h +++ b/third-party/jemalloc/jemalloc-src/include/jemalloc/internal/stats.h @@ -175,25 +175,21 @@ 
stats_cactive_get(void) JEMALLOC_INLINE void stats_cactive_add(size_t size) { - UNUSED size_t cactive; assert(size > 0); assert((size & chunksize_mask) == 0); - cactive = atomic_add_z(&stats_cactive, size); - assert(cactive - size < cactive); + atomic_add_z(&stats_cactive, size); } JEMALLOC_INLINE void stats_cactive_sub(size_t size) { - UNUSED size_t cactive; assert(size > 0); assert((size & chunksize_mask) == 0); - cactive = atomic_sub_z(&stats_cactive, size); - assert(cactive + size > cactive); + atomic_sub_z(&stats_cactive, size); } #endif diff --git a/third-party/jemalloc/jemalloc-src/include/jemalloc/internal/util.h b/third-party/jemalloc/jemalloc-src/include/jemalloc/internal/util.h index aee00d6d9a47..4b56d652ed31 100644 --- a/third-party/jemalloc/jemalloc-src/include/jemalloc/internal/util.h +++ b/third-party/jemalloc/jemalloc-src/include/jemalloc/internal/util.h @@ -41,8 +41,12 @@ #define MALLOC_PRINTF_BUFSIZE 4096 /* Junk fill patterns. */ -#define JEMALLOC_ALLOC_JUNK ((uint8_t)0xa5) -#define JEMALLOC_FREE_JUNK ((uint8_t)0x5a) +#ifndef JEMALLOC_ALLOC_JUNK +# define JEMALLOC_ALLOC_JUNK ((uint8_t)0xa5) +#endif +#ifndef JEMALLOC_FREE_JUNK +# define JEMALLOC_FREE_JUNK ((uint8_t)0x5a) +#endif /* * Wrap a cpp argument that contains commas such that it isn't broken up into diff --git a/third-party/jemalloc/jemalloc-src/include/jemalloc/internal/valgrind.h b/third-party/jemalloc/jemalloc-src/include/jemalloc/internal/valgrind.h index 1a8680828acf..877a142b62d9 100644 --- a/third-party/jemalloc/jemalloc-src/include/jemalloc/internal/valgrind.h +++ b/third-party/jemalloc/jemalloc-src/include/jemalloc/internal/valgrind.h @@ -36,13 +36,25 @@ zero); \ } \ } while (0) -#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, tsdn, ptr, usize, \ - ptr_maybe_null, old_ptr, old_usize, old_rzsize, old_ptr_maybe_null, \ - zero) do { \ +#define JEMALLOC_VALGRIND_REALLOC_MOVED_no(ptr, old_ptr) \ + (false) +#define JEMALLOC_VALGRIND_REALLOC_MOVED_maybe(ptr, old_ptr) \ + ((ptr) != (old_ptr)) +#define JEMALLOC_VALGRIND_REALLOC_PTR_NULL_no(ptr) \ + (false) +#define JEMALLOC_VALGRIND_REALLOC_PTR_NULL_maybe(ptr) \ + (ptr == NULL) +#define JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_no(old_ptr) \ + (false) +#define JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_maybe(old_ptr) \ + (old_ptr == NULL) +#define JEMALLOC_VALGRIND_REALLOC(moved, tsdn, ptr, usize, ptr_null, \ + old_ptr, old_usize, old_rzsize, old_ptr_null, zero) do { \ if (unlikely(in_valgrind)) { \ size_t rzsize = p2rz(tsdn, ptr); \ \ - if (!maybe_moved || ptr == old_ptr) { \ + if (!JEMALLOC_VALGRIND_REALLOC_MOVED_##moved(ptr, \ + old_ptr)) { \ VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize, \ usize, rzsize); \ if (zero && old_usize < usize) { \ @@ -51,11 +63,13 @@ old_usize), usize - old_usize); \ } \ } else { \ - if (!old_ptr_maybe_null || old_ptr != NULL) { \ + if (!JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_## \ + old_ptr_null(old_ptr)) { \ valgrind_freelike_block(old_ptr, \ old_rzsize); \ } \ - if (!ptr_maybe_null || ptr != NULL) { \ + if (!JEMALLOC_VALGRIND_REALLOC_PTR_NULL_## \ + ptr_null(ptr)) { \ size_t copy_size = (old_usize < usize) \ ? 
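The reworked Valgrind macros take literal tokens (no/maybe) rather than runtime booleans and pick the test by token pasting, so the statically-known cases compile down to constants. A stripped-down illustration of the same pattern with invented macro names:

#include <stdio.h>

/* The first argument must be the literal token "no" or "maybe"; pasting
 * it onto MOVED_ selects either a constant or a real comparison. */
#define MOVED_no(p, q)		(0)
#define MOVED_maybe(p, q)	((p) != (q))
#define REPORT_REALLOC(moved, p, q) \
	printf(MOVED_##moved(p, q) ? "block moved\n" : "resized in place\n")

int
main(void)
{
	char a[8], b[8];

	REPORT_REALLOC(no, a, a);	/* condition is the constant 0 */
	REPORT_REALLOC(maybe, a, b);	/* condition is (a) != (b) */
	return 0;
}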
old_usize : usize; \ size_t tail_size = usize - copy_size; \ diff --git a/third-party/jemalloc/jemalloc-src/msvc/projects/vc2015/test_threads/test_threads.cpp b/third-party/jemalloc/jemalloc-src/msvc/projects/vc2015/test_threads/test_threads.cpp old mode 100644 new mode 100755 index c8cb7d66a794..a3d1a792aee9 --- a/third-party/jemalloc/jemalloc-src/msvc/projects/vc2015/test_threads/test_threads.cpp +++ b/third-party/jemalloc/jemalloc-src/msvc/projects/vc2015/test_threads/test_threads.cpp @@ -21,7 +21,7 @@ int test_threads() je_malloc_conf = "narenas:3"; int narenas = 0; size_t sz = sizeof(narenas); - je_mallctl("opt.narenas", &narenas, &sz, NULL, 0); + je_mallctl("opt.narenas", (void *)&narenas, &sz, NULL, 0); if (narenas != 3) { printf("Error: unexpected number of arenas: %d\n", narenas); return 1; @@ -33,7 +33,7 @@ int test_threads() je_malloc_stats_print(NULL, NULL, NULL); size_t allocated1; size_t sz1 = sizeof(allocated1); - je_mallctl("stats.active", &allocated1, &sz1, NULL, 0); + je_mallctl("stats.active", (void *)&allocated1, &sz1, NULL, 0); printf("\nPress Enter to start threads...\n"); getchar(); printf("Starting %d threads x %d x %d iterations...\n", numThreads, numIter1, numIter2); @@ -78,7 +78,7 @@ int test_threads() } je_malloc_stats_print(NULL, NULL, NULL); size_t allocated2; - je_mallctl("stats.active", &allocated2, &sz1, NULL, 0); + je_mallctl("stats.active", (void *)&allocated2, &sz1, NULL, 0); size_t leaked = allocated2 - allocated1; printf("\nDone. Leaked: %zd bytes\n", leaked); bool failed = leaked > 65536; // in case C++ runtime allocated something (e.g. iostream locale or facet) diff --git a/third-party/jemalloc/jemalloc-src/src/arena.c b/third-party/jemalloc/jemalloc-src/src/arena.c index e196b13378e6..648a8da3ab4b 100644 --- a/third-party/jemalloc/jemalloc-src/src/arena.c +++ b/third-party/jemalloc/jemalloc-src/src/arena.c @@ -38,8 +38,8 @@ static void arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run, bool dirty, bool cleaned, bool decommitted); static void arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin); -static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, - arena_run_t *run, arena_bin_t *bin); +static void arena_bin_lower_run(arena_t *arena, arena_run_t *run, + arena_bin_t *bin); /******************************************************************************/ @@ -55,8 +55,31 @@ arena_miscelm_size_get(const arena_chunk_map_misc_t *miscelm) return (arena_mapbits_size_decode(mapbits)); } +JEMALLOC_INLINE_C const extent_node_t * +arena_miscelm_extent_get(const arena_chunk_map_misc_t *miscelm) +{ + arena_chunk_t *chunk; + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm); + return (&chunk->node); +} + +JEMALLOC_INLINE_C int +arena_sn_comp(const arena_chunk_map_misc_t *a, const arena_chunk_map_misc_t *b) +{ + size_t a_sn, b_sn; + + assert(a != NULL); + assert(b != NULL); + + a_sn = extent_node_sn_get(arena_miscelm_extent_get(a)); + b_sn = extent_node_sn_get(arena_miscelm_extent_get(b)); + + return ((a_sn > b_sn) - (a_sn < b_sn)); +} + JEMALLOC_INLINE_C int -arena_run_addr_comp(const arena_chunk_map_misc_t *a, +arena_ad_comp(const arena_chunk_map_misc_t *a, const arena_chunk_map_misc_t *b) { uintptr_t a_miscelm = (uintptr_t)a; @@ -68,9 +91,26 @@ arena_run_addr_comp(const arena_chunk_map_misc_t *a, return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm)); } +JEMALLOC_INLINE_C int +arena_snad_comp(const arena_chunk_map_misc_t *a, + const arena_chunk_map_misc_t *b) +{ + int 
ret; + + assert(a != NULL); + assert(b != NULL); + + ret = arena_sn_comp(a, b); + if (ret != 0) + return (ret); + + ret = arena_ad_comp(a, b); + return (ret); +} + /* Generate pairing heap functions. */ ph_gen(static UNUSED, arena_run_heap_, arena_run_heap_t, arena_chunk_map_misc_t, - ph_link, arena_run_addr_comp) + ph_link, arena_snad_comp) #ifdef JEMALLOC_JET #undef run_quantize_floor @@ -529,7 +569,7 @@ arena_chunk_init_spare(arena_t *arena) static bool arena_chunk_register(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, - bool zero) + size_t sn, bool zero) { /* @@ -538,7 +578,7 @@ arena_chunk_register(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, * of runs is tracked individually, and upon chunk deallocation the * entire chunk is in a consistent commit state. */ - extent_node_init(&chunk->node, arena, chunk, chunksize, zero, true); + extent_node_init(&chunk->node, arena, chunk, chunksize, sn, zero, true); extent_node_achunk_set(&chunk->node, true); return (chunk_register(tsdn, chunk, &chunk->node)); } @@ -548,28 +588,30 @@ arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, bool *zero, bool *commit) { arena_chunk_t *chunk; + size_t sn; malloc_mutex_unlock(tsdn, &arena->lock); chunk = (arena_chunk_t *)chunk_alloc_wrapper(tsdn, arena, chunk_hooks, - NULL, chunksize, chunksize, zero, commit); + NULL, chunksize, chunksize, &sn, zero, commit); if (chunk != NULL && !*commit) { /* Commit header. */ if (chunk_hooks->commit(chunk, chunksize, 0, map_bias << LG_PAGE, arena->ind)) { chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, - (void *)chunk, chunksize, *zero, *commit); + (void *)chunk, chunksize, sn, *zero, *commit); chunk = NULL; } } - if (chunk != NULL && arena_chunk_register(tsdn, arena, chunk, *zero)) { + if (chunk != NULL && arena_chunk_register(tsdn, arena, chunk, sn, + *zero)) { if (!*commit) { /* Undo commit of header. */ chunk_hooks->decommit(chunk, chunksize, 0, map_bias << LG_PAGE, arena->ind); } chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, (void *)chunk, - chunksize, *zero, *commit); + chunksize, sn, *zero, *commit); chunk = NULL; } @@ -583,13 +625,14 @@ arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero, { arena_chunk_t *chunk; chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; + size_t sn; chunk = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, chunksize, - chunksize, zero, commit, true); + chunksize, &sn, zero, commit, true); if (chunk != NULL) { - if (arena_chunk_register(tsdn, arena, chunk, *zero)) { + if (arena_chunk_register(tsdn, arena, chunk, sn, *zero)) { chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk, - chunksize, true); + chunksize, sn, true); return (NULL); } } @@ -621,6 +664,8 @@ arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena) if (chunk == NULL) return (NULL); + chunk->hugepage = true; + /* * Initialize the map to contain one maximal free untouched run. 
Mark * the pages as zeroed if arena_chunk_alloc_internal() returned a zeroed @@ -684,11 +729,14 @@ arena_chunk_alloc(tsdn_t *tsdn, arena_t *arena) static void arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk) { + size_t sn, hugepage; bool committed; chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; chunk_deregister(chunk, &chunk->node); + sn = extent_node_sn_get(&chunk->node); + hugepage = chunk->hugepage; committed = (arena_mapbits_decommitted_get(chunk, map_bias) == 0); if (!committed) { /* @@ -701,9 +749,17 @@ arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk) chunk_hooks.decommit(chunk, chunksize, 0, map_bias << LG_PAGE, arena->ind); } + if (!hugepage) { + /* + * Convert chunk back to the default state, so that all + * subsequent chunk allocations start out with chunks that can + * be backed by transparent huge pages. + */ + pages_huge(chunk, chunksize); + } chunk_dalloc_cache(tsdn, arena, &chunk_hooks, (void *)chunk, chunksize, - committed); + sn, committed); if (config_stats) { arena->stats.mapped -= chunksize; @@ -859,14 +915,14 @@ arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node) static void * arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena, - chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, bool *zero, - size_t csize) + chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, size_t *sn, + bool *zero, size_t csize) { void *ret; bool commit = true; ret = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, csize, - alignment, zero, &commit); + alignment, sn, zero, &commit); if (ret == NULL) { /* Revert optimistic stats updates. */ malloc_mutex_lock(tsdn, &arena->lock); @@ -883,7 +939,7 @@ arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena, void * arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize, - size_t alignment, bool *zero) + size_t alignment, size_t *sn, bool *zero) { void *ret; chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; @@ -900,18 +956,19 @@ arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize, arena_nactive_add(arena, usize >> LG_PAGE); ret = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, csize, - alignment, zero, &commit, true); + alignment, sn, zero, &commit, true); malloc_mutex_unlock(tsdn, &arena->lock); if (ret == NULL) { ret = arena_chunk_alloc_huge_hard(tsdn, arena, &chunk_hooks, - usize, alignment, zero, csize); + usize, alignment, sn, zero, csize); } return (ret); } void -arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize) +arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize, + size_t sn) { chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; size_t csize; @@ -924,7 +981,7 @@ arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize) } arena_nactive_sub(arena, usize >> LG_PAGE); - chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk, csize, true); + chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk, csize, sn, true); malloc_mutex_unlock(tsdn, &arena->lock); } @@ -948,7 +1005,7 @@ arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena, void *chunk, void arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, void *chunk, - size_t oldsize, size_t usize) + size_t oldsize, size_t usize, size_t sn) { size_t udiff = oldsize - usize; size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize); @@ -967,7 +1024,7 @@ arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, void *chunk, CHUNK_CEILING(usize)); chunk_dalloc_cache(tsdn, arena, 
&chunk_hooks, nchunk, cdiff, - true); + sn, true); } malloc_mutex_unlock(tsdn, &arena->lock); } @@ -975,13 +1032,13 @@ arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, void *chunk, static bool arena_chunk_ralloc_huge_expand_hard(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, size_t oldsize, size_t usize, - bool *zero, void *nchunk, size_t udiff, size_t cdiff) + size_t *sn, bool *zero, void *nchunk, size_t udiff, size_t cdiff) { bool err; bool commit = true; err = (chunk_alloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff, - chunksize, zero, &commit) == NULL); + chunksize, sn, zero, &commit) == NULL); if (err) { /* Revert optimistic stats updates. */ malloc_mutex_lock(tsdn, &arena->lock); @@ -995,7 +1052,7 @@ arena_chunk_ralloc_huge_expand_hard(tsdn_t *tsdn, arena_t *arena, } else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk, cdiff, true, arena->ind)) { chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff, - *zero, true); + *sn, *zero, true); err = true; } return (err); @@ -1010,6 +1067,7 @@ arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, void *chunk, void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize)); size_t udiff = usize - oldsize; size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize); + size_t sn; bool commit = true; malloc_mutex_lock(tsdn, &arena->lock); @@ -1022,16 +1080,16 @@ arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, void *chunk, arena_nactive_add(arena, udiff >> LG_PAGE); err = (chunk_alloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff, - chunksize, zero, &commit, true) == NULL); + chunksize, &sn, zero, &commit, true) == NULL); malloc_mutex_unlock(tsdn, &arena->lock); if (err) { err = arena_chunk_ralloc_huge_expand_hard(tsdn, arena, - &chunk_hooks, chunk, oldsize, usize, zero, nchunk, udiff, - cdiff); + &chunk_hooks, chunk, oldsize, usize, &sn, zero, nchunk, + udiff, cdiff); } else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk, cdiff, true, arena->ind)) { chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, nchunk, cdiff, - *zero, true); + sn, *zero, true); err = true; } @@ -1519,6 +1577,7 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, if (rdelm == &chunkselm->rd) { extent_node_t *chunkselm_next; + size_t sn; bool zero, commit; UNUSED void *chunk; @@ -1536,8 +1595,8 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, commit = false; chunk = chunk_alloc_cache(tsdn, arena, chunk_hooks, extent_node_addr_get(chunkselm), - extent_node_size_get(chunkselm), chunksize, &zero, - &commit, false); + extent_node_size_get(chunkselm), chunksize, &sn, + &zero, &commit, false); assert(chunk == extent_node_addr_get(chunkselm)); assert(zero == extent_node_zeroed_get(chunkselm)); extent_node_dirty_insert(chunkselm, purge_runs_sentinel, @@ -1634,6 +1693,17 @@ arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, run_size = arena_mapbits_large_size_get(chunk, pageind); npages = run_size >> LG_PAGE; + /* + * If this is the first run purged within chunk, mark + * the chunk as non-huge. This will prevent all use of + * transparent huge pages for this chunk until the chunk + * as a whole is deallocated. 
+ */ + if (chunk->hugepage) { + pages_nohuge(chunk, chunksize); + chunk->hugepage = false; + } + assert(pageind + npages <= chunk_npages); assert(!arena_mapbits_decommitted_get(chunk, pageind)); assert(!arena_mapbits_decommitted_get(chunk, @@ -1703,13 +1773,14 @@ arena_unstash_purged(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, cc_link); void *addr = extent_node_addr_get(chunkselm); size_t size = extent_node_size_get(chunkselm); + size_t sn = extent_node_sn_get(chunkselm); bool zeroed = extent_node_zeroed_get(chunkselm); bool committed = extent_node_committed_get(chunkselm); extent_node_dirty_remove(chunkselm); arena_node_dalloc(tsdn, arena, chunkselm); chunkselm = chunkselm_next; chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, addr, - size, zeroed, committed); + size, sn, zeroed, committed); } else { arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm); @@ -2315,7 +2386,7 @@ arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin) arena_dalloc_bin_run(tsdn, arena, chunk, run, bin); } else - arena_bin_lower_run(arena, chunk, run, bin); + arena_bin_lower_run(arena, run, bin); } return (ret); } @@ -2820,16 +2891,18 @@ arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, } static void -arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, - arena_bin_t *bin) +arena_bin_lower_run(arena_t *arena, arena_run_t *run, arena_bin_t *bin) { /* - * Make sure that if bin->runcur is non-NULL, it refers to the lowest - * non-full run. It is okay to NULL runcur out rather than proactively - * keeping it pointing at the lowest non-full run. + * Make sure that if bin->runcur is non-NULL, it refers to the + * oldest/lowest non-full run. It is okay to NULL runcur out rather + * than proactively keeping it pointing at the oldest/lowest non-full + * run. */ - if ((uintptr_t)run < (uintptr_t)bin->runcur) { + if (bin->runcur != NULL && + arena_snad_comp(arena_run_to_miscelm(bin->runcur), + arena_run_to_miscelm(run)) > 0) { /* Switch runcur. 
*/ if (bin->runcur->nfree > 0) arena_bin_runs_insert(bin, bin->runcur); @@ -2865,7 +2938,7 @@ arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, arena_dissociate_bin_run(chunk, run, bin); arena_dalloc_bin_run(tsdn, arena, chunk, run, bin); } else if (run->nfree == 1 && run != bin->runcur) - arena_bin_lower_run(arena, chunk, run, bin); + arena_bin_lower_run(arena, run, bin); if (config_stats) { bin->stats.ndalloc++; @@ -3452,6 +3525,13 @@ arena_nthreads_dec(arena_t *arena, bool internal) atomic_sub_u(&arena->nthreads[internal], 1); } +size_t +arena_extent_sn_next(arena_t *arena) +{ + + return (atomic_add_z(&arena->extent_sn_next, 1) - 1); +} + arena_t * arena_new(tsdn_t *tsdn, unsigned ind) { @@ -3511,6 +3591,8 @@ arena_new(tsdn_t *tsdn, unsigned ind) ql_new(&arena->achunks); + arena->extent_sn_next = 0; + arena->spare = NULL; arena->lg_dirty_mult = arena_lg_dirty_mult_default_get(); @@ -3532,9 +3614,9 @@ arena_new(tsdn_t *tsdn, unsigned ind) WITNESS_RANK_ARENA_HUGE)) return (NULL); - extent_tree_szad_new(&arena->chunks_szad_cached); + extent_tree_szsnad_new(&arena->chunks_szsnad_cached); extent_tree_ad_new(&arena->chunks_ad_cached); - extent_tree_szad_new(&arena->chunks_szad_retained); + extent_tree_szsnad_new(&arena->chunks_szsnad_retained); extent_tree_ad_new(&arena->chunks_ad_retained); if (malloc_mutex_init(&arena->chunks_mtx, "arena_chunks", WITNESS_RANK_ARENA_CHUNKS)) diff --git a/third-party/jemalloc/jemalloc-src/src/base.c b/third-party/jemalloc/jemalloc-src/src/base.c index 81b0801fdefc..5681a3f36d40 100644 --- a/third-party/jemalloc/jemalloc-src/src/base.c +++ b/third-party/jemalloc/jemalloc-src/src/base.c @@ -5,7 +5,8 @@ /* Data. */ static malloc_mutex_t base_mtx; -static extent_tree_t base_avail_szad; +static size_t base_extent_sn_next; +static extent_tree_t base_avail_szsnad; static extent_node_t *base_nodes; static size_t base_allocated; static size_t base_resident; @@ -39,6 +40,14 @@ base_node_dalloc(tsdn_t *tsdn, extent_node_t *node) base_nodes = node; } +static void +base_extent_node_init(extent_node_t *node, void *addr, size_t size) +{ + size_t sn = atomic_add_z(&base_extent_sn_next, 1) - 1; + + extent_node_init(node, NULL, addr, size, sn, true, true); +} + static extent_node_t * base_chunk_alloc(tsdn_t *tsdn, size_t minsize) { @@ -68,7 +77,7 @@ base_chunk_alloc(tsdn_t *tsdn, size_t minsize) base_resident += PAGE_CEILING(nsize); } } - extent_node_init(node, NULL, addr, csize, true, true); + base_extent_node_init(node, addr, csize); return (node); } @@ -92,12 +101,12 @@ base_alloc(tsdn_t *tsdn, size_t size) csize = CACHELINE_CEILING(size); usize = s2u(csize); - extent_node_init(&key, NULL, NULL, usize, false, false); + extent_node_init(&key, NULL, NULL, usize, 0, false, false); malloc_mutex_lock(tsdn, &base_mtx); - node = extent_tree_szad_nsearch(&base_avail_szad, &key); + node = extent_tree_szsnad_nsearch(&base_avail_szsnad, &key); if (node != NULL) { /* Use existing space. */ - extent_tree_szad_remove(&base_avail_szad, node); + extent_tree_szsnad_remove(&base_avail_szsnad, node); } else { /* Try to allocate more space. 
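arena_extent_sn_next() above draws serial numbers from a per-arena counter with an atomic add, so earlier allocations always receive smaller numbers than later ones. A standalone sketch of the same counter using C11 atomics; atomic_fetch_add returns the old value, so it does not need the "- 1" adjustment applied to atomic_add_z's return:

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

static atomic_size_t extent_sn_next = 0;

static size_t
sn_next(void)
{
	/* Returns the pre-increment value: 0, 1, 2, ... */
	return atomic_fetch_add(&extent_sn_next, 1);
}

int
main(void)
{
	size_t a = sn_next();
	size_t b = sn_next();
	size_t c = sn_next();

	printf("%zu %zu %zu\n", a, b, c);	/* 0 1 2 */
	return 0;
}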
*/ node = base_chunk_alloc(tsdn, csize); @@ -111,7 +120,7 @@ base_alloc(tsdn_t *tsdn, size_t size) if (extent_node_size_get(node) > csize) { extent_node_addr_set(node, (void *)((uintptr_t)ret + csize)); extent_node_size_set(node, extent_node_size_get(node) - csize); - extent_tree_szad_insert(&base_avail_szad, node); + extent_tree_szsnad_insert(&base_avail_szsnad, node); } else base_node_dalloc(tsdn, node); if (config_stats) { @@ -149,7 +158,8 @@ base_boot(void) if (malloc_mutex_init(&base_mtx, "base", WITNESS_RANK_BASE)) return (true); - extent_tree_szad_new(&base_avail_szad); + base_extent_sn_next = 0; + extent_tree_szsnad_new(&base_avail_szsnad); base_nodes = NULL; return (false); diff --git a/third-party/jemalloc/jemalloc-src/src/chunk.c b/third-party/jemalloc/jemalloc-src/src/chunk.c index 07e26f77c9f3..c1c514a860fa 100644 --- a/third-party/jemalloc/jemalloc-src/src/chunk.c +++ b/third-party/jemalloc/jemalloc-src/src/chunk.c @@ -50,9 +50,9 @@ const chunk_hooks_t chunk_hooks_default = { */ static void chunk_record(tsdn_t *tsdn, arena_t *arena, - chunk_hooks_t *chunk_hooks, extent_tree_t *chunks_szad, - extent_tree_t *chunks_ad, bool cache, void *chunk, size_t size, bool zeroed, - bool committed); + chunk_hooks_t *chunk_hooks, extent_tree_t *chunks_szsnad, + extent_tree_t *chunks_ad, bool cache, void *chunk, size_t size, size_t sn, + bool zeroed, bool committed); /******************************************************************************/ @@ -183,33 +183,35 @@ chunk_deregister(const void *chunk, const extent_node_t *node) } /* - * Do first-best-fit chunk selection, i.e. select the lowest chunk that best - * fits. + * Do first-best-fit chunk selection, i.e. select the oldest/lowest chunk that + * best fits. */ static extent_node_t * -chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szad, - extent_tree_t *chunks_ad, size_t size) +chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szsnad, size_t size) { extent_node_t key; assert(size == CHUNK_CEILING(size)); - extent_node_init(&key, arena, NULL, size, false, false); - return (extent_tree_szad_nsearch(chunks_szad, &key)); + extent_node_init(&key, arena, NULL, size, 0, false, false); + return (extent_tree_szsnad_nsearch(chunks_szsnad, &key)); } static void * chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, - extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache, - void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit, - bool dalloc_node) + extent_tree_t *chunks_szsnad, extent_tree_t *chunks_ad, bool cache, + void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero, + bool *commit, bool dalloc_node) { void *ret; extent_node_t *node; size_t alloc_size, leadsize, trailsize; bool zeroed, committed; + assert(CHUNK_CEILING(size) == size); + assert(alignment > 0); assert(new_addr == NULL || alignment == chunksize); + assert(CHUNK_ADDR2BASE(new_addr) == new_addr); /* * Cached chunks use the node linkage embedded in their headers, in * which case dalloc_node is true, and new_addr is non-NULL because @@ -217,7 +219,7 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, */ assert(dalloc_node || new_addr != NULL); - alloc_size = CHUNK_CEILING(s2u(size + alignment - chunksize)); + alloc_size = size + CHUNK_CEILING(alignment) - chunksize; /* Beware size_t wrap-around. 
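The new alloc_size expression in chunk_recycle() pads the requested size by the chunk-rounded alignment less one chunk, instead of rounding the padded request up to a size class. Because the recycled extents are themselves chunk-aligned, that padding is exactly enough to guarantee an aligned sub-range of the requested size. A worked example with illustrative values (2 MiB chunks, 4 MiB alignment):

#include <stddef.h>
#include <stdio.h>

#define MiB	((size_t)1 << 20)

int
main(void)
{
	size_t chunksize = 2 * MiB;	/* illustrative, not a build constant */
	size_t size = 8 * MiB;
	size_t alignment = 4 * MiB;	/* already a multiple of chunksize */
	size_t alloc_size = size + alignment - chunksize;	/* 10 MiB */

	/* Worst case: the extent found starts 2 MiB past a 4 MiB boundary,
	 * so at most alignment - chunksize bytes of lead space are trimmed
	 * and the full 8 MiB remain, correctly aligned. */
	printf("search for %zu MiB, trim at most %zu MiB\n",
	    alloc_size / MiB, (alignment - chunksize) / MiB);
	return 0;
}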
*/ if (alloc_size < size) return (NULL); @@ -225,12 +227,11 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks); if (new_addr != NULL) { extent_node_t key; - extent_node_init(&key, arena, new_addr, alloc_size, false, + extent_node_init(&key, arena, new_addr, alloc_size, 0, false, false); node = extent_tree_ad_search(chunks_ad, &key); } else { - node = chunk_first_best_fit(arena, chunks_szad, chunks_ad, - alloc_size); + node = chunk_first_best_fit(arena, chunks_szsnad, alloc_size); } if (node == NULL || (new_addr != NULL && extent_node_size_get(node) < size)) { @@ -243,6 +244,7 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, assert(extent_node_size_get(node) >= leadsize + size); trailsize = extent_node_size_get(node) - leadsize - size; ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize); + *sn = extent_node_sn_get(node); zeroed = extent_node_zeroed_get(node); if (zeroed) *zero = true; @@ -257,13 +259,13 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, return (NULL); } /* Remove node from the tree. */ - extent_tree_szad_remove(chunks_szad, node); + extent_tree_szsnad_remove(chunks_szsnad, node); extent_tree_ad_remove(chunks_ad, node); arena_chunk_cache_maybe_remove(arena, node, cache); if (leadsize != 0) { /* Insert the leading space as a smaller chunk. */ extent_node_size_set(node, leadsize); - extent_tree_szad_insert(chunks_szad, node); + extent_tree_szsnad_insert(chunks_szsnad, node); extent_tree_ad_insert(chunks_ad, node); arena_chunk_cache_maybe_insert(arena, node, cache); node = NULL; @@ -275,9 +277,9 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, if (dalloc_node && node != NULL) arena_node_dalloc(tsdn, arena, node); malloc_mutex_unlock(tsdn, &arena->chunks_mtx); - chunk_record(tsdn, arena, chunk_hooks, chunks_szad, - chunks_ad, cache, ret, size + trailsize, zeroed, - committed); + chunk_record(tsdn, arena, chunk_hooks, chunks_szsnad, + chunks_ad, cache, ret, size + trailsize, *sn, + zeroed, committed); return (NULL); } /* Insert the trailing space as a smaller chunk. 
*/ @@ -286,22 +288,22 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, if (node == NULL) { malloc_mutex_unlock(tsdn, &arena->chunks_mtx); chunk_record(tsdn, arena, chunk_hooks, - chunks_szad, chunks_ad, cache, ret, size + - trailsize, zeroed, committed); + chunks_szsnad, chunks_ad, cache, ret, size + + trailsize, *sn, zeroed, committed); return (NULL); } } extent_node_init(node, arena, (void *)((uintptr_t)(ret) + size), - trailsize, zeroed, committed); - extent_tree_szad_insert(chunks_szad, node); + trailsize, *sn, zeroed, committed); + extent_tree_szsnad_insert(chunks_szsnad, node); extent_tree_ad_insert(chunks_ad, node); arena_chunk_cache_maybe_insert(arena, node, cache); node = NULL; } if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) { malloc_mutex_unlock(tsdn, &arena->chunks_mtx); - chunk_record(tsdn, arena, chunk_hooks, chunks_szad, chunks_ad, - cache, ret, size, zeroed, committed); + chunk_record(tsdn, arena, chunk_hooks, chunks_szsnad, chunks_ad, + cache, ret, size, *sn, zeroed, committed); return (NULL); } malloc_mutex_unlock(tsdn, &arena->chunks_mtx); @@ -385,8 +387,8 @@ chunk_alloc_base(size_t size) void * chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, - void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit, - bool dalloc_node) + void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero, + bool *commit, bool dalloc_node) { void *ret; @@ -396,8 +398,8 @@ chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, assert((alignment & chunksize_mask) == 0); ret = chunk_recycle(tsdn, arena, chunk_hooks, - &arena->chunks_szad_cached, &arena->chunks_ad_cached, true, - new_addr, size, alignment, zero, commit, dalloc_node); + &arena->chunks_szsnad_cached, &arena->chunks_ad_cached, true, + new_addr, size, alignment, sn, zero, commit, dalloc_node); if (ret == NULL) return (NULL); if (config_valgrind) @@ -451,7 +453,8 @@ chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero, static void * chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, - void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit) + void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero, + bool *commit) { void *ret; @@ -461,8 +464,8 @@ chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, assert((alignment & chunksize_mask) == 0); ret = chunk_recycle(tsdn, arena, chunk_hooks, - &arena->chunks_szad_retained, &arena->chunks_ad_retained, false, - new_addr, size, alignment, zero, commit, true); + &arena->chunks_szsnad_retained, &arena->chunks_ad_retained, false, + new_addr, size, alignment, sn, zero, commit, true); if (config_stats && ret != NULL) arena->stats.retained -= size; @@ -472,14 +475,15 @@ chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, void * chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, - void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit) + void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero, + bool *commit) { void *ret; chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks); ret = chunk_alloc_retained(tsdn, arena, chunk_hooks, new_addr, size, - alignment, zero, commit); + alignment, sn, zero, commit); if (ret == NULL) { if (chunk_hooks->alloc == chunk_alloc_default) { /* Call directly to propagate tsdn. 
*/ @@ -493,6 +497,8 @@ chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, if (ret == NULL) return (NULL); + *sn = arena_extent_sn_next(arena); + if (config_valgrind && chunk_hooks->alloc != chunk_alloc_default) JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize); @@ -503,8 +509,8 @@ chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, static void chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, - extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache, - void *chunk, size_t size, bool zeroed, bool committed) + extent_tree_t *chunks_szsnad, extent_tree_t *chunks_ad, bool cache, + void *chunk, size_t size, size_t sn, bool zeroed, bool committed) { bool unzeroed; extent_node_t *node, *prev; @@ -516,7 +522,7 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, malloc_mutex_lock(tsdn, &arena->chunks_mtx); chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks); - extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0, + extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0, 0, false, false); node = extent_tree_ad_nsearch(chunks_ad, &key); /* Try to coalesce forward. */ @@ -528,15 +534,17 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, /* * Coalesce chunk with the following address range. This does * not change the position within chunks_ad, so only - * remove/insert from/into chunks_szad. + * remove/insert from/into chunks_szsnad. */ - extent_tree_szad_remove(chunks_szad, node); + extent_tree_szsnad_remove(chunks_szsnad, node); arena_chunk_cache_maybe_remove(arena, node, cache); extent_node_addr_set(node, chunk); extent_node_size_set(node, size + extent_node_size_get(node)); + if (sn < extent_node_sn_get(node)) + extent_node_sn_set(node, sn); extent_node_zeroed_set(node, extent_node_zeroed_get(node) && !unzeroed); - extent_tree_szad_insert(chunks_szad, node); + extent_tree_szsnad_insert(chunks_szsnad, node); arena_chunk_cache_maybe_insert(arena, node, cache); } else { /* Coalescing forward failed, so insert a new node. */ @@ -554,10 +562,10 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, } goto label_return; } - extent_node_init(node, arena, chunk, size, !unzeroed, + extent_node_init(node, arena, chunk, size, sn, !unzeroed, committed); extent_tree_ad_insert(chunks_ad, node); - extent_tree_szad_insert(chunks_szad, node); + extent_tree_szsnad_insert(chunks_szsnad, node); arena_chunk_cache_maybe_insert(arena, node, cache); } @@ -571,19 +579,21 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, /* * Coalesce chunk with the previous address range. This does * not change the position within chunks_ad, so only - * remove/insert node from/into chunks_szad. + * remove/insert node from/into chunks_szsnad. 
*/ - extent_tree_szad_remove(chunks_szad, prev); + extent_tree_szsnad_remove(chunks_szsnad, prev); extent_tree_ad_remove(chunks_ad, prev); arena_chunk_cache_maybe_remove(arena, prev, cache); - extent_tree_szad_remove(chunks_szad, node); + extent_tree_szsnad_remove(chunks_szsnad, node); arena_chunk_cache_maybe_remove(arena, node, cache); extent_node_addr_set(node, extent_node_addr_get(prev)); extent_node_size_set(node, extent_node_size_get(prev) + extent_node_size_get(node)); + if (extent_node_sn_get(prev) < extent_node_sn_get(node)) + extent_node_sn_set(node, extent_node_sn_get(prev)); extent_node_zeroed_set(node, extent_node_zeroed_get(prev) && extent_node_zeroed_get(node)); - extent_tree_szad_insert(chunks_szad, node); + extent_tree_szsnad_insert(chunks_szsnad, node); arena_chunk_cache_maybe_insert(arena, node, cache); arena_node_dalloc(tsdn, arena, prev); @@ -595,7 +605,7 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, void chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, - void *chunk, size_t size, bool committed) + void *chunk, size_t size, size_t sn, bool committed) { assert(chunk != NULL); @@ -603,8 +613,9 @@ chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, assert(size != 0); assert((size & chunksize_mask) == 0); - chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szad_cached, - &arena->chunks_ad_cached, true, chunk, size, false, committed); + chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szsnad_cached, + &arena->chunks_ad_cached, true, chunk, size, sn, false, + committed); arena_maybe_purge(tsdn, arena); } @@ -627,7 +638,7 @@ chunk_dalloc_default(void *chunk, size_t size, bool committed, void chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, - void *chunk, size_t size, bool zeroed, bool committed) + void *chunk, size_t size, size_t sn, bool zeroed, bool committed) { bool err; @@ -653,8 +664,9 @@ chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, } zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size, arena->ind); - chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szad_retained, - &arena->chunks_ad_retained, false, chunk, size, zeroed, committed); + chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szsnad_retained, + &arena->chunks_ad_retained, false, chunk, size, sn, zeroed, + committed); if (config_stats) arena->stats.retained += size; diff --git a/third-party/jemalloc/jemalloc-src/src/chunk_dss.c b/third-party/jemalloc/jemalloc-src/src/chunk_dss.c index 85a13548f403..ee3f83888e0f 100644 --- a/third-party/jemalloc/jemalloc-src/src/chunk_dss.c +++ b/third-party/jemalloc/jemalloc-src/src/chunk_dss.c @@ -162,7 +162,8 @@ chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, CHUNK_HOOKS_INITIALIZER; chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, cpad, cpad_size, - false, true); + arena_extent_sn_next(arena), false, + true); } if (*zero) { JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED( diff --git a/third-party/jemalloc/jemalloc-src/src/extent.c b/third-party/jemalloc/jemalloc-src/src/extent.c index 9f5146e5ff5c..218156c608c1 100644 --- a/third-party/jemalloc/jemalloc-src/src/extent.c +++ b/third-party/jemalloc/jemalloc-src/src/extent.c @@ -3,43 +3,46 @@ /******************************************************************************/ +/* + * Round down to the nearest chunk size that can actually be requested during + * normal huge allocation. 
+ */ JEMALLOC_INLINE_C size_t extent_quantize(size_t size) { + size_t ret; + szind_t ind; - /* - * Round down to the nearest chunk size that can actually be requested - * during normal huge allocation. - */ - return (index2size(size2index(size + 1) - 1)); + assert(size > 0); + + ind = size2index(size + 1); + if (ind == 0) { + /* Avoid underflow. */ + return (index2size(0)); + } + ret = index2size(ind - 1); + assert(ret <= size); + return (ret); } JEMALLOC_INLINE_C int -extent_szad_comp(const extent_node_t *a, const extent_node_t *b) +extent_sz_comp(const extent_node_t *a, const extent_node_t *b) { - int ret; size_t a_qsize = extent_quantize(extent_node_size_get(a)); size_t b_qsize = extent_quantize(extent_node_size_get(b)); - /* - * Compare based on quantized size rather than size, in order to sort - * equally useful extents only by address. - */ - ret = (a_qsize > b_qsize) - (a_qsize < b_qsize); - if (ret == 0) { - uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a); - uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b); + return ((a_qsize > b_qsize) - (a_qsize < b_qsize)); +} - ret = (a_addr > b_addr) - (a_addr < b_addr); - } +JEMALLOC_INLINE_C int +extent_sn_comp(const extent_node_t *a, const extent_node_t *b) +{ + size_t a_sn = extent_node_sn_get(a); + size_t b_sn = extent_node_sn_get(b); - return (ret); + return ((a_sn > b_sn) - (a_sn < b_sn)); } -/* Generate red-black tree functions. */ -rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, szad_link, - extent_szad_comp) - JEMALLOC_INLINE_C int extent_ad_comp(const extent_node_t *a, const extent_node_t *b) { @@ -49,5 +52,26 @@ extent_ad_comp(const extent_node_t *a, const extent_node_t *b) return ((a_addr > b_addr) - (a_addr < b_addr)); } +JEMALLOC_INLINE_C int +extent_szsnad_comp(const extent_node_t *a, const extent_node_t *b) +{ + int ret; + + ret = extent_sz_comp(a, b); + if (ret != 0) + return (ret); + + ret = extent_sn_comp(a, b); + if (ret != 0) + return (ret); + + ret = extent_ad_comp(a, b); + return (ret); +} + +/* Generate red-black tree functions. */ +rb_gen(, extent_tree_szsnad_, extent_tree_t, extent_node_t, szsnad_link, + extent_szsnad_comp) + /* Generate red-black tree functions. */ rb_gen(, extent_tree_ad_, extent_tree_t, extent_node_t, ad_link, extent_ad_comp) diff --git a/third-party/jemalloc/jemalloc-src/src/huge.c b/third-party/jemalloc/jemalloc-src/src/huge.c index 62e6932b7f00..8abd8c00caa1 100644 --- a/third-party/jemalloc/jemalloc-src/src/huge.c +++ b/third-party/jemalloc/jemalloc-src/src/huge.c @@ -56,6 +56,7 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, size_t ausize; arena_t *iarena; extent_node_t *node; + size_t sn; bool is_zeroed; /* Allocate one or more contiguous chunks for this request. */ @@ -68,7 +69,8 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, assert(ausize >= chunksize); /* Allocate an extent node with which to track the chunk. */ - iarena = (!tsdn_null(tsdn)) ? arena_ichoose(tsdn_tsd(tsdn), NULL) : a0get(); + iarena = (!tsdn_null(tsdn)) ? 
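extent_quantize() rounds a size down to the nearest size class, and the new ind == 0 check keeps the ind - 1 step from underflowing when the size is at or below the smallest class. A toy version of the same rounding with an invented class table (not jemalloc's real size classes):

#include <stddef.h>
#include <stdio.h>

static const size_t classes[] = {8, 16, 32, 48, 64, 96, 128};
#define NCLASSES	(sizeof(classes) / sizeof(classes[0]))

/* Smallest index whose class is >= size; NCLASSES if none fits. */
static size_t
size2index(size_t size)
{
	size_t i;

	for (i = 0; i < NCLASSES; i++) {
		if (classes[i] >= size)
			break;
	}
	return i;
}

/* Largest class <= size.  Guard i == 0 so the "- 1" cannot underflow,
 * mirroring extent_quantize(); sizes below the smallest class fall back
 * to the smallest class. */
static size_t
quantize(size_t size)
{
	size_t i = size2index(size + 1);

	if (i == 0)
		return classes[0];
	return classes[i - 1];
}

int
main(void)
{
	printf("%zu %zu %zu\n", quantize(100), quantize(8), quantize(5));
	/* 96 8 8 (the last value is the guarded edge case) */
	return 0;
}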
arena_ichoose(tsdn_tsd(tsdn), NULL) : + a0get(); node = ipallocztm(tsdn, CACHELINE_CEILING(sizeof(extent_node_t)), CACHELINE, false, NULL, true, iarena); if (node == NULL) @@ -82,15 +84,15 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, if (likely(!tsdn_null(tsdn))) arena = arena_choose(tsdn_tsd(tsdn), arena); if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(tsdn, - arena, usize, alignment, &is_zeroed)) == NULL) { + arena, usize, alignment, &sn, &is_zeroed)) == NULL) { idalloctm(tsdn, node, NULL, true, true); return (NULL); } - extent_node_init(node, arena, ret, usize, is_zeroed, true); + extent_node_init(node, arena, ret, usize, sn, is_zeroed, true); if (huge_node_set(tsdn, ret, node)) { - arena_chunk_dalloc_huge(tsdn, arena, ret, usize); + arena_chunk_dalloc_huge(tsdn, arena, ret, usize, sn); idalloctm(tsdn, node, NULL, true, true); return (NULL); } @@ -245,7 +247,8 @@ huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize, malloc_mutex_unlock(tsdn, &arena->huge_mtx); /* Zap the excess chunks. */ - arena_chunk_ralloc_huge_shrink(tsdn, arena, ptr, oldsize, usize); + arena_chunk_ralloc_huge_shrink(tsdn, arena, ptr, oldsize, usize, + extent_node_sn_get(node)); return (false); } @@ -407,7 +410,8 @@ huge_dalloc(tsdn_t *tsdn, void *ptr) huge_dalloc_junk(extent_node_addr_get(node), extent_node_size_get(node)); arena_chunk_dalloc_huge(tsdn, extent_node_arena_get(node), - extent_node_addr_get(node), extent_node_size_get(node)); + extent_node_addr_get(node), extent_node_size_get(node), + extent_node_sn_get(node)); idalloctm(tsdn, node, NULL, true, true); arena_decay_tick(tsdn, arena); diff --git a/third-party/jemalloc/jemalloc-src/src/jemalloc.c b/third-party/jemalloc/jemalloc-src/src/jemalloc.c index 38650ff06260..baead6640c8f 100644 --- a/third-party/jemalloc/jemalloc-src/src/jemalloc.c +++ b/third-party/jemalloc/jemalloc-src/src/jemalloc.c @@ -1056,7 +1056,11 @@ malloc_conf_init(void) if (cont) \ continue; \ } -#define CONF_HANDLE_T_U(t, o, n, min, max, clip) \ +#define CONF_MIN_no(um, min) false +#define CONF_MIN_yes(um, min) ((um) < (min)) +#define CONF_MAX_no(um, max) false +#define CONF_MAX_yes(um, max) ((um) > (max)) +#define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \ if (CONF_MATCH(n)) { \ uintmax_t um; \ char *end; \ @@ -1069,15 +1073,19 @@ malloc_conf_init(void) "Invalid conf value", \ k, klen, v, vlen); \ } else if (clip) { \ - if ((min) != 0 && um < (min)) \ + if (CONF_MIN_##check_min(um, \ + (min))) \ o = (t)(min); \ - else if (um > (max)) \ + else if (CONF_MAX_##check_max( \ + um, (max))) \ o = (t)(max); \ else \ o = (t)um; \ } else { \ - if (((min) != 0 && um < (min)) \ - || um > (max)) { \ + if (CONF_MIN_##check_min(um, \ + (min)) || \ + CONF_MAX_##check_max(um, \ + (max))) { \ malloc_conf_error( \ "Out-of-range " \ "conf value", \ @@ -1087,10 +1095,13 @@ malloc_conf_init(void) } \ continue; \ } -#define CONF_HANDLE_UNSIGNED(o, n, min, max, clip) \ - CONF_HANDLE_T_U(unsigned, o, n, min, max, clip) -#define CONF_HANDLE_SIZE_T(o, n, min, max, clip) \ - CONF_HANDLE_T_U(size_t, o, n, min, max, clip) +#define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max, \ + clip) \ + CONF_HANDLE_T_U(unsigned, o, n, min, max, \ + check_min, check_max, clip) +#define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip) \ + CONF_HANDLE_T_U(size_t, o, n, min, max, \ + check_min, check_max, clip) #define CONF_HANDLE_SSIZE_T(o, n, min, max) \ if (CONF_MATCH(n)) { \ long l; \ @@ -1133,7 +1144,7 @@ 
malloc_conf_init(void) */ CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE + LG_SIZE_CLASS_GROUP + (config_fill ? 2 : 1), - (sizeof(size_t) << 3) - 1, true) + (sizeof(size_t) << 3) - 1, yes, yes, true) if (strncmp("dss", k, klen) == 0) { int i; bool match = false; @@ -1159,7 +1170,7 @@ malloc_conf_init(void) continue; } CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1, - UINT_MAX, false) + UINT_MAX, yes, no, false) if (strncmp("purge", k, klen) == 0) { int i; bool match = false; @@ -1230,7 +1241,7 @@ malloc_conf_init(void) continue; } CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine", - 0, SIZE_T_MAX, false) + 0, SIZE_T_MAX, no, no, false) CONF_HANDLE_BOOL(opt_redzone, "redzone", true) CONF_HANDLE_BOOL(opt_zero, "zero", true) } @@ -1267,8 +1278,8 @@ malloc_conf_init(void) CONF_HANDLE_BOOL(opt_prof_thread_active_init, "prof_thread_active_init", true) CONF_HANDLE_SIZE_T(opt_lg_prof_sample, - "lg_prof_sample", 0, - (sizeof(uint64_t) << 3) - 1, true) + "lg_prof_sample", 0, (sizeof(uint64_t) << 3) + - 1, no, yes, true) CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum", true) CONF_HANDLE_SSIZE_T(opt_lg_prof_interval, @@ -1284,7 +1295,14 @@ malloc_conf_init(void) malloc_conf_error("Invalid conf pair", k, klen, v, vlen); #undef CONF_MATCH +#undef CONF_MATCH_VALUE #undef CONF_HANDLE_BOOL +#undef CONF_MIN_no +#undef CONF_MIN_yes +#undef CONF_MAX_no +#undef CONF_MAX_yes +#undef CONF_HANDLE_T_U +#undef CONF_HANDLE_UNSIGNED #undef CONF_HANDLE_SIZE_T #undef CONF_HANDLE_SSIZE_T #undef CONF_HANDLE_CHAR_P @@ -1393,8 +1411,9 @@ malloc_init_hard_recursible(void) ncpus = malloc_ncpus(); -#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \ - && !defined(_WIN32) && !defined(__native_client__)) +#if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \ + && !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \ + !defined(__native_client__)) /* LinuxThreads' pthread_atfork() allocates. 
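The pthread_atfork() registration above is now also guarded by JEMALLOC_HAVE_PTHREAD_ATFORK, so it is only compiled where the function exists. A minimal, self-contained example of the fork protocol such a registration enables, using an illustrative lock rather than jemalloc's actual prefork machinery:

#include <pthread.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Take the lock before fork() so no other thread holds it while the
 * address space is duplicated; release it in both parent and child. */
static void prefork(void)		{ pthread_mutex_lock(&lock); }
static void postfork_parent(void)	{ pthread_mutex_unlock(&lock); }
static void postfork_child(void)	{ pthread_mutex_unlock(&lock); }

int
main(void)
{
	pid_t pid;

	(void)pthread_atfork(prefork, postfork_parent, postfork_child);
	pid = fork();
	if (pid == 0) {
		printf("child inherited a consistent lock\n");
		_exit(0);
	}
	waitpid(pid, NULL, 0);
	return 0;
}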
*/ if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent, jemalloc_postfork_child) != 0) { @@ -1973,8 +1992,8 @@ je_realloc(void *ptr, size_t size) *tsd_thread_deallocatedp_get(tsd) += old_usize; } UTRACE(ptr, size, ret); - JEMALLOC_VALGRIND_REALLOC(true, tsdn, ret, usize, true, ptr, old_usize, - old_rzsize, true, false); + JEMALLOC_VALGRIND_REALLOC(maybe, tsdn, ret, usize, maybe, ptr, + old_usize, old_rzsize, maybe, false); witness_assert_lockless(tsdn); return (ret); } @@ -2400,8 +2419,8 @@ je_rallocx(void *ptr, size_t size, int flags) *tsd_thread_deallocatedp_get(tsd) += old_usize; } UTRACE(ptr, size, p); - JEMALLOC_VALGRIND_REALLOC(true, tsd_tsdn(tsd), p, usize, false, ptr, - old_usize, old_rzsize, false, zero); + JEMALLOC_VALGRIND_REALLOC(maybe, tsd_tsdn(tsd), p, usize, no, ptr, + old_usize, old_rzsize, no, zero); witness_assert_lockless(tsd_tsdn(tsd)); return (p); label_oom: @@ -2543,8 +2562,8 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) *tsd_thread_allocatedp_get(tsd) += usize; *tsd_thread_deallocatedp_get(tsd) += old_usize; } - JEMALLOC_VALGRIND_REALLOC(false, tsd_tsdn(tsd), ptr, usize, false, ptr, - old_usize, old_rzsize, false, zero); + JEMALLOC_VALGRIND_REALLOC(no, tsd_tsdn(tsd), ptr, usize, no, ptr, + old_usize, old_rzsize, no, zero); label_not_resized: UTRACE(ptr, size, ptr); witness_assert_lockless(tsd_tsdn(tsd)); diff --git a/third-party/jemalloc/jemalloc-src/src/pages.c b/third-party/jemalloc/jemalloc-src/src/pages.c index 647952ac3ef6..5f0c9669d2b8 100644 --- a/third-party/jemalloc/jemalloc-src/src/pages.c +++ b/third-party/jemalloc/jemalloc-src/src/pages.c @@ -170,15 +170,16 @@ pages_purge(void *addr, size_t size) #ifdef _WIN32 VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE); unzeroed = true; -#elif defined(JEMALLOC_HAVE_MADVISE) -# ifdef JEMALLOC_PURGE_MADVISE_DONTNEED -# define JEMALLOC_MADV_PURGE MADV_DONTNEED -# define JEMALLOC_MADV_ZEROS true -# elif defined(JEMALLOC_PURGE_MADVISE_FREE) +#elif (defined(JEMALLOC_PURGE_MADVISE_FREE) || \ + defined(JEMALLOC_PURGE_MADVISE_DONTNEED)) +# if defined(JEMALLOC_PURGE_MADVISE_FREE) # define JEMALLOC_MADV_PURGE MADV_FREE # define JEMALLOC_MADV_ZEROS false +# elif defined(JEMALLOC_PURGE_MADVISE_DONTNEED) +# define JEMALLOC_MADV_PURGE MADV_DONTNEED +# define JEMALLOC_MADV_ZEROS true # else -# error "No madvise(2) flag defined for purging unused dirty pages." 
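The reordered branches in pages_purge() prefer MADV_FREE over MADV_DONTNEED; only the latter guarantees that a subsequent read of the purged range sees zero-filled pages, which is what the differing JEMALLOC_MADV_ZEROS values record. A small standalone sketch of that distinction (Linux/BSD only, error handling kept minimal):

#define _DEFAULT_SOURCE
#include <sys/mman.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Returns true when the purged range is known to read back as zeros. */
static bool
purge_leaves_zeros(void *addr, size_t size)
{
#if defined(MADV_FREE)
	(void)madvise(addr, size, MADV_FREE);
	return false;	/* contents are undefined until actually reclaimed */
#elif defined(MADV_DONTNEED)
	return madvise(addr, size, MADV_DONTNEED) == 0;	/* demand-zeroed */
#else
	(void)addr;
	(void)size;
	return false;
#endif
}

int
main(void)
{
	size_t len = (size_t)1 << 20;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	p[0] = 1;
	printf("zeroed after purge: %d\n", purge_leaves_zeros(p, len));
	munmap(p, len);
	return 0;
}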
+# error No madvise(2) flag defined for purging unused dirty pages # endif int err = madvise(addr, size, JEMALLOC_MADV_PURGE); unzeroed = (!JEMALLOC_MADV_ZEROS || err != 0); @@ -191,6 +192,34 @@ pages_purge(void *addr, size_t size) return (unzeroed); } +bool +pages_huge(void *addr, size_t size) +{ + + assert(PAGE_ADDR2BASE(addr) == addr); + assert(PAGE_CEILING(size) == size); + +#ifdef JEMALLOC_THP + return (madvise(addr, size, MADV_HUGEPAGE) != 0); +#else + return (false); +#endif +} + +bool +pages_nohuge(void *addr, size_t size) +{ + + assert(PAGE_ADDR2BASE(addr) == addr); + assert(PAGE_CEILING(size) == size); + +#ifdef JEMALLOC_THP + return (madvise(addr, size, MADV_NOHUGEPAGE) != 0); +#else + return (false); +#endif +} + #ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT static bool os_overcommits_sysctl(void) @@ -219,7 +248,7 @@ os_overcommits_proc(void) char buf[1]; ssize_t nread; -#if defined(JEMALLOC_HAVE_SYSCALL) && defined(SYS_open) +#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open) fd = (int)syscall(SYS_open, "/proc/sys/vm/overcommit_memory", O_RDONLY); #else fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY); @@ -227,13 +256,13 @@ os_overcommits_proc(void) if (fd == -1) return (false); /* Error. */ -#if defined(JEMALLOC_HAVE_SYSCALL) && defined(SYS_read) +#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_read) nread = (ssize_t)syscall(SYS_read, fd, &buf, sizeof(buf)); #else nread = read(fd, &buf, sizeof(buf)); #endif -#if defined(JEMALLOC_HAVE_SYSCALL) && defined(SYS_close) +#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close) syscall(SYS_close, fd); #else close(fd); diff --git a/third-party/jemalloc/jemalloc-src/src/stats.c b/third-party/jemalloc/jemalloc-src/src/stats.c old mode 100644 new mode 100755 index bd8af3999ba9..1360f3bd0012 --- a/third-party/jemalloc/jemalloc-src/src/stats.c +++ b/third-party/jemalloc/jemalloc-src/src/stats.c @@ -3,7 +3,7 @@ #define CTL_GET(n, v, t) do { \ size_t sz = sizeof(t); \ - xmallctl(n, v, &sz, NULL, 0); \ + xmallctl(n, (void *)v, &sz, NULL, 0); \ } while (0) #define CTL_M2_GET(n, i, v, t) do { \ @@ -12,7 +12,7 @@ size_t sz = sizeof(t); \ xmallctlnametomib(n, mib, &miblen); \ mib[2] = (i); \ - xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \ + xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \ } while (0) #define CTL_M2_M4_GET(n, i, j, v, t) do { \ @@ -22,7 +22,7 @@ xmallctlnametomib(n, mib, &miblen); \ mib[2] = (i); \ mib[4] = (j); \ - xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \ + xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \ } while (0) /******************************************************************************/ @@ -647,7 +647,7 @@ stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque, #define OPT_WRITE_BOOL_MUTABLE(n, m, c) { \ bool bv2; \ if (je_mallctl("opt."#n, (void *)&bv, &bsz, NULL, 0) == 0 && \ - je_mallctl(#m, &bv2, &bsz, NULL, 0) == 0) { \ + je_mallctl(#m, &bv2, (void *)&bsz, NULL, 0) == 0) { \ if (json) { \ malloc_cprintf(write_cb, cbopaque, \ "\t\t\t\""#n"\": %s%s\n", bv ? 
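pages_huge() and pages_nohuge() reduce to a single madvise() call when JEMALLOC_THP is configured, and to no-ops otherwise. A condensed sketch of the same toggle, assuming a Linux system whose headers provide the MADV_[NO]HUGEPAGE flags:

#define _DEFAULT_SOURCE
#include <sys/mman.h>
#include <stdbool.h>
#include <stddef.h>

/* Ask the kernel to back, or stop backing, a range with transparent huge
 * pages.  Returns true on error, matching the pages_* convention. */
static bool
range_huge(void *addr, size_t size, bool enable)
{
#if defined(MADV_HUGEPAGE) && defined(MADV_NOHUGEPAGE)
	return madvise(addr, size,
	    enable ? MADV_HUGEPAGE : MADV_NOHUGEPAGE) != 0;
#else
	(void)addr;
	(void)size;
	(void)enable;
	return false;	/* no THP control available; treat as a no-op */
#endif
}

int
main(void)
{
	size_t len = (size_t)2 << 20;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	(void)range_huge(p, len, true);		/* opt in, as fresh chunks do */
	(void)range_huge(p, len, false);	/* opt out, as on first purge */
	munmap(p, len);
	return 0;
}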
"true" : \ @@ -692,7 +692,7 @@ stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque, #define OPT_WRITE_SSIZE_T_MUTABLE(n, m, c) { \ ssize_t ssv2; \ if (je_mallctl("opt."#n, (void *)&ssv, &sssz, NULL, 0) == 0 && \ - je_mallctl(#m, &ssv2, &sssz, NULL, 0) == 0) { \ + je_mallctl(#m, (void *)&ssv2, &sssz, NULL, 0) == 0) { \ if (json) { \ malloc_cprintf(write_cb, cbopaque, \ "\t\t\t\""#n"\": %zd%s\n", ssv, (c)); \ @@ -1084,7 +1084,8 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque, * */ epoch = 1; u64sz = sizeof(uint64_t); - err = je_mallctl("epoch", &epoch, &u64sz, &epoch, sizeof(uint64_t)); + err = je_mallctl("epoch", (void *)&epoch, &u64sz, (void *)&epoch, + sizeof(uint64_t)); if (err != 0) { if (err == EAGAIN) { malloc_write(": Memory allocation failure in " diff --git a/third-party/jemalloc/jemalloc-src/src/tcache.c b/third-party/jemalloc/jemalloc-src/src/tcache.c old mode 100644 new mode 100755 index f97aa420ca49..21540ff46e75 --- a/third-party/jemalloc/jemalloc-src/src/tcache.c +++ b/third-party/jemalloc/jemalloc-src/src/tcache.c @@ -517,12 +517,12 @@ tcache_boot(tsdn_t *tsdn) * If necessary, clamp opt_lg_tcache_max, now that large_maxclass is * known. */ - if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS) + if (opt_lg_tcache_max < 0 || (ZU(1) << opt_lg_tcache_max) < SMALL_MAXCLASS) tcache_maxclass = SMALL_MAXCLASS; - else if ((1U << opt_lg_tcache_max) > large_maxclass) + else if ((ZU(1) << opt_lg_tcache_max) > large_maxclass) tcache_maxclass = large_maxclass; else - tcache_maxclass = (1U << opt_lg_tcache_max); + tcache_maxclass = (ZU(1) << opt_lg_tcache_max); nhbins = size2index(tcache_maxclass) + 1; diff --git a/third-party/jemalloc/jemalloc-src/src/util.c b/third-party/jemalloc/jemalloc-src/src/util.c old mode 100644 new mode 100755 index 79052674f26d..dd8c2363008b --- a/third-party/jemalloc/jemalloc-src/src/util.c +++ b/third-party/jemalloc/jemalloc-src/src/util.c @@ -49,7 +49,7 @@ static void wrtmessage(void *cbopaque, const char *s) { -#if defined(JEMALLOC_HAVE_SYSCALL) && defined(SYS_write) +#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_write) /* * Use syscall(2) rather than write(2) when possible in order to avoid * the possibility of memory allocation within libc. This is necessary @@ -200,7 +200,7 @@ malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) p++; } if (neg) - ret = -ret; + ret = (uintmax_t)(-((intmax_t)ret)); if (p == ns) { /* No conversion performed. 
*/ diff --git a/third-party/jemalloc/jemalloc-src/test/integration/MALLOCX_ARENA.c b/third-party/jemalloc/jemalloc-src/test/integration/MALLOCX_ARENA.c old mode 100644 new mode 100755 index 30c203ae657a..910a096fd991 --- a/third-party/jemalloc/jemalloc-src/test/integration/MALLOCX_ARENA.c +++ b/third-party/jemalloc/jemalloc-src/test/integration/MALLOCX_ARENA.c @@ -19,8 +19,8 @@ thd_start(void *arg) size_t sz; sz = sizeof(arena_ind); - assert_d_eq(mallctl("arenas.extend", &arena_ind, &sz, NULL, 0), 0, - "Error in arenas.extend"); + assert_d_eq(mallctl("arenas.extend", (void *)&arena_ind, &sz, NULL, 0), + 0, "Error in arenas.extend"); if (thread_ind % 4 != 3) { size_t mib[3]; diff --git a/third-party/jemalloc/jemalloc-src/test/integration/allocated.c b/third-party/jemalloc/jemalloc-src/test/integration/allocated.c old mode 100644 new mode 100755 index 3630e80ce252..6ce145b3eb3e --- a/third-party/jemalloc/jemalloc-src/test/integration/allocated.c +++ b/third-party/jemalloc/jemalloc-src/test/integration/allocated.c @@ -18,14 +18,14 @@ thd_start(void *arg) size_t sz, usize; sz = sizeof(a0); - if ((err = mallctl("thread.allocated", &a0, &sz, NULL, 0))) { + if ((err = mallctl("thread.allocated", (void *)&a0, &sz, NULL, 0))) { if (err == ENOENT) goto label_ENOENT; test_fail("%s(): Error in mallctl(): %s", __func__, strerror(err)); } sz = sizeof(ap0); - if ((err = mallctl("thread.allocatedp", &ap0, &sz, NULL, 0))) { + if ((err = mallctl("thread.allocatedp", (void *)&ap0, &sz, NULL, 0))) { if (err == ENOENT) goto label_ENOENT; test_fail("%s(): Error in mallctl(): %s", __func__, @@ -36,14 +36,15 @@ thd_start(void *arg) "storage"); sz = sizeof(d0); - if ((err = mallctl("thread.deallocated", &d0, &sz, NULL, 0))) { + if ((err = mallctl("thread.deallocated", (void *)&d0, &sz, NULL, 0))) { if (err == ENOENT) goto label_ENOENT; test_fail("%s(): Error in mallctl(): %s", __func__, strerror(err)); } sz = sizeof(dp0); - if ((err = mallctl("thread.deallocatedp", &dp0, &sz, NULL, 0))) { + if ((err = mallctl("thread.deallocatedp", (void *)&dp0, &sz, NULL, + 0))) { if (err == ENOENT) goto label_ENOENT; test_fail("%s(): Error in mallctl(): %s", __func__, @@ -57,9 +58,9 @@ thd_start(void *arg) assert_ptr_not_null(p, "Unexpected malloc() error"); sz = sizeof(a1); - mallctl("thread.allocated", &a1, &sz, NULL, 0); + mallctl("thread.allocated", (void *)&a1, &sz, NULL, 0); sz = sizeof(ap1); - mallctl("thread.allocatedp", &ap1, &sz, NULL, 0); + mallctl("thread.allocatedp", (void *)&ap1, &sz, NULL, 0); assert_u64_eq(*ap1, a1, "Dereferenced \"thread.allocatedp\" value should equal " "\"thread.allocated\" value"); @@ -74,9 +75,9 @@ thd_start(void *arg) free(p); sz = sizeof(d1); - mallctl("thread.deallocated", &d1, &sz, NULL, 0); + mallctl("thread.deallocated", (void *)&d1, &sz, NULL, 0); sz = sizeof(dp1); - mallctl("thread.deallocatedp", &dp1, &sz, NULL, 0); + mallctl("thread.deallocatedp", (void *)&dp1, &sz, NULL, 0); assert_u64_eq(*dp1, d1, "Dereferenced \"thread.deallocatedp\" value should equal " "\"thread.deallocated\" value"); diff --git a/third-party/jemalloc/jemalloc-src/test/integration/chunk.c b/third-party/jemalloc/jemalloc-src/test/integration/chunk.c index ff9bf967aec2..94cf0025afbc 100644 --- a/third-party/jemalloc/jemalloc-src/test/integration/chunk.c +++ b/third-party/jemalloc/jemalloc-src/test/integration/chunk.c @@ -137,8 +137,8 @@ TEST_BEGIN(test_chunk) bool xallocx_success_a, xallocx_success_b, xallocx_success_c; sz = sizeof(unsigned); - assert_d_eq(mallctl("arenas.extend", &arena_ind, &sz, NULL, 0), 0, - 
"Unexpected mallctl() failure"); + assert_d_eq(mallctl("arenas.extend", (void *)&arena_ind, &sz, NULL, 0), + 0, "Unexpected mallctl() failure"); flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE; /* Install custom chunk hooks. */ @@ -148,8 +148,9 @@ TEST_BEGIN(test_chunk) hooks_mib[1] = (size_t)arena_ind; old_size = sizeof(chunk_hooks_t); new_size = sizeof(chunk_hooks_t); - assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, &old_hooks, &old_size, - &new_hooks, new_size), 0, "Unexpected chunk_hooks error"); + assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, (void *)&old_hooks, + &old_size, (void *)&new_hooks, new_size), 0, + "Unexpected chunk_hooks error"); orig_hooks = old_hooks; assert_ptr_ne(old_hooks.alloc, chunk_alloc, "Unexpected alloc error"); assert_ptr_ne(old_hooks.dalloc, chunk_dalloc, @@ -164,18 +165,18 @@ TEST_BEGIN(test_chunk) /* Get large size classes. */ sz = sizeof(size_t); - assert_d_eq(mallctl("arenas.lrun.0.size", &large0, &sz, NULL, 0), 0, - "Unexpected arenas.lrun.0.size failure"); - assert_d_eq(mallctl("arenas.lrun.1.size", &large1, &sz, NULL, 0), 0, - "Unexpected arenas.lrun.1.size failure"); + assert_d_eq(mallctl("arenas.lrun.0.size", (void *)&large0, &sz, NULL, + 0), 0, "Unexpected arenas.lrun.0.size failure"); + assert_d_eq(mallctl("arenas.lrun.1.size", (void *)&large1, &sz, NULL, + 0), 0, "Unexpected arenas.lrun.1.size failure"); /* Get huge size classes. */ - assert_d_eq(mallctl("arenas.hchunk.0.size", &huge0, &sz, NULL, 0), 0, - "Unexpected arenas.hchunk.0.size failure"); - assert_d_eq(mallctl("arenas.hchunk.1.size", &huge1, &sz, NULL, 0), 0, - "Unexpected arenas.hchunk.1.size failure"); - assert_d_eq(mallctl("arenas.hchunk.2.size", &huge2, &sz, NULL, 0), 0, - "Unexpected arenas.hchunk.2.size failure"); + assert_d_eq(mallctl("arenas.hchunk.0.size", (void *)&huge0, &sz, NULL, + 0), 0, "Unexpected arenas.hchunk.0.size failure"); + assert_d_eq(mallctl("arenas.hchunk.1.size", (void *)&huge1, &sz, NULL, + 0), 0, "Unexpected arenas.hchunk.1.size failure"); + assert_d_eq(mallctl("arenas.hchunk.2.size", (void *)&huge2, &sz, NULL, + 0), 0, "Unexpected arenas.hchunk.2.size failure"); /* Test dalloc/decommit/purge cascade. */ purge_miblen = sizeof(purge_mib)/sizeof(size_t); @@ -265,9 +266,9 @@ TEST_BEGIN(test_chunk) /* Restore chunk hooks. */ assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, NULL, NULL, - &old_hooks, new_size), 0, "Unexpected chunk_hooks error"); - assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, &old_hooks, &old_size, - NULL, 0), 0, "Unexpected chunk_hooks error"); + (void *)&old_hooks, new_size), 0, "Unexpected chunk_hooks error"); + assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, (void *)&old_hooks, + &old_size, NULL, 0), 0, "Unexpected chunk_hooks error"); assert_ptr_eq(old_hooks.alloc, orig_hooks.alloc, "Unexpected alloc error"); assert_ptr_eq(old_hooks.dalloc, orig_hooks.dalloc, diff --git a/third-party/jemalloc/jemalloc-src/test/integration/mallocx.c b/third-party/jemalloc/jemalloc-src/test/integration/mallocx.c old mode 100644 new mode 100755 index 43b76ebac987..d709eb301594 --- a/third-party/jemalloc/jemalloc-src/test/integration/mallocx.c +++ b/third-party/jemalloc/jemalloc-src/test/integration/mallocx.c @@ -11,7 +11,7 @@ get_nsizes_impl(const char *cmd) size_t z; z = sizeof(unsigned); - assert_d_eq(mallctl(cmd, &ret, &z, NULL, 0), 0, + assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0, "Unexpected mallctl(\"%s\", ...) 
failure", cmd); return (ret); @@ -37,7 +37,7 @@ get_size_impl(const char *cmd, size_t ind) 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd); mib[2] = ind; z = sizeof(size_t); - assert_d_eq(mallctlbymib(mib, miblen, &ret, &z, NULL, 0), + assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0), 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind); return (ret); diff --git a/third-party/jemalloc/jemalloc-src/test/integration/overflow.c b/third-party/jemalloc/jemalloc-src/test/integration/overflow.c old mode 100644 new mode 100755 index 303d9b2d3e00..84a35652cee2 --- a/third-party/jemalloc/jemalloc-src/test/integration/overflow.c +++ b/third-party/jemalloc/jemalloc-src/test/integration/overflow.c @@ -8,8 +8,8 @@ TEST_BEGIN(test_overflow) void *p; sz = sizeof(unsigned); - assert_d_eq(mallctl("arenas.nhchunks", &nhchunks, &sz, NULL, 0), 0, - "Unexpected mallctl() error"); + assert_d_eq(mallctl("arenas.nhchunks", (void *)&nhchunks, &sz, NULL, 0), + 0, "Unexpected mallctl() error"); miblen = sizeof(mib) / sizeof(size_t); assert_d_eq(mallctlnametomib("arenas.hchunk.0.size", mib, &miblen), 0, @@ -17,8 +17,8 @@ TEST_BEGIN(test_overflow) mib[2] = nhchunks - 1; sz = sizeof(size_t); - assert_d_eq(mallctlbymib(mib, miblen, &max_size_class, &sz, NULL, 0), 0, - "Unexpected mallctlbymib() error"); + assert_d_eq(mallctlbymib(mib, miblen, (void *)&max_size_class, &sz, + NULL, 0), 0, "Unexpected mallctlbymib() error"); assert_ptr_null(malloc(max_size_class + 1), "Expected OOM due to over-sized allocation request"); diff --git a/third-party/jemalloc/jemalloc-src/test/integration/rallocx.c b/third-party/jemalloc/jemalloc-src/test/integration/rallocx.c old mode 100644 new mode 100755 index 66ad8660a46b..506bf1c90528 --- a/third-party/jemalloc/jemalloc-src/test/integration/rallocx.c +++ b/third-party/jemalloc/jemalloc-src/test/integration/rallocx.c @@ -7,7 +7,7 @@ get_nsizes_impl(const char *cmd) size_t z; z = sizeof(unsigned); - assert_d_eq(mallctl(cmd, &ret, &z, NULL, 0), 0, + assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0, "Unexpected mallctl(\"%s\", ...) failure", cmd); return (ret); @@ -33,7 +33,7 @@ get_size_impl(const char *cmd, size_t ind) 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd); mib[2] = ind; z = sizeof(size_t); - assert_d_eq(mallctlbymib(mib, miblen, &ret, &z, NULL, 0), + assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0), 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) 
failure", cmd, ind); return (ret); diff --git a/third-party/jemalloc/jemalloc-src/test/integration/sdallocx.c b/third-party/jemalloc/jemalloc-src/test/integration/sdallocx.c index b84817d767aa..f92e0589cf7a 100644 --- a/third-party/jemalloc/jemalloc-src/test/integration/sdallocx.c +++ b/third-party/jemalloc/jemalloc-src/test/integration/sdallocx.c @@ -1,7 +1,7 @@ #include "test/jemalloc_test.h" -#define MAXALIGN (((size_t)1) << 25) -#define NITER 4 +#define MAXALIGN (((size_t)1) << 22) +#define NITER 3 TEST_BEGIN(test_basic) { diff --git a/third-party/jemalloc/jemalloc-src/test/integration/thread_arena.c b/third-party/jemalloc/jemalloc-src/test/integration/thread_arena.c old mode 100644 new mode 100755 index 67be53513350..7a35a6351bf3 --- a/third-party/jemalloc/jemalloc-src/test/integration/thread_arena.c +++ b/third-party/jemalloc/jemalloc-src/test/integration/thread_arena.c @@ -16,8 +16,8 @@ thd_start(void *arg) free(p); size = sizeof(arena_ind); - if ((err = mallctl("thread.arena", &arena_ind, &size, &main_arena_ind, - sizeof(main_arena_ind)))) { + if ((err = mallctl("thread.arena", (void *)&arena_ind, &size, + (void *)&main_arena_ind, sizeof(main_arena_ind)))) { char buf[BUFERROR_BUF]; buferror(err, buf, sizeof(buf)); @@ -25,7 +25,8 @@ thd_start(void *arg) } size = sizeof(arena_ind); - if ((err = mallctl("thread.arena", &arena_ind, &size, NULL, 0))) { + if ((err = mallctl("thread.arena", (void *)&arena_ind, &size, NULL, + 0))) { char buf[BUFERROR_BUF]; buferror(err, buf, sizeof(buf)); @@ -50,7 +51,8 @@ TEST_BEGIN(test_thread_arena) assert_ptr_not_null(p, "Error in malloc()"); size = sizeof(arena_ind); - if ((err = mallctl("thread.arena", &arena_ind, &size, NULL, 0))) { + if ((err = mallctl("thread.arena", (void *)&arena_ind, &size, NULL, + 0))) { char buf[BUFERROR_BUF]; buferror(err, buf, sizeof(buf)); diff --git a/third-party/jemalloc/jemalloc-src/test/integration/thread_tcache_enabled.c b/third-party/jemalloc/jemalloc-src/test/integration/thread_tcache_enabled.c old mode 100644 new mode 100755 index f4e89c682a7b..2c2825e19342 --- a/third-party/jemalloc/jemalloc-src/test/integration/thread_tcache_enabled.c +++ b/third-party/jemalloc/jemalloc-src/test/integration/thread_tcache_enabled.c @@ -16,7 +16,8 @@ thd_start(void *arg) bool e0, e1; sz = sizeof(bool); - if ((err = mallctl("thread.tcache.enabled", &e0, &sz, NULL, 0))) { + if ((err = mallctl("thread.tcache.enabled", (void *)&e0, &sz, NULL, + 0))) { if (err == ENOENT) { assert_false(config_tcache, "ENOENT should only be returned if tcache is " @@ -27,53 +28,53 @@ thd_start(void *arg) if (e0) { e1 = false; - assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), - 0, "Unexpected mallctl() error"); + assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, + (void *)&e1, sz), 0, "Unexpected mallctl() error"); assert_true(e0, "tcache should be enabled"); } e1 = true; - assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, - "Unexpected mallctl() error"); + assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, + (void *)&e1, sz), 0, "Unexpected mallctl() error"); assert_false(e0, "tcache should be disabled"); e1 = true; - assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, - "Unexpected mallctl() error"); + assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, + (void *)&e1, sz), 0, "Unexpected mallctl() error"); assert_true(e0, "tcache should be enabled"); e1 = false; - assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, - "Unexpected mallctl() error"); + 
assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, + (void *)&e1, sz), 0, "Unexpected mallctl() error"); assert_true(e0, "tcache should be enabled"); e1 = false; - assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, - "Unexpected mallctl() error"); + assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, + (void *)&e1, sz), 0, "Unexpected mallctl() error"); assert_false(e0, "tcache should be disabled"); free(malloc(1)); e1 = true; - assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, - "Unexpected mallctl() error"); + assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, + (void *)&e1, sz), 0, "Unexpected mallctl() error"); assert_false(e0, "tcache should be disabled"); free(malloc(1)); e1 = true; - assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, - "Unexpected mallctl() error"); + assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, + (void *)&e1, sz), 0, "Unexpected mallctl() error"); assert_true(e0, "tcache should be enabled"); free(malloc(1)); e1 = false; - assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, - "Unexpected mallctl() error"); + assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, + (void *)&e1, sz), 0, "Unexpected mallctl() error"); assert_true(e0, "tcache should be enabled"); free(malloc(1)); e1 = false; - assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, - "Unexpected mallctl() error"); + assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, + (void *)&e1, sz), 0, "Unexpected mallctl() error"); assert_false(e0, "tcache should be disabled"); free(malloc(1)); diff --git a/third-party/jemalloc/jemalloc-src/test/integration/xallocx.c b/third-party/jemalloc/jemalloc-src/test/integration/xallocx.c old mode 100644 new mode 100755 index ad292bb562ea..67e0a0e71686 --- a/third-party/jemalloc/jemalloc-src/test/integration/xallocx.c +++ b/third-party/jemalloc/jemalloc-src/test/integration/xallocx.c @@ -16,8 +16,8 @@ arena_ind(void) if (ind == 0) { size_t sz = sizeof(ind); - assert_d_eq(mallctl("arenas.extend", &ind, &sz, NULL, 0), 0, - "Unexpected mallctl failure creating arena"); + assert_d_eq(mallctl("arenas.extend", (void *)&ind, &sz, NULL, + 0), 0, "Unexpected mallctl failure creating arena"); } return (ind); @@ -78,7 +78,7 @@ get_nsizes_impl(const char *cmd) size_t z; z = sizeof(unsigned); - assert_d_eq(mallctl(cmd, &ret, &z, NULL, 0), 0, + assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0, "Unexpected mallctl(\"%s\", ...) failure", cmd); return (ret); @@ -118,7 +118,7 @@ get_size_impl(const char *cmd, size_t ind) 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd); mib[2] = ind; z = sizeof(size_t); - assert_d_eq(mallctlbymib(mib, miblen, &ret, &z, NULL, 0), + assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0), 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind); return (ret); diff --git a/third-party/jemalloc/jemalloc-src/test/unit/arena_reset.c b/third-party/jemalloc/jemalloc-src/test/unit/arena_reset.c old mode 100644 new mode 100755 index 8ba36c21f802..adf9baa5de4e --- a/third-party/jemalloc/jemalloc-src/test/unit/arena_reset.c +++ b/third-party/jemalloc/jemalloc-src/test/unit/arena_reset.c @@ -11,7 +11,7 @@ get_nsizes_impl(const char *cmd) size_t z; z = sizeof(unsigned); - assert_d_eq(mallctl(cmd, &ret, &z, NULL, 0), 0, + assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0, "Unexpected mallctl(\"%s\", ...) 
failure", cmd); return (ret); @@ -51,7 +51,7 @@ get_size_impl(const char *cmd, size_t ind) 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd); mib[2] = ind; z = sizeof(size_t); - assert_d_eq(mallctlbymib(mib, miblen, &ret, &z, NULL, 0), + assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0), 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind); return (ret); @@ -92,8 +92,8 @@ TEST_BEGIN(test_arena_reset) && unlikely(opt_quarantine))); sz = sizeof(unsigned); - assert_d_eq(mallctl("arenas.extend", &arena_ind, &sz, NULL, 0), 0, - "Unexpected mallctl() failure"); + assert_d_eq(mallctl("arenas.extend", (void *)&arena_ind, &sz, NULL, 0), + 0, "Unexpected mallctl() failure"); flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE; diff --git a/third-party/jemalloc/jemalloc-src/test/unit/decay.c b/third-party/jemalloc/jemalloc-src/test/unit/decay.c old mode 100644 new mode 100755 index e169ae24e095..5af8f8074c68 --- a/third-party/jemalloc/jemalloc-src/test/unit/decay.c +++ b/third-party/jemalloc/jemalloc-src/test/unit/decay.c @@ -40,10 +40,10 @@ TEST_BEGIN(test_decay_ticks) "Unexpected failure getting decay ticker"); sz = sizeof(size_t); - assert_d_eq(mallctl("arenas.hchunk.0.size", &huge0, &sz, NULL, 0), 0, - "Unexpected mallctl failure"); - assert_d_eq(mallctl("arenas.lrun.0.size", &large0, &sz, NULL, 0), 0, - "Unexpected mallctl failure"); + assert_d_eq(mallctl("arenas.hchunk.0.size", (void *)&huge0, &sz, NULL, + 0), 0, "Unexpected mallctl failure"); + assert_d_eq(mallctl("arenas.lrun.0.size", (void *)&large0, &sz, NULL, + 0), 0, "Unexpected mallctl failure"); /* * Test the standard APIs using a huge size class, since we can't @@ -175,8 +175,8 @@ TEST_BEGIN(test_decay_ticks) tcache_sizes[1] = 1; sz = sizeof(unsigned); - assert_d_eq(mallctl("tcache.create", &tcache_ind, &sz, NULL, 0), - 0, "Unexpected mallctl failure"); + assert_d_eq(mallctl("tcache.create", (void *)&tcache_ind, &sz, + NULL, 0), 0, "Unexpected mallctl failure"); for (i = 0; i < sizeof(tcache_sizes) / sizeof(size_t); i++) { sz = tcache_sizes[i]; @@ -193,7 +193,7 @@ TEST_BEGIN(test_decay_ticks) dallocx(p, MALLOCX_TCACHE(tcache_ind)); tick0 = ticker_read(decay_ticker); assert_d_eq(mallctl("tcache.flush", NULL, NULL, - &tcache_ind, sizeof(unsigned)), 0, + (void *)&tcache_ind, sizeof(unsigned)), 0, "Unexpected mallctl failure"); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, @@ -228,22 +228,22 @@ TEST_BEGIN(test_decay_ticker) size_t tcache_max; sz = sizeof(size_t); - assert_d_eq(mallctl("arenas.tcache_max", &tcache_max, &sz, NULL, - 0), 0, "Unexpected mallctl failure"); + assert_d_eq(mallctl("arenas.tcache_max", (void *)&tcache_max, + &sz, NULL, 0), 0, "Unexpected mallctl failure"); large = nallocx(tcache_max + 1, flags); } else { sz = sizeof(size_t); - assert_d_eq(mallctl("arenas.lrun.0.size", &large, &sz, NULL, 0), - 0, "Unexpected mallctl failure"); + assert_d_eq(mallctl("arenas.lrun.0.size", (void *)&large, &sz, + NULL, 0), 0, "Unexpected mallctl failure"); } assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, "Unexpected mallctl failure"); - assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(uint64_t)), 0, - "Unexpected mallctl failure"); + assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, + sizeof(uint64_t)), 0, "Unexpected mallctl failure"); sz = sizeof(uint64_t); - assert_d_eq(mallctl("stats.arenas.0.npurge", &npurge0, &sz, NULL, 0), - config_stats ? 
0 : ENOENT, "Unexpected mallctl result"); + assert_d_eq(mallctl("stats.arenas.0.npurge", (void *)&npurge0, &sz, + NULL, 0), config_stats ? 0 : ENOENT, "Unexpected mallctl result"); for (i = 0; i < NPS; i++) { ps[i] = mallocx(large, flags); @@ -283,11 +283,11 @@ TEST_BEGIN(test_decay_ticker) assert_ptr_not_null(p, "Unexpected mallocx() failure"); dallocx(p, flags); } - assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, + assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(uint64_t)), 0, "Unexpected mallctl failure"); sz = sizeof(uint64_t); - assert_d_eq(mallctl("stats.arenas.0.npurge", &npurge1, &sz, - NULL, 0), config_stats ? 0 : ENOENT, + assert_d_eq(mallctl("stats.arenas.0.npurge", (void *)&npurge1, + &sz, NULL, 0), config_stats ? 0 : ENOENT, "Unexpected mallctl result"); nstime_update(&time); @@ -313,16 +313,16 @@ TEST_BEGIN(test_decay_nonmonotonic) test_skip_if(opt_purge != purge_mode_decay); sz = sizeof(size_t); - assert_d_eq(mallctl("arenas.lrun.0.size", &large0, &sz, NULL, 0), 0, - "Unexpected mallctl failure"); + assert_d_eq(mallctl("arenas.lrun.0.size", (void *)&large0, &sz, NULL, + 0), 0, "Unexpected mallctl failure"); assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, "Unexpected mallctl failure"); - assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(uint64_t)), 0, - "Unexpected mallctl failure"); + assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, + sizeof(uint64_t)), 0, "Unexpected mallctl failure"); sz = sizeof(uint64_t); - assert_d_eq(mallctl("stats.arenas.0.npurge", &npurge0, &sz, NULL, 0), - config_stats ? 0 : ENOENT, "Unexpected mallctl result"); + assert_d_eq(mallctl("stats.arenas.0.npurge", (void *)&npurge0, &sz, + NULL, 0), config_stats ? 0 : ENOENT, "Unexpected mallctl result"); nupdates_mock = 0; nstime_init(&time_mock, 0); @@ -348,11 +348,11 @@ TEST_BEGIN(test_decay_nonmonotonic) "Expected nstime_update() to be called"); } - assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(uint64_t)), 0, - "Unexpected mallctl failure"); + assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, + sizeof(uint64_t)), 0, "Unexpected mallctl failure"); sz = sizeof(uint64_t); - assert_d_eq(mallctl("stats.arenas.0.npurge", &npurge1, &sz, NULL, 0), - config_stats ? 0 : ENOENT, "Unexpected mallctl result"); + assert_d_eq(mallctl("stats.arenas.0.npurge", (void *)&npurge1, &sz, + NULL, 0), config_stats ? 
0 : ENOENT, "Unexpected mallctl result"); if (config_stats) assert_u64_eq(npurge0, npurge1, "Unexpected purging occurred"); diff --git a/third-party/jemalloc/jemalloc-src/test/unit/mallctl.c b/third-party/jemalloc/jemalloc-src/test/unit/mallctl.c old mode 100644 new mode 100755 index 69f8c20c1716..2353c92c1faf --- a/third-party/jemalloc/jemalloc-src/test/unit/mallctl.c +++ b/third-party/jemalloc/jemalloc-src/test/unit/mallctl.c @@ -12,16 +12,18 @@ TEST_BEGIN(test_mallctl_errors) EPERM, "mallctl() should return EPERM on attempt to write " "read-only value"); - assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)-1), - EINVAL, "mallctl() should return EINVAL for input size mismatch"); - assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)+1), - EINVAL, "mallctl() should return EINVAL for input size mismatch"); + assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, + sizeof(epoch)-1), EINVAL, + "mallctl() should return EINVAL for input size mismatch"); + assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, + sizeof(epoch)+1), EINVAL, + "mallctl() should return EINVAL for input size mismatch"); sz = sizeof(epoch)-1; - assert_d_eq(mallctl("epoch", &epoch, &sz, NULL, 0), EINVAL, + assert_d_eq(mallctl("epoch", (void *)&epoch, &sz, NULL, 0), EINVAL, "mallctl() should return EINVAL for output size mismatch"); sz = sizeof(epoch)+1; - assert_d_eq(mallctl("epoch", &epoch, &sz, NULL, 0), EINVAL, + assert_d_eq(mallctl("epoch", (void *)&epoch, &sz, NULL, 0), EINVAL, "mallctl() should return EINVAL for output size mismatch"); } TEST_END @@ -56,18 +58,20 @@ TEST_BEGIN(test_mallctlbymib_errors) assert_d_eq(mallctlnametomib("epoch", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); - assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &epoch, + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&epoch, sizeof(epoch)-1), EINVAL, "mallctlbymib() should return EINVAL for input size mismatch"); - assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &epoch, + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&epoch, sizeof(epoch)+1), EINVAL, "mallctlbymib() should return EINVAL for input size mismatch"); sz = sizeof(epoch)-1; - assert_d_eq(mallctlbymib(mib, miblen, &epoch, &sz, NULL, 0), EINVAL, + assert_d_eq(mallctlbymib(mib, miblen, (void *)&epoch, &sz, NULL, 0), + EINVAL, "mallctlbymib() should return EINVAL for output size mismatch"); sz = sizeof(epoch)+1; - assert_d_eq(mallctlbymib(mib, miblen, &epoch, &sz, NULL, 0), EINVAL, + assert_d_eq(mallctlbymib(mib, miblen, (void *)&epoch, &sz, NULL, 0), + EINVAL, "mallctlbymib() should return EINVAL for output size mismatch"); } TEST_END @@ -83,18 +87,19 @@ TEST_BEGIN(test_mallctl_read_write) assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size"); /* Read. */ - assert_d_eq(mallctl("epoch", &old_epoch, &sz, NULL, 0), 0, + assert_d_eq(mallctl("epoch", (void *)&old_epoch, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size"); /* Write. */ - assert_d_eq(mallctl("epoch", NULL, NULL, &new_epoch, sizeof(new_epoch)), - 0, "Unexpected mallctl() failure"); + assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&new_epoch, + sizeof(new_epoch)), 0, "Unexpected mallctl() failure"); assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size"); /* Read+write. 
*/ - assert_d_eq(mallctl("epoch", &old_epoch, &sz, &new_epoch, - sizeof(new_epoch)), 0, "Unexpected mallctl() failure"); + assert_d_eq(mallctl("epoch", (void *)&old_epoch, &sz, + (void *)&new_epoch, sizeof(new_epoch)), 0, + "Unexpected mallctl() failure"); assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size"); } TEST_END @@ -120,8 +125,8 @@ TEST_BEGIN(test_mallctl_config) #define TEST_MALLCTL_CONFIG(config, t) do { \ t oldval; \ size_t sz = sizeof(oldval); \ - assert_d_eq(mallctl("config."#config, &oldval, &sz, NULL, 0), \ - 0, "Unexpected mallctl() failure"); \ + assert_d_eq(mallctl("config."#config, (void *)&oldval, &sz, \ + NULL, 0), 0, "Unexpected mallctl() failure"); \ assert_b_eq(oldval, config_##config, "Incorrect config value"); \ assert_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \ } while (0) @@ -154,7 +159,8 @@ TEST_BEGIN(test_mallctl_opt) t oldval; \ size_t sz = sizeof(oldval); \ int expected = config_##config ? 0 : ENOENT; \ - int result = mallctl("opt."#opt, &oldval, &sz, NULL, 0); \ + int result = mallctl("opt."#opt, (void *)&oldval, &sz, NULL, \ + 0); \ assert_d_eq(result, expected, \ "Unexpected mallctl() result for opt."#opt); \ assert_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \ @@ -197,7 +203,7 @@ TEST_BEGIN(test_manpage_example) size_t len, miblen; len = sizeof(nbins); - assert_d_eq(mallctl("arenas.nbins", &nbins, &len, NULL, 0), 0, + assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &len, NULL, 0), 0, "Unexpected mallctl() failure"); miblen = 4; @@ -208,8 +214,8 @@ TEST_BEGIN(test_manpage_example) mib[2] = i; len = sizeof(bin_size); - assert_d_eq(mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0), - 0, "Unexpected mallctlbymib() failure"); + assert_d_eq(mallctlbymib(mib, miblen, (void *)&bin_size, &len, + NULL, 0), 0, "Unexpected mallctlbymib() failure"); /* Do something with bin_size... */ } } @@ -258,25 +264,25 @@ TEST_BEGIN(test_tcache) /* Create tcaches. */ for (i = 0; i < NTCACHES; i++) { sz = sizeof(unsigned); - assert_d_eq(mallctl("tcache.create", &tis[i], &sz, NULL, 0), 0, - "Unexpected mallctl() failure, i=%u", i); + assert_d_eq(mallctl("tcache.create", (void *)&tis[i], &sz, NULL, + 0), 0, "Unexpected mallctl() failure, i=%u", i); } /* Exercise tcache ID recycling. */ for (i = 0; i < NTCACHES; i++) { - assert_d_eq(mallctl("tcache.destroy", NULL, NULL, &tis[i], - sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u", - i); + assert_d_eq(mallctl("tcache.destroy", NULL, NULL, + (void *)&tis[i], sizeof(unsigned)), 0, + "Unexpected mallctl() failure, i=%u", i); } for (i = 0; i < NTCACHES; i++) { sz = sizeof(unsigned); - assert_d_eq(mallctl("tcache.create", &tis[i], &sz, NULL, 0), 0, - "Unexpected mallctl() failure, i=%u", i); + assert_d_eq(mallctl("tcache.create", (void *)&tis[i], &sz, NULL, + 0), 0, "Unexpected mallctl() failure, i=%u", i); } /* Flush empty tcaches. */ for (i = 0; i < NTCACHES; i++) { - assert_d_eq(mallctl("tcache.flush", NULL, NULL, &tis[i], + assert_d_eq(mallctl("tcache.flush", NULL, NULL, (void *)&tis[i], sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u", i); } @@ -321,16 +327,16 @@ TEST_BEGIN(test_tcache) /* Flush some non-empty tcaches. */ for (i = 0; i < NTCACHES/2; i++) { - assert_d_eq(mallctl("tcache.flush", NULL, NULL, &tis[i], + assert_d_eq(mallctl("tcache.flush", NULL, NULL, (void *)&tis[i], sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u", i); } /* Destroy tcaches. 
*/ for (i = 0; i < NTCACHES; i++) { - assert_d_eq(mallctl("tcache.destroy", NULL, NULL, &tis[i], - sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u", - i); + assert_d_eq(mallctl("tcache.destroy", NULL, NULL, + (void *)&tis[i], sizeof(unsigned)), 0, + "Unexpected mallctl() failure, i=%u", i); } } TEST_END @@ -340,15 +346,17 @@ TEST_BEGIN(test_thread_arena) unsigned arena_old, arena_new, narenas; size_t sz = sizeof(unsigned); - assert_d_eq(mallctl("arenas.narenas", &narenas, &sz, NULL, 0), 0, - "Unexpected mallctl() failure"); + assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0), + 0, "Unexpected mallctl() failure"); assert_u_eq(narenas, opt_narenas, "Number of arenas incorrect"); arena_new = narenas - 1; - assert_d_eq(mallctl("thread.arena", &arena_old, &sz, &arena_new, - sizeof(unsigned)), 0, "Unexpected mallctl() failure"); + assert_d_eq(mallctl("thread.arena", (void *)&arena_old, &sz, + (void *)&arena_new, sizeof(unsigned)), 0, + "Unexpected mallctl() failure"); arena_new = 0; - assert_d_eq(mallctl("thread.arena", &arena_old, &sz, &arena_new, - sizeof(unsigned)), 0, "Unexpected mallctl() failure"); + assert_d_eq(mallctl("thread.arena", (void *)&arena_old, &sz, + (void *)&arena_new, sizeof(unsigned)), 0, + "Unexpected mallctl() failure"); } TEST_END @@ -359,17 +367,18 @@ TEST_BEGIN(test_arena_i_lg_dirty_mult) test_skip_if(opt_purge != purge_mode_ratio); - assert_d_eq(mallctl("arena.0.lg_dirty_mult", &orig_lg_dirty_mult, &sz, - NULL, 0), 0, "Unexpected mallctl() failure"); + assert_d_eq(mallctl("arena.0.lg_dirty_mult", + (void *)&orig_lg_dirty_mult, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); lg_dirty_mult = -2; assert_d_eq(mallctl("arena.0.lg_dirty_mult", NULL, NULL, - &lg_dirty_mult, sizeof(ssize_t)), EFAULT, + (void *)&lg_dirty_mult, sizeof(ssize_t)), EFAULT, "Unexpected mallctl() success"); lg_dirty_mult = (sizeof(size_t) << 3); assert_d_eq(mallctl("arena.0.lg_dirty_mult", NULL, NULL, - &lg_dirty_mult, sizeof(ssize_t)), EFAULT, + (void *)&lg_dirty_mult, sizeof(ssize_t)), EFAULT, "Unexpected mallctl() success"); for (prev_lg_dirty_mult = orig_lg_dirty_mult, lg_dirty_mult = -1; @@ -377,9 +386,9 @@ TEST_BEGIN(test_arena_i_lg_dirty_mult) = lg_dirty_mult, lg_dirty_mult++) { ssize_t old_lg_dirty_mult; - assert_d_eq(mallctl("arena.0.lg_dirty_mult", &old_lg_dirty_mult, - &sz, &lg_dirty_mult, sizeof(ssize_t)), 0, - "Unexpected mallctl() failure"); + assert_d_eq(mallctl("arena.0.lg_dirty_mult", + (void *)&old_lg_dirty_mult, &sz, (void *)&lg_dirty_mult, + sizeof(ssize_t)), 0, "Unexpected mallctl() failure"); assert_zd_eq(old_lg_dirty_mult, prev_lg_dirty_mult, "Unexpected old arena.0.lg_dirty_mult"); } @@ -393,25 +402,25 @@ TEST_BEGIN(test_arena_i_decay_time) test_skip_if(opt_purge != purge_mode_decay); - assert_d_eq(mallctl("arena.0.decay_time", &orig_decay_time, &sz, + assert_d_eq(mallctl("arena.0.decay_time", (void *)&orig_decay_time, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); decay_time = -2; assert_d_eq(mallctl("arena.0.decay_time", NULL, NULL, - &decay_time, sizeof(ssize_t)), EFAULT, + (void *)&decay_time, sizeof(ssize_t)), EFAULT, "Unexpected mallctl() success"); decay_time = 0x7fffffff; assert_d_eq(mallctl("arena.0.decay_time", NULL, NULL, - &decay_time, sizeof(ssize_t)), 0, + (void *)&decay_time, sizeof(ssize_t)), 0, "Unexpected mallctl() failure"); for (prev_decay_time = decay_time, decay_time = -1; decay_time < 20; prev_decay_time = decay_time, decay_time++) { ssize_t old_decay_time; - assert_d_eq(mallctl("arena.0.decay_time", 
&old_decay_time, - &sz, &decay_time, sizeof(ssize_t)), 0, + assert_d_eq(mallctl("arena.0.decay_time", (void *)&old_decay_time, + &sz, (void *)&decay_time, sizeof(ssize_t)), 0, "Unexpected mallctl() failure"); assert_zd_eq(old_decay_time, prev_decay_time, "Unexpected old arena.0.decay_time"); @@ -429,8 +438,8 @@ TEST_BEGIN(test_arena_i_purge) assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, "Unexpected mallctl() failure"); - assert_d_eq(mallctl("arenas.narenas", &narenas, &sz, NULL, 0), 0, - "Unexpected mallctl() failure"); + assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0), + 0, "Unexpected mallctl() failure"); assert_d_eq(mallctlnametomib("arena.0.purge", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); mib[1] = narenas; @@ -449,8 +458,8 @@ TEST_BEGIN(test_arena_i_decay) assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0, "Unexpected mallctl() failure"); - assert_d_eq(mallctl("arenas.narenas", &narenas, &sz, NULL, 0), 0, - "Unexpected mallctl() failure"); + assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0), + 0, "Unexpected mallctl() failure"); assert_d_eq(mallctlnametomib("arena.0.decay", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); mib[1] = narenas; @@ -471,31 +480,35 @@ TEST_BEGIN(test_arena_i_dss) "Unexpected mallctlnametomib() error"); dss_prec_new = "disabled"; - assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_old, &sz, &dss_prec_new, - sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure"); + assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, + (void *)&dss_prec_new, sizeof(dss_prec_new)), 0, + "Unexpected mallctl() failure"); assert_str_ne(dss_prec_old, "primary", "Unexpected default for dss precedence"); - assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_new, &sz, &dss_prec_old, - sizeof(dss_prec_old)), 0, "Unexpected mallctl() failure"); - - assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_old, &sz, NULL, 0), 0, + assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_new, &sz, + (void *)&dss_prec_old, sizeof(dss_prec_old)), 0, "Unexpected mallctl() failure"); + + assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, NULL, + 0), 0, "Unexpected mallctl() failure"); assert_str_ne(dss_prec_old, "primary", "Unexpected value for dss precedence"); mib[1] = narenas_total_get(); dss_prec_new = "disabled"; - assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_old, &sz, &dss_prec_new, - sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure"); + assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, + (void *)&dss_prec_new, sizeof(dss_prec_new)), 0, + "Unexpected mallctl() failure"); assert_str_ne(dss_prec_old, "primary", "Unexpected default for dss precedence"); - assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_new, &sz, &dss_prec_old, - sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure"); - - assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_old, &sz, NULL, 0), 0, + assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_new, &sz, + (void *)&dss_prec_old, sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure"); + + assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, NULL, + 0), 0, "Unexpected mallctl() failure"); assert_str_ne(dss_prec_old, "primary", "Unexpected value for dss precedence"); } @@ -506,14 +519,14 @@ TEST_BEGIN(test_arenas_initialized) unsigned narenas; size_t sz = sizeof(narenas); - assert_d_eq(mallctl("arenas.narenas", &narenas, &sz, NULL, 0), 0, - "Unexpected mallctl() failure"); + assert_d_eq(mallctl("arenas.narenas", (void 
*)&narenas, &sz, NULL, 0), + 0, "Unexpected mallctl() failure"); { VARIABLE_ARRAY(bool, initialized, narenas); sz = narenas * sizeof(bool); - assert_d_eq(mallctl("arenas.initialized", initialized, &sz, - NULL, 0), 0, "Unexpected mallctl() failure"); + assert_d_eq(mallctl("arenas.initialized", (void *)initialized, + &sz, NULL, 0), 0, "Unexpected mallctl() failure"); } } TEST_END @@ -525,17 +538,17 @@ TEST_BEGIN(test_arenas_lg_dirty_mult) test_skip_if(opt_purge != purge_mode_ratio); - assert_d_eq(mallctl("arenas.lg_dirty_mult", &orig_lg_dirty_mult, &sz, - NULL, 0), 0, "Unexpected mallctl() failure"); + assert_d_eq(mallctl("arenas.lg_dirty_mult", (void *)&orig_lg_dirty_mult, + &sz, NULL, 0), 0, "Unexpected mallctl() failure"); lg_dirty_mult = -2; assert_d_eq(mallctl("arenas.lg_dirty_mult", NULL, NULL, - &lg_dirty_mult, sizeof(ssize_t)), EFAULT, + (void *)&lg_dirty_mult, sizeof(ssize_t)), EFAULT, "Unexpected mallctl() success"); lg_dirty_mult = (sizeof(size_t) << 3); assert_d_eq(mallctl("arenas.lg_dirty_mult", NULL, NULL, - &lg_dirty_mult, sizeof(ssize_t)), EFAULT, + (void *)&lg_dirty_mult, sizeof(ssize_t)), EFAULT, "Unexpected mallctl() success"); for (prev_lg_dirty_mult = orig_lg_dirty_mult, lg_dirty_mult = -1; @@ -543,9 +556,9 @@ TEST_BEGIN(test_arenas_lg_dirty_mult) lg_dirty_mult, lg_dirty_mult++) { ssize_t old_lg_dirty_mult; - assert_d_eq(mallctl("arenas.lg_dirty_mult", &old_lg_dirty_mult, - &sz, &lg_dirty_mult, sizeof(ssize_t)), 0, - "Unexpected mallctl() failure"); + assert_d_eq(mallctl("arenas.lg_dirty_mult", + (void *)&old_lg_dirty_mult, &sz, (void *)&lg_dirty_mult, + sizeof(ssize_t)), 0, "Unexpected mallctl() failure"); assert_zd_eq(old_lg_dirty_mult, prev_lg_dirty_mult, "Unexpected old arenas.lg_dirty_mult"); } @@ -559,26 +572,26 @@ TEST_BEGIN(test_arenas_decay_time) test_skip_if(opt_purge != purge_mode_decay); - assert_d_eq(mallctl("arenas.decay_time", &orig_decay_time, &sz, + assert_d_eq(mallctl("arenas.decay_time", (void *)&orig_decay_time, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); decay_time = -2; assert_d_eq(mallctl("arenas.decay_time", NULL, NULL, - &decay_time, sizeof(ssize_t)), EFAULT, + (void *)&decay_time, sizeof(ssize_t)), EFAULT, "Unexpected mallctl() success"); decay_time = 0x7fffffff; assert_d_eq(mallctl("arenas.decay_time", NULL, NULL, - &decay_time, sizeof(ssize_t)), 0, + (void *)&decay_time, sizeof(ssize_t)), 0, "Expected mallctl() failure"); for (prev_decay_time = decay_time, decay_time = -1; decay_time < 20; prev_decay_time = decay_time, decay_time++) { ssize_t old_decay_time; - assert_d_eq(mallctl("arenas.decay_time", &old_decay_time, - &sz, &decay_time, sizeof(ssize_t)), 0, - "Unexpected mallctl() failure"); + assert_d_eq(mallctl("arenas.decay_time", + (void *)&old_decay_time, &sz, (void *)&decay_time, + sizeof(ssize_t)), 0, "Unexpected mallctl() failure"); assert_zd_eq(old_decay_time, prev_decay_time, "Unexpected old arenas.decay_time"); } @@ -591,8 +604,8 @@ TEST_BEGIN(test_arenas_constants) #define TEST_ARENAS_CONSTANT(t, name, expected) do { \ t name; \ size_t sz = sizeof(t); \ - assert_d_eq(mallctl("arenas."#name, &name, &sz, NULL, 0), 0, \ - "Unexpected mallctl() failure"); \ + assert_d_eq(mallctl("arenas."#name, (void *)&name, &sz, NULL, \ + 0), 0, "Unexpected mallctl() failure"); \ assert_zu_eq(name, expected, "Incorrect "#name" size"); \ } while (0) @@ -612,8 +625,8 @@ TEST_BEGIN(test_arenas_bin_constants) #define TEST_ARENAS_BIN_CONSTANT(t, name, expected) do { \ t name; \ size_t sz = sizeof(t); \ - assert_d_eq(mallctl("arenas.bin.0."#name, 
&name, &sz, NULL, 0), \ - 0, "Unexpected mallctl() failure"); \ + assert_d_eq(mallctl("arenas.bin.0."#name, (void *)&name, &sz, \ + NULL, 0), 0, "Unexpected mallctl() failure"); \ assert_zu_eq(name, expected, "Incorrect "#name" size"); \ } while (0) @@ -631,8 +644,8 @@ TEST_BEGIN(test_arenas_lrun_constants) #define TEST_ARENAS_LRUN_CONSTANT(t, name, expected) do { \ t name; \ size_t sz = sizeof(t); \ - assert_d_eq(mallctl("arenas.lrun.0."#name, &name, &sz, NULL, \ - 0), 0, "Unexpected mallctl() failure"); \ + assert_d_eq(mallctl("arenas.lrun.0."#name, (void *)&name, &sz, \ + NULL, 0), 0, "Unexpected mallctl() failure"); \ assert_zu_eq(name, expected, "Incorrect "#name" size"); \ } while (0) @@ -648,8 +661,8 @@ TEST_BEGIN(test_arenas_hchunk_constants) #define TEST_ARENAS_HCHUNK_CONSTANT(t, name, expected) do { \ t name; \ size_t sz = sizeof(t); \ - assert_d_eq(mallctl("arenas.hchunk.0."#name, &name, &sz, NULL, \ - 0), 0, "Unexpected mallctl() failure"); \ + assert_d_eq(mallctl("arenas.hchunk.0."#name, (void *)&name, \ + &sz, NULL, 0), 0, "Unexpected mallctl() failure"); \ assert_zu_eq(name, expected, "Incorrect "#name" size"); \ } while (0) @@ -664,12 +677,12 @@ TEST_BEGIN(test_arenas_extend) unsigned narenas_before, arena, narenas_after; size_t sz = sizeof(unsigned); - assert_d_eq(mallctl("arenas.narenas", &narenas_before, &sz, NULL, 0), 0, - "Unexpected mallctl() failure"); - assert_d_eq(mallctl("arenas.extend", &arena, &sz, NULL, 0), 0, - "Unexpected mallctl() failure"); - assert_d_eq(mallctl("arenas.narenas", &narenas_after, &sz, NULL, 0), 0, + assert_d_eq(mallctl("arenas.narenas", (void *)&narenas_before, &sz, + NULL, 0), 0, "Unexpected mallctl() failure"); + assert_d_eq(mallctl("arenas.extend", (void *)&arena, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); + assert_d_eq(mallctl("arenas.narenas", (void *)&narenas_after, &sz, NULL, + 0), 0, "Unexpected mallctl() failure"); assert_u_eq(narenas_before+1, narenas_after, "Unexpected number of arenas before versus after extension"); @@ -683,8 +696,8 @@ TEST_BEGIN(test_stats_arenas) #define TEST_STATS_ARENAS(t, name) do { \ t name; \ size_t sz = sizeof(t); \ - assert_d_eq(mallctl("stats.arenas.0."#name, &name, &sz, NULL, \ - 0), 0, "Unexpected mallctl() failure"); \ + assert_d_eq(mallctl("stats.arenas.0."#name, (void *)&name, &sz, \ + NULL, 0), 0, "Unexpected mallctl() failure"); \ } while (0) TEST_STATS_ARENAS(unsigned, nthreads); diff --git a/third-party/jemalloc/jemalloc-src/test/unit/pack.c b/third-party/jemalloc/jemalloc-src/test/unit/pack.c new file mode 100644 index 000000000000..0b6ffcd21c82 --- /dev/null +++ b/third-party/jemalloc/jemalloc-src/test/unit/pack.c @@ -0,0 +1,206 @@ +#include "test/jemalloc_test.h" + +const char *malloc_conf = + /* Use smallest possible chunk size. */ + "lg_chunk:0" + /* Immediately purge to minimize fragmentation. */ + ",lg_dirty_mult:-1" + ",decay_time:-1" + ; + +/* + * Size class that is a divisor of the page size, ideally 4+ regions per run. + */ +#if LG_PAGE <= 14 +#define SZ (ZU(1) << (LG_PAGE - 2)) +#else +#define SZ 4096 +#endif + +/* + * Number of chunks to consume at high water mark. Should be at least 2 so that + * if mmap()ed memory grows downward, downward growth of mmap()ed memory is + * tested. 
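
Most of the churn in the test hunks above is mechanical: every output (oldp) and input (newp) argument to mallctl() and mallctlbymib() is now passed through an explicit (void *) cast. For reference, the epoch/statistics idiom those tests keep exercising looks roughly like this in application code (a minimal sketch against the documented mallctl() interface; "stats.allocated" is only meaningful when jemalloc is built with statistics enabled):

    #include <stdint.h>
    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int
    main(void)
    {
    	uint64_t epoch = 1;
    	size_t allocated, sz;

    	/* Writing "epoch" refreshes jemalloc's cached statistics snapshot. */
    	if (mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)) != 0)
    		return (1);

    	/* Read a counter back; sz carries the buffer size in and out. */
    	sz = sizeof(allocated);
    	if (mallctl("stats.allocated", (void *)&allocated, &sz, NULL, 0) == 0)
    		printf("allocated: %zu bytes\n", allocated);
    	return (0);
    }
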
+ */ +#define NCHUNKS 8 + +static unsigned +binind_compute(void) +{ + size_t sz; + unsigned nbins, i; + + sz = sizeof(nbins); + assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0, + "Unexpected mallctl failure"); + + for (i = 0; i < nbins; i++) { + size_t mib[4]; + size_t miblen = sizeof(mib)/sizeof(size_t); + size_t size; + + assert_d_eq(mallctlnametomib("arenas.bin.0.size", mib, + &miblen), 0, "Unexpected mallctlnametomb failure"); + mib[2] = (size_t)i; + + sz = sizeof(size); + assert_d_eq(mallctlbymib(mib, miblen, (void *)&size, &sz, NULL, + 0), 0, "Unexpected mallctlbymib failure"); + if (size == SZ) + return (i); + } + + test_fail("Unable to compute nregs_per_run"); + return (0); +} + +static size_t +nregs_per_run_compute(void) +{ + uint32_t nregs; + size_t sz; + unsigned binind = binind_compute(); + size_t mib[4]; + size_t miblen = sizeof(mib)/sizeof(size_t); + + assert_d_eq(mallctlnametomib("arenas.bin.0.nregs", mib, &miblen), 0, + "Unexpected mallctlnametomb failure"); + mib[2] = (size_t)binind; + sz = sizeof(nregs); + assert_d_eq(mallctlbymib(mib, miblen, (void *)&nregs, &sz, NULL, + 0), 0, "Unexpected mallctlbymib failure"); + return (nregs); +} + +static size_t +npages_per_run_compute(void) +{ + size_t sz; + unsigned binind = binind_compute(); + size_t mib[4]; + size_t miblen = sizeof(mib)/sizeof(size_t); + size_t run_size; + + assert_d_eq(mallctlnametomib("arenas.bin.0.run_size", mib, &miblen), 0, + "Unexpected mallctlnametomb failure"); + mib[2] = (size_t)binind; + sz = sizeof(run_size); + assert_d_eq(mallctlbymib(mib, miblen, (void *)&run_size, &sz, NULL, + 0), 0, "Unexpected mallctlbymib failure"); + return (run_size >> LG_PAGE); +} + +static size_t +npages_per_chunk_compute(void) +{ + + return ((chunksize >> LG_PAGE) - map_bias); +} + +static size_t +nruns_per_chunk_compute(void) +{ + + return (npages_per_chunk_compute() / npages_per_run_compute()); +} + +static unsigned +arenas_extend_mallctl(void) +{ + unsigned arena_ind; + size_t sz; + + sz = sizeof(arena_ind); + assert_d_eq(mallctl("arenas.extend", (void *)&arena_ind, &sz, NULL, 0), + 0, "Error in arenas.extend"); + + return (arena_ind); +} + +static void +arena_reset_mallctl(unsigned arena_ind) +{ + size_t mib[3]; + size_t miblen = sizeof(mib)/sizeof(size_t); + + assert_d_eq(mallctlnametomib("arena.0.reset", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + mib[1] = (size_t)arena_ind; + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, + "Unexpected mallctlbymib() failure"); +} + +TEST_BEGIN(test_pack) +{ + unsigned arena_ind = arenas_extend_mallctl(); + size_t nregs_per_run = nregs_per_run_compute(); + size_t nruns_per_chunk = nruns_per_chunk_compute(); + size_t nruns = nruns_per_chunk * NCHUNKS; + size_t nregs = nregs_per_run * nruns; + VARIABLE_ARRAY(void *, ptrs, nregs); + size_t i, j, offset; + + /* Fill matrix. */ + for (i = offset = 0; i < nruns; i++) { + for (j = 0; j < nregs_per_run; j++) { + void *p = mallocx(SZ, MALLOCX_ARENA(arena_ind) | + MALLOCX_TCACHE_NONE); + assert_ptr_not_null(p, + "Unexpected mallocx(%zu, MALLOCX_ARENA(%u) |" + " MALLOCX_TCACHE_NONE) failure, run=%zu, reg=%zu", + SZ, arena_ind, i, j); + ptrs[(i * nregs_per_run) + j] = p; + } + } + + /* + * Free all but one region of each run, but rotate which region is + * preserved, so that subsequent allocations exercise the within-run + * layout policy. 
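
The matrix fill in test_pack above routes every request through mallocx() and dallocx() with MALLOCX_ARENA() | MALLOCX_TCACHE_NONE, so each allocation goes straight to one dedicated arena rather than through the per-thread cache. Outside the test harness the same pattern looks roughly like this (hypothetical helper, error handling trimmed):

    #include <stddef.h>
    #include <jemalloc/jemalloc.h>

    /* Hypothetical helper: create a fresh arena via the "arenas.extend"
     * mallctl and allocate from it directly, bypassing the thread cache. */
    static void *
    alloc_from_private_arena(size_t size, unsigned *arena_ind)
    {
    	size_t sz = sizeof(*arena_ind);

    	if (mallctl("arenas.extend", (void *)arena_ind, &sz, NULL, 0) != 0)
    		return (NULL);
    	return (mallocx(size, MALLOCX_ARENA(*arena_ind) | MALLOCX_TCACHE_NONE));
    }

The matching release is dallocx(p, MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE), exactly as in the test's free loop.
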
+ */ + offset = 0; + for (i = offset = 0; + i < nruns; + i++, offset = (offset + 1) % nregs_per_run) { + for (j = 0; j < nregs_per_run; j++) { + void *p = ptrs[(i * nregs_per_run) + j]; + if (offset == j) + continue; + dallocx(p, MALLOCX_ARENA(arena_ind) | + MALLOCX_TCACHE_NONE); + } + } + + /* + * Logically refill matrix, skipping preserved regions and verifying + * that the matrix is unmodified. + */ + offset = 0; + for (i = offset = 0; + i < nruns; + i++, offset = (offset + 1) % nregs_per_run) { + for (j = 0; j < nregs_per_run; j++) { + void *p; + + if (offset == j) + continue; + p = mallocx(SZ, MALLOCX_ARENA(arena_ind) | + MALLOCX_TCACHE_NONE); + assert_ptr_eq(p, ptrs[(i * nregs_per_run) + j], + "Unexpected refill discrepancy, run=%zu, reg=%zu\n", + i, j); + } + } + + /* Clean up. */ + arena_reset_mallctl(arena_ind); +} +TEST_END + +int +main(void) +{ + + return (test( + test_pack)); +} diff --git a/third-party/jemalloc/jemalloc-src/test/unit/pages.c b/third-party/jemalloc/jemalloc-src/test/unit/pages.c new file mode 100644 index 000000000000..d31a35e688f5 --- /dev/null +++ b/third-party/jemalloc/jemalloc-src/test/unit/pages.c @@ -0,0 +1,27 @@ +#include "test/jemalloc_test.h" + +TEST_BEGIN(test_pages_huge) +{ + bool commit; + void *pages; + + commit = true; + pages = pages_map(NULL, PAGE, &commit); + assert_ptr_not_null(pages, "Unexpected pages_map() error"); + + assert_false(pages_huge(pages, PAGE), + "Unexpected pages_huge() result"); + assert_false(pages_nohuge(pages, PAGE), + "Unexpected pages_nohuge() result"); + + pages_unmap(pages, PAGE); +} +TEST_END + +int +main(void) +{ + + return (test( + test_pages_huge)); +} diff --git a/third-party/jemalloc/jemalloc-src/test/unit/prof_accum.c b/third-party/jemalloc/jemalloc-src/test/unit/prof_accum.c old mode 100644 new mode 100755 index fd229e0fd279..d941b5bc6f66 --- a/third-party/jemalloc/jemalloc-src/test/unit/prof_accum.c +++ b/third-party/jemalloc/jemalloc-src/test/unit/prof_accum.c @@ -68,8 +68,9 @@ TEST_BEGIN(test_idump) test_skip_if(!config_prof); active = true; - assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)), - 0, "Unexpected mallctl failure while activating profiling"); + assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active, + sizeof(active)), 0, + "Unexpected mallctl failure while activating profiling"); prof_dump_open = prof_dump_open_intercept; diff --git a/third-party/jemalloc/jemalloc-src/test/unit/prof_active.c b/third-party/jemalloc/jemalloc-src/test/unit/prof_active.c old mode 100644 new mode 100755 index 814909572925..d00943a4cb2f --- a/third-party/jemalloc/jemalloc-src/test/unit/prof_active.c +++ b/third-party/jemalloc/jemalloc-src/test/unit/prof_active.c @@ -12,7 +12,7 @@ mallctl_bool_get(const char *name, bool expected, const char *func, int line) size_t sz; sz = sizeof(old); - assert_d_eq(mallctl(name, &old, &sz, NULL, 0), 0, + assert_d_eq(mallctl(name, (void *)&old, &sz, NULL, 0), 0, "%s():%d: Unexpected mallctl failure reading %s", func, line, name); assert_b_eq(old, expected, "%s():%d: Unexpected %s value", func, line, name); @@ -26,7 +26,8 @@ mallctl_bool_set(const char *name, bool old_expected, bool val_new, size_t sz; sz = sizeof(old); - assert_d_eq(mallctl(name, &old, &sz, &val_new, sizeof(val_new)), 0, + assert_d_eq(mallctl(name, (void *)&old, &sz, (void *)&val_new, + sizeof(val_new)), 0, "%s():%d: Unexpected mallctl failure reading/writing %s", func, line, name); assert_b_eq(old, old_expected, "%s():%d: Unexpected %s value", func, diff --git 
a/third-party/jemalloc/jemalloc-src/test/unit/prof_gdump.c b/third-party/jemalloc/jemalloc-src/test/unit/prof_gdump.c old mode 100644 new mode 100755 index a0e6ee921178..996cb6704115 --- a/third-party/jemalloc/jemalloc-src/test/unit/prof_gdump.c +++ b/third-party/jemalloc/jemalloc-src/test/unit/prof_gdump.c @@ -28,8 +28,9 @@ TEST_BEGIN(test_gdump) test_skip_if(!config_prof); active = true; - assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)), - 0, "Unexpected mallctl failure while activating profiling"); + assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active, + sizeof(active)), 0, + "Unexpected mallctl failure while activating profiling"); prof_dump_open = prof_dump_open_intercept; @@ -45,8 +46,8 @@ TEST_BEGIN(test_gdump) gdump = false; sz = sizeof(gdump_old); - assert_d_eq(mallctl("prof.gdump", &gdump_old, &sz, &gdump, - sizeof(gdump)), 0, + assert_d_eq(mallctl("prof.gdump", (void *)&gdump_old, &sz, + (void *)&gdump, sizeof(gdump)), 0, "Unexpected mallctl failure while disabling prof.gdump"); assert(gdump_old); did_prof_dump_open = false; @@ -56,8 +57,8 @@ TEST_BEGIN(test_gdump) gdump = true; sz = sizeof(gdump_old); - assert_d_eq(mallctl("prof.gdump", &gdump_old, &sz, &gdump, - sizeof(gdump)), 0, + assert_d_eq(mallctl("prof.gdump", (void *)&gdump_old, &sz, + (void *)&gdump, sizeof(gdump)), 0, "Unexpected mallctl failure while enabling prof.gdump"); assert(!gdump_old); did_prof_dump_open = false; diff --git a/third-party/jemalloc/jemalloc-src/test/unit/prof_idump.c b/third-party/jemalloc/jemalloc-src/test/unit/prof_idump.c old mode 100644 new mode 100755 index bdea53ecdd9b..16c6462de568 --- a/third-party/jemalloc/jemalloc-src/test/unit/prof_idump.c +++ b/third-party/jemalloc/jemalloc-src/test/unit/prof_idump.c @@ -29,8 +29,9 @@ TEST_BEGIN(test_idump) test_skip_if(!config_prof); active = true; - assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)), - 0, "Unexpected mallctl failure while activating profiling"); + assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active, + sizeof(active)), 0, + "Unexpected mallctl failure while activating profiling"); prof_dump_open = prof_dump_open_intercept; diff --git a/third-party/jemalloc/jemalloc-src/test/unit/prof_reset.c b/third-party/jemalloc/jemalloc-src/test/unit/prof_reset.c old mode 100644 new mode 100755 index 5ae45fd2cce3..59d70796a145 --- a/third-party/jemalloc/jemalloc-src/test/unit/prof_reset.c +++ b/third-party/jemalloc/jemalloc-src/test/unit/prof_reset.c @@ -20,8 +20,8 @@ static void set_prof_active(bool active) { - assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)), - 0, "Unexpected mallctl failure"); + assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active, + sizeof(active)), 0, "Unexpected mallctl failure"); } static size_t @@ -30,7 +30,8 @@ get_lg_prof_sample(void) size_t lg_prof_sample; size_t sz = sizeof(size_t); - assert_d_eq(mallctl("prof.lg_sample", &lg_prof_sample, &sz, NULL, 0), 0, + assert_d_eq(mallctl("prof.lg_sample", (void *)&lg_prof_sample, &sz, + NULL, 0), 0, "Unexpected mallctl failure while reading profiling sample rate"); return (lg_prof_sample); } @@ -39,7 +40,7 @@ static void do_prof_reset(size_t lg_prof_sample) { assert_d_eq(mallctl("prof.reset", NULL, NULL, - &lg_prof_sample, sizeof(size_t)), 0, + (void *)&lg_prof_sample, sizeof(size_t)), 0, "Unexpected mallctl failure while resetting profile data"); assert_zu_eq(lg_prof_sample, get_lg_prof_sample(), "Expected profile sample rate change"); @@ -54,8 +55,8 @@ 
TEST_BEGIN(test_prof_reset_basic) test_skip_if(!config_prof); sz = sizeof(size_t); - assert_d_eq(mallctl("opt.lg_prof_sample", &lg_prof_sample_orig, &sz, - NULL, 0), 0, + assert_d_eq(mallctl("opt.lg_prof_sample", (void *)&lg_prof_sample_orig, + &sz, NULL, 0), 0, "Unexpected mallctl failure while reading profiling sample rate"); assert_zu_eq(lg_prof_sample_orig, 0, "Unexpected profiling sample rate"); diff --git a/third-party/jemalloc/jemalloc-src/test/unit/prof_thread_name.c b/third-party/jemalloc/jemalloc-src/test/unit/prof_thread_name.c old mode 100644 new mode 100755 index f501158d7de0..9ec549776d9b --- a/third-party/jemalloc/jemalloc-src/test/unit/prof_thread_name.c +++ b/third-party/jemalloc/jemalloc-src/test/unit/prof_thread_name.c @@ -12,8 +12,9 @@ mallctl_thread_name_get_impl(const char *thread_name_expected, const char *func, size_t sz; sz = sizeof(thread_name_old); - assert_d_eq(mallctl("thread.prof.name", &thread_name_old, &sz, NULL, 0), - 0, "%s():%d: Unexpected mallctl failure reading thread.prof.name", + assert_d_eq(mallctl("thread.prof.name", (void *)&thread_name_old, &sz, + NULL, 0), 0, + "%s():%d: Unexpected mallctl failure reading thread.prof.name", func, line); assert_str_eq(thread_name_old, thread_name_expected, "%s():%d: Unexpected thread.prof.name value", func, line); @@ -26,8 +27,8 @@ mallctl_thread_name_set_impl(const char *thread_name, const char *func, int line) { - assert_d_eq(mallctl("thread.prof.name", NULL, NULL, &thread_name, - sizeof(thread_name)), 0, + assert_d_eq(mallctl("thread.prof.name", NULL, NULL, + (void *)&thread_name, sizeof(thread_name)), 0, "%s():%d: Unexpected mallctl failure reading thread.prof.name", func, line); mallctl_thread_name_get_impl(thread_name, func, line); @@ -46,15 +47,15 @@ TEST_BEGIN(test_prof_thread_name_validation) /* NULL input shouldn't be allowed. */ thread_name = NULL; - assert_d_eq(mallctl("thread.prof.name", NULL, NULL, &thread_name, - sizeof(thread_name)), EFAULT, + assert_d_eq(mallctl("thread.prof.name", NULL, NULL, + (void *)&thread_name, sizeof(thread_name)), EFAULT, "Unexpected mallctl result writing \"%s\" to thread.prof.name", thread_name); /* '\n' shouldn't be allowed. 
*/ thread_name = "hi\nthere"; - assert_d_eq(mallctl("thread.prof.name", NULL, NULL, &thread_name, - sizeof(thread_name)), EFAULT, + assert_d_eq(mallctl("thread.prof.name", NULL, NULL, + (void *)&thread_name, sizeof(thread_name)), EFAULT, "Unexpected mallctl result writing \"%s\" to thread.prof.name", thread_name); @@ -64,8 +65,9 @@ TEST_BEGIN(test_prof_thread_name_validation) size_t sz; sz = sizeof(thread_name_old); - assert_d_eq(mallctl("thread.prof.name", &thread_name_old, &sz, - &thread_name, sizeof(thread_name)), EPERM, + assert_d_eq(mallctl("thread.prof.name", + (void *)&thread_name_old, &sz, (void *)&thread_name, + sizeof(thread_name)), EPERM, "Unexpected mallctl result writing \"%s\" to " "thread.prof.name", thread_name); } diff --git a/third-party/jemalloc/jemalloc-src/test/unit/run_quantize.c b/third-party/jemalloc/jemalloc-src/test/unit/run_quantize.c index b1ca6356dc06..089176f3984c 100644 --- a/third-party/jemalloc/jemalloc-src/test/unit/run_quantize.c +++ b/third-party/jemalloc/jemalloc-src/test/unit/run_quantize.c @@ -13,7 +13,7 @@ TEST_BEGIN(test_small_run_size) */ sz = sizeof(unsigned); - assert_d_eq(mallctl("arenas.nbins", &nbins, &sz, NULL, 0), 0, + assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0, "Unexpected mallctl failure"); assert_d_eq(mallctlnametomib("arenas.bin.0.run_size", mib, &miblen), 0, @@ -21,8 +21,8 @@ TEST_BEGIN(test_small_run_size) for (i = 0; i < nbins; i++) { mib[2] = i; sz = sizeof(size_t); - assert_d_eq(mallctlbymib(mib, miblen, &run_size, &sz, NULL, 0), - 0, "Unexpected mallctlbymib failure"); + assert_d_eq(mallctlbymib(mib, miblen, (void *)&run_size, &sz, + NULL, 0), 0, "Unexpected mallctlbymib failure"); assert_zu_eq(run_size, run_quantize_floor(run_size), "Small run quantization should be a no-op (run_size=%zu)", run_size); @@ -47,11 +47,11 @@ TEST_BEGIN(test_large_run_size) */ sz = sizeof(bool); - assert_d_eq(mallctl("config.cache_oblivious", &cache_oblivious, &sz, - NULL, 0), 0, "Unexpected mallctl failure"); + assert_d_eq(mallctl("config.cache_oblivious", (void *)&cache_oblivious, + &sz, NULL, 0), 0, "Unexpected mallctl failure"); sz = sizeof(unsigned); - assert_d_eq(mallctl("arenas.nlruns", &nlruns, &sz, NULL, 0), 0, + assert_d_eq(mallctl("arenas.nlruns", (void *)&nlruns, &sz, NULL, 0), 0, "Unexpected mallctl failure"); assert_d_eq(mallctlnametomib("arenas.lrun.0.size", mib, &miblen), 0, @@ -61,8 +61,8 @@ TEST_BEGIN(test_large_run_size) mib[2] = i; sz = sizeof(size_t); - assert_d_eq(mallctlbymib(mib, miblen, &lrun_size, &sz, NULL, 0), - 0, "Unexpected mallctlbymib failure"); + assert_d_eq(mallctlbymib(mib, miblen, (void *)&lrun_size, &sz, + NULL, 0), 0, "Unexpected mallctlbymib failure"); run_size = cache_oblivious ? 
lrun_size + PAGE : lrun_size; floor = run_quantize_floor(run_size); ceil = run_quantize_ceil(run_size); @@ -102,11 +102,11 @@ TEST_BEGIN(test_monotonic) */ sz = sizeof(unsigned); - assert_d_eq(mallctl("arenas.nbins", &nbins, &sz, NULL, 0), 0, + assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0, "Unexpected mallctl failure"); sz = sizeof(unsigned); - assert_d_eq(mallctl("arenas.nlruns", &nlruns, &sz, NULL, 0), 0, + assert_d_eq(mallctl("arenas.nlruns", (void *)&nlruns, &sz, NULL, 0), 0, "Unexpected mallctl failure"); floor_prev = 0; diff --git a/third-party/jemalloc/jemalloc-src/test/unit/size_classes.c b/third-party/jemalloc/jemalloc-src/test/unit/size_classes.c old mode 100644 new mode 100755 index 4e1e0ce4f61a..81cc606171dd --- a/third-party/jemalloc/jemalloc-src/test/unit/size_classes.c +++ b/third-party/jemalloc/jemalloc-src/test/unit/size_classes.c @@ -8,8 +8,8 @@ get_max_size_class(void) size_t sz, miblen, max_size_class; sz = sizeof(unsigned); - assert_d_eq(mallctl("arenas.nhchunks", &nhchunks, &sz, NULL, 0), 0, - "Unexpected mallctl() error"); + assert_d_eq(mallctl("arenas.nhchunks", (void *)&nhchunks, &sz, NULL, 0), + 0, "Unexpected mallctl() error"); miblen = sizeof(mib) / sizeof(size_t); assert_d_eq(mallctlnametomib("arenas.hchunk.0.size", mib, &miblen), 0, @@ -17,8 +17,8 @@ get_max_size_class(void) mib[2] = nhchunks - 1; sz = sizeof(size_t); - assert_d_eq(mallctlbymib(mib, miblen, &max_size_class, &sz, NULL, 0), 0, - "Unexpected mallctlbymib() error"); + assert_d_eq(mallctlbymib(mib, miblen, (void *)&max_size_class, &sz, + NULL, 0), 0, "Unexpected mallctlbymib() error"); return (max_size_class); } diff --git a/third-party/jemalloc/jemalloc-src/test/unit/stats.c b/third-party/jemalloc/jemalloc-src/test/unit/stats.c old mode 100644 new mode 100755 index a9a3981fbebf..315717dfb869 --- a/third-party/jemalloc/jemalloc-src/test/unit/stats.c +++ b/third-party/jemalloc/jemalloc-src/test/unit/stats.c @@ -7,18 +7,18 @@ TEST_BEGIN(test_stats_summary) int expected = config_stats ? 
0 : ENOENT; sz = sizeof(cactive); - assert_d_eq(mallctl("stats.cactive", &cactive, &sz, NULL, 0), expected, - "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.cactive", (void *)&cactive, &sz, NULL, 0), + expected, "Unexpected mallctl() result"); sz = sizeof(size_t); - assert_d_eq(mallctl("stats.allocated", &allocated, &sz, NULL, 0), + assert_d_eq(mallctl("stats.allocated", (void *)&allocated, &sz, NULL, + 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.active", (void *)&active, &sz, NULL, 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.active", &active, &sz, NULL, 0), expected, - "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.resident", &resident, &sz, NULL, 0), + assert_d_eq(mallctl("stats.resident", (void *)&resident, &sz, NULL, 0), + expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.mapped", (void *)&mapped, &sz, NULL, 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.mapped", &mapped, &sz, NULL, 0), expected, - "Unexpected mallctl() result"); if (config_stats) { assert_zu_le(active, *cactive, @@ -45,19 +45,19 @@ TEST_BEGIN(test_stats_huge) p = mallocx(large_maxclass+1, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); - assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0, - "Unexpected mallctl() failure"); + assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), + 0, "Unexpected mallctl() failure"); sz = sizeof(size_t); - assert_d_eq(mallctl("stats.arenas.0.huge.allocated", &allocated, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.huge.allocated", (void *)&allocated, + &sz, NULL, 0), expected, "Unexpected mallctl() result"); sz = sizeof(uint64_t); - assert_d_eq(mallctl("stats.arenas.0.huge.nmalloc", &nmalloc, &sz, NULL, - 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.huge.ndalloc", &ndalloc, &sz, NULL, - 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.huge.nrequests", &nrequests, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.huge.nmalloc", (void *)&nmalloc, + &sz, NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.huge.ndalloc", (void *)&ndalloc, + &sz, NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.huge.nrequests", (void *)&nrequests, + &sz, NULL, 0), expected, "Unexpected mallctl() result"); if (config_stats) { assert_zu_gt(allocated, 0, @@ -83,8 +83,8 @@ TEST_BEGIN(test_stats_arenas_summary) uint64_t npurge, nmadvise, purged; arena = 0; - assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)), - 0, "Unexpected mallctl() failure"); + assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena, + sizeof(arena)), 0, "Unexpected mallctl() failure"); little = mallocx(SMALL_MAXCLASS, 0); assert_ptr_not_null(little, "Unexpected mallocx() failure"); @@ -100,19 +100,19 @@ TEST_BEGIN(test_stats_arenas_summary) assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, "Unexpected mallctl() failure"); - assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0, - "Unexpected mallctl() failure"); + assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), + 0, "Unexpected mallctl() failure"); sz = sizeof(size_t); - assert_d_eq(mallctl("stats.arenas.0.mapped", &mapped, &sz, NULL, 0), - expected, "Unexepected mallctl() result"); + 
assert_d_eq(mallctl("stats.arenas.0.mapped", (void *)&mapped, &sz, NULL, + 0), expected, "Unexepected mallctl() result"); sz = sizeof(uint64_t); - assert_d_eq(mallctl("stats.arenas.0.npurge", &npurge, &sz, NULL, 0), - expected, "Unexepected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.nmadvise", &nmadvise, &sz, NULL, 0), - expected, "Unexepected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.purged", &purged, &sz, NULL, 0), - expected, "Unexepected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.npurge", (void *)&npurge, &sz, NULL, + 0), expected, "Unexepected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.nmadvise", (void *)&nmadvise, &sz, + NULL, 0), expected, "Unexepected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.purged", (void *)&purged, &sz, NULL, + 0), expected, "Unexepected mallctl() result"); if (config_stats) { assert_u64_gt(npurge, 0, @@ -150,8 +150,8 @@ TEST_BEGIN(test_stats_arenas_small) no_lazy_lock(); /* Lazy locking would dodge tcache testing. */ arena = 0; - assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)), - 0, "Unexpected mallctl() failure"); + assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena, + sizeof(arena)), 0, "Unexpected mallctl() failure"); p = mallocx(SMALL_MAXCLASS, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); @@ -159,19 +159,21 @@ TEST_BEGIN(test_stats_arenas_small) assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0), config_tcache ? 0 : ENOENT, "Unexpected mallctl() result"); - assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0, - "Unexpected mallctl() failure"); + assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), + 0, "Unexpected mallctl() failure"); sz = sizeof(size_t); - assert_d_eq(mallctl("stats.arenas.0.small.allocated", &allocated, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.small.allocated", + (void *)&allocated, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); sz = sizeof(uint64_t); - assert_d_eq(mallctl("stats.arenas.0.small.nmalloc", &nmalloc, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.small.ndalloc", &ndalloc, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.small.nrequests", &nrequests, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.small.nmalloc", (void *)&nmalloc, + &sz, NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.small.ndalloc", (void *)&ndalloc, + &sz, NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.small.nrequests", + (void *)&nrequests, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); if (config_stats) { assert_zu_gt(allocated, 0, @@ -197,25 +199,27 @@ TEST_BEGIN(test_stats_arenas_large) int expected = config_stats ? 
0 : ENOENT; arena = 0; - assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)), - 0, "Unexpected mallctl() failure"); + assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena, + sizeof(arena)), 0, "Unexpected mallctl() failure"); p = mallocx(large_maxclass, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); - assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0, - "Unexpected mallctl() failure"); + assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), + 0, "Unexpected mallctl() failure"); sz = sizeof(size_t); - assert_d_eq(mallctl("stats.arenas.0.large.allocated", &allocated, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.large.allocated", + (void *)&allocated, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); sz = sizeof(uint64_t); - assert_d_eq(mallctl("stats.arenas.0.large.nmalloc", &nmalloc, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.large.ndalloc", &ndalloc, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.large.nrequests", &nrequests, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.large.nmalloc", (void *)&nmalloc, + &sz, NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.large.ndalloc", (void *)&ndalloc, + &sz, NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.large.nrequests", + (void *)&nrequests, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); if (config_stats) { assert_zu_gt(allocated, 0, @@ -241,23 +245,23 @@ TEST_BEGIN(test_stats_arenas_huge) int expected = config_stats ? 0 : ENOENT; arena = 0; - assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)), - 0, "Unexpected mallctl() failure"); + assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena, + sizeof(arena)), 0, "Unexpected mallctl() failure"); p = mallocx(chunksize, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); - assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0, - "Unexpected mallctl() failure"); + assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), + 0, "Unexpected mallctl() failure"); sz = sizeof(size_t); - assert_d_eq(mallctl("stats.arenas.0.huge.allocated", &allocated, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.huge.allocated", (void *)&allocated, + &sz, NULL, 0), expected, "Unexpected mallctl() result"); sz = sizeof(uint64_t); - assert_d_eq(mallctl("stats.arenas.0.huge.nmalloc", &nmalloc, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.huge.ndalloc", &ndalloc, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.huge.nmalloc", (void *)&nmalloc, + &sz, NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.huge.ndalloc", (void *)&ndalloc, + &sz, NULL, 0), expected, "Unexpected mallctl() result"); if (config_stats) { assert_zu_gt(allocated, 0, @@ -282,8 +286,8 @@ TEST_BEGIN(test_stats_arenas_bins) int expected = config_stats ? 
0 : ENOENT; arena = 0; - assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)), - 0, "Unexpected mallctl() failure"); + assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena, + sizeof(arena)), 0, "Unexpected mallctl() failure"); p = mallocx(arena_bin_info[0].reg_size, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); @@ -291,35 +295,36 @@ TEST_BEGIN(test_stats_arenas_bins) assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0), config_tcache ? 0 : ENOENT, "Unexpected mallctl() result"); - assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0, - "Unexpected mallctl() failure"); + assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), + 0, "Unexpected mallctl() failure"); sz = sizeof(uint64_t); - assert_d_eq(mallctl("stats.arenas.0.bins.0.nmalloc", &nmalloc, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.bins.0.ndalloc", &ndalloc, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.bins.0.nrequests", &nrequests, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.bins.0.nmalloc", (void *)&nmalloc, + &sz, NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.bins.0.ndalloc", (void *)&ndalloc, + &sz, NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.bins.0.nrequests", + (void *)&nrequests, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); sz = sizeof(size_t); - assert_d_eq(mallctl("stats.arenas.0.bins.0.curregs", &curregs, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.bins.0.curregs", (void *)&curregs, + &sz, NULL, 0), expected, "Unexpected mallctl() result"); sz = sizeof(uint64_t); - assert_d_eq(mallctl("stats.arenas.0.bins.0.nfills", &nfills, &sz, - NULL, 0), config_tcache ? expected : ENOENT, + assert_d_eq(mallctl("stats.arenas.0.bins.0.nfills", (void *)&nfills, + &sz, NULL, 0), config_tcache ? expected : ENOENT, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.bins.0.nflushes", &nflushes, &sz, - NULL, 0), config_tcache ? expected : ENOENT, + assert_d_eq(mallctl("stats.arenas.0.bins.0.nflushes", (void *)&nflushes, + &sz, NULL, 0), config_tcache ? expected : ENOENT, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.bins.0.nruns", &nruns, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.bins.0.nreruns", &nreruns, &sz, + assert_d_eq(mallctl("stats.arenas.0.bins.0.nruns", (void *)&nruns, &sz, NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.bins.0.nreruns", (void *)&nreruns, + &sz, NULL, 0), expected, "Unexpected mallctl() result"); sz = sizeof(size_t); - assert_d_eq(mallctl("stats.arenas.0.bins.0.curruns", &curruns, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.bins.0.curruns", (void *)&curruns, + &sz, NULL, 0), expected, "Unexpected mallctl() result"); if (config_stats) { assert_u64_gt(nmalloc, 0, @@ -355,25 +360,26 @@ TEST_BEGIN(test_stats_arenas_lruns) int expected = config_stats ? 
0 : ENOENT; arena = 0; - assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)), - 0, "Unexpected mallctl() failure"); + assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena, + sizeof(arena)), 0, "Unexpected mallctl() failure"); p = mallocx(LARGE_MINCLASS, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); - assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0, - "Unexpected mallctl() failure"); + assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), + 0, "Unexpected mallctl() failure"); sz = sizeof(uint64_t); - assert_d_eq(mallctl("stats.arenas.0.lruns.0.nmalloc", &nmalloc, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.lruns.0.ndalloc", &ndalloc, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.lruns.0.nrequests", &nrequests, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.lruns.0.nmalloc", (void *)&nmalloc, + &sz, NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.lruns.0.ndalloc", (void *)&ndalloc, + &sz, NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.lruns.0.nrequests", + (void *)&nrequests, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); sz = sizeof(size_t); - assert_d_eq(mallctl("stats.arenas.0.lruns.0.curruns", &curruns, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.lruns.0.curruns", (void *)&curruns, + &sz, NULL, 0), expected, "Unexpected mallctl() result"); if (config_stats) { assert_u64_gt(nmalloc, 0, @@ -399,23 +405,26 @@ TEST_BEGIN(test_stats_arenas_hchunks) int expected = config_stats ? 
0 : ENOENT; arena = 0; - assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)), - 0, "Unexpected mallctl() failure"); + assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena, + sizeof(arena)), 0, "Unexpected mallctl() failure"); p = mallocx(chunksize, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); - assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0, - "Unexpected mallctl() failure"); + assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), + 0, "Unexpected mallctl() failure"); sz = sizeof(uint64_t); - assert_d_eq(mallctl("stats.arenas.0.hchunks.0.nmalloc", &nmalloc, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.hchunks.0.ndalloc", &ndalloc, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.hchunks.0.nmalloc", + (void *)&nmalloc, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.hchunks.0.ndalloc", + (void *)&ndalloc, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); sz = sizeof(size_t); - assert_d_eq(mallctl("stats.arenas.0.hchunks.0.curhchunks", &curhchunks, - &sz, NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.hchunks.0.curhchunks", + (void *)&curhchunks, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); if (config_stats) { assert_u64_gt(nmalloc, 0, diff --git a/third-party/jemalloc/jemalloc-src/test/unit/tsd.c b/third-party/jemalloc/jemalloc-src/test/unit/tsd.c index 4e2622a34ef1..d5f96ac36ae9 100644 --- a/third-party/jemalloc/jemalloc-src/test/unit/tsd.c +++ b/third-party/jemalloc/jemalloc-src/test/unit/tsd.c @@ -79,7 +79,7 @@ thd_start(void *arg) TEST_BEGIN(test_tsd_main_thread) { - thd_start((void *) 0xa5f3e329); + thd_start((void *)(uintptr_t)0xa5f3e329); } TEST_END diff --git a/third-party/jemalloc/jemalloc-src/test/unit/util.c b/third-party/jemalloc/jemalloc-src/test/unit/util.c index c958dc0fba29..b1f9abd9bdb8 100644 --- a/third-party/jemalloc/jemalloc-src/test/unit/util.c +++ b/third-party/jemalloc/jemalloc-src/test/unit/util.c @@ -75,6 +75,7 @@ TEST_BEGIN(test_malloc_strtoumax) }; #define ERR(e) e, #e #define KUMAX(x) ((uintmax_t)x##ULL) +#define KSMAX(x) ((uintmax_t)(intmax_t)x##LL) struct test_s tests[] = { {"0", "0", -1, ERR(EINVAL), UINTMAX_MAX}, {"0", "0", 1, ERR(EINVAL), UINTMAX_MAX}, @@ -87,13 +88,13 @@ TEST_BEGIN(test_malloc_strtoumax) {"42", "", 0, ERR(0), KUMAX(42)}, {"+42", "", 0, ERR(0), KUMAX(42)}, - {"-42", "", 0, ERR(0), KUMAX(-42)}, + {"-42", "", 0, ERR(0), KSMAX(-42)}, {"042", "", 0, ERR(0), KUMAX(042)}, {"+042", "", 0, ERR(0), KUMAX(042)}, - {"-042", "", 0, ERR(0), KUMAX(-042)}, + {"-042", "", 0, ERR(0), KSMAX(-042)}, {"0x42", "", 0, ERR(0), KUMAX(0x42)}, {"+0x42", "", 0, ERR(0), KUMAX(0x42)}, - {"-0x42", "", 0, ERR(0), KUMAX(-0x42)}, + {"-0x42", "", 0, ERR(0), KSMAX(-0x42)}, {"0", "", 0, ERR(0), KUMAX(0)}, {"1", "", 0, ERR(0), KUMAX(1)}, @@ -130,6 +131,7 @@ TEST_BEGIN(test_malloc_strtoumax) }; #undef ERR #undef KUMAX +#undef KSMAX unsigned i; for (i = 0; i < sizeof(tests)/sizeof(struct test_s); i++) {
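
Nearly every hunk in these test updates applies the same mechanical change: pointers passed to mallctl()'s oldp and newp parameters gain an explicit (void *) cast, and the long argument lists are re-wrapped to fit; tsd.c additionally converts its integer constant to a pointer through uintptr_t rather than casting it to void * directly. As a rough standalone sketch (not taken from the patch, and assuming an unprefixed jemalloc build where mallctl() is declared in <jemalloc/jemalloc.h>), a read-and-write call in the updated casting style looks like the hypothetical helper switch_to_arena0() below:

#include <stddef.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

/*
 * Hypothetical helper (not part of the patch): read the calling thread's
 * current arena index and switch it to arena 0, passing oldp/newp through
 * explicit (void *) casts in the style of the updated tests.
 */
static int
switch_to_arena0(unsigned *old_arena)
{
    unsigned new_arena = 0;
    size_t sz = sizeof(*old_arena);

    return (mallctl("thread.arena", (void *)old_arena, &sz,
        (void *)&new_arena, sizeof(new_arena)));
}

int
main(void)
{
    unsigned prev;

    if (switch_to_arena0(&prev) != 0)
        fprintf(stderr, "mallctl(\"thread.arena\") failed\n");
    else
        printf("previous arena: %u\n", prev);
    return (0);
}

mallctl() still takes void * for both oldp and newp, so the casts do not change behavior; they only make the pointer conversions explicit, presumably to keep stricter compiler settings quiet.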
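
The util.c hunk above replaces KUMAX(-42) and friends with a new KSMAX macro for the negative malloc_strtoumax() inputs. The difference is only in how the expected constant is formed: KUMAX pastes a ULL suffix, so the minus sign is applied to an unsigned constant, while KSMAX routes the value through intmax_t first, keeping the negation in signed arithmetic (which some compilers flag) and staying correct even on a hypothetical target where uintmax_t is wider than unsigned long long. A standalone sketch (not from the jemalloc sources) showing the two expansions alongside what strtoumax() actually returns:

#include <inttypes.h>
#include <stdio.h>

#define KUMAX(x) ((uintmax_t)x##ULL)
#define KSMAX(x) ((uintmax_t)(intmax_t)x##LL)

int
main(void)
{
    /*
     * KUMAX(-42) negates the unsigned constant 42ULL; KSMAX(-42) keeps the
     * negation in signed (intmax_t) arithmetic before converting.  Where
     * unsigned long long and uintmax_t have the same width, both print
     * UINTMAX_MAX - 41, which is also what strtoumax("-42", ...) returns.
     */
    printf("KUMAX(-42) = %" PRIuMAX "\n", KUMAX(-42));
    printf("KSMAX(-42) = %" PRIuMAX "\n", KSMAX(-42));
    printf("strtoumax  = %" PRIuMAX "\n", strtoumax("-42", NULL, 0));
    return (0);
}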