URL: https://opencores.org/ocsvn/or1k/or1k/trunk
Subversion Repositories: or1k
Compare Revisions
This comparison shows the changes necessary to convert path
/or1k/trunk/ecos-2.0/packages/services/memalloc from Rev 1254 to Rev 1765.
/common/v2_0/cdl/memalloc.cdl
0,0 → 1,393
# ==================================================================== |
# |
# memalloc.cdl |
# |
# Dynamic memory allocator services configuration data |
# |
# ==================================================================== |
#####ECOSGPLCOPYRIGHTBEGIN#### |
## ------------------------------------------- |
## This file is part of eCos, the Embedded Configurable Operating System. |
## Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc. |
## |
## eCos is free software; you can redistribute it and/or modify it under |
## the terms of the GNU General Public License as published by the Free |
## Software Foundation; either version 2 or (at your option) any later version. |
## |
## eCos is distributed in the hope that it will be useful, but WITHOUT ANY |
## WARRANTY; without even the implied warranty of MERCHANTABILITY or |
## FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
## for more details. |
## |
## You should have received a copy of the GNU General Public License along |
## with eCos; if not, write to the Free Software Foundation, Inc., |
## 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. |
## |
## As a special exception, if other files instantiate templates or use macros |
## or inline functions from this file, or you compile this file and link it |
## with other works to produce a work based on this file, this file does not |
## by itself cause the resulting work to be covered by the GNU General Public |
## License. However the source code for this file must still be made available |
## in accordance with section (3) of the GNU General Public License. |
## |
## This exception does not invalidate any other reasons why a work based on |
## this file might be covered by the GNU General Public License. |
## |
## Alternative licenses for eCos may be arranged by contacting Red Hat, Inc. |
## at http://sources.redhat.com/ecos/ecos-license/ |
## ------------------------------------------- |
#####ECOSGPLCOPYRIGHTEND#### |
# ==================================================================== |
######DESCRIPTIONBEGIN#### |
# |
# Author(s): jlarmour |
# Contributors: |
# Date: 2000-06-02 |
# |
#####DESCRIPTIONEND#### |
# |
# ==================================================================== |
|
cdl_package CYGPKG_MEMALLOC { |
display "Dynamic memory allocation" |
description " |
This package provides memory allocator infrastructure required for |
dynamic memory allocators, including the ISO standard malloc |
interface. It also contains some sample implementations." |
include_dir cyg/memalloc |
compile dlmalloc.cxx memfixed.cxx memvar.cxx \ |
sepmeta.cxx |
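
For orientation, the ISO-level interface this package ultimately provides is the standard malloc()/free()/calloc()/realloc() family plus mallinfo(), as exercised by the heaptest and realloc tests later in this comparison. A minimal usage sketch, assuming CYGPKG_ISOINFRA and the malloc support configured further down this file:

#include <stdlib.h>             // malloc(), free(), mallinfo() via CYGPKG_ISOINFRA
#include <cyg/infra/diag.h>     // diag_printf()

static void iso_malloc_sketch( void )
{
    // mallinfo() reports on the configured heap(s); arena, fordblks and
    // maxfree are the fields the tests later in this listing rely on.
    struct mallinfo info = mallinfo();
    diag_printf( "arena=%d free=%d maxfree=%d\n",
                 info.arena, info.fordblks, info.maxfree );

    char *p = (char *)malloc( 64 );     // served from the underlying allocator
    if ( NULL != p )
        free( p );
}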
|
# ==================================================================== |
|
cdl_component CYGPKG_MEMALLOC_ALLOCATORS { |
display "Memory allocator implementations" |
flavor none |
no_define |
description " |
This component contains configuration options related to the |
various memory allocators available." |
|
cdl_component CYGPKG_MEMALLOC_ALLOCATOR_FIXED { |
display "Fixed block allocator" |
flavor none |
no_define |
description " |
This component contains configuration options related to the |
fixed block memory allocator." |
|
cdl_option CYGSEM_MEMALLOC_ALLOCATOR_FIXED_THREADAWARE { |
display "Make thread safe" |
active_if CYGPKG_KERNEL |
default_value 1 |
description " |
With this option enabled, this allocator will be |
made thread-safe. Additionally allocation functions |
are made available that allow a thread to wait |
until memory is available." |
} |
} |
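
A minimal sketch of using the fixed block allocator that these options configure; it assumes the Cyg_Mempool_Fixed interface declared in <cyg/memalloc/memfixed.hxx>, with the blocking alloc() only present when the thread-safe option above is enabled (the kernel C API wrapper for the same allocator appears in the kmemfix1 test later in this comparison):

#include <pkgconf/memalloc.h>
#include <cyg/infra/cyg_type.h>        // cyg_uint8
#include <cyg/memalloc/memfixed.hxx>   // Cyg_Mempool_Fixed

static cyg_uint8 fixed_arena[ 10240 ];

// (base, size, block size): signature assumed from the memfix tests
// in this package; the arena is carved into fixed 128-byte blocks.
static Cyg_Mempool_Fixed fixpool( fixed_arena, sizeof(fixed_arena), 128 );

static void fixed_pool_sketch( void )
{
    cyg_uint8 *p = fixpool.try_alloc();     // non-blocking; NULL when exhausted
    if ( NULL != p )
        fixpool.free( p );
#ifdef CYGSEM_MEMALLOC_ALLOCATOR_FIXED_THREADAWARE
    p = fixpool.alloc();                    // blocks until a block is available
    fixpool.free( p );
#endif
}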
|
cdl_component CYGPKG_MEMALLOC_ALLOCATOR_VARIABLE { |
display "Simple variable block allocator" |
flavor none |
no_define |
description " |
This component contains configuration options related to the |
simple variable block memory allocator. This allocator is not |
very fast, and in particular does not scale well with large |
numbers of allocations. It is however very compact in terms of |
code size and does not have very much overhead per allocation." |
|
cdl_option CYGSEM_MEMALLOC_ALLOCATOR_VARIABLE_THREADAWARE { |
display "Make thread safe" |
active_if CYGPKG_KERNEL |
default_value 1 |
description " |
With this option enabled, this allocator will be |
made thread-safe. Additionally allocation functions |
are made available that allow a thread to wait |
until memory is available." |
} |
|
cdl_option CYGSEM_MEMALLOC_ALLOCATOR_VARIABLE_COALESCE { |
display "Coalesce memory" |
default_value 1 |
description " |
The variable-block memory allocator can perform coalescing |
of memory whenever the application code releases memory back |
to the pool. This coalescing reduces the possibility of |
memory fragmentation problems, but involves extra code and |
processor cycles." |
} |
} |
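
A minimal sketch of the simple variable block allocator, assuming the Cyg_Mempool_Variable interface in <cyg/memalloc/memvar.hxx> follows the same try_alloc(size)/free(pointer, size) style that the sepmeta tests later in this comparison use:

#include <cyg/infra/cyg_type.h>        // cyg_uint8
#include <cyg/memalloc/memvar.hxx>     // Cyg_Mempool_Variable

static cyg_uint8 var_arena[ 10240 ];

// Constructor arguments (base, size) are an assumption based on the
// other allocator classes in this package.
static Cyg_Mempool_Variable varpool( var_arena, sizeof(var_arena) );

static void var_pool_sketch( void )
{
    cyg_uint8 *p = varpool.try_alloc( 200 );    // NULL if no block large enough
    if ( NULL != p )
        varpool.free( p, 200 );                 // neighbouring free blocks are
                                                // merged when COALESCE is enabled
}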
|
cdl_component CYGPKG_MEMALLOC_ALLOCATOR_DLMALLOC { |
display "Doug Lea's malloc" |
flavor none |
description " |
This component contains configuration options related to the |
port of Doug Lea's memory allocator, normally known as |
dlmalloc. dlmalloc has a reputation for being both fast |
and space-conserving, as well as resisting fragmentation well. |
It is a common choice for a general purpose allocator and |
has been used in both newlib and Linux glibc." |
|
cdl_option CYGDBG_MEMALLOC_ALLOCATOR_DLMALLOC_DEBUG { |
display "Debug build" |
requires CYGDBG_USE_ASSERTS |
default_value { 0 != CYGDBG_USE_ASSERTS } |
description " |
Doug Lea's malloc implementation has substantial amounts |
of internal checking in order to verify the operation |
and consistency of the allocator. However this imposes |
substantial overhead on each operation. Therefore this |
checking may be individually disabled." |
} |
|
cdl_option CYGIMP_MEMALLOC_ALLOCATOR_DLMALLOC_THREADAWARE { |
display "Make thread safe" |
active_if CYGPKG_KERNEL |
requires CYGPKG_KERNEL |
default_value 1 |
description " |
With this option enabled, this allocator will be |
made thread-safe. Additionally allocation functions |
are made available that allow a thread to wait |
until memory is available." |
} |
|
cdl_option CYGIMP_MEMALLOC_ALLOCATOR_DLMALLOC_SAFE_MULTIPLE { |
display "Support more than one instance" |
default_value 1 |
description " |
Having this option disabled allows important |
implementation structures to be declared as a single |
static instance, allowing faster access. However this |
would fail if there is more than one instance of |
the dlmalloc allocator class. Therefore this option can |
be enabled if multiple instances are required. Note: as |
a special case, if this allocator is used as the |
implementation of malloc, and it can be determined there |
is more than one malloc pool, then this option will be |
silently enabled." |
} |
|
cdl_option CYGIMP_MEMALLOC_ALLOCATOR_DLMALLOC_USE_MEMCPY { |
display "Use system memcpy() and memset()" |
requires CYGPKG_ISOINFRA |
default_value { 0 != CYGPKG_ISOINFRA } |
description " |
This may be used to control whether memset() and memcpy() |
are used within the implementation. The alternative is |
to use some macro equivalents, which some people report |
are faster in some circumstances." |
} |
} |
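
A minimal sketch of instantiating the dlmalloc allocator directly, which is the situation CYGIMP_MEMALLOC_ALLOCATOR_DLMALLOC_SAFE_MULTIPLE exists for; the (base, size) constructor and try_alloc()/free() calls are assumed to follow the same pattern as the other allocator classes in this package:

#include <cyg/infra/cyg_type.h>        // cyg_uint8
#include <cyg/memalloc/dlmalloc.hxx>   // Cyg_Mempool_dlmalloc

static cyg_uint8 heap_a[ 32768 ];
static cyg_uint8 heap_b[ 32768 ];

// Two independent pools; SAFE_MULTIPLE must be enabled for this to be valid.
static Cyg_Mempool_dlmalloc pool_a( heap_a, sizeof(heap_a) );
static Cyg_Mempool_dlmalloc pool_b( heap_b, sizeof(heap_b) );

static void dlmalloc_sketch( void )
{
    cyg_uint8 *p = pool_a.try_alloc( 1000 );
    if ( NULL != p )
        pool_a.free( p, 1000 );
}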
|
cdl_component CYGPKG_MEMALLOC_ALLOCATOR_SEPMETA { |
display "Variable block allocator with separate metadata" |
flavor none |
no_define |
description " |
This component contains configuration options related to the |
variable block memory allocator with separate metadata." |
|
cdl_option CYGSEM_MEMALLOC_ALLOCATOR_SEPMETA_THREADAWARE { |
display "Make thread safe" |
active_if CYGPKG_KERNEL |
default_value 1 |
description " |
With this option enabled, this allocator will be |
made thread-safe. Additionally allocation functions |
are made available that allow a thread to wait |
until memory is available." |
} |
} |
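
The sepmeta1 and sepmeta2 tests later in this comparison exercise this allocator directly; in outline, the pool is constructed from separate data and metadata regions plus an alignment, so the allocator never writes its bookkeeping into the data area:

#include <cyg/infra/cyg_type.h>        // cyg_uint8
#include <cyg/memalloc/sepmeta.hxx>    // Cyg_Mempool_Sepmeta

static cyg_uint8 data_region[ 10240 ];
static cyg_uint8 meta_region[ 2048 ];

// (data base, data size, alignment, metadata base, metadata size),
// as in the sepmeta1 test below.
static Cyg_Mempool_Sepmeta seppool( data_region, sizeof(data_region), 8,
                                    meta_region, sizeof(meta_region) );

static void sepmeta_sketch( void )
{
    cyg_uint8 *p = seppool.try_alloc( 100 );
    if ( NULL != p )
        seppool.free( p, 100 );
}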
} |
|
cdl_option CYGFUN_MEMALLOC_KAPI { |
display "Kernel C API support for memory allocation" |
active_if CYGPKG_KERNEL |
default_value CYGFUN_KERNEL_API_C |
description " |
This option must be enabled to provide the extensions required |
to support integration into the kernel C API." |
compile kapi.cxx |
} |
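
With this option enabled, the memory pools become reachable from the kernel C API, as the kmemfix1 test later in this comparison shows; a cut-down version of what it does:

#include <cyg/infra/cyg_type.h>        // cyg_uint8
#include <cyg/kernel/kapi.h>           // cyg_mempool_fix_*

#define POOLSIZE 10240

static cyg_uint8 pool_mem[ POOLSIZE ];
static cyg_mempool_fix pool_obj;
static cyg_handle_t pool;

static void kapi_pool_sketch( void )
{
    // 100-byte blocks, as in kmemfix1 below
    cyg_mempool_fix_create( pool_mem, POOLSIZE, 100, &pool, &pool_obj );

    cyg_uint8 *p = (cyg_uint8 *)cyg_mempool_fix_try_alloc( pool );
    if ( NULL != p )
        cyg_mempool_fix_free( pool, p );
}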
|
cdl_option CYGSEM_MEMALLOC_MALLOC_ZERO_RETURNS_NULL { |
display "malloc(0) returns NULL" |
default_value 0 |
description " |
This option controls the behavior of malloc(0) (or calloc() with |
either argument 0). The standard permits either a NULL pointer |
or a unique pointer to be returned. Enabling this option |
forces a NULL pointer to be returned." |
} |
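
In practice portable code should accept either behavior; a small illustration of the difference, assuming the configuration macro of the same name as the option above:

#include <pkgconf/memalloc.h>
#include <stdlib.h>

static void malloc_zero_sketch( void )
{
    void *p = malloc( 0 );
#ifdef CYGSEM_MEMALLOC_MALLOC_ZERO_RETURNS_NULL
    // With the option enabled, p is always NULL here.
#else
    // Otherwise p may be NULL or a unique pointer that must be freed.
#endif
    free( p );          // free(NULL) is a safe no-op either way
}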
|
cdl_component CYGPKG_MEMALLOC_MALLOC_ALLOCATORS { |
display "malloc() and supporting allocators" |
flavor bool |
active_if CYGPKG_ISOINFRA |
implements CYGINT_ISO_MALLOC |
implements CYGINT_ISO_MALLINFO |
default_value 1 |
compile malloc.cxx |
description " |
This component enables support for dynamic memory |
allocation as supplied by the functions malloc(), |
free(), calloc() and realloc(). As these |
functions are often used, but can have quite an |
overhead, disabling them here can ensure they |
cannot even be used accidentally when static |
allocation is preferred. Within this component are |
various allocators that can be selected for use |
as the underlying implementation of the dynamic |
allocation functions." |
|
make -priority 50 { |
heapgeninc.tcl : <PACKAGE>/src/heapgen.cpp |
$(CC) $(CFLAGS) $(INCLUDE_PATH) -Wp,-MD,heapgen.tmp -E $< -o $@ |
@sed -e '/^ *\\/d' -e "s#.*: #$@: #" heapgen.tmp > $(notdir $@).deps |
@rm heapgen.tmp |
} |
|
# FIXME this should have a dependency on mlt_headers, but CDL doesn't |
# permit custom build rules depending on phony targets |
# FIXME we workaround an NT cygtclsh80 bug by cd'ing into the |
# correct dir and running heapgen.tcl from there rather than passing |
# an absolute path. |
make -priority 50 { |
heaps.cxx : heapgeninc.tcl <PACKAGE>/src/heapgen.tcl |
XPWD=`pwd` ; cd $(REPOSITORY)/$(PACKAGE)/src ; sh heapgen.tcl "$(PREFIX)" "$$XPWD" |
@cp heaps.hxx "$(PREFIX)"/include/pkgconf/heaps.hxx |
@chmod u+w "$(PREFIX)"/include/pkgconf/heaps.hxx |
} |
|
make_object { |
heaps.o.d : heaps.cxx |
$(CC) $(CFLAGS) $(INCLUDE_PATH) -Wp,-MD,heaps.tmp -c -o $(OBJECT_PREFIX)_$(notdir $(@:.o.d=.o)) $< |
@sed -e '/^ *\\/d' -e "s#.*: #$@: #" heaps.tmp > $@ |
@rm heaps.tmp |
} |
|
cdl_component CYGBLD_MEMALLOC_MALLOC_EXTERNAL_HEAP_H { |
display "Use external heap definition" |
flavor booldata |
default_value 0 |
description "This option allows other components in the |
system to override the default system |
provision of heap memory pools. This should |
be set to a header which provides the equivalent |
definitions to <pkgconf/heaps.hxx>." |
} |
|
cdl_interface CYGINT_MEMALLOC_MALLOC_ALLOCATORS { |
display "malloc() allocator implementations" |
requires { CYGINT_MEMALLOC_MALLOC_ALLOCATORS == 1 } |
no_define |
} |
|
cdl_option CYGBLD_MEMALLOC_MALLOC_IMPLEMENTATION_HEADER { |
display "malloc() implementation instantiation data" |
flavor data |
description " |
Memory allocator implementations that are capable of being |
used underneath malloc() must be instantiated. The code |
to do this is set in this option. It is only intended to |
be set by the implementation, not the user." |
# default corresponds to the default allocator |
default_value {"<cyg/memalloc/dlmalloc.hxx>"} |
} |
|
cdl_option CYGIMP_MEMALLOC_MALLOC_VARIABLE_SIMPLE { |
display "Simple variable block implementation" |
description "This causes malloc() to use the simple |
variable block allocator." |
default_value 0 |
implements CYGINT_MEMALLOC_MALLOC_ALLOCATORS |
requires { CYGBLD_MEMALLOC_MALLOC_IMPLEMENTATION_HEADER == \ |
"<cyg/memalloc/memvar.hxx>" } |
requires CYGSEM_MEMALLOC_ALLOCATOR_VARIABLE_COALESCE |
} |
|
cdl_option CYGIMP_MEMALLOC_MALLOC_DLMALLOC { |
display "Doug Lea's malloc implementation" |
description "This causes malloc() to use a version of Doug Lea's |
malloc (dlmalloc) as the underlying implementation." |
default_value 1 |
implements CYGINT_MEMALLOC_MALLOC_ALLOCATORS |
requires { CYGBLD_MEMALLOC_MALLOC_IMPLEMENTATION_HEADER == \ |
"<cyg/memalloc/dlmalloc.hxx>" } |
} |
} |
cdl_option CYGNUM_MEMALLOC_FALLBACK_MALLOC_POOL_SIZE { |
display "Size of the fallback dynamic memory pool in bytes" |
flavor data |
legal_values 32 to 0x7fffffff |
default_value 16384 |
description " |
If *no* heaps are configured in your memory layout, |
dynamic memory allocation by |
malloc() and calloc() must be from a fixed-size, |
contiguous memory pool (note here that it is the |
pool that is of a fixed size, but malloc() is still |
able to allocate variable sized chunks of memory |
from it). This option is the size |
of that pool, in bytes. Note that not all of |
this is available for programs to |
use - some is needed for internal information |
about memory regions, and some may be lost to |
ensure that memory allocation only returns |
memory aligned on word (or double word) |
boundaries - a very common architecture |
constraint." |
} |
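
A sketch of how this value would typically be changed, assuming the usual eCos configuration savefile (ecos.ecc) syntax rather than any interface specific to this package:

cdl_option CYGNUM_MEMALLOC_FALLBACK_MALLOC_POOL_SIZE {
    # Enlarge the fallback pool to 64KiB when no heaps are provided
    # by the memory layout.
    user_value 65536
};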
# ==================================================================== |
|
cdl_component CYGPKG_MEMALLOC_OPTIONS { |
display "Common memory allocator package build options" |
flavor none |
no_define |
description " |
Package specific build options including control over |
compiler flags used only in building this package, |
and details of which tests are built." |
|
cdl_option CYGPKG_MEMALLOC_CFLAGS_ADD { |
display "Additional compiler flags" |
flavor data |
no_define |
default_value { "" } |
description " |
This option modifies the set of compiler flags for |
building this package. These flags are used in addition |
to the set of global flags." |
} |
|
cdl_option CYGPKG_MEMALLOC_CFLAGS_REMOVE { |
display "Suppressed compiler flags" |
flavor data |
no_define |
default_value { "" } |
description " |
This option modifies the set of compiler flags for |
building this package. These flags are removed from |
the set of global flags if present." |
} |
|
cdl_option CYGPKG_MEMALLOC_TESTS { |
display "Tests" |
flavor data |
no_define |
calculated { "tests/dlmalloc1 tests/dlmalloc2 tests/heaptest tests/kmemfix1 tests/kmemvar1 tests/malloc1 tests/malloc2 tests/malloc3 tests/malloc4 tests/memfix1 tests/memfix2 tests/memvar1 tests/memvar2 tests/realloc tests/sepmeta1 tests/sepmeta2" } |
description " |
This option specifies the set of tests for this package." |
} |
} |
} |
|
# ==================================================================== |
# EOF memalloc.cdl |
/common/v2_0/tests/sepmeta1.cxx
0,0 → 1,225
//========================================================================== |
// |
// sepmeta1.cxx |
// |
// Variable memory pool with separate metadata test 1 |
// |
//========================================================================== |
//####ECOSGPLCOPYRIGHTBEGIN#### |
// ------------------------------------------- |
// This file is part of eCos, the Embedded Configurable Operating System. |
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc. |
// |
// eCos is free software; you can redistribute it and/or modify it under |
// the terms of the GNU General Public License as published by the Free |
// Software Foundation; either version 2 or (at your option) any later version. |
// |
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY |
// WARRANTY; without even the implied warranty of MERCHANTABILITY or |
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
// for more details. |
// |
// You should have received a copy of the GNU General Public License along |
// with eCos; if not, write to the Free Software Foundation, Inc., |
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. |
// |
// As a special exception, if other files instantiate templates or use macros |
// or inline functions from this file, or you compile this file and link it |
// with other works to produce a work based on this file, this file does not |
// by itself cause the resulting work to be covered by the GNU General Public |
// License. However the source code for this file must still be made available |
// in accordance with section (3) of the GNU General Public License. |
// |
// This exception does not invalidate any other reasons why a work based on |
// this file might be covered by the GNU General Public License. |
// |
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc. |
// at http://sources.redhat.com/ecos/ecos-license/ |
// ------------------------------------------- |
//####ECOSGPLCOPYRIGHTEND#### |
//========================================================================== |
//#####DESCRIPTIONBEGIN#### |
// |
// Author(s): dsm, jlarmour |
// Contributors: |
// Date: 2001-06-28 |
// Description: Tests basic variable memory pool functionality |
//####DESCRIPTIONEND#### |
|
#include <pkgconf/memalloc.h> |
#include <pkgconf/system.h> |
|
#ifdef CYGPKG_KERNEL |
#include <pkgconf/kernel.h> |
|
#include <cyg/kernel/sched.hxx> // Cyg_Scheduler::start() |
#include <cyg/kernel/thread.hxx> // Cyg_Thread |
|
#include <cyg/kernel/sched.inl> |
#include <cyg/kernel/thread.inl> |
|
#include <cyg/kernel/timer.hxx> // Cyg_Timer |
#include <cyg/kernel/clock.inl> // Cyg_Clock |
|
#define NTHREADS 2 |
#include "testaux.hxx" |
|
#endif |
|
#include <cyg/memalloc/sepmeta.hxx> |
|
#include <cyg/infra/testcase.h> |
|
static const cyg_int32 memsize = 10240; |
static const cyg_int32 metadatasize = 2048; |
|
static cyg_uint8 mem[2][memsize]; |
static cyg_uint8 metadata[2][metadatasize]; |
|
static Cyg_Mempool_Sepmeta mempool0(mem[0], memsize, 8, |
metadata[0], metadatasize); |
|
static Cyg_Mempool_Sepmeta mempool1(mem[1], memsize, 8, |
metadata[1], metadatasize); |
|
|
static void check_in_mp0(cyg_uint8 *p, cyg_int32 size) |
{ |
CYG_TEST_CHECK(NULL != p, |
"Allocation failed"); |
CYG_TEST_CHECK(mem[0] <= p && p+size <= mem[1], |
"Block outside memory pool"); |
} |
|
|
static void entry0( CYG_ADDRWORD data ) |
{ |
cyg_int32 f0,f1,f2,t0; |
cyg_uint8 *p0, *p1; |
cyg_int32 most_of_mem=memsize/4*3; |
Cyg_Mempool_Status stat; |
|
mempool0.get_status( CYG_MEMPOOL_STAT_ORIGBASE| |
CYG_MEMPOOL_STAT_BLOCKSIZE| |
CYG_MEMPOOL_STAT_MAXFREE| |
CYG_MEMPOOL_STAT_ORIGSIZE, stat ); |
|
CYG_TEST_CHECK(mem[0] == stat.origbase, "get_status: base wrong"); |
CYG_TEST_CHECK(memsize == stat.origsize, "get_status: size wrong"); |
|
CYG_TEST_CHECK(0 < stat.maxfree && stat.maxfree <= stat.origsize, |
"get_status: maxfree wildly wrong"); |
|
CYG_TEST_CHECK(-1 == stat.blocksize, "blocksize wrong" ); |
|
mempool0.get_status( CYG_MEMPOOL_STAT_TOTALFREE| |
CYG_MEMPOOL_STAT_ARENASIZE, stat ); |
t0 = stat.arenasize; |
CYG_TEST_CHECK(t0 > 0, "Negative total memory" ); |
f0 = stat.totalfree; |
CYG_TEST_CHECK(f0 > 0, "Negative free memory" ); |
CYG_TEST_CHECK(t0 <= memsize, "get_totalsize: Too much memory"); |
CYG_TEST_CHECK(f0 <= t0 , "More memory free than possible" ); |
|
mempool0.get_status( CYG_MEMPOOL_STAT_WAITING, stat ); |
CYG_TEST_CHECK( !stat.waiting, |
"Thread waiting for memory; there shouldn't be"); |
|
CYG_TEST_CHECK( NULL == mempool0.try_alloc(memsize+1), |
"Managed to allocate too much memory"); |
|
#ifdef CYGSEM_MEMALLOC_ALLOCATOR_SEPMETA_THREADAWARE |
p0 = mempool0.alloc(most_of_mem); |
#else |
p0 = mempool0.try_alloc(most_of_mem); |
#endif |
check_in_mp0(p0, most_of_mem); |
|
mempool0.get_status( CYG_MEMPOOL_STAT_TOTALFREE, stat ); |
f1 = stat.totalfree; |
CYG_TEST_CHECK(f1 > 0, "Negative free memory" ); |
CYG_TEST_CHECK(f1 < f0, "Free memory didn't decrease after allocation" ); |
|
CYG_TEST_CHECK( NULL == mempool0.try_alloc(most_of_mem), |
"Managed to allocate too much memory"); |
|
CYG_TEST_CHECK(mempool0.free(p0, most_of_mem), "Couldn't free"); |
|
mempool0.get_status( CYG_MEMPOOL_STAT_TOTALFREE, stat ); |
f2 = stat.totalfree; |
CYG_TEST_CHECK(f2 > f1, "Free memory didn't increase after free" ); |
|
// should be able to reallocate now memory is free |
p0 = mempool0.try_alloc(most_of_mem); |
check_in_mp0(p0, most_of_mem); |
|
p1 = mempool0.try_alloc(10); |
check_in_mp0(p1, 10); |
|
CYG_TEST_CHECK(p1+10 <= p0 || p1 >= p0+most_of_mem, |
"Ranges of allocated memory overlap"); |
|
CYG_TEST_CHECK(mempool0.free(p0, 0), "Couldn't free"); |
CYG_TEST_CHECK(mempool0.free(p1, 10), "Couldn't free"); |
|
#ifdef CYGSEM_MEMALLOC_ALLOCATOR_SEPMETA_THREADAWARE |
# ifdef CYGFUN_KERNEL_THREADS_TIMER |
// This shouldn't have to wait |
p0 = mempool0.alloc(most_of_mem, |
Cyg_Clock::real_time_clock->current_value() + 100000); |
check_in_mp0(p0, most_of_mem); |
p1 = mempool0.alloc(most_of_mem, |
Cyg_Clock::real_time_clock->current_value() + 2); |
CYG_TEST_CHECK(NULL == p1, "Timed alloc unexpectedly worked"); |
p1 = mempool0.alloc(10, |
Cyg_Clock::real_time_clock->current_value() + 2); |
check_in_mp0(p1, 10); |
|
// Expect thread 1 to have run while processing previous timed |
// allocation. It should therefore be waiting. |
mempool1.get_status( CYG_MEMPOOL_STAT_WAITING, stat ); |
CYG_TEST_CHECK(stat.waiting, "There should be a thread waiting"); |
# endif |
#endif |
|
CYG_TEST_PASS_FINISH("Sepmeta memory pool 1 OK"); |
} |
|
#ifdef CYGSEM_MEMALLOC_ALLOCATOR_SEPMETA_THREADAWARE |
static void entry1( CYG_ADDRWORD data ) |
{ |
mempool1.alloc(memsize+1); |
CYG_TEST_FAIL("Oversized alloc returned"); |
} |
#endif |
|
void sepmeta1_main( void ) |
{ |
CYG_TEST_INIT(); |
CYG_TEST_INFO("Starting Seperate metadata pool 1 test"); |
|
#ifdef CYGSEM_MEMALLOC_ALLOCATOR_SEPMETA_THREADAWARE |
new_thread(entry0, 0); |
new_thread(entry1, 1); |
|
Cyg_Scheduler::start(); |
#elif defined(CYGPKG_KERNEL) |
new_thread(entry0, 0); |
|
Cyg_Scheduler::start(); |
#else |
entry0(0); |
#endif |
|
CYG_TEST_FAIL_FINISH("Not reached"); |
} |
|
externC void |
cyg_start( void ) |
{ |
#ifdef CYGSEM_HAL_STOP_CONSTRUCTORS_ON_FLAG |
cyg_hal_invoke_constructors(); |
#endif |
sepmeta1_main(); |
} |
// EOF sepmeta1.cxx |
/common/v2_0/tests/sepmeta2.cxx
0,0 → 1,162
//========================================================================== |
// |
// sepmeta2.cxx |
// |
// Variable memory pool with separate metadata test 2 |
// |
//========================================================================== |
//####ECOSGPLCOPYRIGHTBEGIN#### |
// ------------------------------------------- |
// This file is part of eCos, the Embedded Configurable Operating System. |
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc. |
// |
// eCos is free software; you can redistribute it and/or modify it under |
// the terms of the GNU General Public License as published by the Free |
// Software Foundation; either version 2 or (at your option) any later version. |
// |
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY |
// WARRANTY; without even the implied warranty of MERCHANTABILITY or |
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
// for more details. |
// |
// You should have received a copy of the GNU General Public License along |
// with eCos; if not, write to the Free Software Foundation, Inc., |
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. |
// |
// As a special exception, if other files instantiate templates or use macros |
// or inline functions from this file, or you compile this file and link it |
// with other works to produce a work based on this file, this file does not |
// by itself cause the resulting work to be covered by the GNU General Public |
// License. However the source code for this file must still be made available |
// in accordance with section (3) of the GNU General Public License. |
// |
// This exception does not invalidate any other reasons why a work based on |
// this file might be covered by the GNU General Public License. |
// |
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc. |
// at http://sources.redhat.com/ecos/ecos-license/ |
// ------------------------------------------- |
//####ECOSGPLCOPYRIGHTEND#### |
//========================================================================== |
//#####DESCRIPTIONBEGIN#### |
// |
// Author(s): dsm, jlarmour |
// Contributors: |
// Date: 2001-06-28 |
// Description: test allocation and freeing in variable memory pools |
//####DESCRIPTIONEND#### |
|
#include <pkgconf/memalloc.h> |
#include <pkgconf/system.h> |
|
#ifdef CYGPKG_KERNEL |
#include <pkgconf/kernel.h> |
|
#include <cyg/kernel/sched.hxx> // Cyg_Scheduler::start() |
#include <cyg/kernel/thread.hxx> // Cyg_Thread |
#include <cyg/kernel/thread.inl> |
#include <cyg/kernel/sema.hxx> |
|
#include <cyg/kernel/sched.inl> |
|
#define NTHREADS 1 |
#include "testaux.hxx" |
|
#endif |
|
#include <cyg/memalloc/sepmeta.hxx> |
|
#include <cyg/infra/testcase.h> |
|
static const cyg_int32 memsize = 10240; |
static const cyg_int32 metadatasize = 2048; |
|
static cyg_uint8 mem[memsize]; |
static cyg_uint8 metadata[metadatasize]; |
|
static Cyg_Mempool_Sepmeta mempool(mem, memsize, 8, |
metadata, metadatasize); |
|
#define NUM_PTRS 16 // Should be even |
|
static cyg_uint8 *ptr[NUM_PTRS]; |
static cyg_int32 size[NUM_PTRS]; |
|
// We make a number of passes over a table of pointers which point to |
// blocks of allocated memory. The block is freed and a new block |
// allocated. The size and the order of the processing of blocks |
// is varied. |
static void entry( CYG_ADDRWORD data ) |
{ |
cyg_uint32 s = 1; |
|
// The number of passes that can be successfully performed |
// depends on the fragmentation performance of the memory |
// allocator. |
for(cyg_ucount32 passes = 0; passes < 10; passes++) { |
|
|
// The order which the table is processed varies according to |
// stepsize. |
cyg_ucount8 stepsize = (passes*2 + 1) % NUM_PTRS; // odd |
|
|
for(cyg_ucount8 c=0, i=0; c < NUM_PTRS; c++) { |
i = (i+stepsize) % NUM_PTRS; |
if(ptr[i]) { |
for(cyg_ucount32 j=size[i];j--;) { |
CYG_TEST_CHECK(ptr[i][j]==i, "Memory corrupted"); |
} |
CYG_TEST_CHECK(mempool.free(ptr[i], size[i]), |
"bad free"); |
} |
s = (s*2 + 17) % 100; // size is always odd, therefore non-zero |
ptr[i] = mempool.try_alloc(s); |
size[i] = s; |
|
CYG_TEST_CHECK(NULL != ptr[i], "Memory pool not big enough"); |
CYG_TEST_CHECK(mem<=ptr[i] && ptr[i]+s < mem+memsize, |
"Allocated region not within pool"); |
|
// Scribble over memory to check whether region overlaps |
// with other regions. The contents of the memory are |
// checked on freeing. This also tests that the memory |
// does not overlap with allocator memory structures. |
for(cyg_ucount32 j=size[i];j--;) { |
ptr[i][j]=i; |
} |
} |
} |
|
CYG_TEST_PASS_FINISH("Sepmeta memory pool 2 OK"); |
} |
|
|
void sepmeta2_main( void ) |
{ |
CYG_TEST_INIT(); |
CYG_TEST_INFO("Starting Seperate metadata memory pool 2 test"); |
|
for(cyg_ucount32 i = 0; i<NUM_PTRS; i++) { |
ptr[i] = NULL; |
} |
|
#ifdef CYGPKG_KERNEL |
new_thread(entry, 0); |
Cyg_Scheduler::start(); |
#else |
entry(0); |
#endif |
|
CYG_TEST_FAIL_FINISH("Not reached"); |
} |
|
externC void |
cyg_start( void ) |
{ |
#ifdef CYGSEM_HAL_STOP_CONSTRUCTORS_ON_FLAG |
cyg_hal_invoke_constructors(); |
#endif |
sepmeta2_main(); |
} |
// EOF sepmeta2.cxx |
/common/v2_0/tests/malloc4.cxx
0,0 → 1,396
//================================================================= |
// |
// malloc4.cxx |
// |
// Stress test malloc(), calloc(), realloc() and free() |
// |
//================================================================= |
//####ECOSGPLCOPYRIGHTBEGIN#### |
// ------------------------------------------- |
// This file is part of eCos, the Embedded Configurable Operating System. |
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc. |
// |
// eCos is free software; you can redistribute it and/or modify it under |
// the terms of the GNU General Public License as published by the Free |
// Software Foundation; either version 2 or (at your option) any later version. |
// |
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY |
// WARRANTY; without even the implied warranty of MERCHANTABILITY or |
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
// for more details. |
// |
// You should have received a copy of the GNU General Public License along |
// with eCos; if not, write to the Free Software Foundation, Inc., |
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. |
// |
// As a special exception, if other files instantiate templates or use macros |
// or inline functions from this file, or you compile this file and link it |
// with other works to produce a work based on this file, this file does not |
// by itself cause the resulting work to be covered by the GNU General Public |
// License. However the source code for this file must still be made available |
// in accordance with section (3) of the GNU General Public License. |
// |
// This exception does not invalidate any other reasons why a work based on |
// this file might be covered by the GNU General Public License. |
// |
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc. |
// at http://sources.redhat.com/ecos/ecos-license/ |
// ------------------------------------------- |
//####ECOSGPLCOPYRIGHTEND#### |
//================================================================= |
//#####DESCRIPTIONBEGIN#### |
// |
// Author(s): jlarmour |
// Contributors: |
// Date: 2000-05-30 |
// Description: Contains a rigorous multithreaded test for malloc(), |
// calloc(), realloc() and free() functions |
// |
// |
//####DESCRIPTIONEND#### |
|
// #define DEBUGTEST |
|
// INCLUDES |
|
#include <pkgconf/system.h> |
#include <pkgconf/memalloc.h> // config header |
#ifdef CYGPKG_ISOINFRA |
# include <pkgconf/isoinfra.h> |
# include <stdlib.h> |
#endif |
#ifdef CYGPKG_KERNEL |
# include <pkgconf/kernel.h> |
# include <cyg/kernel/thread.hxx> |
# include <cyg/kernel/thread.inl> |
# include <cyg/kernel/sched.hxx> |
# include <cyg/kernel/sched.inl> |
# include <cyg/kernel/sema.hxx> |
#endif |
#include <cyg/infra/testcase.h> |
|
#if !defined(CYGPKG_KERNEL) |
# define NA_MSG "Requires kernel" |
#elif !defined(CYGFUN_KERNEL_THREADS_TIMER) |
# define NA_MSG "Requires thread timers" |
#elif !defined(CYGPKG_ISOINFRA) |
# define NA_MSG "Requires isoinfra package" |
#elif !CYGINT_ISO_MALLOC |
# define NA_MSG "Requires malloc" |
#elif !CYGINT_ISO_MALLINFO |
# define NA_MSG "Requires mallinfo" |
#elif !CYGINT_ISO_RAND |
# define NA_MSG "Requires rand" |
#elif defined(CYGIMP_MEMALLOC_MALLOC_DLMALLOC) && \ |
!defined(CYGIMP_MEMALLOC_ALLOCATOR_DLMALLOC_THREADAWARE) |
# define NA_MSG "Requires thread-safe dlmalloc" |
#elif defined(CYGIMP_MEMALLOC_MALLOC_VARIABLE_SIMPLE) && \ |
!defined(CYGSEM_MEMALLOC_ALLOCATOR_VARIABLE_THREADAWARE) |
# define NA_MSG "Requires thread-safe variable block allocator" |
#endif |
|
#ifdef NA_MSG |
|
externC void |
cyg_start(void) |
{ |
CYG_TEST_INIT(); |
CYG_TEST_NA( NA_MSG ); |
CYG_TEST_FINISH("Done"); |
} |
#else |
//#define DEBUGTEST 1 |
#define NTHREADS 4 |
#include "testaux.hxx" |
|
#include <cyg/infra/diag.h> |
|
Cyg_Counting_Semaphore startsema; |
|
volatile int stopnow = 0; |
|
struct ptr { |
char* volatile p; |
volatile size_t size; |
volatile unsigned char busy; |
}; |
|
#define STRINGIFY1( _x_ ) #_x_ |
#define STRINGIFY( _x_ ) STRINGIFY1( _x_ ) |
|
#define NUM_PTRS 100 |
#define WAITFORMEMDELAYMAX (cyg_test_is_simulator ? 1 : 3) |
#define LOOPDELAYMAX (cyg_test_is_simulator ? 1 : 3) |
#define ITERATIONS (cyg_test_is_simulator ? 10 : 200) |
#define OUTPUTINTERVAL (cyg_test_is_simulator ? 1 : 10) |
|
int iterations = ITERATIONS; |
|
static struct ptr ptrs[ NUM_PTRS ]; |
|
static __inline__ int |
myrand(int limit, unsigned int *seed) |
{ |
int r; |
double l=(double)(limit+1); |
r=(int)( l*rand_r(seed) / (RAND_MAX+1.0) ); |
return r; |
} |
|
size_t memsize; |
|
static void |
fill_with_data( struct ptr *p ) |
{ |
unsigned int i, j; |
for (i=0; i < (p->size/4); i++) |
((unsigned int *)p->p)[i] = (unsigned int)p; |
for ( j=i*4; j < p->size ; j++ ) |
p->p[j] = ((char *)p)[j-i*4]; |
} |
|
static void |
check_data( struct ptr *p ) |
{ |
unsigned int i, j; |
for (i=0; i < (p->size/4); i++) |
CYG_TEST_CHECK( ((unsigned int *)p->p)[i] == (unsigned int)p, |
"Data didn't compare correctly"); |
for ( j=i*4; j < p->size ; j++ ) |
CYG_TEST_CHECK( p->p[j] == ((char *)p)[j-i*4], |
"Data didn't compare correctly"); |
} |
|
static void |
check_zeroes( struct ptr *p ) |
{ |
unsigned int i, j; |
for (i=0; i < (p->size/4); i++) |
CYG_TEST_CHECK( ((int *)p->p)[i] == 0, |
"Zeroed data didn't compare correctly"); |
for ( j=i*4; j < p->size ; j++ ) |
CYG_TEST_CHECK( p->p[j] == 0, |
"Zeroed data didn't compare correctly"); |
} |
|
|
static void |
thrmalloc( CYG_ADDRWORD data ) |
{ |
int r, i; |
void *mem; |
unsigned int seed; |
|
startsema.wait(); |
|
while (!stopnow) { |
r = myrand( NUM_PTRS-1, &seed ); |
|
for (i=r+1; ; i++) { |
Cyg_Scheduler::lock(); |
if (i == NUM_PTRS) |
i=0; |
if (!ptrs[i].busy && (ptrs[i].p == NULL) ) |
break; |
Cyg_Scheduler::unlock(); |
if ( i==r ) { |
Cyg_Thread::self()->delay( myrand(WAITFORMEMDELAYMAX, &seed) ); |
} |
} |
ptrs[i].busy = 1; |
Cyg_Scheduler::unlock(); |
r = myrand(memsize, &seed); |
mem = malloc(r); |
ptrs[i].p = (char *)mem; |
ptrs[i].size = r; |
if ( NULL != mem ) { |
#ifdef DEBUGTEST |
diag_printf("malloc=%08x size=%d\n", mem, r); |
#endif |
fill_with_data( &ptrs[i] ); |
} |
ptrs[i].busy = 0; |
Cyg_Thread::self()->delay( myrand(LOOPDELAYMAX, &seed) ); |
} |
} |
|
static void |
thrcalloc( CYG_ADDRWORD data ) |
{ |
int r, i; |
void *mem; |
unsigned int seed; |
|
startsema.wait(); |
|
while (!stopnow) { |
r = myrand( NUM_PTRS-1, &seed ); |
|
for (i=r+1; ; i++) { |
Cyg_Scheduler::lock(); |
if (i == NUM_PTRS) |
i=0; |
if (!ptrs[i].busy && (ptrs[i].p == NULL) ) |
break; |
Cyg_Scheduler::unlock(); |
if ( i==r ) { |
Cyg_Thread::self()->delay( myrand(WAITFORMEMDELAYMAX, &seed) ); |
} |
} |
ptrs[i].busy = 1; |
Cyg_Scheduler::unlock(); |
r = myrand(memsize, &seed); |
mem = calloc( 1, r ); |
ptrs[i].p = (char *)mem; |
ptrs[i].size = r; |
if ( NULL != mem ) { |
#ifdef DEBUGTEST |
diag_printf("calloc=%08x size=%d\n", mem, r); |
#endif |
check_zeroes( &ptrs[i] ); |
fill_with_data( &ptrs[i] ); |
} |
ptrs[i].busy = 0; |
Cyg_Thread::self()->delay( myrand(LOOPDELAYMAX, &seed) ); |
} |
} |
|
static void |
thrrealloc( CYG_ADDRWORD data ) |
{ |
int r, i; |
void *mem; |
unsigned int seed; |
|
startsema.wait(); |
|
while (!stopnow) { |
r = myrand( NUM_PTRS-1, &seed ); |
|
for (i=r+1; ; i++) { |
Cyg_Scheduler::lock(); |
if (i == NUM_PTRS) |
i=0; |
if (!ptrs[i].busy && (ptrs[i].p != NULL) ) |
break; |
Cyg_Scheduler::unlock(); |
if ( i==r ) { |
Cyg_Thread::self()->delay( myrand(WAITFORMEMDELAYMAX, &seed) ); |
} |
} |
ptrs[i].busy = 1; |
Cyg_Scheduler::unlock(); |
check_data( &ptrs[i] ); |
r = myrand(memsize - 1, &seed) + 1; |
mem = realloc( (void *)ptrs[i].p, r ); |
if ( NULL != mem ) { |
#ifdef DEBUGTEST |
diag_printf("realloc=%08x oldsize=%d newsize=%d\n", mem, ptrs[i].size, r); |
#endif |
ptrs[i].size = r; |
ptrs[i].p = (char *)mem; |
fill_with_data( &ptrs[i] ); |
} |
ptrs[i].busy = 0; |
Cyg_Thread::self()->delay( myrand(LOOPDELAYMAX, &seed) ); |
} |
} |
|
static void |
thrfree( CYG_ADDRWORD data ) |
{ |
int r, i; |
int iter = 0; |
struct mallinfo minfo; |
unsigned int seed; |
|
minfo = mallinfo(); |
memsize = (unsigned long) minfo.maxfree; |
diag_printf("INFO:<Iteration 0, arenasize=%d, space free=%d, maxfree=%d>\n", |
minfo.arena, minfo.fordblks, minfo.maxfree ); |
|
// wake the three threads above. |
startsema.post(); startsema.post(); startsema.post(); |
|
Cyg_Thread::self()->delay(1); |
|
while (1) { |
if ( (iter > 0) && (0 == (iter % OUTPUTINTERVAL)) ) { |
minfo = mallinfo(); |
diag_printf("INFO:<Iteration %d, arenasize=%d, " |
"space free=%d, maxfree=%d>\n", |
iter, minfo.arena, minfo.fordblks, minfo.maxfree ); |
} |
|
if ( iterations == iter++ ) |
stopnow++; |
|
r = myrand( NUM_PTRS-1, &seed ); |
|
for (i=r+1; ; i++) { |
Cyg_Scheduler::lock(); |
if (i >= NUM_PTRS) |
i=0; |
if (!ptrs[i].busy && (ptrs[i].p != NULL) ) |
break; |
Cyg_Scheduler::unlock(); |
if ( i==r ) { |
if ( stopnow ) { |
// we may have gone round all the ptrs even though one |
// or more of them was busy, so check again just for that |
int j; |
for (j=0; j<NUM_PTRS; j++) |
if (ptrs[j].busy) |
break; |
if ( j<NUM_PTRS ) |
continue; |
struct mallinfo minfo; |
|
minfo = mallinfo(); |
diag_printf("INFO:<Iteration %d, arenasize=%d, " |
"space free=%d, maxfree=%d>\n", |
iter, minfo.arena, minfo.fordblks, |
minfo.maxfree ); |
CYG_TEST_PASS_FINISH("malloc4 test completed successfully"); |
} else { |
Cyg_Thread::self()->delay( |
myrand(WAITFORMEMDELAYMAX, &seed) ); |
} |
} |
} |
ptrs[i].busy = 1; |
Cyg_Scheduler::unlock(); |
check_data( &ptrs[i] ); |
#ifdef DEBUGTEST |
diag_printf("about to free %08x\n", ptrs[i].p); |
#endif |
free( (void *)ptrs[i].p ); |
ptrs[i].p = NULL; |
ptrs[i].busy = 0; |
Cyg_Thread::self()->delay( myrand(LOOPDELAYMAX, &seed) ); |
} |
} |
|
|
externC void |
cyg_start(void) |
{ |
#ifdef CYGSEM_HAL_STOP_CONSTRUCTORS_ON_FLAG |
cyg_hal_invoke_constructors(); |
#endif |
CYG_TEST_INIT(); |
CYG_TEST_INFO("Starting malloc4 test"); |
|
new_thread(thrmalloc, 0); |
new_thread(thrcalloc, 1); |
new_thread(thrrealloc, 2); |
new_thread(thrfree, 3); |
|
Cyg_Scheduler::start(); |
|
CYG_TEST_FAIL_FINISH("Not reached"); |
} // cyg_start() |
|
#endif // !NA_MSG |
|
// EOF malloc4.cxx |
/common/v2_0/tests/heaptest.c
0,0 → 1,235
//================================================================= |
// |
// heaptest.c |
// |
// Test all the memory used by heaps to check it's all valid |
// |
//================================================================= |
//####ECOSGPLCOPYRIGHTBEGIN#### |
// ------------------------------------------- |
// This file is part of eCos, the Embedded Configurable Operating System. |
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc. |
// |
// eCos is free software; you can redistribute it and/or modify it under |
// the terms of the GNU General Public License as published by the Free |
// Software Foundation; either version 2 or (at your option) any later version. |
// |
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY |
// WARRANTY; without even the implied warranty of MERCHANTABILITY or |
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
// for more details. |
// |
// You should have received a copy of the GNU General Public License along |
// with eCos; if not, write to the Free Software Foundation, Inc., |
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. |
// |
// As a special exception, if other files instantiate templates or use macros |
// or inline functions from this file, or you compile this file and link it |
// with other works to produce a work based on this file, this file does not |
// by itself cause the resulting work to be covered by the GNU General Public |
// License. However the source code for this file must still be made available |
// in accordance with section (3) of the GNU General Public License. |
// |
// This exception does not invalidate any other reasons why a work based on |
// this file might be covered by the GNU General Public License. |
// |
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc. |
// at http://sources.redhat.com/ecos/ecos-license/ |
// ------------------------------------------- |
//####ECOSGPLCOPYRIGHTEND#### |
//================================================================= |
//#####DESCRIPTIONBEGIN#### |
// |
// Author(s): jlarmour |
// Contributors: |
// Date: 2001-07-17 |
// Description: Tests all memory allocated for use by heaps. |
// |
// |
//####DESCRIPTIONEND#### |
|
// INCLUDES |
|
#include <pkgconf/system.h> |
#include <pkgconf/hal.h> |
#include <pkgconf/memalloc.h> // config header |
#ifdef CYGPKG_ISOINFRA |
# include <pkgconf/isoinfra.h> |
# include <stdlib.h> |
#endif |
#include <cyg/infra/testcase.h> |
|
#if !defined(CYGPKG_ISOINFRA) |
# define NA_MSG "Requires isoinfra package" |
#elif !CYGINT_ISO_MALLOC |
# define NA_MSG "Requires malloc" |
#elif !CYGINT_ISO_MALLINFO |
# define NA_MSG "Requires mallinfo" |
#endif |
|
#ifdef NA_MSG |
|
externC void |
cyg_start(void) |
{ |
CYG_TEST_INIT(); |
CYG_TEST_NA( NA_MSG ); |
CYG_TEST_FINISH("Done"); |
} |
#else |
|
#include <cyg/infra/diag.h> |
|
#define ERRORTHRESHOLD 10 |
#define ITERS (cyg_test_is_simulator ? 1 : 10) |
#define INTALIGNED(_x_) (!((unsigned long)(_x_) & (sizeof(int)-1))) |
|
int |
test_pat(unsigned char *buf, int size, |
unsigned int pat, cyg_bool addrpat, |
const char *testname) |
{ |
unsigned char *bufptr=buf; |
register unsigned int *ibufptr; |
unsigned char *endptr=buf+size; |
register unsigned int *endptra; // int aligned |
int errors=0; |
unsigned char bpat = pat & 0xFF; |
|
endptra = (int *)((unsigned long)endptr & ~(sizeof(int)-1)); |
|
// Set to the pattern |
while (!INTALIGNED(bufptr)) { |
if (addrpat) |
bpat = ((int)bufptr)&0xFF; |
*bufptr++ = bpat; |
} |
|
ibufptr = (unsigned int *)bufptr; |
|
while ( ibufptr < endptra ) { |
if (addrpat) |
pat = (unsigned int)ibufptr; |
*ibufptr++ = pat; |
} |
|
bufptr = (unsigned char *)ibufptr; |
while ( bufptr < endptr ) { |
if (addrpat) |
bpat = ((int)bufptr)&0xFF; |
*bufptr++ = bpat; |
} |
|
// Now compare to the pattern |
bufptr = buf; |
while ( !INTALIGNED(bufptr) ) { |
if (addrpat) |
bpat = ((int)bufptr)&0xFF; |
if ( *bufptr != bpat ) { |
diag_printf( "FAIL:<Memory at 0x%08x: expected 0x%02x, read 0x%02x>\n", |
bufptr, (int)bpat, (int)*bufptr ); |
if ( errors++ == ERRORTHRESHOLD ) |
CYG_TEST_FAIL_FINISH( testname ); |
} |
bufptr++; |
} |
|
ibufptr = (unsigned int *)bufptr; |
|
while ( ibufptr < endptra ) { |
if (addrpat) |
pat = (unsigned int)ibufptr; |
if ( *ibufptr != pat ) { |
diag_printf( "FAIL:<Memory at 0x%08x: expected 0x%08x, read 0x%08x>\n", |
ibufptr, pat, *ibufptr ); |
if ( errors++ == ERRORTHRESHOLD ) |
CYG_TEST_FAIL_FINISH( testname ); |
} |
ibufptr++; |
} |
|
bufptr = (unsigned char *)ibufptr; |
while ( bufptr < endptr ) { |
if (addrpat) |
bpat = ((int)bufptr)&0xFF; |
if ( *bufptr != bpat ) { |
diag_printf( "FAIL:<Memory at 0x%08x: expected 0x%02x, read 0x%02x>\n", |
bufptr, (int)bpat, (int)*bufptr ); |
if ( errors++ == ERRORTHRESHOLD ) |
CYG_TEST_FAIL_FINISH( testname ); |
} |
bufptr++; |
} |
if (errors) |
CYG_TEST_FAIL( testname ); |
else |
CYG_TEST_PASS( testname ); |
return errors; |
} // test_pat() |
|
externC void |
cyg_start(void) |
{ |
unsigned int allonesint=0, checkerboardint1=0, checkerboardint2=0; |
int i; |
int errors=0; |
|
#ifdef CYGSEM_HAL_STOP_CONSTRUCTORS_ON_FLAG |
cyg_hal_invoke_constructors(); |
#endif |
CYG_TEST_INIT(); |
CYG_TEST_INFO("Starting heaptest - testing all memory usable as heap"); |
CYG_TEST_INFO("Any failures reported may indicate failing RAM hardware,"); |
CYG_TEST_INFO("or an invalid memory map"); |
|
for (i=0; i<sizeof(int); i++) { |
allonesint = allonesint << 8; |
allonesint |= 0xFF; |
checkerboardint1 = checkerboardint1 << 8; |
checkerboardint1 |= 0xAA; |
checkerboardint2 = checkerboardint2 << 8; |
checkerboardint2 |= 0x55; |
} |
|
for (;;) { |
struct mallinfo info; |
char *buf; |
|
info = mallinfo(); |
|
if ( info.maxfree <= 0 ) |
break; |
|
buf = malloc(info.maxfree); |
if (!buf) { |
diag_printf("Couldn't malloc %d bytes claimed as available", |
info.maxfree); |
CYG_TEST_FAIL_FINISH("heaptest"); |
} |
|
diag_printf( "INFO:<Testing memory at 0x%08x of size %d for %d iterations>\n", |
buf, info.maxfree, ITERS ); |
for (i=0; i<ITERS; i++) { |
errors += test_pat( buf, info.maxfree, 0, 0, "all zeroes" ); |
errors += test_pat( buf, info.maxfree, allonesint, 0, |
"all ones" ); |
errors += test_pat( buf, info.maxfree, checkerboardint1, 0, |
"checkerboard 1" ); |
errors += test_pat( buf, info.maxfree, checkerboardint2, 0, |
"checkerboard 2" ); |
errors += test_pat( buf, info.maxfree, 0, 1, |
"memory addr" ); |
} |
|
// deliberately don't free so we get the next space |
} |
|
if (errors) |
CYG_TEST_FAIL_FINISH( "heaptest errors found" ); |
else |
CYG_TEST_PASS_FINISH( "heaptest OK" ); |
} // cyg_start() |
|
#endif // !NA_MSG |
|
// EOF heaptest.c |
/common/v2_0/tests/realloc.c
0,0 → 1,196
//================================================================= |
// |
// realloc.c |
// |
// Testcase for C library realloc() |
// |
//================================================================= |
//####ECOSGPLCOPYRIGHTBEGIN#### |
// ------------------------------------------- |
// This file is part of eCos, the Embedded Configurable Operating System. |
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc. |
// |
// eCos is free software; you can redistribute it and/or modify it under |
// the terms of the GNU General Public License as published by the Free |
// Software Foundation; either version 2 or (at your option) any later version. |
// |
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY |
// WARRANTY; without even the implied warranty of MERCHANTABILITY or |
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
// for more details. |
// |
// You should have received a copy of the GNU General Public License along |
// with eCos; if not, write to the Free Software Foundation, Inc., |
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. |
// |
// As a special exception, if other files instantiate templates or use macros |
// or inline functions from this file, or you compile this file and link it |
// with other works to produce a work based on this file, this file does not |
// by itself cause the resulting work to be covered by the GNU General Public |
// License. However the source code for this file must still be made available |
// in accordance with section (3) of the GNU General Public License. |
// |
// This exception does not invalidate any other reasons why a work based on |
// this file might be covered by the GNU General Public License. |
// |
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc. |
// at http://sources.redhat.com/ecos/ecos-license/ |
// ------------------------------------------- |
//####ECOSGPLCOPYRIGHTEND#### |
//================================================================= |
//#####DESCRIPTIONBEGIN#### |
// |
// Author(s): jlarmour |
// Contributors: |
// Date: 2000-04-30 |
// Description: Contains testcode for C library realloc() function |
// |
// |
//####DESCRIPTIONEND#### |
|
|
// INCLUDES |
|
#include <pkgconf/system.h> // Overall system configuration |
#include <pkgconf/memalloc.h> // config header |
#ifdef CYGPKG_ISOINFRA |
# include <pkgconf/isoinfra.h> |
# include <stdlib.h> |
#endif |
#include <cyg/infra/testcase.h> |
|
#if !defined(CYGPKG_ISOINFRA) |
# define NA_MSG "Requires isoinfra package" |
#elif !CYGINT_ISO_MAIN_STARTUP |
# define NA_MSG "Requires main() to be called" |
#elif !CYGINT_ISO_MALLOC |
# define NA_MSG "Requires malloc" |
#elif !CYGINT_ISO_MALLINFO |
# define NA_MSG "Requires mallinfo" |
#endif |
|
#ifdef NA_MSG |
void |
cyg_start(void) |
{ |
CYG_TEST_INIT(); |
CYG_TEST_NA( NA_MSG ); |
CYG_TEST_FINISH("Done"); |
} |
#else |
|
|
// FUNCTIONS |
|
static const char alphabet[]="abcdefghijklmnopqrstuvwxyz{-}[]#';:@~!$^&*()" |
"ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"; |
|
extern int |
cyg_memalloc_maxalloc( void ); |
|
static int |
compare_with_alphabet( char *buf, int size, int offset ) |
{ |
int i, buf_offset; |
|
for (i=offset, buf_offset=0; |
buf_offset < size; |
buf_offset++,i++ ) { |
|
if ( i==sizeof(alphabet)-1 ) |
i=0; |
|
if ( buf[buf_offset] != alphabet[i] ) { |
CYG_TEST_FAIL( "buffer has not retained correct data!"); |
return 0; // fail |
} // if |
} // for |
|
return 1; // success |
} // compare_with_alphabet() |
|
static int |
fill_with_alphabet( char *buf, int size, int offset ) |
{ |
int i, buf_offset; |
|
for (i=offset, buf_offset=0; |
buf_offset < size; |
buf_offset++,i++ ) { |
|
if ( i==sizeof(alphabet)-1 ) |
i=0; |
|
buf[buf_offset] = alphabet[i]; |
|
} // for |
|
return compare_with_alphabet( buf, size, offset); // be sure |
} // fill_with_alphabet() |
|
|
int |
main( int argc, char *argv[] ) |
{ |
char *str; |
int size; |
int poolmax; |
|
CYG_TEST_INIT(); |
|
CYG_TEST_INFO("Starting tests from testcase " __FILE__ " for C library " |
"realloc() function"); |
|
poolmax = mallinfo().maxfree; |
|
if ( poolmax <= 0 ) { |
CYG_TEST_FAIL_FINISH( "Can't determine allocation size to use" ); |
} |
|
size = poolmax/2; |
|
str = (char *)realloc( NULL, size ); |
CYG_TEST_PASS_FAIL( str != NULL, "realloc doing only allocation"); |
CYG_TEST_PASS_FAIL( fill_with_alphabet( str, size, 0 ), |
"allocation usability"); |
|
str = (char *)realloc( str, 0 ); |
CYG_TEST_PASS_FAIL( str == NULL, "realloc doing implicit free" ); |
|
str = (char *)realloc( NULL, size/2 ); |
CYG_TEST_PASS_FAIL( str != NULL, "realloc doing allocation to half size"); |
CYG_TEST_PASS_FAIL( fill_with_alphabet( str, size/2, 5 ), |
"half allocation usability"); |
|
str = (char *)realloc( str, size ); |
CYG_TEST_PASS_FAIL( str != NULL, |
"reallocing allocation back to normal size"); |
CYG_TEST_PASS_FAIL( compare_with_alphabet(str, size/2, 5), |
"after realloc to normal size, old contents kept" ); |
CYG_TEST_PASS_FAIL( fill_with_alphabet( str, size, 3 ), |
"reallocation normal size usability"); |
|
str = (char *)realloc( str, size/4 ); |
CYG_TEST_PASS_FAIL( str != NULL, "reallocing allocation to quarter size"); |
CYG_TEST_PASS_FAIL( compare_with_alphabet(str, size/4, 3), |
"after realloc to quarter size, old contents kept" ); |
CYG_TEST_PASS_FAIL( fill_with_alphabet( str, size/4, 1 ), |
"reallocation quarter size usability"); |
|
CYG_TEST_PASS_FAIL( realloc( str, size*4 ) == NULL, |
"reallocing allocation that is too large" ); |
CYG_TEST_PASS_FAIL( compare_with_alphabet( str, size/4, 1 ), |
"Checking old contents maintained despite failure" ); |
|
str = (char *)realloc( str, 0 ); |
CYG_TEST_PASS_FAIL( str == NULL, "realloc doing implicit free again" ); |
|
CYG_TEST_FINISH("Finished tests from testcase " __FILE__ " for C library " |
"realloc() function"); |
|
return 0; |
} // main() |
|
#endif // ifndef NA_MSG |
|
// EOF realloc.c |
/common/v2_0/tests/kmemfix1.c
0,0 → 1,209
/*========================================================================== |
// |
// kmemfix1.c |
// |
// Kernel C API Fixed memory pool test 1 |
// |
//========================================================================== |
//####ECOSGPLCOPYRIGHTBEGIN#### |
// ------------------------------------------- |
// This file is part of eCos, the Embedded Configurable Operating System. |
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc. |
// |
// eCos is free software; you can redistribute it and/or modify it under |
// the terms of the GNU General Public License as published by the Free |
// Software Foundation; either version 2 or (at your option) any later version. |
// |
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY |
// WARRANTY; without even the implied warranty of MERCHANTABILITY or |
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
// for more details. |
// |
// You should have received a copy of the GNU General Public License along |
// with eCos; if not, write to the Free Software Foundation, Inc., |
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. |
// |
// As a special exception, if other files instantiate templates or use macros |
// or inline functions from this file, or you compile this file and link it |
// with other works to produce a work based on this file, this file does not |
// by itself cause the resulting work to be covered by the GNU General Public |
// License. However the source code for this file must still be made available |
// in accordance with section (3) of the GNU General Public License. |
// |
// This exception does not invalidate any other reasons why a work based on |
// this file might be covered by the GNU General Public License. |
// |
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc. |
// at http://sources.redhat.com/ecos/ecos-license/ |
// ------------------------------------------- |
//####ECOSGPLCOPYRIGHTEND#### |
//========================================================================== |
//#####DESCRIPTIONBEGIN#### |
// |
// Author(s): dsm, jlarmour |
// Contributors: |
// Date: 2000-06-19 |
// Description: Tests basic fixed memory pool functionality |
//####DESCRIPTIONEND#### |
*/ |
|
#include <pkgconf/memalloc.h> |
|
#include <cyg/infra/testcase.h> |
|
#ifdef CYGFUN_MEMALLOC_KAPI |
|
#include <cyg/hal/hal_arch.h> // CYGNUM_HAL_STACK_SIZE_TYPICAL |
|
#include <pkgconf/kernel.h> |
|
#include <cyg/kernel/kapi.h> |
|
#define NTHREADS 2 |
#define STACKSIZE CYGNUM_HAL_STACK_SIZE_TYPICAL |
|
static cyg_handle_t thread[NTHREADS]; |
|
static cyg_thread thread_obj[NTHREADS]; |
static char stack[NTHREADS][STACKSIZE]; |
|
|
#define MEMSIZE 10240 |
|
static cyg_uint8 mem[2][MEMSIZE]; |
|
static cyg_mempool_fix mempool_obj[2]; |
static cyg_handle_t mempool0, mempool1; |
|
static void check_in_mp0(cyg_uint8 *p, cyg_int32 size) |
{ |
CYG_TEST_CHECK(NULL != p, |
"Allocation failed"); |
CYG_TEST_CHECK(mem[0] <= p && p+size < mem[1], |
"Block outside memory pool"); |
} |
|
static void entry0( cyg_addrword_t data ) |
{ |
cyg_uint8 *p0, *p1, *p2; |
cyg_mempool_info info0, info1, info2; |
|
cyg_mempool_fix_get_info(mempool0, &info0); |
CYG_TEST_CHECK(mem[0] == info0.base, "get_arena: base wrong"); |
CYG_TEST_CHECK(MEMSIZE == info0.size, "get_arena: size wrong"); |
|
CYG_TEST_CHECK(0 < info0.maxfree && info0.maxfree <= info0.size, |
"get_arena: maxfree wildly wrong"); |
|
CYG_TEST_CHECK(100 == info0.blocksize, "get_blocksize wrong" ); |
|
CYG_TEST_CHECK(info0.totalmem > 0, "Negative total memory" ); |
CYG_TEST_CHECK(info0.freemem > 0, "Negative free memory" ); |
CYG_TEST_CHECK(info0.totalmem <= MEMSIZE, |
"info.totalsize: Too much memory"); |
CYG_TEST_CHECK(info0.freemem <= info0.totalmem , |
"More memory free than possible" ); |
|
CYG_TEST_CHECK( !cyg_mempool_fix_waiting(mempool0) , |
"Thread waiting for memory; there shouldn't be"); |
|
#ifdef CYGSEM_MEMALLOC_ALLOCATOR_FIXED_THREADAWARE |
p0 = cyg_mempool_fix_alloc(mempool0); |
check_in_mp0(p0, 100); |
|
cyg_mempool_fix_get_info(mempool0, &info1); |
CYG_TEST_CHECK(info1.freemem > 0, "Negative free memory" ); |
CYG_TEST_CHECK(info1.freemem < info0.freemem, |
"Free memory didn't decrease after allocation" ); |
|
p1 = NULL; |
while((p2 = cyg_mempool_fix_try_alloc(mempool0) )) |
p1 = p2; |
|
cyg_mempool_fix_get_info(mempool0, &info1); |
cyg_mempool_fix_free(mempool0, p0); |
|
cyg_mempool_fix_get_info(mempool0, &info2); |
CYG_TEST_CHECK(info2.freemem > info1.freemem, |
"Free memory didn't increase after free" ); |
#endif |
|
// should be able to reallocate now a block is free |
p0 = cyg_mempool_fix_try_alloc(mempool0); |
check_in_mp0(p0, 100); |
|
CYG_TEST_CHECK(p1+100 <= p0 || p1 >= p0+100, |
"Ranges of allocated memory overlap"); |
|
cyg_mempool_fix_free(mempool0, p0); |
cyg_mempool_fix_free(mempool0, p1); |
|
#ifdef CYGSEM_MEMALLOC_ALLOCATOR_FIXED_THREADAWARE |
# ifdef CYGFUN_KERNEL_THREADS_TIMER |
// This shouldn't have to wait |
p0 = cyg_mempool_fix_timed_alloc(mempool0, cyg_current_time()+100000); |
check_in_mp0(p0, 100); |
p1 = cyg_mempool_fix_timed_alloc(mempool0, cyg_current_time()+20); |
check_in_mp0(p1, 10); |
p1 = cyg_mempool_fix_timed_alloc(mempool0, cyg_current_time()+20); |
CYG_TEST_CHECK(NULL == p1, "Timed alloc unexpectedly worked"); |
|
// Expect thread 1 to have run while processing previous timed |
// allocation. It should therefore be waiting. |
CYG_TEST_CHECK(cyg_mempool_fix_waiting(mempool1), |
"There should be a thread waiting"); |
# endif |
#endif |
|
CYG_TEST_PASS_FINISH("Kernel C API Fixed memory pool 1 OK"); |
} |
|
#ifdef CYGSEM_MEMALLOC_ALLOCATOR_FIXED_THREADAWARE |
static void entry1( cyg_addrword_t data ) |
{ |
while(NULL != cyg_mempool_fix_alloc(mempool1)) |
; |
CYG_TEST_FAIL("alloc returned NULL"); |
} |
#endif |
|
|
void kmemfix1_main( void ) |
{ |
CYG_TEST_INIT(); |
CYG_TEST_INFO("Starting Kernel C API Fixed memory pool 1 test"); |
|
cyg_thread_create(4, entry0 , (cyg_addrword_t)0, "kmemfix1-0", |
(void *)stack[0], STACKSIZE, &thread[0], &thread_obj[0]); |
cyg_thread_resume(thread[0]); |
|
#ifdef CYGSEM_MEMALLOC_ALLOCATOR_FIXED_THREADAWARE |
cyg_thread_create(4, entry1 , (cyg_addrword_t)1, "kmemfix1-1", |
(void *)stack[1], STACKSIZE, &thread[1], &thread_obj[1]); |
cyg_thread_resume(thread[1]); |
#endif |
|
cyg_mempool_fix_create(mem[0], MEMSIZE, 100, &mempool0, &mempool_obj[0]); |
cyg_mempool_fix_create(mem[1], MEMSIZE, 316, &mempool1, &mempool_obj[1]); |
|
cyg_scheduler_start(); |
|
CYG_TEST_FAIL_FINISH("Not reached"); |
} |
|
externC void |
cyg_start( void ) |
{ |
kmemfix1_main(); |
} |
|
#else /* def CYGFUN_MEMALLOC_KAPI */ |
externC void |
cyg_start( void ) |
{ |
CYG_TEST_INIT(); |
CYG_TEST_NA("Kernel C API layer disabled"); |
} |
#endif /* def CYGFUN_MEMALLOC_KAPI */ |
|
/* EOF kmemfix1.c */ |
/common/v2_0/tests/testaux.hxx
0,0 → 1,111
#ifndef CYGONCE_MEMALLOC_TESTS_TESTAUX_HXX |
#define CYGONCE_MEMALLOC_TESTS_TESTAUX_HXX |
|
//========================================================================== |
// |
// testaux.hxx |
// |
// Auxiliary test header file |
// |
//========================================================================== |
//####ECOSGPLCOPYRIGHTBEGIN#### |
// ------------------------------------------- |
// This file is part of eCos, the Embedded Configurable Operating System. |
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc. |
// |
// eCos is free software; you can redistribute it and/or modify it under |
// the terms of the GNU General Public License as published by the Free |
// Software Foundation; either version 2 or (at your option) any later version. |
// |
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY |
// WARRANTY; without even the implied warranty of MERCHANTABILITY or |
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
// for more details. |
// |
// You should have received a copy of the GNU General Public License along |
// with eCos; if not, write to the Free Software Foundation, Inc., |
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. |
// |
// As a special exception, if other files instantiate templates or use macros |
// or inline functions from this file, or you compile this file and link it |
// with other works to produce a work based on this file, this file does not |
// by itself cause the resulting work to be covered by the GNU General Public |
// License. However the source code for this file must still be made available |
// in accordance with section (3) of the GNU General Public License. |
// |
// This exception does not invalidate any other reasons why a work based on |
// this file might be covered by the GNU General Public License. |
// |
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc. |
// at http://sources.redhat.com/ecos/ecos-license/ |
// ------------------------------------------- |
//####ECOSGPLCOPYRIGHTEND#### |
//========================================================================== |
//#####DESCRIPTIONBEGIN#### |
// |
// Author(s): dsm |
// Contributors: dsm |
// Date: 1998-03-09 |
// Description: |
// Defines some convenience functions to get us going. In |
// particular this file reserves space for NTHREADS threads, |
//     which can be created by calls to new_thread(). |
//     It also defines a CHECK() convenience macro. |
// |
//####DESCRIPTIONEND#### |
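// |
// Typical usage (an illustrative sketch only, modelled on the tests |
// later in this package; not itself a test, and test_main is a |
// placeholder name): |
// |
//     // kernel headers (e.g. <cyg/kernel/thread.hxx>) must already |
//     // have been included, as the tests below do |
//     #define NTHREADS 2 |
//     #include "testaux.hxx" |
// |
//     static void entry0( CYG_ADDRWORD data ) { /* test body */ } |
// |
//     void test_main( void ) |
//     { |
//         new_thread( entry0, 0 );   // creates and resumes the thread |
//         Cyg_Scheduler::start(); |
//     } |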
|
|
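// Placement operator new: allows new_thread() below to construct |
// Cyg_Thread objects directly in the statically reserved, suitably |
// aligned thread_obj[] buffers. |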
static inline void *operator new(size_t size, void *ptr) { return ptr; }; |
|
|
#include <pkgconf/hal.h> |
|
#ifdef CYGSEM_HAL_STOP_CONSTRUCTORS_ON_FLAG |
externC void |
cyg_hal_invoke_constructors(); |
#endif |
|
#ifdef NTHREADS |
|
#ifndef STACKSIZE |
#define STACKSIZE CYGNUM_HAL_STACK_SIZE_TYPICAL*2 |
#endif |
|
static Cyg_Thread *thread[NTHREADS]; |
|
typedef CYG_WORD64 CYG_ALIGNMENT_TYPE; |
|
static CYG_ALIGNMENT_TYPE thread_obj[NTHREADS] [ |
(sizeof(Cyg_Thread)+sizeof(CYG_ALIGNMENT_TYPE)-1) |
/ sizeof(CYG_ALIGNMENT_TYPE) ]; |
|
static CYG_ALIGNMENT_TYPE stack[NTHREADS] [ |
(STACKSIZE+sizeof(CYG_ALIGNMENT_TYPE)-1) |
/ sizeof(CYG_ALIGNMENT_TYPE) ]; |
|
static volatile int nthreads = 0; |
|
static Cyg_Thread *new_thread(cyg_thread_entry *entry, CYG_ADDRWORD data) |
{ |
int _nthreads = nthreads++; |
|
CYG_ASSERT(_nthreads < NTHREADS, |
"Attempt to create more than NTHREADS threads"); |
|
thread[_nthreads] = new( (void *)&thread_obj[_nthreads] ) |
Cyg_Thread(CYG_SCHED_DEFAULT_INFO, |
entry, data, |
NULL, // no name |
(CYG_ADDRESS)stack[_nthreads], STACKSIZE ); |
|
thread[_nthreads]->resume(); |
|
return thread[_nthreads]; |
} |
#endif // defined(NTHREADS) |
|
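// CHECK(expr) uses the preprocessor stringizing operator (#) to pass |
// the expression's own source text to CYG_TEST_CHECK() as the message. |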
#define CHECK(b) CYG_TEST_CHECK(b,#b) |
|
#endif // ifndef CYGONCE_MEMALLOC_TESTS_TESTAUX_HXX |
|
// End of testaux.hxx |
/common/v2_0/tests/kmemvar1.c
0,0 → 1,217
/*========================================================================== |
// |
//        kmemvar1.c |
// |
// Kernel C API Variable memory pool test 1 |
// |
//========================================================================== |
//####ECOSGPLCOPYRIGHTBEGIN#### |
// ------------------------------------------- |
// This file is part of eCos, the Embedded Configurable Operating System. |
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc. |
// |
// eCos is free software; you can redistribute it and/or modify it under |
// the terms of the GNU General Public License as published by the Free |
// Software Foundation; either version 2 or (at your option) any later version. |
// |
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY |
// WARRANTY; without even the implied warranty of MERCHANTABILITY or |
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
// for more details. |
// |
// You should have received a copy of the GNU General Public License along |
// with eCos; if not, write to the Free Software Foundation, Inc., |
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. |
// |
// As a special exception, if other files instantiate templates or use macros |
// or inline functions from this file, or you compile this file and link it |
// with other works to produce a work based on this file, this file does not |
// by itself cause the resulting work to be covered by the GNU General Public |
// License. However the source code for this file must still be made available |
// in accordance with section (3) of the GNU General Public License. |
// |
// This exception does not invalidate any other reasons why a work based on |
// this file might be covered by the GNU General Public License. |
// |
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc. |
// at http://sources.redhat.com/ecos/ecos-license/ |
// ------------------------------------------- |
//####ECOSGPLCOPYRIGHTEND#### |
//========================================================================== |
//#####DESCRIPTIONBEGIN#### |
// |
// Author(s): dsm |
// Contributors: dsm |
// Date: 1998-06-08 |
// Description: Tests basic variable memory pool functionality |
//####DESCRIPTIONEND#### |
*/ |
|
#include <pkgconf/memalloc.h> |
|
#include <cyg/infra/testcase.h> |
|
#ifdef CYGFUN_MEMALLOC_KAPI |
|
#include <cyg/hal/hal_arch.h> // CYGNUM_HAL_STACK_SIZE_TYPICAL |
|
#include <pkgconf/kernel.h> |
|
#include <cyg/kernel/kapi.h> |
|
#define NTHREADS 2 |
#define STACKSIZE CYGNUM_HAL_STACK_SIZE_TYPICAL |
|
static cyg_handle_t thread[NTHREADS]; |
|
static cyg_thread thread_obj[NTHREADS]; |
static char stack[NTHREADS][STACKSIZE]; |
|
|
#define MEMSIZE 10240 |
|
static cyg_uint8 mem[2][MEMSIZE]; |
|
static cyg_mempool_var mempool_obj[2]; |
static cyg_handle_t mempool0, mempool1; |
|
static void check_in_mp0(cyg_uint8 *p, cyg_int32 size) |
{ |
CYG_TEST_CHECK(NULL != p, |
"Allocation failed"); |
CYG_TEST_CHECK(mem[0] <= p && p+size < mem[1], |
"Block outside memory pool"); |
} |
|
static void entry0( cyg_addrword_t data ) |
{ |
cyg_uint8 *p0, *p1; |
cyg_int32 most_of_mem=MEMSIZE/4*3; |
cyg_mempool_info info0, info1, info2; |
|
cyg_mempool_var_get_info(mempool0, &info0); |
|
CYG_TEST_CHECK(mem[0] == info0.base, "get_arena: base wrong"); |
CYG_TEST_CHECK(MEMSIZE == info0.size, "get_arena: size wrong"); |
|
CYG_TEST_CHECK(0 < info0.maxfree && info0.maxfree <= info0.size, |
"get_arena: maxfree wildly wrong"); |
|
CYG_TEST_CHECK(-1 == info0.blocksize, "get_blocksize wrong" ); |
|
CYG_TEST_CHECK(info0.totalmem > 0, "Negative total memory" ); |
CYG_TEST_CHECK(info0.freemem > 0, "Negative free memory" ); |
CYG_TEST_CHECK(info0.totalmem <= MEMSIZE, |
"info.totalsize: Too much memory"); |
CYG_TEST_CHECK(info0.freemem <= info0.totalmem , |
"More memory free than possible" ); |
|
CYG_TEST_CHECK( !cyg_mempool_var_waiting(mempool0), |
"Thread waiting for memory; there shouldn't be"); |
|
CYG_TEST_CHECK( NULL == cyg_mempool_var_try_alloc(mempool0, MEMSIZE+1), |
"Managed to allocate too much memory"); |
|
#ifdef CYGSEM_MEMALLOC_ALLOCATOR_VARIABLE_THREADAWARE |
p0 = cyg_mempool_var_alloc(mempool0, most_of_mem); |
check_in_mp0(p0, most_of_mem); |
|
cyg_mempool_var_get_info(mempool0, &info1); |
|
CYG_TEST_CHECK(info1.freemem > 0, "Negative free memory" ); |
CYG_TEST_CHECK(info1.freemem < info0.freemem, |
"Free memory didn't decrease after allocation" ); |
|
CYG_TEST_CHECK( NULL == cyg_mempool_var_try_alloc(mempool0, most_of_mem), |
"Managed to allocate too much memory"); |
|
cyg_mempool_var_free(mempool0, p0); |
|
cyg_mempool_var_get_info(mempool0, &info2); |
CYG_TEST_CHECK(info2.freemem > info1.freemem, |
"Free memory didn't increase after free" ); |
#endif |
|
// should be able to reallocate now memory is free |
p0 = cyg_mempool_var_try_alloc(mempool0, most_of_mem); |
check_in_mp0(p0, most_of_mem); |
|
p1 = cyg_mempool_var_try_alloc(mempool0, 10); |
check_in_mp0(p1, 10); |
|
    CYG_TEST_CHECK(p1+10 <= p0 || p1 >= p0+most_of_mem, |
"Ranges of allocated memory overlap"); |
|
cyg_mempool_var_free(mempool0, p0); |
cyg_mempool_var_free(mempool0, p1); |
|
#ifdef CYGSEM_MEMALLOC_ALLOCATOR_VARIABLE_THREADAWARE |
# ifdef CYGFUN_KERNEL_THREADS_TIMER |
// This shouldn't have to wait |
p0 = cyg_mempool_var_timed_alloc(mempool0, most_of_mem, |
cyg_current_time()+100000); |
check_in_mp0(p0, most_of_mem); |
p1 = cyg_mempool_var_timed_alloc(mempool0, most_of_mem, |
cyg_current_time()+2); |
CYG_TEST_CHECK(NULL == p1, "Timed alloc unexpectedly worked"); |
p1 = cyg_mempool_var_timed_alloc(mempool0, 10, |
cyg_current_time()+2); |
check_in_mp0(p1, 10); |
|
// Expect thread 1 to have run while processing previous timed |
    // allocation. It should therefore be waiting. |
CYG_TEST_CHECK(cyg_mempool_var_waiting(mempool1), "There should be a thread waiting"); |
# endif |
#endif |
|
CYG_TEST_PASS_FINISH("Kernel C API Variable memory pool 1 OK"); |
} |
|
#ifdef CYGSEM_MEMALLOC_ALLOCATOR_VARIABLE_THREADAWARE |
static void entry1( cyg_addrword_t data ) |
{ |
cyg_mempool_var_alloc(mempool1, MEMSIZE+1); |
CYG_TEST_FAIL("Oversized alloc returned"); |
} |
#endif |
|
|
void kmemvar1_main( void ) |
{ |
CYG_TEST_INIT(); |
CYG_TEST_INFO("Starting Kernel C API Variable memory pool 1 test"); |
|
cyg_thread_create(4, entry0 , (cyg_addrword_t)0, "kmemvar1-0", |
(void *)stack[0], STACKSIZE, &thread[0], &thread_obj[0]); |
cyg_thread_resume(thread[0]); |
|
#ifdef CYGSEM_MEMALLOC_ALLOCATOR_VARIABLE_THREADAWARE |
cyg_thread_create(4, entry1 , (cyg_addrword_t)1, "kmemvar1-1", |
(void *)stack[1], STACKSIZE, &thread[1], &thread_obj[1]); |
cyg_thread_resume(thread[1]); |
#endif |
|
cyg_mempool_var_create(mem[0], MEMSIZE, &mempool0, &mempool_obj[0]); |
cyg_mempool_var_create(mem[1], MEMSIZE, &mempool1, &mempool_obj[1]); |
|
cyg_scheduler_start(); |
|
CYG_TEST_FAIL_FINISH("Not reached"); |
} |
|
externC void |
cyg_start( void ) |
{ |
kmemvar1_main(); |
} |
|
#else /* ifdef CYGFUN_MEMALLOC_KAPI */ |
externC void |
cyg_start( void ) |
{ |
CYG_TEST_INIT(); |
CYG_TEST_NA("Kernel C API layer disabled"); |
} |
#endif /* ifdef CYGFUN_MEMALLOC_KAPI */ |
|
/* EOF kmemvar1.c */ |
/common/v2_0/tests/memfix1.cxx
0,0 → 1,213
//========================================================================== |
// |
// memfix1.cxx |
// |
// Fixed memory pool test 1 |
// |
//========================================================================== |
//####ECOSGPLCOPYRIGHTBEGIN#### |
// ------------------------------------------- |
// This file is part of eCos, the Embedded Configurable Operating System. |
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc. |
// |
// eCos is free software; you can redistribute it and/or modify it under |
// the terms of the GNU General Public License as published by the Free |
// Software Foundation; either version 2 or (at your option) any later version. |
// |
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY |
// WARRANTY; without even the implied warranty of MERCHANTABILITY or |
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
// for more details. |
// |
// You should have received a copy of the GNU General Public License along |
// with eCos; if not, write to the Free Software Foundation, Inc., |
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. |
// |
// As a special exception, if other files instantiate templates or use macros |
// or inline functions from this file, or you compile this file and link it |
// with other works to produce a work based on this file, this file does not |
// by itself cause the resulting work to be covered by the GNU General Public |
// License. However the source code for this file must still be made available |
// in accordance with section (3) of the GNU General Public License. |
// |
// This exception does not invalidate any other reasons why a work based on |
// this file might be covered by the GNU General Public License. |
// |
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc. |
// at http://sources.redhat.com/ecos/ecos-license/ |
// ------------------------------------------- |
//####ECOSGPLCOPYRIGHTEND#### |
//========================================================================== |
//#####DESCRIPTIONBEGIN#### |
// |
// Author(s): dsm, jlarmour |
// Contributors: |
// Date: 2000-06-18 |
// Description: Tests basic fixed memory pool functionality |
//####DESCRIPTIONEND#### |
|
#include <pkgconf/memalloc.h> |
#include <pkgconf/system.h> |
|
#ifdef CYGPKG_KERNEL |
#include <pkgconf/kernel.h> |
|
#include <cyg/kernel/sched.hxx> // Cyg_Scheduler::start() |
#include <cyg/kernel/thread.hxx> // Cyg_Thread |
|
#include <cyg/kernel/sched.inl> |
#include <cyg/kernel/thread.inl> |
|
#include <cyg/kernel/timer.hxx> // Cyg_Timer |
#include <cyg/kernel/clock.inl> // Cyg_Clock |
|
#define NTHREADS 2 |
#include "testaux.hxx" |
|
#endif |
|
#include <cyg/memalloc/memfixed.hxx> |
|
#include <cyg/infra/testcase.h> |
|
static const cyg_int32 memsize = 10240; |
|
static cyg_uint8 mem[2][memsize]; |
|
static Cyg_Mempool_Fixed mempool0(mem[0], memsize, 100); |
|
static Cyg_Mempool_Fixed mempool1(mem[1], memsize, 316); |
|
|
static void check_in_mp0(cyg_uint8 *p, cyg_int32 size) |
{ |
CYG_TEST_CHECK(NULL != p, |
"Allocation failed"); |
CYG_TEST_CHECK(mem[0] <= p && p+size < mem[1], |
"Block outside memory pool"); |
} |
|
|
static void entry0( CYG_ADDRWORD data ) |
{ |
cyg_int32 f0,f1,f2,t0; |
cyg_uint8 *p0, *p1, *p2; |
Cyg_Mempool_Status stat; |
|
mempool0.get_status( CYG_MEMPOOL_STAT_ORIGBASE| |
CYG_MEMPOOL_STAT_BLOCKSIZE| |
CYG_MEMPOOL_STAT_ORIGSIZE, stat ); |
CYG_TEST_CHECK(mem[0] == stat.origbase, "get_status: base wrong"); |
CYG_TEST_CHECK(memsize == stat.origsize, "get_status: size wrong"); |
CYG_TEST_CHECK(100 == stat.blocksize, "get_status: blocksize wrong"); |
|
mempool1.get_status( CYG_MEMPOOL_STAT_BLOCKSIZE, stat ); |
CYG_TEST_CHECK(316 == stat.blocksize, "get_status: pool1 blocksize wrong" ); |
|
mempool0.get_status( CYG_MEMPOOL_STAT_TOTALFREE| |
CYG_MEMPOOL_STAT_ARENASIZE, stat ); |
t0 = stat.arenasize; |
CYG_TEST_CHECK(t0 > 0, "Negative total memory" ); |
f0 = stat.totalfree; |
CYG_TEST_CHECK(f0 > 0, "Negative free memory" ); |
CYG_TEST_CHECK(t0 <= memsize, "get_totalsize: Too much memory"); |
CYG_TEST_CHECK(f0 <= t0 , "More memory free than possible" ); |
|
mempool0.get_status( CYG_MEMPOOL_STAT_WAITING, stat ); |
CYG_TEST_CHECK( !stat.waiting, |
"Thread waiting for memory; there shouldn't be"); |
|
#ifdef CYGSEM_MEMALLOC_ALLOCATOR_FIXED_THREADAWARE |
p0 = mempool0.alloc(); |
#else |
p0 = mempool0.try_alloc(); |
#endif |
check_in_mp0(p0, 100); |
|
mempool0.get_status( CYG_MEMPOOL_STAT_TOTALFREE, stat ); |
f1 = stat.totalfree; |
CYG_TEST_CHECK(f1 > 0, "Negative free memory" ); |
CYG_TEST_CHECK(f1 < f0, "Free memory didn't decrease after allocation" ); |
|
p1 = NULL; |
while((p2 = mempool0.try_alloc())) |
p1 = p2; |
|
mempool0.get_status( CYG_MEMPOOL_STAT_TOTALFREE, stat ); |
f1 = stat.totalfree; |
CYG_TEST_CHECK(mempool0.free(p0), "Couldn't free"); |
|
mempool0.get_status( CYG_MEMPOOL_STAT_TOTALFREE, stat ); |
f2 = stat.totalfree; |
CYG_TEST_CHECK(f2 > f1, "Free memory didn't increase after free" ); |
|
// should be able to reallocate now a block is free |
p0 = mempool0.try_alloc(); |
check_in_mp0(p0, 100); |
|
CYG_TEST_CHECK(p1+100 <= p0 || p1 >= p0+100, |
"Ranges of allocated memory overlap"); |
|
CYG_TEST_CHECK(mempool0.free(p0), "Couldn't free"); |
CYG_TEST_CHECK(mempool0.free(p1), "Couldn't free"); |
|
#ifdef CYGSEM_MEMALLOC_ALLOCATOR_FIXED_THREADAWARE |
# ifdef CYGFUN_KERNEL_THREADS_TIMER |
// This shouldn't have to wait |
p0 = mempool0.alloc(Cyg_Clock::real_time_clock->current_value()+100000); |
check_in_mp0(p0, 100); |
p1 = mempool0.alloc(Cyg_Clock::real_time_clock->current_value()+20); |
check_in_mp0(p1, 10); |
p1 = mempool0.alloc(Cyg_Clock::real_time_clock->current_value()+20); |
CYG_TEST_CHECK(NULL == p1, "Timed alloc unexpectedly worked"); |
|
// Expect thread 1 to have run while processing previous timed |
    // allocation. It should therefore be waiting. |
mempool1.get_status( CYG_MEMPOOL_STAT_WAITING, stat ); |
CYG_TEST_CHECK(stat.waiting, "There should be a thread waiting"); |
# endif |
#endif |
|
CYG_TEST_PASS_FINISH("Fixed memory pool 1 OK"); |
} |
|
#ifdef CYGSEM_MEMALLOC_ALLOCATOR_FIXED_THREADAWARE |
static void entry1( CYG_ADDRWORD data ) |
{ |
while(NULL != mempool1.alloc()) |
; |
CYG_TEST_FAIL("alloc returned NULL"); |
} |
#endif |
|
|
void memfix1_main( void ) |
{ |
CYG_TEST_INIT(); |
CYG_TEST_INFO("Starting Fixed memory pool 1 test"); |
|
#ifdef CYGSEM_MEMALLOC_ALLOCATOR_FIXED_THREADAWARE |
new_thread(entry0, 0); |
new_thread(entry1, 1); |
|
Cyg_Scheduler::start(); |
#elif defined(CYGPKG_KERNEL) |
new_thread(entry0, 0); |
|
Cyg_Scheduler::start(); |
#else |
entry0(0); |
#endif |
|
CYG_TEST_FAIL_FINISH("Not reached"); |
} |
|
externC void |
cyg_start( void ) |
{ |
#ifdef CYGSEM_HAL_STOP_CONSTRUCTORS_ON_FLAG |
cyg_hal_invoke_constructors(); |
#endif |
memfix1_main(); |
} |
// EOF memfix1.cxx |
/common/v2_0/tests/memfix2.cxx
0,0 → 1,151
//========================================================================== |
// |
// memfix2.cxx |
// |
// Fixed memory pool test 2 |
// |
//========================================================================== |
//####ECOSGPLCOPYRIGHTBEGIN#### |
// ------------------------------------------- |
// This file is part of eCos, the Embedded Configurable Operating System. |
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc. |
// |
// eCos is free software; you can redistribute it and/or modify it under |
// the terms of the GNU General Public License as published by the Free |
// Software Foundation; either version 2 or (at your option) any later version. |
// |
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY |
// WARRANTY; without even the implied warranty of MERCHANTABILITY or |
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
// for more details. |
// |
// You should have received a copy of the GNU General Public License along |
// with eCos; if not, write to the Free Software Foundation, Inc., |
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. |
// |
// As a special exception, if other files instantiate templates or use macros |
// or inline functions from this file, or you compile this file and link it |
// with other works to produce a work based on this file, this file does not |
// by itself cause the resulting work to be covered by the GNU General Public |
// License. However the source code for this file must still be made available |
// in accordance with section (3) of the GNU General Public License. |
// |
// This exception does not invalidate any other reasons why a work based on |
// this file might be covered by the GNU General Public License. |
// |
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc. |
// at http://sources.redhat.com/ecos/ecos-license/ |
// ------------------------------------------- |
//####ECOSGPLCOPYRIGHTEND#### |
//========================================================================== |
//#####DESCRIPTIONBEGIN#### |
// |
// Author(s): dsm, jlarmour |
// Contributors: |
// Date: 2000-06-18 |
// Description: test allocation and freeing in fixed memory pools |
//####DESCRIPTIONEND#### |
|
#include <pkgconf/memalloc.h> |
#include <pkgconf/system.h> |
|
#ifdef CYGPKG_KERNEL |
#include <pkgconf/kernel.h> |
|
#include <cyg/kernel/sched.hxx> // Cyg_Scheduler::start() |
#include <cyg/kernel/thread.hxx> // Cyg_Thread |
#include <cyg/kernel/thread.inl> |
#include <cyg/kernel/sema.hxx> |
|
#include <cyg/kernel/sched.inl> |
|
|
#define NTHREADS 1 |
#include "testaux.hxx" |
|
#endif |
|
#include <cyg/memalloc/memfixed.hxx> |
|
#include <cyg/infra/testcase.h> |
|
static const cyg_int32 memsize = 1024; |
|
static cyg_uint8 mem[memsize]; |
|
#define NUM_PTRS 16 // Should be even |
#define BLOCKSIZE 12 |
|
static Cyg_Mempool_Fixed mempool(mem, memsize, BLOCKSIZE); |
|
static cyg_uint8 *ptr[NUM_PTRS]; |
|
// We make a number of passes over a table of pointers which point to |
// blocks of allocated memory. The block is freed and a new block |
// allocated. The order of the processing of blocks is varied. |
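// Note: NUM_PTRS is a power of two and stepsize is always odd, so |
// gcd(stepsize, NUM_PTRS) == 1 and each pass visits every table slot |
// exactly once, merely in a different order. |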
static void entry( CYG_ADDRWORD data ) |
{ |
for(cyg_ucount32 passes = 0; passes < 10; passes++) { |
|
|
// The order which the table is processed varies according to |
// stepsize. |
cyg_ucount8 stepsize = (passes*2 + 1) % NUM_PTRS; // odd |
|
|
for(cyg_ucount8 c=0, i=0; c < NUM_PTRS; c++) { |
i = (i+stepsize) % NUM_PTRS; |
if(ptr[i]) { |
for(cyg_ucount32 j=BLOCKSIZE;j--;) { |
CYG_TEST_CHECK(ptr[i][j]==i, "Memory corrupted"); |
} |
CYG_TEST_CHECK(mempool.free(ptr[i]), "bad free"); |
} |
ptr[i] = mempool.try_alloc(); |
|
CYG_TEST_CHECK(NULL != ptr[i], "Memory pool not big enough"); |
CYG_TEST_CHECK(mem<=ptr[i] && ptr[i]+BLOCKSIZE < mem+memsize, |
"Allocated region not within pool"); |
|
// Scribble over memory to check whether region overlaps |
// with other regions. The contents of the memory are |
// checked on freeing. This also tests that the memory |
// does not overlap with allocator memory structures. |
for(cyg_ucount32 j=BLOCKSIZE;j--;) { |
ptr[i][j]=i; |
} |
} |
} |
|
CYG_TEST_PASS_FINISH("Fixed memory pool 2 OK"); |
} |
|
|
void memfix2_main( void ) |
{ |
CYG_TEST_INIT(); |
CYG_TEST_INFO("Starting Fixed memory pool 2 test"); |
|
for(cyg_ucount32 i = 0; i<NUM_PTRS; i++) { |
ptr[i] = NULL; |
} |
|
#ifdef CYGPKG_KERNEL |
new_thread(entry, 0); |
Cyg_Scheduler::start(); |
#else |
entry(0); |
#endif |
|
CYG_TEST_FAIL_FINISH("Not reached"); |
} |
|
externC void |
cyg_start( void ) |
{ |
#ifdef CYGSEM_HAL_STOP_CONSTRUCTORS_ON_FLAG |
cyg_hal_invoke_constructors(); |
#endif |
memfix2_main(); |
} |
// EOF memfix2.cxx |
/common/v2_0/tests/memvar1.cxx
0,0 → 1,221
//========================================================================== |
// |
// memvar1.cxx |
// |
// Variable memory pool test 1 |
// |
//========================================================================== |
//####ECOSGPLCOPYRIGHTBEGIN#### |
// ------------------------------------------- |
// This file is part of eCos, the Embedded Configurable Operating System. |
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc. |
// |
// eCos is free software; you can redistribute it and/or modify it under |
// the terms of the GNU General Public License as published by the Free |
// Software Foundation; either version 2 or (at your option) any later version. |
// |
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY |
// WARRANTY; without even the implied warranty of MERCHANTABILITY or |
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
// for more details. |
// |
// You should have received a copy of the GNU General Public License along |
// with eCos; if not, write to the Free Software Foundation, Inc., |
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. |
// |
// As a special exception, if other files instantiate templates or use macros |
// or inline functions from this file, or you compile this file and link it |
// with other works to produce a work based on this file, this file does not |
// by itself cause the resulting work to be covered by the GNU General Public |
// License. However the source code for this file must still be made available |
// in accordance with section (3) of the GNU General Public License. |
// |
// This exception does not invalidate any other reasons why a work based on |
// this file might be covered by the GNU General Public License. |
// |
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc. |
// at http://sources.redhat.com/ecos/ecos-license/ |
// ------------------------------------------- |
//####ECOSGPLCOPYRIGHTEND#### |
//========================================================================== |
//#####DESCRIPTIONBEGIN#### |
// |
// Author(s): dsm, jlarmour |
// Contributors: |
// Date: 2000-06-18 |
// Description: Tests basic variable memory pool functionality |
//####DESCRIPTIONEND#### |
|
#include <pkgconf/memalloc.h> |
#include <pkgconf/system.h> |
|
#ifdef CYGPKG_KERNEL |
#include <pkgconf/kernel.h> |
|
#include <cyg/kernel/sched.hxx> // Cyg_Scheduler::start() |
#include <cyg/kernel/thread.hxx> // Cyg_Thread |
|
#include <cyg/kernel/sched.inl> |
#include <cyg/kernel/thread.inl> |
|
#include <cyg/kernel/timer.hxx> // Cyg_Timer |
#include <cyg/kernel/clock.inl> // Cyg_Clock |
|
#define NTHREADS 2 |
#include "testaux.hxx" |
|
#endif |
|
#include <cyg/memalloc/memvar.hxx> |
|
#include <cyg/infra/testcase.h> |
|
static const cyg_int32 memsize = 10240; |
|
static cyg_uint8 mem[2][memsize]; |
|
static Cyg_Mempool_Variable mempool0(mem[0], memsize); |
|
static Cyg_Mempool_Variable mempool1(mem[1], memsize); |
|
|
static void check_in_mp0(cyg_uint8 *p, cyg_int32 size) |
{ |
CYG_TEST_CHECK(NULL != p, |
"Allocation failed"); |
CYG_TEST_CHECK(mem[0] <= p && p+size < mem[1], |
"Block outside memory pool"); |
} |
|
|
static void entry0( CYG_ADDRWORD data ) |
{ |
cyg_int32 f0,f1,f2,t0; |
cyg_uint8 *p0, *p1; |
cyg_int32 most_of_mem=memsize/4*3; |
Cyg_Mempool_Status stat; |
|
mempool0.get_status( CYG_MEMPOOL_STAT_ORIGBASE| |
CYG_MEMPOOL_STAT_BLOCKSIZE| |
CYG_MEMPOOL_STAT_MAXFREE| |
CYG_MEMPOOL_STAT_ORIGSIZE, stat ); |
|
CYG_TEST_CHECK(mem[0] == stat.origbase, "get_status: base wrong"); |
CYG_TEST_CHECK(memsize == stat.origsize, "get_status: size wrong"); |
|
CYG_TEST_CHECK(0 < stat.maxfree && stat.maxfree <= stat.origsize, |
"get_status: maxfree wildly wrong"); |
|
CYG_TEST_CHECK(-1 == stat.blocksize, "blocksize wrong" ); |
|
mempool0.get_status( CYG_MEMPOOL_STAT_TOTALFREE| |
CYG_MEMPOOL_STAT_ARENASIZE, stat ); |
t0 = stat.arenasize; |
CYG_TEST_CHECK(t0 > 0, "Negative total memory" ); |
f0 = stat.totalfree; |
CYG_TEST_CHECK(f0 > 0, "Negative free memory" ); |
CYG_TEST_CHECK(t0 <= memsize, "get_totalsize: Too much memory"); |
CYG_TEST_CHECK(f0 <= t0 , "More memory free than possible" ); |
|
mempool0.get_status( CYG_MEMPOOL_STAT_WAITING, stat ); |
CYG_TEST_CHECK( !stat.waiting, |
"Thread waiting for memory; there shouldn't be"); |
|
CYG_TEST_CHECK( NULL == mempool0.try_alloc(memsize+1), |
"Managed to allocate too much memory"); |
|
#ifdef CYGSEM_MEMALLOC_ALLOCATOR_VARIABLE_THREADAWARE |
p0 = mempool0.alloc(most_of_mem); |
#else |
p0 = mempool0.try_alloc(most_of_mem); |
#endif |
check_in_mp0(p0, most_of_mem); |
|
mempool0.get_status( CYG_MEMPOOL_STAT_TOTALFREE, stat ); |
f1 = stat.totalfree; |
CYG_TEST_CHECK(f1 > 0, "Negative free memory" ); |
CYG_TEST_CHECK(f1 < f0, "Free memory didn't decrease after allocation" ); |
|
CYG_TEST_CHECK( NULL == mempool0.try_alloc(most_of_mem), |
"Managed to allocate too much memory"); |
|
CYG_TEST_CHECK(mempool0.free(p0, most_of_mem), "Couldn't free"); |
|
mempool0.get_status( CYG_MEMPOOL_STAT_TOTALFREE, stat ); |
f2 = stat.totalfree; |
CYG_TEST_CHECK(f2 > f1, "Free memory didn't increase after free" ); |
|
// should be able to reallocate now memory is free |
p0 = mempool0.try_alloc(most_of_mem); |
check_in_mp0(p0, most_of_mem); |
|
p1 = mempool0.try_alloc(10); |
check_in_mp0(p1, 10); |
|
CYG_TEST_CHECK(p1+10 <= p0 || p1 >= p0+most_of_mem, |
"Ranges of allocated memory overlap"); |
|
CYG_TEST_CHECK(mempool0.free(p0, 0), "Couldn't free"); |
CYG_TEST_CHECK(mempool0.free(p1, 10), "Couldn't free"); |
|
#ifdef CYGSEM_MEMALLOC_ALLOCATOR_VARIABLE_THREADAWARE |
# ifdef CYGFUN_KERNEL_THREADS_TIMER |
// This shouldn't have to wait |
p0 = mempool0.alloc(most_of_mem, |
Cyg_Clock::real_time_clock->current_value() + 100000); |
check_in_mp0(p0, most_of_mem); |
p1 = mempool0.alloc(most_of_mem, |
Cyg_Clock::real_time_clock->current_value() + 2); |
CYG_TEST_CHECK(NULL == p1, "Timed alloc unexpectedly worked"); |
p1 = mempool0.alloc(10, |
Cyg_Clock::real_time_clock->current_value() + 2); |
check_in_mp0(p1, 10); |
|
// Expect thread 1 to have run while processing previous timed |
    // allocation. It should therefore be waiting. |
mempool1.get_status( CYG_MEMPOOL_STAT_WAITING, stat ); |
CYG_TEST_CHECK(stat.waiting, "There should be a thread waiting"); |
# endif |
#endif |
|
CYG_TEST_PASS_FINISH("Variable memory pool 1 OK"); |
} |
|
#ifdef CYGSEM_MEMALLOC_ALLOCATOR_VARIABLE_THREADAWARE |
static void entry1( CYG_ADDRWORD data ) |
{ |
mempool1.alloc(memsize+1); |
CYG_TEST_FAIL("Oversized alloc returned"); |
} |
#endif |
|
void memvar1_main( void ) |
{ |
CYG_TEST_INIT(); |
CYG_TEST_INFO("Starting Variable memory pool 1 test"); |
|
#ifdef CYGSEM_MEMALLOC_ALLOCATOR_VARIABLE_THREADAWARE |
new_thread(entry0, 0); |
new_thread(entry1, 1); |
|
Cyg_Scheduler::start(); |
#elif defined(CYGPKG_KERNEL) |
new_thread(entry0, 0); |
|
Cyg_Scheduler::start(); |
#else |
entry0(0); |
#endif |
|
CYG_TEST_FAIL_FINISH("Not reached"); |
} |
|
externC void |
cyg_start( void ) |
{ |
#ifdef CYGSEM_HAL_STOP_CONSTRUCTORS_ON_FLAG |
cyg_hal_invoke_constructors(); |
#endif |
memvar1_main(); |
} |
// EOF memvar1.cxx |
/common/v2_0/tests/malloc1.c
0,0 → 1,271
//================================================================= |
// |
// malloc1.c |
// |
// Testcase for C library malloc(), calloc() and free() |
// |
//================================================================= |
//####ECOSGPLCOPYRIGHTBEGIN#### |
// ------------------------------------------- |
// This file is part of eCos, the Embedded Configurable Operating System. |
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc. |
// |
// eCos is free software; you can redistribute it and/or modify it under |
// the terms of the GNU General Public License as published by the Free |
// Software Foundation; either version 2 or (at your option) any later version. |
// |
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY |
// WARRANTY; without even the implied warranty of MERCHANTABILITY or |
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
// for more details. |
// |
// You should have received a copy of the GNU General Public License along |
// with eCos; if not, write to the Free Software Foundation, Inc., |
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. |
// |
// As a special exception, if other files instantiate templates or use macros |
// or inline functions from this file, or you compile this file and link it |
// with other works to produce a work based on this file, this file does not |
// by itself cause the resulting work to be covered by the GNU General Public |
// License. However the source code for this file must still be made available |
// in accordance with section (3) of the GNU General Public License. |
// |
// This exception does not invalidate any other reasons why a work based on |
// this file might be covered by the GNU General Public License. |
// |
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc. |
// at http://sources.redhat.com/ecos/ecos-license/ |
// ------------------------------------------- |
//####ECOSGPLCOPYRIGHTEND#### |
//================================================================= |
//#####DESCRIPTIONBEGIN#### |
// |
// Author(s): jlarmour |
// Contributors: |
// Date: 2000-04-30 |
// Description: Contains testcode for C library malloc(), calloc() and |
// free() functions |
// |
// |
//####DESCRIPTIONEND#### |
|
// INCLUDES |
|
#include <pkgconf/system.h> |
#include <pkgconf/memalloc.h> // config header |
#ifdef CYGPKG_ISOINFRA |
# include <pkgconf/isoinfra.h> |
# include <stdlib.h> |
#endif |
#include <cyg/infra/testcase.h> |
#include <limits.h> // INT_MAX |
|
|
#if !defined(CYGPKG_ISOINFRA) |
# define NA_MSG "Requires isoinfra package" |
#elif !CYGINT_ISO_MAIN_STARTUP |
# define NA_MSG "Requires main() to be called" |
#elif !CYGINT_ISO_MALLOC |
# define NA_MSG "Requires malloc" |
#elif !CYGINT_ISO_MALLINFO |
# define NA_MSG "Requires mallinfo" |
#endif |
|
#ifdef NA_MSG |
void |
cyg_start(void) |
{ |
CYG_TEST_INIT(); |
CYG_TEST_NA( NA_MSG ); |
CYG_TEST_FINISH("Done"); |
} |
#else |
|
|
// FUNCTIONS |
|
int |
main( int argc, char *argv[] ) |
{ |
int *i; |
char *str, *str2, *str3; |
int j; |
int poolmax; |
|
CYG_TEST_INIT(); |
|
CYG_TEST_INFO("Starting tests from testcase " __FILE__ " for C library " |
"malloc(), calloc() and free() functions"); |
|
poolmax = mallinfo().maxfree; |
|
if ( poolmax <= 0 ) { |
CYG_TEST_FAIL_FINISH( "Can't determine allocation size to use" ); |
} |
|
// Test 1 |
i = (int *) malloc( sizeof(int) ); |
|
// check if it should fit into pool |
if (sizeof(int) > poolmax) |
{ |
// didn't fit into pool, so should be NULL |
CYG_TEST_PASS_FAIL( i == NULL, |
"1 int malloc with no space left works" ); |
} |
else |
{ |
// since it should fit into pool, we can fiddle with i |
*i=-12345; |
CYG_TEST_PASS_FAIL( i && (*i==-12345), |
"1 int malloc with space left works" ); |
free(i); |
} // else |
|
// Test 2 |
str=(char *) malloc( 4096 ); |
|
if ( 4096 > poolmax) |
{ |
// didn't fit into pool, so should be NULL |
CYG_TEST_PASS_FAIL( str == NULL,"4K string with no space left works" ); |
} |
else |
{ |
// since it should fit into pool, we can fiddle with it. |
for (j=0; j<1024; j++) |
{ |
str[j*4] = 'f'; |
str[(j*4)+1] = 'r'; |
str[(j*4)+2] = 'e'; |
str[(j*4)+3] = 'd'; |
} // for |
|
for (j=0; j<1024; j++) |
{ |
if ( ((str[j*4] != 'f') || |
(str[(j*4)+1] != 'r') || |
(str[(j*4)+2] != 'e') || |
(str[(j*4)+3] != 'd')) ) |
break; |
} // for |
|
// did j reach the top? |
CYG_TEST_PASS_FAIL( j==1024, "4K string with space left works" ); |
|
free(str); |
} // else |
|
|
// Test 3 |
str=(char *) calloc( 2, 1024 ); |
|
if ( 2048 > poolmax) |
{ |
// didn't fit into pool, so should be NULL |
CYG_TEST_PASS_FAIL( str == NULL, |
"calloc 2K string with no space left works" ); |
} |
else |
{ |
        // check it's zeroed |
for ( j=0; j<2048; j++ ) |
{ |
if (str[j] != 0) |
break; |
} // for |
|
CYG_TEST_PASS_FAIL( j==2048, "calloc 2K string is cleared" ); |
|
// since it should fit into pool, we can fiddle with it. |
for (j=0; j<512; j++) |
{ |
str[j*4] = 'j'; |
str[(j*4)+1] = 'i'; |
str[(j*4)+2] = 'f'; |
str[(j*4)+3] = 'l'; |
} // for |
|
for (j=0; j<512; j++) |
{ |
if ( ((str[j*4] != 'j') || |
(str[(j*4)+1] != 'i') || |
(str[(j*4)+2] != 'f') || |
(str[(j*4)+3] != 'l')) ) |
break; |
} // for |
|
// did j reach the top? |
CYG_TEST_PASS_FAIL( j==512, |
"calloc 2K string - with space left works" ); |
|
free(str); |
} // else |
|
// Test 4 |
#if defined(CYGIMP_MEMALLOC_MALLOC_VARIABLE_SIMPLE) && \ |
defined(CYGSEM_MEMALLOC_ALLOCATOR_VARIABLE_COALESCE) |
poolmax = mallinfo().maxfree; // recalculate for non-coalescing allocator |
#endif |
str=(char *)malloc( poolmax+1 ); |
CYG_TEST_PASS_FAIL( str==NULL, "malloc too much data returns NULL" ); |
|
// Test 5 |
str=(char *)calloc( 1, poolmax+1 ); |
CYG_TEST_PASS_FAIL( str==NULL, "calloc too much data returns NULL" ); |
|
// Test 6 |
str=(char *)malloc(0); if (str != NULL) free(str); |
str=(char *)calloc(0, 1); if (str != NULL) free(str); |
str=(char *)calloc(1, 0); if (str != NULL) free(str); |
str=(char *)calloc(0, 0); if (str != NULL) free(str); |
// simply shouldn't barf by this point |
|
CYG_TEST_PASS_FAIL( 1, "malloc and calloc of 0 bytes doesn't crash" ); |
|
// Test 7 |
str = (char *)malloc(10); |
i = (int *)malloc(sizeof(int)); |
str2 = (char *)malloc(10); |
|
str3=(char *)i; |
|
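    // Non-overlap here means the sizeof(int) bytes at str3 lie entirely |
    // below str (and str2) or entirely above their 10 bytes, and that |
    // the 10-byte regions at str and str2 are themselves disjoint. |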
CYG_TEST_PASS_FAIL( ((str3 <= str-sizeof(int)) || (str3 >= &str[10])) && |
((str3 <= str2-sizeof(int)) || (str3 >= &str2[10])) && |
((str+10 <= str2) || (str2+10 <= str)), |
"Objects don't overlap" ); |
|
// Test 8 |
|
free(i); |
i=(int *)malloc(sizeof(int)*2); |
str3=(char *)i; |
|
CYG_TEST_PASS_FAIL( ((str3 <= str-sizeof(int)) || (str3 >= &str[10])) && |
((str3 <= str2-sizeof(int)) || (str3 >= &str2[10])) && |
((&str[10] <= str2) || (&str2[10] <= str)), |
"Objects don't overlap when middle is freed" ); |
|
free(i); |
free(str); |
free(str2); |
|
// Test 9 |
|
#if defined(CYGIMP_MEMALLOC_MALLOC_VARIABLE_SIMPLE) && \ |
defined(CYGSEM_MEMALLOC_ALLOCATOR_VARIABLE_COALESCE) |
poolmax = mallinfo().maxfree; // recalculate for non-coalescing allocator |
#endif |
str = (char *)malloc( poolmax ); |
CYG_TEST_PASS_FAIL( str != NULL, "malloc of maximum free block size works"); |
free(str); |
|
CYG_TEST_FINISH("Finished tests from testcase " __FILE__ " for C library " |
"malloc(), calloc() and free() functions"); |
|
return 0; |
} // main() |
|
#endif // ifndef NA_MSG |
|
// EOF malloc1.c |
/common/v2_0/tests/dlmalloc1.cxx
0,0 → 1,222
//========================================================================== |
// |
// dlmalloc1.cxx |
// |
// dlmalloc memory pool test 1 |
// |
//========================================================================== |
//####ECOSGPLCOPYRIGHTBEGIN#### |
// ------------------------------------------- |
// This file is part of eCos, the Embedded Configurable Operating System. |
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc. |
// |
// eCos is free software; you can redistribute it and/or modify it under |
// the terms of the GNU General Public License as published by the Free |
// Software Foundation; either version 2 or (at your option) any later version. |
// |
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY |
// WARRANTY; without even the implied warranty of MERCHANTABILITY or |
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
// for more details. |
// |
// You should have received a copy of the GNU General Public License along |
// with eCos; if not, write to the Free Software Foundation, Inc., |
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. |
// |
// As a special exception, if other files instantiate templates or use macros |
// or inline functions from this file, or you compile this file and link it |
// with other works to produce a work based on this file, this file does not |
// by itself cause the resulting work to be covered by the GNU General Public |
// License. However the source code for this file must still be made available |
// in accordance with section (3) of the GNU General Public License. |
// |
// This exception does not invalidate any other reasons why a work based on |
// this file might be covered by the GNU General Public License. |
// |
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc. |
// at http://sources.redhat.com/ecos/ecos-license/ |
// ------------------------------------------- |
//####ECOSGPLCOPYRIGHTEND#### |
//========================================================================== |
//#####DESCRIPTIONBEGIN#### |
// |
// Author(s): dsm, jlarmour |
// Contributors: |
// Date: 2000-06-18 |
// Description: Tests basic dlmalloc memory pool functionality |
//####DESCRIPTIONEND#### |
|
#include <pkgconf/memalloc.h> |
#include <pkgconf/system.h> |
|
#ifdef CYGPKG_KERNEL |
#include <pkgconf/kernel.h> |
|
#include <cyg/kernel/sched.hxx> // Cyg_Scheduler::start() |
#include <cyg/kernel/thread.hxx> // Cyg_Thread |
|
#include <cyg/kernel/sched.inl> |
#include <cyg/kernel/thread.inl> |
|
#include <cyg/kernel/timer.hxx> // Cyg_Timer |
#include <cyg/kernel/clock.inl> // Cyg_Clock |
|
#define STACKSIZE (CYGNUM_HAL_STACK_SIZE_TYPICAL + 20*CYGNUM_HAL_STACK_FRAME_SIZE) |
#define NTHREADS 2 |
#include "testaux.hxx" |
|
#endif |
|
#include <cyg/memalloc/dlmalloc.hxx> |
|
#include <cyg/infra/testcase.h> |
|
static const cyg_int32 memsize = 10240; |
|
static cyg_uint8 mem[2][memsize]; |
|
static Cyg_Mempool_dlmalloc mempool0(mem[0], memsize); |
|
static Cyg_Mempool_dlmalloc mempool1(mem[1], memsize); |
|
|
static void check_in_mp0(cyg_uint8 *p, cyg_int32 size) |
{ |
CYG_TEST_CHECK(NULL != p, |
"Allocation failed"); |
CYG_TEST_CHECK(mem[0] <= p && p+size < mem[1], |
"Block outside memory pool"); |
} |
|
|
static void entry0( CYG_ADDRWORD data ) |
{ |
cyg_int32 f0,f1,f2,t0; |
cyg_uint8 *p0, *p1; |
cyg_int32 most_of_mem=memsize/4*3; |
Cyg_Mempool_Status stat; |
|
mempool0.get_status( CYG_MEMPOOL_STAT_ORIGBASE| |
CYG_MEMPOOL_STAT_BLOCKSIZE| |
CYG_MEMPOOL_STAT_MAXFREE| |
CYG_MEMPOOL_STAT_ORIGSIZE, stat ); |
|
CYG_TEST_CHECK(mem[0] == stat.origbase, "get_status: base wrong"); |
CYG_TEST_CHECK(memsize == stat.origsize, "get_status: size wrong"); |
|
CYG_TEST_CHECK(0 < stat.maxfree && stat.maxfree <= stat.origsize, |
"get_status: maxfree wildly wrong"); |
|
CYG_TEST_CHECK(-1 == stat.blocksize, "blocksize wrong" ); |
|
mempool0.get_status( CYG_MEMPOOL_STAT_TOTALFREE| |
CYG_MEMPOOL_STAT_ARENASIZE, stat ); |
t0 = stat.arenasize; |
CYG_TEST_CHECK(t0 > 0, "Negative total memory" ); |
f0 = stat.totalfree; |
CYG_TEST_CHECK(f0 > 0, "Negative free memory" ); |
CYG_TEST_CHECK(t0 <= memsize, "get_totalsize: Too much memory"); |
CYG_TEST_CHECK(f0 <= t0 , "More memory free than possible" ); |
|
mempool0.get_status( CYG_MEMPOOL_STAT_WAITING, stat ); |
CYG_TEST_CHECK( !stat.waiting, |
"Thread waiting for memory; there shouldn't be"); |
|
CYG_TEST_CHECK( NULL == mempool0.try_alloc(memsize+1), |
"Managed to allocate too much memory"); |
|
#ifdef CYGSEM_MEMALLOC_ALLOCATOR_VARIABLE_THREADAWARE |
p0 = mempool0.alloc(most_of_mem); |
#else |
p0 = mempool0.try_alloc(most_of_mem); |
#endif |
check_in_mp0(p0, most_of_mem); |
|
mempool0.get_status( CYG_MEMPOOL_STAT_TOTALFREE, stat ); |
f1 = stat.totalfree; |
CYG_TEST_CHECK(f1 > 0, "Negative free memory" ); |
CYG_TEST_CHECK(f1 < f0, "Free memory didn't decrease after allocation" ); |
|
CYG_TEST_CHECK( NULL == mempool0.try_alloc(most_of_mem), |
"Managed to allocate too much memory"); |
|
CYG_TEST_CHECK(mempool0.free(p0, most_of_mem), "Couldn't free"); |
|
mempool0.get_status( CYG_MEMPOOL_STAT_TOTALFREE, stat ); |
f2 = stat.totalfree; |
CYG_TEST_CHECK(f2 > f1, "Free memory didn't increase after free" ); |
|
// should be able to reallocate now memory is free |
p0 = mempool0.try_alloc(most_of_mem); |
check_in_mp0(p0, most_of_mem); |
|
p1 = mempool0.try_alloc(10); |
check_in_mp0(p1, 10); |
|
CYG_TEST_CHECK(p1+10 <= p0 || p1 >= p0+most_of_mem, |
"Ranges of allocated memory overlap"); |
|
CYG_TEST_CHECK(mempool0.free(p0, 0), "Couldn't free"); |
CYG_TEST_CHECK(mempool0.free(p1, 10), "Couldn't free"); |
|
#ifdef CYGSEM_MEMALLOC_ALLOCATOR_VARIABLE_THREADAWARE |
# ifdef CYGFUN_KERNEL_THREADS_TIMER |
// This shouldn't have to wait |
p0 = mempool0.alloc(most_of_mem, |
Cyg_Clock::real_time_clock->current_value() + 100000); |
check_in_mp0(p0, most_of_mem); |
p1 = mempool0.alloc(most_of_mem, |
Cyg_Clock::real_time_clock->current_value() + 2); |
CYG_TEST_CHECK(NULL == p1, "Timed alloc unexpectedly worked"); |
p1 = mempool0.alloc(10, |
Cyg_Clock::real_time_clock->current_value() + 2); |
check_in_mp0(p1, 10); |
|
// Expect thread 1 to have run while processing previous timed |
    // allocation. It should therefore be waiting. |
mempool1.get_status( CYG_MEMPOOL_STAT_WAITING, stat ); |
CYG_TEST_CHECK(stat.waiting, "There should be a thread waiting"); |
# endif |
#endif |
|
CYG_TEST_PASS_FINISH("dlmalloc memory pool 1 OK"); |
} |
|
#ifdef CYGSEM_MEMALLOC_ALLOCATOR_VARIABLE_THREADAWARE |
static void entry1( CYG_ADDRWORD data ) |
{ |
mempool1.alloc(memsize+1); |
CYG_TEST_FAIL("Oversized alloc returned"); |
} |
#endif |
|
void dlmalloc1_main( void ) |
{ |
CYG_TEST_INIT(); |
CYG_TEST_INFO("Starting dlmalloc memory pool 1 test"); |
|
#ifdef CYGSEM_MEMALLOC_ALLOCATOR_VARIABLE_THREADAWARE |
new_thread(entry0, 0); |
new_thread(entry1, 1); |
|
Cyg_Scheduler::start(); |
#elif defined(CYGPKG_KERNEL) |
new_thread(entry0, 0); |
|
Cyg_Scheduler::start(); |
#else |
entry0(0); |
#endif |
|
CYG_TEST_FAIL_FINISH("Not reached"); |
} |
|
externC void |
cyg_start( void ) |
{ |
#ifdef CYGSEM_HAL_STOP_CONSTRUCTORS_ON_FLAG |
cyg_hal_invoke_constructors(); |
#endif |
dlmalloc1_main(); |
} |
// EOF dlmalloc1.cxx |
/common/v2_0/tests/memvar2.cxx
0,0 → 1,159
//========================================================================== |
// |
// memvar2.cxx |
// |
// Variable memory pool test 2 |
// |
//========================================================================== |
//####ECOSGPLCOPYRIGHTBEGIN#### |
// ------------------------------------------- |
// This file is part of eCos, the Embedded Configurable Operating System. |
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc. |
// |
// eCos is free software; you can redistribute it and/or modify it under |
// the terms of the GNU General Public License as published by the Free |
// Software Foundation; either version 2 or (at your option) any later version. |
// |
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY |
// WARRANTY; without even the implied warranty of MERCHANTABILITY or |
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
// for more details. |
// |
// You should have received a copy of the GNU General Public License along |
// with eCos; if not, write to the Free Software Foundation, Inc., |
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. |
// |
// As a special exception, if other files instantiate templates or use macros |
// or inline functions from this file, or you compile this file and link it |
// with other works to produce a work based on this file, this file does not |
// by itself cause the resulting work to be covered by the GNU General Public |
// License. However the source code for this file must still be made available |
// in accordance with section (3) of the GNU General Public License. |
// |
// This exception does not invalidate any other reasons why a work based on |
// this file might be covered by the GNU General Public License. |
// |
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc. |
// at http://sources.redhat.com/ecos/ecos-license/ |
// ------------------------------------------- |
//####ECOSGPLCOPYRIGHTEND#### |
//========================================================================== |
//#####DESCRIPTIONBEGIN#### |
// |
// Author(s): dsm, jlarmour |
// Contributors: |
// Date: 2000-06-18 |
// Description: test allocation and freeing in variable memory pools |
//####DESCRIPTIONEND#### |
|
#include <pkgconf/memalloc.h> |
#include <pkgconf/system.h> |
|
#ifdef CYGPKG_KERNEL |
#include <pkgconf/kernel.h> |
|
#include <cyg/kernel/sched.hxx> // Cyg_Scheduler::start() |
#include <cyg/kernel/thread.hxx> // Cyg_Thread |
#include <cyg/kernel/thread.inl> |
#include <cyg/kernel/sema.hxx> |
|
#include <cyg/kernel/sched.inl> |
|
#define NTHREADS 1 |
#include "testaux.hxx" |
|
#endif |
|
#include <cyg/memalloc/memvar.hxx> |
|
#include <cyg/infra/testcase.h> |
|
static const cyg_int32 memsize = 10240; |
|
static cyg_uint8 mem[memsize]; |
|
static Cyg_Mempool_Variable mempool(mem, memsize); |
|
#define NUM_PTRS 16 // Should be even |
|
static cyg_uint8 *ptr[NUM_PTRS]; |
static cyg_int32 size[NUM_PTRS]; |
|
// We make a number of passes over a table of pointers which point to |
// blocks of allocated memory. The block is freed and a new block |
// allocated. The size and the order of the processing of blocks |
// is varied. |
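// Block sizes follow s = (s*2 + 17) % 100, so successive allocations |
// vary in size as well as in the order in which slots are recycled. |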
static void entry( CYG_ADDRWORD data ) |
{ |
cyg_uint32 s = 1; |
|
// The number of passes that can be successfully performed |
// depends on the fragmentation performance of the memory |
// allocator. |
for(cyg_ucount32 passes = 0; passes < 10; passes++) { |
|
|
// The order which the table is processed varies according to |
// stepsize. |
cyg_ucount8 stepsize = (passes*2 + 1) % NUM_PTRS; // odd |
|
|
for(cyg_ucount8 c=0, i=0; c < NUM_PTRS; c++) { |
i = (i+stepsize) % NUM_PTRS; |
if(ptr[i]) { |
for(cyg_ucount32 j=size[i];j--;) { |
CYG_TEST_CHECK(ptr[i][j]==i, "Memory corrupted"); |
} |
CYG_TEST_CHECK(mempool.free(ptr[i], size[i]), |
"bad free"); |
} |
            s = (s*2 + 17) % 100;    // size is always odd, therefore non-zero |
ptr[i] = mempool.try_alloc(s); |
size[i] = s; |
|
CYG_TEST_CHECK(NULL != ptr[i], "Memory pool not big enough"); |
CYG_TEST_CHECK(mem<=ptr[i] && ptr[i]+s < mem+memsize, |
"Allocated region not within pool"); |
|
// Scribble over memory to check whether region overlaps |
// with other regions. The contents of the memory are |
// checked on freeing. This also tests that the memory |
// does not overlap with allocator memory structures. |
for(cyg_ucount32 j=size[i];j--;) { |
ptr[i][j]=i; |
} |
} |
} |
|
CYG_TEST_PASS_FINISH("Variable memory pool 2 OK"); |
} |
|
|
void memvar2_main( void ) |
{ |
CYG_TEST_INIT(); |
CYG_TEST_INFO("Starting Variable memory pool 2 test"); |
|
for(cyg_ucount32 i = 0; i<NUM_PTRS; i++) { |
ptr[i] = NULL; |
} |
|
#ifdef CYGPKG_KERNEL |
new_thread(entry, 0); |
Cyg_Scheduler::start(); |
#else |
entry(0); |
#endif |
|
CYG_TEST_FAIL_FINISH("Not reached"); |
} |
|
externC void |
cyg_start( void ) |
{ |
#ifdef CYGSEM_HAL_STOP_CONSTRUCTORS_ON_FLAG |
cyg_hal_invoke_constructors(); |
#endif |
memvar2_main(); |
} |
// EOF memvar2.cxx |
/common/v2_0/tests/malloc2.c
0,0 → 1,256
//================================================================= |
// |
// malloc2.c |
// |
// Stress testcase for C library malloc(), calloc() and free() |
// |
//================================================================= |
//####ECOSGPLCOPYRIGHTBEGIN#### |
// ------------------------------------------- |
// This file is part of eCos, the Embedded Configurable Operating System. |
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc. |
// |
// eCos is free software; you can redistribute it and/or modify it under |
// the terms of the GNU General Public License as published by the Free |
// Software Foundation; either version 2 or (at your option) any later version. |
// |
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY |
// WARRANTY; without even the implied warranty of MERCHANTABILITY or |
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
// for more details. |
// |
// You should have received a copy of the GNU General Public License along |
// with eCos; if not, write to the Free Software Foundation, Inc., |
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. |
// |
// As a special exception, if other files instantiate templates or use macros |
// or inline functions from this file, or you compile this file and link it |
// with other works to produce a work based on this file, this file does not |
// by itself cause the resulting work to be covered by the GNU General Public |
// License. However the source code for this file must still be made available |
// in accordance with section (3) of the GNU General Public License. |
// |
// This exception does not invalidate any other reasons why a work based on |
// this file might be covered by the GNU General Public License. |
// |
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc. |
// at http://sources.redhat.com/ecos/ecos-license/ |
// ------------------------------------------- |
//####ECOSGPLCOPYRIGHTEND#### |
//================================================================= |
//#####DESCRIPTIONBEGIN#### |
// |
// Author(s): jlarmour |
// Contributors: |
// Date: 2000-04-30 |
// Description: Contains testcode to stress-test C library malloc(), |
// calloc() and free() functions |
// |
// |
//####DESCRIPTIONEND#### |
|
// INCLUDES |
|
#include <pkgconf/system.h> // Overall system configuration |
#include <pkgconf/memalloc.h> // config header so we can know size of malloc pool |
#ifdef CYGPKG_ISOINFRA |
# include <pkgconf/isoinfra.h> |
# include <stdlib.h> |
#endif |
|
#include <cyg/infra/testcase.h> |
|
#if !defined(CYGPKG_ISOINFRA) |
# define NA_MSG "Requires isoinfra package" |
#elif !CYGINT_ISO_MAIN_STARTUP |
# define NA_MSG "Requires main() to be called" |
#elif !CYGINT_ISO_MALLOC |
# define NA_MSG "Requires malloc" |
#elif !CYGINT_ISO_MALLINFO |
# define NA_MSG "Requires mallinfo" |
#endif |
|
#ifdef NA_MSG |
void |
cyg_start(void) |
{ |
CYG_TEST_INIT(); |
CYG_TEST_NA( NA_MSG ); |
CYG_TEST_FINISH("Done"); |
} |
#else |
|
// CONSTANTS |
|
#define NUM_ITERATIONS 1000 |
|
// GLOBALS |
|
static int problem=0; |
|
// FUNCTIONS |
|
extern int |
cyg_memalloc_maxalloc( void ); |
|
static void * |
safe_malloc( size_t size ) |
{ |
void *ptr; |
|
ptr=malloc(size); |
|
if (ptr==NULL) |
{ |
CYG_TEST_FAIL( "malloc returned NULL! " |
"Perhaps the allocator doesn't coalesce?" ); |
problem++; |
} // if |
|
return ptr; |
} // safe_malloc() |
|
|
static void * |
safe_calloc( size_t size ) |
{ |
void *ptr; |
int i; |
|
ptr=calloc(size,1); |
|
if (ptr==NULL) |
{ |
CYG_TEST_FAIL( "calloc returned NULL! " |
"Perhaps the allocator doesn't coalesce" ); |
problem++; |
} // if |
else |
{ |
for (i=0; i < size; i++) |
{ |
if (((char *)ptr)[i] != 0) |
{ |
CYG_TEST_FAIL("calloc didn't clear data completely"); |
problem++; |
return ptr; |
} // if |
} // for |
} // else |
|
return ptr; |
} // safe_calloc() |
|
|
static void |
fill_with_data( char *buf, int size ) |
{ |
int i; |
|
for (i=0; i < size; i++) |
buf[i] = 'f'; |
|
for (i=0; i < size; i++) |
if (buf[i] != 'f') |
{ |
CYG_TEST_FAIL( "data written to buffer does not compare " |
"correctly! #1" ); |
problem++; |
return; |
} // if |
|
|
for (i=0; i < size; i++) |
buf[i] = 'z'; |
|
for (i=0; i < size; i++) |
if (buf[i] != 'z') |
{ |
CYG_TEST_FAIL( "data written to buffer does not compare " |
"correctly! #2" ); |
problem++; |
return; |
} // if |
|
} // fill_with_data() |
|
|
int |
main( int argc, char *argv[] ) |
{ |
char *str1, *str2, *str3; |
int j; |
int poolmax; |
|
CYG_TEST_INIT(); |
|
CYG_TEST_INFO("Starting stress tests from testcase " __FILE__ " for C " |
"library malloc(), calloc() and free() functions"); |
|
poolmax = mallinfo().maxfree; |
|
if ( poolmax <= 0 ) { |
CYG_TEST_FAIL_FINISH( "Can't determine allocation size to use" ); |
} |
|
if ( poolmax < 300 ) |
{ |
CYG_TEST_FAIL_FINISH("This testcase cannot safely be used with a " |
"memory pool for malloc less than 300 bytes"); |
} // if |
|
|
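    // Each iteration interleaves malloc()s, calloc()s and free()s in |
    // several different orders; for later allocations to keep |
    // succeeding the allocator must reuse (and typically coalesce) |
    // the freed blocks. |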
for ( j=1; j < NUM_ITERATIONS; j++) |
{ |
// if ((j % 100) == 0) |
// CYG_TEST_STILL_ALIVE( j, "Multiple mallocs and frees continuing" ); |
|
|
str1 = (char *)safe_malloc(50); |
fill_with_data( str1, 50 ); |
str2 = (char *)safe_calloc(11); |
fill_with_data( str2, 11 ); |
str3 = (char *)safe_malloc(32); |
fill_with_data( str3, 32 ); |
|
free(str2); |
free(str1); |
|
str2 = (char *)safe_calloc(11); |
fill_with_data( str2, 11 ); |
free(str2); |
|
str1 = (char *)safe_calloc(50); |
fill_with_data( str1, 50 ); |
free(str3); |
|
str3 = (char *)safe_malloc(32); |
fill_with_data( str3, 32 ); |
free(str1); |
|
str2 = (char *)safe_calloc(11); |
fill_with_data( str2, 11 ); |
str1 = (char *)safe_malloc(50); |
fill_with_data( str1, 50 ); |
|
free(str3); |
free(str1); |
free(str2); |
|
if (problem != 0) |
break; |
} // for |
|
    // Did it complete successfully? |
if (j==NUM_ITERATIONS) |
CYG_TEST_PASS("Stress test completed successfully"); |
|
CYG_TEST_FINISH("Finished stress tests from testcase " __FILE__ " for C " |
"library malloc(), calloc() and free() functions"); |
|
return 0; |
} // main() |
|
#endif // ifndef NA_MSG |
|
// EOF malloc2.c |
/common/v2_0/tests/dlmalloc2.cxx
0,0 → 1,159
//========================================================================== |
// |
// dlmalloc2.cxx |
// |
// dlmalloc memory pool test 2 |
// |
//========================================================================== |
//####ECOSGPLCOPYRIGHTBEGIN#### |
// ------------------------------------------- |
// This file is part of eCos, the Embedded Configurable Operating System. |
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc. |
// |
// eCos is free software; you can redistribute it and/or modify it under |
// the terms of the GNU General Public License as published by the Free |
// Software Foundation; either version 2 or (at your option) any later version. |
// |
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY |
// WARRANTY; without even the implied warranty of MERCHANTABILITY or |
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
// for more details. |
// |
// You should have received a copy of the GNU General Public License along |
// with eCos; if not, write to the Free Software Foundation, Inc., |
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. |
// |
// As a special exception, if other files instantiate templates or use macros |
// or inline functions from this file, or you compile this file and link it |
// with other works to produce a work based on this file, this file does not |
// by itself cause the resulting work to be covered by the GNU General Public |
// License. However the source code for this file must still be made available |
// in accordance with section (3) of the GNU General Public License. |
// |
// This exception does not invalidate any other reasons why a work based on |
// this file might be covered by the GNU General Public License. |
// |
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc. |
// at http://sources.redhat.com/ecos/ecos-license/ |
// ------------------------------------------- |
//####ECOSGPLCOPYRIGHTEND#### |
//========================================================================== |
//#####DESCRIPTIONBEGIN#### |
// |
// Author(s): dsm, jlarmour |
// Contributors: |
// Date: 2000-06-18 |
// Description: test allocation and freeing in dlmalloc memory pools |
//####DESCRIPTIONEND#### |
|
#include <pkgconf/memalloc.h> |
#include <pkgconf/system.h> |
|
#ifdef CYGPKG_KERNEL |
#include <pkgconf/kernel.h> |
|
#include <cyg/kernel/sched.hxx> // Cyg_Scheduler::start() |
#include <cyg/kernel/thread.hxx> // Cyg_Thread |
#include <cyg/kernel/thread.inl> |
#include <cyg/kernel/sema.hxx> |
|
#include <cyg/kernel/sched.inl> |
|
#define NTHREADS 1 |
#include "testaux.hxx" |
|
#endif |
|
#include <cyg/memalloc/dlmalloc.hxx> |
|
#include <cyg/infra/testcase.h> |
|
static const cyg_int32 memsize = 10240; |
|
static cyg_uint8 mem[memsize]; |
|
static Cyg_Mempool_dlmalloc mempool(mem, memsize); |
|
#define NUM_PTRS 16 // Should be even |
|
static cyg_uint8 *ptr[NUM_PTRS]; |
static cyg_int32 size[NUM_PTRS]; |
|
// We make a number of passes over a table of pointers which point to |
// blocks of allocated memory. The block is freed and a new block |
// allocated. The size and the order of the processing of blocks |
// is varied. |
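// As an illustration (not part of the original test logic): with |
// NUM_PTRS == 16 and an odd stepsize such as 3, the walk |
// i = (i + 3) % 16 visits 3, 6, 9, 12, 15, 2, ... and touches every |
// slot exactly once per pass, since any odd stepsize is coprime with a |
// power-of-two table size. Varying stepsize per pass therefore reorders |
// the free/alloc pattern without ever skipping a table entry. |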
static void entry( CYG_ADDRWORD data ) |
{ |
cyg_uint32 s = 1; |
|
// The number of passes that can be successfully performed |
// depends on the fragmentation performance of the memory |
// allocator. |
for(cyg_ucount32 passes = 0; passes < 10; passes++) { |
|
|
// The order which the table is processed varies according to |
// stepsize. |
cyg_ucount8 stepsize = (passes*2 + 1) % NUM_PTRS; // odd |
|
|
for(cyg_ucount8 c=0, i=0; c < NUM_PTRS; c++) { |
i = (i+stepsize) % NUM_PTRS; |
if(ptr[i]) { |
for(cyg_ucount32 j=size[i];j--;) { |
CYG_TEST_CHECK(ptr[i][j]==i, "Memory corrupted"); |
} |
CYG_TEST_CHECK(mempool.free(ptr[i], size[i]), |
"bad free"); |
} |
            s = (s*2 + 17) % 100; // size is always odd, therefore non-zero |
ptr[i] = mempool.try_alloc(s); |
size[i] = s; |
|
CYG_TEST_CHECK(NULL != ptr[i], "Memory pool not big enough"); |
CYG_TEST_CHECK(mem<=ptr[i] && ptr[i]+s < mem+memsize, |
"Allocated region not within pool"); |
|
// Scribble over memory to check whether region overlaps |
// with other regions. The contents of the memory are |
// checked on freeing. This also tests that the memory |
// does not overlap with allocator memory structures. |
for(cyg_ucount32 j=size[i];j--;) { |
ptr[i][j]=i; |
} |
} |
} |
|
CYG_TEST_PASS_FINISH("dlmalloc memory pool 2 OK"); |
} |
|
|
void dlmalloc2_main( void ) |
{ |
CYG_TEST_INIT(); |
CYG_TEST_INFO("Starting dlmalloc memory pool 2 test"); |
|
for(cyg_ucount32 i = 0; i<NUM_PTRS; i++) { |
ptr[i] = NULL; |
} |
|
#ifdef CYGPKG_KERNEL |
new_thread(entry, 0); |
Cyg_Scheduler::start(); |
#else |
entry(0); |
#endif |
|
CYG_TEST_FAIL_FINISH("Not reached"); |
} |
|
externC void |
cyg_start( void ) |
{ |
#ifdef CYGSEM_HAL_STOP_CONSTRUCTORS_ON_FLAG |
cyg_hal_invoke_constructors(); |
#endif |
dlmalloc2_main(); |
} |
// EOF dlmalloc2.cxx |
/common/v2_0/tests/malloc3.c
0,0 → 1,198
//================================================================= |
// |
// malloc3.c |
// |
// Testcase for C library malloc(), calloc() and free() |
// |
//================================================================= |
//####ECOSGPLCOPYRIGHTBEGIN#### |
// ------------------------------------------- |
// This file is part of eCos, the Embedded Configurable Operating System. |
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc. |
// |
// eCos is free software; you can redistribute it and/or modify it under |
// the terms of the GNU General Public License as published by the Free |
// Software Foundation; either version 2 or (at your option) any later version. |
// |
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY |
// WARRANTY; without even the implied warranty of MERCHANTABILITY or |
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
// for more details. |
// |
// You should have received a copy of the GNU General Public License along |
// with eCos; if not, write to the Free Software Foundation, Inc., |
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. |
// |
// As a special exception, if other files instantiate templates or use macros |
// or inline functions from this file, or you compile this file and link it |
// with other works to produce a work based on this file, this file does not |
// by itself cause the resulting work to be covered by the GNU General Public |
// License. However the source code for this file must still be made available |
// in accordance with section (3) of the GNU General Public License. |
// |
// This exception does not invalidate any other reasons why a work based on |
// this file might be covered by the GNU General Public License. |
// |
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc. |
// at http://sources.redhat.com/ecos/ecos-license/ |
// ------------------------------------------- |
//####ECOSGPLCOPYRIGHTEND#### |
//================================================================= |
//#####DESCRIPTIONBEGIN#### |
// |
// Author(s): jlarmour |
// Contributors: |
// Date: 2000-04-30 |
// Description: Contains testcode for C library malloc(), calloc() and |
// free() functions |
// |
// |
//####DESCRIPTIONEND#### |
|
// INCLUDES |
|
#include <pkgconf/system.h> // Overall system configuration |
#include <pkgconf/memalloc.h> // config header |
#ifdef CYGPKG_ISOINFRA |
# include <pkgconf/isoinfra.h> |
# include <stdlib.h> |
#endif |
#include <cyg/infra/testcase.h> |
|
#if !defined(CYGPKG_ISOINFRA) |
# define NA_MSG "Requires isoinfra package" |
#elif !CYGINT_ISO_MAIN_STARTUP |
# define NA_MSG "Requires main() to be called" |
#elif !CYGINT_ISO_MALLOC |
# define NA_MSG "Requires malloc" |
#elif !CYGINT_ISO_MALLINFO |
# define NA_MSG "Requires mallinfo" |
#endif |
|
#ifdef NA_MSG |
void |
cyg_start(void) |
{ |
CYG_TEST_INIT(); |
CYG_TEST_NA( NA_MSG ); |
CYG_TEST_FINISH("Done"); |
} |
#else |
|
// FUNCTIONS |
|
extern int |
cyg_memalloc_maxalloc( void ); |
|
static int |
fill_with_data( char *buf, int size ) |
{ |
int i; |
|
for (i=0; i < size; i++) |
buf[i] = 'f'; |
|
for (i=0; i < size; i++) |
if (buf[i] != 'f') { |
CYG_TEST_FAIL( "data written to buffer does not compare " |
"correctly! #1" ); |
return 0; |
} // if |
|
|
for (i=0; i < size; i++) |
buf[i] = 'z'; |
|
for (i=0; i < size; i++) |
if (buf[i] != 'z') { |
CYG_TEST_FAIL( "data written to buffer does not compare " |
"correctly! #2" ); |
return 0; |
} // if |
|
return 1; |
} // fill_with_data() |
|
int |
main( int argc, char *argv[] ) |
{ |
char *str; |
int size; |
int poolmax; |
|
CYG_TEST_INIT(); |
|
CYG_TEST_INFO("Starting tests from testcase " __FILE__ " for C library " |
"malloc() and free() functions"); |
CYG_TEST_INFO("This checks allocation and freeing of large regions"); |
|
poolmax = mallinfo().maxfree; |
|
if ( poolmax <= 0 ) { |
CYG_TEST_FAIL_FINISH( "Can't determine allocation size to use" ); |
} |
|
size = poolmax/2; |
|
// Don't allocate all the memory at once - leave room for any structures |
// used to manage the memory |
str = (char *)malloc( size ); |
CYG_TEST_PASS_FAIL( str != NULL, "allocation 1"); |
CYG_TEST_PASS_FAIL( fill_with_data( str, size ), "allocation 1 usability"); |
free( str ); |
|
|
str = (char *)malloc( size ); |
CYG_TEST_PASS_FAIL( str != NULL, "allocation 2"); |
CYG_TEST_PASS_FAIL( fill_with_data( str, size ), "allocation 2 usability"); |
free( str ); |
|
str = (char *)malloc( size ); |
CYG_TEST_PASS_FAIL( str != NULL, "allocation 3"); |
CYG_TEST_PASS_FAIL( fill_with_data( str, size ), "allocation 3 usability"); |
free( str ); |
|
str = (char *)malloc( size ); |
CYG_TEST_PASS_FAIL( str != NULL, "allocation 4"); |
CYG_TEST_PASS_FAIL( fill_with_data( str, size ), "allocation 4 usability"); |
free( str ); |
|
str = (char *)malloc( size ); |
CYG_TEST_PASS_FAIL( str != NULL, "allocation 5"); |
CYG_TEST_PASS_FAIL( fill_with_data( str, size ), "allocation 5 usability"); |
free( str ); |
|
str = (char *)malloc( size ); |
CYG_TEST_PASS_FAIL( str != NULL, "allocation 6"); |
CYG_TEST_PASS_FAIL( fill_with_data( str, size ), "allocation 6 usability"); |
free( str ); |
|
str = (char *)malloc( size ); |
CYG_TEST_PASS_FAIL( str != NULL, "allocation 7"); |
CYG_TEST_PASS_FAIL( fill_with_data( str, size ), "allocation 7 usability"); |
free( str ); |
|
str = (char *)malloc( size ); |
CYG_TEST_PASS_FAIL( str != NULL, "allocation 8"); |
CYG_TEST_PASS_FAIL( fill_with_data( str, size ), "allocation 8 usability"); |
free( str ); |
|
str = (char *)malloc( size ); |
CYG_TEST_PASS_FAIL( str != NULL, "allocation 9"); |
CYG_TEST_PASS_FAIL( fill_with_data( str, size ), "allocation 9 usability"); |
free( str ); |
|
str = (char *)malloc( size ); |
CYG_TEST_PASS_FAIL( str != NULL, "allocation 10"); |
CYG_TEST_PASS_FAIL( fill_with_data( str, size ),"allocation 10 usability"); |
free( str ); |
|
CYG_TEST_FINISH("Finished tests from testcase " __FILE__ " for C library " |
"malloc() and free() functions"); |
|
return 0; |
} // main() |
|
#endif // ifndef NA_MSG |
|
// EOF malloc3.c |
/common/v2_0/include/memjoin.inl
0,0 → 1,343
#ifndef CYGONCE_MEMALLOC_MEMJOIN_INL |
#define CYGONCE_MEMALLOC_MEMJOIN_INL |
|
//========================================================================== |
// |
// memjoin.inl |
// |
// Pseudo memory pool used to join together other memory pools |
// |
//========================================================================== |
//####ECOSGPLCOPYRIGHTBEGIN#### |
// ------------------------------------------- |
// This file is part of eCos, the Embedded Configurable Operating System. |
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc. |
// |
// eCos is free software; you can redistribute it and/or modify it under |
// the terms of the GNU General Public License as published by the Free |
// Software Foundation; either version 2 or (at your option) any later version. |
// |
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY |
// WARRANTY; without even the implied warranty of MERCHANTABILITY or |
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
// for more details. |
// |
// You should have received a copy of the GNU General Public License along |
// with eCos; if not, write to the Free Software Foundation, Inc., |
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. |
// |
// As a special exception, if other files instantiate templates or use macros |
// or inline functions from this file, or you compile this file and link it |
// with other works to produce a work based on this file, this file does not |
// by itself cause the resulting work to be covered by the GNU General Public |
// License. However the source code for this file must still be made available |
// in accordance with section (3) of the GNU General Public License. |
// |
// This exception does not invalidate any other reasons why a work based on |
// this file might be covered by the GNU General Public License. |
// |
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc. |
// at http://sources.redhat.com/ecos/ecos-license/ |
// ------------------------------------------- |
//####ECOSGPLCOPYRIGHTEND#### |
//========================================================================== |
//#####DESCRIPTIONBEGIN#### |
// |
// Author(s): jlarmour |
// Contributors: |
// Date: 2000-06-12 |
// Purpose: Implement joined up memory pool class interface |
// Description: Inline class for constructing a pseudo allocator that contains |
// multiple other allocators. It caters solely to the requirements |
// of the malloc implementation. |
// Usage: #include <cyg/memalloc/memjoin.hxx> |
// |
// |
//####DESCRIPTIONEND#### |
// |
//========================================================================== |
|
// CONFIGURATION |
|
#include <pkgconf/memalloc.h> |
|
// INCLUDES |
|
#include <cyg/infra/cyg_type.h> // types |
#include <cyg/infra/cyg_ass.h> // assertion macros |
#include <cyg/infra/cyg_trac.h> // tracing macros |
#include <cyg/memalloc/memjoin.hxx> // header for this file just in case |
|
|
// FUNCTIONS |
|
|
// ------------------------------------------------------------------------- |
// find_pool_for_ptr returns the pool that ptr came from |
|
template <class T> |
inline T * |
Cyg_Mempool_Joined<T>::find_pool_for_ptr( const cyg_uint8 *ptr ) |
{ |
cyg_uint8 i; |
|
for ( i=0; i < poolcount; i++ ) { |
if ( ptr >= pools[i].startaddr && |
ptr < pools[i].endaddr ) { |
return pools[i].pool; |
} // if |
} // for |
return NULL; |
} // Cyg_Mempool_Joined<T>::find_pool_for_ptr() |
|
|
// ------------------------------------------------------------------------- |
// Constructor |
template <class T> |
inline |
Cyg_Mempool_Joined<T>::Cyg_Mempool_Joined( cyg_uint8 num_heaps, T *heaps[] ) |
{ |
Cyg_Mempool_Status stat; |
cyg_uint8 i; |
|
CYG_REPORT_FUNCTION(); |
CYG_REPORT_FUNCARG2( "num_heaps=%u, heaps=%08x", (int)num_heaps, heaps ); |
|
CYG_CHECK_DATA_PTRC( heaps ); |
|
poolcount = num_heaps; |
|
// allocate internal structures - this should work because we should be |
// the first allocation for this pool; and if there isn't enough space |
// for these teeny bits, what hope is there! |
for (i=0; i<num_heaps; i++) { |
pools = (struct pooldesc *) |
heaps[i]->try_alloc( num_heaps * sizeof(struct pooldesc) ); |
if ( NULL != pools ) |
break; |
} // for |
|
CYG_ASSERT( pools != NULL, |
"Couldn't allocate internal structures from any pools!"); |
|
// now set up internal structures |
for (i=0; i<num_heaps; i++) { |
pools[i].pool = heaps[i]; |
heaps[i]->get_status( CYG_MEMPOOL_STAT_ARENABASE| |
CYG_MEMPOOL_STAT_ARENASIZE, |
stat ); |
|
CYG_ASSERT( stat.arenabase != (const cyg_uint8 *)-1, |
"pool returns valid pool base" ); |
CYG_CHECK_DATA_PTR( stat.arenabase, "Bad arena location" ); |
CYG_ASSERT( stat.arenasize > 0, "pool returns valid pool size" ); |
|
pools[i].startaddr = stat.arenabase; |
pools[i].endaddr = stat.arenabase + stat.arenasize; |
} // for |
|
CYG_REPORT_RETURN(); |
} // Cyg_Mempool_Joined<T>::Cyg_Mempool_Joined() |
|
|
|
// ------------------------------------------------------------------------- |
// Destructor |
template <class T> |
inline |
Cyg_Mempool_Joined<T>::~Cyg_Mempool_Joined() |
{ |
CYG_REPORT_FUNCTION(); |
CYG_REPORT_FUNCARGVOID(); |
|
cyg_bool freestat; |
|
freestat = free( (cyg_uint8 *)pools, poolcount * sizeof(struct pooldesc) ); |
CYG_ASSERT( freestat, "free failed!"); |
CYG_REPORT_RETURN(); |
} // Cyg_Mempool_Joined<T>::~Cyg_Mempool_Joined() |
|
|
|
// ------------------------------------------------------------------------- |
// get some memory, return NULL if none available |
template <class T> |
inline cyg_uint8 * |
Cyg_Mempool_Joined<T>::try_alloc( cyg_int32 size ) |
{ |
cyg_uint8 i; |
cyg_uint8 *ptr=NULL; |
|
CYG_REPORT_FUNCTYPE( "returning memory at addr %08x" ); |
CYG_REPORT_FUNCARG1DV( size ); |
|
for (i=0; i<poolcount; i++) { |
ptr = pools[i].pool->try_alloc( size ); |
if ( NULL != ptr ) |
break; |
} |
|
CYG_REPORT_RETVAL( ptr ); |
return ptr; |
} // Cyg_Mempool_Joined<T>::try_alloc() |
|
|
// ------------------------------------------------------------------------- |
// resize existing allocation, if oldsize is non-NULL, previous |
// allocation size is placed into it. If previous size not available, |
// it is set to 0. NB previous allocation size may have been rounded up. |
// Occasionally the allocation can be adjusted *backwards* as well as, |
// or instead of forwards, therefore the address of the resized |
// allocation is returned, or NULL if no resizing was possible. |
// Note that this differs from ::realloc() in that no attempt is |
// made to call malloc() if resizing is not possible - that is left |
// to higher layers. The data is copied from old to new though. |
// The effects of alloc_ptr==NULL or newsize==0 are undefined |
template <class T> |
inline cyg_uint8 * |
Cyg_Mempool_Joined<T>::resize_alloc( cyg_uint8 *alloc_ptr, cyg_int32 newsize, |
cyg_int32 *oldsize ) |
{ |
T *pool; |
cyg_uint8 * ret; |
|
CYG_REPORT_FUNCTYPE( "success=" ); |
CYG_REPORT_FUNCARG3( "alloc_ptr=%08x, newsize=%d, &oldsize=%08x", |
alloc_ptr, newsize, oldsize ); |
CYG_CHECK_DATA_PTRC( alloc_ptr ); |
if (NULL != oldsize ) |
CYG_CHECK_DATA_PTRC( oldsize ); |
|
pool = find_pool_for_ptr( alloc_ptr ); |
CYG_ASSERT( NULL != pool, "Couldn't find pool for pointer!" ); |
|
ret = pool->resize_alloc( alloc_ptr, newsize, oldsize ); |
|
CYG_REPORT_RETVAL( ret ); |
return ret; |
} // Cyg_Mempool_Joined<T>::resize_alloc() |
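// A hedged usage sketch (illustrative only, not from the original |
// source; "pool" stands for some Cyg_Mempool_Joined instance): a higher |
// layer that falls back to allocate-copy-free when resize_alloc() |
// cannot adjust the block in place, which is the division of labour |
// described above: |
// |
//     cyg_int32 oldsize; |
//     cyg_uint8 *newptr = pool.resize_alloc( ptr, newsize, &oldsize ); |
//     if ( NULL == newptr ) {              // could not resize in place |
//         newptr = pool.try_alloc( newsize ); |
//         if ( NULL != newptr ) { |
//             if ( oldsize > 0 ) |
//                 memcpy( newptr, ptr, |
//                         oldsize < newsize ? oldsize : newsize ); |
//             pool.free( ptr, oldsize ); |
//         } |
//     } |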
|
|
// ------------------------------------------------------------------------- |
// free the memory back to the pool |
// returns true on success |
template <class T> |
inline cyg_bool |
Cyg_Mempool_Joined<T>::free( cyg_uint8 *ptr, cyg_int32 size ) |
{ |
T *pool; |
cyg_bool ret; |
|
CYG_REPORT_FUNCTYPE("success="); |
CYG_REPORT_FUNCARG2( "ptr=%08x, size=%d", ptr, size ); |
CYG_CHECK_DATA_PTRC( ptr ); |
|
pool = find_pool_for_ptr( ptr ); |
CYG_ASSERT( NULL != pool, "Couldn't find pool for pointer!" ); |
|
ret = pool->free( ptr, size ); |
|
CYG_REPORT_RETVAL( ret ); |
return ret; |
} // Cyg_Mempool_Joined<T>::free() |
|
|
// ------------------------------------------------------------------------- |
// Get memory pool status |
// flags is a bitmask of requested fields to fill in. The flags are |
// defined in common.hxx |
template <class T> |
inline void |
Cyg_Mempool_Joined<T>::get_status( cyg_mempool_status_flag_t flags, |
Cyg_Mempool_Status &status ) |
{ |
cyg_uint8 i; |
Cyg_Mempool_Status tmpstat; |
|
status.arenasize = status.freeblocks = 0; |
status.totalallocated = status.totalfree = 0; |
status.maxfree = status.origsize = 0; |
|
for ( i=0; i<poolcount; i++ ) { |
if ( status.arenasize >= 0 ) { |
if ( 0 != (flags & CYG_MEMPOOL_STAT_ARENASIZE) ) { |
pools[i].pool->get_status( CYG_MEMPOOL_STAT_ARENASIZE, |
tmpstat ); |
if ( tmpstat.arenasize > 0) |
status.arenasize += tmpstat.arenasize; |
else |
status.arenasize = -1; |
} // if |
} // if |
|
if ( status.freeblocks >= 0 ) { |
if ( 0 != (flags & CYG_MEMPOOL_STAT_FREEBLOCKS) ) { |
pools[i].pool->get_status( CYG_MEMPOOL_STAT_FREEBLOCKS, |
tmpstat ); |
if ( tmpstat.freeblocks > 0 ) |
status.freeblocks += tmpstat.freeblocks; |
else |
status.freeblocks = -1; |
} // if |
} // if |
|
if ( status.totalallocated >= 0 ) { |
if ( 0 != (flags & CYG_MEMPOOL_STAT_TOTALALLOCATED) ) { |
pools[i].pool->get_status( CYG_MEMPOOL_STAT_TOTALALLOCATED, |
tmpstat ); |
if ( tmpstat.totalallocated > 0 ) |
status.totalallocated += tmpstat.totalallocated; |
else |
status.totalallocated = -1; |
} // if |
} // if |
|
if ( status.totalfree >= 0 ) { |
if ( 0 != (flags & CYG_MEMPOOL_STAT_TOTALFREE) ) { |
pools[i].pool->get_status( CYG_MEMPOOL_STAT_TOTALFREE, |
tmpstat ); |
if ( tmpstat.totalfree > 0 ) |
status.totalfree += tmpstat.totalfree; |
else |
status.totalfree = -1; |
} // if |
} // if |
|
if ( status.maxfree >= 0 ) { |
if ( 0 != (flags & CYG_MEMPOOL_STAT_MAXFREE) ) { |
pools[i].pool->get_status( CYG_MEMPOOL_STAT_MAXFREE, tmpstat ); |
if ( tmpstat.maxfree < 0 ) |
status.maxfree = -1; |
else if ( tmpstat.maxfree > status.maxfree ) |
status.maxfree = tmpstat.maxfree; |
} // if |
} // if |
|
if ( status.origsize >= 0 ) { |
if ( 0 != (flags & CYG_MEMPOOL_STAT_ORIGSIZE) ) { |
pools[i].pool->get_status( CYG_MEMPOOL_STAT_ORIGSIZE, tmpstat ); |
if ( tmpstat.origsize > 0 ) |
status.origsize += tmpstat.origsize; |
else |
status.origsize = -1; |
} // if |
} // if |
|
if ( status.maxoverhead >= 0 ) { |
if ( 0 != (flags & CYG_MEMPOOL_STAT_MAXOVERHEAD) ) { |
pools[i].pool->get_status( CYG_MEMPOOL_STAT_MAXOVERHEAD, |
tmpstat ); |
if ( tmpstat.maxoverhead < 0 ) |
status.maxoverhead = -1; |
else if ( tmpstat.maxoverhead > status.maxoverhead ) |
status.maxoverhead = tmpstat.maxoverhead; |
} // if |
} // if |
} // for |
} // Cyg_Mempool_Joined<T>::get_status() |
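// Illustrative call (an assumption, not taken from the original |
// source; "joined" stands for a Cyg_Mempool_Joined instance): the flags |
// may be OR'd together to fill several fields in a single call, e.g. |
// |
//     Cyg_Mempool_Status stat; |
//     joined.get_status( CYG_MEMPOOL_STAT_TOTALFREE|CYG_MEMPOOL_STAT_MAXFREE, |
//                        stat ); |
// |
// A field left at -1 in the result means one of the constituent pools |
// could not report that statistic. |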
|
|
// ------------------------------------------------------------------------- |
|
#endif // ifndef CYGONCE_MEMALLOC_MEMJOIN_INL |
// EOF memjoin.inl |
/common/v2_0/include/dlmallocimpl.hxx
0,0 → 1,184
#ifndef CYGONCE_MEMALLOC_DLMALLOCIMPL_HXX |
#define CYGONCE_MEMALLOC_DLMALLOCIMPL_HXX |
|
//========================================================================== |
// |
// dlmallocimpl.hxx |
// |
// Interface to the port of Doug Lea's malloc implementation |
// |
//========================================================================== |
//####ECOSGPLCOPYRIGHTBEGIN#### |
// ------------------------------------------- |
// This file is part of eCos, the Embedded Configurable Operating System. |
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc. |
// |
// eCos is free software; you can redistribute it and/or modify it under |
// the terms of the GNU General Public License as published by the Free |
// Software Foundation; either version 2 or (at your option) any later version. |
// |
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY |
// WARRANTY; without even the implied warranty of MERCHANTABILITY or |
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
// for more details. |
// |
// You should have received a copy of the GNU General Public License along |
// with eCos; if not, write to the Free Software Foundation, Inc., |
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. |
// |
// As a special exception, if other files instantiate templates or use macros |
// or inline functions from this file, or you compile this file and link it |
// with other works to produce a work based on this file, this file does not |
// by itself cause the resulting work to be covered by the GNU General Public |
// License. However the source code for this file must still be made available |
// in accordance with section (3) of the GNU General Public License. |
// |
// This exception does not invalidate any other reasons why a work based on |
// this file might be covered by the GNU General Public License. |
// |
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc. |
// at http://sources.redhat.com/ecos/ecos-license/ |
// ------------------------------------------- |
//####ECOSGPLCOPYRIGHTEND#### |
//========================================================================== |
//#####DESCRIPTIONBEGIN#### |
// |
// Author(s): jlarmour |
// Contributors: |
// Date: 2000-06-18 |
// Purpose: Define standard interface to Doug Lea's malloc implementation |
// Description: Doug Lea's malloc has been ported to eCos. This file provides |
// the interface between the implementation and the standard |
// memory allocator interface required by eCos |
// Usage: #include <cyg/memalloc/dlmalloc.hxx> |
// |
// |
//####DESCRIPTIONEND#### |
// |
//========================================================================== |
|
// CONFIGURATION |
|
#include <pkgconf/memalloc.h> |
|
// INCLUDES |
|
#include <stddef.h> // size_t, ptrdiff_t |
#include <cyg/infra/cyg_type.h> // types |
|
#include <cyg/memalloc/common.hxx> // Common memory allocator infra |
|
// As a special case, override CYGIMP_MEMALLOC_ALLOCATOR_DLMALLOC_SAFE_MULTIPLE |
// if the malloc config says so |
#ifdef CYGIMP_MEMALLOC_MALLOC_DLMALLOC |
// forward declaration to prevent header dependency problems |
class Cyg_Mempool_dlmalloc; |
# include <pkgconf/heaps.hxx> |
# if (CYGMEM_HEAP_COUNT > 1) && \ |
!defined(CYGIMP_MEMALLOC_ALLOCATOR_DLMALLOC_SAFE_MULTIPLE) |
# define CYGIMP_MEMALLOC_ALLOCATOR_DLMALLOC_SAFE_MULTIPLE 1 |
# endif |
#endif |
|
// CONSTANTS |
|
// number of bins - but changing this alone will not change the number of |
// bins! |
#define CYGPRI_MEMALLOC_ALLOCATOR_DLMALLOC_NAV 128 |
|
// TYPE DEFINITIONS |
|
|
class Cyg_Mempool_dlmalloc_Implementation |
{ |
public: |
    /* Cyg_dlmalloc_size_t is the word-size used for internal bookkeeping |
       of chunk sizes. On a 64-bit machine, you can reduce malloc |
       overhead, especially for very small chunks, by defining |
       Cyg_dlmalloc_size_t to be a 32-bit type at the expense of not |
being able to handle requests greater than 2^31. This limitation is |
hardly ever a concern; you are encouraged to set this. However, the |
default version is the same as size_t. */ |
|
typedef size_t Cyg_dlmalloc_size_t; |
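    /* For example (a hypothetical configuration, not part of this port |
       as shipped), a 64-bit target that never needs chunks larger than |
       2^31 bytes could instead use |
           typedef cyg_uint32 Cyg_dlmalloc_size_t; |
       in place of the size_t default above. */ |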
|
typedef struct malloc_chunk |
{ |
Cyg_dlmalloc_size_t prev_size; /* Size of previous chunk (if free). */ |
Cyg_dlmalloc_size_t size; /* Size in bytes, including overhead. */ |
struct malloc_chunk* fd; /* double links -- used only if free. */ |
struct malloc_chunk* bk; |
}; |
|
protected: |
/* The first value returned from sbrk */ |
cyg_uint8 *arenabase; |
|
/* The total memory in the pool */ |
cyg_int32 arenasize; |
|
#ifdef CYGIMP_MEMALLOC_ALLOCATOR_DLMALLOC_SAFE_MULTIPLE |
struct Cyg_Mempool_dlmalloc_Implementation::malloc_chunk * |
av_[ CYGPRI_MEMALLOC_ALLOCATOR_DLMALLOC_NAV * 2 + 2 ]; |
#endif |
|
#ifdef CYGDBG_MEMALLOC_ALLOCATOR_DLMALLOC_DEBUG |
|
void |
do_check_chunk( struct malloc_chunk * ); |
|
void |
do_check_free_chunk( struct malloc_chunk * ); |
|
void |
do_check_inuse_chunk( struct malloc_chunk * ); |
|
void |
do_check_malloced_chunk( struct malloc_chunk *, Cyg_dlmalloc_size_t ); |
#endif |
|
public: |
// Constructor: gives the base and size of the arena in which memory is |
// to be carved out, note that management structures are taken from the |
// same arena. |
Cyg_Mempool_dlmalloc_Implementation( cyg_uint8 * /* base */, |
cyg_int32 /* size */, |
CYG_ADDRWORD /* argthru */ ); |
|
// Destructor |
~Cyg_Mempool_dlmalloc_Implementation() {} |
|
// get some memory, return NULL if none available |
cyg_uint8 * |
try_alloc( cyg_int32 /* size */ ); |
|
// resize existing allocation, if oldsize is non-NULL, previous |
// allocation size is placed into it. If previous size not available, |
// it is set to 0. NB previous allocation size may have been rounded up. |
// Occasionally the allocation can be adjusted *backwards* as well as, |
// or instead of forwards, therefore the address of the resized |
// allocation is returned, or NULL if no resizing was possible. |
// Note that this differs from ::realloc() in that no attempt is |
// made to call malloc() if resizing is not possible - that is left |
// to higher layers. The data is copied from old to new though. |
// The effects of alloc_ptr==NULL or newsize==0 are undefined |
cyg_uint8 * |
resize_alloc( cyg_uint8 * /* alloc_ptr */, cyg_int32 /* newsize */, |
cyg_int32 * /* oldsize */ ); |
|
// free the memory back to the pool |
// returns true on success |
cyg_bool |
free( cyg_uint8 * /* ptr */, cyg_int32 /* size */ =0 ); |
|
// Get memory pool status |
// flags is a bitmask of requested fields to fill in. The flags are |
// defined in common.hxx |
void |
get_status( cyg_mempool_status_flag_t /* flags */, |
Cyg_Mempool_Status & /* status */ ); |
|
}; |
|
#endif // ifndef CYGONCE_MEMALLOC_DLMALLOCIMPL_HXX |
// EOF dlmallocimpl.hxx |
/common/v2_0/include/mempolt2.inl
0,0 → 1,400
#ifndef CYGONCE_MEMALLOC_MEMPOLT2_INL |
#define CYGONCE_MEMALLOC_MEMPOLT2_INL |
|
//========================================================================== |
// |
// mempolt2.inl |
// |
// Mempolt2 (Memory pool template) class declarations |
// |
//========================================================================== |
//####ECOSGPLCOPYRIGHTBEGIN#### |
// ------------------------------------------- |
// This file is part of eCos, the Embedded Configurable Operating System. |
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc. |
// |
// eCos is free software; you can redistribute it and/or modify it under |
// the terms of the GNU General Public License as published by the Free |
// Software Foundation; either version 2 or (at your option) any later version. |
// |
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY |
// WARRANTY; without even the implied warranty of MERCHANTABILITY or |
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
// for more details. |
// |
// You should have received a copy of the GNU General Public License along |
// with eCos; if not, write to the Free Software Foundation, Inc., |
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. |
// |
// As a special exception, if other files instantiate templates or use macros |
// or inline functions from this file, or you compile this file and link it |
// with other works to produce a work based on this file, this file does not |
// by itself cause the resulting work to be covered by the GNU General Public |
// License. However the source code for this file must still be made available |
// in accordance with section (3) of the GNU General Public License. |
// |
// This exception does not invalidate any other reasons why a work based on |
// this file might be covered by the GNU General Public License. |
// |
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc. |
// at http://sources.redhat.com/ecos/ecos-license/ |
// ------------------------------------------- |
//####ECOSGPLCOPYRIGHTEND#### |
//========================================================================== |
//#####DESCRIPTIONBEGIN#### |
// |
// Author(s): hmt |
// Contributors: jlarmour |
// Date: 2000-06-12 |
// Purpose: Define Mempolt2 class interface |
// Description: The class defined here provides the APIs for thread-safe, |
// kernel-savvy memory managers; make a class with the |
// underlying allocator as the template parameter. |
// Usage: #include <cyg/memalloc/mempolt2.hxx> |
// |
// |
//####DESCRIPTIONEND#### |
// |
//========================================================================== |
|
#include <cyg/infra/cyg_ass.h> // assertion support |
#include <cyg/infra/cyg_trac.h> // tracing support |
#include <cyg/kernel/thread.inl> // implementation eg. Cyg_Thread::self(); |
#include <cyg/kernel/sched.inl> // implementation eg. Cyg_Scheduler::lock(); |
|
// ------------------------------------------------------------------------- |
// Constructor; we _require_ these arguments and just pass them through to |
// the implementation memory pool in use. |
template <class T> |
Cyg_Mempolt2<T>::Cyg_Mempolt2( |
cyg_uint8 *base, |
cyg_int32 size, |
CYG_ADDRWORD arg_thru) // Constructor |
: pool( base, size, arg_thru ) |
{ |
} |
|
|
template <class T> |
Cyg_Mempolt2<T>::~Cyg_Mempolt2() // destructor |
{ |
// Prevent preemption |
Cyg_Scheduler::lock(); |
|
while ( ! queue.empty() ) { |
Cyg_Thread *thread = queue.dequeue(); |
thread->set_wake_reason( Cyg_Thread::DESTRUCT ); |
thread->wake(); |
} |
|
// Unlock the scheduler and maybe switch threads |
Cyg_Scheduler::unlock(); |
} |
|
// ------------------------------------------------------------------------- |
// get some memory; wait if none available |
template <class T> |
inline cyg_uint8 * |
Cyg_Mempolt2<T>::alloc( cyg_int32 size ) |
{ |
CYG_REPORT_FUNCTION(); |
|
// Prevent preemption |
Cyg_Scheduler::lock(); |
CYG_ASSERTCLASS( this, "Bad this pointer"); |
|
cyg_uint8 *ret; |
ret = pool.try_alloc( size ); |
if ( ret ) { |
Cyg_Scheduler::unlock(); |
CYG_ASSERTCLASS( this, "Bad this pointer"); |
CYG_REPORT_RETVAL( ret ); |
return ret; |
} |
|
Cyg_Thread *self = Cyg_Thread::self(); |
|
Mempolt2WaitInfo waitinfo( size ); |
|
self->set_wait_info( (CYG_ADDRWORD)&waitinfo ); |
self->set_sleep_reason( Cyg_Thread::WAIT ); |
self->sleep(); |
queue.enqueue( self ); |
|
CYG_ASSERT( 1 == Cyg_Scheduler::get_sched_lock(), |
"Called with non-zero scheduler lock"); |
|
// Unlock scheduler and allow other threads to run |
Cyg_Scheduler::unlock(); |
|
cyg_bool result = true; // just used as a flag here |
switch( self->get_wake_reason() ) |
{ |
case Cyg_Thread::DESTRUCT: |
case Cyg_Thread::BREAK: |
result = false; |
break; |
|
case Cyg_Thread::EXIT: |
self->exit(); |
break; |
|
default: |
break; |
} |
|
if ( ! result ) |
ret = NULL; |
else |
ret = waitinfo.addr; |
|
CYG_ASSERT( (!result) || (NULL != ret), "Good result but no alloc!" ); |
CYG_ASSERTCLASS( this, "Bad this pointer"); |
CYG_REPORT_RETVAL( ret ); |
return ret; |
} |
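// (How a blocked alloc() is eventually satisfied: free(), further down |
// in this file, dequeues a waiting thread, performs the allocation on |
// its behalf and stores the resulting address in the Mempolt2WaitInfo |
// structure hung off the thread's wait_info; the woken thread then |
// returns it as waitinfo.addr above.) |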
|
#ifdef CYGFUN_KERNEL_THREADS_TIMER |
// ------------------------------------------------------------------------- |
// get some memory with a timeout |
template <class T> |
inline cyg_uint8 * |
Cyg_Mempolt2<T>::alloc( cyg_int32 size, cyg_tick_count abs_timeout ) |
{ |
CYG_REPORT_FUNCTION(); |
|
// Prevent preemption |
Cyg_Scheduler::lock(); |
CYG_ASSERTCLASS( this, "Bad this pointer"); |
|
cyg_uint8 *ret; |
ret = pool.try_alloc( size ); |
if ( ret ) { |
Cyg_Scheduler::unlock(); |
CYG_ASSERTCLASS( this, "Bad this pointer"); |
CYG_REPORT_RETVAL( ret ); |
return ret; |
} |
|
Cyg_Thread *self = Cyg_Thread::self(); |
|
Mempolt2WaitInfo waitinfo( size ); |
|
self->set_timer( abs_timeout, Cyg_Thread::TIMEOUT ); |
|
// If the timeout is in the past, the wake reason will have been set to |
// something other than NONE already. If so, skip the wait and go |
// straight to unlock. |
|
if( Cyg_Thread::NONE == self->get_wake_reason() ) { |
self->set_wait_info( (CYG_ADDRWORD)&waitinfo ); |
self->sleep(); |
queue.enqueue( self ); |
} |
|
CYG_ASSERT( 1 == Cyg_Scheduler::get_sched_lock(), |
"Called with non-zero scheduler lock"); |
|
// Unlock scheduler and allow other threads to run |
Cyg_Scheduler::unlock(); |
|
// clear the timer; if it actually fired, no worries. |
self->clear_timer(); |
|
cyg_bool result = true; // just used as a flag here |
switch( self->get_wake_reason() ) |
{ |
case Cyg_Thread::TIMEOUT: |
result = false; |
break; |
|
case Cyg_Thread::DESTRUCT: |
case Cyg_Thread::BREAK: |
result = false; |
break; |
|
case Cyg_Thread::EXIT: |
self->exit(); |
break; |
|
default: |
break; |
} |
|
if ( ! result ) |
ret = NULL; |
else |
ret = waitinfo.addr; |
|
CYG_ASSERT( (!result) || (NULL != ret), "Good result but no alloc!" ); |
CYG_ASSERTCLASS( this, "Bad this pointer"); |
CYG_REPORT_RETVAL( ret ); |
return ret; |
} |
#endif |
|
// ------------------------------------------------------------------------- |
// get some memory, return NULL if none available |
template <class T> |
inline cyg_uint8 * |
Cyg_Mempolt2<T>::try_alloc( cyg_int32 size ) |
{ |
CYG_REPORT_FUNCTION(); |
|
// Prevent preemption |
Cyg_Scheduler::lock(); |
CYG_ASSERTCLASS( this, "Bad this pointer"); |
|
cyg_uint8 *ret = pool.try_alloc( size ); |
|
CYG_ASSERTCLASS( this, "Bad this pointer"); |
|
// Unlock the scheduler and maybe switch threads |
Cyg_Scheduler::unlock(); |
return ret; |
} |
|
|
// ------------------------------------------------------------------------- |
// resize existing allocation, if oldsize is non-NULL, previous |
// allocation size is placed into it. If previous size not available, |
// it is set to 0. NB previous allocation size may have been rounded up. |
// Occasionally the allocation can be adjusted *backwards* as well as, |
// or instead of forwards, therefore the address of the resized |
// allocation is returned, or NULL if no resizing was possible. |
// Note that this differs from ::realloc() in that no attempt is |
// made to call malloc() if resizing is not possible - that is left |
// to higher layers. The data is copied from old to new though. |
// The effects of alloc_ptr==NULL or newsize==0 are undefined |
template <class T> |
cyg_uint8 * |
Cyg_Mempolt2<T>::resize_alloc( cyg_uint8 *alloc_ptr, cyg_int32 newsize, |
cyg_int32 *oldsize ) |
{ |
CYG_REPORT_FUNCTION(); |
|
// Prevent preemption |
Cyg_Scheduler::lock(); |
CYG_ASSERTCLASS( this, "Bad this pointer"); |
|
cyg_uint8 *ret = pool.resize_alloc( alloc_ptr, newsize, oldsize ); |
|
CYG_ASSERTCLASS( this, "Bad this pointer"); |
|
// Unlock the scheduler and maybe switch threads |
Cyg_Scheduler::unlock(); |
return ret; |
} |
|
|
// ------------------------------------------------------------------------- |
// free the memory back to the pool |
template <class T> |
cyg_bool |
Cyg_Mempolt2<T>::free( cyg_uint8 *p, cyg_int32 size ) |
{ |
CYG_REPORT_FUNCTION(); |
// Prevent preemption |
Cyg_Scheduler::lock(); |
CYG_ASSERTCLASS( this, "Bad this pointer"); |
|
cyg_int32 ret = pool.free( p, size ); |
|
// anyone waiting? |
if ( !(queue.empty()) ) { |
Mempolt2WaitInfo *p; |
Cyg_Thread *thread; |
|
#ifdef CYGIMP_MEM_T_ONEFREE_TO_ONEALLOC |
thread = queue.dequeue(); |
p = (Mempolt2WaitInfo *)(thread->get_wait_info()); |
CYG_ASSERT( NULL == p->addr, "Thread already awoken?" ); |
|
cyg_uint8 *mem; |
mem = pool.try_alloc( p->size ); |
CYG_ASSERT( NULL != mem, "That should have succeeded" ); |
thread->set_wake_reason( Cyg_Thread::DONE ); |
thread->wake(); |
// return the successful value to it |
p->addr = mem; |
#else |
Cyg_ThreadQueue holding; |
do { |
thread = queue.dequeue(); |
p = (Mempolt2WaitInfo *)(thread->get_wait_info()); |
CYG_ASSERT( NULL == p->addr, "Thread already awoken?" ); |
|
cyg_uint8 *mem; |
if ( NULL != (mem = pool.try_alloc( p->size )) ) { |
// success! awaken the thread |
thread->set_wake_reason( Cyg_Thread::DONE ); |
thread->wake(); |
// return the successful value to it |
p->addr = mem; |
} |
else { |
// preserve the entry on the holding queue |
holding.enqueue( thread ); |
} |
} while ( !(queue.empty()) ); |
|
// Now re-queue the unaffected threads back into the pool queue |
// (no pun intended) |
while ( !(holding.empty()) ) { |
queue.enqueue( holding.dequeue() ); |
} |
#endif // CYGIMP_MEM_T_ONEFREE_TO_ONEALLOC |
} |
// Unlock the scheduler and maybe switch threads |
Cyg_Scheduler::unlock(); |
CYG_REPORT_RETVAL( ret ); |
return ret; |
} |
|
// ------------------------------------------------------------------------- |
// Get memory pool status |
// Needs atomicity protection (maybe) |
template <class T> |
inline void |
Cyg_Mempolt2<T>::get_status( cyg_mempool_status_flag_t flags, |
Cyg_Mempool_Status &status ) |
{ |
// Prevent preemption |
Cyg_Scheduler::lock(); |
CYG_ASSERTCLASS( this, "Bad this pointer"); |
|
if (0 != (flags & CYG_MEMPOOL_STAT_WAITING)) { |
status.waiting = (0 == queue.empty()); |
} |
pool.get_status(flags, status); |
|
// Unlock the scheduler and maybe switch threads |
Cyg_Scheduler::unlock(); |
} |
|
// ------------------------------------------------------------------------- |
// debugging/assert function |
|
#ifdef CYGDBG_USE_ASSERTS |
|
template <class T> |
inline cyg_bool |
Cyg_Mempolt2<T>::check_this(cyg_assert_class_zeal zeal) const |
{ |
CYG_REPORT_FUNCTION(); |
|
if ( Cyg_Thread::DESTRUCT == Cyg_Thread::self()->get_wake_reason() ) |
// then the whole thing is invalid, and we know it. |
// so return OK, since this check should NOT make an error. |
return true; |
|
// check that we have a non-NULL pointer first |
if( this == NULL ) return false; |
|
return true; |
} |
#endif |
|
// ------------------------------------------------------------------------- |
#endif // ifndef CYGONCE_MEMALLOC_MEMPOLT2_INL |
// EOF mempolt2.inl |
/common/v2_0/include/sepmetaimpl.hxx
0,0 → 1,194
#ifndef CYGONCE_MEMALLOC_SEPMETAIMPL_HXX |
#define CYGONCE_MEMALLOC_SEPMETAIMPL_HXX |
|
//========================================================================== |
// |
// sepmetaimpl.hxx |
// |
// Variable block memory pool with separate metadata class declarations |
// |
//========================================================================== |
//####ECOSGPLCOPYRIGHTBEGIN#### |
// ------------------------------------------- |
// This file is part of eCos, the Embedded Configurable Operating System. |
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc. |
// |
// eCos is free software; you can redistribute it and/or modify it under |
// the terms of the GNU General Public License as published by the Free |
// Software Foundation; either version 2 or (at your option) any later version. |
// |
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY |
// WARRANTY; without even the implied warranty of MERCHANTABILITY or |
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
// for more details. |
// |
// You should have received a copy of the GNU General Public License along |
// with eCos; if not, write to the Free Software Foundation, Inc., |
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. |
// |
// As a special exception, if other files instantiate templates or use macros |
// or inline functions from this file, or you compile this file and link it |
// with other works to produce a work based on this file, this file does not |
// by itself cause the resulting work to be covered by the GNU General Public |
// License. However the source code for this file must still be made available |
// in accordance with section (3) of the GNU General Public License. |
// |
// This exception does not invalidate any other reasons why a work based on |
// this file might be covered by the GNU General Public License. |
// |
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc. |
// at http://sources.redhat.com/ecos/ecos-license/ |
// ------------------------------------------- |
//####ECOSGPLCOPYRIGHTEND#### |
//========================================================================== |
//#####DESCRIPTIONBEGIN#### |
// |
// Author(s): jlarmour |
// Contributors: |
// Date: 2001-06-28 |
// Purpose: Define Sepmetaimpl class interface |
// Description: Inline class for constructing a variable block allocator |
// with separate metadata. |
// Usage: #include <cyg/memalloc/sepmetaimpl.hxx> |
// |
// |
//####DESCRIPTIONEND#### |
// |
//========================================================================== |
|
|
#include <cyg/infra/cyg_type.h> |
#include <pkgconf/memalloc.h> |
#include <cyg/memalloc/common.hxx> // Common memory allocator infra |
|
class Cyg_Mempool_Sepmeta_Implementation { |
protected: |
// these constructors are explicitly disallowed |
Cyg_Mempool_Sepmeta_Implementation() {}; |
// Cyg_Mempool_Sepmeta_Implementation( Cyg_Mempool_Sepmeta_Implementation &ref ) |
// {}; |
Cyg_Mempool_Sepmeta_Implementation & |
operator=( Cyg_Mempool_Sepmeta_Implementation &ref ) |
{ return ref; }; |
|
struct memdq { |
struct memdq *prev, *next; // prev/next alloced/free block |
struct memdq *memprev, *memnext; // prev/next block in memory |
cyg_uint8 *mem; // memory address associated with this block |
}; |
|
struct memdq allocedhead; // list of alloced memory |
struct memdq freehead; // list of free memory |
struct memdq memhead; // initial block on free list |
struct memdq memend; // dummy memdq indicating the end |
// of memory, as if it were alloced |
struct memdq *freemetahead; // unused memdq's |
cyg_uint8 *obase; |
cyg_int32 osize; |
cyg_uint8 *metabase; |
cyg_int32 metasize; |
cyg_uint8 *bottom; |
cyg_uint8 *top; |
cyg_int32 alignment; |
cyg_int32 freemem; |
|
// round up addresses according to required alignment of pool |
cyg_uint8 * |
alignup( cyg_uint8 *addr ); |
|
cyg_uint8 * |
aligndown( cyg_uint8 *addr ); |
|
// round up addresses according to required alignment of metadata |
cyg_uint8 * |
alignmetaup( cyg_uint8 *addr ); |
|
cyg_uint8 * |
alignmetadown( cyg_uint8 *addr ); |
|
// return the alloced dq at mem |
struct memdq * |
find_alloced_dq( cyg_uint8 *mem ); |
|
// returns a free dq of at least size, or NULL if none |
struct memdq * |
find_free_dq( cyg_int32 size ); |
|
// returns the free dq following mem |
struct memdq * |
find_free_dq_slot( cyg_uint8 *mem ); |
|
void |
insert_free_block( struct memdq *freedq ); |
|
static void |
copy_data( cyg_uint8 *dst, cyg_uint8 *src, cyg_int32 nbytes ); |
|
void |
check_free_memdq( struct memdq *dq ); |
|
void |
check_alloced_memdq( struct memdq *dq ); |
|
public: |
// THIS is the public API of memory pools generally that can have the |
// kernel oriented thread-safe package layer atop. |
|
struct constructorargs { |
cyg_int32 alignment; |
cyg_uint8 *metabase; |
cyg_uint32 metasize; |
constructorargs(cyg_int32 align, cyg_uint8 *mbase, cyg_uint32 msize) |
{ |
alignment = align; metabase = mbase; metasize = msize; |
} |
}; |
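 |
    // An illustrative construction (an assumption about intended use, |
    // not taken from the original source): the CYG_ADDRWORD argument of |
    // the constructor below is expected to carry a pointer to one of |
    // these structures, e.g. |
    // |
    //     static cyg_uint8 pool[4096], meta[512]; |
    //     Cyg_Mempool_Sepmeta_Implementation::constructorargs |
    //         args( 8, meta, sizeof(meta) ); |
    //     Cyg_Mempool_Sepmeta_Implementation impl( pool, sizeof(pool), |
    //                                              (CYG_ADDRWORD)&args ); |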
|
// Constructor: gives the base and size of the arena in which memory is |
// to be carved out. |
Cyg_Mempool_Sepmeta_Implementation( |
cyg_uint8 * /* base */, |
cyg_int32 /* size */, |
CYG_ADDRWORD /* constructorargs */ ); |
|
// Destructor |
~Cyg_Mempool_Sepmeta_Implementation(); |
|
// get size bytes of memory |
cyg_uint8 * |
try_alloc( cyg_int32 /* size */ ); |
|
// resize existing allocation, if oldsize is non-NULL, previous |
// allocation size is placed into it. If previous size not available, |
// it is set to 0. NB previous allocation size may have been rounded up. |
// Occasionally the allocation can be adjusted *backwards* as well as, |
// or instead of forwards, therefore the address of the resized |
// allocation is returned, or NULL if no resizing was possible. |
// Note that this differs from ::realloc() in that no attempt is |
// made to call malloc() if resizing is not possible - that is left |
// to higher layers. The data is copied from old to new though. |
// The effects of alloc_ptr==NULL or newsize==0 are undefined |
cyg_uint8 * |
resize_alloc( cyg_uint8 * /* alloc_ptr */, cyg_int32 /* newsize */, |
cyg_int32 * /* oldsize */ ); |
|
// free size bytes of memory back to the pool |
// returns true on success |
cyg_bool |
free( cyg_uint8 * /* ptr */, |
cyg_int32 /* size */ ); |
|
// Get memory pool status |
// flags is a bitmask of requested fields to fill in. The flags are |
// defined in common.hxx |
void |
get_status( cyg_mempool_status_flag_t /* flags */, |
Cyg_Mempool_Status & /* status */ ); |
|
}; |
|
#include <cyg/memalloc/sepmetaimpl.inl> |
|
// ------------------------------------------------------------------------- |
#endif // ifndef CYGONCE_MEMALLOC_SEPMETAIMPL_HXX |
// EOF sepmetaimpl.hxx |
/common/v2_0/include/mfiximpl.hxx
0,0 → 1,127
#ifndef CYGONCE_MEMALLOC_MFIXIMPL_HXX |
#define CYGONCE_MEMALLOC_MFIXIMPL_HXX |
|
//========================================================================== |
// |
// mfiximpl.hxx |
// |
// Memory pool with fixed block class declarations |
// |
//========================================================================== |
//####ECOSGPLCOPYRIGHTBEGIN#### |
// ------------------------------------------- |
// This file is part of eCos, the Embedded Configurable Operating System. |
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc. |
// |
// eCos is free software; you can redistribute it and/or modify it under |
// the terms of the GNU General Public License as published by the Free |
// Software Foundation; either version 2 or (at your option) any later version. |
// |
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY |
// WARRANTY; without even the implied warranty of MERCHANTABILITY or |
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
// for more details. |
// |
// You should have received a copy of the GNU General Public License along |
// with eCos; if not, write to the Free Software Foundation, Inc., |
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. |
// |
// As a special exception, if other files instantiate templates or use macros |
// or inline functions from this file, or you compile this file and link it |
// with other works to produce a work based on this file, this file does not |
// by itself cause the resulting work to be covered by the GNU General Public |
// License. However the source code for this file must still be made available |
// in accordance with section (3) of the GNU General Public License. |
// |
// This exception does not invalidate any other reasons why a work based on |
// this file might be covered by the GNU General Public License. |
// |
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc. |
// at http://sources.redhat.com/ecos/ecos-license/ |
// ------------------------------------------- |
//####ECOSGPLCOPYRIGHTEND#### |
//========================================================================== |
//#####DESCRIPTIONBEGIN#### |
// |
// Author(s): hmt |
// Contributors: jlarmour |
// Date: 2000-06-12 |
// Purpose: Define Mfiximpl class interface |
// Description: Inline class for constructing a fixed block allocator |
// Usage: #include <cyg/memalloc/mfiximpl.hxx> |
// |
// |
//####DESCRIPTIONEND#### |
// |
//========================================================================== |
|
#include <cyg/infra/cyg_type.h> |
#include <cyg/memalloc/common.hxx> // Common memory allocator infra |
|
class Cyg_Mempool_Fixed_Implementation { |
protected: |
// these constructors are explicitly disallowed |
Cyg_Mempool_Fixed_Implementation() {}; |
// Cyg_Mempool_Fixed_Implementation( Cyg_Mempool_Fixed_Implementation &ref ) |
// {}; |
Cyg_Mempool_Fixed_Implementation & |
operator=( Cyg_Mempool_Fixed_Implementation &ref ) |
{ return ref; }; |
|
cyg_uint32 *bitmap; |
cyg_int32 maptop; |
cyg_uint8 *mempool; |
cyg_int32 numblocks; |
cyg_int32 freeblocks; |
cyg_int32 blocksize; |
cyg_int32 firstfree; |
cyg_uint8 *top; |
|
public: |
// THIS is the public API of memory pools generally that can have the |
// kernel oriented thread-safe package layer atop. |
// |
// The kernel package is a template whose type parameter is one of |
// these. That is the reason there are superfluous parameters here and |
    // more generalization than might be expected in a fixed block |
// allocator. |
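    // |
    // For instance (an illustrative sketch, assuming the thread-aware |
    // template layer is configured in): |
    // |
    //     Cyg_Mempolt2<Cyg_Mempool_Fixed_Implementation> |
    //         fixedpool( base, size, blocksize ); |
    // |
    // where the CYG_ADDRWORD alloc_unit parameter is assumed here to |
    // carry the fixed block size through to this implementation's |
    // constructor. |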
|
// Constructor: gives the base and size of the arena in which memory is |
// to be carved out, note that management structures are taken from the |
    // same arena. The alloc_unit is in general an opaque parameter; it |
    // comes through from the outer constructor unchanged. |
Cyg_Mempool_Fixed_Implementation( |
cyg_uint8 *base, |
cyg_int32 size, |
CYG_ADDRWORD alloc_unit ); |
|
// Destructor |
~Cyg_Mempool_Fixed_Implementation(); |
|
// get some memory; size is ignored in a fixed block allocator |
cyg_uint8 *try_alloc( cyg_int32 size ); |
|
    // resize an existing allocation. This is defined in the |
    // fixed block allocator purely for API consistency: it fails |
    // (returning NULL) for any newsize other than the blocksize, and |
    // can only succeed when newsize equals the blocksize. |
cyg_uint8 * |
resize_alloc( cyg_uint8 *alloc_ptr, cyg_int32 newsize, |
cyg_int32 *oldsize=NULL ); |
|
// free the memory back to the pool; size ignored here |
cyg_bool free( cyg_uint8 *p, cyg_int32 size ); |
|
// Get memory pool status |
// flags is a bitmask of requested fields to fill in. The flags are |
// defined in common.hxx |
void get_status( cyg_mempool_status_flag_t /* flags */, |
Cyg_Mempool_Status & /* status */ ); |
|
}; |
|
#include <cyg/memalloc/mfiximpl.inl> |
|
// ------------------------------------------------------------------------- |
#endif // ifndef CYGONCE_MEMALLOC_MFIXIMPL_HXX |
// EOF mfiximpl.hxx |
/common/v2_0/include/dlmalloc.hxx
0,0 → 1,172
#ifndef CYGONCE_MEMALLOC_DLMALLOC_HXX |
#define CYGONCE_MEMALLOC_DLMALLOC_HXX |
|
//========================================================================== |
// |
// dlmalloc.hxx |
// |
// Interface to the port of Doug Lea's malloc implementation |
// |
//========================================================================== |
//####ECOSGPLCOPYRIGHTBEGIN#### |
// ------------------------------------------- |
// This file is part of eCos, the Embedded Configurable Operating System. |
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc. |
// |
// eCos is free software; you can redistribute it and/or modify it under |
// the terms of the GNU General Public License as published by the Free |
// Software Foundation; either version 2 or (at your option) any later version. |
// |
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY |
// WARRANTY; without even the implied warranty of MERCHANTABILITY or |
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
// for more details. |
// |
// You should have received a copy of the GNU General Public License along |
// with eCos; if not, write to the Free Software Foundation, Inc., |
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. |
// |
// As a special exception, if other files instantiate templates or use macros |
// or inline functions from this file, or you compile this file and link it |
// with other works to produce a work based on this file, this file does not |
// by itself cause the resulting work to be covered by the GNU General Public |
// License. However the source code for this file must still be made available |
// in accordance with section (3) of the GNU General Public License. |
// |
// This exception does not invalidate any other reasons why a work based on |
// this file might be covered by the GNU General Public License. |
// |
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc. |
// at http://sources.redhat.com/ecos/ecos-license/ |
// ------------------------------------------- |
//####ECOSGPLCOPYRIGHTEND#### |
//========================================================================== |
//#####DESCRIPTIONBEGIN#### |
// |
// Author(s): jlarmour |
// Contributors: |
// Date: 2000-06-18 |
// Purpose: Define standard interface to Doug Lea's malloc implementation |
// Description: Doug Lea's malloc has been ported to eCos. This file provides |
// the interface between the implementation and the standard |
// memory allocator interface required by eCos |
// Usage: #include <cyg/memalloc/dlmalloc.hxx> |
// |
// |
//####DESCRIPTIONEND#### |
// |
//========================================================================== |
|
// CONFIGURATION |
|
#include <pkgconf/memalloc.h> |
|
#ifdef CYGIMP_MEMALLOC_ALLOCATOR_DLMALLOC_THREADAWARE |
# include <pkgconf/system.h> |
# ifdef CYGPKG_KERNEL |
# include <pkgconf/kernel.h> |
# endif |
#endif |
|
// when used as an implementation for malloc, we need the following |
// to let the system know the name of the class |
#define CYGCLS_MEMALLOC_MALLOC_IMPL Cyg_Mempool_dlmalloc |
|
// if the implementation is all that's required, don't output anything else |
#ifndef __MALLOC_IMPL_WANTED |
|
// INCLUDES |
|
#include <stddef.h> // size_t, ptrdiff_t |
#include <cyg/infra/cyg_type.h> // types |
#ifdef CYGIMP_MEMALLOC_ALLOCATOR_DLMALLOC_THREADAWARE |
# include <cyg/memalloc/mempolt2.hxx> // kernel safe mempool template |
#endif |
#include <cyg/memalloc/dlmallocimpl.hxx> // dlmalloc implementation |
#include <cyg/memalloc/common.hxx> // Common memory allocator infra |
#ifdef CYGFUN_KERNEL_THREADS_TIMER |
# include <cyg/kernel/ktypes.h> // cyg_tick_count |
#endif |
|
|
// TYPE DEFINITIONS |
|
|
class Cyg_Mempool_dlmalloc |
{ |
protected: |
#ifdef CYGIMP_MEMALLOC_ALLOCATOR_DLMALLOC_THREADAWARE |
Cyg_Mempolt2<Cyg_Mempool_dlmalloc_Implementation> mypool; |
#else |
Cyg_Mempool_dlmalloc_Implementation mypool; |
#endif |
|
|
public: |
// Constructor: gives the base and size of the arena in which memory is |
// to be carved out, note that management structures are taken from the |
// same arena. |
Cyg_Mempool_dlmalloc( cyg_uint8 *base, cyg_int32 size, |
CYG_ADDRWORD argthru=0 ) |
: mypool( base, size, argthru ) {} |
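 |
    // Typical construction (following the pattern used by the tests in |
    // this package, e.g. dlmalloc2.cxx): |
    // |
    //     static cyg_uint8 mem[10240]; |
    //     static Cyg_Mempool_dlmalloc mempool( mem, sizeof(mem) ); |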
|
// Destructor |
~Cyg_Mempool_dlmalloc() {} |
|
// get some memory; wait if none available |
// if we aren't configured to be thread-aware this is irrelevant |
#ifdef CYGIMP_MEMALLOC_ALLOCATOR_DLMALLOC_THREADAWARE |
cyg_uint8 * |
alloc( cyg_int32 size ) { return mypool.alloc( size ); } |
|
# ifdef CYGFUN_KERNEL_THREADS_TIMER |
// get some memory with a timeout |
cyg_uint8 * |
alloc( cyg_int32 size, cyg_tick_count delay_timeout ) { |
return mypool.alloc( size, delay_timeout ); |
} |
# endif |
#endif |
|
// get some memory, return NULL if none available |
cyg_uint8 * |
try_alloc( cyg_int32 size ) { return mypool.try_alloc( size ); } |
|
|
// resize existing allocation, if oldsize is non-NULL, previous |
// allocation size is placed into it. If previous size not available, |
// it is set to 0. NB previous allocation size may have been rounded up. |
// Occasionally the allocation can be adjusted *backwards* as well as, |
// or instead of forwards, therefore the address of the resized |
// allocation is returned, or NULL if no resizing was possible. |
// Note that this differs from ::realloc() in that no attempt is |
// made to call malloc() if resizing is not possible - that is left |
// to higher layers. The data is copied from old to new though. |
// The effects of alloc_ptr==NULL or newsize==0 are undefined |
cyg_uint8 * |
resize_alloc( cyg_uint8 *alloc_ptr, cyg_int32 newsize, |
cyg_int32 *oldsize ) { |
return mypool.resize_alloc( alloc_ptr, newsize, oldsize); |
} |
|
// free the memory back to the pool |
// returns true on success |
cyg_bool |
free( cyg_uint8 *ptr, cyg_int32 size=0 ) { return mypool.free(ptr, size); } |
|
// Get memory pool status |
// flags is a bitmask of requested fields to fill in. The flags are |
// defined in common.hxx |
void |
get_status( cyg_mempool_status_flag_t flags, Cyg_Mempool_Status &status ) { |
// set to 0 - if there's anything really waiting, it will be set to |
// 1 later |
status.waiting = 0; |
mypool.get_status( flags, status ); |
} |
}; |
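// Illustrative usage sketch (not part of the original header); the arena |
// and helper names below are hypothetical. The pool is constructed over a |
// caller-supplied arena, and its management structures are carved from |
// that same arena. |
#if 0 |
static cyg_uint8 example_dl_arena[16384]; |
static Cyg_Mempool_dlmalloc example_dl_pool( example_dl_arena, |
                                             sizeof(example_dl_arena) ); |
static void example_dl_use( void ) |
{ |
    cyg_uint8 *p = example_dl_pool.try_alloc( 128 );    // NULL if exhausted |
    if ( NULL != p ) { |
        // ... use the 128 bytes ... |
        example_dl_pool.free( p, 128 );     // size argument is optional (0) |
    } |
} |
#endif |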
|
#endif // ifndef __MALLOC_IMPL_WANTED |
|
#endif // ifndef CYGONCE_MEMALLOC_DLMALLOC_HXX |
// EOF dlmalloc.hxx |
/common/v2_0/include/memvar.hxx
0,0 → 1,164
#ifndef CYGONCE_MEMALLOC_MEMVAR_HXX |
#define CYGONCE_MEMALLOC_MEMVAR_HXX |
|
//========================================================================== |
// |
// memvar.hxx |
// |
// Memory pool with variable block class declarations |
// |
//========================================================================== |
//####ECOSGPLCOPYRIGHTBEGIN#### |
// ------------------------------------------- |
// This file is part of eCos, the Embedded Configurable Operating System. |
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc. |
// |
// eCos is free software; you can redistribute it and/or modify it under |
// the terms of the GNU General Public License as published by the Free |
// Software Foundation; either version 2 or (at your option) any later version. |
// |
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY |
// WARRANTY; without even the implied warranty of MERCHANTABILITY or |
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
// for more details. |
// |
// You should have received a copy of the GNU General Public License along |
// with eCos; if not, write to the Free Software Foundation, Inc., |
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. |
// |
// As a special exception, if other files instantiate templates or use macros |
// or inline functions from this file, or you compile this file and link it |
// with other works to produce a work based on this file, this file does not |
// by itself cause the resulting work to be covered by the GNU General Public |
// License. However the source code for this file must still be made available |
// in accordance with section (3) of the GNU General Public License. |
// |
// This exception does not invalidate any other reasons why a work based on |
// this file might be covered by the GNU General Public License. |
// |
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc. |
// at http://sources.redhat.com/ecos/ecos-license/ |
// ------------------------------------------- |
//####ECOSGPLCOPYRIGHTEND#### |
//========================================================================== |
//#####DESCRIPTIONBEGIN#### |
// |
// Author(s): dsm, jlarmour |
// Contributors: |
// Date: 2000-06-12 |
// Purpose: Define Memvar class interface |
// Description: Inline class for constructing a variable block allocator |
// Usage: #include <cyg/memalloc/memvar.hxx> |
// |
// |
//####DESCRIPTIONEND#### |
// |
//========================================================================== |
|
// CONFIGURATION |
|
#include <pkgconf/memalloc.h> |
#ifdef CYGSEM_MEMALLOC_ALLOCATOR_VARIABLE_THREADAWARE |
# include <pkgconf/system.h> |
# ifdef CYGPKG_KERNEL |
# include <pkgconf/kernel.h> |
# endif |
#endif |
|
// when used as an implementation for malloc, we need the following |
// to let the system know the name of the class |
#define CYGCLS_MEMALLOC_MALLOC_IMPL Cyg_Mempool_Variable |
|
// if the implementation is all that's required, don't output anything else |
#ifndef __MALLOC_IMPL_WANTED |
// INCLUDES |
|
#include <cyg/infra/cyg_type.h> // types |
#include <cyg/infra/cyg_ass.h> // assertion macros |
|
#ifdef CYGFUN_KERNEL_THREADS_TIMER |
# include <cyg/kernel/ktypes.h> // cyg_tick_count |
#endif |
|
#ifdef CYGSEM_MEMALLOC_ALLOCATOR_VARIABLE_THREADAWARE |
# include <cyg/memalloc/mempolt2.hxx> // kernel safe mempool template |
#endif |
|
#include <cyg/memalloc/mvarimpl.hxx> // implementation of a variable mem pool |
#include <cyg/memalloc/common.hxx> // Common memory allocator infra |
|
|
// TYPE DEFINITIONS |
|
class Cyg_Mempool_Variable |
{ |
protected: |
#ifdef CYGSEM_MEMALLOC_ALLOCATOR_VARIABLE_THREADAWARE |
Cyg_Mempolt2<Cyg_Mempool_Variable_Implementation> mypool; |
#else |
Cyg_Mempool_Variable_Implementation mypool; |
#endif |
|
public: |
    // This API makes concrete a class implementing a thread-safe, |
    // kernel-savvy memory pool that manages variable-size blocks. |
|
// Constructor: gives the base and size of the arena in which memory is |
// to be carved out, note that management structures are taken from the |
// same arena. |
Cyg_Mempool_Variable( cyg_uint8 * /* base */, cyg_int32 /* size */, |
cyg_int32 /* alignment */=8); |
|
// Destructor |
~Cyg_Mempool_Variable(); |
|
// get some memory; wait if none available |
// if we aren't configured to be thread-aware this is irrelevant |
#ifdef CYGSEM_MEMALLOC_ALLOCATOR_VARIABLE_THREADAWARE |
cyg_uint8 * |
alloc( cyg_int32 /* size */ ); |
|
# ifdef CYGFUN_KERNEL_THREADS_TIMER |
// get some memory with a timeout |
cyg_uint8 * |
alloc( cyg_int32 /* size */, cyg_tick_count /* delay_timeout */ ); |
# endif |
#endif |
|
// get some memory, return NULL if none available |
cyg_uint8 * |
try_alloc( cyg_int32 /* size */ ); |
|
// resize existing allocation, if oldsize is non-NULL, previous |
// allocation size is placed into it. If previous size not available, |
// it is set to 0. NB previous allocation size may have been rounded up. |
// Occasionally the allocation can be adjusted *backwards* as well as, |
// or instead of forwards, therefore the address of the resized |
// allocation is returned, or NULL if no resizing was possible. |
// Note that this differs from ::realloc() in that no attempt is |
// made to call malloc() if resizing is not possible - that is left |
// to higher layers. The data is copied from old to new though. |
// The effects of alloc_ptr==NULL or newsize==0 are undefined |
cyg_uint8 * |
resize_alloc( cyg_uint8 * /* alloc_ptr */, cyg_int32 /* newsize */, |
cyg_int32 * /* oldsize */ =NULL ); |
|
// free the memory back to the pool |
// returns true on success |
cyg_bool |
free( cyg_uint8 * /* ptr */, cyg_int32 /* size */ =0 ); |
|
// Get memory pool status |
// flags is a bitmask of requested fields to fill in. The flags are |
// defined in common.hxx |
void |
get_status( cyg_mempool_status_flag_t /* flags */, |
Cyg_Mempool_Status & /* status */ ); |
|
CYGDBG_DEFINE_CHECK_THIS |
}; |
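// Illustrative usage sketch (not part of the original header); names and |
// sizes are hypothetical. It shows the resize_alloc() semantics described |
// above: the (possibly moved) address is returned and *oldsize receives the |
// previous, possibly rounded-up, size. |
#if 0 |
static cyg_uint8 example_var_arena[8192]; |
static Cyg_Mempool_Variable example_var_pool( example_var_arena, |
                                              sizeof(example_var_arena) ); |
static void example_var_use( void ) |
{ |
    cyg_uint8 *p = example_var_pool.try_alloc( 100 ); |
    if ( NULL != p ) { |
        cyg_int32 oldsize; |
        cyg_uint8 *q = example_var_pool.resize_alloc( p, 200, &oldsize ); |
        if ( NULL != q )        // q may differ from p; the data was copied |
            p = q; |
        example_var_pool.free( p ); |
    } |
} |
#endif |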
|
#endif // ifndef __MALLOC_IMPL_WANTED |
|
#endif // ifndef CYGONCE_MEMALLOC_MEMVAR_HXX |
// EOF memvar.hxx |
/common/v2_0/include/mvarimpl.hxx
0,0 → 1,154
#ifndef CYGONCE_MEMALLOC_MVARIMPL_HXX |
#define CYGONCE_MEMALLOC_MVARIMPL_HXX |
|
//========================================================================== |
// |
// mvarimpl.hxx |
// |
// Memory pool with variable block class declarations |
// |
//========================================================================== |
//####ECOSGPLCOPYRIGHTBEGIN#### |
// ------------------------------------------- |
// This file is part of eCos, the Embedded Configurable Operating System. |
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc. |
// |
// eCos is free software; you can redistribute it and/or modify it under |
// the terms of the GNU General Public License as published by the Free |
// Software Foundation; either version 2 or (at your option) any later version. |
// |
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY |
// WARRANTY; without even the implied warranty of MERCHANTABILITY or |
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
// for more details. |
// |
// You should have received a copy of the GNU General Public License along |
// with eCos; if not, write to the Free Software Foundation, Inc., |
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. |
// |
// As a special exception, if other files instantiate templates or use macros |
// or inline functions from this file, or you compile this file and link it |
// with other works to produce a work based on this file, this file does not |
// by itself cause the resulting work to be covered by the GNU General Public |
// License. However the source code for this file must still be made available |
// in accordance with section (3) of the GNU General Public License. |
// |
// This exception does not invalidate any other reasons why a work based on |
// this file might be covered by the GNU General Public License. |
// |
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc. |
// at http://sources.redhat.com/ecos/ecos-license/ |
// ------------------------------------------- |
//####ECOSGPLCOPYRIGHTEND#### |
//========================================================================== |
//#####DESCRIPTIONBEGIN#### |
// |
// Author(s): dsm, jlarmour |
// Contributors: |
// Date: 2000-06-12 |
// Purpose: Define Mvarimpl class interface |
// Description: Inline class for constructing a variable block allocator |
// Usage: #include <cyg/memalloc/mvarimpl.hxx> |
// |
// |
//####DESCRIPTIONEND#### |
// |
//========================================================================== |
|
|
#include <cyg/infra/cyg_type.h> |
#include <pkgconf/memalloc.h> |
#include <cyg/memalloc/common.hxx> // Common memory allocator infra |
|
class Cyg_Mempool_Variable_Implementation { |
protected: |
// these constructors are explicitly disallowed |
Cyg_Mempool_Variable_Implementation() {}; |
// Cyg_Mempool_Variable_Implementation( Cyg_Mempool_Variable_Implementation &ref ) |
// {}; |
Cyg_Mempool_Variable_Implementation & |
operator=( Cyg_Mempool_Variable_Implementation &ref ) |
{ return ref; }; |
|
struct memdq { |
struct memdq *prev, *next; |
cyg_int32 size; |
}; |
|
struct memdq head; |
cyg_uint8 *obase; |
cyg_int32 osize; |
cyg_uint8 *bottom; |
cyg_uint8 *top; |
cyg_int32 alignment; |
cyg_int32 freemem; |
|
// round up size passed to alloc/free to a size that will be used |
// for allocation |
cyg_int32 |
roundup(cyg_int32 size); |
|
struct memdq * |
addr2memdq( cyg_uint8 *addr ); |
|
struct memdq * |
alloc2memdq( cyg_uint8 *addr ); |
|
cyg_uint8 * |
memdq2alloc( struct memdq *dq ); |
|
void |
insert_free_block( struct memdq *freedq ); |
|
public: |
    // This is the public API common to memory pool implementations; the |
    // kernel-oriented thread-safe template layer can be layered on top of it. |
|
// Constructor: gives the base and size of the arena in which memory is |
// to be carved out. |
Cyg_Mempool_Variable_Implementation( |
cyg_uint8 * /* base */, |
cyg_int32 /* size */, |
CYG_ADDRWORD /* alignment */ = 8 ); |
|
// Destructor |
~Cyg_Mempool_Variable_Implementation(); |
|
// get size bytes of memory |
cyg_uint8 * |
try_alloc( cyg_int32 /* size */ ); |
|
// resize existing allocation, if oldsize is non-NULL, previous |
// allocation size is placed into it. If previous size not available, |
// it is set to 0. NB previous allocation size may have been rounded up. |
// Occasionally the allocation can be adjusted *backwards* as well as, |
// or instead of forwards, therefore the address of the resized |
// allocation is returned, or NULL if no resizing was possible. |
// Note that this differs from ::realloc() in that no attempt is |
// made to call malloc() if resizing is not possible - that is left |
// to higher layers. The data is copied from old to new though. |
// The effects of alloc_ptr==NULL or newsize==0 are undefined |
cyg_uint8 * |
resize_alloc( cyg_uint8 *alloc_ptr, cyg_int32 newsize, |
cyg_int32 *oldsize ); |
|
// free size bytes of memory back to the pool |
// returns true on success |
cyg_bool |
free( cyg_uint8 * /* ptr */, |
cyg_int32 /* size */ ); |
|
// Get memory pool status |
// flags is a bitmask of requested fields to fill in. The flags are |
// defined in common.hxx |
void |
get_status( cyg_mempool_status_flag_t /* flags */, |
Cyg_Mempool_Status & /* status */ ); |
|
}; |
|
#include <cyg/memalloc/mvarimpl.inl> |
|
// ------------------------------------------------------------------------- |
#endif // ifndef CYGONCE_MEMALLOC_MVARIMPL_HXX |
// EOF mvarimpl.hxx |
/common/v2_0/include/common.hxx
0,0 → 1,135
#ifndef CYGONCE_MEMALLOC_COMMON_HXX |
#define CYGONCE_MEMALLOC_COMMON_HXX |
|
/*========================================================================== |
// |
// common.hxx |
// |
// Shared definitions used by memory allocators |
// |
//========================================================================== |
//####ECOSGPLCOPYRIGHTBEGIN#### |
// ------------------------------------------- |
// This file is part of eCos, the Embedded Configurable Operating System. |
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc. |
// |
// eCos is free software; you can redistribute it and/or modify it under |
// the terms of the GNU General Public License as published by the Free |
// Software Foundation; either version 2 or (at your option) any later version. |
// |
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY |
// WARRANTY; without even the implied warranty of MERCHANTABILITY or |
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
// for more details. |
// |
// You should have received a copy of the GNU General Public License along |
// with eCos; if not, write to the Free Software Foundation, Inc., |
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. |
// |
// As a special exception, if other files instantiate templates or use macros |
// or inline functions from this file, or you compile this file and link it |
// with other works to produce a work based on this file, this file does not |
// by itself cause the resulting work to be covered by the GNU General Public |
// License. However the source code for this file must still be made available |
// in accordance with section (3) of the GNU General Public License. |
// |
// This exception does not invalidate any other reasons why a work based on |
// this file might be covered by the GNU General Public License. |
// |
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc. |
// at http://sources.redhat.com/ecos/ecos-license/ |
// ------------------------------------------- |
//####ECOSGPLCOPYRIGHTEND#### |
//========================================================================== |
//#####DESCRIPTIONBEGIN#### |
// |
// Author(s): jlarmour |
// Contributors: |
// Date: 2000-06-12 |
// Purpose: Shared definitions used by memory allocators |
// Description: |
// Usage: #include <cyg/memalloc/common.hxx> |
// |
// |
//####DESCRIPTIONEND#### |
// |
//========================================================================*/ |
|
/* CONFIGURATION */ |
|
#include <pkgconf/memalloc.h> |
|
/* TYPE DEFINITIONS */ |
|
// Cyg_Mempool_Status is filled in by the get_status() method of |
// standard eCos memory allocators. After get_status() returns, any |
// field of type T may be set to ((T)-1) to indicate that the information |
// is not available or not applicable to this allocator. |
|
|
class Cyg_Mempool_Status { |
public: |
const cyg_uint8 *arenabase; // base address of entire pool |
cyg_int32 arenasize; // total size of entire pool |
cyg_int32 freeblocks; // number of chunks free for use |
cyg_int32 totalallocated; // total allocated space in bytes |
cyg_int32 totalfree; // total space in bytes not in use |
cyg_int32 blocksize; // block size if fixed block |
cyg_int32 maxfree; // size of largest unused block |
cyg_int8 waiting; // are there any threads waiting for memory? |
const cyg_uint8 *origbase; // address of original region used when pool |
// created |
cyg_int32 origsize; // size of original region used when pool |
// created |
|
// maxoverhead is the *maximum* per-allocation overhead imposed by |
// the allocator implementation. Note: this is rarely the typical |
// overhead which often depends on the size of the allocation requested. |
// It includes overhead due to alignment constraints. For example, if |
// maxfree and maxoverhead are available for this allocator, then an |
// allocation request of (maxfree-maxoverhead) bytes must always succeed, |
// unless maxoverhead is set to -1, in which case the allocator does not |
// support reporting this information. |
|
cyg_int8 maxoverhead; |
|
void |
init() { |
arenabase = (const cyg_uint8 *)-1; |
arenasize = -1; |
freeblocks = -1; |
totalallocated = -1; |
totalfree = -1; |
blocksize = -1; |
maxfree = -1; |
waiting = -1; |
origbase = (const cyg_uint8 *)-1; |
origsize = -1; |
maxoverhead = -1; |
} |
|
// constructor |
Cyg_Mempool_Status() { init(); } |
}; |
|
// Flags to pass to get_status() methods to tell it which stat(s) is/are |
// being requested |
|
#define CYG_MEMPOOL_STAT_ARENABASE (1<<0) |
#define CYG_MEMPOOL_STAT_ARENASIZE (1<<1) |
#define CYG_MEMPOOL_STAT_FREEBLOCKS (1<<2) |
#define CYG_MEMPOOL_STAT_TOTALALLOCATED (1<<3) |
#define CYG_MEMPOOL_STAT_TOTALFREE (1<<4) |
#define CYG_MEMPOOL_STAT_BLOCKSIZE (1<<5) |
#define CYG_MEMPOOL_STAT_MAXFREE (1<<6) |
#define CYG_MEMPOOL_STAT_WAITING (1<<7) |
#define CYG_MEMPOOL_STAT_ORIGBASE (1<<9) |
#define CYG_MEMPOOL_STAT_ORIGSIZE (1<<10) |
#define CYG_MEMPOOL_STAT_MAXOVERHEAD (1<<11) |
|
// And an opaque type for any arguments with these flags |
typedef cyg_uint16 cyg_mempool_status_flag_t; |
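// Illustrative sketch (not part of the original header) showing how the |
// flags above can be combined in a get_status() call; "Pool" stands for any |
// standard eCos allocator class providing this interface, and the helper |
// name is hypothetical. |
#if 0 |
template <class Pool> |
cyg_int32 example_largest_certain_alloc( Pool &pool ) |
{ |
    Cyg_Mempool_Status stat;        // constructor sets every field to -1 |
    pool.get_status( CYG_MEMPOOL_STAT_MAXFREE | CYG_MEMPOOL_STAT_MAXOVERHEAD, |
                     stat ); |
    if ( -1 == stat.maxfree || -1 == stat.maxoverhead ) |
        return -1;                  // allocator cannot report this |
    // per the maxoverhead comment above, a request of this size must succeed |
    return stat.maxfree - stat.maxoverhead; |
} |
#endif |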
|
|
#endif /* ifndef CYGONCE_MEMALLOC_COMMON_HXX */ |
/* EOF common.hxx */ |
/common/v2_0/include/sepmetaimpl.inl
0,0 → 1,666
#ifndef CYGONCE_MEMALLOC_SEPMETAIMPL_INL |
#define CYGONCE_MEMALLOC_SEPMETAIMPL_INL |
|
//========================================================================== |
// |
// sepmetaimpl.inl |
// |
// Variable block memory pool with separate metadata class declarations |
// |
//========================================================================== |
//####ECOSGPLCOPYRIGHTBEGIN#### |
// ------------------------------------------- |
// This file is part of eCos, the Embedded Configurable Operating System. |
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc. |
// |
// eCos is free software; you can redistribute it and/or modify it under |
// the terms of the GNU General Public License as published by the Free |
// Software Foundation; either version 2 or (at your option) any later version. |
// |
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY |
// WARRANTY; without even the implied warranty of MERCHANTABILITY or |
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
// for more details. |
// |
// You should have received a copy of the GNU General Public License along |
// with eCos; if not, write to the Free Software Foundation, Inc., |
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. |
// |
// As a special exception, if other files instantiate templates or use macros |
// or inline functions from this file, or you compile this file and link it |
// with other works to produce a work based on this file, this file does not |
// by itself cause the resulting work to be covered by the GNU General Public |
// License. However the source code for this file must still be made available |
// in accordance with section (3) of the GNU General Public License. |
// |
// This exception does not invalidate any other reasons why a work based on |
// this file might be covered by the GNU General Public License. |
// |
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc. |
// at http://sources.redhat.com/ecos/ecos-license/ |
// ------------------------------------------- |
//####ECOSGPLCOPYRIGHTEND#### |
//========================================================================== |
//#####DESCRIPTIONBEGIN#### |
// |
// Author(s): jlarmour |
// Contributors: hmt |
// Date: 2001-06-28 |
// Purpose: Define Sepmetaimpl class interface |
// Description: Inline class for constructing a variable block allocator |
// with separate metadata. |
// Usage: #include <cyg/memalloc/sepmetaimpl.hxx> |
// |
// |
//####DESCRIPTIONEND#### |
// |
//========================================================================== |
|
#include <pkgconf/system.h> |
#ifdef CYGPKG_ISOINFRA |
# include <pkgconf/isoinfra.h> |
#endif |
#include <pkgconf/memalloc.h> |
#include <cyg/memalloc/sepmetaimpl.hxx> |
|
#include <cyg/infra/cyg_ass.h> // assertion support |
#include <cyg/infra/cyg_trac.h> // tracing support |
|
// Simple allocator |
|
// The memory block lists are doubly linked lists. One for all alloced |
// blocks, one for all free blocks. There's also a list of unused |
// metadata from the metadata pool. The head of the |
// list has the same structure but its memnext/memprev fields are zero. |
// Always having at least one item on the list simplifies the alloc and |
// free code. |
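// Initial state sketch (illustrative, based on the constructor below): the |
// free list holds just memhead, which describes the whole arena as one free |
// block; the alloced list holds just memend, a pretend allocation marking |
// the top of the arena; every other memdq sits on the freemetahead list. |
// All memdq's, free or allocated, are also threaded onto an address-ordered |
// list via their memnext/memprev fields. |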
#ifdef CYGINT_ISO_STRING_MEMFUNCS |
# include <string.h> |
#endif |
|
inline void |
Cyg_Mempool_Sepmeta_Implementation::copy_data( cyg_uint8 *dst, |
cyg_uint8 *src, |
cyg_int32 nbytes ) |
{ |
#ifdef CYGINT_ISO_STRING_MEMFUNCS |
memmove( dst, src, nbytes ); |
#else |
if ((src < dst) && (dst < (src + nbytes))) { |
// Have to copy backwards |
src += nbytes; |
dst += nbytes; |
while (nbytes--) { |
*--dst = *--src; |
} |
} else { |
while (nbytes--) { |
*dst++ = *src++; |
} |
} |
#endif |
} |
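// Overlap example for the fallback path above (hypothetical addresses): with |
// src == 0x1000, nbytes == 16 and dst == 0x1008, dst lies inside |
// [src, src+nbytes), so the copy runs backwards from the top to avoid |
// overwriting bytes that have not yet been read. |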
|
inline cyg_uint8 * |
Cyg_Mempool_Sepmeta_Implementation::alignup( cyg_uint8 *addr ) |
{ |
return (cyg_uint8 *)((cyg_int32)(addr + alignment-1) & -alignment); |
} |
|
inline cyg_uint8 * |
Cyg_Mempool_Sepmeta_Implementation::aligndown( cyg_uint8 *addr ) |
{ |
return (cyg_uint8 *)((cyg_int32)addr & -alignment); |
} |
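// Worked example of the mask arithmetic above (hypothetical values): with |
// alignment == 8, -alignment is the mask 0x...FFF8, so an address of 0x1003 |
// gives alignup() == 0x1008 and aligndown() == 0x1000, while an already |
// aligned 0x1008 is returned unchanged by both. |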
|
inline cyg_uint8 * |
Cyg_Mempool_Sepmeta_Implementation::alignmetaup( cyg_uint8 *addr ) |
{ |
const size_t memdqalign = __alignof__ (struct memdq); |
return (cyg_uint8 *)((cyg_int32)(addr + memdqalign-1) & -memdqalign); |
} |
|
inline cyg_uint8 * |
Cyg_Mempool_Sepmeta_Implementation::alignmetadown( cyg_uint8 *addr ) |
{ |
const size_t memdqalign = __alignof__ (struct memdq); |
return (cyg_uint8 *)((cyg_int32)addr & -memdqalign); |
} |
|
// return the alloced dq at mem |
inline struct Cyg_Mempool_Sepmeta_Implementation::memdq * |
Cyg_Mempool_Sepmeta_Implementation::find_alloced_dq( cyg_uint8 *mem ) |
{ |
struct memdq *dq=allocedhead.next; |
|
while (dq->mem != mem ) { |
CYG_ASSERT( dq->next->prev==dq, "Bad link in dq"); |
CYG_ASSERT( dq->memnext->memprev==dq, "Bad link in mem dq"); |
if (dq->next == &memend) // address not found! |
return NULL; |
dq = dq->next; |
} |
return dq; |
} |
|
// returns a free dq of at least size, or NULL if none |
inline struct Cyg_Mempool_Sepmeta_Implementation::memdq * |
Cyg_Mempool_Sepmeta_Implementation::find_free_dq( cyg_int32 size ) |
{ |
struct memdq *dq = freehead.next; |
|
while ( (dq->memnext->mem - dq->mem) < size ) { |
CYG_ASSERT( dq->next->prev==dq, "Bad link in dq"); |
CYG_ASSERT( dq->memnext->memprev==dq, "Bad link in mem dq"); |
if (dq->next == &freehead) { // reached end of list |
return NULL; |
} |
dq = dq->next; // next on free list |
} |
return dq; |
} |
|
// returns the free dq following mem |
inline struct Cyg_Mempool_Sepmeta_Implementation::memdq * |
Cyg_Mempool_Sepmeta_Implementation::find_free_dq_slot( cyg_uint8 *mem ) |
{ |
struct memdq *dq; |
for (dq = freehead.next; dq->mem < mem; dq = dq->next) { |
if ( dq == &freehead ) // wrapped round |
break; |
} |
return dq; |
} |
|
inline void |
Cyg_Mempool_Sepmeta_Implementation::check_free_memdq( struct memdq *dq ) |
{ |
if (dq == &freehead) |
return; |
CYG_ASSERT(dq->memnext->memprev == dq, "corrupted free dq #1"); |
CYG_ASSERT(dq->next->prev == dq, "corrupted free dq #2"); |
CYG_ASSERT(dq->memprev->memnext == dq, "corrupted free dq #3"); |
CYG_ASSERT(dq->prev->next == dq, "corrupted free dq #4"); |
CYG_ASSERT(dq->memnext->mem > dq->mem, "free dq mem not sorted #1"); |
if (dq->memprev != &memend) |
CYG_ASSERT(dq->memprev->mem < dq->mem, "free dq mem not sorted #2"); |
} |
|
inline void |
Cyg_Mempool_Sepmeta_Implementation::check_alloced_memdq( struct memdq *dq ) |
{ |
CYG_ASSERT(dq->memnext->memprev == dq, "corrupted alloced dq #1"); |
CYG_ASSERT(dq->next->prev == dq, "corrupted alloced dq #2"); |
CYG_ASSERT(dq->memprev->memnext == dq, "corrupted alloced dq #3"); |
CYG_ASSERT(dq->prev->next == dq, "corrupted alloced dq #4"); |
if (dq != &memend) |
CYG_ASSERT(dq->memnext->mem > dq->mem, "alloced dq mem not sorted #1"); |
if (dq->memprev != &memhead) |
CYG_ASSERT(dq->memprev->mem < dq->mem, "alloced dq mem not sorted #2"); |
} |
|
// ------------------------------------------------------------------------- |
|
inline void |
Cyg_Mempool_Sepmeta_Implementation::insert_free_block( struct memdq *dq ) |
{ |
// scan for correct slot in the sorted free list |
struct memdq *fdq = find_free_dq_slot( dq->mem ); |
|
CYG_ASSERT(fdq != &freehead ? fdq->mem > dq->mem : 1, |
"Block address is already in freelist"); |
|
check_free_memdq(fdq); |
|
if (dq->memnext == fdq) { |
// we can coalesce these two together |
// adjust fdq's mem address backwards to include dq |
fdq->mem = dq->mem; |
// and remove dq |
fdq->memprev = dq->memprev; |
fdq->memprev->memnext = fdq; |
// Don't need to adjust fdq's next/prev links as it stays in the |
// same place in the free list |
|
// dq is now redundant so return to metadata free list |
dq->next = freemetahead; |
freemetahead = dq; |
|
// reset dq |
dq = fdq; |
} else { |
// insert behind fdq |
dq->next = fdq; |
dq->prev = fdq->prev; |
fdq->prev = dq; |
dq->prev->next = dq; |
} |
|
check_free_memdq(dq); |
|
// maybe also coalesce backwards |
if (dq->memprev == dq->prev) { |
// adjust dq's mem address backwards to include dq->prev |
dq->mem = dq->prev->mem; |
|
// return dq->prev to metadata free list |
dq->prev->next = freemetahead; |
freemetahead = dq->prev; |
|
// and remove dq->prev from mem list |
dq->memprev = dq->prev->memprev; |
dq->memprev->memnext = dq; |
// and free list |
dq->prev = dq->prev->prev; |
dq->prev->next = dq; |
|
check_free_memdq(dq); |
} |
} |
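// Coalescing sketch (illustrative): when the block being freed is followed |
// immediately in memory by fdq, fdq simply grows downwards to absorb it and |
// the spare memdq goes back to freemetahead; if the block immediately below |
// is also free, the two free entries are then merged as well, so adjacent |
// free memory is always described by a single memdq. |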
|
// ------------------------------------------------------------------------- |
#include <cyg/infra/diag.h> |
inline |
Cyg_Mempool_Sepmeta_Implementation::Cyg_Mempool_Sepmeta_Implementation( |
cyg_uint8 *base, |
cyg_int32 size, |
CYG_ADDRWORD consargs) |
{ |
CYG_REPORT_FUNCTION(); |
struct constructorargs *args = (struct constructorargs *)consargs; |
CYG_CHECK_DATA_PTRC( args ); |
|
alignment = args->alignment; |
|
CYG_ASSERT( alignment > 0, "Bad alignment" ); |
CYG_ASSERT( 0!=alignment, "alignment is zero" ); |
CYG_ASSERT( 0==(alignment & alignment-1), "alignment not a power of 2" ); |
|
obase=base; |
osize=size; |
metabase = args->metabase; |
metasize = args->metasize; |
|
// bottom is set to the lowest available address given the alignment. |
bottom = alignup( base ); |
cyg_uint8 *metabottom = alignmetaup( metabase ); |
|
// because we split free blocks by allocating memory from the end, not |
// the beginning, then to preserve alignment, the *top* must also be |
// aligned |
top = aligndown( base+size ); |
cyg_uint8 *metatop = metabottom + |
sizeof(struct memdq)*(metasize/sizeof(struct memdq)); |
|
CYG_ASSERT( top > bottom , "heap too small" ); |
CYG_ASSERT( top <= (base+size), "top too large" ); |
CYG_ASSERT( (((cyg_int32)(top)) & alignment-1)==0, |
"top badly aligned" ); |
CYG_ASSERT( (((cyg_int32)(bottom)) & alignment-1)==0, |
"bottom badly aligned" ); |
|
CYG_ASSERT( metatop > metabottom , "meta space too small" ); |
CYG_ASSERT( metatop <= (metabase+metasize), "metatop too large" ); |
|
// Initialize list of unused metadata blocks. Only need to do next |
// pointers - can ignore prev and size |
struct memdq *fq = freemetahead = (struct memdq *)metabottom; |
|
while ((cyg_uint8 *)fq < metatop) { |
fq->next = fq+1; |
fq++; |
} |
|
CYG_ASSERT((cyg_uint8 *)fq == metatop, "traversed metadata not aligned"); |
|
// set final pointer to NULL; |
--fq; fq->next = NULL; |
|
// initialize the free list. memhead is the initial free block occupying |
// all of free memory. |
memhead.next = memhead.prev = &freehead; |
// The mem list is circular for consistency. |
memhead.memprev = memhead.memnext = &memend; |
memhead.mem = bottom; |
|
// initialize block that indicates end of memory. This pretends to |
// be an allocated block |
memend.next = memend.prev = &allocedhead; |
memend.memnext = memend.memprev = &memhead; |
memend.mem = top; |
|
// initialize alloced list memdq. memend pretends to be allocated memory |
// at the end |
allocedhead.next = allocedhead.prev = &memend; |
freehead.next = freehead.prev = &memhead; |
// Since allocedhead and freehead are placeholders, not real blocks, |
// assign addresses which can't match list searches |
allocedhead.memnext = allocedhead.memprev = NULL; |
freehead.memnext = freehead.memprev = NULL; |
freehead.mem = allocedhead.mem = NULL; |
|
freemem = top - bottom; |
} |
|
// ------------------------------------------------------------------------- |
|
inline |
Cyg_Mempool_Sepmeta_Implementation::~Cyg_Mempool_Sepmeta_Implementation() |
{ |
} |
|
// ------------------------------------------------------------------------- |
// allocation is mostly simple |
// First we look down the free list for a large enough block |
// If we find a block the right size, we unlink the block from |
// the free list and return a pointer to it. |
// If we find a larger block, we chop a piece off the end |
// and return that |
// Otherwise we reach the end of the list and return NULL |
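// Split example (hypothetical addresses): a 64-byte request against a free |
// block covering 0x1000..0x1100 takes a spare memdq from freemetahead and |
// returns memory at 0x10C0, leaving 0x1000..0x10C0 described by the original |
// free memdq; an exact fit instead just moves the existing memdq across to |
// the alloced list. |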
|
inline cyg_uint8 * |
Cyg_Mempool_Sepmeta_Implementation::try_alloc( cyg_int32 size ) |
{ |
struct memdq *alloced; |
|
CYG_REPORT_FUNCTION(); |
|
// Allow uninitialised (zero sized) heaps because they could exist as a |
// quirk of the MLT setup where a dynamically sized heap is at the top of |
// memory. |
if (NULL == bottom || NULL==metabase) |
return NULL; |
|
size = (size + alignment - 1) & -alignment; |
|
struct memdq *dq = find_free_dq( size ); |
if (NULL == dq) |
return NULL; |
|
cyg_int32 dqsize = dq->memnext->mem - dq->mem; |
|
if( size == dqsize ) { |
// exact fit -- unlink from free list |
dq->prev->next = dq->next; |
dq->next->prev = dq->prev; |
|
// set up this block for insertion into alloced list |
dq->next = dq->memnext; // since dq was free, dq->memnext must |
// be allocated otherwise it would have |
// been coalesced |
dq->prev = dq->next->prev; |
|
alloced = dq; |
} else { |
|
CYG_ASSERT( dqsize > size, "block found is too small"); |
|
// Split into two memdq's, returning the second one |
|
// first get a memdq |
|
if ( NULL == freemetahead ) // out of metadata. |
return NULL; |
|
// FIXME: since we don't search all the way for an exact fit |
// first we may be able to find an exact fit later and therefore |
// not need more metadata. We don't do this yet though. |
|
alloced = freemetahead; |
freemetahead = alloced->next; |
|
// now set its values |
alloced->memnext = dq->memnext; |
alloced->next = dq->memnext; // since dq was free, dq->memnext must |
// be allocated otherwise it would have |
// been coalesced |
alloced->memprev = dq; |
alloced->prev = alloced->next->prev; |
|
alloced->mem = alloced->next->mem - size; |
|
// now set up dq (the portion that remains a free block) |
// dq->next and dq->prev are unchanged as we still end up pointing |
// at the same adjacent free blocks |
// dq->memprev obviously doesn't change |
|
dq->memnext = alloced; |
|
// finish inserting into memory block list |
alloced->memnext->memprev = alloced; |
alloced->next->prev = alloced->prev->next = alloced; |
|
check_free_memdq(dq); |
} |
|
CYG_ASSERT( bottom <= alloced->mem && alloced->mem <= top, |
"alloced outside pool" ); |
|
// Insert block into alloced list. |
alloced->next->prev = alloced->prev->next = alloced; |
|
check_alloced_memdq(alloced); |
|
freemem -=size; |
|
CYG_ASSERT( ((CYG_ADDRESS)alloced->mem & (alignment-1)) == 0, |
"returned memory not aligned" ); |
return alloced->mem; |
} |
|
// ------------------------------------------------------------------------- |
// resize existing allocation, if oldsize is non-NULL, previous |
// allocation size is placed into it. If previous size not available, |
// it is set to 0. NB previous allocation size may have been rounded up. |
// Occasionally the allocation can be adjusted *backwards* as well as, |
// or instead of forwards, therefore the address of the resized |
// allocation is returned, or NULL if no resizing was possible. |
// Note that this differs from ::realloc() in that no attempt is |
// made to call malloc() if resizing is not possible - that is left |
// to higher layers. The data is copied from old to new though. |
// The effects of alloc_ptr==NULL or newsize==0 are undefined |
|
inline cyg_uint8 * |
Cyg_Mempool_Sepmeta_Implementation::resize_alloc( cyg_uint8 *alloc_ptr, |
cyg_int32 newsize, |
cyg_int32 *oldsize ) |
{ |
cyg_int32 currsize, origsize; |
|
CYG_REPORT_FUNCTION(); |
|
CYG_CHECK_DATA_PTRC( alloc_ptr ); |
if ( NULL != oldsize ) |
CYG_CHECK_DATA_PTRC( oldsize ); |
|
CYG_ASSERT( (bottom <= alloc_ptr) && (alloc_ptr <= top), |
"alloc_ptr outside pool" ); |
|
struct memdq *dq=find_alloced_dq( alloc_ptr ); |
CYG_ASSERT( dq != NULL, "passed address not previously alloced"); |
|
currsize = origsize = dq->memnext->mem - dq->mem; |
if ( NULL != oldsize ) |
*oldsize = currsize; |
|
if ( newsize > currsize ) { |
cyg_int32 nextmemsize=0, prevmemsize=0; |
|
// see if we can increase the allocation size. Don't change anything |
// so we don't have to undo it later if it wouldn't fit |
if ( dq->next != dq->memnext ) { // if not equal, memnext must |
// be on free list |
nextmemsize = dq->memnext->memnext->mem - dq->memnext->mem; |
} |
if ( dq->prev != dq->memprev) { // ditto |
prevmemsize = dq->mem - dq->memprev->mem; |
} |
if (nextmemsize + prevmemsize + currsize < newsize) |
return NULL; // can't fit it |
|
// expand forwards |
if ( nextmemsize != 0 ) { |
if (nextmemsize <= (newsize - currsize)) { // taking all of it |
struct memdq *fblk = dq->memnext; |
|
// fix up mem list ptrs |
dq->memnext = fblk->memnext; |
dq->memnext->memprev=dq; |
// fix up free list ptrs |
fblk->next->prev = fblk->prev; |
fblk->prev->next = fblk->next; |
|
// return to meta list |
fblk->next = freemetahead; |
                freemetahead = fblk; |
currsize += nextmemsize; |
} else { // only needs some |
dq->memnext->mem += (newsize - currsize); |
currsize = newsize; |
} |
} |
|
// expand backwards |
if ( currsize < newsize && prevmemsize != 0 ) { |
cyg_uint8 *oldmem = dq->mem; |
|
CYG_ASSERT( prevmemsize >= newsize - currsize, |
"miscalculated expansion" ); |
if (prevmemsize == (newsize - currsize)) { // taking all of it |
struct memdq *fblk = dq->memprev; |
|
// fix up mem list ptrs |
dq->memprev = fblk->memprev; |
dq->memprev->memnext=dq; |
dq->mem = fblk->mem; |
// fix up free list ptrs |
fblk->next->prev = fblk->prev; |
fblk->prev->next = fblk->next; |
|
// return to meta list |
fblk->next = freemetahead; |
                freemetahead = fblk; |
} else { // only needs some |
dq->mem -= (newsize - currsize); |
} |
|
// move data into place |
copy_data( dq->mem, oldmem, origsize ); |
} |
} |
|
if (newsize < currsize) { |
// shrink allocation |
|
// easy if the next block is already a free block |
if ( dq->memnext != dq->next ) { |
dq->memnext->mem -= currsize - newsize; |
CYG_ASSERT( dq->memnext->mem > dq->mem, |
"moving next block back corruption" ); |
} else { |
// if its already allocated we need to create a new free list |
// entry |
if (NULL == freemetahead) |
return NULL; // can't do it |
|
struct memdq *fdq = freemetahead; |
freemetahead = fdq->next; |
|
fdq->memprev = dq; |
fdq->memnext = dq->memnext; |
fdq->mem = dq->mem + newsize; |
|
insert_free_block( fdq ); |
} |
} |
|
freemem += origsize - newsize; |
|
return dq->mem; |
} // resize_alloc() |
|
|
// ------------------------------------------------------------------------- |
// When no coalescing is done, free is simply a matter of using the |
// freed memory as an element of the free list linking it in at the |
// start. When coalescing, the free list is sorted |
|
inline cyg_bool |
Cyg_Mempool_Sepmeta_Implementation::free( cyg_uint8 *p, cyg_int32 size ) |
{ |
CYG_REPORT_FUNCTION(); |
|
CYG_CHECK_DATA_PTRC( p ); |
|
if (!((bottom <= p) && (p <= top))) |
return false; |
|
struct memdq *dq = find_alloced_dq( p ); |
if (NULL == dq) |
return false; |
|
if (0 == size) |
size = dq->memnext->mem - dq->mem; |
else { |
size = (size + alignment - 1) & -alignment; |
if( (dq->memnext->mem - dq->mem) != size ) |
return false; |
} |
|
check_alloced_memdq( dq ); |
|
// Remove dq from alloced list |
dq->prev->next = dq->next; |
dq->next->prev = dq->prev; |
|
insert_free_block( dq ); |
|
freemem += size; |
|
return true; |
} |
|
// ------------------------------------------------------------------------- |
|
inline void |
Cyg_Mempool_Sepmeta_Implementation::get_status( |
cyg_mempool_status_flag_t flags, |
Cyg_Mempool_Status &status ) |
{ |
CYG_REPORT_FUNCTION(); |
|
// as quick or quicker to just set it, rather than test flag first |
status.arenabase = obase; |
if ( 0 != (flags & CYG_MEMPOOL_STAT_ARENASIZE) ) |
status.arenasize = top - bottom; |
if ( 0 != (flags & CYG_MEMPOOL_STAT_TOTALALLOCATED) ) |
status.totalallocated = (top-bottom) - freemem; |
// as quick or quicker to just set it, rather than test flag first |
status.totalfree = freemem; |
if ( 0 != (flags & CYG_MEMPOOL_STAT_MAXFREE) ) { |
struct memdq *dq = &freehead; |
cyg_int32 mf = 0; |
|
do { |
CYG_ASSERT( dq->next->prev==dq, "Bad link in dq"); |
dq = dq->next; |
if (dq == &freehead) // wrapped round |
break; |
if(dq->memnext->mem - dq->mem > mf) |
mf = dq->memnext->mem - dq->mem; |
} while(1); |
status.maxfree = mf; |
} |
// as quick or quicker to just set it, rather than test flag first |
status.origbase = obase; |
// as quick or quicker to just set it, rather than test flag first |
status.origsize = osize; |
|
CYG_REPORT_RETURN(); |
|
} // get_status() |
|
|
// ------------------------------------------------------------------------- |
#endif // ifndef CYGONCE_MEMALLOC_SEPMETAIMPL_INL |
// EOF sepmetaimpl.inl |
/common/v2_0/include/mempoolt.hxx
0,0 → 1,123
#ifndef CYGONCE_KERNEL_MEMPOOLT_HXX |
#define CYGONCE_KERNEL_MEMPOOLT_HXX |
|
//========================================================================== |
// |
// mempoolt.hxx |
// |
// Mempoolt (Memory pool template) class declarations |
// |
//========================================================================== |
//####ECOSGPLCOPYRIGHTBEGIN#### |
// ------------------------------------------- |
// This file is part of eCos, the Embedded Configurable Operating System. |
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc. |
// |
// eCos is free software; you can redistribute it and/or modify it under |
// the terms of the GNU General Public License as published by the Free |
// Software Foundation; either version 2 or (at your option) any later version. |
// |
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY |
// WARRANTY; without even the implied warranty of MERCHANTABILITY or |
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
// for more details. |
// |
// You should have received a copy of the GNU General Public License along |
// with eCos; if not, write to the Free Software Foundation, Inc., |
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. |
// |
// As a special exception, if other files instantiate templates or use macros |
// or inline functions from this file, or you compile this file and link it |
// with other works to produce a work based on this file, this file does not |
// by itself cause the resulting work to be covered by the GNU General Public |
// License. However the source code for this file must still be made available |
// in accordance with section (3) of the GNU General Public License. |
// |
// This exception does not invalidate any other reasons why a work based on |
// this file might be covered by the GNU General Public License. |
// |
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc. |
// at http://sources.redhat.com/ecos/ecos-license/ |
// ------------------------------------------- |
//####ECOSGPLCOPYRIGHTEND#### |
//========================================================================== |
//#####DESCRIPTIONBEGIN#### |
// |
// Author(s): hmt |
// Contributors: hmt |
// Date: 1998-02-10 |
// Purpose:      Define Mempoolt class interface |
// Description:  The class defined here provides the APIs for thread-safe, |
// kernel-savvy memory managers; make a class with the |
// underlying allocator as the template parameter. |
// Usage: #include <cyg/kernel/mempoolt.hxx> |
// |
// |
//####DESCRIPTIONEND#### |
// |
//========================================================================== |
|
#include <cyg/kernel/ktypes.h> |
#include <cyg/infra/cyg_ass.h> // assertion macros |
#include <cyg/kernel/thread.hxx> |
|
template <class T> |
class Cyg_Mempoolt |
{ |
private: |
T pool; // underlying memory manager |
Cyg_ThreadQueue queue; // queue of waiting threads |
|
public: |
|
CYGDBG_DEFINE_CHECK_THIS |
|
Cyg_Mempoolt( |
cyg_uint8 *base, |
cyg_int32 size, |
CYG_ADDRWORD arg_thru ); // Constructor |
~Cyg_Mempoolt(); // Destructor |
|
// get some memory; wait if none available; return NULL if failed |
// due to interrupt |
cyg_uint8 *alloc( cyg_int32 size ); |
|
#ifdef CYGFUN_KERNEL_THREADS_TIMER |
// get some memory with a timeout; return NULL if failed |
// due to interrupt or timeout |
cyg_uint8 *alloc( cyg_int32 size, cyg_tick_count abs_timeout ); |
#endif |
|
// get some memory, return NULL if none available |
cyg_uint8 *try_alloc( cyg_int32 size ); |
|
// free the memory back to the pool |
cyg_bool free( cyg_uint8 *p, cyg_int32 size ); |
|
// if applicable: return -1 if not fixed size |
cyg_int32 get_blocksize(); |
|
// is anyone waiting for memory? |
cyg_bool waiting() { return ! queue.empty(); } |
|
// these two are obvious and generic |
cyg_int32 get_totalmem(); |
cyg_int32 get_freemem(); |
|
// get information about the construction parameters for external |
// freeing after the destruction of the holding object. |
void get_arena( |
cyg_uint8 * &base, |
cyg_int32 &size, |
CYG_ADDRWORD &arg_thru ); |
|
// Return the size of the memory allocation (previously returned |
// by alloc() or try_alloc() ) at ptr. Returns -1 if not found |
cyg_int32 |
get_allocation_size( cyg_uint8 * /* ptr */ ); |
}; |
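// Illustrative instantiation sketch (not part of the original header). The |
// template is wrapped around an underlying allocator implementation class; |
// the implementation, arena and typedef named below are only examples and |
// assume <cyg/memalloc/mvarimpl.hxx> has been included, with arg_thru being |
// passed through to that implementation's constructor. |
#if 0 |
typedef Cyg_Mempoolt<Cyg_Mempool_Variable_Implementation> example_pool_t; |
static cyg_uint8 example_arena[4096]; |
static example_pool_t example_pool( example_arena, sizeof(example_arena), |
                                    8 /* arg_thru, e.g. alignment */ ); |
#endif |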
|
// ------------------------------------------------------------------------- |
#endif // ifndef CYGONCE_KERNEL_MEMPOOLT_HXX |
// EOF mempoolt.hxx |
/common/v2_0/include/memjoin.hxx
0,0 → 1,131
#ifndef CYGONCE_MEMALLOC_MEMJOIN_HXX |
#define CYGONCE_MEMALLOC_MEMJOIN_HXX |
|
//========================================================================== |
// |
// memjoin.hxx |
// |
// Pseudo memory pool used to join together other memory pools |
// |
//========================================================================== |
//####ECOSGPLCOPYRIGHTBEGIN#### |
// ------------------------------------------- |
// This file is part of eCos, the Embedded Configurable Operating System. |
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc. |
// |
// eCos is free software; you can redistribute it and/or modify it under |
// the terms of the GNU General Public License as published by the Free |
// Software Foundation; either version 2 or (at your option) any later version. |
// |
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY |
// WARRANTY; without even the implied warranty of MERCHANTABILITY or |
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
// for more details. |
// |
// You should have received a copy of the GNU General Public License along |
// with eCos; if not, write to the Free Software Foundation, Inc., |
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. |
// |
// As a special exception, if other files instantiate templates or use macros |
// or inline functions from this file, or you compile this file and link it |
// with other works to produce a work based on this file, this file does not |
// by itself cause the resulting work to be covered by the GNU General Public |
// License. However the source code for this file must still be made available |
// in accordance with section (3) of the GNU General Public License. |
// |
// This exception does not invalidate any other reasons why a work based on |
// this file might be covered by the GNU General Public License. |
// |
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc. |
// at http://sources.redhat.com/ecos/ecos-license/ |
// ------------------------------------------- |
//####ECOSGPLCOPYRIGHTEND#### |
//========================================================================== |
//#####DESCRIPTIONBEGIN#### |
// |
// Author(s): jlarmour |
// Contributors: |
// Date: 2000-06-12 |
// Purpose: Define joined up memory pool class interface |
// Description: Inline class for constructing a pseudo allocator that contains |
// multiple other allocators. It caters solely to the requirements |
// of the malloc implementation. |
// Usage: #include <cyg/memalloc/memjoin.hxx> |
// |
// |
//####DESCRIPTIONEND#### |
// |
//========================================================================== |
|
// CONFIGURATION |
|
#include <pkgconf/memalloc.h> |
|
// INCLUDES |
|
#include <cyg/infra/cyg_type.h> // types |
//#include <cyg/infra/cyg_ass.h> // assertion macros |
|
#include <cyg/memalloc/common.hxx> // Common memory allocator infra |
|
|
// TYPE DEFINITIONS |
|
template <class T> |
class Cyg_Mempool_Joined |
{ |
protected: |
struct pooldesc { |
const cyg_uint8 *startaddr; |
const cyg_uint8 *endaddr; |
T *pool; |
}; |
struct pooldesc *pools; |
cyg_uint8 poolcount; |
|
T * |
find_pool_for_ptr( const cyg_uint8 * /* ptr */ ); |
|
public: |
// Constructor |
Cyg_Mempool_Joined( cyg_uint8 /* num_heaps */, T * /* heaps */[] ); |
|
// Destructor |
~Cyg_Mempool_Joined(); |
|
// get some memory, return NULL if none available |
cyg_uint8 * |
try_alloc( cyg_int32 /* size */ ); |
|
// resize existing allocation, if oldsize is non-NULL, previous |
// allocation size is placed into it. If previous size not available, |
// it is set to 0. NB previous allocation size may have been rounded up. |
// Occasionally the allocation can be adjusted *backwards* as well as, |
// or instead of forwards, therefore the address of the resized |
// allocation is returned, or NULL if no resizing was possible. |
// Note that this differs from ::realloc() in that no attempt is |
// made to call malloc() if resizing is not possible - that is left |
// to higher layers. The data is copied from old to new though. |
// The effects of alloc_ptr==NULL or newsize==0 are undefined |
cyg_uint8 * |
resize_alloc( cyg_uint8 * /* alloc_ptr */, cyg_int32 /* newsize */, |
cyg_int32 * /* oldsize */ =NULL ); |
|
// free the memory back to the pool |
// returns true on success |
cyg_bool |
free( cyg_uint8 * /* ptr */, cyg_int32 /* size */ =0 ); |
|
// Get memory pool status |
// flags is a bitmask of requested fields to fill in. The flags are |
// defined in common.hxx |
void |
get_status( cyg_mempool_status_flag_t /* flags */, |
Cyg_Mempool_Status & /* status */ ); |
|
}; |
|
#include <cyg/memalloc/memjoin.inl> |
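// Illustrative usage sketch (not part of the original header); the member |
// pool type, heap names and sizes are hypothetical (Cyg_Mempool_Variable |
// would require <cyg/memalloc/memvar.hxx>). free() and resize_alloc() are |
// directed to whichever member pool owns the pointer, cf. |
// find_pool_for_ptr() above. |
#if 0 |
static cyg_uint8 example_heap0[4096], example_heap1[8192]; |
static Cyg_Mempool_Variable example_pool0( example_heap0, sizeof(example_heap0) ); |
static Cyg_Mempool_Variable example_pool1( example_heap1, sizeof(example_heap1) ); |
static Cyg_Mempool_Variable *example_heaps[] = { &example_pool0, &example_pool1 }; |
static Cyg_Mempool_Joined<Cyg_Mempool_Variable> |
    example_joined( 2, example_heaps ); |
#endif |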
|
#endif // ifndef CYGONCE_MEMALLOC_MEMJOIN_HXX |
// EOF memjoin.hxx |
/common/v2_0/include/memfixed.hxx
0,0 → 1,146
#ifndef CYGONCE_MEMALLOC_MEMFIXED_HXX |
#define CYGONCE_MEMALLOC_MEMFIXED_HXX |
|
//========================================================================== |
// |
// memfixed.hxx |
// |
// Memory pool with fixed block class declarations |
// |
//========================================================================== |
//####ECOSGPLCOPYRIGHTBEGIN#### |
// ------------------------------------------- |
// This file is part of eCos, the Embedded Configurable Operating System. |
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc. |
// |
// eCos is free software; you can redistribute it and/or modify it under |
// the terms of the GNU General Public License as published by the Free |
// Software Foundation; either version 2 or (at your option) any later version. |
// |
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY |
// WARRANTY; without even the implied warranty of MERCHANTABILITY or |
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
// for more details. |
// |
// You should have received a copy of the GNU General Public License along |
// with eCos; if not, write to the Free Software Foundation, Inc., |
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. |
// |
// As a special exception, if other files instantiate templates or use macros |
// or inline functions from this file, or you compile this file and link it |
// with other works to produce a work based on this file, this file does not |
// by itself cause the resulting work to be covered by the GNU General Public |
// License. However the source code for this file must still be made available |
// in accordance with section (3) of the GNU General Public License. |
// |
// This exception does not invalidate any other reasons why a work based on |
// this file might be covered by the GNU General Public License. |
// |
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc. |
// at http://sources.redhat.com/ecos/ecos-license/ |
// ------------------------------------------- |
//####ECOSGPLCOPYRIGHTEND#### |
//========================================================================== |
//#####DESCRIPTIONBEGIN#### |
// |
// Author(s): hmt |
// Contributors: jlarmour |
// Date: 2000-06-12 |
// Purpose: Define Memfixed class interface |
// Description: Inline class for constructing a fixed block allocator |
// Usage: #include <cyg/memalloc/memfixed.hxx> |
// |
// |
//####DESCRIPTIONEND#### |
// |
//========================================================================== |
|
// CONFIGURATION |
|
#include <pkgconf/memalloc.h> |
#ifdef CYGSEM_MEMALLOC_ALLOCATOR_FIXED_THREADAWARE |
# include <pkgconf/system.h> |
# ifdef CYGPKG_KERNEL |
# include <pkgconf/kernel.h> |
# endif |
#endif |
|
|
// INCLUDES |
|
#include <cyg/infra/cyg_type.h> // types |
#include <cyg/infra/cyg_ass.h> // assertion macros |
|
#ifdef CYGFUN_KERNEL_THREADS_TIMER |
# include <cyg/kernel/ktypes.h> // cyg_tick_count |
#endif |
|
#ifdef CYGSEM_MEMALLOC_ALLOCATOR_FIXED_THREADAWARE |
# include <cyg/memalloc/mempolt2.hxx> // kernel safe mempool template |
#endif |
|
#include <cyg/memalloc/mfiximpl.hxx> // implementation of a fixed mem pool |
#include <cyg/memalloc/common.hxx> // Common memory allocator infra |
|
|
// TYPE DEFINITIONS |
|
class Cyg_Mempool_Fixed |
{ |
protected: |
#ifdef CYGSEM_MEMALLOC_ALLOCATOR_FIXED_THREADAWARE |
Cyg_Mempolt2<Cyg_Mempool_Fixed_Implementation> mypool; |
#else |
Cyg_Mempool_Fixed_Implementation mypool; |
#endif |
|
public: |
    // This API makes concrete a class implementing a thread-safe, |
    // kernel-savvy memory pool that manages fixed-size blocks. |
|
// Constructor: gives the base and size of the arena in which memory is |
// to be carved out, note that management structures are taken from the |
// same arena. Alloc_unit is the blocksize allocated. |
Cyg_Mempool_Fixed( |
cyg_uint8 * /* base */, |
cyg_int32 /* size */, |
CYG_ADDRWORD /* alloc_unit */ ); |
|
// Destructor |
~Cyg_Mempool_Fixed(); |
|
#ifdef CYGSEM_MEMALLOC_ALLOCATOR_FIXED_THREADAWARE |
// get some memory; wait if none available |
cyg_uint8 *alloc(); |
|
# ifdef CYGFUN_KERNEL_THREADS_TIMER |
// get some memory with a timeout |
cyg_uint8 *alloc( cyg_tick_count /* delay_timeout */ ); |
# endif |
#endif |
|
// get some memory, return NULL if none available |
cyg_uint8 *try_alloc(); |
|
    // resize an existing allocation. This is defined in the fixed block |
    // allocator purely for API consistency. It fails (returns NULL) for |
    // any size other than the block size; a request for the block size |
    // succeeds, returning a non-NULL pointer. |
cyg_uint8 * |
resize_alloc( cyg_uint8 * /* alloc_ptr */, cyg_int32 /* newsize */, |
cyg_int32 * /* oldsize */ =NULL ); |
|
// free the memory back to the pool |
cyg_bool free( cyg_uint8 * /* p */ ); |
|
// Get memory pool status |
// flags is a bitmask of requested fields to fill in. The flags are |
// defined in common.hxx |
void get_status( cyg_mempool_status_flag_t /* flags */, |
Cyg_Mempool_Status & /* status */ ); |
|
CYGDBG_DEFINE_CHECK_THIS |
}; |
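// Illustrative usage sketch (not part of the original header); names and |
// sizes are hypothetical. Every successful alloc()/try_alloc() hands out one |
// block of the alloc_unit size given to the constructor. |
#if 0 |
static cyg_uint8 example_fix_arena[4096]; |
static Cyg_Mempool_Fixed example_fix_pool( example_fix_arena, |
                                           sizeof(example_fix_arena), |
                                           64 /* alloc_unit */ ); |
static void example_fix_use( void ) |
{ |
    cyg_uint8 *p = example_fix_pool.try_alloc();    // one 64-byte block |
    if ( NULL != p ) |
        example_fix_pool.free( p ); |
} |
#endif |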
|
#endif // ifndef CYGONCE_MEMALLOC_MEMFIXED_HXX |
// EOF memfixed.hxx |
/common/v2_0/include/kapidata.h
0,0 → 1,100
#ifndef CYGONCE_MEMALLOC_KAPIDATA_H |
#define CYGONCE_MEMALLOC_KAPIDATA_H |
|
/*========================================================================== |
// |
// kapidata.h |
// |
// Memory allocator portion of kernel C API |
// |
//========================================================================== |
//####ECOSGPLCOPYRIGHTBEGIN#### |
// ------------------------------------------- |
// This file is part of eCos, the Embedded Configurable Operating System. |
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc. |
// |
// eCos is free software; you can redistribute it and/or modify it under |
// the terms of the GNU General Public License as published by the Free |
// Software Foundation; either version 2 or (at your option) any later version. |
// |
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY |
// WARRANTY; without even the implied warranty of MERCHANTABILITY or |
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
// for more details. |
// |
// You should have received a copy of the GNU General Public License along |
// with eCos; if not, write to the Free Software Foundation, Inc., |
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. |
// |
// As a special exception, if other files instantiate templates or use macros |
// or inline functions from this file, or you compile this file and link it |
// with other works to produce a work based on this file, this file does not |
// by itself cause the resulting work to be covered by the GNU General Public |
// License. However the source code for this file must still be made available |
// in accordance with section (3) of the GNU General Public License. |
// |
// This exception does not invalidate any other reasons why a work based on |
// this file might be covered by the GNU General Public License. |
// |
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc. |
// at http://sources.redhat.com/ecos/ecos-license/ |
// ------------------------------------------- |
//####ECOSGPLCOPYRIGHTEND#### |
//========================================================================== |
//#####DESCRIPTIONBEGIN#### |
// |
// Author(s): jlarmour |
// Contributors: |
// Date: 2000-06-12 |
// Purpose: Memory allocator data for kernel C API |
// Description: This is intentionally only to be included via |
// <cyg/kernel/kapi.h> |
// Usage: This file should not be used directly - instead it should |
// be used via <cyg/kernel/kapi.h> |
// |
// |
//####DESCRIPTIONEND#### |
// |
//========================================================================*/ |
|
#include <pkgconf/memalloc.h> |
|
/*---------------------------------------------------------------------------*/ |
|
/* This corresponds to the extra fields provided by the mempoolt template, |
   not to the actual size of the template in any given instance. */ |
typedef struct cyg_mempoolt { |
cyg_threadqueue queue; |
} cyg_mempoolt; |
|
|
struct cyg_mempool_var_memdq { |
struct cyg_mempool_var_memdq *prev, *next; |
cyg_int32 size; |
}; |
|
struct cyg_mempool_var { |
struct cyg_mempool_var_memdq head; |
cyg_uint8 *obase; |
cyg_int32 osize; |
cyg_uint8 *bottom; |
cyg_uint8 *top; |
cyg_int32 alignment; |
cyg_int32 freemem; |
cyg_mempoolt mempoolt; |
}; |
|
struct cyg_mempool_fix { |
cyg_uint32 *bitmap; |
cyg_int32 maptop; |
cyg_uint8 *mempool; |
cyg_int32 numblocks; |
cyg_int32 freeblocks; |
cyg_int32 blocksize; |
cyg_int32 firstfree; |
cyg_uint8 *top; |
cyg_mempoolt mempoolt; |
}; |
|
#endif /* ifndef CYGONCE_MEMALLOC_KAPIDATA_H */ |
/* EOF kapidata.h */ |
/common/v2_0/include/sepmeta.hxx
0,0 → 1,174
#ifndef CYGONCE_MEMALLOC_SEPMETA_HXX |
#define CYGONCE_MEMALLOC_SEPMETA_HXX |
|
//========================================================================== |
// |
// sepmeta.hxx |
// |
// Variable block memory pool with separate metadata |
// |
//========================================================================== |
//####ECOSGPLCOPYRIGHTBEGIN#### |
// ------------------------------------------- |
// This file is part of eCos, the Embedded Configurable Operating System. |
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc. |
// |
// eCos is free software; you can redistribute it and/or modify it under |
// the terms of the GNU General Public License as published by the Free |
// Software Foundation; either version 2 or (at your option) any later version. |
// |
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY |
// WARRANTY; without even the implied warranty of MERCHANTABILITY or |
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
// for more details. |
// |
// You should have received a copy of the GNU General Public License along |
// with eCos; if not, write to the Free Software Foundation, Inc., |
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. |
// |
// As a special exception, if other files instantiate templates or use macros |
// or inline functions from this file, or you compile this file and link it |
// with other works to produce a work based on this file, this file does not |
// by itself cause the resulting work to be covered by the GNU General Public |
// License. However the source code for this file must still be made available |
// in accordance with section (3) of the GNU General Public License. |
// |
// This exception does not invalidate any other reasons why a work based on |
// this file might be covered by the GNU General Public License. |
// |
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc. |
// at http://sources.redhat.com/ecos/ecos-license/ |
// ------------------------------------------- |
//####ECOSGPLCOPYRIGHTEND#### |
//========================================================================== |
//#####DESCRIPTIONBEGIN#### |
// |
// Author(s): jlarmour |
// Contributors: |
// Date: 2001-06-28 |
// Purpose: Define Sepmeta class interface |
// Description: Inline class for constructing a variable block allocator |
// with separate metadata |
// Usage: #include <cyg/memalloc/sepmeta.hxx> |
// |
// |
//####DESCRIPTIONEND#### |
// |
//========================================================================== |
|
// CONFIGURATION |
|
#include <pkgconf/memalloc.h> |
#ifdef CYGSEM_MEMALLOC_ALLOCATOR_SEPMETA_THREADAWARE |
# include <pkgconf/system.h> |
# ifdef CYGPKG_KERNEL |
# include <pkgconf/kernel.h> |
# endif |
#endif |
|
#if 0 |
// when used as an implementation for malloc, we need the following |
// to let the system know the name of the class |
#define CYGCLS_MEMALLOC_MALLOC_IMPL Cyg_Mempool_Sepmeta |
#endif |
|
// if the implementation is all that's required, don't output anything else |
#ifndef __MALLOC_IMPL_WANTED |
// INCLUDES |
|
#include <cyg/infra/cyg_type.h> // types |
#include <cyg/infra/cyg_ass.h> // assertion macros |
|
#ifdef CYGFUN_KERNEL_THREADS_TIMER |
# include <cyg/kernel/ktypes.h> // cyg_tick_count |
#endif |
|
#ifdef CYGSEM_MEMALLOC_ALLOCATOR_SEPMETA_THREADAWARE |
# include <cyg/memalloc/mempolt2.hxx> // kernel safe mempool template |
#endif |
|
#include <cyg/memalloc/sepmetaimpl.hxx>// implementation of this mem pool |
#include <cyg/memalloc/common.hxx> // Common memory allocator infra |
|
|
// TYPE DEFINITIONS |
|
class Cyg_Mempool_Sepmeta |
{ |
protected: |
// This is a horrible workaround for the fact that C++ doesn't let |
// you construct mypool explicitly if you have to initialize a struct |
// to pass as an argument first. |
struct Cyg_Mempool_Sepmeta_Implementation::constructorargs args; |
|
#ifdef CYGSEM_MEMALLOC_ALLOCATOR_SEPMETA_THREADAWARE |
Cyg_Mempolt2<Cyg_Mempool_Sepmeta_Implementation> mypool; |
#else |
Cyg_Mempool_Sepmeta_Implementation mypool; |
#endif |
public: |
// This API makes concrete a class which implements a thread-safe |
// kernel-savvy memory pool which manages variable size blocks with |
// separate metadata. |
|
// Constructor: gives the base and size of the arena in which memory is |
// to be carved out, note that management structures are taken from the |
// same arena. |
Cyg_Mempool_Sepmeta( cyg_uint8 * /* base */, cyg_int32 /* size */, |
cyg_int32 /* alignment */, |
cyg_uint8 * /* metabase */, |
cyg_uint32 /* metasize */); |
|
// Destructor |
~Cyg_Mempool_Sepmeta(); |
|
// get some memory; wait if none available |
// if we aren't configured to be thread-aware this is irrelevant |
#ifdef CYGSEM_MEMALLOC_ALLOCATOR_SEPMETA_THREADAWARE |
cyg_uint8 * |
alloc( cyg_int32 /* size */ ); |
|
# ifdef CYGFUN_KERNEL_THREADS_TIMER |
// get some memory with a timeout |
cyg_uint8 * |
alloc( cyg_int32 /* size */, cyg_tick_count /* delay_timeout */ ); |
# endif |
#endif |
|
// get some memory, return NULL if none available |
cyg_uint8 * |
try_alloc( cyg_int32 /* size */ ); |
|
// resize existing allocation, if oldsize is non-NULL, previous |
// allocation size is placed into it. If previous size not available, |
// it is set to 0. NB previous allocation size may have been rounded up. |
// Occasionally the allocation can be adjusted *backwards* as well as, |
// or instead of forwards, therefore the address of the resized |
// allocation is returned, or NULL if no resizing was possible. |
// Note that this differs from ::realloc() in that no attempt is |
// made to call malloc() if resizing is not possible - that is left |
// to higher layers. The data is copied from old to new though. |
// The effects of alloc_ptr==NULL or newsize==0 are undefined |
cyg_uint8 * |
resize_alloc( cyg_uint8 * /* alloc_ptr */, cyg_int32 /* newsize */, |
cyg_int32 * /* oldsize */ =NULL ); |
|
// free the memory back to the pool |
// returns true on success |
cyg_bool |
free( cyg_uint8 * /* ptr */, cyg_int32 /* size */ =0 ); |
|
// Get memory pool status |
// flags is a bitmask of requested fields to fill in. The flags are |
// defined in common.hxx |
void |
get_status( cyg_mempool_status_flag_t /* flags */, |
Cyg_Mempool_Status & /* status */ ); |
|
CYGDBG_DEFINE_CHECK_THIS |
}; |
|
#endif // ifndef __MALLOC_IMPL_WANTED |
|
#endif // ifndef CYGONCE_MEMALLOC_SEPMETA_HXX |
// EOF sepmeta.hxx |
/common/v2_0/include/mempolt2.hxx
0,0 → 1,139
#ifndef CYGONCE_MEMALLOC_MEMPOLT2_HXX |
#define CYGONCE_MEMALLOC_MEMPOLT2_HXX |
|
//========================================================================== |
// |
// mempolt2.hxx |
// |
// Mempolt2 (Memory pool template) class declarations |
// |
//========================================================================== |
//####ECOSGPLCOPYRIGHTBEGIN#### |
// ------------------------------------------- |
// This file is part of eCos, the Embedded Configurable Operating System. |
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc. |
// |
// eCos is free software; you can redistribute it and/or modify it under |
// the terms of the GNU General Public License as published by the Free |
// Software Foundation; either version 2 or (at your option) any later version. |
// |
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY |
// WARRANTY; without even the implied warranty of MERCHANTABILITY or |
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
// for more details. |
// |
// You should have received a copy of the GNU General Public License along |
// with eCos; if not, write to the Free Software Foundation, Inc., |
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. |
// |
// As a special exception, if other files instantiate templates or use macros |
// or inline functions from this file, or you compile this file and link it |
// with other works to produce a work based on this file, this file does not |
// by itself cause the resulting work to be covered by the GNU General Public |
// License. However the source code for this file must still be made available |
// in accordance with section (3) of the GNU General Public License. |
// |
// This exception does not invalidate any other reasons why a work based on |
// this file might be covered by the GNU General Public License. |
// |
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc. |
// at http://sources.redhat.com/ecos/ecos-license/ |
// ------------------------------------------- |
//####ECOSGPLCOPYRIGHTEND#### |
//========================================================================== |
//#####DESCRIPTIONBEGIN#### |
// |
// Author(s): hmt |
// Contributors: jlarmour |
// Date: 2000-06-12 |
// Purpose: Define Mempolt2 class interface |
// Description: The class defined here provides the APIs for thread-safe, |
// kernel-savvy memory managers; make a class with the |
// underlying allocator as the template parameter. |
// Usage: #include <cyg/memalloc/mempolt2.hxx> |
// |
// |
//####DESCRIPTIONEND#### |
// |
//========================================================================== |
|
// It is assumed that implementations using this file have already mandated |
// that the kernel is present. So we just go ahead and use it |
|
#include <pkgconf/memalloc.h> |
#include <cyg/kernel/ktypes.h> |
#include <cyg/infra/cyg_ass.h> // assertion macros |
#include <cyg/kernel/thread.hxx> |
#include <cyg/memalloc/common.hxx> // Common memory allocator infra |
|
template <class T> |
class Cyg_Mempolt2 |
{ |
private: |
T pool; // underlying memory manager |
Cyg_ThreadQueue queue; // queue of waiting threads |
|
class Mempolt2WaitInfo { |
private: |
Mempolt2WaitInfo() {} |
public: |
cyg_int32 size; |
cyg_uint8 *addr; |
Mempolt2WaitInfo( cyg_int32 allocsize ) |
{ size = allocsize; addr = 0; } |
}; |
|
public: |
|
Cyg_Mempolt2( |
cyg_uint8 *base, |
cyg_int32 size, |
CYG_ADDRWORD arg_thru ); // Constructor |
~Cyg_Mempolt2(); // Destructor |
|
// get some memory; wait if none available; return NULL if failed |
// due to interrupt |
cyg_uint8 *alloc( cyg_int32 size ); |
|
#ifdef CYGFUN_KERNEL_THREADS_TIMER |
// get some memory with a timeout; return NULL if failed |
// due to interrupt or timeout |
cyg_uint8 *alloc( cyg_int32 size, cyg_tick_count abs_timeout ); |
#endif |
|
// get some memory, return NULL if none available |
cyg_uint8 *try_alloc( cyg_int32 size ); |
|
// resize existing allocation, if oldsize is non-NULL, previous |
// allocation size is placed into it. If previous size not available, |
// it is set to 0. NB previous allocation size may have been rounded up. |
// Occasionally the allocation can be adjusted *backwards* as well as, |
// or instead of forwards, therefore the address of the resized |
// allocation is returned, or NULL if no resizing was possible. |
// Note that this differs from ::realloc() in that no attempt is |
// made to call malloc() if resizing is not possible - that is left |
// to higher layers. The data is copied from old to new though. |
// The effects of alloc_ptr==NULL or newsize==0 are undefined |
cyg_uint8 * |
resize_alloc( cyg_uint8 *alloc_ptr, cyg_int32 newsize, |
cyg_int32 *oldsize ); |
|
// free the memory back to the pool |
// returns true on success |
cyg_bool free( cyg_uint8 *p, cyg_int32 size ); |
|
// Get memory pool status |
// flags is a bitmask of requested fields to fill in. The flags are |
// defined in common.hxx |
void get_status( cyg_mempool_status_flag_t flags, |
Cyg_Mempool_Status &status ); |
|
CYGDBG_DEFINE_CHECK_THIS |
|
}; |
|
#include <cyg/memalloc/mempolt2.inl> |
|
// ------------------------------------------------------------------------- |
#endif // ifndef CYGONCE_MEMALLOC_MEMPOLT2_HXX |
// EOF mempolt2.hxx |
/common/v2_0/include/mfiximpl.inl
0,0 → 1,238
#ifndef CYGONCE_MEMALLOC_MFIXIMPL_INL |
#define CYGONCE_MEMALLOC_MFIXIMPL_INL |
|
//========================================================================== |
// |
// mfiximpl.inl |
// |
// Memory pool with fixed block class declarations |
// |
//========================================================================== |
//####ECOSGPLCOPYRIGHTBEGIN#### |
// ------------------------------------------- |
// This file is part of eCos, the Embedded Configurable Operating System. |
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc. |
// |
// eCos is free software; you can redistribute it and/or modify it under |
// the terms of the GNU General Public License as published by the Free |
// Software Foundation; either version 2 or (at your option) any later version. |
// |
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY |
// WARRANTY; without even the implied warranty of MERCHANTABILITY or |
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
// for more details. |
// |
// You should have received a copy of the GNU General Public License along |
// with eCos; if not, write to the Free Software Foundation, Inc., |
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. |
// |
// As a special exception, if other files instantiate templates or use macros |
// or inline functions from this file, or you compile this file and link it |
// with other works to produce a work based on this file, this file does not |
// by itself cause the resulting work to be covered by the GNU General Public |
// License. However the source code for this file must still be made available |
// in accordance with section (3) of the GNU General Public License. |
// |
// This exception does not invalidate any other reasons why a work based on |
// this file might be covered by the GNU General Public License. |
// |
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc. |
// at http://sources.redhat.com/ecos/ecos-license/ |
// ------------------------------------------- |
//####ECOSGPLCOPYRIGHTEND#### |
//========================================================================== |
//#####DESCRIPTIONBEGIN#### |
// |
// Author(s): hmt |
// Contributors: jlarmour |
// Date: 2000-06-12 |
// Purpose: Define Mfiximpl class interface |
// Description: Inline class for constructing a fixed block allocator |
// Usage: #include <cyg/kernel/mfiximpl.hxx> |
// |
// |
//####DESCRIPTIONEND#### |
// |
//========================================================================== |
|
#include <pkgconf/memalloc.h> |
#include <cyg/hal/hal_arch.h> // HAL_LSBIT_INDEX magic asm code |
#include <cyg/memalloc/mfiximpl.hxx> |
|
// ------------------------------------------------------------------------- |
|
inline |
Cyg_Mempool_Fixed_Implementation::Cyg_Mempool_Fixed_Implementation( |
cyg_uint8 *base, |
cyg_int32 size, |
CYG_ADDRWORD alloc_unit ) |
{ |
cyg_int32 i; |
bitmap = (cyg_uint32 *)base; |
blocksize = alloc_unit; |
|
CYG_ASSERT( blocksize > 0, "Bad blocksize" ); |
CYG_ASSERT( size > 2, "Bad size" ); |
CYG_ASSERT( blocksize < size, "blocksize, size bad" ); |
|
numblocks = size / blocksize; |
top = base + size; |
|
CYG_ASSERT( numblocks >= 2, "numblocks bad" ); |
|
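// The bitmap needs one 32-bit word for every 32 blocks; shrink |
// numblocks until both the bitmap and the blocks it maps fit within |
// 'size' bytes. |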
i = (numblocks + 31)/32; // number of words to map blocks |
while ( (i * 4 + numblocks * blocksize) > size ) { |
numblocks --; // steal one block for admin |
i = (numblocks + 31)/32; // number of words to map blocks |
} |
|
CYG_ASSERT( 0 < i, "Bad word count for bitmap after fitment" ); |
CYG_ASSERT( 0 < numblocks, "Bad block count after fitment" ); |
|
maptop = i; |
// this should leave space for the bitmap and maintain alignment |
mempool = top - (numblocks * blocksize); |
CYG_ASSERT( base < mempool && mempool < top, "mempool escaped" ); |
CYG_ASSERT( (cyg_uint8 *)(&bitmap[ maptop ]) <= mempool, |
"mempool overwrites bitmap" ); |
CYG_ASSERT( &mempool[ numblocks * blocksize ] <= top, |
"mempool overflows top" ); |
freeblocks = numblocks; |
firstfree = 0; |
|
// clear out the bitmap; no blocks allocated yet |
for ( i = 0; i < maptop; i++ ) |
bitmap[ i ] = 0; |
// apart from the non-existent ones at the top |
for ( i = ((numblocks-1)&31) + 1; i < 32; i++ ) |
bitmap[ maptop - 1 ] |= ( 1 << i ); |
} |
|
// ------------------------------------------------------------------------- |
|
inline |
Cyg_Mempool_Fixed_Implementation::~Cyg_Mempool_Fixed_Implementation() |
{ |
} |
|
// ------------------------------------------------------------------------- |
|
inline cyg_uint8 * |
Cyg_Mempool_Fixed_Implementation::try_alloc( cyg_int32 size ) |
{ |
// size parameter is not used |
CYG_UNUSED_PARAM( cyg_int32, size ); |
if ( 0 >= freeblocks ) |
return NULL; |
cyg_int32 i = firstfree; |
cyg_uint8 *p = NULL; |
do { |
if ( 0xffffffff != bitmap[ i ] ) { |
// then there is a free block in this bucket |
register cyg_uint32 j, k; |
k = ~bitmap[ i ]; // look for a 1 in complement |
HAL_LSBIT_INDEX( j, k ); |
CYG_ASSERT( 0 <= j && j <= 31, "Bad bit index" ); |
CYG_ASSERT( 0 == (bitmap[ i ] & (1 << j)), "Found bit not clear" ); |
bitmap[ i ] |= (1 << j); // set it allocated |
firstfree = i; |
freeblocks--; |
CYG_ASSERT( freeblocks >= 0, "allocated too many" ); |
p = &mempool[ ((32 * i) + j) * blocksize ]; |
break; |
} |
if ( ++i >= maptop ) |
i = 0; // wrap if at top |
} while ( i != firstfree ); // prevent hang if internal error |
CYG_ASSERT( NULL != p, "Should have a block here" ); |
CYG_ASSERT( mempool <= p && p <= top, "alloc mem escaped" ); |
return p; |
} |
|
// ------------------------------------------------------------------------- |
// supposedly resize existing allocation. This is defined in the |
// fixed block allocator purely for API consistency. It will return |
// an error (false) for all values, except for the blocksize |
// returns true on success |
|
inline cyg_uint8 * |
Cyg_Mempool_Fixed_Implementation::resize_alloc( cyg_uint8 *alloc_ptr, |
cyg_int32 newsize, |
cyg_int32 *oldsize ) |
{ |
CYG_CHECK_DATA_PTRC( alloc_ptr ); |
if ( NULL != oldsize ) |
CYG_CHECK_DATA_PTRC( oldsize ); |
|
CYG_ASSERT( alloc_ptr >= mempool && alloc_ptr < top, |
"alloc_ptr outside pool" ); |
|
if ( NULL != oldsize ) |
*oldsize = blocksize; |
|
if (newsize == blocksize) |
return alloc_ptr; |
else |
return NULL; |
} // resize_alloc() |
|
|
// ------------------------------------------------------------------------- |
|
inline cyg_bool |
Cyg_Mempool_Fixed_Implementation::free( cyg_uint8 *p, cyg_int32 size ) |
{ |
// size parameter is not used |
CYG_UNUSED_PARAM( cyg_int32, size ); |
if ( p < mempool || p >= top ) |
return false; // address way out of bounds |
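// Convert the address back to a block index, and from that to a word |
// and bit position in the allocation bitmap. |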
cyg_int32 i = p - mempool; |
i = i / blocksize; |
if ( &mempool[ i * blocksize ] != p ) |
return false; // address not aligned |
cyg_int32 j = i / 32; |
CYG_ASSERT( 0 <= j && j < maptop, "map index escaped" ); |
i = i - 32 * j; |
CYG_ASSERT( 0 <= i && i < 32, "map bit index escaped" ); |
if ( ! ((1 << i) & bitmap[ j ] ) ) |
return false; // block was not allocated |
bitmap[ j ] &=~(1 << i); // clear the bit |
freeblocks++; // count the block |
CYG_ASSERT( freeblocks <= numblocks, "freeblocks overflow" ); |
return true; |
} |
|
// ------------------------------------------------------------------------- |
|
inline void |
Cyg_Mempool_Fixed_Implementation::get_status( |
cyg_mempool_status_flag_t flags, |
Cyg_Mempool_Status &status ) |
{ |
// as quick or quicker to just set it, rather than test flag first |
status.arenabase = (const cyg_uint8 *)bitmap; |
if ( 0 != (flags & CYG_MEMPOOL_STAT_ARENASIZE) ) |
status.arenasize = top - (cyg_uint8 *)bitmap; |
if ( 0 != (flags & CYG_MEMPOOL_STAT_FREEBLOCKS) ) |
status.freeblocks = freeblocks; |
if ( 0 != (flags & CYG_MEMPOOL_STAT_TOTALALLOCATED) ) |
status.totalallocated = blocksize * numblocks; |
if ( 0 != (flags & CYG_MEMPOOL_STAT_TOTALFREE) ) |
status.totalfree = blocksize * freeblocks; |
if ( 0 != (flags & CYG_MEMPOOL_STAT_BLOCKSIZE) ) |
status.blocksize = blocksize; |
if ( 0 != (flags & CYG_MEMPOOL_STAT_MAXFREE) ) { |
status.maxfree = freeblocks > 0 ? blocksize : 0; |
} |
// as quick or quicker to just set it, rather than test flag first |
status.origbase = (const cyg_uint8 *)bitmap; |
if ( 0 != (flags & CYG_MEMPOOL_STAT_ORIGSIZE) ) |
status.origsize = top - (cyg_uint8 *)bitmap; |
// quicker to just set it, rather than test flag first |
status.maxoverhead = 0; |
|
} // get_status() |
|
// ------------------------------------------------------------------------- |
#endif // ifndef CYGONCE_MEMALLOC_MFIXIMPL_INL |
// EOF mfiximpl.inl |
/common/v2_0/include/mvarimpl.inl
0,0 → 1,450
#ifndef CYGONCE_MEMALLOC_MVARIMPL_INL |
#define CYGONCE_MEMALLOC_MVARIMPL_INL |
|
//========================================================================== |
// |
// mvarimpl.inl |
// |
// Memory pool with variable block class declarations |
// |
//========================================================================== |
//####ECOSGPLCOPYRIGHTBEGIN#### |
// ------------------------------------------- |
// This file is part of eCos, the Embedded Configurable Operating System. |
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc. |
// |
// eCos is free software; you can redistribute it and/or modify it under |
// the terms of the GNU General Public License as published by the Free |
// Software Foundation; either version 2 or (at your option) any later version. |
// |
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY |
// WARRANTY; without even the implied warranty of MERCHANTABILITY or |
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
// for more details. |
// |
// You should have received a copy of the GNU General Public License along |
// with eCos; if not, write to the Free Software Foundation, Inc., |
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. |
// |
// As a special exception, if other files instantiate templates or use macros |
// or inline functions from this file, or you compile this file and link it |
// with other works to produce a work based on this file, this file does not |
// by itself cause the resulting work to be covered by the GNU General Public |
// License. However the source code for this file must still be made available |
// in accordance with section (3) of the GNU General Public License. |
// |
// This exception does not invalidate any other reasons why a work based on |
// this file might be covered by the GNU General Public License. |
// |
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc. |
// at http://sources.redhat.com/ecos/ecos-license/ |
// ------------------------------------------- |
//####ECOSGPLCOPYRIGHTEND#### |
//========================================================================== |
//#####DESCRIPTIONBEGIN#### |
// |
// Author(s): hmt |
// Contributors: jlarmour |
// Date: 2000-06-12 |
// Purpose: Define Mvarimpl class interface |
// Description: Inline class for constructing a variable block allocator |
// Usage: #include <cyg/memalloc/mvarimpl.hxx> |
// |
// |
//####DESCRIPTIONEND#### |
// |
//========================================================================== |
|
#include <pkgconf/memalloc.h> |
#include <cyg/memalloc/mvarimpl.hxx> |
|
#include <cyg/infra/cyg_ass.h> // assertion support |
#include <cyg/infra/cyg_trac.h> // tracing support |
|
// Simple allocator |
|
// The free list is stored on a doubly linked list, each member of |
// which is stored in the body of the free memory. The head of the |
// list has the same structure but its size field is zero. This |
// resides in the memory pool structure. Always having at least one |
// item on the list simplifies the alloc and free code. |
|
// |
inline cyg_int32 |
Cyg_Mempool_Variable_Implementation::roundup( cyg_int32 size ) |
{ |
|
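// Each allocation is preceded by a struct memdq header, so allow for |
// that, then round up to a multiple of 'alignment' (forced to a power |
// of two by the constructor). |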
size += sizeof(struct memdq); |
size = (size + alignment - 1) & -alignment; |
return size; |
} |
|
inline struct Cyg_Mempool_Variable_Implementation::memdq * |
Cyg_Mempool_Variable_Implementation::addr2memdq( cyg_uint8 *addr ) |
{ |
struct memdq *dq; |
dq = (struct memdq *)(roundup((cyg_int32)addr) - sizeof(struct memdq)); |
return dq; |
} |
|
inline struct Cyg_Mempool_Variable_Implementation::memdq * |
Cyg_Mempool_Variable_Implementation::alloc2memdq( cyg_uint8 *addr ) |
{ |
return (struct memdq *)(addr - sizeof(struct memdq)); |
} |
|
inline cyg_uint8 * |
Cyg_Mempool_Variable_Implementation::memdq2alloc( struct memdq *dq ) |
{ |
return ((cyg_uint8 *)dq + sizeof(struct memdq)); |
} |
|
// ------------------------------------------------------------------------- |
|
inline void |
Cyg_Mempool_Variable_Implementation::insert_free_block( struct memdq *dq ) |
{ |
struct memdq *hdq=&head; |
|
freemem += dq->size; |
#ifdef CYGSEM_MEMALLOC_ALLOCATOR_VARIABLE_COALESCE |
// For simple coalescing have the free list be sorted by memory base address |
struct memdq *idq; |
|
for (idq = hdq->next; idq != hdq; idq = idq->next) { |
if (idq > dq) |
break; |
} |
// we want to insert immediately before idq |
dq->next = idq; |
dq->prev = idq->prev; |
idq->prev = dq; |
dq->prev->next = dq; |
|
// Now do coalescing, but leave the head of the list alone. |
if (dq->next != hdq && (char *)dq + dq->size == (char *)dq->next) { |
dq->size += dq->next->size; |
dq->next = dq->next->next; |
dq->next->prev = dq; |
} |
if (dq->prev != hdq && (char *)dq->prev + dq->prev->size == (char *)dq) { |
dq->prev->size += dq->size; |
dq->prev->next = dq->next; |
dq->next->prev = dq->prev; |
dq = dq->prev; |
} |
#else |
dq->prev = hdq; |
dq->next = hdq->next; |
hdq->next = dq; |
dq->next->prev=dq; |
#endif |
} |
|
// ------------------------------------------------------------------------- |
|
inline |
Cyg_Mempool_Variable_Implementation::Cyg_Mempool_Variable_Implementation( |
cyg_uint8 *base, |
cyg_int32 size, |
CYG_ADDRWORD align ) |
{ |
CYG_REPORT_FUNCTION(); |
|
CYG_ASSERT( align > 0, "Bad alignment" ); |
CYG_ASSERT(0!=align ,"align is zero"); |
CYG_ASSERT(0==(align & align-1),"align not a power of 2"); |
|
if ((unsigned)size < sizeof(struct memdq)) { |
bottom = NULL; |
return; |
} |
|
obase=base; |
osize=size; |
|
alignment = align; |
while (alignment < (cyg_int32)sizeof(struct memdq)) |
alignment += alignment; |
CYG_ASSERT(0==(alignment & alignment-1),"alignment not a power of 2"); |
|
// the memdq for each allocation is always positioned immediately before |
// an aligned address, so that the allocation (i.e. what eventually gets |
// returned from alloc()) is at the correctly aligned address |
// Therefore bottom is set to the lowest available address given the size of |
// struct memdq and the alignment. |
bottom = (cyg_uint8 *)addr2memdq(base); |
|
// because we split free blocks by allocating memory from the end, not |
// the beginning, then to preserve alignment, the *top* must also be |
// aligned such that (top-bottom) is a multiple of the alignment |
top = (cyg_uint8 *)((cyg_int32)(base+size+sizeof(struct memdq)) & -alignment) - |
sizeof(struct memdq); |
|
CYG_ASSERT( top > bottom , "heap too small" ); |
CYG_ASSERT( top <= (base+size), "top too large" ); |
CYG_ASSERT( ((cyg_int32)(top+sizeof(struct memdq)) & alignment-1)==0, |
"top badly aligned" ); |
|
struct memdq *hdq = &head, *dq = (struct memdq *)bottom; |
|
CYG_ASSERT( ((cyg_int32)memdq2alloc(dq) & alignment-1)==0, |
"bottom badly aligned" ); |
|
hdq->prev = hdq->next = dq; |
hdq->size = 0; |
dq->prev = dq->next = hdq; |
|
freemem = dq->size = top - bottom; |
} |
|
// ------------------------------------------------------------------------- |
|
inline |
Cyg_Mempool_Variable_Implementation::~Cyg_Mempool_Variable_Implementation() |
{ |
} |
|
// ------------------------------------------------------------------------- |
// allocation is simple |
// First we look down the free list for a large enough block |
// If we find a block the right size, we unlink the block from |
// the free list and return a pointer to it. |
// If we find a larger block, we chop a piece off the end |
// and return that |
// Otherwise we will eventually get back to the head of the list |
// and return NULL |
inline cyg_uint8 * |
Cyg_Mempool_Variable_Implementation::try_alloc( cyg_int32 size ) |
{ |
struct memdq *dq = &head; |
cyg_uint8 *alloced; |
|
CYG_REPORT_FUNCTION(); |
|
// Allow uninitialised (zero sized) heaps because they could exist as a |
// quirk of the MLT setup where a dynamically sized heap is at the top of |
// memory. |
if (NULL == bottom) |
return NULL; |
|
size = roundup(size); |
|
do { |
CYG_ASSERT( dq->next->prev==dq, "Bad link in dq"); |
dq = dq->next; |
if(0 == dq->size) { |
CYG_ASSERT(dq == &head, "bad free block"); |
return NULL; |
} |
} while(dq->size < size); |
|
if( size == dq->size ) { |
// exact fit -- unlink from free list |
dq->prev->next = dq->next; |
dq->next->prev = dq->prev; |
alloced = (cyg_uint8 *)dq; |
} else { |
|
CYG_ASSERT( dq->size > size, "block found is too small"); |
|
// allocate portion of memory from end of block |
|
dq->size -=size; |
|
// The portion left over has to be large enough to store a |
// struct memdq. This is guaranteed because the alignment is |
// larger than the size of this structure. |
|
CYG_ASSERT( (cyg_int32)sizeof(struct memdq)<=dq->size , |
"not enough space for list item" ); |
|
alloced = (cyg_uint8 *)dq + dq->size; |
} |
|
CYG_ASSERT( bottom<=alloced && alloced<=top, "alloced outside pool" ); |
|
// Set size on allocated block |
|
dq = (struct memdq *)alloced; |
dq->size = size; |
dq->next = dq->prev = (struct memdq *)0xd530d53; // magic number |
|
freemem -=size; |
|
cyg_uint8 *ptr = memdq2alloc( dq ); |
CYG_ASSERT( ((CYG_ADDRESS)ptr & (alignment-1)) == 0, |
"returned memory not aligned" ); |
return ptr; |
} |
|
// ------------------------------------------------------------------------- |
// resize existing allocation, if oldsize is non-NULL, previous |
// allocation size is placed into it. If previous size not available, |
// it is set to 0. NB previous allocation size may have been rounded up. |
// Occasionally the allocation can be adjusted *backwards* as well as, |
// or instead of forwards, therefore the address of the resized |
// allocation is returned, or NULL if no resizing was possible. |
// Note that this differs from ::realloc() in that no attempt is |
// made to call malloc() if resizing is not possible - that is left |
// to higher layers. The data is copied from old to new though. |
// The effects of alloc_ptr==NULL or newsize==0 are undefined |
|
inline cyg_uint8 * |
Cyg_Mempool_Variable_Implementation::resize_alloc( cyg_uint8 *alloc_ptr, |
cyg_int32 newsize, |
cyg_int32 *oldsize ) |
{ |
cyg_uint8 *ret = NULL; |
|
CYG_REPORT_FUNCTION(); |
|
CYG_CHECK_DATA_PTRC( alloc_ptr ); |
if ( NULL != oldsize ) |
CYG_CHECK_DATA_PTRC( oldsize ); |
|
CYG_ASSERT( (bottom <= alloc_ptr) && (alloc_ptr <= top), |
"alloc_ptr outside pool" ); |
|
struct memdq *dq=alloc2memdq( alloc_ptr ); |
|
// check magic number in block for validity |
CYG_ASSERT( (dq->next == dq->prev) && |
(dq->next == (struct memdq *)0xd530d53), "bad alloc_ptr" ); |
|
newsize = roundup(newsize); |
|
if ( NULL != oldsize ) |
*oldsize = dq->size; |
|
if ( newsize > dq->size ) { |
// see if we can increase the allocation size |
if ( (cyg_uint8 *)dq + newsize <= top ) { // obviously can't exceed pool |
struct memdq *nextdq = (struct memdq *)((cyg_uint8 *)dq + dq->size); |
|
if ( (nextdq->next != nextdq->prev) && |
(nextdq->size >= (newsize - dq->size)) ) { |
// it's free and it's big enough |
// we therefore temporarily join this block and *all* of |
// the next block, so that the code below can then split it |
nextdq->next->prev = nextdq->prev; |
nextdq->prev->next = nextdq->next; |
dq->size += nextdq->size; |
freemem -= nextdq->size; |
} |
} // if |
} // if |
|
// this is also used if the allocation size was increased and we need |
// to split it |
if ( newsize < dq->size ) { |
// We can shrink the allocation by splitting into smaller allocation and |
// new free block |
struct memdq *newdq = (struct memdq *)((cyg_uint8 *)dq + newsize); |
|
newdq->size = dq->size - newsize; |
dq->size = newsize; |
|
CYG_ASSERT( (cyg_int32)sizeof(struct memdq)<=newdq->size , |
"not enough space for list item" ); |
|
// now return the new space back to the freelist |
insert_free_block( newdq ); |
|
ret = alloc_ptr; |
|
} // if |
else if ( newsize == dq->size ) { |
ret = alloc_ptr; |
} |
|
return ret; |
|
} // resize_alloc() |
|
|
// ------------------------------------------------------------------------- |
// When no coalescing is done, free is simply a matter of using the |
// freed memory as an element of the free list linking it in at the |
// start. When coalescing, the free list is sorted |
|
inline cyg_bool |
Cyg_Mempool_Variable_Implementation::free( cyg_uint8 *p, cyg_int32 size ) |
{ |
CYG_REPORT_FUNCTION(); |
|
CYG_CHECK_DATA_PTRC( p ); |
|
if (!((bottom <= p) && (p <= top))) |
return false; |
|
struct memdq *dq=alloc2memdq( p ); |
|
// check magic number in block for validity |
if ( (dq->next != dq->prev) || |
(dq->next != (struct memdq *)0xd530d53) ) |
return false; |
|
if ( 0==size ) { |
size = dq->size; |
} else { |
size = roundup(size); |
} |
|
if( dq->size != size ) |
return false; |
|
CYG_ASSERT( (cyg_int32)sizeof(struct memdq)<=size , |
"not enough space for list item" ); |
|
insert_free_block( dq ); |
|
return true; |
} |
|
// ------------------------------------------------------------------------- |
|
inline void |
Cyg_Mempool_Variable_Implementation::get_status( |
cyg_mempool_status_flag_t flags, |
Cyg_Mempool_Status &status ) |
{ |
CYG_REPORT_FUNCTION(); |
|
// as quick or quicker to just set it, rather than test flag first |
status.arenabase = obase; |
if ( 0 != (flags & CYG_MEMPOOL_STAT_ARENASIZE) ) |
status.arenasize = top - bottom; |
if ( 0 != (flags & CYG_MEMPOOL_STAT_TOTALALLOCATED) ) |
status.totalallocated = (top-bottom) - freemem; |
// as quick or quicker to just set it, rather than test flag first |
status.totalfree = freemem; |
if ( 0 != (flags & CYG_MEMPOOL_STAT_MAXFREE) ) { |
struct memdq *dq = &head; |
cyg_int32 mf = 0; |
|
do { |
CYG_ASSERT( dq->next->prev==dq, "Bad link in dq"); |
dq = dq->next; |
if(0 == dq->size) { |
CYG_ASSERT(dq == &head, "bad free block"); |
break; |
} |
if(dq->size > mf) |
mf = dq->size; |
} while(1); |
status.maxfree = mf - sizeof(struct memdq); |
} |
// as quick or quicker to just set it, rather than test flag first |
status.origbase = obase; |
// as quick or quicker to just set it, rather than test flag first |
status.origsize = osize; |
|
CYG_REPORT_RETURN(); |
|
} // get_status() |
|
|
// ------------------------------------------------------------------------- |
#endif // ifndef CYGONCE_MEMALLOC_MVARIMPL_INL |
// EOF mvarimpl.inl |
/common/v2_0/include/kapi.h
0,0 → 1,182
#ifndef CYGONCE_MEMALLOC_KAPI_H |
#define CYGONCE_MEMALLOC_KAPI_H |
|
/*========================================================================== |
// |
// kapi.h |
// |
// Memory allocator portion of kernel C API |
// |
//========================================================================== |
//####ECOSGPLCOPYRIGHTBEGIN#### |
// ------------------------------------------- |
// This file is part of eCos, the Embedded Configurable Operating System. |
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc. |
// |
// eCos is free software; you can redistribute it and/or modify it under |
// the terms of the GNU General Public License as published by the Free |
// Software Foundation; either version 2 or (at your option) any later version. |
// |
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY |
// WARRANTY; without even the implied warranty of MERCHANTABILITY or |
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
// for more details. |
// |
// You should have received a copy of the GNU General Public License along |
// with eCos; if not, write to the Free Software Foundation, Inc., |
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. |
// |
// As a special exception, if other files instantiate templates or use macros |
// or inline functions from this file, or you compile this file and link it |
// with other works to produce a work based on this file, this file does not |
// by itself cause the resulting work to be covered by the GNU General Public |
// License. However the source code for this file must still be made available |
// in accordance with section (3) of the GNU General Public License. |
// |
// This exception does not invalidate any other reasons why a work based on |
// this file might be covered by the GNU General Public License. |
// |
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc. |
// at http://sources.redhat.com/ecos/ecos-license/ |
// ------------------------------------------- |
//####ECOSGPLCOPYRIGHTEND#### |
//========================================================================== |
//#####DESCRIPTIONBEGIN#### |
// |
// Author(s): jlarmour |
// Contributors: |
// Date: 2000-06-12 |
// Purpose: Memory allocator portion of kernel C API |
// Description: This is intentionally only to be included from |
// <cyg/kernel/kapi.h> |
// Usage: This file should not be used directly - instead it should |
// be used via <cyg/kernel/kapi.h> |
// |
// |
//####DESCRIPTIONEND#### |
// |
//========================================================================*/ |
|
/* CONFIGURATION */ |
|
#include <pkgconf/memalloc.h> |
|
/* TYPE DEFINITIONS */ |
|
struct cyg_mempool_var; |
typedef struct cyg_mempool_var cyg_mempool_var; |
|
struct cyg_mempool_fix; |
typedef struct cyg_mempool_fix cyg_mempool_fix; |
|
/*-----------------------------------------------------------------------*/ |
/* Memory pools */ |
|
/* There are two sorts of memory pools. A variable size memory pool |
is for allocating blocks of any size. A fixed size memory pool has |
the block size specified when the pool is created, and only provides |
blocks of that size. */ |
|
/* Create a variable size memory pool */ |
void cyg_mempool_var_create( |
void *base, /* base of memory to use for pool */ |
cyg_int32 size, /* size of memory in bytes */ |
cyg_handle_t *handle, /* returned handle of memory pool */ |
cyg_mempool_var *var /* space to put pool structure in */ |
); |
|
/* Delete variable size memory pool */ |
void cyg_mempool_var_delete(cyg_handle_t varpool); |
|
#ifdef CYGSEM_MEMALLOC_ALLOCATOR_VARIABLE_THREADAWARE |
|
/* Allocates a block of length size. This waits if the memory is not |
currently available. */ |
void *cyg_mempool_var_alloc(cyg_handle_t varpool, cyg_int32 size); |
|
# ifdef CYGFUN_KERNEL_THREADS_TIMER |
|
/* Allocates a block of length size. This waits until abstime, |
if the memory is not already available. NULL is returned if |
no memory is available. */ |
void *cyg_mempool_var_timed_alloc( |
cyg_handle_t varpool, |
cyg_int32 size, |
cyg_tick_count_t abstime); |
|
# endif |
#endif |
|
/* Allocates a block of length size. NULL is returned if no memory is |
available. */ |
void *cyg_mempool_var_try_alloc( |
cyg_handle_t varpool, |
cyg_int32 size); |
|
/* Frees memory back into variable size pool. */ |
void cyg_mempool_var_free(cyg_handle_t varpool, void *p); |
|
/* Returns true if there are any threads waiting for memory in the |
given memory pool. */ |
cyg_bool_t cyg_mempool_var_waiting(cyg_handle_t varpool); |
|
typedef struct { |
cyg_int32 totalmem; |
cyg_int32 freemem; |
void *base; |
cyg_int32 size; |
cyg_int32 blocksize; |
cyg_int32 maxfree; // The largest free block |
} cyg_mempool_info; |
|
/* Puts information about a variable memory pool into the structure |
provided. */ |
void cyg_mempool_var_get_info(cyg_handle_t varpool, cyg_mempool_info *info); |
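 |
/* Purely illustrative sketch: typical use of the variable size pool |
   functions from C, assuming this header has been pulled in via |
   <cyg/kernel/kapi.h>. The buffer size and function name here are |
   only examples. */ |
#if 0 |
static cyg_uint8 example_pool_memory[4096]; |
static cyg_mempool_var example_pool_data; |
static cyg_handle_t example_pool_handle; |
 |
static void example_use_var_pool(void) |
{ |
    void *p; |
 |
    cyg_mempool_var_create(example_pool_memory, sizeof(example_pool_memory), |
                           &example_pool_handle, &example_pool_data); |
 |
    p = cyg_mempool_var_try_alloc(example_pool_handle, 100); |
    if (p != NULL) |
        cyg_mempool_var_free(example_pool_handle, p); |
 |
    cyg_mempool_var_delete(example_pool_handle); |
} |
#endif |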
|
/* Create a fixed size memory pool */ |
void cyg_mempool_fix_create( |
void *base, // base of memory to use for pool |
cyg_int32 size, // size of memory in bytes |
cyg_int32 blocksize, // size of allocation in bytes |
cyg_handle_t *handle, // handle of memory pool |
cyg_mempool_fix *fix // space to put pool structure in |
); |
|
/* Delete fixed size memory pool */ |
void cyg_mempool_fix_delete(cyg_handle_t fixpool); |
|
#ifdef CYGSEM_MEMALLOC_ALLOCATOR_FIXED_THREADAWARE |
/* Allocates a block. This waits if the memory is not |
currently available. */ |
void *cyg_mempool_fix_alloc(cyg_handle_t fixpool); |
|
# ifdef CYGFUN_KERNEL_THREADS_TIMER |
|
/* Allocates a block. This waits until abstime, if the memory |
is not already available. NULL is returned if no memory is |
available. */ |
void *cyg_mempool_fix_timed_alloc( |
cyg_handle_t fixpool, |
cyg_tick_count_t abstime); |
|
# endif |
#endif |
|
/* Allocates a block. NULL is returned if no memory is available. */ |
void *cyg_mempool_fix_try_alloc(cyg_handle_t fixpool); |
|
/* Frees memory back into fixed size pool. */ |
void cyg_mempool_fix_free(cyg_handle_t fixpool, void *p); |
|
/* Returns true if there are any threads waiting for memory in the |
given memory pool. */ |
cyg_bool_t cyg_mempool_fix_waiting(cyg_handle_t fixpool); |
|
/* Puts information about a fixed memory pool into the structure |
provided. */ |
void cyg_mempool_fix_get_info(cyg_handle_t fixpool, cyg_mempool_info *info); |
|
|
|
#endif /* ifndef CYGONCE_MEMALLOC_KAPI_H */ |
/* EOF kapi.h */ |
/common/v2_0/include/mempoolt.inl
0,0 → 1,393
#ifndef CYGONCE_KERNEL_MEMPOOLT_INL |
#define CYGONCE_KERNEL_MEMPOOLT_INL |
|
//========================================================================== |
// |
// mempoolt.inl |
// |
// Mempoolt (Memory pool template) class declarations |
// |
//========================================================================== |
//####ECOSGPLCOPYRIGHTBEGIN#### |
// ------------------------------------------- |
// This file is part of eCos, the Embedded Configurable Operating System. |
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc. |
// |
// eCos is free software; you can redistribute it and/or modify it under |
// the terms of the GNU General Public License as published by the Free |
// Software Foundation; either version 2 or (at your option) any later version. |
// |
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY |
// WARRANTY; without even the implied warranty of MERCHANTABILITY or |
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
// for more details. |
// |
// You should have received a copy of the GNU General Public License along |
// with eCos; if not, write to the Free Software Foundation, Inc., |
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. |
// |
// As a special exception, if other files instantiate templates or use macros |
// or inline functions from this file, or you compile this file and link it |
// with other works to produce a work based on this file, this file does not |
// by itself cause the resulting work to be covered by the GNU General Public |
// License. However the source code for this file must still be made available |
// in accordance with section (3) of the GNU General Public License. |
// |
// This exception does not invalidate any other reasons why a work based on |
// this file might be covered by the GNU General Public License. |
// |
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc. |
// at http://sources.redhat.com/ecos/ecos-license/ |
// ------------------------------------------- |
//####ECOSGPLCOPYRIGHTEND#### |
//========================================================================== |
//#####DESCRIPTIONBEGIN#### |
// |
// Author(s): hmt |
// Contributors: hmt |
// Date: 1998-02-10 |
// Purpose: Define Mempoolt class interface |
|
// Description: The class defined here provides the APIs for thread-safe, |
// kernel-savvy memory managers; make a class with the |
// underlying allocator as the template parameter. |
// Usage: #include <cyg/kernel/mempoolt.hxx> |
// |
// |
//####DESCRIPTIONEND#### |
// |
//========================================================================== |
|
#include <cyg/kernel/thread.inl> // implementation eg. Cyg_Thread::self(); |
#include <cyg/kernel/sched.inl> // implementation eg. Cyg_Scheduler::lock(); |
|
// ------------------------------------------------------------------------- |
// Constructor; we _require_ these arguments and just pass them through to |
// the implementation memory pool in use. |
template <class T> |
Cyg_Mempoolt<T>::Cyg_Mempoolt( |
cyg_uint8 *base, |
cyg_int32 size, |
CYG_ADDRWORD arg_thru) // Constructor |
: pool( base, size, arg_thru ) |
{ |
} |
|
|
template <class T> |
Cyg_Mempoolt<T>::~Cyg_Mempoolt() // destructor |
{ |
// Prevent preemption |
Cyg_Scheduler::lock(); |
|
while ( ! queue.empty() ) { |
Cyg_Thread *thread = queue.dequeue(); |
thread->set_wake_reason( Cyg_Thread::DESTRUCT ); |
thread->wake(); |
} |
|
// Unlock the scheduler and maybe switch threads |
Cyg_Scheduler::unlock(); |
} |
|
// ------------------------------------------------------------------------- |
// get some memory; wait if none available |
template <class T> |
inline cyg_uint8 * |
Cyg_Mempoolt<T>::alloc( cyg_int32 size ) |
{ |
CYG_REPORT_FUNCTION(); |
|
Cyg_Thread *self = Cyg_Thread::self(); |
|
// Prevent preemption |
Cyg_Scheduler::lock(); |
CYG_ASSERTCLASS( this, "Bad this pointer"); |
|
// Loop while we got no memory, sleeping each time around the |
// loop. This copes with the possibility of a higher priority thread |
// grabbing the freed storage between the wakeup in free() and this |
// thread actually starting. |
cyg_uint8 *ret; |
cyg_bool result = true; |
while( result && (NULL == (ret = pool.alloc( size ))) ) { |
self->set_sleep_reason( Cyg_Thread::WAIT ); |
self->sleep(); |
queue.enqueue( self ); |
|
CYG_ASSERT( 1 == Cyg_Scheduler::get_sched_lock(), |
"Called with non-zero scheduler lock"); |
|
// Unlock scheduler and allow other threads to run |
Cyg_Scheduler::unlock(); |
Cyg_Scheduler::lock(); |
|
CYG_ASSERTCLASS( this, "Bad this pointer"); |
|
switch( self->get_wake_reason() ) |
{ |
case Cyg_Thread::DESTRUCT: |
case Cyg_Thread::BREAK: |
result = false; |
break; |
|
case Cyg_Thread::EXIT: |
self->exit(); |
break; |
|
default: |
break; |
} |
} |
CYG_ASSERTCLASS( this, "Bad this pointer"); |
|
if ( ! result ) |
ret = NULL; |
|
// Unlock the scheduler and maybe switch threads |
Cyg_Scheduler::unlock(); |
CYG_REPORT_RETVAL( ret ); |
return ret; |
} |
|
#ifdef CYGFUN_KERNEL_THREADS_TIMER |
// ------------------------------------------------------------------------- |
// get some memory with a timeout |
template <class T> |
inline cyg_uint8 * |
Cyg_Mempoolt<T>::alloc( cyg_int32 size, cyg_tick_count abs_timeout ) |
{ |
CYG_REPORT_FUNCTION(); |
|
Cyg_Thread *self = Cyg_Thread::self(); |
|
// Prevent preemption |
Cyg_Scheduler::lock(); |
CYG_ASSERTCLASS( this, "Bad this pointer"); |
|
// Loop while we got no memory, sleeping each time around the |
// loop. This copes with the possibility of a higher priority thread |
// grabbing the freed storage between the wakeup in free() and this |
// thread actually starting. |
cyg_uint8 *ret; |
cyg_bool result = true; |
// Set the timer _once_ outside the loop. |
self->set_timer( abs_timeout, Cyg_Thread::TIMEOUT ); |
|
// If the timeout is in the past, the wake reason will have been |
// set to something other than NONE already. Set the result false |
// to force an immediate return. |
|
if( self->get_wake_reason() != Cyg_Thread::NONE ) |
result = false; |
|
while( result && (NULL == (ret = pool.alloc( size ))) ) { |
self->set_sleep_reason( Cyg_Thread::TIMEOUT ); |
self->sleep(); |
queue.enqueue( self ); |
|
CYG_ASSERT( 1 == Cyg_Scheduler::get_sched_lock(), |
"Called with non-zero scheduler lock"); |
|
// Unlock scheduler and allow other threads to run |
Cyg_Scheduler::unlock(); |
Cyg_Scheduler::lock(); |
|
CYG_ASSERTCLASS( this, "Bad this pointer"); |
switch( self->get_wake_reason() ) |
{ |
case Cyg_Thread::TIMEOUT: |
result = false; |
break; |
|
case Cyg_Thread::DESTRUCT: |
case Cyg_Thread::BREAK: |
result = false; |
break; |
|
case Cyg_Thread::EXIT: |
self->exit(); |
break; |
|
default: |
break; |
} |
} |
|
CYG_ASSERTCLASS( this, "Bad this pointer"); |
|
if ( ! result ) |
ret = NULL; |
|
// clear the timer; if it actually fired, no worries. |
self->clear_timer(); |
|
// Unlock the scheduler and maybe switch threads |
Cyg_Scheduler::unlock(); |
CYG_REPORT_RETVAL( ret ); |
return ret; |
} |
#endif |
|
// ------------------------------------------------------------------------- |
// get some memory, return NULL if none available |
template <class T> |
inline cyg_uint8 * |
Cyg_Mempoolt<T>::try_alloc( cyg_int32 size ) |
{ |
CYG_REPORT_FUNCTION(); |
|
// Prevent preemption |
Cyg_Scheduler::lock(); |
CYG_ASSERTCLASS( this, "Bad this pointer"); |
|
cyg_uint8 *ret = pool.alloc( size ); |
|
CYG_ASSERTCLASS( this, "Bad this pointer"); |
|
// Unlock the scheduler and maybe switch threads |
Cyg_Scheduler::unlock(); |
CYG_REPORT_RETVAL( ret ); |
return ret; |
} |
|
|
// ------------------------------------------------------------------------- |
// free the memory back to the pool |
template <class T> |
cyg_bool |
Cyg_Mempoolt<T>::free( cyg_uint8 *p, cyg_int32 size ) |
{ |
// Prevent preemption |
Cyg_Scheduler::lock(); |
CYG_ASSERTCLASS( this, "Bad this pointer"); |
|
cyg_int32 ret = pool.free( p, size ); |
|
CYG_ASSERTCLASS( this, "Bad this pointer"); |
|
while ( ret && !queue.empty() ) { |
// we succeeded and there are people waiting |
Cyg_Thread *thread = queue.dequeue(); |
|
CYG_ASSERTCLASS( thread, "Bad thread pointer"); |
|
// we wake them all up (ie. broadcast) to cope with variable block |
// allocators freeing a big block when lots of small allocs wait. |
thread->set_wake_reason( Cyg_Thread::DONE ); |
thread->wake(); |
// we cannot yield here; if a higher prio thread can't satisfy its |
// request it would re-queue and we would loop forever |
} |
// Unlock the scheduler and maybe switch threads |
Cyg_Scheduler::unlock(); |
return ret; |
} |
|
// ------------------------------------------------------------------------- |
// if applicable: return -1 if not fixed size |
template <class T> |
inline cyg_int32 |
Cyg_Mempoolt<T>::get_blocksize() |
{ |
// there should not be any atomicity issues here |
return pool.get_blocksize(); |
} |
|
// ------------------------------------------------------------------------- |
// these two are obvious and generic, but need atomicity protection (maybe) |
template <class T> |
inline cyg_int32 |
Cyg_Mempoolt<T>::get_totalmem() |
{ |
// Prevent preemption |
Cyg_Scheduler::lock(); |
CYG_ASSERTCLASS( this, "Bad this pointer"); |
|
cyg_int32 ret = pool.get_totalmem(); |
|
// Unlock the scheduler and maybe switch threads |
Cyg_Scheduler::unlock(); |
return ret; |
} |
|
template <class T> |
inline cyg_int32 |
Cyg_Mempoolt<T>::get_freemem() |
{ |
// Prevent preemption |
Cyg_Scheduler::lock(); |
CYG_ASSERTCLASS( this, "Bad this pointer"); |
|
cyg_int32 ret = pool.get_freemem(); |
|
// Unlock the scheduler and maybe switch threads |
Cyg_Scheduler::unlock(); |
return ret; |
} |
|
// ------------------------------------------------------------------------- |
// get information about the construction parameters for external |
// freeing after the destruction of the holding object |
template <class T> |
inline void |
Cyg_Mempoolt<T>::get_arena( |
cyg_uint8 * &base, cyg_int32 &size, CYG_ADDRWORD &arg_thru ) |
{ |
// Prevent preemption |
Cyg_Scheduler::lock(); |
CYG_ASSERTCLASS( this, "Bad this pointer"); |
|
pool.get_arena( base, size, arg_thru ); |
|
// Unlock the scheduler and maybe switch threads |
Cyg_Scheduler::unlock(); |
} |
|
// ------------------------------------------------------------------------- |
// Return the size of the memory allocation (previously returned |
// by alloc() or try_alloc() ) at ptr. Returns -1 if not found |
template <class T> |
cyg_int32 |
Cyg_Mempoolt<T>::get_allocation_size( cyg_uint8 *ptr ) |
{ |
cyg_int32 ret; |
|
// Prevent preemption |
Cyg_Scheduler::lock(); |
CYG_ASSERTCLASS( this, "Bad this pointer"); |
|
ret = pool.get_allocation_size( ptr ); |
|
// Unlock the scheduler and maybe switch threads |
Cyg_Scheduler::unlock(); |
|
return ret; |
} |
|
// ------------------------------------------------------------------------- |
// debugging/assert function |
|
#ifdef CYGDBG_USE_ASSERTS |
|
template <class T> |
inline cyg_bool |
Cyg_Mempoolt<T>::check_this(cyg_assert_class_zeal zeal) const |
{ |
CYG_REPORT_FUNCTION(); |
|
if ( Cyg_Thread::DESTRUCT == Cyg_Thread::self()->get_wake_reason() ) |
// then the whole thing is invalid, and we know it. |
// so return OK, since this check should NOT make an error. |
return true; |
|
// check that we have a non-NULL pointer first |
if( this == NULL ) return false; |
|
return true; |
} |
#endif |
|
// ------------------------------------------------------------------------- |
#endif // ifndef CYGONCE_KERNEL_MEMPOOLT_INL |
// EOF mempoolt.inl |
/common/v2_0/doc/notes.txt
0,0 → 1,361
Memory allocation package - Implementation Notes |
------------------------------------------------ |
|
|
|
Made with loving care by Jonathan Larmour (jlarmour@redhat.com) |
Initial version: 2000-07-03 |
Last updated: 2000-07-03 |
|
|
|
Meta |
---- |
|
This document describes some interesting bits and pieces about the memory |
allocation package - CYGPKG_MEMALLOC. It is intended as a guide to |
developers, not users. This isn't (yet) in formal documentation format, |
and probably should be. |
|
|
Philosophy |
---------- |
|
The object of this package is to provide everything required for dynamic |
memory allocation, some sample implementations, the ability to plug in |
more implementations, and a standard malloc() style interface to those |
allocators. |
|
The classic Unix-style view of a heap is using brk()/sbrk() to extend the |
data segment of the application. However this is inappropriate for an |
embedded system because: |
|
- you may not have an MMU, which means memory may be disjoint, thus breaking |
this paradigm |
|
- in a single process system there is no need to play tricks since there |
is only the one address space and therefore heap area to use. |
|
Therefore we instead base the heap on the idea of memory pools whose |
total size is fixed and known in advance. |
|
|
Overview |
-------- |
|
Most of the infrastructure this package provides is geared towards |
supporting the ISO standard malloc() family of functions. A "standard" |
eCos allocator should be able to plug in to this infrastructure and |
transparently work. The interface is based on simple use of C++ - nothing |
too advanced. |
|
The allocator to use is dictated by the |
CYGBLD_MEMALLOC_MALLOC_IMPLEMENTATION_HEADER option. Choosing the |
allocator can be done by ensuring the CDL for the new allocator |
has a "requires" that sets the location of the header to use when that |
allocator is enabled. New allocators should default to disabled, so they |
don't have to worry about which one is the default, thus causing CDL |
conflicts. When enabled the new allocator should also claim to implement |
CYGINT_MEMALLOC_MALLOC_ALLOCATORS. |
|
The implementation header file that is set must have a special property |
though - it may be included with __MALLOC_IMPL_WANTED defined. If this |
is the case, then this means the infrastructure wants to find out the |
name of the class that is implemented in this header file. This is done |
by setting CYGCLS_MEMALLOC_MALLOC_IMPL. If __MALLOC_IMPL_WANTED is defined |
then no non-preprocessor output should be generated, as this will be included |
in a TCL script in due course. An existing example from this package would |
be: |
|
#define CYGCLS_MEMALLOC_MALLOC_IMPL Cyg_Mempool_dlmalloc |
|
// if the implementation is all that's required, don't output anything else |
#ifndef __MALLOC_IMPL_WANTED |
|
class Cyg_Mempool_dlmalloc |
{ |
[etc.] |
|
To meet the expectations of malloc, the class should have the following |
public interfaces (for details it is best to look at some of the |
examples in this package): |
|
- a constructor taking arguments of the form: |
|
ALLOCATORNAME( cyg_uint8 *base, cyg_int32 size ); |
|
If you want to be able to support other arguments for when accessing |
the allocator directly you can add them, but give them default values, |
or use overloading |
|
- a destructor |
|
- a try_alloc() function that returns new memory, or NULL on failure: |
|
cyg_uint8 * |
try_alloc( cyg_int32 size ); |
|
- a free() function taking one pointer argument that returns a boolean |
for success or failure: |
|
cyg_bool |
free( cyg_uint8 *ptr ); |
|
Again, extra arguments can be added, as long as they are defaulted. |
|
|
- resize_alloc() which is designed purely to support realloc(). It |
has the prototype: |
cyg_uint8 * |
resize_alloc( cyg_uint8 *alloc_ptr, cyg_int32 newsize, |
cyg_int32 *oldsize ); |
|
The idea is that if alloc_ptr can be adjusted to newsize, then it will |
be. If oldsize is non-NULL the old size (possibly rounded) is placed |
there. However what this *doesn't* do (unlike the real realloc()) is |
fall back to doing a new malloc(). All it does is try to do tricks |
inside the allocator. It's up to higher layers to call malloc(). |
|
- get_status() allows the retrieval of info from the allocator. The idea |
is to pass in the bitmask OR of the flags defined in common.hxx, which |
selects what information is requested. If the request is supported by |
the allocator, the appropriate structure fields are filled in; otherwise |
unsupported fields will be left with the value -1. (The constructor for |
Cyg_Mempool_Status initializes them to -1). If you want to reinitialize |
the structure and deliberately lose the data in a Cyg_Mempool_Status |
object, you need to invoke the init() method of the status object to |
reinitialize it. |
|
void |
get_status( cyg_mempool_status_flag_t flags, Cyg_Mempool_Status &status ); |
|
A subset of the available statistics is exported via mallinfo(). |
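 |
As a purely illustrative sketch (the class name Cyg_Mempool_example and |
its members are invented here; consult the real allocators in this package |
for working code), a class satisfying the above interface might be |
declared like this: |
 |
  class Cyg_Mempool_example |
  { |
  protected: |
      cyg_uint8 *pool_base; |
      cyg_int32 pool_size; |
  public: |
      // set up the allocator to manage the supplied memory |
      Cyg_Mempool_example( cyg_uint8 *base, cyg_int32 size ) |
          : pool_base( base ), pool_size( size ) { } |
 |
      ~Cyg_Mempool_example() { } |
 |
      // return a block of at least "size" bytes, or NULL on failure |
      cyg_uint8 *try_alloc( cyg_int32 size ); |
 |
      // return the block at "ptr" to the pool; true on success |
      cyg_bool free( cyg_uint8 *ptr ); |
 |
      // attempt to resize "alloc_ptr" in place; this does NOT fall back |
      // to a fresh allocation - that is left to the realloc() layer |
      cyg_uint8 *resize_alloc( cyg_uint8 *alloc_ptr, cyg_int32 newsize, |
                               cyg_int32 *oldsize ); |
 |
      // fill in the fields of "status" selected by "flags" |
      void get_status( cyg_mempool_status_flag_t flags, |
                       Cyg_Mempool_Status &status ); |
  }; |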
|
|
Cyg_Mempolt2 template |
--------------------- |
|
If using the eCos kernel with multiple threads accessing the allocators, |
then obviously you need to be sure that the allocator is accessed in a |
thread-safe way. The malloc() wrappers do not make any assumptions |
about this. One helpful approach currently used by all the allocators |
in this package is to (optionally) use a template (Cyg_Mempolt2) that |
provides extra functions like a blocking alloc() that waits for memory |
to be freed before returning, and a timed variant. Other calls are |
generally passed straight through, but with the kernel scheduler locked |
to prevent pre-emption. |
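 |
For illustration only - the following is not the template's actual code, |
just a sketch of the pass-through idea, using the dlmalloc pool class from |
this package and the kernel's Cyg_Scheduler primitives (the wrapper |
function name is invented): |
 |
  #include <cyg/kernel/sched.hxx>         // Cyg_Scheduler |
  #include <cyg/kernel/sched.inl> |
  #include <cyg/memalloc/dlmalloc.hxx>    // Cyg_Mempool_dlmalloc |
 |
  cyg_uint8 * |
  locked_try_alloc( Cyg_Mempool_dlmalloc &pool, cyg_int32 size ) |
  { |
      Cyg_Scheduler::lock();                    // prevent pre-emption |
      cyg_uint8 *ptr = pool.try_alloc( size );  // pass straight through |
      Cyg_Scheduler::unlock(); |
      return ptr; |
  } |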
|
You don't have to use this facility to fit into the infrastructure though, |
and thread safety is not a prerequisite for the rest of the infrastructure. |
Indeed, certain allocators will be able to do their locking at a finer |
granularity than simply locking the scheduler around every call. |
|
The odd name is because of an original desire to keep 8.3 filenames, which |
was reflected in the class name to make it correspond to the filename. |
There used to be an alternative Cyg_Mempoolt template, but that has fallen |
into disuse and is no longer supported. |
|
|
Automatic heap sizing |
--------------------- |
|
This package contains infrastructure to allow the automatic definition |
of memory pools that occupy all available memory. In order to do this |
you must use the eCos Memory Layout Tool to define a user-defined section. |
These sections *must* have the prefix "heap", for example "heap1", "heap2", |
"heapdram" etc. otherwise they will be ignored. |
|
The user-defined section may be of fixed size, or of unknown size. If it |
has unknown size then its size is dictated by either the location of |
the next following section with an absolute address, or if there are |
no following sections, the end of the memory region. The latter should |
be the norm. |
|
If no user-defined sections starting with "heap" are found, a fallback |
static array (i.e. allocated in the BSS) will be used, whose size can |
be set in the configuration. |
|
It is also possible to define multiple heap sections. This is |
necessary when you have multiple disjoint memory regions and no MMU |
to join them up into one contiguous memory space, in which case |
a special wrapper allocator object is automatically used. This object |
is an instantiation of the Cyg_Mempool_Joined template class, |
defined in memjoin.hxx. It is instantiated with a list of every heap |
section, which it then records. Its sole purpose is to act as a |
go-between to the underlying implementations: it uses pointer addresses |
to determine which memory pool a pointer was allocated from, and |
therefore which memory pool instantiation to use. |
|
Obviously using the Cyg_Mempool_Joined class adds overhead, but if that |
is a problem, you simply shouldn't define multiple disjoint |
heaps! |
|
|
Run-time heap sizing |
-------------------- |
|
As a special case, some platforms support the addition of memory in the |
field, in which case it is desirable to make this extra memory available |
to malloc automatically. The mechanism for this is a macro defined by the |
HAL, specifically in hal_intr.h: |
|
HAL_MEM_REAL_REGION_TOP( cyg_uint8 *regionend ) |
|
This macro takes the address of the "normal" end of the region. This |
corresponds with the size of the memory region in the MLT, and would |
be the end of the "unexpanded" region. This makes sense because the memory |
region must be determined by the "worst case" of what memory will be |
installed. |
|
This macro then returns a pointer which is the *real* region end, |
as determined by the HAL at run-time. |
|
By having the macro in this form, it is therefore flexible enough to |
work with multiple memory regions. |
|
There is an example in the ARM HAL - specifically the EBSA285. |
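 |
As a sketch only - hal_probe_dram_top() below is a hypothetical helper |
invented for illustration; a real HAL determines the usable top of memory |
in its own platform-specific way: |
 |
  // hal_intr.h (hypothetical platform) |
  externC cyg_uint8 *hal_probe_dram_top( cyg_uint8 *nominal_end ); |
 |
  #define HAL_MEM_REAL_REGION_TOP( _regionend_ )  hal_probe_dram_top( _regionend_ ) |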
|
|
How it works |
------------ |
|
The MLT outputs macros providing information about user-defined sections |
into a header file, available via system.h with the CYGHWR_MEMORY_LAYOUT_H |
define. When the user-defined section has no known size, it determines |
the size correctly relative to the end of the region, and sets the SIZE |
macro accordingly. |
|
A custom build rule preprocesses src/heapgen.cpp to generate heapgeninc.tcl. |
This contains TCL "set"s to allow access to the values of various |
bits of configuration data. heapgen.cpp also includes the malloc |
implementation header (as defined by |
CYGBLD_MEMALLOC_MALLOC_IMPLEMENTATION_HEADER) with __MALLOC_IMPL_WANTED |
defined. This tells the header that it should define the macro |
CYGCLS_MEMALLOC_MALLOC_IMPL to be the name of the actual class. This |
is then also exported with a TCL "set". |
|
src/heapgen.tcl then includes heapgeninc.tcl which gives it access to |
the configuration values. heapgen.tcl then searches the LDI file for |
any sections beginning with "heap" (with possibly leading underscores). |
It records each one it finds and then generates a file heaps.cxx in the |
build tree to instantiate a memory pool object of the required class for |
each heap. It also generates a list containing the addresses of each |
pool that was instantiated. A header file heaps.hxx is then generated |
that exports the number of pools, a reference to this list array and |
includes the implementation header. |
|
Custom build rules then copy the heaps.hxx into the include/pkgconf |
subdir of the install tree, and compile the heaps.cxx. |
|
To access the generated information, you must #include <pkgconf/heaps.hxx>. |
The number of heaps is given by the CYGMEM_HEAP_COUNT macro. The type of |
the pools is given by CYGCLS_MEMALLOC_MALLOC_IMPL, and the array of |
instantiated pools is available with cygmem_memalloc_heaps. For example, |
here is a sample heaps.hxx: |
|
#ifndef CYGONCE_PKGCONF_HEAPS_HXX |
#define CYGONCE_PKGCONF_HEAPS_HXX |
/* <pkgconf/heaps.hxx> */ |
|
/* This is a generated file - do not edit! */ |
|
#define CYGMEM_HEAP_COUNT 1 |
#include <cyg/memalloc/dlmalloc.hxx> |
|
extern Cyg_Mempool_dlmalloc *cygmem_memalloc_heaps[ 2 ]; |
|
#endif |
/* EOF <pkgconf/heaps.hxx> */ |
|
The array has size 2 because it consists of one pool, plus a terminating |
NULL. |
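 |
As a hedged usage sketch (the function name and the allocation size here |
are arbitrary), code can walk the generated pools like this: |
 |
  #include <cyg/infra/cyg_type.h>   // cyg_uint8 |
  #include <pkgconf/heaps.hxx>      // CYGMEM_HEAP_COUNT, cygmem_memalloc_heaps |
 |
  void |
  touch_every_heap( void ) |
  { |
      // cygmem_memalloc_heaps[] holds CYGMEM_HEAP_COUNT pools plus a |
      // terminating NULL entry, as described above |
      for ( int i = 0; i < CYGMEM_HEAP_COUNT; i++ ) { |
          CYGCLS_MEMALLOC_MALLOC_IMPL *pool = cygmem_memalloc_heaps[ i ]; |
          cyg_uint8 *p = pool->try_alloc( 32 );  // allocate from this pool |
          if ( p != NULL ) |
              pool->free( p ); |
      } |
  } |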
|
In future, the addition of cdl_get(), available from TCL scripts contained |
within the CDL scripts, will remove the need for a lot of this magic. |
|
|
dlmalloc |
-------- |
|
A port of dlmalloc is included. Far too many changes were required to make |
it fit within the scheme above, so there was no point |
trying to preserve the layout to make it easier to merge in new versions. |
However dlmalloc rarely changes any more - it is very stable. |
|
The version of dlmalloc used was a mixture of 2.6.6 and the dlmalloc from |
newlib (based on 2.6.4). In the event, most of the patches merged were |
of no consequence to the final version. |
|
For reference, the various versions examined are included in the |
doc/dlmalloc subdirectory: dlmalloc-2.6.4.c, dlmalloc-2.6.6.c, |
dlmalloc-newlib.c and dlmalloc-merged.c (which is the result of merging |
the changes between 2.6.4 and the newlib version into 2.6.6). Note it |
was not tested at that point. |
|
|
Remaining issues |
---------------- |
|
You should be allowed to have different allocators for different memory |
regions. The biggest hurdle here is host tools support to express this. |
|
Currently the "joined" allocator wrapper simply treats each memory pool |
as an equal. It doesn't understand that some memory pools may be faster |
than others, and cannot make decisions about which pools (and therefore |
regions and therefore possibly speeds of memory) to use on the basis |
of allocation size. This should be (configurably) possible. |
|
|
History |
------- |
|
|
A long, long time ago, in a galaxy far far away.... the situation used to |
be that the kernel package contained the fixed block and simple variable |
block memory allocators, and those were the only memory allocator |
implementations. This was all a bit incongruous as it meant that any code |
wanting dynamic memory allocation had to include the whole kernel, even |
though the dependencies could be encapsulated. This was particularly silly |
because the implementation of malloc() (etc.) in the C library didn't use |
any of the features that *did* depend on the kernel, such as timed waits |
while allocating memory, etc. |
|
The C library malloc was pretty naff then too. It used a static buffer |
as the basis of the memory pool, with a hard-coded size, set in the |
configuration. You couldn't make it fit into all of memory. |
|
Jifl |
2000-07-03 |
|
//####ECOSGPLCOPYRIGHTBEGIN#### |
// ------------------------------------------- |
// This file is part of eCos, the Embedded Configurable Operating System. |
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc. |
// |
// eCos is free software; you can redistribute it and/or modify it under |
// the terms of the GNU General Public License as published by the Free |
// Software Foundation; either version 2 or (at your option) any later version. |
// |
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY |
// WARRANTY; without even the implied warranty of MERCHANTABILITY or |
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
// for more details. |
// |
// You should have received a copy of the GNU General Public License along |
// with eCos; if not, write to the Free Software Foundation, Inc., |
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. |
// |
// As a special exception, if other files instantiate templates or use macros |
// or inline functions from this file, or you compile this file and link it |
// with other works to produce a work based on this file, this file does not |
// by itself cause the resulting work to be covered by the GNU General Public |
// License. However the source code for this file must still be made available |
// in accordance with section (3) of the GNU General Public License. |
// |
// This exception does not invalidate any other reasons why a work based on |
// this file might be covered by the GNU General Public License. |
// |
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc. |
// at http://sources.redhat.com/ecos/ecos-license/ |
// ------------------------------------------- |
//####ECOSGPLCOPYRIGHTEND#### |
/common/v2_0/doc/dlmalloc/dlmalloc-2.6.6.c
0,0 → 1,3276
/* ---------- To make a malloc.h, start cutting here ------------ */ |
|
/* |
A version of malloc/free/realloc written by Doug Lea and released to the |
public domain. Send questions/comments/complaints/performance data |
to dl@cs.oswego.edu |
|
* VERSION 2.6.6 Sun Mar 5 19:10:03 2000 Doug Lea (dl at gee) |
|
Note: There may be an updated version of this malloc obtainable at |
ftp://g.oswego.edu/pub/misc/malloc.c |
Check before installing! |
|
* Why use this malloc? |
|
This is not the fastest, most space-conserving, most portable, or |
most tunable malloc ever written. However it is among the fastest |
while also being among the most space-conserving, portable and tunable. |
Consistent balance across these factors results in a good general-purpose |
allocator. For a high-level description, see |
http://g.oswego.edu/dl/html/malloc.html |
|
* Synopsis of public routines |
|
(Much fuller descriptions are contained in the program documentation below.) |
|
malloc(size_t n); |
Return a pointer to a newly allocated chunk of at least n bytes, or null |
if no space is available. |
free(Void_t* p); |
Release the chunk of memory pointed to by p, or no effect if p is null. |
realloc(Void_t* p, size_t n); |
Return a pointer to a chunk of size n that contains the same data |
as does chunk p up to the minimum of (n, p's size) bytes, or null |
if no space is available. The returned pointer may or may not be |
the same as p. If p is null, equivalent to malloc. Unless the |
#define REALLOC_ZERO_BYTES_FREES below is set, realloc with a |
size argument of zero (re)allocates a minimum-sized chunk. |
memalign(size_t alignment, size_t n); |
Return a pointer to a newly allocated chunk of n bytes, aligned |
in accord with the alignment argument, which must be a power of |
two. |
valloc(size_t n); |
Equivalent to memalign(pagesize, n), where pagesize is the page |
size of the system (or as near to this as can be figured out from |
all the includes/defines below.) |
pvalloc(size_t n); |
Equivalent to valloc(minimum-page-that-holds(n)), that is, |
round up n to nearest pagesize. |
calloc(size_t unit, size_t quantity); |
Returns a pointer to quantity * unit bytes, with all locations |
set to zero. |
cfree(Void_t* p); |
Equivalent to free(p). |
malloc_trim(size_t pad); |
Release all but pad bytes of freed top-most memory back |
to the system. Return 1 if successful, else 0. |
malloc_usable_size(Void_t* p); |
Report the number of usable allocated bytes associated with allocated |
chunk p. This may or may not report more bytes than were requested, |
due to alignment and minimum size constraints. |
malloc_stats(); |
Prints brief summary statistics on stderr. |
mallinfo() |
Returns (by copy) a struct containing various summary statistics. |
mallopt(int parameter_number, int parameter_value) |
Changes one of the tunable parameters described below. Returns |
1 if successful in changing the parameter, else 0. |
|
* Vital statistics: |
|
Alignment: 8-byte |
8 byte alignment is currently hardwired into the design. This |
seems to suffice for all current machines and C compilers. |
|
Assumed pointer representation: 4 or 8 bytes |
Code for 8-byte pointers is untested by me but has worked |
reliably by Wolfram Gloger, who contributed most of the |
changes supporting this. |
|
Assumed size_t representation: 4 or 8 bytes |
Note that size_t is allowed to be 4 bytes even if pointers are 8. |
|
Minimum overhead per allocated chunk: 4 or 8 bytes |
Each malloced chunk has a hidden overhead of 4 bytes holding size |
and status information. |
|
Minimum allocated size: 4-byte ptrs: 16 bytes (including 4 overhead) |
8-byte ptrs: 24/32 bytes (including, 4/8 overhead) |
|
When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte |
ptrs but 4 byte size) or 24 (for 8/8) additional bytes are |
needed; 4 (8) for a trailing size field |
and 8 (16) bytes for free list pointers. Thus, the minimum |
allocatable size is 16/24/32 bytes. |
|
Even a request for zero bytes (i.e., malloc(0)) returns a |
pointer to something of the minimum allocatable size. |
|
Maximum allocated size: 4-byte size_t: 2^31 - 8 bytes |
8-byte size_t: 2^63 - 16 bytes |
|
It is assumed that (possibly signed) size_t bit values suffice to |
represent chunk sizes. `Possibly signed' is due to the fact |
that `size_t' may be defined on a system as either a signed or |
an unsigned type. To be conservative, values that would appear |
as negative numbers are avoided. |
Requests for sizes with a negative sign bit when the request |
size is treated as a long will return null. |
|
Maximum overhead wastage per allocated chunk: normally 15 bytes |
|
Alignment demands, plus the minimum allocatable size restriction |
make the normal worst-case wastage 15 bytes (i.e., up to 15 |
more bytes will be allocated than were requested in malloc), with |
two exceptions: |
1. Because requests for zero bytes allocate non-zero space, |
the worst case wastage for a request of zero bytes is 24 bytes. |
2. For requests >= mmap_threshold that are serviced via |
mmap(), the worst case wastage is 8 bytes plus the remainder |
from a system page (the minimal mmap unit); typically 4096 bytes. |
|
* Limitations |
|
Here are some features that are NOT currently supported |
|
* No user-definable hooks for callbacks and the like. |
* No automated mechanism for fully checking that all accesses |
to malloced memory stay within their bounds. |
* No support for compaction. |
|
* Synopsis of compile-time options: |
|
People have reported using previous versions of this malloc on all |
versions of Unix, sometimes by tweaking some of the defines |
below. It has been tested most extensively on Solaris and |
Linux. It is also reported to work on WIN32 platforms. |
People have also reported adapting this malloc for use in |
stand-alone embedded systems. |
|
The implementation is in straight, hand-tuned ANSI C. Among other |
consequences, it uses a lot of macros. Because of this, to be at |
all usable, this code should be compiled using an optimizing compiler |
(for example gcc -O2) that can simplify expressions and control |
paths. |
|
__STD_C (default: derived from C compiler defines) |
Nonzero if using ANSI-standard C compiler, a C++ compiler, or |
a C compiler sufficiently close to ANSI to get away with it. |
DEBUG (default: NOT defined) |
Define to enable debugging. Adds fairly extensive assertion-based |
checking to help track down memory errors, but noticeably slows down |
execution. |
REALLOC_ZERO_BYTES_FREES (default: NOT defined) |
Define this if you think that realloc(p, 0) should be equivalent |
to free(p). Otherwise, since malloc returns a unique pointer for |
malloc(0), so does realloc(p, 0). |
HAVE_MEMCPY (default: defined) |
Define if you are not otherwise using ANSI STD C, but still |
have memcpy and memset in your C library and want to use them. |
Otherwise, simple internal versions are supplied. |
USE_MEMCPY (default: 1 if HAVE_MEMCPY is defined, 0 otherwise) |
Define as 1 if you want the C library versions of memset and |
memcpy called in realloc and calloc (otherwise macro versions are used). |
At least on some platforms, the simple macro versions usually |
outperform libc versions. |
HAVE_MMAP (default: defined as 1) |
Define to non-zero to optionally make malloc() use mmap() to |
allocate very large blocks. |
HAVE_MREMAP (default: defined as 0 unless Linux libc set) |
Define to non-zero to optionally make realloc() use mremap() to |
reallocate very large blocks. |
malloc_getpagesize (default: derived from system #includes) |
Either a constant or routine call returning the system page size. |
HAVE_USR_INCLUDE_MALLOC_H (default: NOT defined) |
Optionally define if you are on a system with a /usr/include/malloc.h |
that declares struct mallinfo. It is not at all necessary to |
define this even if you do, but will ensure consistency. |
INTERNAL_SIZE_T (default: size_t) |
Define to a 32-bit type (probably `unsigned int') if you are on a |
64-bit machine, yet do not want or need to allow malloc requests of |
greater than 2^31 to be handled. This saves space, especially for |
very small chunks. |
INTERNAL_LINUX_C_LIB (default: NOT defined) |
Defined only when compiled as part of Linux libc. |
Also note that there is some odd internal name-mangling via defines |
(for example, internally, `malloc' is named `mALLOc') needed |
when compiling in this case. These look funny but don't otherwise |
affect anything. |
WIN32 (default: undefined) |
Define this on MS win (95, nt) platforms to compile in sbrk emulation. |
LACKS_UNISTD_H (default: undefined if not WIN32) |
Define this if your system does not have a <unistd.h>. |
LACKS_SYS_PARAM_H (default: undefined if not WIN32) |
Define this if your system does not have a <sys/param.h>. |
MORECORE (default: sbrk) |
The name of the routine to call to obtain more memory from the system. |
MORECORE_FAILURE (default: -1) |
The value returned upon failure of MORECORE. |
MORECORE_CLEARS (default 1) |
True (1) if the routine mapped to MORECORE zeroes out memory (which |
holds for sbrk). |
DEFAULT_TRIM_THRESHOLD |
DEFAULT_TOP_PAD |
DEFAULT_MMAP_THRESHOLD |
DEFAULT_MMAP_MAX |
Default values of tunable parameters (described in detail below) |
controlling interaction with host system routines (sbrk, mmap, etc). |
These values may also be changed dynamically via mallopt(). The |
preset defaults are those that give best performance for typical |
programs/systems. |
USE_DL_PREFIX (default: undefined) |
Prefix all public routines with the string 'dl'. Useful to |
quickly avoid procedure declaration conflicts and linker symbol |
conflicts with existing memory allocation routines. |
|
|
*/ |
|
|
|
|
/* Preliminaries */ |
|
#ifndef __STD_C |
#ifdef __STDC__ |
#define __STD_C 1 |
#else |
#if __cplusplus |
#define __STD_C 1 |
#else |
#define __STD_C 0 |
#endif /*__cplusplus*/ |
#endif /*__STDC__*/ |
#endif /*__STD_C*/ |
|
#ifndef Void_t |
#if (__STD_C || defined(WIN32)) |
#define Void_t void |
#else |
#define Void_t char |
#endif |
#endif /*Void_t*/ |
|
#if __STD_C |
#include <stddef.h> /* for size_t */ |
#else |
#include <sys/types.h> |
#endif |
|
#ifdef __cplusplus |
extern "C" { |
#endif |
|
#include <stdio.h> /* needed for malloc_stats */ |
|
|
/* |
Compile-time options |
*/ |
|
|
/* |
Debugging: |
|
Because freed chunks may be overwritten with link fields, this |
malloc will often die when freed memory is overwritten by user |
programs. This can be very effective (albeit in an annoying way) |
in helping track down dangling pointers. |
|
If you compile with -DDEBUG, a number of assertion checks are |
enabled that will catch more memory errors. You probably won't be |
able to make much sense of the actual assertion errors, but they |
should help you locate incorrectly overwritten memory. The |
checking is fairly extensive, and will slow down execution |
noticeably. Calling malloc_stats or mallinfo with DEBUG set will |
attempt to check every non-mmapped allocated and free chunk in the |
course of computing the summaries. (By nature, mmapped regions |
cannot be checked very much automatically.) |
|
Setting DEBUG may also be helpful if you are trying to modify |
this code. The assertions in the check routines spell out in more |
detail the assumptions and invariants underlying the algorithms. |
|
*/ |
|
#if DEBUG |
#include <assert.h> |
#else |
#define assert(x) ((void)0) |
#endif |
|
|
/* |
INTERNAL_SIZE_T is the word-size used for internal bookkeeping |
of chunk sizes. On a 64-bit machine, you can reduce malloc |
overhead by defining INTERNAL_SIZE_T to be a 32 bit `unsigned int' |
at the expense of not being able to handle requests greater than |
2^31. This limitation is hardly ever a concern; you are encouraged |
to set this. However, the default version is the same as size_t. |
*/ |
|
#ifndef INTERNAL_SIZE_T |
#define INTERNAL_SIZE_T size_t |
#endif |
|
/* |
REALLOC_ZERO_BYTES_FREES should be set if a call to |
realloc with zero bytes should be the same as a call to free. |
Some people think it should. Otherwise, since this malloc |
returns a unique pointer for malloc(0), so does realloc(p, 0). |
*/ |
|
|
/* #define REALLOC_ZERO_BYTES_FREES */ |
|
|
/* |
WIN32 causes an emulation of sbrk to be compiled in |
mmap-based options are not currently supported in WIN32. |
*/ |
|
/* #define WIN32 */ |
#ifdef WIN32 |
#define MORECORE wsbrk |
#define HAVE_MMAP 0 |
|
#define LACKS_UNISTD_H |
#define LACKS_SYS_PARAM_H |
|
/* |
Include 'windows.h' to get the necessary declarations for the |
Microsoft Visual C++ data structures and routines used in the 'sbrk' |
emulation. |
|
Define WIN32_LEAN_AND_MEAN so that only the essential Microsoft |
Visual C++ header files are included. |
*/ |
#define WIN32_LEAN_AND_MEAN |
#include <windows.h> |
#endif |
|
|
/* |
HAVE_MEMCPY should be defined if you are not otherwise using |
ANSI STD C, but still have memcpy and memset in your C library |
and want to use them in calloc and realloc. Otherwise simple |
macro versions are defined here. |
|
USE_MEMCPY should be defined as 1 if you actually want to |
have memset and memcpy called. People report that the macro |
versions are often enough faster than libc versions on many |
systems that it is better to use them. |
|
*/ |
|
#define HAVE_MEMCPY |
|
#ifndef USE_MEMCPY |
#ifdef HAVE_MEMCPY |
#define USE_MEMCPY 1 |
#else |
#define USE_MEMCPY 0 |
#endif |
#endif |
|
#if (__STD_C || defined(HAVE_MEMCPY)) |
|
#if __STD_C |
void* memset(void*, int, size_t); |
void* memcpy(void*, const void*, size_t); |
#else |
#ifdef WIN32 |
// On Win32 platforms, 'memset()' and 'memcpy()' are already declared in |
// 'windows.h' |
#else |
Void_t* memset(); |
Void_t* memcpy(); |
#endif |
#endif |
#endif |
|
#if USE_MEMCPY |
|
/* The following macros are only invoked with (2n+1)-multiples of |
INTERNAL_SIZE_T units, with a positive integer n. This is exploited |
for fast inline execution when n is small. */ |
|
#define MALLOC_ZERO(charp, nbytes) \ |
do { \ |
INTERNAL_SIZE_T mzsz = (nbytes); \ |
if(mzsz <= 9*sizeof(mzsz)) { \ |
INTERNAL_SIZE_T* mz = (INTERNAL_SIZE_T*) (charp); \ |
if(mzsz >= 5*sizeof(mzsz)) { *mz++ = 0; \ |
*mz++ = 0; \ |
if(mzsz >= 7*sizeof(mzsz)) { *mz++ = 0; \ |
*mz++ = 0; \ |
if(mzsz >= 9*sizeof(mzsz)) { *mz++ = 0; \ |
*mz++ = 0; }}} \ |
*mz++ = 0; \ |
*mz++ = 0; \ |
*mz = 0; \ |
} else memset((charp), 0, mzsz); \ |
} while(0) |
|
#define MALLOC_COPY(dest,src,nbytes) \ |
do { \ |
INTERNAL_SIZE_T mcsz = (nbytes); \ |
if(mcsz <= 9*sizeof(mcsz)) { \ |
INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) (src); \ |
INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) (dest); \ |
if(mcsz >= 5*sizeof(mcsz)) { *mcdst++ = *mcsrc++; \ |
*mcdst++ = *mcsrc++; \ |
if(mcsz >= 7*sizeof(mcsz)) { *mcdst++ = *mcsrc++; \ |
*mcdst++ = *mcsrc++; \ |
if(mcsz >= 9*sizeof(mcsz)) { *mcdst++ = *mcsrc++; \ |
*mcdst++ = *mcsrc++; }}} \ |
*mcdst++ = *mcsrc++; \ |
*mcdst++ = *mcsrc++; \ |
*mcdst = *mcsrc ; \ |
} else memcpy(dest, src, mcsz); \ |
} while(0) |
|
#else /* !USE_MEMCPY */ |
|
/* Use Duff's device for good zeroing/copying performance. */ |
|
#define MALLOC_ZERO(charp, nbytes) \ |
do { \ |
INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp); \ |
long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T), mcn; \ |
if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \ |
switch (mctmp) { \ |
case 0: for(;;) { *mzp++ = 0; \ |
case 7: *mzp++ = 0; \ |
case 6: *mzp++ = 0; \ |
case 5: *mzp++ = 0; \ |
case 4: *mzp++ = 0; \ |
case 3: *mzp++ = 0; \ |
case 2: *mzp++ = 0; \ |
case 1: *mzp++ = 0; if(mcn <= 0) break; mcn--; } \ |
} \ |
} while(0) |
|
#define MALLOC_COPY(dest,src,nbytes) \ |
do { \ |
INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src; \ |
INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest; \ |
long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T), mcn; \ |
if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \ |
switch (mctmp) { \ |
case 0: for(;;) { *mcdst++ = *mcsrc++; \ |
case 7: *mcdst++ = *mcsrc++; \ |
case 6: *mcdst++ = *mcsrc++; \ |
case 5: *mcdst++ = *mcsrc++; \ |
case 4: *mcdst++ = *mcsrc++; \ |
case 3: *mcdst++ = *mcsrc++; \ |
case 2: *mcdst++ = *mcsrc++; \ |
case 1: *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; } \ |
} \ |
} while(0) |
|
#endif |
|
|
/* |
Define HAVE_MMAP to optionally make malloc() use mmap() to |
allocate very large blocks. These will be returned to the |
operating system immediately after a free(). |
*/ |
|
#ifndef HAVE_MMAP |
#define HAVE_MMAP 1 |
#endif |
|
/* |
Define HAVE_MREMAP to make realloc() use mremap() to re-allocate |
large blocks. This is currently only possible on Linux with |
kernel versions newer than 1.3.77. |
*/ |
|
#ifndef HAVE_MREMAP |
#ifdef INTERNAL_LINUX_C_LIB |
#define HAVE_MREMAP 1 |
#else |
#define HAVE_MREMAP 0 |
#endif |
#endif |
|
#if HAVE_MMAP |
|
#include <unistd.h> |
#include <fcntl.h> |
#include <sys/mman.h> |
|
#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON) |
#define MAP_ANONYMOUS MAP_ANON |
#endif |
|
#endif /* HAVE_MMAP */ |
|
/* |
Access to system page size. To the extent possible, this malloc |
manages memory from the system in page-size units. |
|
The following mechanics for getpagesize were adapted from |
bsd/gnu getpagesize.h |
*/ |
|
#ifndef LACKS_UNISTD_H |
# include <unistd.h> |
#endif |
|
#ifndef malloc_getpagesize |
# ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */ |
# ifndef _SC_PAGE_SIZE |
# define _SC_PAGE_SIZE _SC_PAGESIZE |
# endif |
# endif |
# ifdef _SC_PAGE_SIZE |
# define malloc_getpagesize sysconf(_SC_PAGE_SIZE) |
# else |
# if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE) |
extern size_t getpagesize(); |
# define malloc_getpagesize getpagesize() |
# else |
# ifdef WIN32 |
# define malloc_getpagesize (4096) /* TBD: Use 'GetSystemInfo' instead */ |
# else |
# ifndef LACKS_SYS_PARAM_H |
# include <sys/param.h> |
# endif |
# ifdef EXEC_PAGESIZE |
# define malloc_getpagesize EXEC_PAGESIZE |
# else |
# ifdef NBPG |
# ifndef CLSIZE |
# define malloc_getpagesize NBPG |
# else |
# define malloc_getpagesize (NBPG * CLSIZE) |
# endif |
# else |
# ifdef NBPC |
# define malloc_getpagesize NBPC |
# else |
# ifdef PAGESIZE |
# define malloc_getpagesize PAGESIZE |
# else |
# define malloc_getpagesize (4096) /* just guess */ |
# endif |
# endif |
# endif |
# endif |
# endif |
# endif |
# endif |
#endif |
|
|
|
/* |
|
This version of malloc supports the standard SVID/XPG mallinfo |
routine that returns a struct containing the same kind of |
information you can get from malloc_stats. It should work on |
any SVID/XPG compliant system that has a /usr/include/malloc.h |
defining struct mallinfo. (If you'd like to install such a thing |
yourself, cut out the preliminary declarations as described above |
and below and save them in a malloc.h file. But there's no |
compelling reason to bother to do this.) |
|
The main declaration needed is the mallinfo struct that is returned |
(by-copy) by mallinfo(). The SVID/XPG mallinfo struct contains a |
bunch of fields, most of which are not even meaningful in this |
version of malloc. Some of these fields are instead filled by |
mallinfo() with other numbers that might possibly be of interest. |
|
HAVE_USR_INCLUDE_MALLOC_H should be set if you have a |
/usr/include/malloc.h file that includes a declaration of struct |
mallinfo. If so, it is included; else an SVID2/XPG2 compliant |
version is declared below. These must be precisely the same for |
mallinfo() to work. |
|
*/ |
|
/* #define HAVE_USR_INCLUDE_MALLOC_H */ |
|
#if HAVE_USR_INCLUDE_MALLOC_H |
#include "/usr/include/malloc.h" |
#else |
|
/* SVID2/XPG mallinfo structure */ |
|
struct mallinfo { |
int arena; /* total space allocated from system */ |
int ordblks; /* number of non-inuse chunks */ |
int smblks; /* unused -- always zero */ |
int hblks; /* number of mmapped regions */ |
int hblkhd; /* total space in mmapped regions */ |
int usmblks; /* unused -- always zero */ |
int fsmblks; /* unused -- always zero */ |
int uordblks; /* total allocated space */ |
int fordblks; /* total non-inuse space */ |
int keepcost; /* top-most, releasable (via malloc_trim) space */ |
}; |
|
/* SVID2/XPG mallopt options */ |
|
#define M_MXFAST 1 /* UNUSED in this malloc */ |
#define M_NLBLKS 2 /* UNUSED in this malloc */ |
#define M_GRAIN 3 /* UNUSED in this malloc */ |
#define M_KEEP 4 /* UNUSED in this malloc */ |
|
#endif |
|
/* mallopt options that actually do something */ |
|
#define M_TRIM_THRESHOLD -1 |
#define M_TOP_PAD -2 |
#define M_MMAP_THRESHOLD -3 |
#define M_MMAP_MAX -4 |
|
|
|
#ifndef DEFAULT_TRIM_THRESHOLD |
#define DEFAULT_TRIM_THRESHOLD (128 * 1024) |
#endif |
|
/* |
M_TRIM_THRESHOLD is the maximum amount of unused top-most memory |
to keep before releasing via malloc_trim in free(). |
|
Automatic trimming is mainly useful in long-lived programs. |
Because trimming via sbrk can be slow on some systems, and can |
sometimes be wasteful (in cases where programs immediately |
afterward allocate more large chunks) the value should be high |
enough so that your overall system performance would improve by |
releasing. |
|
The trim threshold and the mmap control parameters (see below) |
can be traded off with one another. Trimming and mmapping are |
two different ways of releasing unused memory back to the |
system. Between these two, it is often possible to keep |
system-level demands of a long-lived program down to a bare |
minimum. For example, in one test suite of sessions measuring |
the XF86 X server on Linux, using a trim threshold of 128K and a |
mmap threshold of 192K led to near-minimal long term resource |
consumption. |
|
If you are using this malloc in a long-lived program, it should |
pay to experiment with these values. As a rough guide, you |
might set to a value close to the average size of a process |
(program) running on your system. Releasing this much memory |
would allow such a process to run in memory. Generally, it's |
worth it to tune for trimming rather than memory mapping when a |
program undergoes phases where several large chunks are |
allocated and released in ways that can reuse each other's |
storage, perhaps mixed with phases where there are no such |
chunks at all. And in well-behaved long-lived programs, |
controlling release of large blocks via trimming versus mapping |
is usually faster. |
|
However, in most programs, these parameters serve mainly as |
protection against the system-level effects of carrying around |
massive amounts of unneeded memory. Since frequent calls to |
sbrk, mmap, and munmap otherwise degrade performance, the default |
parameters are set to relatively high values that serve only as |
safeguards. |
|
The default trim value is high enough to cause trimming only in |
fairly extreme (by current memory consumption standards) cases. |
It must be greater than page size to have any useful effect. To |
disable trimming completely, you can set to (unsigned long)(-1); |
|
|
*/ |
|
|
#ifndef DEFAULT_TOP_PAD |
#define DEFAULT_TOP_PAD (0) |
#endif |
|
/* |
M_TOP_PAD is the amount of extra `padding' space to allocate or |
retain whenever sbrk is called. It is used in two ways internally: |
|
* When sbrk is called to extend the top of the arena to satisfy |
a new malloc request, this much padding is added to the sbrk |
request. |
|
* When malloc_trim is called automatically from free(), |
it is used as the `pad' argument. |
|
In both cases, the actual amount of padding is rounded |
so that the end of the arena is always a system page boundary. |
|
The main reason for using padding is to avoid calling sbrk so |
often. Having even a small pad greatly reduces the likelihood |
that nearly every malloc request during program start-up (or |
after trimming) will invoke sbrk, which needlessly wastes |
time. |
|
Automatic rounding-up to page-size units is normally sufficient |
to avoid measurable overhead, so the default is 0. However, in |
systems where sbrk is relatively slow, it can pay to increase |
this value, at the expense of carrying around more memory than |
the program needs. |
|
*/ |
|
|
#ifndef DEFAULT_MMAP_THRESHOLD |
#define DEFAULT_MMAP_THRESHOLD (128 * 1024) |
#endif |
|
/* |
|
M_MMAP_THRESHOLD is the request size threshold for using mmap() |
to service a request. Requests of at least this size that cannot |
be allocated using already-existing space will be serviced via mmap. |
(If enough normal freed space already exists it is used instead.) |
|
Using mmap segregates relatively large chunks of memory so that |
they can be individually obtained and released from the host |
system. A request serviced through mmap is never reused by any |
other request (at least not directly; the system may just so |
happen to remap successive requests to the same locations). |
|
Segregating space in this way has the benefit that mmapped space |
can ALWAYS be individually released back to the system, which |
helps keep the system level memory demands of a long-lived |
program low. Mapped memory can never become `locked' between |
other chunks, as can happen with normally allocated chunks, which |
means that even trimming via malloc_trim would not release them. |
|
However, it has the disadvantages that: |
|
1. The space cannot be reclaimed, consolidated, and then |
used to service later requests, as happens with normal chunks. |
2. It can lead to more wastage because of mmap page alignment |
requirements |
3. It causes malloc performance to be more dependent on host |
system memory management support routines which may vary in |
implementation quality and may impose arbitrary |
limitations. Generally, servicing a request via normal |
malloc steps is faster than going through a system's mmap. |
|
All together, these considerations should lead you to use mmap |
only for relatively large requests. |
|
|
*/ |
|
|
|
#ifndef DEFAULT_MMAP_MAX |
#if HAVE_MMAP |
#define DEFAULT_MMAP_MAX (64) |
#else |
#define DEFAULT_MMAP_MAX (0) |
#endif |
#endif |
|
/* |
M_MMAP_MAX is the maximum number of requests to simultaneously |
service using mmap. This parameter exists because: |
|
1. Some systems have a limited number of internal tables for |
use by mmap. |
2. In most systems, overreliance on mmap can degrade overall |
performance. |
3. If a program allocates many large regions, it is probably |
better off using normal sbrk-based allocation routines that |
can reclaim and reallocate normal heap memory. Using a |
small value allows transition into this mode after the |
first few allocations. |
|
Setting to 0 disables all use of mmap. If HAVE_MMAP is not set, |
the default value is 0, and attempts to set it to non-zero values |
in mallopt will fail. |
*/ |
|
|
|
|
/* |
USE_DL_PREFIX will prefix all public routines with the string 'dl'. |
Useful to quickly avoid procedure declaration conflicts and linker |
symbol conflicts with existing memory allocation routines. |
|
*/ |
|
/* #define USE_DL_PREFIX */ |
|
|
|
|
/* |
|
Special defines for linux libc |
|
Except when compiled using these special defines for Linux libc |
using weak aliases, this malloc is NOT designed to work in |
multithreaded applications. No semaphores or other concurrency |
control are provided to ensure that multiple malloc or free calls |
don't run at the same time, which could be disastrous. A single |
semaphore could be used across malloc, realloc, and free (which is |
essentially the effect of the linux weak alias approach). It would |
be hard to obtain finer granularity. |
|
*/ |
|
|
#ifdef INTERNAL_LINUX_C_LIB |
|
#if __STD_C |
|
Void_t * __default_morecore_init (ptrdiff_t); |
Void_t *(*__morecore)(ptrdiff_t) = __default_morecore_init; |
|
#else |
|
Void_t * __default_morecore_init (); |
Void_t *(*__morecore)() = __default_morecore_init; |
|
#endif |
|
#define MORECORE (*__morecore) |
#define MORECORE_FAILURE 0 |
#define MORECORE_CLEARS 1 |
|
#else /* INTERNAL_LINUX_C_LIB */ |
|
#if __STD_C |
extern Void_t* sbrk(ptrdiff_t); |
#else |
extern Void_t* sbrk(); |
#endif |
|
#ifndef MORECORE |
#define MORECORE sbrk |
#endif |
|
#ifndef MORECORE_FAILURE |
#define MORECORE_FAILURE -1 |
#endif |
|
#ifndef MORECORE_CLEARS |
#define MORECORE_CLEARS 1 |
#endif |
|
#endif /* INTERNAL_LINUX_C_LIB */ |
|
#if defined(INTERNAL_LINUX_C_LIB) && defined(__ELF__) |
|
#define cALLOc __libc_calloc |
#define fREe __libc_free |
#define mALLOc __libc_malloc |
#define mEMALIGn __libc_memalign |
#define rEALLOc __libc_realloc |
#define vALLOc __libc_valloc |
#define pvALLOc __libc_pvalloc |
#define mALLINFo __libc_mallinfo |
#define mALLOPt __libc_mallopt |
|
#pragma weak calloc = __libc_calloc |
#pragma weak free = __libc_free |
#pragma weak cfree = __libc_free |
#pragma weak malloc = __libc_malloc |
#pragma weak memalign = __libc_memalign |
#pragma weak realloc = __libc_realloc |
#pragma weak valloc = __libc_valloc |
#pragma weak pvalloc = __libc_pvalloc |
#pragma weak mallinfo = __libc_mallinfo |
#pragma weak mallopt = __libc_mallopt |
|
#else |
|
#ifdef USE_DL_PREFIX |
#define cALLOc dlcalloc |
#define fREe dlfree |
#define mALLOc dlmalloc |
#define mEMALIGn dlmemalign |
#define rEALLOc dlrealloc |
#define vALLOc dlvalloc |
#define pvALLOc dlpvalloc |
#define mALLINFo dlmallinfo |
#define mALLOPt dlmallopt |
#else /* USE_DL_PREFIX */ |
#define cALLOc calloc |
#define fREe free |
#define mALLOc malloc |
#define mEMALIGn memalign |
#define rEALLOc realloc |
#define vALLOc valloc |
#define pvALLOc pvalloc |
#define mALLINFo mallinfo |
#define mALLOPt mallopt |
#endif /* USE_DL_PREFIX */ |
|
#endif |
|
/* Public routines */ |
|
#if __STD_C |
|
Void_t* mALLOc(size_t); |
void fREe(Void_t*); |
Void_t* rEALLOc(Void_t*, size_t); |
Void_t* mEMALIGn(size_t, size_t); |
Void_t* vALLOc(size_t); |
Void_t* pvALLOc(size_t); |
Void_t* cALLOc(size_t, size_t); |
void cfree(Void_t*); |
int malloc_trim(size_t); |
size_t malloc_usable_size(Void_t*); |
void malloc_stats(); |
int mALLOPt(int, int); |
struct mallinfo mALLINFo(void); |
#else |
Void_t* mALLOc(); |
void fREe(); |
Void_t* rEALLOc(); |
Void_t* mEMALIGn(); |
Void_t* vALLOc(); |
Void_t* pvALLOc(); |
Void_t* cALLOc(); |
void cfree(); |
int malloc_trim(); |
size_t malloc_usable_size(); |
void malloc_stats(); |
int mALLOPt(); |
struct mallinfo mALLINFo(); |
#endif |
|
|
#ifdef __cplusplus |
}; /* end of extern "C" */ |
#endif |
|
/* ---------- To make a malloc.h, end cutting here ------------ */ |
|
|
/* |
Emulation of sbrk for WIN32 |
All code within the ifdef WIN32 is untested by me. |
|
Thanks to Martin Fong and others for supplying this. |
*/ |
|
|
#ifdef WIN32 |
|
#define AlignPage(add) (((add) + (malloc_getpagesize-1)) & \ |
~(malloc_getpagesize-1)) |
#define AlignPage64K(add) (((add) + (0x10000 - 1)) & ~(0x10000 - 1)) |
|
/* reserve 64MB to ensure large contiguous space */ |
#define RESERVED_SIZE (1024*1024*64) |
#define NEXT_SIZE (2048*1024) |
#define TOP_MEMORY ((unsigned long)2*1024*1024*1024) |
|
struct GmListElement; |
typedef struct GmListElement GmListElement; |
|
struct GmListElement |
{ |
GmListElement* next; |
void* base; |
}; |
|
static GmListElement* head = 0; |
static unsigned int gNextAddress = 0; |
static unsigned int gAddressBase = 0; |
static unsigned int gAllocatedSize = 0; |
|
static |
GmListElement* makeGmListElement (void* bas) |
{ |
GmListElement* this; |
this = (GmListElement*)(void*)LocalAlloc (0, sizeof (GmListElement)); |
assert (this); |
if (this) |
{ |
this->base = bas; |
this->next = head; |
head = this; |
} |
return this; |
} |
|
void gcleanup () |
{ |
BOOL rval; |
assert ( (head == NULL) || (head->base == (void*)gAddressBase)); |
if (gAddressBase && (gNextAddress - gAddressBase)) |
{ |
rval = VirtualFree ((void*)gAddressBase, |
gNextAddress - gAddressBase, |
MEM_DECOMMIT); |
assert (rval); |
} |
while (head) |
{ |
GmListElement* next = head->next; |
rval = VirtualFree (head->base, 0, MEM_RELEASE); |
assert (rval); |
LocalFree (head); |
head = next; |
} |
} |
|
static |
void* findRegion (void* start_address, unsigned long size) |
{ |
MEMORY_BASIC_INFORMATION info; |
if (size >= TOP_MEMORY) return NULL; |
|
while ((unsigned long)start_address + size < TOP_MEMORY) |
{ |
VirtualQuery (start_address, &info, sizeof (info)); |
if ((info.State == MEM_FREE) && (info.RegionSize >= size)) |
return start_address; |
else |
{ |
// Requested region is not available so see if the |
// next region is available. Set 'start_address' |
// to the next region and call 'VirtualQuery()' |
// again. |
|
start_address = (char*)info.BaseAddress + info.RegionSize; |
|
// Make sure we start looking for the next region |
// on the *next* 64K boundary. Otherwise, even if |
// the new region is free according to |
// 'VirtualQuery()', the subsequent call to |
// 'VirtualAlloc()' (which follows the call to |
// this routine in 'wsbrk()') will round *down* |
// the requested address to a 64K boundary which |
// we already know is an address in the |
// unavailable region. Thus, the subsequent call |
// to 'VirtualAlloc()' will fail and bring us back |
// here, causing us to go into an infinite loop. |
|
start_address = |
(void *) AlignPage64K((unsigned long) start_address); |
} |
} |
return NULL; |
|
} |
|
|
void* wsbrk (long size) |
{ |
void* tmp; |
if (size > 0) |
{ |
if (gAddressBase == 0) |
{ |
gAllocatedSize = max (RESERVED_SIZE, AlignPage (size)); |
gNextAddress = gAddressBase = |
(unsigned int)VirtualAlloc (NULL, gAllocatedSize, |
MEM_RESERVE, PAGE_NOACCESS); |
} else if (AlignPage (gNextAddress + size) > (gAddressBase + |
gAllocatedSize)) |
{ |
long new_size = max (NEXT_SIZE, AlignPage (size)); |
void* new_address = (void*)(gAddressBase+gAllocatedSize); |
do |
{ |
new_address = findRegion (new_address, new_size); |
|
if (new_address == 0) |
return (void*)-1; |
|
gAddressBase = gNextAddress = |
(unsigned int)VirtualAlloc (new_address, new_size, |
MEM_RESERVE, PAGE_NOACCESS); |
// repeat in case of race condition |
// The region that we found has been snagged |
// by another thread |
} |
while (gAddressBase == 0); |
|
assert (new_address == (void*)gAddressBase); |
|
gAllocatedSize = new_size; |
|
if (!makeGmListElement ((void*)gAddressBase)) |
return (void*)-1; |
} |
if ((size + gNextAddress) > AlignPage (gNextAddress)) |
{ |
void* res; |
res = VirtualAlloc ((void*)AlignPage (gNextAddress), |
(size + gNextAddress - |
AlignPage (gNextAddress)), |
MEM_COMMIT, PAGE_READWRITE); |
if (res == 0) |
return (void*)-1; |
} |
tmp = (void*)gNextAddress; |
gNextAddress = (unsigned int)tmp + size; |
return tmp; |
} |
else if (size < 0) |
{ |
unsigned int alignedGoal = AlignPage (gNextAddress + size); |
/* Trim by releasing the virtual memory */ |
if (alignedGoal >= gAddressBase) |
{ |
VirtualFree ((void*)alignedGoal, gNextAddress - alignedGoal, |
MEM_DECOMMIT); |
gNextAddress = gNextAddress + size; |
return (void*)gNextAddress; |
} |
else |
{ |
VirtualFree ((void*)gAddressBase, gNextAddress - gAddressBase, |
MEM_DECOMMIT); |
gNextAddress = gAddressBase; |
return (void*)-1; |
} |
} |
else |
{ |
return (void*)gNextAddress; |
} |
} |
|
#endif |
|
|
|
/* |
Type declarations |
*/ |
|
|
struct malloc_chunk |
{ |
INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */ |
INTERNAL_SIZE_T size; /* Size in bytes, including overhead. */ |
struct malloc_chunk* fd; /* double links -- used only if free. */ |
struct malloc_chunk* bk; |
}; |
|
typedef struct malloc_chunk* mchunkptr; |
|
/* |
|
malloc_chunk details: |
|
(The following includes lightly edited explanations by Colin Plumb.) |
|
Chunks of memory are maintained using a `boundary tag' method as |
described in e.g., Knuth or Standish. (See the paper by Paul |
Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a |
survey of such techniques.) Sizes of free chunks are stored both |
in the front of each chunk and at the end. This makes |
consolidating fragmented chunks into bigger chunks very fast. The |
size fields also hold bits representing whether chunks are free or |
in use. |
|
An allocated chunk looks like this: |
|
|
chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Size of previous chunk, if allocated | | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Size of chunk, in bytes |P| |
mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| User data starts here... . |
. . |
. (malloc_usable_space() bytes) . |
. | |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Size of chunk | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
|
|
Where "chunk" is the front of the chunk for the purpose of most of |
the malloc code, but "mem" is the pointer that is returned to the |
user. "Nextchunk" is the beginning of the next contiguous chunk. |
|
Chunks always begin on even word boundaries, so the mem portion |
(which is returned to the user) is also on an even word boundary, and |
thus double-word aligned. |
|
Free chunks are stored in circular doubly-linked lists, and look like this: |
|
chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Size of previous chunk | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
`head:' | Size of chunk, in bytes |P| |
mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Forward pointer to next chunk in list | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Back pointer to previous chunk in list | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Unused space (may be 0 bytes long) . |
. . |
. | |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
`foot:' | Size of chunk, in bytes | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
|
The P (PREV_INUSE) bit, stored in the unused low-order bit of the |
chunk size (which is always a multiple of two words), is an in-use |
bit for the *previous* chunk. If that bit is *clear*, then the |
word before the current chunk size contains the previous chunk |
size, and can be used to find the front of the previous chunk. |
(The very first chunk allocated always has this bit set, |
preventing access to non-existent (or non-owned) memory.) |
|
Note that the `foot' of the current chunk is actually represented |
as the prev_size of the NEXT chunk. (This makes it easier to |
deal with alignments etc). |
|
The two exceptions to all this are |
|
1. The special chunk `top', which doesn't bother using the |
trailing size field since there is no |
next contiguous chunk that would have to index off it. (After |
initialization, `top' is forced to always exist. If it would |
become less than MINSIZE bytes long, it is replenished via |
malloc_extend_top.) |
|
2. Chunks allocated via mmap, which have the second-lowest-order |
bit (IS_MMAPPED) set in their size fields. Because they are |
never merged or traversed from any other chunk, they have no |
foot size or inuse information. |
|
Available chunks are kept in any of several places (all declared below): |
|
* `av': An array of chunks serving as bin headers for consolidated |
chunks. Each bin is doubly linked. The bins are approximately |
proportionally (log) spaced. There are a lot of these bins |
(128). This may look excessive, but works very well in |
practice. All procedures maintain the invariant that no |
consolidated chunk physically borders another one. Chunks in |
bins are kept in size order, with ties going to the |
approximately least recently used chunk. |
|
The chunks in each bin are maintained in decreasing sorted order by |
size. This is irrelevant for the small bins, which all contain |
the same-sized chunks, but facilitates best-fit allocation for |
larger chunks. (These lists are just sequential. Keeping them in |
order almost never requires enough traversal to warrant using |
fancier ordered data structures.) Chunks of the same size are |
linked with the most recently freed at the front, and allocations |
are taken from the back. This results in LRU or FIFO allocation |
order, which tends to give each chunk an equal opportunity to be |
consolidated with adjacent freed chunks, resulting in larger free |
chunks and less fragmentation. |
|
* `top': The top-most available chunk (i.e., the one bordering the |
end of available memory) is treated specially. It is never |
included in any bin, is used only if no other chunk is |
available, and is released back to the system if it is very |
large (see M_TRIM_THRESHOLD). |
|
* `last_remainder': A bin holding only the remainder of the |
most recently split (non-top) chunk. This bin is checked |
before other non-fitting chunks, so as to provide better |
locality for runs of sequentially allocated chunks. |
|
* Implicitly, through the host system's memory mapping tables. |
If supported, requests greater than a threshold are usually |
serviced via calls to mmap, and then later released via munmap. |
|
*/ |
|
|
|
|
|
|
/* sizes, alignments */ |
|
#define SIZE_SZ (sizeof(INTERNAL_SIZE_T)) |
#define MALLOC_ALIGNMENT (SIZE_SZ + SIZE_SZ) |
#define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1) |
#define MINSIZE (sizeof(struct malloc_chunk)) |
|
/* conversion from malloc headers to user pointers, and back */ |
|
#define chunk2mem(p) ((Void_t*)((char*)(p) + 2*SIZE_SZ)) |
#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ)) |
|
/* pad request bytes into a usable size */ |
|
#define request2size(req) \ |
(((long)((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) < \ |
(long)(MINSIZE + MALLOC_ALIGN_MASK)) ? MINSIZE : \ |
(((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) & ~(MALLOC_ALIGN_MASK))) |
|
/* Check if m has acceptable alignment */ |
|
#define aligned_OK(m) (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0) |
|
|
|
|
/* |
Physical chunk operations |
*/ |
|
|
/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */ |
|
#define PREV_INUSE 0x1 |
|
/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */ |
|
#define IS_MMAPPED 0x2 |
|
/* Bits to mask off when extracting size */ |
|
#define SIZE_BITS (PREV_INUSE|IS_MMAPPED) |
|
|
/* Ptr to next physical malloc_chunk. */ |
|
#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~PREV_INUSE) )) |
|
/* Ptr to previous physical malloc_chunk */ |
|
#define prev_chunk(p)\ |
((mchunkptr)( ((char*)(p)) - ((p)->prev_size) )) |
|
|
/* Treat space at ptr + offset as a chunk */ |
|
#define chunk_at_offset(p, s) ((mchunkptr)(((char*)(p)) + (s))) |
|
|
|
|
/* |
Dealing with use bits |
*/ |
|
/* extract p's inuse bit */ |
|
#define inuse(p)\ |
((((mchunkptr)(((char*)(p))+((p)->size & ~PREV_INUSE)))->size) & PREV_INUSE) |
|
/* extract inuse bit of previous chunk */ |
|
#define prev_inuse(p) ((p)->size & PREV_INUSE) |
|
/* check for mmap()'ed chunk */ |
|
#define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED) |
|
/* set/clear chunk as in use without otherwise disturbing */ |
|
#define set_inuse(p)\ |
((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size |= PREV_INUSE |
|
#define clear_inuse(p)\ |
((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size &= ~(PREV_INUSE) |
|
/* check/set/clear inuse bits in known places */ |
|
#define inuse_bit_at_offset(p, s)\ |
(((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE) |
|
#define set_inuse_bit_at_offset(p, s)\ |
(((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE) |
|
#define clear_inuse_bit_at_offset(p, s)\ |
(((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE)) |
|
|
|
|
/* |
Dealing with size fields |
*/ |
|
/* Get size, ignoring use bits */ |
|
#define chunksize(p) ((p)->size & ~(SIZE_BITS)) |
|
/* Set size at head, without disturbing its use bit */ |
|
#define set_head_size(p, s) ((p)->size = (((p)->size & PREV_INUSE) | (s))) |
|
/* Set size/use ignoring previous bits in header */ |
|
#define set_head(p, s) ((p)->size = (s)) |
|
/* Set size at footer (only when chunk is not in use) */ |
|
#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_size = (s)) |
|
|
|
|
|
/* |
Bins |
|
The bins, `av_' are an array of pairs of pointers serving as the |
heads of (initially empty) doubly-linked lists of chunks, laid out |
in a way so that each pair can be treated as if it were in a |
malloc_chunk. (This way, the fd/bk offsets for linking bin heads |
and chunks are the same). |
|
Bins for sizes < 512 bytes contain chunks of all the same size, spaced |
8 bytes apart. Larger bins are approximately logarithmically |
spaced. (See the table below.) The `av_' array is never mentioned |
directly in the code, but instead via bin access macros. |
|
Bin layout: |
|
64 bins of size 8 |
32 bins of size 64 |
16 bins of size 512 |
8 bins of size 4096 |
4 bins of size 32768 |
2 bins of size 262144 |
1 bin of size what's left |
|
There is actually a little bit of slop in the numbers in bin_index |
for the sake of speed. This makes no difference elsewhere. |
|
The special chunks `top' and `last_remainder' get their own bins, |
(this is implemented via yet more trickery with the av_ array), |
although `top' is never properly linked to its bin since it is |
always handled specially. |
|
*/ |
|
#define NAV 128 /* number of bins */ |
|
typedef struct malloc_chunk* mbinptr; |
|
/* access macros */ |
|
#define bin_at(i) ((mbinptr)((char*)&(av_[2*(i) + 2]) - 2*SIZE_SZ)) |
#define next_bin(b) ((mbinptr)((char*)(b) + 2 * sizeof(mbinptr))) |
#define prev_bin(b) ((mbinptr)((char*)(b) - 2 * sizeof(mbinptr))) |
|
/* |
The first 2 bins are never indexed. The corresponding av_ cells are instead |
used for bookkeeping. This is not to save space, but to simplify |
indexing, maintain locality, and avoid some initialization tests. |
*/ |
|
#define top (bin_at(0)->fd) /* The topmost chunk */ |
#define last_remainder (bin_at(1)) /* remainder from last split */ |
|
|
/* |
Because top initially points to its own bin with initial |
zero size, thus forcing extension on the first malloc request, |
we avoid having any special code in malloc to check whether |
  it even exists yet. But we still need to check for it in malloc_extend_top. |
*/ |
|
#define initial_top ((mchunkptr)(bin_at(0))) |
|
/* Helper macro to initialize bins */ |
|
#define IAV(i) bin_at(i), bin_at(i) |
|
static mbinptr av_[NAV * 2 + 2] = { |
0, 0, |
IAV(0), IAV(1), IAV(2), IAV(3), IAV(4), IAV(5), IAV(6), IAV(7), |
IAV(8), IAV(9), IAV(10), IAV(11), IAV(12), IAV(13), IAV(14), IAV(15), |
IAV(16), IAV(17), IAV(18), IAV(19), IAV(20), IAV(21), IAV(22), IAV(23), |
IAV(24), IAV(25), IAV(26), IAV(27), IAV(28), IAV(29), IAV(30), IAV(31), |
IAV(32), IAV(33), IAV(34), IAV(35), IAV(36), IAV(37), IAV(38), IAV(39), |
IAV(40), IAV(41), IAV(42), IAV(43), IAV(44), IAV(45), IAV(46), IAV(47), |
IAV(48), IAV(49), IAV(50), IAV(51), IAV(52), IAV(53), IAV(54), IAV(55), |
IAV(56), IAV(57), IAV(58), IAV(59), IAV(60), IAV(61), IAV(62), IAV(63), |
IAV(64), IAV(65), IAV(66), IAV(67), IAV(68), IAV(69), IAV(70), IAV(71), |
IAV(72), IAV(73), IAV(74), IAV(75), IAV(76), IAV(77), IAV(78), IAV(79), |
IAV(80), IAV(81), IAV(82), IAV(83), IAV(84), IAV(85), IAV(86), IAV(87), |
IAV(88), IAV(89), IAV(90), IAV(91), IAV(92), IAV(93), IAV(94), IAV(95), |
IAV(96), IAV(97), IAV(98), IAV(99), IAV(100), IAV(101), IAV(102), IAV(103), |
IAV(104), IAV(105), IAV(106), IAV(107), IAV(108), IAV(109), IAV(110), IAV(111), |
IAV(112), IAV(113), IAV(114), IAV(115), IAV(116), IAV(117), IAV(118), IAV(119), |
IAV(120), IAV(121), IAV(122), IAV(123), IAV(124), IAV(125), IAV(126), IAV(127) |
}; |
|
|
|
/* field-extraction macros */ |
|
#define first(b) ((b)->fd) |
#define last(b) ((b)->bk) |
|
/* |
Indexing into bins |
*/ |
|
#define bin_index(sz) \ |
(((((unsigned long)(sz)) >> 9) == 0) ? (((unsigned long)(sz)) >> 3): \ |
((((unsigned long)(sz)) >> 9) <= 4) ? 56 + (((unsigned long)(sz)) >> 6): \ |
((((unsigned long)(sz)) >> 9) <= 20) ? 91 + (((unsigned long)(sz)) >> 9): \ |
((((unsigned long)(sz)) >> 9) <= 84) ? 110 + (((unsigned long)(sz)) >> 12): \ |
((((unsigned long)(sz)) >> 9) <= 340) ? 119 + (((unsigned long)(sz)) >> 15): \ |
((((unsigned long)(sz)) >> 9) <= 1364) ? 124 + (((unsigned long)(sz)) >> 18): \ |
126) |
/* |
bins for chunks < 512 are all spaced 8 bytes apart, and hold |
identically sized chunks. This is exploited in malloc. |
*/ |
|
#define MAX_SMALLBIN 63 |
#define MAX_SMALLBIN_SIZE 512 |
#define SMALLBIN_WIDTH 8 |
|
#define smallbin_index(sz) (((unsigned long)(sz)) >> 3) |
|
/* |
Requests are `small' if both the corresponding and the next bin are small |
*/ |
|
#define is_small_request(nb) (nb < MAX_SMALLBIN_SIZE - SMALLBIN_WIDTH) |
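 |
/* |
  A few worked values for the indexing macros above (sketch only, not |
  compiled in).  Sizes below 512 land in exact-size small bins spaced 8 |
  bytes apart; larger sizes fall into the logarithmically spaced ranges. |
*/ |
#if 0 |
assert(bin_index(40)   == 5);     /* < 512: same as smallbin_index(40)        */ |
assert(bin_index(1000) == 71);    /* 512..2K range:  56 + (1000 >> 6)         */ |
assert(bin_index(6000) == 102);   /* 2K..10K range:  91 + (6000 >> 9)         */ |
assert(is_small_request(500));    /* 500 < 512 - 8, so the fast path applies  */ |
assert(!is_small_request(504));   /* the next bin would no longer be small    */ |
#endif |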
|
|
|
/* |
To help compensate for the large number of bins, a one-level index |
structure is used for bin-by-bin searching. `binblocks' is a |
one-word bitvector recording whether groups of BINBLOCKWIDTH bins |
have any (possibly) non-empty bins, so they can be skipped over |
all at once during during traversals. The bits are NOT always |
cleared as soon as all bins in a block are empty, but instead only |
when all are noticed to be empty during traversal in malloc. |
*/ |
|
#define BINBLOCKWIDTH 4 /* bins per block */ |
|
#define binblocks (bin_at(0)->size) /* bitvector of nonempty blocks */ |
|
/* bin<->block macros */ |
|
#define idx2binblock(ix) ((unsigned)1 << (ix / BINBLOCKWIDTH)) |
#define mark_binblock(ii) (binblocks |= idx2binblock(ii)) |
#define clear_binblock(ii) (binblocks &= ~(idx2binblock(ii))) |
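 |
/* |
  A small sketch (not compiled in): with BINBLOCKWIDTH == 4, bins 36..39 all |
  share bit 9 of `binblocks'.  Setting it tells the bin scan in malloc that |
  the group may be non-empty; while it stays clear the four bins are skipped. |
*/ |
#if 0 |
assert(idx2binblock(37) == ((unsigned)1 << 9));  /* 37 / BINBLOCKWIDTH == 9 */ |
mark_binblock(37);    /* the block covering bins 36..39 may now be non-empty  */ |
clear_binblock(37);   /* cleared lazily, once all four bins are seen empty    */ |
#endif |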
|
|
|
|
|
/* Other static bookkeeping data */ |
|
/* variables holding tunable values */ |
|
static unsigned long trim_threshold = DEFAULT_TRIM_THRESHOLD; |
static unsigned long top_pad = DEFAULT_TOP_PAD; |
static unsigned int n_mmaps_max = DEFAULT_MMAP_MAX; |
static unsigned long mmap_threshold = DEFAULT_MMAP_THRESHOLD; |
|
/* The first value returned from sbrk */ |
static char* sbrk_base = (char*)(-1); |
|
/* The maximum memory obtained from system via sbrk */ |
static unsigned long max_sbrked_mem = 0; |
|
/* The maximum via either sbrk or mmap */ |
static unsigned long max_total_mem = 0; |
|
/* internal working copy of mallinfo */ |
static struct mallinfo current_mallinfo = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; |
|
/* The total memory obtained from system via sbrk */ |
#define sbrked_mem (current_mallinfo.arena) |
|
/* Tracking mmaps */ |
|
static unsigned int n_mmaps = 0; |
static unsigned int max_n_mmaps = 0; |
static unsigned long mmapped_mem = 0; |
static unsigned long max_mmapped_mem = 0; |
|
|
|
/* |
Debugging support |
*/ |
|
#if DEBUG |
|
|
/* |
These routines make a number of assertions about the states |
of data structures that should be true at all times. If any |
are not true, it's very likely that a user program has somehow |
trashed memory. (It's also possible that there is a coding error |
  in malloc, in which case please report it!) |
*/ |
|
#if __STD_C |
static void do_check_chunk(mchunkptr p) |
#else |
static void do_check_chunk(p) mchunkptr p; |
#endif |
{ |
INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE; |
|
/* No checkable chunk is mmapped */ |
assert(!chunk_is_mmapped(p)); |
|
/* Check for legal address ... */ |
assert((char*)p >= sbrk_base); |
if (p != top) |
assert((char*)p + sz <= (char*)top); |
else |
assert((char*)p + sz <= sbrk_base + sbrked_mem); |
|
} |
|
|
#if __STD_C |
static void do_check_free_chunk(mchunkptr p) |
#else |
static void do_check_free_chunk(p) mchunkptr p; |
#endif |
{ |
INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE; |
mchunkptr next = chunk_at_offset(p, sz); |
|
do_check_chunk(p); |
|
/* Check whether it claims to be free ... */ |
assert(!inuse(p)); |
|
/* Unless a special marker, must have OK fields */ |
if ((long)sz >= (long)MINSIZE) |
{ |
assert((sz & MALLOC_ALIGN_MASK) == 0); |
assert(aligned_OK(chunk2mem(p))); |
/* ... matching footer field */ |
assert(next->prev_size == sz); |
/* ... and is fully consolidated */ |
assert(prev_inuse(p)); |
assert (next == top || inuse(next)); |
|
/* ... and has minimally sane links */ |
assert(p->fd->bk == p); |
assert(p->bk->fd == p); |
} |
else /* markers are always of size SIZE_SZ */ |
assert(sz == SIZE_SZ); |
} |
|
#if __STD_C |
static void do_check_inuse_chunk(mchunkptr p) |
#else |
static void do_check_inuse_chunk(p) mchunkptr p; |
#endif |
{ |
mchunkptr next = next_chunk(p); |
do_check_chunk(p); |
|
/* Check whether it claims to be in use ... */ |
assert(inuse(p)); |
|
/* ... and is surrounded by OK chunks. |
Since more things can be checked with free chunks than inuse ones, |
if an inuse chunk borders them and debug is on, it's worth doing them. |
*/ |
if (!prev_inuse(p)) |
{ |
mchunkptr prv = prev_chunk(p); |
assert(next_chunk(prv) == p); |
do_check_free_chunk(prv); |
} |
if (next == top) |
{ |
assert(prev_inuse(next)); |
assert(chunksize(next) >= MINSIZE); |
} |
else if (!inuse(next)) |
do_check_free_chunk(next); |
|
} |
|
#if __STD_C |
static void do_check_malloced_chunk(mchunkptr p, INTERNAL_SIZE_T s) |
#else |
static void do_check_malloced_chunk(p, s) mchunkptr p; INTERNAL_SIZE_T s; |
#endif |
{ |
INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE; |
long room = sz - s; |
|
do_check_inuse_chunk(p); |
|
/* Legal size ... */ |
assert((long)sz >= (long)MINSIZE); |
assert((sz & MALLOC_ALIGN_MASK) == 0); |
assert(room >= 0); |
assert(room < (long)MINSIZE); |
|
/* ... and alignment */ |
assert(aligned_OK(chunk2mem(p))); |
|
|
/* ... and was allocated at front of an available chunk */ |
assert(prev_inuse(p)); |
|
} |
|
|
#define check_free_chunk(P) do_check_free_chunk(P) |
#define check_inuse_chunk(P) do_check_inuse_chunk(P) |
#define check_chunk(P) do_check_chunk(P) |
#define check_malloced_chunk(P,N) do_check_malloced_chunk(P,N) |
#else |
#define check_free_chunk(P) |
#define check_inuse_chunk(P) |
#define check_chunk(P) |
#define check_malloced_chunk(P,N) |
#endif |
|
|
|
/* |
Macro-based internal utilities |
*/ |
|
|
/* |
Linking chunks in bin lists. |
Call these only with variables, not arbitrary expressions, as arguments. |
*/ |
|
/* |
Place chunk p of size s in its bin, in size order, |
putting it ahead of others of same size. |
*/ |
|
|
#define frontlink(P, S, IDX, BK, FD) \ |
{ \ |
if (S < MAX_SMALLBIN_SIZE) \ |
{ \ |
IDX = smallbin_index(S); \ |
mark_binblock(IDX); \ |
BK = bin_at(IDX); \ |
FD = BK->fd; \ |
P->bk = BK; \ |
P->fd = FD; \ |
FD->bk = BK->fd = P; \ |
} \ |
else \ |
{ \ |
IDX = bin_index(S); \ |
BK = bin_at(IDX); \ |
FD = BK->fd; \ |
if (FD == BK) mark_binblock(IDX); \ |
else \ |
{ \ |
while (FD != BK && S < chunksize(FD)) FD = FD->fd; \ |
BK = FD->bk; \ |
} \ |
P->bk = BK; \ |
P->fd = FD; \ |
FD->bk = BK->fd = P; \ |
} \ |
} |
|
|
/* take a chunk off a list */ |
|
#define unlink(P, BK, FD) \ |
{ \ |
BK = P->bk; \ |
FD = P->fd; \ |
FD->bk = BK; \ |
BK->fd = FD; \ |
} \ |
|
/* Place p as the last remainder */ |
|
#define link_last_remainder(P) \ |
{ \ |
last_remainder->fd = last_remainder->bk = P; \ |
P->fd = P->bk = last_remainder; \ |
} |
|
/* Clear the last_remainder bin */ |
|
#define clear_last_remainder \ |
(last_remainder->fd = last_remainder->bk = last_remainder) |
|
|
|
|
|
|
/* Routines dealing with mmap(). */ |
|
#if HAVE_MMAP |
|
#if __STD_C |
static mchunkptr mmap_chunk(size_t size) |
#else |
static mchunkptr mmap_chunk(size) size_t size; |
#endif |
{ |
size_t page_mask = malloc_getpagesize - 1; |
mchunkptr p; |
|
#ifndef MAP_ANONYMOUS |
static int fd = -1; |
#endif |
|
if(n_mmaps >= n_mmaps_max) return 0; /* too many regions */ |
|
/* For mmapped chunks, the overhead is one SIZE_SZ unit larger, because |
* there is no following chunk whose prev_size field could be used. |
*/ |
size = (size + SIZE_SZ + page_mask) & ~page_mask; |
|
#ifdef MAP_ANONYMOUS |
p = (mchunkptr)mmap(0, size, PROT_READ|PROT_WRITE, |
MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); |
#else /* !MAP_ANONYMOUS */ |
if (fd < 0) |
{ |
fd = open("/dev/zero", O_RDWR); |
if(fd < 0) return 0; |
} |
p = (mchunkptr)mmap(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0); |
#endif |
|
if(p == (mchunkptr)-1) return 0; |
|
n_mmaps++; |
if (n_mmaps > max_n_mmaps) max_n_mmaps = n_mmaps; |
|
/* We demand that eight bytes into a page must be 8-byte aligned. */ |
assert(aligned_OK(chunk2mem(p))); |
|
/* The offset to the start of the mmapped region is stored |
* in the prev_size field of the chunk; normally it is zero, |
* but that can be changed in memalign(). |
*/ |
p->prev_size = 0; |
set_head(p, size|IS_MMAPPED); |
|
mmapped_mem += size; |
if ((unsigned long)mmapped_mem > (unsigned long)max_mmapped_mem) |
max_mmapped_mem = mmapped_mem; |
if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem) |
max_total_mem = mmapped_mem + sbrked_mem; |
return p; |
} |
|
#if __STD_C |
static void munmap_chunk(mchunkptr p) |
#else |
static void munmap_chunk(p) mchunkptr p; |
#endif |
{ |
INTERNAL_SIZE_T size = chunksize(p); |
int ret; |
|
assert (chunk_is_mmapped(p)); |
assert(! ((char*)p >= sbrk_base && (char*)p < sbrk_base + sbrked_mem)); |
assert((n_mmaps > 0)); |
assert(((p->prev_size + size) & (malloc_getpagesize-1)) == 0); |
|
n_mmaps--; |
mmapped_mem -= (size + p->prev_size); |
|
ret = munmap((char *)p - p->prev_size, size + p->prev_size); |
|
/* munmap returns non-zero on failure */ |
assert(ret == 0); |
} |
|
#if HAVE_MREMAP |
|
#if __STD_C |
static mchunkptr mremap_chunk(mchunkptr p, size_t new_size) |
#else |
static mchunkptr mremap_chunk(p, new_size) mchunkptr p; size_t new_size; |
#endif |
{ |
size_t page_mask = malloc_getpagesize - 1; |
INTERNAL_SIZE_T offset = p->prev_size; |
INTERNAL_SIZE_T size = chunksize(p); |
char *cp; |
|
assert (chunk_is_mmapped(p)); |
assert(! ((char*)p >= sbrk_base && (char*)p < sbrk_base + sbrked_mem)); |
assert((n_mmaps > 0)); |
assert(((size + offset) & (malloc_getpagesize-1)) == 0); |
|
/* Note the extra SIZE_SZ overhead as in mmap_chunk(). */ |
new_size = (new_size + offset + SIZE_SZ + page_mask) & ~page_mask; |
|
cp = (char *)mremap((char *)p - offset, size + offset, new_size, 1); |
|
if (cp == (char *)-1) return 0; |
|
p = (mchunkptr)(cp + offset); |
|
assert(aligned_OK(chunk2mem(p))); |
|
assert((p->prev_size == offset)); |
set_head(p, (new_size - offset)|IS_MMAPPED); |
|
mmapped_mem -= size + offset; |
mmapped_mem += new_size; |
if ((unsigned long)mmapped_mem > (unsigned long)max_mmapped_mem) |
max_mmapped_mem = mmapped_mem; |
if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem) |
max_total_mem = mmapped_mem + sbrked_mem; |
return p; |
} |
|
#endif /* HAVE_MREMAP */ |
|
#endif /* HAVE_MMAP */ |
|
|
|
|
/* |
Extend the top-most chunk by obtaining memory from system. |
Main interface to sbrk (but see also malloc_trim). |
*/ |
|
#if __STD_C |
static void malloc_extend_top(INTERNAL_SIZE_T nb) |
#else |
static void malloc_extend_top(nb) INTERNAL_SIZE_T nb; |
#endif |
{ |
char* brk; /* return value from sbrk */ |
INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of sbrked space */ |
INTERNAL_SIZE_T correction; /* bytes for 2nd sbrk call */ |
char* new_brk; /* return of 2nd sbrk call */ |
INTERNAL_SIZE_T top_size; /* new size of top chunk */ |
|
mchunkptr old_top = top; /* Record state of old top */ |
INTERNAL_SIZE_T old_top_size = chunksize(old_top); |
char* old_end = (char*)(chunk_at_offset(old_top, old_top_size)); |
|
/* Pad request with top_pad plus minimal overhead */ |
|
INTERNAL_SIZE_T sbrk_size = nb + top_pad + MINSIZE; |
unsigned long pagesz = malloc_getpagesize; |
|
/* If not the first time through, round to preserve page boundary */ |
/* Otherwise, we need to correct to a page size below anyway. */ |
/* (We also correct below if an intervening foreign sbrk call.) */ |
|
if (sbrk_base != (char*)(-1)) |
sbrk_size = (sbrk_size + (pagesz - 1)) & ~(pagesz - 1); |
|
brk = (char*)(MORECORE (sbrk_size)); |
|
/* Fail if sbrk failed or if a foreign sbrk call killed our space */ |
if (brk == (char*)(MORECORE_FAILURE) || |
(brk < old_end && old_top != initial_top)) |
return; |
|
sbrked_mem += sbrk_size; |
|
if (brk == old_end) /* can just add bytes to current top */ |
{ |
top_size = sbrk_size + old_top_size; |
set_head(top, top_size | PREV_INUSE); |
} |
else |
{ |
if (sbrk_base == (char*)(-1)) /* First time through. Record base */ |
sbrk_base = brk; |
else /* Someone else called sbrk(). Count those bytes as sbrked_mem. */ |
sbrked_mem += brk - (char*)old_end; |
|
/* Guarantee alignment of first new chunk made from this space */ |
front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK; |
if (front_misalign > 0) |
{ |
correction = (MALLOC_ALIGNMENT) - front_misalign; |
brk += correction; |
} |
else |
correction = 0; |
|
/* Guarantee the next brk will be at a page boundary */ |
|
correction += ((((unsigned long)(brk + sbrk_size))+(pagesz-1)) & |
~(pagesz - 1)) - ((unsigned long)(brk + sbrk_size)); |
|
/* Allocate correction */ |
new_brk = (char*)(MORECORE (correction)); |
if (new_brk == (char*)(MORECORE_FAILURE)) return; |
|
sbrked_mem += correction; |
|
top = (mchunkptr)brk; |
top_size = new_brk - brk + correction; |
set_head(top, top_size | PREV_INUSE); |
|
if (old_top != initial_top) |
{ |
|
/* There must have been an intervening foreign sbrk call. */ |
/* A double fencepost is necessary to prevent consolidation */ |
|
/* If not enough space to do this, then user did something very wrong */ |
if (old_top_size < MINSIZE) |
{ |
set_head(top, PREV_INUSE); /* will force null return from malloc */ |
return; |
} |
|
/* Also keep size a multiple of MALLOC_ALIGNMENT */ |
old_top_size = (old_top_size - 3*SIZE_SZ) & ~MALLOC_ALIGN_MASK; |
set_head_size(old_top, old_top_size); |
chunk_at_offset(old_top, old_top_size )->size = |
SIZE_SZ|PREV_INUSE; |
chunk_at_offset(old_top, old_top_size + SIZE_SZ)->size = |
SIZE_SZ|PREV_INUSE; |
/* If possible, release the rest. */ |
if (old_top_size >= MINSIZE) |
fREe(chunk2mem(old_top)); |
} |
} |
|
if ((unsigned long)sbrked_mem > (unsigned long)max_sbrked_mem) |
max_sbrked_mem = sbrked_mem; |
if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem) |
max_total_mem = mmapped_mem + sbrked_mem; |
|
/* We always land on a page boundary */ |
assert(((unsigned long)((char*)top + top_size) & (pagesz - 1)) == 0); |
} |
|
|
|
|
/* Main public routines */ |
|
|
/* |
    Malloc Algorithm: |
|
The requested size is first converted into a usable form, `nb'. |
This currently means to add 4 bytes overhead plus possibly more to |
obtain 8-byte alignment and/or to obtain a size of at least |
MINSIZE (currently 16 bytes), the smallest allocatable size. |
(All fits are considered `exact' if they are within MINSIZE bytes.) |
|
    From there, the first of the following steps that succeeds is taken: |
|
1. The bin corresponding to the request size is scanned, and if |
a chunk of exactly the right size is found, it is taken. |
|
2. The most recently remaindered chunk is used if it is big |
enough. This is a form of (roving) first fit, used only in |
the absence of exact fits. Runs of consecutive requests use |
the remainder of the chunk used for the previous such request |
whenever possible. This limited use of a first-fit style |
allocation strategy tends to give contiguous chunks |
coextensive lifetimes, which improves locality and can reduce |
fragmentation in the long run. |
|
3. Other bins are scanned in increasing size order, using a |
chunk big enough to fulfill the request, and splitting off |
any remainder. This search is strictly by best-fit; i.e., |
the smallest (with ties going to approximately the least |
recently used) chunk that fits is selected. |
|
4. If large enough, the chunk bordering the end of memory |
(`top') is split off. (This use of `top' is in accord with |
the best-fit search rule. In effect, `top' is treated as |
larger (and thus less well fitting) than any other available |
chunk since it can be extended to be as large as necessary |
       (up to system limitations).) |
|
5. If the request size meets the mmap threshold and the |
system supports mmap, and there are few enough currently |
allocated mmapped regions, and a call to mmap succeeds, |
the request is allocated via direct memory mapping. |
|
6. Otherwise, the top of memory is extended by |
obtaining more space from the system (normally using sbrk, |
but definable to anything else via the MORECORE macro). |
Memory is gathered from the system (in system page-sized |
units) in a way that allows chunks obtained across different |
sbrk calls to be consolidated, but does not require |
contiguous memory. Thus, it should be safe to intersperse |
mallocs with other sbrk calls. |
|
|
    All allocations are made from the `lowest' part of any found |
chunk. (The implementation invariant is that prev_inuse is |
always true of any allocated chunk; i.e., that each allocated |
chunk borders either a previously allocated and still in-use chunk, |
or the base of its memory arena.) |
|
*/ |
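 |
/* |
  A minimal usage sketch of the algorithm above (not compiled in).  It relies |
  on the request2size/MALLOC_ALIGN_MASK definitions from earlier in this file; |
  with 4-byte size fields a 10-byte request is padded up to the 16-byte |
  MINSIZE chunk, so the extra room shows up in malloc_usable_size. |
*/ |
#if 0 |
Void_t* p = mALLOc(10); |
assert(((unsigned long)p & MALLOC_ALIGN_MASK) == 0);  /* user pointer is 8-byte aligned */ |
assert(malloc_usable_size(p) >= 10);                  /* padding is usable, not lost    */ |
fREe(p); |
#endif |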
|
#if __STD_C |
Void_t* mALLOc(size_t bytes) |
#else |
Void_t* mALLOc(bytes) size_t bytes; |
#endif |
{ |
mchunkptr victim; /* inspected/selected chunk */ |
INTERNAL_SIZE_T victim_size; /* its size */ |
int idx; /* index for bin traversal */ |
mbinptr bin; /* associated bin */ |
mchunkptr remainder; /* remainder from a split */ |
long remainder_size; /* its size */ |
int remainder_index; /* its bin index */ |
unsigned long block; /* block traverser bit */ |
int startidx; /* first bin of a traversed block */ |
mchunkptr fwd; /* misc temp for linking */ |
mchunkptr bck; /* misc temp for linking */ |
mbinptr q; /* misc temp */ |
|
INTERNAL_SIZE_T nb; |
|
if ((long)bytes < 0) return 0; |
|
nb = request2size(bytes); /* padded request size; */ |
|
/* Check for exact match in a bin */ |
|
if (is_small_request(nb)) /* Faster version for small requests */ |
{ |
idx = smallbin_index(nb); |
|
/* No traversal or size check necessary for small bins. */ |
|
q = bin_at(idx); |
victim = last(q); |
|
/* Also scan the next one, since it would have a remainder < MINSIZE */ |
if (victim == q) |
{ |
q = next_bin(q); |
victim = last(q); |
} |
if (victim != q) |
{ |
victim_size = chunksize(victim); |
unlink(victim, bck, fwd); |
set_inuse_bit_at_offset(victim, victim_size); |
check_malloced_chunk(victim, nb); |
return chunk2mem(victim); |
} |
|
idx += 2; /* Set for bin scan below. We've already scanned 2 bins. */ |
|
} |
else |
{ |
idx = bin_index(nb); |
bin = bin_at(idx); |
|
for (victim = last(bin); victim != bin; victim = victim->bk) |
{ |
victim_size = chunksize(victim); |
remainder_size = victim_size - nb; |
|
if (remainder_size >= (long)MINSIZE) /* too big */ |
{ |
--idx; /* adjust to rescan below after checking last remainder */ |
break; |
} |
|
else if (remainder_size >= 0) /* exact fit */ |
{ |
unlink(victim, bck, fwd); |
set_inuse_bit_at_offset(victim, victim_size); |
check_malloced_chunk(victim, nb); |
return chunk2mem(victim); |
} |
} |
|
++idx; |
|
} |
|
/* Try to use the last split-off remainder */ |
|
if ( (victim = last_remainder->fd) != last_remainder) |
{ |
victim_size = chunksize(victim); |
remainder_size = victim_size - nb; |
|
if (remainder_size >= (long)MINSIZE) /* re-split */ |
{ |
remainder = chunk_at_offset(victim, nb); |
set_head(victim, nb | PREV_INUSE); |
link_last_remainder(remainder); |
set_head(remainder, remainder_size | PREV_INUSE); |
set_foot(remainder, remainder_size); |
check_malloced_chunk(victim, nb); |
return chunk2mem(victim); |
} |
|
clear_last_remainder; |
|
if (remainder_size >= 0) /* exhaust */ |
{ |
set_inuse_bit_at_offset(victim, victim_size); |
check_malloced_chunk(victim, nb); |
return chunk2mem(victim); |
} |
|
/* Else place in bin */ |
|
frontlink(victim, victim_size, remainder_index, bck, fwd); |
} |
|
/* |
If there are any possibly nonempty big-enough blocks, |
search for best fitting chunk by scanning bins in blockwidth units. |
*/ |
|
if ( (block = idx2binblock(idx)) <= binblocks) |
{ |
|
/* Get to the first marked block */ |
|
if ( (block & binblocks) == 0) |
{ |
/* force to an even block boundary */ |
idx = (idx & ~(BINBLOCKWIDTH - 1)) + BINBLOCKWIDTH; |
block <<= 1; |
while ((block & binblocks) == 0) |
{ |
idx += BINBLOCKWIDTH; |
block <<= 1; |
} |
} |
|
/* For each possibly nonempty block ... */ |
for (;;) |
{ |
startidx = idx; /* (track incomplete blocks) */ |
q = bin = bin_at(idx); |
|
/* For each bin in this block ... */ |
do |
{ |
/* Find and use first big enough chunk ... */ |
|
for (victim = last(bin); victim != bin; victim = victim->bk) |
{ |
victim_size = chunksize(victim); |
remainder_size = victim_size - nb; |
|
if (remainder_size >= (long)MINSIZE) /* split */ |
{ |
remainder = chunk_at_offset(victim, nb); |
set_head(victim, nb | PREV_INUSE); |
unlink(victim, bck, fwd); |
link_last_remainder(remainder); |
set_head(remainder, remainder_size | PREV_INUSE); |
set_foot(remainder, remainder_size); |
check_malloced_chunk(victim, nb); |
return chunk2mem(victim); |
} |
|
else if (remainder_size >= 0) /* take */ |
{ |
set_inuse_bit_at_offset(victim, victim_size); |
unlink(victim, bck, fwd); |
check_malloced_chunk(victim, nb); |
return chunk2mem(victim); |
} |
|
} |
|
bin = next_bin(bin); |
|
} while ((++idx & (BINBLOCKWIDTH - 1)) != 0); |
|
/* Clear out the block bit. */ |
|
do /* Possibly backtrack to try to clear a partial block */ |
{ |
if ((startidx & (BINBLOCKWIDTH - 1)) == 0) |
{ |
binblocks &= ~block; |
break; |
} |
--startidx; |
q = prev_bin(q); |
} while (first(q) == q); |
|
/* Get to the next possibly nonempty block */ |
|
if ( (block <<= 1) <= binblocks && (block != 0) ) |
{ |
while ((block & binblocks) == 0) |
{ |
idx += BINBLOCKWIDTH; |
block <<= 1; |
} |
} |
else |
break; |
} |
} |
|
|
/* Try to use top chunk */ |
|
/* Require that there be a remainder, ensuring top always exists */ |
if ( (remainder_size = chunksize(top) - nb) < (long)MINSIZE) |
{ |
|
#if HAVE_MMAP |
/* If big and would otherwise need to extend, try to use mmap instead */ |
if ((unsigned long)nb >= (unsigned long)mmap_threshold && |
(victim = mmap_chunk(nb)) != 0) |
return chunk2mem(victim); |
#endif |
|
/* Try to extend */ |
malloc_extend_top(nb); |
if ( (remainder_size = chunksize(top) - nb) < (long)MINSIZE) |
return 0; /* propagate failure */ |
} |
|
victim = top; |
set_head(victim, nb | PREV_INUSE); |
top = chunk_at_offset(victim, nb); |
set_head(top, remainder_size | PREV_INUSE); |
check_malloced_chunk(victim, nb); |
return chunk2mem(victim); |
|
} |
|
|
|
|
/* |
|
free() algorithm : |
|
cases: |
|
1. free(0) has no effect. |
|
       2. If the chunk was allocated via mmap, it is released via munmap(). |
|
3. If a returned chunk borders the current high end of memory, |
it is consolidated into the top, and if the total unused |
topmost memory exceeds the trim threshold, malloc_trim is |
called. |
|
4. Other chunks are consolidated as they arrive, and |
placed in corresponding bins. (This includes the case of |
consolidating with the current `last_remainder'). |
|
*/ |
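 |
/* |
  A minimal sketch of the cases above (not compiled in).  Which case a given |
  free() hits depends on what the chunk borders at the time of the call, so |
  the comments describe the typical outcome only. |
*/ |
#if 0 |
fREe(0);                    /* case 1: no effect                                   */ |
{ |
  Void_t* a = mALLOc(100); |
  Void_t* b = mALLOc(100); |
  fREe(a);                  /* surrounded by in-use chunks: placed in a bin (case 4) */ |
  fREe(b);                  /* typically borders the freed a and/or top: merged      */ |
} |
#endif |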
|
|
#if __STD_C |
void fREe(Void_t* mem) |
#else |
void fREe(mem) Void_t* mem; |
#endif |
{ |
mchunkptr p; /* chunk corresponding to mem */ |
INTERNAL_SIZE_T hd; /* its head field */ |
INTERNAL_SIZE_T sz; /* its size */ |
int idx; /* its bin index */ |
mchunkptr next; /* next contiguous chunk */ |
INTERNAL_SIZE_T nextsz; /* its size */ |
INTERNAL_SIZE_T prevsz; /* size of previous contiguous chunk */ |
mchunkptr bck; /* misc temp for linking */ |
mchunkptr fwd; /* misc temp for linking */ |
int islr; /* track whether merging with last_remainder */ |
|
if (mem == 0) /* free(0) has no effect */ |
return; |
|
p = mem2chunk(mem); |
hd = p->size; |
|
#if HAVE_MMAP |
if (hd & IS_MMAPPED) /* release mmapped memory. */ |
{ |
munmap_chunk(p); |
return; |
} |
#endif |
|
check_inuse_chunk(p); |
|
sz = hd & ~PREV_INUSE; |
next = chunk_at_offset(p, sz); |
nextsz = chunksize(next); |
|
if (next == top) /* merge with top */ |
{ |
sz += nextsz; |
|
if (!(hd & PREV_INUSE)) /* consolidate backward */ |
{ |
prevsz = p->prev_size; |
p = chunk_at_offset(p, -((long) prevsz)); |
sz += prevsz; |
unlink(p, bck, fwd); |
} |
|
set_head(p, sz | PREV_INUSE); |
top = p; |
if ((unsigned long)(sz) >= (unsigned long)trim_threshold) |
malloc_trim(top_pad); |
return; |
} |
|
set_head(next, nextsz); /* clear inuse bit */ |
|
islr = 0; |
|
if (!(hd & PREV_INUSE)) /* consolidate backward */ |
{ |
prevsz = p->prev_size; |
p = chunk_at_offset(p, -((long) prevsz)); |
sz += prevsz; |
|
if (p->fd == last_remainder) /* keep as last_remainder */ |
islr = 1; |
else |
unlink(p, bck, fwd); |
} |
|
if (!(inuse_bit_at_offset(next, nextsz))) /* consolidate forward */ |
{ |
sz += nextsz; |
|
if (!islr && next->fd == last_remainder) /* re-insert last_remainder */ |
{ |
islr = 1; |
link_last_remainder(p); |
} |
else |
unlink(next, bck, fwd); |
} |
|
|
set_head(p, sz | PREV_INUSE); |
set_foot(p, sz); |
if (!islr) |
frontlink(p, sz, idx, bck, fwd); |
} |
|
|
|
|
|
/* |
|
Realloc algorithm: |
|
Chunks that were obtained via mmap cannot be extended or shrunk |
unless HAVE_MREMAP is defined, in which case mremap is used. |
Otherwise, if their reallocation is for additional space, they are |
copied. If for less, they are just left alone. |
|
Otherwise, if the reallocation is for additional space, and the |
chunk can be extended, it is, else a malloc-copy-free sequence is |
taken. There are several different ways that a chunk could be |
extended. All are tried: |
|
* Extending forward into following adjacent free chunk. |
* Shifting backwards, joining preceding adjacent space |
* Both shifting backwards and extending forward. |
* Extending into newly sbrked space |
|
Unless the #define REALLOC_ZERO_BYTES_FREES is set, realloc with a |
size argument of zero (re)allocates a minimum-sized chunk. |
|
If the reallocation is for less space, and the new request is for |
a `small' (<512 bytes) size, then the newly unused space is lopped |
off and freed. |
|
The old unix realloc convention of allowing the last-free'd chunk |
to be used as an argument to realloc is no longer supported. |
I don't know of any programs still relying on this feature, |
and allowing it would also allow too many other incorrect |
usages of realloc to be sensible. |
|
|
*/ |
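 |
/* |
  A minimal sketch of the behaviour above (not compiled in).  Whether a grow |
  happens in place depends on what currently surrounds the chunk. |
*/ |
#if 0 |
char* p = (char*)mALLOc(64); |
p = (char*)rEALLOc(p, 256);  /* extends forward/backward/into top when it can,  */ |
                             /* otherwise falls back to malloc-copy-free        */ |
p = (char*)rEALLOc(p, 32);   /* shrink to a `small' size: the tail is split off */ |
                             /* and handed back via free                        */ |
fREe(p); |
#endif |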
|
|
#if __STD_C |
Void_t* rEALLOc(Void_t* oldmem, size_t bytes) |
#else |
Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes; |
#endif |
{ |
INTERNAL_SIZE_T nb; /* padded request size */ |
|
mchunkptr oldp; /* chunk corresponding to oldmem */ |
INTERNAL_SIZE_T oldsize; /* its size */ |
|
mchunkptr newp; /* chunk to return */ |
INTERNAL_SIZE_T newsize; /* its size */ |
Void_t* newmem; /* corresponding user mem */ |
|
mchunkptr next; /* next contiguous chunk after oldp */ |
INTERNAL_SIZE_T nextsize; /* its size */ |
|
mchunkptr prev; /* previous contiguous chunk before oldp */ |
INTERNAL_SIZE_T prevsize; /* its size */ |
|
mchunkptr remainder; /* holds split off extra space from newp */ |
INTERNAL_SIZE_T remainder_size; /* its size */ |
|
mchunkptr bck; /* misc temp for linking */ |
mchunkptr fwd; /* misc temp for linking */ |
|
#ifdef REALLOC_ZERO_BYTES_FREES |
if (bytes == 0) { fREe(oldmem); return 0; } |
#endif |
|
if ((long)bytes < 0) return 0; |
|
/* realloc of null is supposed to be same as malloc */ |
if (oldmem == 0) return mALLOc(bytes); |
|
newp = oldp = mem2chunk(oldmem); |
newsize = oldsize = chunksize(oldp); |
|
|
nb = request2size(bytes); |
|
#if HAVE_MMAP |
if (chunk_is_mmapped(oldp)) |
{ |
#if HAVE_MREMAP |
newp = mremap_chunk(oldp, nb); |
if(newp) return chunk2mem(newp); |
#endif |
/* Note the extra SIZE_SZ overhead. */ |
if(oldsize - SIZE_SZ >= nb) return oldmem; /* do nothing */ |
/* Must alloc, copy, free. */ |
newmem = mALLOc(bytes); |
if (newmem == 0) return 0; /* propagate failure */ |
MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ); |
munmap_chunk(oldp); |
return newmem; |
} |
#endif |
|
check_inuse_chunk(oldp); |
|
if ((long)(oldsize) < (long)(nb)) |
{ |
|
/* Try expanding forward */ |
|
next = chunk_at_offset(oldp, oldsize); |
if (next == top || !inuse(next)) |
{ |
nextsize = chunksize(next); |
|
/* Forward into top only if a remainder */ |
if (next == top) |
{ |
if ((long)(nextsize + newsize) >= (long)(nb + MINSIZE)) |
{ |
newsize += nextsize; |
top = chunk_at_offset(oldp, nb); |
set_head(top, (newsize - nb) | PREV_INUSE); |
set_head_size(oldp, nb); |
return chunk2mem(oldp); |
} |
} |
|
/* Forward into next chunk */ |
else if (((long)(nextsize + newsize) >= (long)(nb))) |
{ |
unlink(next, bck, fwd); |
newsize += nextsize; |
goto split; |
} |
} |
else |
{ |
next = 0; |
nextsize = 0; |
} |
|
/* Try shifting backwards. */ |
|
if (!prev_inuse(oldp)) |
{ |
prev = prev_chunk(oldp); |
prevsize = chunksize(prev); |
|
/* try forward + backward first to save a later consolidation */ |
|
if (next != 0) |
{ |
/* into top */ |
if (next == top) |
{ |
if ((long)(nextsize + prevsize + newsize) >= (long)(nb + MINSIZE)) |
{ |
unlink(prev, bck, fwd); |
newp = prev; |
newsize += prevsize + nextsize; |
newmem = chunk2mem(newp); |
MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ); |
top = chunk_at_offset(newp, nb); |
set_head(top, (newsize - nb) | PREV_INUSE); |
set_head_size(newp, nb); |
return newmem; |
} |
} |
|
/* into next chunk */ |
else if (((long)(nextsize + prevsize + newsize) >= (long)(nb))) |
{ |
unlink(next, bck, fwd); |
unlink(prev, bck, fwd); |
newp = prev; |
newsize += nextsize + prevsize; |
newmem = chunk2mem(newp); |
MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ); |
goto split; |
} |
} |
|
/* backward only */ |
if (prev != 0 && (long)(prevsize + newsize) >= (long)nb) |
{ |
unlink(prev, bck, fwd); |
newp = prev; |
newsize += prevsize; |
newmem = chunk2mem(newp); |
MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ); |
goto split; |
} |
} |
|
/* Must allocate */ |
|
newmem = mALLOc (bytes); |
|
if (newmem == 0) /* propagate failure */ |
return 0; |
|
/* Avoid copy if newp is next chunk after oldp. */ |
/* (This can only happen when new chunk is sbrk'ed.) */ |
|
if ( (newp = mem2chunk(newmem)) == next_chunk(oldp)) |
{ |
newsize += chunksize(newp); |
newp = oldp; |
goto split; |
} |
|
/* Otherwise copy, free, and exit */ |
MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ); |
fREe(oldmem); |
return newmem; |
} |
|
|
split: /* split off extra room in old or expanded chunk */ |
|
if (newsize - nb >= MINSIZE) /* split off remainder */ |
{ |
remainder = chunk_at_offset(newp, nb); |
remainder_size = newsize - nb; |
set_head_size(newp, nb); |
set_head(remainder, remainder_size | PREV_INUSE); |
set_inuse_bit_at_offset(remainder, remainder_size); |
fREe(chunk2mem(remainder)); /* let free() deal with it */ |
} |
else |
{ |
set_head_size(newp, newsize); |
set_inuse_bit_at_offset(newp, newsize); |
} |
|
check_inuse_chunk(newp); |
return chunk2mem(newp); |
} |
|
|
|
|
/* |
|
memalign algorithm: |
|
memalign requests more than enough space from malloc, finds a spot |
within that chunk that meets the alignment request, and then |
possibly frees the leading and trailing space. |
|
The alignment argument must be a power of two. This property is not |
checked by memalign, so misuse may result in random runtime errors. |
|
8-byte alignment is guaranteed by normal malloc calls, so don't |
bother calling memalign with an argument of 8 or less. |
|
Overreliance on memalign is a sure way to fragment space. |
|
*/ |
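 |
/* |
  A minimal usage sketch (not compiled in).  The alignment must be a power of |
  two; anything <= MALLOC_ALIGNMENT is simply relayed to malloc. |
*/ |
#if 0 |
Void_t* p = mEMALIGn(64, 100); |
assert(((unsigned long)p % 64) == 0); |
fREe(p);                       /* memaligned memory is freed normally */ |
#endif |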
|
|
#if __STD_C |
Void_t* mEMALIGn(size_t alignment, size_t bytes) |
#else |
Void_t* mEMALIGn(alignment, bytes) size_t alignment; size_t bytes; |
#endif |
{ |
INTERNAL_SIZE_T nb; /* padded request size */ |
char* m; /* memory returned by malloc call */ |
mchunkptr p; /* corresponding chunk */ |
char* brk; /* alignment point within p */ |
mchunkptr newp; /* chunk to return */ |
INTERNAL_SIZE_T newsize; /* its size */ |
  INTERNAL_SIZE_T leadsize;       /* leading space before alignment point */ |
mchunkptr remainder; /* spare room at end to split off */ |
long remainder_size; /* its size */ |
|
if ((long)bytes < 0) return 0; |
|
/* If need less alignment than we give anyway, just relay to malloc */ |
|
if (alignment <= MALLOC_ALIGNMENT) return mALLOc(bytes); |
|
/* Otherwise, ensure that it is at least a minimum chunk size */ |
|
if (alignment < MINSIZE) alignment = MINSIZE; |
|
/* Call malloc with worst case padding to hit alignment. */ |
|
nb = request2size(bytes); |
m = (char*)(mALLOc(nb + alignment + MINSIZE)); |
|
if (m == 0) return 0; /* propagate failure */ |
|
p = mem2chunk(m); |
|
if ((((unsigned long)(m)) % alignment) == 0) /* aligned */ |
{ |
#if HAVE_MMAP |
if(chunk_is_mmapped(p)) |
return chunk2mem(p); /* nothing more to do */ |
#endif |
} |
else /* misaligned */ |
{ |
/* |
Find an aligned spot inside chunk. |
Since we need to give back leading space in a chunk of at |
least MINSIZE, if the first calculation places us at |
a spot with less than MINSIZE leader, we can move to the |
next aligned spot -- we've allocated enough total room so that |
this is always possible. |
*/ |
|
brk = (char*)mem2chunk(((unsigned long)(m + alignment - 1)) & -((signed) alignment)); |
if ((long)(brk - (char*)(p)) < MINSIZE) brk = brk + alignment; |
|
newp = (mchunkptr)brk; |
leadsize = brk - (char*)(p); |
newsize = chunksize(p) - leadsize; |
|
#if HAVE_MMAP |
if(chunk_is_mmapped(p)) |
{ |
newp->prev_size = p->prev_size + leadsize; |
set_head(newp, newsize|IS_MMAPPED); |
return chunk2mem(newp); |
} |
#endif |
|
/* give back leader, use the rest */ |
|
set_head(newp, newsize | PREV_INUSE); |
set_inuse_bit_at_offset(newp, newsize); |
set_head_size(p, leadsize); |
fREe(chunk2mem(p)); |
p = newp; |
|
assert (newsize >= nb && (((unsigned long)(chunk2mem(p))) % alignment) == 0); |
} |
|
/* Also give back spare room at the end */ |
|
remainder_size = chunksize(p) - nb; |
|
if (remainder_size >= (long)MINSIZE) |
{ |
remainder = chunk_at_offset(p, nb); |
set_head(remainder, remainder_size | PREV_INUSE); |
set_head_size(p, nb); |
fREe(chunk2mem(remainder)); |
} |
|
check_inuse_chunk(p); |
return chunk2mem(p); |
|
} |
|
|
|
|
/* |
valloc just invokes memalign with alignment argument equal |
to the page size of the system (or as near to this as can |
be figured out from all the includes/defines above.) |
*/ |
|
#if __STD_C |
Void_t* vALLOc(size_t bytes) |
#else |
Void_t* vALLOc(bytes) size_t bytes; |
#endif |
{ |
return mEMALIGn (malloc_getpagesize, bytes); |
} |
|
/* |
pvalloc just invokes valloc for the nearest pagesize |
  that will accommodate the request |
*/ |
|
|
#if __STD_C |
Void_t* pvALLOc(size_t bytes) |
#else |
Void_t* pvALLOc(bytes) size_t bytes; |
#endif |
{ |
size_t pagesize = malloc_getpagesize; |
return mEMALIGn (pagesize, (bytes + pagesize - 1) & ~(pagesize - 1)); |
} |
|
/* |
|
calloc calls malloc, then zeroes out the allocated chunk. |
|
*/ |
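 |
/* |
  A minimal usage sketch (not compiled in): ten ints, all zeroed. |
*/ |
#if 0 |
int* v = (int*)cALLOc(10, sizeof(int)); |
assert(v != 0 && v[0] == 0 && v[9] == 0); |
fREe(v); |
#endif |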
|
#if __STD_C |
Void_t* cALLOc(size_t n, size_t elem_size) |
#else |
Void_t* cALLOc(n, elem_size) size_t n; size_t elem_size; |
#endif |
{ |
mchunkptr p; |
INTERNAL_SIZE_T csz; |
|
INTERNAL_SIZE_T sz = n * elem_size; |
|
|
  /* check if malloc_extend_top was called, in which case there is no need to clear */ |
#if MORECORE_CLEARS |
mchunkptr oldtop = top; |
INTERNAL_SIZE_T oldtopsize = chunksize(top); |
#endif |
Void_t* mem = mALLOc (sz); |
|
if ((long)n < 0) return 0; |
|
if (mem == 0) |
return 0; |
else |
{ |
p = mem2chunk(mem); |
|
    /* Two optional cases in which clearing is not necessary */ |
|
|
#if HAVE_MMAP |
if (chunk_is_mmapped(p)) return mem; |
#endif |
|
csz = chunksize(p); |
|
#if MORECORE_CLEARS |
if (p == oldtop && csz > oldtopsize) |
{ |
/* clear only the bytes from non-freshly-sbrked memory */ |
csz = oldtopsize; |
} |
#endif |
|
MALLOC_ZERO(mem, csz - SIZE_SZ); |
return mem; |
} |
} |
|
/* |
|
cfree just calls free. It is needed/defined on some systems |
that pair it with calloc, presumably for odd historical reasons. |
|
*/ |
|
#if !defined(INTERNAL_LINUX_C_LIB) || !defined(__ELF__) |
#if __STD_C |
void cfree(Void_t *mem) |
#else |
void cfree(mem) Void_t *mem; |
#endif |
{ |
fREe(mem); |
} |
#endif |
|
|
|
/* |
|
Malloc_trim gives memory back to the system (via negative |
arguments to sbrk) if there is unused memory at the `high' end of |
the malloc pool. You can call this after freeing large blocks of |
memory to potentially reduce the system-level memory requirements |
of a program. However, it cannot guarantee to reduce memory. Under |
some allocation patterns, some large free blocks of memory will be |
locked between two used chunks, so they cannot be given back to |
the system. |
|
The `pad' argument to malloc_trim represents the amount of free |
trailing space to leave untrimmed. If this argument is zero, |
only the minimum amount of memory to maintain internal data |
structures will be left (one page or less). Non-zero arguments |
can be supplied to maintain enough trailing space to service |
future expected allocations without having to re-obtain memory |
from the system. |
|
Malloc_trim returns 1 if it actually released any memory, else 0. |
|
*/ |
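 |
/* |
  A minimal usage sketch (not compiled in): after freeing large blocks, ask |
  for everything above a 64K cushion of trailing free space to be returned. |
*/ |
#if 0 |
if (malloc_trim(64 * 1024)) |
  ;  /* returned 1: at least one page was actually handed back via sbrk */ |
#endif |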
|
#if __STD_C |
int malloc_trim(size_t pad) |
#else |
int malloc_trim(pad) size_t pad; |
#endif |
{ |
long top_size; /* Amount of top-most memory */ |
long extra; /* Amount to release */ |
char* current_brk; /* address returned by pre-check sbrk call */ |
char* new_brk; /* address returned by negative sbrk call */ |
|
unsigned long pagesz = malloc_getpagesize; |
|
top_size = chunksize(top); |
extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz; |
|
if (extra < (long)pagesz) /* Not enough memory to release */ |
return 0; |
|
else |
{ |
/* Test to make sure no one else called sbrk */ |
current_brk = (char*)(MORECORE (0)); |
if (current_brk != (char*)(top) + top_size) |
return 0; /* Apparently we don't own memory; must fail */ |
|
else |
{ |
new_brk = (char*)(MORECORE (-extra)); |
|
if (new_brk == (char*)(MORECORE_FAILURE)) /* sbrk failed? */ |
{ |
/* Try to figure out what we have */ |
current_brk = (char*)(MORECORE (0)); |
top_size = current_brk - (char*)top; |
if (top_size >= (long)MINSIZE) /* if not, we are very very dead! */ |
{ |
sbrked_mem = current_brk - sbrk_base; |
set_head(top, top_size | PREV_INUSE); |
} |
check_chunk(top); |
return 0; |
} |
|
else |
{ |
/* Success. Adjust top accordingly. */ |
set_head(top, (top_size - extra) | PREV_INUSE); |
sbrked_mem -= extra; |
check_chunk(top); |
return 1; |
} |
} |
} |
} |
|
|
|
/* |
malloc_usable_size: |
|
This routine tells you how many bytes you can actually use in an |
allocated chunk, which may be more than you requested (although |
often not). You can use this many bytes without worrying about |
overwriting other allocated objects. Not a particularly great |
programming practice, but still sometimes useful. |
|
*/ |
|
#if __STD_C |
size_t malloc_usable_size(Void_t* mem) |
#else |
size_t malloc_usable_size(mem) Void_t* mem; |
#endif |
{ |
mchunkptr p; |
if (mem == 0) |
return 0; |
else |
{ |
p = mem2chunk(mem); |
if(!chunk_is_mmapped(p)) |
{ |
if (!inuse(p)) return 0; |
check_inuse_chunk(p); |
return chunksize(p) - SIZE_SZ; |
} |
return chunksize(p) - 2*SIZE_SZ; |
} |
} |
|
|
|
|
/* Utility to update current_mallinfo for malloc_stats and mallinfo() */ |
|
static void malloc_update_mallinfo() |
{ |
int i; |
mbinptr b; |
mchunkptr p; |
#if DEBUG |
mchunkptr q; |
#endif |
|
INTERNAL_SIZE_T avail = chunksize(top); |
int navail = ((long)(avail) >= (long)MINSIZE)? 1 : 0; |
|
for (i = 1; i < NAV; ++i) |
{ |
b = bin_at(i); |
for (p = last(b); p != b; p = p->bk) |
{ |
#if DEBUG |
check_free_chunk(p); |
for (q = next_chunk(p); |
q < top && inuse(q) && (long)(chunksize(q)) >= (long)MINSIZE; |
q = next_chunk(q)) |
check_inuse_chunk(q); |
#endif |
avail += chunksize(p); |
navail++; |
} |
} |
|
current_mallinfo.ordblks = navail; |
current_mallinfo.uordblks = sbrked_mem - avail; |
current_mallinfo.fordblks = avail; |
current_mallinfo.hblks = n_mmaps; |
current_mallinfo.hblkhd = mmapped_mem; |
current_mallinfo.keepcost = chunksize(top); |
|
} |
|
|
|
/* |
|
malloc_stats: |
|
  Prints on stderr the amount of space obtained from the system (both |
via sbrk and mmap), the maximum amount (which may be more than |
current if malloc_trim and/or munmap got called), the maximum |
number of simultaneous mmap regions used, and the current number |
of bytes allocated via malloc (or realloc, etc) but not yet |
freed. (Note that this is the number of bytes allocated, not the |
number requested. It will be larger than the number requested |
because of alignment and bookkeeping overhead.) |
|
*/ |
|
void malloc_stats() |
{ |
malloc_update_mallinfo(); |
fprintf(stderr, "max system bytes = %10u\n", |
(unsigned int)(max_total_mem)); |
fprintf(stderr, "system bytes = %10u\n", |
(unsigned int)(sbrked_mem + mmapped_mem)); |
fprintf(stderr, "in use bytes = %10u\n", |
(unsigned int)(current_mallinfo.uordblks + mmapped_mem)); |
#if HAVE_MMAP |
fprintf(stderr, "max mmap regions = %10u\n", |
(unsigned int)max_n_mmaps); |
#endif |
} |
|
/* |
  mallinfo returns a copy of the updated current mallinfo. |
*/ |
|
struct mallinfo mALLINFo() |
{ |
malloc_update_mallinfo(); |
return current_mallinfo; |
} |
|
|
|
|
/* |
mallopt: |
|
mallopt is the general SVID/XPG interface to tunable parameters. |
The format is to provide a (parameter-number, parameter-value) pair. |
mallopt then sets the corresponding parameter to the argument |
value if it can (i.e., so long as the value is meaningful), |
and returns 1 if successful else 0. |
|
See descriptions of tunable parameters above. |
|
*/ |
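 |
/* |
  A minimal usage sketch (not compiled in), using the parameter numbers |
  handled in the switch below. |
*/ |
#if 0 |
mALLOPt(M_TRIM_THRESHOLD, 256 * 1024);  /* try to release memory once >= 256K collects at the top */ |
mALLOPt(M_TOP_PAD,        64 * 1024);   /* pad each sbrk request with 64K of extra slack           */ |
#endif |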
|
#if __STD_C |
int mALLOPt(int param_number, int value) |
#else |
int mALLOPt(param_number, value) int param_number; int value; |
#endif |
{ |
switch(param_number) |
{ |
case M_TRIM_THRESHOLD: |
trim_threshold = value; return 1; |
case M_TOP_PAD: |
top_pad = value; return 1; |
case M_MMAP_THRESHOLD: |
mmap_threshold = value; return 1; |
case M_MMAP_MAX: |
#if HAVE_MMAP |
n_mmaps_max = value; return 1; |
#else |
if (value != 0) return 0; else n_mmaps_max = value; return 1; |
#endif |
|
default: |
return 0; |
} |
} |
|
/* |
|
History: |
|
V2.6.6 Sun Dec 5 07:42:19 1999 Doug Lea (dl at gee) |
* return null for negative arguments |
* Added Several WIN32 cleanups from Martin C. Fong <mcfong@yahoo.com> |
* Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h' |
(e.g. WIN32 platforms) |
      * Clean up header file inclusion for WIN32 platforms |
* Cleanup code to avoid Microsoft Visual C++ compiler complaints |
* Add 'USE_DL_PREFIX' to quickly allow co-existence with existing |
memory allocation routines |
* Set 'malloc_getpagesize' for WIN32 platforms (needs more work) |
* Use 'assert' rather than 'ASSERT' in WIN32 code to conform to |
usage of 'assert' in non-WIN32 code |
* Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to |
avoid infinite loop |
* Always call 'fREe()' rather than 'free()' |
|
V2.6.5 Wed Jun 17 15:57:31 1998 Doug Lea (dl at gee) |
* Fixed ordering problem with boundary-stamping |
|
V2.6.3 Sun May 19 08:17:58 1996 Doug Lea (dl at gee) |
* Added pvalloc, as recommended by H.J. Liu |
* Added 64bit pointer support mainly from Wolfram Gloger |
* Added anonymously donated WIN32 sbrk emulation |
* Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen |
* malloc_extend_top: fix mask error that caused wastage after |
foreign sbrks |
* Add linux mremap support code from HJ Liu |
|
V2.6.2 Tue Dec 5 06:52:55 1995 Doug Lea (dl at gee) |
* Integrated most documentation with the code. |
* Add support for mmap, with help from |
Wolfram Gloger (Gloger@lrz.uni-muenchen.de). |
* Use last_remainder in more cases. |
* Pack bins using idea from colin@nyx10.cs.du.edu |
      * Use ordered bins instead of best-fit threshold |
* Eliminate block-local decls to simplify tracing and debugging. |
* Support another case of realloc via move into top |
      * Fix error occurring when initial sbrk_base not word-aligned. |
* Rely on page size for units instead of SBRK_UNIT to |
avoid surprises about sbrk alignment conventions. |
* Add mallinfo, mallopt. Thanks to Raymond Nijssen |
(raymond@es.ele.tue.nl) for the suggestion. |
* Add `pad' argument to malloc_trim and top_pad mallopt parameter. |
* More precautions for cases where other routines call sbrk, |
courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de). |
* Added macros etc., allowing use in linux libc from |
H.J. Lu (hjl@gnu.ai.mit.edu) |
* Inverted this history list |
|
V2.6.1 Sat Dec 2 14:10:57 1995 Doug Lea (dl at gee) |
* Re-tuned and fixed to behave more nicely with V2.6.0 changes. |
* Removed all preallocation code since under current scheme |
the work required to undo bad preallocations exceeds |
the work saved in good cases for most test programs. |
* No longer use return list or unconsolidated bins since |
no scheme using them consistently outperforms those that don't |
given above changes. |
* Use best fit for very large chunks to prevent some worst-cases. |
* Added some support for debugging |
|
V2.6.0 Sat Nov 4 07:05:23 1995 Doug Lea (dl at gee) |
* Removed footers when chunks are in use. Thanks to |
Paul Wilson (wilson@cs.texas.edu) for the suggestion. |
|
V2.5.4 Wed Nov 1 07:54:51 1995 Doug Lea (dl at gee) |
* Added malloc_trim, with help from Wolfram Gloger |
(wmglo@Dent.MED.Uni-Muenchen.DE). |
|
V2.5.3 Tue Apr 26 10:16:01 1994 Doug Lea (dl at g) |
|
V2.5.2 Tue Apr 5 16:20:40 1994 Doug Lea (dl at g) |
* realloc: try to expand in both directions |
* malloc: swap order of clean-bin strategy; |
* realloc: only conditionally expand backwards |
* Try not to scavenge used bins |
* Use bin counts as a guide to preallocation |
* Occasionally bin return list chunks in first scan |
* Add a few optimizations from colin@nyx10.cs.du.edu |
|
V2.5.1 Sat Aug 14 15:40:43 1993 Doug Lea (dl at g) |
* faster bin computation & slightly different binning |
* merged all consolidations to one part of malloc proper |
(eliminating old malloc_find_space & malloc_clean_bin) |
* Scan 2 returns chunks (not just 1) |
* Propagate failure in realloc if malloc returns 0 |
* Add stuff to allow compilation on non-ANSI compilers |
from kpv@research.att.com |
|
V2.5 Sat Aug 7 07:41:59 1993 Doug Lea (dl at g.oswego.edu) |
* removed potential for odd address access in prev_chunk |
* removed dependency on getpagesize.h |
* misc cosmetics and a bit more internal documentation |
* anticosmetics: mangled names in macros to evade debugger strangeness |
* tested on sparc, hp-700, dec-mips, rs6000 |
with gcc & native cc (hp, dec only) allowing |
Detlefs & Zorn comparison study (in SIGPLAN Notices.) |
|
Trial version Fri Aug 28 13:14:29 1992 Doug Lea (dl at g.oswego.edu) |
* Based loosely on libg++-1.2X malloc. (It retains some of the overall |
structure of old version, but most details differ.) |
|
*/ |
|
|
/common/v2_0/doc/dlmalloc/dlmalloc-newlib.c
0,0 → 1,3643
/* ---------- To make a malloc.h, start cutting here ------------ */ |
|
/* |
A version of malloc/free/realloc written by Doug Lea and released to the |
public domain. Send questions/comments/complaints/performance data |
to dl@cs.oswego.edu |
|
* VERSION 2.6.4 Thu Nov 28 07:54:55 1996 Doug Lea (dl at gee) |
|
Note: There may be an updated version of this malloc obtainable at |
ftp://g.oswego.edu/pub/misc/malloc.c |
Check before installing! |
|
* Why use this malloc? |
|
This is not the fastest, most space-conserving, most portable, or |
most tunable malloc ever written. However it is among the fastest |
while also being among the most space-conserving, portable and tunable. |
Consistent balance across these factors results in a good general-purpose |
allocator. For a high-level description, see |
http://g.oswego.edu/dl/html/malloc.html |
|
* Synopsis of public routines |
|
(Much fuller descriptions are contained in the program documentation below.) |
|
malloc(size_t n); |
Return a pointer to a newly allocated chunk of at least n bytes, or null |
if no space is available. |
free(Void_t* p); |
Release the chunk of memory pointed to by p, or no effect if p is null. |
realloc(Void_t* p, size_t n); |
Return a pointer to a chunk of size n that contains the same data |
as does chunk p up to the minimum of (n, p's size) bytes, or null |
if no space is available. The returned pointer may or may not be |
the same as p. If p is null, equivalent to malloc. Unless the |
#define REALLOC_ZERO_BYTES_FREES below is set, realloc with a |
size argument of zero (re)allocates a minimum-sized chunk. |
memalign(size_t alignment, size_t n); |
Return a pointer to a newly allocated chunk of n bytes, aligned |
in accord with the alignment argument, which must be a power of |
two. |
valloc(size_t n); |
Equivalent to memalign(pagesize, n), where pagesize is the page |
size of the system (or as near to this as can be figured out from |
all the includes/defines below.) |
pvalloc(size_t n); |
Equivalent to valloc(minimum-page-that-holds(n)), that is, |
round up n to nearest pagesize. |
calloc(size_t unit, size_t quantity); |
Returns a pointer to quantity * unit bytes, with all locations |
set to zero. |
cfree(Void_t* p); |
Equivalent to free(p). |
malloc_trim(size_t pad); |
Release all but pad bytes of freed top-most memory back |
to the system. Return 1 if successful, else 0. |
malloc_usable_size(Void_t* p); |
     Report the number of usable allocated bytes associated with the allocated |
chunk p. This may or may not report more bytes than were requested, |
due to alignment and minimum size constraints. |
malloc_stats(); |
Prints brief summary statistics on stderr. |
mallinfo() |
Returns (by copy) a struct containing various summary statistics. |
mallopt(int parameter_number, int parameter_value) |
Changes one of the tunable parameters described below. Returns |
1 if successful in changing the parameter, else 0. |
|
* Vital statistics: |
|
Alignment: 8-byte |
8 byte alignment is currently hardwired into the design. This |
seems to suffice for all current machines and C compilers. |
|
Assumed pointer representation: 4 or 8 bytes |
       Code for 8-byte pointers is untested by me but has been reported |
       to work reliably by Wolfram Gloger, who contributed most of the |
changes supporting this. |
|
Assumed size_t representation: 4 or 8 bytes |
Note that size_t is allowed to be 4 bytes even if pointers are 8. |
|
Minimum overhead per allocated chunk: 4 or 8 bytes |
Each malloced chunk has a hidden overhead of 4 bytes holding size |
and status information. |
|
Minimum allocated size: 4-byte ptrs: 16 bytes (including 4 overhead) |
                          8-byte ptrs:  24/32 bytes (including 4/8 overhead) |
|
When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte |
ptrs but 4 byte size) or 24 (for 8/8) additional bytes are |
needed; 4 (8) for a trailing size field |
and 8 (16) bytes for free list pointers. Thus, the minimum |
allocatable size is 16/24/32 bytes. |
|
Even a request for zero bytes (i.e., malloc(0)) returns a |
pointer to something of the minimum allocatable size. |
|
Maximum allocated size: 4-byte size_t: 2^31 - 8 bytes |
8-byte size_t: 2^63 - 16 bytes |
|
It is assumed that (possibly signed) size_t bit values suffice to |
represent chunk sizes. `Possibly signed' is due to the fact |
that `size_t' may be defined on a system as either a signed or |
an unsigned type. To be conservative, values that would appear |
as negative numbers are avoided. |
Requests for sizes with a negative sign bit will return a |
minimum-sized chunk. |
|
Maximum overhead wastage per allocated chunk: normally 15 bytes |
|
       Alignment demands, plus the minimum allocatable size restriction, |
make the normal worst-case wastage 15 bytes (i.e., up to 15 |
more bytes will be allocated than were requested in malloc), with |
two exceptions: |
1. Because requests for zero bytes allocate non-zero space, |
the worst case wastage for a request of zero bytes is 24 bytes. |
2. For requests >= mmap_threshold that are serviced via |
mmap(), the worst case wastage is 8 bytes plus the remainder |
from a system page (the minimal mmap unit); typically 4096 bytes. |
|
* Limitations |
|
Here are some features that are NOT currently supported |
|
* No user-definable hooks for callbacks and the like. |
* No automated mechanism for fully checking that all accesses |
to malloced memory stay within their bounds. |
* No support for compaction. |
|
* Synopsis of compile-time options: |
|
People have reported using previous versions of this malloc on all |
versions of Unix, sometimes by tweaking some of the defines |
below. It has been tested most extensively on Solaris and |
Linux. It is also reported to work on WIN32 platforms. |
People have also reported adapting this malloc for use in |
stand-alone embedded systems. |
|
The implementation is in straight, hand-tuned ANSI C. Among other |
consequences, it uses a lot of macros. Because of this, to be at |
all usable, this code should be compiled using an optimizing compiler |
(for example gcc -O2) that can simplify expressions and control |
paths. |
|
__STD_C (default: derived from C compiler defines) |
Nonzero if using ANSI-standard C compiler, a C++ compiler, or |
a C compiler sufficiently close to ANSI to get away with it. |
DEBUG (default: NOT defined) |
Define to enable debugging. Adds fairly extensive assertion-based |
checking to help track down memory errors, but noticeably slows down |
execution. |
SEPARATE_OBJECTS (default: NOT defined) |
Define this to compile into separate .o files. You must then |
compile malloc.c several times, defining a DEFINE_* macro each |
time. The list of DEFINE_* macros appears below. |
MALLOC_LOCK (default: NOT defined) |
MALLOC_UNLOCK (default: NOT defined) |
Define these to C expressions which are run to lock and unlock |
the malloc data structures. Calls may be nested; that is, |
MALLOC_LOCK may be called more than once before the corresponding |
MALLOC_UNLOCK calls. MALLOC_LOCK must avoid waiting for a lock |
that it already holds. |
MALLOC_ALIGNMENT (default: NOT defined) |
Define this to 16 if you need 16 byte alignment instead of 8 byte alignment |
which is the normal default. |
SIZE_T_SMALLER_THAN_LONG (default: NOT defined) |
Define this when the platform you are compiling has sizeof(long) > sizeof(size_t). |
The option causes some extra code to be generated to handle operations |
that use size_t operands and have long results. |
REALLOC_ZERO_BYTES_FREES (default: NOT defined) |
Define this if you think that realloc(p, 0) should be equivalent |
to free(p). Otherwise, since malloc returns a unique pointer for |
malloc(0), so does realloc(p, 0). |
HAVE_MEMCPY (default: defined) |
Define if you are not otherwise using ANSI STD C, but still |
have memcpy and memset in your C library and want to use them. |
Otherwise, simple internal versions are supplied. |
USE_MEMCPY (default: 1 if HAVE_MEMCPY is defined, 0 otherwise) |
Define as 1 if you want the C library versions of memset and |
memcpy called in realloc and calloc (otherwise macro versions are used). |
At least on some platforms, the simple macro versions usually |
outperform libc versions. |
HAVE_MMAP (default: defined as 1) |
Define to non-zero to optionally make malloc() use mmap() to |
allocate very large blocks. |
HAVE_MREMAP (default: defined as 0 unless Linux libc set) |
Define to non-zero to optionally make realloc() use mremap() to |
reallocate very large blocks. |
malloc_getpagesize (default: derived from system #includes) |
Either a constant or routine call returning the system page size. |
HAVE_USR_INCLUDE_MALLOC_H (default: NOT defined) |
Optionally define if you are on a system with a /usr/include/malloc.h |
  that declares struct mallinfo. It is not strictly necessary to |
  define this even if such a file exists, but doing so ensures consistency. |
INTERNAL_SIZE_T (default: size_t) |
Define to a 32-bit type (probably `unsigned int') if you are on a |
64-bit machine, yet do not want or need to allow malloc requests of |
greater than 2^31 to be handled. This saves space, especially for |
very small chunks. |
INTERNAL_LINUX_C_LIB (default: NOT defined) |
Defined only when compiled as part of Linux libc. |
Also note that there is some odd internal name-mangling via defines |
(for example, internally, `malloc' is named `mALLOc') needed |
when compiling in this case. These look funny but don't otherwise |
affect anything. |
INTERNAL_NEWLIB (default: NOT defined) |
Defined only when compiled as part of the Cygnus newlib |
distribution. |
WIN32 (default: undefined) |
Define this on MS win (95, nt) platforms to compile in sbrk emulation. |
LACKS_UNISTD_H (default: undefined) |
Define this if your system does not have a <unistd.h>. |
MORECORE (default: sbrk) |
The name of the routine to call to obtain more memory from the system. |
MORECORE_FAILURE (default: -1) |
The value returned upon failure of MORECORE. |
MORECORE_CLEARS (default 1) |
True (1) if the routine mapped to MORECORE zeroes out memory (which |
holds for sbrk). |
DEFAULT_TRIM_THRESHOLD |
DEFAULT_TOP_PAD |
DEFAULT_MMAP_THRESHOLD |
DEFAULT_MMAP_MAX |
Default values of tunable parameters (described in detail below) |
controlling interaction with host system routines (sbrk, mmap, etc). |
These values may also be changed dynamically via mallopt(). The |
preset defaults are those that give best performance for typical |
programs/systems. |
|
|
*/ |
|
|
|
|
/* Preliminaries */ |
|
#ifndef __STD_C |
#ifdef __STDC__ |
#define __STD_C 1 |
#else |
#if __cplusplus |
#define __STD_C 1 |
#else |
#define __STD_C 0 |
#endif /*__cplusplus*/ |
#endif /*__STDC__*/ |
#endif /*__STD_C*/ |
|
#ifndef Void_t |
#if __STD_C |
#define Void_t void |
#else |
#define Void_t char |
#endif |
#endif /*Void_t*/ |
|
#if __STD_C |
#include <stddef.h> /* for size_t */ |
#else |
#include <sys/types.h> |
#endif |
|
#ifdef __cplusplus |
extern "C" { |
#endif |
|
#include <stdio.h> /* needed for malloc_stats */ |
|
|
/* |
Compile-time options |
*/ |
|
|
/* |
|
Special defines for Cygnus newlib distribution. |
|
*/ |
|
#ifdef INTERNAL_NEWLIB |
|
#include <sys/config.h> |
|
/* |
  In newlib, all the publicly visible routines take a reentrancy |
pointer. We don't currently do anything much with it, but we do |
pass it to the lock routine. |
*/ |
|
#include <reent.h> |
|
#define POINTER_UINT unsigned _POINTER_INT |
#define SEPARATE_OBJECTS |
#define HAVE_MMAP 0 |
#define MORECORE(size) _sbrk_r(reent_ptr, (size)) |
#define MORECORE_CLEARS 0 |
#define MALLOC_LOCK __malloc_lock(reent_ptr) |
#define MALLOC_UNLOCK __malloc_unlock(reent_ptr) |
|
#ifndef _WIN32 |
#ifdef SMALL_MEMORY |
#define malloc_getpagesize (128) |
#else |
#define malloc_getpagesize (4096) |
#endif |
#endif |
|
#if __STD_C |
extern void __malloc_lock(struct _reent *); |
extern void __malloc_unlock(struct _reent *); |
#else |
extern void __malloc_lock(); |
extern void __malloc_unlock(); |
#endif |
|
#if __STD_C |
#define RARG struct _reent *reent_ptr, |
#define RONEARG struct _reent *reent_ptr |
#else |
#define RARG reent_ptr |
#define RONEARG reent_ptr |
#define RDECL struct _reent *reent_ptr; |
#endif |
|
#define RCALL reent_ptr, |
#define RONECALL reent_ptr |
|
#else /* ! INTERNAL_NEWLIB */ |
|
#define POINTER_UINT unsigned long |
#define RARG |
#define RONEARG |
#define RDECL |
#define RCALL |
#define RONECALL |
|
#endif /* ! INTERNAL_NEWLIB */ |
|
/* |
Debugging: |
|
Because freed chunks may be overwritten with link fields, this |
malloc will often die when freed memory is overwritten by user |
programs. This can be very effective (albeit in an annoying way) |
in helping track down dangling pointers. |
|
If you compile with -DDEBUG, a number of assertion checks are |
enabled that will catch more memory errors. You probably won't be |
able to make much sense of the actual assertion errors, but they |
should help you locate incorrectly overwritten memory. The |
checking is fairly extensive, and will slow down execution |
noticeably. Calling malloc_stats or mallinfo with DEBUG set will |
attempt to check every non-mmapped allocated and free chunk in the |
  course of computing the summaries. (By nature, mmapped regions |
cannot be checked very much automatically.) |
|
Setting DEBUG may also be helpful if you are trying to modify |
this code. The assertions in the check routines spell out in more |
detail the assumptions and invariants underlying the algorithms. |
|
*/ |
|
#if DEBUG |
#include <assert.h> |
#else |
#define assert(x) ((void)0) |
#endif |
|
|
/* |
SEPARATE_OBJECTS should be defined if you want each function to go |
into a separate .o file. You must then compile malloc.c once per |
function, defining the appropriate DEFINE_ macro. See below for the |
list of macros. |
*/ |
|
#ifndef SEPARATE_OBJECTS |
#define DEFINE_MALLOC |
#define DEFINE_FREE |
#define DEFINE_REALLOC |
#define DEFINE_CALLOC |
#define DEFINE_CFREE |
#define DEFINE_MEMALIGN |
#define DEFINE_VALLOC |
#define DEFINE_PVALLOC |
#define DEFINE_MALLINFO |
#define DEFINE_MALLOC_STATS |
#define DEFINE_MALLOC_USABLE_SIZE |
#define DEFINE_MALLOPT |
|
#define STATIC static |
#else |
#define STATIC |
#endif |
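|
/* |
  For example, a build that wants each routine in its own object file |
  might compile malloc.c once per DEFINE_ macro, along these lines |
  (illustrative commands only; adapt the compiler and flags to your |
  own build system): |
|
      gcc -O2 -DSEPARATE_OBJECTS -DDEFINE_MALLOC  -c malloc.c -o mallocr.o |
      gcc -O2 -DSEPARATE_OBJECTS -DDEFINE_FREE    -c malloc.c -o freer.o |
      gcc -O2 -DSEPARATE_OBJECTS -DDEFINE_REALLOC -c malloc.c -o reallocr.o |
|
  and so on for the remaining DEFINE_ macros listed above. |
*/ |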
|
/* |
Define MALLOC_LOCK and MALLOC_UNLOCK to C expressions to run to |
lock and unlock the malloc data structures. MALLOC_LOCK may be |
called recursively. |
*/ |
|
#ifndef MALLOC_LOCK |
#define MALLOC_LOCK |
#endif |
|
#ifndef MALLOC_UNLOCK |
#define MALLOC_UNLOCK |
#endif |
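|
/* |
  As an illustration, a port could map these hooks onto a mutex that |
  tolerates nested acquisition by the same thread.  The names below |
  (my_heap_lock, my_heap_unlock) are placeholders for whatever the |
  target system provides, not symbols defined in this source: |
|
      extern void my_heap_lock(void); |
      extern void my_heap_unlock(void); |
|
      #define MALLOC_LOCK    my_heap_lock() |
      #define MALLOC_UNLOCK  my_heap_unlock() |
*/ |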
|
/* |
INTERNAL_SIZE_T is the word-size used for internal bookkeeping |
of chunk sizes. On a 64-bit machine, you can reduce malloc |
overhead by defining INTERNAL_SIZE_T to be a 32 bit `unsigned int' |
at the expense of not being able to handle requests greater than |
2^31. This limitation is hardly ever a concern; you are encouraged |
to set this. However, the default version is the same as size_t. |
*/ |
|
#ifndef INTERNAL_SIZE_T |
#define INTERNAL_SIZE_T size_t |
#endif |
|
/* |
  The following is needed on implementations where long > size_t. |
  The problem arises because the code subtracts size_t values and |
  stores the result in long variables.  When long > size_t and the |
  first value is actually less than the second, the stored result is |
  a large positive number instead of a negative one.  For example, |
  (long)(x - y) with x = 0 and y = 1 ends up as 0x00000000FFFFFFFF, |
  which is 2^32 - 1 rather than -1 (0xFFFFFFFFFFFFFFFF), because |
  assignment from unsigned to a wider signed type does not sign extend. |
*/ |
|
#ifdef SIZE_T_SMALLER_THAN_LONG |
#define long_sub_size_t(x, y) ( (x < y) ? -((long)(y - x)) : (x - y) ); |
#else |
#define long_sub_size_t(x, y) ( (long)(x - y) ) |
#endif |
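|
/* |
  Worked example, assuming 32-bit size_t and 64-bit long: with |
  size_t x = 0 and size_t y = 1, the plain (long)(x - y) first computes |
  x - y as the unsigned 32-bit value 0xFFFFFFFF and then widens it to |
  0x00000000FFFFFFFF, i.e. +4294967295 rather than the intended -1. |
  The guarded form tests x < y first and so yields -((long)(y - x)), |
  which is -1 as expected. |
*/ |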
|
/* |
REALLOC_ZERO_BYTES_FREES should be set if a call to |
realloc with zero bytes should be the same as a call to free. |
Some people think it should. Otherwise, since this malloc |
returns a unique pointer for malloc(0), so does realloc(p, 0). |
*/ |
|
|
/* #define REALLOC_ZERO_BYTES_FREES */ |
|
|
/* |
  WIN32 causes an emulation of sbrk to be compiled in. |
mmap-based options are not currently supported in WIN32. |
*/ |
|
/* #define WIN32 */ |
#ifdef WIN32 |
#define MORECORE wsbrk |
#define HAVE_MMAP 0 |
#endif |
|
|
/* |
HAVE_MEMCPY should be defined if you are not otherwise using |
ANSI STD C, but still have memcpy and memset in your C library |
and want to use them in calloc and realloc. Otherwise simple |
macro versions are defined here. |
|
USE_MEMCPY should be defined as 1 if you actually want to |
have memset and memcpy called. People report that the macro |
versions are often enough faster than libc versions on many |
systems that it is better to use them. |
|
*/ |
|
#define HAVE_MEMCPY |
|
#ifndef USE_MEMCPY |
#ifdef HAVE_MEMCPY |
#define USE_MEMCPY 1 |
#else |
#define USE_MEMCPY 0 |
#endif |
#endif |
|
#if (__STD_C || defined(HAVE_MEMCPY)) |
|
#if __STD_C |
void* memset(void*, int, size_t); |
void* memcpy(void*, const void*, size_t); |
#else |
Void_t* memset(); |
Void_t* memcpy(); |
#endif |
#endif |
|
#if USE_MEMCPY |
|
/* The following macros are only invoked with (2n+1)-multiples of |
INTERNAL_SIZE_T units, with a positive integer n. This is exploited |
for fast inline execution when n is small. */ |
|
#define MALLOC_ZERO(charp, nbytes) \ |
do { \ |
INTERNAL_SIZE_T mzsz = (nbytes); \ |
if(mzsz <= 9*sizeof(mzsz)) { \ |
INTERNAL_SIZE_T* mz = (INTERNAL_SIZE_T*) (charp); \ |
if(mzsz >= 5*sizeof(mzsz)) { *mz++ = 0; \ |
*mz++ = 0; \ |
if(mzsz >= 7*sizeof(mzsz)) { *mz++ = 0; \ |
*mz++ = 0; \ |
if(mzsz >= 9*sizeof(mzsz)) { *mz++ = 0; \ |
*mz++ = 0; }}} \ |
*mz++ = 0; \ |
*mz++ = 0; \ |
*mz = 0; \ |
} else memset((charp), 0, mzsz); \ |
} while(0) |
|
#define MALLOC_COPY(dest,src,nbytes) \ |
do { \ |
INTERNAL_SIZE_T mcsz = (nbytes); \ |
if(mcsz <= 9*sizeof(mcsz)) { \ |
INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) (src); \ |
INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) (dest); \ |
if(mcsz >= 5*sizeof(mcsz)) { *mcdst++ = *mcsrc++; \ |
*mcdst++ = *mcsrc++; \ |
if(mcsz >= 7*sizeof(mcsz)) { *mcdst++ = *mcsrc++; \ |
*mcdst++ = *mcsrc++; \ |
if(mcsz >= 9*sizeof(mcsz)) { *mcdst++ = *mcsrc++; \ |
*mcdst++ = *mcsrc++; }}} \ |
*mcdst++ = *mcsrc++; \ |
*mcdst++ = *mcsrc++; \ |
*mcdst = *mcsrc ; \ |
} else memcpy(dest, src, mcsz); \ |
} while(0) |
|
#else /* !USE_MEMCPY */ |
|
/* Use Duff's device for good zeroing/copying performance. */ |
|
#define MALLOC_ZERO(charp, nbytes) \ |
do { \ |
INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp); \ |
long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T), mcn; \ |
if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \ |
switch (mctmp) { \ |
case 0: for(;;) { *mzp++ = 0; \ |
case 7: *mzp++ = 0; \ |
case 6: *mzp++ = 0; \ |
case 5: *mzp++ = 0; \ |
case 4: *mzp++ = 0; \ |
case 3: *mzp++ = 0; \ |
case 2: *mzp++ = 0; \ |
case 1: *mzp++ = 0; if(mcn <= 0) break; mcn--; } \ |
} \ |
} while(0) |
|
#define MALLOC_COPY(dest,src,nbytes) \ |
do { \ |
INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src; \ |
INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest; \ |
long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T), mcn; \ |
if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \ |
switch (mctmp) { \ |
case 0: for(;;) { *mcdst++ = *mcsrc++; \ |
case 7: *mcdst++ = *mcsrc++; \ |
case 6: *mcdst++ = *mcsrc++; \ |
case 5: *mcdst++ = *mcsrc++; \ |
case 4: *mcdst++ = *mcsrc++; \ |
case 3: *mcdst++ = *mcsrc++; \ |
case 2: *mcdst++ = *mcsrc++; \ |
case 1: *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; } \ |
} \ |
} while(0) |
|
#endif |
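|
/* |
  Usage sketch for the macros above (illustrative, not a literal |
  excerpt from the call sites later in this file): realloc copies the |
  old user data into a freshly obtained chunk with |
|
      MALLOC_COPY(newmem, oldmem, oldsize); |
|
  and calloc clears the user area of a new chunk with |
|
      MALLOC_ZERO(mem, csz); |
|
  where newmem/oldmem/mem are the user pointers and oldsize/csz are |
  the byte counts involved. |
*/ |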
|
|
/* |
Define HAVE_MMAP to optionally make malloc() use mmap() to |
allocate very large blocks. These will be returned to the |
operating system immediately after a free(). |
*/ |
|
#ifndef HAVE_MMAP |
#define HAVE_MMAP 1 |
#endif |
|
/* |
Define HAVE_MREMAP to make realloc() use mremap() to re-allocate |
large blocks. This is currently only possible on Linux with |
kernel versions newer than 1.3.77. |
*/ |
|
#ifndef HAVE_MREMAP |
#ifdef INTERNAL_LINUX_C_LIB |
#define HAVE_MREMAP 1 |
#else |
#define HAVE_MREMAP 0 |
#endif |
#endif |
|
#if HAVE_MMAP |
|
#include <unistd.h> |
#include <fcntl.h> |
#include <sys/mman.h> |
|
#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON) |
#define MAP_ANONYMOUS MAP_ANON |
#endif |
|
#endif /* HAVE_MMAP */ |
|
/* |
Access to system page size. To the extent possible, this malloc |
manages memory from the system in page-size units. |
|
The following mechanics for getpagesize were adapted from |
bsd/gnu getpagesize.h |
*/ |
|
#ifndef LACKS_UNISTD_H |
# include <unistd.h> |
#endif |
|
#ifndef malloc_getpagesize |
# ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */ |
# ifndef _SC_PAGE_SIZE |
# define _SC_PAGE_SIZE _SC_PAGESIZE |
# endif |
# endif |
# ifdef _SC_PAGE_SIZE |
# define malloc_getpagesize sysconf(_SC_PAGE_SIZE) |
# else |
# if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE) |
extern size_t getpagesize(); |
# define malloc_getpagesize getpagesize() |
# else |
# include <sys/param.h> |
# ifdef EXEC_PAGESIZE |
# define malloc_getpagesize EXEC_PAGESIZE |
# else |
# ifdef NBPG |
# ifndef CLSIZE |
# define malloc_getpagesize NBPG |
# else |
# define malloc_getpagesize (NBPG * CLSIZE) |
# endif |
# else |
# ifdef NBPC |
# define malloc_getpagesize NBPC |
# else |
# ifdef PAGESIZE |
# define malloc_getpagesize PAGESIZE |
# else |
# define malloc_getpagesize (4096) /* just guess */ |
# endif |
# endif |
# endif |
# endif |
# endif |
# endif |
#endif |
|
|
|
/* |
|
This version of malloc supports the standard SVID/XPG mallinfo |
routine that returns a struct containing the same kind of |
information you can get from malloc_stats. It should work on |
any SVID/XPG compliant system that has a /usr/include/malloc.h |
defining struct mallinfo. (If you'd like to install such a thing |
yourself, cut out the preliminary declarations as described above |
and below and save them in a malloc.h file. But there's no |
compelling reason to bother to do this.) |
|
The main declaration needed is the mallinfo struct that is returned |
  (by-copy) by mallinfo(). The SVID/XPG mallinfo struct contains a |
  bunch of fields, most of which are not even meaningful in this |
  version of malloc. Some of these fields are instead filled by |
mallinfo() with other numbers that might possibly be of interest. |
|
HAVE_USR_INCLUDE_MALLOC_H should be set if you have a |
/usr/include/malloc.h file that includes a declaration of struct |
mallinfo. If so, it is included; else an SVID2/XPG2 compliant |
version is declared below. These must be precisely the same for |
mallinfo() to work. |
|
*/ |
|
/* #define HAVE_USR_INCLUDE_MALLOC_H */ |
|
#if HAVE_USR_INCLUDE_MALLOC_H |
#include "/usr/include/malloc.h" |
#else |
|
/* SVID2/XPG mallinfo structure */ |
|
struct mallinfo { |
int arena; /* total space allocated from system */ |
int ordblks; /* number of non-inuse chunks */ |
int smblks; /* unused -- always zero */ |
int hblks; /* number of mmapped regions */ |
int hblkhd; /* total space in mmapped regions */ |
int usmblks; /* unused -- always zero */ |
int fsmblks; /* unused -- always zero */ |
int uordblks; /* total allocated space */ |
int fordblks; /* total non-inuse space */ |
int keepcost; /* top-most, releasable (via malloc_trim) space */ |
}; |
|
/* SVID2/XPG mallopt options */ |
|
#define M_MXFAST 1 /* UNUSED in this malloc */ |
#define M_NLBLKS 2 /* UNUSED in this malloc */ |
#define M_GRAIN 3 /* UNUSED in this malloc */ |
#define M_KEEP 4 /* UNUSED in this malloc */ |
|
#endif |
|
/* mallopt options that actually do something */ |
|
#define M_TRIM_THRESHOLD -1 |
#define M_TOP_PAD -2 |
#define M_MMAP_THRESHOLD -3 |
#define M_MMAP_MAX -4 |
|
|
|
#ifndef DEFAULT_TRIM_THRESHOLD |
#define DEFAULT_TRIM_THRESHOLD (128L * 1024L) |
#endif |
|
/* |
M_TRIM_THRESHOLD is the maximum amount of unused top-most memory |
to keep before releasing via malloc_trim in free(). |
|
Automatic trimming is mainly useful in long-lived programs. |
Because trimming via sbrk can be slow on some systems, and can |
sometimes be wasteful (in cases where programs immediately |
afterward allocate more large chunks) the value should be high |
enough so that your overall system performance would improve by |
releasing. |
|
The trim threshold and the mmap control parameters (see below) |
can be traded off with one another. Trimming and mmapping are |
two different ways of releasing unused memory back to the |
system. Between these two, it is often possible to keep |
system-level demands of a long-lived program down to a bare |
minimum. For example, in one test suite of sessions measuring |
the XF86 X server on Linux, using a trim threshold of 128K and a |
mmap threshold of 192K led to near-minimal long term resource |
consumption. |
|
If you are using this malloc in a long-lived program, it should |
pay to experiment with these values. As a rough guide, you |
  might set it to a value close to the average size of a process |
  (program) running on your system. Releasing this much memory |
  would allow such a process to run in memory. Generally, it's |
  worth tuning for trimming rather than memory mapping when a |
program undergoes phases where several large chunks are |
allocated and released in ways that can reuse each other's |
storage, perhaps mixed with phases where there are no such |
chunks at all. And in well-behaved long-lived programs, |
controlling release of large blocks via trimming versus mapping |
is usually faster. |
|
However, in most programs, these parameters serve mainly as |
protection against the system-level effects of carrying around |
massive amounts of unneeded memory. Since frequent calls to |
sbrk, mmap, and munmap otherwise degrade performance, the default |
parameters are set to relatively high values that serve only as |
safeguards. |
|
The default trim value is high enough to cause trimming only in |
fairly extreme (by current memory consumption standards) cases. |
  It must be greater than the page size to have any useful effect. To |
  disable trimming completely, you can set it to (unsigned long)(-1). |
|
|
*/ |
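|
/* |
  For example, a long-lived program that wants unused memory returned |
  to the system more eagerly could lower the threshold at start-up |
  (illustrative value only): |
|
      mallopt(M_TRIM_THRESHOLD, 64 * 1024); |
|
  while passing -1 (which converts to the largest unsigned long, as |
  noted above) disables automatic trimming altogether. |
*/ |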
|
|
#ifndef DEFAULT_TOP_PAD |
#define DEFAULT_TOP_PAD (0) |
#endif |
|
/* |
M_TOP_PAD is the amount of extra `padding' space to allocate or |
retain whenever sbrk is called. It is used in two ways internally: |
|
* When sbrk is called to extend the top of the arena to satisfy |
a new malloc request, this much padding is added to the sbrk |
request. |
|
* When malloc_trim is called automatically from free(), |
it is used as the `pad' argument. |
|
In both cases, the actual amount of padding is rounded |
so that the end of the arena is always a system page boundary. |
|
The main reason for using padding is to avoid calling sbrk so |
often. Having even a small pad greatly reduces the likelihood |
that nearly every malloc request during program start-up (or |
after trimming) will invoke sbrk, which needlessly wastes |
time. |
|
Automatic rounding-up to page-size units is normally sufficient |
to avoid measurable overhead, so the default is 0. However, in |
systems where sbrk is relatively slow, it can pay to increase |
this value, at the expense of carrying around more memory than |
the program needs. |
|
*/ |
|
|
#ifndef DEFAULT_MMAP_THRESHOLD |
#define DEFAULT_MMAP_THRESHOLD (128 * 1024) |
#endif |
|
/* |
|
M_MMAP_THRESHOLD is the request size threshold for using mmap() |
to service a request. Requests of at least this size that cannot |
be allocated using already-existing space will be serviced via mmap. |
(If enough normal freed space already exists it is used instead.) |
|
Using mmap segregates relatively large chunks of memory so that |
they can be individually obtained and released from the host |
system. A request serviced through mmap is never reused by any |
other request (at least not directly; the system may just so |
happen to remap successive requests to the same locations). |
|
Segregating space in this way has the benefit that mmapped space |
can ALWAYS be individually released back to the system, which |
helps keep the system level memory demands of a long-lived |
program low. Mapped memory can never become `locked' between |
other chunks, as can happen with normally allocated chunks, which |
  means that even trimming via malloc_trim would not release them. |
|
However, it has the disadvantages that: |
|
1. The space cannot be reclaimed, consolidated, and then |
used to service later requests, as happens with normal chunks. |
2. It can lead to more wastage because of mmap page alignment |
     requirements. |
3. It causes malloc performance to be more dependent on host |
system memory management support routines which may vary in |
implementation quality and may impose arbitrary |
limitations. Generally, servicing a request via normal |
malloc steps is faster than going through a system's mmap. |
|
All together, these considerations should lead you to use mmap |
only for relatively large requests. |
|
|
*/ |
|
|
|
#ifndef DEFAULT_MMAP_MAX |
#if HAVE_MMAP |
#define DEFAULT_MMAP_MAX (64) |
#else |
#define DEFAULT_MMAP_MAX (0) |
#endif |
#endif |
|
/* |
M_MMAP_MAX is the maximum number of requests to simultaneously |
service using mmap. This parameter exists because: |
|
1. Some systems have a limited number of internal tables for |
use by mmap. |
2. In most systems, overreliance on mmap can degrade overall |
performance. |
3. If a program allocates many large regions, it is probably |
better off using normal sbrk-based allocation routines that |
can reclaim and reallocate normal heap memory. Using a |
small value allows transition into this mode after the |
first few allocations. |
|
Setting to 0 disables all use of mmap. If HAVE_MMAP is not set, |
the default value is 0, and attempts to set it to non-zero values |
in mallopt will fail. |
*/ |
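|
/* |
  For example, a program that makes a handful of very large, long-lived |
  allocations and wants each returned to the system individually on |
  free() could use (illustrative values only): |
|
      mallopt(M_MMAP_THRESHOLD, 256 * 1024); |
      mallopt(M_MMAP_MAX, 16); |
|
  whereas mallopt(M_MMAP_MAX, 0) steers every request through the |
  normal sbrk-based path. |
*/ |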
|
|
|
|
/* |
|
Special defines for linux libc |
|
Except when compiled using these special defines for Linux libc |
using weak aliases, this malloc is NOT designed to work in |
multithreaded applications. No semaphores or other concurrency |
control are provided to ensure that multiple malloc or free calls |
  don't run at the same time, which could be disastrous. A single |
semaphore could be used across malloc, realloc, and free (which is |
essentially the effect of the linux weak alias approach). It would |
be hard to obtain finer granularity. |
|
*/ |
|
|
#ifdef INTERNAL_LINUX_C_LIB |
|
#if __STD_C |
|
Void_t * __default_morecore_init (ptrdiff_t); |
Void_t *(*__morecore)(ptrdiff_t) = __default_morecore_init; |
|
#else |
|
Void_t * __default_morecore_init (); |
Void_t *(*__morecore)() = __default_morecore_init; |
|
#endif |
|
#define MORECORE (*__morecore) |
#define MORECORE_FAILURE 0 |
#define MORECORE_CLEARS 1 |
|
#else /* INTERNAL_LINUX_C_LIB */ |
|
#ifndef INTERNAL_NEWLIB |
#if __STD_C |
extern Void_t* sbrk(ptrdiff_t); |
#else |
extern Void_t* sbrk(); |
#endif |
#endif |
|
#ifndef MORECORE |
#define MORECORE sbrk |
#endif |
|
#ifndef MORECORE_FAILURE |
#define MORECORE_FAILURE -1 |
#endif |
|
#ifndef MORECORE_CLEARS |
#define MORECORE_CLEARS 1 |
#endif |
|
#endif /* INTERNAL_LINUX_C_LIB */ |
|
#if defined(INTERNAL_LINUX_C_LIB) && defined(__ELF__) |
|
#define cALLOc __libc_calloc |
#define fREe __libc_free |
#define mALLOc __libc_malloc |
#define mEMALIGn __libc_memalign |
#define rEALLOc __libc_realloc |
#define vALLOc __libc_valloc |
#define pvALLOc __libc_pvalloc |
#define mALLINFo __libc_mallinfo |
#define mALLOPt __libc_mallopt |
|
#pragma weak calloc = __libc_calloc |
#pragma weak free = __libc_free |
#pragma weak cfree = __libc_free |
#pragma weak malloc = __libc_malloc |
#pragma weak memalign = __libc_memalign |
#pragma weak realloc = __libc_realloc |
#pragma weak valloc = __libc_valloc |
#pragma weak pvalloc = __libc_pvalloc |
#pragma weak mallinfo = __libc_mallinfo |
#pragma weak mallopt = __libc_mallopt |
|
#else |
|
#ifdef INTERNAL_NEWLIB |
|
#define cALLOc _calloc_r |
#define fREe _free_r |
#define mALLOc _malloc_r |
#define mEMALIGn _memalign_r |
#define rEALLOc _realloc_r |
#define vALLOc _valloc_r |
#define pvALLOc _pvalloc_r |
#define mALLINFo _mallinfo_r |
#define mALLOPt _mallopt_r |
|
#define malloc_stats _malloc_stats_r |
#define malloc_trim _malloc_trim_r |
#define malloc_usable_size _malloc_usable_size_r |
|
#define malloc_update_mallinfo __malloc_update_mallinfo |
|
#define malloc_av_ __malloc_av_ |
#define malloc_current_mallinfo __malloc_current_mallinfo |
#define malloc_max_sbrked_mem __malloc_max_sbrked_mem |
#define malloc_max_total_mem __malloc_max_total_mem |
#define malloc_sbrk_base __malloc_sbrk_base |
#define malloc_top_pad __malloc_top_pad |
#define malloc_trim_threshold __malloc_trim_threshold |
|
#else /* ! INTERNAL_NEWLIB */ |
|
#define cALLOc calloc |
#define fREe free |
#define mALLOc malloc |
#define mEMALIGn memalign |
#define rEALLOc realloc |
#define vALLOc valloc |
#define pvALLOc pvalloc |
#define mALLINFo mallinfo |
#define mALLOPt mallopt |
|
#endif /* ! INTERNAL_NEWLIB */ |
#endif |
|
/* Public routines */ |
|
#if __STD_C |
|
Void_t* mALLOc(RARG size_t); |
void fREe(RARG Void_t*); |
Void_t* rEALLOc(RARG Void_t*, size_t); |
Void_t* mEMALIGn(RARG size_t, size_t); |
Void_t* vALLOc(RARG size_t); |
Void_t* pvALLOc(RARG size_t); |
Void_t* cALLOc(RARG size_t, size_t); |
void cfree(Void_t*); |
int malloc_trim(RARG size_t); |
size_t malloc_usable_size(RARG Void_t*); |
void malloc_stats(RONEARG); |
int mALLOPt(RARG int, int); |
struct mallinfo mALLINFo(RONEARG); |
#else |
Void_t* mALLOc(); |
void fREe(); |
Void_t* rEALLOc(); |
Void_t* mEMALIGn(); |
Void_t* vALLOc(); |
Void_t* pvALLOc(); |
Void_t* cALLOc(); |
void cfree(); |
int malloc_trim(); |
size_t malloc_usable_size(); |
void malloc_stats(); |
int mALLOPt(); |
struct mallinfo mALLINFo(); |
#endif |
|
|
#ifdef __cplusplus |
}; /* end of extern "C" */ |
#endif |
|
/* ---------- To make a malloc.h, end cutting here ------------ */ |
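|
/* |
  A minimal usage sketch of the statistics interface declared above |
  (the surrounding program is hypothetical; field names are those of |
  struct mallinfo): |
|
      struct mallinfo mi; |
      void* p = malloc(100); |
|
      mi = mallinfo(); |
      printf("arena=%d inuse=%d free=%d usable=%u\n", |
             mi.arena, mi.uordblks, mi.fordblks, |
             (unsigned) malloc_usable_size(p)); |
      free(p); |
*/ |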
|
|
/* |
Emulation of sbrk for WIN32 |
All code within the ifdef WIN32 is untested by me. |
*/ |
|
|
#ifdef WIN32 |
|
#define AlignPage(add) (((add) + (malloc_getpagesize-1)) & ~(malloc_getpagesize-1)) |
|
/* reserve 64MB to ensure large contiguous space */ |
#define RESERVED_SIZE (1024*1024*64) |
#define NEXT_SIZE (2048*1024) |
#define TOP_MEMORY ((unsigned long)2*1024*1024*1024) |
|
struct GmListElement; |
typedef struct GmListElement GmListElement; |
|
struct GmListElement |
{ |
GmListElement* next; |
void* base; |
}; |
|
static GmListElement* head = 0; |
static unsigned int gNextAddress = 0; |
static unsigned int gAddressBase = 0; |
static unsigned int gAllocatedSize = 0; |
|
static |
GmListElement* makeGmListElement (void* bas) |
{ |
GmListElement* this; |
this = (GmListElement*)(void*)LocalAlloc (0, sizeof (GmListElement)); |
ASSERT (this); |
if (this) |
{ |
this->base = bas; |
this->next = head; |
head = this; |
} |
return this; |
} |
|
void gcleanup () |
{ |
BOOL rval; |
ASSERT ( (head == NULL) || (head->base == (void*)gAddressBase)); |
if (gAddressBase && (gNextAddress - gAddressBase)) |
{ |
rval = VirtualFree ((void*)gAddressBase, |
gNextAddress - gAddressBase, |
MEM_DECOMMIT); |
ASSERT (rval); |
} |
while (head) |
{ |
GmListElement* next = head->next; |
rval = VirtualFree (head->base, 0, MEM_RELEASE); |
ASSERT (rval); |
LocalFree (head); |
head = next; |
} |
} |
|
static |
void* findRegion (void* start_address, unsigned long size) |
{ |
MEMORY_BASIC_INFORMATION info; |
while ((unsigned long)start_address < TOP_MEMORY) |
{ |
VirtualQuery (start_address, &info, sizeof (info)); |
if (info.State != MEM_FREE) |
start_address = (char*)info.BaseAddress + info.RegionSize; |
else if (info.RegionSize >= size) |
return start_address; |
else |
start_address = (char*)info.BaseAddress + info.RegionSize; |
} |
return NULL; |
|
} |
|
|
void* wsbrk (long size) |
{ |
void* tmp; |
if (size > 0) |
{ |
if (gAddressBase == 0) |
{ |
gAllocatedSize = max (RESERVED_SIZE, AlignPage (size)); |
gNextAddress = gAddressBase = |
(unsigned int)VirtualAlloc (NULL, gAllocatedSize, |
MEM_RESERVE, PAGE_NOACCESS); |
} else if (AlignPage (gNextAddress + size) > (gAddressBase + |
gAllocatedSize)) |
{ |
long new_size = max (NEXT_SIZE, AlignPage (size)); |
void* new_address = (void*)(gAddressBase+gAllocatedSize); |
do |
{ |
new_address = findRegion (new_address, new_size); |
|
if (new_address == 0) |
return (void*)-1; |
|
gAddressBase = gNextAddress = |
(unsigned int)VirtualAlloc (new_address, new_size, |
MEM_RESERVE, PAGE_NOACCESS); |
// repeat in case of race condition |
// The region that we found has been snagged |
// by another thread |
} |
while (gAddressBase == 0); |
|
ASSERT (new_address == (void*)gAddressBase); |
|
gAllocatedSize = new_size; |
|
if (!makeGmListElement ((void*)gAddressBase)) |
return (void*)-1; |
} |
if ((size + gNextAddress) > AlignPage (gNextAddress)) |
{ |
void* res; |
res = VirtualAlloc ((void*)AlignPage (gNextAddress), |
(size + gNextAddress - |
AlignPage (gNextAddress)), |
MEM_COMMIT, PAGE_READWRITE); |
if (res == 0) |
return (void*)-1; |
} |
tmp = (void*)gNextAddress; |
gNextAddress = (unsigned int)tmp + size; |
return tmp; |
} |
else if (size < 0) |
{ |
unsigned int alignedGoal = AlignPage (gNextAddress + size); |
/* Trim by releasing the virtual memory */ |
if (alignedGoal >= gAddressBase) |
{ |
VirtualFree ((void*)alignedGoal, gNextAddress - alignedGoal, |
MEM_DECOMMIT); |
gNextAddress = gNextAddress + size; |
return (void*)gNextAddress; |
} |
else |
{ |
VirtualFree ((void*)gAddressBase, gNextAddress - gAddressBase, |
MEM_DECOMMIT); |
gNextAddress = gAddressBase; |
return (void*)-1; |
} |
} |
else |
{ |
return (void*)gNextAddress; |
} |
} |
|
#endif |
|
|
|
/* |
Type declarations |
*/ |
|
|
struct malloc_chunk |
{ |
INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */ |
INTERNAL_SIZE_T size; /* Size in bytes, including overhead. */ |
struct malloc_chunk* fd; /* double links -- used only if free. */ |
struct malloc_chunk* bk; |
}; |
|
typedef struct malloc_chunk* mchunkptr; |
|
/* |
|
malloc_chunk details: |
|
(The following includes lightly edited explanations by Colin Plumb.) |
|
Chunks of memory are maintained using a `boundary tag' method as |
described in e.g., Knuth or Standish. (See the paper by Paul |
Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a |
survey of such techniques.) Sizes of free chunks are stored both |
in the front of each chunk and at the end. This makes |
consolidating fragmented chunks into bigger chunks very fast. The |
size fields also hold bits representing whether chunks are free or |
in use. |
|
An allocated chunk looks like this: |
|
|
chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Size of previous chunk, if allocated | | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Size of chunk, in bytes |P| |
mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| User data starts here... . |
. . |
. (malloc_usable_space() bytes) . |
. | |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Size of chunk | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
|
|
Where "chunk" is the front of the chunk for the purpose of most of |
the malloc code, but "mem" is the pointer that is returned to the |
user. "Nextchunk" is the beginning of the next contiguous chunk. |
|
    Chunks always begin on even word boundaries, so the mem portion |
(which is returned to the user) is also on an even word boundary, and |
thus double-word aligned. |
|
Free chunks are stored in circular doubly-linked lists, and look like this: |
|
chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Size of previous chunk | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
`head:' | Size of chunk, in bytes |P| |
mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Forward pointer to next chunk in list | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Back pointer to previous chunk in list | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Unused space (may be 0 bytes long) . |
. . |
. | |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
`foot:' | Size of chunk, in bytes | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
|
The P (PREV_INUSE) bit, stored in the unused low-order bit of the |
chunk size (which is always a multiple of two words), is an in-use |
bit for the *previous* chunk. If that bit is *clear*, then the |
word before the current chunk size contains the previous chunk |
size, and can be used to find the front of the previous chunk. |
(The very first chunk allocated always has this bit set, |
preventing access to non-existent (or non-owned) memory.) |
|
Note that the `foot' of the current chunk is actually represented |
as the prev_size of the NEXT chunk. (This makes it easier to |
deal with alignments etc). |
|
The two exceptions to all this are |
|
1. The special chunk `top', which doesn't bother using the |
trailing size field since there is no |
next contiguous chunk that would have to index off it. (After |
initialization, `top' is forced to always exist. If it would |
become less than MINSIZE bytes long, it is replenished via |
malloc_extend_top.) |
|
2. Chunks allocated via mmap, which have the second-lowest-order |
bit (IS_MMAPPED) set in their size fields. Because they are |
never merged or traversed from any other chunk, they have no |
foot size or inuse information. |
|
Available chunks are kept in any of several places (all declared below): |
|
* `av': An array of chunks serving as bin headers for consolidated |
chunks. Each bin is doubly linked. The bins are approximately |
proportionally (log) spaced. There are a lot of these bins |
(128). This may look excessive, but works very well in |
practice. All procedures maintain the invariant that no |
consolidated chunk physically borders another one. Chunks in |
bins are kept in size order, with ties going to the |
approximately least recently used chunk. |
|
The chunks in each bin are maintained in decreasing sorted order by |
size. This is irrelevant for the small bins, which all contain |
the same-sized chunks, but facilitates best-fit allocation for |
larger chunks. (These lists are just sequential. Keeping them in |
order almost never requires enough traversal to warrant using |
fancier ordered data structures.) Chunks of the same size are |
linked with the most recently freed at the front, and allocations |
are taken from the back. This results in LRU or FIFO allocation |
order, which tends to give each chunk an equal opportunity to be |
consolidated with adjacent freed chunks, resulting in larger free |
chunks and less fragmentation. |
|
* `top': The top-most available chunk (i.e., the one bordering the |
end of available memory) is treated specially. It is never |
included in any bin, is used only if no other chunk is |
available, and is released back to the system if it is very |
large (see M_TRIM_THRESHOLD). |
|
* `last_remainder': A bin holding only the remainder of the |
most recently split (non-top) chunk. This bin is checked |
before other non-fitting chunks, so as to provide better |
locality for runs of sequentially allocated chunks. |
|
* Implicitly, through the host system's memory mapping tables. |
If supported, requests greater than a threshold are usually |
serviced via calls to mmap, and then later released via munmap. |
|
*/ |
|
|
|
|
|
|
/* sizes, alignments */ |
|
#define SIZE_SZ (sizeof(INTERNAL_SIZE_T)) |
#ifndef MALLOC_ALIGNMENT |
#define MALLOC_ALIGN 8 |
#define MALLOC_ALIGNMENT (SIZE_SZ + SIZE_SZ) |
#else |
#define MALLOC_ALIGN MALLOC_ALIGNMENT |
#endif |
#define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1) |
#define MINSIZE (sizeof(struct malloc_chunk)) |
|
/* conversion from malloc headers to user pointers, and back */ |
|
#define chunk2mem(p) ((Void_t*)((char*)(p) + 2*SIZE_SZ)) |
#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ)) |
|
/* pad request bytes into a usable size */ |
|
#define request2size(req) \ |
(((long)((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) < \ |
(long)(MINSIZE + MALLOC_ALIGN_MASK)) ? ((MINSIZE + MALLOC_ALIGN_MASK) & ~(MALLOC_ALIGN_MASK)) : \ |
(((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) & ~(MALLOC_ALIGN_MASK))) |
|
/* Check if m has acceptable alignment */ |
|
#define aligned_OK(m) (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0) |
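|
/* |
  Worked example, assuming a 32-bit target where INTERNAL_SIZE_T and |
  pointers are 4 bytes (so SIZE_SZ == 4, MALLOC_ALIGN_MASK == 7 and |
  MINSIZE == 16): request2size(20) is (20 + 4 + 7) & ~7 == 24, i.e. |
  the 20 requested bytes plus the 4-byte size word, rounded up to a |
  multiple of 8; request2size(1) falls below MINSIZE and is bumped up |
  to 16, the smallest chunk that can later hold the free-list links. |
*/ |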
|
|
|
|
/* |
Physical chunk operations |
*/ |
|
|
/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */ |
|
#define PREV_INUSE 0x1 |
|
/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */ |
|
#define IS_MMAPPED 0x2 |
|
/* Bits to mask off when extracting size */ |
|
#define SIZE_BITS (PREV_INUSE|IS_MMAPPED) |
|
|
/* Ptr to next physical malloc_chunk. */ |
|
#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~PREV_INUSE) )) |
|
/* Ptr to previous physical malloc_chunk */ |
|
#define prev_chunk(p)\ |
((mchunkptr)( ((char*)(p)) - ((p)->prev_size) )) |
|
|
/* Treat space at ptr + offset as a chunk */ |
|
#define chunk_at_offset(p, s) ((mchunkptr)(((char*)(p)) + (s))) |
|
|
|
|
/* |
Dealing with use bits |
*/ |
|
/* extract p's inuse bit */ |
|
#define inuse(p)\ |
((((mchunkptr)(((char*)(p))+((p)->size & ~PREV_INUSE)))->size) & PREV_INUSE) |
|
/* extract inuse bit of previous chunk */ |
|
#define prev_inuse(p) ((p)->size & PREV_INUSE) |
|
/* check for mmap()'ed chunk */ |
|
#define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED) |
|
/* set/clear chunk as in use without otherwise disturbing */ |
|
#define set_inuse(p)\ |
((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size |= PREV_INUSE |
|
#define clear_inuse(p)\ |
((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size &= ~(PREV_INUSE) |
|
/* check/set/clear inuse bits in known places */ |
|
#define inuse_bit_at_offset(p, s)\ |
(((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE) |
|
#define set_inuse_bit_at_offset(p, s)\ |
(((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE) |
|
#define clear_inuse_bit_at_offset(p, s)\ |
(((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE)) |
|
|
|
|
/* |
Dealing with size fields |
*/ |
|
/* Get size, ignoring use bits */ |
|
#define chunksize(p) ((p)->size & ~(SIZE_BITS)) |
|
/* Set size at head, without disturbing its use bit */ |
|
#define set_head_size(p, s) ((p)->size = (((p)->size & PREV_INUSE) | (s))) |
|
/* Set size/use ignoring previous bits in header */ |
|
#define set_head(p, s) ((p)->size = (s)) |
|
/* Set size at footer (only when chunk is not in use) */ |
|
#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_size = (s)) |
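|
/* |
  Usage sketch for the macros above (illustrative, not a literal |
  excerpt): to carve an in-use piece of nb bytes off the front of a |
  free chunk p whose total size is `size', the allocation code does, |
  in essence: |
|
      mchunkptr remainder = chunk_at_offset(p, nb); |
      set_head(p, nb | PREV_INUSE); |
      set_head(remainder, (size - nb) | PREV_INUSE); |
      set_foot(remainder, size - nb); |
|
  so that p is marked in use via the remainder's PREV_INUSE bit and |
  the remainder carries matching head and foot sizes, ready for later |
  consolidation. |
*/ |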
|
|
|
|
|
/* |
Bins |
|
The bins, `av_' are an array of pairs of pointers serving as the |
heads of (initially empty) doubly-linked lists of chunks, laid out |
in a way so that each pair can be treated as if it were in a |
malloc_chunk. (This way, the fd/bk offsets for linking bin heads |
and chunks are the same). |
|
Bins for sizes < 512 bytes contain chunks of all the same size, spaced |
8 bytes apart. Larger bins are approximately logarithmically |
spaced. (See the table below.) The `av_' array is never mentioned |
directly in the code, but instead via bin access macros. |
|
Bin layout: |
|
64 bins of size 8 |
32 bins of size 64 |
16 bins of size 512 |
8 bins of size 4096 |
4 bins of size 32768 |
2 bins of size 262144 |
1 bin of size what's left |
|
There is actually a little bit of slop in the numbers in bin_index |
for the sake of speed. This makes no difference elsewhere. |
|
The special chunks `top' and `last_remainder' get their own bins, |
(this is implemented via yet more trickery with the av_ array), |
although `top' is never properly linked to its bin since it is |
always handled specially. |
|
*/ |
|
#ifdef SEPARATE_OBJECTS |
#define av_ malloc_av_ |
#endif |
|
#define NAV 128 /* number of bins */ |
|
typedef struct malloc_chunk* mbinptr; |
|
/* access macros */ |
|
#define bin_at(i) ((mbinptr)((char*)&(av_[2*(i) + 2]) - 2*SIZE_SZ)) |
#define next_bin(b) ((mbinptr)((char*)(b) + 2 * sizeof(mbinptr))) |
#define prev_bin(b) ((mbinptr)((char*)(b) - 2 * sizeof(mbinptr))) |
|
/* |
The first 2 bins are never indexed. The corresponding av_ cells are instead |
used for bookkeeping. This is not to save space, but to simplify |
indexing, maintain locality, and avoid some initialization tests. |
*/ |
|
#define top (bin_at(0)->fd) /* The topmost chunk */ |
#define last_remainder (bin_at(1)) /* remainder from last split */ |
|
|
/* |
Because top initially points to its own bin with initial |
zero size, thus forcing extension on the first malloc request, |
we avoid having any special code in malloc to check whether |
  it even exists yet. But we still need to check for this in malloc_extend_top. |
*/ |
|
#define initial_top ((mchunkptr)(bin_at(0))) |
|
/* Helper macro to initialize bins */ |
|
#define IAV(i) bin_at(i), bin_at(i) |
|
#ifdef DEFINE_MALLOC |
STATIC mbinptr av_[NAV * 2 + 2] = { |
0, 0, |
IAV(0), IAV(1), IAV(2), IAV(3), IAV(4), IAV(5), IAV(6), IAV(7), |
IAV(8), IAV(9), IAV(10), IAV(11), IAV(12), IAV(13), IAV(14), IAV(15), |
IAV(16), IAV(17), IAV(18), IAV(19), IAV(20), IAV(21), IAV(22), IAV(23), |
IAV(24), IAV(25), IAV(26), IAV(27), IAV(28), IAV(29), IAV(30), IAV(31), |
IAV(32), IAV(33), IAV(34), IAV(35), IAV(36), IAV(37), IAV(38), IAV(39), |
IAV(40), IAV(41), IAV(42), IAV(43), IAV(44), IAV(45), IAV(46), IAV(47), |
IAV(48), IAV(49), IAV(50), IAV(51), IAV(52), IAV(53), IAV(54), IAV(55), |
IAV(56), IAV(57), IAV(58), IAV(59), IAV(60), IAV(61), IAV(62), IAV(63), |
IAV(64), IAV(65), IAV(66), IAV(67), IAV(68), IAV(69), IAV(70), IAV(71), |
IAV(72), IAV(73), IAV(74), IAV(75), IAV(76), IAV(77), IAV(78), IAV(79), |
IAV(80), IAV(81), IAV(82), IAV(83), IAV(84), IAV(85), IAV(86), IAV(87), |
IAV(88), IAV(89), IAV(90), IAV(91), IAV(92), IAV(93), IAV(94), IAV(95), |
IAV(96), IAV(97), IAV(98), IAV(99), IAV(100), IAV(101), IAV(102), IAV(103), |
IAV(104), IAV(105), IAV(106), IAV(107), IAV(108), IAV(109), IAV(110), IAV(111), |
IAV(112), IAV(113), IAV(114), IAV(115), IAV(116), IAV(117), IAV(118), IAV(119), |
IAV(120), IAV(121), IAV(122), IAV(123), IAV(124), IAV(125), IAV(126), IAV(127) |
}; |
#else |
extern mbinptr av_[NAV * 2 + 2]; |
#endif |
|
|
|
/* field-extraction macros */ |
|
#define first(b) ((b)->fd) |
#define last(b) ((b)->bk) |
|
/* |
Indexing into bins |
*/ |
|
#define bin_index(sz) \ |
(((((unsigned long)(sz)) >> 9) == 0) ? (((unsigned long)(sz)) >> 3): \ |
((((unsigned long)(sz)) >> 9) <= 4) ? 56 + (((unsigned long)(sz)) >> 6): \ |
((((unsigned long)(sz)) >> 9) <= 20) ? 91 + (((unsigned long)(sz)) >> 9): \ |
((((unsigned long)(sz)) >> 9) <= 84) ? 110 + (((unsigned long)(sz)) >> 12): \ |
((((unsigned long)(sz)) >> 9) <= 340) ? 119 + (((unsigned long)(sz)) >> 15): \ |
((((unsigned long)(sz)) >> 9) <= 1364) ? 124 + (((unsigned long)(sz)) >> 18): \ |
126) |
/* |
bins for chunks < 512 are all spaced SMALLBIN_WIDTH bytes apart, and hold |
identically sized chunks. This is exploited in malloc. |
*/ |
|
#define MAX_SMALLBIN_SIZE 512 |
#define SMALLBIN_WIDTH 8 |
#define SMALLBIN_WIDTH_BITS 3 |
#define MAX_SMALLBIN (MAX_SMALLBIN_SIZE / SMALLBIN_WIDTH) - 1 |
|
#define smallbin_index(sz) (((unsigned long)(sz)) >> SMALLBIN_WIDTH_BITS) |
|
/* |
Requests are `small' if both the corresponding and the next bin are small |
*/ |
|
#define is_small_request(nb) (nb < MAX_SMALLBIN_SIZE - SMALLBIN_WIDTH) |
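|
/* |
  Worked examples of the indexing above: bin_index(40) is 40 >> 3 == 5, |
  a small bin holding only 40-byte chunks; bin_index(600) is |
  56 + (600 >> 6) == 65; bin_index(5000) is 91 + (5000 >> 9) == 100; |
  and the very largest chunks all fall into bin 126.  smallbin_index |
  is simply the first of these cases without the range test. |
*/ |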
|
|
|
/* |
To help compensate for the large number of bins, a one-level index |
structure is used for bin-by-bin searching. `binblocks' is a |
one-word bitvector recording whether groups of BINBLOCKWIDTH bins |
have any (possibly) non-empty bins, so they can be skipped over |
    all at once during traversals. The bits are NOT always |
cleared as soon as all bins in a block are empty, but instead only |
when all are noticed to be empty during traversal in malloc. |
*/ |
|
#define BINBLOCKWIDTH 4 /* bins per block */ |
|
#define binblocks (bin_at(0)->size) /* bitvector of nonempty blocks */ |
|
/* bin<->block macros */ |
|
#define idx2binblock(ix) ((unsigned long)1 << (ix / BINBLOCKWIDTH)) |
#define mark_binblock(ii) (binblocks |= idx2binblock(ii)) |
#define clear_binblock(ii) (binblocks &= ~(idx2binblock(ii))) |
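|
/* |
  For example, with BINBLOCKWIDTH == 4, bins 0..3 share bit 0 of |
  binblocks, bins 4..7 share bit 1, and so on.  mark_binblock(5) sets |
  bit 1, since idx2binblock(5) == 1 << (5 / 4) == 2, so a traversal in |
  malloc can skip bins 4..7 with a single test whenever that bit is |
  clear. |
*/ |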
|
|
|
|
|
/* Other static bookkeeping data */ |
|
#ifdef SEPARATE_OBJECTS |
#define trim_threshold malloc_trim_threshold |
#define top_pad malloc_top_pad |
#define n_mmaps_max malloc_n_mmaps_max |
#define mmap_threshold malloc_mmap_threshold |
#define sbrk_base malloc_sbrk_base |
#define max_sbrked_mem malloc_max_sbrked_mem |
#define max_total_mem malloc_max_total_mem |
#define current_mallinfo malloc_current_mallinfo |
#define n_mmaps malloc_n_mmaps |
#define max_n_mmaps malloc_max_n_mmaps |
#define mmapped_mem malloc_mmapped_mem |
#define max_mmapped_mem malloc_max_mmapped_mem |
#endif |
|
/* variables holding tunable values */ |
|
#ifdef DEFINE_MALLOC |
|
STATIC unsigned long trim_threshold = DEFAULT_TRIM_THRESHOLD; |
STATIC unsigned long top_pad = DEFAULT_TOP_PAD; |
#if HAVE_MMAP |
STATIC unsigned int n_mmaps_max = DEFAULT_MMAP_MAX; |
STATIC unsigned long mmap_threshold = DEFAULT_MMAP_THRESHOLD; |
#endif |
|
/* The first value returned from sbrk */ |
STATIC char* sbrk_base = (char*)(-1); |
|
/* The maximum memory obtained from system via sbrk */ |
STATIC unsigned long max_sbrked_mem = 0; |
|
/* The maximum via either sbrk or mmap */ |
STATIC unsigned long max_total_mem = 0; |
|
/* internal working copy of mallinfo */ |
STATIC struct mallinfo current_mallinfo = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; |
|
#if HAVE_MMAP |
|
/* Tracking mmaps */ |
|
STATIC unsigned int n_mmaps = 0; |
STATIC unsigned int max_n_mmaps = 0; |
STATIC unsigned long mmapped_mem = 0; |
STATIC unsigned long max_mmapped_mem = 0; |
|
#endif |
|
#else /* ! DEFINE_MALLOC */ |
|
extern unsigned long trim_threshold; |
extern unsigned long top_pad; |
#if HAVE_MMAP |
extern unsigned int n_mmaps_max; |
extern unsigned long mmap_threshold; |
#endif |
extern char* sbrk_base; |
extern unsigned long max_sbrked_mem; |
extern unsigned long max_total_mem; |
extern struct mallinfo current_mallinfo; |
#if HAVE_MMAP |
extern unsigned int n_mmaps; |
extern unsigned int max_n_mmaps; |
extern unsigned long mmapped_mem; |
extern unsigned long max_mmapped_mem; |
#endif |
|
#endif /* ! DEFINE_MALLOC */ |
|
/* The total memory obtained from system via sbrk */ |
#define sbrked_mem (current_mallinfo.arena) |
|
|
|
/* |
Debugging support |
*/ |
|
#if DEBUG |
|
|
/* |
These routines make a number of assertions about the states |
of data structures that should be true at all times. If any |
are not true, it's very likely that a user program has somehow |
trashed memory. (It's also possible that there is a coding error |
in malloc. In which case, please report it!) |
*/ |
|
#if __STD_C |
static void do_check_chunk(mchunkptr p) |
#else |
static void do_check_chunk(p) mchunkptr p; |
#endif |
{ |
INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE; |
|
/* No checkable chunk is mmapped */ |
assert(!chunk_is_mmapped(p)); |
|
/* Check for legal address ... */ |
assert((char*)p >= sbrk_base); |
if (p != top) |
assert((char*)p + sz <= (char*)top); |
else |
assert((char*)p + sz <= sbrk_base + sbrked_mem); |
|
} |
|
|
#if __STD_C |
static void do_check_free_chunk(mchunkptr p) |
#else |
static void do_check_free_chunk(p) mchunkptr p; |
#endif |
{ |
INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE; |
mchunkptr next = chunk_at_offset(p, sz); |
|
do_check_chunk(p); |
|
/* Check whether it claims to be free ... */ |
assert(!inuse(p)); |
|
/* Unless a special marker, must have OK fields */ |
if ((long)sz >= (long)MINSIZE) |
{ |
assert((sz & MALLOC_ALIGN_MASK) == 0); |
assert(aligned_OK(chunk2mem(p))); |
/* ... matching footer field */ |
assert(next->prev_size == sz); |
/* ... and is fully consolidated */ |
assert(prev_inuse(p)); |
assert (next == top || inuse(next)); |
|
/* ... and has minimally sane links */ |
assert(p->fd->bk == p); |
assert(p->bk->fd == p); |
} |
else /* markers are always of size SIZE_SZ */ |
assert(sz == SIZE_SZ); |
} |
|
#if __STD_C |
static void do_check_inuse_chunk(mchunkptr p) |
#else |
static void do_check_inuse_chunk(p) mchunkptr p; |
#endif |
{ |
mchunkptr next = next_chunk(p); |
do_check_chunk(p); |
|
/* Check whether it claims to be in use ... */ |
assert(inuse(p)); |
|
/* ... and is surrounded by OK chunks. |
Since more things can be checked with free chunks than inuse ones, |
if an inuse chunk borders them and debug is on, it's worth doing them. |
*/ |
if (!prev_inuse(p)) |
{ |
mchunkptr prv = prev_chunk(p); |
assert(next_chunk(prv) == p); |
do_check_free_chunk(prv); |
} |
if (next == top) |
{ |
assert(prev_inuse(next)); |
assert(chunksize(next) >= MINSIZE); |
} |
else if (!inuse(next)) |
do_check_free_chunk(next); |
|
} |
|
#if __STD_C |
static void do_check_malloced_chunk(mchunkptr p, INTERNAL_SIZE_T s) |
#else |
static void do_check_malloced_chunk(p, s) mchunkptr p; INTERNAL_SIZE_T s; |
#endif |
{ |
INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE; |
long room = long_sub_size_t(sz, s); |
|
do_check_inuse_chunk(p); |
|
/* Legal size ... */ |
assert((long)sz >= (long)MINSIZE); |
assert((sz & MALLOC_ALIGN_MASK) == 0); |
assert(room >= 0); |
assert(room < (long)MINSIZE); |
|
/* ... and alignment */ |
assert(aligned_OK(chunk2mem(p))); |
|
|
/* ... and was allocated at front of an available chunk */ |
assert(prev_inuse(p)); |
|
} |
|
|
#define check_free_chunk(P) do_check_free_chunk(P) |
#define check_inuse_chunk(P) do_check_inuse_chunk(P) |
#define check_chunk(P) do_check_chunk(P) |
#define check_malloced_chunk(P,N) do_check_malloced_chunk(P,N) |
#else |
#define check_free_chunk(P) |
#define check_inuse_chunk(P) |
#define check_chunk(P) |
#define check_malloced_chunk(P,N) |
#endif |
|
|
|
/* |
Macro-based internal utilities |
*/ |
|
|
/* |
Linking chunks in bin lists. |
Call these only with variables, not arbitrary expressions, as arguments. |
*/ |
|
/* |
Place chunk p of size s in its bin, in size order, |
putting it ahead of others of same size. |
*/ |
|
|
#define frontlink(P, S, IDX, BK, FD) \ |
{ \ |
if (S < MAX_SMALLBIN_SIZE) \ |
{ \ |
IDX = smallbin_index(S); \ |
mark_binblock(IDX); \ |
BK = bin_at(IDX); \ |
FD = BK->fd; \ |
P->bk = BK; \ |
P->fd = FD; \ |
FD->bk = BK->fd = P; \ |
} \ |
else \ |
{ \ |
IDX = bin_index(S); \ |
BK = bin_at(IDX); \ |
FD = BK->fd; \ |
if (FD == BK) mark_binblock(IDX); \ |
else \ |
{ \ |
while (FD != BK && S < chunksize(FD)) FD = FD->fd; \ |
BK = FD->bk; \ |
} \ |
P->bk = BK; \ |
P->fd = FD; \ |
FD->bk = BK->fd = P; \ |
} \ |
} |
|
|
/* take a chunk off a list */ |
|
#define unlink(P, BK, FD) \ |
{ \ |
BK = P->bk; \ |
FD = P->fd; \ |
FD->bk = BK; \ |
BK->fd = FD; \ |
} \ |
|
/* Place p as the last remainder */ |
|
#define link_last_remainder(P) \ |
{ \ |
last_remainder->fd = last_remainder->bk = P; \ |
P->fd = P->bk = last_remainder; \ |
} |
|
/* Clear the last_remainder bin */ |
|
#define clear_last_remainder \ |
(last_remainder->fd = last_remainder->bk = last_remainder) |
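|
/* |
  Usage sketch for the list macros above (illustrative; the real call |
  sites appear later in this file): to allocate from a bin, malloc |
  picks a chunk `victim' and removes it with |
|
      mchunkptr bck, fwd; |
      unlink(victim, bck, fwd); |
|
  while free() re-inserts a consolidated chunk of size sz into bin |
  idx with frontlink(p, sz, idx, bck, fwd), and the leftover from a |
  split is remembered via link_last_remainder(remainder). |
*/ |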
|
|
|
|
|
|
/* Routines dealing with mmap(). */ |
|
#if HAVE_MMAP |
|
#ifdef DEFINE_MALLOC |
|
#if __STD_C |
static mchunkptr mmap_chunk(size_t size) |
#else |
static mchunkptr mmap_chunk(size) size_t size; |
#endif |
{ |
size_t page_mask = malloc_getpagesize - 1; |
mchunkptr p; |
|
#ifndef MAP_ANONYMOUS |
static int fd = -1; |
#endif |
|
if(n_mmaps >= n_mmaps_max) return 0; /* too many regions */ |
|
/* For mmapped chunks, the overhead is one SIZE_SZ unit larger, because |
* there is no following chunk whose prev_size field could be used. |
*/ |
size = (size + SIZE_SZ + page_mask) & ~page_mask; |
|
#ifdef MAP_ANONYMOUS |
p = (mchunkptr)mmap(0, size, PROT_READ|PROT_WRITE, |
MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); |
#else /* !MAP_ANONYMOUS */ |
if (fd < 0) |
{ |
fd = open("/dev/zero", O_RDWR); |
if(fd < 0) return 0; |
} |
p = (mchunkptr)mmap(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0); |
#endif |
|
if(p == (mchunkptr)-1) return 0; |
|
n_mmaps++; |
if (n_mmaps > max_n_mmaps) max_n_mmaps = n_mmaps; |
|
/* We demand that eight bytes into a page must be 8-byte aligned. */ |
assert(aligned_OK(chunk2mem(p))); |
|
/* The offset to the start of the mmapped region is stored |
* in the prev_size field of the chunk; normally it is zero, |
* but that can be changed in memalign(). |
*/ |
p->prev_size = 0; |
set_head(p, size|IS_MMAPPED); |
|
mmapped_mem += size; |
if ((unsigned long)mmapped_mem > (unsigned long)max_mmapped_mem) |
max_mmapped_mem = mmapped_mem; |
if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem) |
max_total_mem = mmapped_mem + sbrked_mem; |
return p; |
} |
|
#endif /* DEFINE_MALLOC */ |
|
#ifdef SEPARATE_OBJECTS |
#define munmap_chunk malloc_munmap_chunk |
#endif |
|
#ifdef DEFINE_FREE |
|
#if __STD_C |
STATIC void munmap_chunk(mchunkptr p) |
#else |
STATIC void munmap_chunk(p) mchunkptr p; |
#endif |
{ |
INTERNAL_SIZE_T size = chunksize(p); |
int ret; |
|
assert (chunk_is_mmapped(p)); |
assert(! ((char*)p >= sbrk_base && (char*)p < sbrk_base + sbrked_mem)); |
assert((n_mmaps > 0)); |
assert(((p->prev_size + size) & (malloc_getpagesize-1)) == 0); |
|
n_mmaps--; |
mmapped_mem -= (size + p->prev_size); |
|
ret = munmap((char *)p - p->prev_size, size + p->prev_size); |
|
/* munmap returns non-zero on failure */ |
assert(ret == 0); |
} |
|
#else /* ! DEFINE_FREE */ |
|
#if __STD_C |
extern void munmap_chunk(mchunkptr); |
#else |
extern void munmap_chunk(); |
#endif |
|
#endif /* ! DEFINE_FREE */ |
|
#if HAVE_MREMAP |
|
#ifdef DEFINE_REALLOC |
|
#if __STD_C |
static mchunkptr mremap_chunk(mchunkptr p, size_t new_size) |
#else |
static mchunkptr mremap_chunk(p, new_size) mchunkptr p; size_t new_size; |
#endif |
{ |
size_t page_mask = malloc_getpagesize - 1; |
INTERNAL_SIZE_T offset = p->prev_size; |
INTERNAL_SIZE_T size = chunksize(p); |
char *cp; |
|
assert (chunk_is_mmapped(p)); |
assert(! ((char*)p >= sbrk_base && (char*)p < sbrk_base + sbrked_mem)); |
assert((n_mmaps > 0)); |
assert(((size + offset) & (malloc_getpagesize-1)) == 0); |
|
/* Note the extra SIZE_SZ overhead as in mmap_chunk(). */ |
new_size = (new_size + offset + SIZE_SZ + page_mask) & ~page_mask; |
|
cp = (char *)mremap((char *)p - offset, size + offset, new_size, 1); |
|
if (cp == (char *)-1) return 0; |
|
p = (mchunkptr)(cp + offset); |
|
assert(aligned_OK(chunk2mem(p))); |
|
assert((p->prev_size == offset)); |
set_head(p, (new_size - offset)|IS_MMAPPED); |
|
mmapped_mem -= size + offset; |
mmapped_mem += new_size; |
if ((unsigned long)mmapped_mem > (unsigned long)max_mmapped_mem) |
max_mmapped_mem = mmapped_mem; |
if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem) |
max_total_mem = mmapped_mem + sbrked_mem; |
return p; |
} |
|
#endif /* DEFINE_REALLOC */ |
|
#endif /* HAVE_MREMAP */ |
|
#endif /* HAVE_MMAP */ |
|
|
|
|
#ifdef DEFINE_MALLOC |
|
/* |
Extend the top-most chunk by obtaining memory from system. |
Main interface to sbrk (but see also malloc_trim). |
*/ |
|
#if __STD_C |
static void malloc_extend_top(RARG INTERNAL_SIZE_T nb) |
#else |
static void malloc_extend_top(RARG nb) RDECL INTERNAL_SIZE_T nb; |
#endif |
{ |
char* brk; /* return value from sbrk */ |
INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of sbrked space */ |
INTERNAL_SIZE_T correction; /* bytes for 2nd sbrk call */ |
char* new_brk; /* return of 2nd sbrk call */ |
INTERNAL_SIZE_T top_size; /* new size of top chunk */ |
|
mchunkptr old_top = top; /* Record state of old top */ |
INTERNAL_SIZE_T old_top_size = chunksize(old_top); |
char* old_end = (char*)(chunk_at_offset(old_top, old_top_size)); |
|
/* Pad request with top_pad plus minimal overhead */ |
|
INTERNAL_SIZE_T sbrk_size = nb + top_pad + MINSIZE; |
unsigned long pagesz = malloc_getpagesize; |
|
/* If not the first time through, round to preserve page boundary */ |
/* Otherwise, we need to correct to a page size below anyway. */ |
/* (We also correct below if an intervening foreign sbrk call.) */ |
|
if (sbrk_base != (char*)(-1)) |
sbrk_size = (sbrk_size + (pagesz - 1)) & ~(pagesz - 1); |
|
brk = (char*)(MORECORE (sbrk_size)); |
|
/* Fail if sbrk failed or if a foreign sbrk call killed our space */ |
if (brk == (char*)(MORECORE_FAILURE) || |
(brk < old_end && old_top != initial_top)) |
return; |
|
sbrked_mem += sbrk_size; |
|
if (brk == old_end) /* can just add bytes to current top */ |
{ |
top_size = sbrk_size + old_top_size; |
set_head(top, top_size | PREV_INUSE); |
} |
else |
{ |
if (sbrk_base == (char*)(-1)) /* First time through. Record base */ |
sbrk_base = brk; |
else /* Someone else called sbrk(). Count those bytes as sbrked_mem. */ |
sbrked_mem += brk - (char*)old_end; |
|
/* Guarantee alignment of first new chunk made from this space */ |
front_misalign = (POINTER_UINT)chunk2mem(brk) & MALLOC_ALIGN_MASK; |
if (front_misalign > 0) |
{ |
correction = (MALLOC_ALIGNMENT) - front_misalign; |
brk += correction; |
} |
else |
correction = 0; |
|
/* Guarantee the next brk will be at a page boundary */ |
correction += pagesz - ((POINTER_UINT)(brk + sbrk_size) & (pagesz - 1)); |
|
/* Allocate correction */ |
new_brk = (char*)(MORECORE (correction)); |
if (new_brk == (char*)(MORECORE_FAILURE)) return; |
|
sbrked_mem += correction; |
|
top = (mchunkptr)brk; |
top_size = new_brk - brk + correction; |
set_head(top, top_size | PREV_INUSE); |
|
if (old_top != initial_top) |
{ |
|
/* There must have been an intervening foreign sbrk call. */ |
/* A double fencepost is necessary to prevent consolidation */ |
|
/* If not enough space to do this, then user did something very wrong */ |
if (old_top_size < MINSIZE) |
{ |
set_head(top, PREV_INUSE); /* will force null return from malloc */ |
return; |
} |
|
/* Also keep size a multiple of MALLOC_ALIGNMENT */ |
old_top_size = (old_top_size - 3*SIZE_SZ) & ~MALLOC_ALIGN_MASK; |
set_head_size(old_top, old_top_size); |
chunk_at_offset(old_top, old_top_size )->size = |
SIZE_SZ|PREV_INUSE; |
chunk_at_offset(old_top, old_top_size + SIZE_SZ)->size = |
SIZE_SZ|PREV_INUSE; |
/* If possible, release the rest. */ |
if (old_top_size >= MINSIZE) |
fREe(RCALL chunk2mem(old_top)); |
} |
} |
|
if ((unsigned long)sbrked_mem > (unsigned long)max_sbrked_mem) |
max_sbrked_mem = sbrked_mem; |
#if HAVE_MMAP |
if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem) |
max_total_mem = mmapped_mem + sbrked_mem; |
#else |
if ((unsigned long)(sbrked_mem) > (unsigned long)max_total_mem) |
max_total_mem = sbrked_mem; |
#endif |
|
/* We always land on a page boundary */ |
assert(((unsigned long)((char*)top + top_size) & (pagesz - 1)) == 0); |
} |
|
#endif /* DEFINE_MALLOC */ |
|
|
/* Main public routines */ |
|
#ifdef DEFINE_MALLOC |
|
/* |
  Malloc Algorithm: |
|
The requested size is first converted into a usable form, `nb'. |
This currently means to add 4 bytes overhead plus possibly more to |
obtain 8-byte alignment and/or to obtain a size of at least |
MINSIZE (currently 16 bytes), the smallest allocatable size. |
(All fits are considered `exact' if they are within MINSIZE bytes.) |
|
  From there, the first of the following steps that succeeds is taken: |
|
1. The bin corresponding to the request size is scanned, and if |
a chunk of exactly the right size is found, it is taken. |
|
2. The most recently remaindered chunk is used if it is big |
enough. This is a form of (roving) first fit, used only in |
the absence of exact fits. Runs of consecutive requests use |
the remainder of the chunk used for the previous such request |
whenever possible. This limited use of a first-fit style |
allocation strategy tends to give contiguous chunks |
coextensive lifetimes, which improves locality and can reduce |
fragmentation in the long run. |
|
3. Other bins are scanned in increasing size order, using a |
chunk big enough to fulfill the request, and splitting off |
any remainder. This search is strictly by best-fit; i.e., |
the smallest (with ties going to approximately the least |
recently used) chunk that fits is selected. |
|
4. If large enough, the chunk bordering the end of memory |
       (`top') is split off.  This use of `top' is in accord with |
       the best-fit search rule: in effect, `top' is treated as |
       larger (and thus less well fitting) than any other available |
       chunk, since it can be extended to be as large as necessary |
       (up to system limitations). |
|
5. If the request size meets the mmap threshold and the |
system supports mmap, and there are few enough currently |
allocated mmapped regions, and a call to mmap succeeds, |
the request is allocated via direct memory mapping. |
|
6. Otherwise, the top of memory is extended by |
obtaining more space from the system (normally using sbrk, |
but definable to anything else via the MORECORE macro). |
Memory is gathered from the system (in system page-sized |
units) in a way that allows chunks obtained across different |
sbrk calls to be consolidated, but does not require |
contiguous memory. Thus, it should be safe to intersperse |
mallocs with other sbrk calls. |
|
|
  All allocations are made from the `lowest' part of any found |
chunk. (The implementation invariant is that prev_inuse is |
always true of any allocated chunk; i.e., that each allocated |
chunk borders either a previously allocated and still in-use chunk, |
or the base of its memory arena.) |
|
*/ |
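 |
/* Illustrative sketch (added for exposition; not part of the original |
   source): the size conversion described above behaves roughly as the |
   guarded snippet below.  SIZE_SZ, MINSIZE, MALLOC_ALIGN_MASK and |
   INTERNAL_SIZE_T are the values defined earlier in this file; the |
   block is kept under #if 0 so it is never compiled. */ |
#if 0 |
{ |
  size_t bytes = 20;                        /* user request */ |
  INTERNAL_SIZE_T nb; |
  /* add SIZE_SZ bytes of overhead and round up to the alignment */ |
  nb = (bytes + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK; |
  if (nb < MINSIZE) nb = MINSIZE;           /* enforce the minimum chunk */ |
  /* with 4-byte SIZE_SZ and 8-byte alignment, bytes == 20 gives nb == 24, |
     and any request of 12 bytes or fewer gives nb == MINSIZE (16). */ |
} |
#endif |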
|
#if __STD_C |
Void_t* mALLOc(RARG size_t bytes) |
#else |
Void_t* mALLOc(RARG bytes) RDECL size_t bytes; |
#endif |
{ |
#ifdef MALLOC_PROVIDED |
|
  return malloc (bytes); |
|
#else |
|
mchunkptr victim; /* inspected/selected chunk */ |
INTERNAL_SIZE_T victim_size; /* its size */ |
int idx; /* index for bin traversal */ |
mbinptr bin; /* associated bin */ |
mchunkptr remainder; /* remainder from a split */ |
long remainder_size; /* its size */ |
int remainder_index; /* its bin index */ |
unsigned long block; /* block traverser bit */ |
int startidx; /* first bin of a traversed block */ |
mchunkptr fwd; /* misc temp for linking */ |
mchunkptr bck; /* misc temp for linking */ |
mbinptr q; /* misc temp */ |
|
INTERNAL_SIZE_T nb = request2size(bytes); /* padded request size; */ |
|
MALLOC_LOCK; |
|
/* Check for exact match in a bin */ |
|
if (is_small_request(nb)) /* Faster version for small requests */ |
{ |
idx = smallbin_index(nb); |
|
/* No traversal or size check necessary for small bins. */ |
|
q = bin_at(idx); |
victim = last(q); |
|
#if MALLOC_ALIGN != 16 |
/* Also scan the next one, since it would have a remainder < MINSIZE */ |
if (victim == q) |
{ |
q = next_bin(q); |
victim = last(q); |
} |
#endif |
if (victim != q) |
{ |
victim_size = chunksize(victim); |
unlink(victim, bck, fwd); |
set_inuse_bit_at_offset(victim, victim_size); |
check_malloced_chunk(victim, nb); |
MALLOC_UNLOCK; |
return chunk2mem(victim); |
} |
|
idx += 2; /* Set for bin scan below. We've already scanned 2 bins. */ |
|
} |
else |
{ |
idx = bin_index(nb); |
bin = bin_at(idx); |
|
for (victim = last(bin); victim != bin; victim = victim->bk) |
{ |
victim_size = chunksize(victim); |
remainder_size = long_sub_size_t(victim_size, nb); |
|
if (remainder_size >= (long)MINSIZE) /* too big */ |
{ |
--idx; /* adjust to rescan below after checking last remainder */ |
break; |
} |
|
else if (remainder_size >= 0) /* exact fit */ |
{ |
unlink(victim, bck, fwd); |
set_inuse_bit_at_offset(victim, victim_size); |
check_malloced_chunk(victim, nb); |
MALLOC_UNLOCK; |
return chunk2mem(victim); |
} |
} |
|
++idx; |
|
} |
|
/* Try to use the last split-off remainder */ |
|
if ( (victim = last_remainder->fd) != last_remainder) |
{ |
victim_size = chunksize(victim); |
remainder_size = long_sub_size_t(victim_size, nb); |
|
if (remainder_size >= (long)MINSIZE) /* re-split */ |
{ |
remainder = chunk_at_offset(victim, nb); |
set_head(victim, nb | PREV_INUSE); |
link_last_remainder(remainder); |
set_head(remainder, remainder_size | PREV_INUSE); |
set_foot(remainder, remainder_size); |
check_malloced_chunk(victim, nb); |
MALLOC_UNLOCK; |
return chunk2mem(victim); |
} |
|
clear_last_remainder; |
|
if (remainder_size >= 0) /* exhaust */ |
{ |
set_inuse_bit_at_offset(victim, victim_size); |
check_malloced_chunk(victim, nb); |
MALLOC_UNLOCK; |
return chunk2mem(victim); |
} |
|
/* Else place in bin */ |
|
frontlink(victim, victim_size, remainder_index, bck, fwd); |
} |
|
/* |
If there are any possibly nonempty big-enough blocks, |
search for best fitting chunk by scanning bins in blockwidth units. |
*/ |
|
if ( (block = idx2binblock(idx)) <= binblocks) |
{ |
|
/* Get to the first marked block */ |
|
if ( (block & binblocks) == 0) |
{ |
/* force to an even block boundary */ |
idx = (idx & ~(BINBLOCKWIDTH - 1)) + BINBLOCKWIDTH; |
block <<= 1; |
while ((block & binblocks) == 0) |
{ |
idx += BINBLOCKWIDTH; |
block <<= 1; |
} |
} |
|
/* For each possibly nonempty block ... */ |
for (;;) |
{ |
startidx = idx; /* (track incomplete blocks) */ |
q = bin = bin_at(idx); |
|
/* For each bin in this block ... */ |
do |
{ |
/* Find and use first big enough chunk ... */ |
|
for (victim = last(bin); victim != bin; victim = victim->bk) |
{ |
victim_size = chunksize(victim); |
remainder_size = long_sub_size_t(victim_size, nb); |
|
if (remainder_size >= (long)MINSIZE) /* split */ |
{ |
remainder = chunk_at_offset(victim, nb); |
set_head(victim, nb | PREV_INUSE); |
unlink(victim, bck, fwd); |
link_last_remainder(remainder); |
set_head(remainder, remainder_size | PREV_INUSE); |
set_foot(remainder, remainder_size); |
check_malloced_chunk(victim, nb); |
MALLOC_UNLOCK; |
return chunk2mem(victim); |
} |
|
else if (remainder_size >= 0) /* take */ |
{ |
set_inuse_bit_at_offset(victim, victim_size); |
unlink(victim, bck, fwd); |
check_malloced_chunk(victim, nb); |
MALLOC_UNLOCK; |
return chunk2mem(victim); |
} |
|
} |
|
bin = next_bin(bin); |
|
#if MALLOC_ALIGN == 16 |
if (idx < MAX_SMALLBIN) |
{ |
bin = next_bin(bin); |
++idx; |
} |
#endif |
} while ((++idx & (BINBLOCKWIDTH - 1)) != 0); |
|
/* Clear out the block bit. */ |
|
do /* Possibly backtrack to try to clear a partial block */ |
{ |
if ((startidx & (BINBLOCKWIDTH - 1)) == 0) |
{ |
binblocks &= ~block; |
break; |
} |
--startidx; |
q = prev_bin(q); |
} while (first(q) == q); |
|
/* Get to the next possibly nonempty block */ |
|
if ( (block <<= 1) <= binblocks && (block != 0) ) |
{ |
while ((block & binblocks) == 0) |
{ |
idx += BINBLOCKWIDTH; |
block <<= 1; |
} |
} |
else |
break; |
} |
} |
|
|
/* Try to use top chunk */ |
|
/* Require that there be a remainder, ensuring top always exists */ |
remainder_size = long_sub_size_t(chunksize(top), nb); |
if (chunksize(top) < nb || remainder_size < (long)MINSIZE) |
{ |
|
#if HAVE_MMAP |
/* If big and would otherwise need to extend, try to use mmap instead */ |
if ((unsigned long)nb >= (unsigned long)mmap_threshold && |
(victim = mmap_chunk(nb)) != 0) |
{ |
MALLOC_UNLOCK; |
return chunk2mem(victim); |
} |
#endif |
|
/* Try to extend */ |
malloc_extend_top(RCALL nb); |
remainder_size = long_sub_size_t(chunksize(top), nb); |
if (chunksize(top) < nb || remainder_size < (long)MINSIZE) |
{ |
MALLOC_UNLOCK; |
return 0; /* propagate failure */ |
} |
} |
|
victim = top; |
set_head(victim, nb | PREV_INUSE); |
top = chunk_at_offset(victim, nb); |
set_head(top, remainder_size | PREV_INUSE); |
check_malloced_chunk(victim, nb); |
MALLOC_UNLOCK; |
return chunk2mem(victim); |
|
#endif /* MALLOC_PROVIDED */ |
} |
|
#endif /* DEFINE_MALLOC */ |
|
#ifdef DEFINE_FREE |
|
/* |
|
free() algorithm : |
|
cases: |
|
1. free(0) has no effect. |
|
    2. If the chunk was allocated via mmap, it is released via munmap(). |
|
3. If a returned chunk borders the current high end of memory, |
it is consolidated into the top, and if the total unused |
topmost memory exceeds the trim threshold, malloc_trim is |
called. |
|
4. Other chunks are consolidated as they arrive, and |
placed in corresponding bins. (This includes the case of |
consolidating with the current `last_remainder'). |
|
*/ |
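 |
/* Illustrative sketch (added for exposition; not part of the original |
   source): the consolidation cases above rely on the boundary-tag chunk |
   layout used throughout this file.  The hypothetical struct below is |
   only a reminder of that layout and is never compiled. */ |
#if 0 |
struct malloc_chunk_sketch |
{ |
  INTERNAL_SIZE_T prev_size;  /* size of the previous chunk; meaningful |
                                 only while that chunk is free, i.e. when |
                                 PREV_INUSE is clear in this size field */ |
  INTERNAL_SIZE_T size;       /* this chunk's size, with the PREV_INUSE |
                                 and IS_MMAPPED flags in the low bits */ |
  struct malloc_chunk_sketch* fd;  /* forward free-list link (free chunks) */ |
  struct malloc_chunk_sketch* bk;  /* backward free-list link (free chunks) */ |
}; |
/* Backward consolidation in cases 3 and 4 is O(1): when PREV_INUSE is |
   clear, p->prev_size gives the neighbour's size, so |
   chunk_at_offset(p, -prevsz) reaches it directly. */ |
#endif |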
|
|
#if __STD_C |
void fREe(RARG Void_t* mem) |
#else |
void fREe(RARG mem) RDECL Void_t* mem; |
#endif |
{ |
#ifdef MALLOC_PROVIDED |
|
free (mem); |
|
#else |
|
mchunkptr p; /* chunk corresponding to mem */ |
INTERNAL_SIZE_T hd; /* its head field */ |
INTERNAL_SIZE_T sz; /* its size */ |
int idx; /* its bin index */ |
mchunkptr next; /* next contiguous chunk */ |
INTERNAL_SIZE_T nextsz; /* its size */ |
INTERNAL_SIZE_T prevsz; /* size of previous contiguous chunk */ |
mchunkptr bck; /* misc temp for linking */ |
mchunkptr fwd; /* misc temp for linking */ |
int islr; /* track whether merging with last_remainder */ |
|
if (mem == 0) /* free(0) has no effect */ |
return; |
|
MALLOC_LOCK; |
|
p = mem2chunk(mem); |
hd = p->size; |
|
#if HAVE_MMAP |
if (hd & IS_MMAPPED) /* release mmapped memory. */ |
{ |
munmap_chunk(p); |
MALLOC_UNLOCK; |
return; |
} |
#endif |
|
check_inuse_chunk(p); |
|
sz = hd & ~PREV_INUSE; |
next = chunk_at_offset(p, sz); |
nextsz = chunksize(next); |
|
if (next == top) /* merge with top */ |
{ |
sz += nextsz; |
|
if (!(hd & PREV_INUSE)) /* consolidate backward */ |
{ |
prevsz = p->prev_size; |
p = chunk_at_offset(p, -prevsz); |
sz += prevsz; |
unlink(p, bck, fwd); |
} |
|
set_head(p, sz | PREV_INUSE); |
top = p; |
if ((unsigned long)(sz) >= (unsigned long)trim_threshold) |
malloc_trim(RCALL top_pad); |
MALLOC_UNLOCK; |
return; |
} |
|
set_head(next, nextsz); /* clear inuse bit */ |
|
islr = 0; |
|
if (!(hd & PREV_INUSE)) /* consolidate backward */ |
{ |
prevsz = p->prev_size; |
p = chunk_at_offset(p, -prevsz); |
sz += prevsz; |
|
if (p->fd == last_remainder) /* keep as last_remainder */ |
islr = 1; |
else |
unlink(p, bck, fwd); |
} |
|
if (!(inuse_bit_at_offset(next, nextsz))) /* consolidate forward */ |
{ |
sz += nextsz; |
|
if (!islr && next->fd == last_remainder) /* re-insert last_remainder */ |
{ |
islr = 1; |
link_last_remainder(p); |
} |
else |
unlink(next, bck, fwd); |
} |
|
|
set_head(p, sz | PREV_INUSE); |
set_foot(p, sz); |
if (!islr) |
frontlink(p, sz, idx, bck, fwd); |
|
MALLOC_UNLOCK; |
|
#endif /* MALLOC_PROVIDED */ |
} |
|
#endif /* DEFINE_FREE */ |
|
#ifdef DEFINE_REALLOC |
|
/* |
|
Realloc algorithm: |
|
Chunks that were obtained via mmap cannot be extended or shrunk |
unless HAVE_MREMAP is defined, in which case mremap is used. |
Otherwise, if their reallocation is for additional space, they are |
copied. If for less, they are just left alone. |
|
Otherwise, if the reallocation is for additional space, and the |
chunk can be extended, it is, else a malloc-copy-free sequence is |
taken. There are several different ways that a chunk could be |
extended. All are tried: |
|
* Extending forward into following adjacent free chunk. |
* Shifting backwards, joining preceding adjacent space |
* Both shifting backwards and extending forward. |
* Extending into newly sbrked space |
|
Unless the #define REALLOC_ZERO_BYTES_FREES is set, realloc with a |
size argument of zero (re)allocates a minimum-sized chunk. |
|
If the reallocation is for less space, and the new request is for |
a `small' (<512 bytes) size, then the newly unused space is lopped |
off and freed. |
|
The old unix realloc convention of allowing the last-free'd chunk |
to be used as an argument to realloc is no longer supported. |
I don't know of any programs still relying on this feature, |
and allowing it would also allow too many other incorrect |
usages of realloc to be sensible. |
|
|
*/ |
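 |
/* Usage note (added for exposition; not part of the original source): |
   because a failed reallocation returns 0 while leaving the old block |
   untouched, callers should not overwrite their only pointer with the |
   return value.  A minimal, hypothetical caller (never compiled): */ |
#if 0 |
{ |
  char* buf = (char*) malloc(100); |
  char* tmp = (char*) realloc(buf, 1000); |
  if (tmp != 0) |
    buf = tmp;     /* grown (possibly moved); old contents preserved */ |
  /* else: allocation failed, buf is still valid and still 100 bytes */ |
} |
#endif |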
|
|
#if __STD_C |
Void_t* rEALLOc(RARG Void_t* oldmem, size_t bytes) |
#else |
Void_t* rEALLOc(RARG oldmem, bytes) RDECL Void_t* oldmem; size_t bytes; |
#endif |
{ |
#ifdef MALLOC_PROVIDED |
|
  return realloc (oldmem, bytes); |
|
#else |
|
INTERNAL_SIZE_T nb; /* padded request size */ |
|
mchunkptr oldp; /* chunk corresponding to oldmem */ |
INTERNAL_SIZE_T oldsize; /* its size */ |
|
mchunkptr newp; /* chunk to return */ |
INTERNAL_SIZE_T newsize; /* its size */ |
Void_t* newmem; /* corresponding user mem */ |
|
mchunkptr next; /* next contiguous chunk after oldp */ |
INTERNAL_SIZE_T nextsize; /* its size */ |
|
mchunkptr prev; /* previous contiguous chunk before oldp */ |
INTERNAL_SIZE_T prevsize; /* its size */ |
|
mchunkptr remainder; /* holds split off extra space from newp */ |
INTERNAL_SIZE_T remainder_size; /* its size */ |
|
mchunkptr bck; /* misc temp for linking */ |
mchunkptr fwd; /* misc temp for linking */ |
|
#ifdef REALLOC_ZERO_BYTES_FREES |
if (bytes == 0) { fREe(RCALL oldmem); return 0; } |
#endif |
|
|
  /* realloc of null is supposed to be the same as malloc */ |
if (oldmem == 0) return mALLOc(RCALL bytes); |
|
MALLOC_LOCK; |
|
newp = oldp = mem2chunk(oldmem); |
newsize = oldsize = chunksize(oldp); |
|
|
nb = request2size(bytes); |
|
#if HAVE_MMAP |
if (chunk_is_mmapped(oldp)) |
{ |
#if HAVE_MREMAP |
newp = mremap_chunk(oldp, nb); |
if(newp) |
{ |
MALLOC_UNLOCK; |
return chunk2mem(newp); |
} |
#endif |
/* Note the extra SIZE_SZ overhead. */ |
if(oldsize - SIZE_SZ >= nb) |
{ |
MALLOC_UNLOCK; |
return oldmem; /* do nothing */ |
} |
/* Must alloc, copy, free. */ |
newmem = mALLOc(RCALL bytes); |
if (newmem == 0) |
{ |
MALLOC_UNLOCK; |
return 0; /* propagate failure */ |
} |
MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ); |
munmap_chunk(oldp); |
MALLOC_UNLOCK; |
return newmem; |
} |
#endif |
|
check_inuse_chunk(oldp); |
|
if ((long)(oldsize) < (long)(nb)) |
{ |
|
/* Try expanding forward */ |
|
next = chunk_at_offset(oldp, oldsize); |
if (next == top || !inuse(next)) |
{ |
nextsize = chunksize(next); |
|
/* Forward into top only if a remainder */ |
if (next == top) |
{ |
if ((long)(nextsize + newsize) >= (long)(nb + MINSIZE)) |
{ |
newsize += nextsize; |
top = chunk_at_offset(oldp, nb); |
set_head(top, (newsize - nb) | PREV_INUSE); |
set_head_size(oldp, nb); |
MALLOC_UNLOCK; |
return chunk2mem(oldp); |
} |
} |
|
/* Forward into next chunk */ |
else if (((long)(nextsize + newsize) >= (long)(nb))) |
{ |
unlink(next, bck, fwd); |
newsize += nextsize; |
goto split; |
} |
} |
else |
{ |
next = 0; |
nextsize = 0; |
} |
|
/* Try shifting backwards. */ |
|
if (!prev_inuse(oldp)) |
{ |
prev = prev_chunk(oldp); |
prevsize = chunksize(prev); |
|
/* try forward + backward first to save a later consolidation */ |
|
if (next != 0) |
{ |
/* into top */ |
if (next == top) |
{ |
if ((long)(nextsize + prevsize + newsize) >= (long)(nb + MINSIZE)) |
{ |
unlink(prev, bck, fwd); |
newp = prev; |
newsize += prevsize + nextsize; |
newmem = chunk2mem(newp); |
MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ); |
top = chunk_at_offset(newp, nb); |
set_head(top, (newsize - nb) | PREV_INUSE); |
set_head_size(newp, nb); |
MALLOC_UNLOCK; |
return newmem; |
} |
} |
|
/* into next chunk */ |
else if (((long)(nextsize + prevsize + newsize) >= (long)(nb))) |
{ |
unlink(next, bck, fwd); |
unlink(prev, bck, fwd); |
newp = prev; |
newsize += nextsize + prevsize; |
newmem = chunk2mem(newp); |
MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ); |
goto split; |
} |
} |
|
/* backward only */ |
if (prev != 0 && (long)(prevsize + newsize) >= (long)nb) |
{ |
unlink(prev, bck, fwd); |
newp = prev; |
newsize += prevsize; |
newmem = chunk2mem(newp); |
MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ); |
goto split; |
} |
} |
|
/* Must allocate */ |
|
newmem = mALLOc (RCALL bytes); |
|
if (newmem == 0) /* propagate failure */ |
{ |
MALLOC_UNLOCK; |
return 0; |
} |
|
/* Avoid copy if newp is next chunk after oldp. */ |
/* (This can only happen when new chunk is sbrk'ed.) */ |
|
if ( (newp = mem2chunk(newmem)) == next_chunk(oldp)) |
{ |
newsize += chunksize(newp); |
newp = oldp; |
goto split; |
} |
|
/* Otherwise copy, free, and exit */ |
MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ); |
fREe(RCALL oldmem); |
MALLOC_UNLOCK; |
return newmem; |
} |
|
|
split: /* split off extra room in old or expanded chunk */ |
|
remainder_size = long_sub_size_t(newsize, nb); |
|
if (remainder_size >= (long)MINSIZE) /* split off remainder */ |
{ |
remainder = chunk_at_offset(newp, nb); |
set_head_size(newp, nb); |
set_head(remainder, remainder_size | PREV_INUSE); |
set_inuse_bit_at_offset(remainder, remainder_size); |
fREe(RCALL chunk2mem(remainder)); /* let free() deal with it */ |
} |
else |
{ |
set_head_size(newp, newsize); |
set_inuse_bit_at_offset(newp, newsize); |
} |
|
check_inuse_chunk(newp); |
MALLOC_UNLOCK; |
return chunk2mem(newp); |
|
#endif /* MALLOC_PROVIDED */ |
} |
|
#endif /* DEFINE_REALLOC */ |
|
#ifdef DEFINE_MEMALIGN |
|
/* |
|
memalign algorithm: |
|
memalign requests more than enough space from malloc, finds a spot |
within that chunk that meets the alignment request, and then |
possibly frees the leading and trailing space. |
|
The alignment argument must be a power of two. This property is not |
checked by memalign, so misuse may result in random runtime errors. |
|
8-byte alignment is guaranteed by normal malloc calls, so don't |
bother calling memalign with an argument of 8 or less. |
|
Overreliance on memalign is a sure way to fragment space. |
|
*/ |
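 |
/* Usage note (added for exposition; not part of the original source): a |
   hypothetical caller asking for a 64-byte-aligned buffer.  Alignments |
   no larger than MALLOC_ALIGNMENT are simply relayed to malloc, as the |
   code below shows.  Never compiled. */ |
#if 0 |
{ |
  void* buf = memalign(64, 1024);   /* alignment must be a power of two */ |
  if (buf != 0) |
  { |
    assert(((unsigned long) buf & 63) == 0);   /* 64-byte aligned */ |
    free(buf);                      /* released like any other chunk */ |
  } |
} |
#endif |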
|
|
#if __STD_C |
Void_t* mEMALIGn(RARG size_t alignment, size_t bytes) |
#else |
Void_t* mEMALIGn(RARG alignment, bytes) RDECL size_t alignment; size_t bytes; |
#endif |
{ |
INTERNAL_SIZE_T nb; /* padded request size */ |
char* m; /* memory returned by malloc call */ |
mchunkptr p; /* corresponding chunk */ |
char* brk; /* alignment point within p */ |
mchunkptr newp; /* chunk to return */ |
INTERNAL_SIZE_T newsize; /* its size */ |
  INTERNAL_SIZE_T leadsize; /* leading space before the alignment point */ |
mchunkptr remainder; /* spare room at end to split off */ |
long remainder_size; /* its size */ |
|
/* If need less alignment than we give anyway, just relay to malloc */ |
|
if (alignment <= MALLOC_ALIGNMENT) return mALLOc(RCALL bytes); |
|
/* Otherwise, ensure that it is at least a minimum chunk size */ |
|
if (alignment < MINSIZE) alignment = MINSIZE; |
|
/* Call malloc with worst case padding to hit alignment. */ |
|
nb = request2size(bytes); |
m = (char*)(mALLOc(RCALL nb + alignment + MINSIZE)); |
|
if (m == 0) return 0; /* propagate failure */ |
|
MALLOC_LOCK; |
|
p = mem2chunk(m); |
|
if ((((unsigned long)(m)) % alignment) == 0) /* aligned */ |
{ |
#if HAVE_MMAP |
if(chunk_is_mmapped(p)) |
{ |
MALLOC_UNLOCK; |
return chunk2mem(p); /* nothing more to do */ |
} |
#endif |
} |
else /* misaligned */ |
{ |
/* |
Find an aligned spot inside chunk. |
Since we need to give back leading space in a chunk of at |
least MINSIZE, if the first calculation places us at |
a spot with less than MINSIZE leader, we can move to the |
next aligned spot -- we've allocated enough total room so that |
this is always possible. |
*/ |
|
brk = (char*)mem2chunk(((unsigned long)(m + alignment - 1)) & -alignment); |
if ((long)(brk - (char*)(p)) < (long)MINSIZE) brk = brk + alignment; |
|
newp = (mchunkptr)brk; |
leadsize = brk - (char*)(p); |
newsize = chunksize(p) - leadsize; |
|
#if HAVE_MMAP |
if(chunk_is_mmapped(p)) |
{ |
newp->prev_size = p->prev_size + leadsize; |
set_head(newp, newsize|IS_MMAPPED); |
MALLOC_UNLOCK; |
return chunk2mem(newp); |
} |
#endif |
|
/* give back leader, use the rest */ |
|
set_head(newp, newsize | PREV_INUSE); |
set_inuse_bit_at_offset(newp, newsize); |
set_head_size(p, leadsize); |
fREe(RCALL chunk2mem(p)); |
p = newp; |
|
assert (newsize >= nb && (((unsigned long)(chunk2mem(p))) % alignment) == 0); |
} |
|
/* Also give back spare room at the end */ |
|
remainder_size = long_sub_size_t(chunksize(p), nb); |
|
if (remainder_size >= (long)MINSIZE) |
{ |
remainder = chunk_at_offset(p, nb); |
set_head(remainder, remainder_size | PREV_INUSE); |
set_head_size(p, nb); |
fREe(RCALL chunk2mem(remainder)); |
} |
|
check_inuse_chunk(p); |
MALLOC_UNLOCK; |
return chunk2mem(p); |
|
} |
|
#endif /* DEFINE_MEMALIGN */ |
|
#ifdef DEFINE_VALLOC |
|
/* |
valloc just invokes memalign with alignment argument equal |
to the page size of the system (or as near to this as can |
be figured out from all the includes/defines above.) |
*/ |
|
#if __STD_C |
Void_t* vALLOc(RARG size_t bytes) |
#else |
Void_t* vALLOc(RARG bytes) RDECL size_t bytes; |
#endif |
{ |
return mEMALIGn (RCALL malloc_getpagesize, bytes); |
} |
|
#endif /* DEFINE_VALLOC */ |
|
#ifdef DEFINE_PVALLOC |
|
/* |
pvalloc just invokes valloc for the nearest pagesize |
  that will accommodate the request |
*/ |
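 |
/* Worked example (added for exposition; not part of the original source): |
   with a 4096-byte page, a request of 5000 bytes is rounded to |
   (5000 + 4095) & ~4095 = 8192, so pvalloc returns a page-aligned block |
   spanning two whole pages. */ |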
|
|
#if __STD_C |
Void_t* pvALLOc(RARG size_t bytes) |
#else |
Void_t* pvALLOc(RARG bytes) RDECL size_t bytes; |
#endif |
{ |
size_t pagesize = malloc_getpagesize; |
return mEMALIGn (RCALL pagesize, (bytes + pagesize - 1) & ~(pagesize - 1)); |
} |
|
#endif /* DEFINE_PVALLOC */ |
|
#ifdef DEFINE_CALLOC |
|
/* |
|
calloc calls malloc, then zeroes out the allocated chunk. |
|
*/ |
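 |
/* Usage note (added for exposition; not part of the original source): a |
   hypothetical caller allocating a zero-filled array.  Note that the |
   n * elem_size product computed below has no overflow check, so callers |
   must ensure it fits in a size_t.  Never compiled. */ |
#if 0 |
{ |
  int* table = (int*) calloc(128, sizeof(int));   /* 128 ints, all zero */ |
  if (table != 0) |
    free(table); |
} |
#endif |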
|
#if __STD_C |
Void_t* cALLOc(RARG size_t n, size_t elem_size) |
#else |
Void_t* cALLOc(RARG n, elem_size) RDECL size_t n; size_t elem_size; |
#endif |
{ |
mchunkptr p; |
INTERNAL_SIZE_T csz; |
|
INTERNAL_SIZE_T sz = n * elem_size; |
|
#if MORECORE_CLEARS |
mchunkptr oldtop; |
INTERNAL_SIZE_T oldtopsize; |
#endif |
Void_t* mem; |
|
/* check if expand_top called, in which case don't need to clear */ |
#if MORECORE_CLEARS |
MALLOC_LOCK; |
oldtop = top; |
oldtopsize = chunksize(top); |
#endif |
|
mem = mALLOc (RCALL sz); |
|
if (mem == 0) |
{ |
#if MORECORE_CLEARS |
MALLOC_UNLOCK; |
#endif |
return 0; |
} |
else |
{ |
p = mem2chunk(mem); |
|
/* Two optional cases in which clearing not necessary */ |
|
|
#if HAVE_MMAP |
if (chunk_is_mmapped(p)) |
{ |
#if MORECORE_CLEARS |
MALLOC_UNLOCK; |
#endif |
return mem; |
} |
#endif |
|
csz = chunksize(p); |
|
#if MORECORE_CLEARS |
if (p == oldtop && csz > oldtopsize) |
{ |
/* clear only the bytes from non-freshly-sbrked memory */ |
csz = oldtopsize; |
} |
MALLOC_UNLOCK; |
#endif |
|
MALLOC_ZERO(mem, csz - SIZE_SZ); |
return mem; |
} |
} |
|
#endif /* DEFINE_CALLOC */ |
|
#ifdef DEFINE_CFREE |
|
/* |
|
cfree just calls free. It is needed/defined on some systems |
that pair it with calloc, presumably for odd historical reasons. |
|
*/ |
|
#if !defined(INTERNAL_LINUX_C_LIB) || !defined(__ELF__) |
#if !defined(INTERNAL_NEWLIB) || !defined(_REENT_ONLY) |
#if __STD_C |
void cfree(Void_t *mem) |
#else |
void cfree(mem) Void_t *mem; |
#endif |
{ |
#ifdef INTERNAL_NEWLIB |
fREe(_REENT, mem); |
#else |
fREe(mem); |
#endif |
} |
#endif |
#endif |
|
#endif /* DEFINE_CFREE */ |
|
#ifdef DEFINE_FREE |
|
/* |
|
Malloc_trim gives memory back to the system (via negative |
arguments to sbrk) if there is unused memory at the `high' end of |
the malloc pool. You can call this after freeing large blocks of |
memory to potentially reduce the system-level memory requirements |
of a program. However, it cannot guarantee to reduce memory. Under |
some allocation patterns, some large free blocks of memory will be |
locked between two used chunks, so they cannot be given back to |
the system. |
|
The `pad' argument to malloc_trim represents the amount of free |
trailing space to leave untrimmed. If this argument is zero, |
only the minimum amount of memory to maintain internal data |
structures will be left (one page or less). Non-zero arguments |
can be supplied to maintain enough trailing space to service |
future expected allocations without having to re-obtain memory |
from the system. |
|
Malloc_trim returns 1 if it actually released any memory, else 0. |
|
*/ |
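 |
/* Usage note (added for exposition; not part of the original source): a |
   hypothetical caller handing unused top-of-heap memory back to the |
   system after releasing a large working set.  Never compiled. */ |
#if 0 |
{ |
  void* big_buffer = malloc(1024 * 1024);   /* hypothetical working set */ |
  /* ... use big_buffer ... */ |
  free(big_buffer); |
  if (malloc_trim(0))   /* 0: keep only the minimum trailing free space */ |
  { |
    /* whole pages at the top of the heap were returned via sbrk */ |
  } |
} |
#endif |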
|
#if __STD_C |
int malloc_trim(RARG size_t pad) |
#else |
int malloc_trim(RARG pad) RDECL size_t pad; |
#endif |
{ |
long top_size; /* Amount of top-most memory */ |
long extra; /* Amount to release */ |
char* current_brk; /* address returned by pre-check sbrk call */ |
char* new_brk; /* address returned by negative sbrk call */ |
|
unsigned long pagesz = malloc_getpagesize; |
|
MALLOC_LOCK; |
|
top_size = chunksize(top); |
extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz; |
|
if (extra < (long)pagesz) /* Not enough memory to release */ |
{ |
MALLOC_UNLOCK; |
return 0; |
} |
|
else |
{ |
/* Test to make sure no one else called sbrk */ |
current_brk = (char*)(MORECORE (0)); |
if (current_brk != (char*)(top) + top_size) |
{ |
MALLOC_UNLOCK; |
return 0; /* Apparently we don't own memory; must fail */ |
} |
|
else |
{ |
new_brk = (char*)(MORECORE (-extra)); |
|
if (new_brk == (char*)(MORECORE_FAILURE)) /* sbrk failed? */ |
{ |
/* Try to figure out what we have */ |
current_brk = (char*)(MORECORE (0)); |
top_size = current_brk - (char*)top; |
if (top_size >= (long)MINSIZE) /* if not, we are very very dead! */ |
{ |
sbrked_mem = current_brk - sbrk_base; |
set_head(top, top_size | PREV_INUSE); |
} |
check_chunk(top); |
MALLOC_UNLOCK; |
return 0; |
} |
|
else |
{ |
/* Success. Adjust top accordingly. */ |
set_head(top, (top_size - extra) | PREV_INUSE); |
sbrked_mem -= extra; |
check_chunk(top); |
MALLOC_UNLOCK; |
return 1; |
} |
} |
} |
} |
|
#endif /* DEFINE_FREE */ |
|
#ifdef DEFINE_MALLOC_USABLE_SIZE |
|
/* |
malloc_usable_size: |
|
This routine tells you how many bytes you can actually use in an |
allocated chunk, which may be more than you requested (although |
often not). You can use this many bytes without worrying about |
overwriting other allocated objects. Not a particularly great |
programming practice, but still sometimes useful. |
|
*/ |
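 |
/* Usage note (added for exposition; not part of the original source): the |
   usable size may exceed the request because of the padding described in |
   the malloc algorithm above.  Hypothetical caller, never compiled. */ |
#if 0 |
{ |
  void* p = malloc(5); |
  if (p != 0) |
  { |
    size_t usable = malloc_usable_size(p);  /* at least 5; typically 12 |
                                               here: a 16-byte chunk minus |
                                               4 bytes of overhead */ |
    free(p); |
  } |
} |
#endif |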
|
#if __STD_C |
size_t malloc_usable_size(RARG Void_t* mem) |
#else |
size_t malloc_usable_size(RARG mem) RDECL Void_t* mem; |
#endif |
{ |
mchunkptr p; |
if (mem == 0) |
return 0; |
else |
{ |
p = mem2chunk(mem); |
if(!chunk_is_mmapped(p)) |
{ |
if (!inuse(p)) return 0; |
#if DEBUG |
MALLOC_LOCK; |
check_inuse_chunk(p); |
MALLOC_UNLOCK; |
#endif |
return chunksize(p) - SIZE_SZ; |
} |
return chunksize(p) - 2*SIZE_SZ; |
} |
} |
|
#endif /* DEFINE_MALLOC_USABLE_SIZE */ |
|
#ifdef DEFINE_MALLINFO |
|
/* Utility to update current_mallinfo for malloc_stats and mallinfo() */ |
|
STATIC void malloc_update_mallinfo() |
{ |
int i; |
mbinptr b; |
mchunkptr p; |
#if DEBUG |
mchunkptr q; |
#endif |
|
INTERNAL_SIZE_T avail = chunksize(top); |
int navail = ((long)(avail) >= (long)MINSIZE)? 1 : 0; |
|
for (i = 1; i < NAV; ++i) |
{ |
b = bin_at(i); |
for (p = last(b); p != b; p = p->bk) |
{ |
#if DEBUG |
check_free_chunk(p); |
for (q = next_chunk(p); |
q < top && inuse(q) && (long)(chunksize(q)) >= (long)MINSIZE; |
q = next_chunk(q)) |
check_inuse_chunk(q); |
#endif |
avail += chunksize(p); |
navail++; |
} |
} |
|
current_mallinfo.ordblks = navail; |
current_mallinfo.uordblks = sbrked_mem - avail; |
current_mallinfo.fordblks = avail; |
#if HAVE_MMAP |
current_mallinfo.hblks = n_mmaps; |
current_mallinfo.hblkhd = mmapped_mem; |
#endif |
current_mallinfo.keepcost = chunksize(top); |
|
} |
|
#else /* ! DEFINE_MALLINFO */ |
|
#if __STD_C |
extern void malloc_update_mallinfo(void); |
#else |
extern void malloc_update_mallinfo(); |
#endif |
|
#endif /* ! DEFINE_MALLINFO */ |
|
#ifdef DEFINE_MALLOC_STATS |
|
/* |
|
malloc_stats: |
|
  Prints on stderr the amount of space obtained from the system (both |
via sbrk and mmap), the maximum amount (which may be more than |
current if malloc_trim and/or munmap got called), the maximum |
number of simultaneous mmap regions used, and the current number |
of bytes allocated via malloc (or realloc, etc) but not yet |
freed. (Note that this is the number of bytes allocated, not the |
number requested. It will be larger than the number requested |
because of alignment and bookkeeping overhead.) |
|
*/ |
|
#if __STD_C |
void malloc_stats(RONEARG) |
#else |
void malloc_stats(RONEARG) RDECL |
#endif |
{ |
unsigned long local_max_total_mem; |
int local_sbrked_mem; |
struct mallinfo local_mallinfo; |
#if HAVE_MMAP |
unsigned long local_mmapped_mem, local_max_n_mmaps; |
#endif |
FILE *fp; |
|
MALLOC_LOCK; |
malloc_update_mallinfo(); |
local_max_total_mem = max_total_mem; |
local_sbrked_mem = sbrked_mem; |
local_mallinfo = current_mallinfo; |
#if HAVE_MMAP |
local_mmapped_mem = mmapped_mem; |
local_max_n_mmaps = max_n_mmaps; |
#endif |
MALLOC_UNLOCK; |
|
#ifdef INTERNAL_NEWLIB |
fp = _stderr_r(reent_ptr); |
#define fprintf fiprintf |
#else |
fp = stderr; |
#endif |
|
fprintf(fp, "max system bytes = %10u\n", |
(unsigned int)(local_max_total_mem)); |
#if HAVE_MMAP |
fprintf(fp, "system bytes = %10u\n", |
(unsigned int)(local_sbrked_mem + local_mmapped_mem)); |
fprintf(fp, "in use bytes = %10u\n", |
(unsigned int)(local_mallinfo.uordblks + local_mmapped_mem)); |
#else |
fprintf(fp, "system bytes = %10u\n", |
(unsigned int)local_sbrked_mem); |
fprintf(fp, "in use bytes = %10u\n", |
(unsigned int)local_mallinfo.uordblks); |
#endif |
#if HAVE_MMAP |
fprintf(fp, "max mmap regions = %10u\n", |
(unsigned int)local_max_n_mmaps); |
#endif |
} |
|
#endif /* DEFINE_MALLOC_STATS */ |
|
#ifdef DEFINE_MALLINFO |
|
/* |
mallinfo returns a copy of updated current mallinfo. |
*/ |
|
#if __STD_C |
struct mallinfo mALLINFo(RONEARG) |
#else |
struct mallinfo mALLINFo(RONEARG) RDECL |
#endif |
{ |
struct mallinfo ret; |
|
MALLOC_LOCK; |
malloc_update_mallinfo(); |
ret = current_mallinfo; |
MALLOC_UNLOCK; |
return ret; |
} |
|
#endif /* DEFINE_MALLINFO */ |
|
#ifdef DEFINE_MALLOPT |
|
/* |
mallopt: |
|
mallopt is the general SVID/XPG interface to tunable parameters. |
The format is to provide a (parameter-number, parameter-value) pair. |
mallopt then sets the corresponding parameter to the argument |
value if it can (i.e., so long as the value is meaningful), |
and returns 1 if successful else 0. |
|
See descriptions of tunable parameters above. |
|
*/ |
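 |
/* Usage note (added for exposition; not part of the original source): |
   hypothetical tuning calls using the parameter numbers handled by the |
   switch below.  Never compiled. */ |
#if 0 |
{ |
  mallopt(M_TRIM_THRESHOLD, 256 * 1024);  /* trim only once 256K of free |
                                             space sits at the heap top */ |
  mallopt(M_TOP_PAD, 64 * 1024);          /* request 64K of extra headroom |
                                             on each sbrk extension */ |
} |
#endif |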
|
#if __STD_C |
int mALLOPt(RARG int param_number, int value) |
#else |
int mALLOPt(RARG param_number, value) RDECL int param_number; int value; |
#endif |
{ |
MALLOC_LOCK; |
switch(param_number) |
{ |
case M_TRIM_THRESHOLD: |
trim_threshold = value; MALLOC_UNLOCK; return 1; |
case M_TOP_PAD: |
top_pad = value; MALLOC_UNLOCK; return 1; |
case M_MMAP_THRESHOLD: |
#if HAVE_MMAP |
mmap_threshold = value; |
#endif |
MALLOC_UNLOCK; |
return 1; |
case M_MMAP_MAX: |
#if HAVE_MMAP |
n_mmaps_max = value; MALLOC_UNLOCK; return 1; |
#else |
MALLOC_UNLOCK; return value == 0; |
#endif |
|
default: |
MALLOC_UNLOCK; |
return 0; |
} |
} |
|
#endif /* DEFINE_MALLOPT */ |
|
/* |
|
History: |
|
V2.6.3 Sun May 19 08:17:58 1996 Doug Lea (dl at gee) |
* Added pvalloc, as recommended by H.J. Liu |
* Added 64bit pointer support mainly from Wolfram Gloger |
* Added anonymously donated WIN32 sbrk emulation |
* Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen |
* malloc_extend_top: fix mask error that caused wastage after |
foreign sbrks |
* Add linux mremap support code from HJ Liu |
|
V2.6.2 Tue Dec 5 06:52:55 1995 Doug Lea (dl at gee) |
* Integrated most documentation with the code. |
* Add support for mmap, with help from |
Wolfram Gloger (Gloger@lrz.uni-muenchen.de). |
* Use last_remainder in more cases. |
* Pack bins using idea from colin@nyx10.cs.du.edu |
    * Use ordered bins instead of best-fit threshold |
* Eliminate block-local decls to simplify tracing and debugging. |
* Support another case of realloc via move into top |
    * Fix error occurring when initial sbrk_base not word-aligned. |
* Rely on page size for units instead of SBRK_UNIT to |
avoid surprises about sbrk alignment conventions. |
* Add mallinfo, mallopt. Thanks to Raymond Nijssen |
(raymond@es.ele.tue.nl) for the suggestion. |
* Add `pad' argument to malloc_trim and top_pad mallopt parameter. |
* More precautions for cases where other routines call sbrk, |
courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de). |
* Added macros etc., allowing use in linux libc from |
H.J. Lu (hjl@gnu.ai.mit.edu) |
* Inverted this history list |
|
V2.6.1 Sat Dec 2 14:10:57 1995 Doug Lea (dl at gee) |
* Re-tuned and fixed to behave more nicely with V2.6.0 changes. |
* Removed all preallocation code since under current scheme |
the work required to undo bad preallocations exceeds |
the work saved in good cases for most test programs. |
* No longer use return list or unconsolidated bins since |
no scheme using them consistently outperforms those that don't |
given above changes. |
* Use best fit for very large chunks to prevent some worst-cases. |
* Added some support for debugging |
|
V2.6.0 Sat Nov 4 07:05:23 1995 Doug Lea (dl at gee) |
* Removed footers when chunks are in use. Thanks to |
Paul Wilson (wilson@cs.texas.edu) for the suggestion. |
|
V2.5.4 Wed Nov 1 07:54:51 1995 Doug Lea (dl at gee) |
* Added malloc_trim, with help from Wolfram Gloger |
(wmglo@Dent.MED.Uni-Muenchen.DE). |
|
V2.5.3 Tue Apr 26 10:16:01 1994 Doug Lea (dl at g) |
|
V2.5.2 Tue Apr 5 16:20:40 1994 Doug Lea (dl at g) |
* realloc: try to expand in both directions |
* malloc: swap order of clean-bin strategy; |
* realloc: only conditionally expand backwards |
* Try not to scavenge used bins |
* Use bin counts as a guide to preallocation |
* Occasionally bin return list chunks in first scan |
* Add a few optimizations from colin@nyx10.cs.du.edu |
|
V2.5.1 Sat Aug 14 15:40:43 1993 Doug Lea (dl at g) |
* faster bin computation & slightly different binning |
* merged all consolidations to one part of malloc proper |
(eliminating old malloc_find_space & malloc_clean_bin) |
* Scan 2 returns chunks (not just 1) |
* Propagate failure in realloc if malloc returns 0 |
* Add stuff to allow compilation on non-ANSI compilers |
from kpv@research.att.com |
|
V2.5 Sat Aug 7 07:41:59 1993 Doug Lea (dl at g.oswego.edu) |
* removed potential for odd address access in prev_chunk |
* removed dependency on getpagesize.h |
* misc cosmetics and a bit more internal documentation |
* anticosmetics: mangled names in macros to evade debugger strangeness |
* tested on sparc, hp-700, dec-mips, rs6000 |
with gcc & native cc (hp, dec only) allowing |
Detlefs & Zorn comparison study (in SIGPLAN Notices.) |
|
Trial version Fri Aug 28 13:14:29 1992 Doug Lea (dl at g.oswego.edu) |
* Based loosely on libg++-1.2X malloc. (It retains some of the overall |
structure of old version, but most details differ.) |
|
*/ |
|
/common/v2_0/doc/dlmalloc/dlmalloc-merged.c
0,0 → 1,3753
/* ---------- To make a malloc.h, start cutting here ------------ */ |
|
/* |
A version of malloc/free/realloc written by Doug Lea and released to the |
public domain. Send questions/comments/complaints/performance data |
to dl@cs.oswego.edu |
|
* VERSION 2.6.6 Sun Mar 5 19:10:03 2000 Doug Lea (dl at gee) |
|
Note: There may be an updated version of this malloc obtainable at |
ftp://g.oswego.edu/pub/misc/malloc.c |
Check before installing! |
|
* Why use this malloc? |
|
This is not the fastest, most space-conserving, most portable, or |
most tunable malloc ever written. However it is among the fastest |
while also being among the most space-conserving, portable and tunable. |
Consistent balance across these factors results in a good general-purpose |
allocator. For a high-level description, see |
http://g.oswego.edu/dl/html/malloc.html |
|
* Synopsis of public routines |
|
(Much fuller descriptions are contained in the program documentation below.) |
|
malloc(size_t n); |
Return a pointer to a newly allocated chunk of at least n bytes, or null |
if no space is available. |
free(Void_t* p); |
Release the chunk of memory pointed to by p, or no effect if p is null. |
realloc(Void_t* p, size_t n); |
Return a pointer to a chunk of size n that contains the same data |
as does chunk p up to the minimum of (n, p's size) bytes, or null |
if no space is available. The returned pointer may or may not be |
the same as p. If p is null, equivalent to malloc. Unless the |
#define REALLOC_ZERO_BYTES_FREES below is set, realloc with a |
size argument of zero (re)allocates a minimum-sized chunk. |
memalign(size_t alignment, size_t n); |
Return a pointer to a newly allocated chunk of n bytes, aligned |
in accord with the alignment argument, which must be a power of |
two. |
valloc(size_t n); |
Equivalent to memalign(pagesize, n), where pagesize is the page |
size of the system (or as near to this as can be figured out from |
all the includes/defines below.) |
pvalloc(size_t n); |
Equivalent to valloc(minimum-page-that-holds(n)), that is, |
round up n to nearest pagesize. |
calloc(size_t unit, size_t quantity); |
Returns a pointer to quantity * unit bytes, with all locations |
set to zero. |
cfree(Void_t* p); |
Equivalent to free(p). |
malloc_trim(size_t pad); |
Release all but pad bytes of freed top-most memory back |
to the system. Return 1 if successful, else 0. |
malloc_usable_size(Void_t* p); |
     Report the number of usable allocated bytes associated with allocated |
chunk p. This may or may not report more bytes than were requested, |
due to alignment and minimum size constraints. |
malloc_stats(); |
Prints brief summary statistics on stderr. |
mallinfo() |
Returns (by copy) a struct containing various summary statistics. |
mallopt(int parameter_number, int parameter_value) |
Changes one of the tunable parameters described below. Returns |
1 if successful in changing the parameter, else 0. |
|
* Vital statistics: |
|
Alignment: 8-byte |
8 byte alignment is currently hardwired into the design. This |
seems to suffice for all current machines and C compilers. |
|
Assumed pointer representation: 4 or 8 bytes |
       Code for 8-byte pointers is untested by me but has been reported |
       to work reliably by Wolfram Gloger, who contributed most of the |
changes supporting this. |
|
Assumed size_t representation: 4 or 8 bytes |
Note that size_t is allowed to be 4 bytes even if pointers are 8. |
|
Minimum overhead per allocated chunk: 4 or 8 bytes |
Each malloced chunk has a hidden overhead of 4 bytes holding size |
and status information. |
|
Minimum allocated size: 4-byte ptrs: 16 bytes (including 4 overhead) |
                          8-byte ptrs:  24/32 bytes (including 4/8 overhead) |
|
When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte |
ptrs but 4 byte size) or 24 (for 8/8) additional bytes are |
needed; 4 (8) for a trailing size field |
and 8 (16) bytes for free list pointers. Thus, the minimum |
allocatable size is 16/24/32 bytes. |
|
Even a request for zero bytes (i.e., malloc(0)) returns a |
pointer to something of the minimum allocatable size. |
|
Maximum allocated size: 4-byte size_t: 2^31 - 8 bytes |
8-byte size_t: 2^63 - 16 bytes |
|
It is assumed that (possibly signed) size_t bit values suffice to |
represent chunk sizes. `Possibly signed' is due to the fact |
that `size_t' may be defined on a system as either a signed or |
an unsigned type. To be conservative, values that would appear |
as negative numbers are avoided. |
Requests for sizes with a negative sign bit when the request |
  size is treated as a long will return null. |
|
Maximum overhead wastage per allocated chunk: normally 15 bytes |
|
  Alignment demands, plus the minimum allocatable size restriction |
make the normal worst-case wastage 15 bytes (i.e., up to 15 |
more bytes will be allocated than were requested in malloc), with |
two exceptions: |
1. Because requests for zero bytes allocate non-zero space, |
the worst case wastage for a request of zero bytes is 24 bytes. |
2. For requests >= mmap_threshold that are serviced via |
mmap(), the worst case wastage is 8 bytes plus the remainder |
from a system page (the minimal mmap unit); typically 4096 bytes. |
|
* Limitations |
|
Here are some features that are NOT currently supported |
|
* No user-definable hooks for callbacks and the like. |
* No automated mechanism for fully checking that all accesses |
to malloced memory stay within their bounds. |
* No support for compaction. |
|
* Synopsis of compile-time options: |
|
People have reported using previous versions of this malloc on all |
versions of Unix, sometimes by tweaking some of the defines |
below. It has been tested most extensively on Solaris and |
Linux. It is also reported to work on WIN32 platforms. |
People have also reported adapting this malloc for use in |
stand-alone embedded systems. |
|
The implementation is in straight, hand-tuned ANSI C. Among other |
consequences, it uses a lot of macros. Because of this, to be at |
all usable, this code should be compiled using an optimizing compiler |
(for example gcc -O2) that can simplify expressions and control |
paths. |
|
__STD_C (default: derived from C compiler defines) |
Nonzero if using ANSI-standard C compiler, a C++ compiler, or |
a C compiler sufficiently close to ANSI to get away with it. |
DEBUG (default: NOT defined) |
Define to enable debugging. Adds fairly extensive assertion-based |
checking to help track down memory errors, but noticeably slows down |
execution. |
SEPARATE_OBJECTS (default: NOT defined) |
Define this to compile into separate .o files. You must then |
compile malloc.c several times, defining a DEFINE_* macro each |
time. The list of DEFINE_* macros appears below. |
MALLOC_LOCK (default: NOT defined) |
MALLOC_UNLOCK (default: NOT defined) |
Define these to C expressions which are run to lock and unlock |
the malloc data structures. Calls may be nested; that is, |
MALLOC_LOCK may be called more than once before the corresponding |
MALLOC_UNLOCK calls. MALLOC_LOCK must avoid waiting for a lock |
that it already holds. |
MALLOC_ALIGNMENT (default: NOT defined) |
Define this to 16 if you need 16 byte alignment instead of 8 byte alignment |
which is the normal default. |
SIZE_T_SMALLER_THAN_LONG (default: NOT defined) |
    Define this when the platform you are compiling for has sizeof(long) > sizeof(size_t). |
The option causes some extra code to be generated to handle operations |
that use size_t operands and have long results. |
REALLOC_ZERO_BYTES_FREES (default: NOT defined) |
Define this if you think that realloc(p, 0) should be equivalent |
to free(p). Otherwise, since malloc returns a unique pointer for |
malloc(0), so does realloc(p, 0). |
HAVE_MEMCPY (default: defined) |
Define if you are not otherwise using ANSI STD C, but still |
have memcpy and memset in your C library and want to use them. |
Otherwise, simple internal versions are supplied. |
USE_MEMCPY (default: 1 if HAVE_MEMCPY is defined, 0 otherwise) |
Define as 1 if you want the C library versions of memset and |
memcpy called in realloc and calloc (otherwise macro versions are used). |
At least on some platforms, the simple macro versions usually |
outperform libc versions. |
HAVE_MMAP (default: defined as 1) |
Define to non-zero to optionally make malloc() use mmap() to |
allocate very large blocks. |
HAVE_MREMAP (default: defined as 0 unless Linux libc set) |
Define to non-zero to optionally make realloc() use mremap() to |
reallocate very large blocks. |
malloc_getpagesize (default: derived from system #includes) |
Either a constant or routine call returning the system page size. |
HAVE_USR_INCLUDE_MALLOC_H (default: NOT defined) |
Optionally define if you are on a system with a /usr/include/malloc.h |
that declares struct mallinfo. It is not at all necessary to |
define this even if you do, but will ensure consistency. |
INTERNAL_SIZE_T (default: size_t) |
Define to a 32-bit type (probably `unsigned int') if you are on a |
64-bit machine, yet do not want or need to allow malloc requests of |
greater than 2^31 to be handled. This saves space, especially for |
very small chunks. |
INTERNAL_LINUX_C_LIB (default: NOT defined) |
Defined only when compiled as part of Linux libc. |
Also note that there is some odd internal name-mangling via defines |
(for example, internally, `malloc' is named `mALLOc') needed |
when compiling in this case. These look funny but don't otherwise |
affect anything. |
INTERNAL_NEWLIB (default: NOT defined) |
Defined only when compiled as part of the Cygnus newlib |
distribution. |
WIN32 (default: undefined) |
Define this on MS win (95, nt) platforms to compile in sbrk emulation. |
LACKS_UNISTD_H (default: undefined if not WIN32) |
Define this if your system does not have a <unistd.h>. |
LACKS_SYS_PARAM_H (default: undefined if not WIN32) |
Define this if your system does not have a <sys/param.h>. |
MORECORE (default: sbrk) |
The name of the routine to call to obtain more memory from the system. |
MORECORE_FAILURE (default: -1) |
The value returned upon failure of MORECORE. |
MORECORE_CLEARS (default 1) |
True (1) if the routine mapped to MORECORE zeroes out memory (which |
holds for sbrk). |
DEFAULT_TRIM_THRESHOLD |
DEFAULT_TOP_PAD |
DEFAULT_MMAP_THRESHOLD |
DEFAULT_MMAP_MAX |
Default values of tunable parameters (described in detail below) |
controlling interaction with host system routines (sbrk, mmap, etc). |
These values may also be changed dynamically via mallopt(). The |
preset defaults are those that give best performance for typical |
programs/systems. |
USE_DL_PREFIX (default: undefined) |
Prefix all public routines with the string 'dl'. Useful to |
quickly avoid procedure declaration conflicts and linker symbol |
conflicts with existing memory allocation routines. |
|
|
*/ |
|
|
|
|
/* Preliminaries */ |
|
#ifndef __STD_C |
#ifdef __STDC__ |
#define __STD_C 1 |
#else |
#if __cplusplus |
#define __STD_C 1 |
#else |
#define __STD_C 0 |
#endif /*__cplusplus*/ |
#endif /*__STDC__*/ |
#endif /*__STD_C*/ |
|
#ifndef Void_t |
#if (__STD_C || defined(WIN32)) |
#define Void_t void |
#else |
#define Void_t char |
#endif |
#endif /*Void_t*/ |
|
#if __STD_C |
#include <stddef.h> /* for size_t */ |
#else |
#include <sys/types.h> |
#endif |
|
#ifdef __cplusplus |
extern "C" { |
#endif |
|
#include <stdio.h> /* needed for malloc_stats */ |
|
|
/* |
Compile-time options |
*/ |
|
|
/* |
|
Special defines for Cygnus newlib distribution. |
|
*/ |
|
#ifdef INTERNAL_NEWLIB |
|
#include <sys/config.h> |
|
/* |
  In newlib, all the publicly visible routines take a reentrancy |
pointer. We don't currently do anything much with it, but we do |
pass it to the lock routine. |
*/ |
|
#include <reent.h> |
|
#define POINTER_UINT unsigned _POINTER_INT |
#define SEPARATE_OBJECTS |
#define HAVE_MMAP 0 |
#define MORECORE(size) _sbrk_r(reent_ptr, (size)) |
#define MORECORE_CLEARS 0 |
#define MALLOC_LOCK __malloc_lock(reent_ptr) |
#define MALLOC_UNLOCK __malloc_unlock(reent_ptr) |
|
#ifndef _WIN32 |
#ifdef SMALL_MEMORY |
#define malloc_getpagesize (128) |
#else |
#define malloc_getpagesize (4096) |
#endif |
#endif |
|
#if __STD_C |
extern void __malloc_lock(struct _reent *); |
extern void __malloc_unlock(struct _reent *); |
#else |
extern void __malloc_lock(); |
extern void __malloc_unlock(); |
#endif |
|
#if __STD_C |
#define RARG struct _reent *reent_ptr, |
#define RONEARG struct _reent *reent_ptr |
#else |
#define RARG reent_ptr |
#define RONEARG reent_ptr |
#define RDECL struct _reent *reent_ptr; |
#endif |
|
#define RCALL reent_ptr, |
#define RONECALL reent_ptr |
|
#else /* ! INTERNAL_NEWLIB */ |
|
#define POINTER_UINT unsigned long |
#define RARG |
#define RONEARG |
#define RDECL |
#define RCALL |
#define RONECALL |
|
#endif /* ! INTERNAL_NEWLIB */ |
|
/* |
Debugging: |
|
Because freed chunks may be overwritten with link fields, this |
malloc will often die when freed memory is overwritten by user |
programs. This can be very effective (albeit in an annoying way) |
in helping track down dangling pointers. |
|
If you compile with -DDEBUG, a number of assertion checks are |
enabled that will catch more memory errors. You probably won't be |
able to make much sense of the actual assertion errors, but they |
should help you locate incorrectly overwritten memory. The |
checking is fairly extensive, and will slow down execution |
noticeably. Calling malloc_stats or mallinfo with DEBUG set will |
attempt to check every non-mmapped allocated and free chunk in the |
  course of computing the summaries.  (By nature, mmapped regions |
cannot be checked very much automatically.) |
|
Setting DEBUG may also be helpful if you are trying to modify |
this code. The assertions in the check routines spell out in more |
detail the assumptions and invariants underlying the algorithms. |
|
*/ |
|
#if DEBUG |
#include <assert.h> |
#else |
#define assert(x) ((void)0) |
#endif |
|
|
/* |
SEPARATE_OBJECTS should be defined if you want each function to go |
into a separate .o file. You must then compile malloc.c once per |
function, defining the appropriate DEFINE_ macro. See below for the |
list of macros. |
*/ |
|
#ifndef SEPARATE_OBJECTS |
#define DEFINE_MALLOC |
#define DEFINE_FREE |
#define DEFINE_REALLOC |
#define DEFINE_CALLOC |
#define DEFINE_CFREE |
#define DEFINE_MEMALIGN |
#define DEFINE_VALLOC |
#define DEFINE_PVALLOC |
#define DEFINE_MALLINFO |
#define DEFINE_MALLOC_STATS |
#define DEFINE_MALLOC_USABLE_SIZE |
#define DEFINE_MALLOPT |
|
#define STATIC static |
#else |
#define STATIC |
#endif |
|
/* |
Define MALLOC_LOCK and MALLOC_UNLOCK to C expressions to run to |
lock and unlock the malloc data structures. MALLOC_LOCK may be |
called recursively. |
*/ |
|
#ifndef MALLOC_LOCK |
#define MALLOC_LOCK |
#endif |
|
#ifndef MALLOC_UNLOCK |
#define MALLOC_UNLOCK |
#endif |
|
/* |
INTERNAL_SIZE_T is the word-size used for internal bookkeeping |
of chunk sizes. On a 64-bit machine, you can reduce malloc |
overhead by defining INTERNAL_SIZE_T to be a 32 bit `unsigned int' |
at the expense of not being able to handle requests greater than |
2^31. This limitation is hardly ever a concern; you are encouraged |
to set this. However, the default version is the same as size_t. |
*/ |
|
#ifndef INTERNAL_SIZE_T |
#define INTERNAL_SIZE_T size_t |
#endif |
|
/* |
  The following is needed on implementations where long > size_t. |
The problem is caused because the code performs subtractions of |
size_t values and stores the result in long values. In the case |
where long > size_t and the first value is actually less than |
the second value, the resultant value is positive. For example, |
(long)(x - y) where x = 0 and y is 1 ends up being 0x00000000FFFFFFFF |
  which is 2^32 - 1 instead of 0xFFFFFFFFFFFFFFFF. This is due to the |
fact that assignment from unsigned to signed won't sign extend. |
*/ |
|
#ifdef SIZE_T_SMALLER_THAN_LONG |
#define long_sub_size_t(x, y) ( (x < y) ? -((long)(y - x)) : (x - y) ); |
#else |
#define long_sub_size_t(x, y) ( (long)(x - y) ) |
#endif |
|
/* |
REALLOC_ZERO_BYTES_FREES should be set if a call to |
realloc with zero bytes should be the same as a call to free. |
Some people think it should. Otherwise, since this malloc |
returns a unique pointer for malloc(0), so does realloc(p, 0). |
*/ |
|
|
/* #define REALLOC_ZERO_BYTES_FREES */ |
|
|
/* |
WIN32 causes an emulation of sbrk to be compiled in |
mmap-based options are not currently supported in WIN32. |
*/ |
|
/* #define WIN32 */ |
#ifdef WIN32 |
#define MORECORE wsbrk |
#define HAVE_MMAP 0 |
|
#define LACKS_UNISTD_H |
#define LACKS_SYS_PARAM_H |
|
/* |
Include 'windows.h' to get the necessary declarations for the |
Microsoft Visual C++ data structures and routines used in the 'sbrk' |
emulation. |
|
Define WIN32_LEAN_AND_MEAN so that only the essential Microsoft |
Visual C++ header files are included. |
*/ |
#define WIN32_LEAN_AND_MEAN |
#include <windows.h> |
#endif |
|
|
/* |
HAVE_MEMCPY should be defined if you are not otherwise using |
ANSI STD C, but still have memcpy and memset in your C library |
and want to use them in calloc and realloc. Otherwise simple |
macro versions are defined here. |
|
USE_MEMCPY should be defined as 1 if you actually want to |
have memset and memcpy called. People report that the macro |
versions are often enough faster than libc versions on many |
systems that it is better to use them. |
|
*/ |
|
#define HAVE_MEMCPY |
|
#ifndef USE_MEMCPY |
#ifdef HAVE_MEMCPY |
#define USE_MEMCPY 1 |
#else |
#define USE_MEMCPY 0 |
#endif |
#endif |
|
#if (__STD_C || defined(HAVE_MEMCPY)) |
|
#if __STD_C |
void* memset(void*, int, size_t); |
void* memcpy(void*, const void*, size_t); |
#else |
#ifdef WIN32 |
// On Win32 platforms, 'memset()' and 'memcpy()' are already declared in |
// 'windows.h' |
#else |
Void_t* memset(); |
Void_t* memcpy(); |
#endif |
#endif |
#endif |
|
#if USE_MEMCPY |
|
/* The following macros are only invoked with (2n+1)-multiples of |
INTERNAL_SIZE_T units, with a positive integer n. This is exploited |
for fast inline execution when n is small. */ |
|
#define MALLOC_ZERO(charp, nbytes) \ |
do { \ |
INTERNAL_SIZE_T mzsz = (nbytes); \ |
if(mzsz <= 9*sizeof(mzsz)) { \ |
INTERNAL_SIZE_T* mz = (INTERNAL_SIZE_T*) (charp); \ |
if(mzsz >= 5*sizeof(mzsz)) { *mz++ = 0; \ |
*mz++ = 0; \ |
if(mzsz >= 7*sizeof(mzsz)) { *mz++ = 0; \ |
*mz++ = 0; \ |
if(mzsz >= 9*sizeof(mzsz)) { *mz++ = 0; \ |
*mz++ = 0; }}} \ |
*mz++ = 0; \ |
*mz++ = 0; \ |
*mz = 0; \ |
} else memset((charp), 0, mzsz); \ |
} while(0) |
|
#define MALLOC_COPY(dest,src,nbytes) \ |
do { \ |
INTERNAL_SIZE_T mcsz = (nbytes); \ |
if(mcsz <= 9*sizeof(mcsz)) { \ |
INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) (src); \ |
INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) (dest); \ |
if(mcsz >= 5*sizeof(mcsz)) { *mcdst++ = *mcsrc++; \ |
*mcdst++ = *mcsrc++; \ |
if(mcsz >= 7*sizeof(mcsz)) { *mcdst++ = *mcsrc++; \ |
*mcdst++ = *mcsrc++; \ |
if(mcsz >= 9*sizeof(mcsz)) { *mcdst++ = *mcsrc++; \ |
*mcdst++ = *mcsrc++; }}} \ |
*mcdst++ = *mcsrc++; \ |
*mcdst++ = *mcsrc++; \ |
*mcdst = *mcsrc ; \ |
} else memcpy(dest, src, mcsz); \ |
} while(0) |
|
#else /* !USE_MEMCPY */ |
|
/* Use Duff's device for good zeroing/copying performance. */ |
|
#define MALLOC_ZERO(charp, nbytes) \ |
do { \ |
INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp); \ |
long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T), mcn; \ |
if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \ |
switch (mctmp) { \ |
case 0: for(;;) { *mzp++ = 0; \ |
case 7: *mzp++ = 0; \ |
case 6: *mzp++ = 0; \ |
case 5: *mzp++ = 0; \ |
case 4: *mzp++ = 0; \ |
case 3: *mzp++ = 0; \ |
case 2: *mzp++ = 0; \ |
case 1: *mzp++ = 0; if(mcn <= 0) break; mcn--; } \ |
} \ |
} while(0) |
|
#define MALLOC_COPY(dest,src,nbytes) \ |
do { \ |
INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src; \ |
INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest; \ |
long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T), mcn; \ |
if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \ |
switch (mctmp) { \ |
case 0: for(;;) { *mcdst++ = *mcsrc++; \ |
case 7: *mcdst++ = *mcsrc++; \ |
case 6: *mcdst++ = *mcsrc++; \ |
case 5: *mcdst++ = *mcsrc++; \ |
case 4: *mcdst++ = *mcsrc++; \ |
case 3: *mcdst++ = *mcsrc++; \ |
case 2: *mcdst++ = *mcsrc++; \ |
case 1: *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; } \ |
} \ |
} while(0) |
|
#endif |
|
|
/* |
Define HAVE_MMAP to optionally make malloc() use mmap() to |
allocate very large blocks. These will be returned to the |
operating system immediately after a free(). |
*/ |
|
#ifndef HAVE_MMAP |
#define HAVE_MMAP 1 |
#endif |
|
/* |
Define HAVE_MREMAP to make realloc() use mremap() to re-allocate |
large blocks. This is currently only possible on Linux with |
kernel versions newer than 1.3.77. |
*/ |
|
#ifndef HAVE_MREMAP |
#ifdef INTERNAL_LINUX_C_LIB |
#define HAVE_MREMAP 1 |
#else |
#define HAVE_MREMAP 0 |
#endif |
#endif |
|
#if HAVE_MMAP |
|
#include <unistd.h> |
#include <fcntl.h> |
#include <sys/mman.h> |
|
#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON) |
#define MAP_ANONYMOUS MAP_ANON |
#endif |
|
#endif /* HAVE_MMAP */ |
|
/* |
Access to system page size. To the extent possible, this malloc |
manages memory from the system in page-size units. |
|
The following mechanics for getpagesize were adapted from |
bsd/gnu getpagesize.h |
*/ |
|
#ifndef LACKS_UNISTD_H |
# include <unistd.h> |
#endif |
|
#ifndef malloc_getpagesize |
# ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */ |
# ifndef _SC_PAGE_SIZE |
# define _SC_PAGE_SIZE _SC_PAGESIZE |
# endif |
# endif |
# ifdef _SC_PAGE_SIZE |
# define malloc_getpagesize sysconf(_SC_PAGE_SIZE) |
# else |
# if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE) |
extern size_t getpagesize(); |
# define malloc_getpagesize getpagesize() |
# else |
# ifdef WIN32 |
# define malloc_getpagesize (4096) /* TBD: Use 'GetSystemInfo' instead */ |
# else |
# ifndef LACKS_SYS_PARAM_H |
# include <sys/param.h> |
# endif |
# ifdef EXEC_PAGESIZE |
# define malloc_getpagesize EXEC_PAGESIZE |
# else |
# ifdef NBPG |
# ifndef CLSIZE |
# define malloc_getpagesize NBPG |
# else |
# define malloc_getpagesize (NBPG * CLSIZE) |
# endif |
# else |
# ifdef NBPC |
# define malloc_getpagesize NBPC |
# else |
# ifdef PAGESIZE |
# define malloc_getpagesize PAGESIZE |
# else |
# define malloc_getpagesize (4096) /* just guess */ |
# endif |
# endif |
# endif |
# endif |
# endif |
# endif |
# endif |
#endif |
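 |
/* Whichever definition wins above, the value is used below only for |
   page-size rounding and alignment checks; an illustrative sketch of the |
   idiom used in mmap_chunk() and malloc_extend_top(): |
 |
       size_t page_mask = malloc_getpagesize - 1; |
       size = (size + page_mask) & ~page_mask;    (round up to a whole page) |
*/ |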
|
|
|
/* |
|
This version of malloc supports the standard SVID/XPG mallinfo |
routine that returns a struct containing the same kind of |
information you can get from malloc_stats. It should work on |
any SVID/XPG compliant system that has a /usr/include/malloc.h |
defining struct mallinfo. (If you'd like to install such a thing |
yourself, cut out the preliminary declarations as described above |
and below and save them in a malloc.h file. But there's no |
compelling reason to bother to do this.) |
|
The main declaration needed is the mallinfo struct that is returned |
(by-copy) by mallinfo(). The SVID/XPG mallinfo struct contains a |
bunch of fields, most of which are not even meaningful in this |
version of malloc. Some of these fields are instead filled by |
mallinfo() with other numbers that might possibly be of interest. |
|
HAVE_USR_INCLUDE_MALLOC_H should be set if you have a |
/usr/include/malloc.h file that includes a declaration of struct |
mallinfo. If so, it is included; else an SVID2/XPG2 compliant |
version is declared below. These must be precisely the same for |
mallinfo() to work. |
|
*/ |
|
/* #define HAVE_USR_INCLUDE_MALLOC_H */ |
|
#if HAVE_USR_INCLUDE_MALLOC_H |
#include "/usr/include/malloc.h" |
#else |
|
/* SVID2/XPG mallinfo structure */ |
|
struct mallinfo { |
int arena; /* total space allocated from system */ |
int ordblks; /* number of non-inuse chunks */ |
int smblks; /* unused -- always zero */ |
int hblks; /* number of mmapped regions */ |
int hblkhd; /* total space in mmapped regions */ |
int usmblks; /* unused -- always zero */ |
int fsmblks; /* unused -- always zero */ |
int uordblks; /* total allocated space */ |
int fordblks; /* total non-inuse space */ |
int keepcost; /* top-most, releasable (via malloc_trim) space */ |
}; |
|
/* SVID2/XPG mallopt options */ |
|
#define M_MXFAST 1 /* UNUSED in this malloc */ |
#define M_NLBLKS 2 /* UNUSED in this malloc */ |
#define M_GRAIN 3 /* UNUSED in this malloc */ |
#define M_KEEP 4 /* UNUSED in this malloc */ |
|
#endif |
|
/* mallopt options that actually do something */ |
|
#define M_TRIM_THRESHOLD -1 |
#define M_TOP_PAD -2 |
#define M_MMAP_THRESHOLD -3 |
#define M_MMAP_MAX -4 |
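 |
/* Illustrative use (hypothetical caller code, not part of this file): |
 |
       mallopt(M_TRIM_THRESHOLD, 256*1024);   set the trim point to 256K |
       mallopt(M_MMAP_THRESHOLD, 256*1024);   mmap requests of 256K and up |
       struct mallinfo mi = mallinfo();       snapshot of current usage |
 |
   Use the dl-/_r-prefixed names instead when USE_DL_PREFIX or |
   INTERNAL_NEWLIB renames the entry points below. */ |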
|
|
|
#ifndef DEFAULT_TRIM_THRESHOLD |
#define DEFAULT_TRIM_THRESHOLD (128L * 1024L) |
#endif |
|
/* |
M_TRIM_THRESHOLD is the maximum amount of unused top-most memory |
to keep before releasing via malloc_trim in free(). |
|
Automatic trimming is mainly useful in long-lived programs. |
Because trimming via sbrk can be slow on some systems, and can |
sometimes be wasteful (in cases where programs immediately |
afterward allocate more large chunks) the value should be high |
enough so that your overall system performance would improve by |
releasing. |
|
The trim threshold and the mmap control parameters (see below) |
can be traded off with one another. Trimming and mmapping are |
two different ways of releasing unused memory back to the |
system. Between these two, it is often possible to keep |
system-level demands of a long-lived program down to a bare |
minimum. For example, in one test suite of sessions measuring |
the XF86 X server on Linux, using a trim threshold of 128K and a |
mmap threshold of 192K led to near-minimal long term resource |
consumption. |
|
If you are using this malloc in a long-lived program, it should |
pay to experiment with these values. As a rough guide, you |
might set it to a value close to the average size of a process |
(program) running on your system. Releasing this much memory |
would allow such a process to run in memory. Generally, it's |
worth it to tune for trimming rather than memory mapping when a |
program undergoes phases where several large chunks are |
allocated and released in ways that can reuse each other's |
storage, perhaps mixed with phases where there are no such |
chunks at all. And in well-behaved long-lived programs, |
controlling release of large blocks via trimming versus mapping |
is usually faster. |
|
However, in most programs, these parameters serve mainly as |
protection against the system-level effects of carrying around |
massive amounts of unneeded memory. Since frequent calls to |
sbrk, mmap, and munmap otherwise degrade performance, the default |
parameters are set to relatively high values that serve only as |
safeguards. |
|
The default trim value is high enough to cause trimming only in |
fairly extreme (by current memory consumption standards) cases. |
It must be greater than page size to have any useful effect. To |
disable trimming completely, you can set it to (unsigned long)(-1). |
|
|
*/ |
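 |
/* Example (illustrative): a program that repeatedly frees a 1MB buffer |
   sitting at the top of the heap would, with the default 128K threshold, |
   hand that memory back to the system on every free(); raising |
   M_TRIM_THRESHOLD above 1MB via mallopt() keeps it cached for reuse. */ |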
|
|
#ifndef DEFAULT_TOP_PAD |
#define DEFAULT_TOP_PAD (0) |
#endif |
|
/* |
M_TOP_PAD is the amount of extra `padding' space to allocate or |
retain whenever sbrk is called. It is used in two ways internally: |
|
* When sbrk is called to extend the top of the arena to satisfy |
a new malloc request, this much padding is added to the sbrk |
request. |
|
* When malloc_trim is called automatically from free(), |
it is used as the `pad' argument. |
|
In both cases, the actual amount of padding is rounded |
so that the end of the arena is always a system page boundary. |
|
The main reason for using padding is to avoid calling sbrk so |
often. Having even a small pad greatly reduces the likelihood |
that nearly every malloc request during program start-up (or |
after trimming) will invoke sbrk, which needlessly wastes |
time. |
|
Automatic rounding-up to page-size units is normally sufficient |
to avoid measurable overhead, so the default is 0. However, in |
systems where sbrk is relatively slow, it can pay to increase |
this value, at the expense of carrying around more memory than |
the program needs. |
|
*/ |
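 |
/* Example (illustrative): mallopt(M_TOP_PAD, 64*1024) makes every sbrk |
   extension ask for roughly 64K beyond what the triggering request needs, |
   so a start-up burst of small allocations costs about one sbrk call per |
   64K consumed instead of one per page. */ |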
|
|
#ifndef DEFAULT_MMAP_THRESHOLD |
#define DEFAULT_MMAP_THRESHOLD (128 * 1024) |
#endif |
|
/* |
|
M_MMAP_THRESHOLD is the request size threshold for using mmap() |
to service a request. Requests of at least this size that cannot |
be allocated using already-existing space will be serviced via mmap. |
(If enough normal freed space already exists it is used instead.) |
|
Using mmap segregates relatively large chunks of memory so that |
they can be individually obtained and released from the host |
system. A request serviced through mmap is never reused by any |
other request (at least not directly; the system may just so |
happen to remap successive requests to the same locations). |
|
Segregating space in this way has the benefit that mmapped space |
can ALWAYS be individually released back to the system, which |
helps keep the system level memory demands of a long-lived |
program low. Mapped memory can never become `locked' between |
other chunks, as can happen with normally allocated chunks, which |
means that even trimming via malloc_trim would not release them. |
|
However, it has the disadvantages that: |
|
1. The space cannot be reclaimed, consolidated, and then |
used to service later requests, as happens with normal chunks. |
2. It can lead to more wastage because of mmap page alignment |
requirements |
3. It causes malloc performance to be more dependent on host |
system memory management support routines which may vary in |
implementation quality and may impose arbitrary |
limitations. Generally, servicing a request via normal |
malloc steps is faster than going through a system's mmap. |
|
All together, these considerations should lead you to use mmap |
only for relatively large requests. |
|
|
*/ |
|
|
|
#ifndef DEFAULT_MMAP_MAX |
#if HAVE_MMAP |
#define DEFAULT_MMAP_MAX (64) |
#else |
#define DEFAULT_MMAP_MAX (0) |
#endif |
#endif |
|
/* |
M_MMAP_MAX is the maximum number of requests to simultaneously |
service using mmap. This parameter exists because: |
|
1. Some systems have a limited number of internal tables for |
use by mmap. |
2. In most systems, overreliance on mmap can degrade overall |
performance. |
3. If a program allocates many large regions, it is probably |
better off using normal sbrk-based allocation routines that |
can reclaim and reallocate normal heap memory. Using a |
small value allows transition into this mode after the |
first few allocations. |
|
Setting to 0 disables all use of mmap. If HAVE_MMAP is not set, |
the default value is 0, and attempts to set it to non-zero values |
in mallopt will fail. |
*/ |
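 |
/* Example (illustrative): with the defaults above (128K threshold, 64 |
   mappings), only requests of 128K or more that cannot be satisfied from |
   the existing heap are serviced by mmap; once 64 such mappings are live, |
   further large requests fall back to extending the heap via MORECORE. */ |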
|
|
|
|
/* |
USE_DL_PREFIX will prefix all public routines with the string 'dl'. |
Useful to quickly avoid procedure declaration conflicts and linker |
symbol conflicts with existing memory allocation routines. |
|
*/ |
|
/* #define USE_DL_PREFIX */ |
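 |
/* For illustration: with USE_DL_PREFIX defined, the public entry points |
   become dlmalloc(), dlfree(), dlrealloc(), dlcalloc() and so on (see the |
   name mappings further below), letting this allocator link alongside a |
   platform's native malloc without symbol clashes. */ |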
|
|
|
|
/* |
|
Special defines for linux libc |
|
Except when compiled using these special defines for Linux libc |
using weak aliases, this malloc is NOT designed to work in |
multithreaded applications. No semaphores or other concurrency |
control are provided to ensure that multiple malloc or free calls |
don't run at the same time, which could be disastrous. A single |
semaphore could be used across malloc, realloc, and free (which is |
essentially the effect of the linux weak alias approach). It would |
be hard to obtain finer granularity. |
|
*/ |
|
|
#ifdef INTERNAL_LINUX_C_LIB |
|
#if __STD_C |
|
Void_t * __default_morecore_init (ptrdiff_t); |
Void_t *(*__morecore)(ptrdiff_t) = __default_morecore_init; |
|
#else |
|
Void_t * __default_morecore_init (); |
Void_t *(*__morecore)() = __default_morecore_init; |
|
#endif |
|
#define MORECORE (*__morecore) |
#define MORECORE_FAILURE 0 |
#define MORECORE_CLEARS 1 |
|
#else /* INTERNAL_LINUX_C_LIB */ |
|
#ifndef INTERNAL_NEWLIB |
#if __STD_C |
extern Void_t* sbrk(ptrdiff_t); |
#else |
extern Void_t* sbrk(); |
#endif |
#endif |
|
#ifndef MORECORE |
#define MORECORE sbrk |
#endif |
|
#ifndef MORECORE_FAILURE |
#define MORECORE_FAILURE -1 |
#endif |
|
#ifndef MORECORE_CLEARS |
#define MORECORE_CLEARS 1 |
#endif |
|
#endif /* INTERNAL_LINUX_C_LIB */ |
|
#if defined(INTERNAL_LINUX_C_LIB) && defined(__ELF__) |
|
#define cALLOc __libc_calloc |
#define fREe __libc_free |
#define mALLOc __libc_malloc |
#define mEMALIGn __libc_memalign |
#define rEALLOc __libc_realloc |
#define vALLOc __libc_valloc |
#define pvALLOc __libc_pvalloc |
#define mALLINFo __libc_mallinfo |
#define mALLOPt __libc_mallopt |
|
#pragma weak calloc = __libc_calloc |
#pragma weak free = __libc_free |
#pragma weak cfree = __libc_free |
#pragma weak malloc = __libc_malloc |
#pragma weak memalign = __libc_memalign |
#pragma weak realloc = __libc_realloc |
#pragma weak valloc = __libc_valloc |
#pragma weak pvalloc = __libc_pvalloc |
#pragma weak mallinfo = __libc_mallinfo |
#pragma weak mallopt = __libc_mallopt |
|
#else |
|
#ifdef INTERNAL_NEWLIB |
|
#define cALLOc _calloc_r |
#define fREe _free_r |
#define mALLOc _malloc_r |
#define mEMALIGn _memalign_r |
#define rEALLOc _realloc_r |
#define vALLOc _valloc_r |
#define pvALLOc _pvalloc_r |
#define mALLINFo _mallinfo_r |
#define mALLOPt _mallopt_r |
|
#define malloc_stats _malloc_stats_r |
#define malloc_trim _malloc_trim_r |
#define malloc_usable_size _malloc_usable_size_r |
|
#define malloc_update_mallinfo __malloc_update_mallinfo |
|
#define malloc_av_ __malloc_av_ |
#define malloc_current_mallinfo __malloc_current_mallinfo |
#define malloc_max_sbrked_mem __malloc_max_sbrked_mem |
#define malloc_max_total_mem __malloc_max_total_mem |
#define malloc_sbrk_base __malloc_sbrk_base |
#define malloc_top_pad __malloc_top_pad |
#define malloc_trim_threshold __malloc_trim_threshold |
|
#else /* ! INTERNAL_NEWLIB */ |
|
#ifdef USE_DL_PREFIX |
#define cALLOc dlcalloc |
#define fREe dlfree |
#define mALLOc dlmalloc |
#define mEMALIGn dlmemalign |
#define rEALLOc dlrealloc |
#define vALLOc dlvalloc |
#define pvALLOc dlpvalloc |
#define mALLINFo dlmallinfo |
#define mALLOPt dlmallopt |
#else /* USE_DL_PREFIX */ |
#define cALLOc calloc |
#define fREe free |
#define mALLOc malloc |
#define mEMALIGn memalign |
#define rEALLOc realloc |
#define vALLOc valloc |
#define pvALLOc pvalloc |
#define mALLINFo mallinfo |
#define mALLOPt mallopt |
#endif /* USE_DL_PREFIX */ |
|
#endif /* ! INTERNAL_NEWLIB */ |
#endif |
|
/* Public routines */ |
|
#if __STD_C |
|
Void_t* mALLOc(RARG size_t); |
void fREe(RARG Void_t*); |
Void_t* rEALLOc(RARG Void_t*, size_t); |
Void_t* mEMALIGn(RARG size_t, size_t); |
Void_t* vALLOc(RARG size_t); |
Void_t* pvALLOc(RARG size_t); |
Void_t* cALLOc(RARG size_t, size_t); |
void cfree(Void_t*); |
int malloc_trim(RARG size_t); |
size_t malloc_usable_size(RARG Void_t*); |
void malloc_stats(RONEARG); |
int mALLOPt(RARG int, int); |
struct mallinfo mALLINFo(RONEARG); |
#else |
Void_t* mALLOc(); |
void fREe(); |
Void_t* rEALLOc(); |
Void_t* mEMALIGn(); |
Void_t* vALLOc(); |
Void_t* pvALLOc(); |
Void_t* cALLOc(); |
void cfree(); |
int malloc_trim(); |
size_t malloc_usable_size(); |
void malloc_stats(); |
int mALLOPt(); |
struct mallinfo mALLINFo(); |
#endif |
|
|
#ifdef __cplusplus |
}; /* end of extern "C" */ |
#endif |
|
/* ---------- To make a malloc.h, end cutting here ------------ */ |
|
|
/* |
Emulation of sbrk for WIN32 |
All code within the ifdef WIN32 is untested by me. |
|
Thanks to Martin Fong and others for supplying this. |
*/ |
|
|
#ifdef WIN32 |
|
#define AlignPage(add) (((add) + (malloc_getpagesize-1)) & \ |
~(malloc_getpagesize-1)) |
#define AlignPage64K(add) (((add) + (0x10000 - 1)) & ~(0x10000 - 1)) |
|
/* reserve 64MB to ensure large contiguous space */ |
#define RESERVED_SIZE (1024*1024*64) |
#define NEXT_SIZE (2048*1024) |
#define TOP_MEMORY ((unsigned long)2*1024*1024*1024) |
|
struct GmListElement; |
typedef struct GmListElement GmListElement; |
|
struct GmListElement |
{ |
GmListElement* next; |
void* base; |
}; |
|
static GmListElement* head = 0; |
static unsigned int gNextAddress = 0; |
static unsigned int gAddressBase = 0; |
static unsigned int gAllocatedSize = 0; |
|
static |
GmListElement* makeGmListElement (void* bas) |
{ |
GmListElement* this; |
this = (GmListElement*)(void*)LocalAlloc (0, sizeof (GmListElement)); |
assert (this); |
if (this) |
{ |
this->base = bas; |
this->next = head; |
head = this; |
} |
return this; |
} |
|
void gcleanup () |
{ |
BOOL rval; |
assert ( (head == NULL) || (head->base == (void*)gAddressBase)); |
if (gAddressBase && (gNextAddress - gAddressBase)) |
{ |
rval = VirtualFree ((void*)gAddressBase, |
gNextAddress - gAddressBase, |
MEM_DECOMMIT); |
assert (rval); |
} |
while (head) |
{ |
GmListElement* next = head->next; |
rval = VirtualFree (head->base, 0, MEM_RELEASE); |
assert (rval); |
LocalFree (head); |
head = next; |
} |
} |
|
static |
void* findRegion (void* start_address, unsigned long size) |
{ |
MEMORY_BASIC_INFORMATION info; |
if (size >= TOP_MEMORY) return NULL; |
|
while ((unsigned long)start_address + size < TOP_MEMORY) |
{ |
VirtualQuery (start_address, &info, sizeof (info)); |
if ((info.State == MEM_FREE) && (info.RegionSize >= size)) |
return start_address; |
else |
{ |
// Requested region is not available so see if the |
// next region is available. Set 'start_address' |
// to the next region and call 'VirtualQuery()' |
// again. |
|
start_address = (char*)info.BaseAddress + info.RegionSize; |
|
// Make sure we start looking for the next region |
// on the *next* 64K boundary. Otherwise, even if |
// the new region is free according to |
// 'VirtualQuery()', the subsequent call to |
// 'VirtualAlloc()' (which follows the call to |
// this routine in 'wsbrk()') will round *down* |
// the requested address to a 64K boundary which |
// we already know is an address in the |
// unavailable region. Thus, the subsequent call |
// to 'VirtualAlloc()' will fail and bring us back |
// here, causing us to go into an infinite loop. |
|
start_address = |
(void *) AlignPage64K((unsigned long) start_address); |
} |
} |
return NULL; |
|
} |
|
|
void* wsbrk (long size) |
{ |
void* tmp; |
if (size > 0) |
{ |
if (gAddressBase == 0) |
{ |
gAllocatedSize = max (RESERVED_SIZE, AlignPage (size)); |
gNextAddress = gAddressBase = |
(unsigned int)VirtualAlloc (NULL, gAllocatedSize, |
MEM_RESERVE, PAGE_NOACCESS); |
} else if (AlignPage (gNextAddress + size) > (gAddressBase + |
gAllocatedSize)) |
{ |
long new_size = max (NEXT_SIZE, AlignPage (size)); |
void* new_address = (void*)(gAddressBase+gAllocatedSize); |
do |
{ |
new_address = findRegion (new_address, new_size); |
|
if (new_address == 0) |
return (void*)-1; |
|
gAddressBase = gNextAddress = |
(unsigned int)VirtualAlloc (new_address, new_size, |
MEM_RESERVE, PAGE_NOACCESS); |
// repeat in case of race condition |
// The region that we found has been snagged |
// by another thread |
} |
while (gAddressBase == 0); |
|
assert (new_address == (void*)gAddressBase); |
|
gAllocatedSize = new_size; |
|
if (!makeGmListElement ((void*)gAddressBase)) |
return (void*)-1; |
} |
if ((size + gNextAddress) > AlignPage (gNextAddress)) |
{ |
void* res; |
res = VirtualAlloc ((void*)AlignPage (gNextAddress), |
(size + gNextAddress - |
AlignPage (gNextAddress)), |
MEM_COMMIT, PAGE_READWRITE); |
if (res == 0) |
return (void*)-1; |
} |
tmp = (void*)gNextAddress; |
gNextAddress = (unsigned int)tmp + size; |
return tmp; |
} |
else if (size < 0) |
{ |
unsigned int alignedGoal = AlignPage (gNextAddress + size); |
/* Trim by releasing the virtual memory */ |
if (alignedGoal >= gAddressBase) |
{ |
VirtualFree ((void*)alignedGoal, gNextAddress - alignedGoal, |
MEM_DECOMMIT); |
gNextAddress = gNextAddress + size; |
return (void*)gNextAddress; |
} |
else |
{ |
VirtualFree ((void*)gAddressBase, gNextAddress - gAddressBase, |
MEM_DECOMMIT); |
gNextAddress = gAddressBase; |
return (void*)-1; |
} |
} |
else |
{ |
return (void*)gNextAddress; |
} |
} |
|
#endif |
|
|
|
/* |
Type declarations |
*/ |
|
|
struct malloc_chunk |
{ |
INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */ |
INTERNAL_SIZE_T size; /* Size in bytes, including overhead. */ |
struct malloc_chunk* fd; /* double links -- used only if free. */ |
struct malloc_chunk* bk; |
}; |
|
typedef struct malloc_chunk* mchunkptr; |
|
/* |
|
malloc_chunk details: |
|
(The following includes lightly edited explanations by Colin Plumb.) |
|
Chunks of memory are maintained using a `boundary tag' method as |
described in e.g., Knuth or Standish. (See the paper by Paul |
Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a |
survey of such techniques.) Sizes of free chunks are stored both |
in the front of each chunk and at the end. This makes |
consolidating fragmented chunks into bigger chunks very fast. The |
size fields also hold bits representing whether chunks are free or |
in use. |
|
An allocated chunk looks like this: |
|
|
chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Size of previous chunk, if allocated | | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Size of chunk, in bytes |P| |
mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| User data starts here... . |
. . |
. (malloc_usable_size() bytes) . |
. | |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Size of chunk | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
|
|
Where "chunk" is the front of the chunk for the purpose of most of |
the malloc code, but "mem" is the pointer that is returned to the |
user. "Nextchunk" is the beginning of the next contiguous chunk. |
|
Chunks always begin on even word boundaries, so the mem portion |
(which is returned to the user) is also on an even word boundary, and |
thus double-word aligned. |
|
Free chunks are stored in circular doubly-linked lists, and look like this: |
|
chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Size of previous chunk | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
`head:' | Size of chunk, in bytes |P| |
mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Forward pointer to next chunk in list | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Back pointer to previous chunk in list | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Unused space (may be 0 bytes long) . |
. . |
. | |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
`foot:' | Size of chunk, in bytes | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
|
The P (PREV_INUSE) bit, stored in the unused low-order bit of the |
chunk size (which is always a multiple of two words), is an in-use |
bit for the *previous* chunk. If that bit is *clear*, then the |
word before the current chunk size contains the previous chunk |
size, and can be used to find the front of the previous chunk. |
(The very first chunk allocated always has this bit set, |
preventing access to non-existent (or non-owned) memory.) |
|
Note that the `foot' of the current chunk is actually represented |
as the prev_size of the NEXT chunk. (This makes it easier to |
deal with alignments etc). |
|
The two exceptions to all this are |
|
1. The special chunk `top', which doesn't bother using the |
trailing size field since there is no |
next contiguous chunk that would have to index off it. (After |
initialization, `top' is forced to always exist. If it would |
become less than MINSIZE bytes long, it is replenished via |
malloc_extend_top.) |
|
2. Chunks allocated via mmap, which have the second-lowest-order |
bit (IS_MMAPPED) set in their size fields. Because they are |
never merged or traversed from any other chunk, they have no |
foot size or inuse information. |
|
Available chunks are kept in any of several places (all declared below): |
|
* `av': An array of chunks serving as bin headers for consolidated |
chunks. Each bin is doubly linked. The bins are approximately |
proportionally (log) spaced. There are a lot of these bins |
(128). This may look excessive, but works very well in |
practice. All procedures maintain the invariant that no |
consolidated chunk physically borders another one. Chunks in |
bins are kept in size order, with ties going to the |
approximately least recently used chunk. |
|
The chunks in each bin are maintained in decreasing sorted order by |
size. This is irrelevant for the small bins, which all contain |
the same-sized chunks, but facilitates best-fit allocation for |
larger chunks. (These lists are just sequential. Keeping them in |
order almost never requires enough traversal to warrant using |
fancier ordered data structures.) Chunks of the same size are |
linked with the most recently freed at the front, and allocations |
are taken from the back. This results in LRU or FIFO allocation |
order, which tends to give each chunk an equal opportunity to be |
consolidated with adjacent freed chunks, resulting in larger free |
chunks and less fragmentation. |
|
* `top': The top-most available chunk (i.e., the one bordering the |
end of available memory) is treated specially. It is never |
included in any bin, is used only if no other chunk is |
available, and is released back to the system if it is very |
large (see M_TRIM_THRESHOLD). |
|
* `last_remainder': A bin holding only the remainder of the |
most recently split (non-top) chunk. This bin is checked |
before other non-fitting chunks, so as to provide better |
locality for runs of sequentially allocated chunks. |
|
* Implicitly, through the host system's memory mapping tables. |
If supported, requests greater than a threshold are usually |
serviced via calls to mmap, and then later released via munmap. |
|
*/ |
|
|
|
|
|
|
/* sizes, alignments */ |
|
#define SIZE_SZ (sizeof(INTERNAL_SIZE_T)) |
#ifndef MALLOC_ALIGNMENT |
#define MALLOC_ALIGN 8 |
#define MALLOC_ALIGNMENT (SIZE_SZ + SIZE_SZ) |
#else |
#define MALLOC_ALIGN MALLOC_ALIGNMENT |
#endif |
#define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1) |
#define MINSIZE (sizeof(struct malloc_chunk)) |
|
/* conversion from malloc headers to user pointers, and back */ |
|
#define chunk2mem(p) ((Void_t*)((char*)(p) + 2*SIZE_SZ)) |
#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ)) |
|
/* pad request bytes into a usable size */ |
|
#define request2size(req) \ |
(((long)((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) < \ |
(long)(MINSIZE + MALLOC_ALIGN_MASK)) ? ((MINSIZE + MALLOC_ALIGN_MASK) & ~(MALLOC_ALIGN_MASK)) : \ |
(((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) & ~(MALLOC_ALIGN_MASK))) |
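 |
/* Worked example (assuming 32-bit pointers and INTERNAL_SIZE_T, so |
   SIZE_SZ == 4, MALLOC_ALIGNMENT == 8 and MINSIZE == 16): |
     request2size(1)  : 1+4+7  = 12 < 23,  so (16+7) & ~7 = 16 |
     request2size(20) : 20+4+7 = 31 >= 23, so 31 & ~7 = 24 |
     request2size(24) : 24+4+7 = 35,       so 35 & ~7 = 32 |
   The user pointer chunk2mem(p) sits 2*SIZE_SZ (here 8) bytes past the |
   chunk header, so it is 8-byte aligned whenever the chunk is. */ |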
|
/* Check if m has acceptable alignment */ |
|
#define aligned_OK(m) (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0) |
|
|
|
|
/* |
Physical chunk operations |
*/ |
|
|
/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */ |
|
#define PREV_INUSE 0x1 |
|
/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */ |
|
#define IS_MMAPPED 0x2 |
|
/* Bits to mask off when extracting size */ |
|
#define SIZE_BITS (PREV_INUSE|IS_MMAPPED) |
|
|
/* Ptr to next physical malloc_chunk. */ |
|
#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~PREV_INUSE) )) |
|
/* Ptr to previous physical malloc_chunk */ |
|
#define prev_chunk(p)\ |
((mchunkptr)( ((char*)(p)) - ((p)->prev_size) )) |
|
|
/* Treat space at ptr + offset as a chunk */ |
|
#define chunk_at_offset(p, s) ((mchunkptr)(((char*)(p)) + (s))) |
|
|
|
|
/* |
Dealing with use bits |
*/ |
|
/* extract p's inuse bit */ |
|
#define inuse(p)\ |
((((mchunkptr)(((char*)(p))+((p)->size & ~PREV_INUSE)))->size) & PREV_INUSE) |
|
/* extract inuse bit of previous chunk */ |
|
#define prev_inuse(p) ((p)->size & PREV_INUSE) |
|
/* check for mmap()'ed chunk */ |
|
#define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED) |
|
/* set/clear chunk as in use without otherwise disturbing */ |
|
#define set_inuse(p)\ |
((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size |= PREV_INUSE |
|
#define clear_inuse(p)\ |
((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size &= ~(PREV_INUSE) |
|
/* check/set/clear inuse bits in known places */ |
|
#define inuse_bit_at_offset(p, s)\ |
(((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE) |
|
#define set_inuse_bit_at_offset(p, s)\ |
(((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE) |
|
#define clear_inuse_bit_at_offset(p, s)\ |
(((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE)) |
|
|
|
|
/* |
Dealing with size fields |
*/ |
|
/* Get size, ignoring use bits */ |
|
#define chunksize(p) ((p)->size & ~(SIZE_BITS)) |
|
/* Set size at head, without disturbing its use bit */ |
|
#define set_head_size(p, s) ((p)->size = (((p)->size & PREV_INUSE) | (s))) |
|
/* Set size/use ignoring previous bits in header */ |
|
#define set_head(p, s) ((p)->size = (s)) |
|
/* Set size at footer (only when chunk is not in use) */ |
|
#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_size = (s)) |
|
|
|
|
|
/* |
Bins |
|
The bins, `av_' are an array of pairs of pointers serving as the |
heads of (initially empty) doubly-linked lists of chunks, laid out |
in a way so that each pair can be treated as if it were in a |
malloc_chunk. (This way, the fd/bk offsets for linking bin heads |
and chunks are the same). |
|
Bins for sizes < 512 bytes contain chunks of all the same size, spaced |
8 bytes apart. Larger bins are approximately logarithmically |
spaced. (See the table below.) The `av_' array is never mentioned |
directly in the code, but instead via bin access macros. |
|
Bin layout: |
|
64 bins of size 8 |
32 bins of size 64 |
16 bins of size 512 |
8 bins of size 4096 |
4 bins of size 32768 |
2 bins of size 262144 |
1 bin of size what's left |
|
There is actually a little bit of slop in the numbers in bin_index |
for the sake of speed. This makes no difference elsewhere. |
|
The special chunks `top' and `last_remainder' get their own bins, |
(this is implemented via yet more trickery with the av_ array), |
although `top' is never properly linked to its bin since it is |
always handled specially. |
|
*/ |
|
#ifdef SEPARATE_OBJECTS |
#define av_ malloc_av_ |
#endif |
|
#define NAV 128 /* number of bins */ |
|
typedef struct malloc_chunk* mbinptr; |
|
/* access macros */ |
|
#define bin_at(i) ((mbinptr)((char*)&(av_[2*(i) + 2]) - 2*SIZE_SZ)) |
#define next_bin(b) ((mbinptr)((char*)(b) + 2 * sizeof(mbinptr))) |
#define prev_bin(b) ((mbinptr)((char*)(b) - 2 * sizeof(mbinptr))) |
|
/* |
The first 2 bins are never indexed. The corresponding av_ cells are instead |
used for bookkeeping. This is not to save space, but to simplify |
indexing, maintain locality, and avoid some initialization tests. |
*/ |
|
#define top (bin_at(0)->fd) /* The topmost chunk */ |
#define last_remainder (bin_at(1)) /* remainder from last split */ |
|
|
/* |
Because top initially points to its own bin with initial |
zero size, thus forcing extension on the first malloc request, |
we avoid having any special code in malloc to check whether |
it even exists yet. But we still need to check for it in malloc_extend_top. |
*/ |
|
#define initial_top ((mchunkptr)(bin_at(0))) |
|
/* Helper macro to initialize bins */ |
|
#define IAV(i) bin_at(i), bin_at(i) |
|
#ifdef DEFINE_MALLOC |
STATIC mbinptr av_[NAV * 2 + 2] = { |
0, 0, |
IAV(0), IAV(1), IAV(2), IAV(3), IAV(4), IAV(5), IAV(6), IAV(7), |
IAV(8), IAV(9), IAV(10), IAV(11), IAV(12), IAV(13), IAV(14), IAV(15), |
IAV(16), IAV(17), IAV(18), IAV(19), IAV(20), IAV(21), IAV(22), IAV(23), |
IAV(24), IAV(25), IAV(26), IAV(27), IAV(28), IAV(29), IAV(30), IAV(31), |
IAV(32), IAV(33), IAV(34), IAV(35), IAV(36), IAV(37), IAV(38), IAV(39), |
IAV(40), IAV(41), IAV(42), IAV(43), IAV(44), IAV(45), IAV(46), IAV(47), |
IAV(48), IAV(49), IAV(50), IAV(51), IAV(52), IAV(53), IAV(54), IAV(55), |
IAV(56), IAV(57), IAV(58), IAV(59), IAV(60), IAV(61), IAV(62), IAV(63), |
IAV(64), IAV(65), IAV(66), IAV(67), IAV(68), IAV(69), IAV(70), IAV(71), |
IAV(72), IAV(73), IAV(74), IAV(75), IAV(76), IAV(77), IAV(78), IAV(79), |
IAV(80), IAV(81), IAV(82), IAV(83), IAV(84), IAV(85), IAV(86), IAV(87), |
IAV(88), IAV(89), IAV(90), IAV(91), IAV(92), IAV(93), IAV(94), IAV(95), |
IAV(96), IAV(97), IAV(98), IAV(99), IAV(100), IAV(101), IAV(102), IAV(103), |
IAV(104), IAV(105), IAV(106), IAV(107), IAV(108), IAV(109), IAV(110), IAV(111), |
IAV(112), IAV(113), IAV(114), IAV(115), IAV(116), IAV(117), IAV(118), IAV(119), |
IAV(120), IAV(121), IAV(122), IAV(123), IAV(124), IAV(125), IAV(126), IAV(127) |
}; |
#else |
extern mbinptr av_[NAV * 2 + 2]; |
#endif |
|
|
|
/* field-extraction macros */ |
|
#define first(b) ((b)->fd) |
#define last(b) ((b)->bk) |
|
/* |
Indexing into bins |
*/ |
|
#define bin_index(sz) \ |
(((((unsigned long)(sz)) >> 9) == 0) ? (((unsigned long)(sz)) >> 3): \ |
((((unsigned long)(sz)) >> 9) <= 4) ? 56 + (((unsigned long)(sz)) >> 6): \ |
((((unsigned long)(sz)) >> 9) <= 20) ? 91 + (((unsigned long)(sz)) >> 9): \ |
((((unsigned long)(sz)) >> 9) <= 84) ? 110 + (((unsigned long)(sz)) >> 12): \ |
((((unsigned long)(sz)) >> 9) <= 340) ? 119 + (((unsigned long)(sz)) >> 15): \ |
((((unsigned long)(sz)) >> 9) <= 1364) ? 124 + (((unsigned long)(sz)) >> 18): \ |
126) |
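 |
/* Worked examples (illustrative): |
     bin_index(64)   : 64>>9 == 0,            so 64>>3          = 8 |
     bin_index(1024) : 1024>>9 == 2  (<= 4),  so 56 + (1024>>6) = 72 |
     bin_index(8192) : 8192>>9 == 16 (<= 20), so 91 + (8192>>9) = 107 |
   Sizes whose (sz >> 9) exceeds 1364 all land in the final bin, 126. */ |
 |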
/* |
bins for chunks < 512 are all spaced SMALLBIN_WIDTH bytes apart, and hold |
identically sized chunks. This is exploited in malloc. |
*/ |
|
#define MAX_SMALLBIN_SIZE 512 |
#define SMALLBIN_WIDTH 8 |
#define SMALLBIN_WIDTH_BITS 3 |
#define MAX_SMALLBIN (MAX_SMALLBIN_SIZE / SMALLBIN_WIDTH) - 1 |
|
#define smallbin_index(sz) (((unsigned long)(sz)) >> SMALLBIN_WIDTH_BITS) |
|
/* |
Requests are `small' if both the corresponding and the next bin are small |
*/ |
|
#define is_small_request(nb) (nb < MAX_SMALLBIN_SIZE - SMALLBIN_WIDTH) |
|
|
|
/* |
To help compensate for the large number of bins, a one-level index |
structure is used for bin-by-bin searching. `binblocks' is a |
one-word bitvector recording whether groups of BINBLOCKWIDTH bins |
have any (possibly) non-empty bins, so they can be skipped over |
all at once during traversals. The bits are NOT always |
cleared as soon as all bins in a block are empty, but instead only |
when all are noticed to be empty during traversal in malloc. |
*/ |
|
#define BINBLOCKWIDTH 4 /* bins per block */ |
|
#define binblocks (bin_at(0)->size) /* bitvector of nonempty blocks */ |
|
/* bin<->block macros */ |
|
#define idx2binblock(ix) ((unsigned long)1 << (ix / BINBLOCKWIDTH)) |
#define mark_binblock(ii) (binblocks |= idx2binblock(ii)) |
#define clear_binblock(ii) (binblocks &= ~(idx2binblock(ii))) |
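 |
/* For example, with BINBLOCKWIDTH == 4, bins 8..11 all map to the block |
   bit idx2binblock(8) == (1UL << 2); marking any of them sets that bit in |
   binblocks, and malloc skips all four bins at once while it is clear. */ |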
|
|
|
|
|
/* Other static bookkeeping data */ |
|
#ifdef SEPARATE_OBJECTS |
#define trim_threshold malloc_trim_threshold |
#define top_pad malloc_top_pad |
#define n_mmaps_max malloc_n_mmaps_max |
#define mmap_threshold malloc_mmap_threshold |
#define sbrk_base malloc_sbrk_base |
#define max_sbrked_mem malloc_max_sbrked_mem |
#define max_total_mem malloc_max_total_mem |
#define current_mallinfo malloc_current_mallinfo |
#define n_mmaps malloc_n_mmaps |
#define max_n_mmaps malloc_max_n_mmaps |
#define mmapped_mem malloc_mmapped_mem |
#define max_mmapped_mem malloc_max_mmapped_mem |
#endif |
|
/* variables holding tunable values */ |
|
#ifdef DEFINE_MALLOC |
|
STATIC unsigned long trim_threshold = DEFAULT_TRIM_THRESHOLD; |
STATIC unsigned long top_pad = DEFAULT_TOP_PAD; |
#if HAVE_MMAP |
STATIC unsigned int n_mmaps_max = DEFAULT_MMAP_MAX; |
STATIC unsigned long mmap_threshold = DEFAULT_MMAP_THRESHOLD; |
#endif |
|
/* The first value returned from sbrk */ |
STATIC char* sbrk_base = (char*)(-1); |
|
/* The maximum memory obtained from system via sbrk */ |
STATIC unsigned long max_sbrked_mem = 0; |
|
/* The maximum via either sbrk or mmap */ |
STATIC unsigned long max_total_mem = 0; |
|
/* internal working copy of mallinfo */ |
STATIC struct mallinfo current_mallinfo = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; |
|
#if HAVE_MMAP |
|
/* Tracking mmaps */ |
|
STATIC unsigned int n_mmaps = 0; |
STATIC unsigned int max_n_mmaps = 0; |
STATIC unsigned long mmapped_mem = 0; |
STATIC unsigned long max_mmapped_mem = 0; |
|
#endif |
|
#else /* ! DEFINE_MALLOC */ |
|
extern unsigned long trim_threshold; |
extern unsigned long top_pad; |
#if HAVE_MMAP |
extern unsigned int n_mmaps_max; |
extern unsigned long mmap_threshold; |
#endif |
extern char* sbrk_base; |
extern unsigned long max_sbrked_mem; |
extern unsigned long max_total_mem; |
extern struct mallinfo current_mallinfo; |
#if HAVE_MMAP |
extern unsigned int n_mmaps; |
extern unsigned int max_n_mmaps; |
extern unsigned long mmapped_mem; |
extern unsigned long max_mmapped_mem; |
#endif |
|
#endif /* ! DEFINE_MALLOC */ |
|
/* The total memory obtained from system via sbrk */ |
#define sbrked_mem (current_mallinfo.arena) |
|
|
|
/* |
Debugging support |
*/ |
|
#if DEBUG |
|
|
/* |
These routines make a number of assertions about the states |
of data structures that should be true at all times. If any |
are not true, it's very likely that a user program has somehow |
trashed memory. (It's also possible that there is a coding error |
in malloc. In which case, please report it!) |
*/ |
|
#if __STD_C |
static void do_check_chunk(mchunkptr p) |
#else |
static void do_check_chunk(p) mchunkptr p; |
#endif |
{ |
INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE; |
|
/* No checkable chunk is mmapped */ |
assert(!chunk_is_mmapped(p)); |
|
/* Check for legal address ... */ |
assert((char*)p >= sbrk_base); |
if (p != top) |
assert((char*)p + sz <= (char*)top); |
else |
assert((char*)p + sz <= sbrk_base + sbrked_mem); |
|
} |
|
|
#if __STD_C |
static void do_check_free_chunk(mchunkptr p) |
#else |
static void do_check_free_chunk(p) mchunkptr p; |
#endif |
{ |
INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE; |
mchunkptr next = chunk_at_offset(p, sz); |
|
do_check_chunk(p); |
|
/* Check whether it claims to be free ... */ |
assert(!inuse(p)); |
|
/* Unless a special marker, must have OK fields */ |
if ((long)sz >= (long)MINSIZE) |
{ |
assert((sz & MALLOC_ALIGN_MASK) == 0); |
assert(aligned_OK(chunk2mem(p))); |
/* ... matching footer field */ |
assert(next->prev_size == sz); |
/* ... and is fully consolidated */ |
assert(prev_inuse(p)); |
assert (next == top || inuse(next)); |
|
/* ... and has minimally sane links */ |
assert(p->fd->bk == p); |
assert(p->bk->fd == p); |
} |
else /* markers are always of size SIZE_SZ */ |
assert(sz == SIZE_SZ); |
} |
|
#if __STD_C |
static void do_check_inuse_chunk(mchunkptr p) |
#else |
static void do_check_inuse_chunk(p) mchunkptr p; |
#endif |
{ |
mchunkptr next = next_chunk(p); |
do_check_chunk(p); |
|
/* Check whether it claims to be in use ... */ |
assert(inuse(p)); |
|
/* ... and is surrounded by OK chunks. |
Since more things can be checked with free chunks than inuse ones, |
if an inuse chunk borders them and debug is on, it's worth doing them. |
*/ |
if (!prev_inuse(p)) |
{ |
mchunkptr prv = prev_chunk(p); |
assert(next_chunk(prv) == p); |
do_check_free_chunk(prv); |
} |
if (next == top) |
{ |
assert(prev_inuse(next)); |
assert(chunksize(next) >= MINSIZE); |
} |
else if (!inuse(next)) |
do_check_free_chunk(next); |
|
} |
|
#if __STD_C |
static void do_check_malloced_chunk(mchunkptr p, INTERNAL_SIZE_T s) |
#else |
static void do_check_malloced_chunk(p, s) mchunkptr p; INTERNAL_SIZE_T s; |
#endif |
{ |
INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE; |
long room = long_sub_size_t(sz, s); |
|
do_check_inuse_chunk(p); |
|
/* Legal size ... */ |
assert((long)sz >= (long)MINSIZE); |
assert((sz & MALLOC_ALIGN_MASK) == 0); |
assert(room >= 0); |
assert(room < (long)MINSIZE); |
|
/* ... and alignment */ |
assert(aligned_OK(chunk2mem(p))); |
|
|
/* ... and was allocated at front of an available chunk */ |
assert(prev_inuse(p)); |
|
} |
|
|
#define check_free_chunk(P) do_check_free_chunk(P) |
#define check_inuse_chunk(P) do_check_inuse_chunk(P) |
#define check_chunk(P) do_check_chunk(P) |
#define check_malloced_chunk(P,N) do_check_malloced_chunk(P,N) |
#else |
#define check_free_chunk(P) |
#define check_inuse_chunk(P) |
#define check_chunk(P) |
#define check_malloced_chunk(P,N) |
#endif |
|
|
|
/* |
Macro-based internal utilities |
*/ |
|
|
/* |
Linking chunks in bin lists. |
Call these only with variables, not arbitrary expressions, as arguments. |
*/ |
|
/* |
Place chunk p of size s in its bin, in size order, |
putting it ahead of others of same size. |
*/ |
|
|
#define frontlink(P, S, IDX, BK, FD) \ |
{ \ |
if (S < MAX_SMALLBIN_SIZE) \ |
{ \ |
IDX = smallbin_index(S); \ |
mark_binblock(IDX); \ |
BK = bin_at(IDX); \ |
FD = BK->fd; \ |
P->bk = BK; \ |
P->fd = FD; \ |
FD->bk = BK->fd = P; \ |
} \ |
else \ |
{ \ |
IDX = bin_index(S); \ |
BK = bin_at(IDX); \ |
FD = BK->fd; \ |
if (FD == BK) mark_binblock(IDX); \ |
else \ |
{ \ |
while (FD != BK && S < chunksize(FD)) FD = FD->fd; \ |
BK = FD->bk; \ |
} \ |
P->bk = BK; \ |
P->fd = FD; \ |
FD->bk = BK->fd = P; \ |
} \ |
} |
|
|
/* take a chunk off a list */ |
|
#define unlink(P, BK, FD) \ |
{ \ |
BK = P->bk; \ |
FD = P->fd; \ |
FD->bk = BK; \ |
BK->fd = FD; \ |
} \ |
|
/* Place p as the last remainder */ |
|
#define link_last_remainder(P) \ |
{ \ |
last_remainder->fd = last_remainder->bk = P; \ |
P->fd = P->bk = last_remainder; \ |
} |
|
/* Clear the last_remainder bin */ |
|
#define clear_last_remainder \ |
(last_remainder->fd = last_remainder->bk = last_remainder) |
|
|
|
|
|
|
/* Routines dealing with mmap(). */ |
|
#if HAVE_MMAP |
|
#ifdef DEFINE_MALLOC |
|
#if __STD_C |
static mchunkptr mmap_chunk(size_t size) |
#else |
static mchunkptr mmap_chunk(size) size_t size; |
#endif |
{ |
size_t page_mask = malloc_getpagesize - 1; |
mchunkptr p; |
|
#ifndef MAP_ANONYMOUS |
static int fd = -1; |
#endif |
|
if(n_mmaps >= n_mmaps_max) return 0; /* too many regions */ |
|
/* For mmapped chunks, the overhead is one SIZE_SZ unit larger, because |
* there is no following chunk whose prev_size field could be used. |
*/ |
size = (size + SIZE_SZ + page_mask) & ~page_mask; |
|
#ifdef MAP_ANONYMOUS |
p = (mchunkptr)mmap(0, size, PROT_READ|PROT_WRITE, |
MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); |
#else /* !MAP_ANONYMOUS */ |
if (fd < 0) |
{ |
fd = open("/dev/zero", O_RDWR); |
if(fd < 0) return 0; |
} |
p = (mchunkptr)mmap(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0); |
#endif |
|
if(p == (mchunkptr)-1) return 0; |
|
n_mmaps++; |
if (n_mmaps > max_n_mmaps) max_n_mmaps = n_mmaps; |
|
/* We demand that eight bytes into a page must be 8-byte aligned. */ |
assert(aligned_OK(chunk2mem(p))); |
|
/* The offset to the start of the mmapped region is stored |
* in the prev_size field of the chunk; normally it is zero, |
* but that can be changed in memalign(). |
*/ |
p->prev_size = 0; |
set_head(p, size|IS_MMAPPED); |
|
mmapped_mem += size; |
if ((unsigned long)mmapped_mem > (unsigned long)max_mmapped_mem) |
max_mmapped_mem = mmapped_mem; |
if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem) |
max_total_mem = mmapped_mem + sbrked_mem; |
return p; |
} |
|
#endif /* DEFINE_MALLOC */ |
|
#ifdef SEPARATE_OBJECTS |
#define munmap_chunk malloc_munmap_chunk |
#endif |
|
#ifdef DEFINE_FREE |
|
#if __STD_C |
STATIC void munmap_chunk(mchunkptr p) |
#else |
STATIC void munmap_chunk(p) mchunkptr p; |
#endif |
{ |
INTERNAL_SIZE_T size = chunksize(p); |
int ret; |
|
assert (chunk_is_mmapped(p)); |
assert(! ((char*)p >= sbrk_base && (char*)p < sbrk_base + sbrked_mem)); |
assert((n_mmaps > 0)); |
assert(((p->prev_size + size) & (malloc_getpagesize-1)) == 0); |
|
n_mmaps--; |
mmapped_mem -= (size + p->prev_size); |
|
ret = munmap((char *)p - p->prev_size, size + p->prev_size); |
|
/* munmap returns non-zero on failure */ |
assert(ret == 0); |
} |
|
#else /* ! DEFINE_FREE */ |
|
#if __STD_C |
extern void munmap_chunk(mchunkptr); |
#else |
extern void munmap_chunk(); |
#endif |
|
#endif /* ! DEFINE_FREE */ |
|
#if HAVE_MREMAP |
|
#ifdef DEFINE_REALLOC |
|
#if __STD_C |
static mchunkptr mremap_chunk(mchunkptr p, size_t new_size) |
#else |
static mchunkptr mremap_chunk(p, new_size) mchunkptr p; size_t new_size; |
#endif |
{ |
size_t page_mask = malloc_getpagesize - 1; |
INTERNAL_SIZE_T offset = p->prev_size; |
INTERNAL_SIZE_T size = chunksize(p); |
char *cp; |
|
assert (chunk_is_mmapped(p)); |
assert(! ((char*)p >= sbrk_base && (char*)p < sbrk_base + sbrked_mem)); |
assert((n_mmaps > 0)); |
assert(((size + offset) & (malloc_getpagesize-1)) == 0); |
|
/* Note the extra SIZE_SZ overhead as in mmap_chunk(). */ |
new_size = (new_size + offset + SIZE_SZ + page_mask) & ~page_mask; |
|
cp = (char *)mremap((char *)p - offset, size + offset, new_size, 1); |
|
if (cp == (char *)-1) return 0; |
|
p = (mchunkptr)(cp + offset); |
|
assert(aligned_OK(chunk2mem(p))); |
|
assert((p->prev_size == offset)); |
set_head(p, (new_size - offset)|IS_MMAPPED); |
|
mmapped_mem -= size + offset; |
mmapped_mem += new_size; |
if ((unsigned long)mmapped_mem > (unsigned long)max_mmapped_mem) |
max_mmapped_mem = mmapped_mem; |
if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem) |
max_total_mem = mmapped_mem + sbrked_mem; |
return p; |
} |
|
#endif /* DEFINE_REALLOC */ |
|
#endif /* HAVE_MREMAP */ |
|
#endif /* HAVE_MMAP */ |
|
|
|
|
#ifdef DEFINE_MALLOC |
|
/* |
Extend the top-most chunk by obtaining memory from system. |
Main interface to sbrk (but see also malloc_trim). |
*/ |
|
#if __STD_C |
static void malloc_extend_top(RARG INTERNAL_SIZE_T nb) |
#else |
static void malloc_extend_top(RARG nb) RDECL INTERNAL_SIZE_T nb; |
#endif |
{ |
char* brk; /* return value from sbrk */ |
INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of sbrked space */ |
INTERNAL_SIZE_T correction; /* bytes for 2nd sbrk call */ |
char* new_brk; /* return of 2nd sbrk call */ |
INTERNAL_SIZE_T top_size; /* new size of top chunk */ |
|
mchunkptr old_top = top; /* Record state of old top */ |
INTERNAL_SIZE_T old_top_size = chunksize(old_top); |
char* old_end = (char*)(chunk_at_offset(old_top, old_top_size)); |
|
/* Pad request with top_pad plus minimal overhead */ |
|
INTERNAL_SIZE_T sbrk_size = nb + top_pad + MINSIZE; |
unsigned long pagesz = malloc_getpagesize; |
|
/* If not the first time through, round to preserve page boundary */ |
/* Otherwise, we need to correct to a page size below anyway. */ |
/* (We also correct below if an intervening foreign sbrk call.) */ |
|
if (sbrk_base != (char*)(-1)) |
sbrk_size = (sbrk_size + (pagesz - 1)) & ~(pagesz - 1); |
|
brk = (char*)(MORECORE (sbrk_size)); |
|
/* Fail if sbrk failed or if a foreign sbrk call killed our space */ |
if (brk == (char*)(MORECORE_FAILURE) || |
(brk < old_end && old_top != initial_top)) |
return; |
|
sbrked_mem += sbrk_size; |
|
if (brk == old_end) /* can just add bytes to current top */ |
{ |
top_size = sbrk_size + old_top_size; |
set_head(top, top_size | PREV_INUSE); |
} |
else |
{ |
if (sbrk_base == (char*)(-1)) /* First time through. Record base */ |
sbrk_base = brk; |
else /* Someone else called sbrk(). Count those bytes as sbrked_mem. */ |
sbrked_mem += brk - (char*)old_end; |
|
/* Guarantee alignment of first new chunk made from this space */ |
front_misalign = (POINTER_UINT)chunk2mem(brk) & MALLOC_ALIGN_MASK; |
if (front_misalign > 0) |
{ |
correction = (MALLOC_ALIGNMENT) - front_misalign; |
brk += correction; |
} |
else |
correction = 0; |
|
/* Guarantee the next brk will be at a page boundary */ |
correction += ((((POINTER_UINT)(brk + sbrk_size))+(pagesz-1)) & |
~(pagesz - 1)) - ((POINTER_UINT)(brk + sbrk_size)); |
|
/* Allocate correction */ |
new_brk = (char*)(MORECORE (correction)); |
if (new_brk == (char*)(MORECORE_FAILURE)) return; |
|
sbrked_mem += correction; |
|
top = (mchunkptr)brk; |
top_size = new_brk - brk + correction; |
set_head(top, top_size | PREV_INUSE); |
|
if (old_top != initial_top) |
{ |
|
/* There must have been an intervening foreign sbrk call. */ |
/* A double fencepost is necessary to prevent consolidation */ |
|
/* If not enough space to do this, then user did something very wrong */ |
if (old_top_size < MINSIZE) |
{ |
set_head(top, PREV_INUSE); /* will force null return from malloc */ |
return; |
} |
|
/* Also keep size a multiple of MALLOC_ALIGNMENT */ |
old_top_size = (old_top_size - 3*SIZE_SZ) & ~MALLOC_ALIGN_MASK; |
set_head_size(old_top, old_top_size); |
chunk_at_offset(old_top, old_top_size )->size = |
SIZE_SZ|PREV_INUSE; |
chunk_at_offset(old_top, old_top_size + SIZE_SZ)->size = |
SIZE_SZ|PREV_INUSE; |
/* If possible, release the rest. */ |
if (old_top_size >= MINSIZE) |
fREe(RCALL chunk2mem(old_top)); |
} |
} |
|
if ((unsigned long)sbrked_mem > (unsigned long)max_sbrked_mem) |
max_sbrked_mem = sbrked_mem; |
#if HAVE_MMAP |
if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem) |
max_total_mem = mmapped_mem + sbrked_mem; |
#else |
if ((unsigned long)(sbrked_mem) > (unsigned long)max_total_mem) |
max_total_mem = sbrked_mem; |
#endif |
|
/* We always land on a page boundary */ |
assert(((unsigned long)((char*)top + top_size) & (pagesz - 1)) == 0); |
} |
|
#endif /* DEFINE_MALLOC */ |
|
|
/* Main public routines */ |
|
#ifdef DEFINE_MALLOC |
|
/* |
Malloc Algorithm: |
|
The requested size is first converted into a usable form, `nb'. |
This currently means to add 4 bytes overhead plus possibly more to |
obtain 8-byte alignment and/or to obtain a size of at least |
MINSIZE (currently 16 bytes), the smallest allocatable size. |
(All fits are considered `exact' if they are within MINSIZE bytes.) |
|
From there, the first of the following steps to succeed is taken: |
|
1. The bin corresponding to the request size is scanned, and if |
a chunk of exactly the right size is found, it is taken. |
|
2. The most recently remaindered chunk is used if it is big |
enough. This is a form of (roving) first fit, used only in |
the absence of exact fits. Runs of consecutive requests use |
the remainder of the chunk used for the previous such request |
whenever possible. This limited use of a first-fit style |
allocation strategy tends to give contiguous chunks |
coextensive lifetimes, which improves locality and can reduce |
fragmentation in the long run. |
|
3. Other bins are scanned in increasing size order, using a |
chunk big enough to fulfill the request, and splitting off |
any remainder. This search is strictly by best-fit; i.e., |
the smallest (with ties going to approximately the least |
recently used) chunk that fits is selected. |
|
4. If large enough, the chunk bordering the end of memory |
(`top') is split off. (This use of `top' is in accord with |
the best-fit search rule. In effect, `top' is treated as |
larger (and thus less well fitting) than any other available |
chunk since it can be extended to be as large as necessary |
(up to system limitations).) |
|
5. If the request size meets the mmap threshold and the |
system supports mmap, and there are few enough currently |
allocated mmapped regions, and a call to mmap succeeds, |
the request is allocated via direct memory mapping. |
|
6. Otherwise, the top of memory is extended by |
obtaining more space from the system (normally using sbrk, |
but definable to anything else via the MORECORE macro). |
Memory is gathered from the system (in system page-sized |
units) in a way that allows chunks obtained across different |
sbrk calls to be consolidated, but does not require |
contiguous memory. Thus, it should be safe to intersperse |
mallocs with other sbrk calls. |
|
|
All allocations are made from the `lowest' part of any found |
chunk. (The implementation invariant is that prev_inuse is |
always true of any allocated chunk; i.e., that each allocated |
chunk borders either a previously allocated and still in-use chunk, |
or the base of its memory arena.) |
|
*/ |
|
#if __STD_C |
Void_t* mALLOc(RARG size_t bytes) |
#else |
Void_t* mALLOc(RARG bytes) RDECL size_t bytes; |
#endif |
{ |
#ifdef MALLOC_PROVIDED |
|
return malloc (bytes); |
|
#else |
|
mchunkptr victim; /* inspected/selected chunk */ |
INTERNAL_SIZE_T victim_size; /* its size */ |
int idx; /* index for bin traversal */ |
mbinptr bin; /* associated bin */ |
mchunkptr remainder; /* remainder from a split */ |
long remainder_size; /* its size */ |
int remainder_index; /* its bin index */ |
unsigned long block; /* block traverser bit */ |
int startidx; /* first bin of a traversed block */ |
mchunkptr fwd; /* misc temp for linking */ |
mchunkptr bck; /* misc temp for linking */ |
mbinptr q; /* misc temp */ |
|
INTERNAL_SIZE_T nb; |
|
if ((long)bytes < 0) return 0; |
|
nb = request2size(bytes); /* padded request size; */ |
|
MALLOC_LOCK; |
|
/* Check for exact match in a bin */ |
|
if (is_small_request(nb)) /* Faster version for small requests */ |
{ |
idx = smallbin_index(nb); |
|
/* No traversal or size check necessary for small bins. */ |
|
q = bin_at(idx); |
victim = last(q); |
|
#if MALLOC_ALIGN != 16 |
/* Also scan the next one, since it would have a remainder < MINSIZE */ |
if (victim == q) |
{ |
q = next_bin(q); |
victim = last(q); |
} |
#endif |
if (victim != q) |
{ |
victim_size = chunksize(victim); |
unlink(victim, bck, fwd); |
set_inuse_bit_at_offset(victim, victim_size); |
check_malloced_chunk(victim, nb); |
MALLOC_UNLOCK; |
return chunk2mem(victim); |
} |
|
idx += 2; /* Set for bin scan below. We've already scanned 2 bins. */ |
|
} |
else |
{ |
idx = bin_index(nb); |
bin = bin_at(idx); |
|
for (victim = last(bin); victim != bin; victim = victim->bk) |
{ |
victim_size = chunksize(victim); |
remainder_size = long_sub_size_t(victim_size, nb); |
|
if (remainder_size >= (long)MINSIZE) /* too big */ |
{ |
--idx; /* adjust to rescan below after checking last remainder */ |
break; |
} |
|
else if (remainder_size >= 0) /* exact fit */ |
{ |
unlink(victim, bck, fwd); |
set_inuse_bit_at_offset(victim, victim_size); |
check_malloced_chunk(victim, nb); |
MALLOC_UNLOCK; |
return chunk2mem(victim); |
} |
} |
|
++idx; |
|
} |
|
/* Try to use the last split-off remainder */ |
|
if ( (victim = last_remainder->fd) != last_remainder) |
{ |
victim_size = chunksize(victim); |
remainder_size = long_sub_size_t(victim_size, nb); |
|
if (remainder_size >= (long)MINSIZE) /* re-split */ |
{ |
remainder = chunk_at_offset(victim, nb); |
set_head(victim, nb | PREV_INUSE); |
link_last_remainder(remainder); |
set_head(remainder, remainder_size | PREV_INUSE); |
set_foot(remainder, remainder_size); |
check_malloced_chunk(victim, nb); |
MALLOC_UNLOCK; |
return chunk2mem(victim); |
} |
|
clear_last_remainder; |
|
if (remainder_size >= 0) /* exhaust */ |
{ |
set_inuse_bit_at_offset(victim, victim_size); |
check_malloced_chunk(victim, nb); |
MALLOC_UNLOCK; |
return chunk2mem(victim); |
} |
|
/* Else place in bin */ |
|
frontlink(victim, victim_size, remainder_index, bck, fwd); |
} |
|
/* |
If there are any possibly nonempty big-enough blocks, |
search for best fitting chunk by scanning bins in blockwidth units. |
*/ |
|
if ( (block = idx2binblock(idx)) <= binblocks) |
{ |
|
/* Get to the first marked block */ |
|
if ( (block & binblocks) == 0) |
{ |
/* force to an even block boundary */ |
idx = (idx & ~(BINBLOCKWIDTH - 1)) + BINBLOCKWIDTH; |
block <<= 1; |
while ((block & binblocks) == 0) |
{ |
idx += BINBLOCKWIDTH; |
block <<= 1; |
} |
} |
|
/* For each possibly nonempty block ... */ |
for (;;) |
{ |
startidx = idx; /* (track incomplete blocks) */ |
q = bin = bin_at(idx); |
|
/* For each bin in this block ... */ |
do |
{ |
/* Find and use first big enough chunk ... */ |
|
for (victim = last(bin); victim != bin; victim = victim->bk) |
{ |
victim_size = chunksize(victim); |
remainder_size = long_sub_size_t(victim_size, nb); |
|
if (remainder_size >= (long)MINSIZE) /* split */ |
{ |
remainder = chunk_at_offset(victim, nb); |
set_head(victim, nb | PREV_INUSE); |
unlink(victim, bck, fwd); |
link_last_remainder(remainder); |
set_head(remainder, remainder_size | PREV_INUSE); |
set_foot(remainder, remainder_size); |
check_malloced_chunk(victim, nb); |
MALLOC_UNLOCK; |
return chunk2mem(victim); |
} |
|
else if (remainder_size >= 0) /* take */ |
{ |
set_inuse_bit_at_offset(victim, victim_size); |
unlink(victim, bck, fwd); |
check_malloced_chunk(victim, nb); |
MALLOC_UNLOCK; |
return chunk2mem(victim); |
} |
|
} |
|
bin = next_bin(bin); |
|
#if MALLOC_ALIGN == 16 |
if (idx < MAX_SMALLBIN) |
{ |
bin = next_bin(bin); |
++idx; |
} |
#endif |
} while ((++idx & (BINBLOCKWIDTH - 1)) != 0); |
|
/* Clear out the block bit. */ |
|
do /* Possibly backtrack to try to clear a partial block */ |
{ |
if ((startidx & (BINBLOCKWIDTH - 1)) == 0) |
{ |
binblocks &= ~block; |
break; |
} |
--startidx; |
q = prev_bin(q); |
} while (first(q) == q); |
|
/* Get to the next possibly nonempty block */ |
|
if ( (block <<= 1) <= binblocks && (block != 0) ) |
{ |
while ((block & binblocks) == 0) |
{ |
idx += BINBLOCKWIDTH; |
block <<= 1; |
} |
} |
else |
break; |
} |
} |
|
|
/* Try to use top chunk */ |
|
/* Require that there be a remainder, ensuring top always exists */ |
remainder_size = long_sub_size_t(chunksize(top), nb); |
if (chunksize(top) < nb || remainder_size < (long)MINSIZE) |
{ |
|
#if HAVE_MMAP |
/* If big and would otherwise need to extend, try to use mmap instead */ |
if ((unsigned long)nb >= (unsigned long)mmap_threshold && |
(victim = mmap_chunk(nb)) != 0) |
{ |
MALLOC_UNLOCK; |
return chunk2mem(victim); |
} |
#endif |
|
/* Try to extend */ |
malloc_extend_top(RCALL nb); |
remainder_size = long_sub_size_t(chunksize(top), nb); |
if (chunksize(top) < nb || remainder_size < (long)MINSIZE) |
{ |
MALLOC_UNLOCK; |
return 0; /* propagate failure */ |
} |
} |
|
victim = top; |
set_head(victim, nb | PREV_INUSE); |
top = chunk_at_offset(victim, nb); |
set_head(top, remainder_size | PREV_INUSE); |
check_malloced_chunk(victim, nb); |
MALLOC_UNLOCK; |
return chunk2mem(victim); |
|
#endif /* MALLOC_PROVIDED */ |
} |
|
#endif /* DEFINE_MALLOC */ |
|
#ifdef DEFINE_FREE |
|
/* |
|
free() algorithm : |
|
cases: |
|
1. free(0) has no effect. |
|
    2. If the chunk was allocated via mmap, it is released via munmap(). |
|
3. If a returned chunk borders the current high end of memory, |
it is consolidated into the top, and if the total unused |
topmost memory exceeds the trim threshold, malloc_trim is |
called. |
|
4. Other chunks are consolidated as they arrive, and |
placed in corresponding bins. (This includes the case of |
consolidating with the current `last_remainder'). |
|
*/ |
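 |
/* |
   Added illustrative note (not part of the original source): a minimal |
   caller-level sketch of the cases above.  Which of cases 2-4 applies is |
   decided entirely inside fREe(); the caller only ever calls free().  The |
   function name below is hypothetical and the block is not compiled. |
*/ |
#if 0 |
#include <stdlib.h> |
 |
static void free_usage_sketch(void) |
{ |
    char *buf = malloc(64 * 1024);  /* ordinary chunk under default thresholds */ |
 |
    free(NULL);    /* case 1: freeing a null pointer has no effect            */ |
    free(buf);     /* cases 2-4: munmap, merge into top (possibly trimming),  */ |
                   /* or consolidate with neighbours and place in a bin       */ |
 |
    /* buf must not be freed again or dereferenced after this point */ |
} |
#endif |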
|
|
#if __STD_C |
void fREe(RARG Void_t* mem) |
#else |
void fREe(RARG mem) RDECL Void_t* mem; |
#endif |
{ |
#ifdef MALLOC_PROVIDED |
|
free (mem); |
|
#else |
|
mchunkptr p; /* chunk corresponding to mem */ |
INTERNAL_SIZE_T hd; /* its head field */ |
INTERNAL_SIZE_T sz; /* its size */ |
int idx; /* its bin index */ |
mchunkptr next; /* next contiguous chunk */ |
INTERNAL_SIZE_T nextsz; /* its size */ |
INTERNAL_SIZE_T prevsz; /* size of previous contiguous chunk */ |
mchunkptr bck; /* misc temp for linking */ |
mchunkptr fwd; /* misc temp for linking */ |
int islr; /* track whether merging with last_remainder */ |
|
if (mem == 0) /* free(0) has no effect */ |
return; |
|
MALLOC_LOCK; |
|
p = mem2chunk(mem); |
hd = p->size; |
|
#if HAVE_MMAP |
if (hd & IS_MMAPPED) /* release mmapped memory. */ |
{ |
munmap_chunk(p); |
MALLOC_UNLOCK; |
return; |
} |
#endif |
|
check_inuse_chunk(p); |
|
sz = hd & ~PREV_INUSE; |
next = chunk_at_offset(p, sz); |
nextsz = chunksize(next); |
|
if (next == top) /* merge with top */ |
{ |
sz += nextsz; |
|
if (!(hd & PREV_INUSE)) /* consolidate backward */ |
{ |
prevsz = p->prev_size; |
p = chunk_at_offset(p, -((long) prevsz)); |
sz += prevsz; |
unlink(p, bck, fwd); |
} |
|
set_head(p, sz | PREV_INUSE); |
top = p; |
if ((unsigned long)(sz) >= (unsigned long)trim_threshold) |
malloc_trim(RCALL top_pad); |
MALLOC_UNLOCK; |
return; |
} |
|
set_head(next, nextsz); /* clear inuse bit */ |
|
islr = 0; |
|
if (!(hd & PREV_INUSE)) /* consolidate backward */ |
{ |
prevsz = p->prev_size; |
p = chunk_at_offset(p, -((long) prevsz)); |
sz += prevsz; |
|
if (p->fd == last_remainder) /* keep as last_remainder */ |
islr = 1; |
else |
unlink(p, bck, fwd); |
} |
|
if (!(inuse_bit_at_offset(next, nextsz))) /* consolidate forward */ |
{ |
sz += nextsz; |
|
if (!islr && next->fd == last_remainder) /* re-insert last_remainder */ |
{ |
islr = 1; |
link_last_remainder(p); |
} |
else |
unlink(next, bck, fwd); |
} |
|
|
set_head(p, sz | PREV_INUSE); |
set_foot(p, sz); |
if (!islr) |
frontlink(p, sz, idx, bck, fwd); |
|
MALLOC_UNLOCK; |
|
#endif /* MALLOC_PROVIDED */ |
} |
|
#endif /* DEFINE_FREE */ |
|
#ifdef DEFINE_REALLOC |
|
/* |
|
Realloc algorithm: |
|
Chunks that were obtained via mmap cannot be extended or shrunk |
unless HAVE_MREMAP is defined, in which case mremap is used. |
Otherwise, if their reallocation is for additional space, they are |
copied. If for less, they are just left alone. |
|
Otherwise, if the reallocation is for additional space, and the |
chunk can be extended, it is, else a malloc-copy-free sequence is |
taken. There are several different ways that a chunk could be |
extended. All are tried: |
|
* Extending forward into following adjacent free chunk. |
* Shifting backwards, joining preceding adjacent space |
* Both shifting backwards and extending forward. |
* Extending into newly sbrked space |
|
Unless the #define REALLOC_ZERO_BYTES_FREES is set, realloc with a |
size argument of zero (re)allocates a minimum-sized chunk. |
|
If the reallocation is for less space, and the new request is for |
a `small' (<512 bytes) size, then the newly unused space is lopped |
off and freed. |
|
The old unix realloc convention of allowing the last-free'd chunk |
to be used as an argument to realloc is no longer supported. |
I don't know of any programs still relying on this feature, |
    and allowing it would also make too many other incorrect |
    usages of realloc appear sensible. |
|
|
*/ |
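 |
/* |
   Added illustrative example (not part of the original source): a common |
   safe calling pattern for realloc, matching the behaviour described above |
   (realloc(0, n) acts as malloc(n); a failed realloc leaves the original |
   block intact).  The function name is hypothetical; the block is not |
   compiled. |
*/ |
#if 0 |
#include <stdlib.h> |
#include <string.h> |
 |
static int grow_buffer_sketch(void) |
{ |
    size_t n = 128; |
    char *buf; |
    char *bigger; |
 |
    buf = realloc(NULL, n);          /* same as malloc(n) */ |
    if (buf == 0) |
        return -1; |
    memset(buf, 0, n); |
 |
    bigger = realloc(buf, 2 * n);    /* keep buf until the result is known */ |
    if (bigger == 0) |
    { |
        free(buf);                   /* original block is still valid */ |
        return -1; |
    } |
    buf = bigger;                    /* may or may not equal the old pointer */ |
 |
    free(buf); |
    return 0; |
} |
#endif |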
|
|
#if __STD_C |
Void_t* rEALLOc(RARG Void_t* oldmem, size_t bytes) |
#else |
Void_t* rEALLOc(RARG oldmem, bytes) RDECL Void_t* oldmem; size_t bytes; |
#endif |
{ |
#ifdef MALLOC_PROVIDED |
|
  return realloc (oldmem, bytes); |
|
#else |
|
INTERNAL_SIZE_T nb; /* padded request size */ |
|
mchunkptr oldp; /* chunk corresponding to oldmem */ |
INTERNAL_SIZE_T oldsize; /* its size */ |
|
mchunkptr newp; /* chunk to return */ |
INTERNAL_SIZE_T newsize; /* its size */ |
Void_t* newmem; /* corresponding user mem */ |
|
mchunkptr next; /* next contiguous chunk after oldp */ |
INTERNAL_SIZE_T nextsize; /* its size */ |
|
mchunkptr prev; /* previous contiguous chunk before oldp */ |
INTERNAL_SIZE_T prevsize; /* its size */ |
|
mchunkptr remainder; /* holds split off extra space from newp */ |
INTERNAL_SIZE_T remainder_size; /* its size */ |
|
mchunkptr bck; /* misc temp for linking */ |
mchunkptr fwd; /* misc temp for linking */ |
|
#ifdef REALLOC_ZERO_BYTES_FREES |
if (bytes == 0) { fREe(RCALL oldmem); return 0; } |
#endif |
|
if ((long)bytes < 0) return 0; |
|
/* realloc of null is supposed to be same as malloc */ |
if (oldmem == 0) return mALLOc(RCALL bytes); |
|
MALLOC_LOCK; |
|
newp = oldp = mem2chunk(oldmem); |
newsize = oldsize = chunksize(oldp); |
|
|
nb = request2size(bytes); |
|
#if HAVE_MMAP |
if (chunk_is_mmapped(oldp)) |
{ |
#if HAVE_MREMAP |
newp = mremap_chunk(oldp, nb); |
if(newp) |
{ |
MALLOC_UNLOCK; |
return chunk2mem(newp); |
} |
#endif |
/* Note the extra SIZE_SZ overhead. */ |
if(oldsize - SIZE_SZ >= nb) |
{ |
MALLOC_UNLOCK; |
return oldmem; /* do nothing */ |
} |
/* Must alloc, copy, free. */ |
newmem = mALLOc(RCALL bytes); |
if (newmem == 0) |
{ |
MALLOC_UNLOCK; |
return 0; /* propagate failure */ |
} |
MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ); |
munmap_chunk(oldp); |
MALLOC_UNLOCK; |
return newmem; |
} |
#endif |
|
check_inuse_chunk(oldp); |
|
if ((long)(oldsize) < (long)(nb)) |
{ |
|
/* Try expanding forward */ |
|
next = chunk_at_offset(oldp, oldsize); |
if (next == top || !inuse(next)) |
{ |
nextsize = chunksize(next); |
|
/* Forward into top only if a remainder */ |
if (next == top) |
{ |
if ((long)(nextsize + newsize) >= (long)(nb + MINSIZE)) |
{ |
newsize += nextsize; |
top = chunk_at_offset(oldp, nb); |
set_head(top, (newsize - nb) | PREV_INUSE); |
set_head_size(oldp, nb); |
MALLOC_UNLOCK; |
return chunk2mem(oldp); |
} |
} |
|
/* Forward into next chunk */ |
else if (((long)(nextsize + newsize) >= (long)(nb))) |
{ |
unlink(next, bck, fwd); |
newsize += nextsize; |
goto split; |
} |
} |
else |
{ |
next = 0; |
nextsize = 0; |
} |
|
/* Try shifting backwards. */ |
|
if (!prev_inuse(oldp)) |
{ |
prev = prev_chunk(oldp); |
prevsize = chunksize(prev); |
|
/* try forward + backward first to save a later consolidation */ |
|
if (next != 0) |
{ |
/* into top */ |
if (next == top) |
{ |
if ((long)(nextsize + prevsize + newsize) >= (long)(nb + MINSIZE)) |
{ |
unlink(prev, bck, fwd); |
newp = prev; |
newsize += prevsize + nextsize; |
newmem = chunk2mem(newp); |
MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ); |
top = chunk_at_offset(newp, nb); |
set_head(top, (newsize - nb) | PREV_INUSE); |
set_head_size(newp, nb); |
MALLOC_UNLOCK; |
return newmem; |
} |
} |
|
/* into next chunk */ |
else if (((long)(nextsize + prevsize + newsize) >= (long)(nb))) |
{ |
unlink(next, bck, fwd); |
unlink(prev, bck, fwd); |
newp = prev; |
newsize += nextsize + prevsize; |
newmem = chunk2mem(newp); |
MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ); |
goto split; |
} |
} |
|
/* backward only */ |
if (prev != 0 && (long)(prevsize + newsize) >= (long)nb) |
{ |
unlink(prev, bck, fwd); |
newp = prev; |
newsize += prevsize; |
newmem = chunk2mem(newp); |
MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ); |
goto split; |
} |
} |
|
/* Must allocate */ |
|
newmem = mALLOc (RCALL bytes); |
|
if (newmem == 0) /* propagate failure */ |
{ |
MALLOC_UNLOCK; |
return 0; |
} |
|
/* Avoid copy if newp is next chunk after oldp. */ |
/* (This can only happen when new chunk is sbrk'ed.) */ |
|
if ( (newp = mem2chunk(newmem)) == next_chunk(oldp)) |
{ |
newsize += chunksize(newp); |
newp = oldp; |
goto split; |
} |
|
/* Otherwise copy, free, and exit */ |
MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ); |
fREe(RCALL oldmem); |
MALLOC_UNLOCK; |
return newmem; |
} |
|
|
split: /* split off extra room in old or expanded chunk */ |
|
remainder_size = long_sub_size_t(newsize, nb); |
|
if (remainder_size >= (long)MINSIZE) /* split off remainder */ |
{ |
remainder = chunk_at_offset(newp, nb); |
set_head_size(newp, nb); |
set_head(remainder, remainder_size | PREV_INUSE); |
set_inuse_bit_at_offset(remainder, remainder_size); |
fREe(RCALL chunk2mem(remainder)); /* let free() deal with it */ |
} |
else |
{ |
set_head_size(newp, newsize); |
set_inuse_bit_at_offset(newp, newsize); |
} |
|
check_inuse_chunk(newp); |
MALLOC_UNLOCK; |
return chunk2mem(newp); |
|
#endif /* MALLOC_PROVIDED */ |
} |
|
#endif /* DEFINE_REALLOC */ |
|
#ifdef DEFINE_MEMALIGN |
|
/* |
|
memalign algorithm: |
|
memalign requests more than enough space from malloc, finds a spot |
within that chunk that meets the alignment request, and then |
possibly frees the leading and trailing space. |
|
The alignment argument must be a power of two. This property is not |
checked by memalign, so misuse may result in random runtime errors. |
|
8-byte alignment is guaranteed by normal malloc calls, so don't |
bother calling memalign with an argument of 8 or less. |
|
Overreliance on memalign is a sure way to fragment space. |
|
*/ |
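 |
/* |
   Added illustrative example (not part of the original source): requesting |
   cache-line-aligned storage.  The alignment argument must be a power of |
   two, as stated above; 64 is only an example value.  The function name is |
   hypothetical and the block is not compiled. |
*/ |
#if 0 |
#include <stdlib.h> |
 |
extern void *memalign(size_t alignment, size_t bytes); |
 |
static void memalign_usage_sketch(void) |
{ |
    void *p = memalign(64, 1000);    /* 1000 bytes, 64-byte aligned */ |
    if (p != 0) |
    { |
        /* ((unsigned long)p % 64) == 0 holds for any successful call */ |
        free(p); |
    } |
} |
#endif |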
|
|
#if __STD_C |
Void_t* mEMALIGn(RARG size_t alignment, size_t bytes) |
#else |
Void_t* mEMALIGn(RARG alignment, bytes) RDECL size_t alignment; size_t bytes; |
#endif |
{ |
INTERNAL_SIZE_T nb; /* padded request size */ |
char* m; /* memory returned by malloc call */ |
mchunkptr p; /* corresponding chunk */ |
char* brk; /* alignment point within p */ |
mchunkptr newp; /* chunk to return */ |
INTERNAL_SIZE_T newsize; /* its size */ |
  INTERNAL_SIZE_T leadsize;           /* leading space before the alignment point */ |
mchunkptr remainder; /* spare room at end to split off */ |
long remainder_size; /* its size */ |
|
if ((long)bytes < 0) return 0; |
|
/* If need less alignment than we give anyway, just relay to malloc */ |
|
if (alignment <= MALLOC_ALIGNMENT) return mALLOc(RCALL bytes); |
|
/* Otherwise, ensure that it is at least a minimum chunk size */ |
|
if (alignment < MINSIZE) alignment = MINSIZE; |
|
/* Call malloc with worst case padding to hit alignment. */ |
|
nb = request2size(bytes); |
m = (char*)(mALLOc(RCALL nb + alignment + MINSIZE)); |
|
if (m == 0) return 0; /* propagate failure */ |
|
MALLOC_LOCK; |
|
p = mem2chunk(m); |
|
if ((((unsigned long)(m)) % alignment) == 0) /* aligned */ |
{ |
#if HAVE_MMAP |
if(chunk_is_mmapped(p)) |
{ |
MALLOC_UNLOCK; |
return chunk2mem(p); /* nothing more to do */ |
} |
#endif |
} |
else /* misaligned */ |
{ |
/* |
Find an aligned spot inside chunk. |
Since we need to give back leading space in a chunk of at |
least MINSIZE, if the first calculation places us at |
a spot with less than MINSIZE leader, we can move to the |
next aligned spot -- we've allocated enough total room so that |
this is always possible. |
*/ |
|
brk = (char*)mem2chunk(((unsigned long)(m + alignment - 1)) & -((signed) alignment)); |
if ((long)(brk - (char*)(p)) < (long)MINSIZE) brk = brk + alignment; |
|
newp = (mchunkptr)brk; |
leadsize = brk - (char*)(p); |
newsize = chunksize(p) - leadsize; |
|
#if HAVE_MMAP |
if(chunk_is_mmapped(p)) |
{ |
newp->prev_size = p->prev_size + leadsize; |
set_head(newp, newsize|IS_MMAPPED); |
MALLOC_UNLOCK; |
return chunk2mem(newp); |
} |
#endif |
|
/* give back leader, use the rest */ |
|
set_head(newp, newsize | PREV_INUSE); |
set_inuse_bit_at_offset(newp, newsize); |
set_head_size(p, leadsize); |
fREe(RCALL chunk2mem(p)); |
p = newp; |
|
assert (newsize >= nb && (((unsigned long)(chunk2mem(p))) % alignment) == 0); |
} |
|
/* Also give back spare room at the end */ |
|
remainder_size = long_sub_size_t(chunksize(p), nb); |
|
if (remainder_size >= (long)MINSIZE) |
{ |
remainder = chunk_at_offset(p, nb); |
set_head(remainder, remainder_size | PREV_INUSE); |
set_head_size(p, nb); |
fREe(RCALL chunk2mem(remainder)); |
} |
|
check_inuse_chunk(p); |
MALLOC_UNLOCK; |
return chunk2mem(p); |
|
} |
|
#endif /* DEFINE_MEMALIGN */ |
|
#ifdef DEFINE_VALLOC |
|
/* |
valloc just invokes memalign with alignment argument equal |
to the page size of the system (or as near to this as can |
be figured out from all the includes/defines above.) |
*/ |
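 |
/* |
   Added illustrative example (not part of the original source): since |
   valloc is memalign with the page size, the returned address is a |
   multiple of the system page size.  The function name is hypothetical |
   and the block is not compiled. |
*/ |
#if 0 |
#include <stdlib.h> |
 |
extern void *valloc(size_t bytes); |
 |
static void valloc_usage_sketch(void) |
{ |
    void *p = valloc(100);           /* >= 100 bytes, page-aligned */ |
    if (p != 0) |
        free(p); |
} |
#endif |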
|
#if __STD_C |
Void_t* vALLOc(RARG size_t bytes) |
#else |
Void_t* vALLOc(RARG bytes) RDECL size_t bytes; |
#endif |
{ |
return mEMALIGn (RCALL malloc_getpagesize, bytes); |
} |
|
#endif /* DEFINE_VALLOC */ |
|
#ifdef DEFINE_PVALLOC |
|
/* |
pvalloc just invokes valloc for the nearest pagesize |
  that will accommodate the request |
*/ |
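 |
/* |
   Added illustrative example (not part of the original source): pvalloc |
   rounds the request up to a whole number of pages before calling |
   memalign, so on a system with 4096-byte pages pvalloc(100) yields a |
   page-aligned block of one full page.  Not compiled. |
*/ |
#if 0 |
#include <stdlib.h> |
 |
extern void *pvalloc(size_t bytes); |
 |
static void pvalloc_usage_sketch(void) |
{ |
    void *p = pvalloc(100);          /* one full page on a 4 KiB-page system */ |
    if (p != 0) |
        free(p); |
} |
#endif |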
|
|
#if __STD_C |
Void_t* pvALLOc(RARG size_t bytes) |
#else |
Void_t* pvALLOc(RARG bytes) RDECL size_t bytes; |
#endif |
{ |
size_t pagesize = malloc_getpagesize; |
return mEMALIGn (RCALL pagesize, (bytes + pagesize - 1) & ~(pagesize - 1)); |
} |
|
#endif /* DEFINE_PVALLOC */ |
|
#ifdef DEFINE_CALLOC |
|
/* |
|
calloc calls malloc, then zeroes out the allocated chunk. |
|
*/ |
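 |
/* |
   Added illustrative example (not part of the original source): calloc |
   returns zero-filled storage for n * elem_size bytes.  The function name |
   is hypothetical and the block is not compiled. |
*/ |
#if 0 |
#include <stdlib.h> |
 |
static void calloc_usage_sketch(void) |
{ |
    int *v = calloc(10, sizeof(int));    /* room for 10 ints, all zero */ |
    if (v != 0) |
    { |
        /* v[0] == 0 and v[9] == 0 are guaranteed here */ |
        free(v); |
    } |
} |
#endif |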
|
#if __STD_C |
Void_t* cALLOc(RARG size_t n, size_t elem_size) |
#else |
Void_t* cALLOc(RARG n, elem_size) RDECL size_t n; size_t elem_size; |
#endif |
{ |
mchunkptr p; |
INTERNAL_SIZE_T csz; |
|
INTERNAL_SIZE_T sz = n * elem_size; |
|
#if MORECORE_CLEARS |
mchunkptr oldtop; |
INTERNAL_SIZE_T oldtopsize; |
#endif |
Void_t* mem; |
|
|
  if ((long)n < 0) return 0;     /* reject negative counts before taking the lock */ |
 |
  /* check if expand_top called, in which case don't need to clear */ |
#if MORECORE_CLEARS |
  MALLOC_LOCK; |
  oldtop = top; |
  oldtopsize = chunksize(top); |
#endif |
 |
  mem = mALLOc (RCALL sz); |
|
if (mem == 0) |
{ |
#if MORECORE_CLEARS |
MALLOC_UNLOCK; |
#endif |
return 0; |
} |
else |
{ |
p = mem2chunk(mem); |
|
/* Two optional cases in which clearing not necessary */ |
|
|
#if HAVE_MMAP |
if (chunk_is_mmapped(p)) |
{ |
#if MORECORE_CLEARS |
MALLOC_UNLOCK; |
#endif |
return mem; |
} |
#endif |
|
csz = chunksize(p); |
|
#if MORECORE_CLEARS |
if (p == oldtop && csz > oldtopsize) |
{ |
/* clear only the bytes from non-freshly-sbrked memory */ |
csz = oldtopsize; |
} |
MALLOC_UNLOCK; |
#endif |
|
MALLOC_ZERO(mem, csz - SIZE_SZ); |
return mem; |
} |
} |
|
#endif /* DEFINE_CALLOC */ |
|
#ifdef DEFINE_CFREE |
|
/* |
|
cfree just calls free. It is needed/defined on some systems |
that pair it with calloc, presumably for odd historical reasons. |
|
*/ |
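 |
/* |
   Added illustrative example (not part of the original source): cfree is |
   simply an alias for free, kept for systems that historically paired it |
   with calloc.  Not compiled. |
*/ |
#if 0 |
#include <stdlib.h> |
 |
extern void cfree(void *mem); |
 |
static void cfree_usage_sketch(void) |
{ |
    void *p = calloc(4, 16); |
    if (p != 0) |
        cfree(p);                    /* identical in effect to free(p) */ |
} |
#endif |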
|
#if !defined(INTERNAL_LINUX_C_LIB) || !defined(__ELF__) |
#if !defined(INTERNAL_NEWLIB) || !defined(_REENT_ONLY) |
#if __STD_C |
void cfree(Void_t *mem) |
#else |
void cfree(mem) Void_t *mem; |
#endif |
{ |
#ifdef INTERNAL_NEWLIB |
fREe(_REENT, mem); |
#else |
fREe(mem); |
#endif |
} |
#endif |
#endif |
|
#endif /* DEFINE_CFREE */ |
|
#ifdef DEFINE_FREE |
|
/* |
|
Malloc_trim gives memory back to the system (via negative |
arguments to sbrk) if there is unused memory at the `high' end of |
the malloc pool. You can call this after freeing large blocks of |
memory to potentially reduce the system-level memory requirements |
of a program. However, it cannot guarantee to reduce memory. Under |
some allocation patterns, some large free blocks of memory will be |
locked between two used chunks, so they cannot be given back to |
the system. |
|
The `pad' argument to malloc_trim represents the amount of free |
trailing space to leave untrimmed. If this argument is zero, |
only the minimum amount of memory to maintain internal data |
structures will be left (one page or less). Non-zero arguments |
can be supplied to maintain enough trailing space to service |
future expected allocations without having to re-obtain memory |
from the system. |
|
Malloc_trim returns 1 if it actually released any memory, else 0. |
|
*/ |
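 |
/* |
   Added illustrative example (not part of the original source): after |
   freeing a burst of allocations, malloc_trim can hand unused top-of-heap |
   memory back to the system.  The 64 KiB pad is only an example value; |
   the block is not compiled. |
*/ |
#if 0 |
#include <stdlib.h> |
 |
extern int malloc_trim(size_t pad); |
 |
static void trim_usage_sketch(void) |
{ |
    char *work = malloc(100 * 1024);     /* below the default mmap threshold */ |
    /* ... use the buffer ... */ |
    free(work); |
 |
    if (malloc_trim(64 * 1024)) |
    { |
        /* returned 1: some pages were released via a negative sbrk */ |
    } |
} |
#endif |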
|
#if __STD_C |
int malloc_trim(RARG size_t pad) |
#else |
int malloc_trim(RARG pad) RDECL size_t pad; |
#endif |
{ |
long top_size; /* Amount of top-most memory */ |
long extra; /* Amount to release */ |
char* current_brk; /* address returned by pre-check sbrk call */ |
char* new_brk; /* address returned by negative sbrk call */ |
|
unsigned long pagesz = malloc_getpagesize; |
|
MALLOC_LOCK; |
|
top_size = chunksize(top); |
extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz; |
|
if (extra < (long)pagesz) /* Not enough memory to release */ |
{ |
MALLOC_UNLOCK; |
return 0; |
} |
|
else |
{ |
/* Test to make sure no one else called sbrk */ |
current_brk = (char*)(MORECORE (0)); |
if (current_brk != (char*)(top) + top_size) |
{ |
MALLOC_UNLOCK; |
return 0; /* Apparently we don't own memory; must fail */ |
} |
|
else |
{ |
new_brk = (char*)(MORECORE (-extra)); |
|
if (new_brk == (char*)(MORECORE_FAILURE)) /* sbrk failed? */ |
{ |
/* Try to figure out what we have */ |
current_brk = (char*)(MORECORE (0)); |
top_size = current_brk - (char*)top; |
if (top_size >= (long)MINSIZE) /* if not, we are very very dead! */ |
{ |
sbrked_mem = current_brk - sbrk_base; |
set_head(top, top_size | PREV_INUSE); |
} |
check_chunk(top); |
MALLOC_UNLOCK; |
return 0; |
} |
|
else |
{ |
/* Success. Adjust top accordingly. */ |
set_head(top, (top_size - extra) | PREV_INUSE); |
sbrked_mem -= extra; |
check_chunk(top); |
MALLOC_UNLOCK; |
return 1; |
} |
} |
} |
} |
|
#endif /* DEFINE_FREE */ |
|
#ifdef DEFINE_MALLOC_USABLE_SIZE |
|
/* |
malloc_usable_size: |
|
This routine tells you how many bytes you can actually use in an |
allocated chunk, which may be more than you requested (although |
often not). You can use this many bytes without worrying about |
overwriting other allocated objects. Not a particularly great |
programming practice, but still sometimes useful. |
|
*/ |
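 |
/* |
   Added illustrative example (not part of the original source): the usable |
   size of a chunk can exceed the requested size because of alignment and |
   minimum-size padding.  The function name is hypothetical; not compiled. |
*/ |
#if 0 |
#include <stdlib.h> |
 |
extern size_t malloc_usable_size(void *mem); |
 |
static void usable_size_sketch(void) |
{ |
    char *p = malloc(100); |
    if (p != 0) |
    { |
        size_t n = malloc_usable_size(p);   /* n >= 100 */ |
        p[n - 1] = 0;                       /* writing up to n bytes is safe */ |
        free(p); |
    } |
} |
#endif |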
|
#if __STD_C |
size_t malloc_usable_size(RARG Void_t* mem) |
#else |
size_t malloc_usable_size(RARG mem) RDECL Void_t* mem; |
#endif |
{ |
mchunkptr p; |
if (mem == 0) |
return 0; |
else |
{ |
p = mem2chunk(mem); |
if(!chunk_is_mmapped(p)) |
{ |
if (!inuse(p)) return 0; |
#if DEBUG |
MALLOC_LOCK; |
check_inuse_chunk(p); |
MALLOC_UNLOCK; |
#endif |
return chunksize(p) - SIZE_SZ; |
} |
return chunksize(p) - 2*SIZE_SZ; |
} |
} |
|
#endif /* DEFINE_MALLOC_USABLE_SIZE */ |
|
#ifdef DEFINE_MALLINFO |
|
/* Utility to update current_mallinfo for malloc_stats and mallinfo() */ |
|
STATIC void malloc_update_mallinfo() |
{ |
int i; |
mbinptr b; |
mchunkptr p; |
#if DEBUG |
mchunkptr q; |
#endif |
|
INTERNAL_SIZE_T avail = chunksize(top); |
int navail = ((long)(avail) >= (long)MINSIZE)? 1 : 0; |
|
for (i = 1; i < NAV; ++i) |
{ |
b = bin_at(i); |
for (p = last(b); p != b; p = p->bk) |
{ |
#if DEBUG |
check_free_chunk(p); |
for (q = next_chunk(p); |
q < top && inuse(q) && (long)(chunksize(q)) >= (long)MINSIZE; |
q = next_chunk(q)) |
check_inuse_chunk(q); |
#endif |
avail += chunksize(p); |
navail++; |
} |
} |
|
current_mallinfo.ordblks = navail; |
current_mallinfo.uordblks = sbrked_mem - avail; |
current_mallinfo.fordblks = avail; |
#if HAVE_MMAP |
current_mallinfo.hblks = n_mmaps; |
current_mallinfo.hblkhd = mmapped_mem; |
#endif |
current_mallinfo.keepcost = chunksize(top); |
|
} |
|
#else /* ! DEFINE_MALLINFO */ |
|
#if __STD_C |
extern void malloc_update_mallinfo(void); |
#else |
extern void malloc_update_mallinfo(); |
#endif |
|
#endif /* ! DEFINE_MALLINFO */ |
|
#ifdef DEFINE_MALLOC_STATS |
|
/* |
|
malloc_stats: |
|
  Prints on stderr the amount of space obtained from the system (both |
via sbrk and mmap), the maximum amount (which may be more than |
current if malloc_trim and/or munmap got called), the maximum |
number of simultaneous mmap regions used, and the current number |
of bytes allocated via malloc (or realloc, etc) but not yet |
freed. (Note that this is the number of bytes allocated, not the |
number requested. It will be larger than the number requested |
because of alignment and bookkeeping overhead.) |
|
*/ |
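 |
/* |
   Added illustrative example (not part of the original source): calling |
   malloc_stats at an interesting point in a program dumps the summary |
   described above to stderr.  Not compiled. |
*/ |
#if 0 |
#include <stdlib.h> |
 |
extern void malloc_stats(void); |
 |
static void stats_usage_sketch(void) |
{ |
    void *a = malloc(1000); |
    void *b = malloc(5000); |
    free(a); |
 |
    malloc_stats();      /* prints "system bytes", "in use bytes", ... */ |
 |
    free(b); |
} |
#endif |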
|
#if __STD_C |
void malloc_stats(RONEARG) |
#else |
void malloc_stats(RONEARG) RDECL |
#endif |
{ |
unsigned long local_max_total_mem; |
int local_sbrked_mem; |
struct mallinfo local_mallinfo; |
#if HAVE_MMAP |
unsigned long local_mmapped_mem, local_max_n_mmaps; |
#endif |
FILE *fp; |
|
MALLOC_LOCK; |
malloc_update_mallinfo(); |
local_max_total_mem = max_total_mem; |
local_sbrked_mem = sbrked_mem; |
local_mallinfo = current_mallinfo; |
#if HAVE_MMAP |
local_mmapped_mem = mmapped_mem; |
local_max_n_mmaps = max_n_mmaps; |
#endif |
MALLOC_UNLOCK; |
|
#ifdef INTERNAL_NEWLIB |
fp = _stderr_r(reent_ptr); |
#define fprintf fiprintf |
#else |
fp = stderr; |
#endif |
|
fprintf(fp, "max system bytes = %10u\n", |
(unsigned int)(local_max_total_mem)); |
#if HAVE_MMAP |
fprintf(fp, "system bytes = %10u\n", |
(unsigned int)(local_sbrked_mem + local_mmapped_mem)); |
fprintf(fp, "in use bytes = %10u\n", |
(unsigned int)(local_mallinfo.uordblks + local_mmapped_mem)); |
#else |
fprintf(fp, "system bytes = %10u\n", |
(unsigned int)local_sbrked_mem); |
fprintf(fp, "in use bytes = %10u\n", |
(unsigned int)local_mallinfo.uordblks); |
#endif |
#if HAVE_MMAP |
fprintf(fp, "max mmap regions = %10u\n", |
(unsigned int)local_max_n_mmaps); |
#endif |
} |
|
#endif /* DEFINE_MALLOC_STATS */ |
|
#ifdef DEFINE_MALLINFO |
|
/* |
mallinfo returns a copy of updated current mallinfo. |
*/ |
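 |
/* |
   Added illustrative example (not part of the original source): reading a |
   few mallinfo fields.  The field names match the struct mallinfo used by |
   this allocator; whether <malloc.h> declares it depends on the system. |
   Not compiled. |
*/ |
#if 0 |
#include <stdio.h> |
#include <malloc.h> |
 |
static void mallinfo_usage_sketch(void) |
{ |
    struct mallinfo mi = mallinfo(); |
 |
    printf("in use:  %d bytes\n", mi.uordblks);  /* total allocated space    */ |
    printf("free:    %d bytes\n", mi.fordblks);  /* total non-inuse space    */ |
    printf("mmapped: %d bytes\n", mi.hblkhd);    /* space in mmapped regions */ |
} |
#endif |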
|
#if __STD_C |
struct mallinfo mALLINFo(RONEARG) |
#else |
struct mallinfo mALLINFo(RONEARG) RDECL |
#endif |
{ |
struct mallinfo ret; |
|
MALLOC_LOCK; |
malloc_update_mallinfo(); |
ret = current_mallinfo; |
MALLOC_UNLOCK; |
return ret; |
} |
|
#endif /* DEFINE_MALLINFO */ |
|
#ifdef DEFINE_MALLOPT |
|
/* |
mallopt: |
|
mallopt is the general SVID/XPG interface to tunable parameters. |
The format is to provide a (parameter-number, parameter-value) pair. |
mallopt then sets the corresponding parameter to the argument |
value if it can (i.e., so long as the value is meaningful), |
and returns 1 if successful else 0. |
|
See descriptions of tunable parameters above. |
|
*/ |
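 |
/* |
   Added illustrative example (not part of the original source): tuning two |
   of the parameters handled by mALLOPt below, using the standard public |
   names.  The values are examples only; each call returns 1 on success |
   and 0 otherwise.  Not compiled. |
*/ |
#if 0 |
#include <malloc.h>   /* mallopt() and M_* constants on many systems */ |
 |
static void mallopt_usage_sketch(void) |
{ |
    mallopt(M_TRIM_THRESHOLD, 256 * 1024);   /* trim once 256 KiB is unused   */ |
    mallopt(M_MMAP_THRESHOLD, 1024 * 1024);  /* mmap single requests >= 1 MiB */ |
} |
#endif |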
|
#if __STD_C |
int mALLOPt(RARG int param_number, int value) |
#else |
int mALLOPt(RARG param_number, value) RDECL int param_number; int value; |
#endif |
{ |
MALLOC_LOCK; |
switch(param_number) |
{ |
case M_TRIM_THRESHOLD: |
trim_threshold = value; MALLOC_UNLOCK; return 1; |
case M_TOP_PAD: |
top_pad = value; MALLOC_UNLOCK; return 1; |
case M_MMAP_THRESHOLD: |
#if HAVE_MMAP |
mmap_threshold = value; |
#endif |
MALLOC_UNLOCK; |
return 1; |
case M_MMAP_MAX: |
#if HAVE_MMAP |
n_mmaps_max = value; MALLOC_UNLOCK; return 1; |
#else |
MALLOC_UNLOCK; return value == 0; |
#endif |
|
default: |
MALLOC_UNLOCK; |
return 0; |
} |
} |
|
#endif /* DEFINE_MALLOPT */ |
|
/* |
|
History: |
|
V2.6.6 Sun Dec 5 07:42:19 1999 Doug Lea (dl at gee) |
* return null for negative arguments |
* Added Several WIN32 cleanups from Martin C. Fong <mcfong@yahoo.com> |
* Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h' |
(e.g. WIN32 platforms) |
* Cleanup up header file inclusion for WIN32 platforms |
* Cleanup code to avoid Microsoft Visual C++ compiler complaints |
* Add 'USE_DL_PREFIX' to quickly allow co-existence with existing |
memory allocation routines |
* Set 'malloc_getpagesize' for WIN32 platforms (needs more work) |
* Use 'assert' rather than 'ASSERT' in WIN32 code to conform to |
usage of 'assert' in non-WIN32 code |
* Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to |
avoid infinite loop |
* Always call 'fREe()' rather than 'free()' |
|
V2.6.5 Wed Jun 17 15:57:31 1998 Doug Lea (dl at gee) |
* Fixed ordering problem with boundary-stamping |
|
V2.6.3 Sun May 19 08:17:58 1996 Doug Lea (dl at gee) |
* Added pvalloc, as recommended by H.J. Liu |
* Added 64bit pointer support mainly from Wolfram Gloger |
* Added anonymously donated WIN32 sbrk emulation |
* Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen |
* malloc_extend_top: fix mask error that caused wastage after |
foreign sbrks |
* Add linux mremap support code from HJ Liu |
|
V2.6.2 Tue Dec 5 06:52:55 1995 Doug Lea (dl at gee) |
* Integrated most documentation with the code. |
* Add support for mmap, with help from |
Wolfram Gloger (Gloger@lrz.uni-muenchen.de). |
* Use last_remainder in more cases. |
* Pack bins using idea from colin@nyx10.cs.du.edu |
      * Use ordered bins instead of best-fit threshold |
* Eliminate block-local decls to simplify tracing and debugging. |
* Support another case of realloc via move into top |
      * Fix error occurring when initial sbrk_base not word-aligned. |
* Rely on page size for units instead of SBRK_UNIT to |
avoid surprises about sbrk alignment conventions. |
* Add mallinfo, mallopt. Thanks to Raymond Nijssen |
(raymond@es.ele.tue.nl) for the suggestion. |
* Add `pad' argument to malloc_trim and top_pad mallopt parameter. |
* More precautions for cases where other routines call sbrk, |
courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de). |
* Added macros etc., allowing use in linux libc from |
H.J. Lu (hjl@gnu.ai.mit.edu) |
* Inverted this history list |
|
V2.6.1 Sat Dec 2 14:10:57 1995 Doug Lea (dl at gee) |
* Re-tuned and fixed to behave more nicely with V2.6.0 changes. |
* Removed all preallocation code since under current scheme |
the work required to undo bad preallocations exceeds |
the work saved in good cases for most test programs. |
* No longer use return list or unconsolidated bins since |
no scheme using them consistently outperforms those that don't |
given above changes. |
* Use best fit for very large chunks to prevent some worst-cases. |
* Added some support for debugging |
|
V2.6.0 Sat Nov 4 07:05:23 1995 Doug Lea (dl at gee) |
* Removed footers when chunks are in use. Thanks to |
Paul Wilson (wilson@cs.texas.edu) for the suggestion. |
|
V2.5.4 Wed Nov 1 07:54:51 1995 Doug Lea (dl at gee) |
* Added malloc_trim, with help from Wolfram Gloger |
(wmglo@Dent.MED.Uni-Muenchen.DE). |
|
V2.5.3 Tue Apr 26 10:16:01 1994 Doug Lea (dl at g) |
|
V2.5.2 Tue Apr 5 16:20:40 1994 Doug Lea (dl at g) |
* realloc: try to expand in both directions |
* malloc: swap order of clean-bin strategy; |
* realloc: only conditionally expand backwards |
* Try not to scavenge used bins |
* Use bin counts as a guide to preallocation |
* Occasionally bin return list chunks in first scan |
* Add a few optimizations from colin@nyx10.cs.du.edu |
|
V2.5.1 Sat Aug 14 15:40:43 1993 Doug Lea (dl at g) |
* faster bin computation & slightly different binning |
* merged all consolidations to one part of malloc proper |
(eliminating old malloc_find_space & malloc_clean_bin) |
* Scan 2 returns chunks (not just 1) |
* Propagate failure in realloc if malloc returns 0 |
* Add stuff to allow compilation on non-ANSI compilers |
from kpv@research.att.com |
|
V2.5 Sat Aug 7 07:41:59 1993 Doug Lea (dl at g.oswego.edu) |
* removed potential for odd address access in prev_chunk |
* removed dependency on getpagesize.h |
* misc cosmetics and a bit more internal documentation |
* anticosmetics: mangled names in macros to evade debugger strangeness |
* tested on sparc, hp-700, dec-mips, rs6000 |
with gcc & native cc (hp, dec only) allowing |
Detlefs & Zorn comparison study (in SIGPLAN Notices.) |
|
Trial version Fri Aug 28 13:14:29 1992 Doug Lea (dl at g.oswego.edu) |
* Based loosely on libg++-1.2X malloc. (It retains some of the overall |
structure of old version, but most details differ.) |
|
*/ |
|
/common/v2_0/doc/dlmalloc/dlmalloc-2.6.4.c
0,0 → 1,3166
/* ---------- To make a malloc.h, start cutting here ------------ */ |
|
/* |
A version of malloc/free/realloc written by Doug Lea and released to the |
public domain. Send questions/comments/complaints/performance data |
to dl@cs.oswego.edu |
|
* VERSION 2.6.4 Thu Nov 28 07:54:55 1996 Doug Lea (dl at gee) |
|
Note: There may be an updated version of this malloc obtainable at |
ftp://g.oswego.edu/pub/misc/malloc.c |
Check before installing! |
|
* Why use this malloc? |
|
This is not the fastest, most space-conserving, most portable, or |
most tunable malloc ever written. However it is among the fastest |
while also being among the most space-conserving, portable and tunable. |
Consistent balance across these factors results in a good general-purpose |
allocator. For a high-level description, see |
http://g.oswego.edu/dl/html/malloc.html |
|
* Synopsis of public routines |
|
(Much fuller descriptions are contained in the program documentation below.) |
|
malloc(size_t n); |
Return a pointer to a newly allocated chunk of at least n bytes, or null |
if no space is available. |
free(Void_t* p); |
Release the chunk of memory pointed to by p, or no effect if p is null. |
realloc(Void_t* p, size_t n); |
Return a pointer to a chunk of size n that contains the same data |
as does chunk p up to the minimum of (n, p's size) bytes, or null |
if no space is available. The returned pointer may or may not be |
the same as p. If p is null, equivalent to malloc. Unless the |
#define REALLOC_ZERO_BYTES_FREES below is set, realloc with a |
size argument of zero (re)allocates a minimum-sized chunk. |
memalign(size_t alignment, size_t n); |
Return a pointer to a newly allocated chunk of n bytes, aligned |
in accord with the alignment argument, which must be a power of |
two. |
valloc(size_t n); |
Equivalent to memalign(pagesize, n), where pagesize is the page |
size of the system (or as near to this as can be figured out from |
all the includes/defines below.) |
pvalloc(size_t n); |
Equivalent to valloc(minimum-page-that-holds(n)), that is, |
round up n to nearest pagesize. |
calloc(size_t unit, size_t quantity); |
Returns a pointer to quantity * unit bytes, with all locations |
set to zero. |
cfree(Void_t* p); |
Equivalent to free(p). |
malloc_trim(size_t pad); |
Release all but pad bytes of freed top-most memory back |
to the system. Return 1 if successful, else 0. |
malloc_usable_size(Void_t* p); |
      Report the number of usable allocated bytes associated with allocated |
chunk p. This may or may not report more bytes than were requested, |
due to alignment and minimum size constraints. |
malloc_stats(); |
Prints brief summary statistics on stderr. |
mallinfo() |
Returns (by copy) a struct containing various summary statistics. |
mallopt(int parameter_number, int parameter_value) |
Changes one of the tunable parameters described below. Returns |
1 if successful in changing the parameter, else 0. |
|
* Vital statistics: |
|
Alignment: 8-byte |
8 byte alignment is currently hardwired into the design. This |
seems to suffice for all current machines and C compilers. |
|
Assumed pointer representation: 4 or 8 bytes |
Code for 8-byte pointers is untested by me but has worked |
reliably by Wolfram Gloger, who contributed most of the |
changes supporting this. |
|
Assumed size_t representation: 4 or 8 bytes |
Note that size_t is allowed to be 4 bytes even if pointers are 8. |
|
Minimum overhead per allocated chunk: 4 or 8 bytes |
Each malloced chunk has a hidden overhead of 4 bytes holding size |
and status information. |
|
Minimum allocated size: 4-byte ptrs: 16 bytes (including 4 overhead) |
                                          8-byte ptrs:  24/32 bytes (including 4/8 overhead) |
|
When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte |
ptrs but 4 byte size) or 24 (for 8/8) additional bytes are |
needed; 4 (8) for a trailing size field |
and 8 (16) bytes for free list pointers. Thus, the minimum |
allocatable size is 16/24/32 bytes. |
|
Even a request for zero bytes (i.e., malloc(0)) returns a |
pointer to something of the minimum allocatable size. |
|
Maximum allocated size: 4-byte size_t: 2^31 - 8 bytes |
8-byte size_t: 2^63 - 16 bytes |
|
It is assumed that (possibly signed) size_t bit values suffice to |
represent chunk sizes. `Possibly signed' is due to the fact |
that `size_t' may be defined on a system as either a signed or |
an unsigned type. To be conservative, values that would appear |
as negative numbers are avoided. |
Requests for sizes with a negative sign bit will return a |
minimum-sized chunk. |
|
Maximum overhead wastage per allocated chunk: normally 15 bytes |
|
       Alignment demands, plus the minimum allocatable size restriction |
make the normal worst-case wastage 15 bytes (i.e., up to 15 |
more bytes will be allocated than were requested in malloc), with |
two exceptions: |
1. Because requests for zero bytes allocate non-zero space, |
the worst case wastage for a request of zero bytes is 24 bytes. |
2. For requests >= mmap_threshold that are serviced via |
mmap(), the worst case wastage is 8 bytes plus the remainder |
from a system page (the minimal mmap unit); typically 4096 bytes. |
|
* Limitations |
|
Here are some features that are NOT currently supported |
|
* No user-definable hooks for callbacks and the like. |
* No automated mechanism for fully checking that all accesses |
to malloced memory stay within their bounds. |
* No support for compaction. |
|
* Synopsis of compile-time options: |
|
People have reported using previous versions of this malloc on all |
versions of Unix, sometimes by tweaking some of the defines |
below. It has been tested most extensively on Solaris and |
Linux. It is also reported to work on WIN32 platforms. |
People have also reported adapting this malloc for use in |
stand-alone embedded systems. |
|
The implementation is in straight, hand-tuned ANSI C. Among other |
consequences, it uses a lot of macros. Because of this, to be at |
all usable, this code should be compiled using an optimizing compiler |
(for example gcc -O2) that can simplify expressions and control |
paths. |
|
__STD_C (default: derived from C compiler defines) |
Nonzero if using ANSI-standard C compiler, a C++ compiler, or |
a C compiler sufficiently close to ANSI to get away with it. |
DEBUG (default: NOT defined) |
Define to enable debugging. Adds fairly extensive assertion-based |
checking to help track down memory errors, but noticeably slows down |
execution. |
REALLOC_ZERO_BYTES_FREES (default: NOT defined) |
Define this if you think that realloc(p, 0) should be equivalent |
to free(p). Otherwise, since malloc returns a unique pointer for |
malloc(0), so does realloc(p, 0). |
HAVE_MEMCPY (default: defined) |
Define if you are not otherwise using ANSI STD C, but still |
have memcpy and memset in your C library and want to use them. |
Otherwise, simple internal versions are supplied. |
USE_MEMCPY (default: 1 if HAVE_MEMCPY is defined, 0 otherwise) |
Define as 1 if you want the C library versions of memset and |
memcpy called in realloc and calloc (otherwise macro versions are used). |
At least on some platforms, the simple macro versions usually |
outperform libc versions. |
HAVE_MMAP (default: defined as 1) |
Define to non-zero to optionally make malloc() use mmap() to |
allocate very large blocks. |
HAVE_MREMAP (default: defined as 0 unless Linux libc set) |
Define to non-zero to optionally make realloc() use mremap() to |
reallocate very large blocks. |
malloc_getpagesize (default: derived from system #includes) |
Either a constant or routine call returning the system page size. |
HAVE_USR_INCLUDE_MALLOC_H (default: NOT defined) |
Optionally define if you are on a system with a /usr/include/malloc.h |
that declares struct mallinfo. It is not at all necessary to |
define this even if you do, but will ensure consistency. |
INTERNAL_SIZE_T (default: size_t) |
Define to a 32-bit type (probably `unsigned int') if you are on a |
64-bit machine, yet do not want or need to allow malloc requests of |
greater than 2^31 to be handled. This saves space, especially for |
very small chunks. |
INTERNAL_LINUX_C_LIB (default: NOT defined) |
Defined only when compiled as part of Linux libc. |
Also note that there is some odd internal name-mangling via defines |
(for example, internally, `malloc' is named `mALLOc') needed |
when compiling in this case. These look funny but don't otherwise |
affect anything. |
WIN32 (default: undefined) |
Define this on MS win (95, nt) platforms to compile in sbrk emulation. |
LACKS_UNISTD_H (default: undefined) |
Define this if your system does not have a <unistd.h>. |
MORECORE (default: sbrk) |
The name of the routine to call to obtain more memory from the system. |
MORECORE_FAILURE (default: -1) |
The value returned upon failure of MORECORE. |
MORECORE_CLEARS (default 1) |
True (1) if the routine mapped to MORECORE zeroes out memory (which |
holds for sbrk). |
DEFAULT_TRIM_THRESHOLD |
DEFAULT_TOP_PAD |
DEFAULT_MMAP_THRESHOLD |
DEFAULT_MMAP_MAX |
Default values of tunable parameters (described in detail below) |
controlling interaction with host system routines (sbrk, mmap, etc). |
These values may also be changed dynamically via mallopt(). The |
preset defaults are those that give best performance for typical |
programs/systems. |
|
|
*/ |
|
|
|
|
/* Preliminaries */ |
|
#ifndef __STD_C |
#ifdef __STDC__ |
#define __STD_C 1 |
#else |
#if __cplusplus |
#define __STD_C 1 |
#else |
#define __STD_C 0 |
#endif /*__cplusplus*/ |
#endif /*__STDC__*/ |
#endif /*__STD_C*/ |
|
#ifndef Void_t |
#if __STD_C |
#define Void_t void |
#else |
#define Void_t char |
#endif |
#endif /*Void_t*/ |
|
#if __STD_C |
#include <stddef.h> /* for size_t */ |
#else |
#include <sys/types.h> |
#endif |
|
#ifdef __cplusplus |
extern "C" { |
#endif |
|
#include <stdio.h> /* needed for malloc_stats */ |
|
|
/* |
Compile-time options |
*/ |
|
|
/* |
Debugging: |
|
Because freed chunks may be overwritten with link fields, this |
malloc will often die when freed memory is overwritten by user |
programs. This can be very effective (albeit in an annoying way) |
in helping track down dangling pointers. |
|
If you compile with -DDEBUG, a number of assertion checks are |
enabled that will catch more memory errors. You probably won't be |
able to make much sense of the actual assertion errors, but they |
should help you locate incorrectly overwritten memory. The |
checking is fairly extensive, and will slow down execution |
noticeably. Calling malloc_stats or mallinfo with DEBUG set will |
attempt to check every non-mmapped allocated and free chunk in the |
  course of computing the summaries. (By nature, mmapped regions |
cannot be checked very much automatically.) |
|
Setting DEBUG may also be helpful if you are trying to modify |
this code. The assertions in the check routines spell out in more |
detail the assumptions and invariants underlying the algorithms. |
|
*/ |
|
#if DEBUG |
#include <assert.h> |
#else |
#define assert(x) ((void)0) |
#endif |
|
|
/* |
INTERNAL_SIZE_T is the word-size used for internal bookkeeping |
of chunk sizes. On a 64-bit machine, you can reduce malloc |
overhead by defining INTERNAL_SIZE_T to be a 32 bit `unsigned int' |
at the expense of not being able to handle requests greater than |
2^31. This limitation is hardly ever a concern; you are encouraged |
to set this. However, the default version is the same as size_t. |
*/ |
|
#ifndef INTERNAL_SIZE_T |
#define INTERNAL_SIZE_T size_t |
#endif |
|
/* |
REALLOC_ZERO_BYTES_FREES should be set if a call to |
realloc with zero bytes should be the same as a call to free. |
Some people think it should. Otherwise, since this malloc |
returns a unique pointer for malloc(0), so does realloc(p, 0). |
*/ |
|
|
/* #define REALLOC_ZERO_BYTES_FREES */ |
|
|
/* |
  WIN32 causes an emulation of sbrk to be compiled in. |
mmap-based options are not currently supported in WIN32. |
*/ |
|
/* #define WIN32 */ |
#ifdef WIN32 |
#define MORECORE wsbrk |
#define HAVE_MMAP 0 |
#endif |
|
|
/* |
HAVE_MEMCPY should be defined if you are not otherwise using |
ANSI STD C, but still have memcpy and memset in your C library |
and want to use them in calloc and realloc. Otherwise simple |
macro versions are defined here. |
|
USE_MEMCPY should be defined as 1 if you actually want to |
have memset and memcpy called. People report that the macro |
versions are often enough faster than libc versions on many |
systems that it is better to use them. |
|
*/ |
|
#define HAVE_MEMCPY |
|
#ifndef USE_MEMCPY |
#ifdef HAVE_MEMCPY |
#define USE_MEMCPY 1 |
#else |
#define USE_MEMCPY 0 |
#endif |
#endif |
|
#if (__STD_C || defined(HAVE_MEMCPY)) |
|
#if __STD_C |
void* memset(void*, int, size_t); |
void* memcpy(void*, const void*, size_t); |
#else |
Void_t* memset(); |
Void_t* memcpy(); |
#endif |
#endif |
|
#if USE_MEMCPY |
|
/* The following macros are only invoked with (2n+1)-multiples of |
INTERNAL_SIZE_T units, with a positive integer n. This is exploited |
for fast inline execution when n is small. */ |
|
#define MALLOC_ZERO(charp, nbytes) \ |
do { \ |
INTERNAL_SIZE_T mzsz = (nbytes); \ |
if(mzsz <= 9*sizeof(mzsz)) { \ |
INTERNAL_SIZE_T* mz = (INTERNAL_SIZE_T*) (charp); \ |
if(mzsz >= 5*sizeof(mzsz)) { *mz++ = 0; \ |
*mz++ = 0; \ |
if(mzsz >= 7*sizeof(mzsz)) { *mz++ = 0; \ |
*mz++ = 0; \ |
if(mzsz >= 9*sizeof(mzsz)) { *mz++ = 0; \ |
*mz++ = 0; }}} \ |
*mz++ = 0; \ |
*mz++ = 0; \ |
*mz = 0; \ |
} else memset((charp), 0, mzsz); \ |
} while(0) |
|
#define MALLOC_COPY(dest,src,nbytes) \ |
do { \ |
INTERNAL_SIZE_T mcsz = (nbytes); \ |
if(mcsz <= 9*sizeof(mcsz)) { \ |
INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) (src); \ |
INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) (dest); \ |
if(mcsz >= 5*sizeof(mcsz)) { *mcdst++ = *mcsrc++; \ |
*mcdst++ = *mcsrc++; \ |
if(mcsz >= 7*sizeof(mcsz)) { *mcdst++ = *mcsrc++; \ |
*mcdst++ = *mcsrc++; \ |
if(mcsz >= 9*sizeof(mcsz)) { *mcdst++ = *mcsrc++; \ |
*mcdst++ = *mcsrc++; }}} \ |
*mcdst++ = *mcsrc++; \ |
*mcdst++ = *mcsrc++; \ |
*mcdst = *mcsrc ; \ |
} else memcpy(dest, src, mcsz); \ |
} while(0) |
|
#else /* !USE_MEMCPY */ |
|
/* Use Duff's device for good zeroing/copying performance. */ |
|
#define MALLOC_ZERO(charp, nbytes) \ |
do { \ |
INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp); \ |
long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T), mcn; \ |
if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \ |
switch (mctmp) { \ |
case 0: for(;;) { *mzp++ = 0; \ |
case 7: *mzp++ = 0; \ |
case 6: *mzp++ = 0; \ |
case 5: *mzp++ = 0; \ |
case 4: *mzp++ = 0; \ |
case 3: *mzp++ = 0; \ |
case 2: *mzp++ = 0; \ |
case 1: *mzp++ = 0; if(mcn <= 0) break; mcn--; } \ |
} \ |
} while(0) |
|
#define MALLOC_COPY(dest,src,nbytes) \ |
do { \ |
INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src; \ |
INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest; \ |
long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T), mcn; \ |
if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \ |
switch (mctmp) { \ |
case 0: for(;;) { *mcdst++ = *mcsrc++; \ |
case 7: *mcdst++ = *mcsrc++; \ |
case 6: *mcdst++ = *mcsrc++; \ |
case 5: *mcdst++ = *mcsrc++; \ |
case 4: *mcdst++ = *mcsrc++; \ |
case 3: *mcdst++ = *mcsrc++; \ |
case 2: *mcdst++ = *mcsrc++; \ |
case 1: *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; } \ |
} \ |
} while(0) |
|
#endif |
|
|
/* |
Define HAVE_MMAP to optionally make malloc() use mmap() to |
allocate very large blocks. These will be returned to the |
operating system immediately after a free(). |
*/ |
|
#ifndef HAVE_MMAP |
#define HAVE_MMAP 1 |
#endif |
|
/* |
Define HAVE_MREMAP to make realloc() use mremap() to re-allocate |
large blocks. This is currently only possible on Linux with |
kernel versions newer than 1.3.77. |
*/ |
|
#ifndef HAVE_MREMAP |
#ifdef INTERNAL_LINUX_C_LIB |
#define HAVE_MREMAP 1 |
#else |
#define HAVE_MREMAP 0 |
#endif |
#endif |
|
#if HAVE_MMAP |
|
#include <unistd.h> |
#include <fcntl.h> |
#include <sys/mman.h> |
|
#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON) |
#define MAP_ANONYMOUS MAP_ANON |
#endif |
|
#endif /* HAVE_MMAP */ |
|
/* |
Access to system page size. To the extent possible, this malloc |
manages memory from the system in page-size units. |
|
The following mechanics for getpagesize were adapted from |
bsd/gnu getpagesize.h |
*/ |
|
#ifndef LACKS_UNISTD_H |
# include <unistd.h> |
#endif |
|
#ifndef malloc_getpagesize |
# ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */ |
# ifndef _SC_PAGE_SIZE |
# define _SC_PAGE_SIZE _SC_PAGESIZE |
# endif |
# endif |
# ifdef _SC_PAGE_SIZE |
# define malloc_getpagesize sysconf(_SC_PAGE_SIZE) |
# else |
# if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE) |
extern size_t getpagesize(); |
# define malloc_getpagesize getpagesize() |
# else |
# include <sys/param.h> |
# ifdef EXEC_PAGESIZE |
# define malloc_getpagesize EXEC_PAGESIZE |
# else |
# ifdef NBPG |
# ifndef CLSIZE |
# define malloc_getpagesize NBPG |
# else |
# define malloc_getpagesize (NBPG * CLSIZE) |
# endif |
# else |
# ifdef NBPC |
# define malloc_getpagesize NBPC |
# else |
# ifdef PAGESIZE |
# define malloc_getpagesize PAGESIZE |
# else |
# define malloc_getpagesize (4096) /* just guess */ |
# endif |
# endif |
# endif |
# endif |
# endif |
# endif |
#endif |
|
|
|
/* |
|
This version of malloc supports the standard SVID/XPG mallinfo |
routine that returns a struct containing the same kind of |
information you can get from malloc_stats. It should work on |
any SVID/XPG compliant system that has a /usr/include/malloc.h |
defining struct mallinfo. (If you'd like to install such a thing |
yourself, cut out the preliminary declarations as described above |
and below and save them in a malloc.h file. But there's no |
compelling reason to bother to do this.) |
|
The main declaration needed is the mallinfo struct that is returned |
  (by-copy) by mallinfo(). The SVID/XPG mallinfo struct contains a |
bunch of fields, most of which are not even meaningful in this |
  version of malloc. Some of these fields are instead filled by |
mallinfo() with other numbers that might possibly be of interest. |
|
HAVE_USR_INCLUDE_MALLOC_H should be set if you have a |
/usr/include/malloc.h file that includes a declaration of struct |
mallinfo. If so, it is included; else an SVID2/XPG2 compliant |
version is declared below. These must be precisely the same for |
mallinfo() to work. |
|
*/ |
|
/* #define HAVE_USR_INCLUDE_MALLOC_H */ |
|
#if HAVE_USR_INCLUDE_MALLOC_H |
#include "/usr/include/malloc.h" |
#else |
|
/* SVID2/XPG mallinfo structure */ |
|
struct mallinfo { |
int arena; /* total space allocated from system */ |
int ordblks; /* number of non-inuse chunks */ |
int smblks; /* unused -- always zero */ |
int hblks; /* number of mmapped regions */ |
int hblkhd; /* total space in mmapped regions */ |
int usmblks; /* unused -- always zero */ |
int fsmblks; /* unused -- always zero */ |
int uordblks; /* total allocated space */ |
int fordblks; /* total non-inuse space */ |
int keepcost; /* top-most, releasable (via malloc_trim) space */ |
}; |
|
/* SVID2/XPG mallopt options */ |
|
#define M_MXFAST 1 /* UNUSED in this malloc */ |
#define M_NLBLKS 2 /* UNUSED in this malloc */ |
#define M_GRAIN 3 /* UNUSED in this malloc */ |
#define M_KEEP 4 /* UNUSED in this malloc */ |
|
#endif |
|
/* mallopt options that actually do something */ |
|
#define M_TRIM_THRESHOLD -1 |
#define M_TOP_PAD -2 |
#define M_MMAP_THRESHOLD -3 |
#define M_MMAP_MAX -4 |
|
|
|
#ifndef DEFAULT_TRIM_THRESHOLD |
#define DEFAULT_TRIM_THRESHOLD (128 * 1024) |
#endif |
|
/* |
M_TRIM_THRESHOLD is the maximum amount of unused top-most memory |
to keep before releasing via malloc_trim in free(). |
|
Automatic trimming is mainly useful in long-lived programs. |
Because trimming via sbrk can be slow on some systems, and can |
sometimes be wasteful (in cases where programs immediately |
afterward allocate more large chunks) the value should be high |
enough so that your overall system performance would improve by |
releasing. |
|
The trim threshold and the mmap control parameters (see below) |
can be traded off with one another. Trimming and mmapping are |
two different ways of releasing unused memory back to the |
system. Between these two, it is often possible to keep |
system-level demands of a long-lived program down to a bare |
minimum. For example, in one test suite of sessions measuring |
the XF86 X server on Linux, using a trim threshold of 128K and a |
mmap threshold of 192K led to near-minimal long term resource |
consumption. |
|
If you are using this malloc in a long-lived program, it should |
pay to experiment with these values. As a rough guide, you |
might set to a value close to the average size of a process |
(program) running on your system. Releasing this much memory |
would allow such a process to run in memory. Generally, it's |
      worth it to tune for trimming rather than memory mapping when a |
program undergoes phases where several large chunks are |
allocated and released in ways that can reuse each other's |
storage, perhaps mixed with phases where there are no such |
chunks at all. And in well-behaved long-lived programs, |
controlling release of large blocks via trimming versus mapping |
is usually faster. |
|
However, in most programs, these parameters serve mainly as |
protection against the system-level effects of carrying around |
massive amounts of unneeded memory. Since frequent calls to |
sbrk, mmap, and munmap otherwise degrade performance, the default |
parameters are set to relatively high values that serve only as |
safeguards. |
|
The default trim value is high enough to cause trimming only in |
fairly extreme (by current memory consumption standards) cases. |
It must be greater than page size to have any useful effect. To |
disable trimming completely, you can set to (unsigned long)(-1); |
|
|
*/ |
|
|
#ifndef DEFAULT_TOP_PAD |
#define DEFAULT_TOP_PAD (0) |
#endif |
|
/* |
M_TOP_PAD is the amount of extra `padding' space to allocate or |
retain whenever sbrk is called. It is used in two ways internally: |
|
* When sbrk is called to extend the top of the arena to satisfy |
a new malloc request, this much padding is added to the sbrk |
request. |
|
* When malloc_trim is called automatically from free(), |
it is used as the `pad' argument. |
|
In both cases, the actual amount of padding is rounded |
so that the end of the arena is always a system page boundary. |
|
The main reason for using padding is to avoid calling sbrk so |
often. Having even a small pad greatly reduces the likelihood |
that nearly every malloc request during program start-up (or |
after trimming) will invoke sbrk, which needlessly wastes |
time. |
|
Automatic rounding-up to page-size units is normally sufficient |
to avoid measurable overhead, so the default is 0. However, in |
systems where sbrk is relatively slow, it can pay to increase |
this value, at the expense of carrying around more memory than |
the program needs. |
|
*/ |
|
|
#ifndef DEFAULT_MMAP_THRESHOLD |
#define DEFAULT_MMAP_THRESHOLD (128 * 1024) |
#endif |
|
/* |
|
M_MMAP_THRESHOLD is the request size threshold for using mmap() |
to service a request. Requests of at least this size that cannot |
be allocated using already-existing space will be serviced via mmap. |
(If enough normal freed space already exists it is used instead.) |
|
Using mmap segregates relatively large chunks of memory so that |
they can be individually obtained and released from the host |
system. A request serviced through mmap is never reused by any |
other request (at least not directly; the system may just so |
happen to remap successive requests to the same locations). |
|
Segregating space in this way has the benefit that mmapped space |
can ALWAYS be individually released back to the system, which |
helps keep the system level memory demands of a long-lived |
program low. Mapped memory can never become `locked' between |
other chunks, as can happen with normally allocated chunks, which |
      means that even trimming via malloc_trim would not release them. |
|
However, it has the disadvantages that: |
|
1. The space cannot be reclaimed, consolidated, and then |
used to service later requests, as happens with normal chunks. |
2. It can lead to more wastage because of mmap page alignment |
requirements |
3. It causes malloc performance to be more dependent on host |
system memory management support routines which may vary in |
implementation quality and may impose arbitrary |
limitations. Generally, servicing a request via normal |
malloc steps is faster than going through a system's mmap. |
|
All together, these considerations should lead you to use mmap |
only for relatively large requests. |
|
|
*/ |
|
|
|
#ifndef DEFAULT_MMAP_MAX |
#if HAVE_MMAP |
#define DEFAULT_MMAP_MAX (64) |
#else |
#define DEFAULT_MMAP_MAX (0) |
#endif |
#endif |
|
/* |
M_MMAP_MAX is the maximum number of requests to simultaneously |
service using mmap. This parameter exists because: |
|
1. Some systems have a limited number of internal tables for |
use by mmap. |
2. In most systems, overreliance on mmap can degrade overall |
performance. |
3. If a program allocates many large regions, it is probably |
better off using normal sbrk-based allocation routines that |
can reclaim and reallocate normal heap memory. Using a |
small value allows transition into this mode after the |
first few allocations. |
|
Setting to 0 disables all use of mmap. If HAVE_MMAP is not set, |
the default value is 0, and attempts to set it to non-zero values |
in mallopt will fail. |
*/ |
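 |
/* |
   Added illustrative example (not part of the original source): one |
   possible mallopt configuration for a long-lived program, combining the |
   tunables described above.  The numbers are examples, not |
   recommendations, and the block is not compiled. |
*/ |
#if 0 |
extern int mallopt(int param_number, int value); |
 |
static void tuning_sketch(void) |
{ |
    mallopt(M_TRIM_THRESHOLD, 512 * 1024);   /* keep more slack before trimming */ |
    mallopt(M_TOP_PAD,        64 * 1024);    /* pad each sbrk request           */ |
    mallopt(M_MMAP_MAX,       32);           /* at most 32 concurrent mmaps     */ |
} |
#endif |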
|
|
|
|
/* |
|
Special defines for linux libc |
|
Except when compiled using these special defines for Linux libc |
using weak aliases, this malloc is NOT designed to work in |
multithreaded applications. No semaphores or other concurrency |
control are provided to ensure that multiple malloc or free calls |
  don't run at the same time, which could be disastrous. A single |
semaphore could be used across malloc, realloc, and free (which is |
essentially the effect of the linux weak alias approach). It would |
be hard to obtain finer granularity. |
|
*/ |
|
|
#ifdef INTERNAL_LINUX_C_LIB |
|
#if __STD_C |
|
Void_t * __default_morecore_init (ptrdiff_t); |
Void_t *(*__morecore)(ptrdiff_t) = __default_morecore_init; |
|
#else |
|
Void_t * __default_morecore_init (); |
Void_t *(*__morecore)() = __default_morecore_init; |
|
#endif |
|
#define MORECORE (*__morecore) |
#define MORECORE_FAILURE 0 |
#define MORECORE_CLEARS 1 |
|
#else /* INTERNAL_LINUX_C_LIB */ |
|
#if __STD_C |
extern Void_t* sbrk(ptrdiff_t); |
#else |
extern Void_t* sbrk(); |
#endif |
|
#ifndef MORECORE |
#define MORECORE sbrk |
#endif |
|
#ifndef MORECORE_FAILURE |
#define MORECORE_FAILURE -1 |
#endif |
|
#ifndef MORECORE_CLEARS |
#define MORECORE_CLEARS 1 |
#endif |
|
#endif /* INTERNAL_LINUX_C_LIB */ |
|
#if defined(INTERNAL_LINUX_C_LIB) && defined(__ELF__) |
|
#define cALLOc __libc_calloc |
#define fREe __libc_free |
#define mALLOc __libc_malloc |
#define mEMALIGn __libc_memalign |
#define rEALLOc __libc_realloc |
#define vALLOc __libc_valloc |
#define pvALLOc __libc_pvalloc |
#define mALLINFo __libc_mallinfo |
#define mALLOPt __libc_mallopt |
|
#pragma weak calloc = __libc_calloc |
#pragma weak free = __libc_free |
#pragma weak cfree = __libc_free |
#pragma weak malloc = __libc_malloc |
#pragma weak memalign = __libc_memalign |
#pragma weak realloc = __libc_realloc |
#pragma weak valloc = __libc_valloc |
#pragma weak pvalloc = __libc_pvalloc |
#pragma weak mallinfo = __libc_mallinfo |
#pragma weak mallopt = __libc_mallopt |
|
#else |
|
|
#define cALLOc calloc |
#define fREe free |
#define mALLOc malloc |
#define mEMALIGn memalign |
#define rEALLOc realloc |
#define vALLOc valloc |
#define pvALLOc pvalloc |
#define mALLINFo mallinfo |
#define mALLOPt mallopt |
|
#endif |
|
/* Public routines */ |
|
#if __STD_C |
|
Void_t* mALLOc(size_t); |
void fREe(Void_t*); |
Void_t* rEALLOc(Void_t*, size_t); |
Void_t* mEMALIGn(size_t, size_t); |
Void_t* vALLOc(size_t); |
Void_t* pvALLOc(size_t); |
Void_t* cALLOc(size_t, size_t); |
void cfree(Void_t*); |
int malloc_trim(size_t); |
size_t malloc_usable_size(Void_t*); |
void malloc_stats(); |
int mALLOPt(int, int); |
struct mallinfo mALLINFo(void); |
#else |
Void_t* mALLOc(); |
void fREe(); |
Void_t* rEALLOc(); |
Void_t* mEMALIGn(); |
Void_t* vALLOc(); |
Void_t* pvALLOc(); |
Void_t* cALLOc(); |
void cfree(); |
int malloc_trim(); |
size_t malloc_usable_size(); |
void malloc_stats(); |
int mALLOPt(); |
struct mallinfo mALLINFo(); |
#endif |
|
|
#ifdef __cplusplus |
}; /* end of extern "C" */ |
#endif |
|
/* ---------- To make a malloc.h, end cutting here ------------ */ |
|
|
/* |
Emulation of sbrk for WIN32 |
All code within the ifdef WIN32 is untested by me. |
*/ |
|
|
#ifdef WIN32 |
|
#define AlignPage(add) (((add) + (malloc_getpagesize-1)) & \ |
~(malloc_getpagesize-1)) |
|
/* reserve 64MB to ensure large contiguous space */ |
#define RESERVED_SIZE (1024*1024*64) |
#define NEXT_SIZE (2048*1024) |
#define TOP_MEMORY ((unsigned long)2*1024*1024*1024) |
|
struct GmListElement; |
typedef struct GmListElement GmListElement; |
|
struct GmListElement |
{ |
GmListElement* next; |
void* base; |
}; |
|
static GmListElement* head = 0; |
static unsigned int gNextAddress = 0; |
static unsigned int gAddressBase = 0; |
static unsigned int gAllocatedSize = 0; |
|
static |
GmListElement* makeGmListElement (void* bas) |
{ |
GmListElement* this; |
this = (GmListElement*)(void*)LocalAlloc (0, sizeof (GmListElement)); |
ASSERT (this); |
if (this) |
{ |
this->base = bas; |
this->next = head; |
head = this; |
} |
return this; |
} |
|
void gcleanup () |
{ |
BOOL rval; |
ASSERT ( (head == NULL) || (head->base == (void*)gAddressBase)); |
if (gAddressBase && (gNextAddress - gAddressBase)) |
{ |
rval = VirtualFree ((void*)gAddressBase, |
gNextAddress - gAddressBase, |
MEM_DECOMMIT); |
ASSERT (rval); |
} |
while (head) |
{ |
GmListElement* next = head->next; |
rval = VirtualFree (head->base, 0, MEM_RELEASE); |
ASSERT (rval); |
LocalFree (head); |
head = next; |
} |
} |
|
static |
void* findRegion (void* start_address, unsigned long size) |
{ |
MEMORY_BASIC_INFORMATION info; |
while ((unsigned long)start_address < TOP_MEMORY) |
{ |
VirtualQuery (start_address, &info, sizeof (info)); |
if (info.State != MEM_FREE) |
start_address = (char*)info.BaseAddress + info.RegionSize; |
else if (info.RegionSize >= size) |
return start_address; |
else |
start_address = (char*)info.BaseAddress + info.RegionSize; |
} |
return NULL; |
|
} |
|
|
void* wsbrk (long size) |
{ |
void* tmp; |
if (size > 0) |
{ |
if (gAddressBase == 0) |
{ |
gAllocatedSize = max (RESERVED_SIZE, AlignPage (size)); |
gNextAddress = gAddressBase = |
(unsigned int)VirtualAlloc (NULL, gAllocatedSize, |
MEM_RESERVE, PAGE_NOACCESS); |
} else if (AlignPage (gNextAddress + size) > (gAddressBase + |
gAllocatedSize)) |
{ |
long new_size = max (NEXT_SIZE, AlignPage (size)); |
void* new_address = (void*)(gAddressBase+gAllocatedSize); |
do |
{ |
new_address = findRegion (new_address, new_size); |
|
if (new_address == 0) |
return (void*)-1; |
|
gAddressBase = gNextAddress = |
(unsigned int)VirtualAlloc (new_address, new_size, |
MEM_RESERVE, PAGE_NOACCESS); |
// repeat in case of race condition |
// The region that we found has been snagged |
// by another thread |
} |
while (gAddressBase == 0); |
|
ASSERT (new_address == (void*)gAddressBase); |
|
gAllocatedSize = new_size; |
|
if (!makeGmListElement ((void*)gAddressBase)) |
return (void*)-1; |
} |
if ((size + gNextAddress) > AlignPage (gNextAddress)) |
{ |
void* res; |
res = VirtualAlloc ((void*)AlignPage (gNextAddress), |
(size + gNextAddress - |
AlignPage (gNextAddress)), |
MEM_COMMIT, PAGE_READWRITE); |
if (res == 0) |
return (void*)-1; |
} |
tmp = (void*)gNextAddress; |
gNextAddress = (unsigned int)tmp + size; |
return tmp; |
} |
else if (size < 0) |
{ |
unsigned int alignedGoal = AlignPage (gNextAddress + size); |
/* Trim by releasing the virtual memory */ |
if (alignedGoal >= gAddressBase) |
{ |
VirtualFree ((void*)alignedGoal, gNextAddress - alignedGoal, |
MEM_DECOMMIT); |
gNextAddress = gNextAddress + size; |
return (void*)gNextAddress; |
} |
else |
{ |
VirtualFree ((void*)gAddressBase, gNextAddress - gAddressBase, |
MEM_DECOMMIT); |
gNextAddress = gAddressBase; |
return (void*)-1; |
} |
} |
else |
{ |
return (void*)gNextAddress; |
} |
} |
|
#endif |
|
|
|
/* |
Type declarations |
*/ |
|
|
struct malloc_chunk |
{ |
INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */ |
INTERNAL_SIZE_T size; /* Size in bytes, including overhead. */ |
struct malloc_chunk* fd; /* double links -- used only if free. */ |
struct malloc_chunk* bk; |
}; |
|
typedef struct malloc_chunk* mchunkptr; |
|
/* |
|
malloc_chunk details: |
|
(The following includes lightly edited explanations by Colin Plumb.) |
|
Chunks of memory are maintained using a `boundary tag' method as |
described in e.g., Knuth or Standish. (See the paper by Paul |
Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a |
survey of such techniques.) Sizes of free chunks are stored both |
in the front of each chunk and at the end. This makes |
consolidating fragmented chunks into bigger chunks very fast. The |
size fields also hold bits representing whether chunks are free or |
in use. |
|
An allocated chunk looks like this: |
|
|
chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Size of previous chunk, if allocated | | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Size of chunk, in bytes |P| |
mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| User data starts here... . |
. . |
. (malloc_usable_space() bytes) . |
. | |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Size of chunk | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
|
|
Where "chunk" is the front of the chunk for the purpose of most of |
the malloc code, but "mem" is the pointer that is returned to the |
user. "Nextchunk" is the beginning of the next contiguous chunk. |
|
Chunks always begin on even word boundaries, so the mem portion |
(which is returned to the user) is also on an even word boundary, and |
thus double-word aligned. |
|
Free chunks are stored in circular doubly-linked lists, and look like this: |
|
chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Size of previous chunk | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
`head:' | Size of chunk, in bytes |P| |
mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Forward pointer to next chunk in list | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Back pointer to previous chunk in list | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Unused space (may be 0 bytes long) . |
. . |
. | |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
`foot:' | Size of chunk, in bytes | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
|
The P (PREV_INUSE) bit, stored in the unused low-order bit of the |
chunk size (which is always a multiple of two words), is an in-use |
bit for the *previous* chunk. If that bit is *clear*, then the |
word before the current chunk size contains the previous chunk |
size, and can be used to find the front of the previous chunk. |
(The very first chunk allocated always has this bit set, |
preventing access to non-existent (or non-owned) memory.) |
|
Note that the `foot' of the current chunk is actually represented |
as the prev_size of the NEXT chunk. (This makes it easier to |
deal with alignments etc). |
|
The two exceptions to all this are |
|
1. The special chunk `top', which doesn't bother using the |
trailing size field since there is no |
next contiguous chunk that would have to index off it. (After |
initialization, `top' is forced to always exist. If it would |
become less than MINSIZE bytes long, it is replenished via |
malloc_extend_top.) |
|
2. Chunks allocated via mmap, which have the second-lowest-order |
bit (IS_MMAPPED) set in their size fields. Because they are |
never merged or traversed from any other chunk, they have no |
foot size or inuse information. |
|
Available chunks are kept in any of several places (all declared below): |
|
* `av': An array of chunks serving as bin headers for consolidated |
chunks. Each bin is doubly linked. The bins are approximately |
proportionally (log) spaced. There are a lot of these bins |
(128). This may look excessive, but works very well in |
practice. All procedures maintain the invariant that no |
consolidated chunk physically borders another one. Chunks in |
bins are kept in size order, with ties going to the |
approximately least recently used chunk. |
|
The chunks in each bin are maintained in decreasing sorted order by |
size. This is irrelevant for the small bins, which all contain |
the same-sized chunks, but facilitates best-fit allocation for |
larger chunks. (These lists are just sequential. Keeping them in |
order almost never requires enough traversal to warrant using |
fancier ordered data structures.) Chunks of the same size are |
linked with the most recently freed at the front, and allocations |
are taken from the back. This results in LRU or FIFO allocation |
order, which tends to give each chunk an equal opportunity to be |
consolidated with adjacent freed chunks, resulting in larger free |
chunks and less fragmentation. |
|
* `top': The top-most available chunk (i.e., the one bordering the |
end of available memory) is treated specially. It is never |
included in any bin, is used only if no other chunk is |
available, and is released back to the system if it is very |
large (see M_TRIM_THRESHOLD). |
|
* `last_remainder': A bin holding only the remainder of the |
most recently split (non-top) chunk. This bin is checked |
before other non-fitting chunks, so as to provide better |
locality for runs of sequentially allocated chunks. |
|
* Implicitly, through the host system's memory mapping tables. |
If supported, requests greater than a threshold are usually |
serviced via calls to mmap, and then later released via munmap. |
|
*/ |
|
|
|
|
|
|
/* sizes, alignments */ |
|
#define SIZE_SZ (sizeof(INTERNAL_SIZE_T)) |
#define MALLOC_ALIGNMENT (SIZE_SZ + SIZE_SZ) |
#define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1) |
#define MINSIZE (sizeof(struct malloc_chunk)) |
|
/* conversion from malloc headers to user pointers, and back */ |
|
#define chunk2mem(p) ((Void_t*)((char*)(p) + 2*SIZE_SZ)) |
#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ)) |
|
/* pad request bytes into a usable size */ |
|
#define request2size(req) \ |
(((long)((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) < \ |
(long)(MINSIZE + MALLOC_ALIGN_MASK)) ? MINSIZE : \ |
(((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) & ~(MALLOC_ALIGN_MASK))) |
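
/*
  Worked example (illustrative only; assumes a 32-bit build where
  SIZE_SZ == 4, MALLOC_ALIGNMENT == 8 and MINSIZE == 16):

    request2size(1)   -> 16    too small; rounded up to MINSIZE
    request2size(10)  -> 16    10 + 4 + 7 = 21 < MINSIZE + 7, so MINSIZE
    request2size(100) -> 104   (100 + 4 + 7) & ~7
*/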
|
/* Check if m has acceptable alignment */ |
|
#define aligned_OK(m) (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0) |
|
|
|
|
/* |
Physical chunk operations |
*/ |
|
|
/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */ |
|
#define PREV_INUSE 0x1 |
|
/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */ |
|
#define IS_MMAPPED 0x2 |
|
/* Bits to mask off when extracting size */ |
|
#define SIZE_BITS (PREV_INUSE|IS_MMAPPED) |
|
|
/* Ptr to next physical malloc_chunk. */ |
|
#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~PREV_INUSE) )) |
|
/* Ptr to previous physical malloc_chunk */ |
|
#define prev_chunk(p)\ |
((mchunkptr)( ((char*)(p)) - ((p)->prev_size) )) |
|
|
/* Treat space at ptr + offset as a chunk */ |
|
#define chunk_at_offset(p, s) ((mchunkptr)(((char*)(p)) + (s))) |
|
|
|
|
/* |
Dealing with use bits |
*/ |
|
/* extract p's inuse bit */ |
|
#define inuse(p)\ |
((((mchunkptr)(((char*)(p))+((p)->size & ~PREV_INUSE)))->size) & PREV_INUSE) |
|
/* extract inuse bit of previous chunk */ |
|
#define prev_inuse(p) ((p)->size & PREV_INUSE) |
|
/* check for mmap()'ed chunk */ |
|
#define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED) |
|
/* set/clear chunk as in use without otherwise disturbing */ |
|
#define set_inuse(p)\ |
((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size |= PREV_INUSE |
|
#define clear_inuse(p)\ |
((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size &= ~(PREV_INUSE) |
|
/* check/set/clear inuse bits in known places */ |
|
#define inuse_bit_at_offset(p, s)\ |
(((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE) |
|
#define set_inuse_bit_at_offset(p, s)\ |
(((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE) |
|
#define clear_inuse_bit_at_offset(p, s)\ |
(((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE)) |
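
/*
  Note (illustrative): the in-use state of a chunk p is recorded in the
  PREV_INUSE bit of the chunk that physically follows it, not in p itself.
  For example:

    set_inuse(p);    sets PREV_INUSE in the size field of next_chunk(p)
    inuse(p);        reads that same bit back
    prev_inuse(p);   by contrast, describes the chunk physically before p
*/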
|
|
|
|
/* |
Dealing with size fields |
*/ |
|
/* Get size, ignoring use bits */ |
|
#define chunksize(p) ((p)->size & ~(SIZE_BITS)) |
|
/* Set size at head, without disturbing its use bit */ |
|
#define set_head_size(p, s) ((p)->size = (((p)->size & PREV_INUSE) | (s))) |
|
/* Set size/use ignoring previous bits in header */ |
|
#define set_head(p, s) ((p)->size = (s)) |
|
/* Set size at footer (only when chunk is not in use) */ |
|
#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_size = (s)) |
|
|
|
|
|
/* |
Bins |
|
The bins, `av_' are an array of pairs of pointers serving as the |
heads of (initially empty) doubly-linked lists of chunks, laid out |
in a way so that each pair can be treated as if it were in a |
malloc_chunk. (This way, the fd/bk offsets for linking bin heads |
and chunks are the same). |
|
Bins for sizes < 512 bytes contain chunks of all the same size, spaced |
8 bytes apart. Larger bins are approximately logarithmically |
spaced. (See the table below.) The `av_' array is never mentioned |
directly in the code, but instead via bin access macros. |
|
Bin layout: |
|
64 bins of size 8 |
32 bins of size 64 |
16 bins of size 512 |
8 bins of size 4096 |
4 bins of size 32768 |
2 bins of size 262144 |
1 bin of size what's left |
|
There is actually a little slop in the numbers in bin_index |
for the sake of speed. This makes no difference elsewhere. |
|
The special chunks `top' and `last_remainder' get their own bins, |
(this is implemented via yet more trickery with the av_ array), |
although `top' is never properly linked to its bin since it is |
always handled specially. |
|
*/ |
|
#define NAV 128 /* number of bins */ |
|
typedef struct malloc_chunk* mbinptr; |
|
/* access macros */ |
|
#define bin_at(i) ((mbinptr)((char*)&(av_[2*(i) + 2]) - 2*SIZE_SZ)) |
#define next_bin(b) ((mbinptr)((char*)(b) + 2 * sizeof(mbinptr))) |
#define prev_bin(b) ((mbinptr)((char*)(b) - 2 * sizeof(mbinptr))) |
|
/* |
The first 2 bins are never indexed. The corresponding av_ cells are instead |
used for bookkeeping. This is not to save space, but to simplify |
indexing, maintain locality, and avoid some initialization tests. |
*/ |
|
#define top (bin_at(0)->fd) /* The topmost chunk */ |
#define last_remainder (bin_at(1)) /* remainder from last split */ |
|
|
/* |
Because top initially points to its own bin with initial |
zero size, thus forcing extension on the first malloc request, |
we avoid having any special code in malloc to check whether |
it even exists yet. But we still need to in malloc_extend_top. |
*/ |
|
#define initial_top ((mchunkptr)(bin_at(0))) |
|
/* Helper macro to initialize bins */ |
|
#define IAV(i) bin_at(i), bin_at(i) |
|
static mbinptr av_[NAV * 2 + 2] = { |
0, 0, |
IAV(0), IAV(1), IAV(2), IAV(3), IAV(4), IAV(5), IAV(6), IAV(7), |
IAV(8), IAV(9), IAV(10), IAV(11), IAV(12), IAV(13), IAV(14), IAV(15), |
IAV(16), IAV(17), IAV(18), IAV(19), IAV(20), IAV(21), IAV(22), IAV(23), |
IAV(24), IAV(25), IAV(26), IAV(27), IAV(28), IAV(29), IAV(30), IAV(31), |
IAV(32), IAV(33), IAV(34), IAV(35), IAV(36), IAV(37), IAV(38), IAV(39), |
IAV(40), IAV(41), IAV(42), IAV(43), IAV(44), IAV(45), IAV(46), IAV(47), |
IAV(48), IAV(49), IAV(50), IAV(51), IAV(52), IAV(53), IAV(54), IAV(55), |
IAV(56), IAV(57), IAV(58), IAV(59), IAV(60), IAV(61), IAV(62), IAV(63), |
IAV(64), IAV(65), IAV(66), IAV(67), IAV(68), IAV(69), IAV(70), IAV(71), |
IAV(72), IAV(73), IAV(74), IAV(75), IAV(76), IAV(77), IAV(78), IAV(79), |
IAV(80), IAV(81), IAV(82), IAV(83), IAV(84), IAV(85), IAV(86), IAV(87), |
IAV(88), IAV(89), IAV(90), IAV(91), IAV(92), IAV(93), IAV(94), IAV(95), |
IAV(96), IAV(97), IAV(98), IAV(99), IAV(100), IAV(101), IAV(102), IAV(103), |
IAV(104), IAV(105), IAV(106), IAV(107), IAV(108), IAV(109), IAV(110), IAV(111), |
IAV(112), IAV(113), IAV(114), IAV(115), IAV(116), IAV(117), IAV(118), IAV(119), |
IAV(120), IAV(121), IAV(122), IAV(123), IAV(124), IAV(125), IAV(126), IAV(127) |
}; |
|
|
|
/* field-extraction macros */ |
|
#define first(b) ((b)->fd) |
#define last(b) ((b)->bk) |
|
/* |
Indexing into bins |
*/ |
|
#define bin_index(sz) \ |
(((((unsigned long)(sz)) >> 9) == 0) ? (((unsigned long)(sz)) >> 3): \ |
((((unsigned long)(sz)) >> 9) <= 4) ? 56 + (((unsigned long)(sz)) >> 6): \ |
((((unsigned long)(sz)) >> 9) <= 20) ? 91 + (((unsigned long)(sz)) >> 9): \ |
((((unsigned long)(sz)) >> 9) <= 84) ? 110 + (((unsigned long)(sz)) >> 12): \ |
((((unsigned long)(sz)) >> 9) <= 340) ? 119 + (((unsigned long)(sz)) >> 15): \ |
((((unsigned long)(sz)) >> 9) <= 1364) ? 124 + (((unsigned long)(sz)) >> 18): \ |
126) |
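
/*
  Illustrative values of bin_index (not used by the code itself):

    bin_index(64)      ->   8    small bin:  64 >> 3
    bin_index(512)     ->  64    56  + (512 >> 6)
    bin_index(4096)    ->  99    91  + (4096 >> 9)
    bin_index(1048576) -> 126    catch-all bin for very large chunks
*/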
/* |
bins for chunks < 512 are all spaced 8 bytes apart, and hold |
identically sized chunks. This is exploited in malloc. |
*/ |
|
#define MAX_SMALLBIN 63 |
#define MAX_SMALLBIN_SIZE 512 |
#define SMALLBIN_WIDTH 8 |
|
#define smallbin_index(sz) (((unsigned long)(sz)) >> 3) |
|
/* |
Requests are `small' if both the corresponding and the next bin are small |
*/ |
|
#define is_small_request(nb) (nb < MAX_SMALLBIN_SIZE - SMALLBIN_WIDTH) |
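
/*
  For example (illustrative), nb == 104 is a small request (104 < 504) and
  is served from smallbin_index(104) == 13, whereas nb == 504 or larger
  takes the general bin_index path.
*/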
|
|
|
/* |
To help compensate for the large number of bins, a one-level index |
structure is used for bin-by-bin searching. `binblocks' is a |
one-word bitvector recording whether groups of BINBLOCKWIDTH bins |
have any (possibly) non-empty bins, so they can be skipped over |
all at once during traversals. The bits are NOT always |
cleared as soon as all bins in a block are empty, but instead only |
when all are noticed to be empty during traversal in malloc. |
*/ |
|
#define BINBLOCKWIDTH 4 /* bins per block */ |
|
#define binblocks (bin_at(0)->size) /* bitvector of nonempty blocks */ |
|
/* bin<->block macros */ |
|
#define idx2binblock(ix) ((unsigned)1 << (ix / BINBLOCKWIDTH)) |
#define mark_binblock(ii) (binblocks |= idx2binblock(ii)) |
#define clear_binblock(ii) (binblocks &= ~(idx2binblock(ii))) |
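
/*
  Example (illustrative): with BINBLOCKWIDTH == 4, bins 0..3 share bit
  (1 << 0) of binblocks, bins 4..7 share bit (1 << 1), and so on.  So

    mark_binblock(13);     sets bit (1 << 3), covering bins 12..15
    clear_binblock(13);    clears that same bit again
*/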
|
|
|
|
|
/* Other static bookkeeping data */ |
|
/* variables holding tunable values */ |
|
static unsigned long trim_threshold = DEFAULT_TRIM_THRESHOLD; |
static unsigned long top_pad = DEFAULT_TOP_PAD; |
static unsigned int n_mmaps_max = DEFAULT_MMAP_MAX; |
static unsigned long mmap_threshold = DEFAULT_MMAP_THRESHOLD; |
|
/* The first value returned from sbrk */ |
static char* sbrk_base = (char*)(-1); |
|
/* The maximum memory obtained from system via sbrk */ |
static unsigned long max_sbrked_mem = 0; |
|
/* The maximum via either sbrk or mmap */ |
static unsigned long max_total_mem = 0; |
|
/* internal working copy of mallinfo */ |
static struct mallinfo current_mallinfo = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; |
|
/* The total memory obtained from system via sbrk */ |
#define sbrked_mem (current_mallinfo.arena) |
|
/* Tracking mmaps */ |
|
static unsigned int n_mmaps = 0; |
static unsigned int max_n_mmaps = 0; |
static unsigned long mmapped_mem = 0; |
static unsigned long max_mmapped_mem = 0; |
|
|
|
/* |
Debugging support |
*/ |
|
#if DEBUG |
|
|
/* |
These routines make a number of assertions about the states |
of data structures that should be true at all times. If any |
are not true, it's very likely that a user program has somehow |
trashed memory. (It's also possible that there is a coding error |
in malloc. In which case, please report it!) |
*/ |
|
#if __STD_C |
static void do_check_chunk(mchunkptr p) |
#else |
static void do_check_chunk(p) mchunkptr p; |
#endif |
{ |
INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE; |
|
/* No checkable chunk is mmapped */ |
assert(!chunk_is_mmapped(p)); |
|
/* Check for legal address ... */ |
assert((char*)p >= sbrk_base); |
if (p != top) |
assert((char*)p + sz <= (char*)top); |
else |
assert((char*)p + sz <= sbrk_base + sbrked_mem); |
|
} |
|
|
#if __STD_C |
static void do_check_free_chunk(mchunkptr p) |
#else |
static void do_check_free_chunk(p) mchunkptr p; |
#endif |
{ |
INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE; |
mchunkptr next = chunk_at_offset(p, sz); |
|
do_check_chunk(p); |
|
/* Check whether it claims to be free ... */ |
assert(!inuse(p)); |
|
/* Unless a special marker, must have OK fields */ |
if ((long)sz >= (long)MINSIZE) |
{ |
assert((sz & MALLOC_ALIGN_MASK) == 0); |
assert(aligned_OK(chunk2mem(p))); |
/* ... matching footer field */ |
assert(next->prev_size == sz); |
/* ... and is fully consolidated */ |
assert(prev_inuse(p)); |
assert (next == top || inuse(next)); |
|
/* ... and has minimally sane links */ |
assert(p->fd->bk == p); |
assert(p->bk->fd == p); |
} |
else /* markers are always of size SIZE_SZ */ |
assert(sz == SIZE_SZ); |
} |
|
#if __STD_C |
static void do_check_inuse_chunk(mchunkptr p) |
#else |
static void do_check_inuse_chunk(p) mchunkptr p; |
#endif |
{ |
mchunkptr next = next_chunk(p); |
do_check_chunk(p); |
|
/* Check whether it claims to be in use ... */ |
assert(inuse(p)); |
|
/* ... and is surrounded by OK chunks. |
Since more things can be checked with free chunks than inuse ones, |
if an inuse chunk borders them and debug is on, it's worth doing them. |
*/ |
if (!prev_inuse(p)) |
{ |
mchunkptr prv = prev_chunk(p); |
assert(next_chunk(prv) == p); |
do_check_free_chunk(prv); |
} |
if (next == top) |
{ |
assert(prev_inuse(next)); |
assert(chunksize(next) >= MINSIZE); |
} |
else if (!inuse(next)) |
do_check_free_chunk(next); |
|
} |
|
#if __STD_C |
static void do_check_malloced_chunk(mchunkptr p, INTERNAL_SIZE_T s) |
#else |
static void do_check_malloced_chunk(p, s) mchunkptr p; INTERNAL_SIZE_T s; |
#endif |
{ |
INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE; |
long room = sz - s; |
|
do_check_inuse_chunk(p); |
|
/* Legal size ... */ |
assert((long)sz >= (long)MINSIZE); |
assert((sz & MALLOC_ALIGN_MASK) == 0); |
assert(room >= 0); |
assert(room < (long)MINSIZE); |
|
/* ... and alignment */ |
assert(aligned_OK(chunk2mem(p))); |
|
|
/* ... and was allocated at front of an available chunk */ |
assert(prev_inuse(p)); |
|
} |
|
|
#define check_free_chunk(P) do_check_free_chunk(P) |
#define check_inuse_chunk(P) do_check_inuse_chunk(P) |
#define check_chunk(P) do_check_chunk(P) |
#define check_malloced_chunk(P,N) do_check_malloced_chunk(P,N) |
#else |
#define check_free_chunk(P) |
#define check_inuse_chunk(P) |
#define check_chunk(P) |
#define check_malloced_chunk(P,N) |
#endif |
|
|
|
/* |
Macro-based internal utilities |
*/ |
|
|
/* |
Linking chunks in bin lists. |
Call these only with variables, not arbitrary expressions, as arguments. |
*/ |
|
/* |
Place chunk p of size s in its bin, in size order, |
putting it ahead of others of same size. |
*/ |
|
|
#define frontlink(P, S, IDX, BK, FD) \ |
{ \ |
if (S < MAX_SMALLBIN_SIZE) \ |
{ \ |
IDX = smallbin_index(S); \ |
mark_binblock(IDX); \ |
BK = bin_at(IDX); \ |
FD = BK->fd; \ |
P->bk = BK; \ |
P->fd = FD; \ |
FD->bk = BK->fd = P; \ |
} \ |
else \ |
{ \ |
IDX = bin_index(S); \ |
BK = bin_at(IDX); \ |
FD = BK->fd; \ |
if (FD == BK) mark_binblock(IDX); \ |
else \ |
{ \ |
while (FD != BK && S < chunksize(FD)) FD = FD->fd; \ |
BK = FD->bk; \ |
} \ |
P->bk = BK; \ |
P->fd = FD; \ |
FD->bk = BK->fd = P; \ |
} \ |
} |
|
|
/* take a chunk off a list */ |
|
#define unlink(P, BK, FD) \ |
{ \ |
BK = P->bk; \ |
FD = P->fd; \ |
FD->bk = BK; \ |
BK->fd = FD; \ |
} |
|
/* Place p as the last remainder */ |
|
#define link_last_remainder(P) \ |
{ \ |
last_remainder->fd = last_remainder->bk = P; \ |
P->fd = P->bk = last_remainder; \ |
} |
|
/* Clear the last_remainder bin */ |
|
#define clear_last_remainder \ |
(last_remainder->fd = last_remainder->bk = last_remainder) |
|
|
|
|
|
|
/* Routines dealing with mmap(). */ |
|
#if HAVE_MMAP |
|
#if __STD_C |
static mchunkptr mmap_chunk(size_t size) |
#else |
static mchunkptr mmap_chunk(size) size_t size; |
#endif |
{ |
size_t page_mask = malloc_getpagesize - 1; |
mchunkptr p; |
|
#ifndef MAP_ANONYMOUS |
static int fd = -1; |
#endif |
|
if(n_mmaps >= n_mmaps_max) return 0; /* too many regions */ |
|
/* For mmapped chunks, the overhead is one SIZE_SZ unit larger, because |
* there is no following chunk whose prev_size field could be used. |
*/ |
size = (size + SIZE_SZ + page_mask) & ~page_mask; |
|
#ifdef MAP_ANONYMOUS |
p = (mchunkptr)mmap(0, size, PROT_READ|PROT_WRITE, |
MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); |
#else /* !MAP_ANONYMOUS */ |
if (fd < 0) |
{ |
fd = open("/dev/zero", O_RDWR); |
if(fd < 0) return 0; |
} |
p = (mchunkptr)mmap(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0); |
#endif |
|
if(p == (mchunkptr)-1) return 0; |
|
n_mmaps++; |
if (n_mmaps > max_n_mmaps) max_n_mmaps = n_mmaps; |
|
/* We demand that eight bytes into a page must be 8-byte aligned. */ |
assert(aligned_OK(chunk2mem(p))); |
|
/* The offset to the start of the mmapped region is stored |
* in the prev_size field of the chunk; normally it is zero, |
* but that can be changed in memalign(). |
*/ |
p->prev_size = 0; |
set_head(p, size|IS_MMAPPED); |
|
mmapped_mem += size; |
if ((unsigned long)mmapped_mem > (unsigned long)max_mmapped_mem) |
max_mmapped_mem = mmapped_mem; |
if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem) |
max_total_mem = mmapped_mem + sbrked_mem; |
return p; |
} |
|
#if __STD_C |
static void munmap_chunk(mchunkptr p) |
#else |
static void munmap_chunk(p) mchunkptr p; |
#endif |
{ |
INTERNAL_SIZE_T size = chunksize(p); |
int ret; |
|
assert (chunk_is_mmapped(p)); |
assert(! ((char*)p >= sbrk_base && (char*)p < sbrk_base + sbrked_mem)); |
assert((n_mmaps > 0)); |
assert(((p->prev_size + size) & (malloc_getpagesize-1)) == 0); |
|
n_mmaps--; |
mmapped_mem -= (size + p->prev_size); |
|
ret = munmap((char *)p - p->prev_size, size + p->prev_size); |
|
/* munmap returns non-zero on failure */ |
assert(ret == 0); |
} |
|
#if HAVE_MREMAP |
|
#if __STD_C |
static mchunkptr mremap_chunk(mchunkptr p, size_t new_size) |
#else |
static mchunkptr mremap_chunk(p, new_size) mchunkptr p; size_t new_size; |
#endif |
{ |
size_t page_mask = malloc_getpagesize - 1; |
INTERNAL_SIZE_T offset = p->prev_size; |
INTERNAL_SIZE_T size = chunksize(p); |
char *cp; |
|
assert (chunk_is_mmapped(p)); |
assert(! ((char*)p >= sbrk_base && (char*)p < sbrk_base + sbrked_mem)); |
assert((n_mmaps > 0)); |
assert(((size + offset) & (malloc_getpagesize-1)) == 0); |
|
/* Note the extra SIZE_SZ overhead as in mmap_chunk(). */ |
new_size = (new_size + offset + SIZE_SZ + page_mask) & ~page_mask; |
|
cp = (char *)mremap((char *)p - offset, size + offset, new_size, 1); |
|
if (cp == (char *)-1) return 0; |
|
p = (mchunkptr)(cp + offset); |
|
assert(aligned_OK(chunk2mem(p))); |
|
assert((p->prev_size == offset)); |
set_head(p, (new_size - offset)|IS_MMAPPED); |
|
mmapped_mem -= size + offset; |
mmapped_mem += new_size; |
if ((unsigned long)mmapped_mem > (unsigned long)max_mmapped_mem) |
max_mmapped_mem = mmapped_mem; |
if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem) |
max_total_mem = mmapped_mem + sbrked_mem; |
return p; |
} |
|
#endif /* HAVE_MREMAP */ |
|
#endif /* HAVE_MMAP */ |
|
|
|
|
/* |
Extend the top-most chunk by obtaining memory from system. |
Main interface to sbrk (but see also malloc_trim). |
*/ |
|
#if __STD_C |
static void malloc_extend_top(INTERNAL_SIZE_T nb) |
#else |
static void malloc_extend_top(nb) INTERNAL_SIZE_T nb; |
#endif |
{ |
char* brk; /* return value from sbrk */ |
INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of sbrked space */ |
INTERNAL_SIZE_T correction; /* bytes for 2nd sbrk call */ |
char* new_brk; /* return of 2nd sbrk call */ |
INTERNAL_SIZE_T top_size; /* new size of top chunk */ |
|
mchunkptr old_top = top; /* Record state of old top */ |
INTERNAL_SIZE_T old_top_size = chunksize(old_top); |
char* old_end = (char*)(chunk_at_offset(old_top, old_top_size)); |
|
/* Pad request with top_pad plus minimal overhead */ |
|
INTERNAL_SIZE_T sbrk_size = nb + top_pad + MINSIZE; |
unsigned long pagesz = malloc_getpagesize; |
|
/* If not the first time through, round to preserve page boundary */ |
/* Otherwise, we need to correct to a page size below anyway. */ |
/* (We also correct below if there was an intervening foreign sbrk call.) */ |
|
if (sbrk_base != (char*)(-1)) |
sbrk_size = (sbrk_size + (pagesz - 1)) & ~(pagesz - 1); |
|
brk = (char*)(MORECORE (sbrk_size)); |
|
/* Fail if sbrk failed or if a foreign sbrk call killed our space */ |
if (brk == (char*)(MORECORE_FAILURE) || |
(brk < old_end && old_top != initial_top)) |
return; |
|
sbrked_mem += sbrk_size; |
|
if (brk == old_end) /* can just add bytes to current top */ |
{ |
top_size = sbrk_size + old_top_size; |
set_head(top, top_size | PREV_INUSE); |
} |
else |
{ |
if (sbrk_base == (char*)(-1)) /* First time through. Record base */ |
sbrk_base = brk; |
else /* Someone else called sbrk(). Count those bytes as sbrked_mem. */ |
sbrked_mem += brk - (char*)old_end; |
|
/* Guarantee alignment of first new chunk made from this space */ |
front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK; |
if (front_misalign > 0) |
{ |
correction = (MALLOC_ALIGNMENT) - front_misalign; |
brk += correction; |
} |
else |
correction = 0; |
|
/* Guarantee the next brk will be at a page boundary */ |
correction += pagesz - ((unsigned long)(brk + sbrk_size) & (pagesz - 1)); |
|
/* Allocate correction */ |
new_brk = (char*)(MORECORE (correction)); |
if (new_brk == (char*)(MORECORE_FAILURE)) return; |
|
sbrked_mem += correction; |
|
top = (mchunkptr)brk; |
top_size = new_brk - brk + correction; |
set_head(top, top_size | PREV_INUSE); |
|
if (old_top != initial_top) |
{ |
|
/* There must have been an intervening foreign sbrk call. */ |
/* A double fencepost is necessary to prevent consolidation */ |
|
/* If not enough space to do this, then user did something very wrong */ |
if (old_top_size < MINSIZE) |
{ |
set_head(top, PREV_INUSE); /* will force null return from malloc */ |
return; |
} |
|
/* Also keep size a multiple of MALLOC_ALIGNMENT */ |
old_top_size = (old_top_size - 3*SIZE_SZ) & ~MALLOC_ALIGN_MASK; |
chunk_at_offset(old_top, old_top_size )->size = |
SIZE_SZ|PREV_INUSE; |
chunk_at_offset(old_top, old_top_size + SIZE_SZ)->size = |
SIZE_SZ|PREV_INUSE; |
set_head_size(old_top, old_top_size); |
/* If possible, release the rest. */ |
if (old_top_size >= MINSIZE) |
fREe(chunk2mem(old_top)); |
} |
} |
|
if ((unsigned long)sbrked_mem > (unsigned long)max_sbrked_mem) |
max_sbrked_mem = sbrked_mem; |
if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem) |
max_total_mem = mmapped_mem + sbrked_mem; |
|
/* We always land on a page boundary */ |
assert(((unsigned long)((char*)top + top_size) & (pagesz - 1)) == 0); |
} |
|
|
|
|
/* Main public routines */ |
|
|
/* |
Malloc Algorithm: |
|
The requested size is first converted into a usable form, `nb'. |
This currently means to add 4 bytes overhead plus possibly more to |
obtain 8-byte alignment and/or to obtain a size of at least |
MINSIZE (currently 16 bytes), the smallest allocatable size. |
(All fits are considered `exact' if they are within MINSIZE bytes.) |
|
From there, the first successful of the following steps is taken: |
|
1. The bin corresponding to the request size is scanned, and if |
a chunk of exactly the right size is found, it is taken. |
|
2. The most recently remaindered chunk is used if it is big |
enough. This is a form of (roving) first fit, used only in |
the absence of exact fits. Runs of consecutive requests use |
the remainder of the chunk used for the previous such request |
whenever possible. This limited use of a first-fit style |
allocation strategy tends to give contiguous chunks |
coextensive lifetimes, which improves locality and can reduce |
fragmentation in the long run. |
|
3. Other bins are scanned in increasing size order, using a |
chunk big enough to fulfill the request, and splitting off |
any remainder. This search is strictly by best-fit; i.e., |
the smallest (with ties going to approximately the least |
recently used) chunk that fits is selected. |
|
4. If large enough, the chunk bordering the end of memory |
(`top') is split off. This use of `top' is in accord with |
the best-fit search rule. In effect, `top' is treated as |
larger (and thus less well fitting) than any other available |
chunk since it can be extended to be as large as necessary |
(up to system limitations). |
|
5. If the request size meets the mmap threshold and the |
system supports mmap, and there are few enough currently |
allocated mmapped regions, and a call to mmap succeeds, |
the request is allocated via direct memory mapping. |
|
6. Otherwise, the top of memory is extended by |
obtaining more space from the system (normally using sbrk, |
but definable to anything else via the MORECORE macro). |
Memory is gathered from the system (in system page-sized |
units) in a way that allows chunks obtained across different |
sbrk calls to be consolidated, but does not require |
contiguous memory. Thus, it should be safe to intersperse |
mallocs with other sbrk calls. |
|
|
All allocations are made from the `lowest' part of any found |
chunk. (The implementation invariant is that prev_inuse is |
always true of any allocated chunk; i.e., that each allocated |
chunk borders either a previously allocated and still in-use chunk, |
or the base of its memory arena.) |
|
*/ |
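
/*
  Illustrative walk-through (assuming SIZE_SZ == 4, so MINSIZE == 16):
  a request for 100 bytes is padded to nb = request2size(100) = 104.
  Since 104 < 504 this is a `small' request, so step 1 looks only at
  smallbin_index(104) == 13 and the next bin, 14; if both are empty the
  search falls through to last_remainder, then the block-wise bin scan,
  and finally `top' or malloc_extend_top as described above.
*/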
|
#if __STD_C |
Void_t* mALLOc(size_t bytes) |
#else |
Void_t* mALLOc(bytes) size_t bytes; |
#endif |
{ |
mchunkptr victim; /* inspected/selected chunk */ |
INTERNAL_SIZE_T victim_size; /* its size */ |
int idx; /* index for bin traversal */ |
mbinptr bin; /* associated bin */ |
mchunkptr remainder; /* remainder from a split */ |
long remainder_size; /* its size */ |
int remainder_index; /* its bin index */ |
unsigned long block; /* block traverser bit */ |
int startidx; /* first bin of a traversed block */ |
mchunkptr fwd; /* misc temp for linking */ |
mchunkptr bck; /* misc temp for linking */ |
mbinptr q; /* misc temp */ |
|
INTERNAL_SIZE_T nb = request2size(bytes); /* padded request size; */ |
|
/* Check for exact match in a bin */ |
|
if (is_small_request(nb)) /* Faster version for small requests */ |
{ |
idx = smallbin_index(nb); |
|
/* No traversal or size check necessary for small bins. */ |
|
q = bin_at(idx); |
victim = last(q); |
|
/* Also scan the next one, since it would have a remainder < MINSIZE */ |
if (victim == q) |
{ |
q = next_bin(q); |
victim = last(q); |
} |
if (victim != q) |
{ |
victim_size = chunksize(victim); |
unlink(victim, bck, fwd); |
set_inuse_bit_at_offset(victim, victim_size); |
check_malloced_chunk(victim, nb); |
return chunk2mem(victim); |
} |
|
idx += 2; /* Set for bin scan below. We've already scanned 2 bins. */ |
|
} |
else |
{ |
idx = bin_index(nb); |
bin = bin_at(idx); |
|
for (victim = last(bin); victim != bin; victim = victim->bk) |
{ |
victim_size = chunksize(victim); |
remainder_size = victim_size - nb; |
|
if (remainder_size >= (long)MINSIZE) /* too big */ |
{ |
--idx; /* adjust to rescan below after checking last remainder */ |
break; |
} |
|
else if (remainder_size >= 0) /* exact fit */ |
{ |
unlink(victim, bck, fwd); |
set_inuse_bit_at_offset(victim, victim_size); |
check_malloced_chunk(victim, nb); |
return chunk2mem(victim); |
} |
} |
|
++idx; |
|
} |
|
/* Try to use the last split-off remainder */ |
|
if ( (victim = last_remainder->fd) != last_remainder) |
{ |
victim_size = chunksize(victim); |
remainder_size = victim_size - nb; |
|
if (remainder_size >= (long)MINSIZE) /* re-split */ |
{ |
remainder = chunk_at_offset(victim, nb); |
set_head(victim, nb | PREV_INUSE); |
link_last_remainder(remainder); |
set_head(remainder, remainder_size | PREV_INUSE); |
set_foot(remainder, remainder_size); |
check_malloced_chunk(victim, nb); |
return chunk2mem(victim); |
} |
|
clear_last_remainder; |
|
if (remainder_size >= 0) /* exhaust */ |
{ |
set_inuse_bit_at_offset(victim, victim_size); |
check_malloced_chunk(victim, nb); |
return chunk2mem(victim); |
} |
|
/* Else place in bin */ |
|
frontlink(victim, victim_size, remainder_index, bck, fwd); |
} |
|
/* |
If there are any possibly nonempty big-enough blocks, |
search for best fitting chunk by scanning bins in blockwidth units. |
*/ |
|
if ( (block = idx2binblock(idx)) <= binblocks) |
{ |
|
/* Get to the first marked block */ |
|
if ( (block & binblocks) == 0) |
{ |
/* force to an even block boundary */ |
idx = (idx & ~(BINBLOCKWIDTH - 1)) + BINBLOCKWIDTH; |
block <<= 1; |
while ((block & binblocks) == 0) |
{ |
idx += BINBLOCKWIDTH; |
block <<= 1; |
} |
} |
|
/* For each possibly nonempty block ... */ |
for (;;) |
{ |
startidx = idx; /* (track incomplete blocks) */ |
q = bin = bin_at(idx); |
|
/* For each bin in this block ... */ |
do |
{ |
/* Find and use first big enough chunk ... */ |
|
for (victim = last(bin); victim != bin; victim = victim->bk) |
{ |
victim_size = chunksize(victim); |
remainder_size = victim_size - nb; |
|
if (remainder_size >= (long)MINSIZE) /* split */ |
{ |
remainder = chunk_at_offset(victim, nb); |
set_head(victim, nb | PREV_INUSE); |
unlink(victim, bck, fwd); |
link_last_remainder(remainder); |
set_head(remainder, remainder_size | PREV_INUSE); |
set_foot(remainder, remainder_size); |
check_malloced_chunk(victim, nb); |
return chunk2mem(victim); |
} |
|
else if (remainder_size >= 0) /* take */ |
{ |
set_inuse_bit_at_offset(victim, victim_size); |
unlink(victim, bck, fwd); |
check_malloced_chunk(victim, nb); |
return chunk2mem(victim); |
} |
|
} |
|
bin = next_bin(bin); |
|
} while ((++idx & (BINBLOCKWIDTH - 1)) != 0); |
|
/* Clear out the block bit. */ |
|
do /* Possibly backtrack to try to clear a partial block */ |
{ |
if ((startidx & (BINBLOCKWIDTH - 1)) == 0) |
{ |
binblocks &= ~block; |
break; |
} |
--startidx; |
q = prev_bin(q); |
} while (first(q) == q); |
|
/* Get to the next possibly nonempty block */ |
|
if ( (block <<= 1) <= binblocks && (block != 0) ) |
{ |
while ((block & binblocks) == 0) |
{ |
idx += BINBLOCKWIDTH; |
block <<= 1; |
} |
} |
else |
break; |
} |
} |
|
|
/* Try to use top chunk */ |
|
/* Require that there be a remainder, ensuring top always exists */ |
if ( (remainder_size = chunksize(top) - nb) < (long)MINSIZE) |
{ |
|
#if HAVE_MMAP |
/* If big and would otherwise need to extend, try to use mmap instead */ |
if ((unsigned long)nb >= (unsigned long)mmap_threshold && |
(victim = mmap_chunk(nb)) != 0) |
return chunk2mem(victim); |
#endif |
|
/* Try to extend */ |
malloc_extend_top(nb); |
if ( (remainder_size = chunksize(top) - nb) < (long)MINSIZE) |
return 0; /* propagate failure */ |
} |
|
victim = top; |
set_head(victim, nb | PREV_INUSE); |
top = chunk_at_offset(victim, nb); |
set_head(top, remainder_size | PREV_INUSE); |
check_malloced_chunk(victim, nb); |
return chunk2mem(victim); |
|
} |
|
|
|
|
/* |
|
free() algorithm : |
|
cases: |
|
1. free(0) has no effect. |
|
2. If the chunk was allocated via mmap, it is released via munmap(). |
|
3. If a returned chunk borders the current high end of memory, |
it is consolidated into the top, and if the total unused |
topmost memory exceeds the trim threshold, malloc_trim is |
called. |
|
4. Other chunks are consolidated as they arrive, and |
placed in corresponding bins. (This includes the case of |
consolidating with the current `last_remainder'). |
|
*/ |
|
|
#if __STD_C |
void fREe(Void_t* mem) |
#else |
void fREe(mem) Void_t* mem; |
#endif |
{ |
mchunkptr p; /* chunk corresponding to mem */ |
INTERNAL_SIZE_T hd; /* its head field */ |
INTERNAL_SIZE_T sz; /* its size */ |
int idx; /* its bin index */ |
mchunkptr next; /* next contiguous chunk */ |
INTERNAL_SIZE_T nextsz; /* its size */ |
INTERNAL_SIZE_T prevsz; /* size of previous contiguous chunk */ |
mchunkptr bck; /* misc temp for linking */ |
mchunkptr fwd; /* misc temp for linking */ |
int islr; /* track whether merging with last_remainder */ |
|
if (mem == 0) /* free(0) has no effect */ |
return; |
|
p = mem2chunk(mem); |
hd = p->size; |
|
#if HAVE_MMAP |
if (hd & IS_MMAPPED) /* release mmapped memory. */ |
{ |
munmap_chunk(p); |
return; |
} |
#endif |
|
check_inuse_chunk(p); |
|
sz = hd & ~PREV_INUSE; |
next = chunk_at_offset(p, sz); |
nextsz = chunksize(next); |
|
if (next == top) /* merge with top */ |
{ |
sz += nextsz; |
|
if (!(hd & PREV_INUSE)) /* consolidate backward */ |
{ |
prevsz = p->prev_size; |
p = chunk_at_offset(p, -prevsz); |
sz += prevsz; |
unlink(p, bck, fwd); |
} |
|
set_head(p, sz | PREV_INUSE); |
top = p; |
if ((unsigned long)(sz) >= (unsigned long)trim_threshold) |
malloc_trim(top_pad); |
return; |
} |
|
set_head(next, nextsz); /* clear inuse bit */ |
|
islr = 0; |
|
if (!(hd & PREV_INUSE)) /* consolidate backward */ |
{ |
prevsz = p->prev_size; |
p = chunk_at_offset(p, -prevsz); |
sz += prevsz; |
|
if (p->fd == last_remainder) /* keep as last_remainder */ |
islr = 1; |
else |
unlink(p, bck, fwd); |
} |
|
if (!(inuse_bit_at_offset(next, nextsz))) /* consolidate forward */ |
{ |
sz += nextsz; |
|
if (!islr && next->fd == last_remainder) /* re-insert last_remainder */ |
{ |
islr = 1; |
link_last_remainder(p); |
} |
else |
unlink(next, bck, fwd); |
} |
|
|
set_head(p, sz | PREV_INUSE); |
set_foot(p, sz); |
if (!islr) |
frontlink(p, sz, idx, bck, fwd); |
} |
|
|
|
|
|
/* |
|
Realloc algorithm: |
|
Chunks that were obtained via mmap cannot be extended or shrunk |
unless HAVE_MREMAP is defined, in which case mremap is used. |
Otherwise, if their reallocation is for additional space, they are |
copied. If for less, they are just left alone. |
|
Otherwise, if the reallocation is for additional space, and the |
chunk can be extended, it is, else a malloc-copy-free sequence is |
taken. There are several different ways that a chunk could be |
extended. All are tried: |
|
* Extending forward into following adjacent free chunk. |
* Shifting backwards, joining preceding adjacent space |
* Both shifting backwards and extending forward. |
* Extending into newly sbrked space |
|
Unless the #define REALLOC_ZERO_BYTES_FREES is set, realloc with a |
size argument of zero (re)allocates a minimum-sized chunk. |
|
If the reallocation is for less space, and the new request is for |
a `small' (<512 bytes) size, then the newly unused space is lopped |
off and freed. |
|
The old unix realloc convention of allowing the last-free'd chunk |
to be used as an argument to realloc is no longer supported. |
I don't know of any programs still relying on this feature, |
and allowing it would also allow too many other incorrect |
usages of realloc to be sensible. |
|
|
*/ |
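
/*
  Usage sketch (illustrative only, never compiled; the example_* name is
  hypothetical): grow a buffer with realloc and fall back cleanly on failure.
*/
#if 0
static void example_realloc_usage (void)
{
  char* buf    = (char*)malloc(64);
  char* bigger = buf ? (char*)realloc(buf, 256) : 0;

  if (bigger != 0)
    buf = bigger;   /* the block may have moved; use the new pointer */
  /* on failure the original block in buf is still valid */

  free(buf);
}
#endif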
|
|
#if __STD_C |
Void_t* rEALLOc(Void_t* oldmem, size_t bytes) |
#else |
Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes; |
#endif |
{ |
INTERNAL_SIZE_T nb; /* padded request size */ |
|
mchunkptr oldp; /* chunk corresponding to oldmem */ |
INTERNAL_SIZE_T oldsize; /* its size */ |
|
mchunkptr newp; /* chunk to return */ |
INTERNAL_SIZE_T newsize; /* its size */ |
Void_t* newmem; /* corresponding user mem */ |
|
mchunkptr next; /* next contiguous chunk after oldp */ |
INTERNAL_SIZE_T nextsize; /* its size */ |
|
mchunkptr prev; /* previous contiguous chunk before oldp */ |
INTERNAL_SIZE_T prevsize; /* its size */ |
|
mchunkptr remainder; /* holds split off extra space from newp */ |
INTERNAL_SIZE_T remainder_size; /* its size */ |
|
mchunkptr bck; /* misc temp for linking */ |
mchunkptr fwd; /* misc temp for linking */ |
|
#ifdef REALLOC_ZERO_BYTES_FREES |
if (bytes == 0) { fREe(oldmem); return 0; } |
#endif |
|
|
/* realloc of null is supposed to be same as malloc */ |
if (oldmem == 0) return mALLOc(bytes); |
|
newp = oldp = mem2chunk(oldmem); |
newsize = oldsize = chunksize(oldp); |
|
|
nb = request2size(bytes); |
|
#if HAVE_MMAP |
if (chunk_is_mmapped(oldp)) |
{ |
#if HAVE_MREMAP |
newp = mremap_chunk(oldp, nb); |
if(newp) return chunk2mem(newp); |
#endif |
/* Note the extra SIZE_SZ overhead. */ |
if(oldsize - SIZE_SZ >= nb) return oldmem; /* do nothing */ |
/* Must alloc, copy, free. */ |
newmem = mALLOc(bytes); |
if (newmem == 0) return 0; /* propagate failure */ |
MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ); |
munmap_chunk(oldp); |
return newmem; |
} |
#endif |
|
check_inuse_chunk(oldp); |
|
if ((long)(oldsize) < (long)(nb)) |
{ |
|
/* Try expanding forward */ |
|
next = chunk_at_offset(oldp, oldsize); |
if (next == top || !inuse(next)) |
{ |
nextsize = chunksize(next); |
|
/* Forward into top only if a remainder */ |
if (next == top) |
{ |
if ((long)(nextsize + newsize) >= (long)(nb + MINSIZE)) |
{ |
newsize += nextsize; |
top = chunk_at_offset(oldp, nb); |
set_head(top, (newsize - nb) | PREV_INUSE); |
set_head_size(oldp, nb); |
return chunk2mem(oldp); |
} |
} |
|
/* Forward into next chunk */ |
else if (((long)(nextsize + newsize) >= (long)(nb))) |
{ |
unlink(next, bck, fwd); |
newsize += nextsize; |
goto split; |
} |
} |
else |
{ |
next = 0; |
nextsize = 0; |
} |
|
/* Try shifting backwards. */ |
|
if (!prev_inuse(oldp)) |
{ |
prev = prev_chunk(oldp); |
prevsize = chunksize(prev); |
|
/* try forward + backward first to save a later consolidation */ |
|
if (next != 0) |
{ |
/* into top */ |
if (next == top) |
{ |
if ((long)(nextsize + prevsize + newsize) >= (long)(nb + MINSIZE)) |
{ |
unlink(prev, bck, fwd); |
newp = prev; |
newsize += prevsize + nextsize; |
newmem = chunk2mem(newp); |
MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ); |
top = chunk_at_offset(newp, nb); |
set_head(top, (newsize - nb) | PREV_INUSE); |
set_head_size(newp, nb); |
return newmem; |
} |
} |
|
/* into next chunk */ |
else if (((long)(nextsize + prevsize + newsize) >= (long)(nb))) |
{ |
unlink(next, bck, fwd); |
unlink(prev, bck, fwd); |
newp = prev; |
newsize += nextsize + prevsize; |
newmem = chunk2mem(newp); |
MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ); |
goto split; |
} |
} |
|
/* backward only */ |
if (prev != 0 && (long)(prevsize + newsize) >= (long)nb) |
{ |
unlink(prev, bck, fwd); |
newp = prev; |
newsize += prevsize; |
newmem = chunk2mem(newp); |
MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ); |
goto split; |
} |
} |
|
/* Must allocate */ |
|
newmem = mALLOc (bytes); |
|
if (newmem == 0) /* propagate failure */ |
return 0; |
|
/* Avoid copy if newp is next chunk after oldp. */ |
/* (This can only happen when new chunk is sbrk'ed.) */ |
|
if ( (newp = mem2chunk(newmem)) == next_chunk(oldp)) |
{ |
newsize += chunksize(newp); |
newp = oldp; |
goto split; |
} |
|
/* Otherwise copy, free, and exit */ |
MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ); |
fREe(oldmem); |
return newmem; |
} |
|
|
split: /* split off extra room in old or expanded chunk */ |
|
if (newsize - nb >= MINSIZE) /* split off remainder */ |
{ |
remainder = chunk_at_offset(newp, nb); |
remainder_size = newsize - nb; |
set_head_size(newp, nb); |
set_head(remainder, remainder_size | PREV_INUSE); |
set_inuse_bit_at_offset(remainder, remainder_size); |
fREe(chunk2mem(remainder)); /* let free() deal with it */ |
} |
else |
{ |
set_head_size(newp, newsize); |
set_inuse_bit_at_offset(newp, newsize); |
} |
|
check_inuse_chunk(newp); |
return chunk2mem(newp); |
} |
|
|
|
|
/* |
|
memalign algorithm: |
|
memalign requests more than enough space from malloc, finds a spot |
within that chunk that meets the alignment request, and then |
possibly frees the leading and trailing space. |
|
The alignment argument must be a power of two. This property is not |
checked by memalign, so misuse may result in random runtime errors. |
|
8-byte alignment is guaranteed by normal malloc calls, so don't |
bother calling memalign with an argument of 8 or less. |
|
Overreliance on memalign is a sure way to fragment space. |
|
*/ |
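
/*
  Usage sketch (illustrative only, never compiled; the example_* name is
  hypothetical): request 200 bytes on a 64-byte boundary; the result can
  be released with an ordinary free().
*/
#if 0
static void example_memalign_usage (void)
{
  void* p = memalign(64, 200);   /* ((unsigned long)p % 64) == 0 on success */
  free(p);                       /* free(0) is a no-op if memalign failed */
}
#endif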
|
|
#if __STD_C |
Void_t* mEMALIGn(size_t alignment, size_t bytes) |
#else |
Void_t* mEMALIGn(alignment, bytes) size_t alignment; size_t bytes; |
#endif |
{ |
INTERNAL_SIZE_T nb; /* padded request size */ |
char* m; /* memory returned by malloc call */ |
mchunkptr p; /* corresponding chunk */ |
char* brk; /* alignment point within p */ |
mchunkptr newp; /* chunk to return */ |
INTERNAL_SIZE_T newsize; /* its size */ |
INTERNAL_SIZE_T leadsize; /* leading space before alignment point */ |
mchunkptr remainder; /* spare room at end to split off */ |
long remainder_size; /* its size */ |
|
/* If need less alignment than we give anyway, just relay to malloc */ |
|
if (alignment <= MALLOC_ALIGNMENT) return mALLOc(bytes); |
|
/* Otherwise, ensure that it is at least a minimum chunk size */ |
|
if (alignment < MINSIZE) alignment = MINSIZE; |
|
/* Call malloc with worst case padding to hit alignment. */ |
|
nb = request2size(bytes); |
m = (char*)(mALLOc(nb + alignment + MINSIZE)); |
|
if (m == 0) return 0; /* propagate failure */ |
|
p = mem2chunk(m); |
|
if ((((unsigned long)(m)) % alignment) == 0) /* aligned */ |
{ |
#if HAVE_MMAP |
if(chunk_is_mmapped(p)) |
return chunk2mem(p); /* nothing more to do */ |
#endif |
} |
else /* misaligned */ |
{ |
/* |
Find an aligned spot inside chunk. |
Since we need to give back leading space in a chunk of at |
least MINSIZE, if the first calculation places us at |
a spot with less than MINSIZE leader, we can move to the |
next aligned spot -- we've allocated enough total room so that |
this is always possible. |
*/ |
|
brk = (char*)mem2chunk(((unsigned long)(m + alignment - 1)) & -alignment); |
if ((long)(brk - (char*)(p)) < MINSIZE) brk = brk + alignment; |
|
newp = (mchunkptr)brk; |
leadsize = brk - (char*)(p); |
newsize = chunksize(p) - leadsize; |
|
#if HAVE_MMAP |
if(chunk_is_mmapped(p)) |
{ |
newp->prev_size = p->prev_size + leadsize; |
set_head(newp, newsize|IS_MMAPPED); |
return chunk2mem(newp); |
} |
#endif |
|
/* give back leader, use the rest */ |
|
set_head(newp, newsize | PREV_INUSE); |
set_inuse_bit_at_offset(newp, newsize); |
set_head_size(p, leadsize); |
fREe(chunk2mem(p)); |
p = newp; |
|
assert (newsize >= nb && (((unsigned long)(chunk2mem(p))) % alignment) == 0); |
} |
|
/* Also give back spare room at the end */ |
|
remainder_size = chunksize(p) - nb; |
|
if (remainder_size >= (long)MINSIZE) |
{ |
remainder = chunk_at_offset(p, nb); |
set_head(remainder, remainder_size | PREV_INUSE); |
set_head_size(p, nb); |
fREe(chunk2mem(remainder)); |
} |
|
check_inuse_chunk(p); |
return chunk2mem(p); |
|
} |
|
|
|
|
/* |
valloc just invokes memalign with alignment argument equal |
to the page size of the system (or as near to this as can |
be figured out from all the includes/defines above.) |
*/ |
|
#if __STD_C |
Void_t* vALLOc(size_t bytes) |
#else |
Void_t* vALLOc(bytes) size_t bytes; |
#endif |
{ |
return mEMALIGn (malloc_getpagesize, bytes); |
} |
|
/* |
pvalloc just invokes valloc for the nearest pagesize |
that will accommodate the request |
*/ |
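
/* For example (illustrative), with 4096-byte pages pvalloc(100) rounds the
   request up to one full page, i.e. it behaves like memalign(4096, 4096). */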
|
|
#if __STD_C |
Void_t* pvALLOc(size_t bytes) |
#else |
Void_t* pvALLOc(bytes) size_t bytes; |
#endif |
{ |
size_t pagesize = malloc_getpagesize; |
return mEMALIGn (pagesize, (bytes + pagesize - 1) & ~(pagesize - 1)); |
} |
|
/* |
|
calloc calls malloc, then zeroes out the allocated chunk. |
|
*/ |
|
#if __STD_C |
Void_t* cALLOc(size_t n, size_t elem_size) |
#else |
Void_t* cALLOc(n, elem_size) size_t n; size_t elem_size; |
#endif |
{ |
mchunkptr p; |
INTERNAL_SIZE_T csz; |
|
INTERNAL_SIZE_T sz = n * elem_size; |
|
/* check if malloc_extend_top was called, in which case we don't need to clear */ |
#if MORECORE_CLEARS |
mchunkptr oldtop = top; |
INTERNAL_SIZE_T oldtopsize = chunksize(top); |
#endif |
Void_t* mem = mALLOc (sz); |
|
if (mem == 0) |
return 0; |
else |
{ |
p = mem2chunk(mem); |
|
/* Two optional cases in which clearing not necessary */ |
|
|
#if HAVE_MMAP |
if (chunk_is_mmapped(p)) return mem; |
#endif |
|
csz = chunksize(p); |
|
#if MORECORE_CLEARS |
if (p == oldtop && csz > oldtopsize) |
{ |
/* clear only the bytes from non-freshly-sbrked memory */ |
csz = oldtopsize; |
} |
#endif |
|
MALLOC_ZERO(mem, csz - SIZE_SZ); |
return mem; |
} |
} |
|
/* |
|
cfree just calls free. It is needed/defined on some systems |
that pair it with calloc, presumably for odd historical reasons. |
|
*/ |
|
#if !defined(INTERNAL_LINUX_C_LIB) || !defined(__ELF__) |
#if __STD_C |
void cfree(Void_t *mem) |
#else |
void cfree(mem) Void_t *mem; |
#endif |
{ |
free(mem); |
} |
#endif |
|
|
|
/* |
|
Malloc_trim gives memory back to the system (via negative |
arguments to sbrk) if there is unused memory at the `high' end of |
the malloc pool. You can call this after freeing large blocks of |
memory to potentially reduce the system-level memory requirements |
of a program. However, it cannot guarantee to reduce memory. Under |
some allocation patterns, some large free blocks of memory will be |
locked between two used chunks, so they cannot be given back to |
the system. |
|
The `pad' argument to malloc_trim represents the amount of free |
trailing space to leave untrimmed. If this argument is zero, |
only the minimum amount of memory to maintain internal data |
structures will be left (one page or less). Non-zero arguments |
can be supplied to maintain enough trailing space to service |
future expected allocations without having to re-obtain memory |
from the system. |
|
Malloc_trim returns 1 if it actually released any memory, else 0. |
|
*/ |
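
/*
  Usage sketch (illustrative only, never compiled; the example_* name is
  hypothetical): after releasing a large working set, return excess top
  memory to the system but keep about 64K around for future requests.
*/
#if 0
static void example_trim_usage (void)
{
  /* ... free large blocks here ... */
  if (malloc_trim(64 * 1024))
    {
      /* some trailing memory was actually given back to the system */
    }
}
#endif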
|
#if __STD_C |
int malloc_trim(size_t pad) |
#else |
int malloc_trim(pad) size_t pad; |
#endif |
{ |
long top_size; /* Amount of top-most memory */ |
long extra; /* Amount to release */ |
char* current_brk; /* address returned by pre-check sbrk call */ |
char* new_brk; /* address returned by negative sbrk call */ |
|
unsigned long pagesz = malloc_getpagesize; |
|
top_size = chunksize(top); |
extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz; |
|
if (extra < (long)pagesz) /* Not enough memory to release */ |
return 0; |
|
else |
{ |
/* Test to make sure no one else called sbrk */ |
current_brk = (char*)(MORECORE (0)); |
if (current_brk != (char*)(top) + top_size) |
return 0; /* Apparently we don't own memory; must fail */ |
|
else |
{ |
new_brk = (char*)(MORECORE (-extra)); |
|
if (new_brk == (char*)(MORECORE_FAILURE)) /* sbrk failed? */ |
{ |
/* Try to figure out what we have */ |
current_brk = (char*)(MORECORE (0)); |
top_size = current_brk - (char*)top; |
if (top_size >= (long)MINSIZE) /* if not, we are very very dead! */ |
{ |
sbrked_mem = current_brk - sbrk_base; |
set_head(top, top_size | PREV_INUSE); |
} |
check_chunk(top); |
return 0; |
} |
|
else |
{ |
/* Success. Adjust top accordingly. */ |
set_head(top, (top_size - extra) | PREV_INUSE); |
sbrked_mem -= extra; |
check_chunk(top); |
return 1; |
} |
} |
} |
} |
|
|
|
/* |
malloc_usable_size: |
|
This routine tells you how many bytes you can actually use in an |
allocated chunk, which may be more than you requested (although |
often not). You can use this many bytes without worrying about |
overwriting other allocated objects. Not a particularly great |
programming practice, but still sometimes useful. |
|
*/ |
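
/*
  Usage sketch (illustrative only, never compiled; the example_* name is
  hypothetical): the usable size is at least the requested size, and the
  slack beyond the request may be used without corrupting other objects.
*/
#if 0
static void example_usable_size (void)
{
  char*  p = (char*)malloc(100);
  size_t n = p ? malloc_usable_size(p) : 0;   /* n >= 100 when p != 0 */

  if (p != 0)
    p[n - 1] = 0;   /* writing into the slack is safe */
  free(p);
}
#endif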
|
#if __STD_C |
size_t malloc_usable_size(Void_t* mem) |
#else |
size_t malloc_usable_size(mem) Void_t* mem; |
#endif |
{ |
mchunkptr p; |
if (mem == 0) |
return 0; |
else |
{ |
p = mem2chunk(mem); |
if(!chunk_is_mmapped(p)) |
{ |
if (!inuse(p)) return 0; |
check_inuse_chunk(p); |
return chunksize(p) - SIZE_SZ; |
} |
return chunksize(p) - 2*SIZE_SZ; |
} |
} |
|
|
|
|
/* Utility to update current_mallinfo for malloc_stats and mallinfo() */ |
|
static void malloc_update_mallinfo() |
{ |
int i; |
mbinptr b; |
mchunkptr p; |
#if DEBUG |
mchunkptr q; |
#endif |
|
INTERNAL_SIZE_T avail = chunksize(top); |
int navail = ((long)(avail) >= (long)MINSIZE)? 1 : 0; |
|
for (i = 1; i < NAV; ++i) |
{ |
b = bin_at(i); |
for (p = last(b); p != b; p = p->bk) |
{ |
#if DEBUG |
check_free_chunk(p); |
for (q = next_chunk(p); |
q < top && inuse(q) && (long)(chunksize(q)) >= (long)MINSIZE; |
q = next_chunk(q)) |
check_inuse_chunk(q); |
#endif |
avail += chunksize(p); |
navail++; |
} |
} |
|
current_mallinfo.ordblks = navail; |
current_mallinfo.uordblks = sbrked_mem - avail; |
current_mallinfo.fordblks = avail; |
current_mallinfo.hblks = n_mmaps; |
current_mallinfo.hblkhd = mmapped_mem; |
current_mallinfo.keepcost = chunksize(top); |
|
} |
|
|
|
/* |
|
malloc_stats: |
|
Prints on stderr the amount of space obtained from the system (both |
via sbrk and mmap), the maximum amount (which may be more than |
current if malloc_trim and/or munmap got called), the maximum |
number of simultaneous mmap regions used, and the current number |
of bytes allocated via malloc (or realloc, etc) but not yet |
freed. (Note that this is the number of bytes allocated, not the |
number requested. It will be larger than the number requested |
because of alignment and bookkeeping overhead.) |
|
*/ |
|
void malloc_stats() |
{ |
malloc_update_mallinfo(); |
fprintf(stderr, "max system bytes = %10u\n", |
(unsigned int)(max_total_mem)); |
fprintf(stderr, "system bytes = %10u\n", |
(unsigned int)(sbrked_mem + mmapped_mem)); |
fprintf(stderr, "in use bytes = %10u\n", |
(unsigned int)(current_mallinfo.uordblks + mmapped_mem)); |
#if HAVE_MMAP |
fprintf(stderr, "max mmap regions = %10u\n", |
(unsigned int)max_n_mmaps); |
#endif |
} |
|
/* |
mallinfo returns a copy of updated current mallinfo. |
*/ |
|
struct mallinfo mALLINFo() |
{ |
malloc_update_mallinfo(); |
return current_mallinfo; |
} |
|
|
|
|
/* |
mallopt: |
|
mallopt is the general SVID/XPG interface to tunable parameters. |
The format is to provide a (parameter-number, parameter-value) pair. |
mallopt then sets the corresponding parameter to the argument |
value if it can (i.e., so long as the value is meaningful), |
and returns 1 if successful else 0. |
|
See descriptions of tunable parameters above. |
|
*/ |
|
#if __STD_C |
int mALLOPt(int param_number, int value) |
#else |
int mALLOPt(param_number, value) int param_number; int value; |
#endif |
{ |
switch(param_number) |
{ |
case M_TRIM_THRESHOLD: |
trim_threshold = value; return 1; |
case M_TOP_PAD: |
top_pad = value; return 1; |
case M_MMAP_THRESHOLD: |
mmap_threshold = value; return 1; |
case M_MMAP_MAX: |
#if HAVE_MMAP |
n_mmaps_max = value; return 1; |
#else |
if (value != 0) return 0; else n_mmaps_max = value; return 1; |
#endif |
|
default: |
return 0; |
} |
} |
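/*
  Example (illustrative sketch only, using the parameter names defined
  for this file): raise the trim threshold to 256k, keep 16k of slack at
  the top of the heap, and disable mmap-backed allocations entirely.

    mALLOPt(M_TRIM_THRESHOLD, 256 * 1024);
    mALLOPt(M_TOP_PAD,        16 * 1024);
    mALLOPt(M_MMAP_MAX,       0);

  Each call returns 1 on success and 0 if the parameter number is not
  recognised (or, without HAVE_MMAP, if a non-zero M_MMAP_MAX is given).
*/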
|
/* |
|
History: |
|
V2.6.3 Sun May 19 08:17:58 1996 Doug Lea (dl at gee) |
* Added pvalloc, as recommended by H.J. Liu |
* Added 64bit pointer support mainly from Wolfram Gloger |
* Added anonymously donated WIN32 sbrk emulation |
* Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen |
* malloc_extend_top: fix mask error that caused wastage after |
foreign sbrks |
* Add linux mremap support code from HJ Liu |
|
V2.6.2 Tue Dec 5 06:52:55 1995 Doug Lea (dl at gee) |
* Integrated most documentation with the code. |
* Add support for mmap, with help from |
Wolfram Gloger (Gloger@lrz.uni-muenchen.de). |
* Use last_remainder in more cases. |
* Pack bins using idea from colin@nyx10.cs.du.edu |
    * Use ordered bins instead of best-fit threshold |
* Eliminate block-local decls to simplify tracing and debugging. |
* Support another case of realloc via move into top |
    * Fix error occurring when initial sbrk_base not word-aligned. |
* Rely on page size for units instead of SBRK_UNIT to |
avoid surprises about sbrk alignment conventions. |
* Add mallinfo, mallopt. Thanks to Raymond Nijssen |
(raymond@es.ele.tue.nl) for the suggestion. |
* Add `pad' argument to malloc_trim and top_pad mallopt parameter. |
* More precautions for cases where other routines call sbrk, |
courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de). |
* Added macros etc., allowing use in linux libc from |
H.J. Lu (hjl@gnu.ai.mit.edu) |
* Inverted this history list |
|
V2.6.1 Sat Dec 2 14:10:57 1995 Doug Lea (dl at gee) |
* Re-tuned and fixed to behave more nicely with V2.6.0 changes. |
* Removed all preallocation code since under current scheme |
the work required to undo bad preallocations exceeds |
the work saved in good cases for most test programs. |
* No longer use return list or unconsolidated bins since |
no scheme using them consistently outperforms those that don't |
given above changes. |
* Use best fit for very large chunks to prevent some worst-cases. |
* Added some support for debugging |
|
V2.6.0 Sat Nov 4 07:05:23 1995 Doug Lea (dl at gee) |
* Removed footers when chunks are in use. Thanks to |
Paul Wilson (wilson@cs.texas.edu) for the suggestion. |
|
V2.5.4 Wed Nov 1 07:54:51 1995 Doug Lea (dl at gee) |
* Added malloc_trim, with help from Wolfram Gloger |
(wmglo@Dent.MED.Uni-Muenchen.DE). |
|
V2.5.3 Tue Apr 26 10:16:01 1994 Doug Lea (dl at g) |
|
V2.5.2 Tue Apr 5 16:20:40 1994 Doug Lea (dl at g) |
* realloc: try to expand in both directions |
* malloc: swap order of clean-bin strategy; |
* realloc: only conditionally expand backwards |
* Try not to scavenge used bins |
* Use bin counts as a guide to preallocation |
* Occasionally bin return list chunks in first scan |
* Add a few optimizations from colin@nyx10.cs.du.edu |
|
V2.5.1 Sat Aug 14 15:40:43 1993 Doug Lea (dl at g) |
* faster bin computation & slightly different binning |
* merged all consolidations to one part of malloc proper |
(eliminating old malloc_find_space & malloc_clean_bin) |
* Scan 2 returns chunks (not just 1) |
* Propagate failure in realloc if malloc returns 0 |
* Add stuff to allow compilation on non-ANSI compilers |
from kpv@research.att.com |
|
V2.5 Sat Aug 7 07:41:59 1993 Doug Lea (dl at g.oswego.edu) |
* removed potential for odd address access in prev_chunk |
* removed dependency on getpagesize.h |
* misc cosmetics and a bit more internal documentation |
* anticosmetics: mangled names in macros to evade debugger strangeness |
* tested on sparc, hp-700, dec-mips, rs6000 |
with gcc & native cc (hp, dec only) allowing |
Detlefs & Zorn comparison study (in SIGPLAN Notices.) |
|
Trial version Fri Aug 28 13:14:29 1992 Doug Lea (dl at g.oswego.edu) |
* Based loosely on libg++-1.2X malloc. (It retains some of the overall |
structure of old version, but most details differ.) |
|
*/ |
|
|
/common/v2_0/ChangeLog
0,0 → 1,309
2003-02-05 Jonathan Larmour <jifl@eCosCentric.com> |
|
* include/memjoin.inl: Don't use default arg in definition. |
|
2003-02-04 John Dallaway <jld@ecoscentric.com> |
|
* src/heapgen.tcl: Accommodate POSIX-style arguments |
under Cygwin. |
|
2003-01-29 John Dallaway <jld@ecoscentric.com> |
|
* src/heapgen.tcl: Accommodate latest Cygwin Tcl shell |
(tclsh83.exe) |
|
2002-05-10 Jonathan Larmour <jlarmour@redhat.com> |
|
* tests/heaptest.c (test_pat): Make failure messages clearer. |
(cyg_start): Output what failures mean. |
|
2002-04-24 Yoshinori Sato <qzb04471@nifty.ne.jp> |
|
* src/memfixed.cxx (resize_alloc): Don't set default args in func |
definition. |
|
2002-01-30 Bart Veer <bartv@redhat.com> |
|
* tests/malloc4.cxx: |
Never call realloc() with a new size of 0, which frees the buffer. |
Fix the volatility of ptr.p |
|
2002-01-23 Jesper Skov <jskov@redhat.com> |
|
* tests/malloc4.cxx (myrand): Fix overflow. |
|
2002-01-15 Jonathan Larmour <jlarmour@redhat.com> |
|
* tests/malloc4.cxx (myrand): Fix so that it really treats the limit |
as a limit. |
|
2001-10-17 Jesper Skov <jskov@redhat.com> |
|
* include/sepmetaimpl.inl: CYGINT_ISO_STRING_MEMFUNCS checks |
changed to ifdef. |
|
2001-10-11 Jesper Skov <jskov@redhat.com> |
|
* tests/testaux.hxx (new_thread): Fixed allocation: increase |
counter before starting threads which have been allocated |
resources. |
|
2001-10-08 Jonathan Larmour <jlarmour@redhat.com> |
|
* cdl/memalloc.cdl: Only build malloc.cxx and kapi.cxx when needed. |
|
2001-09-20 Jesper Skov <jskov@redhat.com> |
|
* tests/heaptest.c: Fix failure reporting. |
|
2001-09-07 Jesper Skov <jskov@redhat.com> |
|
* tests/heaptest.c: Added some extra output on failures. |
|
2001-08-01 Jonathan Larmour <jlarmour@redhat.com> |
|
* include/sepmetaimpl.inl: Define check_free_memdq and |
check_alloced_memdq as inlines. |
|
* cdl/memalloc.cdl: Add new allocator supporting separate metadata, |
and the associated config options, and build sepmeta.cxx and tests. |
Build heapgeninc.tcl with macros that work with both gcc2 and gcc3. |
Ditto for heaps.o. |
Add CYGBLD_MEMALLOC_MALLOC_EXTERNAL_HEAP_H to allow external entities |
to define the heap. |
* src/malloc.cxx: Include CYGBLD_MEMALLOC_MALLOC_EXTERNAL_HEAP_H if |
defined instead of default heap definition. |
* include/sepmeta.hxx, include/sepmetaimpl.hxx, include/sepmetaimpl.inl, |
src/sepmeta.cxx, tests/sepmeta1.cxx, tests/sepmeta2.cxx: |
	New files for separated metadata allocator. |
|
2001-07-18 Jonathan Larmour <jlarmour@redhat.com> |
|
* src/heapgen.tcl: Use constructor priority of CYG_INIT_MEMALLOC |
for heap objects in generated heaps.cxx. |
|
2001-07-12 Jonathan Larmour <jlarmour@redhat.com> |
|
	* tests/malloc1.c (main): Account for allocators that do allocate |
space for allocs of 0. |
Test that allocating all space works. |
* src/dlmalloc.cxx (get_status): Correct again calculation of maxfree |
|
2001-06-28 Jonathan Larmour <jlarmour@redhat.com> |
|
* include/memjoin.inl (~Cyg_Mempool_Joined): free even when asserts |
disabled. |
|
* include/memvar.hxx (class Cyg_Mempool_Variable): Comment out argument |
names for consistency. |
* include/memfixed.hxx (class Cyg_Mempool_Fixed): Ditto. |
* include/memjoin.hxx (class Cyg_Mempool_Joined): Ditto. |
|
2001-06-20 Jonathan Larmour <jlarmour@redhat.com> |
|
* include/mvarimpl.inl (get_status): Correct calculation of maxfree |
by taking into account metadata. |
|
2001-06-18 Jonathan Larmour <jlarmour@redhat.com> |
|
* cdl/memalloc.cdl: Add heaptest test. |
|
* tests/heaptest.c: New test to do a memory check of all of heap. |
|
* src/dlmalloc.cxx (get_status): Correct maxfree and totalfree |
by accounting for block headers. |
|
* tests/realloc.c (cyg_start): Remove warning from declaration. |
* tests/malloc1.c (cyg_start): Ditto. |
* tests/malloc2.c (cyg_start): Ditto. |
* tests/malloc3.c (cyg_start): Ditto. |
* tests/malloc4.cxx (cyg_start): Ditto. Also add DEBUGTEST define |
and fix comment. |
|
* tests/testaux.hxx (STACKSIZE): Double. |
|
2001-05-02 Hugo Tyson <hmt@redhat.com> |
|
* src/dlmalloc.cxx (Cyg_Mempool_dlmalloc_Implementation): Fix |
previous change; "top" is a pseudo variable via a NULL pointer if |
the heap is not initialized, so you can't use it as a flag for "no |
mem here"; and a typo, the comparison was reversed. The two hid |
each other, so the check for "no mem here" usually said "OK". |
|
2001-05-01 Jonathan Larmour <jlarmour@redhat.com> |
|
* include/mvarimpl.inl (try_alloc): Allow zero sized heaps. |
(Cyg_Mempool_Variable_Implementation): Ditto. |
* src/dlmalloc.cxx (try_alloc): Ditto. |
(Cyg_Mempool_dlmalloc_Implementation): Ditto. |
|
2001-04-12 Hugo Tyson <hmt@redhat.com> |
|
* include/memjoin.inl (resize_alloc): Fix typo so it compiles. |
This only applies if you configure multiple heaps. |
|
2001-03-21 Jonathan Larmour <jlarmour@redhat.com> |
|
* cdl/memalloc.cdl: Specify explicit output file when preprocessing |
heapgen.cpp. Improves portability. |
|
2001-02-01 Jonathan Larmour <jlarmour@redhat.com> |
|
* tests/malloc4.cxx: Use semaphores to sync startup order. |
|
2000-11-28 Jonathan Larmour <jlarmour@redhat.com> |
|
* src/heapgen.tcl: Don't use cygpath -s for now as not all cygwins |
have it yet. |
|
2000-11-25 Jonathan Larmour <jlarmour@redhat.com> |
|
* cdl/memalloc.cdl: Make sure PWD variable doesn't clash with bash PWD |
by renaming to XPWD |
|
2000-11-24 Jonathan Larmour <jlarmour@redhat.com> |
|
* cdl/memalloc.cdl: Invoke heapgen.tcl with build directory |
surrounded by quotes (and do so in a portable way). |
|
* src/heapgen.tcl: recurse back in on cygwin with correct quoting |
to allow directories containing spaces. Also in a Solaris shell |
compatible way. |
|
2000-11-21 Jonathan Larmour <jlarmour@redhat.com> |
|
* cdl/memalloc.cdl (CYGSEM_MEMALLOC_MALLOC_ZERO_RETURNS_NULL): |
New option. |
* src/malloc.cxx (malloc): Use above option to decide if NULL should |
be returned on malloc(0). |
|
2000-11-01 Jesper Skov <jskov@redhat.com> |
|
* tests/realloc.c (main): Use reasonable factor when making too |
large realloc (targets with 64MB would cause an overflow). |
|
2000-10-31 Jonathan Larmour <jlarmour@redhat.com> |
|
* tests/testaux.hxx: Prototype cyg_hal_invoke_constructors() |
[ Forgot to check this in at the same time as below ] |
|
2000-10-20 Jonathan Larmour <jlarmour@redhat.com> |
|
* tests/dlmalloc1.cxx: |
* tests/dlmalloc2.cxx: |
* tests/malloc4.cxx: |
* tests/memfix1.cxx: |
* tests/memfix2.cxx: |
* tests/memvar1.cxx: |
* tests/memvar2.cxx: |
Make sure default priority constructors have been invoked. |
|
2000-09-14 Jesper Skov <jskov@redhat.com> |
|
* tests/realloc.c (main): fix warning. |
* tests/malloc1.c (main): Same. |
* tests/malloc2.c (main): Same. |
* tests/malloc3.c (main): Same. |
|
2000-08-31 Jonathan Larmour <jlarmour@redhat.com> |
|
* cdl/memalloc.cdl: Make dlmalloc the default malloc implementation now. |
Also add info to the variable block and dlmalloc descriptions to |
describe the pros and cons of these allocators. |
|
2000-08-09 Jonathan Larmour <jlarmour@redhat.co.uk> |
|
* tests/malloc4.cxx (thrfree): Don't yield at loop end - actually delay |
(thrrealloc): Ditto |
(thrcalloc): Ditto |
(thrmalloc): Ditto |
|
2000-08-08 Jonathan Larmour <jlarmour@redhat.co.uk> |
|
* tests/malloc4.cxx: Make output more frequent |
|
2000-08-04 Jonathan Larmour <jlarmour@redhat.co.uk> |
|
* tests/dlmalloc1.cxx (STACKSIZE): Define larger than default. |
|
2000-08-03 Jonathan Larmour <jlarmour@redhat.co.uk> |
|
* include/dlmallocimpl.hxx (class Cyg_Mempool_dlmalloc_Implementation): |
Ensure typedefs are public so dlmalloc.cxx can use them at outer level. |
|
2000-08-02 Jonathan Larmour <jlarmour@redhat.co.uk> |
|
* src/heapgen.tcl: Fix tclsh invocation quoting problems |
|
2000-07-31 Jonathan Larmour <jlarmour@redhat.co.uk> |
|
* src/heapgen.tcl: Allow builddir to be specified on command-line |
* cdl/memalloc.cdl: Work around NT cygtclsh80 bug by cd'ing into |
heapgen.tcl's directory before running it |
|
2000-07-26 Jonathan Larmour <jlarmour@redhat.co.uk> |
|
* tests/malloc4.cxx: Call rand_r() rather than rand, and use a seed |
var in each thread. |
|
2000-07-25 Jonathan Larmour <jlarmour@redhat.co.uk> |
|
* tests/malloc4.cxx (thrfree): Get mem size here. Tidy output. |
(thrmalloc): Get mem size in thrfree instead |
|
* src/heapgen.tcl: Refine search for user-defined name to cope with |
use of CYG_LABEL_DEFN macro |
|
2000-07-19 Jonathan Larmour <jlarmour@redhat.co.uk> |
|
* cdl/memalloc.cdl (CYGIMP_MEMALLOC_ALLOCATOR_DLMALLOC_SAFE_MULTIPLE): |
Default to 1 |
|
* include/mvarimpl.inl (resize_alloc): Remember to adjust other |
freelist entries when extending block |
|
2000-07-04 Jonathan Larmour <jlarmour@redhat.co.uk> |
|
* CYGPKG_MEMALLOC: |
|
Created as new package, merging existing memory allocator related stuff |
from the kernel and libc. Many bug fixes to existing stuff, as |
well as performance improvements, and extra features such as |
a port of dlmalloc, and the ability to support multiple disjoint |
heaps, possibly with run-time configurable size. |
There's even a bit of documentation, and some new tests |
|
//=========================================================================== |
//####ECOSGPLCOPYRIGHTBEGIN#### |
// ------------------------------------------- |
// This file is part of eCos, the Embedded Configurable Operating System. |
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc. |
// |
// eCos is free software; you can redistribute it and/or modify it under |
// the terms of the GNU General Public License as published by the Free |
// Software Foundation; either version 2 or (at your option) any later version. |
// |
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY |
// WARRANTY; without even the implied warranty of MERCHANTABILITY or |
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
// for more details. |
// |
// You should have received a copy of the GNU General Public License along |
// with eCos; if not, write to the Free Software Foundation, Inc., |
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. |
// |
// As a special exception, if other files instantiate templates or use macros |
// or inline functions from this file, or you compile this file and link it |
// with other works to produce a work based on this file, this file does not |
// by itself cause the resulting work to be covered by the GNU General Public |
// License. However the source code for this file must still be made available |
// in accordance with section (3) of the GNU General Public License. |
// |
// This exception does not invalidate any other reasons why a work based on |
// this file might be covered by the GNU General Public License. |
// |
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc. |
// at http://sources.redhat.com/ecos/ecos-license/ |
// ------------------------------------------- |
//####ECOSGPLCOPYRIGHTEND#### |
//=========================================================================== |
/common/v2_0/src/memfixed.cxx
0,0 → 1,177
//========================================================================== |
// |
// memfixed.cxx |
// |
// Memory pool with fixed block class declarations |
// |
//========================================================================== |
//####ECOSGPLCOPYRIGHTBEGIN#### |
// ------------------------------------------- |
// This file is part of eCos, the Embedded Configurable Operating System. |
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc. |
// |
// eCos is free software; you can redistribute it and/or modify it under |
// the terms of the GNU General Public License as published by the Free |
// Software Foundation; either version 2 or (at your option) any later version. |
// |
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY |
// WARRANTY; without even the implied warranty of MERCHANTABILITY or |
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
// for more details. |
// |
// You should have received a copy of the GNU General Public License along |
// with eCos; if not, write to the Free Software Foundation, Inc., |
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. |
// |
// As a special exception, if other files instantiate templates or use macros |
// or inline functions from this file, or you compile this file and link it |
// with other works to produce a work based on this file, this file does not |
// by itself cause the resulting work to be covered by the GNU General Public |
// License. However the source code for this file must still be made available |
// in accordance with section (3) of the GNU General Public License. |
// |
// This exception does not invalidate any other reasons why a work based on |
// this file might be covered by the GNU General Public License. |
// |
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc. |
// at http://sources.redhat.com/ecos/ecos-license/ |
// ------------------------------------------- |
//####ECOSGPLCOPYRIGHTEND#### |
//========================================================================== |
//#####DESCRIPTIONBEGIN#### |
// |
// Author(s): hmt |
// Contributors: jlarmour |
// Date: 2000-06-16 |
// Purpose: Define Memfixed class interface |
// Description: Inline class for constructing a fixed block allocator |
// Usage: #include <cyg/memalloc/memfixed.hxx> |
// |
// |
//####DESCRIPTIONEND#### |
// |
//========================================================================== |
|
// CONFIGURATION |
|
#include <pkgconf/memalloc.h> |
#include <pkgconf/system.h> |
#ifdef CYGPKG_KERNEL |
# include <pkgconf/kernel.h> |
#endif |
|
|
// INCLUDES |
|
#include <cyg/infra/cyg_type.h> // types |
#include <cyg/infra/cyg_ass.h> // assertion macros |
#include <cyg/infra/cyg_trac.h> // tracing macros |
|
#ifdef CYGFUN_KERNEL_THREADS_TIMER |
# include <cyg/kernel/ktypes.h> // cyg_tick_count |
#endif |
|
#ifdef CYGSEM_MEMALLOC_ALLOCATOR_FIXED_THREADAWARE |
// tell it to optimize for the fixed block one-to-one case |
# define CYGIMP_MEM_T_ONEFREE_TO_ONEALLOC |
# include <cyg/memalloc/mempolt2.hxx> // kernel safe mempool template |
#endif |
|
#include <cyg/memalloc/memfixed.hxx> |
#include <cyg/memalloc/mfiximpl.hxx> // implementation of a fixed mem pool |
#include <cyg/memalloc/common.hxx> // Common memory allocator infra |
|
// ------------------------------------------------------------------------- |
// debugging/assert function |
|
#ifdef CYGDBG_USE_ASSERTS |
cyg_bool |
Cyg_Mempool_Fixed::check_this(cyg_assert_class_zeal zeal) const |
{ |
CYG_REPORT_FUNCTION(); |
// check that we have a non-NULL pointer first |
if( this == NULL ) return false; |
#ifdef CYGSEM_MEMALLOC_ALLOCATOR_FIXED_THREADAWARE |
return mypool.check_this( zeal ); |
#else |
return true; |
#endif |
} |
#endif |
|
// ------------------------------------------------------------------------- |
// Constructor: gives the base and size of the arena in which memory is |
// to be carved out, note that management structures are taken from the |
// same arena. Alloc_unit is the blocksize allocated. |
Cyg_Mempool_Fixed::Cyg_Mempool_Fixed( |
cyg_uint8 *base, |
cyg_int32 size, |
CYG_ADDRWORD alloc_unit ) |
: mypool( base, size, alloc_unit ) |
{ |
} |
|
// Destructor |
Cyg_Mempool_Fixed::~Cyg_Mempool_Fixed() |
{ |
} |
|
// ------------------------------------------------------------------------- |
// get some memory; wait if none available |
#ifdef CYGSEM_MEMALLOC_ALLOCATOR_FIXED_THREADAWARE |
cyg_uint8 * |
Cyg_Mempool_Fixed::alloc() |
{ |
return mypool.alloc( 0 ); |
} |
|
# ifdef CYGFUN_KERNEL_THREADS_TIMER |
// get some memory with a timeout |
cyg_uint8 * |
Cyg_Mempool_Fixed::alloc(cyg_tick_count delay_timeout ) |
{ |
return mypool.alloc( 0, delay_timeout ); |
} |
# endif |
#endif |
|
// get some memory, return NULL if none available |
cyg_uint8 * |
Cyg_Mempool_Fixed::try_alloc() |
{ |
return mypool.try_alloc( 0 ); |
} |
|
// free the memory back to the pool |
cyg_bool |
Cyg_Mempool_Fixed::free( cyg_uint8 *p ) |
{ |
return mypool.free( p, 0 ); |
} |
|
// Nominally resize an existing allocation. This is defined in the |
// fixed block allocator purely for API consistency. It fails for |
// every requested size except the pool's fixed block size, in which |
// case it succeeds. |
cyg_uint8 * |
Cyg_Mempool_Fixed::resize_alloc( cyg_uint8 *alloc_ptr, cyg_int32 newsize, |
cyg_int32 *oldsize ) |
{ |
return mypool.resize_alloc( alloc_ptr, newsize, oldsize ); |
} |
|
// Get memory pool status |
void |
Cyg_Mempool_Fixed::get_status( cyg_mempool_status_flag_t flags, |
Cyg_Mempool_Status &status ) |
{ |
// set to 0 - if there's anything really waiting, it will be set to |
// 1 later |
status.waiting = 0; |
|
return mypool.get_status( flags, status ); |
} |
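// -------------------------------------------------------------------------
// Example (a minimal sketch, not part of the original source): constructing
// a fixed block pool over a static arena and allocating one block from it.
// The arena size and block size below are arbitrary.
//
//     static cyg_uint8 pool_arena[1024];
//     static Cyg_Mempool_Fixed pool( pool_arena, sizeof(pool_arena), 32 );
//
//     void fixed_pool_demo(void)
//     {
//         cyg_uint8 *blk = pool.try_alloc();   // NULL if pool exhausted
//         if( blk != NULL )
//             pool.free( blk );
//     }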
|
// ------------------------------------------------------------------------- |
|
// End of memfixed.cxx |
/common/v2_0/src/sepmeta.cxx
0,0 → 1,184
//========================================================================== |
// |
// sepmeta.cxx |
// |
// Variable block memory pool with separated metadata class declarations |
// |
//========================================================================== |
//####ECOSGPLCOPYRIGHTBEGIN#### |
// ------------------------------------------- |
// This file is part of eCos, the Embedded Configurable Operating System. |
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc. |
// |
// eCos is free software; you can redistribute it and/or modify it under |
// the terms of the GNU General Public License as published by the Free |
// Software Foundation; either version 2 or (at your option) any later version. |
// |
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY |
// WARRANTY; without even the implied warranty of MERCHANTABILITY or |
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
// for more details. |
// |
// You should have received a copy of the GNU General Public License along |
// with eCos; if not, write to the Free Software Foundation, Inc., |
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. |
// |
// As a special exception, if other files instantiate templates or use macros |
// or inline functions from this file, or you compile this file and link it |
// with other works to produce a work based on this file, this file does not |
// by itself cause the resulting work to be covered by the GNU General Public |
// License. However the source code for this file must still be made available |
// in accordance with section (3) of the GNU General Public License. |
// |
// This exception does not invalidate any other reasons why a work based on |
// this file might be covered by the GNU General Public License. |
// |
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc. |
// at http://sources.redhat.com/ecos/ecos-license/ |
// ------------------------------------------- |
//####ECOSGPLCOPYRIGHTEND#### |
//========================================================================== |
//#####DESCRIPTIONBEGIN#### |
// |
// Author(s): jlarmour |
// Contributors: |
// Date: 2001-06-28 |
// Description: |
// Usage: #include <cyg/memalloc/sepmeta.hxx> |
// |
// |
//####DESCRIPTIONEND#### |
// |
//========================================================================== |
|
// CONFIGURATION |
|
#include <pkgconf/memalloc.h> |
#include <pkgconf/system.h> |
#ifdef CYGPKG_KERNEL |
# include <pkgconf/kernel.h> |
#endif |
|
|
// INCLUDES |
|
#include <cyg/infra/cyg_type.h> // types |
#include <cyg/infra/cyg_ass.h> // assertion macros |
#include <cyg/infra/cyg_trac.h> // tracing macros |
|
#ifdef CYGFUN_KERNEL_THREADS_TIMER |
# include <cyg/kernel/ktypes.h> // cyg_tick_count |
#endif |
|
#ifdef CYGSEM_MEMALLOC_ALLOCATOR_SEPMETA_THREADAWARE |
# include <cyg/memalloc/mempolt2.hxx> // kernel safe mempool template |
#endif |
|
#include <cyg/memalloc/sepmeta.hxx> |
#include <cyg/memalloc/sepmetaimpl.hxx>// implementation of this mem pool |
#include <cyg/memalloc/common.hxx> // Common memory allocator infra |
|
// FUNCTIONS |
|
// ------------------------------------------------------------------------- |
// debugging/assert function |
|
#ifdef CYGDBG_USE_ASSERTS |
cyg_bool |
Cyg_Mempool_Sepmeta::check_this(cyg_assert_class_zeal zeal) const |
{ |
CYG_REPORT_FUNCTION(); |
// check that we have a non-NULL pointer first |
if( this == NULL ) return false; |
#ifdef CYGSEM_MEMALLOC_ALLOCATOR_SEPMETA_THREADAWARE |
return mypool.check_this( zeal ); |
#else |
return true; |
#endif |
} |
#endif |
|
// ------------------------------------------------------------------------- |
// Constructor: gives the base and size of the arena in which memory is |
// to be carved out |
Cyg_Mempool_Sepmeta::Cyg_Mempool_Sepmeta( |
cyg_uint8 *base, |
cyg_int32 size, |
cyg_int32 alignment, |
cyg_uint8 *metabase, |
cyg_uint32 metasize) |
: args(alignment, metabase, metasize), |
mypool( base, size, (CYG_ADDRWORD)&args ) |
{ |
} |
|
// Destructor |
Cyg_Mempool_Sepmeta::~Cyg_Mempool_Sepmeta() |
{ |
} |
|
// ------------------------------------------------------------------------- |
// get some memory; wait if none available |
#ifdef CYGSEM_MEMALLOC_ALLOCATOR_SEPMETA_THREADAWARE |
cyg_uint8 * |
Cyg_Mempool_Sepmeta::alloc(cyg_int32 size) |
{ |
return mypool.alloc( size ); |
} |
|
# ifdef CYGFUN_KERNEL_THREADS_TIMER |
// get some memory with a timeout |
cyg_uint8 * |
Cyg_Mempool_Sepmeta::alloc(cyg_int32 size, cyg_tick_count delay_timeout) |
{ |
return mypool.alloc( size , delay_timeout ); |
} |
# endif |
#endif |
|
// get some memory, return NULL if none available |
cyg_uint8 * |
Cyg_Mempool_Sepmeta::try_alloc(cyg_int32 size) |
{ |
return mypool.try_alloc( size ); |
} |
|
// Resize an existing allocation. If oldsize is non-NULL, the previous |
// allocation size is placed into it; if the previous size is not |
// available, it is set to 0. NB the previous allocation size may have |
// been rounded up. Occasionally the allocation can be adjusted |
// *backwards* as well as, or instead of, forwards, therefore the |
// address of the resized allocation is returned, or NULL if no |
// resizing was possible. Note that this differs from ::realloc() in |
// that no attempt is made to call malloc() if resizing is not |
// possible - that is left to higher layers. The data is copied from |
// old to new, though. The effects of alloc_ptr==NULL or newsize==0 |
// are undefined. |
cyg_uint8 * |
Cyg_Mempool_Sepmeta::resize_alloc( cyg_uint8 *alloc_ptr, cyg_int32 newsize, |
cyg_int32 *oldsize ) |
{ |
return mypool.resize_alloc( alloc_ptr, newsize, oldsize ); |
} |
|
// free the memory back to the pool |
cyg_bool |
Cyg_Mempool_Sepmeta::free( cyg_uint8 *p, cyg_int32 size ) |
{ |
return mypool.free( p, size ); |
} |
|
// Get memory pool status |
void |
Cyg_Mempool_Sepmeta::get_status( cyg_mempool_status_flag_t flags, |
Cyg_Mempool_Status &status ) |
{ |
// set to 0 - if there's anything really waiting, it will be set to |
// 1 later |
status.waiting = 0; |
|
return mypool.get_status( flags, status ); |
} |
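// -------------------------------------------------------------------------
// Example (sketch only; sizes and alignment below are arbitrary): the
// separated metadata pool is given two regions - one carved up for
// allocations and one holding the allocator's own bookkeeping.
//
//     static cyg_uint8 data_arena[4096];
//     static cyg_uint8 meta_arena[512];
//     static Cyg_Mempool_Sepmeta pool( data_arena, sizeof(data_arena), 8,
//                                      meta_arena, sizeof(meta_arena) );
//
//     void sepmeta_demo(void)
//     {
//         cyg_uint8 *p = pool.try_alloc( 100 );
//         if( p != NULL )
//             pool.free( p, 100 );
//     }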
|
// ------------------------------------------------------------------------- |
|
// End of sepmeta.cxx |
/common/v2_0/src/kapi.cxx
0,0 → 1,358
//========================================================================== |
// |
// kapi.cxx |
// |
// Implementation of kernel C API functions for memory pools |
// |
//========================================================================== |
//####ECOSGPLCOPYRIGHTBEGIN#### |
// ------------------------------------------- |
// This file is part of eCos, the Embedded Configurable Operating System. |
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc. |
// |
// eCos is free software; you can redistribute it and/or modify it under |
// the terms of the GNU General Public License as published by the Free |
// Software Foundation; either version 2 or (at your option) any later version. |
// |
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY |
// WARRANTY; without even the implied warranty of MERCHANTABILITY or |
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
// for more details. |
// |
// You should have received a copy of the GNU General Public License along |
// with eCos; if not, write to the Free Software Foundation, Inc., |
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. |
// |
// As a special exception, if other files instantiate templates or use macros |
// or inline functions from this file, or you compile this file and link it |
// with other works to produce a work based on this file, this file does not |
// by itself cause the resulting work to be covered by the GNU General Public |
// License. However the source code for this file must still be made available |
// in accordance with section (3) of the GNU General Public License. |
// |
// This exception does not invalidate any other reasons why a work based on |
// this file might be covered by the GNU General Public License. |
// |
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc. |
// at http://sources.redhat.com/ecos/ecos-license/ |
// ------------------------------------------- |
//####ECOSGPLCOPYRIGHTEND#### |
//========================================================================== |
//#####DESCRIPTIONBEGIN#### |
// |
// Author(s): nickg, dsm, jlarmour |
// Contributors: |
// Date: 2000-06-12 |
// Description: Implementation of kernel C API functions for memory pools |
// Usage: |
// |
// |
//####DESCRIPTIONEND#### |
// |
//========================================================================== |
|
// CONFIGURATION |
|
#include <pkgconf/memalloc.h> |
#include <pkgconf/system.h> |
#ifdef CYGPKG_KERNEL |
# include <pkgconf/kernel.h> |
#endif |
|
#ifdef CYGFUN_MEMALLOC_KAPI |
|
// INCLUDES |
|
#include <cyg/infra/cyg_type.h> // types |
#include <cyg/infra/cyg_ass.h> // assertion macros |
#include <cyg/infra/cyg_trac.h> // tracing macros |
#include <cyg/kernel/ktypes.h> // base kernel types |
|
#include <cyg/memalloc/memvar.hxx> |
#include <cyg/memalloc/memfixed.hxx> |
#include <cyg/memalloc/common.hxx> // status flags |
|
#include <cyg/kernel/kapi.h> // C API |
|
// MACROS |
|
#ifdef CYGDBG_USE_ASSERTS |
|
#define CYG_ASSERT_SIZES(cstruct, cxxstruct) \ |
CYG_MACRO_START \ |
char *msg = "Size of C struct " #cstruct \ |
" != size of C++ struct " #cxxstruct ; \ |
CYG_ASSERT( sizeof(cstruct) == sizeof(cxxstruct) , msg ); \ |
CYG_MACRO_END |
|
#else |
|
#define CYG_ASSERT_SIZES(cstruct, cxxstruct) |
|
#endif |
|
// FUNCTIONS |
|
// ------------------------------------------------------------------------- |
// Magic new function |
|
inline void *operator new(size_t size, void *ptr) |
{ |
CYG_CHECK_DATA_PTR( ptr, "Bad pointer" ); |
return ptr; |
} |
|
/*-----------------------------------------------------------------------*/ |
/* Memory pools */ |
|
/* Create a variable size memory pool */ |
externC void cyg_mempool_var_create( |
void *base, /* base of memory to use for pool */ |
cyg_int32 size, /* size of memory in bytes */ |
cyg_handle_t *handle, /* returned handle of memory pool */ |
cyg_mempool_var *var /* space to put pool structure in */ |
) |
{ |
CYG_ASSERT_SIZES( cyg_mempool_var, Cyg_Mempool_Variable ); |
|
Cyg_Mempool_Variable *t = new((void *)var) Cyg_Mempool_Variable ( |
(cyg_uint8 *)base, |
size |
); |
    t=t;                 // reference 't' to avoid an unused-variable warning |
|
CYG_CHECK_DATA_PTR( handle, "Bad handle pointer" ); |
*handle = (cyg_handle_t)var; |
} |
|
/* Delete variable size memory pool */ |
externC void cyg_mempool_var_delete(cyg_handle_t varpool) |
{ |
((Cyg_Mempool_Variable *)varpool)->~Cyg_Mempool_Variable(); |
} |
|
/* Allocates a block of length size. This waits if the memory is not |
currently available. */ |
#ifdef CYGSEM_MEMALLOC_ALLOCATOR_VARIABLE_THREADAWARE |
externC void *cyg_mempool_var_alloc(cyg_handle_t varpool, cyg_int32 size) |
{ |
return ((Cyg_Mempool_Variable *)varpool)->alloc(size); |
} |
|
# ifdef CYGFUN_KERNEL_THREADS_TIMER |
|
/* Allocates a block of length size. This waits for up to delay |
ticks, if the memory is not already available. NULL is returned if |
no memory is available. */ |
externC void *cyg_mempool_var_timed_alloc( |
cyg_handle_t varpool, |
cyg_int32 size, |
cyg_tick_count_t abstime) |
{ |
return ((Cyg_Mempool_Variable *)varpool)->alloc(size, abstime); |
} |
|
# endif |
#endif |
|
/* Allocates a block of length size. NULL is returned if no memory is |
available. */ |
externC void *cyg_mempool_var_try_alloc( |
cyg_handle_t varpool, |
cyg_int32 size) |
{ |
return ((Cyg_Mempool_Variable *)varpool)->try_alloc(size); |
} |
|
/* Frees memory back into variable size pool. */ |
externC void cyg_mempool_var_free(cyg_handle_t varpool, void *p) |
{ |
cyg_bool b; |
b = ((Cyg_Mempool_Variable *)varpool)->free((cyg_uint8 *)p, 0); |
CYG_ASSERT( b, "Bad free"); |
} |
|
|
/* Returns true if there are any threads waiting for memory in the |
given memory pool. */ |
externC cyg_bool_t cyg_mempool_var_waiting(cyg_handle_t varpool) |
{ |
Cyg_Mempool_Variable *v = (Cyg_Mempool_Variable *)varpool; |
Cyg_Mempool_Status stat; |
|
v->get_status( CYG_MEMPOOL_STAT_WAITING, stat ); |
return (stat.waiting != 0); |
} |
|
/* Puts information about a variable memory pool into the structure |
provided. */ |
externC void cyg_mempool_var_get_info( |
cyg_handle_t varpool, |
cyg_mempool_info *info) |
{ |
Cyg_Mempool_Variable *v = (Cyg_Mempool_Variable *)varpool; |
Cyg_Mempool_Status stat; |
|
v->get_status( CYG_MEMPOOL_STAT_ARENASIZE| |
CYG_MEMPOOL_STAT_TOTALFREE| |
CYG_MEMPOOL_STAT_ARENABASE| |
CYG_MEMPOOL_STAT_ORIGSIZE| |
CYG_MEMPOOL_STAT_MAXFREE, stat ); |
|
info->totalmem = stat.arenasize; |
info->freemem = stat.totalfree; |
info->size = stat.origsize; |
info->base = const_cast<cyg_uint8 *>(stat.arenabase); |
info->blocksize = -1; |
info->maxfree = stat.maxfree; |
} |
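/* Example (a minimal sketch of the variable pool C API above; the buffer
   sizes are arbitrary):

     static cyg_uint8       var_arena[4096];   // memory handed to the pool
     static cyg_mempool_var var_pool;          // space for the pool object
     static cyg_handle_t    var_handle;

     void var_pool_demo(void)
     {
         cyg_mempool_info info;
         void *p;

         cyg_mempool_var_create(var_arena, sizeof(var_arena),
                                &var_handle, &var_pool);

         p = cyg_mempool_var_try_alloc(var_handle, 100);
         if (p != NULL)
             cyg_mempool_var_free(var_handle, p);

         cyg_mempool_var_get_info(var_handle, &info);
         // info.freemem now holds the remaining free byte count

         cyg_mempool_var_delete(var_handle);
     } */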
|
|
/* Create a fixed size memory pool */ |
externC void cyg_mempool_fix_create( |
void *base, // base of memory to use for pool |
cyg_int32 size, // size of memory in byte |
cyg_int32 blocksize, // size of allocation in bytes |
cyg_handle_t *handle, // handle of memory pool |
cyg_mempool_fix *fix // space to put pool structure in |
) |
{ |
CYG_ASSERT_SIZES( cyg_mempool_fix, Cyg_Mempool_Fixed ); |
|
Cyg_Mempool_Fixed *t = new((void *)fix) Cyg_Mempool_Fixed ( |
(cyg_uint8 *)base, |
size, |
blocksize |
); |
    t=t;                 // reference 't' to avoid an unused-variable warning |
|
CYG_CHECK_DATA_PTR( handle, "Bad handle pointer" ); |
*handle = (cyg_handle_t)fix; |
} |
|
/* Delete fixed size memory pool */ |
externC void cyg_mempool_fix_delete(cyg_handle_t fixpool) |
{ |
((Cyg_Mempool_Fixed *)fixpool)->~Cyg_Mempool_Fixed(); |
} |
|
#ifdef CYGSEM_MEMALLOC_ALLOCATOR_FIXED_THREADAWARE |
/* Allocates a block. This waits if the memory is not |
currently available. */ |
externC void *cyg_mempool_fix_alloc(cyg_handle_t fixpool) |
{ |
return ((Cyg_Mempool_Fixed *)fixpool)->alloc(); |
} |
|
# ifdef CYGFUN_KERNEL_THREADS_TIMER |
|
/* Allocates a block. This waits for up to delay ticks, if the memory |
is not already available. NULL is returned if no memory is |
available. */ |
externC void *cyg_mempool_fix_timed_alloc( |
cyg_handle_t fixpool, |
cyg_tick_count_t abstime) |
{ |
return ((Cyg_Mempool_Fixed *)fixpool)->alloc(abstime); |
} |
|
# endif |
#endif |
|
/* Allocates a block. NULL is returned if no memory is available. */ |
externC void *cyg_mempool_fix_try_alloc(cyg_handle_t fixpool) |
{ |
return ((Cyg_Mempool_Fixed *)fixpool)->try_alloc(); |
} |
|
/* Frees memory back into fixed size pool. */ |
externC void cyg_mempool_fix_free(cyg_handle_t fixpool, void *p) |
{ |
cyg_bool b; |
b = ((Cyg_Mempool_Fixed *)fixpool)->free((cyg_uint8 *)p); |
CYG_ASSERT( b, "Bad free"); |
} |
|
/* Returns true if there are any threads waiting for memory in the |
given memory pool. */ |
externC cyg_bool_t cyg_mempool_fix_waiting(cyg_handle_t fixpool) |
{ |
Cyg_Mempool_Fixed *f = (Cyg_Mempool_Fixed *)fixpool; |
Cyg_Mempool_Status stat; |
|
f->get_status( CYG_MEMPOOL_STAT_WAITING, stat ); |
return (stat.waiting != 0); |
} |
|
/* Puts information about a fixed block memory pool into the structure |
provided. */ |
externC void cyg_mempool_fix_get_info( |
cyg_handle_t fixpool, |
cyg_mempool_info *info) |
{ |
Cyg_Mempool_Fixed *f = (Cyg_Mempool_Fixed *)fixpool; |
Cyg_Mempool_Status stat; |
|
f->get_status( CYG_MEMPOOL_STAT_ARENASIZE| |
CYG_MEMPOOL_STAT_TOTALFREE| |
CYG_MEMPOOL_STAT_ARENABASE| |
CYG_MEMPOOL_STAT_ORIGSIZE| |
CYG_MEMPOOL_STAT_BLOCKSIZE| |
CYG_MEMPOOL_STAT_MAXFREE, stat ); |
|
info->totalmem = stat.arenasize; |
info->freemem = stat.totalfree; |
info->size = stat.origsize; |
info->base = const_cast<cyg_uint8 *>(stat.arenabase); |
info->blocksize = stat.blocksize; |
info->maxfree = stat.maxfree; |
} |
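/* Example (sketch): the fixed-block variant is used in the same way, with
   an extra blocksize argument at creation time:

     static cyg_uint8       fix_arena[1024];
     static cyg_mempool_fix fix_pool;
     static cyg_handle_t    fix_handle;

     cyg_mempool_fix_create(fix_arena, sizeof(fix_arena), 32,
                            &fix_handle, &fix_pool);
     void *blk = cyg_mempool_fix_try_alloc(fix_handle);  // one 32-byte block
     if (blk != NULL)
         cyg_mempool_fix_free(fix_handle, blk); */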
|
// ------------------------------------------------------------------------- |
// Check structure sizes. |
// This class and constructor get run automatically in debug versions |
// of the kernel and check that the structures configured in the C |
// code are the same size as the C++ classes they should match. |
|
#ifdef CYGPKG_INFRA_DEBUG |
|
class Cyg_Check_Mem_Structure_Sizes |
{ |
int dummy; |
public: |
Cyg_Check_Mem_Structure_Sizes( int x ); |
|
}; |
|
#define CYG_CHECK_SIZES(cstruct, cxxstruct) \ |
if( sizeof(cstruct) != sizeof(cxxstruct) ) \ |
{ \ |
char *fmt = "Size of C struct " #cstruct \ |
" != size of C++ struct " #cxxstruct ; \ |
CYG_TRACE2(1, fmt, sizeof(cstruct) , sizeof(cxxstruct) ); \ |
fail = true; \ |
fmt = fmt; \ |
} |
|
Cyg_Check_Mem_Structure_Sizes::Cyg_Check_Mem_Structure_Sizes(int x) |
{ |
cyg_bool fail = false; |
|
dummy = x+1; |
|
CYG_CHECK_SIZES( cyg_mempool_var, Cyg_Mempool_Variable ); |
CYG_CHECK_SIZES( cyg_mempool_fix, Cyg_Mempool_Fixed ); |
|
CYG_ASSERT( !fail, "Size checks failed"); |
} |
|
static Cyg_Check_Mem_Structure_Sizes cyg_memalloc_check_structure_sizes(1); |
|
#endif |
|
// ------------------------------------------------------------------------- |
|
|
#endif // ifdef CYGFUN_MEMALLOC_KAPI |
|
// End of kapi.cxx |
/common/v2_0/src/dlmalloc.cxx
0,0 → 1,1656
//========================================================================== |
// |
// dlmalloc.cxx |
// |
// Port of Doug Lea's malloc implementation |
// |
//========================================================================== |
//####ECOSGPLCOPYRIGHTBEGIN#### |
// ------------------------------------------- |
// This file is part of eCos, the Embedded Configurable Operating System. |
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc. |
// |
// eCos is free software; you can redistribute it and/or modify it under |
// the terms of the GNU General Public License as published by the Free |
// Software Foundation; either version 2 or (at your option) any later version. |
// |
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY |
// WARRANTY; without even the implied warranty of MERCHANTABILITY or |
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
// for more details. |
// |
// You should have received a copy of the GNU General Public License along |
// with eCos; if not, write to the Free Software Foundation, Inc., |
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. |
// |
// As a special exception, if other files instantiate templates or use macros |
// or inline functions from this file, or you compile this file and link it |
// with other works to produce a work based on this file, this file does not |
// by itself cause the resulting work to be covered by the GNU General Public |
// License. However the source code for this file must still be made available |
// in accordance with section (3) of the GNU General Public License. |
// |
// This exception does not invalidate any other reasons why a work based on |
// this file might be covered by the GNU General Public License. |
// |
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc. |
// at http://sources.redhat.com/ecos/ecos-license/ |
// ------------------------------------------- |
//####ECOSGPLCOPYRIGHTEND#### |
//========================================================================== |
//#####DESCRIPTIONBEGIN#### |
// |
// Author(s): Doug Lea (dl at g.oswego.edu), jlarmour |
// Contributors: |
// Date: 2000-06-18 |
// Purpose: Doug Lea's malloc implementation |
// Description: Doug Lea's malloc has been ported to eCos. This file |
// provides the implementation in a way acceptable to eCos. |
// Substantial amounts of unnecessary bits (to eCos) of the |
// original implementation have been removed to make the |
// code more tractable. Note this may make a number of the |
// comments appear to make little sense, or no longer apply! |
// In particular, mmap support is removed entirely. |
// Also the memory is "sbrked" all at once at the |
// beginning, covering the entire memory region given at |
// construction, and there can be no more afterwards. |
// Usage: #include <cyg/memalloc/dlmalloc.hxx> |
// |
// |
//####DESCRIPTIONEND#### |
// |
//========================================================================== |
|
// DOCUMENTATION FROM ORIGINAL FILE: |
// (some now irrelevant parts elided) |
|
//---------------------------------------------------------------------------- |
|
/* |
A version of malloc/free/realloc written by Doug Lea and released to the |
public domain. Send questions/comments/complaints/performance data |
to dl at cs.oswego.edu |
|
* VERSION 2.6.6 Sun Mar 5 19:10:03 2000 Doug Lea (dl at gee) |
|
Note: There may be an updated version of this malloc obtainable at |
ftp://g.oswego.edu/pub/misc/malloc.c |
Check before installing! |
|
* Why use this malloc? |
|
This is not the fastest, most space-conserving, most portable, or |
most tunable malloc ever written. However it is among the fastest |
while also being among the most space-conserving, portable and tunable. |
Consistent balance across these factors results in a good general-purpose |
allocator. For a high-level description, see |
http://g.oswego.edu/dl/html/malloc.html |
|
* Synopsis of public routines |
|
(Much fuller descriptions are contained in the program documentation below.) |
|
  [ these have of course been renamed in the eCos port ] |
|
malloc(size_t n); |
Return a pointer to a newly allocated chunk of at least n bytes, or null |
if no space is available. |
free(Void_t* p); |
Release the chunk of memory pointed to by p, or no effect if p is null. |
realloc(Void_t* p, size_t n); |
Return a pointer to a chunk of size n that contains the same data |
as does chunk p up to the minimum of (n, p's size) bytes, or null |
if no space is available. The returned pointer may or may not be |
the same as p. If p is null, equivalent to malloc. realloc of |
zero bytes calls free(p) |
|
* Vital statistics: |
|
Alignment: 8-byte |
8 byte alignment is currently hardwired into the design. This |
seems to suffice for all current machines and C compilers. |
|
Assumed pointer representation: 4 or 8 bytes |
       Code for 8-byte pointers is untested by me but is reported to have |
       worked reliably by Wolfram Gloger, who contributed most of the |
       changes supporting this. |
|
Assumed size_t representation: 4 or 8 bytes |
Note that size_t is allowed to be 4 bytes even if pointers are 8. |
|
Minimum overhead per allocated chunk: 4 or 8 bytes |
Each malloced chunk has a hidden overhead of 4 bytes holding size |
and status information. |
|
Minimum allocated size: 4-byte ptrs: 16 bytes (including 4 overhead) |
                          8-byte ptrs:  24/32 bytes (including 4/8 overhead) |
|
When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte |
ptrs but 4 byte size) or 24 (for 8/8) additional bytes are |
needed; 4 (8) for a trailing size field |
and 8 (16) bytes for free list pointers. Thus, the minimum |
allocatable size is 16/24/32 bytes. |
|
Even a request for zero bytes (i.e., malloc(0)) returns a |
pointer to something of the minimum allocatable size. |
|
Maximum allocated size: 4-byte size_t: 2^31 - 8 bytes |
8-byte size_t: 2^63 - 16 bytes |
|
It is assumed that (possibly signed) size_t bit values suffice to |
represent chunk sizes. `Possibly signed' is due to the fact |
that `size_t' may be defined on a system as either a signed or |
an unsigned type. To be conservative, values that would appear |
as negative numbers are avoided. |
Requests for sizes with a negative sign bit when the request |
       size is treated as a long will return null. |
|
Maximum overhead wastage per allocated chunk: normally 15 bytes |
|
       Alignment demands, plus the minimum allocatable size restriction |
make the normal worst-case wastage 15 bytes (i.e., up to 15 |
more bytes will be allocated than were requested in malloc), with |
one exception: because requests for zero bytes allocate non-zero space, |
the worst case wastage for a request of zero bytes is 24 bytes. |
|
* Limitations |
|
Here are some features that are NOT currently supported |
|
* No user-definable hooks for callbacks and the like. |
* No automated mechanism for fully checking that all accesses |
to malloced memory stay within their bounds. |
* No support for compaction. |
|
* Synopsis of compile-time options: |
|
People have reported using previous versions of this malloc on all |
versions of Unix, sometimes by tweaking some of the defines |
below. It has been tested most extensively on Solaris and |
Linux. It is also reported to work on WIN32 platforms. |
People have also reported adapting this malloc for use in |
stand-alone embedded systems. |
|
The implementation is in straight, hand-tuned ANSI C. Among other |
consequences, it uses a lot of macros. Because of this, to be at |
all usable, this code should be compiled using an optimizing compiler |
(for example gcc -O2) that can simplify expressions and control |
paths. |
|
CYGDBG_MEMALLOC_ALLOCATOR_DLMALLOC_DEBUG (default: NOT defined) |
Define to enable debugging. Adds fairly extensive assertion-based |
checking to help track down memory errors, but noticeably slows down |
execution. |
MALLOC_LOCK (default: NOT defined) |
MALLOC_UNLOCK (default: NOT defined) |
Define these to C expressions which are run to lock and unlock |
the malloc data structures. Calls may be nested; that is, |
MALLOC_LOCK may be called more than once before the corresponding |
MALLOC_UNLOCK calls. MALLOC_LOCK must avoid waiting for a lock |
that it already holds. |
MALLOC_ALIGNMENT (default: NOT defined) |
Define this to 16 if you need 16 byte alignment instead of 8 byte alignment |
which is the normal default. |
SIZE_T_SMALLER_THAN_LONG (default: NOT defined) |
Define this when the platform you are compiling has |
sizeof(long) > sizeof(size_t). |
The option causes some extra code to be generated to handle operations |
that use size_t operands and have long results. |
INTERNAL_SIZE_T (default: size_t) |
Define to a 32-bit type (probably `unsigned int') if you are on a |
64-bit machine, yet do not want or need to allow malloc requests of |
greater than 2^31 to be handled. This saves space, especially for |
very small chunks. |
|
*/ |
|
//---------------------------------------------------------------------------- |
|
|
/* Preliminaries */ |
|
#include <pkgconf/memalloc.h> // configuration header |
#include <pkgconf/infra.h> // CYGDBG_USE_ASSERTS |
#include <cyg/infra/cyg_type.h> // types |
#include <cyg/infra/cyg_ass.h> // assertions |
#include <stddef.h> // for size_t |
#include <cyg/memalloc/dlmalloc.hxx> |
//#include <cyg/infra/diag.h> |
|
/* |
Debugging: |
|
Because freed chunks may be overwritten with link fields, this |
malloc will often die when freed memory is overwritten by user |
programs. This can be very effective (albeit in an annoying way) |
in helping track down dangling pointers. |
|
If you compile with CYGDBG_MEMALLOC_ALLOCATOR_DLMALLOC_DEBUG enabled, a |
number of assertion checks are |
enabled that will catch more memory errors. You probably won't be |
able to make much sense of the actual assertion errors, but they |
should help you locate incorrectly overwritten memory. The |
checking is fairly extensive, and will slow down execution |
noticeably. Calling get_status() with DEBUG set will |
attempt to check every allocated and free chunk in the |
  course of computing the summaries. |
|
Setting CYGDBG_MEMALLOC_ALLOCATOR_DLMALLOC_DEBUG may also be helpful if you |
are trying to modify this code. The assertions in the check routines |
spell out in more detail the assumptions and invariants underlying |
the algorithms. |
|
*/ |
|
#ifdef CYGDBG_MEMALLOC_ALLOCATOR_DLMALLOC_DEBUG |
# define ASSERT(x) CYG_ASSERTC( x ) |
#else |
# define ASSERT(x) ((void)0) |
#endif |
|
|
/* |
Define MALLOC_LOCK and MALLOC_UNLOCK to C expressions to run to |
lock and unlock the malloc data structures. MALLOC_LOCK may be |
called recursively. |
*/ |
|
#ifndef MALLOC_LOCK |
#define MALLOC_LOCK |
#endif |
|
#ifndef MALLOC_UNLOCK |
#define MALLOC_UNLOCK |
#endif |
|
/* |
INTERNAL_SIZE_T is the word-size used for internal bookkeeping |
of chunk sizes. On a 64-bit machine, you can reduce malloc |
overhead by defining INTERNAL_SIZE_T to be a 32 bit `unsigned int' |
at the expense of not being able to handle requests greater than |
2^31. This limitation is hardly ever a concern; you are encouraged |
to set this. However, the default version is the same as size_t. |
*/ |
|
#ifndef INTERNAL_SIZE_T |
#define INTERNAL_SIZE_T Cyg_Mempool_dlmalloc_Implementation::Cyg_dlmalloc_size_t |
#endif |
|
/* |
Following is needed on implementations whereby long > size_t. |
The problem is caused because the code performs subtractions of |
size_t values and stores the result in long values. In the case |
where long > size_t and the first value is actually less than |
the second value, the resultant value is positive. For example, |
(long)(x - y) where x = 0 and y is 1 ends up being 0x00000000FFFFFFFF |
  which is 2^32 - 1 instead of 0xFFFFFFFFFFFFFFFF. This is due to the |
fact that assignment from unsigned to signed won't sign extend. |
*/ |
|
#ifdef SIZE_T_SMALLER_THAN_LONG |
#define long_sub_size_t(x, y) ( (x < y) ? -((long)(y - x)) : (x - y) ); |
#else |
#define long_sub_size_t(x, y) ( (long)(x - y) ) |
#endif |
|
|
#ifdef CYGIMP_MEMALLOC_ALLOCATOR_DLMALLOC_USE_MEMCPY |
|
#include <string.h> // memcpy, memset |
|
/* The following macros are only invoked with (2n+1)-multiples of |
INTERNAL_SIZE_T units, with a positive integer n. This is exploited |
for fast inline execution when n is small. */ |
|
#define MALLOC_ZERO(charp, nbytes) \ |
do { \ |
INTERNAL_SIZE_T mzsz = (nbytes); \ |
if(mzsz <= 9*sizeof(mzsz)) { \ |
INTERNAL_SIZE_T* mz = (INTERNAL_SIZE_T*) (charp); \ |
if(mzsz >= 5*sizeof(mzsz)) { *mz++ = 0; \ |
*mz++ = 0; \ |
if(mzsz >= 7*sizeof(mzsz)) { *mz++ = 0; \ |
*mz++ = 0; \ |
if(mzsz >= 9*sizeof(mzsz)) { *mz++ = 0; \ |
*mz++ = 0; }}} \ |
*mz++ = 0; \ |
*mz++ = 0; \ |
*mz = 0; \ |
} else memset((charp), 0, mzsz); \ |
} while(0) |
|
#define MALLOC_COPY(dest,src,nbytes) \ |
do { \ |
INTERNAL_SIZE_T mcsz = (nbytes); \ |
if(mcsz <= 9*sizeof(mcsz)) { \ |
INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) (src); \ |
INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) (dest); \ |
if(mcsz >= 5*sizeof(mcsz)) { *mcdst++ = *mcsrc++; \ |
*mcdst++ = *mcsrc++; \ |
if(mcsz >= 7*sizeof(mcsz)) { *mcdst++ = *mcsrc++; \ |
*mcdst++ = *mcsrc++; \ |
if(mcsz >= 9*sizeof(mcsz)) { *mcdst++ = *mcsrc++; \ |
*mcdst++ = *mcsrc++; }}} \ |
*mcdst++ = *mcsrc++; \ |
*mcdst++ = *mcsrc++; \ |
*mcdst = *mcsrc ; \ |
} else memcpy(dest, src, mcsz); \ |
} while(0) |
|
#else /* !CYGIMP_MEMALLOC_ALLOCATOR_DLMALLOC_USE_MEMCPY */ |
|
/* Use Duff's device for good zeroing/copying performance. */ |
|
#define MALLOC_ZERO(charp, nbytes) \ |
do { \ |
INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp); \ |
long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T), mcn; \ |
if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \ |
switch (mctmp) { \ |
case 0: for(;;) { *mzp++ = 0; \ |
case 7: *mzp++ = 0; \ |
case 6: *mzp++ = 0; \ |
case 5: *mzp++ = 0; \ |
case 4: *mzp++ = 0; \ |
case 3: *mzp++ = 0; \ |
case 2: *mzp++ = 0; \ |
case 1: *mzp++ = 0; if(mcn <= 0) break; mcn--; } \ |
} \ |
} while(0) |
|
#define MALLOC_COPY(dest,src,nbytes) \ |
do { \ |
INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src; \ |
INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest; \ |
long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T), mcn; \ |
if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \ |
switch (mctmp) { \ |
case 0: for(;;) { *mcdst++ = *mcsrc++; \ |
case 7: *mcdst++ = *mcsrc++; \ |
case 6: *mcdst++ = *mcsrc++; \ |
case 5: *mcdst++ = *mcsrc++; \ |
case 4: *mcdst++ = *mcsrc++; \ |
case 3: *mcdst++ = *mcsrc++; \ |
case 2: *mcdst++ = *mcsrc++; \ |
case 1: *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; } \ |
} \ |
} while(0) |
|
#endif |
|
|
//---------------------------------------------------------------------------- |
|
/* |
malloc_chunk details: |
|
(The following includes lightly edited explanations by Colin Plumb.) |
|
Chunks of memory are maintained using a `boundary tag' method as |
described in e.g., Knuth or Standish. (See the paper by Paul |
Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a |
survey of such techniques.) Sizes of free chunks are stored both |
in the front of each chunk and at the end. This makes |
consolidating fragmented chunks into bigger chunks very fast. The |
size fields also hold bits representing whether chunks are free or |
in use. |
|
An allocated chunk looks like this: |
|
|
chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Size of previous chunk, if allocated | | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Size of chunk, in bytes |P| |
mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| User data starts here... . |
. . |
. (malloc_usable_space() bytes) . |
. | |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Size of chunk | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
|
|
Where "chunk" is the front of the chunk for the purpose of most of |
the malloc code, but "mem" is the pointer that is returned to the |
user. "Nextchunk" is the beginning of the next contiguous chunk. |
|
    Chunks always begin on even word boundaries, so the mem portion |
(which is returned to the user) is also on an even word boundary, and |
thus double-word aligned. |
|
Free chunks are stored in circular doubly-linked lists, and look like this: |
|
chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Size of previous chunk | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
`head:' | Size of chunk, in bytes |P| |
mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Forward pointer to next chunk in list | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Back pointer to previous chunk in list | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Unused space (may be 0 bytes long) . |
. . |
. | |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
`foot:' | Size of chunk, in bytes | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
|
The P (PREV_INUSE) bit, stored in the unused low-order bit of the |
chunk size (which is always a multiple of two words), is an in-use |
bit for the *previous* chunk. If that bit is *clear*, then the |
word before the current chunk size contains the previous chunk |
size, and can be used to find the front of the previous chunk. |
(The very first chunk allocated always has this bit set, |
preventing access to non-existent (or non-owned) memory.) |
|
Note that the `foot' of the current chunk is actually represented |
as the prev_size of the NEXT chunk. (This makes it easier to |
deal with alignments etc). |
|
The exception to all this is the special chunk `top', which doesn't |
bother using the trailing size field since there is no next |
contiguous chunk that would have to index off it. (After |
initialization, `top' is forced to always exist. ) |
|
Available chunks are kept in any of several places (all declared below): |
|
* `av': An array of chunks serving as bin headers for consolidated |
chunks. Each bin is doubly linked. The bins are approximately |
proportionally (log) spaced. There are a lot of these bins |
(128). This may look excessive, but works very well in |
practice. All procedures maintain the invariant that no |
consolidated chunk physically borders another one. Chunks in |
bins are kept in size order, with ties going to the |
approximately least recently used chunk. |
|
The chunks in each bin are maintained in decreasing sorted order by |
size. This is irrelevant for the small bins, which all contain |
the same-sized chunks, but facilitates best-fit allocation for |
larger chunks. (These lists are just sequential. Keeping them in |
order almost never requires enough traversal to warrant using |
fancier ordered data structures.) Chunks of the same size are |
linked with the most recently freed at the front, and allocations |
are taken from the back. This results in LRU or FIFO allocation |
order, which tends to give each chunk an equal opportunity to be |
consolidated with adjacent freed chunks, resulting in larger free |
chunks and less fragmentation. |
|
* `top': The top-most available chunk (i.e., the one bordering the |
end of available memory) is treated specially. It is never |
included in any bin, is used only if no other chunk is |
available. |
|
* `last_remainder': A bin holding only the remainder of the |
most recently split (non-top) chunk. This bin is checked |
before other non-fitting chunks, so as to provide better |
locality for runs of sequentially allocated chunks. |
|
*/ |
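/*
  For reference, a sketch of the chunk header implied by the layout
  above. This is illustration only - the real declaration of
  malloc_chunk lives in the Cyg_Mempool_dlmalloc_Implementation class
  header and may differ in qualifiers or ordering:

      struct malloc_chunk {
          INTERNAL_SIZE_T      prev_size; // size of previous chunk (only valid if it is free)
          INTERNAL_SIZE_T      size;      // size in bytes, low bit = PREV_INUSE
          struct malloc_chunk *fd;        // forward link (only used while free)
          struct malloc_chunk *bk;        // back link (only used while free)
      };
*/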
|
typedef struct Cyg_Mempool_dlmalloc_Implementation::malloc_chunk* mchunkptr; |
|
/* sizes, alignments */ |
|
#define SIZE_SZ (sizeof(INTERNAL_SIZE_T)) |
#ifndef MALLOC_ALIGNMENT |
#define MALLOC_ALIGN 8 |
#define MALLOC_ALIGNMENT (SIZE_SZ + SIZE_SZ) |
#else |
#define MALLOC_ALIGN MALLOC_ALIGNMENT |
#endif |
#define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1) |
#define MINSIZE \ |
(sizeof(struct Cyg_Mempool_dlmalloc_Implementation::malloc_chunk)) |
|
/* conversion from malloc headers to user pointers, and back */ |
|
#define chunk2mem(p) ((cyg_uint8*)((char*)(p) + 2*SIZE_SZ)) |
#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ)) |
|
/* pad request bytes into a usable size */ |
|
#define request2size(req) \ |
(((long)((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) < \ |
(long)(MINSIZE + MALLOC_ALIGN_MASK)) ? ((MINSIZE + MALLOC_ALIGN_MASK) & ~(MALLOC_ALIGN_MASK)) : \ |
(((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) & ~(MALLOC_ALIGN_MASK))) |
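/*
  Worked example of the padding arithmetic (assuming a 32-bit target
  where INTERNAL_SIZE_T is 4 bytes, so SIZE_SZ == 4, MALLOC_ALIGNMENT
  == 8, MALLOC_ALIGN_MASK == 7 and MINSIZE == 16):

      request2size(1)   ->  1+4+7 == 12 < 23, round up to MINSIZE: 16
      request2size(10)  -> 10+4+7 == 21 < 23, round up to MINSIZE: 16
      request2size(13)  -> 13+4+7 == 24, already 8-aligned:        24
      request2size(100) -> (100+4+7) & ~7                        = 104

  Every usable size is therefore a multiple of MALLOC_ALIGNMENT and at
  least MINSIZE, which the boundary-tag invariants above rely on.
*/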
|
/* Check if m has acceptable alignment */ |
|
#define aligned_OK(m) (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0) |
|
|
/* |
Physical chunk operations |
*/ |
|
|
/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */ |
|
#define PREV_INUSE 0x1 |
|
/* Bits to mask off when extracting size */ |
|
#define SIZE_BITS (PREV_INUSE) |
|
|
/* Ptr to next physical malloc_chunk. */ |
|
#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~PREV_INUSE) )) |
|
/* Ptr to previous physical malloc_chunk */ |
|
#define prev_chunk(p)\ |
((mchunkptr)( ((char*)(p)) - ((p)->prev_size) )) |
|
|
/* Treat space at ptr + offset as a chunk */ |
|
#define chunk_at_offset(p, s) ((mchunkptr)(((char*)(p)) + (s))) |
|
/* |
Dealing with use bits |
*/ |
|
/* extract p's inuse bit */ |
|
#define inuse(p)\ |
((((mchunkptr)(((char*)(p))+((p)->size & ~PREV_INUSE)))->size) & PREV_INUSE) |
|
/* extract inuse bit of previous chunk */ |
|
#define prev_inuse(p) ((p)->size & PREV_INUSE) |
|
/* set/clear chunk as in use without otherwise disturbing */ |
|
#define set_inuse(p)\ |
((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size |= PREV_INUSE |
|
#define clear_inuse(p)\ |
((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size &= ~(PREV_INUSE) |
|
/* check/set/clear inuse bits in known places */ |
|
#define inuse_bit_at_offset(p, s)\ |
(((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE) |
|
#define set_inuse_bit_at_offset(p, s)\ |
(((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE) |
|
#define clear_inuse_bit_at_offset(p, s)\ |
(((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE)) |
|
|
/* |
Dealing with size fields |
*/ |
|
/* Get size, ignoring use bits */ |
|
#define chunksize(p) ((p)->size & ~(SIZE_BITS)) |
|
/* Set size at head, without disturbing its use bit */ |
|
#define set_head_size(p, s) ((p)->size = (((p)->size & PREV_INUSE) | (s))) |
|
/* Set size/use ignoring previous bits in header */ |
|
#define set_head(p, s) ((p)->size = (s)) |
|
/* Set size at footer (only when chunk is not in use) */ |
|
#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_size = (s)) |
|
|
//---------------------------------------------------------------------------- |
|
/* |
Bins |
|
The bins, `av_' are an array of pairs of pointers serving as the |
heads of (initially empty) doubly-linked lists of chunks, laid out |
in a way so that each pair can be treated as if it were in a |
malloc_chunk. (This way, the fd/bk offsets for linking bin heads |
and chunks are the same). |
|
Bins for sizes < 512 bytes contain chunks of all the same size, spaced |
8 bytes apart. Larger bins are approximately logarithmically |
spaced. (See the table below.) The `av_' array is never mentioned |
directly in the code, but instead via bin access macros. |
|
Bin layout: |
|
64 bins of size 8 |
32 bins of size 64 |
16 bins of size 512 |
8 bins of size 4096 |
4 bins of size 32768 |
2 bins of size 262144 |
1 bin of size what's left |
|
There is actually a little bit of slop in the numbers in bin_index |
for the sake of speed. This makes no difference elsewhere. |
|
The special chunks `top' and `last_remainder' get their own bins, |
(this is implemented via yet more trickery with the av_ array), |
although `top' is never properly linked to its bin since it is |
always handled specially. |
|
*/ |
|
typedef struct Cyg_Mempool_dlmalloc_Implementation::malloc_chunk* mbinptr; |
|
/* access macros */ |
|
#define bin_at(i) ((mbinptr)((char*)&(av_[2*(i) + 2]) - 2*SIZE_SZ)) |
#define next_bin(b) ((mbinptr)((char*)(b) + 2 * sizeof(mbinptr))) |
#define prev_bin(b) ((mbinptr)((char*)(b) - 2 * sizeof(mbinptr))) |
|
/* |
The first 2 bins are never indexed. The corresponding av_ cells are instead |
used for bookkeeping. This is not to save space, but to simplify |
indexing, maintain locality, and avoid some initialization tests. |
*/ |
|
#define top (bin_at(0)->fd) /* The topmost chunk */ |
#define last_remainder (bin_at(1)) /* remainder from last split */ |
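/*
  Note (illustrative, not in the original sources): bin_at(i) points
  2*SIZE_SZ bytes before av_[2*i + 2], so when the result is viewed as
  a malloc_chunk its fd and bk fields fall exactly on the two av_
  cells of that bin:

      bin_at(i)->fd   is   av_[2*i + 2]   -- first chunk in bin i
      bin_at(i)->bk   is   av_[2*i + 3]   -- last chunk in bin i

  An empty bin is therefore one whose fd and bk both point back at the
  bin header itself, which is exactly how the IAV() initializer below
  sets the array up.
*/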
|
|
/* Helper macro to initialize bins */ |
|
#define IAV(i) bin_at(i), bin_at(i) |
|
#ifndef CYGIMP_MEMALLOC_ALLOCATOR_DLMALLOC_SAFE_MULTIPLE |
static mbinptr av_[CYGPRI_MEMALLOC_ALLOCATOR_DLMALLOC_NAV * 2 + 2] = { |
0, 0, |
IAV(0), IAV(1), IAV(2), IAV(3), IAV(4), IAV(5), IAV(6), IAV(7), |
IAV(8), IAV(9), IAV(10), IAV(11), IAV(12), IAV(13), IAV(14), IAV(15), |
IAV(16), IAV(17), IAV(18), IAV(19), IAV(20), IAV(21), IAV(22), IAV(23), |
IAV(24), IAV(25), IAV(26), IAV(27), IAV(28), IAV(29), IAV(30), IAV(31), |
IAV(32), IAV(33), IAV(34), IAV(35), IAV(36), IAV(37), IAV(38), IAV(39), |
IAV(40), IAV(41), IAV(42), IAV(43), IAV(44), IAV(45), IAV(46), IAV(47), |
IAV(48), IAV(49), IAV(50), IAV(51), IAV(52), IAV(53), IAV(54), IAV(55), |
IAV(56), IAV(57), IAV(58), IAV(59), IAV(60), IAV(61), IAV(62), IAV(63), |
IAV(64), IAV(65), IAV(66), IAV(67), IAV(68), IAV(69), IAV(70), IAV(71), |
IAV(72), IAV(73), IAV(74), IAV(75), IAV(76), IAV(77), IAV(78), IAV(79), |
IAV(80), IAV(81), IAV(82), IAV(83), IAV(84), IAV(85), IAV(86), IAV(87), |
IAV(88), IAV(89), IAV(90), IAV(91), IAV(92), IAV(93), IAV(94), IAV(95), |
IAV(96), IAV(97), IAV(98), IAV(99), IAV(100), IAV(101), IAV(102), IAV(103), |
IAV(104), IAV(105), IAV(106), IAV(107), IAV(108), IAV(109), IAV(110), IAV(111), |
IAV(112), IAV(113), IAV(114), IAV(115), IAV(116), IAV(117), IAV(118), IAV(119), |
IAV(120), IAV(121), IAV(122), IAV(123), IAV(124), IAV(125), IAV(126), IAV(127) |
}; |
#endif |
|
/* field-extraction macros */ |
|
#define first(b) ((b)->fd) |
#define last(b) ((b)->bk) |
|
/* |
Indexing into bins |
*/ |
|
#define bin_index(sz) \ |
(((((unsigned long)(sz)) >> 9) == 0) ? (((unsigned long)(sz)) >> 3): \ |
((((unsigned long)(sz)) >> 9) <= 4) ? 56 + (((unsigned long)(sz)) >> 6): \ |
((((unsigned long)(sz)) >> 9) <= 20) ? 91 + (((unsigned long)(sz)) >> 9): \ |
((((unsigned long)(sz)) >> 9) <= 84) ? 110 + (((unsigned long)(sz)) >> 12): \ |
((((unsigned long)(sz)) >> 9) <= 340) ? 119 + (((unsigned long)(sz)) >> 15): \ |
((((unsigned long)(sz)) >> 9) <= 1364) ? 124 + (((unsigned long)(sz)) >> 18): \ |
126) |
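/*
  Worked examples (sizes are already padded, i.e. the output of
  request2size above):

      bin_index(40)   -> 40>>9 == 0,            so 40>>3          =   5
      bin_index(104)  -> 104>>9 == 0,           so 104>>3         =  13
      bin_index(600)  -> 600>>9 == 1  (<= 4),   so 56 + (600>>6)  =  65
      bin_index(5000) -> 5000>>9 == 9 (<= 20),  so 91 + (5000>>9) = 100

  Small sizes map directly onto the 8-byte-wide small bins; larger
  sizes land in progressively wider, logarithmically spaced bins.
*/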
/* |
bins for chunks < 512 are all spaced SMALLBIN_WIDTH bytes apart, and hold |
identically sized chunks. This is exploited in malloc. |
*/ |
|
#define MAX_SMALLBIN_SIZE 512 |
#define SMALLBIN_WIDTH 8 |
#define SMALLBIN_WIDTH_BITS 3 |
#define MAX_SMALLBIN (MAX_SMALLBIN_SIZE / SMALLBIN_WIDTH) - 1 |
|
#define smallbin_index(sz) (((unsigned long)(sz)) >> SMALLBIN_WIDTH_BITS) |
|
/* |
Requests are `small' if both the corresponding and the next bin are small |
*/ |
|
#define is_small_request(nb) (nb < MAX_SMALLBIN_SIZE - SMALLBIN_WIDTH) |
|
/* |
To help compensate for the large number of bins, a one-level index |
structure is used for bin-by-bin searching. `binblocks' is a |
one-word bitvector recording whether groups of BINBLOCKWIDTH bins |
have any (possibly) non-empty bins, so they can be skipped over |
  all at once during traversals. The bits are NOT always |
cleared as soon as all bins in a block are empty, but instead only |
when all are noticed to be empty during traversal in malloc. |
*/ |
|
#define BINBLOCKWIDTH 4 /* bins per block */ |
|
#define binblocks (bin_at(0)->size) /* bitvector of nonempty blocks */ |
|
/* bin<->block macros */ |
|
#define idx2binblock(ix) ((unsigned long)1 << (ix / BINBLOCKWIDTH)) |
#define mark_binblock(ii) (binblocks |= idx2binblock(ii)) |
#define clear_binblock(ii) (binblocks &= ~(idx2binblock(ii))) |
|
|
//---------------------------------------------------------------------------- |
|
/* |
Debugging support |
*/ |
|
#ifdef CYGDBG_MEMALLOC_ALLOCATOR_DLMALLOC_DEBUG |
|
/* |
These routines make a number of assertions about the states |
of data structures that should be true at all times. If any |
are not true, it's very likely that a user program has somehow |
trashed memory. (It's also possible that there is a coding error |
in malloc. In which case, please report it!) |
*/ |
|
void |
Cyg_Mempool_dlmalloc_Implementation::do_check_chunk( mchunkptr p ) |
{ |
INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE; |
|
/* Check for legal address ... */ |
ASSERT((cyg_uint8 *)p >= arenabase); |
if (p != top) |
ASSERT((cyg_uint8 *)p + sz <= (cyg_uint8 *)top); |
else |
ASSERT((cyg_uint8 *)p + sz <= arenabase + arenasize); |
|
} // Cyg_Mempool_dlmalloc_Implementation::do_check_chunk() |
|
|
void |
Cyg_Mempool_dlmalloc_Implementation::do_check_free_chunk(mchunkptr p) |
{ |
INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE; |
mchunkptr next = chunk_at_offset(p, sz); |
|
do_check_chunk(p); |
|
/* Check whether it claims to be free ... */ |
ASSERT(!inuse(p)); |
|
/* Unless a special marker, must have OK fields */ |
if ((long)sz >= (long)MINSIZE) |
{ |
ASSERT((sz & MALLOC_ALIGN_MASK) == 0); |
ASSERT(aligned_OK(chunk2mem(p))); |
/* ... matching footer field */ |
ASSERT(next->prev_size == sz); |
/* ... and is fully consolidated */ |
ASSERT(prev_inuse(p)); |
ASSERT (next == top || inuse(next)); |
|
/* ... and has minimally sane links */ |
ASSERT(p->fd->bk == p); |
ASSERT(p->bk->fd == p); |
} |
else /* markers are always of size SIZE_SZ */ |
ASSERT(sz == SIZE_SZ); |
} // Cyg_Mempool_dlmalloc_Implementation::do_check_free_chunk() |
|
void |
Cyg_Mempool_dlmalloc_Implementation::do_check_inuse_chunk(mchunkptr p) |
{ |
mchunkptr next = next_chunk(p); |
do_check_chunk(p); |
|
/* Check whether it claims to be in use ... */ |
ASSERT(inuse(p)); |
|
/* ... and is surrounded by OK chunks. |
Since more things can be checked with free chunks than inuse ones, |
if an inuse chunk borders them and debug is on, it's worth doing them. |
*/ |
if (!prev_inuse(p)) |
{ |
mchunkptr prv = prev_chunk(p); |
ASSERT(next_chunk(prv) == p); |
do_check_free_chunk(prv); |
} |
if (next == top) |
{ |
ASSERT(prev_inuse(next)); |
ASSERT(chunksize(next) >= MINSIZE); |
} |
else if (!inuse(next)) |
do_check_free_chunk(next); |
|
} // Cyg_Mempool_dlmalloc_Implementation::do_check_inuse_chunk() |
|
void |
Cyg_Mempool_dlmalloc_Implementation::do_check_malloced_chunk(mchunkptr p, |
INTERNAL_SIZE_T s) |
{ |
INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE; |
long room = long_sub_size_t(sz, s); |
|
do_check_inuse_chunk(p); |
|
/* Legal size ... */ |
ASSERT((long)sz >= (long)MINSIZE); |
ASSERT((sz & MALLOC_ALIGN_MASK) == 0); |
ASSERT(room >= 0); |
ASSERT(room < (long)MINSIZE); |
|
/* ... and alignment */ |
ASSERT(aligned_OK(chunk2mem(p))); |
|
|
/* ... and was allocated at front of an available chunk */ |
ASSERT(prev_inuse(p)); |
|
} // Cyg_Mempool_dlmalloc_Implementation::do_check_malloced_chunk() |
|
|
#define check_free_chunk(P) do_check_free_chunk(P) |
#define check_inuse_chunk(P) do_check_inuse_chunk(P) |
#define check_chunk(P) do_check_chunk(P) |
#define check_malloced_chunk(P,N) do_check_malloced_chunk(P,N) |
#else |
#define check_free_chunk(P) |
#define check_inuse_chunk(P) |
#define check_chunk(P) |
#define check_malloced_chunk(P,N) |
#endif |
|
|
//---------------------------------------------------------------------------- |
|
/* |
Macro-based internal utilities |
*/ |
|
|
/* |
Linking chunks in bin lists. |
Call these only with variables, not arbitrary expressions, as arguments. |
*/ |
|
/* |
Place chunk p of size s in its bin, in size order, |
putting it ahead of others of same size. |
*/ |
|
|
#define frontlink(P, S, IDX, BK, FD) \ |
{ \ |
if (S < MAX_SMALLBIN_SIZE) \ |
{ \ |
IDX = smallbin_index(S); \ |
mark_binblock(IDX); \ |
BK = bin_at(IDX); \ |
FD = BK->fd; \ |
P->bk = BK; \ |
P->fd = FD; \ |
FD->bk = BK->fd = P; \ |
} \ |
else \ |
{ \ |
IDX = bin_index(S); \ |
BK = bin_at(IDX); \ |
FD = BK->fd; \ |
if (FD == BK) mark_binblock(IDX); \ |
else \ |
{ \ |
while (FD != BK && S < chunksize(FD)) FD = FD->fd; \ |
BK = FD->bk; \ |
} \ |
P->bk = BK; \ |
P->fd = FD; \ |
FD->bk = BK->fd = P; \ |
} \ |
} |
|
|
/* take a chunk off a list */ |
|
#define unlink(P, BK, FD) \ |
{ \ |
BK = P->bk; \ |
FD = P->fd; \ |
FD->bk = BK; \ |
BK->fd = FD; \ |
} \ |
|
/* Place p as the last remainder */ |
|
#define link_last_remainder(P) \ |
{ \ |
last_remainder->fd = last_remainder->bk = P; \ |
P->fd = P->bk = last_remainder; \ |
} |
|
/* Clear the last_remainder bin */ |
|
#define clear_last_remainder \ |
(last_remainder->fd = last_remainder->bk = last_remainder) |
|
|
//---------------------------------------------------------------------------- |
|
Cyg_Mempool_dlmalloc_Implementation::Cyg_Mempool_dlmalloc_Implementation( |
cyg_uint8 *base, cyg_int32 size, |
CYG_ADDRWORD /* argthru */ ) |
{ |
arenabase = base; |
arenasize = size; |
|
CYG_ADDRESS front_misalign; |
cyg_int32 correction; |
|
#ifdef CYGIMP_MEMALLOC_ALLOCATOR_DLMALLOC_SAFE_MULTIPLE |
cyg_ucount16 i; |
av_[0] = av_[1] = 0; |
for (i=0; i < CYGPRI_MEMALLOC_ALLOCATOR_DLMALLOC_NAV; i++) { |
av_[ i*2+2 ] = av_[ i*2+3 ] = bin_at(i); |
} // for |
|
#elif defined(CYGDBG_USE_ASSERTS) |
static int instances; |
if ( ++instances > 1 ) |
CYG_FAIL( "Multiple dlmalloc instances but " |
"CYGIMP_MEMALLOC_ALLOCATOR_DLMALLOC_SAFE_MULTIPLE " |
"not defined" ); |
#endif |
|
front_misalign = (CYG_ADDRESS)chunk2mem(base) & MALLOC_ALIGN_MASK; |
|
if ( front_misalign > 0 ) { |
correction = (MALLOC_ALIGNMENT) - front_misalign; |
} else { |
correction = 0; |
} |
|
// too small to be useful? |
if ( correction + 2*MALLOC_ALIGNMENT > (unsigned) size ) |
// help catch errors. Don't fail now. |
arenabase = NULL; |
else { |
top = (mchunkptr)(base + correction); |
set_head(top, arenasize | PREV_INUSE); |
} |
} |
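// Illustrative usage sketch (not part of the original file). The pools
// are normally constructed by the generated heaps.cxx and used via
// malloc.cxx further below, but driven directly the class would be
// used roughly like this (arena size chosen arbitrarily):
//
//     static cyg_uint8 arena[ 16384 ];
//     Cyg_Mempool_dlmalloc_Implementation pool( arena, sizeof(arena), 0 );
//
//     cyg_uint8 *p = pool.try_alloc( 100 );   // NULL if it cannot fit
//     if ( NULL != p )
//         pool.free( p, 100 );                // size argument is ignored here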
|
//---------------------------------------------------------------------------- |
|
/* Main public routines */ |
|
/* |
Malloc Algorithm: |
|
The requested size is first converted into a usable form, `nb'. |
This currently means to add 4 bytes overhead plus possibly more to |
obtain 8-byte alignment and/or to obtain a size of at least |
MINSIZE (currently 16 bytes), the smallest allocatable size. |
(All fits are considered `exact' if they are within MINSIZE bytes.) |
|
From there, the first successful of the following steps is taken: |
|
1. The bin corresponding to the request size is scanned, and if |
a chunk of exactly the right size is found, it is taken. |
|
2. The most recently remaindered chunk is used if it is big |
enough. This is a form of (roving) first fit, used only in |
the absence of exact fits. Runs of consecutive requests use |
the remainder of the chunk used for the previous such request |
whenever possible. This limited use of a first-fit style |
allocation strategy tends to give contiguous chunks |
coextensive lifetimes, which improves locality and can reduce |
fragmentation in the long run. |
|
3. Other bins are scanned in increasing size order, using a |
chunk big enough to fulfill the request, and splitting off |
any remainder. This search is strictly by best-fit; i.e., |
the smallest (with ties going to approximately the least |
recently used) chunk that fits is selected. |
|
4. If large enough, the chunk bordering the end of memory |
(`top') is split off. (This use of `top' is in accord with |
the best-fit search rule. In effect, `top' is treated as |
larger (and thus less well fitting) than any other available |
chunk since it can be extended to be as large as necessary |
      (up to system limitations).) |
|
    All allocations are made from the `lowest' part of any found |
chunk. (The implementation invariant is that prev_inuse is |
always true of any allocated chunk; i.e., that each allocated |
chunk borders either a previously allocated and still in-use chunk, |
or the base of its memory arena.) |
|
*/ |
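/*
  Worked trace (illustrative, using the 32-bit sizes assumed in the
  examples above) for a 100 byte request:

      nb  = request2size(100)   = 104
      is_small_request(104)     -> true (104 < 504)
      idx = smallbin_index(104) = 13

  Step 1 therefore looks only at bin 13 (and, with the default 8 byte
  alignment, bin 14, whose chunks would leave a remainder < MINSIZE);
  if both are empty the search falls through to last_remainder and
  then to the block-wise scan starting at bin 15, exactly as coded in
  try_alloc() below.
*/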
|
cyg_uint8 * |
Cyg_Mempool_dlmalloc_Implementation::try_alloc( cyg_int32 bytes ) |
{ |
mchunkptr victim; /* inspected/selected chunk */ |
INTERNAL_SIZE_T victim_size; /* its size */ |
int idx; /* index for bin traversal */ |
mbinptr bin; /* associated bin */ |
mchunkptr remainder; /* remainder from a split */ |
long remainder_size; /* its size */ |
int remainder_index; /* its bin index */ |
unsigned long block; /* block traverser bit */ |
int startidx; /* first bin of a traversed block */ |
mchunkptr fwd; /* misc temp for linking */ |
mchunkptr bck; /* misc temp for linking */ |
mbinptr q; /* misc temp */ |
|
INTERNAL_SIZE_T nb; |
|
/* Allow uninitialised (zero sized) heaps because they could exist as a |
* quirk of the MLT setup where a dynamically sized heap is at the top of |
* memory. */ |
if (NULL==arenabase) return NULL; |
|
if ((long)bytes < 0) return 0; |
|
nb = request2size(bytes); /* padded request size; */ |
|
MALLOC_LOCK; |
|
/* Check for exact match in a bin */ |
|
if (is_small_request(nb)) /* Faster version for small requests */ |
{ |
idx = smallbin_index(nb); |
|
/* No traversal or size check necessary for small bins. */ |
|
q = bin_at(idx); |
victim = last(q); |
|
#if MALLOC_ALIGN != 16 |
/* Also scan the next one, since it would have a remainder < MINSIZE */ |
if (victim == q) |
{ |
q = next_bin(q); |
victim = last(q); |
} |
#endif |
if (victim != q) |
{ |
victim_size = chunksize(victim); |
unlink(victim, bck, fwd); |
set_inuse_bit_at_offset(victim, victim_size); |
check_malloced_chunk(victim, nb); |
MALLOC_UNLOCK; |
return chunk2mem(victim); |
} |
|
idx += 2; /* Set for bin scan below. We've already scanned 2 bins. */ |
|
} |
else |
{ |
idx = bin_index(nb); |
bin = bin_at(idx); |
|
for (victim = last(bin); victim != bin; victim = victim->bk) |
{ |
victim_size = chunksize(victim); |
remainder_size = long_sub_size_t(victim_size, nb); |
|
if (remainder_size >= (long)MINSIZE) /* too big */ |
{ |
--idx; /* adjust to rescan below after checking last remainder */ |
break; |
} |
|
else if (remainder_size >= 0) /* exact fit */ |
{ |
unlink(victim, bck, fwd); |
set_inuse_bit_at_offset(victim, victim_size); |
check_malloced_chunk(victim, nb); |
MALLOC_UNLOCK; |
return chunk2mem(victim); |
} |
} |
|
++idx; |
|
} |
|
/* Try to use the last split-off remainder */ |
|
if ( (victim = last_remainder->fd) != last_remainder) |
{ |
victim_size = chunksize(victim); |
remainder_size = long_sub_size_t(victim_size, nb); |
|
if (remainder_size >= (long)MINSIZE) /* re-split */ |
{ |
remainder = chunk_at_offset(victim, nb); |
set_head(victim, nb | PREV_INUSE); |
link_last_remainder(remainder); |
set_head(remainder, remainder_size | PREV_INUSE); |
set_foot(remainder, remainder_size); |
check_malloced_chunk(victim, nb); |
MALLOC_UNLOCK; |
return chunk2mem(victim); |
} |
|
clear_last_remainder; |
|
if (remainder_size >= 0) /* exhaust */ |
{ |
set_inuse_bit_at_offset(victim, victim_size); |
check_malloced_chunk(victim, nb); |
MALLOC_UNLOCK; |
return chunk2mem(victim); |
} |
|
/* Else place in bin */ |
|
frontlink(victim, victim_size, remainder_index, bck, fwd); |
} |
|
/* |
If there are any possibly nonempty big-enough blocks, |
search for best fitting chunk by scanning bins in blockwidth units. |
*/ |
|
if ( (block = idx2binblock(idx)) <= binblocks) |
{ |
|
/* Get to the first marked block */ |
|
if ( (block & binblocks) == 0) |
{ |
/* force to an even block boundary */ |
idx = (idx & ~(BINBLOCKWIDTH - 1)) + BINBLOCKWIDTH; |
block <<= 1; |
while ((block & binblocks) == 0) |
{ |
idx += BINBLOCKWIDTH; |
block <<= 1; |
} |
} |
|
/* For each possibly nonempty block ... */ |
for (;;) |
{ |
startidx = idx; /* (track incomplete blocks) */ |
q = bin = bin_at(idx); |
|
/* For each bin in this block ... */ |
do |
{ |
/* Find and use first big enough chunk ... */ |
|
for (victim = last(bin); victim != bin; victim = victim->bk) |
{ |
victim_size = chunksize(victim); |
remainder_size = long_sub_size_t(victim_size, nb); |
|
if (remainder_size >= (long)MINSIZE) /* split */ |
{ |
remainder = chunk_at_offset(victim, nb); |
set_head(victim, nb | PREV_INUSE); |
unlink(victim, bck, fwd); |
link_last_remainder(remainder); |
set_head(remainder, remainder_size | PREV_INUSE); |
set_foot(remainder, remainder_size); |
check_malloced_chunk(victim, nb); |
MALLOC_UNLOCK; |
return chunk2mem(victim); |
} |
|
else if (remainder_size >= 0) /* take */ |
{ |
set_inuse_bit_at_offset(victim, victim_size); |
unlink(victim, bck, fwd); |
check_malloced_chunk(victim, nb); |
MALLOC_UNLOCK; |
return chunk2mem(victim); |
} |
|
} |
|
bin = next_bin(bin); |
|
#if MALLOC_ALIGN == 16 |
if (idx < MAX_SMALLBIN) |
{ |
bin = next_bin(bin); |
++idx; |
} |
#endif |
} while ((++idx & (BINBLOCKWIDTH - 1)) != 0); |
|
/* Clear out the block bit. */ |
|
do /* Possibly backtrack to try to clear a partial block */ |
{ |
if ((startidx & (BINBLOCKWIDTH - 1)) == 0) |
{ |
binblocks &= ~block; |
break; |
} |
--startidx; |
q = prev_bin(q); |
} while (first(q) == q); |
|
/* Get to the next possibly nonempty block */ |
|
if ( (block <<= 1) <= binblocks && (block != 0) ) |
{ |
while ((block & binblocks) == 0) |
{ |
idx += BINBLOCKWIDTH; |
block <<= 1; |
} |
} |
else |
break; |
} |
} |
|
|
/* Try to use top chunk */ |
|
/* Require that there be a remainder, ensuring top always exists */ |
remainder_size = long_sub_size_t(chunksize(top), nb); |
if (chunksize(top) < nb || remainder_size < (long)MINSIZE) |
{ |
//diag_printf("chunksize(top)=%ld, nb=%d, remainder=%ld\n", chunksize(top), |
// nb, remainder_size); |
MALLOC_UNLOCK; |
return NULL; /* propagate failure */ |
} |
|
victim = top; |
set_head(victim, nb | PREV_INUSE); |
top = chunk_at_offset(victim, nb); |
set_head(top, remainder_size | PREV_INUSE); |
check_malloced_chunk(victim, nb); |
MALLOC_UNLOCK; |
return chunk2mem(victim); |
|
} // Cyg_Mempool_dlmalloc_Implementation::try_alloc() |
|
//---------------------------------------------------------------------------- |
|
/* |
free() algorithm : |
|
cases: |
|
1. free(NULL) has no effect. |
|
2. Chunks are consolidated as they arrive, and |
placed in corresponding bins. (This includes the case of |
consolidating with the current `last_remainder'). |
*/ |
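/*
  Illustrative scenario: a 104 byte chunk is freed, its physical
  predecessor (a 24 byte chunk already sitting free in a bin) has left
  our PREV_INUSE bit clear, and the following chunk is in use. free()
  then:

      - clears the in-use bit that the next chunk holds for us,
      - walks back prev_size == 24 bytes and unlinks the predecessor,
      - stamps the merged 128 byte size into the new head and foot,
      - frontlinks the merged chunk into bin_index(128) == 16.

  Had the following chunk been `top', the merged chunk would instead
  simply have become the new `top' without being binned.
*/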
|
cyg_bool |
Cyg_Mempool_dlmalloc_Implementation::free( cyg_uint8 *mem, cyg_int32 ) |
{ |
mchunkptr p; /* chunk corresponding to mem */ |
INTERNAL_SIZE_T hd; /* its head field */ |
INTERNAL_SIZE_T sz; /* its size */ |
int idx; /* its bin index */ |
mchunkptr next; /* next contiguous chunk */ |
INTERNAL_SIZE_T nextsz; /* its size */ |
INTERNAL_SIZE_T prevsz; /* size of previous contiguous chunk */ |
mchunkptr bck; /* misc temp for linking */ |
mchunkptr fwd; /* misc temp for linking */ |
int islr; /* track whether merging with last_remainder */ |
|
if (mem == NULL) /* free(NULL) has no effect */ |
return false; |
|
MALLOC_LOCK; |
|
p = mem2chunk(mem); |
hd = p->size; |
|
check_inuse_chunk(p); |
|
sz = hd & ~PREV_INUSE; |
next = chunk_at_offset(p, sz); |
nextsz = chunksize(next); |
|
if (next == top) /* merge with top */ |
{ |
sz += nextsz; |
|
if (!(hd & PREV_INUSE)) /* consolidate backward */ |
{ |
prevsz = p->prev_size; |
p = chunk_at_offset(p, -((long) prevsz)); |
sz += prevsz; |
unlink(p, bck, fwd); |
} |
|
set_head(p, sz | PREV_INUSE); |
top = p; |
MALLOC_UNLOCK; |
return true; |
} |
|
set_head(next, nextsz); /* clear inuse bit */ |
|
islr = 0; |
|
if (!(hd & PREV_INUSE)) /* consolidate backward */ |
{ |
prevsz = p->prev_size; |
p = chunk_at_offset(p, -((long) prevsz)); |
sz += prevsz; |
|
if (p->fd == last_remainder) /* keep as last_remainder */ |
islr = 1; |
else |
unlink(p, bck, fwd); |
} |
|
if (!(inuse_bit_at_offset(next, nextsz))) /* consolidate forward */ |
{ |
sz += nextsz; |
|
if (!islr && next->fd == last_remainder) /* re-insert last_remainder */ |
{ |
islr = 1; |
link_last_remainder(p); |
} |
else |
unlink(next, bck, fwd); |
} |
|
|
set_head(p, sz | PREV_INUSE); |
set_foot(p, sz); |
if (!islr) |
frontlink(p, sz, idx, bck, fwd); |
|
MALLOC_UNLOCK; |
|
return true; |
} // Cyg_Mempool_dlmalloc_Implementation::free() |
|
//---------------------------------------------------------------------------- |
|
// Resize an existing allocation. If poldsize is non-NULL, the previous |
// allocation size is placed into it; if the previous size is not |
// available, it is set to 0. NB the previous allocation size may have |
// been rounded up. Occasionally the allocation can be adjusted |
// *backwards* as well as, or instead of, forwards, so the address of |
// the resized allocation is returned, or NULL if no resizing was |
// possible. Note that this differs from ::realloc() in that no attempt |
// is made to call malloc() if resizing is not possible - that is left |
// to higher layers. The data is copied from old to new though. |
// The effects of oldmem==NULL or bytes==0 are undefined. |
|
|
// DOCUMENTATION FROM ORIGINAL FILE: |
// (some now irrelevant parts elided) |
/* |
Realloc algorithm: |
|
If the reallocation is for additional space, and the |
chunk can be extended, it is, else a malloc-copy-free sequence is |
taken. There are several different ways that a chunk could be |
extended. All are tried: |
|
* Extending forward into following adjacent free chunk. |
* Shifting backwards, joining preceding adjacent space |
* Both shifting backwards and extending forward. |
|
If the reallocation is for less space, and the new request is for |
a `small' (<512 bytes) size, then the newly unused space is lopped |
off and freed. |
|
The old unix realloc convention of allowing the last-free'd chunk |
to be used as an argument to realloc is no longer supported. |
I don't know of any programs still relying on this feature, |
and allowing it would also allow too many other incorrect |
usages of realloc to be sensible. |
*/ |
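// Illustrative caller sketch (not from the original source; `pool' is
// a hypothetical Cyg_Mempool_dlmalloc_Implementation instance) showing
// the contract described above - resize in place if possible, and let
// the caller fall back to allocate-copy-free itself, as ::realloc() in
// malloc.cxx further below does:
//
//     cyg_int32 oldsize;
//     cyg_uint8 *np = pool.resize_alloc( p, newbytes, &oldsize );
//     if ( NULL == np ) {
//         // in-place resize failed; oldsize holds the previous
//         // (rounded up) allocation size, or 0 if unknown
//     }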
|
cyg_uint8 * |
Cyg_Mempool_dlmalloc_Implementation::resize_alloc( cyg_uint8 *oldmem, |
cyg_int32 bytes, |
cyg_int32 *poldsize ) |
{ |
|
INTERNAL_SIZE_T nb; /* padded request size */ |
|
mchunkptr oldp; /* chunk corresponding to oldmem */ |
INTERNAL_SIZE_T oldsize; /* its size */ |
|
mchunkptr newp; /* chunk to return */ |
INTERNAL_SIZE_T newsize; /* its size */ |
cyg_uint8* newmem; /* corresponding user mem */ |
|
mchunkptr next; /* next contiguous chunk after oldp */ |
INTERNAL_SIZE_T nextsize; /* its size */ |
|
mchunkptr prev; /* previous contiguous chunk before oldp */ |
INTERNAL_SIZE_T prevsize; /* its size */ |
|
mchunkptr remainder; /* holds split off extra space from newp */ |
INTERNAL_SIZE_T remainder_size; /* its size */ |
|
mchunkptr bck; /* misc temp for linking */ |
mchunkptr fwd; /* misc temp for linking */ |
|
MALLOC_LOCK; |
|
newp = oldp = mem2chunk(oldmem); |
newsize = oldsize = chunksize(oldp); |
|
if (NULL != poldsize) |
*poldsize = oldsize - SIZE_SZ; |
|
nb = request2size(bytes); |
|
check_inuse_chunk(oldp); |
|
if ((long)(oldsize) < (long)(nb)) |
{ |
|
/* Try expanding forward */ |
|
next = chunk_at_offset(oldp, oldsize); |
if (next == top || !inuse(next)) |
{ |
nextsize = chunksize(next); |
|
/* Forward into top only if a remainder */ |
if (next == top) |
{ |
if ((long)(nextsize + newsize) >= (long)(nb + MINSIZE)) |
{ |
newsize += nextsize; |
top = chunk_at_offset(oldp, nb); |
set_head(top, (newsize - nb) | PREV_INUSE); |
set_head_size(oldp, nb); |
MALLOC_UNLOCK; |
return chunk2mem(oldp); |
} |
} |
|
/* Forward into next chunk */ |
else if (((long)(nextsize + newsize) >= (long)(nb))) |
{ |
unlink(next, bck, fwd); |
newsize += nextsize; |
goto split; |
} |
} |
else |
{ |
next = 0; |
nextsize = 0; |
} |
|
/* Try shifting backwards. */ |
|
if (!prev_inuse(oldp)) |
{ |
prev = prev_chunk(oldp); |
prevsize = chunksize(prev); |
|
/* try forward + backward first to save a later consolidation */ |
|
if (next != 0) |
{ |
/* into top */ |
if (next == top) |
{ |
if ((long)(nextsize + prevsize + newsize) >= (long)(nb + MINSIZE)) |
{ |
unlink(prev, bck, fwd); |
newp = prev; |
newsize += prevsize + nextsize; |
newmem = chunk2mem(newp); |
MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ); |
top = chunk_at_offset(newp, nb); |
set_head(top, (newsize - nb) | PREV_INUSE); |
set_head_size(newp, nb); |
MALLOC_UNLOCK; |
return newmem; |
} |
} |
|
/* into next chunk */ |
else if (((long)(nextsize + prevsize + newsize) >= (long)(nb))) |
{ |
unlink(next, bck, fwd); |
unlink(prev, bck, fwd); |
newp = prev; |
newsize += nextsize + prevsize; |
newmem = chunk2mem(newp); |
MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ); |
goto split; |
} |
} |
|
/* backward only */ |
if (prev != 0 && (long)(prevsize + newsize) >= (long)nb) |
{ |
unlink(prev, bck, fwd); |
newp = prev; |
newsize += prevsize; |
newmem = chunk2mem(newp); |
MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ); |
goto split; |
} |
} |
|
// couldn't resize the allocation any direction, so return failure |
MALLOC_UNLOCK; |
return NULL; |
} |
|
|
split: /* split off extra room in old or expanded chunk */ |
|
remainder_size = long_sub_size_t(newsize, nb); |
|
if (remainder_size >= (long)MINSIZE) /* split off remainder */ |
{ |
remainder = chunk_at_offset(newp, nb); |
set_head_size(newp, nb); |
set_head(remainder, remainder_size | PREV_INUSE); |
set_inuse_bit_at_offset(remainder, remainder_size); |
/* let free() deal with it */ |
Cyg_Mempool_dlmalloc_Implementation::free( chunk2mem(remainder) ); |
} |
else |
{ |
set_head_size(newp, newsize); |
set_inuse_bit_at_offset(newp, newsize); |
} |
|
check_inuse_chunk(newp); |
MALLOC_UNLOCK; |
return chunk2mem(newp); |
|
} // Cyg_Mempool_dlmalloc_Implementation::resize_alloc() |
|
//---------------------------------------------------------------------------- |
|
// Get memory pool status |
// flags is a bitmask of requested fields to fill in. The flags are |
// defined in common.hxx |
void |
Cyg_Mempool_dlmalloc_Implementation::get_status( |
cyg_mempool_status_flag_t flags, |
Cyg_Mempool_Status &status ) |
{ |
if (0 != (flags&(CYG_MEMPOOL_STAT_FREEBLOCKS|CYG_MEMPOOL_STAT_TOTALFREE| |
CYG_MEMPOOL_STAT_TOTALALLOCATED|CYG_MEMPOOL_STAT_MAXFREE))) |
{ |
int i; |
mbinptr b; |
mchunkptr p; |
cyg_int32 chunksizep; |
cyg_int32 maxfree; |
#ifdef CYGDBG_MEMALLOC_ALLOCATOR_DLMALLOC_DEBUG |
mchunkptr q; |
#endif |
|
INTERNAL_SIZE_T avail = chunksize(top); |
int navail = ((long)(avail) >= (long)MINSIZE)? 1 : 0; |
maxfree = avail; |
|
for (i = 1; i < CYGPRI_MEMALLOC_ALLOCATOR_DLMALLOC_NAV; ++i) { |
b = bin_at(i); |
for (p = last(b); p != b; p = p->bk) { |
#ifdef CYGDBG_MEMALLOC_ALLOCATOR_DLMALLOC_DEBUG |
check_free_chunk(p); |
for (q = next_chunk(p); |
(q < top) && inuse(q) && |
(long)(chunksize(q)) >= (long)MINSIZE; |
q = next_chunk(q)) |
check_inuse_chunk(q); |
#endif |
chunksizep = chunksize(p); |
avail += chunksizep; |
if ( chunksizep > maxfree ) |
maxfree = chunksizep; |
navail++; |
} |
} |
|
if ( 0 != (flags & CYG_MEMPOOL_STAT_TOTALALLOCATED) ) |
status.totalallocated = arenasize - avail; |
// as quick or quicker to just set most of these, rather than |
// test flag first |
status.totalfree = (avail & ~(MALLOC_ALIGN_MASK)) - SIZE_SZ - MINSIZE; |
CYG_ASSERT( ((avail + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK) |
>= MINSIZE, "free mem negative!" ); |
status.freeblocks = navail; |
status.maxfree = (maxfree & ~(MALLOC_ALIGN_MASK)) - SIZE_SZ - MINSIZE; |
//diag_printf("raw mf: %d, ret mf: %d\n", maxfree, status.maxfree); |
CYG_ASSERT( ((maxfree + SIZE_SZ + MALLOC_ALIGN_MASK) & |
~MALLOC_ALIGN_MASK) >= MINSIZE, |
"max free block size negative!" ); |
} // if |
|
// as quick or quicker to just set most of these, rather than |
// test flag first |
status.arenabase = status.origbase = arenabase; |
status.arenasize = status.origsize = arenasize; |
status.maxoverhead = MINSIZE + MALLOC_ALIGNMENT; |
|
} // Cyg_Mempool_dlmalloc_Implementation::get_status() |
|
|
//---------------------------------------------------------------------------- |
|
// EOF dlmalloc.cxx |
/common/v2_0/src/heapgen.cpp
0,0 → 1,72
/*======================================================================== |
// |
// heapgen.cpp |
// |
// Helper file for heapgen.tcl |
// |
//======================================================================== |
//####ECOSGPLCOPYRIGHTBEGIN#### |
// ------------------------------------------- |
// This file is part of eCos, the Embedded Configurable Operating System. |
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc. |
// |
// eCos is free software; you can redistribute it and/or modify it under |
// the terms of the GNU General Public License as published by the Free |
// Software Foundation; either version 2 or (at your option) any later version. |
// |
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY |
// WARRANTY; without even the implied warranty of MERCHANTABILITY or |
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
// for more details. |
// |
// You should have received a copy of the GNU General Public License along |
// with eCos; if not, write to the Free Software Foundation, Inc., |
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. |
// |
// As a special exception, if other files instantiate templates or use macros |
// or inline functions from this file, or you compile this file and link it |
// with other works to produce a work based on this file, this file does not |
// by itself cause the resulting work to be covered by the GNU General Public |
// License. However the source code for this file must still be made available |
// in accordance with section (3) of the GNU General Public License. |
// |
// This exception does not invalidate any other reasons why a work based on |
// this file might be covered by the GNU General Public License. |
// |
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc. |
// at http://sources.redhat.com/ecos/ecos-license/ |
// ------------------------------------------- |
//####ECOSGPLCOPYRIGHTEND#### |
//======================================================================== |
//#####DESCRIPTIONBEGIN#### |
// |
// Author(s): jlarmour |
// Contributors: |
// Date: 2000-06-13 |
// Purpose: Helper file for heapgen.tcl |
// Description: Exports macros derived from the configuration so that |
// they are visible to heapgen.tcl. This file is |
// preprocessed by a make rule in the CDL to generate |
// "heapgeninc.tcl" |
// Note, this isn't a real C file. It is only to be |
// preprocessed, not compiled |
// Usage: |
// |
//####DESCRIPTIONEND#### |
// |
//======================================================================*/ |
|
#include <pkgconf/system.h> |
#include <pkgconf/memalloc.h> |
|
#define STRINGIFY1(_x_) #_x_ |
#define STRINGIFY(_x_) STRINGIFY1(_x_) |
|
set memlayout_h STRINGIFY(CYGHWR_MEMORY_LAYOUT_H) |
set memlayout_ldi STRINGIFY(CYGHWR_MEMORY_LAYOUT_LDI) |
set malloc_impl_h STRINGIFY(CYGBLD_MEMALLOC_MALLOC_IMPLEMENTATION_HEADER) |
#define __MALLOC_IMPL_WANTED |
#include CYGBLD_MEMALLOC_MALLOC_IMPLEMENTATION_HEADER |
set malloc_impl_class STRINGIFY(CYGCLS_MEMALLOC_MALLOC_IMPL) |
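/* Illustrative only: after the make rule in the CDL runs this file
   through the C preprocessor, the generated heapgeninc.tcl contains
   plain Tcl assignments of the stringified configuration values,
   along the lines of (values invented for illustration):

       set memlayout_h "<pkgconf/mlt_ram.h>"
       set memlayout_ldi "<pkgconf/mlt_ram.ldi>"
       set malloc_impl_h "<cyg/memalloc/dlmalloc.hxx>"
       set malloc_impl_class "Cyg_Mempool_dlmalloc"

   heapgen.tcl below sources that file to pick these values up. */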
|
/* EOF heapgen.cpp */ |
/common/v2_0/src/heapgen.tcl
0,0 → 1,201
#!/bin/bash |
# restart using a Tcl shell \ |
exec sh -c 'for tclshell in tclsh tclsh83 cygtclsh80 ; do \ |
( echo | $tclshell ) 2> /dev/null && exec $tclshell "`( cygpath -w \"$0\" ) 2> /dev/null || echo $0`" "$@" ; \ |
done ; \ |
echo "heapgen.tcl: cannot find Tcl shell" ; exit 1' "$0" "$@" |
|
#=============================================================================== |
# |
# heapgen.tcl |
# |
# Script to generate memory pool instantiations based on the memory map |
# |
#=============================================================================== |
#####ECOSGPLCOPYRIGHTBEGIN#### |
## ------------------------------------------- |
## This file is part of eCos, the Embedded Configurable Operating System. |
## Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc. |
## |
## eCos is free software; you can redistribute it and/or modify it under |
## the terms of the GNU General Public License as published by the Free |
## Software Foundation; either version 2 or (at your option) any later version. |
## |
## eCos is distributed in the hope that it will be useful, but WITHOUT ANY |
## WARRANTY; without even the implied warranty of MERCHANTABILITY or |
## FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
## for more details. |
## |
## You should have received a copy of the GNU General Public License along |
## with eCos; if not, write to the Free Software Foundation, Inc., |
## 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. |
## |
## As a special exception, if other files instantiate templates or use macros |
## or inline functions from this file, or you compile this file and link it |
## with other works to produce a work based on this file, this file does not |
## by itself cause the resulting work to be covered by the GNU General Public |
## License. However the source code for this file must still be made available |
## in accordance with section (3) of the GNU General Public License. |
## |
## This exception does not invalidate any other reasons why a work based on |
## this file might be covered by the GNU General Public License. |
## |
## Alternative licenses for eCos may be arranged by contacting Red Hat, Inc. |
## at http://sources.redhat.com/ecos/ecos-license/ |
## ------------------------------------------- |
#####ECOSGPLCOPYRIGHTEND#### |
#=============================================================================== |
######DESCRIPTIONBEGIN#### |
# |
# Author(s): jlarmour |
# Contributors: |
# Date: 2000-06-13 |
# Purpose: Generate memory pool instantiations based on the memory map |
# along with information in a header file to allow access from |
# C source |
# Description: |
# Usage: |
# |
#####DESCRIPTIONEND#### |
#=============================================================================== |
|
set debug 0 |
|
proc dputs { args } { |
global debug |
if { $debug > 0 } { |
puts -nonewline "DEBUG: " |
foreach i $args { |
puts -nonewline $i |
} |
puts "" |
} |
} |
|
proc tcl_path { posix_path } { |
global tcl_platform |
if { $tcl_platform(platform) == "windows" } { |
return [ exec cygpath -w $posix_path ] |
} else { |
return $posix_path |
} |
} |
|
dputs "argc=" $argc |
dputs "argv=" $argv |
|
if { $argc != 2 } { |
error "Usage: heapgen.tcl installdir builddir" |
} |
|
set installdir [ tcl_path [ lindex $argv 0 ] ] |
set builddir [ tcl_path [ lindex $argv 1 ] ] |
|
dputs "builddir=" $builddir |
dputs "installdir=" $installdir |
dputs "pwd=" [pwd] |
|
# Fetch relevant config data placed in the generated file heapgeninc.tcl |
source [ file join $builddir heapgeninc.tcl ] |
|
dputs "memlayout_h=" $memlayout_h |
|
# ---------------------------------------------------------------------------- |
# Get heap information |
|
# trim brackets |
set ldi_name [ string trim $memlayout_ldi "<>" ] |
dputs $ldi_name |
# prefix full leading path including installdir |
set ldifile [open [ file join $installdir include $ldi_name ] r] |
|
# now read the .ldi file and find the user-defined sections with the |
# prefix "heap" |
set heaps "" |
while { [gets $ldifile line] >= 0} { |
    # Search for a user-defined section name beginning with "heap" |
    # (possibly with leading underscores) |
if [ regexp {^[ \t]+(CYG_LABEL_DEFN\(|)[ \t]*_*heap} $line ] { |
set heapnamestart [ string first heap $line ] |
set heapnameend1 [ string first ")" $line ] |
incr heapnameend1 -1 |
set heapnameend2 [ string wordend $line $heapnamestart ] |
if { $heapnameend1 < 0 } { |
            set heapnameend1 $heapnameend2 |
} |
set heapnameend [ expr $heapnameend1 < $heapnameend2 ? $heapnameend1 : $heapnameend2 ] |
set heapname [ string range $line $heapnamestart $heapnameend ] |
set heaps [ concat $heaps $heapname ] |
dputs [ format "Found heap \"%s\"" $heapname ] |
} |
} |
close $ldifile |
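# Illustrative example (hypothetical .ldi content): a line such as
#
#     CYG_LABEL_DEFN(__heap1) (NOLOAD) : ALIGN (0x8) { *(.heap1) } > ram
#
# (with leading whitespace) matches the regexp above and yields the
# heap name "heap1"; lines without a closing parenthesis fall back to
# the word-end calculation instead.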
|
set heapcount [ llength $heaps ] |
set heapcount1 [ expr 1 + $heapcount ] |
|
# ---------------------------------------------------------------------------- |
# Generate header file |
|
# Could have made it generate the header file straight into include/pkgconf, |
# but that knowledge of the build system is best left in the make rules in CDL |
|
set hfile [ open [ file join $builddir heaps.hxx ] w] |
puts $hfile "#ifndef CYGONCE_PKGCONF_HEAPS_HXX" |
puts $hfile "#define CYGONCE_PKGCONF_HEAPS_HXX" |
puts $hfile "/* <pkgconf/heaps.hxx> */\n" |
puts $hfile "/* This is a generated file - do not edit! */\n" |
# Allow CYGMEM_HEAP_COUNT to be available to the implementation header file |
puts $hfile [ format "#define CYGMEM_HEAP_COUNT %d" $heapcount ] |
puts $hfile [ concat "#include " $malloc_impl_h ] |
puts $hfile "" |
puts $hfile [ format "extern %s *cygmem_memalloc_heaps\[ %d \];" \ |
$malloc_impl_class $heapcount1 ] |
puts $hfile "\n#endif" |
puts $hfile "/* EOF <pkgconf/heaps.hxx> */" |
close $hfile |
|
# ---------------------------------------------------------------------------- |
# Generate C file in the current directory (ie. the build directory) |
# that instantiates the pools |
|
set cfile [ open [ file join $builddir heaps.cxx ] w ] |
puts $cfile "/* heaps.cxx */\n" |
puts $cfile "/* This is a generated file - do not edit! */\n" |
puts $cfile "#include <pkgconf/heaps.hxx>" |
puts $cfile [ concat "#include " $memlayout_h ] |
puts $cfile "#include <cyg/infra/cyg_type.h>" |
puts $cfile "#include <cyg/hal/hal_intr.h>" |
puts $cfile [ concat "#include " $malloc_impl_h ] |
puts $cfile "" |
|
foreach heap $heaps { |
puts $cfile "#ifdef HAL_MEM_REAL_REGION_TOP\n" |
|
puts $cfile [ format "%s cygmem_pool_%s ( (cyg_uint8 *)CYGMEM_SECTION_%s ," \ |
$malloc_impl_class $heap $heap ] |
puts $cfile [ format " HAL_MEM_REAL_REGION_TOP( (cyg_uint8 *)CYGMEM_SECTION_%s + CYGMEM_SECTION_%s_SIZE ) - (cyg_uint8 *)CYGMEM_SECTION_%s ) " \ |
$heap $heap $heap ] |
puts $cfile " CYGBLD_ATTRIB_INIT_PRI(CYG_INIT_MEMALLOC);\n" |
|
puts $cfile "#else\n" |
|
puts $cfile [ format "%s cygmem_pool_%s ( (cyg_uint8 *)CYGMEM_SECTION_%s , CYGMEM_SECTION_%s_SIZE ) CYGBLD_ATTRIB_INIT_PRI(CYG_INIT_MEMALLOC);\n" \ |
$malloc_impl_class $heap $heap $heap ] |
|
puts $cfile "#endif" |
} |
|
puts $cfile "" |
puts $cfile [ format "%s *cygmem_memalloc_heaps\[ %d \] = { " \ |
$malloc_impl_class $heapcount1 ] |
foreach heap $heaps { |
puts $cfile [ format " &cygmem_pool_%s," $heap ] |
} |
puts $cfile " NULL\n};" |
|
puts $cfile "\n/* EOF heaps.cxx */" |
close $cfile |
|
# ---------------------------------------------------------------------------- |
# EOF heapgen.tcl |
/common/v2_0/src/malloc.cxx
0,0 → 1,287
//======================================================================== |
// |
// malloc.cxx |
// |
// Implementation of ISO C memory allocation routines |
// |
//======================================================================== |
//####ECOSGPLCOPYRIGHTBEGIN#### |
// ------------------------------------------- |
// This file is part of eCos, the Embedded Configurable Operating System. |
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc. |
// |
// eCos is free software; you can redistribute it and/or modify it under |
// the terms of the GNU General Public License as published by the Free |
// Software Foundation; either version 2 or (at your option) any later version. |
// |
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY |
// WARRANTY; without even the implied warranty of MERCHANTABILITY or |
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
// for more details. |
// |
// You should have received a copy of the GNU General Public License along |
// with eCos; if not, write to the Free Software Foundation, Inc., |
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. |
// |
// As a special exception, if other files instantiate templates or use macros |
// or inline functions from this file, or you compile this file and link it |
// with other works to produce a work based on this file, this file does not |
// by itself cause the resulting work to be covered by the GNU General Public |
// License. However the source code for this file must still be made available |
// in accordance with section (3) of the GNU General Public License. |
// |
// This exception does not invalidate any other reasons why a work based on |
// this file might be covered by the GNU General Public License. |
// |
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc. |
// at http://sources.redhat.com/ecos/ecos-license/ |
// ------------------------------------------- |
//####ECOSGPLCOPYRIGHTEND#### |
//======================================================================== |
//#####DESCRIPTIONBEGIN#### |
// |
// Author(s): jlarmour |
// Contributors: |
// Date: 2000-04-30 |
// Purpose: Provides ISO C calloc(), malloc(), realloc() and free() |
// functions |
// Description: Implementation of ISO standard allocation routines as per |
// ISO C section 7.10.3 |
// Usage: |
// |
//####DESCRIPTIONEND#### |
// |
//======================================================================== |
|
// CONFIGURATION |
|
#include <pkgconf/memalloc.h> // Configuration header |
|
// Do we want these functions? |
#ifdef CYGPKG_MEMALLOC_MALLOC_ALLOCATORS |
|
// INCLUDES |
|
#include <cyg/infra/cyg_type.h> // Common type definitions and support |
#include <cyg/infra/cyg_trac.h> // Common tracing support |
#include <cyg/infra/cyg_ass.h> // Common assertion support |
#include <string.h>            // For memset() and memcpy() |
#include <stdlib.h> // header for this file |
#ifdef CYGBLD_MEMALLOC_MALLOC_EXTERNAL_HEAP_H |
# include CYGBLD_MEMALLOC_MALLOC_EXTERNAL_HEAP_H |
#else |
# include <pkgconf/heaps.hxx> // heap pools information |
#endif |
#include CYGBLD_MEMALLOC_MALLOC_IMPLEMENTATION_HEADER |
|
// STATIC VARIABLES |
|
// First deal with the worst case, that the memory layout didn't define a |
// heap |
#if CYGMEM_HEAP_COUNT == 0 |
|
// the data space for the memory pool |
cyg_uint8 cyg_memalloc_mallocpool_memory[ |
CYGNUM_MEMALLOC_FALLBACK_MALLOC_POOL_SIZE ] CYGBLD_ATTRIB_WEAK; |
|
// the memory pool object itself |
CYGCLS_MEMALLOC_MALLOC_IMPL cyg_memalloc_mallocpool |
CYGBLD_ATTRIB_INIT_BEFORE( CYG_INIT_LIBC ) = |
CYGCLS_MEMALLOC_MALLOC_IMPL( cyg_memalloc_mallocpool_memory, |
sizeof( cyg_memalloc_mallocpool_memory ) ); |
|
# define POOL cyg_memalloc_mallocpool |
|
#elif CYGMEM_HEAP_COUNT == 1 |
// one heap, so it's straightforward |
|
# define POOL (*cygmem_memalloc_heaps[0]) |
|
#else |
// multiple heaps |
|
# include <cyg/memalloc/memjoin.hxx> |
|
Cyg_Mempool_Joined<CYGCLS_MEMALLOC_MALLOC_IMPL> cyg_memalloc_mallocpool |
CYGBLD_ATTRIB_INIT_BEFORE( CYG_INIT_LIBC ) = |
Cyg_Mempool_Joined<CYGCLS_MEMALLOC_MALLOC_IMPL>( |
CYGMEM_HEAP_COUNT, cygmem_memalloc_heaps |
); |
|
# define POOL cyg_memalloc_mallocpool |
|
#endif |
|
// FUNCTIONS |
|
void * |
malloc( size_t size ) |
{ |
void *data_ptr; |
|
CYG_REPORT_FUNCNAMETYPE( "malloc", "returning pointer %08x" ); |
|
CYG_REPORT_FUNCARG1DV( size ); |
|
#ifdef CYGSEM_MEMALLOC_MALLOC_ZERO_RETURNS_NULL |
// first check if size wanted is 0 |
if ( 0 == size ) { |
CYG_REPORT_RETVAL( NULL ); |
return NULL; |
} // if |
#endif |
|
// ask the pool for the data |
data_ptr = POOL.try_alloc( size ); |
|
    // if it isn't NULL, check that the pointer is valid |
if ( NULL != data_ptr ) { |
CYG_CHECK_DATA_PTR( data_ptr, |
"allocator returned invalid pointer!" ); |
|
// And just check its alignment |
CYG_ASSERT( !((CYG_ADDRWORD)data_ptr & (sizeof(CYG_ADDRWORD) - 1)), |
"Allocator has returned badly aligned data!"); |
} // if |
|
CYG_REPORT_RETVAL( data_ptr ); |
|
return data_ptr; |
} // malloc() |
|
|
void |
free( void *ptr ) |
{ |
cyg_bool freeret; |
|
CYG_REPORT_FUNCNAME( "free"); |
|
CYG_REPORT_FUNCARG1XV( ptr ); |
|
// if null pointer, do nothing as per spec |
if ( NULL==ptr ) |
return; |
|
CYG_CHECK_DATA_PTR( ptr, "Pointer to free isn't even valid!" ); |
|
// get pool to free it |
freeret = POOL.free( (cyg_uint8 *) ptr ); |
|
CYG_ASSERT( freeret , "Couldn't free!" ); |
|
CYG_REPORT_RETURN(); |
|
} // free() |
|
|
void * |
calloc( size_t nmemb, size_t size ) |
{ |
void *data_ptr; |
cyg_ucount32 realsize; |
|
CYG_REPORT_FUNCNAMETYPE( "calloc", "returning pointer %08x" ); |
|
CYG_REPORT_FUNCARG2DV( nmemb, size ); |
|
realsize = nmemb * size; |
|
data_ptr = malloc( realsize ); |
|
// Fill with 0's if non-NULL |
if ( data_ptr != NULL ) |
memset( data_ptr, 0, realsize ); |
|
CYG_REPORT_RETVAL( data_ptr ); |
return data_ptr; |
} // calloc() |
|
|
externC void * |
realloc( void *ptr, size_t size ) |
{ |
cyg_int32 oldsize; |
|
CYG_REPORT_FUNCNAMETYPE( "realloc", "returning pointer %08x" ); |
|
CYG_REPORT_FUNCARG2( "ptr=%08x, size=%d", ptr, size ); |
|
// if pointer is NULL, we must malloc it |
if ( ptr == NULL ) { |
ptr = malloc( size ); |
CYG_REPORT_RETVAL( ptr ); |
return ptr; |
} // if |
|
CYG_CHECK_DATA_PTR( ptr, "realloc() passed a bogus pointer!" ); |
|
// if size is 0, we must free it |
if (size == 0) { |
free(ptr); |
CYG_REPORT_RETVAL( NULL ); |
return NULL; |
} // if |
|
void *newptr; |
|
// otherwise try to resize allocation |
newptr = POOL.resize_alloc( (cyg_uint8 *)ptr, size, &oldsize ); |
|
if ( NULL == newptr ) { |
// if resize_alloc doesn't return a pointer, it failed, so we |
// just have to allocate new space instead, and later copy it |
|
CYG_ASSERT( oldsize != 0, |
"resize_alloc() couldn't determine allocation size!" ); |
|
newptr = malloc( size ); |
|
if ( NULL != newptr ) { |
memcpy( newptr, ptr, size < (size_t) oldsize ? size |
: (size_t) oldsize ); |
free( ptr ); |
} |
} |
|
CYG_REPORT_RETVAL( newptr ); |
return newptr; |
} // realloc() |
|
|
externC struct mallinfo |
mallinfo( void ) |
{ |
struct mallinfo ret = { 0 }; // initialize to all zeros |
Cyg_Mempool_Status stat; |
|
CYG_REPORT_FUNCTION(); |
|
POOL.get_status( CYG_MEMPOOL_STAT_ARENASIZE| |
CYG_MEMPOOL_STAT_FREEBLOCKS| |
CYG_MEMPOOL_STAT_TOTALALLOCATED| |
CYG_MEMPOOL_STAT_TOTALFREE| |
CYG_MEMPOOL_STAT_MAXFREE, stat ); |
|
if ( stat.arenasize > 0 ) |
ret.arena = stat.arenasize; |
|
if ( stat.freeblocks > 0 ) |
ret.ordblks = stat.freeblocks; |
|
if ( stat.totalallocated > 0 ) |
ret.uordblks = stat.totalallocated; |
|
if ( stat.totalfree > 0 ) |
ret.fordblks = stat.totalfree; |
|
if ( stat.maxfree > 0 ) |
ret.maxfree = stat.maxfree; |
|
CYG_REPORT_RETURN(); |
return ret; |
} // mallinfo() |
|
#endif // ifdef CYGPKG_MEMALLOC_MALLOC_ALLOCATORS |
|
// EOF malloc.cxx |
/common/v2_0/src/memvar.cxx
0,0 → 1,181
//========================================================================== |
// |
// memvar.cxx |
// |
// Memory pool with variable block class declarations |
// |
//========================================================================== |
//####ECOSGPLCOPYRIGHTBEGIN#### |
// ------------------------------------------- |
// This file is part of eCos, the Embedded Configurable Operating System. |
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc. |
// |
// eCos is free software; you can redistribute it and/or modify it under |
// the terms of the GNU General Public License as published by the Free |
// Software Foundation; either version 2 or (at your option) any later version. |
// |
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY |
// WARRANTY; without even the implied warranty of MERCHANTABILITY or |
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
// for more details. |
// |
// You should have received a copy of the GNU General Public License along |
// with eCos; if not, write to the Free Software Foundation, Inc., |
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. |
// |
// As a special exception, if other files instantiate templates or use macros |
// or inline functions from this file, or you compile this file and link it |
// with other works to produce a work based on this file, this file does not |
// by itself cause the resulting work to be covered by the GNU General Public |
// License. However the source code for this file must still be made available |
// in accordance with section (3) of the GNU General Public License. |
// |
// This exception does not invalidate any other reasons why a work based on |
// this file might be covered by the GNU General Public License. |
// |
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc. |
// at http://sources.redhat.com/ecos/ecos-license/ |
// ------------------------------------------- |
//####ECOSGPLCOPYRIGHTEND#### |
//========================================================================== |
//#####DESCRIPTIONBEGIN#### |
// |
// Author(s): dsm, jlarmour |
// Contributors: |
// Date: 2000-06-12 |
// Description: |
// Usage: #include <cyg/memalloc/memvar.hxx> |
// |
// |
//####DESCRIPTIONEND#### |
// |
//========================================================================== |
|
// CONFIGURATION |
|
#include <pkgconf/memalloc.h> |
#include <pkgconf/system.h> |
#ifdef CYGPKG_KERNEL |
# include <pkgconf/kernel.h> |
#endif |
|
|
// INCLUDES |
|
#include <cyg/infra/cyg_type.h> // types |
#include <cyg/infra/cyg_ass.h> // assertion macros |
#include <cyg/infra/cyg_trac.h> // tracing macros |
|
#ifdef CYGFUN_KERNEL_THREADS_TIMER |
# include <cyg/kernel/ktypes.h> // cyg_tick_count |
#endif |
|
#ifdef CYGSEM_MEMALLOC_ALLOCATOR_VARIABLE_THREADAWARE |
# include <cyg/memalloc/mempolt2.hxx> // kernel safe mempool template |
#endif |
|
#include <cyg/memalloc/memvar.hxx> |
#include <cyg/memalloc/mvarimpl.hxx> // implementation of a variable mem pool |
#include <cyg/memalloc/common.hxx> // Common memory allocator infra |
|
// FUNCTIONS |
|
// ------------------------------------------------------------------------- |
// debugging/assert function |
|
#ifdef CYGDBG_USE_ASSERTS |
cyg_bool |
Cyg_Mempool_Variable::check_this(cyg_assert_class_zeal zeal) const |
{ |
CYG_REPORT_FUNCTION(); |
// check that we have a non-NULL pointer first |
if( this == NULL ) return false; |
#ifdef CYGSEM_MEMALLOC_ALLOCATOR_VARIABLE_THREADAWARE |
return mypool.check_this( zeal ); |
#else |
return true; |
#endif |
} |
#endif |
|
// ------------------------------------------------------------------------- |
// Constructor: gives the base and size of the arena in which memory is |
// to be carved out |
Cyg_Mempool_Variable::Cyg_Mempool_Variable( |
cyg_uint8 *base, |
cyg_int32 size, |
cyg_int32 alignment) |
: mypool( base, size, (CYG_ADDRWORD)alignment ) |
{ |
} |
|
// Destructor |
Cyg_Mempool_Variable::~Cyg_Mempool_Variable() |
{ |
} |
|
// ------------------------------------------------------------------------- |
// get some memory; wait if none available |
#ifdef CYGSEM_MEMALLOC_ALLOCATOR_VARIABLE_THREADAWARE |
cyg_uint8 * |
Cyg_Mempool_Variable::alloc(cyg_int32 size) |
{ |
return mypool.alloc( size ); |
} |
|
# ifdef CYGFUN_KERNEL_THREADS_TIMER |
// get some memory with a timeout |
cyg_uint8 * |
Cyg_Mempool_Variable::alloc(cyg_int32 size, cyg_tick_count delay_timeout) |
{ |
return mypool.alloc( size , delay_timeout ); |
} |
# endif |
#endif |
|
// get some memory, return NULL if none available |
cyg_uint8 * |
Cyg_Mempool_Variable::try_alloc(cyg_int32 size) |
{ |
return mypool.try_alloc( size ); |
} |
|
// Resize an existing allocation. If oldsize is non-NULL, the previous |
// allocation size is placed into it. If previous size not available, |
// it is set to 0. NB previous allocation size may have been rounded up. |
// Occasionally the allocation can be adjusted *backwards* as well as, |
// or instead of, forwards, therefore the address of the resized |
// allocation is returned, or NULL if no resizing was possible. |
// Note that this differs from ::realloc() in that no attempt is |
// made to call malloc() if resizing is not possible - that is left |
// to higher layers. The data is copied from old to new though. |
// The effects of alloc_ptr==NULL or newsize==0 are undefined |
cyg_uint8 * |
Cyg_Mempool_Variable::resize_alloc( cyg_uint8 *alloc_ptr, cyg_int32 newsize, |
cyg_int32 *oldsize ) |
{ |
return mypool.resize_alloc( alloc_ptr, newsize, oldsize ); |
} |
|
// free the memory back to the pool |
cyg_bool |
Cyg_Mempool_Variable::free( cyg_uint8 *p, cyg_int32 size ) |
{ |
return mypool.free( p, size ); |
} |
|
// Get memory pool status |
void |
Cyg_Mempool_Variable::get_status( cyg_mempool_status_flag_t flags, |
Cyg_Mempool_Status &status ) |
{ |
// set to 0 - if there's anything really waiting, it will be set to |
// 1 later |
status.waiting = 0; |
|
return mypool.get_status( flags, status ); |
} |
|
// ------------------------------------------------------------------------- |
|
// End of memvar.cxx |