OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

[/] [or1k/] [trunk/] [newlib/] [newlib/] [libc/] [stdlib/] [mallocr.c] - Diff between revs 39 and 56

Go to most recent revision | Show entire file | Details | Blame | View Log

Rev 39 Rev 56
Line 1... Line 1...
 
#ifdef MALLOC_PROVIDED
 
int _dummy_mallocr = 1;
 
#else
/* ---------- To make a malloc.h, start cutting here ------------ */
/* ---------- To make a malloc.h, start cutting here ------------ */
 
 
/*
/*
  A version of malloc/free/realloc written by Doug Lea and released to the
  A version of malloc/free/realloc written by Doug Lea and released to the
  public domain.  Send questions/comments/complaints/performance data
  public domain.  Send questions/comments/complaints/performance data
Line 160... Line 163...
     Define these to C expressions which are run to lock and unlock
     Define these to C expressions which are run to lock and unlock
     the malloc data structures.  Calls may be nested; that is,
     the malloc data structures.  Calls may be nested; that is,
     MALLOC_LOCK may be called more than once before the corresponding
     MALLOC_LOCK may be called more than once before the corresponding
     MALLOC_UNLOCK calls.  MALLOC_LOCK must avoid waiting for a lock
     MALLOC_UNLOCK calls.  MALLOC_LOCK must avoid waiting for a lock
     that it already holds.
     that it already holds.
 
  MALLOC_ALIGNMENT          (default: NOT defined)
 
     Define this to 16 if you need 16 byte alignment instead of 8 byte alignment
 
     which is the normal default.
 
  SIZE_T_SMALLER_THAN_LONG (default: NOT defined)
 
     Define this when the platform you are compiling for has sizeof(long) > sizeof(size_t).
 
     The option causes some extra code to be generated to handle operations
 
     that use size_t operands and have long results.
  REALLOC_ZERO_BYTES_FREES (default: NOT defined)
  REALLOC_ZERO_BYTES_FREES (default: NOT defined)
     Define this if you think that realloc(p, 0) should be equivalent
     Define this if you think that realloc(p, 0) should be equivalent
     to free(p). Otherwise, since malloc returns a unique pointer for
     to free(p). Otherwise, since malloc returns a unique pointer for
     malloc(0), so does realloc(p, 0).
     malloc(0), so does realloc(p, 0).
  HAVE_MEMCPY               (default: defined)
  HAVE_MEMCPY               (default: defined)
Line 416... Line 426...
#ifndef INTERNAL_SIZE_T
#ifndef INTERNAL_SIZE_T
#define INTERNAL_SIZE_T size_t
#define INTERNAL_SIZE_T size_t
#endif
#endif
 
 
/*
/*
 
  Following is needed on implementations whereby long > size_t.
 
  The problem is caused because the code performs subtractions of
 
  size_t values and stores the result in long values.  In the case
 
  where long > size_t and the first value is actually less than
 
  the second value, the resultant value is positive.  For example,
 
  (long)(x - y) where x = 0 and y is 1 ends up being 0x00000000FFFFFFFF
 
  which is 2^32 - 1 instead of 0xFFFFFFFFFFFFFFFF.  This is due to the
 
  fact that assignment from unsigned to signed won't sign extend.
 
*/
 
 
 
#ifdef SIZE_T_SMALLER_THAN_LONG
 
#define long_sub_size_t(x, y) ( (x < y) ? -((long)(y - x)) : (x - y) );
 
#else
 
#define long_sub_size_t(x, y) ( (long)(x - y) )
 
#endif
 
 
 
/*
  REALLOC_ZERO_BYTES_FREES should be set if a call to
  REALLOC_ZERO_BYTES_FREES should be set if a call to
  realloc with zero bytes should be the same as a call to free.
  realloc with zero bytes should be the same as a call to free.
  Some people think it should. Otherwise, since this malloc
  Some people think it should. Otherwise, since this malloc
  returns a unique pointer for malloc(0), so does realloc(p, 0).
  returns a unique pointer for malloc(0), so does realloc(p, 0).
*/
*/
Line 1349... Line 1376...
 
 
/*  sizes, alignments */
/*  sizes, alignments */
 
 
#define SIZE_SZ                (sizeof(INTERNAL_SIZE_T))
#define SIZE_SZ                (sizeof(INTERNAL_SIZE_T))
#ifndef MALLOC_ALIGNMENT
#ifndef MALLOC_ALIGNMENT
 
#define MALLOC_ALIGN           8
#define MALLOC_ALIGNMENT       (SIZE_SZ + SIZE_SZ)
#define MALLOC_ALIGNMENT       (SIZE_SZ + SIZE_SZ)
 
#else
 
#define MALLOC_ALIGN           MALLOC_ALIGNMENT
#endif
#endif
#define MALLOC_ALIGN_MASK      (MALLOC_ALIGNMENT - 1)
#define MALLOC_ALIGN_MASK      (MALLOC_ALIGNMENT - 1)
#define MINSIZE                (sizeof(struct malloc_chunk))
#define MINSIZE                (sizeof(struct malloc_chunk))
 
 
/* conversion from malloc headers to user pointers, and back */
/* conversion from malloc headers to user pointers, and back */
Line 1363... Line 1393...
 
 
/* pad request bytes into a usable size */
/* pad request bytes into a usable size */
 
 
#define request2size(req) \
#define request2size(req) \
 (((long)((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) < \
 (((long)((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) < \
  (long)(MINSIZE + MALLOC_ALIGN_MASK)) ? MINSIZE : \
  (long)(MINSIZE + MALLOC_ALIGN_MASK)) ? ((MINSIZE + MALLOC_ALIGN_MASK) & ~(MALLOC_ALIGN_MASK)) : \
   (((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) & ~(MALLOC_ALIGN_MASK)))
   (((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) & ~(MALLOC_ALIGN_MASK)))
 
 
/* Check if m has acceptable alignment */
/* Check if m has acceptable alignment */
 
 
#define aligned_OK(m)    (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0)
#define aligned_OK(m)    (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0)
Line 1586... Line 1616...
 ((((unsigned long)(sz)) >> 9) <=   84) ? 110 + (((unsigned long)(sz)) >> 12): \
 ((((unsigned long)(sz)) >> 9) <=   84) ? 110 + (((unsigned long)(sz)) >> 12): \
 ((((unsigned long)(sz)) >> 9) <=  340) ? 119 + (((unsigned long)(sz)) >> 15): \
 ((((unsigned long)(sz)) >> 9) <=  340) ? 119 + (((unsigned long)(sz)) >> 15): \
 ((((unsigned long)(sz)) >> 9) <= 1364) ? 124 + (((unsigned long)(sz)) >> 18): \
 ((((unsigned long)(sz)) >> 9) <= 1364) ? 124 + (((unsigned long)(sz)) >> 18): \
                                          126)
                                          126)
/*
/*
  bins for chunks < 512 are all spaced 8 bytes apart, and hold
  bins for chunks < 512 are all spaced SMALLBIN_WIDTH bytes apart, and hold
  identically sized chunks. This is exploited in malloc.
  identically sized chunks. This is exploited in malloc.
*/
*/
 
 
#define MAX_SMALLBIN         63
 
#define MAX_SMALLBIN_SIZE   512
#define MAX_SMALLBIN_SIZE   512
#define SMALLBIN_WIDTH        8
#define SMALLBIN_WIDTH        8
 
#define SMALLBIN_WIDTH_BITS   3
 
#define MAX_SMALLBIN        (MAX_SMALLBIN_SIZE / SMALLBIN_WIDTH) - 1
 
 
#define smallbin_index(sz)  (((unsigned long)(sz)) >> 3)
#define smallbin_index(sz)  (((unsigned long)(sz)) >> SMALLBIN_WIDTH_BITS)
 
 
/*
/*
   Requests are `small' if both the corresponding and the next bin are small
   Requests are `small' if both the corresponding and the next bin are small
*/
*/
 
 
Line 1813... Line 1844...
#else
#else
static void do_check_malloced_chunk(p, s) mchunkptr p; INTERNAL_SIZE_T s;
static void do_check_malloced_chunk(p, s) mchunkptr p; INTERNAL_SIZE_T s;
#endif
#endif
{
{
  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
  long room = sz - s;
  long room = long_sub_size_t(sz, s);
 
 
  do_check_inuse_chunk(p);
  do_check_inuse_chunk(p);
 
 
  /* Legal size ... */
  /* Legal size ... */
  assert((long)sz >= (long)MINSIZE);
  assert((long)sz >= (long)MINSIZE);
Line 2265... Line 2296...
Void_t* mALLOc(RARG size_t bytes)
Void_t* mALLOc(RARG size_t bytes)
#else
#else
Void_t* mALLOc(RARG bytes) RDECL size_t bytes;
Void_t* mALLOc(RARG bytes) RDECL size_t bytes;
#endif
#endif
{
{
 
#ifdef MALLOC_PROVIDED
 
 
 
  malloc (bytes);
 
 
 
#else
 
 
  mchunkptr victim;                  /* inspected/selected chunk */
  mchunkptr victim;                  /* inspected/selected chunk */
  INTERNAL_SIZE_T victim_size;       /* its size */
  INTERNAL_SIZE_T victim_size;       /* its size */
  int       idx;                     /* index for bin traversal */
  int       idx;                     /* index for bin traversal */
  mbinptr   bin;                     /* associated bin */
  mbinptr   bin;                     /* associated bin */
  mchunkptr remainder;               /* remainder from a split */
  mchunkptr remainder;               /* remainder from a split */
Line 2293... Line 2330...
    /* No traversal or size check necessary for small bins.  */
    /* No traversal or size check necessary for small bins.  */
 
 
    q = bin_at(idx);
    q = bin_at(idx);
    victim = last(q);
    victim = last(q);
 
 
 
#if MALLOC_ALIGN != 16
    /* Also scan the next one, since it would have a remainder < MINSIZE */
    /* Also scan the next one, since it would have a remainder < MINSIZE */
    if (victim == q)
    if (victim == q)
    {
    {
      q = next_bin(q);
      q = next_bin(q);
      victim = last(q);
      victim = last(q);
    }
    }
 
#endif
    if (victim != q)
    if (victim != q)
    {
    {
      victim_size = chunksize(victim);
      victim_size = chunksize(victim);
      unlink(victim, bck, fwd);
      unlink(victim, bck, fwd);
      set_inuse_bit_at_offset(victim, victim_size);
      set_inuse_bit_at_offset(victim, victim_size);
Line 2320... Line 2359...
    bin = bin_at(idx);
    bin = bin_at(idx);
 
 
    for (victim = last(bin); victim != bin; victim = victim->bk)
    for (victim = last(bin); victim != bin; victim = victim->bk)
    {
    {
      victim_size = chunksize(victim);
      victim_size = chunksize(victim);
      remainder_size = victim_size - nb;
      remainder_size = long_sub_size_t(victim_size, nb);
 
 
      if (remainder_size >= (long)MINSIZE) /* too big */
      if (remainder_size >= (long)MINSIZE) /* too big */
      {
      {
        --idx; /* adjust to rescan below after checking last remainder */
        --idx; /* adjust to rescan below after checking last remainder */
        break;
        break;
Line 2347... Line 2386...
  /* Try to use the last split-off remainder */
  /* Try to use the last split-off remainder */
 
 
  if ( (victim = last_remainder->fd) != last_remainder)
  if ( (victim = last_remainder->fd) != last_remainder)
  {
  {
    victim_size = chunksize(victim);
    victim_size = chunksize(victim);
    remainder_size = victim_size - nb;
    remainder_size = long_sub_size_t(victim_size, nb);
 
 
    if (remainder_size >= (long)MINSIZE) /* re-split */
    if (remainder_size >= (long)MINSIZE) /* re-split */
    {
    {
      remainder = chunk_at_offset(victim, nb);
      remainder = chunk_at_offset(victim, nb);
      set_head(victim, nb | PREV_INUSE);
      set_head(victim, nb | PREV_INUSE);
Line 2412... Line 2451...
        /* Find and use first big enough chunk ... */
        /* Find and use first big enough chunk ... */
 
 
        for (victim = last(bin); victim != bin; victim = victim->bk)
        for (victim = last(bin); victim != bin; victim = victim->bk)
        {
        {
          victim_size = chunksize(victim);
          victim_size = chunksize(victim);
          remainder_size = victim_size - nb;
          remainder_size = long_sub_size_t(victim_size, nb);
 
 
          if (remainder_size >= (long)MINSIZE) /* split */
          if (remainder_size >= (long)MINSIZE) /* split */
          {
          {
            remainder = chunk_at_offset(victim, nb);
            remainder = chunk_at_offset(victim, nb);
            set_head(victim, nb | PREV_INUSE);
            set_head(victim, nb | PREV_INUSE);
Line 2440... Line 2479...
 
 
        }
        }
 
 
       bin = next_bin(bin);
       bin = next_bin(bin);
 
 
 
#if MALLOC_ALIGN == 16
 
       if (idx < MAX_SMALLBIN)
 
         {
 
           bin = next_bin(bin);
 
           ++idx;
 
         }
 
#endif
      } while ((++idx & (BINBLOCKWIDTH - 1)) != 0);
      } while ((++idx & (BINBLOCKWIDTH - 1)) != 0);
 
 
      /* Clear out the block bit. */
      /* Clear out the block bit. */
 
 
      do   /* Possibly backtrack to try to clear a partial block */
      do   /* Possibly backtrack to try to clear a partial block */
Line 2474... Line 2520...
 
 
 
 
  /* Try to use top chunk */
  /* Try to use top chunk */
 
 
  /* Require that there be a remainder, ensuring top always exists  */
  /* Require that there be a remainder, ensuring top always exists  */
  remainder_size = chunksize(top) - nb;
  remainder_size = long_sub_size_t(chunksize(top), nb);
  if (chunksize(top) < nb || remainder_size < (long)MINSIZE)
  if (chunksize(top) < nb || remainder_size < (long)MINSIZE)
  {
  {
 
 
#if HAVE_MMAP
#if HAVE_MMAP
    /* If big and would otherwise need to extend, try to use mmap instead */
    /* If big and would otherwise need to extend, try to use mmap instead */
Line 2490... Line 2536...
    }
    }
#endif
#endif
 
 
    /* Try to extend */
    /* Try to extend */
    malloc_extend_top(RCALL nb);
    malloc_extend_top(RCALL nb);
    remainder_size = chunksize(top) - nb;
    remainder_size = long_sub_size_t(chunksize(top), nb);
    if (chunksize(top) < nb || remainder_size < (long)MINSIZE)
    if (chunksize(top) < nb || remainder_size < (long)MINSIZE)
    {
    {
      MALLOC_UNLOCK;
      MALLOC_UNLOCK;
      return 0; /* propagate failure */
      return 0; /* propagate failure */
    }
    }
Line 2506... Line 2552...
  set_head(top, remainder_size | PREV_INUSE);
  set_head(top, remainder_size | PREV_INUSE);
  check_malloced_chunk(victim, nb);
  check_malloced_chunk(victim, nb);
  MALLOC_UNLOCK;
  MALLOC_UNLOCK;
  return chunk2mem(victim);
  return chunk2mem(victim);
 
 
 
#endif /* MALLOC_PROVIDED */
}
}
 
 
#endif /* DEFINE_MALLOC */
#endif /* DEFINE_MALLOC */


#ifdef DEFINE_FREE
#ifdef DEFINE_FREE
Line 2540... Line 2587...
void fREe(RARG Void_t* mem)
void fREe(RARG Void_t* mem)
#else
#else
void fREe(RARG mem) RDECL Void_t* mem;
void fREe(RARG mem) RDECL Void_t* mem;
#endif
#endif
{
{
 
#ifdef MALLOC_PROVIDED
 
 
 
  free (mem);
 
 
 
#else
 
 
  mchunkptr p;         /* chunk corresponding to mem */
  mchunkptr p;         /* chunk corresponding to mem */
  INTERNAL_SIZE_T hd;  /* its head field */
  INTERNAL_SIZE_T hd;  /* its head field */
  INTERNAL_SIZE_T sz;  /* its size */
  INTERNAL_SIZE_T sz;  /* its size */
  int       idx;       /* its bin index */
  int       idx;       /* its bin index */
  mchunkptr next;      /* next contiguous chunk */
  mchunkptr next;      /* next contiguous chunk */
Line 2630... Line 2683...
  set_foot(p, sz);
  set_foot(p, sz);
  if (!islr)
  if (!islr)
    frontlink(p, sz, idx, bck, fwd);
    frontlink(p, sz, idx, bck, fwd);
 
 
  MALLOC_UNLOCK;
  MALLOC_UNLOCK;
 
 
 
#endif /* MALLOC_PROVIDED */
}
}
 
 
#endif /* DEFINE_FREE */
#endif /* DEFINE_FREE */


#ifdef DEFINE_REALLOC
#ifdef DEFINE_REALLOC
Line 2678... Line 2733...
Void_t* rEALLOc(RARG Void_t* oldmem, size_t bytes)
Void_t* rEALLOc(RARG Void_t* oldmem, size_t bytes)
#else
#else
Void_t* rEALLOc(RARG oldmem, bytes) RDECL Void_t* oldmem; size_t bytes;
Void_t* rEALLOc(RARG oldmem, bytes) RDECL Void_t* oldmem; size_t bytes;
#endif
#endif
{
{
 
#ifdef MALLOC_PROVIDED
 
 
 
  realloc (oldmem, bytes);
 
 
 
#else
 
 
  INTERNAL_SIZE_T    nb;      /* padded request size */
  INTERNAL_SIZE_T    nb;      /* padded request size */
 
 
  mchunkptr oldp;             /* chunk corresponding to oldmem */
  mchunkptr oldp;             /* chunk corresponding to oldmem */
  INTERNAL_SIZE_T    oldsize; /* its size */
  INTERNAL_SIZE_T    oldsize; /* its size */
 
 
Line 2870... Line 2931...
  }
  }
 
 
 
 
 split:  /* split off extra room in old or expanded chunk */
 split:  /* split off extra room in old or expanded chunk */
 
 
  if (newsize - nb >= MINSIZE) /* split off remainder */
  remainder_size = long_sub_size_t(newsize, nb);
 
 
 
  if (remainder_size >= (long)MINSIZE) /* split off remainder */
  {
  {
    remainder = chunk_at_offset(newp, nb);
    remainder = chunk_at_offset(newp, nb);
    remainder_size = newsize - nb;
 
    set_head_size(newp, nb);
    set_head_size(newp, nb);
    set_head(remainder, remainder_size | PREV_INUSE);
    set_head(remainder, remainder_size | PREV_INUSE);
    set_inuse_bit_at_offset(remainder, remainder_size);
    set_inuse_bit_at_offset(remainder, remainder_size);
    fREe(RCALL chunk2mem(remainder)); /* let free() deal with it */
    fREe(RCALL chunk2mem(remainder)); /* let free() deal with it */
  }
  }
Line 2888... Line 2950...
  }
  }
 
 
  check_inuse_chunk(newp);
  check_inuse_chunk(newp);
  MALLOC_UNLOCK;
  MALLOC_UNLOCK;
  return chunk2mem(newp);
  return chunk2mem(newp);
 
 
 
#endif /* MALLOC_PROVIDED */
}
}
 
 
#endif /* DEFINE_REALLOC */
#endif /* DEFINE_REALLOC */


#ifdef DEFINE_MEMALIGN
#ifdef DEFINE_MEMALIGN
Line 2999... Line 3063...
    assert (newsize >= nb && (((unsigned long)(chunk2mem(p))) % alignment) == 0);
    assert (newsize >= nb && (((unsigned long)(chunk2mem(p))) % alignment) == 0);
  }
  }
 
 
  /* Also give back spare room at the end */
  /* Also give back spare room at the end */
 
 
  remainder_size = chunksize(p) - nb;
  remainder_size = long_sub_size_t(chunksize(p), nb);
 
 
  if (remainder_size >= (long)MINSIZE)
  if (remainder_size >= (long)MINSIZE)
  {
  {
    remainder = chunk_at_offset(p, nb);
    remainder = chunk_at_offset(p, nb);
    set_head(remainder, remainder_size | PREV_INUSE);
    set_head(remainder, remainder_size | PREV_INUSE);
Line 3577... Line 3641...
    Trial version Fri Aug 28 13:14:29 1992  Doug Lea  (dl at g.oswego.edu)
    Trial version Fri Aug 28 13:14:29 1992  Doug Lea  (dl at g.oswego.edu)
      * Based loosely on libg++-1.2X malloc. (It retains some of the overall
      * Based loosely on libg++-1.2X malloc. (It retains some of the overall
         structure of old version,  but most details differ.)
         structure of old version,  but most details differ.)
 
 
*/
*/
 
#endif
 
 
 No newline at end of file
 No newline at end of file

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.