newlib-1.17.0/newlib/libc/machine/powerpc/vec_mallocr.c
(OpenCores openrisc_me Subversion repository, trunk/gnu-src, rev 158)
/* This code is based on mallocr.c written by Doug Lea which is released
   to the public domain.  Any changes to libc/stdlib/mallocr.c
   should be reflected here as well.  */

/* Preliminaries */

#ifndef __STD_C
#ifdef __STDC__
#define __STD_C     1
#else
#if __cplusplus
#define __STD_C     1
#else
#define __STD_C     0
#endif /*__cplusplus*/
#endif /*__STDC__*/
#endif /*__STD_C*/

#ifndef Void_t
#if __STD_C
#define Void_t      void
#else
#define Void_t      char
#endif
#endif /*Void_t*/

#if __STD_C
#include <stddef.h>   /* for size_t */
#else
#include <sys/types.h>
#endif

#ifdef __cplusplus
extern "C" {
#endif

#include <sys/config.h>

/*
  In newlib, all the publicly visible routines take a reentrancy
  pointer.  We don't currently do anything much with it, but we do
  pass it to the lock routine.
 */

#include <reent.h>
#include <string.h>
#include <malloc.h>

#define MALLOC_LOCK __malloc_lock(reent_ptr)
#define MALLOC_UNLOCK __malloc_unlock(reent_ptr)

#ifdef SMALL_MEMORY
#define malloc_getpagesize (128)
#else
#define malloc_getpagesize (4096)
#endif

#if __STD_C
extern void __malloc_lock(struct _reent *);
extern void __malloc_unlock(struct _reent *);
#else
extern void __malloc_lock();
extern void __malloc_unlock();
#endif

#if __STD_C
#define RARG struct _reent *reent_ptr,
#define RONEARG struct _reent *reent_ptr
#else
#define RARG reent_ptr
#define RONEARG reent_ptr
#define RDECL struct _reent *reent_ptr;
#endif

#define RCALL reent_ptr,
#define RONECALL reent_ptr

/*
   Define MALLOC_LOCK and MALLOC_UNLOCK to C expressions to run to
   lock and unlock the malloc data structures.  MALLOC_LOCK may be
   called recursively.
 */

#ifndef MALLOC_LOCK
#define MALLOC_LOCK
#endif

#ifndef MALLOC_UNLOCK
#define MALLOC_UNLOCK
#endif

/*
  INTERNAL_SIZE_T is the word-size used for internal bookkeeping
  of chunk sizes. On a 64-bit machine, you can reduce malloc
  overhead by defining INTERNAL_SIZE_T to be a 32 bit `unsigned int'
  at the expense of not being able to handle requests greater than
  2^31. This limitation is hardly ever a concern; you are encouraged
  to set this. However, the default version is the same as size_t.
*/

#ifndef INTERNAL_SIZE_T
#define INTERNAL_SIZE_T size_t
#endif

/*
  Following is needed on implementations whereby long > size_t.
  The problem is caused because the code performs subtractions of
  size_t values and stores the result in long values.  In the case
  where long > size_t and the first value is actually less than
  the second value, the resultant value is positive.  For example,
  (long)(x - y) where x = 0 and y is 1 ends up being 0x00000000FFFFFFFF
  which is 2^32 - 1 instead of 0xFFFFFFFFFFFFFFFF (-1).  This is due to
  the fact that assignment from unsigned to signed won't sign extend.
*/

#ifdef SIZE_T_SMALLER_THAN_LONG
#define long_sub_size_t(x, y) ( (x < y) ? -((long)(y - x)) : (x - y) )
#else
#define long_sub_size_t(x, y) ( (long)(x - y) )
#endif
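
/* Editor's worked example (an illustration, not in the original source,
   assuming 32-bit size_t and 64-bit long): with x = 0 and y = 1, the
   naive (long)(x - y) first wraps to the 32-bit unsigned value
   0xFFFFFFFF and then zero-extends to 0x00000000FFFFFFFF, i.e.
   +4294967295.  The guarded macro instead takes the (x < y) branch and
   yields -((long)(y - x)) = -1, the mathematically correct result. */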

/*
  REALLOC_ZERO_BYTES_FREES should be set if a call to
  realloc with zero bytes should be the same as a call to free.
  Some people think it should.  Otherwise, since this malloc
  returns a unique pointer for malloc(0), so does realloc(p, 0).
*/

/* The following macros are only invoked with (2n+1)-multiples of
   INTERNAL_SIZE_T units, with a positive integer n. This is exploited
   for fast inline execution when n is small. */

#define MALLOC_ZERO(charp, nbytes)                                            \
do {                                                                          \
  INTERNAL_SIZE_T mzsz = (nbytes);                                            \
  if(mzsz <= 9*sizeof(mzsz)) {                                                \
    INTERNAL_SIZE_T* mz = (INTERNAL_SIZE_T*) (charp);                         \
    if(mzsz >= 5*sizeof(mzsz)) {     *mz++ = 0;                               \
                                     *mz++ = 0;                               \
      if(mzsz >= 7*sizeof(mzsz)) {   *mz++ = 0;                               \
                                     *mz++ = 0;                               \
        if(mzsz >= 9*sizeof(mzsz)) { *mz++ = 0;                               \
                                     *mz++ = 0; }}}                           \
                                     *mz++ = 0;                               \
                                     *mz++ = 0;                               \
                                     *mz   = 0;                               \
  } else memset((charp), 0, mzsz);                                            \
} while(0)

#define MALLOC_COPY(dest,src,nbytes)                                          \
do {                                                                          \
  INTERNAL_SIZE_T mcsz = (nbytes);                                            \
  if(mcsz <= 9*sizeof(mcsz)) {                                                \
    INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) (src);                        \
    INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) (dest);                       \
    if(mcsz >= 5*sizeof(mcsz)) {     *mcdst++ = *mcsrc++;                     \
                                     *mcdst++ = *mcsrc++;                     \
      if(mcsz >= 7*sizeof(mcsz)) {   *mcdst++ = *mcsrc++;                     \
                                     *mcdst++ = *mcsrc++;                     \
        if(mcsz >= 9*sizeof(mcsz)) { *mcdst++ = *mcsrc++;                     \
                                     *mcdst++ = *mcsrc++; }}}                 \
                                     *mcdst++ = *mcsrc++;                     \
                                     *mcdst++ = *mcsrc++;                     \
                                     *mcdst   = *mcsrc  ;                     \
  } else memcpy(dest, src, mcsz);                                             \
} while(0)
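
/* Editor's illustration of the unrolling above (not in the original
   source): for nbytes equal to 7*sizeof(INTERNAL_SIZE_T), i.e. n = 3,
   MALLOC_COPY takes the inline path, the >= 5 and >= 7 guards each
   contribute two word copies, the >= 9 guard is skipped, and the three
   unconditional copies at the end finish the job: 2 + 2 + 3 = 7 words
   moved with no memcpy call.  Anything above 9 words falls through to
   memcpy. */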

#define vECCALLOc       _vec_calloc_r
#define fREe            _free_r
#define mEMALIGn        _memalign_r
#define vECREALLOc      _vec_realloc_r
#
#if __STD_C

Void_t* vECREALLOc(RARG Void_t*, size_t);
Void_t* vECCALLOc(RARG size_t, size_t);
#else
Void_t* vECREALLOc();
Void_t* vECCALLOc();
#endif

#ifdef __cplusplus
};  /* end of extern "C" */
#endif

/*
  Type declarations
*/

struct malloc_chunk
{
  INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */
  INTERNAL_SIZE_T size;      /* Size in bytes, including overhead. */
  struct malloc_chunk* fd;   /* double links -- used only if free. */
  struct malloc_chunk* bk;
};

typedef struct malloc_chunk* mchunkptr;
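
/* Editor's sketch of the boundary-tag layout (an illustration inferred
   from the declarations above and the chunk2mem/mem2chunk macros below;
   not in the original source):

   chunk-> +----------------------------------------+
           | prev_size: size of the previous chunk  |
           |            (meaningful only while that |
           |            chunk is free)              |
           +----------------------------------------+
           | size: this chunk's size, with the      |
           |       PREV_INUSE/IS_MMAPPED flag bits  |
   mem->   +----------------------------------------+
           | user data; while the chunk is free,    |
           | fd/bk overlay this space to link the   |
           | chunk into a doubly linked free list   |
           +----------------------------------------+
*/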

/*  sizes, alignments */

#define SIZE_SZ                (sizeof(INTERNAL_SIZE_T))
#define MALLOC_ALIGN           16
#define MALLOC_ALIGNMENT       16
#define MALLOC_ALIGN_MASK      (MALLOC_ALIGNMENT - 1)
#define MINSIZE                (sizeof(struct malloc_chunk))

/* conversion from malloc headers to user pointers, and back */

#define chunk2mem(p)   ((Void_t*)((char*)(p) + 2*SIZE_SZ))
#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))

/* pad request bytes into a usable size */

#define request2size(req) \
 (((long)((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) < \
  (long)(MINSIZE + MALLOC_ALIGN_MASK)) ? ((MINSIZE + MALLOC_ALIGN_MASK) & ~(MALLOC_ALIGN_MASK)) : \
   (((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) & ~(MALLOC_ALIGN_MASK)))
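
/* Editor's worked example (an illustration, not in the original source,
   assuming 32-bit INTERNAL_SIZE_T and 32-bit pointers, so SIZE_SZ = 4,
   MINSIZE = 16, MALLOC_ALIGN_MASK = 15):

     request2size(20): 20 + 4 + 15 = 39 >= 16 + 15 = 31,
                       so the result is 39 & ~15 = 32
     request2size(1):  1 + 4 + 15 = 20 <  31, so the minimum-size
                       branch yields (16 + 15) & ~15 = 16

   Every usable size is therefore a 16-byte-aligned amount large enough
   for the size overhead plus a free chunk's link fields. */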

/* Check if m has acceptable alignment */

#define aligned_OK(m)    (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0)

/*
  Physical chunk operations
*/

/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */

#define PREV_INUSE 0x1

/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */

#define IS_MMAPPED 0x2

/* Bits to mask off when extracting size */

#define SIZE_BITS (PREV_INUSE|IS_MMAPPED)

/* Ptr to next physical malloc_chunk. */

#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~PREV_INUSE) ))

/* Ptr to previous physical malloc_chunk */

#define prev_chunk(p)\
   ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))

/* Treat space at ptr + offset as a chunk */

#define chunk_at_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))

/*
  Dealing with use bits
*/

/* extract p's inuse bit */

#define inuse(p)\
((((mchunkptr)(((char*)(p))+((p)->size & ~PREV_INUSE)))->size) & PREV_INUSE)

/* extract inuse bit of previous chunk */

#define prev_inuse(p)  ((p)->size & PREV_INUSE)

/* check for mmap()'ed chunk */

#define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)

/* set/clear chunk as in use without otherwise disturbing */

#define set_inuse(p)\
((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size |= PREV_INUSE

#define clear_inuse(p)\
((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size &= ~(PREV_INUSE)

/* check/set/clear inuse bits in known places */

#define inuse_bit_at_offset(p, s)\
 (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)

#define set_inuse_bit_at_offset(p, s)\
 (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)

#define clear_inuse_bit_at_offset(p, s)\
 (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))
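
/* Editor's note on the indirection above (not in the original source):
   a chunk does not record its own in-use state.  The PREV_INUSE bit in
   a chunk's size field describes the chunk physically *before* it, so
   inuse(p) must step forward by (p->size & ~PREV_INUSE) bytes to the
   next chunk and read that chunk's PREV_INUSE bit; set_inuse(p) and
   clear_inuse(p) write the same bit in the same place. */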

/*
  Dealing with size fields
*/

/* Get size, ignoring use bits */

#define chunksize(p)          ((p)->size & ~(SIZE_BITS))

/* Set size at head, without disturbing its use bit */

#define set_head_size(p, s)   ((p)->size = (((p)->size & PREV_INUSE) | (s)))

/* Set size/use ignoring previous bits in header */

#define set_head(p, s)        ((p)->size = (s))

#ifdef DEFINE_VECREALLOC

#if __STD_C
Void_t* vECREALLOc(RARG Void_t* oldmem, size_t bytes)
#else
Void_t* vECREALLOc(RARG oldmem, bytes) RDECL Void_t* oldmem; size_t bytes;
#endif
{
  INTERNAL_SIZE_T    nb;      /* padded request size */

  mchunkptr oldp;             /* chunk corresponding to oldmem */
  INTERNAL_SIZE_T    oldsize; /* its size */

  mchunkptr newp;             /* chunk to return */
  INTERNAL_SIZE_T    newsize; /* its size */
  Void_t*   newmem;           /* corresponding user mem */

  mchunkptr remainder;        /* holds split off extra space from newp */
  INTERNAL_SIZE_T  remainder_size;   /* its size */

#ifdef REALLOC_ZERO_BYTES_FREES
  if (bytes == 0) { fREe(RCALL oldmem); return 0; }
#endif

  /* realloc of null is supposed to be same as malloc */
  if (oldmem == 0) return mEMALIGn(RCALL 16, bytes);

  MALLOC_LOCK;

  newp    = oldp    = mem2chunk(oldmem);
  newsize = oldsize = chunksize(oldp);

  nb = request2size(bytes);

  if ((long)(oldsize) < (long)(nb))
  {
    /* Must allocate */

    newmem = mEMALIGn (RCALL 16, bytes);

    if (newmem == 0)  /* propagate failure */
    {
      MALLOC_UNLOCK;
      return 0;
    }

    /* copy, free, and exit */
    MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
    fREe(RCALL oldmem);
    MALLOC_UNLOCK;
    return newmem;
  }

  remainder_size = long_sub_size_t(newsize, nb);

  if (remainder_size >= (long)MINSIZE) /* split off remainder */
  {
    remainder = chunk_at_offset(newp, nb);
    set_head_size(newp, nb);
    set_head(remainder, remainder_size | PREV_INUSE);
    set_inuse_bit_at_offset(remainder, remainder_size);
    fREe(RCALL chunk2mem(remainder)); /* let free() deal with it */
  }
  else
  {
    set_head_size(newp, newsize);
    set_inuse_bit_at_offset(newp, newsize);
  }

  MALLOC_UNLOCK;
  return chunk2mem(newp);
}

#endif /* DEFINE_VECREALLOC */
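
/* Editor's usage sketch (hypothetical caller, not part of this file):
   with DEFINE_VECREALLOC set, the function above is compiled as
   _vec_realloc_r and behaves like realloc except that the result is
   16-byte aligned, as PowerPC AltiVec vector types expect.

     #include <reent.h>
     extern void* _vec_realloc_r(struct _reent*, void*, size_t);

     void* grow(struct _reent* r, void* buf, size_t n)
     {
       return _vec_realloc_r(r, buf, n);   // 16-byte-aligned result
     }
*/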

#ifdef DEFINE_VECCALLOC

/*
  vec_calloc obtains a 16-byte aligned chunk via mEMALIGn, then zeroes
  out the allocated chunk.
*/

#if __STD_C
Void_t* vECCALLOc(RARG size_t n, size_t elem_size)
#else
Void_t* vECCALLOc(RARG n, elem_size) RDECL size_t n; size_t elem_size;
#endif
{
  INTERNAL_SIZE_T sz = n * elem_size;

  Void_t* mem;

  mem = mEMALIGn (RCALL 16, sz);

  if (mem == 0)
  {
    return 0;
  }

  MALLOC_ZERO(mem, sz);
  return mem;
}

#endif /* DEFINE_VECCALLOC */
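
/* Editor's usage sketch (hypothetical caller, not part of this file):

     #include <reent.h>
     extern void* _vec_calloc_r(struct _reent*, size_t, size_t);

     float* v = (float*) _vec_calloc_r(_REENT, 4, 4 * sizeof(float));
     // v is 16-byte aligned and zero-filled, suitable for vector loads

   Note that, as written above, the n * elem_size product is not
   checked for overflow, matching the historical mallocr.c behavior. */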