OpenCores
URL: https://opencores.org/ocsvn/openrisc/openrisc/trunk

Subversion Repositories: openrisc

openrisc/trunk/gnu-dev/or1k-gcc/libmudflap/mf-hooks1.c (rev 775)

/* Mudflap: narrow-pointer bounds-checking by tree rewriting.
   Copyright (C) 2002, 2003, 2004, 2009, 2011 Free Software Foundation, Inc.
   Contributed by Frank Ch. Eigler <fche@redhat.com>
   and Graydon Hoare <graydon@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

#include "config.h"

#ifndef HAVE_SOCKLEN_T
#define socklen_t int
#endif


/* These attempt to coax various unix flavours to declare all our
   needed tidbits in the system headers.  */
#if !defined(__FreeBSD__) && !defined(__APPLE__)
#define _POSIX_SOURCE
#endif /* Some BSDs break <sys/socket.h> if this is defined. */
#define _GNU_SOURCE
#define _XOPEN_SOURCE
#define _BSD_TYPES
#define __EXTENSIONS__
#define _ALL_SOURCE
#define _LARGE_FILE_API
#define _XOPEN_SOURCE_EXTENDED 1

#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>
#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <time.h>

#include "mf-runtime.h"
#include "mf-impl.h"

#ifdef _MUDFLAP
#error "Do not compile this file with -fmudflap!"
#endif


/* Memory allocation related hook functions.  Some of these are
   intercepted via linker wrapping or symbol interposition.  Others
   use plain macros in mf-runtime.h.  */


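/* Note (editorial, hedged): WRAPPER, DECLARE, BEGIN_PROTECT and
   CALL_REAL are macros from mf-impl.h.  Roughly, WRAPPER emits the
   interposing definition of a libc entry point, DECLARE declares the
   underlying real function, and CALL_REAL invokes it, resolved via
   dlsym in shared (PIC) builds or via the linker's --wrap aliases in
   static builds.  The __mf_0fn_* variants below serve allocations
   that arrive before that resolution has completed (dlsym itself may
   allocate), handing out a few static buffers instead.  */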
#if PIC

enum { BS = 4096, NB = 10 };
static char __mf_0fn_bufs[NB][BS];
static unsigned __mf_0fn_bufs_used[NB];


/* A special bootstrap variant. */
void *
__mf_0fn_malloc (size_t c)
{
  unsigned i;

  for (i=0; i<NB; i++)
    {
      if (! __mf_0fn_bufs_used[i] && c < BS)
        {
          __mf_0fn_bufs_used[i] = 1;
          return & __mf_0fn_bufs[i][0];
        }
    }
  return NULL;
}
#endif


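/* Note (editorial): __mf_opts.crumple_zone is the runtime's padding
   amount in bytes.  The wrapper below over-allocates by two such
   zones and registers only the middle part, so a successful
   allocation is laid out as

       | crumple zone | c bytes, registered, returned to caller | crumple zone |
       ^ pointer from the real malloc

   Accesses that stray into either pad fall outside every registered
   object and are reported as violations.  */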
#undef malloc
WRAPPER(void *, malloc, size_t c)
{
  size_t size_with_crumple_zones;
  DECLARE(void *, malloc, size_t c);
  void *result;
  BEGIN_PROTECT (malloc, c);

  size_with_crumple_zones =
    CLAMPADD(c,CLAMPADD(__mf_opts.crumple_zone,
                        __mf_opts.crumple_zone));
  BEGIN_MALLOC_PROTECT ();
  result = (char *) CALL_REAL (malloc, size_with_crumple_zones);
  END_MALLOC_PROTECT ();

  if (LIKELY(result))
    {
      result += __mf_opts.crumple_zone;
      __mf_register (result, c, __MF_TYPE_HEAP, "malloc region");
      /* XXX: register __MF_TYPE_NOACCESS for crumple zones.  */
    }

  return result;
}


#ifdef PIC
/* A special bootstrap variant. */
void *
__mf_0fn_calloc (size_t c, size_t n)
{
  return __mf_0fn_malloc (c * n);
}
#endif


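/* Editorial sketch: the c * n product in the wrapper below can
   overflow, as its XXX: CLAMPMUL notes admit; only the additions are
   clamped by CLAMPADD.  A saturating multiply of the kind those notes
   ask for might look like the following (clamp_mul is a hypothetical
   name, not part of mf-impl.h, and the block is disabled so the
   file's behavior is unchanged).  */
#if 0
static size_t
clamp_mul (size_t a, size_t b)
{
  /* Saturate at SIZE_MAX instead of wrapping around.  */
  if (b != 0 && a > (size_t) -1 / b)
    return (size_t) -1;
  return a * b;
}
#endif
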
#undef calloc
WRAPPER(void *, calloc, size_t c, size_t n)
{
  size_t size_with_crumple_zones;
  DECLARE(void *, calloc, size_t, size_t);
  DECLARE(void *, malloc, size_t);
  DECLARE(void *, memset, void *, int, size_t);
  char *result;
  BEGIN_PROTECT (calloc, c, n);

  size_with_crumple_zones =
    CLAMPADD((c * n), /* XXX: CLAMPMUL */
             CLAMPADD(__mf_opts.crumple_zone,
                      __mf_opts.crumple_zone));
  BEGIN_MALLOC_PROTECT ();
  result = (char *) CALL_REAL (malloc, size_with_crumple_zones);
  END_MALLOC_PROTECT ();

  if (LIKELY(result))
    memset (result, 0, size_with_crumple_zones);

  if (LIKELY(result))
    {
      result += __mf_opts.crumple_zone;
      __mf_register (result, c*n /* XXX: clamp */, __MF_TYPE_HEAP_I, "calloc region");
      /* XXX: register __MF_TYPE_NOACCESS for crumple zones.  */
    }

  return result;
}


#if PIC
/* A special bootstrap variant. */
void *
__mf_0fn_realloc (void *buf, size_t c)
{
  return NULL;
}
#endif


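/* Note (editorial): the wrapper below holds the runtime lock and
   enters the reentrant state so that its unregister/reregister pair
   is not itself checked.  It also suppresses __mf_opts.wipe_heap (the
   -wipe-heap runtime option) for the duration: the real realloc may
   already have freed the old block, and its contents must carry over
   into the new one, so wiping at unregister time would be both unsafe
   and wrong.  */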
#undef realloc
WRAPPER(void *, realloc, void *buf, size_t c)
{
  DECLARE(void * , realloc, void *, size_t);
  size_t size_with_crumple_zones;
  char *base = buf;
  unsigned saved_wipe_heap;
  char *result;
  BEGIN_PROTECT (realloc, buf, c);

  if (LIKELY(buf))
    base -= __mf_opts.crumple_zone;

  size_with_crumple_zones =
    CLAMPADD(c, CLAMPADD(__mf_opts.crumple_zone,
                         __mf_opts.crumple_zone));
  BEGIN_MALLOC_PROTECT ();
  result = (char *) CALL_REAL (realloc, base, size_with_crumple_zones);
  END_MALLOC_PROTECT ();

  /* Ensure heap wiping doesn't occur during this peculiar
     unregister/reregister pair.  */
  LOCKTH ();
  __mf_set_state (reentrant);
  saved_wipe_heap = __mf_opts.wipe_heap;
  __mf_opts.wipe_heap = 0;

  if (LIKELY(buf))
    __mfu_unregister (buf, 0, __MF_TYPE_HEAP_I);
  /* NB: underlying region may have been __MF_TYPE_HEAP. */

  if (LIKELY(result))
    {
      result += __mf_opts.crumple_zone;
      __mfu_register (result, c, __MF_TYPE_HEAP_I, "realloc region");
      /* XXX: register __MF_TYPE_NOACCESS for crumple zones.  */
    }

  /* Restore previous setting.  */
  __mf_opts.wipe_heap = saved_wipe_heap;

  __mf_set_state (active);
  UNLOCKTH ();

  return result;
}


#if PIC
/* A special bootstrap variant. */
void
__mf_0fn_free (void *buf)
{
  return;
}
#endif

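/* Note (editorial): with free_queue_length > 0, the wrapper below
   quarantines each freed block in a circular queue and only releases
   the block it displaces.  A dangling pointer dereferenced shortly
   after free() therefore still points at unregistered, not-yet-recycled
   memory, and the access is reliably reported rather than silently
   landing inside a reused allocation.  */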
#undef free
WRAPPER(void, free, void *buf)
{
  /* Use a circular queue to delay some number (__mf_opts.free_queue_length) of free()s.  */
  static void *free_queue [__MF_FREEQ_MAX];
  static unsigned free_ptr = 0;
  static int freeq_initialized = 0;
  DECLARE(void, free, void *);

  BEGIN_PROTECT (free, buf);

  if (UNLIKELY(buf == NULL))
    return;

#if PIC
  /* Check whether the given buffer might have come from a
     __mf_0fn_malloc/calloc call that for whatever reason was not
     redirected back to __mf_0fn_free.  If so, we just ignore the
     call. */
  if (UNLIKELY((uintptr_t) buf >= (uintptr_t) __mf_0fn_bufs &&
               (uintptr_t) buf < ((uintptr_t) __mf_0fn_bufs + sizeof(__mf_0fn_bufs))))
  {
    VERBOSE_TRACE ("skipping free of boot (0fn) alloc buffer %p\n", buf);
    return;
  }
#endif

  LOCKTH ();
  if (UNLIKELY(!freeq_initialized))
    {
      memset (free_queue, 0,
                     __MF_FREEQ_MAX * sizeof (void *));
      freeq_initialized = 1;
    }
  UNLOCKTH ();

  __mf_unregister (buf, 0, __MF_TYPE_HEAP_I);
  /* NB: underlying region may have been __MF_TYPE_HEAP. */

  if (UNLIKELY(__mf_opts.free_queue_length > 0))
    {
      char *freeme = NULL;
      LOCKTH ();
      if (free_queue [free_ptr] != NULL)
        {
          freeme = free_queue [free_ptr];
          freeme -= __mf_opts.crumple_zone;
        }
      free_queue [free_ptr] = buf;
      free_ptr = (free_ptr == (__mf_opts.free_queue_length-1) ? 0 : free_ptr + 1);
      UNLOCKTH ();
      if (freeme)
        {
          if (__mf_opts.trace_mf_calls)
            {
              VERBOSE_TRACE ("freeing deferred pointer %p (crumple %u)\n",
                             (void *) freeme,
                             __mf_opts.crumple_zone);
            }
          BEGIN_MALLOC_PROTECT ();
          CALL_REAL (free, freeme);
          END_MALLOC_PROTECT ();
        }
    }
  else
    {
      /* back pointer up a bit to the beginning of crumple zone */
      char *base = (char *)buf;
      base -= __mf_opts.crumple_zone;
      if (__mf_opts.trace_mf_calls)
        {
          VERBOSE_TRACE ("freeing pointer %p = %p - %u\n",
                         (void *) base,
                         (void *) buf,
                         __mf_opts.crumple_zone);
        }
      BEGIN_MALLOC_PROTECT ();
      CALL_REAL (free, base);
      END_MALLOC_PROTECT ();
    }
}


/* We can only wrap mmap if the target supports it.  Likewise for munmap.
   We assume we have both if we have mmap.  */
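/* Note (editorial): because each page is registered separately below,
   a later munmap() of a page-aligned subrange unregisters exactly the
   pages it removes.  For instance, with ps = getpagesize (),

       void *p = mmap (NULL, 4 * ps, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
       munmap ((char *) p + ps, ps);

   unmaps, and unregisters, only the second page.  */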
#ifdef HAVE_MMAP

#if PIC
/* A special bootstrap variant. */
void *
__mf_0fn_mmap (void *start, size_t l, int prot, int f, int fd, off_t off)
{
#if defined(__FreeBSD__)
  if (f == 0x1000 && fd == -1 && prot == 0 && off == 0)
    return 0;
#endif /* Ignore red zone allocation request for initial thread's stack. */

  return (void *) -1;
}
#endif


#undef mmap
WRAPPER(void *, mmap,
        void  *start,  size_t length, int prot,
        int flags, int fd, off_t offset)
{
  DECLARE(void *, mmap, void *, size_t, int,
                            int, int, off_t);
  void *result;
  BEGIN_PROTECT (mmap, start, length, prot, flags, fd, offset);

  result = CALL_REAL (mmap, start, length, prot,
                        flags, fd, offset);

  /*
  VERBOSE_TRACE ("mmap (%08lx, %08lx, ...) => %08lx\n",
                 (uintptr_t) start, (uintptr_t) length,
                 (uintptr_t) result);
  */

  if (result != (void *)-1)
    {
      /* Register each page as a heap object.  Why not register it all
         as a single segment?  That's so that a later munmap() call
         can unmap individual pages.  XXX: would __MF_TYPE_GUESS make
         this more automatic?  */
      size_t ps = getpagesize ();
      uintptr_t base = (uintptr_t) result;
      uintptr_t offset;

      for (offset=0; offset<length; offset+=ps)
        {
          /* XXX: We could map PROT_NONE to __MF_TYPE_NOACCESS. */
          /* XXX: Unaccessed HEAP pages are reported as leaks.  Is this
             appropriate for unaccessed mmap pages? */
          __mf_register ((void *) CLAMPADD (base, offset), ps,
                         __MF_TYPE_HEAP_I, "mmap page");
        }
    }

  return result;
}


#if PIC
/* A special bootstrap variant. */
int
__mf_0fn_munmap (void *start, size_t length)
{
  return -1;
}
#endif


#undef munmap
WRAPPER(int , munmap, void *start, size_t length)
{
  DECLARE(int, munmap, void *, size_t);
  int result;
  BEGIN_PROTECT (munmap, start, length);

  result = CALL_REAL (munmap, start, length);

  /*
  VERBOSE_TRACE ("munmap (%08lx, %08lx, ...) => %08lx\n",
                 (uintptr_t) start, (uintptr_t) length,
                 (uintptr_t) result);
  */

  if (result == 0)
    {
      /* Unregister each page as a heap object.  */
      size_t ps = getpagesize ();
      uintptr_t base = (uintptr_t) start & (~ (ps - 1)); /* page align */
      uintptr_t offset;

      for (offset=0; offset<length; offset+=ps)
        __mf_unregister ((void *) CLAMPADD (base, offset), ps, __MF_TYPE_HEAP_I);
    }
  return result;
}
#endif /* HAVE_MMAP */


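/* Note (editorial): mmap64 is the explicit large-file variant of
   mmap, taking a 64-bit off64_t offset; the wrapper below repeats the
   mmap wrapper's per-page registration unchanged.  */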
#ifdef HAVE_MMAP64
#if PIC
/* A special bootstrap variant. */
void *
__mf_0fn_mmap64 (void *start, size_t l, int prot, int f, int fd, off64_t off)
{
  return (void *) -1;
}
#endif


#undef mmap64
WRAPPER(void *, mmap64,
        void  *start,  size_t length, int prot,
        int flags, int fd, off64_t offset)
{
  DECLARE(void *, mmap64, void *, size_t, int,
                            int, int, off64_t);
  void *result;
  BEGIN_PROTECT (mmap64, start, length, prot, flags, fd, offset);

  result = CALL_REAL (mmap64, start, length, prot,
                        flags, fd, offset);

  /*
  VERBOSE_TRACE ("mmap64 (%08lx, %08lx, ...) => %08lx\n",
                 (uintptr_t) start, (uintptr_t) length,
                 (uintptr_t) result);
  */

  if (result != (void *)-1)
    {
      /* Register each page as a heap object.  Why not register it all
         as a single segment?  That's so that a later munmap() call
         can unmap individual pages.  XXX: would __MF_TYPE_GUESS make
         this more automatic?  */
      size_t ps = getpagesize ();
      uintptr_t base = (uintptr_t) result;
      uintptr_t offset;

      for (offset=0; offset<length; offset+=ps)
        {
          /* XXX: We could map PROT_NONE to __MF_TYPE_NOACCESS. */
          /* XXX: Unaccessed HEAP pages are reported as leaks.  Is this
             appropriate for unaccessed mmap pages? */
          __mf_register ((void *) CLAMPADD (base, offset), ps,
                         __MF_TYPE_HEAP_I, "mmap64 page");
        }
    }

  return result;
}
#endif /* HAVE_MMAP64 */


/* This wrapper is a little different: it is also called indirectly
   from __mf_fini, to clean up pending allocations.  */
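/* Note (editorial): alloca cannot be wrapped by calling the real
   alloca, since the wrapper's own frame would vanish on return.
   Instead, the function below services each request from the heap and
   records __builtin_frame_address (0) alongside the block; a later
   call arriving at a shallower frame proves the intervening frames
   have returned, so their blocks are freed then.  __mf_fini uses the
   same path to flush whatever is still pending at shutdown.  */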
void *
__mf_wrap_alloca_indirect (size_t c)
{
  DECLARE (void *, malloc, size_t);
  DECLARE (void, free, void *);

  /* This struct, a linked list, tracks alloca'd objects.  The newest
     object is at the head of the list.  If we detect that we've
     popped a few levels of stack, then the listed objects are freed
     as needed.  NB: The tracking struct is allocated with
     real_malloc; the user data with wrap_malloc.
  */
  struct alloca_tracking { void *ptr; void *stack; struct alloca_tracking* next; };
  static struct alloca_tracking *alloca_history = NULL;

  void *stack = __builtin_frame_address (0);
  void *result;
  struct alloca_tracking *track;

  TRACE ("%s\n", __PRETTY_FUNCTION__);
  VERBOSE_TRACE ("alloca stack level %p\n", (void *) stack);

  /* XXX: thread locking! */

  /* Free any previously alloca'd blocks that belong to deeper-nested functions,
     which must therefore have exited by now.  */

#define DEEPER_THAN < /* XXX: for x86; steal find_stack_direction() from libiberty/alloca.c */

  while (alloca_history &&
         ((uintptr_t) alloca_history->stack DEEPER_THAN (uintptr_t) stack))
    {
      struct alloca_tracking *next = alloca_history->next;
      __mf_unregister (alloca_history->ptr, 0, __MF_TYPE_HEAP);
      BEGIN_MALLOC_PROTECT ();
      CALL_REAL (free, alloca_history->ptr);
      CALL_REAL (free, alloca_history);
      END_MALLOC_PROTECT ();
      alloca_history = next;
    }

  /* Allocate new block.  */
  result = NULL;
  if (LIKELY (c > 0)) /* alloca(0) causes no allocation.  */
    {
      BEGIN_MALLOC_PROTECT ();
      track = (struct alloca_tracking *) CALL_REAL (malloc,
                                                    sizeof (struct alloca_tracking));
      END_MALLOC_PROTECT ();
      if (LIKELY (track != NULL))
        {
          BEGIN_MALLOC_PROTECT ();
          result = CALL_REAL (malloc, c);
          END_MALLOC_PROTECT ();
          if (UNLIKELY (result == NULL))
            {
              BEGIN_MALLOC_PROTECT ();
              CALL_REAL (free, track);
              END_MALLOC_PROTECT ();
              /* Too bad.  XXX: What about errno?  */
            }
          else
            {
              __mf_register (result, c, __MF_TYPE_HEAP, "alloca region");
              track->ptr = result;
              track->stack = stack;
              track->next = alloca_history;
              alloca_history = track;
            }
        }
    }

  return result;
}


#undef alloca
WRAPPER(void *, alloca, size_t c)
{
  return __mf_wrap_alloca_indirect (c);
}
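/* Note (editorial): this emulation changes alloca's lifetime
   semantics slightly.  A block is not released when its owning
   function returns, but at the next wrapped alloca call made from a
   shallower frame, or at shutdown via __mf_fini; and the DEEPER_THAN
   comparison above assumes a downward-growing stack, as its XXX note
   concedes.  */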
 
