/* Mudflap: narrow-pointer bounds-checking by tree rewriting.
   Copyright (C) 2002, 2003, 2004, 2005, 2006, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Frank Ch. Eigler <fche@redhat.com>
   and Graydon Hoare <graydon@redhat.com>
   Splay Tree code originally by Mark Mitchell <mark@markmitchell.com>,
   adapted from libiberty.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
31
 
32
/* These attempt to coax various unix flavours to declare all our
33
   needed tidbits in the system headers.  */
34
#if !defined(__FreeBSD__) && !defined(__APPLE__)
35
#define _POSIX_SOURCE
36
#endif /* Some BSDs break <sys/socket.h> if this is defined. */
37
#define _GNU_SOURCE
38
#define _XOPEN_SOURCE
39
#define _BSD_TYPES
40
#define __EXTENSIONS__
41
#define _ALL_SOURCE
42
#define _LARGE_FILE_API
43
#define _XOPEN_SOURCE_EXTENDED 1
44
 
45
#include <stdio.h>
46
#include <stdlib.h>
47
#include <sys/types.h>
48
#include <sys/time.h>
49
#include <time.h>
50
#include <unistd.h>
51
#ifdef HAVE_EXECINFO_H
52
#include <execinfo.h>
53
#endif
54
#ifdef HAVE_SIGNAL_H
55
#include <signal.h>
56
#endif
57
#include <assert.h>
58
 
59
#include <string.h>
60
#include <limits.h>
61
#include <sys/types.h>
62
#include <signal.h>
63
#include <errno.h>
64
#include <ctype.h>
65
 
66
#include "mf-runtime.h"
67
#include "mf-impl.h"
68
 
69
 
70
/* ------------------------------------------------------------------------ */
71
/* Splay-tree implementation.  */
72
 
73
typedef uintptr_t mfsplay_tree_key;
74
typedef void *mfsplay_tree_value;
75
 
76
/* Forward declaration for a node in the tree.  */
77
typedef struct mfsplay_tree_node_s *mfsplay_tree_node;
78
 
79
/* The type of a function used to iterate over the tree.  */
80
typedef int (*mfsplay_tree_foreach_fn) (mfsplay_tree_node, void *);
81
 
82
/* The nodes in the splay tree.  */
83
struct mfsplay_tree_node_s
84
{
85
  /* Data.  */
86
  mfsplay_tree_key key;
87
  mfsplay_tree_value value;
88
  /* Children.  */
89
  mfsplay_tree_node left;
90
  mfsplay_tree_node right;
91
  /* XXX: The addition of a parent pointer may eliminate some recursion.  */
92
};
93
 
94
/* The splay tree itself.  */
95
struct mfsplay_tree_s
96
{
97
  /* The root of the tree.  */
98
  mfsplay_tree_node root;
99
 
100
  /* The last key value for which the tree has been splayed, but not
101
     since modified.  */
102
  mfsplay_tree_key last_splayed_key;
103
  int last_splayed_key_p;
104
 
105
  /* Statistics.  */
106
  unsigned num_keys;
107
 
108
  /* Traversal recursion control flags.  */
109
  unsigned max_depth;
110
  unsigned depth;
111
  unsigned rebalance_p;
112
};
113
typedef struct mfsplay_tree_s *mfsplay_tree;
114
 
115
static mfsplay_tree mfsplay_tree_new (void);
116
static mfsplay_tree_node mfsplay_tree_insert (mfsplay_tree, mfsplay_tree_key, mfsplay_tree_value);
117
static void mfsplay_tree_remove (mfsplay_tree, mfsplay_tree_key);
118
static mfsplay_tree_node mfsplay_tree_lookup (mfsplay_tree, mfsplay_tree_key);
119
static mfsplay_tree_node mfsplay_tree_predecessor (mfsplay_tree, mfsplay_tree_key);
120
static mfsplay_tree_node mfsplay_tree_successor (mfsplay_tree, mfsplay_tree_key);
121
static int mfsplay_tree_foreach (mfsplay_tree, mfsplay_tree_foreach_fn, void *);
122
static void mfsplay_tree_rebalance (mfsplay_tree sp);
123
 
124
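/* A minimal usage sketch of the splay-tree API above, as exercised by
   __mf_link_object and __mf_find_objects2 below.  Keys are object base
   addresses; `val' here is a placeholder, not a name used in this file:

       mfsplay_tree t = mfsplay_tree_new ();
       mfsplay_tree_insert (t, (mfsplay_tree_key) 0x1000, val);
       mfsplay_tree_node n = mfsplay_tree_lookup (t, (mfsplay_tree_key) 0x1000);
       n = mfsplay_tree_successor (t, (mfsplay_tree_key) 0x0fff);
       assert (n->key == 0x1000);

   predecessor/successor walk adjacent keys, which is what makes the
   overlap searches below possible.  */
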
/* ------------------------------------------------------------------------ */
/* Utility macros */

#define CTOR  __attribute__ ((constructor))
#define DTOR  __attribute__ ((destructor))


/* Codes to describe the context in which a violation occurs. */
#define __MF_VIOL_UNKNOWN 0
#define __MF_VIOL_READ 1
#define __MF_VIOL_WRITE 2
#define __MF_VIOL_REGISTER 3
#define __MF_VIOL_UNREGISTER 4
#define __MF_VIOL_WATCH 5

/* Protect against recursive calls. */

static void
begin_recursion_protect1 (const char *pf)
{
  if (__mf_get_state () == reentrant)
    {
      write (2, "mf: erroneous reentrancy detected in `", 38);
147
      write (2, pf, strlen(pf));
148
      write (2, "'\n", 2); \
149
      abort ();
150
    }
151
  __mf_set_state (reentrant);
}

#define BEGIN_RECURSION_PROTECT() \
  begin_recursion_protect1 (__PRETTY_FUNCTION__)

#define END_RECURSION_PROTECT() \
  __mf_set_state (active)

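/* Canonical usage pattern for the recursion guard, paired with the
   locking macros from mf-impl.h; every public entry point below
   (__mf_check, __mf_register, __mf_unregister, ...) follows it:

       LOCKTH ();
       BEGIN_RECURSION_PROTECT ();
       ... real work, via the unlocked __mfu_* variant ...
       END_RECURSION_PROTECT ();
       UNLOCKTH ();

   If the runtime re-enters itself while already in the `reentrant'
   state, begin_recursion_protect1 aborts instead of recursing.  */
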
/* ------------------------------------------------------------------------ */
/* Required globals.  */

#define LOOKUP_CACHE_MASK_DFL 1023
#define LOOKUP_CACHE_SIZE_MAX 65536 /* Allows max CACHE_MASK 0xFFFF */
#define LOOKUP_CACHE_SHIFT_DFL 2

struct __mf_cache __mf_lookup_cache [LOOKUP_CACHE_SIZE_MAX];
uintptr_t __mf_lc_mask = LOOKUP_CACHE_MASK_DFL;
unsigned char __mf_lc_shift = LOOKUP_CACHE_SHIFT_DFL;
#define LOOKUP_CACHE_SIZE (__mf_lc_mask + 1)

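/* A worked example of the cache geometry, assuming the index hash in
   mf-impl.h's __MF_CACHE_INDEX is essentially
   ((uintptr_t) ptr >> __mf_lc_shift) & __mf_lc_mask (an assumption;
   see that header): with the defaults shift=2 and mask=1023, a pointer
   0x80491f0 falls in slot (0x80491f0 >> 2) & 1023 == 124.  A larger
   shift makes nearby addresses share a slot; a larger mask enlarges
   the cache, up to LOOKUP_CACHE_SIZE_MAX entries.  */
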
struct __mf_options __mf_opts;
int __mf_starting_p = 1;

#ifdef LIBMUDFLAPTH
#if defined(HAVE_TLS) && !defined(USE_EMUTLS)
__thread enum __mf_state_enum __mf_state_1 = reentrant;
#endif
#else
enum __mf_state_enum __mf_state_1 = reentrant;
#endif

#ifdef LIBMUDFLAPTH
pthread_mutex_t __mf_biglock =
#ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
       PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
#else
       PTHREAD_MUTEX_INITIALIZER;
#endif
#endif

/* Use HAVE_PTHREAD_H here instead of LIBMUDFLAPTH, so that even
   the libmudflap.la (no threading support) can diagnose whether
   the application is linked with -lpthread.  See __mf_usage() below.  */
#if HAVE_PTHREAD_H
#ifdef _POSIX_THREADS
#pragma weak pthread_join
#else
#define pthread_join NULL
#endif
#endif


/* ------------------------------------------------------------------------ */
/* stats-related globals.  */

static unsigned long __mf_count_check;
static unsigned long __mf_lookup_cache_reusecount [LOOKUP_CACHE_SIZE_MAX];
static unsigned long __mf_count_register;
static unsigned long __mf_total_register_size [__MF_TYPE_MAX+1];
static unsigned long __mf_count_unregister;
static unsigned long __mf_total_unregister_size;
static unsigned long __mf_count_violation [__MF_VIOL_WATCH+1];
static unsigned long __mf_sigusr1_received;
static unsigned long __mf_sigusr1_handled;
/* not static */ unsigned long __mf_reentrancy;
#ifdef LIBMUDFLAPTH
/* not static */ unsigned long __mf_lock_contention;
#endif


/* ------------------------------------------------------------------------ */
/* mode-check-related globals.  */

typedef struct __mf_object
{
  uintptr_t low, high; /* __mf_register parameters */
  const char *name;
  char type; /* __MF_TYPE_something */
  char watching_p; /* Trigger a VIOL_WATCH on access? */
  unsigned read_count; /* Number of times __mf_check/read was called on this object.  */
  unsigned write_count; /* Likewise for __mf_check/write.  */
  unsigned liveness; /* A measure of recent checking activity.  */
  unsigned description_epoch; /* Last epoch __mf_describe_object printed this.  */

  uintptr_t alloc_pc;
  struct timeval alloc_time;
  char **alloc_backtrace;
  size_t alloc_backtrace_size;
#ifdef LIBMUDFLAPTH
  pthread_t alloc_thread;
#endif

  int deallocated_p;
  uintptr_t dealloc_pc;
  struct timeval dealloc_time;
  char **dealloc_backtrace;
  size_t dealloc_backtrace_size;
#ifdef LIBMUDFLAPTH
  pthread_t dealloc_thread;
#endif
} __mf_object_t;

/* Live objects: splay trees, separated by type, ordered on .low (base address).  */
/* Actually stored as static vars within lookup function below.  */

/* Dead objects: circular arrays; _MIN_CEM .. _MAX_CEM only */
static unsigned __mf_object_dead_head[__MF_TYPE_MAX_CEM+1]; /* next empty spot */
static __mf_object_t *__mf_object_cemetary[__MF_TYPE_MAX_CEM+1][__MF_PERSIST_MAX];


/* ------------------------------------------------------------------------ */
/* Forward function declarations */

void __mf_init () CTOR;
static void __mf_sigusr1_respond ();
static unsigned __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                                   __mf_object_t **objs, unsigned max_objs);
static unsigned __mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
                                    __mf_object_t **objs, unsigned max_objs, int type);
static unsigned __mf_find_dead_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                                        __mf_object_t **objs, unsigned max_objs);
static void __mf_adapt_cache ();
static void __mf_describe_object (__mf_object_t *obj);
static unsigned __mf_watch_or_not (void *ptr, size_t sz, char flag);
static mfsplay_tree __mf_object_tree (int type);
static void __mf_link_object (__mf_object_t *node);
static void __mf_unlink_object (__mf_object_t *node);


/* ------------------------------------------------------------------------ */
/* Configuration engine */

static void
__mf_set_default_options ()
{
  memset (& __mf_opts, 0, sizeof (__mf_opts));

  __mf_opts.adapt_cache = 1000003;
  __mf_opts.abbreviate = 1;
  __mf_opts.verbose_violations = 1;
  __mf_opts.free_queue_length = 4;
  __mf_opts.persistent_count = 100;
  __mf_opts.crumple_zone = 32;
  __mf_opts.backtrace = 4;
  __mf_opts.timestamps = 1;
  __mf_opts.mudflap_mode = mode_check;
  __mf_opts.violation_mode = viol_nop;
#ifdef HAVE___LIBC_FREERES
  __mf_opts.call_libc_freeres = 1;
#endif
  __mf_opts.heur_std_data = 1;
#ifdef LIBMUDFLAPTH
  __mf_opts.thread_stack = 0;
#endif

  /* PR41443: Beware that the above flags will be applied to
     setuid/setgid binaries, and cannot be overridden with
     $MUDFLAP_OPTIONS.  So the defaults must be non-exploitable.

     Should we consider making the default violation_mode something
     harsher than viol_nop?  OTOH, glibc's MALLOC_CHECK_ is disabled
     by default for these same programs. */
}

static struct mudoption
{
  char *name;
  char *description;
  enum
    {
      set_option,
      read_integer_option,
    } type;
  unsigned value;
  unsigned *target;
}
options [] =
  {
    {"mode-nop",
     "mudflaps do nothing",
     set_option, (unsigned)mode_nop, (unsigned *)&__mf_opts.mudflap_mode},
    {"mode-populate",
     "mudflaps populate object tree",
     set_option, (unsigned)mode_populate, (unsigned *)&__mf_opts.mudflap_mode},
    {"mode-check",
     "mudflaps check for memory violations",
     set_option, (unsigned)mode_check, (unsigned *)&__mf_opts.mudflap_mode},
    {"mode-violate",
     "mudflaps always cause violations (diagnostic)",
     set_option, (unsigned)mode_violate, (unsigned *)&__mf_opts.mudflap_mode},

    {"viol-nop",
     "violations do not change program execution",
     set_option, (unsigned)viol_nop, (unsigned *)&__mf_opts.violation_mode},
    {"viol-abort",
     "violations cause a call to abort()",
     set_option, (unsigned)viol_abort, (unsigned *)&__mf_opts.violation_mode},
    {"viol-segv",
     "violations are promoted to SIGSEGV signals",
     set_option, (unsigned)viol_segv, (unsigned *)&__mf_opts.violation_mode},
    {"viol-gdb",
     "violations fork a gdb process attached to current program",
     set_option, (unsigned)viol_gdb, (unsigned *)&__mf_opts.violation_mode},
    {"trace-calls",
     "trace calls to mudflap runtime library",
     set_option, 1, &__mf_opts.trace_mf_calls},
    {"verbose-trace",
     "trace internal events within mudflap runtime library",
     set_option, 1, &__mf_opts.verbose_trace},
    {"collect-stats",
     "collect statistics on mudflap's operation",
     set_option, 1, &__mf_opts.collect_stats},
#ifdef SIGUSR1
    {"sigusr1-report",
     "print report upon SIGUSR1",
     set_option, 1, &__mf_opts.sigusr1_report},
#endif
    {"internal-checking",
     "perform more expensive internal checking",
     set_option, 1, &__mf_opts.internal_checking},
    {"print-leaks",
     "print any memory leaks at program shutdown",
     set_option, 1, &__mf_opts.print_leaks},
#ifdef HAVE___LIBC_FREERES
    {"libc-freeres",
     "call glibc __libc_freeres at shutdown for better leak data",
     set_option, 1, &__mf_opts.call_libc_freeres},
#endif
    {"check-initialization",
     "detect uninitialized object reads",
     set_option, 1, &__mf_opts.check_initialization},
    {"verbose-violations",
     "print verbose messages when memory violations occur",
     set_option, 1, &__mf_opts.verbose_violations},
    {"abbreviate",
     "abbreviate repetitive listings",
     set_option, 1, &__mf_opts.abbreviate},
    {"timestamps",
     "track object lifetime timestamps",
     set_option, 1, &__mf_opts.timestamps},
    {"ignore-reads",
     "ignore read accesses - assume okay",
     set_option, 1, &__mf_opts.ignore_reads},
    {"wipe-stack",
     "wipe stack objects at unwind",
     set_option, 1, &__mf_opts.wipe_stack},
    {"wipe-heap",
     "wipe heap objects at free",
     set_option, 1, &__mf_opts.wipe_heap},
    {"heur-proc-map",
     "support /proc/self/map heuristics",
     set_option, 1, &__mf_opts.heur_proc_map},
    {"heur-stack-bound",
     "enable a simple upper stack bound heuristic",
     set_option, 1, &__mf_opts.heur_stack_bound},
    {"heur-start-end",
     "support _start.._end heuristics",
     set_option, 1, &__mf_opts.heur_start_end},
    {"heur-stdlib",
     "register standard library data (argv, errno, stdin, ...)",
     set_option, 1, &__mf_opts.heur_std_data},
    {"free-queue-length",
     "queue N deferred free() calls before performing them",
     read_integer_option, 0, &__mf_opts.free_queue_length},
    {"persistent-count",
     "keep a history of N unregistered regions",
     read_integer_option, 0, &__mf_opts.persistent_count},
    {"crumple-zone",
     "surround allocations with crumple zones of N bytes",
     read_integer_option, 0, &__mf_opts.crumple_zone},
    /* XXX: not type-safe.
    {"lc-mask",
     "set lookup cache size mask to N (2**M - 1)",
     read_integer_option, 0, (int *)(&__mf_lc_mask)},
    {"lc-shift",
     "set lookup cache pointer shift",
     read_integer_option, 0, (int *)(&__mf_lc_shift)},
    */
    {"lc-adapt",
     "adapt mask/shift parameters after N cache misses",
     read_integer_option, 1, &__mf_opts.adapt_cache},
    {"backtrace",
     "keep an N-level stack trace of each call context",
     read_integer_option, 0, &__mf_opts.backtrace},
#ifdef LIBMUDFLAPTH
    {"thread-stack",
     "override thread stacks allocation: N kB",
     read_integer_option, 0, &__mf_opts.thread_stack},
#endif
    {0, 0, set_option, 0, NULL}
  };

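/* Example of driving the table above through the environment, in the
   form __mf_usage below documents:

       $ export MUDFLAP_OPTIONS='-mode-check -viol-abort -backtrace=8 -no-timestamps'
       $ ./a.out

   This selects checking mode, makes violations call abort(), keeps
   8-level call traces, and shows the `-no-OPTION' negation form.  */
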
static void
__mf_usage ()
{
  struct mudoption *opt;

  fprintf (stderr,
           "This is a %s%sGCC \"mudflap\" memory-checked binary.\n"
           "Mudflap is Copyright (C) 2002-2010 Free Software Foundation, Inc.\n"
           "\n"
           "Unless setuid, a program's mudflap options be set by an environment variable:\n"
454
           "\n"
455
           "$ export MUDFLAP_OPTIONS='<options>'\n"
456
           "$ <mudflapped_program>\n"
457
           "\n"
458
           "where <options> is a space-separated list of \n"
459
           "any of the following options.  Use `-no-OPTION' to disable options.\n"
460
           "\n",
461
#if HAVE_PTHREAD_H
462
           (pthread_join ? "multi-threaded " : "single-threaded "),
463
#else
464
           "",
465
#endif
466
#if LIBMUDFLAPTH
467
           "thread-aware "
468
#else
469
           "thread-unaware "
470
#endif
471
            );
472
  /* XXX: The multi-threaded thread-unaware combination is bad.  */
473
 
474
  for (opt = options; opt->name; opt++)
475
    {
476
      int default_p = (opt->value == * opt->target);
477
 
478
      switch (opt->type)
479
        {
480
          char buf[128];
481
        case set_option:
482
          fprintf (stderr, "-%-23.23s %s", opt->name, opt->description);
483
          if (default_p)
484
            fprintf (stderr, " [active]\n");
485
          else
486
            fprintf (stderr, "\n");
487
          break;
488
        case read_integer_option:
489
          strncpy (buf, opt->name, 128);
490
          strncpy (buf + strlen (opt->name), "=N", 2);
491
          fprintf (stderr, "-%-23.23s %s", buf, opt->description);
492
          fprintf (stderr, " [%d]\n", * opt->target);
493
          break;
494
        default: abort();
495
        }
496
    }
497
 
498
  fprintf (stderr, "\n");
499
}
500
 
501
 
502
int
503
__mf_set_options (const char *optstr)
504
{
505
  int rc;
506
  LOCKTH ();
507
  BEGIN_RECURSION_PROTECT ();
508
  rc = __mfu_set_options (optstr);
509
  /* XXX: It's not really that easy.  A change to a bunch of parameters
510
     can require updating auxiliary state or risk crashing:
511
     free_queue_length, crumple_zone ... */
512
  END_RECURSION_PROTECT ();
513
  UNLOCKTH ();
514
  return rc;
515
}
516
 
517
 
518
int
519
__mfu_set_options (const char *optstr)
520
{
521
  struct mudoption *opts = 0;
522
  char *nxt = 0;
523
  long tmp = 0;
524
  int rc = 0;
525
  const char *saved_optstr = optstr;
526
 
527
  /* XXX: bounds-check for optstr! */
528
 
529
  while (*optstr)
530
    {
531
      switch (*optstr) {
532
      case ' ':
533
      case '\t':
534
      case '\n':
535
        optstr++;
536
        break;
537
 
538
      case '-':
539
        if (*(optstr+1))
          {
            int negate = 0;
            optstr++;

            if (*optstr == '?' ||
                strncmp (optstr, "help", 4) == 0)
              {
                /* Caller will print help and exit.  */
                return -1;
              }

            if (strncmp (optstr, "no-", 3) == 0)
              {
                negate = 1;
                optstr = & optstr[3];
              }

            for (opts = options; opts->name; opts++)
              {
                if (strncmp (optstr, opts->name, strlen (opts->name)) == 0)
                  {
                    optstr += strlen (opts->name);
                    assert (opts->target);
                    switch (opts->type)
                      {
                      case set_option:
                        if (negate)
                          *(opts->target) = 0;
                        else
                          *(opts->target) = opts->value;
                        break;
                      case read_integer_option:
                        if (! negate && (*optstr == '=' && *(optstr+1)))
                          {
                            optstr++;
                            tmp = strtol (optstr, &nxt, 10);
                            if ((optstr != nxt) && (tmp != LONG_MAX))
                              {
                                optstr = nxt;
                                *(opts->target) = (int)tmp;
                              }
                          }
                        else if (negate)
                          * opts->target = 0;
                        break;
                      }
                  }
              }
          }
        break;

      default:
        fprintf (stderr,
                 "warning: unrecognized string '%s' in mudflap options\n",
                 optstr);
        optstr += strlen (optstr);
        rc = -1;
        break;
      }
    }

  /* Special post-processing: bound __mf_lc_mask and free_queue_length for security. */
  __mf_lc_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);
  __mf_opts.free_queue_length &= (__MF_FREEQ_MAX - 1);

  /* Clear the lookup cache, in case the parameters got changed.  */
  /* XXX: race */
  memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
  /* void slot 0 */
  __mf_lookup_cache[0].low = MAXPTR;

  TRACE ("set options from `%s'\n", saved_optstr);

  /* Call this unconditionally, in case -sigusr1-report was toggled. */
  __mf_sigusr1_respond ();

  return rc;
}


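/* The same parser also serves programmatic use through the locked
   wrapper; a sketch (any option string accepted above works):

       if (__mf_set_options ("-wipe-heap -free-queue-length=16") < 0)
         ... treat as a request for help, as __mf_init does ...

   Note the post-processing above: an option change always clears the
   lookup cache, and free_queue_length is masked by __MF_FREEQ_MAX - 1,
   so out-of-range values cannot enlarge the deferred-free queue.  */
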
#ifdef PIC

void
__mf_resolve_single_dynamic (struct __mf_dynamic_entry *e)
{
  char *err;

  assert (e);
  if (e->pointer) return;

#if HAVE_DLVSYM
  if (e->version != NULL && e->version[0] != '\0') /* non-null/empty */
    e->pointer = dlvsym (RTLD_NEXT, e->name, e->version);
  else
#endif
    e->pointer = dlsym (RTLD_NEXT, e->name);

  err = dlerror ();

  if (err)
    {
      fprintf (stderr, "mf: error in dlsym(\"%s\"): %s\n",
               e->name, err);
      abort ();
    }
  if (! e->pointer)
    {
      fprintf (stderr, "mf: dlsym(\"%s\") = NULL\n", e->name);
      abort ();
    }
}


static void
__mf_resolve_dynamics ()
{
  int i;
  for (i = 0; i < dyn_INITRESOLVE; i++)
    __mf_resolve_single_dynamic (& __mf_dynamic[i]);
}


/* NB: order must match enums in mf-impl.h */
struct __mf_dynamic_entry __mf_dynamic [] =
{
  {NULL, "calloc", NULL},
  {NULL, "free", NULL},
  {NULL, "malloc", NULL},
  {NULL, "mmap", NULL},
  {NULL, "munmap", NULL},
  {NULL, "realloc", NULL},
  {NULL, "DUMMY", NULL}, /* dyn_INITRESOLVE */
#ifdef LIBMUDFLAPTH
  {NULL, "pthread_create", PTHREAD_CREATE_VERSION},
  {NULL, "pthread_join", NULL},
  {NULL, "pthread_exit", NULL}
#endif
};

#endif /* PIC */



/* ------------------------------------------------------------------------ */

/* Lookup & manage automatic initialization of the five or so splay trees.  */
static mfsplay_tree
__mf_object_tree (int type)
{
  static mfsplay_tree trees [__MF_TYPE_MAX+1];
  assert (type >= 0 && type <= __MF_TYPE_MAX);
  if (UNLIKELY (trees[type] == NULL))
    trees[type] = mfsplay_tree_new ();
  return trees[type];
}


/* not static */ void
__mf_init ()
{
  char *ov = 0;

  /* Return if initialization has already been done. */
  if (LIKELY (__mf_starting_p == 0))
    return;

#if defined(__FreeBSD__) && defined(LIBMUDFLAPTH)
  pthread_self();
  LOCKTH ();
  UNLOCKTH ();
#endif /* Prime mutex which calls calloc upon first lock to avoid deadlock. */

  /* This initial bootstrap phase requires that __mf_starting_p = 1. */
#ifdef PIC
  __mf_resolve_dynamics ();
#endif
  __mf_starting_p = 0;

  __mf_set_state (active);

  __mf_set_default_options ();

  if (getuid () == geteuid () && getgid () == getegid ()) /* PR41443, not setuid */
    ov = getenv ("MUDFLAP_OPTIONS");
  if (ov)
    {
      int rc = __mfu_set_options (ov);
      if (rc < 0)
        {
          __mf_usage ();
          exit (1);
        }
    }

  /* Initialize to a non-zero description epoch. */
  __mf_describe_object (NULL);

#define REG_RESERVED(obj) \
  __mf_register (& obj, sizeof(obj), __MF_TYPE_NOACCESS, # obj)

  REG_RESERVED (__mf_lookup_cache);
  REG_RESERVED (__mf_lc_mask);
  REG_RESERVED (__mf_lc_shift);
  /* XXX: others of our statics?  */

  /* Prevent access to *NULL. */
  __mf_register (MINPTR, 1, __MF_TYPE_NOACCESS, "NULL");
  __mf_lookup_cache[0].low = (uintptr_t) -1;
}



int
__wrap_main (int argc, char* argv[])
{
  extern char **environ;
  extern int main ();
  extern int __real_main ();
  static int been_here = 0;

  if (__mf_opts.heur_std_data && ! been_here)
    {
      unsigned i;

      been_here = 1;
      __mf_register (argv, sizeof(char *)*(argc+1), __MF_TYPE_STATIC, "argv[]");
      for (i=0; i<argc; i++)
        {
          unsigned j = strlen (argv[i]);
          __mf_register (argv[i], j+1, __MF_TYPE_STATIC, "argv element");
        }

      for (i=0; ; i++)
        {
          char *e = environ[i];
          unsigned j;
          if (e == NULL) break;
          j = strlen (environ[i]);
          __mf_register (environ[i], j+1, __MF_TYPE_STATIC, "environ element");
        }
      __mf_register (environ, sizeof(char *)*(i+1), __MF_TYPE_STATIC, "environ[]");

      __mf_register (& errno, sizeof (errno), __MF_TYPE_STATIC, "errno area");

      __mf_register (stdin,  sizeof (*stdin),  __MF_TYPE_STATIC, "stdin");
      __mf_register (stdout, sizeof (*stdout), __MF_TYPE_STATIC, "stdout");
      __mf_register (stderr, sizeof (*stderr), __MF_TYPE_STATIC, "stderr");

      /* Make some effort to register ctype.h static arrays.  */
      /* XXX: e.g., on Solaris, may need to register __ctype, _ctype, __ctype_mask, __toupper, etc. */
      /* On modern Linux GLIBC, these are thread-specific and changeable, and are dealt
         with in mf-hooks2.c.  */
    }

#ifdef PIC
  return main (argc, argv, environ);
#else
  return __real_main (argc, argv, environ);
#endif
}



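/* The __wrap_main/__real_main pair above relies on the GNU linker's
   symbol wrapping: when the program is linked with -Wl,--wrap,main,
   calls to main are redirected to __wrap_main, and __real_main binds
   to the program's own main.  (In the PIC case, main is simply called
   directly.)  This gives the standard-data registrations of argv,
   environ, errno and the stdio streams a chance to run first.  */
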
extern void __mf_fini () DTOR;
void __mf_fini ()
{
  TRACE ("__mf_fini\n");
  __mfu_report ();

#ifndef PIC
/* Since we didn't populate the tree for allocations in constructors
   before __mf_init, we cannot check destructors after __mf_fini.  */
  __mf_opts.mudflap_mode = mode_nop;
#endif
}



/* ------------------------------------------------------------------------ */
/* __mf_check */

void __mf_check (void *ptr, size_t sz, int type, const char *location)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_check (ptr, sz, type, location);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}


void __mfu_check (void *ptr, size_t sz, int type, const char *location)
{
  unsigned entry_idx = __MF_CACHE_INDEX (ptr);
  struct __mf_cache *entry = & __mf_lookup_cache [entry_idx];
  int judgement = 0; /* 0=undecided; <0=violation; >0=okay */
  uintptr_t ptr_low = (uintptr_t) ptr;
  uintptr_t ptr_high = CLAMPSZ (ptr, sz);
  struct __mf_cache old_entry = *entry;

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();
  if (UNLIKELY (__mf_opts.ignore_reads && type == 0))
    return;

  TRACE ("check ptr=%p b=%u size=%lu %s location=`%s'\n",
         ptr, entry_idx, (unsigned long)sz,
         (type == 0 ? "read" : "write"), location);

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      /* It is tempting to poison the cache here similarly to
         mode_populate.  However that eliminates a valuable
         distinction between these two modes.  mode_nop is useful to
         let a user count & trace every single check / registration
         call.  mode_populate is useful to let a program run fast
         while unchecked.
      */
      judgement = 1;
      break;

    case mode_populate:
      entry->low = ptr_low;
      entry->high = ptr_high;
      judgement = 1;
      break;

    case mode_check:
      {
        unsigned heuristics = 0;

        /* Advance aging/adaptation counters.  */
        static unsigned adapt_count;
        adapt_count ++;
        if (UNLIKELY (__mf_opts.adapt_cache > 0 &&
                      adapt_count > __mf_opts.adapt_cache))
          {
            adapt_count = 0;
            __mf_adapt_cache ();
          }

        /* Looping only occurs if heuristics were triggered.  */
        while (judgement == 0)
          {
            DECLARE (void, free, void *p);
            __mf_object_t* ovr_obj[1];
            unsigned obj_count;
            __mf_object_t** all_ovr_obj = NULL;
            __mf_object_t** dealloc_me = NULL;
            unsigned i;

            /* Find all overlapping objects.  Be optimistic that there is just one.  */
            obj_count = __mf_find_objects (ptr_low, ptr_high, ovr_obj, 1);
            if (UNLIKELY (obj_count > 1))
              {
                /* Allocate a real buffer and do the search again.  */
                DECLARE (void *, malloc, size_t c);
                unsigned n;
                all_ovr_obj = CALL_REAL (malloc, (sizeof (__mf_object_t *) *
                                                   obj_count));
                if (all_ovr_obj == NULL) abort ();
                n = __mf_find_objects (ptr_low, ptr_high, all_ovr_obj, obj_count);
                assert (n == obj_count);
                dealloc_me = all_ovr_obj;
              }
            else
              {
                all_ovr_obj = ovr_obj;
                dealloc_me = NULL;
              }

            /* Update object statistics.  */
            for (i = 0; i < obj_count; i++)
              {
                __mf_object_t *obj = all_ovr_obj[i];
                assert (obj != NULL);
                if (type == __MF_CHECK_READ)
                  obj->read_count ++;
                else
                  obj->write_count ++;
                obj->liveness ++;
              }

            /* Iterate over the various objects.  There are a number of special cases.  */
            for (i = 0; i < obj_count; i++)
              {
                __mf_object_t *obj = all_ovr_obj[i];

                /* Any __MF_TYPE_NOACCESS hit is bad.  */
                if (UNLIKELY (obj->type == __MF_TYPE_NOACCESS))
                  judgement = -1;

                /* Any object with a watch flag is bad.  */
                if (UNLIKELY (obj->watching_p))
                  judgement = -2; /* trigger VIOL_WATCH */

                /* A read from an uninitialized object is bad. */
                if (UNLIKELY (__mf_opts.check_initialization
                              /* reading */
                              && type == __MF_CHECK_READ
                              /* not written */
                              && obj->write_count == 0
                              /* uninitialized (heap) */
                              && obj->type == __MF_TYPE_HEAP))
                  judgement = -1;
              }

            /* We now know that the access spans no invalid objects.  */
            if (LIKELY (judgement >= 0))
              for (i = 0; i < obj_count; i++)
                {
                  __mf_object_t *obj = all_ovr_obj[i];

                  /* Is this access entirely contained within this object?  */
                  if (LIKELY (ptr_low >= obj->low && ptr_high <= obj->high))
                    {
                      /* Valid access.  */
                      entry->low = obj->low;
                      entry->high = obj->high;
                      judgement = 1;
                    }
                }

            /* This access runs off the end of one valid object.  That
               could be okay, if other valid objects fill in all the
               holes.  We allow this only for HEAP and GUESS type
               objects.  Accesses to STATIC and STACK variables
               should not be allowed to span.  */
            if (UNLIKELY ((judgement == 0) && (obj_count > 1)))
              {
                unsigned uncovered = 0;
                for (i = 0; i < obj_count; i++)
                  {
                    __mf_object_t *obj = all_ovr_obj[i];
                    int j, uncovered_low_p, uncovered_high_p;
                    uintptr_t ptr_lower, ptr_higher;

                    uncovered_low_p = ptr_low < obj->low;
                    ptr_lower = CLAMPSUB (obj->low, 1);
                    uncovered_high_p = ptr_high > obj->high;
                    ptr_higher = CLAMPADD (obj->high, 1);

                    for (j = 0; j < obj_count; j++)
                      {
                        __mf_object_t *obj2 = all_ovr_obj[j];

                        if (i == j) continue;

                        /* Filter out objects that cannot be spanned across.  */
                        if (obj2->type == __MF_TYPE_STACK
                            || obj2->type == __MF_TYPE_STATIC)
                          continue;

                        /* Consider a side "covered" if obj2 includes
                           the next byte on that side.  */
                        if (uncovered_low_p
                            && (ptr_lower >= obj2->low && ptr_lower <= obj2->high))
                          uncovered_low_p = 0;
                        if (uncovered_high_p
                              && (ptr_high >= obj2->low && ptr_higher <= obj2->high))
1001
                          uncovered_high_p = 0;
                      }

                    if (uncovered_low_p || uncovered_high_p)
                      uncovered ++;
                  }

                /* Success if no overlapping objects are uncovered.  */
                if (uncovered == 0)
                  judgement = 1;
              }


            if (dealloc_me != NULL)
              CALL_REAL (free, dealloc_me);

            /* If the judgment is still unknown at this stage, loop
               around at most one more time.  */
            if (judgement == 0)
              {
                if (heuristics++ < 2) /* XXX parametrize this number? */
                  judgement = __mf_heuristic_check (ptr_low, ptr_high);
                else
                  judgement = -1;
              }
          }

      }
      break;

    case mode_violate:
      judgement = -1;
      break;
    }

  if (__mf_opts.collect_stats)
    {
      __mf_count_check ++;

      if (LIKELY (old_entry.low != entry->low || old_entry.high != entry->high))
        /* && (old_entry.low != 0) && (old_entry.high != 0)) */
        __mf_lookup_cache_reusecount [entry_idx] ++;
    }

  if (UNLIKELY (judgement < 0))
    __mf_violation (ptr, sz,
                    (uintptr_t) __builtin_return_address (0), location,
                    ((judgement == -1) ?
                     (type == __MF_CHECK_READ ? __MF_VIOL_READ : __MF_VIOL_WRITE) :
                     __MF_VIOL_WATCH));
}


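/* A worked example of the spanning logic in __mfu_check: suppose HEAP
   objects A=[0x1000,0x13ff] and B=[0x1400,0x17ff] are registered and a
   write checks [0x13fc,0x1403].  Neither object contains the access by
   itself, so judgement stays 0; in the spanning pass, A's high side is
   covered by B (ptr_higher == 0x1400 lies inside B) and B's low side
   by A (ptr_lower == 0x13ff lies inside A), so uncovered == 0 and the
   access is accepted.  Had A or B been STACK or STATIC, it would have
   been filtered out as a coverer, the gap would have stayed uncovered,
   and the check would normally end in a violation once the heuristics
   are exhausted.  */
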
static __mf_object_t *
__mf_insert_new_object (uintptr_t low, uintptr_t high, int type,
                        const char *name, uintptr_t pc)
{
  DECLARE (void *, calloc, size_t c, size_t n);

  __mf_object_t *new_obj;
  new_obj = CALL_REAL (calloc, 1, sizeof(__mf_object_t));
  new_obj->low = low;
  new_obj->high = high;
  new_obj->type = type;
  new_obj->name = name;
  new_obj->alloc_pc = pc;
#if HAVE_GETTIMEOFDAY
  if (__mf_opts.timestamps)
    gettimeofday (& new_obj->alloc_time, NULL);
#endif
#if LIBMUDFLAPTH
  new_obj->alloc_thread = pthread_self ();
#endif

  if (__mf_opts.backtrace > 0 && (type == __MF_TYPE_HEAP || type == __MF_TYPE_HEAP_I))
    new_obj->alloc_backtrace_size =
      __mf_backtrace (& new_obj->alloc_backtrace,
                      (void *) pc, 2);

  __mf_link_object (new_obj);
  return new_obj;
}


static void
__mf_uncache_object (__mf_object_t *old_obj)
{
  /* Remove any low/high pointers for this object from the lookup cache.  */

  /* Can it possibly exist in the cache?  */
  if (LIKELY (old_obj->read_count + old_obj->write_count))
    {
      uintptr_t low = old_obj->low;
      uintptr_t high = old_obj->high;
      struct __mf_cache *entry;
      unsigned i;
      if ((high - low) >= (__mf_lc_mask << __mf_lc_shift))
        {
          /* For large objects (>= cache size - 1) check the whole cache.  */
          entry = & __mf_lookup_cache [0];
          for (i = 0; i <= __mf_lc_mask; i++, entry++)
            {
              /* NB: the "||" in the following test permits this code to
                 tolerate the situation introduced by __mf_check over
                 contiguous objects, where a cache entry spans several
                 objects.  */
              if (entry->low == low || entry->high == high)
                {
                  entry->low = MAXPTR;
                  entry->high = MINPTR;
                }
            }
        }
      else
        {
          /* Object is now smaller than cache size.  */
          unsigned entry_low_idx = __MF_CACHE_INDEX (low);
          unsigned entry_high_idx = __MF_CACHE_INDEX (high);
          if (entry_low_idx <= entry_high_idx)
            {
              entry = & __mf_lookup_cache [entry_low_idx];
              for (i = entry_low_idx; i <= entry_high_idx; i++, entry++)
                {
                  /* NB: the "||" in the following test permits this code to
                     tolerate the situation introduced by __mf_check over
                     contiguous objects, where a cache entry spans several
                     objects.  */
                  if (entry->low == low || entry->high == high)
                    {
                      entry->low = MAXPTR;
                      entry->high = MINPTR;
                    }
                }
            }
          else
            {
              /* Object wrapped around the end of the cache. First search
                 from low to end of cache and then from 0 to high.  */
              entry = & __mf_lookup_cache [entry_low_idx];
              for (i = entry_low_idx; i <= __mf_lc_mask; i++, entry++)
                {
                  /* NB: the "||" in the following test permits this code to
                     tolerate the situation introduced by __mf_check over
                     contiguous objects, where a cache entry spans several
                     objects.  */
                  if (entry->low == low || entry->high == high)
                    {
                      entry->low = MAXPTR;
                      entry->high = MINPTR;
                    }
                }
              entry = & __mf_lookup_cache [0];
              for (i = 0; i <= entry_high_idx; i++, entry++)
                {
                  /* NB: the "||" in the following test permits this code to
                     tolerate the situation introduced by __mf_check over
                     contiguous objects, where a cache entry spans several
                     objects.  */
                  if (entry->low == low || entry->high == high)
                    {
                      entry->low = MAXPTR;
                      entry->high = MINPTR;
                    }
                }
            }
        }
    }
}


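/* Example of the wraparound branch above, again assuming the
   __MF_CACHE_INDEX hash ((ptr >> shift) & mask) from mf-impl.h: with
   the defaults shift=2 and mask=1023, an object [0xfff8, 0x10007]
   yields entry_low_idx == 1022 and entry_high_idx == 1, so the scan
   proceeds in two passes, slots 1022..1023 and then 0..1.  */
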
void
__mf_register (void *ptr, size_t sz, int type, const char *name)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_register (ptr, sz, type, name);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}


void
__mfu_register (void *ptr, size_t sz, int type, const char *name)
{
  TRACE ("register ptr=%p size=%lu type=%x name='%s'\n",
         ptr, (unsigned long) sz, type, name ? name : "");

  if (__mf_opts.collect_stats)
    {
      __mf_count_register ++;
      __mf_total_register_size [(type < 0) ? 0 :
                                (type > __MF_TYPE_MAX) ? 0 :
                                type] += sz;
    }

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      break;

    case mode_violate:
      __mf_violation (ptr, sz, (uintptr_t) __builtin_return_address (0), NULL,
                      __MF_VIOL_REGISTER);
      break;

    case mode_populate:
      /* Clear the cache.  */
      /* XXX: why the entire cache? */
      /* XXX: race */
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
      break;

    case mode_check:
      {
        __mf_object_t *ovr_objs [1];
        unsigned num_overlapping_objs;
        uintptr_t low = (uintptr_t) ptr;
        uintptr_t high = CLAMPSZ (ptr, sz);
        uintptr_t pc = (uintptr_t) __builtin_return_address (0);

        /* Treat unknown size indication as 1.  */
        if (UNLIKELY (sz == 0)) sz = 1;

        /* Look for objects only of the same type.  This will e.g. permit a registration
           of a STATIC overlapping with a GUESS, and a HEAP with a NOACCESS.  At
           __mf_check time however harmful overlaps will be detected. */
        num_overlapping_objs = __mf_find_objects2 (low, high, ovr_objs, 1, type);

        /* Handle overlaps.  */
        if (UNLIKELY (num_overlapping_objs > 0))
          {
            __mf_object_t *ovr_obj = ovr_objs[0];

            /* Accept certain specific duplication pairs.  */
            if (((type == __MF_TYPE_STATIC) || (type == __MF_TYPE_GUESS))
                && ovr_obj->low == low
                && ovr_obj->high == high
                && ovr_obj->type == type)
              {
                /* Duplicate registration for static objects may come
                   from distinct compilation units.  */
                VERBOSE_TRACE ("harmless duplicate reg %p-%p `%s'\n",
                               (void *) low, (void *) high,
                               (ovr_obj->name ? ovr_obj->name : ""));
                break;
              }

            /* Alas, a genuine violation.  */
            else
              {
                /* Two or more *real* mappings here. */
                __mf_violation ((void *) ptr, sz,
                                (uintptr_t) __builtin_return_address (0), NULL,
                                __MF_VIOL_REGISTER);
              }
          }
        else /* No overlapping objects: AOK.  */
          __mf_insert_new_object (low, high, type, name, pc);

        /* We could conceivably call __mf_check() here to prime the cache,
           but then the read_count/write_count field is not reliable.  */
        break;
      }
    } /* end switch (__mf_opts.mudflap_mode) */
}


void
__mf_unregister (void *ptr, size_t sz, int type)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_unregister (ptr, sz, type);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}


void
__mfu_unregister (void *ptr, size_t sz, int type)
{
  DECLARE (void, free, void *ptr);

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();

  TRACE ("unregister ptr=%p size=%lu type=%x\n", ptr, (unsigned long) sz, type);

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      break;

    case mode_violate:
      __mf_violation (ptr, sz,
                      (uintptr_t) __builtin_return_address (0), NULL,
                      __MF_VIOL_UNREGISTER);
      break;

    case mode_populate:
      /* Clear the cache.  */
      /* XXX: race */
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
      break;

    case mode_check:
      {
        __mf_object_t *old_obj = NULL;
        __mf_object_t *del_obj = NULL;  /* Object to actually delete. */
        __mf_object_t *objs[1] = {NULL};
        unsigned num_overlapping_objs;

        num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
                                                   CLAMPSZ (ptr, sz), objs, 1, type);

        /* Special case for HEAP_I - see free & realloc hook.  They don't
           know whether the input region was HEAP or HEAP_I before
           unmapping it.  Here we give HEAP a try in case HEAP_I
           failed.  */
        if ((type == __MF_TYPE_HEAP_I) && (num_overlapping_objs == 0))
          {
            num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
                                                       CLAMPSZ (ptr, sz), objs, 1, __MF_TYPE_HEAP);
          }

        old_obj = objs[0];
        if (UNLIKELY ((num_overlapping_objs != 1) /* more than one overlap */
                      || ((sz == 0) ? 0 : (sz != (old_obj->high - old_obj->low + 1))) /* size mismatch */
                      || ((uintptr_t) ptr != old_obj->low))) /* base mismatch */
          {
            __mf_violation (ptr, sz,
                            (uintptr_t) __builtin_return_address (0), NULL,
                            __MF_VIOL_UNREGISTER);
            break;
          }

        __mf_unlink_object (old_obj);
        __mf_uncache_object (old_obj);

        /* Wipe buffer contents if desired.  */
        if ((__mf_opts.wipe_stack && old_obj->type == __MF_TYPE_STACK)
            || (__mf_opts.wipe_heap && (old_obj->type == __MF_TYPE_HEAP
                                        || old_obj->type == __MF_TYPE_HEAP_I)))
          {
            memset ((void *) old_obj->low,
                    0,
                    (size_t) (old_obj->high - old_obj->low + 1));
          }

        /* Manage the object cemetery.  */
        if (__mf_opts.persistent_count > 0
            && (unsigned) old_obj->type <= __MF_TYPE_MAX_CEM)
          {
            old_obj->deallocated_p = 1;
            old_obj->dealloc_pc = (uintptr_t) __builtin_return_address (0);
#if HAVE_GETTIMEOFDAY
            if (__mf_opts.timestamps)
              gettimeofday (& old_obj->dealloc_time, NULL);
#endif
#ifdef LIBMUDFLAPTH
            old_obj->dealloc_thread = pthread_self ();
#endif

            if (__mf_opts.backtrace > 0 && old_obj->type == __MF_TYPE_HEAP)
              old_obj->dealloc_backtrace_size =
                __mf_backtrace (& old_obj->dealloc_backtrace,
                                NULL, 2);

            /* Encourage this object to be displayed again in current epoch.  */
            old_obj->description_epoch --;

            /* Put this object into the cemetery.  This may require this plot to
               be recycled, and the previous resident to be designated del_obj.  */
            {
              unsigned row = old_obj->type;
              unsigned plot = __mf_object_dead_head [row];

              del_obj = __mf_object_cemetary [row][plot];
              __mf_object_cemetary [row][plot] = old_obj;
              plot ++;
              if (plot == __mf_opts.persistent_count) plot = 0;
              __mf_object_dead_head [row] = plot;
            }
          }
        else
          del_obj = old_obj;

        if (__mf_opts.print_leaks)
          {
            if ((old_obj->read_count + old_obj->write_count) == 0 &&
                (old_obj->type == __MF_TYPE_HEAP
                 || old_obj->type == __MF_TYPE_HEAP_I))
              {
                /* The problem with a warning message here is that we may not
                   be privy to accesses to such objects that occur within
                   uninstrumented libraries.  */
#if 0
                fprintf (stderr,
                         "*******\n"
                         "mudflap warning: unaccessed registered object:\n");
                __mf_describe_object (old_obj);
#endif
              }
          }

        if (del_obj != NULL) /* May or may not equal old_obj.  */
          {
            if (__mf_opts.backtrace > 0)
              {
                CALL_REAL(free, del_obj->alloc_backtrace);
                if (__mf_opts.persistent_count > 0)
                  {
                    CALL_REAL(free, del_obj->dealloc_backtrace);
                  }
              }
            CALL_REAL(free, del_obj);
          }

        break;
      }
    } /* end switch (__mf_opts.mudflap_mode) */


  if (__mf_opts.collect_stats)
    {
      __mf_count_unregister ++;
      __mf_total_unregister_size += sz;
    }
}



struct tree_stats
{
  unsigned obj_count;
  unsigned long total_size;
  unsigned live_obj_count;
  double total_weight;
  double weighted_size;
  unsigned long weighted_address_bits [sizeof (uintptr_t) * 8][2];
};



static int
__mf_adapt_cache_fn (mfsplay_tree_node n, void *param)
{
  __mf_object_t *obj = (__mf_object_t *) n->value;
  struct tree_stats *s = (struct tree_stats *) param;

  assert (obj != NULL && s != NULL);

  /* Exclude never-accessed objects.  */
  if (obj->read_count + obj->write_count)
    {
      s->obj_count ++;
      s->total_size += (obj->high - obj->low + 1);

      if (obj->liveness)
        {
          unsigned i;
          uintptr_t addr;

          /* VERBOSE_TRACE ("analyze low=%p live=%u name=`%s'\n",
             (void *) obj->low, obj->liveness, obj->name); */

          s->live_obj_count ++;
          s->total_weight += (double) obj->liveness;
          s->weighted_size +=
            (double) (obj->high - obj->low + 1) *
            (double) obj->liveness;

          addr = obj->low;
          for (i=0; i<sizeof(uintptr_t) * 8; i++)
            {
              unsigned bit = addr & 1;
              s->weighted_address_bits[i][bit] += obj->liveness;
              addr = addr >> 1;
            }

          /* Age the liveness value.  */
          obj->liveness >>= 1;
        }
    }

  return 0;
}


static void
__mf_adapt_cache ()
{
  struct tree_stats s;
  uintptr_t new_mask = 0;
  unsigned char new_shift;
  float cache_utilization;
  float max_value;
  static float smoothed_new_shift = -1.0;
  unsigned i;

  memset (&s, 0, sizeof (s));

  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP), __mf_adapt_cache_fn, (void *) & s);
  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I), __mf_adapt_cache_fn, (void *) & s);
  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_STACK), __mf_adapt_cache_fn, (void *) & s);
  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_STATIC), __mf_adapt_cache_fn, (void *) & s);
  mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_GUESS), __mf_adapt_cache_fn, (void *) & s);

  /* Maybe we're dealing with funny aging/adaptation parameters, or an
     empty tree.  Just leave the cache alone in such cases, rather
     than risk dying by division-by-zero.  */
  if (! (s.obj_count > 0 && s.live_obj_count > 0 && s.total_weight > 0.0))
    return;

  /* Guess a good value for the shift parameter by finding an address bit that is a
     good discriminant of lively objects.  */
  max_value = 0.0;
  for (i=0; i<sizeof (uintptr_t)*8; i++)
    {
      float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
      if (max_value < value) max_value = value;
    }
  for (i=0; i<sizeof (uintptr_t)*8; i++)
    {
      float shoulder_factor = 0.7;  /* Include slightly less popular bits too.  */
      float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
      if (value >= max_value * shoulder_factor)
        break;
    }
  if (smoothed_new_shift < 0) smoothed_new_shift = __mf_lc_shift;
  /* Converge toward this slowly to reduce flapping. */
  smoothed_new_shift = 0.9*smoothed_new_shift + 0.1*i;
  new_shift = (unsigned) (smoothed_new_shift + 0.5);
  assert (new_shift < sizeof (uintptr_t)*8);

  /* Count number of used buckets.  */
  cache_utilization = 0.0;
  for (i = 0; i < (1 + __mf_lc_mask); i++)
    if (__mf_lookup_cache[i].low != 0 || __mf_lookup_cache[i].high != 0)
      cache_utilization += 1.0;
  cache_utilization /= (1 + __mf_lc_mask);

  new_mask |= 0xffff; /* XXX: force a large cache.  */
  new_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);

  VERBOSE_TRACE ("adapt cache obj=%u/%u sizes=%lu/%.0f/%.0f => "
                 "util=%u%% m=%p s=%u\n",
                 s.obj_count, s.live_obj_count, s.total_size, s.total_weight, s.weighted_size,
                 (unsigned)(cache_utilization*100.0), (void *) new_mask, new_shift);

  /* We should reinitialize the cache if its parameters have changed.  */
  if (new_mask != __mf_lc_mask ||
      new_shift != __mf_lc_shift)
    {
      __mf_lc_mask = new_mask;
      __mf_lc_shift = new_shift;
      /* XXX: race */
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
    }
}
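
/* Editor's illustrative sketch (not part of the upstream source): the
   shift and mask tuned above select the lookup-cache bucket for an
   address, assuming the usual (addr >> shift) & mask indexing scheme
   used by the cache macros in mf-impl.h.  The helper name below is
   hypothetical.  */
static unsigned
example_cache_index (uintptr_t addr, unsigned char shift, uintptr_t mask)
{
  /* With shift == 3 and mask == 0xffff, addresses 0x1000..0x1007 all
     hash to the same bucket; a larger shift coalesces more nearby
     addresses into one bucket, a larger mask spreads them out.  */
  return (unsigned) ((addr >> shift) & mask);
}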



/* __mf_find_object[s] */

/* Find overlapping live objects between [low,high].  Return up to
   max_objs of their pointers in objs[].  Return total count of
   overlaps (may exceed max_objs). */

unsigned
__mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
                    __mf_object_t **objs, unsigned max_objs, int type)
{
  unsigned count = 0;
  mfsplay_tree t = __mf_object_tree (type);
  mfsplay_tree_key k = (mfsplay_tree_key) ptr_low;
  int direction;

  mfsplay_tree_node n = mfsplay_tree_lookup (t, k);
  /* An exact match for base address implies a hit.  */
  if (n != NULL)
    {
      if (count < max_objs)
        objs[count] = (__mf_object_t *) n->value;
      count ++;
    }

  /* Iterate left then right near this key value to find all overlapping objects. */
  for (direction = 0; direction < 2; direction ++)
    {
      /* Reset search origin.  */
      k = (mfsplay_tree_key) ptr_low;

      while (1)
        {
          __mf_object_t *obj;

          n = (direction == 0 ? mfsplay_tree_successor (t, k) : mfsplay_tree_predecessor (t, k));
          if (n == NULL) break;
          obj = (__mf_object_t *) n->value;

          if (! (obj->low <= ptr_high && obj->high >= ptr_low)) /* No overlap? */
            break;

          if (count < max_objs)
            objs[count] = (__mf_object_t *) n->value;
          count ++;

          k = (mfsplay_tree_key) obj->low;
        }
    }

  return count;
}


unsigned
__mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                   __mf_object_t **objs, unsigned max_objs)
{
  int type;
  unsigned count = 0;

  /* Search each splay tree for overlaps.  */
  for (type = __MF_TYPE_NOACCESS; type <= __MF_TYPE_GUESS; type++)
    {
      unsigned c = __mf_find_objects2 (ptr_low, ptr_high, objs, max_objs, type);
      if (c > max_objs)
        {
          max_objs = 0;
          objs = NULL;
        }
      else /* NB: c may equal 0 */
        {
          max_objs -= c;
          objs += c;
        }
      count += c;
    }

  return count;
}
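
/* Editor's illustrative sketch (not part of the upstream source):
   because the returned count may exceed the buffer, callers that need
   every overlap call __mf_find_objects twice -- once with objs == NULL
   to learn the count, then again with a buffer of exactly that size.
   The same two-pass pattern appears in __mf_watch_or_not below; the
   helper name here is hypothetical.  */
static unsigned
example_collect_overlaps (uintptr_t low, uintptr_t high)
{
  DECLARE (void *, malloc, size_t c);
  DECLARE (void, free, void *p);
  unsigned n = __mf_find_objects (low, high, NULL, 0);  /* Pass 1: count.  */
  __mf_object_t **buf = CALL_REAL (malloc, n * sizeof (__mf_object_t *));
  unsigned m = buf ? __mf_find_objects (low, high, buf, n) : 0;  /* Pass 2: fill.  */
  /* ... inspect buf[0..m-1] here ... */
  if (buf) CALL_REAL (free, buf);
  return m;
}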



/* __mf_link_object */

static void
__mf_link_object (__mf_object_t *node)
{
  mfsplay_tree t = __mf_object_tree (node->type);
  mfsplay_tree_insert (t, (mfsplay_tree_key) node->low, (mfsplay_tree_value) node);
}

/* __mf_unlink_object */

static void
__mf_unlink_object (__mf_object_t *node)
{
  mfsplay_tree t = __mf_object_tree (node->type);
  mfsplay_tree_remove (t, (mfsplay_tree_key) node->low);
}

/* __mf_find_dead_objects */

/* Find overlapping dead objects between [low,high].  Return up to
   max_objs of their pointers in objs[].  Return total count of
   overlaps (may exceed max_objs).  */

static unsigned
__mf_find_dead_objects (uintptr_t low, uintptr_t high,
                        __mf_object_t **objs, unsigned max_objs)
{
  if (__mf_opts.persistent_count > 0)
    {
      unsigned count = 0;
      unsigned recollection = 0;
      unsigned row = 0;

      assert (low <= high);
      assert (max_objs == 0 || objs != NULL);

      /* Widen the search from the most recent plots in each row, looking
         backward in time.  */
      recollection = 0;
      while (recollection < __mf_opts.persistent_count)
        {
          count = 0;

          for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
            {
              unsigned plot;
              unsigned i;

              plot = __mf_object_dead_head [row];
              for (i = 0; i <= recollection; i ++)
                {
                  __mf_object_t *obj;

                  /* Look backward through row: it's a circular buffer.  */
                  if (plot > 0) plot --;
                  else plot = __mf_opts.persistent_count - 1;

                  obj = __mf_object_cemetary [row][plot];
                  if (obj && obj->low <= high && obj->high >= low)
                    {
                      /* Found an overlapping dead object!  */
                      if (count < max_objs)
                        objs [count] = obj;
                      count ++;
                    }
                }
            }

          if (count)
            break;

          /* Look farther back in time.  */
          recollection = (recollection * 2) + 1;
        }

      return count;
    }
  else
    {
      return 0;
    }
}
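
/* Editor's note (illustrative, not part of the upstream source): the
   "recollection" window grows geometrically -- 0, 1, 3, 7, 15, ... --
   so each pass re-examines roughly twice as many of the most recently
   buried objects per cemetery row, until something overlaps or the
   whole persistent ring has been searched.  A minimal sketch of the
   backward circular step, assuming a ring of persistent_count slots
   (hypothetical helper name):  */
static unsigned
example_previous_plot (unsigned plot, unsigned persistent_count)
{
  /* Step one slot back, wrapping from slot 0 to the end of the ring.  */
  return (plot > 0) ? plot - 1 : persistent_count - 1;
}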

/* __mf_describe_object */

static void
__mf_describe_object (__mf_object_t *obj)
{
  static unsigned epoch = 0;
  if (obj == NULL)
    {
      epoch ++;
      return;
    }

  if (__mf_opts.abbreviate && obj->description_epoch == epoch)
    {
      fprintf (stderr,
               "mudflap %sobject %p: name=`%s'\n",
               (obj->deallocated_p ? "dead " : ""),
               (void *) obj, (obj->name ? obj->name : ""));
      return;
    }
  else
    obj->description_epoch = epoch;

  fprintf (stderr,
           "mudflap %sobject %p: name=`%s'\n"
           "bounds=[%p,%p] size=%lu area=%s check=%ur/%uw liveness=%u%s\n"
           "alloc time=%lu.%06lu pc=%p"
#ifdef LIBMUDFLAPTH
           " thread=%u"
#endif
           "\n",
           (obj->deallocated_p ? "dead " : ""),
           (void *) obj, (obj->name ? obj->name : ""),
           (void *) obj->low, (void *) obj->high,
           (unsigned long) (obj->high - obj->low + 1),
           (obj->type == __MF_TYPE_NOACCESS ? "no-access" :
            obj->type == __MF_TYPE_HEAP ? "heap" :
            obj->type == __MF_TYPE_HEAP_I ? "heap-init" :
            obj->type == __MF_TYPE_STACK ? "stack" :
            obj->type == __MF_TYPE_STATIC ? "static" :
            obj->type == __MF_TYPE_GUESS ? "guess" :
            "unknown"),
           obj->read_count, obj->write_count, obj->liveness,
           obj->watching_p ? " watching" : "",
           obj->alloc_time.tv_sec, obj->alloc_time.tv_usec,
           (void *) obj->alloc_pc
#ifdef LIBMUDFLAPTH
           , (unsigned) obj->alloc_thread
#endif
           );

  if (__mf_opts.backtrace > 0)
  {
    unsigned i;
    for (i=0; i<obj->alloc_backtrace_size; i++)
      fprintf (stderr, "      %s\n", obj->alloc_backtrace[i]);
  }

  if (__mf_opts.persistent_count > 0)
    {
      if (obj->deallocated_p)
        {
          fprintf (stderr, "dealloc time=%lu.%06lu pc=%p"
#ifdef LIBMUDFLAPTH
                   " thread=%u"
#endif
                   "\n",
                   obj->dealloc_time.tv_sec, obj->dealloc_time.tv_usec,
                   (void *) obj->dealloc_pc
#ifdef LIBMUDFLAPTH
                   , (unsigned) obj->dealloc_thread
#endif
                   );


          if (__mf_opts.backtrace > 0)
          {
            unsigned i;
            for (i=0; i<obj->dealloc_backtrace_size; i++)
              fprintf (stderr, "      %s\n", obj->dealloc_backtrace[i]);
          }
        }
    }
}


static int
__mf_report_leaks_fn (mfsplay_tree_node n, void *param)
{
  __mf_object_t *node = (__mf_object_t *) n->value;
  unsigned *count = (unsigned *) param;

  if (count != NULL)
    (*count) ++;

  fprintf (stderr, "Leaked object %u:\n", (*count));
  __mf_describe_object (node);

  return 0;
}


static unsigned
__mf_report_leaks ()
{
  unsigned count = 0;

  (void) mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP),
                               __mf_report_leaks_fn, & count);
  (void) mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I),
                               __mf_report_leaks_fn, & count);

  return count;
}

/* ------------------------------------------------------------------------ */
/* __mf_report */

void
__mf_report ()
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_report ();
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}

void
__mfu_report ()
{
  if (__mf_opts.collect_stats)
    {
      fprintf (stderr,
               "*******\n"
               "mudflap stats:\n"
               "calls to __mf_check: %lu\n"
               "         __mf_register: %lu [%luB, %luB, %luB, %luB, %luB]\n"
               "         __mf_unregister: %lu [%luB]\n"
               "         __mf_violation: [%lu, %lu, %lu, %lu, %lu]\n",
               __mf_count_check,
               __mf_count_register,
               __mf_total_register_size[0], __mf_total_register_size[1],
               __mf_total_register_size[2], __mf_total_register_size[3],
               __mf_total_register_size[4], /* XXX */
               __mf_count_unregister, __mf_total_unregister_size,
               __mf_count_violation[0], __mf_count_violation[1],
               __mf_count_violation[2], __mf_count_violation[3],
               __mf_count_violation[4]);

      fprintf (stderr,
               "calls with reentrancy: %lu\n", __mf_reentrancy);
#ifdef LIBMUDFLAPTH
      fprintf (stderr,
               "           lock contention: %lu\n", __mf_lock_contention);
#endif

      /* Lookup cache stats.  */
      {
        unsigned i;
        unsigned max_reuse = 0;
        unsigned num_used = 0;
        unsigned num_unused = 0;

        for (i = 0; i < LOOKUP_CACHE_SIZE; i++)
          {
            if (__mf_lookup_cache_reusecount[i])
              num_used ++;
            else
              num_unused ++;
            if (max_reuse < __mf_lookup_cache_reusecount[i])
              max_reuse = __mf_lookup_cache_reusecount[i];
          }
        fprintf (stderr, "lookup cache slots used: %u  unused: %u  peak-reuse: %u\n",
                 num_used, num_unused, max_reuse);
      }

      {
        unsigned live_count;
        live_count = __mf_find_objects (MINPTR, MAXPTR, NULL, 0);
        fprintf (stderr, "number of live objects: %u\n", live_count);
      }

      if (__mf_opts.persistent_count > 0)
        {
          unsigned dead_count = 0;
          unsigned row, plot;
          for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
            for (plot = 0 ; plot < __mf_opts.persistent_count; plot ++)
              if (__mf_object_cemetary [row][plot] != 0)
                dead_count ++;
          fprintf (stderr, "          zombie objects: %u\n", dead_count);
        }
    }
  if (__mf_opts.print_leaks && (__mf_opts.mudflap_mode == mode_check))
    {
      unsigned l;
      extern void * __mf_wrap_alloca_indirect (size_t c);

      /* Free up any remaining alloca()'d blocks.  */
      __mf_wrap_alloca_indirect (0);
#ifdef HAVE___LIBC_FREERES
      if (__mf_opts.call_libc_freeres)
        {
          extern void __libc_freeres (void);
          __libc_freeres ();
        }
#endif

      __mf_describe_object (NULL); /* Reset description epoch.  */
      l = __mf_report_leaks ();
      fprintf (stderr, "number of leaked objects: %u\n", l);
    }
}

/* __mf_backtrace */

size_t
__mf_backtrace (char ***symbols, void *guess_pc, unsigned guess_omit_levels)
{
  void ** pc_array;
  unsigned pc_array_size = __mf_opts.backtrace + guess_omit_levels;
  unsigned remaining_size;
  unsigned omitted_size = 0;
  unsigned i;
  DECLARE (void, free, void *ptr);
  DECLARE (void *, calloc, size_t c, size_t n);
  DECLARE (void *, malloc, size_t n);

  pc_array = CALL_REAL (calloc, pc_array_size, sizeof (void *) );
#ifdef HAVE_BACKTRACE
  pc_array_size = backtrace (pc_array, pc_array_size);
#else
#define FETCH(n) do { if (pc_array_size >= n) { \
                 pc_array[n] = __builtin_return_address(n); \
                 if (pc_array[n] == 0) pc_array_size = n; } } while (0)

  /* Unroll some calls to __builtin_return_address, because it only
     accepts a literal integer parameter.  */
  FETCH (0);
#if 0
  /* XXX: __builtin_return_address sometimes crashes (!) on >0 arguments,
     rather than simply returning 0.  :-(  */
  FETCH (1);
  FETCH (2);
  FETCH (3);
  FETCH (4);
  FETCH (5);
  FETCH (6);
  FETCH (7);
  FETCH (8);
  if (pc_array_size > 8) pc_array_size = 9;
#else
  if (pc_array_size > 0) pc_array_size = 1;
#endif

#undef FETCH
#endif

  /* We want to trim the first few levels of the stack traceback,
     since they contain libmudflap wrappers and junk.  If pc_array[]
     ends up containing a non-NULL guess_pc, then trim everything
     before that.  Otherwise, omit the first guess_omit_levels
     entries. */

  if (guess_pc != NULL)
    for (i=0; i<pc_array_size; i++)
      if (pc_array [i] == guess_pc)
        omitted_size = i;

  if (omitted_size == 0) /* No match? */
    if (pc_array_size > guess_omit_levels)
      omitted_size = guess_omit_levels;

  remaining_size = pc_array_size - omitted_size;

#ifdef HAVE_BACKTRACE_SYMBOLS
  *symbols = backtrace_symbols (pc_array + omitted_size, remaining_size);
#else
  {
    /* Let's construct a buffer by hand.  It will have <remaining_size>
       char*'s at the front, pointing at individual strings immediately
       afterwards.  */
    void *buffer;
    char *chars;
    char **pointers;
    enum { perline = 30 };
    buffer = CALL_REAL (malloc, remaining_size * (perline + sizeof(char *)));
    pointers = (char **) buffer;
    chars = (char *)buffer + (remaining_size * sizeof (char *));
    for (i = 0; i < remaining_size; i++)
      {
        pointers[i] = chars;
        sprintf (chars, "[0x%p]", pc_array [omitted_size + i]);
        chars = chars + perline;
      }
    *symbols = pointers;
  }
#endif
  CALL_REAL (free, pc_array);

  return remaining_size;
}
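
/* Editor's illustrative sketch (not part of the upstream source): the
   caller owns the returned symbol array.  __mf_violation below uses
   exactly this pattern, freeing via CALL_REAL so that the free itself
   is not re-checked by mudflap; the helper name is hypothetical.  */
static void
example_print_backtrace (void *pc)
{
  DECLARE (void, free, void *ptr);
  char **symbols;
  size_t i, num = __mf_backtrace (& symbols, pc, 2);
  for (i = 0; i < num; i++)
    fprintf (stderr, "      %s\n", symbols[i]);
  CALL_REAL (free, symbols);
}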

/* ------------------------------------------------------------------------ */
/* __mf_violation */

void
__mf_violation (void *ptr, size_t sz, uintptr_t pc,
                const char *location, int type)
{
  char buf [128];
  static unsigned violation_number;
  DECLARE(void, free, void *ptr);

  TRACE ("violation pc=%p location=%s type=%d ptr=%p size=%lu\n",
         (void *) pc,
         (location != NULL ? location : ""), type, ptr, (unsigned long) sz);

  if (__mf_opts.collect_stats)
    __mf_count_violation [(type < 0) ? 0 :
                          (type > __MF_VIOL_WATCH) ? 0 :
                          type] ++;

  /* Print out a basic warning message.  */
  if (__mf_opts.verbose_violations)
  {
    unsigned dead_p;
    unsigned num_helpful = 0;
    struct timeval now = { 0, 0 };
#if HAVE_GETTIMEOFDAY
    gettimeofday (& now, NULL);
#endif

    violation_number ++;
    fprintf (stderr,
             "*******\n"
             "mudflap violation %u (%s): time=%lu.%06lu "
             "ptr=%p size=%lu\npc=%p%s%s%s\n",
             violation_number,
             ((type == __MF_VIOL_READ) ? "check/read" :
              (type == __MF_VIOL_WRITE) ? "check/write" :
              (type == __MF_VIOL_REGISTER) ? "register" :
              (type == __MF_VIOL_UNREGISTER) ? "unregister" :
              (type == __MF_VIOL_WATCH) ? "watch" : "unknown"),
             now.tv_sec, now.tv_usec,
             (void *) ptr, (unsigned long)sz, (void *) pc,
             (location != NULL ? " location=`" : ""),
             (location != NULL ? location : ""),
             (location != NULL ? "'" : ""));

    if (__mf_opts.backtrace > 0)
      {
        char ** symbols;
        unsigned i, num;

        num = __mf_backtrace (& symbols, (void *) pc, 2);
        /* Note: backtrace_symbols calls malloc().  But since we're in
           __mf_violation and presumably __mf_check, it'll detect
           recursion, and not put the new string into the database.  */

        for (i=0; i<num; i++)
          fprintf (stderr, "      %s\n", symbols[i]);

        /* Calling free() here would trigger a violation.  */
        CALL_REAL(free, symbols);
      }


    /* Look for nearby objects.  For this, we start with s_low/s_high
       pointing to the given area, looking for overlapping objects.
       If none show up, widen the search area and keep looking. */

    if (sz == 0) sz = 1;

    for (dead_p = 0; dead_p <= 1; dead_p ++) /* for dead_p in 0 1 */
      {
        enum {max_objs = 3}; /* magic */
        __mf_object_t *objs[max_objs];
        unsigned num_objs = 0;
        uintptr_t s_low, s_high;
        unsigned tries = 0;
        unsigned i;

        s_low = (uintptr_t) ptr;
        s_high = CLAMPSZ (ptr, sz);

        while (tries < 16) /* magic */
          {
            if (dead_p)
              num_objs = __mf_find_dead_objects (s_low, s_high, objs, max_objs);
            else
              num_objs = __mf_find_objects (s_low, s_high, objs, max_objs);

            if (num_objs) /* good enough */
              break;

            tries ++;

            /* XXX: tune this search strategy.  It's too dependent on
               sz, which can vary from 1 to very large values (e.g. when
               checking array indexes).  */
            s_low = CLAMPSUB (s_low, (sz * tries * tries));
            s_high = CLAMPADD (s_high, (sz * tries * tries));
          }

        for (i = 0; i < min (num_objs, max_objs); i++)
          {
            __mf_object_t *obj = objs[i];
            uintptr_t low = (uintptr_t) ptr;
            uintptr_t high = CLAMPSZ (ptr, sz);
            unsigned before1 = (low < obj->low) ? obj->low - low : 0;
            unsigned after1 = (low > obj->high) ? low - obj->high : 0;
            unsigned into1 = (high >= obj->low && low <= obj->high) ? low - obj->low : 0;
            unsigned before2 = (high < obj->low) ? obj->low - high : 0;
            unsigned after2 = (high > obj->high) ? high - obj->high : 0;
            unsigned into2 = (high >= obj->low && low <= obj->high) ? high - obj->low : 0;

            fprintf (stderr, "Nearby object %u: checked region begins %uB %s and ends %uB %s\n",
                     num_helpful + i + 1,
                     (before1 ? before1 : after1 ? after1 : into1),
                     (before1 ? "before" : after1 ? "after" : "into"),
                     (before2 ? before2 : after2 ? after2 : into2),
                     (before2 ? "before" : after2 ? "after" : "into"));
            __mf_describe_object (obj);
          }
        num_helpful += num_objs;
      }

    fprintf (stderr, "number of nearby objects: %u\n", num_helpful);
  }

  /* How to finally handle this violation?  */
  switch (__mf_opts.violation_mode)
    {
    case viol_nop:
      break;
    case viol_segv:
      kill (getpid(), SIGSEGV);
      break;
    case viol_abort:
      abort ();
      break;
    case viol_gdb:

      snprintf (buf, 128, "gdb --pid=%u", (unsigned) getpid ());
      system (buf);
      /* XXX: should probably fork() && sleep(GDB_WAIT_PARAMETER)
         instead, and let the forked child execlp() gdb.  That way, this
         subject process can be resumed under the supervision of gdb.
         This can't happen now, since system() only returns when gdb
         dies.  In that case, we need to beware of starting a second
         concurrent gdb child upon the next violation.  (But if the first
         gdb dies, then starting a new one is appropriate.)  */
      break;
    }
}
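
/* Editor's note (illustrative, not part of the upstream source): the
   widening search above grows the probed region quadratically, and the
   offsets accumulate across retries.  For example, with sz == 8 the
   region extends below the original s_low (and symmetrically above
   s_high) by:

     after tries=1:   8 bytes  (sz * 1 * 1)
     after tries=2:  40 bytes  (+ sz * 2 * 2)
     after tries=3: 112 bytes  (+ sz * 3 * 3)

   CLAMPSUB/CLAMPADD saturate at the ends of the address space rather
   than wrapping around.  */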

/* ------------------------------------------------------------------------ */


unsigned __mf_watch (void *ptr, size_t sz)
{
  unsigned rc;
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  rc = __mf_watch_or_not (ptr, sz, 1);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
  return rc;
}

unsigned __mf_unwatch (void *ptr, size_t sz)
{
  unsigned rc;
  LOCKTH ();
  rc = __mf_watch_or_not (ptr, sz, 0);
  UNLOCKTH ();
  return rc;
}


static unsigned
__mf_watch_or_not (void *ptr, size_t sz, char flag)
{
  uintptr_t ptr_high = CLAMPSZ (ptr, sz);
  uintptr_t ptr_low = (uintptr_t) ptr;
  unsigned count = 0;

  TRACE ("%s ptr=%p size=%lu\n",
         (flag ? "watch" : "unwatch"), ptr, (unsigned long) sz);

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
    case mode_populate:
    case mode_violate:
      count = 0;
      break;

    case mode_check:
      {
        __mf_object_t **all_ovr_objs;
        unsigned obj_count;
        unsigned n;
        DECLARE (void *, malloc, size_t c);
        DECLARE (void, free, void *p);

        obj_count = __mf_find_objects (ptr_low, ptr_high, NULL, 0);
        VERBOSE_TRACE (" %u:", obj_count);

        all_ovr_objs = CALL_REAL (malloc, (sizeof (__mf_object_t *) * obj_count));
        if (all_ovr_objs == NULL) abort ();
        n = __mf_find_objects (ptr_low, ptr_high, all_ovr_objs, obj_count);
        assert (n == obj_count);

        for (n = 0; n < obj_count; n ++)
          {
            __mf_object_t *obj = all_ovr_objs[n];

            VERBOSE_TRACE (" [%p]", (void *) obj);
            if (obj->watching_p != flag)
              {
                obj->watching_p = flag;
                count ++;

                /* Remove object from cache, to ensure next access
                   goes through __mf_check().  */
                if (flag)
                  __mf_uncache_object (obj);
              }
          }
        CALL_REAL (free, all_ovr_objs);
      }
      break;
    }

  return count;
}


void
__mf_sigusr1_handler (int num)
{
  __mf_sigusr1_received ++;
}

/* Install or remove SIGUSR1 handler as necessary.
   Also, respond to a received pending SIGUSR1.  */
void
__mf_sigusr1_respond ()
{
  static int handler_installed;

#ifdef SIGUSR1
  /* Manage handler */
  if (__mf_opts.sigusr1_report && ! handler_installed)
    {
      signal (SIGUSR1, __mf_sigusr1_handler);
      handler_installed = 1;
    }
  else if (! __mf_opts.sigusr1_report && handler_installed)
    {
      signal (SIGUSR1, SIG_DFL);
      handler_installed = 0;
    }
#endif

  /* Manage enqueued signals */
  if (__mf_sigusr1_received > __mf_sigusr1_handled)
    {
      __mf_sigusr1_handled ++;
      assert (__mf_get_state () == reentrant);
      __mfu_report ();
      handler_installed = 0; /* We may need to re-enable signal; this might be a SysV library. */
    }
}


/* XXX: provide an alternative __assert_fail function that cannot
   fail due to libmudflap infinite recursion.  */
#ifndef NDEBUG

static void
write_itoa (int fd, unsigned n)
{
  enum x { bufsize = sizeof(n)*4 };
  char buf [bufsize];
  unsigned i;

  for (i=0; i<bufsize-1; i++)
    {
      unsigned digit = n % 10;
      buf[bufsize-2-i] = digit + '0';
      n /= 10;
      if (n == 0)
        {
          char *m = & buf [bufsize-2-i];
          buf[bufsize-1] = '\0';
          write (fd, m, strlen(m));
          break;
        }
    }
}
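
/* Editor's note (illustrative, not part of the upstream source):
   write_itoa formats digits right-to-left in a fixed stack buffer and
   emits only the used tail with a raw write(), so it needs no heap and
   cannot recurse back into mudflap's checked allocator wrappers.  For
   n == 307 with bufsize == 16, the buffer ends "...307\0" and write()
   is handed the three characters starting at '3'.  */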


void
__assert_fail (const char *msg, const char *file, unsigned line, const char *func)
{
#define write2(string) write (2, (string), strlen ((string)));
  write2("mf");
#ifdef LIBMUDFLAPTH
  write2("(");
  write_itoa (2, (unsigned) pthread_self ());
  write2(")");
#endif
  write2(": assertion failure: `");
  write (2, msg, strlen (msg));
  write2("' in ");
  write (2, func, strlen (func));
  write2(" at ");
  write (2, file, strlen (file));
  write2(":");
  write_itoa (2, line);
  write2("\n");
#undef write2
  abort ();
}


#endif



/* Adapted splay tree code, originally from libiberty.  It has been
   specialized for libmudflap as requested by RMS.  */

static void
mfsplay_tree_free (void *p)
{
  DECLARE (void, free, void *p);
  CALL_REAL (free, p);
}

static void *
mfsplay_tree_xmalloc (size_t s)
{
  DECLARE (void *, malloc, size_t s);
  return CALL_REAL (malloc, s);
}


static void mfsplay_tree_splay (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_splay_helper (mfsplay_tree,
                                                    mfsplay_tree_key,
                                                    mfsplay_tree_node *,
                                                    mfsplay_tree_node *,
                                                    mfsplay_tree_node *);


/* Help splay SP around KEY.  PARENT and GRANDPARENT are the parent
   and grandparent, respectively, of NODE.  */

static mfsplay_tree_node
mfsplay_tree_splay_helper (mfsplay_tree sp,
                           mfsplay_tree_key key,
                           mfsplay_tree_node * node,
                           mfsplay_tree_node * parent,
                           mfsplay_tree_node * grandparent)
{
  mfsplay_tree_node *next;
  mfsplay_tree_node n;
  int comparison;

  n = *node;

  if (!n)
    return *parent;

  comparison = ((key > n->key) ? 1 : ((key < n->key) ? -1 : 0));

  if (comparison == 0)
    /* We've found the target.  */
    next = 0;
  else if (comparison < 0)
    /* The target is to the left.  */
    next = &n->left;
  else
    /* The target is to the right.  */
    next = &n->right;

  if (next)
    {
      /* Check whether our recursion depth is too high.  Abort this search,
         and signal that a rebalance is required to continue.  */
      if (sp->depth > sp->max_depth)
        {
          sp->rebalance_p = 1;
          return n;
        }

      /* Continue down the tree.  */
      sp->depth ++;
      n = mfsplay_tree_splay_helper (sp, key, next, node, parent);
      sp->depth --;

      /* The recursive call will change the place to which NODE
         points.  */
      if (*node != n || sp->rebalance_p)
        return n;
    }

  if (!parent)
    /* NODE is the root.  We are done.  */
    return n;

  /* First, handle the case where there is no grandparent (i.e.,
     *PARENT is the root of the tree.)  */
  if (!grandparent)
    {
      if (n == (*parent)->left)
        {
          *node = n->right;
          n->right = *parent;
        }
      else
        {
          *node = n->left;
          n->left = *parent;
        }
      *parent = n;
      return n;
    }

  /* Next handle the cases where both N and *PARENT are left children,
     or where both are right children.  */
  if (n == (*parent)->left && *parent == (*grandparent)->left)
    {
      mfsplay_tree_node p = *parent;

      (*grandparent)->left = p->right;
      p->right = *grandparent;
      p->left = n->right;
      n->right = p;
      *grandparent = n;
      return n;
    }
  else if (n == (*parent)->right && *parent == (*grandparent)->right)
    {
      mfsplay_tree_node p = *parent;

      (*grandparent)->right = p->left;
      p->left = *grandparent;
      p->right = n->left;
      n->left = p;
      *grandparent = n;
      return n;
    }

  /* Finally, deal with the case where N is a left child, but *PARENT
     is a right child, or vice versa.  */
  if (n == (*parent)->left)
    {
      (*parent)->left = n->right;
      n->right = *parent;
      (*grandparent)->right = n->left;
      n->left = *grandparent;
      *grandparent = n;
      return n;
    }
  else
    {
      (*parent)->right = n->left;
      n->left = *parent;
      (*grandparent)->left = n->right;
      n->right = *grandparent;
      *grandparent = n;
      return n;
    }
}
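
/* Editor's note (illustrative, not part of the upstream source): the
   three cases above are the classic splay rotations.  For the zig-zig
   case (N and *PARENT both left children):

          G              N
         / \            / \
        P   d   ==>    a   P
       / \                / \
      N   c              b   G
     / \                    / \
    a   b                  c   d

   The final case is the zig-zag, which rotates N through both P and G,
   and the no-grandparent case is a single terminal rotation at the
   root.  */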



static int
mfsplay_tree_rebalance_helper1 (mfsplay_tree_node n, void *array_ptr)
{
  mfsplay_tree_node **p = array_ptr;
  *(*p) = n;
  (*p)++;
  return 0;
}


static mfsplay_tree_node
mfsplay_tree_rebalance_helper2 (mfsplay_tree_node * array, unsigned low,
                                unsigned high)
{
  unsigned middle = low + (high - low) / 2;
  mfsplay_tree_node n = array[middle];

  /* Note that since we're producing a balanced binary tree, it is not a problem
     that this function is recursive.  */
  if (low + 1 <= middle)
    n->left = mfsplay_tree_rebalance_helper2 (array, low, middle - 1);
  else
    n->left = NULL;

  if (middle + 1 <= high)
    n->right = mfsplay_tree_rebalance_helper2 (array, middle + 1, high);
  else
    n->right = NULL;

  return n;
}


/* Rebalance the entire tree.  Do this by copying all the node
   pointers into an array, then cleverly re-linking them.  */
static void
mfsplay_tree_rebalance (mfsplay_tree sp)
{
  mfsplay_tree_node *all_nodes, *all_nodes_1;

  if (sp->num_keys <= 2)
    return;

  all_nodes = mfsplay_tree_xmalloc (sizeof (mfsplay_tree_node) * sp->num_keys);

  /* Traverse all nodes to copy their addresses into this array.  */
  all_nodes_1 = all_nodes;
  mfsplay_tree_foreach (sp, mfsplay_tree_rebalance_helper1,
                        (void *) &all_nodes_1);

  /* Relink all the nodes.  */
  sp->root = mfsplay_tree_rebalance_helper2 (all_nodes, 0, sp->num_keys - 1);

  mfsplay_tree_free (all_nodes);
}
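
/* Editor's note (illustrative, not part of the upstream source):
   helper2 rebuilds a balanced tree from the in-order node array by
   repeatedly rooting each range at its midpoint.  With seven nodes
   n0..n6 (already sorted by key, since the array was filled by an
   in-order traversal), the result is:

           n3
          /  \
        n1    n5
       /  \  /  \
      n0 n2 n4  n6
*/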


/* Splay SP around KEY.  */
static void
mfsplay_tree_splay (mfsplay_tree sp, mfsplay_tree_key key)
{
  if (sp->root == 0)
    return;

  /* If we just splayed the tree with the same key, do nothing.  */
  if (sp->last_splayed_key_p &&
      (sp->last_splayed_key == key))
    return;

  /* Compute a maximum recursion depth for a splay tree with NUM nodes.
     The idea is to limit excessive stack usage if we're facing
     degenerate access patterns.  Unfortunately such patterns can occur
     e.g. during static initialization, where many static objects might
     be registered in increasing address sequence, or when large
     tree-like heap data structures are allocated quickly.

     On x86, this corresponds to roughly 200K of stack usage.
     XXX: For libmudflapth, this could be a function of __mf_opts.thread_stack.  */
  sp->max_depth = 2500;
  sp->rebalance_p = sp->depth = 0;

  mfsplay_tree_splay_helper (sp, key, &sp->root, NULL, NULL);
  if (sp->rebalance_p)
    {
      mfsplay_tree_rebalance (sp);

      sp->rebalance_p = sp->depth = 0;
      mfsplay_tree_splay_helper (sp, key, &sp->root, NULL, NULL);

      if (sp->rebalance_p)
        abort ();
    }


  /* Cache this splay key. */
  sp->last_splayed_key = key;
  sp->last_splayed_key_p = 1;
}



/* Allocate a new splay tree.  */
static mfsplay_tree
mfsplay_tree_new ()
{
  mfsplay_tree sp = mfsplay_tree_xmalloc (sizeof (struct mfsplay_tree_s));
  sp->root = NULL;
  sp->last_splayed_key_p = 0;
  sp->num_keys = 0;

  return sp;
}



/* Insert a new node (associating KEY with DATA) into SP.  If a
   previous node with the indicated KEY exists, its data is replaced
   with the new value.  Returns the new node.  */
static mfsplay_tree_node
mfsplay_tree_insert (mfsplay_tree sp, mfsplay_tree_key key, mfsplay_tree_value value)
{
  int comparison = 0;

  mfsplay_tree_splay (sp, key);

  if (sp->root)
    comparison = ((sp->root->key > key) ? 1 :
                  ((sp->root->key < key) ? -1 : 0));

  if (sp->root && comparison == 0)
    {
      /* If the root of the tree already has the indicated KEY, just
         replace the value with VALUE.  */
      sp->root->value = value;
    }
  else
    {
      /* Create a new node, and insert it at the root.  */
      mfsplay_tree_node node;

      node = mfsplay_tree_xmalloc (sizeof (struct mfsplay_tree_node_s));
      node->key = key;
      node->value = value;
      sp->num_keys++;
      if (!sp->root)
        node->left = node->right = 0;
      else if (comparison < 0)
        {
          node->left = sp->root;
          node->right = node->left->right;
          node->left->right = 0;
        }
      else
        {
          node->right = sp->root;
          node->left = node->right->left;
          node->right->left = 0;
        }

      sp->root = node;
      sp->last_splayed_key_p = 0;
    }

  return sp->root;
}

/* Remove KEY from SP.  It is not an error if it did not exist.  */

static void
mfsplay_tree_remove (mfsplay_tree sp, mfsplay_tree_key key)
{
  mfsplay_tree_splay (sp, key);
  sp->last_splayed_key_p = 0;
  if (sp->root && (sp->root->key == key))
    {
      mfsplay_tree_node left, right;
      left = sp->root->left;
      right = sp->root->right;
      /* Delete the root node itself.  */
      mfsplay_tree_free (sp->root);
      sp->num_keys--;
      /* One of the children is now the root.  Doesn't matter much
         which, so long as we preserve the properties of the tree.  */
      if (left)
        {
          sp->root = left;
          /* If there was a right child as well, hang it off the
             right-most leaf of the left child.  */
          if (right)
            {
              while (left->right)
                left = left->right;
              left->right = right;
            }
        }
      else
        sp->root = right;
    }
}

/* Look up KEY in SP, returning its node if present, and NULL
   otherwise.  */

static mfsplay_tree_node
mfsplay_tree_lookup (mfsplay_tree sp, mfsplay_tree_key key)
{
  mfsplay_tree_splay (sp, key);
  if (sp->root && (sp->root->key == key))
    return sp->root;
  else
    return 0;
}


/* Return the node immediately preceding KEY, or NULL if there is no
   predecessor.  KEY need not be present in the tree.  */

static mfsplay_tree_node
mfsplay_tree_predecessor (mfsplay_tree sp, mfsplay_tree_key key)
{
  int comparison;
  mfsplay_tree_node node;
  /* If the tree is empty, there is certainly no predecessor.  */
  if (!sp->root)
    return NULL;
  /* Splay the tree around KEY.  That will leave either the KEY
     itself, its predecessor, or its successor at the root.  */
  mfsplay_tree_splay (sp, key);
  comparison = ((sp->root->key > key) ? 1 :
                ((sp->root->key < key) ? -1 : 0));

  /* If the predecessor is at the root, just return it.  */
  if (comparison < 0)
    return sp->root;
  /* Otherwise, find the rightmost element of the left subtree.  */
  node = sp->root->left;
  if (node)
    while (node->right)
      node = node->right;
  return node;
}

/* Return the node immediately following KEY, or NULL if there is no
   successor.  KEY need not be present in the tree.  */

static mfsplay_tree_node
mfsplay_tree_successor (mfsplay_tree sp, mfsplay_tree_key key)
{
  int comparison;
  mfsplay_tree_node node;
  /* If the tree is empty, there is certainly no successor.  */
  if (!sp->root)
    return NULL;
  /* Splay the tree around KEY.  That will leave either the KEY
     itself, its predecessor, or its successor at the root.  */
  mfsplay_tree_splay (sp, key);
  comparison = ((sp->root->key > key) ? 1 :
                ((sp->root->key < key) ? -1 : 0));
  /* If the successor is at the root, just return it.  */
  if (comparison > 0)
    return sp->root;
  /* Otherwise, find the leftmost element of the right subtree.  */
  node = sp->root->right;
  if (node)
    while (node->left)
      node = node->left;
  return node;
}

/* Call FN, passing it the DATA, for every node in SP, following an
   in-order traversal.  If FN ever returns a non-zero value, the
   iteration ceases immediately, and the value is returned.
   Otherwise, this function returns 0.

   This function simulates recursion using dynamically allocated
   arrays, since it may be called from mfsplay_tree_rebalance(), which
   in turn means that the tree is already uncomfortably deep for stack
   space limits.  */
static int
mfsplay_tree_foreach (mfsplay_tree st, mfsplay_tree_foreach_fn fn, void *data)
{
  mfsplay_tree_node *stack1;
  char *stack2;
  unsigned sp;
  int val = 0;
  enum s { s_left, s_here, s_right, s_up };

  if (st->root == NULL) /* => num_keys == 0 */
    return 0;

  stack1 = mfsplay_tree_xmalloc (sizeof (mfsplay_tree_node) * st->num_keys);
  stack2 = mfsplay_tree_xmalloc (sizeof (char) * st->num_keys);

  sp = 0;
  stack1 [sp] = st->root;
  stack2 [sp] = s_left;

  while (1)
    {
      mfsplay_tree_node n;
      enum s s;

      n = stack1 [sp];
      s = stack2 [sp];

      /* Handle each of the four possible states separately.  */

      /* 1: We're here to traverse the left subtree (if any).  */
      if (s == s_left)
        {
          stack2 [sp] = s_here;
          if (n->left != NULL)
            {
              sp ++;
              stack1 [sp] = n->left;
              stack2 [sp] = s_left;
            }
        }

      /* 2: We're here to traverse this node.  */
      else if (s == s_here)
        {
          stack2 [sp] = s_right;
          val = (*fn) (n, data);
          if (val) break;
        }

      /* 3: We're here to traverse the right subtree (if any).  */
      else if (s == s_right)
        {
          stack2 [sp] = s_up;
          if (n->right != NULL)
            {
              sp ++;
              stack1 [sp] = n->right;
              stack2 [sp] = s_left;
            }
        }

      /* 4: We're here after both subtrees (if any) have been traversed.  */
      else if (s == s_up)
        {
          /* Pop the stack.  */
          if (sp == 0) break; /* Popping off the root node: we're finished!  */
          sp --;
        }

      else
        abort ();
    }

  mfsplay_tree_free (stack1);
  mfsplay_tree_free (stack2);
  return val;
}
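
/* Editor's illustrative sketch (not part of the upstream source): a
   foreach callback follows the same shape as __mf_report_leaks_fn
   above -- inspect the node, accumulate into the pointer passed as
   DATA, and return 0 to keep the traversal going (non-zero stops it).
   The callback name below is hypothetical.  */
static int
example_count_nodes_fn (mfsplay_tree_node n, void *data)
{
  (void) n;                  /* The node itself is unused; we only count.  */
  ++ *(unsigned *) data;
  return 0;                  /* Continue the in-order walk.  */
}
/* Usage:  unsigned c = 0;
           mfsplay_tree_foreach (tree, example_count_nodes_fn, &c);  */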