OpenCores
URL https://opencores.org/ocsvn/openrisc/openrisc/trunk

Subversion Repositories openrisc

File: openrisc/trunk/gnu-dev/or1k-gcc/boehm-gc/mark.c (rev 721, jeremybenn)
 
/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1995 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 2000 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose,  provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 */


# include <stdio.h>
# include "private/gc_pmark.h"

#if defined(MSWIN32) && defined(__GNUC__)
# include <excpt.h>
#endif

/* We put this here to minimize the risk of inlining. */
/*VARARGS*/
#ifdef __WATCOMC__
  void GC_noop(void *p, ...) {}
#else
  void GC_noop() {}
#endif

/* Single argument version, robust against whole program analysis. */
void GC_noop1(x)
word x;
{
    static VOLATILE word sink;

    sink = x;
}

/* mark_proc GC_mark_procs[MAX_MARK_PROCS] = {0} -- declared in gc_priv.h */

word GC_n_mark_procs = GC_RESERVED_MARK_PROCS;

/* Initialize GC_obj_kinds and the standard free lists properly.        */
/* This must be done statically since they may be accessed before       */
/* GC_init is called.                                                   */
/* It's done here, since we need to deal with mark descriptors.         */
struct obj_kind GC_obj_kinds[MAXOBJKINDS] = {
/* PTRFREE */ { &GC_aobjfreelist[0], 0 /* filled in dynamically */,
                0 | GC_DS_LENGTH, FALSE, FALSE },
/* NORMAL  */ { &GC_objfreelist[0], 0,
                0 | GC_DS_LENGTH,
                TRUE /* add length to descr */, TRUE },
/* UNCOLLECTABLE */
              { &GC_uobjfreelist[0], 0,
                0 | GC_DS_LENGTH, TRUE /* add length to descr */, TRUE },
# ifdef ATOMIC_UNCOLLECTABLE
   /* AUNCOLLECTABLE */
              { &GC_auobjfreelist[0], 0,
                0 | GC_DS_LENGTH, FALSE /* add length to descr */, FALSE },
# endif
# ifdef STUBBORN_ALLOC
/*STUBBORN*/ { &GC_sobjfreelist[0], 0,
                0 | GC_DS_LENGTH, TRUE /* add length to descr */, TRUE },
# endif
};
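/* The fields in each GC_obj_kinds initializer above are, in order: the     */
/* kind's object free list, its reclaim list (0 here, filled in             */
/* dynamically), the mark descriptor, whether the object length should be   */
/* added to the descriptor, and whether new objects of this kind are        */
/* cleared (see struct obj_kind in gc_priv.h).                               */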

# ifdef ATOMIC_UNCOLLECTABLE
#   ifdef STUBBORN_ALLOC
      int GC_n_kinds = 5;
#   else
      int GC_n_kinds = 4;
#   endif
# else
#   ifdef STUBBORN_ALLOC
      int GC_n_kinds = 4;
#   else
      int GC_n_kinds = 3;
#   endif
# endif


# ifndef INITIAL_MARK_STACK_SIZE
#   define INITIAL_MARK_STACK_SIZE (1*HBLKSIZE)
                /* INITIAL_MARK_STACK_SIZE * sizeof(mse) should be a    */
                /* multiple of HBLKSIZE.                                */
                /* The incremental collector actually likes a larger    */
                /* size, since it wants to push all marked dirty objs   */
                /* before marking anything new.  Currently we let it    */
                /* grow dynamically.                                    */
# endif

/*
 * Limits of stack for GC_mark routine.
 * All ranges between GC_mark_stack (incl.) and GC_mark_stack_top (incl.) still
 * need to be marked from.
 */

word GC_n_rescuing_pages;       /* Number of dirty pages we marked from */
                                /* excludes ptrfree pages, etc.         */

mse * GC_mark_stack;

mse * GC_mark_stack_limit;

word GC_mark_stack_size = 0;

#ifdef PARALLEL_MARK
  mse * VOLATILE GC_mark_stack_top;
#else
  mse * GC_mark_stack_top;
#endif

static struct hblk * scan_ptr;

mark_state_t GC_mark_state = MS_NONE;

GC_bool GC_mark_stack_too_small = FALSE;

GC_bool GC_objects_are_marked = FALSE;  /* Are there collectable marked */
                                        /* objects in the heap?         */

/* Is a collection in progress?  Note that this can return true in the  */
/* nonincremental case, if a collection has been abandoned and the      */
/* mark state is now MS_INVALID.                                        */
GC_bool GC_collection_in_progress()
{
    return(GC_mark_state != MS_NONE);
}

/* clear all mark bits in the header */
void GC_clear_hdr_marks(hhdr)
register hdr * hhdr;
{
#   ifdef USE_MARK_BYTES
      BZERO(hhdr -> hb_marks, MARK_BITS_SZ);
#   else
      BZERO(hhdr -> hb_marks, MARK_BITS_SZ*sizeof(word));
#   endif
}

/* Set all mark bits in the header.  Used for uncollectable blocks. */
void GC_set_hdr_marks(hhdr)
register hdr * hhdr;
{
    register int i;

    for (i = 0; i < MARK_BITS_SZ; ++i) {
#     ifdef USE_MARK_BYTES
        hhdr -> hb_marks[i] = 1;
#     else
        hhdr -> hb_marks[i] = ONES;
#     endif
    }
}

/*
 * Clear all mark bits associated with block h.
 */
/*ARGSUSED*/
# if defined(__STDC__) || defined(__cplusplus)
    static void clear_marks_for_block(struct hblk *h, word dummy)
# else
    static void clear_marks_for_block(h, dummy)
    struct hblk *h;
    word dummy;
# endif
{
    register hdr * hhdr = HDR(h);

    if (IS_UNCOLLECTABLE(hhdr -> hb_obj_kind)) return;
        /* Mark bit for these is cleared only once the object is        */
        /* explicitly deallocated.  This either frees the block, or     */
        /* the bit is cleared once the object is on the free list.      */
    GC_clear_hdr_marks(hhdr);
}

/* Slow but general routines for setting/clearing/asking about mark bits */
void GC_set_mark_bit(p)
ptr_t p;
{
    register struct hblk *h = HBLKPTR(p);
    register hdr * hhdr = HDR(h);
    register int word_no = (word *)p - (word *)h;

    set_mark_bit_from_hdr(hhdr, word_no);
}

void GC_clear_mark_bit(p)
ptr_t p;
{
    register struct hblk *h = HBLKPTR(p);
    register hdr * hhdr = HDR(h);
    register int word_no = (word *)p - (word *)h;

    clear_mark_bit_from_hdr(hhdr, word_no);
}

GC_bool GC_is_marked(p)
ptr_t p;
{
    register struct hblk *h = HBLKPTR(p);
    register hdr * hhdr = HDR(h);
    register int word_no = (word *)p - (word *)h;

    return(mark_bit_from_hdr(hhdr, word_no));
}


/*
 * Clear mark bits in all allocated heap blocks.  This invalidates
 * the marker invariant, and sets GC_mark_state to reflect this.
 * (This implicitly starts marking to reestablish the invariant.)
 */
void GC_clear_marks()
{
    GC_apply_to_all_blocks(clear_marks_for_block, (word)0);
    GC_objects_are_marked = FALSE;
    GC_mark_state = MS_INVALID;
    scan_ptr = 0;
#   ifdef GATHERSTATS
        /* Counters reflect currently marked objects: reset here */
        GC_composite_in_use = 0;
        GC_atomic_in_use = 0;
#   endif

}

/* Initiate a garbage collection.  Initiates a full collection if the   */
/* mark state is invalid.                                               */
/*ARGSUSED*/
void GC_initiate_gc()
{
    if (GC_dirty_maintained) GC_read_dirty();
#   ifdef STUBBORN_ALLOC
        GC_read_changed();
#   endif
#   ifdef CHECKSUMS
        {
            extern void GC_check_dirty();

            if (GC_dirty_maintained) GC_check_dirty();
        }
#   endif
    GC_n_rescuing_pages = 0;
    if (GC_mark_state == MS_NONE) {
        GC_mark_state = MS_PUSH_RESCUERS;
    } else if (GC_mark_state != MS_INVALID) {
        ABORT("unexpected state");
    } /* else this is really a full collection, and mark        */
      /* bits are invalid.                                      */
    scan_ptr = 0;
}


static void alloc_mark_stack();

/* Perform a small amount of marking.                   */
/* We try to touch roughly a page of memory.            */
/* Return TRUE if we just finished a mark phase.        */
/* Cold_gc_frame is an address inside a GC frame that   */
/* remains valid until all marking is complete.         */
/* A zero value indicates that it's OK to miss some     */
/* register values.                                     */
/* We hold the allocation lock.  In the case of         */
/* incremental collection, the world may not be stopped.*/
#ifdef MSWIN32
  /* For win32, this is called after we establish a structured  */
  /* exception handler, in case Windows unmaps one of our root  */
  /* segments.  See below.  In either case, we acquire the      */
  /* allocator lock long before we get here.                    */
  GC_bool GC_mark_some_inner(cold_gc_frame)
  ptr_t cold_gc_frame;
#else
  GC_bool GC_mark_some(cold_gc_frame)
  ptr_t cold_gc_frame;
#endif
{
    switch(GC_mark_state) {
        case MS_NONE:
            return(FALSE);

        case MS_PUSH_RESCUERS:
            if (GC_mark_stack_top
                >= GC_mark_stack_limit - INITIAL_MARK_STACK_SIZE/2) {
                /* Go ahead and mark, even though that might cause us to */
                /* see more marked dirty objects later on.  Avoid this   */
                /* in the future.                                        */
                GC_mark_stack_too_small = TRUE;
                MARK_FROM_MARK_STACK();
                return(FALSE);
            } else {
                scan_ptr = GC_push_next_marked_dirty(scan_ptr);
                if (scan_ptr == 0) {
#                   ifdef CONDPRINT
                      if (GC_print_stats) {
                        GC_printf1("Marked from %lu dirty pages\n",
                                   (unsigned long)GC_n_rescuing_pages);
                      }
#                   endif
                    GC_push_roots(FALSE, cold_gc_frame);
                    GC_objects_are_marked = TRUE;
                    if (GC_mark_state != MS_INVALID) {
                        GC_mark_state = MS_ROOTS_PUSHED;
                    }
                }
            }
            return(FALSE);

        case MS_PUSH_UNCOLLECTABLE:
            if (GC_mark_stack_top
                >= GC_mark_stack + GC_mark_stack_size/4) {
#               ifdef PARALLEL_MARK
                  /* Avoid this, since we don't parallelize the marker  */
                  /* here.                                              */
                  if (GC_parallel) GC_mark_stack_too_small = TRUE;
#               endif
                MARK_FROM_MARK_STACK();
                return(FALSE);
            } else {
                scan_ptr = GC_push_next_marked_uncollectable(scan_ptr);
                if (scan_ptr == 0) {
                    GC_push_roots(TRUE, cold_gc_frame);
                    GC_objects_are_marked = TRUE;
                    if (GC_mark_state != MS_INVALID) {
                        GC_mark_state = MS_ROOTS_PUSHED;
                    }
                }
            }
            return(FALSE);

        case MS_ROOTS_PUSHED:
#           ifdef PARALLEL_MARK
              /* In the incremental GC case, this currently doesn't     */
              /* quite do the right thing, since it runs to             */
              /* completion.  On the other hand, starting a             */
              /* parallel marker is expensive, so perhaps it is         */
              /* the right thing?                                       */
              /* Eventually, incremental marking should run             */
              /* asynchronously in multiple threads, without grabbing   */
              /* the allocation lock.                                   */
                if (GC_parallel) {
                  GC_do_parallel_mark();
                  GC_ASSERT(GC_mark_stack_top < GC_first_nonempty);
                  GC_mark_stack_top = GC_mark_stack - 1;
                  if (GC_mark_stack_too_small) {
                    alloc_mark_stack(2*GC_mark_stack_size);
                  }
                  if (GC_mark_state == MS_ROOTS_PUSHED) {
                    GC_mark_state = MS_NONE;
                    return(TRUE);
                  } else {
                    return(FALSE);
                  }
                }
#           endif
            if (GC_mark_stack_top >= GC_mark_stack) {
                MARK_FROM_MARK_STACK();
                return(FALSE);
            } else {
                GC_mark_state = MS_NONE;
                if (GC_mark_stack_too_small) {
                    alloc_mark_stack(2*GC_mark_stack_size);
                }
                return(TRUE);
            }

        case MS_INVALID:
        case MS_PARTIALLY_INVALID:
            if (!GC_objects_are_marked) {
                GC_mark_state = MS_PUSH_UNCOLLECTABLE;
                return(FALSE);
            }
            if (GC_mark_stack_top >= GC_mark_stack) {
                MARK_FROM_MARK_STACK();
                return(FALSE);
            }
            if (scan_ptr == 0 && GC_mark_state == MS_INVALID) {
                /* About to start a heap scan for marked objects. */
                /* Mark stack is empty.  OK to reallocate.        */
                if (GC_mark_stack_too_small) {
                    alloc_mark_stack(2*GC_mark_stack_size);
                }
                GC_mark_state = MS_PARTIALLY_INVALID;
            }
            scan_ptr = GC_push_next_marked(scan_ptr);
            if (scan_ptr == 0 && GC_mark_state == MS_PARTIALLY_INVALID) {
                GC_push_roots(TRUE, cold_gc_frame);
                GC_objects_are_marked = TRUE;
                if (GC_mark_state != MS_INVALID) {
                    GC_mark_state = MS_ROOTS_PUSHED;
                }
            }
            return(FALSE);
        default:
            ABORT("GC_mark_some: bad state");
            return(FALSE);
    }
}


#ifdef MSWIN32

# ifdef __GNUC__

    typedef struct {
      EXCEPTION_REGISTRATION ex_reg;
      void *alt_path;
    } ext_ex_regn;


    static EXCEPTION_DISPOSITION mark_ex_handler(
        struct _EXCEPTION_RECORD *ex_rec,
        void *est_frame,
        struct _CONTEXT *context,
        void *disp_ctxt)
    {
        if (ex_rec->ExceptionCode == STATUS_ACCESS_VIOLATION) {
          ext_ex_regn *xer = (ext_ex_regn *)est_frame;

          /* Unwind from the inner function assuming the standard */
          /* function prologue.                                   */
          /* Assumes code has not been compiled with              */
          /* -fomit-frame-pointer.                                */
          context->Esp = context->Ebp;
          context->Ebp = *((DWORD *)context->Esp);
          context->Esp = context->Esp - 8;

          /* Resume execution at the "real" handler within the    */
          /* wrapper function.                                    */
          context->Eip = (DWORD )(xer->alt_path);

          return ExceptionContinueExecution;

        } else {
            return ExceptionContinueSearch;
        }
    }
# endif /* __GNUC__ */


  GC_bool GC_mark_some(cold_gc_frame)
  ptr_t cold_gc_frame;
  {
      GC_bool ret_val;

#   ifndef __GNUC__
      /* Windows 98 appears to asynchronously create and remove  */
      /* writable memory mappings, for reasons we haven't yet    */
      /* understood.  Since we look for writable regions to      */
      /* determine the root set, we may try to mark from an      */
      /* address range that disappeared since we started the     */
      /* collection.  Thus we have to recover from faults here.  */
      /* This code does not appear to be necessary for Windows   */
      /* 95/NT/2000. Note that this code should never generate   */
      /* an incremental GC write fault.                          */

      __try {

#   else /* __GNUC__ */

      /* Manually install an exception handler since GCC does    */
      /* not yet support Structured Exception Handling (SEH) on  */
      /* Win32.                                                  */

      ext_ex_regn er;

      er.alt_path = &&handle_ex;
      er.ex_reg.handler = mark_ex_handler;
      asm volatile ("movl %%fs:0, %0" : "=r" (er.ex_reg.prev));
      asm volatile ("movl %0, %%fs:0" : : "r" (&er));

#   endif /* __GNUC__ */

          ret_val = GC_mark_some_inner(cold_gc_frame);

#   ifndef __GNUC__

      } __except (GetExceptionCode() == EXCEPTION_ACCESS_VIOLATION ?
                EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {

#   else /* __GNUC__ */

          /* Prevent GCC from considering the following code unreachable */
          /* and thus eliminating it.                                    */
          if (er.alt_path != 0)
              goto rm_handler;

handle_ex:
          /* Execution resumes from here on an access violation. */

#   endif /* __GNUC__ */

#         ifdef CONDPRINT
            if (GC_print_stats) {
              GC_printf0("Caught ACCESS_VIOLATION in marker. "
                         "Memory mapping disappeared.\n");
            }
#         endif /* CONDPRINT */

          /* We have bad roots on the stack.  Discard mark stack.  */
          /* Rescan from marked objects.  Redetermine roots.     */
          GC_invalidate_mark_state();
          scan_ptr = 0;

          ret_val = FALSE;

#   ifndef __GNUC__

      }

#   else /* __GNUC__ */

rm_handler:
      /* Uninstall the exception handler */
      asm volatile ("mov %0, %%fs:0" : : "r" (er.ex_reg.prev));

#   endif /* __GNUC__ */

      return ret_val;
  }
#endif /* MSWIN32 */

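/* GC_mark_stack_empty() is TRUE iff there are no entries left to process  */
/* on the global mark stack.                                               */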
 
GC_bool GC_mark_stack_empty()
{
    return(GC_mark_stack_top < GC_mark_stack);
}
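/* When PROF_MARKER is defined, PROF(n) counts occurrences of event n in   */
/* GC_prof_array; otherwise it expands to nothing.                         */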
 
#ifdef PROF_MARKER
    word GC_prof_array[10];
#   define PROF(n) GC_prof_array[n]++
#else
#   define PROF(n)
#endif

/* Given a pointer to someplace other than a small object page or the   */
/* first page of a large object, either:                                */
/*      - return a pointer to somewhere in the first page of the large  */
/*        object, if current points to a large object.                  */
/*        In this case *hhdr is replaced with a pointer to the header   */
/*        for the large object.                                         */
/*      - just return current if it does not point to a large object.   */
/*ARGSUSED*/
ptr_t GC_find_start(current, hhdr, new_hdr_p)
register ptr_t current;
register hdr *hhdr, **new_hdr_p;
{
    if (GC_all_interior_pointers) {
        if (hhdr != 0) {
            register ptr_t orig = current;

            current = (ptr_t)HBLKPTR(current);
            do {
              current = current - HBLKSIZE*(word)hhdr;
              hhdr = HDR(current);
            } while(IS_FORWARDING_ADDR_OR_NIL(hhdr));
            /* current points to near the start of the large object */
            if (hhdr -> hb_flags & IGNORE_OFF_PAGE) return(orig);
            if ((word *)orig - (word *)current
                 >= (ptrdiff_t)(hhdr->hb_sz)) {
                /* Pointer past the end of the block */
                return(orig);
            }
            *new_hdr_p = hhdr;
            return(current);
        } else {
            return(current);
        }
    } else {
        return(current);
    }
}
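/* GC_invalidate_mark_state() abandons any marking in progress: it empties */
/* the mark stack and sets the mark state to MS_INVALID, so that marking   */
/* is redone from scratch.                                                 */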
 
void GC_invalidate_mark_state()
{
    GC_mark_state = MS_INVALID;
    GC_mark_stack_top = GC_mark_stack-1;
}
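/* GC_signal_mark_stack_overflow() records that a larger mark stack is     */
/* needed, invalidates the mark state, and returns a new stack top with    */
/* the most recently pushed GC_MARK_STACK_DISCARDS entries dropped.        */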
 
mse * GC_signal_mark_stack_overflow(msp)
mse * msp;
{
    GC_mark_state = MS_INVALID;
    GC_mark_stack_too_small = TRUE;
#   ifdef CONDPRINT
      if (GC_print_stats) {
        GC_printf1("Mark stack overflow; current size = %lu entries\n",
                    GC_mark_stack_size);
      }
#   endif
    return(msp - GC_MARK_STACK_DISCARDS);
}

/*
 * Mark objects pointed to by the regions described by
 * mark stack entries between GC_mark_stack and GC_mark_stack_top,
 * inclusive.  Assumes the upper limit of a mark stack entry
 * is never 0.  A mark stack entry never has size 0.
 * We try to traverse on the order of a hblk of memory before we return.
 * Caller is responsible for calling this until the mark stack is empty.
 * Note that this is the most performance critical routine in the
 * collector.  Hence it contains all sorts of ugly hacks to speed
 * things up.  In particular, we avoid procedure calls on the common
 * path, we take advantage of peculiarities of the mark descriptor
 * encoding, we optionally maintain a cache for the block address to
 * header mapping, we prefetch when an object is "grayed", etc.
 */
mse * GC_mark_from(mark_stack_top, mark_stack, mark_stack_limit)
mse * mark_stack_top;
mse * mark_stack;
mse * mark_stack_limit;
{
  int credit = HBLKSIZE;        /* Remaining credit for marking work    */
  register word * current_p;    /* Pointer to current candidate ptr.    */
  register word current;        /* Candidate pointer.                   */
  register word * limit;        /* (Incl) limit of current candidate    */
                                /* range                                */
  register word descr;
  register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
  register ptr_t least_ha = GC_least_plausible_heap_addr;
  DECLARE_HDR_CACHE;

# define SPLIT_RANGE_WORDS 128  /* Must be power of 2.          */

  GC_objects_are_marked = TRUE;
  INIT_HDR_CACHE;
# ifdef OS2 /* Use untweaked version to circumvent compiler problem */
  while (mark_stack_top >= mark_stack && credit >= 0) {
# else
  while ((((ptr_t)mark_stack_top - (ptr_t)mark_stack) | credit)
        >= 0) {
# endif
    current_p = mark_stack_top -> mse_start;
    descr = mark_stack_top -> mse_descr;
  retry:
    /* current_p and descr describe the current object.         */
    /* *mark_stack_top is vacant.                               */
    /* The following is 0 only for small objects described by a simple  */
    /* length descriptor.  For many applications this is the common     */
    /* case, so we try to detect it quickly.                            */
    if (descr & ((~(WORDS_TO_BYTES(SPLIT_RANGE_WORDS) - 1)) | GC_DS_TAGS)) {
      word tag = descr & GC_DS_TAGS;

      switch(tag) {
        case GC_DS_LENGTH:
          /* Large length.                                              */
          /* Process part of the range to avoid pushing too much on the */
          /* stack.                                                     */
          GC_ASSERT(descr < (word)GC_greatest_plausible_heap_addr
                            - (word)GC_least_plausible_heap_addr);
#         ifdef PARALLEL_MARK
#           define SHARE_BYTES 2048
            if (descr > SHARE_BYTES && GC_parallel
                && mark_stack_top < mark_stack_limit - 1) {
              int new_size = (descr/2) & ~(sizeof(word)-1);
              mark_stack_top -> mse_start = current_p;
              mark_stack_top -> mse_descr = new_size + sizeof(word);
                                        /* makes sure we handle         */
                                        /* misaligned pointers.         */
              mark_stack_top++;
              current_p = (word *) ((char *)current_p + new_size);
              descr -= new_size;
              goto retry;
            }
#         endif /* PARALLEL_MARK */
          mark_stack_top -> mse_start =
                limit = current_p + SPLIT_RANGE_WORDS-1;
          mark_stack_top -> mse_descr =
                        descr - WORDS_TO_BYTES(SPLIT_RANGE_WORDS-1);
          /* Make sure that pointers overlapping the two ranges are     */
          /* considered.                                                */
          limit = (word *)((char *)limit + sizeof(word) - ALIGNMENT);
          break;
        case GC_DS_BITMAP:
          mark_stack_top--;
          descr &= ~GC_DS_TAGS;
          credit -= WORDS_TO_BYTES(WORDSZ/2); /* guess */
          while (descr != 0) {
            if ((signed_word)descr < 0) {
              current = *current_p;
              FIXUP_POINTER(current);
              if ((ptr_t)current >= least_ha && (ptr_t)current < greatest_ha) {
                PREFETCH((ptr_t)current);
                HC_PUSH_CONTENTS((ptr_t)current, mark_stack_top,
                              mark_stack_limit, current_p, exit1);
              }
            }
            descr <<= 1;
            ++ current_p;
          }
          continue;
        case GC_DS_PROC:
          mark_stack_top--;
          credit -= GC_PROC_BYTES;
          mark_stack_top =
              (*PROC(descr))
                    (current_p, mark_stack_top,
                    mark_stack_limit, ENV(descr));
          continue;
        case GC_DS_PER_OBJECT:
          if ((signed_word)descr >= 0) {
            /* Descriptor is in the object.     */
            descr = *(word *)((ptr_t)current_p + descr - GC_DS_PER_OBJECT);
          } else {
            /* Descriptor is in type descriptor pointed to by first     */
            /* word in object.                                          */
            ptr_t type_descr = *(ptr_t *)current_p;
            /* type_descr is either a valid pointer to the descriptor   */
            /* structure, or this object was on a free list.  If it     */
            /* was anything but the last object on the free list,       */
            /* we will misinterpret the next object on the free list as */
            /* the type descriptor, and get a 0 GC descriptor, which    */
            /* is ideal.  Unfortunately, we need to check for the last  */
            /* object case explicitly.                                  */
            if (0 == type_descr) {
                /* Rarely executed.     */
                mark_stack_top--;
                continue;
            }
            descr = *(word *)(type_descr
                              - (descr - (GC_DS_PER_OBJECT
                                          - GC_INDIR_PER_OBJ_BIAS)));
          }
          if (0 == descr) {
              /* Can happen either because we generated a 0 descriptor  */
              /* or we saw a pointer to a free object.                  */
              mark_stack_top--;
              continue;
          }
          goto retry;
      }
    } else /* Small object with length descriptor */ {
      mark_stack_top--;
      limit = (word *)(((ptr_t)current_p) + (word)descr);
    }
    /* The simple case in which we're scanning a range. */
    GC_ASSERT(!((word)current_p & (ALIGNMENT-1)));
    credit -= (ptr_t)limit - (ptr_t)current_p;
    limit -= 1;
    {
#     define PREF_DIST 4

#     ifndef SMALL_CONFIG
        word deferred;

        /* Try to prefetch the next pointer to be examined asap.        */
        /* Empirically, this also seems to help slightly without        */
        /* prefetches, at least on linux/X86.  Presumably this loop     */
        /* ends up with less register pressure, and gcc thus ends up    */
        /* generating slightly better code.  Overall gcc code quality   */
        /* for this loop is still not great.                            */
        for(;;) {
          PREFETCH((ptr_t)limit - PREF_DIST*CACHE_LINE_SIZE);
          GC_ASSERT(limit >= current_p);
          deferred = *limit;
          FIXUP_POINTER(deferred);
          limit = (word *)((char *)limit - ALIGNMENT);
          if ((ptr_t)deferred >= least_ha && (ptr_t)deferred <  greatest_ha) {
            PREFETCH((ptr_t)deferred);
            break;
          }
          if (current_p > limit) goto next_object;
          /* Unroll once, so we don't do too many of the prefetches     */
          /* based on limit.                                            */
          deferred = *limit;
          FIXUP_POINTER(deferred);
          limit = (word *)((char *)limit - ALIGNMENT);
          if ((ptr_t)deferred >= least_ha && (ptr_t)deferred <  greatest_ha) {
            PREFETCH((ptr_t)deferred);
            break;
          }
          if (current_p > limit) goto next_object;
        }
#     endif

      while (current_p <= limit) {
        /* Empirically, unrolling this loop doesn't help a lot. */
        /* Since HC_PUSH_CONTENTS expands to a lot of code,     */
        /* we don't.                                            */
        current = *current_p;
        FIXUP_POINTER(current);
        PREFETCH((ptr_t)current_p + PREF_DIST*CACHE_LINE_SIZE);
        if ((ptr_t)current >= least_ha && (ptr_t)current <  greatest_ha) {
          /* Prefetch the contents of the object we just pushed.  It's  */
          /* likely we will need them soon.                             */
          PREFETCH((ptr_t)current);
          HC_PUSH_CONTENTS((ptr_t)current, mark_stack_top,
                           mark_stack_limit, current_p, exit2);
        }
        current_p = (word *)((char *)current_p + ALIGNMENT);
      }

#     ifndef SMALL_CONFIG
        /* We still need to mark the entry we previously prefetched.    */
        /* We already know that it passes the preliminary pointer       */
        /* validity test.                                               */
        HC_PUSH_CONTENTS((ptr_t)deferred, mark_stack_top,
                         mark_stack_limit, current_p, exit4);
        next_object:;
#     endif
    }
  }
  return mark_stack_top;
}

#ifdef PARALLEL_MARK

/* We assume we have an ANSI C Compiler.        */
GC_bool GC_help_wanted = FALSE;
unsigned GC_helper_count = 0;
unsigned GC_active_count = 0;
mse * VOLATILE GC_first_nonempty;
word GC_mark_no = 0;

#define LOCAL_MARK_STACK_SIZE HBLKSIZE
        /* Under normal circumstances, this is big enough to guarantee  */
        /* we don't overflow half of it in a single call to             */
        /* GC_mark_from.                                                */


/* Steal mark stack entries starting at mse low into mark stack local   */
/* until we either steal mse high, or we have max entries.              */
/* Return a pointer to the top of the local mark stack.                 */
/* *next is replaced by a pointer to the next unscanned mark stack      */
/* entry.                                                               */
mse * GC_steal_mark_stack(mse * low, mse * high, mse * local,
                          unsigned max, mse **next)
{
    mse *p;
    mse *top = local - 1;
    unsigned i = 0;

    /* Make sure that prior writes to the mark stack are visible. */
    /* On some architectures, the fact that the reads are         */
    /* volatile should suffice.                                   */
#   if !defined(IA64) && !defined(HP_PA) && !defined(I386)
      GC_memory_barrier();
#   endif
    GC_ASSERT(high >= low-1 && high - low + 1 <= GC_mark_stack_size);
    for (p = low; p <= high && i <= max; ++p) {
        word descr = *(volatile word *) &(p -> mse_descr);
        /* In the IA64 memory model, the following volatile store is    */
        /* ordered after this read of descr.  Thus a thread must read   */
        /* the original nonzero value.  HP_PA appears to be similar,    */
        /* and if I'm reading the P4 spec correctly, X86 is probably    */
        /* also OK.  In some other cases we need a barrier.             */
#       if !defined(IA64) && !defined(HP_PA) && !defined(I386)
          GC_memory_barrier();
#       endif
        if (descr != 0) {
            *(volatile word *) &(p -> mse_descr) = 0;
            /* More than one thread may get this entry, but that's only */
            /* a minor performance problem.                             */
            ++top;
            top -> mse_descr = descr;
            top -> mse_start = p -> mse_start;
            GC_ASSERT(  (top -> mse_descr & GC_DS_TAGS) != GC_DS_LENGTH ||
                        top -> mse_descr < (ptr_t)GC_greatest_plausible_heap_addr
                                           - (ptr_t)GC_least_plausible_heap_addr);
            /* If this is a big object, count it as                     */
            /* size/256 + 1 objects.                                    */
            ++i;
            if ((descr & GC_DS_TAGS) == GC_DS_LENGTH) i += (descr >> 8);
        }
    }
    *next = p;
    return top;
}

/* Copy back a local mark stack.        */
/* low and high are inclusive bounds.   */
void GC_return_mark_stack(mse * low, mse * high)
{
    mse * my_top;
    mse * my_start;
    size_t stack_size;

    if (high < low) return;
    stack_size = high - low + 1;
    GC_acquire_mark_lock();
    my_top = GC_mark_stack_top;
    my_start = my_top + 1;
    if (my_start - GC_mark_stack + stack_size > GC_mark_stack_size) {
#     ifdef CONDPRINT
        if (GC_print_stats) {
          GC_printf0("No room to copy back mark stack.");
        }
#     endif
      GC_mark_state = MS_INVALID;
      GC_mark_stack_too_small = TRUE;
      /* We drop the local mark stack.  We'll fix things later. */
    } else {
      BCOPY(low, my_start, stack_size * sizeof(mse));
      GC_ASSERT(GC_mark_stack_top == my_top);
#     if !defined(IA64) && !defined(HP_PA)
        GC_memory_barrier();
#     endif
        /* On IA64, the volatile write acts as a release barrier. */
      GC_mark_stack_top = my_top + stack_size;
    }
    GC_release_mark_lock();
    GC_notify_all_marker();
}

/* Mark from the local mark stack.              */
/* On return, the local mark stack is empty.    */
/* But this may be achieved by copying the      */
/* local mark stack back into the global one.   */
void GC_do_local_mark(mse *local_mark_stack, mse *local_top)
{
    unsigned n;
#   define N_LOCAL_ITERS 1

#   ifdef GC_ASSERTIONS
      /* Make sure we don't hold mark lock. */
        GC_acquire_mark_lock();
        GC_release_mark_lock();
#   endif
    for (;;) {
        for (n = 0; n < N_LOCAL_ITERS; ++n) {
            local_top = GC_mark_from(local_top, local_mark_stack,
                                     local_mark_stack + LOCAL_MARK_STACK_SIZE);
            if (local_top < local_mark_stack) return;
            if (local_top - local_mark_stack >= LOCAL_MARK_STACK_SIZE/2) {
                GC_return_mark_stack(local_mark_stack, local_top);
                return;
            }
        }
        if (GC_mark_stack_top < GC_first_nonempty &&
            GC_active_count < GC_helper_count
            && local_top > local_mark_stack + 1) {
            /* Try to share the load, since the main stack is empty,    */
            /* and helper threads are waiting for a refill.             */
            /* The entries near the bottom of the stack are likely      */
            /* to require more work.  Thus we return those, even though */
            /* it's harder.                                             */
            mse * p;
            mse * new_bottom = local_mark_stack
                                + (local_top - local_mark_stack)/2;
            GC_ASSERT(new_bottom > local_mark_stack
                      && new_bottom < local_top);
            GC_return_mark_stack(local_mark_stack, new_bottom - 1);
            memmove(local_mark_stack, new_bottom,
                    (local_top - new_bottom + 1) * sizeof(mse));
            local_top -= (new_bottom - local_mark_stack);
        }
    }
}
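/* ENTRIES_TO_GET is the number of global mark stack entries a helper      */
/* tries to steal at once; GC_mark_local steals only one when the global   */
/* stack holds fewer than twice this many.                                 */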
 
#define ENTRIES_TO_GET 5

long GC_markers = 2;            /* Normally changed by thread-library-  */
                                /* -specific code.                      */

/* Mark using the local mark stack until the global mark stack is empty */
/* and there are no active workers. Update GC_first_nonempty to reflect */
/* progress.                                                            */
/* Caller does not hold mark lock.                                      */
/* Caller has already incremented GC_helper_count.  We decrement it,    */
/* and maintain GC_active_count.                                        */
void GC_mark_local(mse *local_mark_stack, int id)
{
    mse * my_first_nonempty;

    GC_acquire_mark_lock();
    GC_active_count++;
    my_first_nonempty = GC_first_nonempty;
    GC_ASSERT(GC_first_nonempty >= GC_mark_stack &&
              GC_first_nonempty <= GC_mark_stack_top + 1);
#   ifdef PRINTSTATS
        GC_printf1("Starting mark helper %lu\n", (unsigned long)id);
#   endif
    GC_release_mark_lock();
    for (;;) {
        size_t n_on_stack;
        size_t n_to_get;
        mse *next;
        mse * my_top;
        mse * local_top;
        mse * global_first_nonempty = GC_first_nonempty;

        GC_ASSERT(my_first_nonempty >= GC_mark_stack &&
                  my_first_nonempty <= GC_mark_stack_top + 1);
        GC_ASSERT(global_first_nonempty >= GC_mark_stack &&
                  global_first_nonempty <= GC_mark_stack_top + 1);
        if (my_first_nonempty < global_first_nonempty) {
            my_first_nonempty = global_first_nonempty;
        } else if (global_first_nonempty < my_first_nonempty) {
            GC_compare_and_exchange((word *)(&GC_first_nonempty),
                                   (word) global_first_nonempty,
                                   (word) my_first_nonempty);
            /* If this fails, we just go ahead, without updating        */
            /* GC_first_nonempty.                                       */
        }
        /* Perhaps we should also update GC_first_nonempty, if it */
        /* is less.  But that would require using atomic updates. */
        my_top = GC_mark_stack_top;
        n_on_stack = my_top - my_first_nonempty + 1;
        if (0 == n_on_stack) {
            GC_acquire_mark_lock();
            my_top = GC_mark_stack_top;
            n_on_stack = my_top - my_first_nonempty + 1;
            if (0 == n_on_stack) {
                GC_active_count--;
                GC_ASSERT(GC_active_count <= GC_helper_count);
                /* Other markers may redeposit objects  */
                /* on the stack.                                */
                if (0 == GC_active_count) GC_notify_all_marker();
                while (GC_active_count > 0
                       && GC_first_nonempty > GC_mark_stack_top) {
                    /* We will be notified if either GC_active_count    */
                    /* reaches zero, or if more objects are pushed on   */
                    /* the global mark stack.                           */
                    GC_wait_marker();
                }
                if (GC_active_count == 0 &&
                    GC_first_nonempty > GC_mark_stack_top) {
                    GC_bool need_to_notify = FALSE;
                    /* The above conditions can't be falsified while we */
                    /* hold the mark lock, since neither                */
                    /* GC_active_count nor GC_mark_stack_top can        */
                    /* change.  GC_first_nonempty can only be           */
                    /* incremented asynchronously.  Thus we know that   */
                    /* both conditions actually held simultaneously.    */
                    GC_helper_count--;
                    if (0 == GC_helper_count) need_to_notify = TRUE;
#                   ifdef PRINTSTATS
                      GC_printf1(
                        "Finished mark helper %lu\n", (unsigned long)id);
#                   endif
                    GC_release_mark_lock();
                    if (need_to_notify) GC_notify_all_marker();
                    return;
                }
                /* else there's something on the stack again, or        */
                /* another helper may push something.                   */
                GC_active_count++;
                GC_ASSERT(GC_active_count > 0);
                GC_release_mark_lock();
                continue;
            } else {
                GC_release_mark_lock();
            }
        }
        n_to_get = ENTRIES_TO_GET;
        if (n_on_stack < 2 * ENTRIES_TO_GET) n_to_get = 1;
        local_top = GC_steal_mark_stack(my_first_nonempty, my_top,
                                        local_mark_stack, n_to_get,
                                        &my_first_nonempty);
        GC_ASSERT(my_first_nonempty >= GC_mark_stack &&
                  my_first_nonempty <= GC_mark_stack_top + 1);
        GC_do_local_mark(local_mark_stack, local_top);
    }
}

/* Perform Parallel mark.                       */
/* We hold the GC lock, not the mark lock.      */
/* Currently runs until the mark stack is       */
/* empty.                                       */
void GC_do_parallel_mark()
{
    mse local_mark_stack[LOCAL_MARK_STACK_SIZE];
    mse * local_top;
    mse * my_top;

    GC_acquire_mark_lock();
    GC_ASSERT(I_HOLD_LOCK());
    /* This could be a GC_ASSERT, but it seems safer to keep it on      */
    /* all the time, especially since it's cheap.                       */
    if (GC_help_wanted || GC_active_count != 0 || GC_helper_count != 0)
        ABORT("Tried to start parallel mark in bad state");
#   ifdef PRINTSTATS
        GC_printf1("Starting marking for mark phase number %lu\n",
                   (unsigned long)GC_mark_no);
#   endif
    GC_first_nonempty = GC_mark_stack;
    GC_active_count = 0;
    GC_helper_count = 1;
    GC_help_wanted = TRUE;
    GC_release_mark_lock();
    GC_notify_all_marker();
        /* Wake up potential helpers.   */
    GC_mark_local(local_mark_stack, 0);
    GC_acquire_mark_lock();
    GC_help_wanted = FALSE;
    /* Done; clean up.  */
    while (GC_helper_count > 0) GC_wait_marker();
    /* GC_helper_count cannot be incremented while GC_help_wanted == FALSE */
#   ifdef PRINTSTATS
        GC_printf1(
            "Finished marking for mark phase number %lu\n",
            (unsigned long)GC_mark_no);
#   endif
    GC_mark_no++;
    GC_release_mark_lock();
    GC_notify_all_marker();
}


/* Try to help out the marker, if it's running.         */
/* We do not hold the GC lock, but the requestor does.  */
void GC_help_marker(word my_mark_no)
{
    mse local_mark_stack[LOCAL_MARK_STACK_SIZE];
    unsigned my_id;
    mse * my_first_nonempty;

    if (!GC_parallel) return;
    GC_acquire_mark_lock();
    while (GC_mark_no < my_mark_no
           || !GC_help_wanted && GC_mark_no == my_mark_no) {
      GC_wait_marker();
    }
    my_id = GC_helper_count;
    if (GC_mark_no != my_mark_no || my_id >= GC_markers) {
      /* Second test is useful only if original threads can also        */
      /* act as helpers.  Under Linux they can't.                       */
      GC_release_mark_lock();
      return;
    }
    GC_helper_count = my_id + 1;
    GC_release_mark_lock();
    GC_mark_local(local_mark_stack, my_id);
    /* GC_mark_local decrements GC_helper_count. */
}

#endif /* PARALLEL_MARK */

/* Allocate or reallocate space for mark stack of size n words.         */
/* May silently fail.                                                   */
static void alloc_mark_stack(n)
word n;
{
    mse * new_stack = (mse *)GC_scratch_alloc(n * sizeof(struct GC_ms_entry));

    GC_mark_stack_too_small = FALSE;
    if (GC_mark_stack_size != 0) {
        if (new_stack != 0) {
          word displ = (word)GC_mark_stack & (GC_page_size - 1);
          signed_word size = GC_mark_stack_size * sizeof(struct GC_ms_entry);

          /* Recycle old space */
              if (0 != displ) displ = GC_page_size - displ;
              size = (size - displ) & ~(GC_page_size - 1);
              if (size > 0) {
                GC_add_to_heap((struct hblk *)
                                ((word)GC_mark_stack + displ), (word)size);
              }
          GC_mark_stack = new_stack;
          GC_mark_stack_size = n;
          GC_mark_stack_limit = new_stack + n;
#         ifdef CONDPRINT
            if (GC_print_stats) {
              GC_printf1("Grew mark stack to %lu frames\n",
                         (unsigned long) GC_mark_stack_size);
            }
#         endif
        } else {
#         ifdef CONDPRINT
            if (GC_print_stats) {
              GC_printf1("Failed to grow mark stack to %lu frames\n",
                         (unsigned long) n);
            }
#         endif
        }
    } else {
        if (new_stack == 0) {
            GC_err_printf0("No space for mark stack\n");
            EXIT();
        }
        GC_mark_stack = new_stack;
        GC_mark_stack_size = n;
        GC_mark_stack_limit = new_stack + n;
    }
    GC_mark_stack_top = GC_mark_stack-1;
}
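/* GC_mark_init() performs one-time marker initialization by allocating    */
/* the initial mark stack.                                                 */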
 
void GC_mark_init()
{
    alloc_mark_stack(INITIAL_MARK_STACK_SIZE);
}

/*
 * Push all locations between b and t onto the mark stack.
 * b is the first location to be checked. t is one past the last
 * location to be checked.
 * Should only be used if there is no possibility of mark stack
 * overflow.
 */
void GC_push_all(bottom, top)
ptr_t bottom;
ptr_t top;
{
    register word length;

    bottom = (ptr_t)(((word) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
    top = (ptr_t)(((word) top) & ~(ALIGNMENT-1));
    if (top == 0 || bottom == top) return;
    GC_mark_stack_top++;
    if (GC_mark_stack_top >= GC_mark_stack_limit) {
        ABORT("unexpected mark stack overflow");
    }
    length = top - bottom;
#   if GC_DS_TAGS > ALIGNMENT - 1
        length += GC_DS_TAGS;
        length &= ~GC_DS_TAGS;
#   endif
    GC_mark_stack_top -> mse_start = (word *)bottom;
    GC_mark_stack_top -> mse_descr = length;
}

/*
 * Analogous to the above, but push only those pages h with dirty_fn(h) != 0.
 * We use push_fn to actually push the block.
 * Used either to selectively push dirty pages, or to push a block
 * in piecemeal fashion, to allow for more marking concurrency.
 * Will not overflow mark stack if push_fn pushes a small fixed number
 * of entries.  (This is invoked only if push_fn pushes a single entry,
 * or if it marks each object before pushing it, thus ensuring progress
 * in the event of a stack overflow.)
 */
void GC_push_selected(bottom, top, dirty_fn, push_fn)
ptr_t bottom;
ptr_t top;
int (*dirty_fn) GC_PROTO((struct hblk * h));
void (*push_fn) GC_PROTO((ptr_t bottom, ptr_t top));
{
    register struct hblk * h;

    bottom = (ptr_t)(((long) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
    top = (ptr_t)(((long) top) & ~(ALIGNMENT-1));

    if (top == 0 || bottom == top) return;
    h = HBLKPTR(bottom + HBLKSIZE);
    if (top <= (ptr_t) h) {
        if ((*dirty_fn)(h-1)) {
            (*push_fn)(bottom, top);
        }
        return;
    }
    if ((*dirty_fn)(h-1)) {
        (*push_fn)(bottom, (ptr_t)h);
    }
    while ((ptr_t)(h+1) <= top) {
        if ((*dirty_fn)(h)) {
            if ((word)(GC_mark_stack_top - GC_mark_stack)
                > 3 * GC_mark_stack_size / 4) {
                /* Danger of mark stack overflow */
                (*push_fn)((ptr_t)h, top);
                return;
            } else {
                (*push_fn)((ptr_t)h, (ptr_t)(h+1));
            }
        }
        h++;
    }
    if ((ptr_t)h != top) {
        if ((*dirty_fn)(h)) {
            (*push_fn)((ptr_t)h, top);
        }
    }
    if (GC_mark_stack_top >= GC_mark_stack_limit) {
        ABORT("unexpected mark stack overflow");
    }
}

# ifndef SMALL_CONFIG

#ifdef PARALLEL_MARK
    /* Break up root sections into page size chunks to better spread    */
    /* out work.                                                        */
    GC_bool GC_true_func(struct hblk *h) { return TRUE; }
#   define GC_PUSH_ALL(b,t) GC_push_selected(b,t,GC_true_func,GC_push_all);
#else
#   define GC_PUSH_ALL(b,t) GC_push_all(b,t);
#endif


void GC_push_conditional(bottom, top, all)
ptr_t bottom;
ptr_t top;
int all;
{
    if (all) {
      if (GC_dirty_maintained) {
#       ifdef PROC_VDB
            /* Pages that were never dirtied cannot contain pointers    */
            GC_push_selected(bottom, top, GC_page_was_ever_dirty, GC_push_all);
#       else
            GC_push_all(bottom, top);
#       endif
      } else {
        GC_push_all(bottom, top);
      }
    } else {
        GC_push_selected(bottom, top, GC_page_was_dirty, GC_push_all);
    }
}
#endif
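/* GC_push_one() pushes a single candidate pointer p, typically a value    */
/* recovered from a machine register, after the usual validity checks.     */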
 
# if defined(MSWIN32) || defined(MSWINCE)
  void __cdecl GC_push_one(p)
# else
  void GC_push_one(p)
# endif
word p;
{
    GC_PUSH_ONE_STACK(p, MARKED_FROM_REGISTER);
}
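/* GC_mark_and_push() marks obj, if it appears to be a valid heap pointer, */
/* and pushes its contents on the supplied mark stack; it returns the      */
/* updated mark stack pointer, and is suitable for use from user-defined   */
/* mark procedures.                                                        */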
 
struct GC_ms_entry *GC_mark_and_push(obj, mark_stack_ptr, mark_stack_limit, src)
GC_PTR obj;
struct GC_ms_entry * mark_stack_ptr;
struct GC_ms_entry * mark_stack_limit;
GC_PTR *src;
{
   PREFETCH(obj);
   PUSH_CONTENTS(obj, mark_stack_ptr /* modified */, mark_stack_limit, src,
                 was_marked /* internally generated exit label */);
   return mark_stack_ptr;
}

# ifdef __STDC__
#   define BASE(p) (word)GC_base((void *)(p))
# else
#   define BASE(p) (word)GC_base((char *)(p))
# endif

/* Mark and push (i.e. gray) a single object p onto the main    */
/* mark stack.  Consider p to be valid if it is an interior     */
/* pointer.                                                     */
/* The object p has passed a preliminary pointer validity       */
/* test, but we do not definitely know whether it is valid.     */
/* Mark bits are NOT atomically updated.  Thus this must be the */
/* only thread setting them.                                    */
# if defined(PRINT_BLACK_LIST) || defined(KEEP_BACK_PTRS)
    void GC_mark_and_push_stack(p, source)
    ptr_t source;
# else
    void GC_mark_and_push_stack(p)
#   define source 0
# endif
register word p;
{
    register word r;
    register hdr * hhdr;
    register int displ;

    GET_HDR(p, hhdr);
    if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
        if (hhdr != 0) {
          r = BASE(p);
          hhdr = HDR(r);
          displ = BYTES_TO_WORDS(HBLKDISPL(r));
        }
    } else {
        register map_entry_type map_entry;

        displ = HBLKDISPL(p);
        map_entry = MAP_ENTRY((hhdr -> hb_map), displ);
        if (map_entry >= MAX_OFFSET) {
          if (map_entry == OFFSET_TOO_BIG || !GC_all_interior_pointers) {
              r = BASE(p);
              displ = BYTES_TO_WORDS(HBLKDISPL(r));
              if (r == 0) hhdr = 0;
          } else {
              /* Offset invalid, but map reflects interior pointers     */
              hhdr = 0;
          }
        } else {
          displ = BYTES_TO_WORDS(displ);
          displ -= map_entry;
          r = (word)((word *)(HBLKPTR(p)) + displ);
        }
    }
    /* If hhdr != 0 then r == GC_base(p), only we did it faster. */
    /* displ is the word index within the block.                 */
    if (hhdr == 0) {
#       ifdef PRINT_BLACK_LIST
          GC_add_to_black_list_stack(p, source);
#       else
          GC_add_to_black_list_stack(p);
#       endif
#       undef source  /* In case we had to define it. */
    } else {
        if (!mark_bit_from_hdr(hhdr, displ)) {
            set_mark_bit_from_hdr(hhdr, displ);
            GC_STORE_BACK_PTR(source, (ptr_t)r);
            PUSH_OBJ((word *)r, hhdr, GC_mark_stack_top,
                     GC_mark_stack_limit);
        }
    }
}

# ifdef TRACE_BUF

# define TRACE_ENTRIES 1000

struct trace_entry {
    char * kind;
    word gc_no;
    word words_allocd;
    word arg1;
    word arg2;
} GC_trace_buf[TRACE_ENTRIES];

int GC_trace_buf_ptr = 0;

void GC_add_trace_entry(char *kind, word arg1, word arg2)
{
    GC_trace_buf[GC_trace_buf_ptr].kind = kind;
    GC_trace_buf[GC_trace_buf_ptr].gc_no = GC_gc_no;
    GC_trace_buf[GC_trace_buf_ptr].words_allocd = GC_words_allocd;
    GC_trace_buf[GC_trace_buf_ptr].arg1 = arg1 ^ 0x80000000;
    GC_trace_buf[GC_trace_buf_ptr].arg2 = arg2 ^ 0x80000000;
    GC_trace_buf_ptr++;
    if (GC_trace_buf_ptr >= TRACE_ENTRIES) GC_trace_buf_ptr = 0;
}
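/* GC_print_trace() prints trace buffer entries, newest first, stopping at */
/* entries older than collection number gc_no or at an unused slot.        */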
 
1424
void GC_print_trace(word gc_no, GC_bool lock)
1425
{
1426
    int i;
1427
    struct trace_entry *p;
1428
 
1429
    if (lock) LOCK();
1430
    for (i = GC_trace_buf_ptr-1; i != GC_trace_buf_ptr; i--) {
1431
        if (i < 0) i = TRACE_ENTRIES-1;
1432
        p = GC_trace_buf + i;
1433
        if (p -> gc_no < gc_no || p -> kind == 0) return;
1434
        printf("Trace:%s (gc:%d,words:%d) 0x%X, 0x%X\n",
1435
                p -> kind, p -> gc_no, p -> words_allocd,
1436
                (p -> arg1) ^ 0x80000000, (p -> arg2) ^ 0x80000000);
1437
    }
1438
    printf("Trace incomplete\n");
1439
    if (lock) UNLOCK();
1440
}
1441
 
1442
# endif /* TRACE_BUF */
1443
 
/*
 * A version of GC_push_all that treats all interior pointers as valid
 * and scans the entire region immediately, in case the contents
 * change.
 */
void GC_push_all_eager(bottom, top)
ptr_t bottom;
ptr_t top;
{
    word * b = (word *)(((word) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
    word * t = (word *)(((word) top) & ~(ALIGNMENT-1));
    register word *p;
    register word q;
    register word *lim;
    register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    register ptr_t least_ha = GC_least_plausible_heap_addr;
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha
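    /* The two #define lines above redirect the global names to the     */
    /* register-cached copies, so that the GC_PUSH_ONE_STACK expansion  */
    /* below (which refers to the globals by name) uses the cached      */
    /* values; the matching #undef lines at the end of the function     */
    /* restore the real globals.                                        */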
 
    if (top == 0) return;
    /* check all pointers in range and push if they appear      */
    /* to be valid.                                             */
      lim = t - 1 /* longword */;
      for (p = b; p <= lim; p = (word *)(((char *)p) + ALIGNMENT)) {
        q = *p;
        GC_PUSH_ONE_STACK(q, p);
      }
#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr
}
 
#ifndef THREADS
/*
 * A version of GC_push_all that treats all interior pointers as valid
 * and scans part of the area immediately, to make sure that saved
 * register values are not lost.
 * Cold_gc_frame delimits the stack section that must be scanned
 * eagerly.  A zero value indicates that no eager scanning is needed.
 */
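/*
 * Here "eager" means that the range is dereferenced and its candidate
 * pointers are pushed right now (GC_push_all_eager), whereas GC_push_all
 * merely records the range on the mark stack so that it is scanned
 * later, by which time saved register values near the hot end of the
 * stack may already have been overwritten.
 */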
void GC_push_all_stack_partially_eager(bottom, top, cold_gc_frame)
ptr_t bottom;
ptr_t top;
ptr_t cold_gc_frame;
{
  if (!NEED_FIXUP_POINTER && GC_all_interior_pointers) {
#   define EAGER_BYTES 1024
    /* Push the hot end of the stack eagerly, so that register values   */
    /* saved inside GC frames are marked before they disappear.         */
    /* The rest of the marking can be deferred until later.             */
    if (0 == cold_gc_frame) {
        GC_push_all_stack(bottom, top);
        return;
    }
    GC_ASSERT(bottom <= cold_gc_frame && cold_gc_frame <= top);
#   ifdef STACK_GROWS_DOWN
        GC_push_all(cold_gc_frame - sizeof(ptr_t), top);
        GC_push_all_eager(bottom, cold_gc_frame);
#   else /* STACK_GROWS_UP */
        GC_push_all(bottom, cold_gc_frame + sizeof(ptr_t));
        GC_push_all_eager(cold_gc_frame, top);
#   endif /* STACK_GROWS_UP */
  } else {
    GC_push_all_eager(bottom, top);
  }
# ifdef TRACE_BUF
      GC_add_trace_entry("GC_push_all_stack", bottom, top);
# endif
}
#endif /* !THREADS */
 
void GC_push_all_stack(bottom, top)
ptr_t bottom;
ptr_t top;
{
  if (!NEED_FIXUP_POINTER && GC_all_interior_pointers) {
    GC_push_all(bottom, top);
  } else {
    GC_push_all_eager(bottom, top);
  }
}
 
#if !defined(SMALL_CONFIG) && !defined(USE_MARK_BYTES)
/* Push all objects reachable from marked objects in the given block */
/* of size 1 objects.                                                */
void GC_push_marked1(h, hhdr)
struct hblk *h;
register hdr * hhdr;
{
    word * mark_word_addr = &(hhdr->hb_marks[0]);
    register word *p;
    word *plim;
    register int i;
    register word q;
    register word mark_word;
    register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    register ptr_t least_ha = GC_least_plausible_heap_addr;
    register mse * mark_stack_top = GC_mark_stack_top;
    register mse * mark_stack_limit = GC_mark_stack_limit;
#   define GC_mark_stack_top mark_stack_top
#   define GC_mark_stack_limit mark_stack_limit
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha
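    /* Same name-shadowing trick as in GC_push_all_eager: the #define   */
    /* lines make the GC_PUSH_ONE_HEAP expansion below use the          */
    /* register-cached copies of the heap bounds and of the mark stack  */
    /* top and limit.  In the scan loop, each word of hhdr->hb_marks    */
    /* covers the next WORDSZ words of the block; a set bit i means the */
    /* (one-word) object at p[i] is marked, so its contents are         */
    /* examined and pushed.                                             */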
 
    p = (word *)(h->hb_body);
    plim = (word *)(((word)h) + HBLKSIZE);

    /* go through all words in block */
        while( p < plim )  {
            mark_word = *mark_word_addr++;
            i = 0;
            while(mark_word != 0) {
              if (mark_word & 1) {
                  q = p[i];
                  GC_PUSH_ONE_HEAP(q, p + i);
              }
              i++;
              mark_word >>= 1;
            }
            p += WORDSZ;
        }
#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr
#   undef GC_mark_stack_top
#   undef GC_mark_stack_limit
    GC_mark_stack_top = mark_stack_top;
}
 

#ifndef UNALIGNED

/* Push all objects reachable from marked objects in the given block */
/* of size 2 objects.                                                */
void GC_push_marked2(h, hhdr)
struct hblk *h;
register hdr * hhdr;
{
    word * mark_word_addr = &(hhdr->hb_marks[0]);
    register word *p;
    word *plim;
    register int i;
    register word q;
    register word mark_word;
    register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    register ptr_t least_ha = GC_least_plausible_heap_addr;
    register mse * mark_stack_top = GC_mark_stack_top;
    register mse * mark_stack_limit = GC_mark_stack_limit;
#   define GC_mark_stack_top mark_stack_top
#   define GC_mark_stack_limit mark_stack_limit
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha

    p = (word *)(h->hb_body);
    plim = (word *)(((word)h) + HBLKSIZE);

    /* go through all words in block */
        while( p < plim )  {
            mark_word = *mark_word_addr++;
            i = 0;
            while(mark_word != 0) {
              if (mark_word & 1) {
                  q = p[i];
                  GC_PUSH_ONE_HEAP(q, p + i);
                  q = p[i+1];
                  GC_PUSH_ONE_HEAP(q, p + i);
              }
              i += 2;
              mark_word >>= 2;
            }
            p += WORDSZ;
        }
#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr
#   undef GC_mark_stack_top
#   undef GC_mark_stack_limit
    GC_mark_stack_top = mark_stack_top;
}
 
/* Push all objects reachable from marked objects in the given block */
/* of size 4 objects.                                                */
/* There is a risk of mark stack overflow here.  But we handle that. */
/* And only unmarked objects get pushed, so it's not very likely.    */
void GC_push_marked4(h, hhdr)
struct hblk *h;
register hdr * hhdr;
{
    word * mark_word_addr = &(hhdr->hb_marks[0]);
    register word *p;
    word *plim;
    register int i;
    register word q;
    register word mark_word;
    register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    register ptr_t least_ha = GC_least_plausible_heap_addr;
    register mse * mark_stack_top = GC_mark_stack_top;
    register mse * mark_stack_limit = GC_mark_stack_limit;
#   define GC_mark_stack_top mark_stack_top
#   define GC_mark_stack_limit mark_stack_limit
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha

    p = (word *)(h->hb_body);
    plim = (word *)(((word)h) + HBLKSIZE);

    /* go through all words in block */
        while( p < plim )  {
            mark_word = *mark_word_addr++;
            i = 0;
            while(mark_word != 0) {
              if (mark_word & 1) {
                  q = p[i];
                  GC_PUSH_ONE_HEAP(q, p + i);
                  q = p[i+1];
                  GC_PUSH_ONE_HEAP(q, p + i + 1);
                  q = p[i+2];
                  GC_PUSH_ONE_HEAP(q, p + i + 2);
                  q = p[i+3];
                  GC_PUSH_ONE_HEAP(q, p + i + 3);
              }
              i += 4;
              mark_word >>= 4;
            }
            p += WORDSZ;
        }
#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr
#   undef GC_mark_stack_top
#   undef GC_mark_stack_limit
    GC_mark_stack_top = mark_stack_top;
}

#endif /* UNALIGNED */

#endif /* SMALL_CONFIG */
 
/* Push all objects reachable from marked objects in the given block */
void GC_push_marked(h, hhdr)
struct hblk *h;
register hdr * hhdr;
{
    register int sz = hhdr -> hb_sz;
    register int descr = hhdr -> hb_descr;
    register word * p;
    register int word_no;
    register word * lim;
    register mse * GC_mark_stack_top_reg;
    register mse * mark_stack_limit = GC_mark_stack_limit;

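    /* Notes on the shortcuts and limits below:                         */
    /*  - a descriptor equal to (0 | GC_DS_LENGTH) denotes length 0,    */
    /*    i.e. pointer-free objects, so there is nothing to push;       */
    /*  - for small objects lim is the last word at which an object     */
    /*    can still start within the block;                             */
    /*  - for a large object (sz > MAXOBJSZ) lim is the block start     */
    /*    itself, so the default loop below runs exactly once.          */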
    /* Some quick shortcuts: */
        if ((0 | GC_DS_LENGTH) == descr) return;
        if (GC_block_empty(hhdr)/* nothing marked */) return;
    GC_n_rescuing_pages++;
    GC_objects_are_marked = TRUE;
    if (sz > MAXOBJSZ) {
        lim = (word *)h;
    } else {
        lim = (word *)(h + 1) - sz;
    }

    switch(sz) {
#   if !defined(SMALL_CONFIG) && !defined(USE_MARK_BYTES)
     case 1:
       GC_push_marked1(h, hhdr);
       break;
#   endif
#   if !defined(SMALL_CONFIG) && !defined(UNALIGNED) && \
       !defined(USE_MARK_BYTES)
     case 2:
       GC_push_marked2(h, hhdr);
       break;
     case 4:
       GC_push_marked4(h, hhdr);
       break;
#   endif
     default:
      GC_mark_stack_top_reg = GC_mark_stack_top;
      for (p = (word *)h, word_no = 0; p <= lim; p += sz, word_no += sz) {
         if (mark_bit_from_hdr(hhdr, word_no)) {
           /* Mark from fields inside the object */
             PUSH_OBJ((word *)p, hhdr, GC_mark_stack_top_reg, mark_stack_limit);
#            ifdef GATHERSTATS
                /* Subtract this object from total, since it was        */
                /* added in twice.                                      */
                GC_composite_in_use -= sz;
#            endif
         }
      }
      GC_mark_stack_top = GC_mark_stack_top_reg;
    }
}
 
#ifndef SMALL_CONFIG
/* Test whether any page in the given block is dirty    */
GC_bool GC_block_was_dirty(h, hhdr)
struct hblk *h;
register hdr * hhdr;
{
    register int sz = hhdr -> hb_sz;

    if (sz <= MAXOBJSZ) {
         return(GC_page_was_dirty(h));
    } else {
         register ptr_t p = (ptr_t)h;
         sz = WORDS_TO_BYTES(sz);
         while (p < (ptr_t)h + sz) {
             if (GC_page_was_dirty((struct hblk *)p)) return(TRUE);
             p += HBLKSIZE;
         }
         return(FALSE);
    }
}
#endif /* SMALL_CONFIG */
 
/* Similar to GC_push_marked, but skip over unallocated blocks and      */
/* return the address of the next block.                                */
struct hblk * GC_push_next_marked(h)
struct hblk *h;
{
    register hdr * hhdr;

    h = GC_next_used_block(h);
    if (h == 0) return(0);
    hhdr = HDR(h);
    GC_push_marked(h, hhdr);
    return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
}
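/* Callers walk the heap by repeatedly feeding the address returned by  */
/* this routine (and the variants below) back in as the next starting   */
/* block.                                                               */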
 
#ifndef SMALL_CONFIG
/* Identical to above, but mark only from dirty pages   */
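/* This variant appears intended for the incremental collector: it      */
/* rescans only blocks containing pages written since they were last    */
/* scanned, as tracked by the virtual-dirty-bit machinery               */
/* (GC_dirty_maintained).                                               */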
struct hblk * GC_push_next_marked_dirty(h)
struct hblk *h;
{
    register hdr * hhdr;

    if (!GC_dirty_maintained) { ABORT("dirty bits not set up"); }
    for (;;) {
        h = GC_next_used_block(h);
        if (h == 0) return(0);
        hhdr = HDR(h);
#       ifdef STUBBORN_ALLOC
          if (hhdr -> hb_obj_kind == STUBBORN) {
            if (GC_page_was_changed(h) && GC_block_was_dirty(h, hhdr)) {
                break;
            }
          } else {
            if (GC_block_was_dirty(h, hhdr)) break;
          }
#       else
          if (GC_block_was_dirty(h, hhdr)) break;
#       endif
        h += OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
    }
    GC_push_marked(h, hhdr);
    return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
}
#endif
 
/* Similar to above, but for uncollectable pages.  Needed since we      */
/* do not clear marks for such pages, even for full collections.        */
struct hblk * GC_push_next_marked_uncollectable(h)
struct hblk *h;
{
    register hdr * hhdr = HDR(h);

    for (;;) {
        h = GC_next_used_block(h);
        if (h == 0) return(0);
        hhdr = HDR(h);
        if (hhdr -> hb_obj_kind == UNCOLLECTABLE) break;
        h += OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
    }
    GC_push_marked(h, hhdr);
    return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
}
 
 
