// natObject.cc - Implementation of the Object class.

/* Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2005  Free Software Foundation

   This file is part of libgcj.

This software is copyrighted work licensed under the terms of the
Libgcj License.  Please consult the file "LIBGCJ_LICENSE" for
details.  */

#include <config.h>
#include <platform.h>

#include <string.h>

#pragma implementation "Object.h"

#include <gcj/cni.h>
#include <jvm.h>
#include <java/lang/Object.h>
#include <java-threads.h>
#include <java-signal.h>
#include <java/lang/CloneNotSupportedException.h>
#include <java/lang/IllegalArgumentException.h>
#include <java/lang/IllegalMonitorStateException.h>
#include <java/lang/InterruptedException.h>
#include <java/lang/NullPointerException.h>
#include <java/lang/Class.h>
#include <java/lang/Cloneable.h>
#include <java/lang/Thread.h>

#ifdef LOCK_DEBUG
#  include <stdio.h>
#endif


using namespace java::lang;

// This is used to represent synchronization information.
struct _Jv_SyncInfo
{
#if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
  // We only need to keep track of initialization state if we can
  // possibly finalize this object.
  bool init;
#endif
  _Jv_ConditionVariable_t condition;
  _Jv_Mutex_t mutex;
};


jclass
java::lang::Object::getClass (void)
{
  _Jv_VTable **dt = (_Jv_VTable **) this;
  return (*dt)->clas;
}

jint
java::lang::Object::hashCode (void)
{
  return _Jv_HashCode (this);
}

jobject
java::lang::Object::clone (void)
{
  jclass klass = getClass ();
  jobject r;
  jint size;

  // We also clone arrays here.  If we put the array code into
  // __JArray, then we'd have to figure out a way to find the array
  // vtbl when creating a new array class.  This is easier, if uglier.
  if (klass->isArray())
    {
      __JArray *array = (__JArray *) this;
      jclass comp = getClass()->getComponentType();
      jint eltsize;
      if (comp->isPrimitive())
        {
          r = _Jv_NewPrimArray (comp, array->length);
          eltsize = comp->size();
        }
      else
        {
          r = _Jv_NewObjectArray (array->length, comp, NULL);
          eltsize = sizeof (jobject);
        }
      // We can't use sizeof on __JArray because we must account for
      // alignment of the element type.
      size = (_Jv_GetArrayElementFromElementType (array, comp) - (char *) array
              + array->length * eltsize);
    }
  else
    {
      if (! java::lang::Cloneable::class$.isAssignableFrom(klass))
        throw new CloneNotSupportedException;

      size = klass->size();
      r = _Jv_AllocObject (klass);
    }

  memcpy ((void *) r, (void *) this, size);
#ifndef JV_HASH_SYNCHRONIZATION
  // Guarantee that the locks associated with the two objects are
  // distinct.
  r->sync_info = NULL;
#endif
  return r;
}
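// For example: cloning a length-10 jintArray copies everything from the
// start of the object up to the first element (header plus any alignment
// padding, as computed via _Jv_GetArrayElementFromElementType) plus
// 10 * sizeof(jint) bytes of element data.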

void
_Jv_FinalizeObject (jobject obj)
{
  // Ignore exceptions.  From section 12.6 of the Java Language Spec.
  try
    {
      obj->finalize ();
    }
  catch (java::lang::Throwable *t)
    {
      // Ignore.
    }
}


//
// Synchronization code.
//

#ifndef JV_HASH_SYNCHRONIZATION
// This global is used to make sure that only one thread sets an
// object's `sync_info' field.
static _Jv_Mutex_t sync_mutex;

// This macro is used to see if synchronization initialization is
// needed.
#if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
#  define INIT_NEEDED(Obj) (! (Obj)->sync_info \
                            || ! ((_Jv_SyncInfo *) ((Obj)->sync_info))->init)
#else
#  define INIT_NEEDED(Obj) (! (Obj)->sync_info)
#endif

#if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
// If we have to run a destructor for a sync_info member, then this
// function is registered as a finalizer for the sync_info.
static void
finalize_sync_info (jobject obj)
{
  _Jv_SyncInfo *si = (_Jv_SyncInfo *) obj;
#if defined (_Jv_HaveCondDestroy)
  _Jv_CondDestroy (&si->condition);
#endif
#if defined (_Jv_HaveMutexDestroy)
  _Jv_MutexDestroy (&si->mutex);
#endif
  si->init = false;
}
#endif

// This is called to initialize the sync_info element of an object.
void
java::lang::Object::sync_init (void)
{
  _Jv_MutexLock (&sync_mutex);
  // Check again to see if initialization is needed now that we have
  // the lock.
  if (INIT_NEEDED (this))
    {
      // We assume there are no pointers in the sync_info
      // representation.
      _Jv_SyncInfo *si;
      // We always create a new sync_info, even if there is already
      // one available.  Any given object can only be finalized once.
      // If we get here and sync_info is not null, then it has already
      // been finalized.  So if we just reinitialize the old one,
      // we'll never be able to (re-)destroy the mutex and/or
      // condition variable.
      si = (_Jv_SyncInfo *) _Jv_AllocBytes (sizeof (_Jv_SyncInfo));
      _Jv_MutexInit (&si->mutex);
      _Jv_CondInit (&si->condition);
#if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
      // Register a finalizer.
      si->init = true;
      _Jv_RegisterFinalizer (si, finalize_sync_info);
#endif
      sync_info = (jobject) si;
    }
  _Jv_MutexUnlock (&sync_mutex);
}

void
java::lang::Object::notify (void)
{
  if (__builtin_expect (INIT_NEEDED (this), false))
    sync_init ();
  _Jv_SyncInfo *si = (_Jv_SyncInfo *) sync_info;
  if (__builtin_expect (_Jv_CondNotify (&si->condition, &si->mutex), false))
    throw new IllegalMonitorStateException(JvNewStringLatin1
                                           ("current thread not owner"));
}

void
java::lang::Object::notifyAll (void)
{
  if (__builtin_expect (INIT_NEEDED (this), false))
    sync_init ();
  _Jv_SyncInfo *si = (_Jv_SyncInfo *) sync_info;
  if (__builtin_expect (_Jv_CondNotifyAll (&si->condition, &si->mutex), false))
    throw new IllegalMonitorStateException(JvNewStringLatin1
                                           ("current thread not owner"));
}

void
java::lang::Object::wait (jlong timeout, jint nanos)
{
  if (__builtin_expect (INIT_NEEDED (this), false))
    sync_init ();
  if (__builtin_expect (timeout < 0 || nanos < 0 || nanos > 999999, false))
    throw new IllegalArgumentException;
  _Jv_SyncInfo *si = (_Jv_SyncInfo *) sync_info;
  switch (_Jv_CondWait (&si->condition, &si->mutex, timeout, nanos))
    {
      case _JV_NOT_OWNER:
        throw new IllegalMonitorStateException (JvNewStringLatin1
                                                ("current thread not owner"));
      case _JV_INTERRUPTED:
        if (Thread::interrupted ())
          throw new InterruptedException;
    }
}

//
// Some runtime code.
//

// This function is called at system startup to initialize the
// `sync_mutex'.
void
_Jv_InitializeSyncMutex (void)
{
  _Jv_MutexInit (&sync_mutex);
}

void
_Jv_MonitorEnter (jobject obj)
{
#ifndef HANDLE_SEGV
  if (__builtin_expect (! obj, false))
    throw new java::lang::NullPointerException;
#endif
  if (__builtin_expect (INIT_NEEDED (obj), false))
    obj->sync_init ();
  _Jv_SyncInfo *si = (_Jv_SyncInfo *) obj->sync_info;
  _Jv_MutexLock (&si->mutex);
  // FIXME: In the Windows case, this can return a nonzero error code.
  // We should turn that into some exception ...
}

void
_Jv_MonitorExit (jobject obj)
{
  JvAssert (obj);
  JvAssert (! INIT_NEEDED (obj));
  _Jv_SyncInfo *si = (_Jv_SyncInfo *) obj->sync_info;
  if (__builtin_expect (_Jv_MutexUnlock (&si->mutex), false))
    throw new java::lang::IllegalMonitorStateException;
}

bool
_Jv_ObjectCheckMonitor (jobject obj)
{
  if (__builtin_expect (INIT_NEEDED (obj), false))
    obj->sync_init ();
  _Jv_SyncInfo *si = (_Jv_SyncInfo *) obj->sync_info;
  return _Jv_MutexCheckMonitor (&si->mutex);
}

#else /* JV_HASH_SYNCHRONIZATION */

// FIXME: We shouldn't be calling GC_register_finalizer directly.
#ifndef HAVE_BOEHM_GC
# error Hash synchronization currently requires boehm-gc
// That's actually a bit of a lie: It should also work with the null GC,
// probably even better than the alternative.
// To really support alternate GCs here, we would need to widen the
// interface to finalization, since we sometimes have to register a
// second finalizer for an object that already has one.
// We might also want to move the GC interface to a .h file, since
// the number of procedure call levels involved in some of these
// operations is already ridiculous, and would become worse if we
// went through the proper intermediaries.
#else
# ifdef LIBGCJ_GC_DEBUG
#   define GC_DEBUG
# endif
# include "gc.h"
#endif

// What follows currently assumes a Linux-like platform.
// Some of it specifically assumes X86 or IA64 Linux, though that
// should be easily fixable.

// A Java monitor implementation based on a table of locks.
// Each entry in the table describes
// locks held for objects that hash to that location.
// This started out as a reimplementation of the technique used in SGI's JVM,
// for which we obtained permission from SGI.
// But in fact, this ended up quite different, though some ideas are
// still shared with the original.
// It was also influenced by some of the published IBM work,
// though it also differs in many ways from that.
// We could speed this up if we had a way to atomically update
// an entire cache entry, i.e. 2 contiguous words of memory.
// That would usually be the case with a 32 bit ABI on a 64 bit processor.
// But we don't currently go out of our way to target those.
// I don't know how to do much better with an N bit ABI on a processor
// that can atomically update only N bits at a time.
// Author: Hans-J. Boehm  (Hans_Boehm@hp.com, boehm@acm.org)

#include <limits.h>
#include <unistd.h>     // for usleep, sysconf.
#include <gcj/javaprims.h>
#include <sysdep/locks.h>
#include <java/lang/Thread.h>

// Try to determine whether we are on a multiprocessor, i.e. whether
// spinning may be profitable.
// This should really use a suitable autoconf macro.
// False is the conservative answer, though the right one is much better.
static bool
is_mp()
{
#ifdef _SC_NPROCESSORS_ONLN
  long nprocs = sysconf(_SC_NPROCESSORS_ONLN);
  return (nprocs > 1);
#else
  return false;
#endif
}

// A call to keep_live(p) forces p to be accessible to the GC
// at this point.
inline static void
keep_live(obj_addr_t p)
{
    __asm__ __volatile__("" : : "rm"(p) : "memory");
}
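
// (The empty asm above emits no instructions, but because it names p as
// an input and clobbers memory, the compiler must keep p computed and
// visible at this point, so a conservatively scanning collector still
// finds a reference to the object.)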

// Each hash table entry holds a single preallocated "lightweight" lock.
// In addition, it holds a chain of "heavyweight" locks.  Lightweight
// locks do not support Object.wait(), and are converted to heavyweight
// status in response to contention.  Unlike the SGI scheme, both
// lightweight and heavyweight locks in one hash entry can be simultaneously
// in use.  (The SGI scheme requires that we be able to acquire a heavyweight
// lock on behalf of another thread, and can thus convert a lock we don't
// hold to heavyweight status.  Here we don't insist on that, and thus
// let the original holder of the lightweight lock keep it.)
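//
// For example: if thread A holds the lightweight lock for object O and
// thread B then tries to enter O's monitor, B allocates a heavy_lock,
// increments heavy_count, sets REQUEST_CONVERSION, and waits on the
// heavy lock's condition variable; when A exits the monitor it marks
// the entry HEAVY and notifies B, which then owns the heavyweight lock.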

struct heavy_lock {
  void * reserved_for_gc;
  struct heavy_lock *next;      // Hash chain link.
                                // Traced by GC.
  void * old_client_data;       // The only other field traced by GC.
  GC_finalization_proc old_finalization_proc;
  obj_addr_t address;           // Object to which this lock corresponds.
                                // Should not be traced by GC.
                                // Cleared as heavy_lock is destroyed.
                                // Together with the rest of the heavy lock
                                // chain, this is protected by the lock
                                // bit in the hash table entry to which
                                // the chain is attached.
  _Jv_SyncInfo si;
  // The remaining fields save prior finalization info for
  // the object, which we needed to replace in order to arrange
  // for cleanup of the lock structure.
};

#ifdef LOCK_DEBUG
void
print_hl_list(heavy_lock *hl)
{
    heavy_lock *p = hl;
    for (; 0 != p; p = p->next)
      fprintf (stderr, "(hl = %p, addr = %p)", p, (void *)(p -> address));
}
#endif /* LOCK_DEBUG */

#if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
// If we have to run a destructor for a sync_info member, then this
// function could be registered as a finalizer for the sync_info.
// In fact, we now only invoke it explicitly.
static inline void
heavy_lock_finalization_proc (heavy_lock *hl)
{
#if defined (_Jv_HaveCondDestroy)
  _Jv_CondDestroy (&hl->si.condition);
#endif
#if defined (_Jv_HaveMutexDestroy)
  _Jv_MutexDestroy (&hl->si.mutex);
#endif
  hl->si.init = false;
}
#endif /* defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy) */

// We convert the lock back to lightweight status when
// we exit, so that a single contention episode doesn't doom the lock
// forever.  But we also need to make sure that lock structures for dead
// objects are eventually reclaimed.  We do that in an additional
// finalizer on the underlying object.
// Note that if the corresponding object is dead, it is safe to drop
// the heavy_lock structure from its list.  It is not necessarily
// safe to deallocate it, since the unlock code could still be running.

struct hash_entry {
  volatile obj_addr_t address;  // Address of object for which lightweight
                                // lock is held.
                                // We assume the 3 low order bits are zero.
                                // With the Boehm collector and bitmap
                                // allocation, objects of size 4 bytes are
                                // broken anyway.  Thus this is primarily
                                // a constraint on statically allocated
                                // objects used for synchronization.
                                // This allows us to use the low order
                                // bits as follows:
#   define LOCKED       1       // This hash entry is locked, and its
                                // state may be invalid.
                                // The lock protects both the hash_entry
                                // itself (except for the light_count
                                // and light_thr_id fields, which
                                // are protected by the lightweight
                                // lock itself), and any heavy_monitor
                                // structures attached to it.
#   define HEAVY        2       // Heavyweight locks associated with this
                                // hash entry may be held.
                                // The lightweight entry is still valid,
                                // if the leading bits of the address
                                // field are nonzero.
                                // If the LOCKED bit is clear, then this is
                                // set exactly when heavy_count is > 0.
                                // Stored redundantly so a single
                                // compare-and-swap works in the easy case.
                                // If HEAVY is not set, it is safe to use
                                // an available lightweight lock entry
                                // without checking if there is an existing
                                // heavyweight lock for the same object.
                                // (There may be one, but it won't be held
                                // or waited for.)
#   define REQUEST_CONVERSION 4 // The lightweight lock is held.  But
                                // one or more other threads have tried
                                // to acquire the lock, and hence request
                                // conversion to heavyweight status.
                                // The heavyweight lock is already allocated.
                                // Threads requesting conversion are
                                // waiting on the condition variable associated
                                // with the heavyweight lock.
                                // Not used for conversion due to
                                // Object.wait() calls.
#   define FLAGS (LOCKED | HEAVY | REQUEST_CONVERSION)
  volatile _Jv_ThreadId_t light_thr_id;
                                // Thr_id of holder of lightweight lock.
                                // Only updated by lightweight lock holder.
                                // Must be recognizably invalid if the
                                // lightweight lock is not held.
#   define INVALID_THREAD_ID 0  // Works for Linux?
                                // If zero doesn't work, we have to
                                // initialize lock table.
  volatile unsigned short light_count;
                                // Number of times the lightweight lock
                                // is held minus one.  Zero if lightweight
                                // lock is not held.  Only updated by
                                // lightweight lock holder or, in one
                                // case, while holding the LOCKED bit in
                                // a state in which there can be no
                                // lightweight lock holder.
  unsigned short heavy_count;   // Total number of times heavyweight locks
                                // associated with this hash entry are held
                                // or waiting to be acquired.
                                // Threads in wait() are included even though
                                // they have temporarily released the lock.
                                // Protected by LOCKED bit.
                                // Threads requesting conversion to heavyweight
                                // status are also included.
  struct heavy_lock * heavy_locks;
                                // Chain of heavy locks.  Protected
                                // by the lock bit for he.  Locks may
                                // remain allocated here even if HEAVY
                                // is not set and heavy_count is 0.
                                // If a lightweight and heavyweight lock
                                // correspond to the same address, the
                                // lightweight lock is the right one.
};
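
// For example: while thread A lightweight-holds object 0x80004000 and
// thread B is waiting to convert the lock, the address field contains
// 0x80004000 | REQUEST_CONVERSION | HEAVY = 0x80004006.  The low three
// bits are usable as flags because objects are assumed to be at least
// 8-byte aligned (see the comment on the address field above).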

#ifndef JV_SYNC_TABLE_SZ
# define JV_SYNC_TABLE_SZ 2048  // Must be power of 2.
#endif

hash_entry light_locks[JV_SYNC_TABLE_SZ];

#define JV_SYNC_HASH(p) (((long)p ^ ((long)p >> 10)) & (JV_SYNC_TABLE_SZ-1))
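
// For example, with JV_SYNC_TABLE_SZ = 2048 an object at address 0x1000
// hashes to (0x1000 ^ (0x1000 >> 10)) & 2047 = (0x1000 ^ 0x4) & 0x7ff = 4,
// so its monitor state lives in light_locks[4].  Distinct objects whose
// addresses collide share a hash_entry, and thus contend for the same
// lightweight lock slot.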

// Note that the light_locks table is scanned conservatively by the
// collector.  It is essential that the heavy_locks field is scanned.
// Currently the address field may or may not cause the associated object
// to be retained, depending on whether flag bits are set.
// This means that we can conceivably get an unexpected deadlock if
// 1) Object at address A is locked.
// 2) The client drops A without unlocking it.
// 3) Flag bits in the address entry are set, so the collector reclaims
//    the object at A.
// 4) A is reallocated, and an attempt is made to lock the result.
// This could be fixed by scanning light_locks in a more customized
// manner that ignores the flag bits.  But it can only happen with hand
// generated semi-illegal .class files, and then it doesn't present a
// security hole.

#ifdef LOCK_DEBUG
  void print_he(hash_entry *he)
  {
     fprintf(stderr, "lock hash entry = %p, index = %d, address = 0x%lx\n"
                     "\tlight_thr_id = 0x%lx, light_count = %d, "
                     "heavy_count = %d\n\theavy_locks:", he,
                     he - light_locks, (unsigned long)(he -> address),
                     (unsigned long)(he -> light_thr_id),
                     he -> light_count, he -> heavy_count);
     print_hl_list(he -> heavy_locks);
     fprintf(stderr, "\n");
  }
#endif /* LOCK_DEBUG */

#ifdef LOCK_LOG
  // Log locking operations.  For debugging only.
  // Logging is intended to be as unintrusive as possible.
  // Log calls are made after an operation completes, and hence
  // may not completely reflect actual synchronization ordering.
  // The choice of events to log is currently a bit haphazard.
  // The intent is that if we have to track down any other bugs
  // in this code, we extend the logging as appropriate.
  typedef enum
  {
    ACQ_LIGHT, ACQ_LIGHT2, ACQ_HEAVY, ACQ_HEAVY2, PROMOTE, REL_LIGHT,
    REL_HEAVY, REQ_CONV, PROMOTE2, WAIT_START, WAIT_END, NOTIFY, NOTIFY_ALL
  } event_type;

  struct lock_history
  {
    event_type tp;
    obj_addr_t addr;  // Often includes flags.
    _Jv_ThreadId_t thr;
  };

  const int LOG_SIZE = 128;     // Power of 2.

  lock_history lock_log[LOG_SIZE];

  volatile obj_addr_t log_next = 0;
                           // Next location in lock_log.
                           // Really an int, but we need compare_and_swap.

  static void add_log_entry(event_type t, obj_addr_t a, _Jv_ThreadId_t th)
  {
    obj_addr_t my_entry;
    obj_addr_t next_entry;
    do
      {
        my_entry = log_next;
        next_entry = ((my_entry + 1) & (LOG_SIZE - 1));
      }
    while (!compare_and_swap(&log_next, my_entry, next_entry));
    lock_log[my_entry].tp = t;
    lock_log[my_entry].addr = a;
    lock_log[my_entry].thr = th;
  }

# define LOG(t, a, th) add_log_entry(t, a, th)
#else /* !LOCK_LOG */
# define LOG(t, a, th)
#endif

static bool mp = false; // Known multiprocessor.

// Wait for roughly 2^n units, touching as little memory as possible.
static void
spin(unsigned n)
{
  const unsigned MP_SPINS = 10;
  const unsigned YIELDS = 4;
  const unsigned SPINS_PER_UNIT = 30;
  const unsigned MIN_SLEEP_USECS = 2001; // Shorter times spin under Linux.
  const unsigned MAX_SLEEP_USECS = 200000;
  static unsigned spin_limit = 0;
  static unsigned yield_limit = YIELDS;
  static bool spin_initialized = false;

  if (!spin_initialized)
    {
      mp = is_mp();
      if (mp)
        {
          spin_limit = MP_SPINS;
          yield_limit = MP_SPINS + YIELDS;
        }
      spin_initialized = true;
    }
  if (n < spin_limit)
    {
      unsigned i = SPINS_PER_UNIT << n;
      for (; i > 0; --i)
        __asm__ __volatile__("");
    }
  else if (n < yield_limit)
    {
      _Jv_ThreadYield();
    }
  else
    {
      unsigned duration = MIN_SLEEP_USECS << (n - yield_limit);
      if (n >= 15 + yield_limit || duration > MAX_SLEEP_USECS)
        duration = MAX_SLEEP_USECS;
      _Jv_platform_usleep(duration);
    }
}
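
// For example, on a multiprocessor (spin_limit = 10, yield_limit = 14):
// spin(0) through spin(9) busy-wait for 30 << n empty iterations,
// spin(10) through spin(13) just yield the CPU, and spin(14) and above
// sleep for roughly 2 ms << (n - 14), capped at 200 ms.  On a
// uniprocessor the busy-wait phase is skipped entirely.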

// Wait for a hash entry to become unlocked.
static void
wait_unlocked (hash_entry *he)
{
  unsigned i = 0;
  while (he -> address & LOCKED)
    spin (i++);
}

// Return the heavy lock for addr if it was already allocated.
// The client passes in the appropriate hash_entry.
// We hold the lock for he.
static inline heavy_lock *
find_heavy (obj_addr_t addr, hash_entry *he)
{
  heavy_lock *hl = he -> heavy_locks;
  while (hl != 0 && hl -> address != addr) hl = hl -> next;
  return hl;
}

// Unlink the heavy lock for the given address from its hash table chain.
// Dies miserably and conspicuously if it's not there, since that should
// be impossible.
static inline void
unlink_heavy (obj_addr_t addr, hash_entry *he)
{
  heavy_lock **currentp = &(he -> heavy_locks);
  while ((*currentp) -> address != addr)
    currentp = &((*currentp) -> next);
  *currentp = (*currentp) -> next;
}

// Finalization procedure for objects that have associated heavy-weight
// locks.  This may replace the real finalization procedure.
static void
heavy_lock_obj_finalization_proc (void *obj, void *cd)
{
  heavy_lock *hl = (heavy_lock *)cd;

// This only addresses misalignment of statics, not heap objects.  It
// works only because registering statics for finalization is a noop,
// no matter what the least significant bits are.
#ifdef JV_LINKER_CANNOT_8BYTE_ALIGN_STATICS
  obj_addr_t addr = (obj_addr_t)obj & ~((obj_addr_t)0x7);
#else
  obj_addr_t addr = (obj_addr_t)obj;
#endif
  hash_entry *he = light_locks + JV_SYNC_HASH(addr);
  obj_addr_t he_address = (he -> address & ~LOCKED);

  // Acquire lock bit immediately.  It's possible that the hl was already
  // destroyed while we were waiting for the finalizer to run.  If it
  // was, the address field was set to zero.  The address field access is
  // protected by the lock bit to ensure that we do this exactly once.
  // The lock bit also protects updates to the object's finalizer.
  while (!compare_and_swap(&(he -> address), he_address, he_address|LOCKED ))
    {
      // Hash table entry is currently locked.  We can't safely
      // touch the list of heavy locks.
      wait_unlocked(he);
      he_address = (he -> address & ~LOCKED);
    }
  if (0 == hl -> address)
    {
      // remove_all_heavy destroyed hl, and took care of the real finalizer.
      release_set(&(he -> address), he_address);
      return;
    }
  JvAssert(hl -> address == addr);
  GC_finalization_proc old_finalization_proc = hl -> old_finalization_proc;
  if (old_finalization_proc != 0)
    {
      // We still need to run a real finalizer.  In an idealized
      // world, in which people write thread-safe finalizers, that is
      // likely to require synchronization.  Thus we reregister
      // ourselves as the only finalizer, and simply run the real one.
      // Thus we don't clean up the lock yet, but we're likely to do so
      // on the next GC cycle.
      // It's OK if remove_all_heavy actually destroys the heavy lock,
      // since we've updated old_finalization_proc, and thus the user's
      // finalizer won't be rerun.
      void * old_client_data = hl -> old_client_data;
      hl -> old_finalization_proc = 0;
      hl -> old_client_data = 0;
#     ifdef HAVE_BOEHM_GC
        GC_REGISTER_FINALIZER_NO_ORDER(obj, heavy_lock_obj_finalization_proc, cd, 0, 0);
#     endif
      release_set(&(he -> address), he_address);
      old_finalization_proc(obj, old_client_data);
    }
  else
    {
      // The object is really dead, although it's conceivable that
      // some thread may still be in the process of releasing the
      // heavy lock.  Unlink it and, if necessary, register a finalizer
      // to destroy sync_info.
      unlink_heavy(addr, he);
      hl -> address = 0;         // Don't destroy it again.
      release_set(&(he -> address), he_address);
#     if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
        // Make sure lock is not held and then destroy condvar and mutex.
        _Jv_MutexLock(&(hl->si.mutex));
        _Jv_MutexUnlock(&(hl->si.mutex));
        heavy_lock_finalization_proc (hl);
#     endif
    }
}

// We hold the lock on he, and heavy_count is 0.
// Release the lock by replacing the address with new_address_val.
// Remove all heavy locks on the list.  Note that the only possible way
// in which a lock may still be in use is if it's in the process of
// being unlocked.
// FIXME:  Why does this unlock the hash entry?  I think that
// could now be done more cleanly in MonitorExit.
static void
remove_all_heavy (hash_entry *he, obj_addr_t new_address_val)
{
  JvAssert(he -> heavy_count == 0);
  JvAssert(he -> address & LOCKED);
  heavy_lock *hl = he -> heavy_locks;
  he -> heavy_locks = 0;
  // We would really like to release the lock bit here.  Unfortunately, that
  // creates a race between our finalizer removal, and the potential
  // reinstallation of a new finalizer as a new heavy lock is created.
  // This may need to be revisited.
  for(; 0 != hl; hl = hl->next)
    {
      obj_addr_t obj = hl -> address;
      JvAssert(0 != obj);  // If this was previously finalized, it should no
                           // longer appear on our list.
      hl -> address = 0; // Finalization proc might still see it after we
                         // finish.
      GC_finalization_proc old_finalization_proc = hl -> old_finalization_proc;
      void * old_client_data = hl -> old_client_data;
#     ifdef HAVE_BOEHM_GC
        // Remove our finalization procedure.
        // Reregister the clients if applicable.
          GC_REGISTER_FINALIZER_NO_ORDER((GC_PTR)obj, old_finalization_proc,
                                         old_client_data, 0, 0);
          // Note that our old finalization procedure may have been
          // previously determined to be runnable, and may still run.
          // FIXME - direct dependency on boehm GC.
#     endif
#     if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
        // Wait for a possible lock holder to finish unlocking it.
        // This is only an issue if we have to explicitly destroy the mutex
        // or possibly if we have to destroy a condition variable that is
        // still being notified.
          _Jv_MutexLock(&(hl->si.mutex));
          _Jv_MutexUnlock(&(hl->si.mutex));
          heavy_lock_finalization_proc (hl);
#     endif
    }
  release_set(&(he -> address), new_address_val);
}

// We hold the lock on he and heavy_count is 0.
// We release it by replacing the address field with new_address_val.
// Remove all heavy locks on the list if the list is sufficiently long.
// This is called periodically to avoid very long lists of heavy locks.
// This seems to otherwise become an issue with SPECjbb, for example.
static inline void
maybe_remove_all_heavy (hash_entry *he, obj_addr_t new_address_val)
{
  static const int max_len = 5;
  heavy_lock *hl = he -> heavy_locks;

  for (int i = 0; i < max_len; ++i)
    {
      if (0 == hl)
        {
          release_set(&(he -> address), new_address_val);
          return;
        }
      hl = hl -> next;
    }
  remove_all_heavy(he, new_address_val);
}

// Allocate a new heavy lock for addr, returning its address.
// Assumes we already have the hash_entry locked, and there
// is currently no lightweight or allocated lock for addr.
// We register a finalizer for addr, which is responsible for
// removing the heavy lock when addr goes away, in addition
// to the responsibilities of any prior finalizer.
// This unfortunately holds the lock bit for the hash entry while it
// allocates two objects (one for the finalizer).
// It would be nice to avoid that somehow ...
static heavy_lock *
alloc_heavy(obj_addr_t addr, hash_entry *he)
{
  heavy_lock * hl = (heavy_lock *) _Jv_AllocTraceTwo(sizeof (heavy_lock));

  hl -> address = addr;
  _Jv_MutexInit (&(hl -> si.mutex));
  _Jv_CondInit (&(hl -> si.condition));
# if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
    hl->si.init = true;  // needed ?
# endif
  hl -> next = he -> heavy_locks;
  he -> heavy_locks = hl;
  // FIXME: The only call that cheats and goes directly to the GC interface.
# ifdef HAVE_BOEHM_GC
    GC_REGISTER_FINALIZER_NO_ORDER(
                          (void *)addr, heavy_lock_obj_finalization_proc,
                          hl, &hl->old_finalization_proc,
                          &hl->old_client_data);
# endif /* HAVE_BOEHM_GC */
  return hl;
}

// Return the heavy lock for addr, allocating if necessary.
// Assumes we have the cache entry locked, and there is no lightweight
// lock for addr.
static heavy_lock *
get_heavy(obj_addr_t addr, hash_entry *he)
{
  heavy_lock *hl = find_heavy(addr, he);
  if (0 == hl)
    hl = alloc_heavy(addr, he);
  return hl;
}

void
_Jv_MonitorEnter (jobject obj)
{
#ifdef JV_LINKER_CANNOT_8BYTE_ALIGN_STATICS
  obj_addr_t addr = (obj_addr_t)obj & ~((obj_addr_t)FLAGS);
#else
  obj_addr_t addr = (obj_addr_t)obj;
#endif
  obj_addr_t address;
  unsigned hash = JV_SYNC_HASH(addr);
  hash_entry * he = light_locks + hash;
  _Jv_ThreadId_t self = _Jv_ThreadSelf();
  unsigned count;
  const unsigned N_SPINS = 18;

  // We need to somehow check that addr is not NULL on the fast path.
  // A very predictable branch on a register value is probably cheaper
  // than dereferencing addr.
  // We could also permanently lock the NULL entry in the hash table.
  // But it's not clear that's cheaper either.
  if (__builtin_expect(!addr, false))
    throw new java::lang::NullPointerException;

  JvAssert(!(addr & FLAGS));
retry:
  if (__builtin_expect(compare_and_swap(&(he -> address),
                                        0, addr),true))
    {
      JvAssert(he -> light_thr_id == INVALID_THREAD_ID);
      JvAssert(he -> light_count == 0);
      he -> light_thr_id = self;
      // Count fields are set correctly.  Heavy_count was also zero,
      // but can change asynchronously.
      // This path is hopefully both fast and the most common.
      LOG(ACQ_LIGHT, addr, self);
      return;
    }
  address = he -> address;
  if ((address & ~(HEAVY | REQUEST_CONVERSION)) == addr)
    {
      if (he -> light_thr_id == self)
        {
          // We hold the lightweight lock, and it's for the right
          // address.
          count = he -> light_count;
          if (count == USHRT_MAX)
            {
              // I think most JVMs don't check for this.
              // But I'm not convinced I couldn't turn this into a security
              // hole, even with a 32 bit counter.
              throw new java::lang::IllegalMonitorStateException(
                JvNewStringLatin1("maximum monitor nesting level exceeded"));
            }
          he -> light_count = count + 1;
          return;
        }
      else
        {
          JvAssert(!(address & LOCKED));
          // Lightweight lock is held, but by someone else.
          // Spin a few times.  This avoids turning this into a heavyweight
          // lock if the current holder is about to release it.
          // FIXME: Does this make sense on a uniprocessor, where
          // it actually yields?  It's probably cheaper to convert.
          for (unsigned int i = 0; i < N_SPINS; ++i)
            {
              if ((he -> address & ~LOCKED) != address) goto retry;
              spin(i);
            }
          if (!compare_and_swap(&(he -> address), address, address | LOCKED ))
            {
              wait_unlocked(he);
              goto retry;
            }
          heavy_lock *hl = get_heavy(addr, he);
          ++ (he -> heavy_count);
          // The hl lock acquisition can't block for long, since it can
          // only be held by other threads waiting for conversion, and
          // they, like us, drop it quickly without blocking.
          _Jv_MutexLock(&(hl->si.mutex));
          JvAssert(he -> address == (address | LOCKED));
          release_set(&(he -> address), (address | REQUEST_CONVERSION | HEAVY));
                                // release lock on he
          LOG(REQ_CONV, (address | REQUEST_CONVERSION | HEAVY), self);
          // If _Jv_CondWait is interrupted, we ignore the interrupt, but
          // restore the thread's interrupt status flag when done.
          jboolean interrupt_flag = false;
          while ((he -> address & ~FLAGS) == (address & ~FLAGS))
            {
              // Once converted, the lock has to retain heavyweight
              // status, since heavy_count > 0.
              int r = _Jv_CondWait (&(hl->si.condition), &(hl->si.mutex), 0, 0);
              if (r == _JV_INTERRUPTED)
                {
                  interrupt_flag = true;
                  Thread::currentThread()->interrupt_flag = false;
                }
            }
          if (interrupt_flag)
            Thread::currentThread()->interrupt_flag = interrupt_flag;
          keep_live(addr);
                // Guarantee that hl doesn't get unlinked by finalizer.
                // This is only an issue if the client fails to release
                // the lock, which is unlikely.
          JvAssert(he -> address & HEAVY);
          // Lock has been converted, we hold the heavyweight lock,
          // heavy_count has been incremented.
          return;
        }
    }
  obj_addr_t was_heavy = (address & HEAVY);
  if ((address & LOCKED) ||
      !compare_and_swap(&(he -> address), address, (address | LOCKED )))
    {
      wait_unlocked(he);
      goto retry;
    }
  if ((address & ~(HEAVY | REQUEST_CONVERSION)) == 0)
    {
      // Either was_heavy is true, or something changed out from under us,
      // since the initial test for 0 failed.
      JvAssert(!(address & REQUEST_CONVERSION));
        // Can't convert a nonexistent lightweight lock.
      heavy_lock *hl;
      hl = (was_heavy? find_heavy(addr, he) : 0);
        // The CAS succeeded, so was_heavy is still accurate.
      if (0 == hl)
        {
          // It is OK to use the lightweight lock, since either the
          // heavyweight lock does not exist, or none of the
          // heavyweight locks are currently in use.  Future threads
          // trying to acquire the lock will see the lightweight
          // one first and use that.
          he -> light_thr_id = self;  // OK, since nobody else can hold
                                      // light lock or do this at the same time.
          JvAssert(he -> light_count == 0);
          JvAssert(was_heavy == (he -> address & HEAVY));
          release_set(&(he -> address), (addr | was_heavy));
          LOG(ACQ_LIGHT2, addr | was_heavy, self);
        }
      else
        {
          // Must use heavy lock.
          ++ (he -> heavy_count);
          JvAssert(0 == (address & ~HEAVY));
          release_set(&(he -> address), HEAVY);
          LOG(ACQ_HEAVY, addr | was_heavy, self);
          _Jv_MutexLock(&(hl->si.mutex));
          keep_live(addr);
        }
      return;
    }
  // Lightweight lock is held, but does not correspond to this object.
  // We hold the lock on the hash entry, and he -> address can't
  // change from under us.  Neither can the chain of heavy locks.
    {
      JvAssert(0 == he -> heavy_count || (address & HEAVY));
      heavy_lock *hl = get_heavy(addr, he);
      ++ (he -> heavy_count);
      release_set(&(he -> address), address | HEAVY);
      LOG(ACQ_HEAVY2, address | HEAVY, self);
      _Jv_MutexLock(&(hl->si.mutex));
      keep_live(addr);
    }
}
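
// To summarize the paths above: an uncontended enter is a single
// compare_and_swap installing addr in the hash entry; a nested enter by
// the current holder just increments light_count; contention on a held
// lightweight lock requests conversion and blocks on the heavyweight
// mutex; and an entry whose lightweight slot is taken by a different
// object falls back to a heavyweight lock directly.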


void
_Jv_MonitorExit (jobject obj)
{
#ifdef JV_LINKER_CANNOT_8BYTE_ALIGN_STATICS
  obj_addr_t addr = (obj_addr_t)obj & ~((obj_addr_t)FLAGS);
#else
  obj_addr_t addr = (obj_addr_t)obj;
#endif
  _Jv_ThreadId_t self = _Jv_ThreadSelf();
  unsigned hash = JV_SYNC_HASH(addr);
  hash_entry * he = light_locks + hash;
  _Jv_ThreadId_t light_thr_id;
  unsigned count;
  obj_addr_t address;

retry:
  light_thr_id = he -> light_thr_id;
  // Unfortunately, it turns out we always need to read the address
  // first.  Even if we are going to update it with compare_and_swap,
  // we need to reset light_thr_id, and that's not safe unless we know
  // that we hold the lock.
  address = he -> address;
  // First the (relatively) fast cases:
  if (__builtin_expect(light_thr_id == self, true))
    // Above must fail if addr == 0.
    {
      count = he -> light_count;
      if (__builtin_expect((address & ~HEAVY) == addr, true))
        {
          if (count != 0)
            {
              // We held the lightweight lock all along.  Thus the values
              // we saw for light_thr_id and light_count must have been valid.
              he -> light_count = count - 1;
              return;
            }
          else
            {
              // We hold the lightweight lock once.
              he -> light_thr_id = INVALID_THREAD_ID;
              if (compare_and_swap_release(&(he -> address), address,
                                           address & HEAVY))
                {
                  LOG(REL_LIGHT, address & HEAVY, self);
                  return;
                }
              else
                {
                  he -> light_thr_id = light_thr_id; // Undo prior damage.
                  goto retry;
                }
            }
        }
      // else lock is not for this address, conversion is requested,
      // or the lock bit in the address field is set.
    }
  else
    {
      if (__builtin_expect(!addr, false))
        throw new java::lang::NullPointerException;
      if ((address & ~(HEAVY | REQUEST_CONVERSION)) == addr)
        {
#         ifdef LOCK_DEBUG
            fprintf(stderr, "Lightweight lock held by other thread\n\t"
                            "light_thr_id = 0x%lx, self = 0x%lx, "
                            "address = 0x%lx, heavy_count = %d, pid = %d\n",
                            light_thr_id, self, (unsigned long)address,
                            he -> heavy_count, getpid());
            print_he(he);
            for(;;) {}
#         endif
          // Someone holds the lightweight lock for this object, and
          // it can't be us.
          throw new java::lang::IllegalMonitorStateException(
                        JvNewStringLatin1("current thread not owner"));
        }
      else
        count = he -> light_count;
    }
  if (address & LOCKED)
    {
      wait_unlocked(he);
      goto retry;
    }
  // Now the unlikely cases.
  // We do know that:
  // - Address is set, and doesn't contain the LOCKED bit.
  // - If address refers to the same object as addr, then he -> light_thr_id
  //   refers to this thread, and count is valid.
  // - The case in which we held the lightweight lock has been
  //   completely handled, except for the REQUEST_CONVERSION case.
  //
  if ((address & ~FLAGS) == addr)
    {
      // The lightweight lock is assigned to this object.
      // Thus we must be in the REQUEST_CONVERSION case.
      if (0 != count)
        {
          // Defer conversion until we exit completely.
          he -> light_count = count - 1;
          return;
        }
      JvAssert(he -> light_thr_id == self);
      JvAssert(address & REQUEST_CONVERSION);
      // Conversion requested
      // Convert now.
      if (!compare_and_swap(&(he -> address), address, address | LOCKED))
        goto retry;
      heavy_lock *hl = find_heavy(addr, he);
      JvAssert (0 != hl);
                // Requestor created it.
      he -> light_count = 0;
      JvAssert(he -> heavy_count > 0);
                // was incremented by requestor.
      _Jv_MutexLock(&(hl->si.mutex));
        // Release the he lock after acquiring the mutex.
        // Otherwise we can accidentally
        // notify a thread that has already seen a heavyweight
        // lock.
      he -> light_thr_id = INVALID_THREAD_ID;
      release_set(&(he -> address), HEAVY);
      LOG(PROMOTE, address, self);
                // lightweight lock now unused.
      _Jv_CondNotifyAll(&(hl->si.condition), &(hl->si.mutex));
      _Jv_MutexUnlock(&(hl->si.mutex));
      // heavy_count was already incremented by original requestor.
      keep_live(addr);
      return;
    }
  // Lightweight lock not for this object.
  JvAssert(!(address & LOCKED));
  JvAssert((address & ~FLAGS) != addr);
  if (!compare_and_swap(&(he -> address), address, address | LOCKED))
    goto retry;
  heavy_lock *hl = find_heavy(addr, he);
  if (NULL == hl)
    {
#     ifdef LOCK_DEBUG
        fprintf(stderr, "Failed to find heavyweight lock for addr 0x%lx"
                        " pid = %d\n", addr, getpid());
        print_he(he);
        for(;;) {}
#     endif
      release_set(&(he -> address), address);
      throw new java::lang::IllegalMonitorStateException(
                        JvNewStringLatin1("current thread not owner"));
    }
  JvAssert(address & HEAVY);
  count = he -> heavy_count;
  JvAssert(count > 0);
  --count;
  he -> heavy_count = count;
  if (0 == count)
    {
      const unsigned test_freq = 16;  // Power of 2
      static volatile unsigned counter = 0;
      unsigned my_counter = counter;

      counter = my_counter + 1;
      if (my_counter%test_freq == 0)
        {
          // Randomize the interval length a bit.
          counter = my_counter + (my_counter >> 4) % (test_freq/2);
          // Unlock mutex first, to avoid self-deadlock, or worse.
          _Jv_MutexUnlock(&(hl->si.mutex));
          maybe_remove_all_heavy(he, address &~HEAVY);
                                // release lock bit, preserving
                                // REQUEST_CONVERSION
                                // and object address.
        }
      else
        {
          release_set(&(he -> address), address &~HEAVY);
          _Jv_MutexUnlock(&(hl->si.mutex));
                        // Unlock after releasing the lock bit, so that
                        // we don't switch to another thread prematurely.
        }
    }
  else
    {
      release_set(&(he -> address), address);
      _Jv_MutexUnlock(&(hl->si.mutex));
    }
  LOG(REL_HEAVY, addr, self);
  keep_live(addr);
}

// Return false if obj's monitor is held by the current thread
bool
_Jv_ObjectCheckMonitor (jobject obj)
{
#ifdef JV_LINKER_CANNOT_8BYTE_ALIGN_STATICS
  obj_addr_t addr = (obj_addr_t)obj & ~((obj_addr_t)FLAGS);
#else
  obj_addr_t addr = (obj_addr_t)obj;
#endif
  obj_addr_t address;
  unsigned hash = JV_SYNC_HASH(addr);
  hash_entry * he = light_locks + hash;

  JvAssert(!(addr & FLAGS));
  address = he -> address;
  // Try it the easy way first:
  if (address == 0) return true;
  _Jv_ThreadId_t self = _Jv_ThreadSelf();
  if ((address & ~(HEAVY | REQUEST_CONVERSION)) == addr)
      // Fails if entry is LOCKED.
      // I can't asynchronously become or stop being the holder.
      return he -> light_thr_id != self;
retry:
  // Acquire the hash table entry lock
  address &= ~LOCKED;
  if (!compare_and_swap(&(he -> address), address, address | LOCKED))
    {
      wait_unlocked(he);
      goto retry;
    }

  bool not_mine;

  if ((address & ~FLAGS) == addr)
    not_mine = (he -> light_thr_id != self);
  else
    {
      heavy_lock* hl = find_heavy(addr, he);
      not_mine = hl ? _Jv_MutexCheckMonitor(&hl->si.mutex) : true;
    }

  release_set(&(he -> address), address);       // unlock hash entry
  return not_mine;
}

// The rest of these are moderately thin veneers on _Jv_Cond ops.
// The current version of Notify might be able to make the pthread
// call AFTER releasing the lock, thus saving some context switches??

void
java::lang::Object::wait (jlong timeout, jint nanos)
{
#ifdef JV_LINKER_CANNOT_8BYTE_ALIGN_STATICS
  obj_addr_t addr = (obj_addr_t)this & ~((obj_addr_t)FLAGS);
#else
  obj_addr_t addr = (obj_addr_t)this;
#endif
  _Jv_ThreadId_t self = _Jv_ThreadSelf();
  unsigned hash = JV_SYNC_HASH(addr);
  hash_entry * he = light_locks + hash;
  unsigned count;
  obj_addr_t address;
  heavy_lock *hl;

  if (__builtin_expect (timeout < 0 || nanos < 0 || nanos > 999999, false))
    throw new IllegalArgumentException;
retry:
  address = he -> address;
  address &= ~LOCKED;
  if (!compare_and_swap(&(he -> address), address, address | LOCKED))
    {
      wait_unlocked(he);
      goto retry;
    }
  // address did not have the lock bit set.  We now hold the lock on he.
  if ((address & ~FLAGS) == addr)
    {
      // Convert to heavyweight.
        if (he -> light_thr_id != self)
          {
#           ifdef LOCK_DEBUG
              fprintf(stderr, "Found wrong lightweight lock owner in wait "
                              "address = 0x%lx pid = %d\n", address, getpid());
              print_he(he);
              for(;;) {}
#           endif
            release_set(&(he -> address), address);
            throw new IllegalMonitorStateException (JvNewStringLatin1
                          ("current thread not owner"));
          }
        count = he -> light_count;
        hl = get_heavy(addr, he);
        he -> light_count = 0;
        he -> heavy_count += count + 1;
        for (unsigned i = 0; i <= count; ++i)
          _Jv_MutexLock(&(hl->si.mutex));
        // Again release the he lock after acquiring the mutex.
        he -> light_thr_id = INVALID_THREAD_ID;
        release_set(&(he -> address), HEAVY);  // lightweight lock now unused.
        LOG(PROMOTE2, addr, self);
        if (address & REQUEST_CONVERSION)
          _Jv_CondNotifyAll (&(hl->si.condition), &(hl->si.mutex));
          // Since we do this before we do a CondWait, we guarantee that
          // threads waiting on requested conversion are awoken before
          // a real wait on the same condition variable.
          // No other notification can occur in the interim, since
          // we hold the heavy lock, and notifications are made
          // without acquiring it.
    }
  else /* We should hold the heavyweight lock. */
    {
      hl = find_heavy(addr, he);
      release_set(&(he -> address), address);
      if (0 == hl)
        {
#         ifdef LOCK_DEBUG
            fprintf(stderr, "Couldn't find heavy lock in wait "
                            "addr = 0x%lx pid = %d\n", addr, getpid());
            print_he(he);
            for(;;) {}
#         endif
          throw new IllegalMonitorStateException (JvNewStringLatin1
                          ("current thread not owner"));
        }
      JvAssert(address & HEAVY);
    }
  LOG(WAIT_START, addr, self);
  switch (_Jv_CondWait (&(hl->si.condition), &(hl->si.mutex), timeout, nanos))
    {
      case _JV_NOT_OWNER:
        throw new IllegalMonitorStateException (JvNewStringLatin1
                          ("current thread not owner"));
      case _JV_INTERRUPTED:
        if (Thread::interrupted ())
          throw new InterruptedException;
    }
  LOG(WAIT_END, addr, self);
}
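
// For example: a thread that has entered the monitor three times
// (light_count == 2) and then calls wait() locks the heavy mutex three
// times and adds 3 to heavy_count, so the full nesting depth carries
// over to the heavyweight lock before the thread blocks in _Jv_CondWait.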

void
java::lang::Object::notify (void)
{
#ifdef JV_LINKER_CANNOT_8BYTE_ALIGN_STATICS
  obj_addr_t addr = (obj_addr_t)this & ~((obj_addr_t)FLAGS);
#else
  obj_addr_t addr = (obj_addr_t)this;
#endif
  _Jv_ThreadId_t self = _Jv_ThreadSelf();
  unsigned hash = JV_SYNC_HASH(addr);
  hash_entry * he = light_locks + hash;
  heavy_lock *hl;
  obj_addr_t address;
  int result;

retry:
  address = ((he -> address) & ~LOCKED);
  if (!compare_and_swap(&(he -> address), address, address | LOCKED))
    {
      wait_unlocked(he);
      goto retry;
    }
  if ((address & ~FLAGS) == addr && he -> light_thr_id == self)
    {
      // We hold lightweight lock.  Since it has not
      // been inflated, there are no waiters.
      release_set(&(he -> address), address);   // unlock
      return;
    }
  hl = find_heavy(addr, he);
  // Hl can't disappear since we point to the underlying object.
  // It's important that we release the lock bit before the notify, since
  // otherwise we will try to wake up the target while we still hold the
  // bit.  This results in lock bit contention, which we don't handle
  // terribly well.
  release_set(&(he -> address), address); // unlock
  if (0 == hl)
    {
      throw new IllegalMonitorStateException(JvNewStringLatin1
                                              ("current thread not owner"));
    }
  // We know that we hold the heavyweight lock at this point,
  // and the lightweight lock is not in use.
  result = _Jv_CondNotify(&(hl->si.condition), &(hl->si.mutex));
  LOG(NOTIFY, addr, self);
  keep_live(addr);
  if (__builtin_expect (result, 0))
    throw new IllegalMonitorStateException(JvNewStringLatin1
                                              ("current thread not owner"));
}

void
java::lang::Object::notifyAll (void)
{
#ifdef JV_LINKER_CANNOT_8BYTE_ALIGN_STATICS
  obj_addr_t addr = (obj_addr_t)this & ~((obj_addr_t)FLAGS);
#else
  obj_addr_t addr = (obj_addr_t)this;
#endif
  _Jv_ThreadId_t self = _Jv_ThreadSelf();
  unsigned hash = JV_SYNC_HASH(addr);
  hash_entry * he = light_locks + hash;
  heavy_lock *hl;
  obj_addr_t address;
  int result;

retry:
  address = (he -> address) & ~LOCKED;
  if (!compare_and_swap(&(he -> address), address, address | LOCKED))
    {
      wait_unlocked(he);
      goto retry;
    }
  hl = find_heavy(addr, he);
  if ((address & ~FLAGS) == addr && he -> light_thr_id == self)
    {
      // We hold lightweight lock.  Since it has not
      // been inflated, there are no waiters.
      release_set(&(he -> address), address);   // unlock
      return;
    }
  release_set(&(he -> address), address); // unlock
  if (0 == hl)
    {
      throw new IllegalMonitorStateException(JvNewStringLatin1
                                              ("current thread not owner"));
    }
  result = _Jv_CondNotifyAll(&(hl->si.condition), &(hl->si.mutex));
  LOG(NOTIFY_ALL, addr, self);
  if (__builtin_expect (result, 0))
    throw new IllegalMonitorStateException(JvNewStringLatin1
                                              ("current thread not owner"));
}

// This is declared in Java code and in Object.h.
// It should never be called with JV_HASH_SYNCHRONIZATION
void
java::lang::Object::sync_init (void)
{
  throw new IllegalMonitorStateException(JvNewStringLatin1
                                              ("internal error: sync_init"));
}

// This is called on startup and declared in Object.h.
// For now we just make it a no-op.
void
_Jv_InitializeSyncMutex (void)
{
}

#endif /* JV_HASH_SYNCHRONIZATION */
