/* newlib-1.18.0/newlib/libc/sys/linux/linuxthreads/spinlock.c (OpenCores openrisc subversion tree, rev 207) */

/* Linuxthreads - a simple clone()-based implementation of Posix        */
/* threads for Linux.                                                   */
/* Copyright (C) 1998 Xavier Leroy (Xavier.Leroy@inria.fr)              */
/*                                                                      */
/* This program is free software; you can redistribute it and/or        */
/* modify it under the terms of the GNU Library General Public License  */
/* as published by the Free Software Foundation; either version 2       */
/* of the License, or (at your option) any later version.               */
/*                                                                      */
/* This program is distributed in the hope that it will be useful,      */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of       */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the        */
/* GNU Library General Public License for more details.                 */

/* Internal locks */

#include <errno.h>
#include <sched.h>
#include <time.h>
#include <stdlib.h>
#include <limits.h>
#include "pthread.h"
#include "internals.h"
#include "spinlock.h"
#include "restart.h"

#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
static void __pthread_acquire(int * spinlock);

static inline void __pthread_release(int * spinlock)
{
  WRITE_MEMORY_BARRIER();
  *spinlock = __LT_SPINLOCK_INIT;
  __asm __volatile ("" : "=m" (*spinlock) : "m" (*spinlock));
}
#endif


/* The status field of a spinlock is a pointer whose least significant
   bit is a locked flag.

   Thus the field values have the following meanings:

   status == 0:       spinlock is free
   status == 1:       spinlock is taken; no thread is waiting on it

   (status & 1) == 1: spinlock is taken and (status & ~1L) is a
                      pointer to the first waiting thread; other
                      waiting threads are linked via the p_nextlock
                      field.
   (status & 1) == 0: same as above, but spinlock is not taken.

   The waiting list is not sorted by priority order.
   Actually, we always insert at top of list (sole insertion mode
   that can be performed without locking).
   For __pthread_unlock, we perform a linear search in the list
   to find the highest-priority, oldest waiting thread.
   This is safe because there are no concurrent __pthread_unlock
   operations -- only the thread that locked the mutex can unlock it. */
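
/* Added illustration (not part of the original sources): suppose thread A
   queued itself first on a taken lock and thread B queued itself afterwards.
   Using hypothetical descriptor names descr_A and descr_B, the fields then
   look roughly like

       lock->__status      == (long) descr_B | 1   (taken, B is newest waiter)
       descr_B->p_nextlock == descr_A
       descr_A->p_nextlock == NULL

   i.e. the low bit records "taken" and the remaining bits hold the head of a
   LIFO list of waiting thread descriptors. */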


void internal_function __pthread_lock(struct _pthread_fastlock * lock,
                                      pthread_descr self)
{
#if defined HAS_COMPARE_AND_SWAP
  long oldstatus, newstatus;
  int successful_seizure, spurious_wakeup_count;
  int spin_count;
#endif

#if defined TEST_FOR_COMPARE_AND_SWAP
  if (!__pthread_has_cas)
#endif
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
  {
    __pthread_acquire(&lock->__spinlock);
    return;
  }
#endif

#if defined HAS_COMPARE_AND_SWAP
  /* First try it without preparation.  Maybe it's a completely
     uncontested lock.  */
  if (lock->__status == 0 && __compare_and_swap (&lock->__status, 0, 1))
    return;

  spurious_wakeup_count = 0;
  spin_count = 0;

again:

  /* On SMP, try spinning to get the lock. */

  if (__pthread_smp_kernel) {
    int max_count = lock->__spinlock * 2 + 10;

    if (max_count > MAX_ADAPTIVE_SPIN_COUNT)
      max_count = MAX_ADAPTIVE_SPIN_COUNT;

    for (spin_count = 0; spin_count < max_count; spin_count++) {
      if (((oldstatus = lock->__status) & 1) == 0) {
        if(__compare_and_swap(&lock->__status, oldstatus, oldstatus | 1))
        {
          if (spin_count)
            lock->__spinlock += (spin_count - lock->__spinlock) / 8;
          READ_MEMORY_BARRIER();
          return;
        }
      }
#ifdef BUSY_WAIT_NOP
      BUSY_WAIT_NOP;
#endif
      __asm __volatile ("" : "=m" (lock->__status) : "m" (lock->__status));
    }

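    /* Added note (not in the original): this update, like the one inside the
       spin loop above, keeps lock->__spinlock as a running estimate of how
       long the lock tends to stay busy, moving the old value 1/8 of the way
       toward the spin count just observed:

           estimate += (observed - estimate) / 8

       an exponentially weighted moving average that the next acquirer turns
       into its own spin bound via max_count = estimate * 2 + 10 above. */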
    lock->__spinlock += (spin_count - lock->__spinlock) / 8;
  }

  /* No luck, try once more or suspend. */

  do {
    oldstatus = lock->__status;
    successful_seizure = 0;

    if ((oldstatus & 1) == 0) {
      newstatus = oldstatus | 1;
      successful_seizure = 1;
    } else {
      if (self == NULL)
        self = thread_self();
      newstatus = (long) self | 1;
    }

    if (self != NULL) {
      THREAD_SETMEM(self, p_nextlock, (pthread_descr) (oldstatus & ~1L));
      /* Make sure the store in p_nextlock completes before performing
         the compare-and-swap */
      MEMORY_BARRIER();
    }
  } while(! __compare_and_swap(&lock->__status, oldstatus, newstatus));

  /* Suspend with guard against spurious wakeup.
     This can happen in pthread_cond_timedwait_relative, when the thread
     wakes up due to timeout and is still on the condvar queue, and then
     locks the queue to remove itself. At that point it may still be on the
     queue, and may be resumed by a condition signal. */

  if (!successful_seizure) {
    for (;;) {
      suspend(self);
      if (self->p_nextlock != NULL) {
        /* Count resumes that don't belong to us. */
        spurious_wakeup_count++;
        continue;
      }
      break;
    }
    goto again;
  }

  /* Put back any resumes we caught that don't belong to us. */
  while (spurious_wakeup_count--)
    restart(self);

  READ_MEMORY_BARRIER();
#endif
}
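
/* Added usage sketch (illustration only, not part of the original file):
   callers elsewhere in linuxthreads bracket short critical sections with
   this pair, roughly

       static struct _pthread_fastlock flock = LOCK_INITIALIZER;

       __pthread_lock(&flock, NULL);   // NULL self: thread_self() is fetched only if we must queue
       ... critical section ...
       __pthread_unlock(&flock);

   where LOCK_INITIALIZER is assumed to come from the linuxthreads internal
   headers rather than from this file. */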

int __pthread_unlock(struct _pthread_fastlock * lock)
{
#if defined HAS_COMPARE_AND_SWAP
  long oldstatus;
  pthread_descr thr, * ptr, * maxptr;
  int maxprio;
#endif

#if defined TEST_FOR_COMPARE_AND_SWAP
  if (!__pthread_has_cas)
#endif
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
  {
    __pthread_release(&lock->__spinlock);
    return 0;
  }
#endif

#if defined HAS_COMPARE_AND_SWAP
  WRITE_MEMORY_BARRIER();

again:
  while ((oldstatus = lock->__status) == 1) {
    if (__compare_and_swap_with_release_semantics(&lock->__status,
        oldstatus, 0))
      return 0;
  }

  /* Find thread in waiting queue with maximal priority */
  ptr = (pthread_descr *) &lock->__status;
  thr = (pthread_descr) (oldstatus & ~1L);
  maxprio = 0;
  maxptr = ptr;

  /* Before we iterate over the wait queue, we need to execute
     a read barrier, otherwise we may read stale contents of nodes that may
     just have been inserted by other processors. One read barrier is enough to
     ensure we have a stable list; we don't need one for each pointer chase
     through the list, because we are the owner of the lock; other threads
     can only add nodes at the front; if a front node is consistent,
     the ones behind it must also be. */

  READ_MEMORY_BARRIER();

  while (thr != 0) {
    if (thr->p_priority >= maxprio) {
      maxptr = ptr;
      maxprio = thr->p_priority;
    }
    ptr = &(thr->p_nextlock);
    thr = *ptr;
  }

  /* Remove max prio thread from waiting list. */
  if (maxptr == (pthread_descr *) &lock->__status) {
    /* If max prio thread is at head, remove it with compare-and-swap
       to guard against concurrent lock operation. This removal
       also has the side effect of marking the lock as released
       because the new status comes from thr->p_nextlock whose
       least significant bit is clear. */
    thr = (pthread_descr) (oldstatus & ~1L);
    if (! __compare_and_swap_with_release_semantics
            (&lock->__status, oldstatus, (long)(thr->p_nextlock)))
      goto again;
  } else {
    /* No risk of concurrent access, remove max prio thread normally.
       But in this case we must also flip the least significant bit
       of the status to mark the lock as released. */
    thr = *maxptr;
    *maxptr = thr->p_nextlock;

    /* Ensure deletion from linked list completes before we
       release the lock. */
    WRITE_MEMORY_BARRIER();

    do {
      oldstatus = lock->__status;
    } while (!__compare_and_swap_with_release_semantics(&lock->__status,
             oldstatus, oldstatus & ~1L));
  }

  /* Wake up the selected waiting thread. Woken thread can check
     its own p_nextlock field for NULL to detect that it has been removed. No
     barrier is needed here, since restart() and suspend() take
     care of memory synchronization. */

  thr->p_nextlock = NULL;
  restart(thr);

  return 0;
#endif
}

/*
 * Alternate fastlocks do not queue threads directly. Instead, they queue
 * these wait queue node structures. When a timed wait wakes up due to
 * a timeout, it can leave its wait node in the queue (because there
 * is no safe way to remove from the queue). Some other thread will
 * deallocate the abandoned node.
 */
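
/* Added summary (not from the original comments): for the heap-allocated
   nodes used by __pthread_alt_timedlock() below, the life cycle is roughly

       wait_node_alloc()                -- pop from the global free list, or malloc
       linked at the head of lock->__status
       testandset(&node->abandoned)     -- won either by the unlocker (the
                                           waiting thread is restarted) or by
                                           the timed-out waiter (the node is
                                           left behind, abandoned)
       wait_node_free()                 -- called by the thread that got the
                                           lock, or by a later
                                           __pthread_alt_unlock() that sweeps
                                           abandoned nodes

   nodes are recycled through the free list and never handed back to the
   allocator, which is what makes the unlocked pop in wait_node_alloc() safe
   to dereference. */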


struct wait_node {
  struct wait_node *next;       /* Next node in null terminated linked list */
  pthread_descr thr;            /* The thread waiting with this node */
  int abandoned;                /* Atomic flag */
};

static long wait_node_free_list;
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
static int wait_node_free_list_spinlock;
#endif

/* Allocate a new node from the head of the free list using an atomic
   operation, or else using malloc if that list is empty.  A fundamental
   assumption here is that we can safely access wait_node_free_list->next.
   That's because we never free nodes once we allocate them, so a pointer to a
   node remains valid indefinitely. */

static struct wait_node *wait_node_alloc(void)
{
#if defined HAS_COMPARE_AND_SWAP
  long oldvalue, newvalue;
#endif

#if defined TEST_FOR_COMPARE_AND_SWAP
  if (!__pthread_has_cas)
#endif
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
  {
    struct wait_node *new_node = 0;

    __pthread_acquire(&wait_node_free_list_spinlock);
    if (wait_node_free_list != 0) {
      new_node = (struct wait_node *) wait_node_free_list;
      wait_node_free_list = (long) new_node->next;
    }
    WRITE_MEMORY_BARRIER();
    wait_node_free_list_spinlock = 0;

    if (new_node == 0)
      return malloc(sizeof *wait_node_alloc());

    return new_node;
  }
#endif

#if defined HAS_COMPARE_AND_SWAP
  do {
    oldvalue = wait_node_free_list;

    if (oldvalue == 0)
      return malloc(sizeof *wait_node_alloc());

    /* Ensure we don't read stale next link through oldvalue pointer. */
    READ_MEMORY_BARRIER();
    newvalue = (long) ((struct wait_node *) oldvalue)->next;
  } while (! __compare_and_swap(&wait_node_free_list, oldvalue, newvalue));

  return (struct wait_node *) oldvalue;
#endif
}

/* Return a node to the head of the free list using an atomic
   operation. */

static void wait_node_free(struct wait_node *wn)
{
#if defined HAS_COMPARE_AND_SWAP
  long oldvalue, newvalue;
#endif

#if defined TEST_FOR_COMPARE_AND_SWAP
  if (!__pthread_has_cas)
#endif
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
  {
    __pthread_acquire(&wait_node_free_list_spinlock);
    wn->next = (struct wait_node *) wait_node_free_list;
    wait_node_free_list = (long) wn;
    WRITE_MEMORY_BARRIER();
    wait_node_free_list_spinlock = 0;
    return;
  }
#endif

#if defined HAS_COMPARE_AND_SWAP
  do {
    oldvalue = wait_node_free_list;
    wn->next = (struct wait_node *) oldvalue;
    newvalue = (long) wn;
    /* Ensure node contents are written before we swap it into the list. */
    WRITE_MEMORY_BARRIER();
  } while (! __compare_and_swap(&wait_node_free_list, oldvalue, newvalue));
#endif
}

#if defined HAS_COMPARE_AND_SWAP

/* Remove a wait node from the specified queue.  It is assumed
   that the removal takes place concurrently with only atomic insertions at the
   head of the queue. */

static void wait_node_dequeue(struct wait_node **pp_head,
                              struct wait_node **pp_node,
                              struct wait_node *p_node)
{
  /* If the node is being deleted from the head of the
     list, it must be deleted using atomic compare-and-swap.
     Otherwise it can be deleted in the straightforward way. */

  if (pp_node == pp_head) {
    /* We don't need a read barrier between these next two loads,
       because it is assumed that the caller has already ensured
       the stability of *p_node with respect to p_node. */

    long oldvalue = (long) p_node;
    long newvalue = (long) p_node->next;

    if (__compare_and_swap((long *) pp_node, oldvalue, newvalue))
      return;

    /* Oops! Compare and swap failed, which means the node is
       no longer first. We delete it using the ordinary method.  But we don't
       know the identity of the node which now holds the pointer to the node
       being deleted, so we must search from the beginning. */

    for (pp_node = pp_head; p_node != *pp_node; ) {
      pp_node = &(*pp_node)->next;
      READ_MEMORY_BARRIER(); /* Stabilize *pp_node for next iteration. */
    }
  }

  *pp_node = p_node->next;
  return;
}

#endif

void __pthread_alt_lock(struct _pthread_fastlock * lock,
                        pthread_descr self)
{
#if defined HAS_COMPARE_AND_SWAP
  long oldstatus, newstatus;
#endif
  struct wait_node wait_node;

#if defined TEST_FOR_COMPARE_AND_SWAP
  if (!__pthread_has_cas)
#endif
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
  {
    int suspend_needed = 0;
    __pthread_acquire(&lock->__spinlock);

    if (lock->__status == 0)
      lock->__status = 1;
    else {
      if (self == NULL)
        self = thread_self();

      wait_node.abandoned = 0;
      wait_node.next = (struct wait_node *) lock->__status;
      wait_node.thr = self;
      lock->__status = (long) &wait_node;
      suspend_needed = 1;
    }

    __pthread_release(&lock->__spinlock);

    if (suspend_needed)
      suspend (self);
    return;
  }
#endif

#if defined HAS_COMPARE_AND_SWAP
  do {
    oldstatus = lock->__status;
    if (oldstatus == 0) {
      newstatus = 1;
    } else {
      if (self == NULL)
        self = thread_self();
      wait_node.thr = self;
      newstatus = (long) &wait_node;
    }
    wait_node.abandoned = 0;
    wait_node.next = (struct wait_node *) oldstatus;
    /* Make sure the store in wait_node.next completes before performing
       the compare-and-swap */
    MEMORY_BARRIER();
  } while(! __compare_and_swap(&lock->__status, oldstatus, newstatus));

  /* Suspend. Note that unlike in __pthread_lock, we don't worry
     here about spurious wakeup. That's because this lock is not
     used in situations where that can happen; the restart can
     only come from the previous lock owner. */

  if (oldstatus != 0)
    suspend(self);

  READ_MEMORY_BARRIER();
#endif
}

/* Timed-out lock operation; returns 0 to indicate timeout. */

int __pthread_alt_timedlock(struct _pthread_fastlock * lock,
                            pthread_descr self, const struct timespec *abstime)
{
  long oldstatus = 0;
#if defined HAS_COMPARE_AND_SWAP
  long newstatus;
#endif
  struct wait_node *p_wait_node = wait_node_alloc();

  /* Out of memory, just give up and do ordinary lock. */
  if (p_wait_node == 0) {
    __pthread_alt_lock(lock, self);
    return 1;
  }

#if defined TEST_FOR_COMPARE_AND_SWAP
  if (!__pthread_has_cas)
#endif
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
  {
    __pthread_acquire(&lock->__spinlock);

    if (lock->__status == 0)
      lock->__status = 1;
    else {
      if (self == NULL)
        self = thread_self();

      p_wait_node->abandoned = 0;
      p_wait_node->next = (struct wait_node *) lock->__status;
      p_wait_node->thr = self;
      lock->__status = (long) p_wait_node;
      oldstatus = 1; /* force suspend */
    }

    __pthread_release(&lock->__spinlock);
    goto suspend;
  }
#endif

#if defined HAS_COMPARE_AND_SWAP
  do {
    oldstatus = lock->__status;
    if (oldstatus == 0) {
      newstatus = 1;
    } else {
      if (self == NULL)
        self = thread_self();
      p_wait_node->thr = self;
      newstatus = (long) p_wait_node;
    }
    p_wait_node->abandoned = 0;
    p_wait_node->next = (struct wait_node *) oldstatus;
    /* Make sure the store in wait_node.next completes before performing
       the compare-and-swap */
    MEMORY_BARRIER();
  } while(! __compare_and_swap(&lock->__status, oldstatus, newstatus));
#endif

#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
  suspend:
#endif

  /* If we did not get the lock, do a timed suspend. If we wake up due
     to a timeout, then there is a race; the old lock owner may try
     to remove us from the queue. This race is resolved by us and the owner
     doing an atomic testandset() to change the state of the wait node from 0
     to 1. If we succeed, then it's a timeout and we abandon the node in the
     queue. If we fail, it means the owner gave us the lock. */

  if (oldstatus != 0) {
    if (timedsuspend(self, abstime) == 0) {
      if (!testandset(&p_wait_node->abandoned))
        return 0; /* Timeout! */

      /* Eat outstanding resume from owner, otherwise wait_node_free() below
         will race with owner's wait_node_dequeue(). */
      suspend(self);
    }
  }

  wait_node_free(p_wait_node);

  READ_MEMORY_BARRIER();

  return 1; /* Got the lock! */
}
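
/* Added usage sketch (illustration only, not from the original file): a
   caller wanting a bounded wait forms an absolute deadline and checks the
   return value, roughly

       struct timespec abstime;
       clock_gettime(CLOCK_REALTIME, &abstime);   // assumed way of building the deadline
       abstime.tv_sec += 2;                       // give up after about two seconds
       if (__pthread_alt_timedlock(lock, NULL, &abstime) == 0)
         return ETIMEDOUT;                        // 0 means the wait timed out

   as with __pthread_alt_lock, a NULL self defers the thread_self() lookup
   until the thread actually has to queue. */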

void __pthread_alt_unlock(struct _pthread_fastlock *lock)
{
  struct wait_node *p_node, **pp_node, *p_max_prio, **pp_max_prio;
  struct wait_node ** const pp_head = (struct wait_node **) &lock->__status;
  int maxprio;

  WRITE_MEMORY_BARRIER();

#if defined TEST_FOR_COMPARE_AND_SWAP
  if (!__pthread_has_cas)
#endif
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
  {
    __pthread_acquire(&lock->__spinlock);
  }
#endif

  while (1) {

  /* If no threads are waiting for this lock, try to just
     atomically release it. */
#if defined TEST_FOR_COMPARE_AND_SWAP
    if (!__pthread_has_cas)
#endif
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
    {
      if (lock->__status == 0 || lock->__status == 1) {
        lock->__status = 0;
        break;
      }
    }
#endif

#if defined TEST_FOR_COMPARE_AND_SWAP
    else
#endif

#if defined HAS_COMPARE_AND_SWAP
    {
      long oldstatus = lock->__status;
      if (oldstatus == 0 || oldstatus == 1) {
        if (__compare_and_swap_with_release_semantics (&lock->__status, oldstatus, 0))
          break;
        else
          continue;
      }
    }
#endif

    /* Process the entire queue of wait nodes. Remove all abandoned
       wait nodes and put them into the global free queue, and
       remember the one unabandoned node which refers to the thread
       having the highest priority. */

    pp_max_prio = pp_node = pp_head;
    p_max_prio = p_node = *pp_head;
    maxprio = INT_MIN;

    READ_MEMORY_BARRIER(); /* Prevent access to stale data through p_node */

    while (p_node != (struct wait_node *) 1) {
      int prio;

      if (p_node->abandoned) {
        /* Remove abandoned node. */
#if defined TEST_FOR_COMPARE_AND_SWAP
        if (!__pthread_has_cas)
#endif
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
          *pp_node = p_node->next;
#endif
#if defined TEST_FOR_COMPARE_AND_SWAP
        else
#endif
#if defined HAS_COMPARE_AND_SWAP
          wait_node_dequeue(pp_head, pp_node, p_node);
#endif
        wait_node_free(p_node);
        /* Note that the next assignment may take us to the beginning
           of the queue, to newly inserted nodes, if pp_node == pp_head.
           In that case we need a memory barrier to stabilize the first of
           these new nodes. */
        p_node = *pp_node;
        if (pp_node == pp_head)
          READ_MEMORY_BARRIER(); /* No stale reads through p_node */
        continue;
      } else if ((prio = p_node->thr->p_priority) >= maxprio) {
        /* Otherwise remember it if its thread has a higher or equal priority
           compared to that of any node seen thus far. */
        maxprio = prio;
        pp_max_prio = pp_node;
        p_max_prio = p_node;
      }

      /* This cannot jump backward in the list, so no further read
         barrier is needed. */
      pp_node = &p_node->next;
      p_node = *pp_node;
    }

    /* If all threads abandoned, go back to top */
    if (maxprio == INT_MIN)
      continue;

    ASSERT (p_max_prio != (struct wait_node *) 1);

    /* Now we want to remove the max priority thread's wait node from
       the list. Before we can do this, we must atomically try to change the
       node's abandon state from zero to nonzero. If we succeed, that means we
       have the node that we will wake up. If we failed, then it means the
       thread timed out and abandoned the node in which case we repeat the
       whole unlock operation. */

    if (!testandset(&p_max_prio->abandoned)) {
#if defined TEST_FOR_COMPARE_AND_SWAP
      if (!__pthread_has_cas)
#endif
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
        *pp_max_prio = p_max_prio->next;
#endif
#if defined TEST_FOR_COMPARE_AND_SWAP
      else
#endif
#if defined HAS_COMPARE_AND_SWAP
        wait_node_dequeue(pp_head, pp_max_prio, p_max_prio);
#endif
      restart(p_max_prio->thr);
      break;
    }
  }

#if defined TEST_FOR_COMPARE_AND_SWAP
  if (!__pthread_has_cas)
#endif
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
  {
    __pthread_release(&lock->__spinlock);
  }
#endif
}


/* Compare-and-swap emulation with a spinlock */

#ifdef TEST_FOR_COMPARE_AND_SWAP
int __pthread_has_cas = 0;
#endif

#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP

int __pthread_compare_and_swap(long * ptr, long oldval, long newval,
                               int * spinlock)
{
  int res;

  __pthread_acquire(spinlock);

  if (*ptr == oldval) {
    *ptr = newval; res = 1;
  } else {
    res = 0;
  }

  __pthread_release(spinlock);

  return res;
}
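
/* Added note (not in the original): this emulation is atomic only with
   respect to other updates funnelled through the same spinlock argument, so
   every emulated compare-and-swap on a given word must be paired with the
   spinlock that guards that word.  When a native compare-and-swap is
   available and selected by the HAS_COMPARE_AND_SWAP /
   TEST_FOR_COMPARE_AND_SWAP logic above, this fallback is not used. */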

/* This function is called if the inlined test-and-set
   in __pthread_compare_and_swap() failed */

/* The retry strategy is as follows:
   - We test and set the spinlock MAX_SPIN_COUNT times, calling
     sched_yield() each time.  This gives ample opportunity for other
     threads with priority >= our priority to make progress and
     release the spinlock.
   - If a thread with priority < our priority owns the spinlock,
     calling sched_yield() repeatedly is useless, since we're preventing
     the owning thread from making progress and releasing the spinlock.
     So, after MAX_SPIN_COUNT attempts, we suspend the calling thread
     using nanosleep().  This again should give time to the owning thread
     for releasing the spinlock.
     Notice that the nanosleep() interval must not be too small,
     since the kernel does busy-waiting for short intervals in a realtime
     process (!).  The smallest duration that guarantees thread
     suspension is currently 2ms.
   - When nanosleep() returns, we try again, doing MAX_SPIN_COUNT
     sched_yield(), then sleeping again if needed. */

static void __pthread_acquire(int * spinlock)
{
  int cnt = 0;
  struct timespec tm;

  READ_MEMORY_BARRIER();

  while (testandset(spinlock)) {
    if (cnt < MAX_SPIN_COUNT) {
      sched_yield();
      cnt++;
    } else {
      tm.tv_sec = 0;
      tm.tv_nsec = SPIN_SLEEP_DURATION;
      nanosleep(&tm, NULL);
      cnt = 0;
    }
  }
}

#endif
