OpenCores Subversion repository: openrisc
URL: https://opencores.org/ocsvn/openrisc/openrisc/trunk
File: openrisc/trunk/gnu-dev/or1k-gcc/libgo/runtime/lock_sema.c (Rev 753)


// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
 
// +build darwin netbsd openbsd plan9 windows
 
#include "runtime.h"
 
// This implementation depends on OS-specific implementations of
//
//	uintptr runtime_semacreate(void)
//		Create a semaphore, which will be assigned to m->waitsema.
//		The zero value is treated as absence of any semaphore,
//		so be sure to return a non-zero value.
//
//	int32 runtime_semasleep(int64 ns)
//		If ns < 0, acquire m->waitsema and return 0.
//		If ns >= 0, try to acquire m->waitsema for at most ns nanoseconds.
//		Return 0 if the semaphore was acquired, -1 if interrupted or timed out.
//
//	int32 runtime_semawakeup(M *mp)
//		Wake up mp, which is or will soon be sleeping on mp->waitsema.
//
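
// Purely for illustration (hypothetical code, not part of libgo): on a
// system that provides unnamed POSIX semaphores, the three hooks above
// could be backed by sem_t roughly as sketched here.  The real
// implementations are supplied by the per-OS runtime sources and may use
// different primitives (e.g. Mach semaphores on darwin).
//
//	#include <semaphore.h>
//	#include <stdlib.h>
//	#include <errno.h>
//	#include <time.h>
//
//	uintptr
//	runtime_semacreate(void)
//	{
//		sem_t *sem;
//
//		sem = malloc(sizeof *sem);	// lives as long as the M; never freed
//		sem_init(sem, 0, 0);		// process-private, initial count 0
//		return (uintptr)sem;		// non-zero, as the contract requires
//	}
//
//	int32
//	runtime_semasleep(int64 ns)
//	{
//		sem_t *sem;
//		struct timespec ts;
//
//		sem = (sem_t*)runtime_m()->waitsema;
//		if(ns < 0) {
//			while(sem_wait(sem) != 0 && errno == EINTR)
//				;	// retry if interrupted by a signal
//			return 0;
//		}
//		clock_gettime(CLOCK_REALTIME, &ts);	// sem_timedwait wants an absolute time
//		ts.tv_sec += ns/1000000000LL;
//		ts.tv_nsec += ns%1000000000LL;
//		if(ts.tv_nsec >= 1000000000L) {
//			ts.tv_sec++;
//			ts.tv_nsec -= 1000000000L;
//		}
//		return sem_timedwait(sem, &ts) == 0 ? 0 : -1;	// -1 on timeout or interrupt
//	}
//
//	int32
//	runtime_semawakeup(M *mp)
//	{
//		return sem_post((sem_t*)mp->waitsema);	// return value unused by this file
//	}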
 
enum
{
	// Low bit of Lock.waitm: set while the lock is held.  The same value
	// also marks a Note whose wakeup has already been delivered.
	LOCKED = 1,

	ACTIVE_SPIN = 4,	// spin attempts before yielding (multiprocessor only)
	ACTIVE_SPIN_CNT = 30,	// iterations passed to runtime_procyield per attempt
	PASSIVE_SPIN = 1,	// runtime_osyield attempts before queueing to sleep
};
 
void
runtime_lock(Lock *l)
{
	M *m;
	uintptr v;
	uint32 i, spin;
 
	m = runtime_m();
	if(m->locks++ < 0)
		runtime_throw("runtime_lock: lock count");
 
	// Speculative grab for lock.
	if(runtime_casp(&l->waitm, nil, (void*)LOCKED))
		return;
 
	if(m->waitsema == 0)
		m->waitsema = runtime_semacreate();
 
	// On uniprocessors, no point spinning.
	// On multiprocessors, spin for ACTIVE_SPIN attempts.
	spin = 0;
	if(runtime_ncpu > 1)
		spin = ACTIVE_SPIN;
 
	for(i=0;; i++) {
		v = (uintptr)runtime_atomicloadp(&l->waitm);
		if((v&LOCKED) == 0) {
unlocked:
			if(runtime_casp(&l->waitm, (void*)v, (void*)(v|LOCKED)))
				return;
			i = 0;
		}
		if(i<spin)
			runtime_procyield(ACTIVE_SPIN_CNT);
		else if(i<spin+PASSIVE_SPIN)
			runtime_osyield();
		else {
			// Someone else has it.
			// l->waitm points to a linked list of M's waiting
			// for this lock, chained through m->nextwaitm.
			// Queue this M.
			for(;;) {
				m->nextwaitm = (void*)(v&~LOCKED);
				if(runtime_casp(&l->waitm, (void*)v, (void*)((uintptr)m|LOCKED)))
					break;
				v = (uintptr)runtime_atomicloadp(&l->waitm);
				if((v&LOCKED) == 0)
					goto unlocked;
			}
			if(v&LOCKED) {
				// Queued.  Wait.
				runtime_semasleep(-1);
				i = 0;
			}
		}
	}
}
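
// For orientation: a sketch (inferred from runtime_lock above and
// runtime_unlock below) of the states the single word l->waitm moves through.
//
//	l->waitm == nil			unlocked, no waiting M's
//	l->waitm == LOCKED		locked, no waiting M's
//	l->waitm == mp|LOCKED		locked; mp heads a list of waiting M's
//					chained through m->nextwaitm
//	l->waitm == mp (bit clear)	unlocked, but M's remain queued; a woken
//					M must CAS the LOCKED bit back in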
 
void
runtime_unlock(Lock *l)
{
	uintptr v;
	M *mp;
 
	if(--runtime_m()->locks < 0)
		runtime_throw("runtime_unlock: lock count");
 
	for(;;) {
		v = (uintptr)runtime_atomicloadp(&l->waitm);
		if(v == LOCKED) {
			if(runtime_casp(&l->waitm, (void*)LOCKED, nil))
				break;
		} else {
			// Other M's are waiting for the lock.
			// Dequeue an M.
			mp = (void*)(v&~LOCKED);
			if(runtime_casp(&l->waitm, (void*)v, mp->nextwaitm)) {
				// Dequeued an M.  Wake it.
				runtime_semawakeup(mp);
				break;
			}
		}
	}
}
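
// Typical use elsewhere in the runtime looks like the hypothetical snippet
// below ("counter" and "counterlock" are invented names for the example;
// the zero value of Lock is an unlocked lock).
//
//	static Lock counterlock;
//	static int64 counter;
//
//	static void
//	counter_incref(void)
//	{
//		runtime_lock(&counterlock);	// spins briefly, then sleeps if contended
//		counter++;			// critical section
//		runtime_unlock(&counterlock);	// wakes one queued M, if any
//	}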
 
// One-time notifications.
void
runtime_noteclear(Note *n)
{
	n->waitm = nil;
}
 
void
runtime_notewakeup(Note *n)
{
	M *mp;
 
	do
		mp = runtime_atomicloadp(&n->waitm);
	while(!runtime_casp(&n->waitm, mp, (void*)LOCKED));
 
	// Successfully set waitm to LOCKED.
	// What was it before?
	if(mp == nil) {
		// Nothing was waiting.  Done.
	} else if(mp == (M*)LOCKED) {
		// Two notewakeups!  Not allowed.
		runtime_throw("notewakeup - double wakeup");
	} else {
		// Must be the waiting m.  Wake it up.
		runtime_semawakeup(mp);
	}
}
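
// Taken together with runtime_noteclear and runtime_notesleep below, the
// word n->waitm holds one of three values:
//
//	nil	cleared; neither a sleeper nor a wakeup has arrived yet
//	mp	an M has registered and is sleeping (or about to sleep) on
//		its waitsema
//	LOCKED	the wakeup has been delivered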
 
void
runtime_notesleep(Note *n)
{
	M *m;
 
	m = runtime_m();
	if(m->waitsema == 0)
		m->waitsema = runtime_semacreate();
	if(!runtime_casp(&n->waitm, nil, m)) {  // must be LOCKED (got wakeup)
		if(n->waitm != (void*)LOCKED)
			runtime_throw("notesleep - waitm out of sync");
		return;
	}
	// Queued.  Sleep.
	runtime_semasleep(-1);
}
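
// A one-shot handoff between two M's then looks like this hypothetical
// snippet ("done" and start_worker are invented for the example):
//
//	static Note done;
//
//	// waiting side
//	runtime_noteclear(&done);	// reset n->waitm to nil
//	start_worker();			// worker eventually calls notewakeup
//	runtime_notesleep(&done);	// returns once the wakeup has arrived
//
//	// worker, when finished
//	runtime_notewakeup(&done);	// at most one wakeup per noteclear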
 
void
runtime_notetsleep(Note *n, int64 ns)
{
	M *m;
	M *mp;
	int64 deadline, now;
 
	if(ns < 0) {
		runtime_notesleep(n);
		return;
	}
 
	m = runtime_m();
	if(m->waitsema == 0)
		m->waitsema = runtime_semacreate();
 
	// Register for wakeup on n->waitm.
	if(!runtime_casp(&n->waitm, nil, m)) {  // must be LOCKED (got wakeup already)
		if(n->waitm != (void*)LOCKED)
			runtime_throw("notetsleep - waitm out of sync");
		return;
	}
 
	deadline = runtime_nanotime() + ns;
	for(;;) {
		// Registered.  Sleep.
		if(runtime_semasleep(ns) >= 0) {
			// Acquired semaphore, semawakeup unregistered us.
			// Done.
			return;
		}
 
		// Interrupted or timed out.  Still registered.  Semaphore not acquired.
		now = runtime_nanotime();
		if(now >= deadline)
			break;
 
		// Deadline hasn't arrived.  Keep sleeping.
		ns = deadline - now;
	}
 
	// Deadline arrived.  Still registered.  Semaphore not acquired.
	// Want to give up and return, but have to unregister first,
	// so that any notewakeup racing with the return does not
	// try to grant us the semaphore when we don't expect it.
	for(;;) {
		mp = runtime_atomicloadp(&n->waitm);
		if(mp == m) {
			// No wakeup yet; unregister if possible.
			if(runtime_casp(&n->waitm, mp, nil))
				return;
		} else if(mp == (M*)LOCKED) {
			// Wakeup happened so semaphore is available.
			// Grab it to avoid getting out of sync.
			if(runtime_semasleep(-1) < 0)
				runtime_throw("runtime: unable to acquire - semaphore out of sync");
			return;
		} else {
			runtime_throw("runtime: unexpected waitm - semaphore out of sync");
		}
	}
}
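
// The timed variant can be used the same way, e.g. to wait at most 50
// milliseconds (illustrative, reusing the hypothetical "done" above).  Note
// that this version returns void, so a caller that must distinguish wakeup
// from timeout has to record that separately.
//
//	runtime_noteclear(&done);
//	start_worker();
//	runtime_notetsleep(&done, 50*1000*1000);	// ns; returns on wakeup or timeout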
 
