#ifndef __PARISC_SPINLOCK_T_H
#define __PARISC_SPINLOCK_T_H

/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*.
 *
 * Note that PA-RISC has to use `1' to mean unlocked and `0' to mean locked
 * since it only has load-and-zero.
 */
#ifdef CONFIG_PA20
/*
> From: "Jim Hull" <jim.hull of hp.com>
> Delivery-date: Wed, 29 Jan 2003 13:57:05 -0500
> I've attached a summary of the change, but basically, for PA 2.0, as
> long as the ",CO" (coherent operation) completer is specified, then the
> 16-byte alignment requirement for ldcw and ldcd is relaxed, and instead
> they only require "natural" alignment (4-byte for ldcw, 8-byte for
> ldcd).
*/

#define __ldcw(a) ({ \
        unsigned __ret; \
        __asm__ __volatile__("ldcw,co 0(%1),%0" : "=r" (__ret) : "r" (a)); \
        __ret; \
})
#else
#define __ldcw(a) ({ \
        unsigned __ret; \
        __asm__ __volatile__("ldcw 0(%1),%0" : "=r" (__ret) : "r" (a)); \
        __ret; \
})
#endif
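
/*
 * Illustrative sketch (not part of the original header): __ldcw
 * atomically loads a word and stores zero to it, so "acquired" means
 * "read back nonzero".  Assuming a suitably aligned word that starts
 * out as 1:
 *
 *     volatile unsigned int __attribute__((aligned(16))) word = 1;
 *
 *     if (__ldcw(&word) != 0)
 *             ...we read 1 and left 0 behind: lock acquired...
 *     else
 *             ...word was already 0: somebody else holds it...
 */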

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */

typedef struct {
#ifdef CONFIG_PA20
        volatile unsigned int lock;
#else
        volatile unsigned int __attribute__((aligned(16))) lock;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
        volatile unsigned long owner_pc;
        volatile unsigned long owner_cpu;
#endif
} spinlock_t;
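
/*
 * Added note: on PA 1.x the lock word carries the aligned(16)
 * attribute because plain ldcw requires 16-byte alignment; on PA 2.0
 * the ",co" completer relaxes this to natural 4-byte alignment (see
 * the quoted mail above), so no attribute is needed there.
 */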

#ifndef CONFIG_DEBUG_SPINLOCK
#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 }
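
/*
 * Illustrative use (not in the original source): a lock starts out
 * unlocked, i.e. with lock == 1, e.g.
 *
 *     static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;
 *
 * (my_lock is a hypothetical name; run-time setup goes through
 * spin_lock_init() below.)
 */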

/* Define 6 spinlock primitives that don't depend on anything else. */

#define spin_lock_init(x)       do { (x)->lock = 1; } while(0)
#define spin_is_locked(x)       ((x)->lock == 0)
#define spin_trylock(x)         (__ldcw(&(x)->lock) != 0)
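
/*
 * Added note: spin_trylock succeeds exactly when __ldcw reads back the
 * old value 1 (the lock was free and the load-and-zero took it);
 * spin_is_locked merely tests for the locked (0) state without
 * modifying it.
 */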

/*
 * PA 2.0 is not strongly ordered; PA 1.x is.  ldcw enforces ordering,
 * and we need to make sure that ordering is enforced on the unlock
 * path too.  "stw,ma" with a zero index is an alias for "stw,o":
 * PA 1.x assemblers accept "stw,ma" but don't know about "stw,o",
 * while PA 2.0 assemblers generate the right instruction from either
 * form.  Thanks to John David Anglin for this cute trick.
 *
 * Writing this in asm also ensures that the unlock store doesn't get
 * reordered by the compiler.
 */
#define spin_unlock(x) \
        __asm__ __volatile__ ("stw,ma  %%sp,0(%0)" : : "r" (&(x)->lock) : "memory" )
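
/*
 * Added note: any nonzero store marks the lock free (1 == unlocked,
 * see above, and every test is against 0).  %sp is stored because the
 * stack pointer is conveniently never zero, so no extra instruction is
 * needed to materialize a nonzero value.
 */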

#define spin_unlock_wait(x)     do { barrier(); } while(((volatile spinlock_t *)(x))->lock == 0)

#define spin_lock(x) do { \
        while (__ldcw (&(x)->lock) == 0) \
                while ((x)->lock == 0) ; \
} while (0)
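
/*
 * Added note: this is the classic test-and-test-and-set pattern.  The
 * outer loop attempts the atomic ldcw; when that fails, the inner loop
 * spins on plain loads of lock and only retries the atomic operation
 * once the lock looks free again, avoiding a stream of atomic
 * operations while the lock is held.
 */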

#else

#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1, 0, 0 }

/* Define 6 spinlock primitives that don't depend on anything else. */

#define spin_lock_init(x)       do { (x)->lock = 1; (x)->owner_cpu = 0; (x)->owner_pc = 0; } while(0)
#define spin_is_locked(x)       ((x)->lock == 0)
void spin_lock(spinlock_t *lock);
int spin_trylock(spinlock_t *lock);
void spin_unlock(spinlock_t *lock);
#define spin_unlock_wait(x)     do { barrier(); } while(((volatile spinlock_t *)(x))->lock == 0)
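
/*
 * Added note: with CONFIG_DEBUG_SPINLOCK, lock/trylock/unlock become
 * out-of-line functions (declared above), presumably so they can
 * record owner_pc and owner_cpu for diagnostics; their definitions
 * live elsewhere in the arch-specific kernel sources.
 */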

#endif

#endif /* __PARISC_SPINLOCK_T_H */
