OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

[/] [or1k/] [trunk/] [linux/] [linux-2.4/] [include/] [asm-x86_64/] [locks.h] - Blame information for rev 1765

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 1275 phoenix
/*
2
 *      SMP locks primitives for building ix86 locks
3
 *      (not yet used).
4
 *
5
 *              Alan Cox, alan@redhat.com, 1995
6
 */
7
 
8
/*
9
 *      This would be much easier but far less clear and easy
10
 *      to borrow for other processors if it was just assembler.
11
 */
12
 
13
extern __inline__ void prim_spin_lock(struct spinlock *sp)
14
{
15
        int processor=smp_processor_id();
16
 
17
        /*
18
         *      Grab the lock bit
19
         */
20
 
21
        while(lock_set_bit(0,&sp->lock))
22
        {
23
                /*
24
                 *      Failed, but that's cos we own it!
25
                 */
26
 
27
                if(sp->cpu==processor)
28
                {
29
                        sp->users++;
30
                        return 0;
31
                }
32
                /*
33
                 *      Spin in the cache S state if possible
34
                 */
35
                while(sp->lock)
36
                {
37
                        /*
38
                         *      Wait for any invalidates to go off
39
                         */
40
 
41
                        if(smp_invalidate_needed&(1<<processor))
42
                                while(lock_clear_bit(processor,&smp_invalidate_needed))
43
                                        local_flush_tlb();
44
                        sp->spins++;
45
                }
46
                /*
47
                 *      Someone wrote the line, we go 'I' and get
48
                 *      the cache entry. Now try to regrab
49
                 */
50
        }
51
        sp->users++;sp->cpu=processor;
52
        return 1;
53
}
54
 
55
/*
56
 *      Release a spin lock
57
 */
58
 
59
extern __inline__ int prim_spin_unlock(struct spinlock *sp)
60
{
61
        /* This is safe. The decrement is still guarded by the lock. A multilock would
62
           not be safe this way */
63
        if(!--sp->users)
64
        {
65
                sp->cpu= NO_PROC_ID;lock_clear_bit(0,&sp->lock);
66
                return 1;
67
        }
68
        return 0;
69
}
70
 
71
 
72
/*
73
 *      Non blocking lock grab
74
 */
75
 
76
extern __inline__ int prim_spin_lock_nb(struct spinlock *sp)
77
{
78
        if(lock_set_bit(0,&sp->lock))
79
                return 0;                /* Locked already */
80
        sp->users++;
81
        return 1;                       /* We got the lock */
82
}
83
 
84
 
85
/*
86
 *      These wrap the locking primitives up for usage
87
 */
88
 
89
extern __inline__ void spinlock(struct spinlock *sp)
90
{
91
        if(sp->priority<current->lock_order)
92
                panic("lock order violation: %s (%d)\n", sp->name, current->lock_order);
93
        if(prim_spin_lock(sp))
94
        {
95
                /*
96
                 *      We got a new lock. Update the priority chain
97
                 */
98
                sp->oldpri=current->lock_order;
99
                current->lock_order=sp->priority;
100
        }
101
}
102
 
103
extern __inline__ void spinunlock(struct spinlock *sp)
104
{
105
        int pri;
106
        if(current->lock_order!=sp->priority)
107
                panic("lock release order violation %s (%d)\n", sp->name, current->lock_order);
108
        pri=sp->oldpri;
109
        if(prim_spin_unlock(sp))
110
        {
111
                /*
112
                 *      Update the debugging lock priority chain. We dumped
113
                 *      our last right to the lock.
114
                 */
115
                current->lock_order=sp->pri;
116
        }
117
}
118
 
119
/*
 *      Optimistic, non-blocking lock attempt.  No ordering sanity
 *      checks are done — it is legal to optimistically grab a
 *      lower-priority lock.  The result is deliberately ignored;
 *      callers are expected to inspect the lock themselves.
 */
extern __inline__ void spintestlock(struct spinlock *sp)
{
        (void)prim_spin_lock_nb(sp);
}
127
 
128
/*
 *      Release a lock taken via spintestlock().  A testlock never
 *      updated the lock-priority chain, so the release must not
 *      touch it either — hence the raw primitive, not spinunlock().
 */
extern __inline__ void spintestunlock(struct spinlock *sp)
{
        (void)prim_spin_unlock(sp);
}

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.