OpenCores Subversion repository or1k: https://opencores.org/ocsvn/or1k/or1k/trunk
or1k/trunk/linux/linux-2.4/include/asm-ia64/atomic.h (blame information for rev 1765)

#ifndef _ASM_IA64_ATOMIC_H
#define _ASM_IA64_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 *
 * NOTE: don't mess with the types below!  The "unsigned long" and
 * "int" types were carefully placed so as to ensure proper operation
 * of the macros.
 *
 * Copyright (C) 1998, 1999, 2002 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/types.h>

#include <asm/intrinsics.h>

/*
 * On IA-64, counter must always be volatile to ensure that the
 * memory accesses are ordered.
 */
typedef struct { volatile __s32 counter; } atomic_t;

#define ATOMIC_INIT(i)          ((atomic_t) { (i) })

#define atomic_read(v)          ((v)->counter)
#define atomic_set(v,i)         (((v)->counter) = (i))
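
/*
 * A minimal usage sketch (illustrative only; the variable name
 * nr_active is hypothetical): an atomic_t is initialized with
 * ATOMIC_INIT and afterwards touched only through the accessors
 * above.
 *
 *      static atomic_t nr_active = ATOMIC_INIT(0);
 *
 *      atomic_set(&nr_active, 5);
 *      if (atomic_read(&nr_active) > 0)
 *              do_something();         -- hypothetical helper
 */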

static __inline__ int
ia64_atomic_add (int i, atomic_t *v)
{
        __s32 old, new;
        CMPXCHG_BUGCHECK_DECL

        do {
                CMPXCHG_BUGCHECK(v);
                old = atomic_read(v);
                new = old + i;
        } while (ia64_cmpxchg("acq", v, old, new, sizeof(atomic_t)) != old);
        return new;
}

static __inline__ int
ia64_atomic_sub (int i, atomic_t *v)
{
        __s32 old, new;
        CMPXCHG_BUGCHECK_DECL

        do {
                CMPXCHG_BUGCHECK(v);
                old = atomic_read(v);
                new = old - i;
        } while (ia64_cmpxchg("acq", v, old, new, sizeof(atomic_t)) != old);
        return new;
}
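
/*
 * Both functions above use the standard compare-and-swap retry idiom:
 * read the counter, compute the new value, and try to publish it with
 * ia64_cmpxchg, retrying if another CPU changed the counter in the
 * meantime.  Conceptually (and only conceptually; the real primitive
 * is a single cmpxchg4.acq instruction), the cmpxchg step behaves
 * like this sketch executed atomically:
 *
 *      __s32 cmpxchg_sketch (atomic_t *v, __s32 old, __s32 new)
 *      {
 *              __s32 cur = v->counter;
 *              if (cur == old)
 *                      v->counter = new;
 *              return cur;             -- loop retries unless cur == old
 *      }
 */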

/*
 * Atomically add I to V and return TRUE if the resulting value is
 * negative.
 */
static __inline__ int
atomic_add_negative (int i, atomic_t *v)
{
        return ia64_atomic_add(i, v) < 0;
}

#define atomic_add_return(i,v)                                          \
        ((__builtin_constant_p(i) &&                                    \
          (   (i ==  1) || (i ==  4) || (i ==  8) || (i ==  16)         \
           || (i == -1) || (i == -4) || (i == -8) || (i == -16)))       \
         ? ia64_fetch_and_add(i, &(v)->counter)                         \
         : ia64_atomic_add(i, v))
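
/*
 * The constant test above matches the immediates accepted by the
 * IA-64 fetchadd4 instruction (+/-1, 4, 8, 16), so those cases
 * compile down to a single fetchadd; any other increment falls back
 * to the cmpxchg loop.  For example (cnt being a hypothetical
 * atomic_t), atomic_add_return(4, &cnt) takes the
 * ia64_fetch_and_add path, while atomic_add_return(3, &cnt) calls
 * ia64_atomic_add.
 */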

#define atomic_sub_return(i,v)                                          \
        ((__builtin_constant_p(i) &&                                    \
          (   (i ==  1) || (i ==  4) || (i ==  8) || (i ==  16)         \
           || (i == -1) || (i == -4) || (i == -8) || (i == -16)))       \
         ? ia64_fetch_and_add(-(i), &(v)->counter)                      \
         : ia64_atomic_sub(i, v))

#define atomic_dec_return(v)            atomic_sub_return(1, (v))
#define atomic_inc_return(v)            atomic_add_return(1, (v))

#define atomic_sub_and_test(i,v)        (atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)          (atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)          (atomic_add_return(1, (v)) != 0)
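
/*
 * A typical use of atomic_dec_and_test is reference counting (a
 * hedged sketch; the object type and release function are
 * hypothetical):
 *
 *      struct example_obj {
 *              atomic_t refcount;
 *      };
 *
 *      void example_put (struct example_obj *obj)
 *      {
 *              if (atomic_dec_and_test(&obj->refcount))
 *                      example_free(obj);      -- last reference gone
 *      }
 */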

#define atomic_add(i,v)                 atomic_add_return((i), (v))
#define atomic_sub(i,v)                 atomic_sub_return((i), (v))
#define atomic_inc(v)                   atomic_add(1, (v))
#define atomic_dec(v)                   atomic_sub(1, (v))

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()     barrier()
#define smp_mb__after_atomic_dec()      barrier()
#define smp_mb__before_atomic_inc()     barrier()
#define smp_mb__after_atomic_inc()      barrier()

#endif /* _ASM_IA64_ATOMIC_H */
