OpenCores Subversion repository or1k: https://opencores.org/ocsvn/or1k/or1k/trunk
File: linux/linux-2.4/include/asm-ppc/system.h (blame view, rev 1774)
/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef __PPC_SYSTEM_H
#define __PPC_SYSTEM_H

#include <linux/config.h>
#include <linux/kdev_t.h>

#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/hw_irq.h>

/*
 * Memory barrier.
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory).  The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 *
 * We can use the eieio instruction for wmb, but since it doesn't
 * give any ordering guarantees about loads, we have to use the
 * stronger but slower sync instruction for mb and rmb.
 */
#define mb()  __asm__ __volatile__ ("sync" : : : "memory")
#define rmb()  __asm__ __volatile__ ("sync" : : : "memory")
#define wmb()  __asm__ __volatile__ ("eieio" : : : "memory")

#define set_mb(var, value)      do { var = value; mb(); } while (0)
#define set_wmb(var, value)     do { var = value; wmb(); } while (0)

#ifdef CONFIG_SMP
#define smp_mb()        mb()
#define smp_rmb()       rmb()
#define smp_wmb()       wmb()
#else
#define smp_mb()        __asm__ __volatile__("": : :"memory")
#define smp_rmb()       __asm__ __volatile__("": : :"memory")
#define smp_wmb()       __asm__ __volatile__("": : :"memory")
#endif /* CONFIG_SMP */
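
/*
 * Editorial usage sketch, not part of the original header: the classic
 * producer/consumer pairing for wmb()/rmb().  The producer must make
 * its data visible before its flag; the consumer must read the flag
 * before the data.  The names data, flag, produce and consume are
 * hypothetical, for illustration only.
 */
#if 0
static int data;
static volatile int flag;

static void produce(int v)
{
        data = v;       /* write the payload first */
        wmb();          /* order the data store before the flag store */
        flag = 1;       /* publish */
}

static int consume(void)
{
        while (!flag)   /* spin until the producer publishes */
                ;
        rmb();          /* order the flag load before the data load */
        return data;
}
#endif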

#ifdef __KERNEL__
extern void xmon_irq(int, void *, struct pt_regs *);
extern void xmon(struct pt_regs *excp);
extern void print_backtrace(unsigned long *);
extern void show_regs(struct pt_regs * regs);
extern void flush_instruction_cache(void);
extern void hard_reset_now(void);
extern void poweroff_now(void);
#ifdef CONFIG_6xx
extern long _get_L2CR(void);
extern void _set_L2CR(unsigned long);
extern long _get_L3CR(void);
extern void _set_L3CR(unsigned long);
#else
#define _get_L2CR()     0L
#define _set_L2CR(val)  do { } while(0)
#define _get_L3CR()     0L
#define _set_L3CR(val)  do { } while(0)
#endif
extern void via_cuda_init(void);
extern void pmac_nvram_init(void);
extern void read_rtc_time(void);
extern void pmac_find_display(void);
extern void giveup_fpu(struct task_struct *);
extern void enable_kernel_fp(void);
extern void giveup_altivec(struct task_struct *);
extern void load_up_altivec(struct task_struct *);
extern void cvt_fd(float *from, double *to, unsigned long *fpscr);
extern void cvt_df(double *from, float *to, unsigned long *fpscr);
extern int call_rtas(const char *, int, int, unsigned long *, ...);
extern int abs(int);
extern void cacheable_memzero(void *p, unsigned int nb);

struct device_node;
extern void note_scsi_host(struct device_node *, void *);

struct task_struct;
#define prepare_to_switch()     do { } while(0)
#define switch_to(prev,next,last) _switch_to((prev),(next),&(last))
extern void _switch_to(struct task_struct *, struct task_struct *,
                       struct task_struct **);

struct thread_struct;
extern struct task_struct *_switch(struct thread_struct *prev,
                                   struct thread_struct *next);

extern unsigned int rtas_data;

struct pt_regs;
extern void dump_regs(struct pt_regs *);

#ifndef CONFIG_SMP

#define cli()   __cli()
#define sti()   __sti()
#define save_flags(flags)       __save_flags(flags)
#define restore_flags(flags)    __restore_flags(flags)
#define save_and_cli(flags)     __save_and_cli(flags)
#define save_and_sti(flags)     __save_and_sti(flags)

#else /* CONFIG_SMP */

extern void __global_cli(void);
extern void __global_sti(void);
extern unsigned long __global_save_flags(void);
extern void __global_restore_flags(unsigned long);
#define cli() __global_cli()
#define sti() __global_sti()
#define save_flags(x) ((x)=__global_save_flags())
#define restore_flags(x) __global_restore_flags(x)

/* no trailing semicolons here, so these can be used in if/else bodies */
#define save_and_cli(x) do { save_flags(x); cli(); } while(0)
#define save_and_sti(x) do { save_flags(x); sti(); } while(0)

#endif /* !CONFIG_SMP */

#define local_irq_disable()             __cli()
#define local_irq_enable()              __sti()
#define local_irq_save(flags)           __save_and_cli(flags)
#define local_irq_set(flags)            __save_and_sti(flags)
#define local_irq_restore(flags)        __restore_flags(flags)
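
/*
 * Editorial usage sketch, not part of the original header: the usual
 * local_irq_save()/local_irq_restore() bracket around a short critical
 * section on the local CPU.  The counter variable and bump_counter()
 * are hypothetical names.
 */
#if 0
static unsigned long counter;

static void bump_counter(void)
{
        unsigned long flags;

        local_irq_save(flags);          /* save MSR and disable interrupts */
        counter++;                      /* protected read-modify-write */
        local_irq_restore(flags);       /* put the saved MSR state back */
}
#endif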

/*
 * Atomically exchange *p with val and return the old value, using a
 * lwarx (load and reserve) / stwcx. (store conditional) retry loop.
 */
static __inline__ unsigned long
xchg_u32(volatile void *p, unsigned long val)
{
        unsigned long prev;

        __asm__ __volatile__ ("\n\
1:      lwarx   %0,0,%2 \n"
        PPC405_ERR77(0,%2)
"       stwcx.  %3,0,%2 \n\
        bne-    1b"
        : "=&r" (prev), "=m" (*(volatile unsigned long *)p)
        : "r" (p), "r" (val), "m" (*(volatile unsigned long *)p)
        : "cc", "memory");

        return prev;
}
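
/*
 * Editorial note, not part of the original header: lwarx places a
 * reservation on the word it loads, and stwcx. succeeds only if that
 * reservation is still intact, so the bne- retries whenever another
 * processor touched *p in between.  In C the loop behaves roughly like
 * this sketch, where store_conditional() is a hypothetical stand-in
 * for stwcx.:
 */
#if 0
static unsigned long xchg_u32_sketch(volatile unsigned long *p,
                                     unsigned long val)
{
        unsigned long prev;

        do {
                prev = *p;                      /* lwarx: load and reserve */
        } while (!store_conditional(p, val));   /* stwcx.: store iff reserved */
        return prev;
}
#endif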

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid xchg().
 */
extern void __xchg_called_with_bad_pointer(void);

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))

static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
{
        switch (size) {
        case 4:
                return (unsigned long)xchg_u32(ptr, x);
#if 0   /* xchg_u64 doesn't exist on 32-bit PPC */
        case 8:
                return (unsigned long)xchg_u64(ptr, x);
#endif /* 0 */
        }
        __xchg_called_with_bad_pointer();
        return x;
}
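
/*
 * Editorial usage sketch, not part of the original header: tas() is a
 * test-and-set built on xchg(), which is enough for a crude spinlock.
 * The lock_word variable and both function names are hypothetical.
 */
#if 0
static volatile unsigned long lock_word;

static void crude_lock(void)
{
        while (tas(&lock_word))  /* set to 1; spin while it was already 1 */
                ;
        mb();                    /* keep critical-section accesses after the acquire */
}

static void crude_unlock(void)
{
        wmb();                   /* order critical-section stores before the release */
        lock_word = 0;           /* release */
}
#endif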

extern inline void * xchg_ptr(void * m, void * val)
{
        return (void *) xchg_u32(m, (unsigned long) val);
}

#define __HAVE_ARCH_CMPXCHG     1

/*
 * Atomic compare-and-swap: if *p == old, store new and return old;
 * otherwise return the value actually found.  The stwcx. fails and
 * the loop retries if *p was modified after the lwarx.
 */
static __inline__ unsigned long
__cmpxchg_u32(volatile int *p, int old, int new)
{
        int prev;

        __asm__ __volatile__ ("\n\
1:      lwarx   %0,0,%2 \n\
        cmpw    0,%0,%3 \n\
        bne     2f \n"
        PPC405_ERR77(0,%2)
"       stwcx.  %4,0,%2 \n\
        bne-    1b\n"
#ifdef CONFIG_SMP
"       sync\n"
#endif /* CONFIG_SMP */
"2:"
        : "=&r" (prev), "=m" (*p)
        : "r" (p), "r" (old), "r" (new), "m" (*p)
        : "cc", "memory");

        return prev;
}

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
        switch (size) {
        case 4:
                return __cmpxchg_u32(ptr, old, new);
#if 0   /* we don't have __cmpxchg_u64 on 32-bit PPC */
        case 8:
                return __cmpxchg_u64(ptr, old, new);
#endif /* 0 */
        }
        __cmpxchg_called_with_bad_pointer();
        return old;
}

#define cmpxchg(ptr,o,n)                                                 \
  ({                                                                     \
     __typeof__(*(ptr)) _o_ = (o);                                       \
     __typeof__(*(ptr)) _n_ = (n);                                       \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,           \
                                    (unsigned long)_n_, sizeof(*(ptr))); \
  })
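
/*
 * Editorial usage sketch, not part of the original header: the standard
 * cmpxchg() retry loop, here used for a lock-free add.  The names v and
 * atomic_add_sketch are hypothetical.
 */
#if 0
static unsigned long atomic_add_sketch(volatile unsigned long *v,
                                       unsigned long n)
{
        unsigned long old;

        do {
                old = *v;       /* snapshot the current value */
        } while (cmpxchg(v, old, old + n) != old);  /* retry if *v changed */
        return old + n;
}
#endif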

#endif /* __KERNEL__ */
#endif /* __PPC_SYSTEM_H */
