OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

[/] [or1k/] [trunk/] [linux/] [linux-2.4/] [include/] [asm-or32/] [uaccess.h] - Blame information for rev 1765

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 1276 phoenix
/*
2
 * BK Id: SCCS/s.uaccess.h 1.10 05/21/02 21:44:32 paulus
3
 */
4
#ifdef __KERNEL__
5
#ifndef _OR32_UACCESS_H
6
#define _OR32_UACCESS_H
7
 
8
#ifndef __ASSEMBLY__
9
#include <linux/sched.h>
10
#include <linux/errno.h>
11
#include <asm/processor.h>
12
 
13
/* Marker values for the access_ok()/verify_area() `type' argument
 * (the or32 implementation ignores it — see access_ok below). */
#define VERIFY_READ     0
#define VERIFY_WRITE    1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define KERNEL_DS       ((mm_segment_t) { 0 })
#define USER_DS         ((mm_segment_t) { 1 })

#define get_ds()        (KERNEL_DS)
#define get_fs()        (current->thread.fs)
#define set_fs(val)     (current->thread.fs = (val))

/* Compare two mm_segment_t values by their .seg member. */
#define segment_eq(a,b) ((a).seg == (b).seg)

/* True when running with the kernel segment: all range checks pass. */
#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
/* Two comparisons so that addr + size cannot wrap past TASK_SIZE. */
#define __user_ok(addr,size) (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size)))
#define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size)))
/* Note: the `type' argument is discarded — only a range check is done. */
#define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size))
37
 
38
extern inline int verify_area(int type, const void * addr, unsigned long size)
39
{
40
        return access_ok(type,addr,size) ? 0 : -EFAULT;
41
}
42
 
43
 
44
/*
45
 * The exception table consists of pairs of addresses: the first is the
46
 * address of an instruction that is allowed to fault, and the second is
47
 * the address at which the program should continue.  No registers are
48
 * modified, so it is entirely up to the continuation code to figure out
49
 * what to do.
50
 *
51
 * All the routines below use bits of fixup code that are out of line
52
 * with the main instruction path.  This means when everything is well,
53
 * we don't even have to jump over them.  Further, they do not intrude
54
 * on our cache or tlb entries.
55
 */
56
 
57
/* One fixup pair: address of a faultable insn, address to resume at. */
struct exception_table_entry {
        unsigned long insn, fixup;
};
61
 
62
/* Returns 0 if exception not found and fixup otherwise.  */
63
extern unsigned long search_exception_table(unsigned long);
64
extern void sort_exception_table(void);
65
 
66
/*
67
 * These are the main single-value transfer routines.  They automatically
68
 * use the right size if we just have the right pointer type.
69
 *
70
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
71
 * and yet we don't want to do any pointers, because that is too much
72
 * of a performance impact. Thus we have a few rather ugly macros here,
73
 * and hide all the uglyness from the user.
74
 *
75
 * The "__xxx" versions of the user access functions are versions that
76
 * do not verify the address space, that must have been done previously
77
 * with a separate "access_ok()" call (this is used when we do multiple
78
 * accesses to the same area of user memory).
79
 *
80
 * As we use the same address space for kernel and user data on the
81
 * PowerPC, we can just do these as direct assignments.  (Of course, the
82
 * exception handling means that it's no longer "just"...)
83
 */
84
#define get_user(x,ptr) \
85
  __get_user_check((x),(ptr),sizeof(*(ptr)))
86
#define put_user(x,ptr) \
87
  __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
88
 
89
#define __get_user(x,ptr) \
90
  __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
91
#define __put_user(x,ptr) \
92
  __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
93
 
94
extern long __put_user_bad(void);
95
 
96
/* Store x through ptr with no access_ok() check; the expression
 * evaluates to 0 on success or -EFAULT on a faulting store. */
#define __put_user_nocheck(x,ptr,size)                  \
({                                                      \
        long __pu_err;                                  \
        __put_user_size((x),(ptr),(size),__pu_err);     \
        __pu_err;                                       \
})
102
 
103
/* Checked store: verify the destination range first, then store;
 * evaluates to 0 on success or -EFAULT (bad range or faulting store). */
#define __put_user_check(x,ptr,size)                            \
({                                                              \
        long __pu_err = -EFAULT;                                \
        __typeof__(*(ptr)) *__pu_addr = (ptr);                  \
        if (access_ok(VERIFY_WRITE,__pu_addr,size))             \
                __put_user_size((x),__pu_addr,(size),__pu_err); \
        __pu_err;                                               \
})
111
 
112
/* Dispatch a user-space store on operand size, using the matching
 * or32 store instruction; retval becomes 0 or -EFAULT. */
#define __put_user_size(x,ptr,size,retval)                      \
do {                                                            \
        retval = 0;                                             \
        switch (size) {                                         \
        case 1: __put_user_asm(x,ptr,retval,"l.sb"); break;     \
        case 2: __put_user_asm(x,ptr,retval,"l.sh"); break;     \
        case 4: __put_user_asm(x,ptr,retval,"l.sw"); break;     \
        case 8: __put_user_asm2(x,ptr,retval); break;           \
        default: __put_user_bad();                              \
        }                                                       \
} while (0)
123
 
124
/* Dummy aggregate: __m(x) reinterprets an address as a large object,
 * a conventional way to describe a memory operand to gcc. */
struct __large_struct {
        unsigned long buf[100];
};
#define __m(x) (*(struct __large_struct *)(x))
126
 
127
/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 */
/*
 * Single store to user space with exception-table fixup: label 1 is
 * the faultable store; on a fault the handler jumps to the fixup at
 * label 3, which sets err = -EFAULT and resumes after the store at
 * label 2.  The "0"(err) input ties err's initial value to %0.
 */
#define __put_user_asm(x, addr, err, op)                        \
        __asm__ __volatile__(                                   \
                "1:     "op" 0(%2),%1\n"                        \
                "2:\n"                                          \
                ".section .fixup,\"ax\"\n"                      \
                "3:     l.addi %0,r0,%3\n"                      \
                "       l.j 2b\n"                               \
                "       l.nop \n"                               \
                ".previous\n"                                   \
                ".section __ex_table,\"a\"\n"                   \
                "       .align 2\n"                             \
                "       .long 1b,3b\n"                          \
                ".previous"                                     \
                : "=r"(err)                                     \
                : "r"(x), "r"(addr), "g"(-EFAULT), "0"(err))
147
 
148
/*
 * 64-bit store as two word stores (labels 1 and 2); either store may
 * fault, and both have ex_table entries pointing at the shared fixup
 * at label 4, which sets err = -EFAULT and resumes at label 3.
 * NOTE(review): "%1+1" addresses the second register of a two-register
 * operand — this relies on old-gcc multi-register operand behavior;
 * confirm against the or32 toolchain actually in use.
 */
#define __put_user_asm2(x, addr, err)                           \
        __asm__ __volatile__(                                   \
                "1:     l.sw 0(%2),%1\n"                        \
                "2:     l.sw 4(%2),%1+1\n"                      \
                "3:\n"                                          \
                ".section .fixup,\"ax\"\n"                      \
                "4:     l.addi %0,r0,%3\n"                      \
                "       l.j 3b\n"                               \
                "       l.nop \n"                               \
                ".previous\n"                                   \
                ".section __ex_table,\"a\"\n"                   \
                "       .align 2\n"                             \
                "       .long 1b,4b\n"                          \
                "       .long 2b,4b\n"                          \
                ".previous"                                     \
                : "=r"(err)                                     \
                : "r"(x), "r"(addr), "g"(-EFAULT), "0"(err))
165
 
166
/* Fetch through ptr with no access_ok() check, converting the raw
 * value to the pointee type; evaluates to 0 or -EFAULT. */
#define __get_user_nocheck(x,ptr,size)                          \
({                                                              \
        long __gu_err;                                          \
        long __gu_val;                                          \
        __get_user_size(__gu_val,(ptr),(size),__gu_err);        \
        (x) = (__typeof__(*(ptr)))__gu_val;                     \
        __gu_err;                                               \
})
173
 
174
/* Checked fetch: x is assigned 0 when the range check or the load
 * fails; evaluates to 0 on success or -EFAULT otherwise. */
#define __get_user_check(x,ptr,size)                                    \
({                                                                      \
        long __gu_err = -EFAULT;                                        \
        long __gu_val = 0;                                              \
        const __typeof__(*(ptr)) *__gu_addr = (ptr);                    \
        if (access_ok(VERIFY_READ,__gu_addr,size))                      \
                __get_user_size(__gu_val,__gu_addr,(size),__gu_err);    \
        (x) = (__typeof__(*(ptr)))__gu_val;                             \
        __gu_err;                                                       \
})
183
 
184
extern long __get_user_bad(void);
185
 
186
/*
 * Dispatch a user-space load on operand size, using the matching or32
 * zero-extending load; retval becomes 0 on success or -EFAULT on a
 * fault (in which case the asm fixup zeroes the destination).
 *
 * Fix: `case 8' was missing its `break', so a successful 8-byte load
 * fell through into `default' and unconditionally clobbered x with
 * __get_user_bad() — compare __put_user_size, which breaks after
 * every case.
 */
#define __get_user_size(x,ptr,size,retval)                      \
do {                                                            \
        retval = 0;                                             \
        switch (size) {                                         \
          case 1: __get_user_asm(x,ptr,retval,"l.lbz"); break;  \
          case 2: __get_user_asm(x,ptr,retval,"l.lhz"); break;  \
          case 4: __get_user_asm(x,ptr,retval,"l.lwz"); break;  \
          case 8: __get_user_asm2(x, ptr, retval); break;       \
          default: (x) = __get_user_bad();                      \
        }                                                       \
} while (0)
197
 
198
/*
 * Single zero-extending load from user space: label 1 is the faultable
 * load; on a fault the fixup at label 3 sets err = -EFAULT, zeroes the
 * destination register, and resumes at label 2.  -EFAULT uses an "i"
 * (immediate) constraint since l.addi takes an immediate.
 */
#define __get_user_asm(x, addr, err, op)                \
        __asm__ __volatile__(                           \
                "1:     "op" %1,0(%2)\n"                \
                "2:\n"                                  \
                ".section .fixup,\"ax\"\n"              \
                "3:     l.addi %0,r0,%3\n"              \
                "       l.addi %1,r0,0\n"               \
                "       l.j 2b\n"                       \
                "       l.nop \n"                       \
                ".previous\n"                           \
                ".section __ex_table,\"a\"\n"           \
                "       .align 2\n"                     \
                "       .long 1b,3b\n"                  \
                ".previous"                             \
                : "=r"(err), "=r"(x)                    \
                : "r"(addr), "i"(-EFAULT), "0"(err))
214
 
215
/*
 * 64-bit load as two word loads (labels 1 and 2); either load may
 * fault, and both ex_table entries point at the fixup at label 4,
 * which sets err = -EFAULT, zeroes both destination registers, and
 * resumes at label 3.  "=&r" marks the destination early-clobber so
 * it is not allocated to the address register.
 * NOTE(review): "%1+1" names the second register of a register pair —
 * old-gcc behavior; confirm with the or32 toolchain in use.
 */
#define __get_user_asm2(x, addr, err)                   \
        __asm__ __volatile__(                           \
                "1:     l.lwz %1,0(%2)\n"               \
                "2:     l.lwz %1+1,4(%2)\n"             \
                "3:\n"                                  \
                ".section .fixup,\"ax\"\n"              \
                "4:     l.addi %0,r0,%3\n"              \
                "       l.addi %1,r0,0\n"               \
                "       l.addi %1+1,r0,0\n"             \
                "       l.j 3b\n"                       \
                "       l.nop \n"                       \
                ".previous\n"                           \
                ".section __ex_table,\"a\"\n"           \
                "       .align 2\n"                     \
                "       .long 1b,4b\n"                  \
                "       .long 2b,4b\n"                  \
                ".previous"                             \
                : "=r"(err), "=&r"(x)                   \
                : "r"(addr), "i"(-EFAULT), "0"(err))
234
 
235
/* more complex routines */
236
 
237
extern int __copy_tofrom_user(void *to, const void *from, unsigned long size);
238
 
239
extern inline unsigned long
240
copy_from_user(void *to, const void *from, unsigned long n)
241
{
242
        unsigned long over;
243
 
244
        if (access_ok(VERIFY_READ, from, n))
245
                return __copy_tofrom_user(to, from, n);
246
        if ((unsigned long)from < TASK_SIZE) {
247
                over = (unsigned long)from + n - TASK_SIZE;
248
                return __copy_tofrom_user(to, from, n - over) + over;
249
        }
250
        return n;
251
}
252
 
253
extern inline unsigned long
254
copy_to_user(void *to, const void *from, unsigned long n)
255
{
256
        unsigned long over;
257
 
258
        if (access_ok(VERIFY_WRITE, to, n))
259
                return __copy_tofrom_user(to, from, n);
260
        if ((unsigned long)to < TASK_SIZE) {
261
                over = (unsigned long)to + n - TASK_SIZE;
262
                return __copy_tofrom_user(to, from, n - over) + over;
263
        }
264
        return n;
265
}
266
 
267
#define __copy_from_user(to, from, size) \
268
        __copy_tofrom_user((to), (from), (size))
269
#define __copy_to_user(to, from, size) \
270
        __copy_tofrom_user((to), (from), (size))
271
 
272
extern unsigned long __clear_user(void *addr, unsigned long size);
273
 
274
extern inline unsigned long
275
clear_user(void *addr, unsigned long size)
276
{
277
 
278
        if (access_ok(VERIFY_WRITE, addr, size))
279
                return __clear_user(addr, size);
280
        if ((unsigned long)addr < TASK_SIZE) {
281
                unsigned long over = (unsigned long)addr + size - TASK_SIZE;
282
                return __clear_user(addr, size - over) + over;
283
        }
284
        return size;
285
}
286
 
287
extern int __strncpy_from_user(char *dst, const char *src, long count);
288
 
289
extern inline long
290
strncpy_from_user(char *dst, const char *src, long count)
291
{
292
        if (access_ok(VERIFY_READ, src, 1))
293
                return __strncpy_from_user(dst, src, count);
294
        return -EFAULT;
295
}
296
 
297
/*
298
 * Return the size of a string (including the ending 0)
299
 *
300
 * Return 0 for error
301
 */
302
 
303
extern int __strnlen_user(const char *str, long len, unsigned long top);
304
 
305
/*
306
 * Returns the length of the string at str (including the null byte),
307
 * or 0 if we hit a page we can't access,
308
 * or something > len if we didn't find a null byte.
309
 *
310
 * The `top' parameter to __strnlen_user is to make sure that
311
 * we can never overflow from the user area into kernel space.
312
 */
313
extern __inline__ int strnlen_user(const char *str, long len)
314
{
315
        unsigned long top = __kernel_ok? ~0UL: TASK_SIZE - 1;
316
 
317
        if ((unsigned long)str > top)
318
                return 0;
319
        return __strnlen_user(str, len, top);
320
}
321
 
322
#define strlen_user(str)        strnlen_user((str), 0x7ffffffe)
323
 
324
#endif  /* __ASSEMBLY__ */
325
 
326
#endif  /* _OR32_UACCESS_H */
327
#endif /* __KERNEL__ */

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.