OpenCores
URL: https://opencores.org/ocsvn/or1k_old/or1k_old/trunk

Subversion repository: or1k_old

File: /or1k_old/trunk/rc203soc/sw/uClinux/arch/m68k/kernel/sys_m68k.c (blame view for rev 1782; file last changed in rev 1623 by jcastillo)

/*
 * linux/arch/m68k/kernel/sys_m68k.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/m68k
 * platform.
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>

#include <asm/segment.h>
#include <asm/cachectl.h>

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way unix traditionally does this, though.
 */
asmlinkage int sys_pipe(unsigned long * fildes)
{
        int fd[2];
        int error;

        error = verify_area(VERIFY_WRITE,fildes,8);
        if (error)
                return error;
        error = do_pipe(fd);
        if (error)
                return error;
        put_user(fd[0],0+fildes);
        put_user(fd[1],1+fildes);
        return 0;
}

/*
 * Perform the select(nd, in, out, ex, tv) and mmap() system
 * calls. Linux/m68k cloned Linux/i386, which didn't use to be able to
 * handle more than 4 system call parameters, so these system calls
 * used a memory block for parameter passing..
 */

asmlinkage int old_mmap(unsigned long *buffer)
{
        int error;
        unsigned long flags;
        struct file * file = NULL;

        error = verify_area(VERIFY_READ, buffer, 6*sizeof(long));
        if (error)
                return error;
        flags = get_user(buffer+3);
        if (!(flags & MAP_ANONYMOUS)) {
                unsigned long fd = get_user(buffer+4);
                if (fd >= NR_OPEN || !(file = current->files->fd[fd]))
                        return -EBADF;
        }
        flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
        return do_mmap(file, get_user(buffer), get_user(buffer+1),
                       get_user(buffer+2), flags, get_user(buffer+5));
}


extern asmlinkage int sys_select(int, fd_set *, fd_set *, fd_set *, struct timeval *);

asmlinkage int old_select(unsigned long *buffer)
{
        int n;
        fd_set *inp;
        fd_set *outp;
        fd_set *exp;
        struct timeval *tvp;

        n = verify_area(VERIFY_READ, buffer, 5*sizeof(unsigned long));
        if (n)
          return n;

        n = get_user(buffer);
        inp = (fd_set *) get_user(buffer+1);
        outp = (fd_set *) get_user(buffer+2);
        exp = (fd_set *) get_user(buffer+3);
        tvp = (struct timeval *) get_user(buffer+4);
        return sys_select(n, inp, outp, exp, tvp);
}

/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
 *
 * This is really horribly ugly.
 */
asmlinkage int sys_ipc (uint call, int first, int second, int third, void *ptr, long fifth)
{
        int version;

        version = call >> 16; /* hack for backward compatibility */
        call &= 0xffff;

        if (call <= SEMCTL)
                switch (call) {
                case SEMOP:
                        return sys_semop (first, (struct sembuf *)ptr, second);
                case SEMGET:
                        return sys_semget (first, second, third);
                case SEMCTL: {
                        union semun fourth;
                        int err;
                        if (!ptr)
                                return -EINVAL;
                        if ((err = verify_area (VERIFY_READ, ptr, sizeof(long))))
                                return err;
                        fourth.__pad = get_user((void **)ptr);
                        return sys_semctl (first, second, third, fourth);
                        }
                default:
                        return -EINVAL;
                }
        if (call <= MSGCTL)
                switch (call) {
                case MSGSND:
                        return sys_msgsnd (first, (struct msgbuf *) ptr,
                                           second, third);
                case MSGRCV:
                        switch (version) {
                        case 0: {
                                struct ipc_kludge tmp;
                                int err;
                                if (!ptr)
                                        return -EINVAL;
                                if ((err = verify_area (VERIFY_READ, ptr, sizeof(tmp))))
                                        return err;
                                memcpy_fromfs (&tmp,(struct ipc_kludge *) ptr,
                                               sizeof (tmp));
                                return sys_msgrcv (first, tmp.msgp, second, tmp.msgtyp, third);
                                }
                        case 1: default:
                                return sys_msgrcv (first, (struct msgbuf *) ptr, second, fifth, third);
                        }
                case MSGGET:
                        return sys_msgget ((key_t) first, second);
                case MSGCTL:
                        return sys_msgctl (first, second, (struct msqid_ds *) ptr);
                default:
                        return -EINVAL;
                }
        if (call <= SHMCTL)
                switch (call) {
                case SHMAT:
                        switch (version) {
                        case 0: default: {
                                ulong raddr;
                                int err;
                                if ((err = verify_area(VERIFY_WRITE, (ulong*) third, sizeof(ulong))))
                                        return err;
                                err = sys_shmat (first, (char *) ptr, second, &raddr);
                                if (err)
                                        return err;
                                put_user (raddr, (ulong *) third);
                                return 0;
                                }
                        case 1: /* iBCS2 emulator entry point */
                                if (get_fs() != get_ds())
                                        return -EINVAL;
                                return sys_shmat (first, (char *) ptr, second, (ulong *) third);
                        }
                case SHMDT:
                        return sys_shmdt ((char *)ptr);
                case SHMGET:
                        return sys_shmget (first, second, third);
                case SHMCTL:
                        return sys_shmctl (first, second, (struct shmid_ds *) ptr);
                default:
                        return -EINVAL;
                }
        return -EINVAL;
}

asmlinkage int sys_ioperm(unsigned long from, unsigned long num, int on)
{
  return -ENOSYS;
}

/* Convert virtual address VADDR to physical address PADDR, recording
   in VALID whether the virtual address is actually mapped.  */
#define virt_to_phys_040(vaddr, paddr, valid)                           \
{                                                                       \
  register unsigned long _tmp1 __asm__ ("a0") = (vaddr);                \
  register unsigned long _tmp2 __asm__ ("d0");                          \
  unsigned long _mmusr;                                                 \
                                                                        \
  __asm__ __volatile__ (".word 0xf568 /* ptestr (%1) */\n\t"            \
                        ".long 0x4e7a0805 /* movec %%mmusr,%0 */"       \
                        : "=d" (_tmp2)                                  \
                        : "a" (_tmp1));                                 \
  _mmusr = _tmp2;                                                       \
  if (0 /* XXX _mmusr & MMU_?_040 */)                                   \
    (valid) = 0;                                                        \
  else                                                                  \
    {                                                                   \
      (valid) = 1;                                                      \
      (paddr) = _mmusr & ~0xfff;                                        \
    }                                                                   \
}

static inline int
cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
{
  unsigned long paddr;
  int valid;

  switch (scope)
    {
    case FLUSH_SCOPE_ALL:
      switch (cache)
        {
        case FLUSH_CACHE_DATA:
          /* This nop is needed for some broken versions of the 68040.  */
          __asm__ __volatile__ ("nop\n\t"
                                ".word 0xf478 /* cpusha %%dc */");
          break;
        case FLUSH_CACHE_INSN:
          __asm__ __volatile__ ("nop\n\t"
                                ".word 0xf4b8 /* cpusha %%ic */");
          break;
        default:
        case FLUSH_CACHE_BOTH:
          __asm__ __volatile__ ("nop\n\t"
                                ".word 0xf4f8 /* cpusha %%bc */");
          break;
        }
      break;

    case FLUSH_SCOPE_LINE:
      len >>= 4;
      for (;;)
        {
          virt_to_phys_040 (addr, paddr, valid);
          if (valid)
            break;
          if (len <= PAGE_SIZE / 16)
            return 0;
          len -= PAGE_SIZE / 16;
          addr += PAGE_SIZE;
        }
      while (len--)
        {
          register unsigned long tmp __asm__ ("a0") = paddr;
          switch (cache)
            {
            case FLUSH_CACHE_DATA:
              __asm__ __volatile__ ("nop\n\t"
                                    ".word 0xf468 /* cpushl %%dc,(%0) */"
                                    : : "a" (tmp));
              break;
            case FLUSH_CACHE_INSN:
              __asm__ __volatile__ ("nop\n\t"
                                    ".word 0xf4a8 /* cpushl %%ic,(%0) */"
                                    : : "a" (tmp));
              break;
            default:
            case FLUSH_CACHE_BOTH:
              __asm__ __volatile__ ("nop\n\t"
                                    ".word 0xf4e8 /* cpushl %%bc,(%0) */"
                                    : : "a" (paddr));
              break;
            }
          addr += 16;
          if (len)
            {
              if ((addr & (PAGE_SIZE-1)) < 16)
                {
                  /* Recompute physical address when crossing a page
                     boundary. */
                  for (;;)
                    {
                      virt_to_phys_040 (addr, paddr, valid);
                      if (valid)
                        break;
                      if (len <= PAGE_SIZE / 16)
                        return 0;
                      len -= PAGE_SIZE / 16;
                      addr += PAGE_SIZE;
                    }
                }
              else
                paddr += 16;
            }
        }
      break;

    default:
    case FLUSH_SCOPE_PAGE:
      for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
        {
          register unsigned long tmp __asm__ ("a0");
          virt_to_phys_040 (addr, paddr, valid);
          if (!valid)
            continue;
          tmp = paddr;
          switch (cache)
            {
            case FLUSH_CACHE_DATA:
              __asm__ __volatile__ ("nop\n\t"
                                    ".word 0xf470 /* cpushp %%dc,(%0) */"
                                    : : "a" (tmp));
              break;
            case FLUSH_CACHE_INSN:
              __asm__ __volatile__ ("nop\n\t"
                                    ".word 0xf4b0 /* cpushp %%ic,(%0) */"
                                    : : "a" (tmp));
              break;
            default:
            case FLUSH_CACHE_BOTH:
              __asm__ __volatile__ ("nop\n\t"
                                    ".word 0xf4f0 /* cpushp %%bc,(%0) */"
                                    : : "a" (tmp));
              break;
            }
        }
      break;
    }
  return 0;
}

#define virt_to_phys_060(vaddr, paddr, valid)           \
{                                                       \
  register unsigned long _tmp __asm__ ("a0") = (vaddr); \
                                                        \
  __asm__ __volatile__ (".word 0xf5c8 /* plpar (%1) */" \
                        : "=a" (_tmp)                   \
                        : "0" (_tmp));                  \
  (valid) = 1; /* XXX */                                \
  (paddr) = _tmp;                                       \
}

static inline int
cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
{
  unsigned long paddr;
  int valid;

  switch (scope)
    {
    case FLUSH_SCOPE_ALL:
      switch (cache)
        {
        case FLUSH_CACHE_DATA:
          __asm__ __volatile__ (".word 0xf478 /* cpusha %%dc */\n\t"
                                ".word 0xf458 /* cinva %%dc */");
          break;
        case FLUSH_CACHE_INSN:
          __asm__ __volatile__ (".word 0xf4b8 /* cpusha %%ic */\n\t"
                                ".word 0xf498 /* cinva %%ic */");
          break;
        default:
        case FLUSH_CACHE_BOTH:
          __asm__ __volatile__ (".word 0xf4f8 /* cpusha %%bc */\n\t"
                                ".word 0xf4d8 /* cinva %%bc */");
          break;
        }
      break;

    case FLUSH_SCOPE_LINE:
      len >>= 4;
      for (;;)
        {
          virt_to_phys_060 (addr, paddr, valid);
          if (valid)
            break;
          if (len <= PAGE_SIZE / 16)
            return 0;
          len -= PAGE_SIZE / 16;
          addr += PAGE_SIZE;
        }
      while (len--)
        {
          register unsigned long tmp __asm__ ("a0") = paddr;
          switch (cache)
            {
            case FLUSH_CACHE_DATA:
              __asm__ __volatile__ (".word 0xf468 /* cpushl %%dc,(%0) */\n\t"
                                    ".word 0xf448 /* cinv %%dc,(%0) */"
                                    : : "a" (tmp));
              break;
            case FLUSH_CACHE_INSN:
              __asm__ __volatile__ (".word 0xf4a8 /* cpushl %%ic,(%0) */\n\t"
                                    ".word 0xf488 /* cinv %%ic,(%0) */"
                                    : : "a" (tmp));
              break;
            default:
            case FLUSH_CACHE_BOTH:
              __asm__ __volatile__ (".word 0xf4e8 /* cpushl %%bc,(%0) */\n\t"
                                    ".word 0xf4c8 /* cinv %%bc,(%0) */"
                                    : : "a" (paddr));
              break;
            }
          addr += 16;
          if (len)
            {
              if ((addr & (PAGE_SIZE-1)) < 16)
                {
                  /* Recompute the physical address when crossing a
                     page boundary.  */
                  for (;;)
                    {
                      virt_to_phys_060 (addr, paddr, valid);
                      if (valid)
                        break;
                      if (len <= PAGE_SIZE / 16)
                        return 0;
                      len -= PAGE_SIZE / 16;
                      addr += PAGE_SIZE;
                    }
                }
              else
                paddr += 16;
            }
        }
      break;

    default:
    case FLUSH_SCOPE_PAGE:
      for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
        {
          register unsigned long tmp __asm__ ("a0");
          virt_to_phys_060 (addr, paddr, valid);
          if (!valid)
            continue;
          tmp = paddr;
          switch (cache)
            {
            case FLUSH_CACHE_DATA:
              __asm__ __volatile__ (".word 0xf470 /* cpushp %%dc,(%0) */\n\t"
                                    ".word 0xf450 /* cinv %%dc,(%0) */"
                                    : : "a" (tmp));
              break;
            case FLUSH_CACHE_INSN:
              __asm__ __volatile__ (".word 0xf4b0 /* cpushp %%ic,(%0) */\n\t"
                                    ".word 0xf490 /* cinv %%ic,(%0) */"
                                    : : "a" (tmp));
              break;
            default:
            case FLUSH_CACHE_BOTH:
              __asm__ __volatile__ (".word 0xf4f0 /* cpushp %%bc,(%0) */\n\t"
                                    ".word 0xf4d0 /* cinv %%bc,(%0) */"
                                    : : "a" (tmp));
              break;
            }
        }
      break;
    }
  return 0;
}

/* sys_cacheflush -- flush (part of) the processor cache.  */
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
  struct vm_area_struct *vma;

  if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL
      || cache & ~FLUSH_CACHE_BOTH)
    return -EINVAL;

  if (scope == FLUSH_SCOPE_ALL)
    {
      /* Only the superuser may flush the whole cache. */
      if (!suser ())
        return -EPERM;
    }
  else
    {
      /* Verify that the specified address region actually belongs to
         this process.  */
      vma = find_vma (current, addr);
      if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
        return -EINVAL;
    }

  switch (m68k_is040or060)
    {
    default: /* 030 */
      /* Always flush the whole cache, everything else would not be
         worth the hassle.  */
      __asm__ __volatile__
        ("movec %%cacr, %%d0\n\t"
         "or %0, %%d0\n\t"
         "movec %%d0, %%cacr"
         : /* no outputs */
         : "di" ((cache & FLUSH_CACHE_INSN ? 8 : 0)
                 | (cache & FLUSH_CACHE_DATA ? 0x800 : 0))
         : "d0");
      return 0;

    case 4: /* 040 */
      return cache_flush_040 (addr, scope, cache, len);

    case 6: /* 060 */
      return cache_flush_060 (addr, scope, cache, len);
    }
}
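
The comment above sys_pipe() notes that, unlike traditional unix (which returned the two descriptors in registers), Linux/m68k uses the ordinary C convention of writing both descriptors back through a user pointer. A minimal userspace sketch of the corresponding call, assuming nothing beyond the standard libc pipe() wrapper, might be:

/* Illustrative sketch only (not part of sys_m68k.c). */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int fds[2];              /* filled in by the kernel via put_user(fd[0]/fd[1]) */

        if (pipe(fds) < 0) {     /* libc wrapper over sys_pipe()                       */
                perror("pipe");
                return 1;
        }
        printf("read end %d, write end %d\n", fds[0], fds[1]);
        return 0;
}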
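
The comment above old_mmap() and old_select() describes the old parameter-block calling convention: userspace packs all arguments into one memory block and passes a single pointer into the kernel. A hedged sketch of what the userspace side could look like is shown below; the wrapper name my_old_mmap and the trap stub __old_mmap_syscall are hypothetical and not taken from this tree.

/* Illustrative sketch only: packing the six mmap() arguments into the
   block that old_mmap() reads with get_user(buffer+N).  The trap stub
   __old_mmap_syscall is assumed, not a real symbol from this source. */
#include <stddef.h>

extern int __old_mmap_syscall(unsigned long *buffer);   /* assumed trap stub */

static void *my_old_mmap(void *addr, size_t len, int prot,
                         int flags, int fd, long offset)
{
        unsigned long buffer[6];

        buffer[0] = (unsigned long) addr;    /* read by get_user(buffer)   */
        buffer[1] = (unsigned long) len;     /* get_user(buffer+1)         */
        buffer[2] = (unsigned long) prot;    /* get_user(buffer+2)         */
        buffer[3] = (unsigned long) flags;   /* get_user(buffer+3)         */
        buffer[4] = (unsigned long) fd;      /* get_user(buffer+4)         */
        buffer[5] = (unsigned long) offset;  /* get_user(buffer+5)         */

        return (void *) (long) __old_mmap_syscall(buffer);
}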
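
sys_ipc() multiplexes every System V IPC operation through a single entry point: the low 16 bits of `call` select the operation and the high 16 bits carry an interface version. As a rough illustration of the userspace side, a semop() wrapper could route through an ipc() stub like this; the stub declaration is an assumption for the sketch, not code from this tree.

/* Illustrative sketch only: how a libc semop() wrapper might funnel its
   arguments into the sys_ipc() demultiplexer above.  The ipc() stub is
   assumed to exist; SEMOP is the selector handled in the first switch. */
#include <sys/ipc.h>
#include <sys/sem.h>

extern int ipc(unsigned int call, int first, int second, int third,
               void *ptr, long fifth);                  /* assumed stub */

#define SEMOP_CALL 1   /* SEMOP selector value from the kernel ipc headers */

static int my_semop(int semid, struct sembuf *sops, unsigned nsops)
{
        /* lands in: sys_semop(first, (struct sembuf *)ptr, second) */
        return ipc(SEMOP_CALL, semid, (int) nsops, 0, sops, 0);
}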
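
sys_cacheflush() takes a user virtual address, a scope (line, page, or whole cache), a cache selector (instruction, data, or both), and a length. A hedged userspace sketch of a caller, for example after writing generated machine code into a buffer, might look like the following; it assumes an m68k build where <asm/cachectl.h> provides the FLUSH_* constants used above and <asm/unistd.h> defines __NR_cacheflush.

/* Illustrative sketch only: flushing the pages covering a freshly
   written code buffer from userspace via the cacheflush syscall. */
#include <unistd.h>
#include <asm/cachectl.h>
#include <asm/unistd.h>

static int flush_code_buffer(void *buf, unsigned long len)
{
        /* scope = FLUSH_SCOPE_PAGE: only the pages covering buf..buf+len;
           cache = FLUSH_CACHE_BOTH: push the data cache, invalidate the
           instruction cache, matching the checks in sys_cacheflush(). */
        return syscall(__NR_cacheflush, (unsigned long) buf,
                       FLUSH_SCOPE_PAGE, FLUSH_CACHE_BOTH, len);
}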
