Hi,

The following code is the file support.s from the FreeBSD 2.6
distribution for i386.  I included the entire file so you can
pick and choose as you like and you can pick up the license.
There's a generic bcopy that does overlapping, uses rep movs
in the largest chunk possible, etc.  That might do the trick.
There's a few macros around but hopefully you can decipher
them.

Later,
FM

--
Frank W. Miller
Cornfed Systems Inc
www.cornfed.com
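
For reference while reading the file, the "generic bcopy that does
overlapping" mentioned above behaves like the following C sketch (an
editorial addition, not part of the original mail or file; the real
routine does the bulk of the work with rep movs on 32-bit words):

	void
	bcopy(const void *src, void *dst, size_t len)
	{
		const char *s = src;
		char *d = dst;

		if ((size_t)(d - s) >= len) {	/* no forward overlap */
			while (len--)
				*d++ = *s++;
		} else {			/* dst inside src: go backwards */
			d += len;
			s += len;
			while (len--)
				*--d = *--s;
		}
	}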


--
/*-
 * Copyright (c) 1993 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      $Id: bcopy.s,v 1.1.1.1 2004-02-14 13:34:41 phoenix Exp $
 */

#include "npx.h"
#include "opt_cpu.h"

#include <machine/asmacros.h>
#include <machine/cputypes.h>
#include <machine/pmap.h>
#include <machine/specialreg.h>

#include "assym.s"

#define KDSEL           0x10                    /* kernel data selector */
#define IDXSHIFT        10

        .data
        .globl  _bcopy_vector
_bcopy_vector:
        .long   _generic_bcopy
        .globl  _bzero
_bzero:
        .long   _generic_bzero
        .globl  _copyin_vector
_copyin_vector:
        .long   _generic_copyin
        .globl  _copyout_vector
_copyout_vector:
        .long   _generic_copyout
        .globl  _ovbcopy_vector
_ovbcopy_vector:
        .long   _generic_bcopy
#if defined(I586_CPU) && NNPX > 0
kernel_fpu_lock:
        .byte   0xfe
        .space  3
#endif

        .text

/*
 * bcopy family
 * void bzero(void *buf, u_int len)
 */

ENTRY(generic_bzero)
        pushl   %edi
        movl    8(%esp),%edi
        movl    12(%esp),%ecx
        xorl    %eax,%eax
        shrl    $2,%ecx
        cld
        rep
        stosl
        movl    12(%esp),%ecx
        andl    $3,%ecx
        rep
        stosb
        popl    %edi
        ret
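
/*
 * Editorial sketch, not part of the original file: generic_bzero
 * above in rough C, assuming a flat address space.  The first loop
 * corresponds to the `rep stosl', the second to the `rep stosb':
 *
 *	void
 *	generic_bzero(void *buf, u_int len)
 *	{
 *		char *p = buf;
 *		u_int n;
 *
 *		for (n = len >> 2; n != 0; n--, p += 4)
 *			*(u_int *)p = 0;
 *		for (n = len & 3; n != 0; n--)
 *			*p++ = 0;
 *	}
 */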

#if defined(I486_CPU)
ENTRY(i486_bzero)
        movl    4(%esp),%edx
        movl    8(%esp),%ecx
        xorl    %eax,%eax
/*
 * do 64 byte chunks first
 *
 * XXX this is probably over-unrolled at least for DX2's
 */
2:
        cmpl    $64,%ecx
        jb      3f
        movl    %eax,(%edx)
        movl    %eax,4(%edx)
        movl    %eax,8(%edx)
        movl    %eax,12(%edx)
        movl    %eax,16(%edx)
        movl    %eax,20(%edx)
        movl    %eax,24(%edx)
        movl    %eax,28(%edx)
        movl    %eax,32(%edx)
        movl    %eax,36(%edx)
        movl    %eax,40(%edx)
        movl    %eax,44(%edx)
        movl    %eax,48(%edx)
        movl    %eax,52(%edx)
        movl    %eax,56(%edx)
        movl    %eax,60(%edx)
        addl    $64,%edx
        subl    $64,%ecx
        jnz     2b
        ret

/*
 * do 16 byte chunks
 */
        SUPERALIGN_TEXT
3:
        cmpl    $16,%ecx
        jb      4f
        movl    %eax,(%edx)
        movl    %eax,4(%edx)
        movl    %eax,8(%edx)
        movl    %eax,12(%edx)
        addl    $16,%edx
        subl    $16,%ecx
        jnz     3b
        ret

/*
 * do 4 byte chunks
 */
        SUPERALIGN_TEXT
4:
        cmpl    $4,%ecx
        jb      5f
        movl    %eax,(%edx)
        addl    $4,%edx
        subl    $4,%ecx
        jnz     4b
        ret

/*
 * do 1 byte chunks
 * a jump table seems to be faster than a loop or more range reductions
 *
 * XXX need a const section for non-text
 */
        .data
jtab:
        .long   do0
        .long   do1
        .long   do2
        .long   do3

        .text
        SUPERALIGN_TEXT
5:
        jmp     jtab(,%ecx,4)

        SUPERALIGN_TEXT
do3:
        movw    %ax,(%edx)
        movb    %al,2(%edx)
        ret

        SUPERALIGN_TEXT
do2:
        movw    %ax,(%edx)
        ret

        SUPERALIGN_TEXT
do1:
        movb    %al,(%edx)
        ret

        SUPERALIGN_TEXT
do0:
        ret
#endif
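
/*
 * Editorial note, not part of the original file: i486_bzero reduces
 * the count through the 64-, 16- and 4-byte stages above, so at
 * label 5 %ecx holds the residue 0-3 and `jmp jtab(,%ecx,4)' is an
 * indirect jump through the 4-entry table -- in C terms roughly:
 *
 *	switch (len & 3) {
 *	case 3: *(u_short *)p = 0; p[2] = 0; break;	(do3)
 *	case 2: *(u_short *)p = 0; break;		(do2)
 *	case 1: *p = 0; break;				(do1)
 *	case 0: break;					(do0)
 *	}
 */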

#if defined(I586_CPU) && NNPX > 0
ENTRY(i586_bzero)
        movl    4(%esp),%edx
        movl    8(%esp),%ecx

        /*
         * The FPU register method is twice as fast as the integer register
         * method unless the target is in the L1 cache and we pre-allocate a
         * cache line for it (then the integer register method is 4-5 times
         * faster).  However, we never pre-allocate cache lines, since that
         * would make the integer method 25% or more slower for the common
         * case when the target isn't in either the L1 cache or the L2 cache.
         * Thus we normally use the FPU register method unless the overhead
         * would be too large.
         */
        cmpl    $256,%ecx       /* empirical; clts, fninit, smsw cost a lot */
        jb      intreg_i586_bzero

        /*
         * The FPU registers may belong to an application or to fastmove()
         * or to another invocation of bcopy() or ourself in a higher level
         * interrupt or trap handler.  Preserving the registers is
         * complicated since we avoid it if possible at all levels.  We
         * want to localize the complications even when that increases them.
         * Here the extra work involves preserving CR0_TS in TS.
         * `npxproc != NULL' is supposed to be the condition that all the
         * FPU resources belong to an application, but npxproc and CR0_TS
         * aren't set atomically enough for this condition to work in
         * interrupt handlers.
         *
         * Case 1: FPU registers belong to the application: we must preserve
         * the registers if we use them, so we only use the FPU register
         * method if the target size is large enough to amortize the extra
         * overhead for preserving them.  CR0_TS must be preserved although
         * it is very likely to end up as set.
         *
         * Case 2: FPU registers belong to fastmove(): fastmove() currently
         * makes the registers look like they belong to an application so
         * that cpu_switch() and savectx() don't have to know about it, so
         * this case reduces to case 1.
         *
         * Case 3: FPU registers belong to the kernel: don't use the FPU
         * register method.  This case is unlikely, and supporting it would
         * be more complicated and might take too much stack.
         *
         * Case 4: FPU registers don't belong to anyone: the FPU registers
         * don't need to be preserved, so we always use the FPU register
         * method.  CR0_TS must be preserved although it is very likely to
         * always end up as clear.
         */
        cmpl    $0,_npxproc
        je      i586_bz1
        cmpl    $256+184,%ecx           /* empirical; not quite 2*108 more */
        jb      intreg_i586_bzero
        sarb    $1,kernel_fpu_lock
        jc      intreg_i586_bzero
        smsw    %ax
        clts
        subl    $108,%esp
        fnsave  0(%esp)
        jmp     i586_bz2

i586_bz1:
        sarb    $1,kernel_fpu_lock
        jc      intreg_i586_bzero
        smsw    %ax
        clts
        fninit                          /* XXX should avoid needing this */
i586_bz2:
        fldz

        /*
         * Align to an 8 byte boundary (misalignment in the main loop would
         * cost a factor of >= 2).  Avoid jumps (at little cost if it is
         * already aligned) by always zeroing 8 bytes and using the part up
         * to the _next_ alignment position.
         */
        fstl    0(%edx)
        addl    %edx,%ecx               /* part of %ecx -= new_%edx - %edx */
        addl    $8,%edx
        andl    $~7,%edx
        subl    %edx,%ecx

        /*
         * Similarly align `len' to a multiple of 8.
         */
        fstl    -8(%edx,%ecx)
        decl    %ecx
        andl    $~7,%ecx
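
        /*
         * Editorial example, not part of the original file: worked
         * through for %edx = 0x1003, %ecx = 100.  The first fstl
         * zeroes 0x1003..0x100a, then %edx = (0x1003 + 8) & ~7 =
         * 0x1008 and %ecx = 100 - (0x1008 - 0x1003) = 95.  The second
         * fstl zeroes the last 8 bytes of the original region, and
         * %ecx = (95 - 1) & ~7 = 88, a multiple of 8 whose tail was
         * already covered by that store.
         */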

        /*
         * This wouldn't be any faster if it were unrolled, since the loop
         * control instructions are much faster than the fstl and/or done
         * in parallel with it so their overhead is insignificant.
         */
fpureg_i586_bzero_loop:
        fstl    0(%edx)
        addl    $8,%edx
        subl    $8,%ecx
        cmpl    $8,%ecx
        jae     fpureg_i586_bzero_loop

        cmpl    $0,_npxproc
        je      i586_bz3
        frstor  0(%esp)
        addl    $108,%esp
        lmsw    %ax
        movb    $0xfe,kernel_fpu_lock
        ret

i586_bz3:
        fstpl   %st(0)
        lmsw    %ax
        movb    $0xfe,kernel_fpu_lock
        ret

intreg_i586_bzero:
        /*
         * `rep stos' seems to be the best method in practice for small
         * counts.  Fancy methods usually take too long to start up due
         * to cache and BTB misses.
         */
        pushl   %edi
        movl    %edx,%edi
        xorl    %eax,%eax
        shrl    $2,%ecx
        cld
        rep
        stosl
        movl    12(%esp),%ecx
        andl    $3,%ecx
        jne     1f
        popl    %edi
        ret

1:
        rep
        stosb
        popl    %edi
        ret
#endif /* I586_CPU && NNPX > 0 */

/* fillw(pat, base, cnt) */
ENTRY(fillw)
        pushl   %edi
        movl    8(%esp),%eax
        movl    12(%esp),%edi
        movl    16(%esp),%ecx
        cld
        rep
        stosw
        popl    %edi
        ret

ENTRY(bcopyb)
bcopyb:
        pushl   %esi
        pushl   %edi
        movl    12(%esp),%esi
        movl    16(%esp),%edi
        movl    20(%esp),%ecx
        movl    %edi,%eax
        subl    %esi,%eax
        cmpl    %ecx,%eax                       /* overlapping && src < dst? */
        jb      1f
        cld                                     /* nope, copy forwards */
        rep
        movsb
        popl    %edi
        popl    %esi
        ret

        ALIGN_TEXT
1:
        addl    %ecx,%edi                       /* copy backwards. */
        addl    %ecx,%esi
        decl    %edi
        decl    %esi
        std
        rep
        movsb
        popl    %edi
        popl    %esi
        cld
        ret

ENTRY(bcopy)
        MEXITCOUNT
        jmp     *_bcopy_vector

ENTRY(ovbcopy)
        MEXITCOUNT
        jmp     *_ovbcopy_vector

/*
 * generic_bcopy(src, dst, cnt)
 *  ws@tools.de     (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
 */
ENTRY(generic_bcopy)
        pushl   %esi
        pushl   %edi
        movl    12(%esp),%esi
        movl    16(%esp),%edi
        movl    20(%esp),%ecx

        movl    %edi,%eax
        subl    %esi,%eax
        cmpl    %ecx,%eax                       /* overlapping && src < dst? */
        jb      1f

        shrl    $2,%ecx                         /* copy by 32-bit words */
        cld                                     /* nope, copy forwards */
        rep
        movsl
        movl    20(%esp),%ecx
        andl    $3,%ecx                         /* any bytes left? */
        rep
        movsb
        popl    %edi
        popl    %esi
        ret

        ALIGN_TEXT
1:
        addl    %ecx,%edi                       /* copy backwards */
        addl    %ecx,%esi
        decl    %edi
        decl    %esi
        andl    $3,%ecx                         /* any fractional bytes? */
        std
        rep
        movsb
        movl    20(%esp),%ecx                   /* copy remainder by 32-bit words */
        shrl    $2,%ecx
        subl    $3,%esi
        subl    $3,%edi
        rep
        movsl
        popl    %edi
        popl    %esi
        cld
        ret
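
/*
 * Editorial note, not part of the original file: the overlap test
 * above exploits unsigned wraparound.  %eax = dst - src (mod 2^32),
 * and `jb 1f' takes the backwards path exactly when (unsigned)
 * (dst - src) < len, i.e. when the destination starts inside the
 * source region.  For src >= dst the subtraction wraps to a huge
 * unsigned value, so those overlapping cases safely take the cheap
 * forwards copy.
 */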

#if defined(I586_CPU) && NNPX > 0
ENTRY(i586_bcopy)
        pushl   %esi
        pushl   %edi
        movl    12(%esp),%esi
        movl    16(%esp),%edi
        movl    20(%esp),%ecx

        movl    %edi,%eax
        subl    %esi,%eax
        cmpl    %ecx,%eax                       /* overlapping && src < dst? */
        jb      1f

        cmpl    $1024,%ecx
        jb      small_i586_bcopy

        sarb    $1,kernel_fpu_lock
        jc      small_i586_bcopy
        cmpl    $0,_npxproc
        je      i586_bc1
        smsw    %dx
        clts
        subl    $108,%esp
        fnsave  0(%esp)
        jmp     4f

i586_bc1:
        smsw    %dx
        clts
        fninit                          /* XXX should avoid needing this */

        ALIGN_TEXT
4:
        pushl   %ecx
#define DCACHE_SIZE     8192
        cmpl    $(DCACHE_SIZE-512)/2,%ecx
        jbe     2f
        movl    $(DCACHE_SIZE-512)/2,%ecx
2:
        subl    %ecx,0(%esp)
        cmpl    $256,%ecx
        jb      5f                      /* XXX should prefetch if %ecx >= 32 */
        pushl   %esi
        pushl   %ecx
        ALIGN_TEXT
3:
        movl    0(%esi),%eax
        movl    32(%esi),%eax
        movl    64(%esi),%eax
        movl    96(%esi),%eax
        movl    128(%esi),%eax
        movl    160(%esi),%eax
        movl    192(%esi),%eax
        movl    224(%esi),%eax
        addl    $256,%esi
        subl    $256,%ecx
        cmpl    $256,%ecx
        jae     3b
        popl    %ecx
        popl    %esi
5:
        ALIGN_TEXT
large_i586_bcopy_loop:
        fildq   0(%esi)
        fildq   8(%esi)
        fildq   16(%esi)
        fildq   24(%esi)
        fildq   32(%esi)
        fildq   40(%esi)
        fildq   48(%esi)
        fildq   56(%esi)
        fistpq  56(%edi)
        fistpq  48(%edi)
        fistpq  40(%edi)
        fistpq  32(%edi)
        fistpq  24(%edi)
        fistpq  16(%edi)
        fistpq  8(%edi)
        fistpq  0(%edi)
        addl    $64,%esi
        addl    $64,%edi
        subl    $64,%ecx
        cmpl    $64,%ecx
        jae     large_i586_bcopy_loop
        popl    %eax
        addl    %eax,%ecx
        cmpl    $64,%ecx
        jae     4b

        cmpl    $0,_npxproc
        je      i586_bc2
        frstor  0(%esp)
        addl    $108,%esp
i586_bc2:
        lmsw    %dx
        movb    $0xfe,kernel_fpu_lock

/*
 * This is a duplicate of the main part of generic_bcopy.  See the comments
 * there.  Jumping into generic_bcopy would cost a whole 0-1 cycles and
 * would mess up high resolution profiling.
 */
        ALIGN_TEXT
small_i586_bcopy:
        shrl    $2,%ecx
        cld
        rep
        movsl
        movl    20(%esp),%ecx
        andl    $3,%ecx
        rep
        movsb
        popl    %edi
        popl    %esi
        ret

        ALIGN_TEXT
1:
        addl    %ecx,%edi
        addl    %ecx,%esi
        decl    %edi
        decl    %esi
        andl    $3,%ecx
        std
        rep
        movsb
        movl    20(%esp),%ecx
        shrl    $2,%ecx
        subl    $3,%esi
        subl    $3,%edi
        rep
        movsl
        popl    %edi
        popl    %esi
        cld
        ret
#endif /* I586_CPU && NNPX > 0 */

/*
 * Note: memcpy does not support overlapping copies
 */
ENTRY(memcpy)
        pushl   %edi
        pushl   %esi
        movl    12(%esp),%edi
        movl    16(%esp),%esi
        movl    20(%esp),%ecx
        movl    %edi,%eax
        shrl    $2,%ecx                         /* copy by 32-bit words */
        cld                                     /* copy forwards */
        rep
        movsl
        movl    20(%esp),%ecx
        andl    $3,%ecx                         /* any bytes left? */
        rep
        movsb
        popl    %esi
        popl    %edi
        ret
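
/*
 * Editorial note, not part of the original file: unlike bcopy,
 * memcpy takes (dst, src, len), performs no overlap test, and
 * returns the original destination pointer -- that is what the
 * `movl %edi,%eax' before the copy is for.
 */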


/*****************************************************************************/
/* copyout and fubyte family                                                 */
/*****************************************************************************/
/*
 * Access user memory from inside the kernel. These routines and possibly
 * the math- and DOS emulators should be the only places that do this.
 *
 * We have to access the memory with user's permissions, so use a segment
 * selector with RPL 3. For writes to user space we have to additionally
 * check the PTE for write permission, because the 386 does not check
 * write permissions when we are executing with EPL 0. The 486 does check
 * this if the WP bit is set in CR0, so we can use a simpler version here.
 *
 * These routines set curpcb->onfault for the time they execute. When a
 * protection violation occurs inside the functions, the trap handler
 * returns to *curpcb->onfault instead of the function.
 */
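
/*
 * Editorial sketch, not part of the original file: the onfault
 * protocol shared by the routines below, in C-like terms --
 *
 *	curpcb->pcb_onfault = fault_label;	set recovery point
 *	... touch user memory; on a protection fault the trap
 *	    handler resumes execution at *curpcb->pcb_onfault ...
 *	curpcb->pcb_onfault = 0;		success path
 *	return 0;				(or EFAULT from the
 *						 fault label)
 */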

/* copyout(from_kernel, to_user, len) */
ENTRY(copyout)
        MEXITCOUNT
        jmp     *_copyout_vector

ENTRY(generic_copyout)
        movl    _curpcb,%eax
        movl    $copyout_fault,PCB_ONFAULT(%eax)
        pushl   %esi
        pushl   %edi
        pushl   %ebx
        movl    16(%esp),%esi
        movl    20(%esp),%edi
        movl    24(%esp),%ebx
        testl   %ebx,%ebx                       /* anything to do? */
        jz      done_copyout

        /*
         * Check explicitly for non-user addresses.  If 486 write protection
         * is being used, this check is essential because we are in kernel
         * mode so the h/w does not provide any protection against writing
         * kernel addresses.
         */

        /*
         * First, prevent address wrapping.
         */
        movl    %edi,%eax
        addl    %ebx,%eax
        jc      copyout_fault
/*
 * XXX STOP USING VM_MAXUSER_ADDRESS.
 * It is an end address, not a max, so every time it is used correctly it
 * looks like there is an off by one error, and of course it caused an off
 * by one error in several places.
 */
        cmpl    $VM_MAXUSER_ADDRESS,%eax
        ja      copyout_fault

#if defined(I386_CPU)

#if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
        cmpl    $CPUCLASS_386,_cpu_class
        jne     3f
#endif
/*
 * We have to check each PTE for user write permission.
 * The checking may cause a page fault, so it is important to set
 * up everything for return via copyout_fault before here.
 */
        /* compute number of pages */
        movl    %edi,%ecx
        andl    $PAGE_MASK,%ecx
        addl    %ebx,%ecx
        decl    %ecx
        shrl    $IDXSHIFT+2,%ecx
        incl    %ecx

        /* compute PTE offset for start address */
        movl    %edi,%edx
        shrl    $IDXSHIFT,%edx
        andb    $0xfc,%dl

1:
        /* check PTE for each page */
        leal    _PTmap(%edx),%eax
        shrl    $IDXSHIFT,%eax
        andb    $0xfc,%al
        testb   $PG_V,_PTmap(%eax)              /* PTE page must be valid */
        je      4f
        movb    _PTmap(%edx),%al
        andb    $PG_V|PG_RW|PG_U,%al            /* page must be valid and user writable */
        cmpb    $PG_V|PG_RW|PG_U,%al
        je      2f

4:
        /* simulate a trap */
        pushl   %edx
        pushl   %ecx
        shll    $IDXSHIFT,%edx
        pushl   %edx
        call    _trapwrite                      /* trapwrite(addr) */
        popl    %edx
        popl    %ecx
        popl    %edx

        testl   %eax,%eax                       /* if not ok, return EFAULT */
        jnz     copyout_fault

2:
        addl    $4,%edx
        decl    %ecx
        jnz     1b                              /* check next page */
#endif /* I386_CPU */

        /* bcopy(%esi, %edi, %ebx) */
3:
        movl    %ebx,%ecx

#if defined(I586_CPU) && NNPX > 0
        ALIGN_TEXT
slow_copyout:
#endif
        shrl    $2,%ecx
        cld
        rep
        movsl
        movb    %bl,%cl
        andb    $3,%cl
        rep
        movsb

done_copyout:
        popl    %ebx
        popl    %edi
        popl    %esi
        xorl    %eax,%eax
        movl    _curpcb,%edx
        movl    %eax,PCB_ONFAULT(%edx)
        ret

        ALIGN_TEXT
copyout_fault:
        popl    %ebx
        popl    %edi
        popl    %esi
        movl    _curpcb,%edx
        movl    $0,PCB_ONFAULT(%edx)
        movl    $EFAULT,%eax
        ret

#if defined(I586_CPU) && NNPX > 0
ENTRY(i586_copyout)
        /*
         * Duplicated from generic_copyout.  Could be done a bit better.
         */
        movl    _curpcb,%eax
        movl    $copyout_fault,PCB_ONFAULT(%eax)
        pushl   %esi
        pushl   %edi
        pushl   %ebx
        movl    16(%esp),%esi
        movl    20(%esp),%edi
        movl    24(%esp),%ebx
        testl   %ebx,%ebx                       /* anything to do? */
        jz      done_copyout

        /*
         * Check explicitly for non-user addresses.  If 486 write protection
         * is being used, this check is essential because we are in kernel
         * mode so the h/w does not provide any protection against writing
         * kernel addresses.
         */

        /*
         * First, prevent address wrapping.
         */
        movl    %edi,%eax
        addl    %ebx,%eax
        jc      copyout_fault
/*
 * XXX STOP USING VM_MAXUSER_ADDRESS.
 * It is an end address, not a max, so every time it is used correctly it
 * looks like there is an off by one error, and of course it caused an off
 * by one error in several places.
 */
        cmpl    $VM_MAXUSER_ADDRESS,%eax
        ja      copyout_fault

        /* bcopy(%esi, %edi, %ebx) */
3:
        movl    %ebx,%ecx
        /*
         * End of duplicated code.
         */

        cmpl    $1024,%ecx
        jb      slow_copyout

        pushl   %ecx
        call    _fastmove
        addl    $4,%esp
        jmp     done_copyout
#endif /* I586_CPU && NNPX > 0 */

/* copyin(from_user, to_kernel, len) */
ENTRY(copyin)
        MEXITCOUNT
        jmp     *_copyin_vector

ENTRY(generic_copyin)
        movl    _curpcb,%eax
        movl    $copyin_fault,PCB_ONFAULT(%eax)
        pushl   %esi
        pushl   %edi
        movl    12(%esp),%esi                   /* caddr_t from */
        movl    16(%esp),%edi                   /* caddr_t to */
        movl    20(%esp),%ecx                   /* size_t  len */

        /*
         * make sure address is valid
         */
        movl    %esi,%edx
        addl    %ecx,%edx
        jc      copyin_fault
        cmpl    $VM_MAXUSER_ADDRESS,%edx
        ja      copyin_fault

#if defined(I586_CPU) && NNPX > 0
        ALIGN_TEXT
slow_copyin:
#endif
        movb    %cl,%al
        shrl    $2,%ecx                         /* copy longword-wise */
        cld
        rep
        movsl
        movb    %al,%cl
        andb    $3,%cl                          /* copy remaining bytes */
        rep
        movsb

#if defined(I586_CPU) && NNPX > 0
        ALIGN_TEXT
done_copyin:
#endif
        popl    %edi
        popl    %esi
        xorl    %eax,%eax
        movl    _curpcb,%edx
        movl    %eax,PCB_ONFAULT(%edx)
        ret

        ALIGN_TEXT
copyin_fault:
        popl    %edi
        popl    %esi
        movl    _curpcb,%edx
        movl    $0,PCB_ONFAULT(%edx)
        movl    $EFAULT,%eax
        ret

#if defined(I586_CPU) && NNPX > 0
ENTRY(i586_copyin)
        /*
         * Duplicated from generic_copyin.  Could be done a bit better.
         */
        movl    _curpcb,%eax
        movl    $copyin_fault,PCB_ONFAULT(%eax)
        pushl   %esi
        pushl   %edi
        movl    12(%esp),%esi                   /* caddr_t from */
        movl    16(%esp),%edi                   /* caddr_t to */
        movl    20(%esp),%ecx                   /* size_t  len */

        /*
         * make sure address is valid
         */
        movl    %esi,%edx
        addl    %ecx,%edx
        jc      copyin_fault
        cmpl    $VM_MAXUSER_ADDRESS,%edx
        ja      copyin_fault
        /*
         * End of duplicated code.
         */

        cmpl    $1024,%ecx
        jb      slow_copyin

        pushl   %ebx                    /* XXX prepare for fastmove_fault */
        pushl   %ecx
        call    _fastmove
        addl    $8,%esp
        jmp     done_copyin
#endif /* I586_CPU && NNPX > 0 */

#if defined(I586_CPU) && NNPX > 0
/* fastmove(src, dst, len)
        src in %esi
        dst in %edi
        len in %ecx             XXX changed to on stack for profiling
        uses %eax and %edx for tmp. storage
 */
/* XXX use ENTRY() to get profiling.  fastmove() is actually a non-entry. */
ENTRY(fastmove)
        pushl   %ebp
        movl    %esp,%ebp
        subl    $PCB_SAVEFPU_SIZE+3*4,%esp

        movl    8(%ebp),%ecx
        cmpl    $63,%ecx
        jbe     fastmove_tail

        testl   $7,%esi /* check if src addr is multiple of 8 */
        jnz     fastmove_tail

        testl   $7,%edi /* check if dst addr is multiple of 8 */
        jnz     fastmove_tail

/* if (npxproc != NULL) { */
        cmpl    $0,_npxproc
        je      6f
/*    fnsave(&curpcb->pcb_savefpu); */
        movl    _curpcb,%eax
        fnsave  PCB_SAVEFPU(%eax)
/*   npxproc = NULL; */
        movl    $0,_npxproc
/* } */
6:
/* now we own the FPU. */

/*
 * The process' FP state is saved in the pcb, but if we get
 * switched, the cpu_switch() will store our FP state in the
 * pcb.  It should be possible to avoid all the copying for
 * this, e.g., by setting a flag to tell cpu_switch() to
 * save the state somewhere else.
 */
/* tmp = curpcb->pcb_savefpu; */
        movl    %ecx,-12(%ebp)
        movl    %esi,-8(%ebp)
        movl    %edi,-4(%ebp)
        movl    %esp,%edi
        movl    _curpcb,%esi
        addl    $PCB_SAVEFPU,%esi
        cld
        movl    $PCB_SAVEFPU_SIZE>>2,%ecx
        rep
        movsl
        movl    -12(%ebp),%ecx
        movl    -8(%ebp),%esi
        movl    -4(%ebp),%edi
/* stop_emulating(); */
        clts
/* npxproc = curproc; */
        movl    _curproc,%eax
        movl    %eax,_npxproc
        movl    _curpcb,%eax
        movl    $fastmove_fault,PCB_ONFAULT(%eax)
4:
        movl    %ecx,-12(%ebp)
        cmpl    $1792,%ecx
        jbe     2f
        movl    $1792,%ecx
2:
        subl    %ecx,-12(%ebp)
        cmpl    $256,%ecx
        jb      5f
        movl    %ecx,-8(%ebp)
        movl    %esi,-4(%ebp)
        ALIGN_TEXT
3:
        movl    0(%esi),%eax
        movl    32(%esi),%eax
        movl    64(%esi),%eax
        movl    96(%esi),%eax
        movl    128(%esi),%eax
        movl    160(%esi),%eax
        movl    192(%esi),%eax
        movl    224(%esi),%eax
        addl    $256,%esi
        subl    $256,%ecx
        cmpl    $256,%ecx
        jae     3b
        movl    -8(%ebp),%ecx
        movl    -4(%ebp),%esi
5:
        ALIGN_TEXT
fastmove_loop:
        fildq   0(%esi)
        fildq   8(%esi)
        fildq   16(%esi)
        fildq   24(%esi)
        fildq   32(%esi)
        fildq   40(%esi)
        fildq   48(%esi)
        fildq   56(%esi)
        fistpq  56(%edi)
        fistpq  48(%edi)
        fistpq  40(%edi)
        fistpq  32(%edi)
        fistpq  24(%edi)
        fistpq  16(%edi)
        fistpq  8(%edi)
        fistpq  0(%edi)
        addl    $-64,%ecx
        addl    $64,%esi
        addl    $64,%edi
        cmpl    $63,%ecx
        ja      fastmove_loop
        movl    -12(%ebp),%eax
        addl    %eax,%ecx
        cmpl    $64,%ecx
        jae     4b

/* curpcb->pcb_savefpu = tmp; */
        movl    %ecx,-12(%ebp)
        movl    %esi,-8(%ebp)
        movl    %edi,-4(%ebp)
        movl    _curpcb,%edi
        addl    $PCB_SAVEFPU,%edi
        movl    %esp,%esi
        cld
        movl    $PCB_SAVEFPU_SIZE>>2,%ecx
        rep
        movsl
        movl    -12(%ebp),%ecx
        movl    -8(%ebp),%esi
        movl    -4(%ebp),%edi

/* start_emulating(); */
        smsw    %ax
        orb     $CR0_TS,%al
        lmsw    %ax
/* npxproc = NULL; */
        movl    $0,_npxproc

        ALIGN_TEXT
fastmove_tail:
        movl    _curpcb,%eax
        movl    $fastmove_tail_fault,PCB_ONFAULT(%eax)

        movb    %cl,%al
        shrl    $2,%ecx                         /* copy longword-wise */
        cld
        rep
        movsl
        movb    %al,%cl
        andb    $3,%cl                          /* copy remaining bytes */
        rep
        movsb

        movl    %ebp,%esp
        popl    %ebp
        ret

        ALIGN_TEXT
fastmove_fault:
        movl    _curpcb,%edi
        addl    $PCB_SAVEFPU,%edi
        movl    %esp,%esi
        cld
        movl    $PCB_SAVEFPU_SIZE>>2,%ecx
        rep
        movsl

        smsw    %ax
        orb     $CR0_TS,%al
        lmsw    %ax
        movl    $0,_npxproc

fastmove_tail_fault:
        movl    %ebp,%esp
        popl    %ebp
        addl    $8,%esp
        popl    %ebx
        popl    %edi
        popl    %esi
        movl    _curpcb,%edx
        movl    $0,PCB_ONFAULT(%edx)
        movl    $EFAULT,%eax
        ret
#endif /* I586_CPU && NNPX > 0 */

/*
 * fu{byte,sword,word} : fetch a byte (sword, word) from user memory
 */
ENTRY(fuword)
        movl    _curpcb,%ecx
        movl    $fusufault,PCB_ONFAULT(%ecx)
        movl    4(%esp),%edx                    /* from */

        cmpl    $VM_MAXUSER_ADDRESS-4,%edx      /* verify address is valid */
        ja      fusufault

        movl    (%edx),%eax
        movl    $0,PCB_ONFAULT(%ecx)
        ret

/*
 * These two routines are called from the profiling code, potentially
 * at interrupt time. If they fail, that's okay, good things will
 * happen later. Fail all the time for now - until the trap code is
 * able to deal with this.
 */
ALTENTRY(suswintr)
ENTRY(fuswintr)
        movl    $-1,%eax
        ret

ENTRY(fusword)
        movl    _curpcb,%ecx
        movl    $fusufault,PCB_ONFAULT(%ecx)
        movl    4(%esp),%edx

        cmpl    $VM_MAXUSER_ADDRESS-2,%edx
        ja      fusufault

        movzwl  (%edx),%eax
        movl    $0,PCB_ONFAULT(%ecx)
        ret

ENTRY(fubyte)
        movl    _curpcb,%ecx
        movl    $fusufault,PCB_ONFAULT(%ecx)
        movl    4(%esp),%edx

        cmpl    $VM_MAXUSER_ADDRESS-1,%edx
        ja      fusufault

        movzbl  (%edx),%eax
        movl    $0,PCB_ONFAULT(%ecx)
        ret

        ALIGN_TEXT
fusufault:
        movl    _curpcb,%ecx
        xorl    %eax,%eax
        movl    %eax,PCB_ONFAULT(%ecx)
        decl    %eax
        ret
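
/*
 * Editorial note, not part of the original file: fusufault returns
 * -1 (%eax is zeroed, then decremented), so a successful fuword() of
 * a word that happens to contain -1 is indistinguishable from a
 * fault; callers must treat -1 as "possibly failed".  The su*()
 * routines below return -1 only through this same path.
 */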

/*
 * su{byte,sword,word}: write a byte (word, longword) to user memory
 */
ENTRY(suword)
        movl    _curpcb,%ecx
        movl    $fusufault,PCB_ONFAULT(%ecx)
        movl    4(%esp),%edx

#if defined(I386_CPU)

#if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
        cmpl    $CPUCLASS_386,_cpu_class
        jne     2f                              /* we only have to set the right segment selector */
#endif /* I486_CPU || I586_CPU || I686_CPU */

        /* XXX - page boundary crossing is still not handled */
        movl    %edx,%eax
        shrl    $IDXSHIFT,%edx
        andb    $0xfc,%dl

        leal    _PTmap(%edx),%ecx
        shrl    $IDXSHIFT,%ecx
        andb    $0xfc,%cl
        testb   $PG_V,_PTmap(%ecx)              /* PTE page must be valid */
        je      4f
        movb    _PTmap(%edx),%dl
        andb    $PG_V|PG_RW|PG_U,%dl            /* page must be valid and user writable */
        cmpb    $PG_V|PG_RW|PG_U,%dl
        je      1f

4:
        /* simulate a trap */
        pushl   %eax
        call    _trapwrite
        popl    %edx                            /* remove junk parameter from stack */
        testl   %eax,%eax
        jnz     fusufault
1:
        movl    4(%esp),%edx
#endif

2:
        cmpl    $VM_MAXUSER_ADDRESS-4,%edx      /* verify address validity */
        ja      fusufault

        movl    8(%esp),%eax
        movl    %eax,(%edx)
        xorl    %eax,%eax
        movl    _curpcb,%ecx
        movl    %eax,PCB_ONFAULT(%ecx)
        ret

ENTRY(susword)
        movl    _curpcb,%ecx
        movl    $fusufault,PCB_ONFAULT(%ecx)
        movl    4(%esp),%edx

#if defined(I386_CPU)

#if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
        cmpl    $CPUCLASS_386,_cpu_class
        jne     2f
#endif /* I486_CPU || I586_CPU || I686_CPU */

        /* XXX - page boundary crossing is still not handled */
        movl    %edx,%eax
        shrl    $IDXSHIFT,%edx
        andb    $0xfc,%dl

        leal    _PTmap(%edx),%ecx
        shrl    $IDXSHIFT,%ecx
        andb    $0xfc,%cl
        testb   $PG_V,_PTmap(%ecx)              /* PTE page must be valid */
        je      4f
        movb    _PTmap(%edx),%dl
        andb    $PG_V|PG_RW|PG_U,%dl            /* page must be valid and user writable */
        cmpb    $PG_V|PG_RW|PG_U,%dl
        je      1f

4:
        /* simulate a trap */
        pushl   %eax
        call    _trapwrite
        popl    %edx                            /* remove junk parameter from stack */
        testl   %eax,%eax
        jnz     fusufault
1:
        movl    4(%esp),%edx
#endif

2:
        cmpl    $VM_MAXUSER_ADDRESS-2,%edx      /* verify address validity */
        ja      fusufault

        movw    8(%esp),%ax
        movw    %ax,(%edx)
        xorl    %eax,%eax
        movl    _curpcb,%ecx                    /* restore trashed register */
        movl    %eax,PCB_ONFAULT(%ecx)
        ret

ALTENTRY(suibyte)
ENTRY(subyte)
        movl    _curpcb,%ecx
        movl    $fusufault,PCB_ONFAULT(%ecx)
        movl    4(%esp),%edx

#if defined(I386_CPU)

#if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
        cmpl    $CPUCLASS_386,_cpu_class
        jne     2f
#endif /* I486_CPU || I586_CPU || I686_CPU */

        movl    %edx,%eax
        shrl    $IDXSHIFT,%edx
        andb    $0xfc,%dl

        leal    _PTmap(%edx),%ecx
        shrl    $IDXSHIFT,%ecx
        andb    $0xfc,%cl
        testb   $PG_V,_PTmap(%ecx)              /* PTE page must be valid */
        je      4f
        movb    _PTmap(%edx),%dl
        andb    $PG_V|PG_RW|PG_U,%dl            /* page must be valid and user writable */
        cmpb    $PG_V|PG_RW|PG_U,%dl
        je      1f

4:
        /* simulate a trap */
        pushl   %eax
        call    _trapwrite
        popl    %edx                            /* remove junk parameter from stack */
        testl   %eax,%eax
        jnz     fusufault
1:
        movl    4(%esp),%edx
#endif

2:
        cmpl    $VM_MAXUSER_ADDRESS-1,%edx      /* verify address validity */
        ja      fusufault

        movb    8(%esp),%al
        movb    %al,(%edx)
        xorl    %eax,%eax
        movl    _curpcb,%ecx                    /* restore trashed register */
        movl    %eax,PCB_ONFAULT(%ecx)
        ret

/*
 * copyinstr(from, to, maxlen, int *lencopied)
 *      copy a string from from to to, stop when a 0 character is reached.
 *      return ENAMETOOLONG if string is longer than maxlen, and
 *      EFAULT on protection violations. If lencopied is non-zero,
 *      return the actual length in *lencopied.
 */
ENTRY(copyinstr)
        pushl   %esi
        pushl   %edi
        movl    _curpcb,%ecx
        movl    $cpystrflt,PCB_ONFAULT(%ecx)

        movl    12(%esp),%esi                   /* %esi = from */
        movl    16(%esp),%edi                   /* %edi = to */
        movl    20(%esp),%edx                   /* %edx = maxlen */

        movl    $VM_MAXUSER_ADDRESS,%eax

        /* make sure 'from' is within bounds */
        subl    %esi,%eax
        jbe     cpystrflt

        /* restrict maxlen to <= VM_MAXUSER_ADDRESS-from */
        cmpl    %edx,%eax
        jae     1f
        movl    %eax,%edx
        movl    %eax,20(%esp)
1:
        incl    %edx
        cld

2:
        decl    %edx
        jz      3f

        lodsb
        stosb
        orb     %al,%al
        jnz     2b

        /* Success -- 0 byte reached */
        decl    %edx
        xorl    %eax,%eax
        jmp     cpystrflt_x
3:
        /* edx is zero - return ENAMETOOLONG or EFAULT */
        cmpl    $VM_MAXUSER_ADDRESS,%esi
        jae     cpystrflt
4:
        movl    $ENAMETOOLONG,%eax
        jmp     cpystrflt_x

cpystrflt:
        movl    $EFAULT,%eax

cpystrflt_x:
        /* set *lencopied and return %eax */
        movl    _curpcb,%ecx
        movl    $0,PCB_ONFAULT(%ecx)
        movl    20(%esp),%ecx
        subl    %edx,%ecx
        movl    24(%esp),%edx
        testl   %edx,%edx
        jz      1f
        movl    %ecx,(%edx)
1:
        popl    %edi
        popl    %esi
        ret


/*
 * copystr(from, to, maxlen, int *lencopied)
 */
ENTRY(copystr)
        pushl   %esi
        pushl   %edi

        movl    12(%esp),%esi                   /* %esi = from */
        movl    16(%esp),%edi                   /* %edi = to */
        movl    20(%esp),%edx                   /* %edx = maxlen */
        incl    %edx
        cld
1:
        decl    %edx
        jz      4f
        lodsb
        stosb
        orb     %al,%al
        jnz     1b

        /* Success -- 0 byte reached */
        decl    %edx
        xorl    %eax,%eax
        jmp     6f
4:
        /* edx is zero -- return ENAMETOOLONG */
        movl    $ENAMETOOLONG,%eax

6:
        /* set *lencopied and return %eax */
        movl    20(%esp),%ecx
        subl    %edx,%ecx
        movl    24(%esp),%edx
        testl   %edx,%edx
        jz      7f
        movl    %ecx,(%edx)
7:
        popl    %edi
        popl    %esi
        ret

ENTRY(bcmp)
        pushl   %edi
        pushl   %esi
        movl    12(%esp),%edi
        movl    16(%esp),%esi
        movl    20(%esp),%edx
        xorl    %eax,%eax

        movl    %edx,%ecx
        shrl    $2,%ecx
        cld                                     /* compare forwards */
        repe
        cmpsl
        jne     1f

        movl    %edx,%ecx
        andl    $3,%ecx
        repe
        cmpsb
        je      2f
1:
        incl    %eax
2:
        popl    %esi
        popl    %edi
        ret
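
/*
 * Editorial sketch, not part of the original file: bcmp above is
 * boolean rather than ordered like memcmp -- it returns 0 iff the
 * two regions are byte-for-byte equal and 1 otherwise, i.e. roughly:
 *
 *	int
 *	bcmp(const void *b1, const void *b2, size_t len)
 *	{
 *		return (memcmp(b1, b2, len) != 0);
 *	}
 */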


/*
 * Handling of special 386 registers and descriptor tables etc
 */
/* void lgdt(struct region_descriptor *rdp); */
ENTRY(lgdt)
        /* reload the descriptor table */
        movl    4(%esp),%eax
        lgdt    (%eax)

        /* flush the prefetch q */
        jmp     1f
        nop
1:
        /* reload "stale" selectors */
        movl    $KDSEL,%eax
        movl    %ax,%ds
        movl    %ax,%es
        movl    %ax,%ss

        /* reload code selector by turning return into intersegmental return */
        movl    (%esp),%eax
        pushl   %eax
#       movl    $KCSEL,4(%esp)
        movl    $8,4(%esp)
        lret

/*
 * void lidt(struct region_descriptor *rdp);
 */
ENTRY(lidt)
        movl    4(%esp),%eax
        lidt    (%eax)
        ret

/*
 * void lldt(u_short sel)
 */
ENTRY(lldt)
        lldt    4(%esp)
        ret

/*
 * void ltr(u_short sel)
 */
ENTRY(ltr)
        ltr     4(%esp)
        ret

/* ssdtosd(*ssdp,*sdp) */
ENTRY(ssdtosd)
        pushl   %ebx
        movl    8(%esp),%ecx
        movl    8(%ecx),%ebx
        shll    $16,%ebx
        movl    (%ecx),%edx
        roll    $16,%edx
        movb    %dh,%bl
        movb    %dl,%bh
        rorl    $8,%ebx
        movl    4(%ecx),%eax
        movw    %ax,%dx
        andl    $0xf0000,%eax
        orl     %eax,%ebx
        movl    12(%esp),%ecx
        movl    %edx,(%ecx)
        movl    %ebx,4(%ecx)
        popl    %ebx
        ret

/* load_cr0(cr0) */
ENTRY(load_cr0)
        movl    4(%esp),%eax
        movl    %eax,%cr0
        ret

/* rcr0() */
ENTRY(rcr0)
        movl    %cr0,%eax
        ret

/* rcr3() */
ENTRY(rcr3)
        movl    %cr3,%eax
        ret

/* void load_cr3(caddr_t cr3) */
ENTRY(load_cr3)
        movl    4(%esp),%eax
        movl    %eax,%cr3
        ret


/*****************************************************************************/
/* setjump, longjump                                                         */
/*****************************************************************************/

ENTRY(setjmp)
        movl    4(%esp),%eax
        movl    %ebx,(%eax)                     /* save ebx */
        movl    %esp,4(%eax)                    /* save esp */
        movl    %ebp,8(%eax)                    /* save ebp */
        movl    %esi,12(%eax)                   /* save esi */
        movl    %edi,16(%eax)                   /* save edi */
        movl    (%esp),%edx                     /* get rta */
        movl    %edx,20(%eax)                   /* save eip */
        xorl    %eax,%eax                       /* return(0); */
        ret

ENTRY(longjmp)
        movl    4(%esp),%eax
        movl    (%eax),%ebx                     /* restore ebx */
        movl    4(%eax),%esp                    /* restore esp */
        movl    8(%eax),%ebp                    /* restore ebp */
        movl    12(%eax),%esi                   /* restore esi */
        movl    16(%eax),%edi                   /* restore edi */
        movl    20(%eax),%edx                   /* get rta */
        movl    %edx,(%esp)                     /* put in return frame */
        xorl    %eax,%eax                       /* return(1); */
        incl    %eax
        ret
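
/*
 * Editorial note, not part of the original file: these are the
 * kernel's minimal setjmp/longjmp.  The jmp_buf is six longwords
 * (%ebx, %esp, %ebp, %esi, %edi, return %eip).  setjmp returns 0
 * when called directly; longjmp rewrites the saved return frame so
 * execution resumes at setjmp's return address with %eax = 1.
 */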

/*
 * Here for doing BB-profiling (gcc -a).
 * We rely on the "bbset" instead, but need a dummy function.
 */
NON_GPROF_ENTRY(__bb_init_func)
        movl    4(%esp),%eax
        movl    $1,(%eax)
        .byte   0xc3                            /* avoid macro for `ret' */
