OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

[/] [or1k/] [trunk/] [linux/] [uClibc/] [libc/] [string/] [arm/] [_memcpy.S] - Blame information for rev 1765

Details | Compare with Previous | View Log

Line No. Rev Author Line
/*-
 * Copyright (c) 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Neil A. Carson and Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Adapted for uClibc from NetBSD _memcpy.S,v 1.6 2003/10/09
 * by Erik Andersen
 */
41
#include 
42
 
43
/*
44
 * This is one fun bit of code ...
45
 * Some easy listening music is suggested while trying to understand this
46
 * code e.g. Iron Maiden
47
 *
48
 * For anyone attempting to understand it :
49
 *
50
 * The core code is implemented here with simple stubs for memcpy()
51
 * memmove() and bcopy().
52
 *
53
 * All local labels are prefixed with Lmemcpy_
54
 * Following the prefix a label starting f is used in the forward copy code
55
 * while a label using b is used in the backwards copy code
56
 * The source and destination addresses determine whether a forward or
57
 * backward copy is performed.
58
 * Separate bits of code are used to deal with the following situations
59
 * for both the forward and backwards copy.
60
 * unaligned source address
61
 * unaligned destination address
62
 * Separate copy routines are used to produce an optimised result for each
63
 * of these cases.
64
 * The copy code will use LDM/STM instructions to copy up to 32 bytes at
65
 * a time where possible.
66
 *
67
 * Note: r12 (aka ip) can be trashed during the function along with
68
 * r0-r3 although r0-r2 have defined uses i.e. src, dest, len through out.
69
 * Additional registers are preserved prior to use i.e. r4, r5 & lr
70
 *
71
 * Apologies for the state of the comments ;-)
72
 */
73
 
74
                .text
75
                .global _memcpy;
76
                .type _memcpy,%function
77
                .align 4;                                                               \
78
 
79
_memcpy:
80
        /* Determine copy direction */
81
        cmp     r1, r0
82
        bcc     .Lmemcpy_backwards
83
 
84
        moveq   r0, #0                  /* Quick abort for len=0 */
85
        moveq   pc, lr
86
 
87
        stmdb   sp!, {r0, lr}           /* memcpy() returns dest addr */
88
        subs    r2, r2, #4
89
        blt     .Lmemcpy_fl4            /* less than 4 bytes */
90
        ands    r12, r0, #3
91
        bne     .Lmemcpy_fdestul        /* oh unaligned destination addr */
92
        ands    r12, r1, #3
93
        bne     .Lmemcpy_fsrcul         /* oh unaligned source addr */
94
 
95
.Lmemcpy_ft8:
96
        /* We have aligned source and destination */
97
        subs    r2, r2, #8
98
        blt     .Lmemcpy_fl12           /* less than 12 bytes (4 from above) */
99
        subs    r2, r2, #0x14
100
        blt     .Lmemcpy_fl32           /* less than 32 bytes (12 from above) */
101
        stmdb   sp!, {r4}               /* borrow r4 */
102
 
103
        /* blat 32 bytes at a time */
104
        /* XXX for really big copies perhaps we should use more registers */
105
.Lmemcpy_floop32:
106
        ldmia   r1!, {r3, r4, r12, lr}
107
        stmia   r0!, {r3, r4, r12, lr}
108
        ldmia   r1!, {r3, r4, r12, lr}
109
        stmia   r0!, {r3, r4, r12, lr}
110
        subs    r2, r2, #0x20
111
        bge     .Lmemcpy_floop32
112
 
113
        cmn     r2, #0x10
114
        ldmgeia r1!, {r3, r4, r12, lr}  /* blat a remaining 16 bytes */
115
        stmgeia r0!, {r3, r4, r12, lr}
116
        subge   r2, r2, #0x10
117
        ldmia   sp!, {r4}               /* return r4 */
118
 
119
.Lmemcpy_fl32:
120
        adds    r2, r2, #0x14
121
 
122
        /* blat 12 bytes at a time */
123
.Lmemcpy_floop12:
124
        ldmgeia r1!, {r3, r12, lr}
125
        stmgeia r0!, {r3, r12, lr}
126
        subges  r2, r2, #0x0c
127
        bge     .Lmemcpy_floop12
128
 
129
.Lmemcpy_fl12:
130
        adds    r2, r2, #8
131
        blt     .Lmemcpy_fl4
132
 
133
        subs    r2, r2, #4
134
        ldrlt   r3, [r1], #4
135
        strlt   r3, [r0], #4
136
        ldmgeia r1!, {r3, r12}
137
        stmgeia r0!, {r3, r12}
138
        subge   r2, r2, #4
139
 
140
.Lmemcpy_fl4:
141
        /* less than 4 bytes to go */
142
        adds    r2, r2, #4
143
        ldmeqia sp!, {r0, pc}           /* done */
144
 
145
        /* copy the crud byte at a time */
146
        cmp     r2, #2
147
        ldrb    r3, [r1], #1
148
        strb    r3, [r0], #1
149
        ldrgeb  r3, [r1], #1
150
        strgeb  r3, [r0], #1
151
        ldrgtb  r3, [r1], #1
152
        strgtb  r3, [r0], #1
153
        ldmia   sp!, {r0, pc}
154
 
155
        /* erg - unaligned destination */
156
.Lmemcpy_fdestul:
157
        rsb     r12, r12, #4
158
        cmp     r12, #2
159
 
160
        /* align destination with byte copies */
161
        ldrb    r3, [r1], #1
162
        strb    r3, [r0], #1
163
        ldrgeb  r3, [r1], #1
164
        strgeb  r3, [r0], #1
165
        ldrgtb  r3, [r1], #1
166
        strgtb  r3, [r0], #1
167
        subs    r2, r2, r12
168
        blt     .Lmemcpy_fl4            /* less the 4 bytes */
169
 
170
        ands    r12, r1, #3
171
        beq     .Lmemcpy_ft8            /* we have an aligned source */
172
 
173
        /* erg - unaligned source */
174
        /* This is where it gets nasty ... */
175
.Lmemcpy_fsrcul:
176
        bic     r1, r1, #3
177
        ldr     lr, [r1], #4
178
        cmp     r12, #2
179
        bgt     .Lmemcpy_fsrcul3
180
        beq     .Lmemcpy_fsrcul2
181
        cmp     r2, #0x0c
182
        blt     .Lmemcpy_fsrcul1loop4
183
        sub     r2, r2, #0x0c
184
        stmdb   sp!, {r4, r5}
185
 
186
.Lmemcpy_fsrcul1loop16:
187
#if __BYTE_ORDER == __BIG_ENDIAN
188
        mov     r3, lr, lsl #8
189
        ldmia   r1!, {r4, r5, r12, lr}
190
        orr     r3, r3, r4, lsr #24
191
        mov     r4, r4, lsl #8
192
        orr     r4, r4, r5, lsr #24
193
        mov     r5, r5, lsl #8
194
        orr     r5, r5, r12, lsr #24
195
        mov     r12, r12, lsl #8
196
        orr     r12, r12, lr, lsr #24
197
#else
198
        mov     r3, lr, lsr #8
199
        ldmia   r1!, {r4, r5, r12, lr}
200
        orr     r3, r3, r4, lsl #24
201
        mov     r4, r4, lsr #8
202
        orr     r4, r4, r5, lsl #24
203
        mov     r5, r5, lsr #8
204
        orr     r5, r5, r12, lsl #24
205
        mov     r12, r12, lsr #8
206
        orr     r12, r12, lr, lsl #24
207
#endif
208
        stmia   r0!, {r3-r5, r12}
209
        subs    r2, r2, #0x10
210
        bge     .Lmemcpy_fsrcul1loop16
211
        ldmia   sp!, {r4, r5}
212
        adds    r2, r2, #0x0c
213
        blt     .Lmemcpy_fsrcul1l4
214
 
215
.Lmemcpy_fsrcul1loop4:
216
#if __BYTE_ORDER == __BIG_ENDIAN
217
        mov     r12, lr, lsl #8
218
        ldr     lr, [r1], #4
219
        orr     r12, r12, lr, lsr #24
220
#else
221
        mov     r12, lr, lsr #8
222
        ldr     lr, [r1], #4
223
        orr     r12, r12, lr, lsl #24
224
#endif
225
        str     r12, [r0], #4
226
        subs    r2, r2, #4
227
        bge     .Lmemcpy_fsrcul1loop4
228
 
229
.Lmemcpy_fsrcul1l4:
230
        sub     r1, r1, #3
231
        b       .Lmemcpy_fl4
232
 
233
.Lmemcpy_fsrcul2:
234
        cmp     r2, #0x0c
235
        blt     .Lmemcpy_fsrcul2loop4
236
        sub     r2, r2, #0x0c
237
        stmdb   sp!, {r4, r5}
238
 
239
.Lmemcpy_fsrcul2loop16:
240
#if __BYTE_ORDER == __BIG_ENDIAN
241
        mov     r3, lr, lsl #16
242
        ldmia   r1!, {r4, r5, r12, lr}
243
        orr     r3, r3, r4, lsr #16
244
        mov     r4, r4, lsl #16
245
        orr     r4, r4, r5, lsr #16
246
        mov     r5, r5, lsl #16
247
        orr     r5, r5, r12, lsr #16
248
        mov     r12, r12, lsl #16
249
        orr     r12, r12, lr, lsr #16
250
#else
251
        mov     r3, lr, lsr #16
252
        ldmia   r1!, {r4, r5, r12, lr}
253
        orr     r3, r3, r4, lsl #16
254
        mov     r4, r4, lsr #16
255
        orr     r4, r4, r5, lsl #16
256
        mov     r5, r5, lsr #16
257
        orr     r5, r5, r12, lsl #16
258
        mov     r12, r12, lsr #16
259
        orr     r12, r12, lr, lsl #16
260
#endif
261
        stmia   r0!, {r3-r5, r12}
262
        subs    r2, r2, #0x10
263
        bge     .Lmemcpy_fsrcul2loop16
264
        ldmia   sp!, {r4, r5}
265
        adds    r2, r2, #0x0c
266
        blt     .Lmemcpy_fsrcul2l4
267
 
268
.Lmemcpy_fsrcul2loop4:
269
#if __BYTE_ORDER == __BIG_ENDIAN
270
        mov     r12, lr, lsl #16
271
        ldr     lr, [r1], #4
272
        orr     r12, r12, lr, lsr #16
273
#else
274
        mov     r12, lr, lsr #16
275
        ldr     lr, [r1], #4
276
        orr     r12, r12, lr, lsl #16
277
#endif
278
        str     r12, [r0], #4
279
        subs    r2, r2, #4
280
        bge     .Lmemcpy_fsrcul2loop4
281
 
282
.Lmemcpy_fsrcul2l4:
283
        sub     r1, r1, #2
284
        b       .Lmemcpy_fl4
285
 
286
.Lmemcpy_fsrcul3:
287
        cmp     r2, #0x0c
288
        blt     .Lmemcpy_fsrcul3loop4
289
        sub     r2, r2, #0x0c
290
        stmdb   sp!, {r4, r5}
291
 
292
.Lmemcpy_fsrcul3loop16:
293
#if __BYTE_ORDER == __BIG_ENDIAN
294
        mov     r3, lr, lsl #24
295
        ldmia   r1!, {r4, r5, r12, lr}
296
        orr     r3, r3, r4, lsr #8
297
        mov     r4, r4, lsl #24
298
        orr     r4, r4, r5, lsr #8
299
        mov     r5, r5, lsl #24
300
        orr     r5, r5, r12, lsr #8
301
        mov     r12, r12, lsl #24
302
        orr     r12, r12, lr, lsr #8
303
#else
304
        mov     r3, lr, lsr #24
305
        ldmia   r1!, {r4, r5, r12, lr}
306
        orr     r3, r3, r4, lsl #8
307
        mov     r4, r4, lsr #24
308
        orr     r4, r4, r5, lsl #8
309
        mov     r5, r5, lsr #24
310
        orr     r5, r5, r12, lsl #8
311
        mov     r12, r12, lsr #24
312
        orr     r12, r12, lr, lsl #8
313
#endif
314
        stmia   r0!, {r3-r5, r12}
315
        subs    r2, r2, #0x10
316
        bge     .Lmemcpy_fsrcul3loop16
317
        ldmia   sp!, {r4, r5}
318
        adds    r2, r2, #0x0c
319
        blt     .Lmemcpy_fsrcul3l4
320
 
321
.Lmemcpy_fsrcul3loop4:
322
#if __BYTE_ORDER == __BIG_ENDIAN
323
        mov     r12, lr, lsl #24
324
        ldr     lr, [r1], #4
325
        orr     r12, r12, lr, lsr #8
326
#else
327
        mov     r12, lr, lsr #24
328
        ldr     lr, [r1], #4
329
        orr     r12, r12, lr, lsl #8
330
#endif
331
        str     r12, [r0], #4
332
        subs    r2, r2, #4
333
        bge     .Lmemcpy_fsrcul3loop4
334
 
335
.Lmemcpy_fsrcul3l4:
336
        sub     r1, r1, #1
337
        b       .Lmemcpy_fl4
338
 
339
.Lmemcpy_backwards:
340
        add     r1, r1, r2
341
        add     r0, r0, r2
342
        subs    r2, r2, #4
343
        blt     .Lmemcpy_bl4            /* less than 4 bytes */
344
        ands    r12, r0, #3
345
        bne     .Lmemcpy_bdestul        /* oh unaligned destination addr */
346
        ands    r12, r1, #3
347
        bne     .Lmemcpy_bsrcul         /* oh unaligned source addr */
348
 
349
.Lmemcpy_bt8:
350
        /* We have aligned source and destination */
351
        subs    r2, r2, #8
352
        blt     .Lmemcpy_bl12           /* less than 12 bytes (4 from above) */
353
        stmdb   sp!, {r4, lr}
354
        subs    r2, r2, #0x14           /* less than 32 bytes (12 from above) */
355
        blt     .Lmemcpy_bl32
356
 
357
        /* blat 32 bytes at a time */
358
        /* XXX for really big copies perhaps we should use more registers */
359
.Lmemcpy_bloop32:
360
        ldmdb   r1!, {r3, r4, r12, lr}
361
        stmdb   r0!, {r3, r4, r12, lr}
362
        ldmdb   r1!, {r3, r4, r12, lr}
363
        stmdb   r0!, {r3, r4, r12, lr}
364
        subs    r2, r2, #0x20
365
        bge     .Lmemcpy_bloop32
366
 
367
.Lmemcpy_bl32:
368
        cmn     r2, #0x10
369
        ldmgedb r1!, {r3, r4, r12, lr}  /* blat a remaining 16 bytes */
370
        stmgedb r0!, {r3, r4, r12, lr}
371
        subge   r2, r2, #0x10
372
        adds    r2, r2, #0x14
373
        ldmgedb r1!, {r3, r12, lr}      /* blat a remaining 12 bytes */
374
        stmgedb r0!, {r3, r12, lr}
375
        subge   r2, r2, #0x0c
376
        ldmia   sp!, {r4, lr}
377
 
378
.Lmemcpy_bl12:
379
        adds    r2, r2, #8
380
        blt     .Lmemcpy_bl4
381
        subs    r2, r2, #4
382
        ldrlt   r3, [r1, #-4]!
383
        strlt   r3, [r0, #-4]!
384
        ldmgedb r1!, {r3, r12}
385
        stmgedb r0!, {r3, r12}
386
        subge   r2, r2, #4
387
 
388
.Lmemcpy_bl4:
389
        /* less than 4 bytes to go */
390
        adds    r2, r2, #4
391
        moveq   pc, lr                  /* done */
392
 
393
        /* copy the crud byte at a time */
394
        cmp     r2, #2
395
        ldrb    r3, [r1, #-1]!
396
        strb    r3, [r0, #-1]!
397
        ldrgeb  r3, [r1, #-1]!
398
        strgeb  r3, [r0, #-1]!
399
        ldrgtb  r3, [r1, #-1]!
400
        strgtb  r3, [r0, #-1]!
401
        mov     pc, lr
402
 
403
        /* erg - unaligned destination */
404
.Lmemcpy_bdestul:
405
        cmp     r12, #2
406
 
407
        /* align destination with byte copies */
408
        ldrb    r3, [r1, #-1]!
409
        strb    r3, [r0, #-1]!
410
        ldrgeb  r3, [r1, #-1]!
411
        strgeb  r3, [r0, #-1]!
412
        ldrgtb  r3, [r1, #-1]!
413
        strgtb  r3, [r0, #-1]!
414
        subs    r2, r2, r12
415
        blt     .Lmemcpy_bl4            /* less than 4 bytes to go */
416
        ands    r12, r1, #3
417
        beq     .Lmemcpy_bt8            /* we have an aligned source */
418
 
419
        /* erg - unaligned source */
420
        /* This is where it gets nasty ... */
421
.Lmemcpy_bsrcul:
422
        bic     r1, r1, #3
423
        ldr     r3, [r1, #0]
424
        cmp     r12, #2
425
        blt     .Lmemcpy_bsrcul1
426
        beq     .Lmemcpy_bsrcul2
427
        cmp     r2, #0x0c
428
        blt     .Lmemcpy_bsrcul3loop4
429
        sub     r2, r2, #0x0c
430
        stmdb   sp!, {r4, r5, lr}
431
 
432
.Lmemcpy_bsrcul3loop16:
433
#if __BYTE_ORDER == __BIG_ENDIAN
434
        mov     lr, r3, lsr #8
435
        ldmdb   r1!, {r3-r5, r12}
436
        orr     lr, lr, r12, lsl #24
437
        mov     r12, r12, lsr #8
438
        orr     r12, r12, r5, lsl #24
439
        mov     r5, r5, lsr #8
440
        orr     r5, r5, r4, lsl #24
441
        mov     r4, r4, lsr #8
442
        orr     r4, r4, r3, lsl #24
443
#else
444
        mov     lr, r3, lsl #8
445
        ldmdb   r1!, {r3-r5, r12}
446
        orr     lr, lr, r12, lsr #24
447
        mov     r12, r12, lsl #8
448
        orr     r12, r12, r5, lsr #24
449
        mov     r5, r5, lsl #8
450
        orr     r5, r5, r4, lsr #24
451
        mov     r4, r4, lsl #8
452
        orr     r4, r4, r3, lsr #24
453
#endif
454
        stmdb   r0!, {r4, r5, r12, lr}
455
        subs    r2, r2, #0x10
456
        bge     .Lmemcpy_bsrcul3loop16
457
        ldmia   sp!, {r4, r5, lr}
458
        adds    r2, r2, #0x0c
459
        blt     .Lmemcpy_bsrcul3l4
460
 
461
.Lmemcpy_bsrcul3loop4:
462
#if __BYTE_ORDER == __BIG_ENDIAN
463
        mov     r12, r3, lsr #8
464
        ldr     r3, [r1, #-4]!
465
        orr     r12, r12, r3, lsl #24
466
#else
467
        mov     r12, r3, lsl #8
468
        ldr     r3, [r1, #-4]!
469
        orr     r12, r12, r3, lsr #24
470
#endif
471
        str     r12, [r0, #-4]!
472
        subs    r2, r2, #4
473
        bge     .Lmemcpy_bsrcul3loop4
474
 
475
.Lmemcpy_bsrcul3l4:
476
        add     r1, r1, #3
477
        b       .Lmemcpy_bl4
478
 
479
.Lmemcpy_bsrcul2:
480
        cmp     r2, #0x0c
481
        blt     .Lmemcpy_bsrcul2loop4
482
        sub     r2, r2, #0x0c
483
        stmdb   sp!, {r4, r5, lr}
484
 
485
.Lmemcpy_bsrcul2loop16:
486
#if __BYTE_ORDER == __BIG_ENDIAN
487
        mov     lr, r3, lsr #16
488
        ldmdb   r1!, {r3-r5, r12}
489
        orr     lr, lr, r12, lsl #16
490
        mov     r12, r12, lsr #16
491
        orr     r12, r12, r5, lsl #16
492
        mov     r5, r5, lsr #16
493
        orr     r5, r5, r4, lsl #16
494
        mov     r4, r4, lsr #16
495
        orr     r4, r4, r3, lsl #16
496
#else
497
        mov     lr, r3, lsl #16
498
        ldmdb   r1!, {r3-r5, r12}
499
        orr     lr, lr, r12, lsr #16
500
        mov     r12, r12, lsl #16
501
        orr     r12, r12, r5, lsr #16
502
        mov     r5, r5, lsl #16
503
        orr     r5, r5, r4, lsr #16
504
        mov     r4, r4, lsl #16
505
        orr     r4, r4, r3, lsr #16
506
#endif
507
        stmdb   r0!, {r4, r5, r12, lr}
508
        subs    r2, r2, #0x10
509
        bge     .Lmemcpy_bsrcul2loop16
510
        ldmia   sp!, {r4, r5, lr}
511
        adds    r2, r2, #0x0c
512
        blt     .Lmemcpy_bsrcul2l4
513
 
514
.Lmemcpy_bsrcul2loop4:
515
#if __BYTE_ORDER == __BIG_ENDIAN
516
        mov     r12, r3, lsr #16
517
        ldr     r3, [r1, #-4]!
518
        orr     r12, r12, r3, lsl #16
519
#else
520
        mov     r12, r3, lsl #16
521
        ldr     r3, [r1, #-4]!
522
        orr     r12, r12, r3, lsr #16
523
#endif
524
        str     r12, [r0, #-4]!
525
        subs    r2, r2, #4
526
        bge     .Lmemcpy_bsrcul2loop4
527
 
528
.Lmemcpy_bsrcul2l4:
529
        add     r1, r1, #2
530
        b       .Lmemcpy_bl4
531
 
532
.Lmemcpy_bsrcul1:
533
        cmp     r2, #0x0c
534
        blt     .Lmemcpy_bsrcul1loop4
535
        sub     r2, r2, #0x0c
536
        stmdb   sp!, {r4, r5, lr}
537
 
538
.Lmemcpy_bsrcul1loop32:
539
#if __BYTE_ORDER == __BIG_ENDIAN
540
        mov     lr, r3, lsr #24
541
        ldmdb   r1!, {r3-r5, r12}
542
        orr     lr, lr, r12, lsl #8
543
        mov     r12, r12, lsr #24
544
        orr     r12, r12, r5, lsl #8
545
        mov     r5, r5, lsr #24
546
        orr     r5, r5, r4, lsl #8
547
        mov     r4, r4, lsr #24
548
        orr     r4, r4, r3, lsl #8
549
#else
550
        mov     lr, r3, lsl #24
551
        ldmdb   r1!, {r3-r5, r12}
552
        orr     lr, lr, r12, lsr #8
553
        mov     r12, r12, lsl #24
554
        orr     r12, r12, r5, lsr #8
555
        mov     r5, r5, lsl #24
556
        orr     r5, r5, r4, lsr #8
557
        mov     r4, r4, lsl #24
558
        orr     r4, r4, r3, lsr #8
559
#endif
560
        stmdb   r0!, {r4, r5, r12, lr}
561
        subs    r2, r2, #0x10
562
        bge     .Lmemcpy_bsrcul1loop32
563
        ldmia   sp!, {r4, r5, lr}
564
        adds    r2, r2, #0x0c
565
        blt     .Lmemcpy_bsrcul1l4
566
 
567
.Lmemcpy_bsrcul1loop4:
568
#if __BYTE_ORDER == __BIG_ENDIAN
569
        mov     r12, r3, lsr #24
570
        ldr     r3, [r1, #-4]!
571
        orr     r12, r12, r3, lsl #8
572
#else
573
        mov     r12, r3, lsl #24
574
        ldr     r3, [r1, #-4]!
575
        orr     r12, r12, r3, lsr #8
576
#endif
577
        str     r12, [r0, #-4]!
578
        subs    r2, r2, #4
579
        bge     .Lmemcpy_bsrcul1loop4
580
 
581
.Lmemcpy_bsrcul1l4:
582
        add     r1, r1, #1
583
        b       .Lmemcpy_bl4

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.