URL
https://opencores.org/ocsvn/or1k_old/or1k_old/trunk
Subversion Repositories or1k_old
[/] [or1k_old/] [trunk/] [linux/] [uClibc/] [libc/] [string/] [arm/] [_memcpy.S] - Rev 1782
Compare with Previous | Blame | View Log
/*-
 * Copyright (c) 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Neil A. Carson and Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Adapted for uClibc from NetBSD _memcpy.S,v 1.6 2003/10/09
 * by Erik Andersen <andersen@codepoet.org>
 */

#include <endian.h>

/*
 * This is one fun bit of code ...
 * Some easy listening music is suggested while trying to understand this
 * code e.g. Iron Maiden
 *
 * For anyone attempting to understand it :
 *
 * The core code is implemented here with simple stubs for memcpy()
 * memmove() and bcopy().
 *
 * All local labels are prefixed with Lmemcpy_
 * Following the prefix a label starting f is used in the forward copy code
 * while a label using b is used in the backwards copy code
 * The source and destination addresses determine whether a forward or
 * backward copy is performed.
 * Separate bits of code are used to deal with the following situations
 * for both the forward and backwards copy.
 * unaligned source address
 * unaligned destination address
 * Separate copy routines are used to produce an optimised result for each
 * of these cases.
 * The copy code will use LDM/STM instructions to copy up to 32 bytes at
 * a time where possible.
 *
 * Note: r12 (aka ip) can be trashed during the function along with
 * r0-r3 although r0-r2 have defined uses i.e. dest, src, len throughout.
 * Additional registers are preserved prior to use i.e.
r4, r5 & lr
*
* Apologies for the state of the comments ;-)
*/

.text
.global _memcpy;
.type _memcpy,%function
.align 4;

/*
 * _memcpy -- core copy engine shared by the memcpy()/memmove()/bcopy() stubs
 *            (per the header comment above; the stubs themselves are not in
 *            this view).
 *
 * In:       r0 = dest, r1 = src, r2 = len in bytes
 *           (roles established below: r0 is saved as "memcpy() returns dest
 *            addr", loads post-increment r1, stores post-increment r0, and
 *            r2 counts down.)
 * Out:      forward path returns the saved r0 (dest) via the final
 *           "ldmia sp!, {r0, pc}"; the backwards path returns via "mov pc, lr"
 *           leaving r0 pointing back at the original dest (it was advanced by
 *           len and then walked back down by exactly len bytes).
 * Clobbers: r1-r3, r12 (ip), flags; r4/r5/lr are pushed before use.
 *
 * Direction choice: "cmp r1, r0 / bcc" -- if src < dest the tail of src could
 * be overwritten by a forward copy, so the copy runs backwards instead.
 */
_memcpy:
	/* Determine copy direction (C clear: src < dest => backwards) */
	cmp	r1, r0
	bcc	.Lmemcpy_backwards

	/* EQ from the cmp above means src == dest: nothing to move.
	   NOTE(review): this path returns 0 in r0, not dest -- presumably
	   the public wrappers don't rely on the return value here; confirm
	   against the stubs. */
	moveq	r0, #0			/* quick abort when src == dest */
	moveq	pc, lr

	stmdb	sp!, {r0, lr}		/* memcpy() returns dest addr */
	subs	r2, r2, #4
	blt	.Lmemcpy_fl4		/* less than 4 bytes */
	ands	r12, r0, #3
	bne	.Lmemcpy_fdestul	/* oh unaligned destination addr */
	ands	r12, r1, #3
	bne	.Lmemcpy_fsrcul		/* oh unaligned source addr */

.Lmemcpy_ft8:
	/* We have aligned source and destination */
	subs	r2, r2, #8
	blt	.Lmemcpy_fl12		/* less than 12 bytes (4 from above) */
	subs	r2, r2, #0x14
	blt	.Lmemcpy_fl32		/* less than 32 bytes (12 from above) */
	stmdb	sp!, {r4}		/* borrow r4 */

	/* blat 32 bytes at a time */
	/* XXX for really big copies perhaps we should use more registers */
.Lmemcpy_floop32:
	ldmia	r1!, {r3, r4, r12, lr}
	stmia	r0!, {r3, r4, r12, lr}
	ldmia	r1!, {r3, r4, r12, lr}
	stmia	r0!, {r3, r4, r12, lr}
	subs	r2, r2, #0x20
	bge	.Lmemcpy_floop32

	/* r2 is now negative; GE below holds iff r2 >= -0x10,
	   i.e. at least 16 bytes are still owed */
	cmn	r2, #0x10
	ldmgeia	r1!, {r3, r4, r12, lr}	/* blat a remaining 16 bytes */
	stmgeia	r0!, {r3, r4, r12, lr}
	subge	r2, r2, #0x10
	ldmia	sp!, {r4}		/* return r4 */

.Lmemcpy_fl32:
	adds	r2, r2, #0x14		/* undo the 0x14 bias; sets GE/LT for the loop below */

	/* blat 12 bytes at a time */
.Lmemcpy_floop12:
	ldmgeia	r1!, {r3, r12, lr}
	stmgeia	r0!, {r3, r12, lr}
	subges	r2, r2, #0x0c
	bge	.Lmemcpy_floop12

.Lmemcpy_fl12:
	adds	r2, r2, #8		/* undo the 8 bias */
	blt	.Lmemcpy_fl4

	/* 4-11 bytes left: move one word (LT) or two words (GE) */
	subs	r2, r2, #4
	ldrlt	r3, [r1], #4
	strlt	r3, [r0], #4
	ldmgeia	r1!, {r3, r12}
	stmgeia	r0!, {r3, r12}
	subge	r2, r2, #4

.Lmemcpy_fl4:
	/* less than 4 bytes to go */
	adds	r2, r2, #4
	ldmeqia	sp!, {r0, pc}		/* done -- restores saved dest into r0 */

	/* copy the crud byte at a time: 1 byte always, 2nd if GE, 3rd if GT */
	cmp	r2, #2
	ldrb	r3, [r1], #1
	strb	r3, [r0], #1
	ldrgeb	r3, [r1], #1
	strgeb	r3, [r0], #1
	ldrgtb	r3, [r1], #1
	strgtb	r3, [r0], #1
	ldmia	sp!, {r0, pc}

	/* erg - unaligned destination */
.Lmemcpy_fdestul:
	rsb	r12, r12, #4		/* r12 = bytes needed to word-align dest */
	cmp	r12, #2

	/* align destination with byte copies (1, 2 or 3 bytes) */
	ldrb	r3, [r1], #1
	strb	r3, [r0], #1
	ldrgeb	r3, [r1], #1
	strgeb	r3, [r0], #1
	ldrgtb	r3, [r1], #1
	strgtb	r3, [r0], #1
	subs	r2, r2, r12
	blt	.Lmemcpy_fl4		/* less than 4 bytes */
	ands	r12, r1, #3
	beq	.Lmemcpy_ft8		/* we have an aligned source */

	/* erg - unaligned source */
	/* This is where it gets nasty ...
	   Technique: word-align src (bic), keep the partial word in lr, and
	   merge each output word from two input words with shifts; the shift
	   amounts depend on the misalignment (r12 = 1, 2 or 3) and on byte
	   order, hence the three variants below. */
.Lmemcpy_fsrcul:
	bic	r1, r1, #3
	ldr	lr, [r1], #4		/* prime lr with the first partial word */
	cmp	r12, #2
	bgt	.Lmemcpy_fsrcul3
	beq	.Lmemcpy_fsrcul2

	/* src misaligned by 1 byte */
	cmp	r2, #0x0c
	blt	.Lmemcpy_fsrcul1loop4
	sub	r2, r2, #0x0c
	stmdb	sp!, {r4, r5}

.Lmemcpy_fsrcul1loop16:
#if __BYTE_ORDER == __BIG_ENDIAN
	mov	r3, lr, lsl #8
	ldmia	r1!, {r4, r5, r12, lr}
	orr	r3, r3, r4, lsr #24
	mov	r4, r4, lsl #8
	orr	r4, r4, r5, lsr #24
	mov	r5, r5, lsl #8
	orr	r5, r5, r12, lsr #24
	mov	r12, r12, lsl #8
	orr	r12, r12, lr, lsr #24
#else
	mov	r3, lr, lsr #8
	ldmia	r1!, {r4, r5, r12, lr}
	orr	r3, r3, r4, lsl #24
	mov	r4, r4, lsr #8
	orr	r4, r4, r5, lsl #24
	mov	r5, r5, lsr #8
	orr	r5, r5, r12, lsl #24
	mov	r12, r12, lsr #8
	orr	r12, r12, lr, lsl #24
#endif
	stmia	r0!, {r3-r5, r12}
	subs	r2, r2, #0x10
	bge	.Lmemcpy_fsrcul1loop16
	ldmia	sp!, {r4, r5}
	adds	r2, r2, #0x0c
	blt	.Lmemcpy_fsrcul1l4

.Lmemcpy_fsrcul1loop4:
#if __BYTE_ORDER == __BIG_ENDIAN
	mov	r12, lr, lsl #8
	ldr	lr, [r1], #4
	orr	r12, r12, lr, lsr #24
#else
	mov	r12, lr, lsr #8
	ldr	lr, [r1], #4
	orr	r12, r12, lr, lsl #24
#endif
	str	r12, [r0], #4
	subs	r2, r2, #4
	bge	.Lmemcpy_fsrcul1loop4

.Lmemcpy_fsrcul1l4:
	sub	r1, r1, #3		/* point r1 back at the true (unaligned) src */
	b	.Lmemcpy_fl4

	/* src misaligned by 2 bytes */
.Lmemcpy_fsrcul2:
	cmp	r2, #0x0c
	blt	.Lmemcpy_fsrcul2loop4
	sub	r2, r2, #0x0c
	stmdb	sp!, {r4, r5}

.Lmemcpy_fsrcul2loop16:
#if __BYTE_ORDER == __BIG_ENDIAN
	mov	r3, lr, lsl #16
	ldmia	r1!, {r4, r5, r12, lr}
	orr	r3, r3, r4, lsr #16
	mov	r4, r4, lsl #16
	orr	r4, r4, r5, lsr #16
	mov	r5, r5, lsl #16
	orr	r5, r5, r12, lsr #16
	mov	r12, r12, lsl #16
	orr	r12, r12, lr, lsr #16
#else
	mov	r3, lr, lsr #16
	ldmia	r1!, {r4, r5, r12, lr}
	orr	r3, r3, r4, lsl #16
	mov	r4, r4, lsr #16
	orr	r4, r4, r5, lsl #16
	mov	r5, r5, lsr #16
	orr	r5, r5, r12, lsl #16
	mov	r12, r12, lsr #16
	orr	r12, r12, lr, lsl #16
#endif
	stmia	r0!, {r3-r5, r12}
	subs	r2, r2, #0x10
	bge	.Lmemcpy_fsrcul2loop16
	ldmia	sp!, {r4, r5}
	adds	r2, r2, #0x0c
	blt	.Lmemcpy_fsrcul2l4

.Lmemcpy_fsrcul2loop4:
#if __BYTE_ORDER == __BIG_ENDIAN
	mov	r12, lr, lsl #16
	ldr	lr, [r1], #4
	orr	r12, r12, lr, lsr #16
#else
	mov	r12, lr, lsr #16
	ldr	lr, [r1], #4
	orr	r12, r12, lr, lsl #16
#endif
	str	r12, [r0], #4
	subs	r2, r2, #4
	bge	.Lmemcpy_fsrcul2loop4

.Lmemcpy_fsrcul2l4:
	sub	r1, r1, #2		/* point r1 back at the true (unaligned) src */
	b	.Lmemcpy_fl4

	/* src misaligned by 3 bytes */
.Lmemcpy_fsrcul3:
	cmp	r2, #0x0c
	blt	.Lmemcpy_fsrcul3loop4
	sub	r2, r2, #0x0c
	stmdb	sp!, {r4, r5}

.Lmemcpy_fsrcul3loop16:
#if __BYTE_ORDER == __BIG_ENDIAN
	mov	r3, lr, lsl #24
	ldmia	r1!, {r4, r5, r12, lr}
	orr	r3, r3, r4, lsr #8
	mov	r4, r4, lsl #24
	orr	r4, r4, r5, lsr #8
	mov	r5, r5, lsl #24
	orr	r5, r5, r12, lsr #8
	mov	r12, r12, lsl #24
	orr	r12, r12, lr, lsr #8
#else
	mov	r3, lr, lsr #24
	ldmia	r1!, {r4, r5, r12, lr}
	orr	r3, r3, r4, lsl #8
	mov	r4, r4, lsr #24
	orr	r4, r4, r5, lsl #8
	mov	r5, r5, lsr #24
	orr	r5, r5, r12, lsl #8
	mov	r12, r12, lsr #24
	orr	r12, r12, lr, lsl #8
#endif
	stmia	r0!, {r3-r5, r12}
	subs	r2, r2, #0x10
	bge	.Lmemcpy_fsrcul3loop16
	ldmia	sp!, {r4, r5}
	adds	r2, r2, #0x0c
	blt	.Lmemcpy_fsrcul3l4

.Lmemcpy_fsrcul3loop4:
#if __BYTE_ORDER == __BIG_ENDIAN
	mov	r12, lr, lsl #24
	ldr	lr, [r1], #4
	orr	r12, r12, lr, lsr #8
#else
	mov	r12, lr, lsr #24
	ldr	lr, [r1], #4
	orr	r12, r12, lr, lsl #8
#endif
	str	r12, [r0], #4
	subs	r2, r2, #4
	bge	.Lmemcpy_fsrcul3loop4

.Lmemcpy_fsrcul3l4:
	sub	r1, r1, #1		/* point r1 back at the true (unaligned) src */
	b	.Lmemcpy_fl4

	/*
	 * Backwards copy: start from the far ends of both buffers and walk
	 * down.  Mirrors the forward code (ldmdb/stmdb instead of
	 * ldmia/stmia, pre-decrement byte ops instead of post-increment).
	 */
.Lmemcpy_backwards:
	add	r1, r1, r2		/* point just past the ends */
	add	r0, r0, r2
	subs	r2, r2, #4
	blt	.Lmemcpy_bl4		/* less than 4 bytes */
	ands	r12, r0, #3
	bne	.Lmemcpy_bdestul	/* oh unaligned destination addr */
	ands	r12, r1, #3
	bne	.Lmemcpy_bsrcul		/* oh unaligned source addr */

.Lmemcpy_bt8:
	/* We have aligned source and destination */
	subs	r2, r2, #8
	blt	.Lmemcpy_bl12		/* less than 12 bytes (4 from above) */
	stmdb	sp!, {r4, lr}
	subs	r2, r2, #0x14		/* less than 32 bytes (12 from above) */
	blt	.Lmemcpy_bl32

	/* blat 32 bytes at a time */
	/* XXX for really big copies perhaps we should use more registers */
.Lmemcpy_bloop32:
	ldmdb	r1!, {r3, r4, r12, lr}
	stmdb	r0!, {r3, r4, r12, lr}
	ldmdb	r1!, {r3, r4, r12, lr}
	stmdb	r0!, {r3, r4, r12, lr}
	subs	r2, r2, #0x20
	bge	.Lmemcpy_bloop32

.Lmemcpy_bl32:
	/* GE iff r2 >= -0x10: at least 16 bytes still owed */
	cmn	r2, #0x10
	ldmgedb	r1!, {r3, r4, r12, lr}	/* blat a remaining 16 bytes */
	stmgedb	r0!, {r3, r4, r12, lr}
	subge	r2, r2, #0x10
	adds	r2, r2, #0x14		/* undo the 0x14 bias; GE => >= 12 left */
	ldmgedb	r1!, {r3, r12, lr}	/* blat a remaining 12 bytes */
	stmgedb	r0!, {r3, r12, lr}
	subge	r2, r2, #0x0c
	ldmia	sp!, {r4, lr}

.Lmemcpy_bl12:
	adds	r2, r2, #8		/* undo the 8 bias */
	blt	.Lmemcpy_bl4
	subs	r2, r2, #4
	ldrlt	r3, [r1, #-4]!
	strlt	r3, [r0, #-4]!
	ldmgedb	r1!, {r3, r12}
	stmgedb	r0!, {r3, r12}
	subge	r2, r2, #4

.Lmemcpy_bl4:
	/* less than 4 bytes to go */
	adds	r2, r2, #4
	moveq	pc, lr			/* done */

	/* copy the crud byte at a time: 1 byte always, 2nd if GE, 3rd if GT */
	cmp	r2, #2
	ldrb	r3, [r1, #-1]!
	strb	r3, [r0, #-1]!
	ldrgeb	r3, [r1, #-1]!
	strgeb	r3, [r0, #-1]!
	ldrgtb	r3, [r1, #-1]!
	strgtb	r3, [r0, #-1]!
	mov	pc, lr

	/* erg - unaligned destination */
.Lmemcpy_bdestul:
	/* here r12 = dest & 3 = bytes to peel to reach word alignment
	   (copying downwards, so no rsb needed as in the forward case) */
	cmp	r12, #2

	/* align destination with byte copies (1, 2 or 3 bytes) */
	ldrb	r3, [r1, #-1]!
	strb	r3, [r0, #-1]!
	ldrgeb	r3, [r1, #-1]!
	strgeb	r3, [r0, #-1]!
	ldrgtb	r3, [r1, #-1]!
	strgtb	r3, [r0, #-1]!
	subs	r2, r2, r12
	blt	.Lmemcpy_bl4		/* less than 4 bytes to go */
	ands	r12, r1, #3
	beq	.Lmemcpy_bt8		/* we have an aligned source */

	/* erg - unaligned source */
	/* This is where it gets nasty ...
	   Backwards analogue of .Lmemcpy_fsrcul: the partial word is kept in
	   r3 and the shift directions are mirrored because we consume bytes
	   from the high end first. */
.Lmemcpy_bsrcul:
	bic	r1, r1, #3
	ldr	r3, [r1, #0]		/* prime r3 with the partial word */
	cmp	r12, #2
	blt	.Lmemcpy_bsrcul1
	beq	.Lmemcpy_bsrcul2

	/* src misaligned by 3 bytes */
	cmp	r2, #0x0c
	blt	.Lmemcpy_bsrcul3loop4
	sub	r2, r2, #0x0c
	stmdb	sp!, {r4, r5, lr}

.Lmemcpy_bsrcul3loop16:
#if __BYTE_ORDER == __BIG_ENDIAN
	mov	lr, r3, lsr #8
	ldmdb	r1!, {r3-r5, r12}
	orr	lr, lr, r12, lsl #24
	mov	r12, r12, lsr #8
	orr	r12, r12, r5, lsl #24
	mov	r5, r5, lsr #8
	orr	r5, r5, r4, lsl #24
	mov	r4, r4, lsr #8
	orr	r4, r4, r3, lsl #24
#else
	mov	lr, r3, lsl #8
	ldmdb	r1!, {r3-r5, r12}
	orr	lr, lr, r12, lsr #24
	mov	r12, r12, lsl #8
	orr	r12, r12, r5, lsr #24
	mov	r5, r5, lsl #8
	orr	r5, r5, r4, lsr #24
	mov	r4, r4, lsl #8
	orr	r4, r4, r3, lsr #24
#endif
	stmdb	r0!, {r4, r5, r12, lr}
	subs	r2, r2, #0x10
	bge	.Lmemcpy_bsrcul3loop16
	ldmia	sp!, {r4, r5, lr}
	adds	r2, r2, #0x0c
	blt	.Lmemcpy_bsrcul3l4

.Lmemcpy_bsrcul3loop4:
#if __BYTE_ORDER == __BIG_ENDIAN
	mov	r12, r3, lsr #8
	ldr	r3, [r1, #-4]!
	orr	r12, r12, r3, lsl #24
#else
	mov	r12, r3, lsl #8
	ldr	r3, [r1, #-4]!
	orr	r12, r12, r3, lsr #24
#endif
	str	r12, [r0, #-4]!
	subs	r2, r2, #4
	bge	.Lmemcpy_bsrcul3loop4

.Lmemcpy_bsrcul3l4:
	add	r1, r1, #3		/* point r1 back at the true (unaligned) src */
	b	.Lmemcpy_bl4

	/* src misaligned by 2 bytes */
.Lmemcpy_bsrcul2:
	cmp	r2, #0x0c
	blt	.Lmemcpy_bsrcul2loop4
	sub	r2, r2, #0x0c
	stmdb	sp!, {r4, r5, lr}

.Lmemcpy_bsrcul2loop16:
#if __BYTE_ORDER == __BIG_ENDIAN
	mov	lr, r3, lsr #16
	ldmdb	r1!, {r3-r5, r12}
	orr	lr, lr, r12, lsl #16
	mov	r12, r12, lsr #16
	orr	r12, r12, r5, lsl #16
	mov	r5, r5, lsr #16
	orr	r5, r5, r4, lsl #16
	mov	r4, r4, lsr #16
	orr	r4, r4, r3, lsl #16
#else
	mov	lr, r3, lsl #16
	ldmdb	r1!, {r3-r5, r12}
	orr	lr, lr, r12, lsr #16
	mov	r12, r12, lsl #16
	orr	r12, r12, r5, lsr #16
	mov	r5, r5, lsl #16
	orr	r5, r5, r4, lsr #16
	mov	r4, r4, lsl #16
	orr	r4, r4, r3, lsr #16
#endif
	stmdb	r0!, {r4, r5, r12, lr}
	subs	r2, r2, #0x10
	bge	.Lmemcpy_bsrcul2loop16
	ldmia	sp!, {r4, r5, lr}
	adds	r2, r2, #0x0c
	blt	.Lmemcpy_bsrcul2l4

.Lmemcpy_bsrcul2loop4:
#if __BYTE_ORDER == __BIG_ENDIAN
	mov	r12, r3, lsr #16
	ldr	r3, [r1, #-4]!
	orr	r12, r12, r3, lsl #16
#else
	mov	r12, r3, lsl #16
	ldr	r3, [r1, #-4]!
	orr	r12, r12, r3, lsr #16
#endif
	str	r12, [r0, #-4]!
	subs	r2, r2, #4
	bge	.Lmemcpy_bsrcul2loop4

.Lmemcpy_bsrcul2l4:
	add	r1, r1, #2		/* point r1 back at the true (unaligned) src */
	b	.Lmemcpy_bl4

	/* src misaligned by 1 byte */
.Lmemcpy_bsrcul1:
	cmp	r2, #0x0c
	blt	.Lmemcpy_bsrcul1loop4
	sub	r2, r2, #0x0c
	stmdb	sp!, {r4, r5, lr}

.Lmemcpy_bsrcul1loop32:
#if __BYTE_ORDER == __BIG_ENDIAN
	mov	lr, r3, lsr #24
	ldmdb	r1!, {r3-r5, r12}
	orr	lr, lr, r12, lsl #8
	mov	r12, r12, lsr #24
	orr	r12, r12, r5, lsl #8
	mov	r5, r5, lsr #24
	orr	r5, r5, r4, lsl #8
	mov	r4, r4, lsr #24
	orr	r4, r4, r3, lsl #8
#else
	mov	lr, r3, lsl #24
	ldmdb	r1!, {r3-r5, r12}
	orr	lr, lr, r12, lsr #8
	mov	r12, r12, lsl #24
	orr	r12, r12, r5, lsr #8
	mov	r5, r5, lsl #24
	orr	r5, r5, r4, lsr #8
	mov	r4, r4, lsl #24
	orr	r4, r4, r3, lsr #8
#endif
	stmdb	r0!, {r4, r5, r12, lr}
	subs	r2, r2, #0x10
	bge	.Lmemcpy_bsrcul1loop32
	ldmia	sp!, {r4, r5, lr}
	adds	r2, r2, #0x0c
	blt	.Lmemcpy_bsrcul1l4

.Lmemcpy_bsrcul1loop4:
#if __BYTE_ORDER == __BIG_ENDIAN
	mov	r12, r3, lsr #24
	ldr	r3, [r1, #-4]!
	orr	r12, r12, r3, lsl #8
#else
	mov	r12, r3, lsl #24
	ldr	r3, [r1, #-4]!
	orr	r12, r12, r3, lsr #8
#endif
	str	r12, [r0, #-4]!
	subs	r2, r2, #4
	bge	.Lmemcpy_bsrcul1loop4

.Lmemcpy_bsrcul1l4:
	add	r1, r1, #1		/* point r1 back at the true (unaligned) src */
	b	.Lmemcpy_bl4
