/*
 * (c) Copyright 1986 HEWLETT-PACKARD COMPANY
 *
 * To anyone who acknowledges that this file is provided "AS IS"
 * without any express or implied warranty:
 *      permission to use, copy, modify, and distribute this file
 * for any purpose is hereby granted without fee, provided that
 * the above copyright notice and this notice appears in all
 * copies, and that the name of Hewlett-Packard Company not be
 * used in advertising or publicity pertaining to distribution
 * of the software without specific, written prior permission.
 * Hewlett-Packard Company makes no representations about the
 * suitability of this software for any purpose.
 */

/*HPUX_ID: @(#) $Revision: 1.1 $ */
/* strncat(s1,s2,n) : concatenate at most n characters from s2 onto s1 */
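
/*
 * For reference, the usual C-level interface for this routine is:
 *
 *     char *strncat(char *s1, const char *s2, size_t n);
 *
 * At most n characters are copied from s2 onto the end of the string at
 * s1, a terminating null byte is always stored, and the original s1 is
 * returned (here a NULL s2 is also tolerated and simply returns s1).
 * For example, appending "cdef" onto "ab" with n = 3 leaves "abcde".
 */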

#include "DEFS.h"

#define d_addr r26
#define s_addr r25
#define count r24
#define tmp1 r19
#define tmp2 r20
#define tmp3 r21
#define tmp4 r22
#define tmp5 arg3
#define tmp6 r31
#define save r1
#define tmp7 ret1   /* source offset -- reset to orig source addr if not aligned */

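/*
 * Overview: the end of s1 is first located a word at a time (uxor spots a
 * null byte inside a word), then bytes from s2 are appended with word-wide
 * stores while the remaining count permits -- one path when source and
 * destination share the same word alignment, and a vshd-based path when
 * they do not -- with a byte-at-a-time loop for short or leftover pieces.
 * The terminating null byte is stored at "done".
 */
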
ENTRY(strncat)

        comb,=  r0,s_addr,quit          /* quit if s2=NULL */
        copy    d_addr,ret0             /* The return value is the value of d_addr. DELAY SLOT */

/* First look for end of s1 (d_addr) */

        extru   d_addr,31,2,tmp1        /* Extract the low two bits of the dest address. */
        combt,= tmp1,r0,dont_mask
        dep     0,31,2,d_addr           /* set word alignment */
        ldwm    4(d_addr),tmp2
        sh3add  tmp1,r0,save            /* build mask based on tmp1 */
        mtctl   save,11
        zvdepi  -2,32,save
        or      save,tmp2,tmp2
        uxor,nbz tmp2,r0,save
search:
        b,n     found_end               /* nullified under uxor conditions above and below */
dont_mask:
        ldwm    4(d_addr),tmp2
        comib,tr r0,r0,search
        uxor,nbz tmp2,r0,save

found_end:                              /* at this point d_addr points to word */
        extru,<> save,7,8,r0            /* following word with null */
        addib,tr,n -4,d_addr,begin_copy /* set d_addr to end of s1 */
        extru,<> save,15,8,r0
        addib,tr,n -3,d_addr,begin_copy
        extru,<> save,23,8,r0
        addi    -1,d_addr,d_addr
        addi    -1,d_addr,d_addr

begin_copy:
        addibt,<,n -4,count,byteloop    /* If count is <= 4 don't get fancy. */

        extru   s_addr,31,2,tmp4        /* Extract the low two bits of the source address. */
        extru   d_addr,31,2,tmp5        /* Extract the low two bits of the destination address. */
        add     count,tmp5,count        /* pre increment the count by the byte address so that the count comes out right for the word-at-a-time stores below */
        copy    s_addr,tmp6             /* save original s_addr in case we find null in first word */
        copy    s_addr,tmp7             /* save s_addr in case we find null before first store */
        comb,<> tmp5,tmp4,not_aligned   /* branch if tmp5<>tmp4. */
        dep     0,31,2,s_addr           /* Compute the word address of the source. DELAY SLOT. */
/* aligned */
        combt,= tmp5,r0,skip_mask
        ldwm    4(0,s_addr),tmp1        /* tmp1 = *s_addr  s_addr += 4  (DELAY SLOT) */
        sh3add  tmp5,r0,save            /* compute mask in save */
        mtctl   save,11
        zvdepi  -2,32,save
        or      save,tmp1,tmp1          /* or mask with data */
        uxor,nbz tmp1,r0,save           /* check for null */
        b,n     null1
        addibt,< -4,count,back_porch
        stbys,b,m tmp1,4(0,d_addr)      /* store word (delay slot) */

chunks:
        ldwm    4(0,s_addr),tmp1        /* get a word */

skip_mask:
        uxor,nbz tmp1,r0,save           /* check for null */
        b,n     align_null1
        addibf,< -4,count,chunks
        stbys,b,m tmp1,4(0,d_addr)      /* store word (delay slot) */

back_porch:                             /* last word to store */
        addibt,=,n 4,count,done         /* if count = 0 we're, of course, done ! */
        ldws    0(s_addr),tmp1          /* load up the back_porch */
        sh3add  count,r0,save           /* setup right mask based on count */
        mtctl   save,r11
        zvdepi  -2,32,save              /* save now has left-hand mask */
        uaddcm  r0,save,save            /* form right hand mask */
        or      tmp1,save,tmp1          /* and insert data */
        uxor,nbz tmp1,r0,save           /* check for null */
        b,n     null2
        add     d_addr,count,d_addr     /* final store address is +1 too high ! */
        b       done
        stbys,e tmp1,0(d_addr)          /* done */

/* Begin non_aligned code. */
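/*
 * Source and destination are mutually mis-aligned here, so each word that
 * gets stored is assembled from two consecutive source words with vshd,
 * using the bit count loaded into cr11 (the shift amount register).
 */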
not_aligned:
        sub,>=  tmp5,tmp4,tmp6          /* compute the shift amt. and skip load if tmp5 > tmp4. */
        ldwm    4(0,s_addr),tmp1        /* load up the first word from the source. tmp1 = *s_addr++ */
        zdep    tmp6,28,29,tmp4         /* compute the number of bits to shift */
        mtctl   tmp4,11                 /* load the shift count into cr11 = shift count register. */
        addibt,<,n -4,count,chkchnk2    /* first step in pre adjustment of count for looping. */

        ldwm    4(0,s_addr),tmp2        /* get either first or second word from source. */
        combt,= tmp5,r0,skip_mask4      /* don't mask if whole word is valid */
        vshd    tmp1,tmp2,tmp3          /* position data ! (delay slot) */
        sh3add  tmp5,r0,save            /* setup r1 */
        mtctl   save,r11                /* setup mask in save */
        zvdepi  -2,32,save
        or      save,tmp3,tmp3
        mtctl   tmp4,11                 /* re-load the shift count into cr11 */

skip_mask4:
        uxor,nbz tmp3,r0,save
        b,n     null4                   /* special case for first word */
        copy    r0,tmp5                 /* zero out tmp5 so we don't try to mask again */
        copy    r0,tmp7                 /* zero out tmp7 so we don't try to use original s_addr anymore */
        b       continue
        stbys,b,m tmp3,4(0,d_addr)      /* store ! */

chunk2:
        ldwm    4(0,s_addr),tmp2
        vshd    tmp1,tmp2,tmp3

skip_mask2:
        uxor,nbz tmp3,r0,save
        b,n     null3
        stbys,b,m tmp3,4(0,d_addr)      /* store ! */

continue:
        ldwm    4(0,s_addr),tmp1        /* get 2nd word ! */
        vshd    tmp2,tmp1,tmp3          /* position data ! */
        uxor,nbz tmp3,r0,save
        b,n     null3

        addibf,< -8,count,chunk2        /* If count is still >= 8 do another loop. */
        stbys,b,m tmp3,4(0,d_addr)      /* store ! */

chkchnk2:
        addibt,<,n 4,count,bp_0         /* if we don't have 4 bytes left then do the back porch (bp_0) */

subchnk2:                               /* we have less than 8 chars to copy */

        ldwm    4(0,s_addr),tmp2        /* get next word ! */
        combt,= tmp5,r0,skip_mask3
        vshd    tmp1,tmp2,tmp3          /* position data ! */
        sh3add  tmp5,r0,save            /* setup r1 */
        mtctl   save,r11                /* setup mask in save */
        zvdepi  -2,32,save
        or      save,tmp3,tmp3
        mtctl   tmp4,11                 /* restore shift value again */
skip_mask3:
        uxor,nbz tmp3,r0,save
        b,n     null3
        copy    r0,tmp5                 /* zero out tmp5 so null3 does correct alignment */
        copy    r0,tmp7                 /* zero out tmp7 so we don't use original s_addr since no longer valid */
        b       bp_1                    /* we now have less than 4 bytes to move */
        stbys,b,m tmp3,4(0,d_addr)      /* store ! */

bp_0:
        copy    tmp1,tmp2               /* switch registers for shift process */
        addibt,<=,n 4,count,done        /* if count = -4 this implies that count = 0 -> done */

bp_1:
        ldwm    4(0,s_addr),tmp1        /* get final word ! */
        vshd    tmp2,tmp1,tmp3          /* position data ! */
        uxor,nbz tmp3,r0,save           /* if no-byte-zero */
        b,n     bp_null                 /* don't goto no_null - find which null instead */
no_null:
        add     d_addr,count,d_addr     /* set up d_addr for stbys,e */
        b       done                    /* we're done */
        stbys,e tmp3,0(0,d_addr)        /* store the data ! */

/* here we do ye old byte-at-a-time moves. */
align_null1:
        b       byteloop
        addi    -4,s_addr,s_addr
null1:
        copy    tmp6,s_addr             /* restore orig s_addr (aligned only) */
byteloop:
        addibt,= 4,count,done
null2:
        ldbs,ma 1(s_addr),tmp1
encore:
        combt,=,n tmp1,r0,done
        stbs,ma tmp1,1(d_addr)
        addibf,=,n -1,count,encore
        ldbs,ma 1(s_addr),tmp1
        b,n     done

bp_null:
        addi    -4,count,count          /* fudge count 'cause byteloop will re-increment */

null3:                                  /* not_aligned case: reset s_addr and finish byte-wise */
        combt,=,n r0,tmp7,null3a        /* if tmp7 is not a valid address then branch below */
        b       byteloop                /* otherwise reset s_addr to tmp7 and finish */
        copy    tmp7,s_addr

null3a:                                 /* right shift target */
        addibt,<,n 0,tmp6,null3b        /* if left shifting */
        sub     r0,tmp6,tmp6            /* do null3b code */
        addi    -4,tmp6,tmp6
        b       byteloop
        add     tmp6,s_addr,s_addr      /* reset s_addr by 4 + shift_amt */

null3b:
        subi    -8,tmp6,tmp6
        add     tmp5,tmp6,tmp6          /* adjust by the dest offset if this is our first store */
        b       byteloop
        add     tmp6,s_addr,s_addr      /* adjust s_addr by (8-shift_amt-dest_off) */

null4:
        add,>   tmp6,r0,tmp6            /* if left shift */
        b,n     null3                   /* then do null3 */
        b       byteloop
        addi    -4,s_addr,s_addr        /* adj source only by 4 */

done:
        bv      0(r2)                   /* return to caller */
        stbs    r0,0(d_addr)            /* store the terminating null (delay slot) */
quit:
EXIT(strncat)