OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

Path: or1k/trunk/uclinux/uClinux-2.0.x/arch/m68k/lib/checksum.c (blame information for rev 1765)


Rev 199, author simons
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              IP/TCP/UDP checksumming routines
 *
 * Authors:     Jorge Cwik, <jorge@laser.satlink.net>
 *              Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *              Tom May, <ftom@netcom.com>
 *              Andreas Schwab, <schwab@issan.informatik.uni-dortmund.de>
 *              Lots of code moved from tcp.c and ip.c; see those files
 *              for more names.
 *
 * 03/02/96     Jes Sorensen, Andreas Schwab, Roman Hodek:
 *              Fixed some nasty bugs, causing some horrible crashes.
 *              A: At some points, the sum (%0) was used as
 *              length-counter instead of the length counter
 *              (%1). Thanks to Roman Hodek for pointing this out.
 *              B: GCC seems to mess up if one uses too many
 *              data-registers to hold input values and one tries to
 *              specify d0 and d1 as scratch registers. Letting gcc choose these
 *              registers itself solves the problem.
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 */
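
/*
 * Note B above is why the asm blocks below list the temporaries as
 * "=&d" (early-clobber data register) outputs: GCC picks the scratch
 * registers itself instead of the code naming d0/d1 explicitly.
 */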

#include <net/checksum.h>

/*
 * computes a partial checksum, e.g. for TCP/UDP fragments
 */

unsigned int
csum_partial (const unsigned char *buff, int len, unsigned int sum)
{
        unsigned long tmp1, tmp2;
        /*
         * Experiments with ethernet and slip connections show that buff
         * is aligned on either a 2-byte or 4-byte boundary.
         */
        __asm__("movel %2,%3\n\t"
                "btst #1,%3\n\t"        /* Check alignment */
                "jeq 2f\n\t"
                "subql #2,%1\n\t"       /* buff%4==2: treat first word */
                "jgt 1f\n\t"
                "addql #2,%1\n\t"       /* len was == 2, treat only rest */
                "jra 4f\n"
             "1:\t"
                "addw %2@+,%0\n\t"      /* add first word to sum */
                "clrl %3\n\t"
                "addxl %3,%0\n"         /* add X bit */
             "2:\t"
                /* unrolled loop for the main part: do 8 longs at once */
                "movel %1,%3\n\t"       /* save len in tmp1 */
                "lsrl #5,%1\n\t"        /* len/32 */
                "jeq 2f\n\t"            /* not enough... */
                "subql #1,%1\n"
             "1:\t"
                "movel %2@+,%4\n\t"
                "addxl %4,%0\n\t"
                "movel %2@+,%4\n\t"
                "addxl %4,%0\n\t"
                "movel %2@+,%4\n\t"
                "addxl %4,%0\n\t"
                "movel %2@+,%4\n\t"
                "addxl %4,%0\n\t"
                "movel %2@+,%4\n\t"
                "addxl %4,%0\n\t"
                "movel %2@+,%4\n\t"
                "addxl %4,%0\n\t"
                "movel %2@+,%4\n\t"
                "addxl %4,%0\n\t"
                "movel %2@+,%4\n\t"
                "addxl %4,%0\n\t"
                "dbra %1,1b\n\t"
                "clrl %4\n\t"
                "addxl %4,%0\n\t"       /* add X bit */
                "clrw %1\n\t"
                "subql #1,%1\n\t"
                "jcc 1b\n"
             "2:\t"
                "movel %3,%1\n\t"       /* restore len from tmp1 */
                "andw #0x1c,%3\n\t"     /* number of rest longs */
                "jeq 4f\n\t"
                "lsrw #2,%3\n\t"
                "subqw #1,%3\n"
             "3:\t"
                /* loop for rest longs */
                "movel %2@+,%4\n\t"
                "addxl %4,%0\n\t"
                "dbra %3,3b\n\t"
                "clrl %4\n\t"
                "addxl %4,%0\n"         /* add X bit */
             "4:\t"
                /* now check for rest bytes that do not fit into longs */
                "andw #3,%1\n\t"
                "jeq 7f\n\t"
                "clrl %4\n\t"           /* clear tmp2 for rest bytes */
                "subqw #2,%1\n\t"
                "jlt 5f\n\t"
                "movew %2@+,%4\n\t"     /* have rest >= 2: get word */
                "swap %4\n\t"           /* into bits 16..31 */
                "tstw %1\n\t"           /* another byte? */
                "jeq 6f\n"
             "5:\t"
                "moveb %2@,%4\n\t"      /* have odd rest: get byte */
                "lslw #8,%4\n\t"        /* into bits 8..15; 16..31 untouched */
             "6:\t"
                "addl %4,%0\n\t"        /* now add rest long to sum */
                "clrl %4\n\t"
                "addxl %4,%0\n"         /* add X bit */
             "7:\t"
                : "=d" (sum), "=d" (len), "=a" (buff),
                  "=&d" (tmp1), "=&d" (tmp2)
                : "0" (sum), "1" (len), "2" (buff)
            );
        return(sum);
}
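
/*
 * For illustration only (not part of the original file): a minimal
 * portable C sketch of the arithmetic csum_partial performs.  It sums
 * the buffer as big-endian 16-bit words with end-around carry, whereas
 * the asm above keeps a full 32-bit running sum via addx; the two agree
 * modulo the final 16-bit fold.  The name do_csum_ref is hypothetical.
 */
static unsigned int do_csum_ref(const unsigned char *buff, int len,
                                unsigned int sum)
{
        while (len > 1) {
                sum += (buff[0] << 8) | buff[1];    /* next 16-bit word */
                sum = (sum & 0xffff) + (sum >> 16); /* fold carry back in */
                buff += 2;
                len -= 2;
        }
        if (len > 0)                                /* trailing odd byte */
                sum += buff[0] << 8;                /* goes in bits 8..15 */
        sum = (sum & 0xffff) + (sum >> 16);
        return sum;
}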


/*
 * copy from fs while checksumming, otherwise like csum_partial
 */
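
/*
 * "fs" refers to the user data space: the moves{b,w,l} instructions in
 * this routine read the source through the alternate address space
 * selected by the SFC register (set up by the kernel's set_fs()
 * machinery on classic m68k), while the destination is written with
 * ordinary moves into kernel space.
 */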

unsigned int
csum_partial_copy_fromuser(const char *src, char *dst, int len, int sum)
{
        unsigned long tmp1, tmp2;
        __asm__("movel %2,%4\n\t"
                "btst #1,%4\n\t"        /* Check alignment */
                "jeq 2f\n\t"
                "subql #2,%1\n\t"       /* buff%4==2: treat first word */
                "jgt 1f\n\t"
                "addql #2,%1\n\t"       /* len was == 2, treat only rest */
                "jra 4f\n"
             "1:\t"
                "movesw %2@+,%4\n\t"    /* add first word to sum */
                "addw %4,%0\n\t"
                "movew %4,%3@+\n\t"
                "clrl %4\n\t"
                "addxl %4,%0\n"         /* add X bit */
             "2:\t"
                /* unrolled loop for the main part: do 8 longs at once */
                "movel %1,%4\n\t"       /* save len in tmp1 */
                "lsrl #5,%1\n\t"        /* len/32 */
                "jeq 2f\n\t"            /* not enough... */
                "subql #1,%1\n"
             "1:\t"
                "movesl %2@+,%5\n\t"
                "addxl %5,%0\n\t"
                "movel %5,%3@+\n\t"
                "movesl %2@+,%5\n\t"
                "addxl %5,%0\n\t"
                "movel %5,%3@+\n\t"
                "movesl %2@+,%5\n\t"
                "addxl %5,%0\n\t"
                "movel %5,%3@+\n\t"
                "movesl %2@+,%5\n\t"
                "addxl %5,%0\n\t"
                "movel %5,%3@+\n\t"
                "movesl %2@+,%5\n\t"
                "addxl %5,%0\n\t"
                "movel %5,%3@+\n\t"
                "movesl %2@+,%5\n\t"
                "addxl %5,%0\n\t"
                "movel %5,%3@+\n\t"
                "movesl %2@+,%5\n\t"
                "addxl %5,%0\n\t"
                "movel %5,%3@+\n\t"
                "movesl %2@+,%5\n\t"
                "addxl %5,%0\n\t"
                "movel %5,%3@+\n\t"
                "dbra %1,1b\n\t"
                "clrl %5\n\t"
                "addxl %5,%0\n\t"       /* add X bit */
                "clrw %1\n\t"
                "subql #1,%1\n\t"
                "jcc 1b\n"
             "2:\t"
                "movel %4,%1\n\t"       /* restore len from tmp1 */
                "andw #0x1c,%4\n\t"     /* number of rest longs */
                "jeq 4f\n\t"
                "lsrw #2,%4\n\t"
                "subqw #1,%4\n"
             "3:\t"
                /* loop for rest longs */
                "movesl %2@+,%5\n\t"
                "addxl %5,%0\n\t"
                "movel %5,%3@+\n\t"
                "dbra %4,3b\n\t"
                "clrl %5\n\t"
                "addxl %5,%0\n"         /* add X bit */
             "4:\t"
                /* now check for rest bytes that do not fit into longs */
                "andw #3,%1\n\t"
                "jeq 7f\n\t"
                "clrl %5\n\t"           /* clear tmp2 for rest bytes */
                "subqw #2,%1\n\t"
                "jlt 5f\n\t"
                "movesw %2@+,%5\n\t"    /* have rest >= 2: get word */
                "movew %5,%3@+\n\t"
                "swap %5\n\t"           /* into bits 16..31 */
                "tstw %1\n\t"           /* another byte? */
                "jeq 6f\n"
             "5:\t"
                "movesb %2@,%5\n\t"     /* have odd rest: get byte */
                "moveb %5,%3@+\n\t"
                "lslw #8,%5\n\t"        /* into bits 8..15; 16..31 untouched */
             "6:\t"
                "addl %5,%0\n\t"        /* now add rest long to sum */
                "clrl %5\n\t"
                "addxl %5,%0\n"         /* add X bit */
             "7:\t"
                : "=d" (sum), "=d" (len), "=a" (src), "=a" (dst),
                  "=&d" (tmp1), "=&d" (tmp2)
                : "0" (sum), "1" (len), "2" (src), "3" (dst)
            );
        return(sum);
}
/*
 * copy from ds while checksumming, otherwise like csum_partial
 */

unsigned int
csum_partial_copy(const char *src, char *dst, int len, int sum)
{
        unsigned long tmp1, tmp2;
        __asm__("movel %2,%4\n\t"
                "btst #1,%4\n\t"        /* Check alignment */
                "jeq 2f\n\t"
                "subql #2,%1\n\t"       /* buff%4==2: treat first word */
                "jgt 1f\n\t"
                "addql #2,%1\n\t"       /* len was == 2, treat only rest */
                "jra 4f\n"
             "1:\t"
                "movew %2@+,%4\n\t"     /* add first word to sum */
                "addw %4,%0\n\t"
                "movew %4,%3@+\n\t"
                "clrl %4\n\t"
                "addxl %4,%0\n"         /* add X bit */
             "2:\t"
                /* unrolled loop for the main part: do 8 longs at once */
                "movel %1,%4\n\t"       /* save len in tmp1 */
                "lsrl #5,%1\n\t"        /* len/32 */
                "jeq 2f\n\t"            /* not enough... */
                "subql #1,%1\n"
             "1:\t"
                "movel %2@+,%5\n\t"
                "addxl %5,%0\n\t"
                "movel %5,%3@+\n\t"
                "movel %2@+,%5\n\t"
                "addxl %5,%0\n\t"
                "movel %5,%3@+\n\t"
                "movel %2@+,%5\n\t"
                "addxl %5,%0\n\t"
                "movel %5,%3@+\n\t"
                "movel %2@+,%5\n\t"
                "addxl %5,%0\n\t"
                "movel %5,%3@+\n\t"
                "movel %2@+,%5\n\t"
                "addxl %5,%0\n\t"
                "movel %5,%3@+\n\t"
                "movel %2@+,%5\n\t"
                "addxl %5,%0\n\t"
                "movel %5,%3@+\n\t"
                "movel %2@+,%5\n\t"
                "addxl %5,%0\n\t"
                "movel %5,%3@+\n\t"
                "movel %2@+,%5\n\t"
                "addxl %5,%0\n\t"
                "movel %5,%3@+\n\t"
                "dbra %1,1b\n\t"
                "clrl %5\n\t"
                "addxl %5,%0\n\t"       /* add X bit */
                "clrw %1\n\t"
                "subql #1,%1\n\t"
                "jcc 1b\n"
             "2:\t"
                "movel %4,%1\n\t"       /* restore len from tmp1 */
                "andw #0x1c,%4\n\t"     /* number of rest longs */
                "jeq 4f\n\t"
                "lsrw #2,%4\n\t"
                "subqw #1,%4\n"
             "3:\t"
                /* loop for rest longs */
                "movel %2@+,%5\n\t"
                "addxl %5,%0\n\t"
                "movel %5,%3@+\n\t"
                "dbra %4,3b\n\t"
                "clrl %5\n\t"
                "addxl %5,%0\n"         /* add X bit */
             "4:\t"
                /* now check for rest bytes that do not fit into longs */
                "andw #3,%1\n\t"
                "jeq 7f\n\t"
                "clrl %5\n\t"           /* clear tmp2 for rest bytes */
                "subqw #2,%1\n\t"
                "jlt 5f\n\t"
                "movew %2@+,%5\n\t"     /* have rest >= 2: get word */
                "movew %5,%3@+\n\t"
                "swap %5\n\t"           /* into bits 16..31 */
                "tstw %1\n\t"           /* another byte? */
                "jeq 6f\n"
             "5:\t"
                "moveb %2@,%5\n\t"      /* have odd rest: get byte */
                "moveb %5,%3@+\n\t"
                "lslw #8,%5\n"          /* into bits 8..15; 16..31 untouched */
             "6:\t"
                "addl %5,%0\n\t"        /* now add rest long to sum */
                "clrl %5\n\t"
                "addxl %5,%0\n"         /* add X bit */
             "7:\t"
                : "=d" (sum), "=d" (len), "=a" (src), "=a" (dst),
                  "=&d" (tmp1), "=&d" (tmp2)
                : "0" (sum), "1" (len), "2" (src), "3" (dst)
            );
        return(sum);
}
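
/*
 * Usage sketch (illustrative, not part of this file): callers pass the
 * 32-bit partial sum through a fold step to obtain the final 16-bit
 * Internet checksum.  fold16 below is a hypothetical stand-in for the
 * csum_fold() helper declared via <net/checksum.h>.
 */
static unsigned short fold16(unsigned int sum)
{
        sum = (sum & 0xffff) + (sum >> 16);   /* add high half into low half */
        sum = (sum & 0xffff) + (sum >> 16);   /* pick up a possible carry */
        return (unsigned short) ~sum;         /* one's complement */
}

/* e.g.:  unsigned short check = fold16(csum_partial(buf, len, 0)); */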
