OpenCores
URL https://opencores.org/ocsvn/or1k_old/or1k_old/trunk

Subversion Repositories or1k_old

File: or1k_old/trunk/rc203soc/sw/uClinux/include/asm-armnommu/checksum.h (rev 1782; last changed in rev 1633 by jcastillo)

#ifndef __ASM_ARM_CHECKSUM_H
#define __ASM_ARM_CHECKSUM_H

#ifndef __ASM_ARM_SEGMENT_H
#include <asm/segment.h>
#endif

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
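/*
 * Editorial sketch (not part of the original header): a minimal, portable C
 * version of the arithmetic csum_partial performs, assuming the usual
 * Internet-checksum (RFC 1071) scheme of accumulating 16-bit words into a
 * 32-bit sum that is folded later (e.g. by csum_fold below).  The name
 * example_csum_partial is hypothetical and only illustrates the interface.
 */
static inline unsigned int example_csum_partial(const unsigned char *buff,
                                                int len, unsigned int sum)
{
        const unsigned short *p = (const unsigned short *)buff;

        while (len > 1) {                       /* accumulate 16-bit words */
                sum += *p++;
                len -= 2;
        }
        if (len > 0)                            /* trailing odd byte */
                sum += *(const unsigned char *)p;
        return sum;                             /* unfolded 32-bit partial sum */
}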

/*
 * the same as csum_partial, but copies from src while it
 * checksums
 *
 * here even more important to align src and dst on a 32-bit (or even
 * better 64-bit) boundary
 */

unsigned int csum_partial_copy( const char *src, char *dst, int len, int sum);


/*
 * the same as csum_partial_copy, but copies from user space.
 *
 * here even more important to align src and dst on a 32-bit (or even
 * better 64-bit) boundary
 */

static __INLINE__ unsigned int csum_partial_copy_fromuser(const char *src, char *dst, int len, int sum)
{
        extern unsigned int __csum_partial_copy_fromuser(const char *src, char *dst, int len, int sum);

        if (IS_USER_SEG)
                return __csum_partial_copy_fromuser (src, dst, len, sum);
        else
                return csum_partial_copy (src, dst, len, sum);
}

/*
 *      This is a version of ip_compute_csum() optimized for IP headers,
 *      which always checksum on 4 octet boundaries.
 *
 *      By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by
 *      Arnt Gulbrandsen.
 *
 *      Converted to ARM by R.M.King
 */
static inline unsigned short ip_fast_csum(unsigned char * iph,
                                          unsigned int ihl) {
        unsigned int sum, tmp1;

    __asm__ __volatile__("
        sub     %2, %2, #5
        ldr     %0, [%1], #4
        ldr     %3, [%1], #4
        adds    %0, %0, %3
        ldr     %3, [%1], #4
        adcs    %0, %0, %3
        ldr     %3, [%1], #4
        adcs    %0, %0, %3
1:      ldr     %3, [%1], #4
        adcs    %0, %0, %3
        tst     %2, #15
        subne   %2, %2, #1
        bne     1b
        adc     %0, %0, #0
        adds    %0, %0, %0, lsl #16
        addcs   %0, %0, #0x10000
        mvn     %0, %0
        mov     %0, %0, lsr #16
         "
        : "=&r" (sum), "=&r" (iph), "=&r" (ihl), "=&r" (tmp1)
        : "1" (iph), "2" (ihl));
        return(sum);
}
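/*
 * Editorial sketch (not part of the original header): the usual way
 * ip_fast_csum is used on receive -- a well-formed IPv4 header, whose
 * checksum field is included in the sum, checksums to zero.  The helper
 * name example_ip_header_ok is hypothetical; ihl is the header length in
 * 32-bit words (>= 5), as in the function above.
 */
static inline int example_ip_header_ok(unsigned char *iph, unsigned int ihl)
{
        return ip_fast_csum(iph, ihl) == 0;     /* zero means the header is intact */
}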

/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
                                                   unsigned long daddr,
                                                   unsigned short len,
                                                   unsigned short proto,
                                                   unsigned int sum) {
    __asm__ __volatile__("
    adds        %0, %0, %1
    adcs        %0, %0, %4
    adcs        %0, %0, %5
    adc         %0, %0, #0
    adds        %0, %0, %0, lsl #16
    addcs       %0, %0, #0x10000
    mvn         %0, %0
    mov         %0, %0, lsr #16
        "
        : "=&r" (sum), "=&r" (saddr)
        : "0" (daddr), "1"(saddr), "r"((ntohs(len)<<16)+proto*256), "r"(sum));
        return((unsigned short)sum);
}
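/*
 * Editorial sketch (not part of the original header): the typical call
 * sequence for a UDP datagram -- checksum the UDP header plus payload with
 * csum_partial, then mix in the pseudo-header (addresses, length, protocol)
 * with csum_tcpudp_magic.  The helper name example_udp_checksum and the
 * literal 17 (the IP protocol number for UDP) are illustrative assumptions.
 */
static inline unsigned short example_udp_checksum(unsigned long saddr,
                                                  unsigned long daddr,
                                                  const unsigned char *udp,
                                                  unsigned short udp_len)
{
        unsigned int partial = csum_partial(udp, udp_len, 0);

        return csum_tcpudp_magic(saddr, daddr, udp_len, 17, partial);
}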

/*
 *      Fold a partial checksum without adding pseudo headers
 */
static inline unsigned int csum_fold(unsigned int sum)
{
    __asm__ __volatile__("
    adds        %0, %0, %0, lsl #16
    addcss      %0, %0, #0x10000
    addcs       %0, %0, #0x10000
    mvn         %0, %0
    mov         %0, %0, lsr #16
        "
        : "=r" (sum)
        : "0" (sum));
        return sum;
}
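/*
 * Editorial sketch (not part of the original header): a portable C
 * equivalent of the end-around-carry fold above, assuming a 32-bit
 * unsigned int.  The name example_csum_fold is hypothetical.
 */
static inline unsigned short example_csum_fold(unsigned int sum)
{
        sum = (sum & 0xffff) + (sum >> 16);     /* fold the high half into the low half */
        sum = (sum & 0xffff) + (sum >> 16);     /* absorb any carry from the first fold */
        return (unsigned short)~sum;            /* ones'-complement the 16-bit result */
}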
128
 
129
 
130
/*
131
 * this routine is used for miscellaneous IP-like checksums, mainly
132
 * in icmp.c
133
 */
134
 
135
static inline unsigned short ip_compute_csum(unsigned char * buff, int len) {
136
    unsigned int sum;
137
 
138
    __asm__ __volatile__("
139
    adds        %0, %0, %0, lsl #16
140
    addcs       %0, %0, #0x10000
141
    mvn         %0, %0
142
    mov         %0, %0, lsr #16
143
        "
144
        : "=r"(sum)
145
        : "0" (csum_partial(buff, len, 0)));
146
        return(sum);
147
}
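/*
 * Editorial sketch (not part of the original header): typical use of
 * ip_compute_csum for an ICMP message -- clear the checksum field, sum the
 * whole message, and store the already-complemented result back.  The
 * helper name is hypothetical; offset 2 is the checksum field of the
 * standard ICMP header, which this file does not define.
 */
static inline void example_icmp_set_checksum(unsigned char *icmp, int len)
{
        unsigned short *csum_field = (unsigned short *)(icmp + 2);

        *csum_field = 0;                          /* checksum is computed over a zeroed field */
        *csum_field = ip_compute_csum(icmp, len); /* result is stored as-is (already complemented) */
}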

#endif
