OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

[/] [or1k/] [trunk/] [linux/] [linux-2.4/] [net/] [ipv6/] [reassembly.c] - Blame information for rev 1765

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 1275 phoenix
/*
2
 *      IPv6 fragment reassembly
3
 *      Linux INET6 implementation
4
 *
5
 *      Authors:
6
 *      Pedro Roque             <roque@di.fc.ul.pt>
7
 *
8
 *      $Id: reassembly.c,v 1.1.1.1 2004-04-15 01:14:48 phoenix Exp $
9
 *
10
 *      Based on: net/ipv4/ip_fragment.c
11
 *
12
 *      This program is free software; you can redistribute it and/or
13
 *      modify it under the terms of the GNU General Public License
14
 *      as published by the Free Software Foundation; either version
15
 *      2 of the License, or (at your option) any later version.
16
 */
17
 
18
/*
19
 *      Fixes:
20
 *      Andi Kleen      Make it work with multiple hosts.
21
 *                      More RFC compliance.
22
 *
23
 *      Horst von Brand Add missing #include <linux/string.h>
24
 *      Alexey Kuznetsov        SMP races, threading, cleanup.
25
 *      Patrick McHardy         LRU queue of frag heads for evictor.
26
 */
27
#include <linux/config.h>
28
#include <linux/errno.h>
29
#include <linux/types.h>
30
#include <linux/string.h>
31
#include <linux/socket.h>
32
#include <linux/sockios.h>
33
#include <linux/sched.h>
34
#include <linux/list.h>
35
#include <linux/net.h>
36
#include <linux/netdevice.h>
37
#include <linux/in6.h>
38
#include <linux/ipv6.h>
39
#include <linux/icmpv6.h>
40
#include <linux/random.h>
41
#include <linux/jhash.h>
42
 
43
#include <net/sock.h>
44
#include <net/snmp.h>
45
 
46
#include <net/ipv6.h>
47
#include <net/protocol.h>
48
#include <net/transp_v6.h>
49
#include <net/rawv6.h>
50
#include <net/ndisc.h>
51
#include <net/addrconf.h>
52
 
53
/* Reassembly memory thresholds (bytes): once usage exceeds high_thresh,
 * the evictor reclaims queues until usage drops below low_thresh.
 * Both are tunable via sysctl. */
int sysctl_ip6frag_high_thresh = 256*1024;
int sysctl_ip6frag_low_thresh = 192*1024;

/* Per-queue reassembly timeout, in jiffies. */
int sysctl_ip6frag_time = IPV6_FRAG_TIMEOUT;

/* Private control-block layout used while an skb sits on a fragment
 * queue.  Lives inside skb->cb, so it must start with the generic
 * inet6_skb_parm block and must not exceed sizeof(skb->cb). */
struct ip6frag_skb_cb
{
        struct inet6_skb_parm   h;
        int                     offset;         /* fragment offset of this skb */
};

#define FRAG6_CB(skb)   ((struct ip6frag_skb_cb*)((skb)->cb))
65
 
66
 
67
/*
 *      Equivalent of ipv4 struct ipq
 */

struct frag_queue
{
        struct frag_queue       *next;          /* hash chain link      */
        struct list_head lru_list;              /* lru list member      */

        __u32                   id;             /* fragment id          */
        struct in6_addr         saddr;
        struct in6_addr         daddr;

        spinlock_t              lock;
        atomic_t                refcnt;
        struct timer_list       timer;          /* expire timer         */
        struct sk_buff          *fragments;     /* list sorted by offset */
        int                     len;            /* total datagram length */
        int                     meat;           /* bytes received so far */
        int                     iif;            /* ifindex of last arriving device */
        struct timeval          stamp;          /* timestamp of last fragment */
        unsigned int            csum;
        __u8                    last_in;        /* has first/last segment arrived? */
#define COMPLETE                4
#define FIRST_IN                2
#define LAST_IN                 1
        __u16                   nhoffset;       /* nexthdr offset from first fragment */
        struct frag_queue       **pprev;        /* back-pointer into hash chain */
};
96
 
97
/* Hash table. */

#define IP6Q_HASHSZ     64

/* Buckets and LRU list, protected by ip6_frag_lock. */
static struct frag_queue *ip6_frag_hash[IP6Q_HASHSZ];
static rwlock_t ip6_frag_lock = RW_LOCK_UNLOCKED;
/* Random seed mixed into the bucket hash; periodically rekeyed. */
static u32 ip6_frag_hash_rnd;
static LIST_HEAD(ip6_frag_lru_list);
int ip6_frag_nqueues = 0;
106
 
107
/* Remove fq from its hash chain and the LRU list, and drop the queue
 * count.  Caller must hold ip6_frag_lock for writing. */
static __inline__ void __fq_unlink(struct frag_queue *fq)
{
        if(fq->next)
                fq->next->pprev = fq->pprev;
        *fq->pprev = fq->next;
        list_del(&fq->lru_list);
        ip6_frag_nqueues--;
}
115
 
116
/* Locked wrapper around __fq_unlink(). */
static __inline__ void fq_unlink(struct frag_queue *fq)
{
        write_lock(&ip6_frag_lock);
        __fq_unlink(fq);
        write_unlock(&ip6_frag_lock);
}
122
 
123
/* Hash (id, saddr, daddr) into a bucket index via open-coded Jenkins
 * mixing, salted with ip6_frag_hash_rnd to resist collision attacks. */
static unsigned int ip6qhashfn(u32 id, struct in6_addr *saddr,
                               struct in6_addr *daddr)
{
        u32 a, b, c;

        a = saddr->s6_addr32[0];
        b = saddr->s6_addr32[1];
        c = saddr->s6_addr32[2];

        a += JHASH_GOLDEN_RATIO;
        b += JHASH_GOLDEN_RATIO;
        c += ip6_frag_hash_rnd;
        __jhash_mix(a, b, c);

        a += saddr->s6_addr32[3];
        b += daddr->s6_addr32[0];
        c += daddr->s6_addr32[1];
        __jhash_mix(a, b, c);

        a += daddr->s6_addr32[2];
        b += daddr->s6_addr32[3];
        c += id;
        __jhash_mix(a, b, c);

        /* IP6Q_HASHSZ is a power of two, so this masks to a valid index. */
        return c & (IP6Q_HASHSZ - 1);
}
149
 
150
static struct timer_list ip6_frag_secret_timer;
/* Interval between hash-seed rekeys. */
static int ip6_frag_secret_interval = 10 * 60 * HZ;

/* Timer callback: pick a fresh random hash seed and migrate every
 * queued entry whose bucket changes under the new hash.  Holding
 * ip6_frag_lock for writing excludes all lookups during the walk. */
static void ip6_frag_secret_rebuild(unsigned long dummy)
{
        unsigned long now = jiffies;
        int i;

        write_lock(&ip6_frag_lock);
        get_random_bytes(&ip6_frag_hash_rnd, sizeof(u32));
        for (i = 0; i < IP6Q_HASHSZ; i++) {
                struct frag_queue *q;

                q = ip6_frag_hash[i];
                while (q) {
                        /* Save the chain successor before relinking q. */
                        struct frag_queue *next = q->next;
                        unsigned int hval = ip6qhashfn(q->id,
                                                       &q->saddr,
                                                       &q->daddr);

                        if (hval != i) {
                                /* Unlink. */
                                if (q->next)
                                        q->next->pprev = q->pprev;
                                *q->pprev = q->next;

                                /* Relink to new hash chain. */
                                if ((q->next = ip6_frag_hash[hval]) != NULL)
                                        q->next->pprev = &q->next;
                                ip6_frag_hash[hval] = q;
                                q->pprev = &ip6_frag_hash[hval];
                        }

                        q = next;
                }
        }
        write_unlock(&ip6_frag_lock);

        mod_timer(&ip6_frag_secret_timer, now + ip6_frag_secret_interval);
}
190
 
191
/* Total memory (bytes) currently charged to IPv6 reassembly. */
atomic_t ip6_frag_mem = ATOMIC_INIT(0);

/* Memory Tracking Functions. */

/* Free a fragment skb and uncharge its truesize from ip6_frag_mem. */
static inline void frag_kfree_skb(struct sk_buff *skb)
{
        atomic_sub(skb->truesize, &ip6_frag_mem);
        kfree_skb(skb);
}
199
 
200
/* Free a frag_queue and uncharge its size from ip6_frag_mem. */
static inline void frag_free_queue(struct frag_queue *fq)
{
        atomic_sub(sizeof(struct frag_queue), &ip6_frag_mem);
        kfree(fq);
}
205
 
206
static inline struct frag_queue *frag_alloc_queue(void)
207
{
208
        struct frag_queue *fq = kmalloc(sizeof(struct frag_queue), GFP_ATOMIC);
209
 
210
        if(!fq)
211
                return NULL;
212
        atomic_add(sizeof(struct frag_queue), &ip6_frag_mem);
213
        return fq;
214
}
215
 
216
/* Destruction primitives. */

/* Complete destruction of fq.  Called only once the last reference is
 * gone (see fq_put); the queue must already be marked COMPLETE and its
 * timer must no longer be pending. */
static void ip6_frag_destroy(struct frag_queue *fq)
{
        struct sk_buff *fp;

        BUG_TRAP(fq->last_in&COMPLETE);
        BUG_TRAP(del_timer(&fq->timer) == 0);

        /* Release all fragment data. */
        fp = fq->fragments;
        while (fp) {
                struct sk_buff *xp = fp->next;

                frag_kfree_skb(fp);
                fp = xp;
        }

        frag_free_queue(fq);
}
237
 
238
static __inline__ void fq_put(struct frag_queue *fq)
239
{
240
        if (atomic_dec_and_test(&fq->refcnt))
241
                ip6_frag_destroy(fq);
242
}
243
 
244
/* Kill fq entry. It is not destroyed immediately,
 * because caller (and someone more) holds reference count.
 */
static __inline__ void fq_kill(struct frag_queue *fq)
{
        /* A pending timer holds a reference; drop it if we cancel one. */
        if (del_timer(&fq->timer))
                atomic_dec(&fq->refcnt);

        if (!(fq->last_in & COMPLETE)) {
                fq_unlink(fq);
                atomic_dec(&fq->refcnt);        /* drop the hash table's reference */
                fq->last_in |= COMPLETE;
        }
}
258
 
259
/* Reclaim reassembly memory by killing queues from the cold end of the
 * LRU list until usage falls below the low threshold.  Each victim is
 * pinned with a reference before the lock is dropped, then killed
 * under its own spinlock. */
static void ip6_evictor(void)
{
        struct frag_queue *fq;
        struct list_head *tmp;

        for(;;) {
                if (atomic_read(&ip6_frag_mem) <= sysctl_ip6frag_low_thresh)
                        return;
                read_lock(&ip6_frag_lock);
                if (list_empty(&ip6_frag_lru_list)) {
                        read_unlock(&ip6_frag_lock);
                        return;
                }
                tmp = ip6_frag_lru_list.next;
                fq = list_entry(tmp, struct frag_queue, lru_list);
                atomic_inc(&fq->refcnt);
                read_unlock(&ip6_frag_lock);

                spin_lock(&fq->lock);
                /* Someone may have completed/killed it meanwhile. */
                if (!(fq->last_in&COMPLETE))
                        fq_kill(fq);
                spin_unlock(&fq->lock);

                fq_put(fq);
                IP6_INC_STATS_BH(Ip6ReasmFails);
        }
}
286
 
287
/* Timer callback: the reassembly timeout elapsed before all fragments
 * arrived.  Kill the queue and, per RFC 2460, send a Time Exceeded
 * (fragment reassembly time exceeded) if the first fragment was seen. */
static void ip6_frag_expire(unsigned long data)
{
        struct frag_queue *fq = (struct frag_queue *) data;

        spin_lock(&fq->lock);

        if (fq->last_in & COMPLETE)
                goto out;

        fq_kill(fq);

        IP6_INC_STATS_BH(Ip6ReasmTimeout);
        IP6_INC_STATS_BH(Ip6ReasmFails);

        /* Send error only if the first segment arrived. */
        if (fq->last_in&FIRST_IN && fq->fragments) {
                struct net_device *dev = dev_get_by_index(fq->iif);

                /*
                   But use as source device on which LAST ARRIVED
                   segment was received. And do not use fq->dev
                   pointer directly, device might already disappeared.
                 */
                if (dev) {
                        fq->fragments->dev = dev;
                        icmpv6_send(fq->fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0,
                                    dev);
                        dev_put(dev);
                }
        }
out:
        spin_unlock(&fq->lock);
        fq_put(fq);     /* the timer's reference */
}
321
 
322
/* Creation primitives. */


/* Insert a freshly created queue into the hash bucket.  On SMP another
 * CPU may have raced us and inserted an equivalent queue; in that case
 * the existing one is returned and fq_in is discarded.  Consumes the
 * caller's reference on fq_in either way. */
static struct frag_queue *ip6_frag_intern(unsigned int hash,
                                          struct frag_queue *fq_in)
{
        struct frag_queue *fq;

        write_lock(&ip6_frag_lock);
#ifdef CONFIG_SMP
        /* Re-check under the write lock for a concurrent insertion. */
        for (fq = ip6_frag_hash[hash]; fq; fq = fq->next) {
                if (fq->id == fq_in->id &&
                    !ipv6_addr_cmp(&fq_in->saddr, &fq->saddr) &&
                    !ipv6_addr_cmp(&fq_in->daddr, &fq->daddr)) {
                        atomic_inc(&fq->refcnt);
                        write_unlock(&ip6_frag_lock);
                        fq_in->last_in |= COMPLETE;
                        fq_put(fq_in);
                        return fq;
                }
        }
#endif
        fq = fq_in;

        /* Arm the expire timer; it holds its own reference. */
        if (!mod_timer(&fq->timer, jiffies + sysctl_ip6frag_time))
                atomic_inc(&fq->refcnt);

        /* Reference held by the hash table itself. */
        atomic_inc(&fq->refcnt);
        if((fq->next = ip6_frag_hash[hash]) != NULL)
                fq->next->pprev = &fq->next;
        ip6_frag_hash[hash] = fq;
        fq->pprev = &ip6_frag_hash[hash];
        INIT_LIST_HEAD(&fq->lru_list);
        list_add_tail(&fq->lru_list, &ip6_frag_lru_list);
        ip6_frag_nqueues++;
        write_unlock(&ip6_frag_lock);
        return fq;
}
360
 
361
 
362
/* Allocate and initialize a new reassembly queue for (id, src, dst),
 * then intern it into the hash.  Returns NULL on allocation failure. */
static struct frag_queue *
ip6_frag_create(unsigned int hash, u32 id, struct in6_addr *src, struct in6_addr *dst)
{
        struct frag_queue *fq;

        if ((fq = frag_alloc_queue()) == NULL)
                goto oom;

        memset(fq, 0, sizeof(struct frag_queue));

        fq->id = id;
        ipv6_addr_copy(&fq->saddr, src);
        ipv6_addr_copy(&fq->daddr, dst);

        /* init_timer has been done by the memset */
        fq->timer.function = ip6_frag_expire;
        fq->timer.data = (long) fq;
        fq->lock = SPIN_LOCK_UNLOCKED;
        atomic_set(&fq->refcnt, 1);

        return ip6_frag_intern(hash, fq);

oom:
        IP6_INC_STATS_BH(Ip6ReasmFails);
        return NULL;
}
388
 
389
/* Look up the reassembly queue for (id, src, dst), taking a reference
 * on it; create a new queue if none exists.  Returns NULL only when
 * creation fails for lack of memory. */
static __inline__ struct frag_queue *
fq_find(u32 id, struct in6_addr *src, struct in6_addr *dst)
{
        struct frag_queue *fq;
        unsigned int hash = ip6qhashfn(id, src, dst);

        read_lock(&ip6_frag_lock);
        for(fq = ip6_frag_hash[hash]; fq; fq = fq->next) {
                if (fq->id == id &&
                    !ipv6_addr_cmp(src, &fq->saddr) &&
                    !ipv6_addr_cmp(dst, &fq->daddr)) {
                        atomic_inc(&fq->refcnt);
                        read_unlock(&ip6_frag_lock);
                        return fq;
                }
        }
        read_unlock(&ip6_frag_lock);

        return ip6_frag_create(hash, id, src, dst);
}
409
 
410
 
411
/* Add one fragment skb to queue fq, keeping fq->fragments sorted by
 * offset and eliminating overlaps with neighbouring fragments.
 * Consumes the skb (it is either linked into the queue or freed).
 * Called with fq->lock held by the caller. */
static void ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
                           struct frag_hdr *fhdr, int nhoff)
{
        struct sk_buff *prev, *next;
        int offset, end;

        if (fq->last_in & COMPLETE)
                goto err;

        /* Fragment offset is in units of 8 bytes, low 3 bits are flags. */
        offset = ntohs(fhdr->frag_off) & ~0x7;
        end = offset + (ntohs(skb->nh.ipv6h->payload_len) -
                        ((u8 *) (fhdr + 1) - (u8 *) (skb->nh.ipv6h + 1)));

        if ((unsigned int)end >= 65536) {
                icmpv6_param_prob(skb,ICMPV6_HDR_FIELD, (u8*)&fhdr->frag_off - skb->nh.raw);
                return;
        }

        /* Exclude the headers preceding the payload from a HW checksum. */
        if (skb->ip_summed == CHECKSUM_HW)
                skb->csum = csum_sub(skb->csum,
                                     csum_partial(skb->nh.raw, (u8*)(fhdr+1)-skb->nh.raw, 0));

        /* Is this the final fragment? */
        if (!(fhdr->frag_off & htons(0x0001))) {
                /* If we already have some bits beyond end
                 * or have different end, the segment is corrupted.
                 */
                if (end < fq->len ||
                    ((fq->last_in & LAST_IN) && end != fq->len))
                        goto err;
                fq->last_in |= LAST_IN;
                fq->len = end;
        } else {
                /* Check if the fragment is rounded to 8 bytes.
                 * Required by the RFC.
                 */
                if (end & 0x7) {
                        /* RFC2460 says always send parameter problem in
                         * this case. -DaveM
                         */
                        icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
                                          offsetof(struct ipv6hdr, payload_len));
                        return;
                }
                if (end > fq->len) {
                        /* Some bits beyond end -> corruption. */
                        if (fq->last_in & LAST_IN)
                                goto err;
                        fq->len = end;
                }
        }

        /* Zero-length data fragment: drop it. */
        if (end == offset)
                goto err;

        /* Point into the IP datagram 'data' part. */
        if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
                goto err;
        if (end-offset < skb->len) {
                if (pskb_trim(skb, end - offset))
                        goto err;
                if (skb->ip_summed != CHECKSUM_UNNECESSARY)
                        skb->ip_summed = CHECKSUM_NONE;
        }

        /* Find out which fragments are in front and at the back of us
         * in the chain of fragments so far.  We must know where to put
         * this fragment, right?
         */
        prev = NULL;
        for(next = fq->fragments; next != NULL; next = next->next) {
                if (FRAG6_CB(next)->offset >= offset)
                        break;  /* bingo! */
                prev = next;
        }

        /* We found where to put this one.  Check for overlap with
         * preceding fragment, and, if needed, align things so that
         * any overlaps are eliminated.
         */
        if (prev) {
                int i = (FRAG6_CB(prev)->offset + prev->len) - offset;

                if (i > 0) {
                        /* Trim the overlapping head of our fragment. */
                        offset += i;
                        if (end <= offset)
                                goto err;
                        if (!pskb_pull(skb, i))
                                goto err;
                        if (skb->ip_summed != CHECKSUM_UNNECESSARY)
                                skb->ip_summed = CHECKSUM_NONE;
                }
        }

        /* Look for overlap with succeeding segments.
         * If we can merge fragments, do it.
         */
        while (next && FRAG6_CB(next)->offset < end) {
                int i = end - FRAG6_CB(next)->offset; /* overlap is 'i' bytes */

                if (i < next->len) {
                        /* Eat head of the next overlapped fragment
                         * and leave the loop. The next ones cannot overlap.
                         */
                        if (!pskb_pull(next, i))
                                goto err;
                        FRAG6_CB(next)->offset += i;    /* next fragment */
                        fq->meat -= i;
                        if (next->ip_summed != CHECKSUM_UNNECESSARY)
                                next->ip_summed = CHECKSUM_NONE;
                        break;
                } else {
                        struct sk_buff *free_it = next;

                        /* Old fragmnet is completely overridden with
                         * new one drop it.
                         */
                        next = next->next;

                        if (prev)
                                prev->next = next;
                        else
                                fq->fragments = next;

                        fq->meat -= free_it->len;
                        frag_kfree_skb(free_it);
                }
        }

        FRAG6_CB(skb)->offset = offset;

        /* Insert this fragment in the chain of fragments. */
        skb->next = next;
        if (prev)
                prev->next = skb;
        else
                fq->fragments = skb;

        if (skb->dev)
                fq->iif = skb->dev->ifindex;
        skb->dev = NULL;
        fq->stamp = skb->stamp;
        fq->meat += skb->len;
        atomic_add(skb->truesize, &ip6_frag_mem);

        /* The first fragment.
         * nhoffset is obtained from the first fragment, of course.
         */
        if (offset == 0) {
                fq->nhoffset = nhoff;
                fq->last_in |= FIRST_IN;
        }
        /* Mark this queue as recently used. */
        write_lock(&ip6_frag_lock);
        list_move_tail(&fq->lru_list, &ip6_frag_lru_list);
        write_unlock(&ip6_frag_lock);
        return;

err:
        kfree_skb(skb);
}
571
 
572
/*
573
 *      Check if this packet is complete.
574
 *      Returns NULL on failure by any reason, and pointer
575
 *      to current nexthdr field in reassembled frame.
576
 *
577
 *      It is called with locked fq, and caller must check that
578
 *      queue is eligible for reassembly i.e. it is not COMPLETE,
579
 *      the last and the first frames arrived and all the bits are here.
580
 */
581
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff **skb_in,
582
                          struct net_device *dev)
583
{
584
        struct sk_buff *fp, *head = fq->fragments;
585
        int    remove_fraghdr = 0;
586
        int    payload_len;
587
        int    nhoff;
588
 
589
        fq_kill(fq);
590
 
591
        BUG_TRAP(head != NULL);
592
        BUG_TRAP(FRAG6_CB(head)->offset == 0);
593
 
594
        /* Unfragmented part is taken from the first segment. */
595
        payload_len = (head->data - head->nh.raw) - sizeof(struct ipv6hdr) + fq->len;
596
        nhoff = head->h.raw - head->nh.raw;
597
 
598
        if (payload_len > 65535) {
599
                payload_len -= 8;
600
                if (payload_len > 65535)
601
                        goto out_oversize;
602
                remove_fraghdr = 1;
603
        }
604
 
605
        /* Head of list must not be cloned. */
606
        if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
607
                goto out_oom;
608
 
609
        /* If the first fragment is fragmented itself, we split
610
         * it to two chunks: the first with data and paged part
611
         * and the second, holding only fragments. */
612
        if (skb_shinfo(head)->frag_list) {
613
                struct sk_buff *clone;
614
                int i, plen = 0;
615
 
616
                if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
617
                        goto out_oom;
618
                clone->next = head->next;
619
                head->next = clone;
620
                skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
621
                skb_shinfo(head)->frag_list = NULL;
622
                for (i=0; i<skb_shinfo(head)->nr_frags; i++)
623
                        plen += skb_shinfo(head)->frags[i].size;
624
                clone->len = clone->data_len = head->data_len - plen;
625
                head->data_len -= clone->len;
626
                head->len -= clone->len;
627
                clone->csum = 0;
628
                clone->ip_summed = head->ip_summed;
629
                atomic_add(clone->truesize, &ip6_frag_mem);
630
        }
631
 
632
        /* Normally we do not remove frag header from datagram, but
633
         * we have to do this and to relocate header, when payload
634
         * is > 65535-8. */
635
        if (remove_fraghdr) {
636
                nhoff = fq->nhoffset;
637
                head->nh.raw[nhoff] = head->h.raw[0];
638
                memmove(head->head+8, head->head, (head->data-head->head)-8);
639
                head->mac.raw += 8;
640
                head->nh.raw += 8;
641
        } else {
642
                ((struct frag_hdr*)head->h.raw)->frag_off = 0;
643
        }
644
 
645
        skb_shinfo(head)->frag_list = head->next;
646
        head->h.raw = head->data;
647
        skb_push(head, head->data - head->nh.raw);
648
        atomic_sub(head->truesize, &ip6_frag_mem);
649
 
650
        for (fp=head->next; fp; fp = fp->next) {
651
                head->data_len += fp->len;
652
                head->len += fp->len;
653
                if (head->ip_summed != fp->ip_summed)
654
                        head->ip_summed = CHECKSUM_NONE;
655
                else if (head->ip_summed == CHECKSUM_HW)
656
                        head->csum = csum_add(head->csum, fp->csum);
657
                head->truesize += fp->truesize;
658
                atomic_sub(fp->truesize, &ip6_frag_mem);
659
        }
660
 
661
        head->next = NULL;
662
        head->dev = dev;
663
        head->stamp = fq->stamp;
664
        head->nh.ipv6h->payload_len = ntohs(payload_len);
665
 
666
        *skb_in = head;
667
 
668
        /* Yes, and fold redundant checksum back. 8) */
669
        if (head->ip_summed == CHECKSUM_HW)
670
                head->csum = csum_partial(head->nh.raw, head->h.raw-head->nh.raw, head->csum);
671
 
672
        IP6_INC_STATS_BH(Ip6ReasmOKs);
673
        fq->fragments = NULL;
674
        return nhoff;
675
 
676
out_oversize:
677
        if (net_ratelimit())
678
                printk(KERN_DEBUG "ip6_frag_reasm: payload len = %d\n", payload_len);
679
        goto out_fail;
680
out_oom:
681
        if (net_ratelimit())
682
                printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n");
683
out_fail:
684
        IP6_INC_STATS_BH(Ip6ReasmFails);
685
        return -1;
686
}
687
 
688
/* Entry point for the IPv6 Fragment extension header.  skb->h.raw
 * points at the fragment header; nhoff is the offset of the preceding
 * nexthdr byte.  Returns the nexthdr offset of the (possibly
 * reassembled) packet stored through skbp, or -1 if the skb was
 * queued, dropped, or rejected. */
int ipv6_reassembly(struct sk_buff **skbp, int nhoff)
{
        struct sk_buff *skb = *skbp;
        struct net_device *dev = skb->dev;
        struct frag_hdr *fhdr;
        struct frag_queue *fq;
        struct ipv6hdr *hdr;

        hdr = skb->nh.ipv6h;

        IP6_INC_STATS_BH(Ip6ReasmReqds);

        /* Jumbo payload inhibits frag. header */
        if (hdr->payload_len==0) {
                icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb->h.raw-skb->nh.raw);
                return -1;
        }
        if (!pskb_may_pull(skb, (skb->h.raw-skb->data)+sizeof(struct frag_hdr))) {
                icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb->h.raw-skb->nh.raw);
                return -1;
        }

        /* pskb_may_pull may have reallocated; reload header pointers. */
        hdr = skb->nh.ipv6h;
        fhdr = (struct frag_hdr *)skb->h.raw;

        /* 0xFFF9 (network order) covers the offset field and the M bit. */
        if (!(fhdr->frag_off & htons(0xFFF9))) {
                /* It is not a fragmented frame */
                skb->h.raw += sizeof(struct frag_hdr);
                IP6_INC_STATS_BH(Ip6ReasmOKs);

                return (u8*)fhdr - skb->nh.raw;
        }

        /* Reclaim memory before queueing if over the high threshold. */
        if (atomic_read(&ip6_frag_mem) > sysctl_ip6frag_high_thresh)
                ip6_evictor();

        if ((fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr)) != NULL) {
                int ret = -1;

                spin_lock(&fq->lock);

                /* Consumes skb: it is linked into fq or freed. */
                ip6_frag_queue(fq, skb, fhdr, nhoff);

                if (fq->last_in == (FIRST_IN|LAST_IN) &&
                    fq->meat == fq->len)
                        ret = ip6_frag_reasm(fq, skbp, dev);

                spin_unlock(&fq->lock);
                fq_put(fq);
                return ret;
        }

        IP6_INC_STATS_BH(Ip6ReasmFails);
        kfree_skb(skb);
        return -1;
}
744
 
745
/* Boot-time initialization: seed the fragment hash from weakly random
 * system state and start the periodic seed-rekey timer. */
void __init ipv6_frag_init(void)
{
        ip6_frag_hash_rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^
                                   (jiffies ^ (jiffies >> 6)));

        init_timer(&ip6_frag_secret_timer);
        ip6_frag_secret_timer.function = ip6_frag_secret_rebuild;
        ip6_frag_secret_timer.expires = jiffies + ip6_frag_secret_interval;
        add_timer(&ip6_frag_secret_timer);
}

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.