/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              The IP fragmentation functionality.
 *
 * Version:     $Id: ip_fragment.c,v 1.1.1.1 2004-04-15 01:13:22 phoenix Exp $
 *
 * Authors:     Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG>
 *              Alan Cox <Alan.Cox@linux.org>
 *
 * Fixes:
 *              Alan Cox        :       Split from ip.c, see ip_input.c for history.
 *              David S. Miller :       Begin massive cleanup...
 *              Andi Kleen      :       Add sysctls.
 *              xxxx            :       Overlapfrag bug.
 *              Ultima          :       ip_expire() kernel panic.
 *              Bill Hawes      :       Frag accounting and evictor fixes.
 *              John McDonald   :       0 length frag bug.
 *              Alexey Kuznetsov:       SMP races, threading, cleanup.
 *              Patrick McHardy :       LRU queue of frag heads for evictor.
 */
 
#include <linux/config.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/netdevice.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/inet.h>
#include <linux/netfilter_ipv4.h>
 
/* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6
 * code now. If you change something here, _PLEASE_ update ipv6/reassembly.c
 * as well. Or notify me, at least. --ANK
 */
 
/* Fragment cache limits. We will commit 256K at one time. Should we
 * cross that limit, we will prune down to 192K. This should cope with
 * even the most extreme cases without allowing an attacker to measurably
 * harm machine performance.
 */
int sysctl_ipfrag_high_thresh = 256*1024;
int sysctl_ipfrag_low_thresh = 192*1024;
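/* Both thresholds are exported as sysctls, tunable at run time via
 * /proc/sys/net/ipv4/ipfrag_high_thresh and ipfrag_low_thresh.
 */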
 
/* Important NOTE! Fragment queue must be destroyed before MSL expires.
 * RFC 791 wrongly proposes to prolong the timer by the TTL on each
 * fragment arrival.
 */
int sysctl_ipfrag_time = IP_FRAG_TIME;
 
struct ipfrag_skb_cb
{
        struct inet_skb_parm    h;
        int                     offset;
};

#define FRAG_CB(skb)    ((struct ipfrag_skb_cb*)((skb)->cb))
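/* FRAG_CB() overlays ipfrag_skb_cb on the skb control buffer, so each
 * queued fragment carries its data offset within the original datagram.
 */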
 
/* Describe an entry in the "incomplete datagrams" queue. */
struct ipq {
        struct ipq      *next;          /* linked list pointers                 */
        struct list_head lru_list;      /* lru list member                      */
        u32             saddr;
        u32             daddr;
        u16             id;
        u8              protocol;
        u8              last_in;        /* COMPLETE/FIRST_IN/LAST_IN flags      */
#define COMPLETE                4
#define FIRST_IN                2
#define LAST_IN                 1

        struct sk_buff  *fragments;     /* linked list of received fragments    */
        int             len;            /* total length of original datagram    */
        int             meat;           /* payload bytes received so far        */
        spinlock_t      lock;
        atomic_t        refcnt;
        struct timer_list timer;        /* when will this queue expire?         */
        struct ipq      **pprev;        /* back-pointer in the hash chain       */
        int             iif;            /* ifindex of last fragment's device    */
        struct timeval  stamp;          /* arrival time of last fragment        */
};
 
/* Hash table. */

#define IPQ_HASHSZ      64

/* Per-bucket lock is easy to add now. */
static struct ipq *ipq_hash[IPQ_HASHSZ];
static rwlock_t ipfrag_lock = RW_LOCK_UNLOCKED;
static u32 ipfrag_hash_rnd;
static LIST_HEAD(ipq_lru_list);
int ip_frag_nqueues = 0;
 
static __inline__ void __ipq_unlink(struct ipq *qp)
{
        if(qp->next)
                qp->next->pprev = qp->pprev;
        *qp->pprev = qp->next;
        list_del(&qp->lru_list);
        ip_frag_nqueues--;
}
 
static __inline__ void ipq_unlink(struct ipq *ipq)
{
        write_lock(&ipfrag_lock);
        __ipq_unlink(ipq);
        write_unlock(&ipfrag_lock);
}

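/* The bucket index is a Jenkins hash over (id, protocol, saddr, daddr),
 * keyed with the secret ipfrag_hash_rnd so that remote senders cannot
 * predict placement and degenerate the table into one long chain; the
 * secret is re-rolled periodically by ipfrag_secret_rebuild() below.
 */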
static unsigned int ipqhashfn(u16 id, u32 saddr, u32 daddr, u8 prot)
{
        return jhash_3words((u32)id << 16 | prot, saddr, daddr,
                            ipfrag_hash_rnd) & (IPQ_HASHSZ - 1);
}
 
static struct timer_list ipfrag_secret_timer;
int sysctl_ipfrag_secret_interval = 10 * 60 * HZ;

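/* Re-key the hash and move every queue to the bucket the new secret
 * dictates.  Runs from a timer every sysctl_ipfrag_secret_interval
 * jiffies (ten minutes by default) and re-arms itself.
 */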
static void ipfrag_secret_rebuild(unsigned long dummy)
{
        unsigned long now = jiffies;
        int i;

        write_lock(&ipfrag_lock);
        get_random_bytes(&ipfrag_hash_rnd, sizeof(u32));
        for (i = 0; i < IPQ_HASHSZ; i++) {
                struct ipq *q;

                q = ipq_hash[i];
                while (q) {
                        struct ipq *next = q->next;
                        unsigned int hval = ipqhashfn(q->id, q->saddr,
                                                      q->daddr, q->protocol);

                        if (hval != i) {
                                /* Unlink. */
                                if (q->next)
                                        q->next->pprev = q->pprev;
                                *q->pprev = q->next;

                                /* Relink to new hash chain. */
                                if ((q->next = ipq_hash[hval]) != NULL)
                                        q->next->pprev = &q->next;
                                ipq_hash[hval] = q;
                                q->pprev = &ipq_hash[hval];
                        }

                        q = next;
                }
        }
        write_unlock(&ipfrag_lock);

        mod_timer(&ipfrag_secret_timer, now + sysctl_ipfrag_secret_interval);
}
 
atomic_t ip_frag_mem = ATOMIC_INIT(0);   /* Memory used for fragments */
 
/* Memory Tracking Functions. */
static __inline__ void frag_kfree_skb(struct sk_buff *skb)
{
        atomic_sub(skb->truesize, &ip_frag_mem);
        kfree_skb(skb);
}

static __inline__ void frag_free_queue(struct ipq *qp)
{
        atomic_sub(sizeof(struct ipq), &ip_frag_mem);
        kfree(qp);
}

static __inline__ struct ipq *frag_alloc_queue(void)
{
        struct ipq *qp = kmalloc(sizeof(struct ipq), GFP_ATOMIC);

        if(!qp)
                return NULL;
        atomic_add(sizeof(struct ipq), &ip_frag_mem);
        return qp;
}
 

/* Destruction primitives. */

/* Complete destruction of ipq. */
static void ip_frag_destroy(struct ipq *qp)
{
        struct sk_buff *fp;

        BUG_TRAP(qp->last_in&COMPLETE);
        BUG_TRAP(del_timer(&qp->timer) == 0);

        /* Release all fragment data. */
        fp = qp->fragments;
        while (fp) {
                struct sk_buff *xp = fp->next;

                frag_kfree_skb(fp);
                fp = xp;
        }

        /* Finally, release the queue descriptor itself. */
        frag_free_queue(qp);
}
 
static __inline__ void ipq_put(struct ipq *ipq)
{
        if (atomic_dec_and_test(&ipq->refcnt))
                ip_frag_destroy(ipq);
}
 
/* Kill ipq entry. It is not destroyed immediately,
 * because the caller (and someone else) still holds a reference count.
 */
static __inline__ void ipq_kill(struct ipq *ipq)
{
        if (del_timer(&ipq->timer))
                atomic_dec(&ipq->refcnt);

        if (!(ipq->last_in & COMPLETE)) {
                ipq_unlink(ipq);
                atomic_dec(&ipq->refcnt);
                ipq->last_in |= COMPLETE;
        }
}
 
/* Memory limiting on fragments.  Evictor trashes the oldest
 * fragment queue until we are back under the low threshold.
 */
static void ip_evictor(void)
{
        struct ipq *qp;
        struct list_head *tmp;

        for(;;) {
                if (atomic_read(&ip_frag_mem) <= sysctl_ipfrag_low_thresh)
                        return;
                read_lock(&ipfrag_lock);
                if (list_empty(&ipq_lru_list)) {
                        read_unlock(&ipfrag_lock);
                        return;
                }
                tmp = ipq_lru_list.next;
                qp = list_entry(tmp, struct ipq, lru_list);
                atomic_inc(&qp->refcnt);
                read_unlock(&ipfrag_lock);

                spin_lock(&qp->lock);
                if (!(qp->last_in&COMPLETE))
                        ipq_kill(qp);
                spin_unlock(&qp->lock);

                ipq_put(qp);
                IP_INC_STATS_BH(IpReasmFails);
        }
}
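/* The head of ipq_lru_list is always the queue that has waited longest
 * for a new fragment, since ip_frag_queue() moves a queue to the tail on
 * every arrival; the evictor therefore reclaims the coldest state first.
 */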
 
/*
 * Oops, a fragment queue timed out.  Kill it and send an ICMP reply.
 */
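/* Note: the ICMP "fragment reassembly time exceeded" error is sent only
 * if the first fragment (offset 0) has arrived, because the error must
 * quote the start of the offending datagram.
 */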
static void ip_expire(unsigned long arg)
{
        struct ipq *qp = (struct ipq *) arg;

        spin_lock(&qp->lock);

        if (qp->last_in & COMPLETE)
                goto out;

        ipq_kill(qp);

        IP_INC_STATS_BH(IpReasmTimeout);
        IP_INC_STATS_BH(IpReasmFails);

        if ((qp->last_in&FIRST_IN) && qp->fragments != NULL) {
                struct sk_buff *head = qp->fragments;
                /* Send an ICMP "Fragment Reassembly Timeout" message. */
                if ((head->dev = dev_get_by_index(qp->iif)) != NULL) {
                        icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
                        dev_put(head->dev);
                }
        }
out:
        spin_unlock(&qp->lock);
        ipq_put(qp);
}
 
/* Creation primitives. */

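/* A freshly interned queue ends up with three references: one held by
 * the hash table, one by the pending expiry timer, and one returned to
 * the caller.
 */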
static struct ipq *ip_frag_intern(unsigned int hash, struct ipq *qp_in)
{
        struct ipq *qp;

        write_lock(&ipfrag_lock);
#ifdef CONFIG_SMP
        /* With SMP races we have to recheck the hash table, because
         * such an entry could have been created on another cpu while we
         * promoted the read lock to a write lock.
         */
        for(qp = ipq_hash[hash]; qp; qp = qp->next) {
                if(qp->id == qp_in->id          &&
                   qp->saddr == qp_in->saddr    &&
                   qp->daddr == qp_in->daddr    &&
                   qp->protocol == qp_in->protocol) {
                        atomic_inc(&qp->refcnt);
                        write_unlock(&ipfrag_lock);
                        qp_in->last_in |= COMPLETE;
                        ipq_put(qp_in);
                        return qp;
                }
        }
#endif
        qp = qp_in;

        if (!mod_timer(&qp->timer, jiffies + sysctl_ipfrag_time))
                atomic_inc(&qp->refcnt);

        atomic_inc(&qp->refcnt);
        if((qp->next = ipq_hash[hash]) != NULL)
                qp->next->pprev = &qp->next;
        ipq_hash[hash] = qp;
        qp->pprev = &ipq_hash[hash];
        INIT_LIST_HEAD(&qp->lru_list);
        list_add_tail(&qp->lru_list, &ipq_lru_list);
        ip_frag_nqueues++;
        write_unlock(&ipfrag_lock);
        return qp;
}
 
/* Add an entry to the 'ipq' queue for a newly received IP datagram. */
static struct ipq *ip_frag_create(unsigned hash, struct iphdr *iph)
{
        struct ipq *qp;

        if ((qp = frag_alloc_queue()) == NULL)
                goto out_nomem;

        qp->protocol = iph->protocol;
        qp->last_in = 0;
        qp->id = iph->id;
        qp->saddr = iph->saddr;
        qp->daddr = iph->daddr;
        qp->len = 0;
        qp->meat = 0;
        qp->fragments = NULL;
        qp->iif = 0;

        /* Initialize a timer for this entry. */
        init_timer(&qp->timer);
        qp->timer.data = (unsigned long) qp;    /* pointer to queue     */
        qp->timer.function = ip_expire;         /* expire function      */
        qp->lock = SPIN_LOCK_UNLOCKED;
        atomic_set(&qp->refcnt, 1);

        return ip_frag_intern(hash, qp);

out_nomem:
        NETDEBUG(if (net_ratelimit()) printk(KERN_ERR "ip_frag_create: no memory left!\n"));
        return NULL;
}
 
/* Find the correct entry in the "incomplete datagrams" queue for
 * this IP datagram, and create a new one if nothing is found.
 */
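/* The lookup runs under the read lock; on a miss, ip_frag_create() and
 * ip_frag_intern() retake the lock for writing, which is why the latter
 * must re-check the chain on SMP.
 */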
static inline struct ipq *ip_find(struct iphdr *iph)
{
        __u16 id = iph->id;
        __u32 saddr = iph->saddr;
        __u32 daddr = iph->daddr;
        __u8 protocol = iph->protocol;
        unsigned int hash = ipqhashfn(id, saddr, daddr, protocol);
        struct ipq *qp;

        read_lock(&ipfrag_lock);
        for(qp = ipq_hash[hash]; qp; qp = qp->next) {
                if(qp->id == id         &&
                   qp->saddr == saddr   &&
                   qp->daddr == daddr   &&
                   qp->protocol == protocol) {
                        atomic_inc(&qp->refcnt);
                        read_unlock(&ipfrag_lock);
                        return qp;
                }
        }
        read_unlock(&ipfrag_lock);

        return ip_frag_create(hash, iph);
}
 
/* Add new segment to existing queue. */
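/* The fragment is placed into the queue's offset-sorted list; overlaps
 * with the fragments before and after it are trimmed away, and qp->meat
 * accumulates the payload bytes received, so completion can be detected
 * when meat == len with both FIRST_IN and LAST_IN set.
 */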
static void ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
{
        struct sk_buff *prev, *next;
        int flags, offset;
        int ihl, end;

        if (qp->last_in & COMPLETE)
                goto err;

        offset = ntohs(skb->nh.iph->frag_off);
        flags = offset & ~IP_OFFSET;    /* MF/DF/reserved bits */
        offset &= IP_OFFSET;
        offset <<= 3;           /* offset is in 8-byte chunks */
        ihl = skb->nh.iph->ihl * 4;

        /* Determine the position of this fragment. */
        end = offset + skb->len - ihl;

        /* Is this the final fragment? */
        if ((flags & IP_MF) == 0) {
                /* If we already have some bits beyond end
                 * or have a different end, the segment is corrupted.
                 */
                if (end < qp->len ||
                    ((qp->last_in & LAST_IN) && end != qp->len))
                        goto err;
                qp->last_in |= LAST_IN;
                qp->len = end;
        } else {
                /* A non-final fragment's payload must be a multiple of
                 * 8 bytes; trim the excess and invalidate the checksum.
                 */
                if (end&7) {
                        end &= ~7;
                        if (skb->ip_summed != CHECKSUM_UNNECESSARY)
                                skb->ip_summed = CHECKSUM_NONE;
                }
                if (end > qp->len) {
                        /* Some bits beyond end -> corruption. */
                        if (qp->last_in & LAST_IN)
                                goto err;
                        qp->len = end;
                }
        }
        if (end == offset)
                goto err;

        if (pskb_pull(skb, ihl) == NULL)
                goto err;
        if (pskb_trim(skb, end-offset))
                goto err;

        /* Find out which fragments are in front and at the back of us
         * in the chain of fragments so far.  We must know where to put
         * this fragment, right?
         */
        prev = NULL;
        for(next = qp->fragments; next != NULL; next = next->next) {
                if (FRAG_CB(next)->offset >= offset)
                        break;  /* bingo! */
                prev = next;
        }

        /* We found where to put this one.  Check for overlap with
         * preceding fragment, and, if needed, align things so that
         * any overlaps are eliminated.
         */
        if (prev) {
                int i = (FRAG_CB(prev)->offset + prev->len) - offset;

                if (i > 0) {
                        offset += i;
                        if (end <= offset)
                                goto err;
                        if (!pskb_pull(skb, i))
                                goto err;
                        if (skb->ip_summed != CHECKSUM_UNNECESSARY)
                                skb->ip_summed = CHECKSUM_NONE;
                }
        }

        while (next && FRAG_CB(next)->offset < end) {
                int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */

                if (i < next->len) {
                        /* Eat head of the next overlapped fragment
                         * and leave the loop. The next ones cannot overlap.
                         */
                        if (!pskb_pull(next, i))
                                goto err;
                        FRAG_CB(next)->offset += i;
                        qp->meat -= i;
                        if (next->ip_summed != CHECKSUM_UNNECESSARY)
                                next->ip_summed = CHECKSUM_NONE;
                        break;
                } else {
                        struct sk_buff *free_it = next;

                        /* The old fragment is completely overridden by
                         * the new one; drop it.
                         */
                        next = next->next;

                        if (prev)
                                prev->next = next;
                        else
                                qp->fragments = next;

                        qp->meat -= free_it->len;
                        frag_kfree_skb(free_it);
                }
        }

        FRAG_CB(skb)->offset = offset;

        /* Insert this fragment in the chain of fragments. */
        skb->next = next;
        if (prev)
                prev->next = skb;
        else
                qp->fragments = skb;

        if (skb->dev)
                qp->iif = skb->dev->ifindex;
        skb->dev = NULL;
        qp->stamp = skb->stamp;
        qp->meat += skb->len;
        atomic_add(skb->truesize, &ip_frag_mem);
        if (offset == 0)
                qp->last_in |= FIRST_IN;

        write_lock(&ipfrag_lock);
        list_move_tail(&qp->lru_list, &ipq_lru_list);
        write_unlock(&ipfrag_lock);

        return;

err:
        kfree_skb(skb);
}
 

/* Build a new IP datagram from all its fragments. */

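/* The head skb is privatized (un-cloned and split from its paged part as
 * needed), the remaining fragments are chained onto
 * skb_shinfo(head)->frag_list, lengths and checksums are accumulated,
 * and the IP header is rewritten with frag_off cleared and tot_len set
 * to the full datagram length.
 */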
static struct sk_buff *ip_frag_reasm(struct ipq *qp, struct net_device *dev)
{
        struct iphdr *iph;
        struct sk_buff *fp, *head = qp->fragments;
        int len;
        int ihlen;

        ipq_kill(qp);

        BUG_TRAP(head != NULL);
        BUG_TRAP(FRAG_CB(head)->offset == 0);

        /* Allocate a new buffer for the datagram. */
        ihlen = head->nh.iph->ihl*4;
        len = ihlen + qp->len;

        if(len > 65535)
                goto out_oversize;

        /* Head of list must not be cloned. */
        if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
                goto out_nomem;

        /* If the first fragment is fragmented itself, we split
         * it into two chunks: the first with data and paged part
         * and the second, holding only fragments. */
        if (skb_shinfo(head)->frag_list) {
                struct sk_buff *clone;
                int i, plen = 0;

                if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
                        goto out_nomem;
                clone->next = head->next;
                head->next = clone;
                skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
                skb_shinfo(head)->frag_list = NULL;
                for (i=0; i<skb_shinfo(head)->nr_frags; i++)
                        plen += skb_shinfo(head)->frags[i].size;
                clone->len = clone->data_len = head->data_len - plen;
                head->data_len -= clone->len;
                head->len -= clone->len;
                clone->csum = 0;
                clone->ip_summed = head->ip_summed;
                atomic_add(clone->truesize, &ip_frag_mem);
        }

        skb_shinfo(head)->frag_list = head->next;
        skb_push(head, head->data - head->nh.raw);
        atomic_sub(head->truesize, &ip_frag_mem);

        for (fp=head->next; fp; fp = fp->next) {
                head->data_len += fp->len;
                head->len += fp->len;
                if (head->ip_summed != fp->ip_summed)
                        head->ip_summed = CHECKSUM_NONE;
                else if (head->ip_summed == CHECKSUM_HW)
                        head->csum = csum_add(head->csum, fp->csum);
                head->truesize += fp->truesize;
                atomic_sub(fp->truesize, &ip_frag_mem);
        }

        head->next = NULL;
        head->dev = dev;
        head->stamp = qp->stamp;

        iph = head->nh.iph;
        iph->frag_off = 0;
        iph->tot_len = htons(len);
        IP_INC_STATS_BH(IpReasmOKs);
        qp->fragments = NULL;
        return head;

out_nomem:
        NETDEBUG(if (net_ratelimit())
                 printk(KERN_ERR
                        "IP: queue_glue: no memory for gluing queue %p\n",
                        qp));
        goto out_fail;
out_oversize:
        if (net_ratelimit())
                printk(KERN_INFO
                        "Oversized IP packet from %d.%d.%d.%d.\n",
                        NIPQUAD(qp->saddr));
out_fail:
        IP_INC_STATS_BH(IpReasmFails);
        return NULL;
}
 
/* Process an incoming IP datagram fragment. */
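/* ip_defrag() is the entry point, called once per received fragment.
 * It always consumes the skb; it returns the reassembled datagram when
 * the last missing piece arrives, and NULL otherwise.
 */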
struct sk_buff *ip_defrag(struct sk_buff *skb)
{
        struct iphdr *iph = skb->nh.iph;
        struct ipq *qp;
        struct net_device *dev;

        IP_INC_STATS_BH(IpReasmReqds);

        /* Start by cleaning up the memory. */
        if (atomic_read(&ip_frag_mem) > sysctl_ipfrag_high_thresh)
                ip_evictor();

        dev = skb->dev;

        /* Lookup (or create) queue header */
        if ((qp = ip_find(iph)) != NULL) {
                struct sk_buff *ret = NULL;

                spin_lock(&qp->lock);

                ip_frag_queue(qp, skb);

                if (qp->last_in == (FIRST_IN|LAST_IN) &&
                    qp->meat == qp->len)
                        ret = ip_frag_reasm(qp, dev);

                spin_unlock(&qp->lock);
                ipq_put(qp);
                return ret;
        }

        IP_INC_STATS_BH(IpReasmFails);
        kfree_skb(skb);
        return NULL;
}

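/* The initial hash secret is only a weak mix of machine memory size and
 * boot time; it is replaced with true random bytes by the first run of
 * ipfrag_secret_rebuild(), scheduled below.
 */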
void ipfrag_init(void)
{
        ipfrag_hash_rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^
                                 (jiffies ^ (jiffies >> 6)));

        init_timer(&ipfrag_secret_timer);
        ipfrag_secret_timer.function = ipfrag_secret_rebuild;
        ipfrag_secret_timer.expires = jiffies + sysctl_ipfrag_secret_interval;
        add_timer(&ipfrag_secret_timer);
}
