OpenCores
URL https://opencores.org/ocsvn/or1k_old/or1k_old/trunk

Subversion Repositories or1k_old

[/] [or1k_old/] [trunk/] [rc203soc/] [sw/] [uClinux/] [net/] [ipv4/] [ip_fragment.c] - Blame information for rev 1765

Go to most recent revision | Details | Compare with Previous | View Log

Line No. Rev Author Line
1 1629 jcastillo
/*
2
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
3
 *              operating system.  INET is implemented using the  BSD Socket
4
 *              interface as the means of communication with the user level.
5
 *
6
 *              The IP fragmentation functionality.
7
 *
8
 * Authors:     Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG>
9
 *              Alan Cox <Alan.Cox@linux.org>
10
 *
11
 * Fixes:
12
 *              Alan Cox        :       Split from ip.c , see ip_input.c for history.
13
 *              Alan Cox        :       Handling oversized frames
14
 *              Uriel Maimon    :       Accounting errors in two fringe cases.
15
 */
16
 
17
#include <linux/types.h>
18
#include <linux/mm.h>
19
#include <linux/sched.h>
20
#include <linux/skbuff.h>
21
#include <linux/ip.h>
22
#include <linux/icmp.h>
23
#include <linux/netdevice.h>
24
#include <net/sock.h>
25
#include <net/ip.h>
26
#include <net/icmp.h>
27
#include <linux/tcp.h>
28
#include <linux/udp.h>
29
#include <linux/inet.h>
30
#include <linux/firewall.h>
31
#include <linux/ip_fw.h>
32
#include <net/checksum.h>
33
 
34
/*
 *      Fragment cache limits. We will commit 256K at one time. Should we
 *      cross that limit we will prune down to 192K. This should cope with
 *      even the most extreme cases without allowing an attacker to measurably
 *      harm machine performance.
 */

#define IPFRAG_HIGH_THRESH              (256*1024)
#define IPFRAG_LOW_THRESH               (192*1024)

/*
 *      This fragment handler is a bit of a heap. On the other hand it works quite
 *      happily and handles things quite well.
 */

/* Head of the global list of datagrams being reassembled. Accessed with
   interrupts disabled (cli/sti) by the routines below. */
static struct ipq *ipqueue = NULL;              /* IP fragment queue    */

/* Bytes currently charged to the fragment cache; compared against the
   IPFRAG_*_THRESH limits above and maintained by the frag_* helpers. */
atomic_t ip_frag_mem = 0;                        /* Memory used for fragments */

/* Dotted-quad formatter, defined elsewhere; used only for NETDEBUG output. */
char *in_ntoa(unsigned long in);
54
 
55
/*
56
 *      Memory Tracking Functions
57
 */
58
 
59
extern __inline__ void frag_kfree_skb(struct sk_buff *skb, int type)
60
{
61
        atomic_sub(skb->truesize, &ip_frag_mem);
62
        kfree_skb(skb,type);
63
}
64
 
65
extern __inline__ void frag_kfree_s(void *ptr, int len)
66
{
67
        atomic_sub(len, &ip_frag_mem);
68
        kfree_s(ptr,len);
69
}
70
 
71
extern __inline__ void *frag_kmalloc(int size, int pri)
72
{
73
        void *vp=kmalloc(size,pri);
74
        if(!vp)
75
                return NULL;
76
        atomic_add(size, &ip_frag_mem);
77
        return vp;
78
}
79
 
80
/*
 *      Create a new fragment entry.
 *
 *      Allocates an ipfrag descriptor recording that bytes [offset,end)
 *      of the datagram live at 'ptr' inside 'skb', and charges the skb's
 *      memory to ip_frag_mem.  Returns NULL (nothing charged beyond the
 *      failed kmalloc) if memory is exhausted.
 */

static struct ipfrag *ip_frag_create(int offset, int end, struct sk_buff *skb, unsigned char *ptr)
{
        struct ipfrag *fp;
        unsigned long flags;

        fp = (struct ipfrag *) frag_kmalloc(sizeof(struct ipfrag), GFP_ATOMIC);
        if (fp == NULL)
        {
                NETDEBUG(printk("IP: frag_create: no memory left !\n"));
                return(NULL);
        }
        /* Zeroing also NULLs the next/prev links the caller will set. */
        memset(fp, 0, sizeof(struct ipfrag));

        /* Fill in the structure. */
        fp->offset = offset;
        fp->end = end;
        fp->len = end - offset;
        fp->skb = skb;
        fp->ptr = ptr;

        /*
         *      Charge for the SKB as well.
         */

        save_flags(flags);
        cli();          /* plain += on the counter, so shut out interrupts */
        ip_frag_mem+=skb->truesize;
        restore_flags(flags);

        return(fp);
}
115
 
116
 
117
/*
118
 *      Find the correct entry in the "incomplete datagrams" queue for
119
 *      this IP datagram, and return the queue entry address if found.
120
 */
121
 
122
static struct ipq *ip_find(struct iphdr *iph)
123
{
124
        struct ipq *qp;
125
        struct ipq *qplast;
126
 
127
        cli();
128
        qplast = NULL;
129
        for(qp = ipqueue; qp != NULL; qplast = qp, qp = qp->next)
130
        {
131
                if (iph->id== qp->iph->id && iph->saddr == qp->iph->saddr &&
132
                        iph->daddr == qp->iph->daddr && iph->protocol == qp->iph->protocol)
133
                {
134
                        del_timer(&qp->timer);  /* So it doesn't vanish on us. The timer will be reset anyway */
135
                        sti();
136
                        return(qp);
137
                }
138
        }
139
        sti();
140
        return(NULL);
141
}
142
 
143
 
144
/*
 *      Remove an entry from the "incomplete datagrams" queue, either
 *      because we completed, reassembled and processed it, or because
 *      it timed out.
 *
 *      Every fragment skb and descriptor, the saved IP header copy and
 *      the queue entry itself are released, each un-charged from
 *      ip_frag_mem.  Unlinking and freeing run with interrupts disabled.
 */

static void ip_free(struct ipq *qp)
{
        struct ipfrag *fp;
        struct ipfrag *xp;

        /*
         * Stop the timer for this entry.
         */

        del_timer(&qp->timer);

        /* Remove this entry from the "incomplete datagrams" queue. */
        cli();
        if (qp->prev == NULL)
        {
                /* Entry was at the head of the list. */
                ipqueue = qp->next;
                if (ipqueue != NULL)
                        ipqueue->prev = NULL;
        }
        else
        {
                qp->prev->next = qp->next;
                if (qp->next != NULL)
                        qp->next->prev = qp->prev;
        }

        /* Release all fragment data. */

        fp = qp->fragments;
        while (fp != NULL)
        {
                xp = fp->next;  /* save the link before fp is freed */
                IS_SKB(fp->skb);
                frag_kfree_skb(fp->skb,FREE_READ);
                frag_kfree_s(fp, sizeof(struct ipfrag));
                fp = xp;
        }

        /* Release the IP header. (64 + 8 matches the ip_create() allocation.) */
        frag_kfree_s(qp->iph, 64 + 8);

        /* Finally, release the queue descriptor itself. */
        frag_kfree_s(qp, sizeof(struct ipq));
        sti();
}
195
 
196
 
197
/*
 *      Oops- a fragment queue timed out.  Kill it and send an ICMP reply.
 *
 *      Timer callback: 'arg' is the struct ipq pointer that ip_create()
 *      stashed in timer.data.  Counts the failure, reports "fragment
 *      reassembly time exceeded" to the sender (using the first queued
 *      fragment's skb for addressing) and destroys the queue.
 */

static void ip_expire(unsigned long arg)
{
        struct ipq *qp;

        qp = (struct ipq *)arg;

        /*
         *      Send an ICMP "Fragment Reassembly Timeout" message.
         */

        ip_statistics.IpReasmTimeout++;
        ip_statistics.IpReasmFails++;
        /* This if is always true... shrug */
        if(qp->fragments!=NULL)
                icmp_send(qp->fragments->skb,ICMP_TIME_EXCEEDED,
                                ICMP_EXC_FRAGTIME, 0, qp->dev);

        /*
         *      Nuke the fragment queue.
         */
        ip_free(qp);
}
223
 
224
/*
225
 *      Memory limiting on fragments. Evictor trashes the oldest
226
 *      fragment queue until we are back under the low threshold
227
 */
228
 
229
static void ip_evictor(void)
230
{
231
        while(ip_frag_mem>IPFRAG_LOW_THRESH)
232
        {
233
                if(!ipqueue)
234
                        panic("ip_evictor: memcount");
235
                ip_free(ipqueue);
236
        }
237
}
238
 
239
/*
 *      Add an entry to the 'ipq' queue for a newly received IP datagram.
 *      We will (hopefully :-) receive all other fragments of this datagram
 *      in time, so we just create a queue for this datagram, in which we
 *      will insert the received fragments at their respective positions.
 *
 *      Returns NULL on allocation failure (any partial allocation is
 *      rolled back).  On success the entry has its expiry timer armed
 *      and is linked at the head of the global ipqueue list.  Note that
 *      'skb' is not stored here; the caller queues it as a fragment.
 */

static struct ipq *ip_create(struct sk_buff *skb, struct iphdr *iph, struct device *dev)
{
        struct ipq *qp;
        int ihlen;

        qp = (struct ipq *) frag_kmalloc(sizeof(struct ipq), GFP_ATOMIC);
        if (qp == NULL)
        {
                NETDEBUG(printk("IP: create: no memory left !\n"));
                return(NULL);
        }
        memset(qp, 0, sizeof(struct ipq));

        /*
         *      Allocate memory for the IP header (plus 8 octets for ICMP).
         */

        ihlen = iph->ihl * 4;
        /* 64 bytes covers the largest header (ihl is a 4-bit word count,
           so at most 60 bytes); the extra 8 octets are the payload head
           an ICMP error report must quote. */
        qp->iph = (struct iphdr *) frag_kmalloc(64 + 8, GFP_ATOMIC);
        if (qp->iph == NULL)
        {
                NETDEBUG(printk("IP: create: no memory left !\n"));
                frag_kfree_s(qp, sizeof(struct ipq));
                return(NULL);
        }

        memcpy(qp->iph, iph, ihlen + 8);
        qp->len = 0;            /* total length unknown until the last fragment */
        qp->ihlen = ihlen;
        qp->fragments = NULL;
        qp->dev = dev;

        /* Start a timer for this entry. */
        qp->timer.expires = jiffies + IP_FRAG_TIME;     /* about 30 seconds     */
        qp->timer.data = (unsigned long) qp;            /* pointer to queue     */
        qp->timer.function = ip_expire;                 /* expire function      */
        add_timer(&qp->timer);

        /* Add this entry to the queue. */
        qp->prev = NULL;
        cli();
        qp->next = ipqueue;
        if (qp->next != NULL)
                qp->next->prev = qp;
        ipqueue = qp;
        sti();
        return(qp);
}
294
 
295
 
296
/*
297
 *      See if a fragment queue is complete.
298
 */
299
 
300
static int ip_done(struct ipq *qp)
301
{
302
        struct ipfrag *fp;
303
        int offset;
304
 
305
        /* Only possible if we received the final fragment. */
306
        if (qp->len == 0)
307
                return(0);
308
 
309
        /* Check all fragment offsets to see if they connect. */
310
        fp = qp->fragments;
311
        offset = 0;
312
        while (fp != NULL)
313
        {
314
                if (fp->offset > offset)
315
                        return(0);       /* fragment(s) missing */
316
                offset = fp->end;
317
                fp = fp->next;
318
        }
319
 
320
        /* All fragments are present. */
321
        return(1);
322
}
323
 
324
 
325
/*
 *      Build a new IP datagram from all its fragments.
 *
 *      FIXME: We copy here because we lack an effective way of handling lists
 *      of bits on input. Until the new skb data handling is in I'm not going
 *      to touch this with a bargepole.
 *
 *      Consumes the queue: qp is freed on every path, success or failure.
 *      Returns the reassembled skb, or NULL on oversize, allocation
 *      failure or a corrupt fragment list.
 */

static struct sk_buff *ip_glue(struct ipq *qp)
{
        struct sk_buff *skb;
        struct iphdr *iph;
        struct ipfrag *fp;
        unsigned char *ptr;
        int count, len;

        /*
         *      Allocate a new buffer for the datagram.
         */
        len = qp->ihlen + qp->len;

        /* An IP datagram's total-length field is 16 bits wide. */
        if(len>65535)
        {
                NETDEBUG(printk("Oversized IP packet from %s.\n", in_ntoa(qp->iph->saddr)));
                ip_statistics.IpReasmFails++;
                ip_free(qp);
                return NULL;
        }

        if ((skb = dev_alloc_skb(len)) == NULL)
        {
                ip_statistics.IpReasmFails++;
                NETDEBUG(printk("IP: queue_glue: no memory for gluing queue %p\n", qp));
                ip_free(qp);
                return(NULL);
        }

        /* Fill in the basic details. */
        skb_put(skb,len);
        skb->h.raw = skb->data;
        skb->free = 1;

        /* Copy the original IP headers into the new buffer. */
        ptr = (unsigned char *) skb->h.raw;
        memcpy(ptr, ((unsigned char *) qp->iph), qp->ihlen);
        ptr += qp->ihlen;       /* ptr now addresses the start of the payload */

        count = 0;

        /* Copy the data portions of all fragments into the new buffer. */
        fp = qp->fragments;
        while(fp != NULL)
        {
                /* Defend against a corrupt fragment list: a negative
                   length or data extending past the buffer end. */
                if (fp->len < 0 || fp->offset+qp->ihlen+fp->len > skb->len)
                {
                        NETDEBUG(printk("Invalid fragment list: Fragment over size.\n"));
                        ip_free(qp);
                        kfree_skb(skb,FREE_WRITE);
                        ip_statistics.IpReasmFails++;
                        return NULL;
                }
                memcpy((ptr + fp->offset), fp->ptr, fp->len);
                count += fp->len;
                fp = fp->next;
        }

        /* Inherit packet metadata from the first fragment's skb. */
        skb->pkt_type = qp->fragments->skb->pkt_type;
        skb->protocol = qp->fragments->skb->protocol;
        /* We glued together all fragments, so remove the queue entry. */
        ip_free(qp);

        /* Done with all fragments. Fixup the new IP header. */
        iph = skb->h.iph;
        iph->frag_off = 0;      /* reassembled: offset 0, MF clear */
        iph->tot_len = htons((iph->ihl * 4) + count);
        skb->ip_hdr = iph;

        ip_statistics.IpReasmOKs++;
        return(skb);
}
405
 
406
 
407
/*
408
 *      Process an incoming IP datagram fragment.
409
 */
410
 
411
struct sk_buff *ip_defrag(struct iphdr *iph, struct sk_buff *skb, struct device *dev)
412
{
413
        struct ipfrag *prev, *next, *tmp;
414
        struct ipfrag *tfp;
415
        struct ipq *qp;
416
        struct sk_buff *skb2;
417
        unsigned char *ptr;
418
        int flags, offset;
419
        int i, ihl, end;
420
 
421
        ip_statistics.IpReasmReqds++;
422
 
423
        /*
424
         *      Start by cleaning up the memory
425
         */
426
 
427
        if(ip_frag_mem>IPFRAG_HIGH_THRESH)
428
                ip_evictor();
429
        /*
430
         *      Find the entry of this IP datagram in the "incomplete datagrams" queue.
431
         */
432
 
433
        qp = ip_find(iph);
434
 
435
        /* Is this a non-fragmented datagram? */
436
        offset = ntohs(iph->frag_off);
437
        flags = offset & ~IP_OFFSET;
438
        offset &= IP_OFFSET;
439
        if (((flags & IP_MF) == 0) && (offset == 0))
440
        {
441
                if (qp != NULL)
442
                        ip_free(qp);    /* Fragmented frame replaced by full unfragmented copy */
443
                return(skb);
444
        }
445
 
446
        offset <<= 3;           /* offset is in 8-byte chunks */
447
        ihl = iph->ihl * 4;
448
 
449
        /*
450
         * If the queue already existed, keep restarting its timer as long
451
         * as we still are receiving fragments.  Otherwise, create a fresh
452
         * queue entry.
453
         */
454
 
455
        if (qp != NULL)
456
        {
457
                /* ANK. If the first fragment is received,
458
                 * we should remember the correct IP header (with options)
459
                 */
460
                if (offset == 0)
461
                {
462
                        qp->ihlen = ihl;
463
                        memcpy(qp->iph, iph, ihl+8);
464
                }
465
                del_timer(&qp->timer);
466
                qp->timer.expires = jiffies + IP_FRAG_TIME;     /* about 30 seconds */
467
                qp->timer.data = (unsigned long) qp;    /* pointer to queue */
468
                qp->timer.function = ip_expire;         /* expire function */
469
                add_timer(&qp->timer);
470
        }
471
        else
472
        {
473
                /*
474
                 *      If we failed to create it, then discard the frame
475
                 */
476
                if ((qp = ip_create(skb, iph, dev)) == NULL)
477
                {
478
                        skb->sk = NULL;
479
                        kfree_skb(skb, FREE_READ);
480
                        ip_statistics.IpReasmFails++;
481
                        return NULL;
482
                }
483
        }
484
 
485
        /*
486
         *      Attempt to construct an oversize packet.
487
         */
488
 
489
        if(ntohs(iph->tot_len)+(int)offset>65535)
490
        {
491
                skb->sk = NULL;
492
                NETDEBUG(printk("Oversized packet received from %s\n",in_ntoa(iph->saddr)));
493
                kfree_skb(skb, FREE_READ);
494
                ip_statistics.IpReasmFails++;
495
                return NULL;
496
        }
497
 
498
        /*
499
         *      Determine the position of this fragment.
500
         */
501
 
502
        end = offset + ntohs(iph->tot_len) - ihl;
503
 
504
        /*
505
         *      Point into the IP datagram 'data' part.
506
         */
507
 
508
        ptr = skb->data + ihl;
509
 
510
        /*
511
         *      Is this the final fragment?
512
         */
513
 
514
        if ((flags & IP_MF) == 0)
515
                qp->len = end;
516
 
517
        /*
518
         *      Find out which fragments are in front and at the back of us
519
         *      in the chain of fragments so far.  We must know where to put
520
         *      this fragment, right?
521
         */
522
 
523
        prev = NULL;
524
        for(next = qp->fragments; next != NULL; next = next->next)
525
        {
526
                if (next->offset >= offset)
527
                        break;  /* bingo! */
528
                prev = next;
529
        }
530
 
531
        /*
532
         *      We found where to put this one.
533
         *      Check for overlap with preceding fragment, and, if needed,
534
         *      align things so that any overlaps are eliminated.
535
         */
536
        if (prev != NULL && offset < prev->end)
537
        {
538
                i = prev->end - offset;
539
                offset += i;    /* ptr into datagram */
540
                ptr += i;       /* ptr into fragment data */
541
        }
542
 
543
        /*
544
         * Look for overlap with succeeding segments.
545
         * If we can merge fragments, do it.
546
         */
547
 
548
        for(tmp=next; tmp != NULL; tmp = tfp)
549
        {
550
                tfp = tmp->next;
551
                if (tmp->offset >= end)
552
                        break;          /* no overlaps at all */
553
 
554
                i = end - next->offset;                 /* overlap is 'i' bytes */
555
                tmp->len -= i;                          /* so reduce size of    */
556
                tmp->offset += i;                       /* next fragment        */
557
                tmp->ptr += i;
558
                /*
559
                 *      If we get a frag size of <= 0, remove it and the packet
560
                 *      that it goes with.
561
                 *
562
                 *      We never throw the new frag away, so the frag being
563
                 *      dumped has always been charged for.
564
                 */
565
                if (tmp->len <= 0)
566
                {
567
                        if (tmp->prev != NULL)
568
                                tmp->prev->next = tmp->next;
569
                        else
570
                                qp->fragments = tmp->next;
571
 
572
                        if (tmp->next != NULL)
573
                                tmp->next->prev = tmp->prev;
574
 
575
                        next=tfp;       /* We have killed the original next frame */
576
 
577
                        frag_kfree_skb(tmp->skb,FREE_READ);
578
                        frag_kfree_s(tmp, sizeof(struct ipfrag));
579
                }
580
        }
581
 
582
        /*
583
         *      Insert this fragment in the chain of fragments.
584
         */
585
 
586
        tfp = NULL;
587
 
588
        if(offset<end)
589
                tfp = ip_frag_create(offset, end, skb, ptr);
590
 
591
        /*
592
         *      No memory to save the fragment - so throw the lot. If we
593
         *      failed the frag_create we haven't charged the queue.
594
         */
595
 
596
        if (!tfp)
597
        {
598
                skb->sk = NULL;
599
                kfree_skb(skb, FREE_READ);
600
                return NULL;
601
        }
602
 
603
        /*
604
         *      From now on our buffer is charged to the queues.
605
         */
606
 
607
        tfp->prev = prev;
608
        tfp->next = next;
609
        if (prev != NULL)
610
                prev->next = tfp;
611
        else
612
                qp->fragments = tfp;
613
 
614
        if (next != NULL)
615
                next->prev = tfp;
616
 
617
        /*
618
         *      OK, so we inserted this new fragment into the chain.
619
         *      Check if we now have a full IP datagram which we can
620
         *      bump up to the IP layer...
621
         */
622
 
623
        if (ip_done(qp))
624
        {
625
                skb2 = ip_glue(qp);             /* glue together the fragments */
626
                return(skb2);
627
        }
628
        return(NULL);
629
}
630
 
631
 
632
/*
 *      This IP datagram is too large to be sent in one piece.  Break it up into
 *      smaller pieces (each of size equal to the MAC header plus IP header plus
 *      a block of the data of the original IP data part) that will yet fit in a
 *      single device frame, and queue such a frame for sending by calling the
 *      ip_queue_xmit().  Note that this is recursion, and bad things will happen
 *      if this function causes a loop...
 *
 *      Yes this is inefficient, feel free to submit a quicker one.
 *
 *      'is_frag' bit 1 means the original datagram was itself a non-final
 *      fragment (keep MF set on every piece); bit 2 means it already
 *      carried a fragment offset to start counting from.
 */

void ip_fragment(struct sock *sk, struct sk_buff *skb, struct device *dev, int is_frag)
{
        struct iphdr *iph;
        unsigned char *raw;
        unsigned char *ptr;
        struct sk_buff *skb2;
        int left, mtu, hlen, len;
        int offset;

        unsigned short true_hard_header_len;

        /*
         *      Point into the IP datagram header.
         */

        raw = skb->data;
#if 0
        iph = (struct iphdr *) (raw + dev->hard_header_len);
        skb->ip_hdr = iph;
#else
        iph = skb->ip_hdr;
#endif

        /*
         * Calculate the length of the link-layer header appended to
         * the IP-packet.  (Derived from the actual header position
         * rather than dev->hard_header_len -- see the #if 0 above.)
         */
        true_hard_header_len = ((unsigned char *)iph) - raw;

        /*
         *      Setup starting values.
         */

        hlen = iph->ihl * 4;
        left = ntohs(iph->tot_len) - hlen;      /* Space per frame */
        hlen += true_hard_header_len;
        mtu = (dev->mtu - hlen);                /* Size of data space */
        ptr = (raw + hlen);                     /* Where to start from */

        /*
         *      Check for any "DF" flag. [DF means do not fragment]
         */

        if (iph->frag_off & htons(IP_DF))
        {
                ip_statistics.IpFragFails++;
                NETDEBUG(printk("ip_queue_xmit: frag needed\n"));
                return;
        }

        /*
         *      The protocol doesn't seem to say what to do in the case that the
         *      frame + options doesn't fit the mtu. As it used to fall down dead
         *      in this case we were fortunate it didn't happen
         */

        if(mtu<8)
        {
                /* It's wrong but it's better than nothing */
                icmp_send(skb,ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED,htons(dev->mtu), dev);
                ip_statistics.IpFragFails++;
                return;
        }

        /*
         *      Fragment the datagram.
         */

        /*
         *      The initial offset is 0 for a complete frame. When
         *      fragmenting fragments it's wherever this one starts.
         */

        if (is_frag & 2)
                offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
        else
                offset = 0;


        /*
         *      Keep copying data until we run out.
         */

        while(left > 0)
        {
                len = left;
                /* IF: it doesn't fit, use 'mtu' - the data space left */
                if (len > mtu)
                        len = mtu;
                /* IF: we are not sending upto and including the packet end
                   then align the next start on an eight byte boundary */
                if (len < left)
                {
                        len/=8;
                        len*=8;
                }
                /*
                 *      Allocate buffer.  (+15 gives alloc_skb alignment slack.)
                 */

                if ((skb2 = alloc_skb(len + hlen+15,GFP_ATOMIC)) == NULL)
                {
                        NETDEBUG(printk("IP: frag: no memory for new fragment!\n"));
                        ip_statistics.IpFragFails++;
                        return;
                }

                /*
                 *      Set up data on packet
                 */

                skb2->arp = skb->arp;
                skb2->protocol = htons(ETH_P_IP); /* At least PPP needs this */
#if 0           
                if(skb->free==0)
                        printk(KERN_ERR "IP fragmenter: BUG free!=1 in fragmenter\n");
#endif                  
                skb2->free = 1;
                skb_put(skb2,len + hlen);
                skb2->h.raw=(char *) skb2->data;
                /*
                 *      Charge the memory for the fragment to any owner
                 *      it might possess
                 */

                if (sk)
                {
                        atomic_add(skb2->truesize, &sk->wmem_alloc);
                        skb2->sk=sk;
                }
                skb2->raddr = skb->raddr;       /* For rebuild_header - must be here */

                /*
                 *      Copy the packet header into the new buffer.
                 *      (hlen includes the link-layer header here.)
                 */

                memcpy(skb2->h.raw, raw, hlen);

                /*
                 *      Copy a block of the IP datagram.
                 */
                memcpy(skb2->h.raw + hlen, ptr, len);
                left -= len;

                /* h.raw now points at the IP header within the new frame. */
                skb2->h.raw+=true_hard_header_len;

                /*
                 *      Fill in the new header fields.
                 *      NOTE(review): tot_len and the header checksum are not
                 *      updated here; presumably ip_queue_xmit() finalizes
                 *      them -- confirm against its implementation.
                 */
                iph = (struct iphdr *)(skb2->h.raw/*+dev->hard_header_len*/);
                iph->frag_off = htons((offset >> 3));
                skb2->ip_hdr = iph;

                /* ANK: dirty, but effective trick. Upgrade options only if
                 * the segment to be fragmented was THE FIRST (otherwise,
                 * options are already fixed) and make it ONCE
                 * on the initial skb, so that all the following fragments
                 * will inherit fixed options.
                 */
                if (offset == 0)
                        ip_options_fragment(skb);

                /*
                 *      Added AC : If we are fragmenting a fragment that's not the
                 *                 last fragment then keep MF on each bit
                 */
                if (left > 0 || (is_frag & 1))
                        iph->frag_off |= htons(IP_MF);
                ptr += len;
                offset += len;

                /*
                 *      Put this fragment into the sending queue.
                 */

                ip_statistics.IpFragCreates++;

                ip_queue_xmit(sk, dev, skb2, 2);
        }
        ip_statistics.IpFragOKs++;
}
825
 
826
 

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.