/*
 *	net/core/skbuff.c  (uClinux-2.0.x, OpenCores or1k tree, rev 1765)
 */
/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <iiitac@pyr.swan.ac.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *
 *	TO FIX:
 *		The __skb_ routines ought to check interrupts are disabled
 *	when called, and bitch like crazy if not. Unfortunately I don't think
 *	we currently have a portable way to check if interrupts are off -
 *	Linus ???
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/malloc.h>
#include <linux/string.h>
#include <linux/skbuff.h>

#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/sock.h>

#include <asm/segment.h>
#include <asm/system.h>

/*
 *	Resource tracking variables
 */

atomic_t net_skbcount = 0;
atomic_t net_locked = 0;
atomic_t net_allocs = 0;
atomic_t net_fails  = 0;
atomic_t net_free_locked = 0;

extern atomic_t ip_frag_mem;

#undef TRACK_SKB_USAGE

void show_net_buffers(void)
{
        printk(KERN_INFO "Networking buffers in use          : %u\n",net_skbcount);
        printk(KERN_INFO "Network buffers locked by drivers  : %u\n",net_locked);
        printk(KERN_INFO "Total network buffer allocations   : %u\n",net_allocs);
        printk(KERN_INFO "Total failed network buffer allocs : %u\n",net_fails);
        printk(KERN_INFO "Total free while locked events     : %u\n",net_free_locked);
#ifdef CONFIG_INET
        printk(KERN_INFO "IP fragment buffer size            : %u\n",ip_frag_mem);
#endif
}

#if CONFIG_SKB_CHECK

/*
 *	Debugging paranoia. Can go later when this crud stack works
 */

int skb_check(struct sk_buff *skb, int head, int line, char *file)
{
        if (head) {
                if (skb->magic_debug_cookie != SK_HEAD_SKB) {
                        printk("File: %s Line %d, found a bad skb-head\n",
                                file,line);
                        return -1;
                }
                if (!skb->next || !skb->prev) {
                        printk("skb_check: head without next or prev\n");
                        return -1;
                }
                if (skb->next->magic_debug_cookie != SK_HEAD_SKB
                        && skb->next->magic_debug_cookie != SK_GOOD_SKB) {
                        printk("File: %s Line %d, bad next head-skb member\n",
                                file,line);
                        return -1;
                }
                if (skb->prev->magic_debug_cookie != SK_HEAD_SKB
                        && skb->prev->magic_debug_cookie != SK_GOOD_SKB) {
                        printk("File: %s Line %d, bad prev head-skb member\n",
                                file,line);
                        return -1;
                }
#if 0
                {
                struct sk_buff *skb2 = skb->next;
                int i = 0;
                while (skb2 != skb && i < 5) {
                        if (skb_check(skb2, 0, line, file) < 0) {
                                printk("bad queue element in whole queue\n");
                                return -1;
                        }
                        i++;
                        skb2 = skb2->next;
                }
                }
#endif
                return 0;
        }
        if (skb->next != NULL && skb->next->magic_debug_cookie != SK_HEAD_SKB
                && skb->next->magic_debug_cookie != SK_GOOD_SKB) {
                printk("File: %s Line %d, bad next skb member\n",
                        file,line);
                return -1;
        }
        if (skb->prev != NULL && skb->prev->magic_debug_cookie != SK_HEAD_SKB
                && skb->prev->magic_debug_cookie != SK_GOOD_SKB) {
                printk("File: %s Line %d, bad prev skb member\n",
                        file,line);
                return -1;
        }

        if(skb->magic_debug_cookie==SK_FREED_SKB)
        {
                printk("File: %s Line %d, found a freed skb lurking in the undergrowth!\n",
                        file,line);
                printk("skb=%p, real size=%d, free=%d\n",
                        skb,skb->truesize,skb->free);
                return -1;
        }
        if(skb->magic_debug_cookie!=SK_GOOD_SKB)
        {
                printk("File: %s Line %d, passed a non skb!\n", file,line);
                printk("skb=%p, real size=%d, free=%d\n",
                        skb,skb->truesize,skb->free);
                return -1;
        }
        if(skb->head>skb->data)
        {
                printk("File: %s Line %d, head > data !\n", file,line);
                printk("skb=%p, head=%p, data=%p\n",
                        skb,skb->head,skb->data);
                return -1;
        }
        if(skb->tail>skb->end)
        {
                printk("File: %s Line %d, tail > end!\n", file,line);
                printk("skb=%p, tail=%p, end=%p\n",
                        skb,skb->tail,skb->end);
                return -1;
        }
        if(skb->data>skb->tail)
        {
                printk("File: %s Line %d, data > tail!\n", file,line);
                printk("skb=%p, data=%p, tail=%p\n",
                        skb,skb->data,skb->tail);
                return -1;
        }
        if(skb->tail-skb->data!=skb->len)
        {
                printk("File: %s Line %d, wrong length\n", file,line);
                printk("skb=%p, data=%p, end=%p len=%ld\n",
                        skb,skb->data,skb->end,skb->len);
                return -1;
        }
        if((unsigned long) skb->end > (unsigned long) skb)
        {
                printk("File: %s Line %d, control overrun\n", file,line);
                printk("skb=%p, end=%p\n",
                        skb,skb->end);
                return -1;
        }

        /* Guess it might be acceptable then */
        return 0;
}
#endif

#if CONFIG_SKB_CHECK
void skb_queue_head_init(struct sk_buff_head *list)
{
        list->prev = (struct sk_buff *)list;
        list->next = (struct sk_buff *)list;
        list->qlen = 0;
        list->magic_debug_cookie = SK_HEAD_SKB;
}

/*
 *	Insert an sk_buff at the start of a list.
 */
void skb_queue_head(struct sk_buff_head *list_,struct sk_buff *newsk)
{
        unsigned long flags;
        struct sk_buff *list = (struct sk_buff *)list_;

        save_flags(flags);
        cli();

        IS_SKB(newsk);
        IS_SKB_HEAD(list);
        if (newsk->next || newsk->prev)
                printk("Suspicious queue head: sk_buff on list!\n");

        newsk->next = list->next;
        newsk->prev = list;

        newsk->next->prev = newsk;
        newsk->prev->next = newsk;
        newsk->list = list_;
        list_->qlen++;

        restore_flags(flags);
}

void __skb_queue_head(struct sk_buff_head *list_,struct sk_buff *newsk)
{
        struct sk_buff *list = (struct sk_buff *)list_;

        IS_SKB(newsk);
        IS_SKB_HEAD(list);
        if (newsk->next || newsk->prev)
                printk("Suspicious queue head: sk_buff on list!\n");

        newsk->next = list->next;
        newsk->prev = list;

        newsk->next->prev = newsk;
        newsk->prev->next = newsk;
        newsk->list = list_;
        list_->qlen++;
}

/*
 *	Insert an sk_buff at the end of a list.
 */
void skb_queue_tail(struct sk_buff_head *list_, struct sk_buff *newsk)
{
        unsigned long flags;
        struct sk_buff *list = (struct sk_buff *)list_;

        save_flags(flags);
        cli();

        if (newsk->next || newsk->prev)
                printk("Suspicious queue tail: sk_buff on list!\n");
        IS_SKB(newsk);
        IS_SKB_HEAD(list);

        newsk->next = list;
        newsk->prev = list->prev;

        newsk->next->prev = newsk;
        newsk->prev->next = newsk;

        newsk->list = list_;
        list_->qlen++;

        restore_flags(flags);
}

void __skb_queue_tail(struct sk_buff_head *list_, struct sk_buff *newsk)
{
        struct sk_buff *list = (struct sk_buff *)list_;

        if (newsk->next || newsk->prev)
                printk("Suspicious queue tail: sk_buff on list!\n");
        IS_SKB(newsk);
        IS_SKB_HEAD(list);

        newsk->next = list;
        newsk->prev = list->prev;

        newsk->next->prev = newsk;
        newsk->prev->next = newsk;

        newsk->list = list_;
        list_->qlen++;
}

/*
 *	Remove an sk_buff from a list. This routine is also interrupt safe,
 *	so you can grab, read, and free buffers as another process adds them.
 */

struct sk_buff *skb_dequeue(struct sk_buff_head *list_)
{
        unsigned long flags;
        struct sk_buff *result;
        struct sk_buff *list = (struct sk_buff *)list_;

        save_flags(flags);
        cli();

        IS_SKB_HEAD(list);

        result = list->next;
        if (result == list) {
                restore_flags(flags);
                return NULL;
        }

        result->next->prev = list;
        list->next = result->next;

        result->next = NULL;
        result->prev = NULL;
        list_->qlen--;
        result->list = NULL;

        restore_flags(flags);

        IS_SKB(result);
        return result;
}

struct sk_buff *__skb_dequeue(struct sk_buff_head *list_)
{
        struct sk_buff *result;
        struct sk_buff *list = (struct sk_buff *)list_;

        IS_SKB_HEAD(list);

        result = list->next;
        if (result == list) {
                return NULL;
        }

        result->next->prev = list;
        list->next = result->next;

        result->next = NULL;
        result->prev = NULL;
        list_->qlen--;
        result->list = NULL;

        IS_SKB(result);
        return result;
}
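
/*
 *	Illustrative sketch (not part of the original source): the usual
 *	pattern for these primitives is a FIFO shared between interrupt and
 *	process context. rx_queue, received_skb and deliver() below are
 *	hypothetical names used only for the example.
 */
#if 0
        static struct sk_buff_head rx_queue;    /* hypothetical queue */
        struct sk_buff *skb;

        skb_queue_head_init(&rx_queue);
        /* producer side, e.g. a driver's receive interrupt: */
        skb_queue_tail(&rx_queue, received_skb);
        /* consumer side; skb_dequeue() is safe against the producer: */
        while ((skb = skb_dequeue(&rx_queue)) != NULL)
                deliver(skb);                   /* hypothetical consumer */
#endif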

/*
 *	Insert a packet before another one in a list.
 */
void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
{
        unsigned long flags;

        IS_SKB(old);
        IS_SKB(newsk);

        if(!old->next || !old->prev)
                printk("insert before unlisted item!\n");
        if(newsk->next || newsk->prev)
                printk("inserted item is already on a list.\n");

        save_flags(flags);
        cli();
        newsk->next = old;
        newsk->prev = old->prev;
        old->prev = newsk;
        newsk->prev->next = newsk;
        newsk->list = old->list;
        newsk->list->qlen++;

        restore_flags(flags);
}

/*
 *	Insert a packet between two others in a list.
 */

void __skb_insert(struct sk_buff *newsk,
        struct sk_buff * prev, struct sk_buff *next,
        struct sk_buff_head * list)
{
        IS_SKB(prev);
        IS_SKB(newsk);
        IS_SKB(next);

        if(!prev->next || !prev->prev)
                printk("insert after unlisted item!\n");
        if(!next->next || !next->prev)
                printk("insert before unlisted item!\n");
        if(newsk->next || newsk->prev)
                printk("inserted item is already on a list.\n");

        newsk->next = next;
        newsk->prev = prev;
        next->prev = newsk;
        prev->next = newsk;
        newsk->list = list;
        list->qlen++;
}

/*
 *	Place a packet after a given packet in a list.
 */
void skb_append(struct sk_buff *old, struct sk_buff *newsk)
{
        unsigned long flags;

        IS_SKB(old);
        IS_SKB(newsk);

        if(!old->next || !old->prev)
                printk("append after unlisted item!\n");
        if(newsk->next || newsk->prev)
                printk("appended item is already on a list.\n");

        save_flags(flags);
        cli();

        newsk->prev = old;
        newsk->next = old->next;
        newsk->next->prev = newsk;
        old->next = newsk;
        newsk->list = old->list;
        newsk->list->qlen++;

        restore_flags(flags);
}

/*
 *	Remove an sk_buff from its list. Works even without knowing the list it
 *	is sitting on, which can be handy at times. It also means that THE LIST
 *	MUST EXIST when you unlink. Thus a list must have its contents unlinked
 *	_FIRST_.
 */
void skb_unlink(struct sk_buff *skb)
{
        unsigned long flags;

        save_flags(flags);
        cli();

        IS_SKB(skb);

        if(skb->list)
        {
                skb->list->qlen--;
                skb->next->prev = skb->prev;
                skb->prev->next = skb->next;
                skb->next = NULL;
                skb->prev = NULL;
                skb->list = NULL;
        }
#ifdef PARANOID_BUGHUNT_MODE    /* This is legal but we sometimes want to watch it */
        else
                printk("skb_unlink: not a linked element\n");
#endif
        restore_flags(flags);
}

void __skb_unlink(struct sk_buff *skb)
{
        IS_SKB(skb);

        if(skb->list)
        {
                skb->list->qlen--;
                skb->next->prev = skb->prev;
                skb->prev->next = skb->next;
                skb->next = NULL;
                skb->prev = NULL;
                skb->list = NULL;
        }
#ifdef PARANOID_BUGHUNT_MODE    /* This is legal but we sometimes want to watch it */
        else
                printk("skb_unlink: not a linked element\n");
#endif
}

/*
 *	Add data to an sk_buff
 */

unsigned char *skb_put(struct sk_buff *skb, int len)
{
        unsigned char *tmp=skb->tail;
        IS_SKB(skb);
        skb->tail+=len;
        skb->len+=len;
        IS_SKB(skb);
        if(skb->tail>skb->end)
                panic("skput:over: %p:%d", __builtin_return_address(0),len);
        return tmp;
}

unsigned char *skb_push(struct sk_buff *skb, int len)
{
        IS_SKB(skb);
        skb->data-=len;
        skb->len+=len;
        IS_SKB(skb);
        if(skb->data<skb->head)
                panic("skpush:under: %p:%d", __builtin_return_address(0),len);
        return skb->data;
}

unsigned char * skb_pull(struct sk_buff *skb, int len)
{
        IS_SKB(skb);
        if(len>skb->len)
                return 0;
        skb->data+=len;
        skb->len-=len;
        return skb->data;
}

int skb_headroom(struct sk_buff *skb)
{
        IS_SKB(skb);
        return skb->data-skb->head;
}

int skb_tailroom(struct sk_buff *skb)
{
        IS_SKB(skb);
        return skb->end-skb->tail;
}

void skb_reserve(struct sk_buff *skb, int len)
{
        IS_SKB(skb);
        skb->data+=len;
        skb->tail+=len;
        if(skb->tail>skb->end)
                panic("sk_res: over");
        if(skb->data<skb->head)
                panic("sk_res: under");
        IS_SKB(skb);
}

void skb_trim(struct sk_buff *skb, int len)
{
        IS_SKB(skb);
        if(skb->len>len)
        {
                skb->len=len;
                skb->tail=skb->data+len;
        }
}

#endif
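
/*
 *	Illustrative use of the pointer operations above (a sketch, not part
 *	of the original source): build a buffer with headroom for a 14 byte
 *	link-level header in front of 100 bytes of payload. The sizes are
 *	arbitrary examples.
 */
#if 0
        struct sk_buff *skb = alloc_skb(114, GFP_KERNEL);
        if (skb != NULL) {
                unsigned char *payload, *hdr;
                skb_reserve(skb, 14);           /* data/tail move into the buffer  */
                payload = skb_put(skb, 100);    /* tail grows; returns the old tail */
                hdr = skb_push(skb, 14);        /* data moves back into the headroom */
                /* now hdr == skb->data, payload == hdr + 14, skb->len == 114 */
        }
#endif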

/*
 *	Free an sk_buff. This still knows about things it should
 *	not need to, like protocols and sockets.
 */

void kfree_skb(struct sk_buff *skb, int rw)
{
        if (skb == NULL)
        {
                printk(KERN_CRIT "kfree_skb: skb = NULL (from %p)\n",
                        __builtin_return_address(0));
                return;
        }
#if CONFIG_SKB_CHECK
        IS_SKB(skb);
#endif
        /* Check it twice: a locked skb is a rare event that only occurs
         * under extremely high load, so the normal code path should not
         * suffer the overhead of the cli.
         */
        if (skb->lock) {
                unsigned long flags;

                save_flags(flags); cli();
                if(skb->lock) {
                        skb->free = 3;    /* Free when unlocked */
                        net_free_locked++;
                        restore_flags(flags);
                        return;
                }
                restore_flags(flags);
        }

        if (skb->free == 2)
                printk(KERN_WARNING "Warning: kfree_skb passed an skb that nobody set the free flag on! (from %p)\n",
                        __builtin_return_address(0));
        if (skb->list)
                printk(KERN_WARNING "Warning: kfree_skb passed an skb still on a list (from %p).\n",
                        __builtin_return_address(0));

        if(skb->destructor)
                skb->destructor(skb);
        if (skb->sk)
        {
                struct sock * sk = skb->sk;
                if(sk->prot!=NULL)
                {
                        if (rw)
                                sock_rfree(sk, skb);
                        else
                                sock_wfree(sk, skb);
                }
                else
                {
                        if (rw)
                                atomic_sub(skb->truesize, &sk->rmem_alloc);
                        else {
                                if(!sk->dead)
                                        sk->write_space(sk);
                                atomic_sub(skb->truesize, &sk->wmem_alloc);
                        }
                        kfree_skbmem(skb);
                }
        }
        else
                kfree_skbmem(skb);
}
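
/*
 *	Summary of the skb->free convention used above (added commentary;
 *	the values themselves come from this file):
 *		free == 1	ordinary buffer, kfree_skb() releases it
 *		free == 2	allocator default; flags callers that never
 *				claimed the buffer
 *		free == 3	freed while device-locked; actually released
 *				later by skb_device_unlock()/dev_kfree_skb()
 */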

#ifdef TRACK_SKB_USAGE
int skbtotal = 0;
#endif

/*
 *	Allocate a new skbuff. We do this ourselves so we can fill in a few 'private'
 *	fields and also do memory statistics to find all the [BEEP] leaks.
 */
struct sk_buff *alloc_skb(unsigned int size,int priority)
{
        struct sk_buff *skb;
        int len=size;
        unsigned char *bptr;

        if (intr_count && priority!=GFP_ATOMIC)
        {
                static int count = 0;
                if (++count < 5) {
                        printk(KERN_ERR "alloc_skb called nonatomically from interrupt %p\n",
                                __builtin_return_address(0));
                        priority = GFP_ATOMIC;
                }
        }

        size=(size+15)&~15;             /* Allow for alignments. Make a multiple of 16 bytes */
        size+=sizeof(struct sk_buff);   /* And stick the control itself on the end */

        /*
         *	Allocate some space
         */

        bptr=(unsigned char *)kmalloc(size,priority);
        if (bptr == NULL)
        {
#ifdef TRACK_SKB_USAGE
                printk("Failed to allocate %d byte skb\n", size);
#endif
                net_fails++;
                return NULL;
        }

#ifdef TRACK_SKB_USAGE
        skbtotal += size;
        printk("Allocated %d byte skb, for %d total\n", size, skbtotal);
#endif

        /*
         *	Now we play a little game with the caches. Linux kmalloc is
         *	a bit cache dumb, in fact it's just about maximally non-optimal
         *	for typical kernel buffers. We actually run faster by doing the
         *	following, which is to deliberately put the skb at the _end_,
         *	not the start, of the memory block.
         */
        net_allocs++;

        skb=(struct sk_buff *)(bptr+size)-1;

#ifdef PARANOID_BUGHUNT_MODE
        if(skb->magic_debug_cookie == SK_GOOD_SKB)
                printk("Kernel kmalloc handed us an existing skb (%p)\n",skb);
#endif

        skb->count = 1;         /* only one reference to this */
        skb->data_skb = NULL;   /* and we're our own data skb */

        skb->free = 2;  /* Invalid so we pick up forgetful users */
        skb->lock = 0;
        skb->pkt_type = PACKET_HOST;    /* Default type */
        skb->pkt_bridged = 0;           /* Not bridged */
        skb->prev = skb->next = skb->link3 = NULL;
        skb->list = NULL;
        skb->sk = NULL;
        skb->truesize=size;
        skb->localroute=0;
        skb->stamp.tv_sec=0;     /* No idea about time */
        skb->ip_summed = 0;
        memset(skb->proto_priv, 0, sizeof(skb->proto_priv));
        net_skbcount++;
#if CONFIG_SKB_CHECK
        skb->magic_debug_cookie = SK_GOOD_SKB;
#endif
        skb->users = 0;
        /* Load the data pointers */
        skb->head=bptr;
        skb->data=bptr;
        skb->tail=bptr;
        skb->end=bptr+len;
        skb->len=0;
        skb->destructor=NULL;
        return skb;
}
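
/*
 *	Layout sketch for the allocation above (illustrative, not part of the
 *	original source). For alloc_skb(len, ...) one kmalloc()ed block holds
 *	both the data area and the control structure, with the sk_buff
 *	deliberately placed at the end:
 *
 *	bptr              bptr+len         bptr+size-sizeof(sk_buff)  bptr+size
 *	|<--- data area --->|<--- padding --->|<---- struct sk_buff ---->|
 *	head=data=tail     end                            skb
 *
 *	where size = ((len+15)&~15) + sizeof(struct sk_buff).
 */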

/*
 *	Free an skbuff by memory
 */

static inline void __kfree_skbmem(struct sk_buff *skb)
{
        /* don't do anything if somebody still uses us */
        if (atomic_dec_and_test(&skb->count)) {
                kfree(skb->head);
                atomic_dec(&net_skbcount);
        }
}

void kfree_skbmem(struct sk_buff *skb)
{
        void * addr = skb->head;

        /* don't do anything if somebody still uses us */
        if (atomic_dec_and_test(&skb->count)) {
                /* free the skb that contains the actual data if we've clone()'d */
                if (skb->data_skb) {
#ifdef TRACK_SKB_USAGE
                        skbtotal -= skb->truesize;
                        printk("Deallocated %d byte skb clone, for %d total\n", skb->truesize, skbtotal);
#endif
                        addr = skb;
                        __kfree_skbmem(skb->data_skb);
                }
#ifdef TRACK_SKB_USAGE
                skbtotal -= skb->truesize;
                printk("Deallocated %d byte skb, for %d total\n", skb->truesize, skbtotal);
#endif
                kfree(addr);
                atomic_dec(&net_skbcount);
        }
}

/*
 *	Duplicate an sk_buff. The new one is not owned by a socket or locked
 *	and will be freed on deletion.
 */

struct sk_buff *skb_clone(struct sk_buff *skb, int priority)
{
        struct sk_buff *n;

        IS_SKB(skb);
        n = kmalloc(sizeof(*n), priority);
        if (!n)
                return NULL;
        memcpy(n, skb, sizeof(*n));
        n->count = 1;
        if (skb->data_skb)
                skb = skb->data_skb;
        atomic_inc(&skb->count);
        atomic_inc(&net_allocs);
        atomic_inc(&net_skbcount);
        n->data_skb = skb;
        n->next = n->prev = n->link3 = NULL;
        n->list = NULL;
        n->sk = NULL;
        n->free = 1;
        n->tries = 0;
        n->lock = 0;
        n->users = 0;
#ifdef TRACK_SKB_USAGE
        skbtotal += skb->truesize;
        printk("Allocated %d byte skb clone, for %d total\n", skb->truesize, skbtotal);
#endif

        return n;
}
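
/*
 *	Illustrative sketch (not part of the original source): a clone gets
 *	its own sk_buff header but shares the data area through data_skb, so
 *	the underlying block is only kfree()d when the last reference goes.
 *	Assumes skb has no destructor set.
 */
#if 0
        struct sk_buff *copy = skb_clone(skb, GFP_ATOMIC);
        if (copy != NULL) {
                /* copy->data == skb->data: same bytes, two headers */
                kfree_skb(copy, FREE_READ);     /* data area survives ...       */
                kfree_skb(skb, FREE_READ);      /* ... until the last ref drops */
        }
#endif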

/*
 *	This is slower, and copies the whole data area
 */

struct sk_buff *skb_copy(struct sk_buff *skb, int priority)
{
        struct sk_buff *n;
        unsigned long offset;

        /*
         *	Allocate the copy buffer
         */

        IS_SKB(skb);

        n=alloc_skb(skb->truesize-sizeof(struct sk_buff),priority);
        if(n==NULL)
                return NULL;

        /*
         *	Shift between the two data areas in bytes
         */

        offset=n->head-skb->head;

        /* Set the data pointer */
        skb_reserve(n,skb->data-skb->head);
        /* Set the tail pointer and length */
        skb_put(n,skb->len);
        /* Copy the bytes */
        memcpy(n->head,skb->head,skb->end-skb->head);
        n->link3=NULL;
        n->list=NULL;
        n->sk=NULL;
        n->when=skb->when;
        n->dev=skb->dev;
        n->h.raw=skb->h.raw+offset;
        n->mac.raw=skb->mac.raw+offset;
        n->ip_hdr=(struct iphdr *)(((char *)skb->ip_hdr)+offset);
        n->saddr=skb->saddr;
        n->daddr=skb->daddr;
        n->raddr=skb->raddr;
        n->seq=skb->seq;
        n->end_seq=skb->end_seq;
        n->ack_seq=skb->ack_seq;
        n->acked=skb->acked;
        memcpy(n->proto_priv, skb->proto_priv, sizeof(skb->proto_priv));
        n->used=skb->used;
        n->free=1;
        n->arp=skb->arp;
        n->tries=0;
        n->lock=0;
        n->users=0;
        n->pkt_type=skb->pkt_type;
        n->stamp=skb->stamp;

        IS_SKB(n);
        return n;
}

/*
 *	Skbuff device locking
 */

void skb_device_lock(struct sk_buff *skb)
{
        unsigned long flags;

        save_flags(flags); cli();
        if(skb->lock)
                printk("double lock on device queue, lock=%d caller=%p\n",
                        skb->lock, (&skb)[-1]);
        else
                net_locked++;
        skb->lock++;
        restore_flags(flags);
}

void skb_device_unlock(struct sk_buff *skb)
{
        unsigned long flags;

        save_flags(flags); cli();
        if(skb->lock==0)
                printk("double unlock on device queue!\n");
        skb->lock--;
        if(skb->lock==0)
                net_locked--;
        restore_flags(flags);

        if (skb->free == 3) {
                skb->free = 1;
                kfree_skb(skb, FREE_WRITE);
        }
}

void dev_kfree_skb(struct sk_buff *skb, int mode)
{
        unsigned long flags;

        save_flags(flags);
        cli();
        if(skb->lock)
        {
                net_locked--;
                skb->lock--;
        }
        if (!skb->lock && (skb->free == 1 || skb->free == 3))
        {
                restore_flags(flags);
                kfree_skb(skb,mode);
        }
        else
                restore_flags(flags);
}

struct sk_buff *dev_alloc_skb(unsigned int length)
{
        struct sk_buff *skb;

        skb = alloc_skb(length+16, GFP_ATOMIC);
        if (skb)
                skb_reserve(skb,16);
        return skb;
}
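
/*
 *	Illustrative receive-path sketch (not part of the original source):
 *	dev_alloc_skb() pads the request by 16 bytes and reserves them, so a
 *	driver can copy a frame in while leaving headroom for later header
 *	pushes. pkt_len and rx_buf below are hypothetical names.
 */
#if 0
        struct sk_buff *skb = dev_alloc_skb(pkt_len);
        if (skb != NULL)
                memcpy(skb_put(skb, pkt_len), rx_buf, pkt_len);
#endif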

int skb_device_locked(struct sk_buff *skb)
{
        return skb->lock ? 1 : 0;
}
