/*
 *      Definitions for the 'struct sk_buff' memory handlers.
 *
 *      Authors:
 *              Alan Cox, <gw4pts@gw4pts.ampr.org>
 *              Florian La Roche, <rzsfl@rz.uni-sb.de>
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/cache.h>

#include <asm/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/highmem.h>

#define HAVE_ALLOC_SKB          /* For the drivers to know */
#define HAVE_ALIGNABLE_SKB      /* Ditto 8)                */
#define SLAB_SKB                /* Slabified skbuffs       */

#define CHECKSUM_NONE 0
#define CHECKSUM_HW 1
#define CHECKSUM_UNNECESSARY 2

#define SKB_DATA_ALIGN(X)       (((X) + (SMP_CACHE_BYTES-1)) & ~(SMP_CACHE_BYTES-1))
#define SKB_MAX_ORDER(X,ORDER)  (((PAGE_SIZE<<(ORDER)) - (X) - sizeof(struct skb_shared_info))&~(SMP_CACHE_BYTES-1))
#define SKB_MAX_HEAD(X)         (SKB_MAX_ORDER((X),0))
#define SKB_MAX_ALLOC           (SKB_MAX_ORDER(0,2))

/* A. Checksumming of received packets by device.
 *
 *      NONE: device failed to checksum this packet.
 *              skb->csum is undefined.
 *
 *      UNNECESSARY: device parsed the packet and would have verified the
 *              checksum. skb->csum is undefined.
 *            This is a bad option, but, unfortunately, many vendors do it,
 *            apparently with the secret goal of selling you a new device
 *            when you add a new protocol (e.g. IPv6) to your host. 8)
 *
 *      HW: the most generic way. Device supplied the checksum of the _whole_
 *          packet as seen by netif_rx in skb->csum.
 *          NOTE: Even if a device supports only some protocols but is
 *          able to produce some skb->csum, it MUST use HW,
 *          not UNNECESSARY.
 *
 * B. Checksumming on output.
 *
 *      NONE: skb is checksummed by protocol or csum is not required.
 *
 *      HW: device is required to csum the packet as seen by hard_start_xmit
 *      from skb->h.raw to the end and to record the checksum
 *      at skb->h.raw+skb->csum.
 *
 *      Device must show its capabilities in dev->features, set
 *      at device setup time.
 *      NETIF_F_HW_CSUM - the device is clever and is able to checksum
 *                        everything.
 *      NETIF_F_NO_CSUM - loopback or reliable single hop media.
 *      NETIF_F_IP_CSUM - the device is dumb. It is able to csum only
 *                        TCP/UDP over IPv4. Sigh. Vendors like this
 *                        way for some unknown reason. Though, see the comment
 *                        above about CHECKSUM_UNNECESSARY. 8)
 *
 *      Any questions? No questions, good.              --ANK
 */
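
/*
 * Example sketch of rule A above (illustrative only; the hw_csum and
 * hw_csum_ok values stand for data read from hypothetical device
 * descriptors, and struct sk_buff is only defined further down, so the
 * example is kept under #if 0 and never compiled).
 */
#if 0
static void example_set_rx_checksum(struct sk_buff *skb,
                                    unsigned int hw_csum, int hw_csum_ok)
{
        if (hw_csum_ok) {
                /* Device checksummed the whole packet as seen by netif_rx. */
                skb->csum = hw_csum;
                skb->ip_summed = CHECKSUM_HW;
        } else {
                /* Device could not help; the stack verifies in software. */
                skb->ip_summed = CHECKSUM_NONE;
        }
}
#endif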

#ifdef __i386__
#define NET_CALLER(arg) (*(((void**)&arg)-1))
#else
#define NET_CALLER(arg) __builtin_return_address(0)
#endif

#ifdef CONFIG_NETFILTER
struct nf_conntrack {
        atomic_t use;
        void (*destroy)(struct nf_conntrack *);
};

struct nf_ct_info {
        struct nf_conntrack *master;
};
#endif

struct sk_buff_head {
        /* These two members must be first. */
        struct sk_buff  * next;
        struct sk_buff  * prev;

        __u32           qlen;
        spinlock_t      lock;
};

struct sk_buff;

#define MAX_SKB_FRAGS 6

typedef struct skb_frag_struct skb_frag_t;

struct skb_frag_struct
{
        struct page *page;
        __u16 page_offset;
        __u16 size;
};

/* This data is invariant across clones and lives at
 * the end of the header data, ie. at skb->end.
 */
struct skb_shared_info {
        atomic_t        dataref;
        unsigned int    nr_frags;
        struct sk_buff  *frag_list;
        skb_frag_t      frags[MAX_SKB_FRAGS];
};

struct sk_buff {
        /* These two members must be first. */
        struct sk_buff  * next;                 /* Next buffer in list                          */
        struct sk_buff  * prev;                 /* Previous buffer in list                      */

        struct sk_buff_head * list;             /* List we are on                               */
        struct sock     *sk;                    /* Socket we are owned by                       */
        struct timeval  stamp;                  /* Time we arrived                              */
        struct net_device       *dev;           /* Device we arrived on/are leaving by          */
        struct net_device       *real_dev;      /* For support of point to point protocols
                                                   (e.g. 802.3ad) over bonding, we must save the
                                                   physical device that got the packet before
                                                   replacing skb->dev with the virtual device.  */

        /* Transport layer header */
        union
        {
                struct tcphdr   *th;
                struct udphdr   *uh;
                struct icmphdr  *icmph;
                struct igmphdr  *igmph;
                struct iphdr    *ipiph;
                struct spxhdr   *spxh;
                unsigned char   *raw;
        } h;

        /* Network layer header */
        union
        {
                struct iphdr    *iph;
                struct ipv6hdr  *ipv6h;
                struct arphdr   *arph;
                struct ipxhdr   *ipxh;
                unsigned char   *raw;
        } nh;

        /* Link layer header */
        union
        {
                struct ethhdr   *ethernet;
                unsigned char   *raw;
        } mac;

        struct  dst_entry *dst;

        /*
         * This is the control buffer. It is free to use for every
         * layer. Please put your private variables there. If you
         * want to keep them across layers you have to do a skb_clone()
         * first. This is owned by whoever has the skb queued ATM.
         */
        char            cb[48];

        unsigned int    len;                    /* Length of actual data                        */
        unsigned int    data_len;
        unsigned int    csum;                   /* Checksum                                     */
        unsigned char   __unused,               /* Dead field, may be reused                    */
                        cloned,                 /* head may be cloned (check refcnt to be sure). */
                        pkt_type,               /* Packet class                                 */
                        ip_summed;              /* Driver fed us an IP checksum                 */
        __u32           priority;               /* Packet queueing priority                     */
        atomic_t        users;                  /* User count - see datagram.c,tcp.c            */
        unsigned short  protocol;               /* Packet protocol from driver.                 */
        unsigned short  security;               /* Security level of packet                     */
        unsigned int    truesize;               /* Buffer size                                  */

        unsigned char   *head;                  /* Head of buffer                               */
        unsigned char   *data;                  /* Data head pointer                            */
        unsigned char   *tail;                  /* Tail pointer                                 */
        unsigned char   *end;                   /* End pointer                                  */

        void            (*destructor)(struct sk_buff *);        /* Destruct function            */
#ifdef CONFIG_NETFILTER
        /* Can be used for communication between hooks. */
        unsigned long   nfmark;
        /* Cache info */
        __u32           nfcache;
        /* Associated connection, if any */
        struct nf_ct_info *nfct;
#ifdef CONFIG_NETFILTER_DEBUG
        unsigned int nf_debug;
#endif
#endif /*CONFIG_NETFILTER*/

#if defined(CONFIG_HIPPI)
        union{
                __u32   ifield;
        } private;
#endif

#ifdef CONFIG_NET_SCHED
       __u32           tc_index;               /* traffic control index */
#endif
};

#ifdef __KERNEL__
/*
 *      Handling routines are only of interest to the kernel
 */
#include <linux/slab.h>

#include <asm/system.h>

extern void                     __kfree_skb(struct sk_buff *skb);
extern struct sk_buff *         alloc_skb(unsigned int size, int priority);
extern void                     kfree_skbmem(struct sk_buff *skb);
extern struct sk_buff *         skb_clone(struct sk_buff *skb, int priority);
extern struct sk_buff *         skb_copy(const struct sk_buff *skb, int priority);
extern struct sk_buff *         pskb_copy(struct sk_buff *skb, int gfp_mask);
extern int                      pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, int gfp_mask);
extern struct sk_buff *         skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom);
extern struct sk_buff *         skb_copy_expand(const struct sk_buff *skb,
                                                int newheadroom,
                                                int newtailroom,
                                                int priority);
extern struct sk_buff *         skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a)        kfree_skb(a)
extern void     skb_over_panic(struct sk_buff *skb, int len, void *here);
extern void     skb_under_panic(struct sk_buff *skb, int len, void *here);

/* Internal */
#define skb_shinfo(SKB)         ((struct skb_shared_info *)((SKB)->end))

/**
 *      skb_queue_empty - check if a queue is empty
 *      @list: queue head
 *
 *      Returns true if the queue is empty, false otherwise.
 */

static inline int skb_queue_empty(struct sk_buff_head *list)
{
        return (list->next == (struct sk_buff *) list);
}

/**
 *      skb_get - reference buffer
 *      @skb: buffer to reference
 *
 *      Makes another reference to a socket buffer and returns a pointer
 *      to the buffer.
 */

static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
        atomic_inc(&skb->users);
        return skb;
}

/*
 * If users==1, we are the only owner and can avoid a redundant
 * atomic change.
 */

/**
 *      kfree_skb - free an sk_buff
 *      @skb: buffer to free
 *
 *      Drop a reference to the buffer and free it if the usage count has
 *      hit zero.
 */

static inline void kfree_skb(struct sk_buff *skb)
{
        if (atomic_read(&skb->users) == 1 || atomic_dec_and_test(&skb->users))
                __kfree_skb(skb);
}

/* Use this if you didn't touch the skb state [for fast switching] */
static inline void kfree_skb_fast(struct sk_buff *skb)
{
        if (atomic_read(&skb->users) == 1 || atomic_dec_and_test(&skb->users))
                kfree_skbmem(skb);
}

/**
 *      skb_cloned - is the buffer a clone
 *      @skb: buffer to check
 *
 *      Returns true if the buffer was generated with skb_clone() and is
 *      one of multiple shared copies of the buffer. Cloned buffers are
 *      shared data so must not be written to under normal circumstances.
 */

static inline int skb_cloned(struct sk_buff *skb)
{
        return skb->cloned && atomic_read(&skb_shinfo(skb)->dataref) != 1;
}

/**
 *      skb_shared - is the buffer shared
 *      @skb: buffer to check
 *
 *      Returns true if more than one person has a reference to this
 *      buffer.
 */

static inline int skb_shared(struct sk_buff *skb)
{
        return (atomic_read(&skb->users) != 1);
}

/**
 *      skb_share_check - check if buffer is shared and if so clone it
 *      @skb: buffer to check
 *      @pri: priority for memory allocation
 *
 *      If the buffer is shared the buffer is cloned and the old copy
 *      drops a reference. A new clone with a single reference is returned.
 *      If the buffer is not shared the original buffer is returned. When
 *      called from interrupt context or with spinlocks held, @pri must
 *      be %GFP_ATOMIC.
 *
 *      %NULL is returned on a memory allocation failure.
 */

static inline struct sk_buff *skb_share_check(struct sk_buff *skb, int pri)
{
        if (skb_shared(skb)) {
                struct sk_buff *nskb;
                nskb = skb_clone(skb, pri);
                kfree_skb(skb);
                return nskb;
        }
        return skb;
}
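
/*
 * Example sketch (assumed protocol-handler context, not from the original
 * sources): a receive routine that may modify the buffer first makes sure
 * it holds the only reference.  Kept under #if 0 so it is never compiled.
 */
#if 0
static int example_proto_rcv(struct sk_buff *skb)
{
        /* If someone else also holds this skb (e.g. a sniffer), work on a
         * private clone; the shared copy keeps its own reference. */
        skb = skb_share_check(skb, GFP_ATOMIC);
        if (skb == NULL)
                return 0;               /* allocation failure: packet dropped */

        /* ... parse and possibly modify skb here ... */

        kfree_skb(skb);
        return 0;
}
#endif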


/*
 *      Copy shared buffers into a new sk_buff. We effectively do COW on
 *      packets to handle cases where we have a local reader and forward
 *      and a couple of other messy ones. The normal one is tcpdumping
 *      a packet that's being forwarded.
 */

/**
 *      skb_unshare - make a copy of a shared buffer
 *      @skb: buffer to check
 *      @pri: priority for memory allocation
 *
 *      If the socket buffer is a clone then this function creates a new
 *      copy of the data, drops a reference count on the old copy and returns
 *      the new copy with the reference count at 1. If the buffer is not a clone
 *      the original buffer is returned. When called with a spinlock held or
 *      from interrupt state, @pri must be %GFP_ATOMIC.
 *
 *      %NULL is returned on a memory allocation failure.
 */

static inline struct sk_buff *skb_unshare(struct sk_buff *skb, int pri)
{
        struct sk_buff *nskb;
        if(!skb_cloned(skb))
                return skb;
        nskb=skb_copy(skb, pri);
        kfree_skb(skb);         /* Free our shared copy */
        return nskb;
}

/**
 *      skb_peek - peek at the head of a queue
 *      @list_: list to peek at
 *
 *      Peek an &sk_buff. Unlike most other operations you _MUST_
 *      be careful with this one. A peek leaves the buffer on the
 *      list and someone else may run off with it. You must hold
 *      the appropriate locks or have a private queue to do this.
 *
 *      Returns %NULL for an empty list or a pointer to the head element.
 *      The reference count is not incremented and the reference is therefore
 *      volatile. Use with caution.
 */

static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
{
        struct sk_buff *list = ((struct sk_buff *)list_)->next;
        if (list == (struct sk_buff *)list_)
                list = NULL;
        return list;
}

/**
 *      skb_peek_tail - peek at the tail of a queue
 *      @list_: list to peek at
 *
 *      Peek an &sk_buff. Unlike most other operations you _MUST_
 *      be careful with this one. A peek leaves the buffer on the
 *      list and someone else may run off with it. You must hold
 *      the appropriate locks or have a private queue to do this.
 *
 *      Returns %NULL for an empty list or a pointer to the tail element.
 *      The reference count is not incremented and the reference is therefore
 *      volatile. Use with caution.
 */

static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
{
        struct sk_buff *list = ((struct sk_buff *)list_)->prev;
        if (list == (struct sk_buff *)list_)
                list = NULL;
        return list;
}

/**
 *      skb_queue_len   - get queue length
 *      @list_: list to measure
 *
 *      Return the length of an &sk_buff queue.
 */

static inline __u32 skb_queue_len(struct sk_buff_head *list_)
{
        return(list_->qlen);
}

static inline void skb_queue_head_init(struct sk_buff_head *list)
{
        spin_lock_init(&list->lock);
        list->prev = (struct sk_buff *)list;
        list->next = (struct sk_buff *)list;
        list->qlen = 0;
}

/*
 *      Insert an sk_buff at the start of a list.
 *
 *      The "__skb_xxxx()" functions are the non-atomic ones that
 *      can only be called with interrupts disabled.
 */

/**
 *      __skb_queue_head - queue a buffer at the list head
 *      @list: list to use
 *      @newsk: buffer to queue
 *
 *      Queue a buffer at the start of a list. This function takes no locks
 *      and you must therefore hold required locks before calling it.
 *
 *      A buffer cannot be placed on two lists at the same time.
 */

static inline void __skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
        struct sk_buff *prev, *next;

        newsk->list = list;
        list->qlen++;
        prev = (struct sk_buff *)list;
        next = prev->next;
        newsk->next = next;
        newsk->prev = prev;
        next->prev = newsk;
        prev->next = newsk;
}


/**
 *      skb_queue_head - queue a buffer at the list head
 *      @list: list to use
 *      @newsk: buffer to queue
 *
 *      Queue a buffer at the start of the list. This function takes the
 *      list lock and can be used safely with other locking &sk_buff functions.
 *
 *      A buffer cannot be placed on two lists at the same time.
 */

static inline void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
        unsigned long flags;

        spin_lock_irqsave(&list->lock, flags);
        __skb_queue_head(list, newsk);
        spin_unlock_irqrestore(&list->lock, flags);
}

/**
 *      __skb_queue_tail - queue a buffer at the list tail
 *      @list: list to use
 *      @newsk: buffer to queue
 *
 *      Queue a buffer at the end of a list. This function takes no locks
 *      and you must therefore hold required locks before calling it.
 *
 *      A buffer cannot be placed on two lists at the same time.
 */


static inline void __skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
        struct sk_buff *prev, *next;

        newsk->list = list;
        list->qlen++;
        next = (struct sk_buff *)list;
        prev = next->prev;
        newsk->next = next;
        newsk->prev = prev;
        next->prev = newsk;
        prev->next = newsk;
}

/**
 *      skb_queue_tail - queue a buffer at the list tail
 *      @list: list to use
 *      @newsk: buffer to queue
 *
 *      Queue a buffer at the tail of the list. This function takes the
 *      list lock and can be used safely with other locking &sk_buff functions.
 *
 *      A buffer cannot be placed on two lists at the same time.
 */

static inline void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
        unsigned long flags;

        spin_lock_irqsave(&list->lock, flags);
        __skb_queue_tail(list, newsk);
        spin_unlock_irqrestore(&list->lock, flags);
}

/**
 *      __skb_dequeue - remove from the head of the queue
 *      @list: list to dequeue from
 *
 *      Remove the head of the list. This function does not take any locks
 *      so must be used with appropriate locks held only. The head item is
 *      returned or %NULL if the list is empty.
 */

static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
        struct sk_buff *next, *prev, *result;

        prev = (struct sk_buff *) list;
        next = prev->next;
        result = NULL;
        if (next != prev) {
                result = next;
                next = next->next;
                list->qlen--;
                next->prev = prev;
                prev->next = next;
                result->next = NULL;
                result->prev = NULL;
                result->list = NULL;
        }
        return result;
}

/**
 *      skb_dequeue - remove from the head of the queue
 *      @list: list to dequeue from
 *
 *      Remove the head of the list. The list lock is taken so the function
 *      may be used safely with other locking list functions. The head item is
 *      returned or %NULL if the list is empty.
 */

static inline struct sk_buff *skb_dequeue(struct sk_buff_head *list)
{
        unsigned long flags;
        struct sk_buff *result;

        spin_lock_irqsave(&list->lock, flags);
        result = __skb_dequeue(list);
        spin_unlock_irqrestore(&list->lock, flags);
        return result;
}
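
/*
 * Example sketch (assumed driver context, not from the original sources):
 * a private holding queue managed with the skb_queue_* helpers above.
 * Kept under #if 0 so it is never compiled.
 */
#if 0
static struct sk_buff_head example_backlog;

static void example_backlog_init(void)
{
        skb_queue_head_init(&example_backlog);
}

static void example_backlog_add(struct sk_buff *skb)
{
        /* Takes the queue lock with IRQs disabled, so this is safe from
         * interrupt context as well as process context. */
        skb_queue_tail(&example_backlog, skb);
}

static void example_backlog_flush(void)
{
        struct sk_buff *skb;

        while ((skb = skb_dequeue(&example_backlog)) != NULL)
                kfree_skb(skb);
}
#endif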

/*
 *      Insert a packet on a list.
 */

static inline void __skb_insert(struct sk_buff *newsk,
        struct sk_buff * prev, struct sk_buff *next,
        struct sk_buff_head * list)
{
        newsk->next = next;
        newsk->prev = prev;
        next->prev = newsk;
        prev->next = newsk;
        newsk->list = list;
        list->qlen++;
}

/**
 *      skb_insert      -       insert a buffer
 *      @old: buffer to insert before
 *      @newsk: buffer to insert
 *
 *      Place a packet before a given packet in a list. The list locks are taken
 *      and this function is atomic with respect to other list locked calls.
 *      A buffer cannot be placed on two lists at the same time.
 */

static inline void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
{
        unsigned long flags;

        spin_lock_irqsave(&old->list->lock, flags);
        __skb_insert(newsk, old->prev, old, old->list);
        spin_unlock_irqrestore(&old->list->lock, flags);
}

/*
 *      Place a packet after a given packet in a list.
 */

static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk)
{
        __skb_insert(newsk, old, old->next, old->list);
}

/**
 *      skb_append      -       append a buffer
 *      @old: buffer to insert after
 *      @newsk: buffer to insert
 *
 *      Place a packet after a given packet in a list. The list locks are taken
 *      and this function is atomic with respect to other list locked calls.
 *      A buffer cannot be placed on two lists at the same time.
 */


static inline void skb_append(struct sk_buff *old, struct sk_buff *newsk)
{
        unsigned long flags;

        spin_lock_irqsave(&old->list->lock, flags);
        __skb_append(old, newsk);
        spin_unlock_irqrestore(&old->list->lock, flags);
}

/*
 * remove sk_buff from list. _Must_ be called atomically, and with
 * the list known.
 */

static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
        struct sk_buff * next, * prev;

        list->qlen--;
        next = skb->next;
        prev = skb->prev;
        skb->next = NULL;
        skb->prev = NULL;
        skb->list = NULL;
        next->prev = prev;
        prev->next = next;
}

/**
 *      skb_unlink      -       remove a buffer from a list
 *      @skb: buffer to remove
 *
 *      Remove a packet from the list it is on. The list locks are taken
 *      and this function is atomic with respect to other list locked calls.
 *
 *      Works even without knowing the list it is sitting on, which can be
 *      handy at times. It also means that THE LIST MUST EXIST when you
 *      unlink. Thus a list must have its contents unlinked before it is
 *      destroyed.
 */

static inline void skb_unlink(struct sk_buff *skb)
{
        struct sk_buff_head *list = skb->list;

        if(list) {
                unsigned long flags;

                spin_lock_irqsave(&list->lock, flags);
                if(skb->list == list)
                        __skb_unlink(skb, skb->list);
                spin_unlock_irqrestore(&list->lock, flags);
        }
}

/* XXX: more streamlined implementation */

/**
 *      __skb_dequeue_tail - remove from the tail of the queue
 *      @list: list to dequeue from
 *
 *      Remove the tail of the list. This function does not take any locks
 *      so must be used with appropriate locks held only. The tail item is
 *      returned or %NULL if the list is empty.
 */

static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
        struct sk_buff *skb = skb_peek_tail(list);
        if (skb)
                __skb_unlink(skb, list);
        return skb;
}

/**
 *      skb_dequeue_tail - remove from the tail of the queue
 *      @list: list to dequeue from
 *
 *      Remove the tail of the list. The list lock is taken so the function
 *      may be used safely with other locking list functions. The tail item is
 *      returned or %NULL if the list is empty.
 */

static inline struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
{
        unsigned long flags;
        struct sk_buff *result;

        spin_lock_irqsave(&list->lock, flags);
        result = __skb_dequeue_tail(list);
        spin_unlock_irqrestore(&list->lock, flags);
        return result;
}

static inline int skb_is_nonlinear(const struct sk_buff *skb)
{
        return skb->data_len;
}

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
        return skb->len - skb->data_len;
}

#define SKB_PAGE_ASSERT(skb) do { if (skb_shinfo(skb)->nr_frags) out_of_line_bug(); } while (0)
#define SKB_FRAG_ASSERT(skb) do { if (skb_shinfo(skb)->frag_list) out_of_line_bug(); } while (0)
#define SKB_LINEAR_ASSERT(skb) do { if (skb_is_nonlinear(skb)) out_of_line_bug(); } while (0)

/*
 *      Add data to an sk_buff
 */

static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
        unsigned char *tmp=skb->tail;
        SKB_LINEAR_ASSERT(skb);
        skb->tail+=len;
        skb->len+=len;
        return tmp;
}

/**
 *      skb_put - add data to a buffer
 *      @skb: buffer to use
 *      @len: amount of data to add
 *
 *      This function extends the used data area of the buffer. If this would
 *      exceed the total buffer size the kernel will panic. A pointer to the
 *      first byte of the extra data is returned.
 */

static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
        unsigned char *tmp=skb->tail;
        SKB_LINEAR_ASSERT(skb);
        skb->tail+=len;
        skb->len+=len;
        if(skb->tail>skb->end) {
                skb_over_panic(skb, len, current_text_addr());
        }
        return tmp;
}

static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
        skb->data-=len;
        skb->len+=len;
        return skb->data;
}

/**
 *      skb_push - add data to the start of a buffer
 *      @skb: buffer to use
 *      @len: amount of data to add
 *
 *      This function extends the used data area of the buffer at the buffer
 *      start. If this would exceed the total buffer headroom the kernel will
 *      panic. A pointer to the first byte of the extra data is returned.
 */

static inline unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
        skb->data-=len;
        skb->len+=len;
        if(skb->data<skb->head) {
                skb_under_panic(skb, len, current_text_addr());
        }
        return skb->data;
}

static inline char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
        skb->len-=len;
        if (skb->len < skb->data_len)
                out_of_line_bug();
        return  skb->data+=len;
}

/**
 *      skb_pull - remove data from the start of a buffer
 *      @skb: buffer to use
 *      @len: amount of data to remove
 *
 *      This function removes data from the start of a buffer, returning
 *      the memory to the headroom. A pointer to the next data in the buffer
 *      is returned. Once the data has been pulled future pushes will overwrite
 *      the old data.
 */

static inline unsigned char * skb_pull(struct sk_buff *skb, unsigned int len)
{
        if (len > skb->len)
                return NULL;
        return __skb_pull(skb,len);
}

extern unsigned char * __pskb_pull_tail(struct sk_buff *skb, int delta);

static inline char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
        if (len > skb_headlen(skb) &&
            __pskb_pull_tail(skb, len-skb_headlen(skb)) == NULL)
                return NULL;
        skb->len -= len;
        return  skb->data += len;
}

static inline unsigned char * pskb_pull(struct sk_buff *skb, unsigned int len)
{
        if (len > skb->len)
                return NULL;
        return __pskb_pull(skb,len);
}

static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
        if (len <= skb_headlen(skb))
                return 1;
        if (len > skb->len)
                return 0;
        return (__pskb_pull_tail(skb, len-skb_headlen(skb)) != NULL);
}
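
/*
 * Example sketch (assumed protocol handler, not from the original sources):
 * make sure the bytes about to be read sit in the linear header area before
 * dereferencing them.  "struct example_hdr" is a made-up protocol header.
 * Kept under #if 0 so it is never compiled.
 */
#if 0
struct example_hdr {
        __u16   type;
        __u16   len;
};

static int example_parse(struct sk_buff *skb)
{
        struct example_hdr *hdr;

        /* Bring at least sizeof(*hdr) bytes into skb->data, pulling from
         * paged fragments if necessary; fails if the packet is shorter. */
        if (!pskb_may_pull(skb, sizeof(struct example_hdr)))
                return -1;

        hdr = (struct example_hdr *)skb->data;
        /* ... use hdr->type / hdr->len ... */
        return 0;
}
#endif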

/**
 *      skb_headroom - bytes at buffer head
 *      @skb: buffer to check
 *
 *      Return the number of bytes of free space at the head of an &sk_buff.
 */

static inline int skb_headroom(const struct sk_buff *skb)
{
        return skb->data-skb->head;
}

/**
 *      skb_tailroom - bytes at buffer end
 *      @skb: buffer to check
 *
 *      Return the number of bytes of free space at the tail of an &sk_buff.
 */

static inline int skb_tailroom(const struct sk_buff *skb)
{
        return skb_is_nonlinear(skb) ? 0 : skb->end-skb->tail;
}

/**
 *      skb_reserve - adjust headroom
 *      @skb: buffer to alter
 *      @len: bytes to move
 *
 *      Increase the headroom of an empty &sk_buff by reducing the tail
 *      room. This is only allowed for an empty buffer.
 */

static inline void skb_reserve(struct sk_buff *skb, unsigned int len)
{
        skb->data+=len;
        skb->tail+=len;
}
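
/*
 * Example sketch (assumed values, not from the original sources): the usual
 * pattern for building an outgoing packet - reserve headroom first, append
 * the payload with skb_put(), then prepend headers with skb_push().
 * EXAMPLE_HLEN is made up.  Kept under #if 0 so it is never compiled.
 */
#if 0
#define EXAMPLE_HLEN 8

static struct sk_buff *example_build(const void *payload, unsigned int plen)
{
        struct sk_buff *skb;
        unsigned char *hdr;

        skb = alloc_skb(EXAMPLE_HLEN + plen, GFP_ATOMIC);
        if (skb == NULL)
                return NULL;

        skb_reserve(skb, EXAMPLE_HLEN);             /* leave room for headers */
        memcpy(skb_put(skb, plen), payload, plen);  /* append the payload     */

        hdr = skb_push(skb, EXAMPLE_HLEN);          /* prepend our header     */
        memset(hdr, 0, EXAMPLE_HLEN);
        /* ... fill in the real header fields here ... */

        return skb;
}
#endif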

extern int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc);

static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
        if (!skb->data_len) {
                skb->len = len;
                skb->tail = skb->data+len;
        } else {
                ___pskb_trim(skb, len, 0);
        }
}

/**
 *      skb_trim - remove end from a buffer
 *      @skb: buffer to alter
 *      @len: new length
 *
 *      Cut the length of a buffer down by removing data from the tail. If
 *      the buffer is already under the length specified it is not modified.
 */

static inline void skb_trim(struct sk_buff *skb, unsigned int len)
{
        if (skb->len > len) {
                __skb_trim(skb, len);
        }
}


static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
        if (!skb->data_len) {
                skb->len = len;
                skb->tail = skb->data+len;
                return 0;
        } else {
                return ___pskb_trim(skb, len, 1);
        }
}

static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
        if (len < skb->len)
                return __pskb_trim(skb, len);
        return 0;
}

/**
 *      skb_orphan - orphan a buffer
 *      @skb: buffer to orphan
 *
 *      If a buffer currently has an owner then we call the owner's
 *      destructor function and make the @skb unowned. The buffer continues
 *      to exist but is no longer charged to its former owner.
 */


static inline void skb_orphan(struct sk_buff *skb)
{
        if (skb->destructor)
                skb->destructor(skb);
        skb->destructor = NULL;
        skb->sk = NULL;
}

/**
 *      skb_queue_purge - empty a list
 *      @list: list to empty
 *
 *      Delete all buffers on an &sk_buff list. Each buffer is removed from
 *      the list and one reference dropped. This function takes the list
 *      lock and is atomic with respect to other list locking functions.
 */


static inline void skb_queue_purge(struct sk_buff_head *list)
{
        struct sk_buff *skb;
        while ((skb=skb_dequeue(list))!=NULL)
                kfree_skb(skb);
}

/**
 *      __skb_queue_purge - empty a list
 *      @list: list to empty
 *
 *      Delete all buffers on an &sk_buff list. Each buffer is removed from
 *      the list and one reference dropped. This function does not take the
 *      list lock and the caller must hold the relevant locks to use it.
 */


static inline void __skb_queue_purge(struct sk_buff_head *list)
{
        struct sk_buff *skb;
        while ((skb=__skb_dequeue(list))!=NULL)
                kfree_skb(skb);
}

/**
 *      __dev_alloc_skb - allocate an skbuff for sending
 *      @length: length to allocate
 *      @gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *      Allocate a new &sk_buff and assign it a usage count of one. The
 *      buffer has unspecified headroom built in. Users should allocate
 *      the headroom they think they need without accounting for the
 *      built in space. The built in space is used for optimisations.
 *
 *      %NULL is returned if there is no free memory.
 */

static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
                                              int gfp_mask)
{
        struct sk_buff *skb;

        skb = alloc_skb(length+16, gfp_mask);
        if (skb)
                skb_reserve(skb,16);
        return skb;
}

/**
 *      dev_alloc_skb - allocate an skbuff for sending
 *      @length: length to allocate
 *
 *      Allocate a new &sk_buff and assign it a usage count of one. The
 *      buffer has unspecified headroom built in. Users should allocate
 *      the headroom they think they need without accounting for the
 *      built in space. The built in space is used for optimisations.
 *
 *      %NULL is returned if there is no free memory. Although this function
 *      allocates memory it can be called from an interrupt.
 */

static inline struct sk_buff *dev_alloc_skb(unsigned int length)
{
        return __dev_alloc_skb(length, GFP_ATOMIC);
}
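
/*
 * Example sketch (assumed driver receive path, not from the original
 * sources): dev_alloc_skb() plus a small extra reserve so the IP header
 * lands on a 4-byte boundary after the 14-byte Ethernet header.  The
 * eth_type_trans()/netif_rx() calls live in other headers and are
 * assumptions here.  Kept under #if 0 so it is never compiled.
 */
#if 0
static void example_rx(struct net_device *dev, const void *buf, unsigned int len)
{
        struct sk_buff *skb = dev_alloc_skb(len + 2);

        if (skb == NULL)
                return;                         /* out of memory: drop */

        skb_reserve(skb, 2);                    /* align the IP header  */
        memcpy(skb_put(skb, len), buf, len);    /* copy the frame in    */

        skb->dev = dev;
        skb->protocol = eth_type_trans(skb, dev);
        skb->ip_summed = CHECKSUM_NONE;         /* let the stack verify */

        netif_rx(skb);                          /* hand off to the stack */
}
#endif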

/**
 *      skb_cow - copy the header of an skb when it is required
 *      @skb: buffer to cow
 *      @headroom: needed headroom
 *
 *      If the skb passed lacks sufficient headroom or its data part
 *      is shared, the data is reallocated. If reallocation fails, an error
 *      is returned and the original skb is not changed.
 *
 *      The result is an skb with a writable area skb->head...skb->tail
 *      and at least @headroom of space at the head.
 */

static inline int
skb_cow(struct sk_buff *skb, unsigned int headroom)
{
        int delta = (headroom > 16 ? headroom : 16) - skb_headroom(skb);

        if (delta < 0)
                delta = 0;

        if (delta || skb_cloned(skb))
                return pskb_expand_head(skb, (delta+15)&~15, 0, GFP_ATOMIC);
        return 0;
}
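
/*
 * Example sketch (assumed forwarding/mangling context, not from the
 * original sources): make the header area private and large enough before
 * rewriting it in place.  -ENOMEM assumes <linux/errno.h> is available.
 * Kept under #if 0 so it is never compiled.
 */
#if 0
static int example_mangle(struct sk_buff *skb)
{
        /* Ensure at least 16 bytes of headroom and an unshared, writable
         * header; on failure the skb is left untouched. */
        if (skb_cow(skb, 16) != 0)
                return -ENOMEM;

        /* ... now safe to modify skb->data ... skb->tail ... */
        return 0;
}
#endif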

/**
 *      skb_padto       - pad an skbuff up to a minimal size
 *      @skb: buffer to pad
 *      @len: minimal length
 *
 *      Pads up a buffer to ensure the trailing bytes exist and are
 *      blanked. If the buffer already contains sufficient data it
 *      is untouched. Returns the buffer, which may be a replacement
 *      for the original, or %NULL for out of memory - in which case
 *      the original buffer is still freed.
 */

static inline struct sk_buff *skb_padto(struct sk_buff *skb, unsigned int len)
{
        unsigned int size = skb->len;
        if(likely(size >= len))
                return skb;
        return skb_pad(skb, len-size);
}

/**
 *      skb_linearize - convert paged skb to linear one
 *      @skb: buffer to linearize
 *      @gfp: allocation mode
 *
 *      If there is no free memory -ENOMEM is returned, otherwise zero
 *      is returned and the old skb data released.
 */
int skb_linearize(struct sk_buff *skb, int gfp);

static inline void *kmap_skb_frag(const skb_frag_t *frag)
{
#ifdef CONFIG_HIGHMEM
        if (in_irq())
                out_of_line_bug();

        local_bh_disable();
#endif
        return kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ);
}

static inline void kunmap_skb_frag(void *vaddr)
{
        kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
#ifdef CONFIG_HIGHMEM
        local_bh_enable();
#endif
}

#define skb_queue_walk(queue, skb) \
                for (skb = (queue)->next;                       \
                     (skb != (struct sk_buff *)(queue));        \
                     skb=skb->next)
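
/*
 * Example sketch (assumed, not from the original sources): walking a queue
 * in place with skb_queue_walk().  The queue must not change underneath
 * the walk, so its lock is held.  Kept under #if 0 so it is never compiled.
 */
#if 0
static unsigned int example_count_bytes(struct sk_buff_head *list)
{
        struct sk_buff *skb;
        unsigned int bytes = 0;
        unsigned long flags;

        spin_lock_irqsave(&list->lock, flags);
        skb_queue_walk(list, skb)
                bytes += skb->len;
        spin_unlock_irqrestore(&list->lock, flags);

        return bytes;
}
#endif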


extern struct sk_buff *         skb_recv_datagram(struct sock *sk,unsigned flags,int noblock, int *err);
extern unsigned int             datagram_poll(struct file *file, struct socket *sock, struct poll_table_struct *wait);
extern int                      skb_copy_datagram(const struct sk_buff *from, int offset, char *to,int size);
extern int                      skb_copy_datagram_iovec(const struct sk_buff *from, int offset, struct iovec *to,int size);
extern int                      skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset, u8 *to, int len, unsigned int *csump);
extern int                      skb_copy_and_csum_datagram_iovec(const struct sk_buff *skb, int hlen, struct iovec *iov);
extern void                     skb_free_datagram(struct sock * sk, struct sk_buff *skb);

extern unsigned int             skb_checksum(const struct sk_buff *skb, int offset, int len, unsigned int csum);
extern int                      skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
extern unsigned int             skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to, int len, unsigned int csum);
extern void                     skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);

extern void skb_init(void);
extern void skb_add_mtu(int mtu);

#ifdef CONFIG_NETFILTER
static inline void
nf_conntrack_put(struct nf_ct_info *nfct)
{
        if (nfct && atomic_dec_and_test(&nfct->master->use))
                nfct->master->destroy(nfct->master);
}
static inline void
nf_conntrack_get(struct nf_ct_info *nfct)
{
        if (nfct)
                atomic_inc(&nfct->master->use);
}
#endif

#endif  /* __KERNEL__ */
#endif  /* _LINUX_SKBUFF_H */
