/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Definitions for the Interfaces handler.
 *
 * Version:     @(#)dev.h       1.0.10  08/12/93
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Corey Minyard <wf-rch!minyard@relay.EU.net>
 *              Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
 *              Alan Cox, <Alan.Cox@linux.org>
 *              Bjorn Ekwall, <bj0rn@blox.se>
 *              Pekka Riikonen <priikone@poseidon.pspt.fi>
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 *              Moved to /usr/include/linux for NET3
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>

#ifdef __KERNEL__
#include <linux/timer.h>
#include <linux/delay.h>
#include <asm/atomic.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/device.h>
#include <linux/percpu.h>
#include <linux/dmaengine.h>
#include <linux/workqueue.h>

#include <net/net_namespace.h>

struct vlan_group;
struct ethtool_ops;
struct netpoll_info;
/* 802.11 specific */
struct wireless_dev;
                                        /* source back-compat hooks */
#define SET_ETHTOOL_OPS(netdev,ops) \
        ( (netdev)->ethtool_ops = (ops) )

#define HAVE_ALLOC_NETDEV               /* feature macro: alloc_xxxdev
                                           functions are available. */
#define HAVE_FREE_NETDEV                /* free_netdev() */
#define HAVE_NETDEV_PRIV                /* netdev_priv() */

#define NET_XMIT_SUCCESS        0
#define NET_XMIT_DROP           1       /* skb dropped                  */
#define NET_XMIT_CN             2       /* congestion notification      */
#define NET_XMIT_POLICED        3       /* skb is shot by police        */
#define NET_XMIT_BYPASS         4       /* packet does not leave via dequeue;
                                           (TC use only - dev_queue_xmit
                                           returns this as NET_XMIT_SUCCESS) */

/* Backlog congestion levels */
#define NET_RX_SUCCESS          0   /* keep 'em coming, baby */
#define NET_RX_DROP             1   /* packet dropped */
#define NET_RX_CN_LOW           2   /* storm alert, just in case */
#define NET_RX_CN_MOD           3   /* Storm on its way! */
#define NET_RX_CN_HIGH          4   /* The storm is here */
#define NET_RX_BAD              5   /* packet dropped due to kernel error */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority, prompting us to send less aggressively. */
#define net_xmit_eval(e)        ((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)       ((e) != NET_XMIT_CN ? -ENOBUFS : 0)
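/*
 * Usage sketch (editor's illustration, not part of the original header):
 * NET_XMIT_CN does not mean the packet was lost, so transmit paths
 * usually fold it into "success" with net_xmit_eval():
 *
 *      int rc = net_xmit_eval(dev_queue_xmit(skb));
 *
 * leaves rc == 0 for both NET_XMIT_SUCCESS and NET_XMIT_CN, while
 * net_xmit_errno() maps every result other than NET_XMIT_CN to -ENOBUFS
 * for callers that must report an errno.
 */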
 
#endif

#define MAX_ADDR_LEN    32              /* Largest hardware address length */

/* Driver transmit return codes */
#define NETDEV_TX_OK 0          /* driver took care of packet */
#define NETDEV_TX_BUSY 1        /* driver tx path was busy */
#define NETDEV_TX_LOCKED -1     /* driver tx lock was already taken */

/*
 *      Compute the worst case header length according to the protocols
 *      used.
 */

#if !defined(CONFIG_AX25) && !defined(CONFIG_AX25_MODULE) && !defined(CONFIG_TR)
#define LL_MAX_HEADER   32
#else
#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
#define LL_MAX_HEADER   96
#else
#define LL_MAX_HEADER   48
#endif
#endif

#if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \
    !defined(CONFIG_NET_IPGRE) &&  !defined(CONFIG_NET_IPGRE_MODULE) && \
    !defined(CONFIG_IPV6_SIT) && !defined(CONFIG_IPV6_SIT_MODULE) && \
    !defined(CONFIG_IPV6_TUNNEL) && !defined(CONFIG_IPV6_TUNNEL_MODULE)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif

struct net_device_subqueue
{
        /* Give a control state for each queue.  This struct may contain
         * per-queue locks in the future.
         */
        unsigned long   state;
};
 
/*
 *      Network device statistics. Akin to the 2.0 ether stats but
 *      with byte counters.
 */

struct net_device_stats
{
        unsigned long   rx_packets;             /* total packets received       */
        unsigned long   tx_packets;             /* total packets transmitted    */
        unsigned long   rx_bytes;               /* total bytes received         */
        unsigned long   tx_bytes;               /* total bytes transmitted      */
        unsigned long   rx_errors;              /* bad packets received         */
        unsigned long   tx_errors;              /* packet transmit problems     */
        unsigned long   rx_dropped;             /* no space in linux buffers    */
        unsigned long   tx_dropped;             /* no space available in linux  */
        unsigned long   multicast;              /* multicast packets received   */
        unsigned long   collisions;

        /* detailed rx_errors: */
        unsigned long   rx_length_errors;
        unsigned long   rx_over_errors;         /* receiver ring buff overflow  */
        unsigned long   rx_crc_errors;          /* recved pkt with crc error    */
        unsigned long   rx_frame_errors;        /* recv'd frame alignment error */
        unsigned long   rx_fifo_errors;         /* recv'r fifo overrun          */
        unsigned long   rx_missed_errors;       /* receiver missed packet       */

        /* detailed tx_errors */
        unsigned long   tx_aborted_errors;
        unsigned long   tx_carrier_errors;
        unsigned long   tx_fifo_errors;
        unsigned long   tx_heartbeat_errors;
        unsigned long   tx_window_errors;

        /* for cslip etc */
        unsigned long   rx_compressed;
        unsigned long   tx_compressed;
};


/* Media selection options. */
enum {
        IF_PORT_UNKNOWN = 0,
        IF_PORT_10BASE2,
        IF_PORT_10BASET,
        IF_PORT_AUI,
        IF_PORT_100BASET,
        IF_PORT_100BASETX,
        IF_PORT_100BASEFX
};
 
#ifdef __KERNEL__

#include <linux/cache.h>
#include <linux/skbuff.h>

struct neighbour;
struct neigh_parms;
struct sk_buff;

struct netif_rx_stats
{
        unsigned total;
        unsigned dropped;
        unsigned time_squeeze;
        unsigned cpu_collision;
};

DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat);

struct dev_addr_list
{
        struct dev_addr_list    *next;
        u8                      da_addr[MAX_ADDR_LEN];
        u8                      da_addrlen;
        u8                      da_synced;
        int                     da_users;
        int                     da_gusers;
};
 
/*
 *      We tag multicasts with these structures.
 */

#define dev_mc_list     dev_addr_list
#define dmi_addr        da_addr
#define dmi_addrlen     da_addrlen
#define dmi_users       da_users
#define dmi_gusers      da_gusers

struct hh_cache
{
        struct hh_cache *hh_next;       /* Next entry                        */
        atomic_t        hh_refcnt;      /* number of users                   */
/*
 * We want hh_output, hh_len, hh_lock and hh_data to be in a separate
 * cache line on SMP.
 * They are mostly read, but hh_refcnt may be changed quite frequently,
 * incurring cache line ping pongs.
 */
        __be16          hh_type ____cacheline_aligned_in_smp;
                                        /* protocol identifier, e.g. ETH_P_IP
                                         *  NOTE:  For VLANs, this will be the
                                         *  encapsulated type. --BLG
                                         */
        u16             hh_len;         /* length of header */
        int             (*hh_output)(struct sk_buff *skb);
        seqlock_t       hh_lock;

        /* cached hardware header; allow for machine alignment needs.        */
#define HH_DATA_MOD     16
#define HH_DATA_OFF(__len) \
        (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
        (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
        unsigned long   hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};
 
/* Reserve HH_DATA_MOD-aligned headroom for the hardware header: the value
 * is hard_header_len rounded down to a HH_DATA_MOD boundary, plus
 * HH_DATA_MOD, so it is always a multiple of HH_DATA_MOD and at least
 * hard_header_len.  An alternative is:
 *   dev->hard_header_len ? (dev->hard_header_len +
 *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 */
#define LL_RESERVED_SPACE(dev) \
        (((dev)->hard_header_len&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
        ((((dev)->hard_header_len+extra)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
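/*
 * Worked example (editor's illustration): for Ethernet hard_header_len
 * is 14 (ETH_HLEN), so with HH_DATA_MOD = 16:
 *
 *      LL_RESERVED_SPACE(dev)          = (14 & ~15) + 16        = 16
 *      LL_RESERVED_SPACE_EXTRA(dev, 4) = ((14 + 4) & ~15) + 16  = 32
 *
 * so skb_reserve(skb, LL_RESERVED_SPACE(dev)) always yields headroom
 * that is a multiple of HH_DATA_MOD and covers hard_header_len.
 */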
 
struct header_ops {
        int     (*create) (struct sk_buff *skb, struct net_device *dev,
                           unsigned short type, const void *daddr,
                           const void *saddr, unsigned len);
        int     (*parse)(const struct sk_buff *skb, unsigned char *haddr);
        int     (*rebuild)(struct sk_buff *skb);
#define HAVE_HEADER_CACHE
        int     (*cache)(const struct neighbour *neigh, struct hh_cache *hh);
        void    (*cache_update)(struct hh_cache *hh,
                                const struct net_device *dev,
                                const unsigned char *haddr);
};

/* These flag bits are private to the generic network queueing
 * layer; they may not be explicitly referenced by any other
 * code.
 */

enum netdev_state_t
{
        __LINK_STATE_XOFF=0,
        __LINK_STATE_START,
        __LINK_STATE_PRESENT,
        __LINK_STATE_SCHED,
        __LINK_STATE_NOCARRIER,
        __LINK_STATE_LINKWATCH_PENDING,
        __LINK_STATE_DORMANT,
        __LINK_STATE_QDISC_RUNNING,
};

 
/*
 * This structure holds the boot-time configured netdevice settings. They
 * are then used in the device probing.
 */
struct netdev_boot_setup {
        char name[IFNAMSIZ];
        struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

extern int __init netdev_boot_setup(char *str);

/*
 * Structure for NAPI scheduling similar to tasklet but with weighting
 */
struct napi_struct {
        /* The poll_list must only be managed by the entity which
         * changes the state of the NAPI_STATE_SCHED bit.  This means
         * whoever atomically sets that bit can add this napi_struct
         * to the per-cpu poll_list, and whoever clears that bit
         * can remove from the list right before clearing the bit.
         */
        struct list_head        poll_list;

        unsigned long           state;
        int                     weight;
        int                     (*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
        spinlock_t              poll_lock;
        int                     poll_owner;
        struct net_device       *dev;
        struct list_head        dev_list;
#endif
};

enum
{
        NAPI_STATE_SCHED,       /* Poll is scheduled */
        NAPI_STATE_DISABLE,     /* Disable pending */
};
 
extern void FASTCALL(__napi_schedule(struct napi_struct *n));

static inline int napi_disable_pending(struct napi_struct *n)
{
        return test_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *      napi_schedule_prep - check if napi can be scheduled
 *      @n: napi context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running.  This is used as a condition variable to
 * ensure only one NAPI poll instance runs.  We also make
 * sure there is no pending NAPI disable.
 */
static inline int napi_schedule_prep(struct napi_struct *n)
{
        return !napi_disable_pending(n) &&
                !test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}

/**
 *      napi_schedule - schedule NAPI poll
 *      @n: napi context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
        if (napi_schedule_prep(n))
                __napi_schedule(n);
}

/* Try to reschedule poll. Called by dev->poll() after napi_complete().  */
static inline int napi_reschedule(struct napi_struct *napi)
{
        if (napi_schedule_prep(napi)) {
                __napi_schedule(napi);
                return 1;
        }
        return 0;
}

/**
 *      napi_complete - NAPI processing complete
 *      @n: napi context
 *
 * Mark NAPI processing as complete.
 */
static inline void __napi_complete(struct napi_struct *n)
{
        BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
        list_del(&n->poll_list);
        smp_mb__before_clear_bit();
        clear_bit(NAPI_STATE_SCHED, &n->state);
}

static inline void napi_complete(struct napi_struct *n)
{
        local_irq_disable();
        __napi_complete(n);
        local_irq_enable();
}
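/*
 * Usage sketch (editor's illustration; the my_* names are hypothetical):
 * the canonical NAPI pattern pairs napi_schedule() in the interrupt
 * handler with napi_complete() in the poll routine once the work done
 * falls below the budget:
 *
 *      static irqreturn_t my_isr(int irq, void *data)
 *      {
 *              struct my_priv *priv = data;
 *
 *              my_disable_rx_irq(priv);
 *              napi_schedule(&priv->napi);
 *              return IRQ_HANDLED;
 *      }
 *
 *      static int my_poll(struct napi_struct *napi, int budget)
 *      {
 *              struct my_priv *priv = container_of(napi, struct my_priv, napi);
 *              int work = my_clean_rx_ring(priv, budget);
 *
 *              if (work < budget) {
 *                      napi_complete(napi);
 *                      my_enable_rx_irq(priv);
 *              }
 *              return work;
 *      }
 */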
 
/**
 *      napi_disable - prevent NAPI from scheduling
 *      @n: napi context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits till any outstanding processing completes.
 */
static inline void napi_disable(struct napi_struct *n)
{
        set_bit(NAPI_STATE_DISABLE, &n->state);
        while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
                msleep(1);
        clear_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *      napi_enable - enable NAPI scheduling
 *      @n: napi context
 *
 * Allow NAPI to be scheduled on this context again.
 * Must be paired with napi_disable.
 */
static inline void napi_enable(struct napi_struct *n)
{
        BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
        smp_mb__before_clear_bit();
        clear_bit(NAPI_STATE_SCHED, &n->state);
}
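/*
 * Usage sketch (editor's illustration; my_* names are hypothetical):
 * drivers bracket their down path with napi_disable() so no poll can
 * run while the rx ring is being torn down, and re-arm NAPI from the
 * open path with napi_enable():
 *
 *      static int my_stop(struct net_device *dev)
 *      {
 *              struct my_priv *priv = netdev_priv(dev);
 *
 *              napi_disable(&priv->napi);
 *              my_free_rx_ring(priv);
 *              return 0;
 *      }
 *
 * napi_disable() sleeps (msleep) until any in-flight poll has finished,
 * so it must not be called from atomic context.
 */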
 
#ifdef CONFIG_SMP
/**
 *      napi_synchronize - wait until NAPI is not running
 *      @n: napi context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
        while (test_bit(NAPI_STATE_SCHED, &n->state))
                msleep(1);
}
#else
# define napi_synchronize(n)    barrier()
#endif

/*
 *      The DEVICE structure.
 *      Actually, this whole structure is a big mistake.  It mixes I/O
 *      data with strictly "high-level" data, and it has to know about
 *      almost every data structure used in the INET module.
 *
 *      FIXME: cleanup struct net_device such that network protocol info
 *      moves out.
 */
 
struct net_device
{

        /*
         * This is the first field of the "visible" part of this structure
         * (i.e. as seen by users in the "Space.c" file).  It is the name
         * of the interface.
         */
        char                    name[IFNAMSIZ];
        /* device name hash chain */
        struct hlist_node       name_hlist;

        /*
         *      I/O specific fields
         *      FIXME: Merge these and struct ifmap into one
         */
        unsigned long           mem_end;        /* shared mem end       */
        unsigned long           mem_start;      /* shared mem start     */
        unsigned long           base_addr;      /* device I/O address   */
        unsigned int            irq;            /* device IRQ number    */

        /*
         *      Some hardware also needs these fields, but they are not
         *      part of the usual set specified in Space.c.
         */

        unsigned char           if_port;        /* Selectable AUI, TP,..*/
        unsigned char           dma;            /* DMA channel          */

        unsigned long           state;

        struct list_head        dev_list;
#ifdef CONFIG_NETPOLL
        struct list_head        napi_list;
#endif

        /* The device initialization function. Called only once. */
        int                     (*init)(struct net_device *dev);

        /* ------- Fields preinitialized in Space.c finish here ------- */

        /* Net device features */
        unsigned long           features;
#define NETIF_F_SG              1       /* Scatter/gather IO. */
#define NETIF_F_IP_CSUM         2       /* Can checksum TCP/UDP over IPv4. */
#define NETIF_F_NO_CSUM         4       /* Does not require checksum. E.g. loopback. */
#define NETIF_F_HW_CSUM         8       /* Can checksum all the packets. */
#define NETIF_F_IPV6_CSUM       16      /* Can checksum TCP/UDP over IPV6 */
#define NETIF_F_HIGHDMA         32      /* Can DMA to high memory. */
#define NETIF_F_FRAGLIST        64      /* Scatter/gather IO. */
#define NETIF_F_HW_VLAN_TX      128     /* Transmit VLAN hw acceleration */
#define NETIF_F_HW_VLAN_RX      256     /* Receive VLAN hw acceleration */
#define NETIF_F_HW_VLAN_FILTER  512     /* Receive filtering on VLAN */
#define NETIF_F_VLAN_CHALLENGED 1024    /* Device cannot handle VLAN packets */
#define NETIF_F_GSO             2048    /* Enable software GSO. */
#define NETIF_F_LLTX            4096    /* LockLess TX - deprecated. Please */
                                        /* do not use LLTX in new drivers */
#define NETIF_F_NETNS_LOCAL     8192    /* Does not change network namespaces */
#define NETIF_F_MULTI_QUEUE     16384   /* Has multiple TX/RX queues */
#define NETIF_F_LRO             32768   /* large receive offload */
 
        /* Segmentation offload features */
#define NETIF_F_GSO_SHIFT       16
#define NETIF_F_GSO_MASK        0xffff0000
#define NETIF_F_TSO             (SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
#define NETIF_F_UFO             (SKB_GSO_UDP << NETIF_F_GSO_SHIFT)
#define NETIF_F_GSO_ROBUST      (SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO_ECN         (SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO6            (SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)

        /* List of features with software fallbacks. */
#define NETIF_F_GSO_SOFTWARE    (NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)


#define NETIF_F_GEN_CSUM        (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
#define NETIF_F_V4_CSUM         (NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
#define NETIF_F_V6_CSUM         (NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
#define NETIF_F_ALL_CSUM        (NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)

        struct net_device       *next_sched;

        /* Interface index. Unique device identifier    */
        int                     ifindex;
        int                     iflink;


        struct net_device_stats* (*get_stats)(struct net_device *dev);
        struct net_device_stats stats;

#ifdef CONFIG_WIRELESS_EXT
        /* List of functions to handle Wireless Extensions (instead of ioctl).
         * See <net/iw_handler.h> for details. Jean II */
        const struct iw_handler_def *   wireless_handlers;
        /* Instance data managed by the core of Wireless Extensions. */
        struct iw_public_data * wireless_data;
#endif
        const struct ethtool_ops *ethtool_ops;

        /* Hardware header description */
        const struct header_ops *header_ops;

        /*
         * This marks the end of the "visible" part of the structure. All
         * fields hereafter are internal to the system, and may change at
         * will (read: may be cleaned up at will).
         */


        unsigned int            flags;  /* interface flags (a la BSD)   */
        unsigned short          gflags;
        unsigned short          priv_flags; /* Like 'flags' but invisible to userspace. */
        unsigned short          padded; /* How much padding added by alloc_netdev() */

        unsigned char           operstate; /* RFC2863 operstate */
        unsigned char           link_mode; /* mapping policy to operstate */

        unsigned                mtu;    /* interface MTU value          */
        unsigned short          type;   /* interface hardware type      */
        unsigned short          hard_header_len;        /* hardware hdr length  */

        struct net_device       *master; /* Pointer to the master device of a
                                          * group, of which this device is a
                                          * member.
                                          */

        /* Interface address info. */
        unsigned char           perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
        unsigned char           addr_len;       /* hardware address length      */
        unsigned short          dev_id;         /* for shared network cards */

        struct dev_addr_list    *uc_list;       /* Secondary unicast mac addresses */
        int                     uc_count;       /* Number of installed ucasts   */
        int                     uc_promisc;
        struct dev_addr_list    *mc_list;       /* Multicast mac addresses      */
        int                     mc_count;       /* Number of installed mcasts   */
        int                     promiscuity;
        int                     allmulti;


        /* Protocol specific pointers */

        void                    *atalk_ptr;     /* AppleTalk link       */
        void                    *ip_ptr;        /* IPv4 specific data   */
        void                    *dn_ptr;        /* DECnet specific data */
        void                    *ip6_ptr;       /* IPv6 specific data */
        void                    *ec_ptr;        /* Econet specific data */
        void                    *ax25_ptr;      /* AX.25 specific data */
        struct wireless_dev     *ieee80211_ptr; /* IEEE 802.11 specific data,
                                                   assign before registering */
 
/*
 * Cache line mostly used on receive path (including eth_type_trans())
 */
        unsigned long           last_rx;        /* Time of last Rx      */
        /* Interface address info used in eth_type_trans() */
        unsigned char           dev_addr[MAX_ADDR_LEN]; /* hw address, (before bcast
                                                        because most packets are unicast) */

        unsigned char           broadcast[MAX_ADDR_LEN];        /* hw bcast add */

/*
 * Cache line mostly used on queue transmit path (qdisc)
 */
        /* device queue lock */
        spinlock_t              queue_lock ____cacheline_aligned_in_smp;
        struct Qdisc            *qdisc;
        struct Qdisc            *qdisc_sleeping;
        struct list_head        qdisc_list;
        unsigned long           tx_queue_len;   /* Max frames per queue allowed */

        /* Partially transmitted GSO packet. */
        struct sk_buff          *gso_skb;

        /* ingress path synchronizer */
        spinlock_t              ingress_lock;
        struct Qdisc            *qdisc_ingress;

/*
 * One part is mostly used on xmit path (device)
 */
        /* hard_start_xmit synchronizer */
        spinlock_t              _xmit_lock ____cacheline_aligned_in_smp;
        /* cpu id of the processor that has entered hard_start_xmit,
         * or -1 if nobody has.
         */
        int                     xmit_lock_owner;
        void                    *priv;  /* pointer to private data      */
        int                     (*hard_start_xmit) (struct sk_buff *skb,
                                                    struct net_device *dev);
        /* These may be needed for future network-power-down code. */
        unsigned long           trans_start;    /* Time (in jiffies) of last Tx */

        int                     watchdog_timeo; /* used by dev_watchdog() */
        struct timer_list       watchdog_timer;

/*
 * refcnt is a very hot point, so align it on SMP
 */
        /* Number of references to this device */
        atomic_t                refcnt ____cacheline_aligned_in_smp;

        /* delayed register/unregister */
        struct list_head        todo_list;
        /* device index hash chain */
        struct hlist_node       index_hlist;

        struct net_device       *link_watch_next;

        /* register/unregister state machine */
        enum { NETREG_UNINITIALIZED=0,
               NETREG_REGISTERED,       /* completed register_netdevice */
               NETREG_UNREGISTERING,    /* called unregister_netdevice */
               NETREG_UNREGISTERED,     /* completed unregister todo */
               NETREG_RELEASED,         /* called free_netdev */
        } reg_state;

        /* Called after device is detached from network. */
        void                    (*uninit)(struct net_device *dev);
        /* Called after last user reference disappears. */
        void                    (*destructor)(struct net_device *dev);

        /* Pointers to interface service routines.      */
        int                     (*open)(struct net_device *dev);
        int                     (*stop)(struct net_device *dev);
#define HAVE_NETDEV_POLL
#define HAVE_CHANGE_RX_FLAGS
        void                    (*change_rx_flags)(struct net_device *dev,
                                                   int flags);
#define HAVE_SET_RX_MODE
        void                    (*set_rx_mode)(struct net_device *dev);
#define HAVE_MULTICAST
        void                    (*set_multicast_list)(struct net_device *dev);
#define HAVE_SET_MAC_ADDR
        int                     (*set_mac_address)(struct net_device *dev,
                                                   void *addr);
#define HAVE_VALIDATE_ADDR
        int                     (*validate_addr)(struct net_device *dev);
#define HAVE_PRIVATE_IOCTL
        int                     (*do_ioctl)(struct net_device *dev,
                                            struct ifreq *ifr, int cmd);
#define HAVE_SET_CONFIG
        int                     (*set_config)(struct net_device *dev,
                                              struct ifmap *map);
#define HAVE_CHANGE_MTU
        int                     (*change_mtu)(struct net_device *dev, int new_mtu);

#define HAVE_TX_TIMEOUT
        void                    (*tx_timeout) (struct net_device *dev);

        void                    (*vlan_rx_register)(struct net_device *dev,
                                                    struct vlan_group *grp);
        void                    (*vlan_rx_add_vid)(struct net_device *dev,
                                                   unsigned short vid);
        void                    (*vlan_rx_kill_vid)(struct net_device *dev,
                                                    unsigned short vid);

        int                     (*neigh_setup)(struct net_device *dev, struct neigh_parms *);
#ifdef CONFIG_NETPOLL
        struct netpoll_info     *npinfo;
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
        void                    (*poll_controller)(struct net_device *dev);
#endif

        /* Network namespace this network device is inside */
        struct net              *nd_net;

        /* bridge stuff */
        struct net_bridge_port  *br_port;
        /* macvlan */
        struct macvlan_port     *macvlan_port;

        /* class/net/name entry */
        struct device           dev;
        /* space for optional statistics and wireless sysfs groups */
        struct attribute_group  *sysfs_groups[3];

        /* rtnetlink link ops */
        const struct rtnl_link_ops *rtnl_link_ops;

        /* The TX queue control structures */
        unsigned int                    egress_subqueue_count;
        struct net_device_subqueue      egress_subqueue[1];
};
#define to_net_dev(d) container_of(d, struct net_device, dev)

#define NETDEV_ALIGN            32
#define NETDEV_ALIGN_CONST      (NETDEV_ALIGN - 1)

/**
 *      netdev_priv - access network device private data
 *      @dev: network device
 *
 * Get network device private data
 */
static inline void *netdev_priv(const struct net_device *dev)
{
        return dev->priv;
}
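/*
 * Usage sketch (editor's illustration; struct my_priv is hypothetical):
 * the private area is allocated together with the struct net_device by
 * alloc_netdev() (declared later in this header) and reached through
 * netdev_priv():
 *
 *      struct my_priv {
 *              struct napi_struct napi;
 *              spinlock_t lock;
 *      };
 *
 *      struct net_device *dev;
 *      struct my_priv *priv;
 *
 *      dev = alloc_netdev(sizeof(struct my_priv), "myeth%d", ether_setup);
 *      if (!dev)
 *              return -ENOMEM;
 *      priv = netdev_priv(dev);
 *      spin_lock_init(&priv->lock);
 */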
 
/* Set the sysfs physical device reference for the network logical device.
 * If set prior to registration, a symlink to the physical device is
 * created during initialization.
 */
#define SET_NETDEV_DEV(net, pdev)       ((net)->dev.parent = (pdev))
 
/**
 *      netif_napi_add - initialize a napi context
 *      @dev:  network device
 *      @napi: napi context
 *      @poll: polling function
 *      @weight: default weight
 *
 * netif_napi_add() must be used to initialize a napi context prior to calling
 * *any* of the other napi related functions.
 */
static inline void netif_napi_add(struct net_device *dev,
                                  struct napi_struct *napi,
                                  int (*poll)(struct napi_struct *, int),
                                  int weight)
{
        INIT_LIST_HEAD(&napi->poll_list);
        napi->poll = poll;
        napi->weight = weight;
#ifdef CONFIG_NETPOLL
        napi->dev = dev;
        list_add(&napi->dev_list, &dev->napi_list);
        spin_lock_init(&napi->poll_lock);
        napi->poll_owner = -1;
#endif
        set_bit(NAPI_STATE_SCHED, &napi->state);
}
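/*
 * Usage sketch (editor's illustration): typically called once from the
 * driver's probe path, before register_netdev(), with a poll routine
 * like my_poll() above and a conventional weight such as 64:
 *
 *      netif_napi_add(dev, &priv->napi, my_poll, 64);
 *
 * Note that the context starts life with NAPI_STATE_SCHED set, i.e.
 * effectively disabled, so the driver napi_enable()s it when the
 * interface is opened.
 */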
 
struct packet_type {
        __be16                  type;   /* This is really htons(ether_type). */
        struct net_device       *dev;   /* NULL is wildcarded here           */
        int                     (*func) (struct sk_buff *,
                                         struct net_device *,
                                         struct packet_type *,
                                         struct net_device *);
        struct sk_buff          *(*gso_segment)(struct sk_buff *skb,
                                                int features);
        int                     (*gso_send_check)(struct sk_buff *skb);
        void                    *af_packet_priv;
        struct list_head        list;
};

#include <linux/interrupt.h>
#include <linux/notifier.h>

extern rwlock_t                         dev_base_lock;          /* Device list lock */


#define for_each_netdev(net, d)         \
                list_for_each_entry(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_safe(net, d, n) \
                list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue(net, d)                \
                list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
#define net_device_entry(lh)    list_entry(lh, struct net_device, dev_list)

static inline struct net_device *next_net_device(struct net_device *dev)
{
        struct list_head *lh;
        struct net *net;

        net = dev->nd_net;
        lh = dev->dev_list.next;
        return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *first_net_device(struct net *net)
{
        return list_empty(&net->dev_base_head) ? NULL :
                net_device_entry(net->dev_base_head.next);
}

extern int                      netdev_boot_setup_check(struct net_device *dev);
extern unsigned long            netdev_boot_base(const char *prefix, int unit);
extern struct net_device    *dev_getbyhwaddr(struct net *net, unsigned short type, char *hwaddr);
extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern void             dev_add_pack(struct packet_type *pt);
extern void             dev_remove_pack(struct packet_type *pt);
extern void             __dev_remove_pack(struct packet_type *pt);

extern struct net_device        *dev_get_by_flags(struct net *net, unsigned short flags,
                                                  unsigned short mask);
extern struct net_device        *dev_get_by_name(struct net *net, const char *name);
extern struct net_device        *__dev_get_by_name(struct net *net, const char *name);
extern int              dev_alloc_name(struct net_device *dev, const char *name);
extern int              dev_open(struct net_device *dev);
extern int              dev_close(struct net_device *dev);
extern int              dev_queue_xmit(struct sk_buff *skb);
extern int              register_netdevice(struct net_device *dev);
extern void             unregister_netdevice(struct net_device *dev);
extern void             free_netdev(struct net_device *dev);
extern void             synchronize_net(void);
extern int              register_netdevice_notifier(struct notifier_block *nb);
extern int              unregister_netdevice_notifier(struct notifier_block *nb);
extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
extern struct net_device        *dev_get_by_index(struct net *net, int ifindex);
extern struct net_device        *__dev_get_by_index(struct net *net, int ifindex);
extern int              dev_restart(struct net_device *dev);
#ifdef CONFIG_NETPOLL_TRAP
extern int              netpoll_trap(void);
#endif
 
static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
                                  unsigned short type,
                                  const void *daddr, const void *saddr,
                                  unsigned len)
{
        if (!dev->header_ops || !dev->header_ops->create)
                return 0;

        return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
}

static inline int dev_parse_header(const struct sk_buff *skb,
                                   unsigned char *haddr)
{
        const struct net_device *dev = skb->dev;

        if (!dev->header_ops || !dev->header_ops->parse)
                return 0;
        return dev->header_ops->parse(skb, haddr);
}

typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
extern int              register_gifconf(unsigned int family, gifconf_func_t * gifconf);
static inline int unregister_gifconf(unsigned int family)
{
        return register_gifconf(family, NULL);
}
 
/*
 * Incoming packets are placed on per-cpu queues so that
 * no locking is needed.
 */
struct softnet_data
{
        struct net_device       *output_queue;
        struct sk_buff_head     input_pkt_queue;
        struct list_head        poll_list;
        struct sk_buff          *completion_queue;

        struct napi_struct      backlog;
#ifdef CONFIG_NET_DMA
        struct dma_chan         *net_dma;
#endif
};

DECLARE_PER_CPU(struct softnet_data,softnet_data);

#define HAVE_NETIF_QUEUE

extern void __netif_schedule(struct net_device *dev);

static inline void netif_schedule(struct net_device *dev)
{
        if (!test_bit(__LINK_STATE_XOFF, &dev->state))
                __netif_schedule(dev);
}
 
/**
 *      netif_start_queue - allow transmit
 *      @dev: network device
 *
 *      Allow upper layers to call the device hard_start_xmit routine.
 */
static inline void netif_start_queue(struct net_device *dev)
{
        clear_bit(__LINK_STATE_XOFF, &dev->state);
}

/**
 *      netif_wake_queue - restart transmit
 *      @dev: network device
 *
 *      Allow upper layers to call the device hard_start_xmit routine.
 *      Used for flow control when transmit resources are available.
 */
static inline void netif_wake_queue(struct net_device *dev)
{
#ifdef CONFIG_NETPOLL_TRAP
        if (netpoll_trap()) {
                clear_bit(__LINK_STATE_XOFF, &dev->state);
                return;
        }
#endif
        if (test_and_clear_bit(__LINK_STATE_XOFF, &dev->state))
                __netif_schedule(dev);
}

/**
 *      netif_stop_queue - stop the transmit queue
 *      @dev: network device
 *
 *      Stop upper layers calling the device hard_start_xmit routine.
 *      Used for flow control when transmit resources are unavailable.
 */
static inline void netif_stop_queue(struct net_device *dev)
{
        set_bit(__LINK_STATE_XOFF, &dev->state);
}

/**
 *      netif_queue_stopped - test if transmit queue is flow-blocked
 *      @dev: network device
 *
 *      Test if transmit queue on device is currently unable to send.
 */
static inline int netif_queue_stopped(const struct net_device *dev)
{
        return test_bit(__LINK_STATE_XOFF, &dev->state);
}
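/*
 * Usage sketch (editor's illustration; the my_* helpers are
 * hypothetical): a driver's hard_start_xmit stops the queue when its tx
 * ring fills, and its tx-completion handler wakes the queue once
 * descriptors have been reclaimed:
 *
 *      static int my_start_xmit(struct sk_buff *skb, struct net_device *dev)
 *      {
 *              struct my_priv *priv = netdev_priv(dev);
 *
 *              if (my_tx_ring_full(priv)) {
 *                      netif_stop_queue(dev);
 *                      return NETDEV_TX_BUSY;
 *              }
 *              my_post_tx_descriptor(priv, skb);
 *              if (my_tx_ring_full(priv))
 *                      netif_stop_queue(dev);
 *              return NETDEV_TX_OK;
 *      }
 *
 * and, from the tx-completion interrupt:
 *
 *      if (netif_queue_stopped(dev) && my_tx_ring_has_room(priv))
 *              netif_wake_queue(dev);
 */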
 
/**
 *      netif_running - test if up
 *      @dev: network device
 *
 *      Test if the device has been brought up.
 */
static inline int netif_running(const struct net_device *dev)
{
        return test_bit(__LINK_STATE_START, &dev->state);
}

/*
 * Routines to manage the subqueues on a device.  We only need start,
 * stop, and a check if it's stopped.  All other device management is
 * done at the overall netdevice level.
 * Also test the device if we're multiqueue.
 */

/**
 *      netif_start_subqueue - allow sending packets on subqueue
 *      @dev: network device
 *      @queue_index: sub queue index
 *
 * Start individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
        clear_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
#endif
}

/**
 *      netif_stop_subqueue - stop sending packets on subqueue
 *      @dev: network device
 *      @queue_index: sub queue index
 *
 * Stop individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
#ifdef CONFIG_NETPOLL_TRAP
        if (netpoll_trap())
                return;
#endif
        set_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
#endif
}

/**
 *      netif_subqueue_stopped - test status of subqueue
 *      @dev: network device
 *      @queue_index: sub queue index
 *
 * Check individual transmit queue of a device with multiple transmit queues.
 */
static inline int __netif_subqueue_stopped(const struct net_device *dev,
                                         u16 queue_index)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
        return test_bit(__LINK_STATE_XOFF,
                        &dev->egress_subqueue[queue_index].state);
#else
        return 0;
#endif
}

static inline int netif_subqueue_stopped(const struct net_device *dev,
                                         struct sk_buff *skb)
{
        return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}

/**
 *      netif_wake_subqueue - allow sending packets on subqueue
 *      @dev: network device
 *      @queue_index: sub queue index
 *
 * Resume individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
#ifdef CONFIG_NETPOLL_TRAP
        if (netpoll_trap())
                return;
#endif
        if (test_and_clear_bit(__LINK_STATE_XOFF,
                               &dev->egress_subqueue[queue_index].state))
                __netif_schedule(dev);
#endif
}

/**
 *      netif_is_multiqueue - test if device has multiple transmit queues
 *      @dev: network device
 *
 * Check if device has multiple transmit queues.
 * Always returns false if CONFIG_NETDEVICES_MULTIQUEUE is not configured.
 */
static inline int netif_is_multiqueue(const struct net_device *dev)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
        return (!!(NETIF_F_MULTI_QUEUE & dev->features));
#else
        return 0;
#endif
}
 
/* Use this variant when it is known for sure that it
 * is executing from interrupt context.
 */
extern void dev_kfree_skb_irq(struct sk_buff *skb);

/* Use this variant in places where it could be invoked
 * either from interrupt or non-interrupt context.
 */
extern void dev_kfree_skb_any(struct sk_buff *skb);

#define HAVE_NETIF_RX 1
extern int              netif_rx(struct sk_buff *skb);
extern int              netif_rx_ni(struct sk_buff *skb);
#define HAVE_NETIF_RECEIVE_SKB 1
extern int              netif_receive_skb(struct sk_buff *skb);
extern int              dev_valid_name(const char *name);
extern int              dev_ioctl(struct net *net, unsigned int cmd, void __user *);
extern int              dev_ethtool(struct net *net, struct ifreq *);
extern unsigned         dev_get_flags(const struct net_device *);
extern int              dev_change_flags(struct net_device *, unsigned);
extern int              dev_change_name(struct net_device *, char *);
extern int              dev_change_net_namespace(struct net_device *,
                                                 struct net *, const char *);
extern int              dev_set_mtu(struct net_device *, int);
extern int              dev_set_mac_address(struct net_device *,
                                            struct sockaddr *);
extern int              dev_hard_start_xmit(struct sk_buff *skb,
                                            struct net_device *dev);

extern int              netdev_budget;

/* Called by rtnetlink.c:rtnl_unlock() */
extern void netdev_run_todo(void);

/**
 *      dev_put - release reference to device
 *      @dev: network device
 *
 * Release reference to device to allow it to be freed.
 */
static inline void dev_put(struct net_device *dev)
{
        atomic_dec(&dev->refcnt);
}

/**
 *      dev_hold - get reference to device
 *      @dev: network device
 *
 * Hold reference to device to keep it from being freed.
 */
static inline void dev_hold(struct net_device *dev)
{
        atomic_inc(&dev->refcnt);
}
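/*
 * Usage sketch (editor's illustration): code that stores a net_device
 * pointer beyond the scope in which it was obtained must hold a
 * reference, since unregistration waits for refcnt to drop before the
 * device can be freed:
 *
 *      dev = dev_get_by_index(net, ifindex);
 *      if (!dev)
 *              return -ENODEV;
 *      ...
 *      dev_put(dev);
 *
 * (dev_get_by_index() takes the reference itself.) A pointer obtained
 * without a reference is pinned with dev_hold() and later released
 * with dev_put().
 */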
 
/* Carrier loss detection, dial on demand. The functions netif_carrier_on
 * and _off may be called from IRQ context, but it is the caller
 * who is responsible for serialization of these calls.
 *
 * The name carrier is inappropriate; these functions should really be
 * called netif_lowerlayer_*() because they represent the state of any
 * kind of lower layer, not just hardware media.
 */

extern void linkwatch_fire_event(struct net_device *dev);

/**
 *      netif_carrier_ok - test if carrier present
 *      @dev: network device
 *
 * Check if carrier is present on device
 */
static inline int netif_carrier_ok(const struct net_device *dev)
{
        return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}

extern void __netdev_watchdog_up(struct net_device *dev);

extern void netif_carrier_on(struct net_device *dev);

extern void netif_carrier_off(struct net_device *dev);
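/*
 * Usage sketch (editor's illustration; my_phy_link_up is hypothetical):
 * a driver's link-state interrupt or PHY poll routine reports the
 * lower-layer state with these helpers, which fire linkwatch events:
 *
 *      if (my_phy_link_up(priv)) {
 *              if (!netif_carrier_ok(dev))
 *                      netif_carrier_on(dev);
 *      } else if (netif_carrier_ok(dev)) {
 *              netif_carrier_off(dev);
 *      }
 */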
 
/**
 *      netif_dormant_on - mark device as dormant.
 *      @dev: network device
 *
 * Mark device as dormant (as per RFC2863).
 *
 * The dormant state indicates that the relevant interface is not
 * actually in a condition to pass packets (i.e., it is not 'up') but is
 * in a "pending" state, waiting for some external event.  For "on-
 * demand" interfaces, this new state identifies the situation where the
 * interface is waiting for events to place it in the up state.
 *
 */
static inline void netif_dormant_on(struct net_device *dev)
{
        if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
                linkwatch_fire_event(dev);
}

/**
 *      netif_dormant_off - set device as not dormant.
 *      @dev: network device
 *
 * Device is not in dormant state.
 */
static inline void netif_dormant_off(struct net_device *dev)
{
        if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
                linkwatch_fire_event(dev);
}

/**
 *      netif_dormant - test if device is dormant
 *      @dev: network device
 *
 * Check if the device is in the dormant state.
 */
static inline int netif_dormant(const struct net_device *dev)
{
        return test_bit(__LINK_STATE_DORMANT, &dev->state);
}


/**
 *      netif_oper_up - test if device is operational
 *      @dev: network device
 *
 * Check if the device is operational (RFC2863 oper state is up).
 */
static inline int netif_oper_up(const struct net_device *dev) {
        return (dev->operstate == IF_OPER_UP ||
                dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
}

/**
 *      netif_device_present - is device available or removed
 *      @dev: network device
 *
 * Check if device has not been removed from system.
 */
static inline int netif_device_present(struct net_device *dev)
{
        return test_bit(__LINK_STATE_PRESENT, &dev->state);
}
 
extern void netif_device_detach(struct net_device *dev);

extern void netif_device_attach(struct net_device *dev);

/*
 * Network interface message level settings
 */
#define HAVE_NETIF_MSG 1

enum {
        NETIF_MSG_DRV           = 0x0001,
        NETIF_MSG_PROBE         = 0x0002,
        NETIF_MSG_LINK          = 0x0004,
        NETIF_MSG_TIMER         = 0x0008,
        NETIF_MSG_IFDOWN        = 0x0010,
        NETIF_MSG_IFUP          = 0x0020,
        NETIF_MSG_RX_ERR        = 0x0040,
        NETIF_MSG_TX_ERR        = 0x0080,
        NETIF_MSG_TX_QUEUED     = 0x0100,
        NETIF_MSG_INTR          = 0x0200,
        NETIF_MSG_TX_DONE       = 0x0400,
        NETIF_MSG_RX_STATUS     = 0x0800,
        NETIF_MSG_PKTDATA       = 0x1000,
        NETIF_MSG_HW            = 0x2000,
        NETIF_MSG_WOL           = 0x4000,
};

#define netif_msg_drv(p)        ((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p)      ((p)->msg_enable & NETIF_MSG_PROBE)
#define netif_msg_link(p)       ((p)->msg_enable & NETIF_MSG_LINK)
#define netif_msg_timer(p)      ((p)->msg_enable & NETIF_MSG_TIMER)
#define netif_msg_ifdown(p)     ((p)->msg_enable & NETIF_MSG_IFDOWN)
#define netif_msg_ifup(p)       ((p)->msg_enable & NETIF_MSG_IFUP)
#define netif_msg_rx_err(p)     ((p)->msg_enable & NETIF_MSG_RX_ERR)
#define netif_msg_tx_err(p)     ((p)->msg_enable & NETIF_MSG_TX_ERR)
#define netif_msg_tx_queued(p)  ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
#define netif_msg_intr(p)       ((p)->msg_enable & NETIF_MSG_INTR)
#define netif_msg_tx_done(p)    ((p)->msg_enable & NETIF_MSG_TX_DONE)
#define netif_msg_rx_status(p)  ((p)->msg_enable & NETIF_MSG_RX_STATUS)
#define netif_msg_pktdata(p)    ((p)->msg_enable & NETIF_MSG_PKTDATA)
#define netif_msg_hw(p)         ((p)->msg_enable & NETIF_MSG_HW)
#define netif_msg_wol(p)        ((p)->msg_enable & NETIF_MSG_WOL)

static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
{
        /* use default */
        if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
                return default_msg_enable_bits;
        if (debug_value == 0)   /* no output */
                return 0;
        /* set low N bits */
        return (1 << debug_value) - 1;
}
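/*
 * Usage sketch (editor's illustration; the debug parameter is
 * hypothetical): drivers usually feed a module parameter through
 * netif_msg_init() at probe time.  E.g. debug = 3 sets the low three
 * bits, NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK = 0x0007:
 *
 *      static int debug = -1;
 *
 *      priv->msg_enable = netif_msg_init(debug,
 *                                        NETIF_MSG_DRV | NETIF_MSG_LINK);
 *      if (netif_msg_link(priv))
 *              printk(KERN_INFO "%s: link is up\n", dev->name);
 *
 * (debug = -1 selects default_msg_enable_bits.)
 */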
 
/* Test if receive needs to be scheduled but only if up */
static inline int netif_rx_schedule_prep(struct net_device *dev,
                                         struct napi_struct *napi)
{
        return napi_schedule_prep(napi);
}

/* Add interface to tail of rx poll list. This assumes that _prep has
 * already been called and returned 1.
 */
static inline void __netif_rx_schedule(struct net_device *dev,
                                       struct napi_struct *napi)
{
        __napi_schedule(napi);
}

/* Try to reschedule poll. Called by irq handler. */

static inline void netif_rx_schedule(struct net_device *dev,
                                     struct napi_struct *napi)
{
        if (netif_rx_schedule_prep(dev, napi))
                __netif_rx_schedule(dev, napi);
}

/* Try to reschedule poll. Called by dev->poll() after netif_rx_complete().  */
static inline int netif_rx_reschedule(struct net_device *dev,
                                      struct napi_struct *napi)
{
        if (napi_schedule_prep(napi)) {
                __netif_rx_schedule(dev, napi);
                return 1;
        }
        return 0;
}

/* same as netif_rx_complete, except that local_irq_save(flags)
 * has already been issued
 */
static inline void __netif_rx_complete(struct net_device *dev,
                                       struct napi_struct *napi)
{
        __napi_complete(napi);
}

/* Remove interface from poll list: it must be in the poll list
 * on the current cpu. This primitive is called by dev->poll() when
 * it completes its work. If the device is not in the poll list at
 * this point, it is a BUG().
 */
static inline void netif_rx_complete(struct net_device *dev,
                                     struct napi_struct *napi)
{
        unsigned long flags;

        local_irq_save(flags);
        __netif_rx_complete(dev, napi);
        local_irq_restore(flags);
}
 
/**
 *      __netif_tx_lock - grab network device transmit lock
 *      @dev: network device
 *      @cpu: cpu number of lock owner
 *
 * Get network device transmit lock
 */
static inline void __netif_tx_lock(struct net_device *dev, int cpu)
{
        spin_lock(&dev->_xmit_lock);
        dev->xmit_lock_owner = cpu;
}

static inline void netif_tx_lock(struct net_device *dev)
{
        __netif_tx_lock(dev, smp_processor_id());
}

static inline void netif_tx_lock_bh(struct net_device *dev)
{
        spin_lock_bh(&dev->_xmit_lock);
        dev->xmit_lock_owner = smp_processor_id();
}

static inline int netif_tx_trylock(struct net_device *dev)
{
        int ok = spin_trylock(&dev->_xmit_lock);
        if (likely(ok))
                dev->xmit_lock_owner = smp_processor_id();
        return ok;
}

static inline void netif_tx_unlock(struct net_device *dev)
{
        dev->xmit_lock_owner = -1;
        spin_unlock(&dev->_xmit_lock);
}

static inline void netif_tx_unlock_bh(struct net_device *dev)
{
        dev->xmit_lock_owner = -1;
        spin_unlock_bh(&dev->_xmit_lock);
}

#define HARD_TX_LOCK(dev, cpu) {                        \
        if ((dev->features & NETIF_F_LLTX) == 0) {      \
                __netif_tx_lock(dev, cpu);              \
        }                                               \
}

#define HARD_TX_UNLOCK(dev) {                           \
        if ((dev->features & NETIF_F_LLTX) == 0) {      \
                netif_tx_unlock(dev);                   \
        }                                               \
}

static inline void netif_tx_disable(struct net_device *dev)
{
        netif_tx_lock_bh(dev);
        netif_stop_queue(dev);
        netif_tx_unlock_bh(dev);
}
 
/* These functions live elsewhere (drivers/net/net_init.c, but related) */

extern void             ether_setup(struct net_device *dev);

/* Support for loadable net-drivers */
extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
                                       void (*setup)(struct net_device *),
                                       unsigned int queue_count);
#define alloc_netdev(sizeof_priv, name, setup) \
        alloc_netdev_mq(sizeof_priv, name, setup, 1)
extern int              register_netdev(struct net_device *dev);
extern void             unregister_netdev(struct net_device *dev);
/* Functions used for secondary unicast and multicast support */
extern void             dev_set_rx_mode(struct net_device *dev);
extern void             __dev_set_rx_mode(struct net_device *dev);
extern int              dev_unicast_delete(struct net_device *dev, void *addr, int alen);
extern int              dev_unicast_add(struct net_device *dev, void *addr, int alen);
extern int              dev_mc_delete(struct net_device *dev, void *addr, int alen, int all);
extern int              dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly);
extern int              dev_mc_sync(struct net_device *to, struct net_device *from);
extern void             dev_mc_unsync(struct net_device *to, struct net_device *from);
extern int              __dev_addr_delete(struct dev_addr_list **list, int *count, void *addr, int alen, int all);
extern int              __dev_addr_add(struct dev_addr_list **list, int *count, void *addr, int alen, int newonly);
extern void             dev_set_promiscuity(struct net_device *dev, int inc);
extern void             dev_set_allmulti(struct net_device *dev, int inc);
extern void             netdev_state_change(struct net_device *dev);
extern void             netdev_features_change(struct net_device *dev);
/* Load a device via the kmod */
extern void             dev_load(struct net *net, const char *name);
extern void             dev_mcast_init(void);
extern int              netdev_max_backlog;
extern int              weight_p;
extern int              netdev_set_master(struct net_device *dev, struct net_device *master);
extern int skb_checksum_help(struct sk_buff *skb);
extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features);
#ifdef CONFIG_BUG
extern void netdev_rx_csum_fault(struct net_device *dev);
#else
static inline void netdev_rx_csum_fault(struct net_device *dev)
{
}
#endif
/* rx skb timestamps */
extern void             net_enable_timestamp(void);
extern void             net_disable_timestamp(void);

#ifdef CONFIG_PROC_FS
extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
extern void dev_seq_stop(struct seq_file *seq, void *v);
#endif

extern void linkwatch_run_queue(void);

extern int netdev_compute_features(unsigned long all, unsigned long one);

static inline int net_gso_ok(int features, int gso_type)
{
        int feature = gso_type << NETIF_F_GSO_SHIFT;
        return (features & feature) == feature;
}

static inline int skb_gso_ok(struct sk_buff *skb, int features)
{
        return net_gso_ok(features, skb_shinfo(skb)->gso_type);
}

static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
{
        return skb_is_gso(skb) &&
               (!skb_gso_ok(skb, dev->features) ||
                unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
}
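/*
 * Worked example (editor's illustration): the GSO feature bits are the
 * SKB_GSO_* values from <linux/skbuff.h> shifted into the high half of
 * dev->features; with SKB_GSO_TCPV4 = 1:
 *
 *      NETIF_F_TSO = SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT = 1 << 16 = 0x10000
 *
 * net_gso_ok(features, SKB_GSO_TCPV4) tests exactly that bit, so
 * netif_needs_gso() is true only for a GSO skb that the device cannot
 * segment (or checksum) itself, in which case the core falls back to
 * skb_gso_segment() in software.
 */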
 
/* On bonding slaves other than the currently active slave, suppress
 * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
 * ARP on active-backup slaves with arp_validate enabled.
 */
static inline int skb_bond_should_drop(struct sk_buff *skb)
{
        struct net_device *dev = skb->dev;
        struct net_device *master = dev->master;

        if (master &&
            (dev->priv_flags & IFF_SLAVE_INACTIVE)) {
                if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
                    skb->protocol == __constant_htons(ETH_P_ARP))
                        return 0;

                if (master->priv_flags & IFF_MASTER_ALB) {
                        if (skb->pkt_type != PACKET_BROADCAST &&
                            skb->pkt_type != PACKET_MULTICAST)
                                return 0;
                }
                if (master->priv_flags & IFF_MASTER_8023AD &&
                    skb->protocol == __constant_htons(ETH_P_SLOW))
                        return 0;

                return 1;
        }
        return 0;
}

#endif /* __KERNEL__ */

#endif  /* _LINUX_NETDEVICE_H */
