/* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
/*
        Written 1999-2000 by Donald Becker.

        This software may be used and distributed according to the terms of
        the GNU General Public License (GPL), incorporated herein by reference.
        Drivers based on or derived from this code fall under the GPL and must
        retain the authorship, copyright and license notice.  This file is not
        a complete program and may only be used when the entire operating
        system is licensed under the GPL.

        The author may be reached as becker@scyld.com, or C/O
        Scyld Computing Corporation
        410 Severn Ave., Suite 210
        Annapolis MD 21403

        Support and updates available at
        http://www.scyld.com/network/sundance.html


        Version LK1.01a (jgarzik):
        - Replace some MII-related magic numbers with constants

        Version LK1.02 (D-Link):
        - Add new board to PCI ID list
        - Fix multicast bug

        Version LK1.03 (D-Link):
        - New Rx scheme, reduce Rx congestion
        - Option to disable flow control

        Version LK1.04 (D-Link):
        - Tx timeout recovery
        - More support for ethtool.

        Version LK1.04a:
        - Remove unused/constant members from struct pci_id_info
        (which then allows removal of 'drv_flags' from private struct)
        (jgarzik)
        - If no phy is found, fail to load that board (jgarzik)
        - Always start phy id scan at id 1 to avoid problems (Donald Becker)
        - Autodetect where mii_preamble_required is needed,
        default to not needed.  (Donald Becker)

        Version LK1.04b:
        - Remove mii_preamble_required module parameter (Donald Becker)
        - Add per-interface mii_preamble_required (setting is autodetected)
          (Donald Becker)
        - Remove unnecessary cast from void pointer (jgarzik)
        - Re-align comments in private struct (jgarzik)

        Version LK1.04c (jgarzik):
        - Support bitmapped message levels (NETIF_MSG_xxx), and the
          two ethtool ioctls that get/set them
        - Don't hand-code MII ethtool support, use standard API/lib

        Version LK1.04d:
        - Merge from Donald Becker's sundance.c: (Jason Lunz)
                * proper support for variably-sized MTUs
                * default to PIO, to fix chip bugs
        - Add missing unregister_netdev (Jason Lunz)
        - Add CONFIG_SUNDANCE_MMIO config option (jgarzik)
        - Better rx buf size calculation (Donald Becker)

        Version LK1.05 (D-Link):
        - Fix DFE-580TX packet drop issue (for DL10050C)
        - Fix reset_tx logic

        Version LK1.06 (D-Link):
        - Fix crash while unloading driver

        Version LK1.06b (D-Link):
        - New tx scheme, adaptive tx_coalesce

        Version LK1.07 (D-Link):
        - Fix tx bugs in big-endian machines
        - Remove unused max_interrupt_work module parameter, the new
          NAPI-like rx scheme doesn't need it.
        - Remove redundant get_stats() call in intr_handler(); those
          I/O accesses could affect performance on ARM-based systems
        - Add Linux software VLAN support

        Version LK1.08 (D-Link):
        - Fix custom MAC address bug
        (the StationAddr register only accepts word writes)

        Version LK1.09 (D-Link):
        - Fix the flowctrl bug.
        - Set Pause bit in MII ANAR if flow control is enabled.

        Version LK1.09a (ICPlus):
        - Add a delay when reading the contents of the EEPROM

*/

#define DRV_NAME        "sundance"
#define DRV_VERSION     "1.01+LK1.09a"
#define DRV_RELDATE     "10-Jul-2003"


/* The user-configurable values.
   These may be modified when a driver module is loaded.*/
static int debug = 1;                   /* 1 normal messages, 0 quiet .. 7 verbose. */
/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   Typical is a 64 element hash table based on the Ethernet CRC.  */
static int multicast_filter_limit = 32;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature.
   This chip can receive into offset buffers, so the Alpha does not
   need a copy-align. */
static int rx_copybreak;
static int flowctrl=1;

/* media[] specifies the media type the NIC operates at.
                 autosense      Autosensing active media.
                 10mbps_hd      10Mbps half duplex.
                 10mbps_fd      10Mbps full duplex.
                 100mbps_hd     100Mbps half duplex.
                 100mbps_fd     100Mbps full duplex.

                 1              10Mbps half duplex.
                 2              10Mbps full duplex.
                 3              100Mbps half duplex.
                 4              100Mbps full duplex.
*/
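/* For example (an illustrative invocation, assumed rather than taken from
   the original sources), the options above are passed at module load time
   on 2.4 kernels, with the comma-separated media list assigning one
   setting per card:

        insmod sundance.o debug=1 flowctrl=1 media=100mbps_fd,autosense
*/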
#define MAX_UNITS 8
static char *media[MAX_UNITS];


/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority, and more than 128 requires modifying the
   Tx error recovery.
   Large receive rings merely waste memory. */
#define TX_RING_SIZE    32
#define TX_QUEUE_LEN    (TX_RING_SIZE - 1) /* Limit ring entries actually used.  */
#define RX_RING_SIZE    64
#define RX_BUDGET       32
#define TX_TOTAL_SIZE   TX_RING_SIZE*sizeof(struct netdev_desc)
#define RX_TOTAL_SIZE   RX_RING_SIZE*sizeof(struct netdev_desc)

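/* A sketch of the bit-mask conversion mentioned above (illustrative, not
   part of the driver): with TX_RING_SIZE a power of two, an index wrap
   such as

        unsigned entry = np->cur_tx % TX_RING_SIZE;

   is compiled down to the cheaper

        unsigned entry = np->cur_tx & (TX_RING_SIZE - 1);

   which is why the ring sizes must remain powers of two. */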
/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (4*HZ)
#define PKT_BUF_SZ              1536    /* Size of each temporary Rx buffer.*/

#ifndef __KERNEL__
#define __KERNEL__
#endif
#if !defined(__OPTIMIZE__)
#warning  You must compile this file with the correct options!
#warning  See the last lines of the source file.
#error You must compile this driver with "-O".
#endif

/* Include files, designed to support most kernel versions 2.0.0 and later. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <asm/uaccess.h>
#include <asm/processor.h>              /* Processor type for cache alignment. */
#include <asm/bitops.h>
#include <asm/io.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#ifndef _COMPAT_WITH_OLD_KERNEL
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#else
#include "crc32.h"
#include "ethtool.h"
#include "mii.h"
#include "compat.h"
#endif

/* These identify the driver base version and may not be removed. */
static char version[] __devinitdata =
KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE "  Written by Donald Becker\n"
KERN_INFO "  http://www.scyld.com/network/sundance.html\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
MODULE_LICENSE("GPL");

MODULE_PARM(debug, "i");
MODULE_PARM(rx_copybreak, "i");
MODULE_PARM(media, "1-" __MODULE_STRING(MAX_UNITS) "s");
MODULE_PARM(flowctrl, "i");
MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");

/*
                                Theory of Operation

I. Board Compatibility

This driver is designed for the Sundance Technologies "Alta" ST201 chip.

II. Board-specific settings

III. Driver operation

IIIa. Ring buffers

This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
Some chips explicitly use only 2^N sized rings, while others use a
'next descriptor' pointer that the driver forms into rings.

IIIb/c. Transmit/Receive Structure

This driver uses a zero-copy receive and transmit scheme.
The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the chip as receive data
buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack.  Buffers consumed this way are replaced by newly allocated
skbuffs in a later phase of receives.

The RX_COPYBREAK value is chosen to trade-off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames.  New boards are typically used in generously configured machines
and the underfilled buffers have negligible impact compared to the benefit of
a single allocation size, so the default value of zero results in never
copying packets.  When copying is done, the cost is usually mitigated by using
a combined copy/checksum routine.  Copying also preloads the cache, which is
most useful with small frames.

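A sketch of that decision as it appears in rx_poll() below (an outline of
the existing logic, not new behavior):

        if (pkt_len < rx_copybreak
                        && (skb = dev_alloc_skb(pkt_len + 2)) != NULL)
                copy the frame into the small fresh skb and leave the
                full-sized ring buffer in place for a later packet;
        else
                hand the full-sized ring skbuff up the protocol stack
                and let refill_rx() replace it.
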
A subtle aspect of the operation is that the IP header at offset 14 in an
ethernet frame isn't longword aligned for further processing.
Unaligned buffers are permitted by the Sundance hardware, so
frames are received into the skbuff at an offset of "+2", 16-byte aligning
the IP header.

IIId. Synchronization

The driver runs as two independent, single-threaded flows of control.  One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag.  The other thread is the interrupt handler, which is single
threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring and 'dev->tbusy'
flag.  It sets the tbusy flag whenever it's queuing a Tx packet. If the next
queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
the 'lp->tx_full' flag.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
clears both the tx_full and tbusy flags.

IV. Notes

IVb. References

The Sundance ST201 datasheet, preliminary version.
http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html
http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html

IVc. Errata

*/

/* Work-around for Kendin chip bugs. */
#ifndef CONFIG_SUNDANCE_MMIO
#define USE_IO_OPS 1
#endif

static struct pci_device_id sundance_pci_tbl[] __devinitdata = {
        {0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0},
        {0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1},
        {0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2},
        {0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3},
        {0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
        {0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5},
        {0,}
};
MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);

enum {
        netdev_io_size = 128
};

struct pci_id_info {
        const char *name;
};
static struct pci_id_info pci_id_tbl[] = {
        {"D-Link DFE-550TX FAST Ethernet Adapter"},
        {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
        {"D-Link DFE-580TX 4 port Server Adapter"},
        {"D-Link DFE-530TXS FAST Ethernet Adapter"},
        {"D-Link DL10050-based FAST Ethernet Adapter"},
        {"Sundance Technology Alta"},
        {0,},                    /* 0 terminated list. */
};

/* This driver was written to use PCI memory space, however x86-oriented
   hardware often uses I/O space accesses. */
#ifdef USE_IO_OPS
#undef readb
#undef readw
#undef readl
#undef writeb
#undef writew
#undef writel
#define readb inb
#define readw inw
#define readl inl
#define writeb outb
#define writew outw
#define writel outl
#endif

/* Offsets to the device registers.
   Unlike software-only systems, device drivers interact with complex hardware.
   It's not useful to define symbolic names for every register bit in the
   device.  The name can only partially document the semantics and make
   the driver longer and more difficult to read.
   In general, only the important configuration values or bits changed
   multiple times should be defined symbolically.
*/
enum alta_offsets {
        DMACtrl = 0x00,
        TxListPtr = 0x04,
        TxDMABurstThresh = 0x08,
        TxDMAUrgentThresh = 0x09,
        TxDMAPollPeriod = 0x0a,
        RxDMAStatus = 0x0c,
        RxListPtr = 0x10,
        DebugCtrl0 = 0x1a,
        DebugCtrl1 = 0x1c,
        RxDMABurstThresh = 0x14,
        RxDMAUrgentThresh = 0x15,
        RxDMAPollPeriod = 0x16,
        LEDCtrl = 0x1a,
        ASICCtrl = 0x30,
        EEData = 0x34,
        EECtrl = 0x36,
        TxStartThresh = 0x3c,
        RxEarlyThresh = 0x3e,
        FlashAddr = 0x40,
        FlashData = 0x44,
        TxStatus = 0x46,
        TxFrameId = 0x47,
        DownCounter = 0x18,
        IntrClear = 0x4a,
        IntrEnable = 0x4c,
        IntrStatus = 0x4e,
        MACCtrl0 = 0x50,
        MACCtrl1 = 0x52,
        StationAddr = 0x54,
        MaxFrameSize = 0x5A,
        RxMode = 0x5c,
        MIICtrl = 0x5e,
        MulticastFilter0 = 0x60,
        MulticastFilter1 = 0x64,
        RxOctetsLow = 0x68,
        RxOctetsHigh = 0x6a,
        TxOctetsLow = 0x6c,
        TxOctetsHigh = 0x6e,
        TxFramesOK = 0x70,
        RxFramesOK = 0x72,
        StatsCarrierError = 0x74,
        StatsLateColl = 0x75,
        StatsMultiColl = 0x76,
        StatsOneColl = 0x77,
        StatsTxDefer = 0x78,
        RxMissed = 0x79,
        StatsTxXSDefer = 0x7a,
        StatsTxAbort = 0x7b,
        StatsBcastTx = 0x7c,
        StatsBcastRx = 0x7d,
        StatsMcastTx = 0x7e,
        StatsMcastRx = 0x7f,
        /* Aliased and bogus values! */
        RxStatus = 0x0c,
};
enum ASICCtrl_HiWord_bit {
        GlobalReset = 0x0001,
        RxReset = 0x0002,
        TxReset = 0x0004,
        DMAReset = 0x0008,
        FIFOReset = 0x0010,
        NetworkReset = 0x0020,
        HostReset = 0x0040,
        ResetBusy = 0x0400,
};

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
        IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
        IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
        IntrDrvRqst=0x0040,
        StatsMax=0x0080, LinkChange=0x0100,
        IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
};

/* Bits in the RxMode register. */
enum rx_mode_bits {
        AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
        AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
};
/* Bits in MACCtrl. */
enum mac_ctrl0_bits {
        EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
        EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
};
enum mac_ctrl1_bits {
        StatsEnable=0x0020,     StatsDisable=0x0040, StatsEnabled=0x0080,
        TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
        RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
};

/* The Rx and Tx buffer descriptors. */
/* Note that using only 32 bit fields simplifies conversion to big-endian
   architectures. */
struct netdev_desc {
        u32 next_desc;
        u32 status;
        struct desc_frag { u32 addr, length; } frag[1];
};

/* Bits in netdev_desc.status */
enum desc_status_bits {
        DescOwn=0x8000,
        DescEndPacket=0x4000,
        DescEndRing=0x2000,
        LastFrag=0x80000000,
        DescIntrOnTx=0x8000,
        DescIntrOnDMADone=0x80000000,
        DisableAlign = 0x00000001,
};

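/* Note that several of the values above overlap: DescOwn and DescIntrOnTx
   are both 0x8000, and LastFrag and DescIntrOnDMADone are both 0x80000000.
   Which meaning applies depends on where the bit is used: an Rx status
   word, a Tx status word (DescIntrOnTx is set in tx_poll() below), or a
   fragment length word (LastFrag is OR-ed into frag[0].length in
   init_ring() and start_tx()). */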
#define PRIV_ALIGN      15      /* Required alignment mask */
/* Use  __attribute__((aligned (L1_CACHE_BYTES)))  to maintain alignment
   within the structure. */
#define MII_CNT         4
struct netdev_private {
        /* Descriptor rings first for alignment. */
        struct netdev_desc *rx_ring;
        struct netdev_desc *tx_ring;
        struct sk_buff* rx_skbuff[RX_RING_SIZE];
        struct sk_buff* tx_skbuff[TX_RING_SIZE];
        dma_addr_t tx_ring_dma;
        dma_addr_t rx_ring_dma;
        struct net_device_stats stats;
        struct timer_list timer;                /* Media monitoring timer. */
        /* Frequently used values: keep some adjacent for cache effect. */
        spinlock_t lock;
        spinlock_t rx_lock;                     /* Group with Tx control cache line. */
        int msg_enable;
        int chip_id;
        unsigned int cur_rx, dirty_rx;          /* Producer/consumer ring indices */
        unsigned int rx_buf_sz;                 /* Based on MTU+slack. */
        struct netdev_desc *last_tx;            /* Last Tx descriptor used. */
        unsigned int cur_tx, dirty_tx;
        /* These values keep track of the transceiver/media in use. */
        unsigned int flowctrl:1;
        unsigned int default_port:4;            /* Last dev->if_port value. */
        unsigned int an_enable:1;
        unsigned int speed;
        struct tasklet_struct rx_tasklet;
        struct tasklet_struct tx_tasklet;
        int budget;
        int cur_task;
        /* Multicast and receive mode. */
        spinlock_t mcastlock;                   /* SMP lock multicast updates. */
        u16 mcast_filter[4];
        /* MII transceiver section. */
        struct mii_if_info mii_if;
        int mii_preamble_required;
        unsigned char phys[MII_CNT];            /* MII device addresses, only first one used. */
        struct pci_dev *pci_dev;
        unsigned char pci_rev_id;
};

/* The station address location in the EEPROM. */
#define EEPROM_SA_OFFSET        0x10
#define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
                        IntrDrvRqst | IntrTxDone | StatsMax | \
                        LinkChange)

static int  change_mtu(struct net_device *dev, int new_mtu);
static int  eeprom_read(long ioaddr, int location);
static int  mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int  netdev_open(struct net_device *dev);
static void check_duplex(struct net_device *dev);
static void netdev_timer(unsigned long data);
static void tx_timeout(struct net_device *dev);
static void init_ring(struct net_device *dev);
static int  start_tx(struct sk_buff *skb, struct net_device *dev);
static int reset_tx (struct net_device *dev);
static void intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
static void rx_poll(unsigned long data);
static void tx_poll(unsigned long data);
static void refill_rx (struct net_device *dev);
static void netdev_error(struct net_device *dev, int intr_status);
static void set_rx_mode(struct net_device *dev);
static int __set_mac_addr(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int  netdev_close(struct net_device *dev);



static int __devinit sundance_probe1 (struct pci_dev *pdev,
                                      const struct pci_device_id *ent)
{
        struct net_device *dev;
        struct netdev_private *np;
        static int card_idx;
        int chip_idx = ent->driver_data;
        int irq;
        int i;
        long ioaddr;
        u16 mii_ctl;
        void *ring_space;
        dma_addr_t ring_dma;


/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
        static int printed_version;
        if (!printed_version++)
                printk(version);
#endif

        if (pci_enable_device(pdev))
                return -EIO;
        pci_set_master(pdev);

        irq = pdev->irq;

        dev = alloc_etherdev(sizeof(*np));
        if (!dev)
                return -ENOMEM;
        SET_MODULE_OWNER(dev);

        if (pci_request_regions(pdev, DRV_NAME))
                goto err_out_netdev;

#ifdef USE_IO_OPS
        ioaddr = pci_resource_start(pdev, 0);
#else
        ioaddr = pci_resource_start(pdev, 1);
        ioaddr = (long) ioremap (ioaddr, netdev_io_size);
        if (!ioaddr)
                goto err_out_res;
#endif

        for (i = 0; i < 3; i++)
                ((u16 *)dev->dev_addr)[i] =
                        le16_to_cpu(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));

        dev->base_addr = ioaddr;
        dev->irq = irq;

        np = dev->priv;
        np->pci_dev = pdev;
        np->chip_id = chip_idx;
        np->msg_enable = (1 << debug) - 1;
        spin_lock_init(&np->lock);
        tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
        tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);

        ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
        if (!ring_space)
                goto err_out_cleardev;
        np->tx_ring = (struct netdev_desc *)ring_space;
        np->tx_ring_dma = ring_dma;

        ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
        if (!ring_space)
                goto err_out_unmap_tx;
        np->rx_ring = (struct netdev_desc *)ring_space;
        np->rx_ring_dma = ring_dma;

        np->mii_if.dev = dev;
        np->mii_if.mdio_read = mdio_read;
        np->mii_if.mdio_write = mdio_write;
        np->mii_if.phy_id_mask = 0x1f;
        np->mii_if.reg_num_mask = 0x1f;

        /* The chip-specific entries in the device structure. */
        dev->open = &netdev_open;
        dev->hard_start_xmit = &start_tx;
        dev->stop = &netdev_close;
        dev->get_stats = &get_stats;
        dev->set_multicast_list = &set_rx_mode;
        dev->do_ioctl = &netdev_ioctl;
        dev->tx_timeout = &tx_timeout;
        dev->watchdog_timeo = TX_TIMEOUT;
        dev->change_mtu = &change_mtu;
        pci_set_drvdata(pdev, dev);

        pci_read_config_byte(pdev, PCI_REVISION_ID, &np->pci_rev_id);

        i = register_netdev(dev);
        if (i)
                goto err_out_unmap_rx;

        printk(KERN_INFO "%s: %s at 0x%lx, ",
                   dev->name, pci_id_tbl[chip_idx].name, ioaddr);
        for (i = 0; i < 5; i++)
                        printk("%2.2x:", dev->dev_addr[i]);
        printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);

        if (1) {
                int phy, phy_idx = 0;
                np->phys[0] = 1;         /* Default setting */
                np->mii_preamble_required++;
                for (phy = 1; phy < 32 && phy_idx < MII_CNT; phy++) {
                        int mii_status = mdio_read(dev, phy, MII_BMSR);
                        if (mii_status != 0xffff  &&  mii_status != 0x0000) {
                                np->phys[phy_idx++] = phy;
                                np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
                                if ((mii_status & 0x0040) == 0)
                                        np->mii_preamble_required++;
                                printk(KERN_INFO "%s: MII PHY found at address %d, status "
                                           "0x%4.4x advertising %4.4x.\n",
                                           dev->name, phy, mii_status, np->mii_if.advertising);
                        }
                }
                np->mii_preamble_required--;

                if (phy_idx == 0) {
                        printk(KERN_INFO "%s: No MII transceiver found, aborting.  ASIC status %x\n",
                                   dev->name, readl(ioaddr + ASICCtrl));
                        goto err_out_unregister;
                }

                np->mii_if.phy_id = np->phys[0];
        }

        /* Parse override configuration */
        np->an_enable = 1;
        if (card_idx < MAX_UNITS) {
                if (media[card_idx] != NULL) {
                        np->an_enable = 0;
                        if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
                            strcmp (media[card_idx], "4") == 0) {
                                np->speed = 100;
                                np->mii_if.full_duplex = 1;
                        } else if (strcmp (media[card_idx], "100mbps_hd") == 0
                                   || strcmp (media[card_idx], "3") == 0) {
                                np->speed = 100;
                                np->mii_if.full_duplex = 0;
                        } else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
                                   strcmp (media[card_idx], "2") == 0) {
                                np->speed = 10;
                                np->mii_if.full_duplex = 1;
                        } else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
                                   strcmp (media[card_idx], "1") == 0) {
                                np->speed = 10;
                                np->mii_if.full_duplex = 0;
                        } else {
                                np->an_enable = 1;
                        }
                }
                if (flowctrl == 1)
                        np->flowctrl = 1;
        }

        /* Fibre PHY? */
        if (readl (ioaddr + ASICCtrl) & 0x80) {
                /* Default 100Mbps Full */
                if (np->an_enable) {
                        np->speed = 100;
                        np->mii_if.full_duplex = 1;
                        np->an_enable = 0;
                }
        }
        /* Reset PHY */
        mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
        mdelay (300);
        /* If flow control enabled, we need to advertise it.*/
        if (np->flowctrl)
                mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
        mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
        /* Force media type */
        if (!np->an_enable) {
                mii_ctl = 0;
                mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
                mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
                mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
                printk (KERN_INFO "Override speed=%d, %s duplex\n",
                        np->speed, np->mii_if.full_duplex ? "Full" : "Half");

        }

        /* Perhaps move the reset here? */
        /* Reset the chip to erase previous misconfiguration. */
        if (netif_msg_hw(np))
                printk("ASIC Control is %x.\n", readl(ioaddr + ASICCtrl));
        writew(0x007f, ioaddr + ASICCtrl + 2);
        if (netif_msg_hw(np))
                printk("ASIC Control is now %x.\n", readl(ioaddr + ASICCtrl));

        card_idx++;
        return 0;

err_out_unregister:
        unregister_netdev(dev);
err_out_unmap_rx:
        pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
err_out_unmap_tx:
        pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
err_out_cleardev:
        pci_set_drvdata(pdev, NULL);
#ifndef USE_IO_OPS
        iounmap((void *)ioaddr);
err_out_res:
#endif
        pci_release_regions(pdev);
err_out_netdev:
        kfree (dev);
        return -ENODEV;
}

static int change_mtu(struct net_device *dev, int new_mtu)
{
        if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */
                return -EINVAL;
        if (netif_running(dev))
                return -EBUSY;
        dev->mtu = new_mtu;
        return 0;
}

#define eeprom_delay(ee_addr)   readl(ee_addr)
/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
static int __devinit eeprom_read(long ioaddr, int location)
{
        int boguscnt = 10000;           /* Typical 1900 ticks. */
        writew(0x0200 | (location & 0xff), ioaddr + EECtrl);
        do {
                eeprom_delay(ioaddr + EECtrl);
                if (! (readw(ioaddr + EECtrl) & 0x8000)) {
                        return readw(ioaddr + EEData);
                }
        } while (--boguscnt > 0);
        return 0;
}

/*  MII transceiver control section.
        Read and write the MII registers using software-generated serial
        MDIO protocol.  See the MII specifications or DP83840A data sheet
        for details.

        The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
        met by back-to-back 33 MHz PCI cycles. */
#define mdio_delay() readb(mdio_addr)

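/* Note: mdio_delay() expands textually at each call site and relies on a
   local 'mdio_addr' variable being in scope there; the dummy read is what
   provides the required bus-turnaround delay. */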
enum mii_reg_bits {
        MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
};
#define MDIO_EnbIn  (0)
#define MDIO_WRITE0 (MDIO_EnbOutput)
#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)

/* Generate the preamble required for initial synchronization and
   a few older transceivers. */
static void mdio_sync(long mdio_addr)
{
        int bits = 32;

        /* Establish sync by sending at least 32 logic ones. */
        while (--bits >= 0) {
                writeb(MDIO_WRITE1, mdio_addr);
                mdio_delay();
                writeb(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
                mdio_delay();
        }
}

static int mdio_read(struct net_device *dev, int phy_id, int location)
{
        struct netdev_private *np = dev->priv;
        long mdio_addr = dev->base_addr + MIICtrl;
        int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
        int i, retval = 0;

        if (np->mii_preamble_required)
                mdio_sync(mdio_addr);

        /* Shift the read command bits out. */
        for (i = 15; i >= 0; i--) {
                int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

                writeb(dataval, mdio_addr);
                mdio_delay();
                writeb(dataval | MDIO_ShiftClk, mdio_addr);
                mdio_delay();
        }
        /* Read the two transition, 16 data, and wire-idle bits. */
        for (i = 19; i > 0; i--) {
                writeb(MDIO_EnbIn, mdio_addr);
                mdio_delay();
                retval = (retval << 1) | ((readb(mdio_addr) & MDIO_Data) ? 1 : 0);
                writeb(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
                mdio_delay();
        }
        return (retval>>1) & 0xffff;
}

static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
        struct netdev_private *np = dev->priv;
        long mdio_addr = dev->base_addr + MIICtrl;
        int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
        int i;

        if (np->mii_preamble_required)
                mdio_sync(mdio_addr);

        /* Shift the command bits out. */
        for (i = 31; i >= 0; i--) {
                int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

                writeb(dataval, mdio_addr);
                mdio_delay();
                writeb(dataval | MDIO_ShiftClk, mdio_addr);
                mdio_delay();
        }
        /* Clear out extra bits. */
        for (i = 2; i > 0; i--) {
                writeb(MDIO_EnbIn, mdio_addr);
                mdio_delay();
                writeb(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
                mdio_delay();
        }
        return;
}

static int netdev_open(struct net_device *dev)
{
        struct netdev_private *np = dev->priv;
        long ioaddr = dev->base_addr;
        int i;

        /* Do we need to reset the chip??? */

        i = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
        if (i)
                return i;

        if (netif_msg_ifup(np))
                printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
                           dev->name, dev->irq);
        init_ring(dev);

        writel(np->rx_ring_dma, ioaddr + RxListPtr);
        /* The Tx list pointer is written as packets are queued. */

        /* Initialize other registers. */
        __set_mac_addr(dev);
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
        writew(dev->mtu + 18, ioaddr + MaxFrameSize);
#else
        writew(dev->mtu + 14, ioaddr + MaxFrameSize);
#endif
        if (dev->mtu > 2047)
                writel(readl(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);

        /* Configure the PCI bus bursts and FIFO thresholds. */

        if (dev->if_port == 0)
                dev->if_port = np->default_port;

        np->mcastlock = (spinlock_t) SPIN_LOCK_UNLOCKED;

        set_rx_mode(dev);
        writew(0, ioaddr + IntrEnable);
        writew(0, ioaddr + DownCounter);
        /* Set the chip to poll every N*320nsec. */
        writeb(100, ioaddr + RxDMAPollPeriod);
        writeb(127, ioaddr + TxDMAPollPeriod);
        /* Fix DFE-580TX packet drop issue */
        if (np->pci_rev_id >= 0x14)
                writeb(0x01, ioaddr + DebugCtrl1);
        netif_start_queue(dev);

        writew (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);

        if (netif_msg_ifup(np))
                printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
                           "MAC Control %x, %4.4x %4.4x.\n",
                           dev->name, readl(ioaddr + RxStatus), readb(ioaddr + TxStatus),
                           readl(ioaddr + MACCtrl0),
                           readw(ioaddr + MACCtrl1), readw(ioaddr + MACCtrl0));

        /* Set the timer to check for link beat. */
        init_timer(&np->timer);
        np->timer.expires = jiffies + 3*HZ;
        np->timer.data = (unsigned long)dev;
        np->timer.function = &netdev_timer;                             /* timer handler */
        add_timer(&np->timer);

        /* Enable interrupts by setting the interrupt mask. */
        writew(DEFAULT_INTR, ioaddr + IntrEnable);

        return 0;
}

static void check_duplex(struct net_device *dev)
{
        struct netdev_private *np = dev->priv;
        long ioaddr = dev->base_addr;
        int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
        int negotiated = mii_lpa & np->mii_if.advertising;
        int duplex;

        /* Force media */
        if (!np->an_enable || mii_lpa == 0xffff) {
                if (np->mii_if.full_duplex)
                        writew (readw (ioaddr + MACCtrl0) | EnbFullDuplex,
                                ioaddr + MACCtrl0);
                return;
        }

        /* Autonegotiation */
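        /* Per the standard MII LPA bit layout (the constants below are
           unchanged from the original): 0x0100 is 100baseTx-FD, and the
           second test accepts a link where, of the 0x01C0 group
           (10baseT-FD, 100baseTx-HD, 100baseTx-FD), only 10baseT-FD
           (0x0040) was negotiated. */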
        duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
        if (np->mii_if.full_duplex != duplex) {
                np->mii_if.full_duplex = duplex;
                if (netif_msg_link(np))
                        printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
                                   "negotiated capability %4.4x.\n", dev->name,
                                   duplex ? "full" : "half", np->phys[0], negotiated);
                writew(readw(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
        }
}

static void netdev_timer(unsigned long data)
{
        struct net_device *dev = (struct net_device *)data;
        struct netdev_private *np = dev->priv;
        long ioaddr = dev->base_addr;
        int next_tick = 10*HZ;

        if (netif_msg_timer(np)) {
                printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
                           "Tx %x Rx %x.\n",
                           dev->name, readw(ioaddr + IntrEnable),
                           readb(ioaddr + TxStatus), readl(ioaddr + RxStatus));
        }
        check_duplex(dev);
        np->timer.expires = jiffies + next_tick;
        add_timer(&np->timer);
}

static void tx_timeout(struct net_device *dev)
{
        struct netdev_private *np = dev->priv;
        long ioaddr = dev->base_addr;
        unsigned long flag;

        netif_stop_queue(dev);
        tasklet_disable(&np->tx_tasklet);
        writew(0, ioaddr + IntrEnable);
        printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
                   "TxFrameId %2.2x,"
                   " resetting...\n", dev->name, readb(ioaddr + TxStatus),
                   readb(ioaddr + TxFrameId));

        {
                int i;
                for (i=0; i<TX_RING_SIZE; i++) {
                        printk(KERN_DEBUG "%02x %08x %08x %08x(%02x) %08x %08x\n", i,
                                np->tx_ring_dma + i*sizeof(*np->tx_ring),
                                le32_to_cpu(np->tx_ring[i].next_desc),
                                le32_to_cpu(np->tx_ring[i].status),
                                (le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
                                le32_to_cpu(np->tx_ring[i].frag[0].addr),
                                le32_to_cpu(np->tx_ring[i].frag[0].length));
                }
                printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
                        readl(dev->base_addr + TxListPtr),
                        netif_queue_stopped(dev));
                printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
                        np->cur_tx, np->cur_tx % TX_RING_SIZE,
                        np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
                printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
                printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
        }
        spin_lock_irqsave(&np->lock, flag);

        /* Stop and restart the chip's Tx processes. */
        reset_tx(dev);
        spin_unlock_irqrestore(&np->lock, flag);

        dev->if_port = 0;

        dev->trans_start = jiffies;
        np->stats.tx_errors++;
        if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
                netif_wake_queue(dev);
        }
        writew(DEFAULT_INTR, ioaddr + IntrEnable);
        tasklet_enable(&np->tx_tasklet);
}


/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
        struct netdev_private *np = dev->priv;
        int i;

        np->cur_rx = np->cur_tx = 0;
        np->dirty_rx = np->dirty_tx = 0;
        np->cur_task = 0;

        np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);

        /* Initialize all Rx descriptors. */
        for (i = 0; i < RX_RING_SIZE; i++) {
                np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
                        ((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
                np->rx_ring[i].status = 0;
                np->rx_ring[i].frag[0].length = 0;
                np->rx_skbuff[i] = 0;
        }

        /* Fill in the Rx buffers.  Handle allocation failure gracefully. */
        for (i = 0; i < RX_RING_SIZE; i++) {
                struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
                np->rx_skbuff[i] = skb;
                if (skb == NULL)
                        break;
                skb->dev = dev;         /* Mark as being used by this device. */
                skb_reserve(skb, 2);    /* 16 byte align the IP header. */
                np->rx_ring[i].frag[0].addr = cpu_to_le32(
                        pci_map_single(np->pci_dev, skb->tail, np->rx_buf_sz,
                                PCI_DMA_FROMDEVICE));
                np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
        }
        np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

        for (i = 0; i < TX_RING_SIZE; i++) {
                np->tx_skbuff[i] = 0;
                np->tx_ring[i].status = 0;
        }
        return;
}

static void tx_poll (unsigned long data)
{
        struct net_device *dev = (struct net_device *)data;
        struct netdev_private *np = dev->priv;
        unsigned head = np->cur_task % TX_RING_SIZE;
        struct netdev_desc *txdesc =
                &np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];

        /* Chain the next pointer */
        for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
                int entry = np->cur_task % TX_RING_SIZE;
                txdesc = &np->tx_ring[entry];
                if (np->last_tx) {
                        np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
                                entry*sizeof(struct netdev_desc));
                }
                np->last_tx = txdesc;
        }
        /* Indicate the latest descriptor of tx ring */
        txdesc->status |= cpu_to_le32(DescIntrOnTx);

        if (readl (dev->base_addr + TxListPtr) == 0)
                writel (np->tx_ring_dma + head * sizeof(struct netdev_desc),
                        dev->base_addr + TxListPtr);
        return;
}

static int
start_tx (struct sk_buff *skb, struct net_device *dev)
{
        struct netdev_private *np = dev->priv;
        struct netdev_desc *txdesc;
        unsigned entry;

        /* Calculate the next Tx descriptor entry. */
        entry = np->cur_tx % TX_RING_SIZE;
        np->tx_skbuff[entry] = skb;
        txdesc = &np->tx_ring[entry];

        txdesc->next_desc = 0;
        txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
        txdesc->frag[0].addr = cpu_to_le32 (pci_map_single (np->pci_dev, skb->data,
                                                        skb->len,
                                                        PCI_DMA_TODEVICE));
        txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);

        /* Increment cur_tx before tasklet_schedule() */
        np->cur_tx++;
        mb();
        /* Schedule a tx_poll() task */
        tasklet_schedule(&np->tx_tasklet);

        /* On some architectures: explicitly flush cache lines here. */
        if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1
                        && !netif_queue_stopped(dev)) {
                /* do nothing */
        } else {
                netif_stop_queue (dev);
        }
        dev->trans_start = jiffies;
        if (netif_msg_tx_queued(np)) {
                printk (KERN_DEBUG
                        "%s: Transmit frame #%d queued in slot %d.\n",
                        dev->name, np->cur_tx, entry);
        }
        return 0;
}

/* Reset the hardware Tx path and free all Tx buffers. */
static int
reset_tx (struct net_device *dev)
{
        struct netdev_private *np = (struct netdev_private*) dev->priv;
        long ioaddr = dev->base_addr;
        struct sk_buff *skb;
        int i;
        int irq = in_interrupt();

        /* Reset tx logic, TxListPtr will be cleaned */
        writew (TxDisable, ioaddr + MACCtrl1);
        writew (TxReset | DMAReset | FIFOReset | NetworkReset,
                        ioaddr + ASICCtrl + 2);
        for (i=50; i > 0; i--) {
                if ((readw(ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
                        break;
                mdelay(1);
        }
        /* free all tx skbuff */
        for (i = 0; i < TX_RING_SIZE; i++) {
                skb = np->tx_skbuff[i];
                if (skb) {
                        pci_unmap_single(np->pci_dev,
                                np->tx_ring[i].frag[0].addr, skb->len,
                                PCI_DMA_TODEVICE);
                        if (irq)
                                dev_kfree_skb_irq (skb);
                        else
                                dev_kfree_skb (skb);
                        np->tx_skbuff[i] = 0;
                        np->stats.tx_dropped++;
                }
        }
        np->cur_tx = np->dirty_tx = 0;
        np->cur_task = 0;
        writew (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
        return 0;
}

/* The interrupt handler cleans up after the Tx thread
   and schedules Rx thread work. */
static void intr_handler(int irq, void *dev_instance, struct pt_regs *regs)
{
        struct net_device *dev = (struct net_device *)dev_instance;
        struct netdev_private *np;
        long ioaddr;
        int hw_frame_id;
        int tx_cnt;
        int tx_status;

        ioaddr = dev->base_addr;
        np = dev->priv;

        do {
                int intr_status = readw(ioaddr + IntrStatus);
                writew(intr_status, ioaddr + IntrStatus);

                if (netif_msg_intr(np))
                        printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
                                   dev->name, intr_status);

                if (!(intr_status & DEFAULT_INTR))
                        break;

                if (intr_status & (IntrRxDMADone)) {
                        writew(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
                                        ioaddr + IntrEnable);
                        if (np->budget < 0)
                                np->budget = RX_BUDGET;
                        tasklet_schedule(&np->rx_tasklet);
                }
                if (intr_status & (IntrTxDone | IntrDrvRqst)) {
                        tx_status = readw (ioaddr + TxStatus);
                        for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
                                if (netif_msg_tx_done(np))
                                        printk("%s: Transmit status is %2.2x.\n",
                                                dev->name, tx_status);
                                if (tx_status & 0x1e) {
                                        np->stats.tx_errors++;
                                        if (tx_status & 0x10)
                                                np->stats.tx_fifo_errors++;
                                        if (tx_status & 0x08)
                                                np->stats.collisions++;
                                        if (tx_status & 0x02)
                                                np->stats.tx_window_errors++;
                                        /* This reset has not been verified! */
                                        if (tx_status & 0x10) { /* Reset the Tx. */
                                                np->stats.tx_fifo_errors++;
                                                spin_lock(&np->lock);
                                                reset_tx(dev);
                                                spin_unlock(&np->lock);
                                        }
                                        if (tx_status & 0x1e)   /* Restart the Tx. */
                                                writew (TxEnable,
                                                        ioaddr + MACCtrl1);
                                }
                                /* Yup, this is a documentation bug.  It cost me *hours*. */
                                writew (0, ioaddr + TxStatus);
                                tx_status = readw (ioaddr + TxStatus);
                                if (tx_cnt < 0)
                                        break;
                        }
                        hw_frame_id = (tx_status >> 8) & 0xff;
                } else {
                        hw_frame_id = readb(ioaddr + TxFrameId);
                }

                if (np->pci_rev_id >= 0x14) {
                        spin_lock(&np->lock);
                        for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
                                int entry = np->dirty_tx % TX_RING_SIZE;
                                struct sk_buff *skb;
                                int sw_frame_id;
                                sw_frame_id = (le32_to_cpu(
                                        np->tx_ring[entry].status) >> 2) & 0xff;
                                if (sw_frame_id == hw_frame_id &&
                                        !(le32_to_cpu(np->tx_ring[entry].status)
                                        & 0x00010000))
                                                break;
                                if (sw_frame_id == (hw_frame_id + 1) %
                                        TX_RING_SIZE)
                                                break;
                                skb = np->tx_skbuff[entry];
                                /* Free the original skb. */
                                pci_unmap_single(np->pci_dev,
                                        np->tx_ring[entry].frag[0].addr,
                                        skb->len, PCI_DMA_TODEVICE);
                                dev_kfree_skb_irq (np->tx_skbuff[entry]);
                                np->tx_skbuff[entry] = 0;
                                np->tx_ring[entry].frag[0].addr = 0;
                                np->tx_ring[entry].frag[0].length = 0;
                        }
                        spin_unlock(&np->lock);
                } else {
                        spin_lock(&np->lock);
                        for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
                                int entry = np->dirty_tx % TX_RING_SIZE;
                                struct sk_buff *skb;
                                if (!(le32_to_cpu(np->tx_ring[entry].status)
                                                        & 0x00010000))
                                        break;
                                skb = np->tx_skbuff[entry];
                                /* Free the original skb. */
                                pci_unmap_single(np->pci_dev,
                                        np->tx_ring[entry].frag[0].addr,
                                        skb->len, PCI_DMA_TODEVICE);
                                dev_kfree_skb_irq (np->tx_skbuff[entry]);
                                np->tx_skbuff[entry] = 0;
                                np->tx_ring[entry].frag[0].addr = 0;
                                np->tx_ring[entry].frag[0].length = 0;
                        }
                        spin_unlock(&np->lock);
                }

                if (netif_queue_stopped(dev) &&
                        np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
                        /* The ring is no longer full, clear busy flag. */
                        netif_wake_queue (dev);
                }
                /* Abnormal error summary/uncommon events handlers. */
                if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
                        netdev_error(dev, intr_status);
        } while (0);
        if (netif_msg_intr(np))
                printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
                           dev->name, readw(ioaddr + IntrStatus));
        writel(5000, ioaddr + DownCounter);

}

static void rx_poll(unsigned long data)
{
        struct net_device *dev = (struct net_device *)data;
        struct netdev_private *np = dev->priv;
        int entry = np->cur_rx % RX_RING_SIZE;
        int boguscnt = np->budget;
        long ioaddr = dev->base_addr;
        int received = 0;

        /* If EOP is set on the next entry, it's a new packet. Send it up. */
        while (1) {
                struct netdev_desc *desc = &(np->rx_ring[entry]);
                u32 frame_status = le32_to_cpu(desc->status);
                int pkt_len;

                if (--boguscnt < 0) {
                        goto not_done;
                }
                if (!(frame_status & DescOwn))
                        break;
                pkt_len = frame_status & 0x1fff;        /* Chip omits the CRC. */
                if (netif_msg_rx_status(np))
                        printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
                                   frame_status);
                pci_dma_sync_single(np->pci_dev, desc->frag[0].addr,
                        np->rx_buf_sz, PCI_DMA_FROMDEVICE);

                if (frame_status & 0x001f4000) {
                        /* There was a error. */
1335
                        if (netif_msg_rx_err(np))
1336
                                printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
1337
                                           frame_status);
1338
                        np->stats.rx_errors++;
1339
                        if (frame_status & 0x00100000) np->stats.rx_length_errors++;
1340
                        if (frame_status & 0x00010000) np->stats.rx_fifo_errors++;
1341
                        if (frame_status & 0x00060000) np->stats.rx_frame_errors++;
1342
                        if (frame_status & 0x00080000) np->stats.rx_crc_errors++;
1343
                        if (frame_status & 0x00100000) {
1344
                                printk(KERN_WARNING "%s: Oversized Ethernet frame,"
1345
                                           " status %8.8x.\n",
1346
                                           dev->name, frame_status);
1347
                        }
1348
                } else {
1349
                        struct sk_buff *skb;
1350
#ifndef final_version
1351
                        if (netif_msg_rx_status(np))
1352
                                printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
1353
                                           ", bogus_cnt %d.\n",
1354
                                           pkt_len, boguscnt);
1355
#endif
1356
                        /* Check if the packet is long enough to accept without copying
1357
                           to a minimally-sized skbuff. */
1358
                        if (pkt_len < rx_copybreak
1359
                                && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1360
                                skb->dev = dev;
1361
                                skb_reserve(skb, 2);    /* 16 byte align the IP header */
1362
                                eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
1363
                                skb_put(skb, pkt_len);
1364
                        } else {
1365
                                pci_unmap_single(np->pci_dev,
1366
                                        desc->frag[0].addr,
1367
                                        np->rx_buf_sz,
1368
                                        PCI_DMA_FROMDEVICE);
1369
                                skb_put(skb = np->rx_skbuff[entry], pkt_len);
1370
                                np->rx_skbuff[entry] = NULL;
1371
                        }
1372
                        skb->protocol = eth_type_trans(skb, dev);
1373
                        /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
1374
                        netif_rx(skb);
1375
                        dev->last_rx = jiffies;
1376
                }
1377
                entry = (entry + 1) % RX_RING_SIZE;
1378
                received++;
1379
        }
1380
        np->cur_rx = entry;
1381
        refill_rx (dev);
1382
        np->budget -= received;
1383
        writew(DEFAULT_INTR, ioaddr + IntrEnable);
1384
        return;
1385
 
1386
not_done:
1387
        np->cur_rx = entry;
1388
        refill_rx (dev);
1389
        if (!received)
1390
                received = 1;
1391
        np->budget -= received;
1392
        if (np->budget <= 0)
1393
                np->budget = RX_BUDGET;
1394
        tasklet_schedule(&np->rx_tasklet);
1395
        return;
1396
}
1397
 
1398
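/* Allocate and map fresh socket buffers for every ring slot whose skb was
   handed to the stack, advancing dirty_rx as each slot is refilled. */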
static void refill_rx(struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	int entry;
	int cnt = 0;

	/* Refill the Rx ring buffers. */
	for (; (np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
		np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
		struct sk_buff *skb;
		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(np->rx_buf_sz);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;		/* Better luck next round. */
			skb->dev = dev;		/* Mark as being used by this device. */
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			np->rx_ring[entry].frag[0].addr = cpu_to_le32(
				pci_map_single(np->pci_dev, skb->tail,
					np->rx_buf_sz, PCI_DMA_FROMDEVICE));
		}
		/* Perhaps we need not reset this field. */
		np->rx_ring[entry].frag[0].length =
			cpu_to_le32(np->rx_buf_sz | LastFrag);
		np->rx_ring[entry].status = 0;
		cnt++;
	}
	return;
}

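/* Handle the uncommon interrupt sources: link changes (re-resolve speed and
   duplex, re-enable flow control), statistics overflow, and PCI bus errors. */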
static void netdev_error(struct net_device *dev, int intr_status)
{
	long ioaddr = dev->base_addr;
	struct netdev_private *np = dev->priv;
	u16 mii_ctl, mii_advertise, mii_lpa;
	int speed;

	if (intr_status & LinkChange) {
		if (np->an_enable) {
			mii_advertise = mdio_read(dev, np->phys[0], MII_ADVERTISE);
			mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
			mii_advertise &= mii_lpa;
			printk(KERN_INFO "%s: Link changed: ", dev->name);
			if (mii_advertise & ADVERTISE_100FULL) {
				np->speed = 100;
				printk("100Mbps, full duplex\n");
			} else if (mii_advertise & ADVERTISE_100HALF) {
				np->speed = 100;
				printk("100Mbps, half duplex\n");
			} else if (mii_advertise & ADVERTISE_10FULL) {
				np->speed = 10;
				printk("10Mbps, full duplex\n");
			} else if (mii_advertise & ADVERTISE_10HALF) {
				np->speed = 10;
				printk("10Mbps, half duplex\n");
			} else
				printk("\n");

		} else {
			mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR);
			speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
			np->speed = speed;
			printk(KERN_INFO "%s: Link changed: %dMbps, ",
				dev->name, speed);
			printk("%s duplex.\n", (mii_ctl & BMCR_FULLDPLX) ?
				"full" : "half");
		}
		check_duplex(dev);
		if (np->flowctrl && np->mii_if.full_duplex) {
			writew(readw(ioaddr + MulticastFilter1+2) | 0x0200,
				ioaddr + MulticastFilter1+2);
			writew(readw(ioaddr + MACCtrl0) | EnbFlowCtrl,
				ioaddr + MACCtrl0);
		}
	}
	if (intr_status & StatsMax) {
		get_stats(dev);
	}
	if (intr_status & IntrPCIErr) {
		printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
			   dev->name, intr_status);
		/* We must do a global reset of DMA to continue. */
	}
}

static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	long ioaddr = dev->base_addr;
	int i;

	/* We should lock this segment of code for SMP eventually, although
	   the vulnerability window is very small and statistics are
	   non-critical. */
	/* The chip only needs to report frames it silently dropped; the
	   remaining counters are accumulated below. */
	np->stats.rx_missed_errors += readb(ioaddr + RxMissed);
	np->stats.tx_packets += readw(ioaddr + TxFramesOK);
	np->stats.rx_packets += readw(ioaddr + RxFramesOK);
	np->stats.collisions += readb(ioaddr + StatsLateColl);
	np->stats.collisions += readb(ioaddr + StatsMultiColl);
	np->stats.collisions += readb(ioaddr + StatsOneColl);
	np->stats.tx_carrier_errors += readb(ioaddr + StatsCarrierError);
	/* Consume the remaining statistics registers; their values are not
	   folded into np->stats. */
	for (i = StatsTxDefer; i <= StatsMcastRx; i++)
		readb(ioaddr + i);
	np->stats.tx_bytes += readw(ioaddr + TxOctetsLow);
	np->stats.tx_bytes += readw(ioaddr + TxOctetsHigh) << 16;
	np->stats.rx_bytes += readw(ioaddr + RxOctetsLow);
	np->stats.rx_bytes += readw(ioaddr + RxOctetsHigh) << 16;

	return &np->stats;
}

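/* Program the receive filter: promiscuous mode, accept-all-multicast, a
   64-bit multicast hash table, or plain station-address matching. */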
static void set_rx_mode(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct netdev_private *np = dev->priv;
	u16 mc_filter[4];			/* Multicast hash filter */
	u32 rx_mode;
	int i;

	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		/* Unconditionally log net taps. */
		printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
	} else if ((dev->mc_count > multicast_filter_limit)
			   || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
	} else if (dev->mc_count) {
		struct dev_mc_list *mclist;
		int bit;
		int index;
		int crc;
		memset(mc_filter, 0, sizeof(mc_filter));
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			/* Use the six most significant CRC bits, reversed, as
			   the index into the 64-bit hash filter. */
			for (index = 0, bit = 0; bit < 6; bit++, crc <<= 1)
				if (crc & 0x80000000) index |= 1 << bit;
			mc_filter[index/16] |= (1 << (index % 16));
		}
		rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
	} else {
		writeb(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
		return;
	}
	if (np->mii_if.full_duplex && np->flowctrl)
		mc_filter[3] |= 0x0200;

	for (i = 0; i < 4; i++)
		writew(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
	writeb(rx_mode, ioaddr + RxMode);
}

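/* Write the station address into the chip's StationAddr registers as three
   little-endian 16-bit words. */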
static int __set_mac_addr(struct net_device *dev)
{
	u16 addr16;

	addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
	writew(addr16, dev->base_addr + StationAddr);
	addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
	writew(addr16, dev->base_addr + StationAddr+2);
	addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
	writew(addr16, dev->base_addr + StationAddr+4);
	return 0;
}

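/* SIOCETHTOOL backend: dispatch the ethtool sub-commands, using the generic
   MII library for media settings, link state and autonegotiation, and the
   per-device msg_enable word for the message-level commands. */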
static int netdev_ethtool_ioctl(struct net_device *dev, void *useraddr)
{
	struct netdev_private *np = dev->priv;
	u32 ethcmd;

	if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
		return -EFAULT;

	switch (ethcmd) {
	/* get constant driver settings/info */
	case ETHTOOL_GDRVINFO: {
		struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO};
		strcpy(info.driver, DRV_NAME);
		strcpy(info.version, DRV_VERSION);
		strcpy(info.bus_info, np->pci_dev->slot_name);
		memset(&info.fw_version, 0, sizeof(info.fw_version));
		if (copy_to_user(useraddr, &info, sizeof(info)))
			return -EFAULT;
		return 0;
	}

	/* get media settings */
	case ETHTOOL_GSET: {
		struct ethtool_cmd ecmd = { ETHTOOL_GSET };
		spin_lock_irq(&np->lock);
		mii_ethtool_gset(&np->mii_if, &ecmd);
		spin_unlock_irq(&np->lock);
		if (copy_to_user(useraddr, &ecmd, sizeof(ecmd)))
			return -EFAULT;
		return 0;
	}
	/* set media settings */
	case ETHTOOL_SSET: {
		int r;
		struct ethtool_cmd ecmd;
		if (copy_from_user(&ecmd, useraddr, sizeof(ecmd)))
			return -EFAULT;
		spin_lock_irq(&np->lock);
		r = mii_ethtool_sset(&np->mii_if, &ecmd);
		spin_unlock_irq(&np->lock);
		return r;
	}

	/* restart autonegotiation */
	case ETHTOOL_NWAY_RST: {
		return mii_nway_restart(&np->mii_if);
	}

	/* get link status */
	case ETHTOOL_GLINK: {
		struct ethtool_value edata = {ETHTOOL_GLINK};
		edata.data = mii_link_ok(&np->mii_if);
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
			return -EFAULT;
		return 0;
	}

	/* get message-level */
	case ETHTOOL_GMSGLVL: {
		struct ethtool_value edata = {ETHTOOL_GMSGLVL};
		edata.data = np->msg_enable;
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
			return -EFAULT;
		return 0;
	}
	/* set message-level */
	case ETHTOOL_SMSGLVL: {
		struct ethtool_value edata;
		if (copy_from_user(&edata, useraddr, sizeof(edata)))
			return -EFAULT;
		np->msg_enable = edata.data;
		return 0;
	}

	default:
		return -EOPNOTSUPP;
	}
}

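/* Main ioctl entry point.  SIOCETHTOOL is routed to the handler above, MII
   ioctls go through generic_mii_ioctl(), and SIOCDEVPRIVATE additionally
   dumps the Tx ring state for debugging. */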
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct netdev_private *np = dev->priv;
	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&rq->ifr_data;
	int rc;
	int i;
	long ioaddr = dev->base_addr;

	if (!netif_running(dev))
		return -EINVAL;

	if (cmd == SIOCETHTOOL)
		rc = netdev_ethtool_ioctl(dev, (void *)rq->ifr_data);
	else {
		spin_lock_irq(&np->lock);
		rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL);
		spin_unlock_irq(&np->lock);
	}
	switch (cmd) {
	case SIOCDEVPRIVATE:
		/* Dump the Tx ring and queue state for debugging. */
		for (i = 0; i < TX_RING_SIZE; i++) {
			printk(KERN_DEBUG "%02x %08x %08x %08x(%02x) %08x %08x\n", i,
				np->tx_ring_dma + i*sizeof(*np->tx_ring),
				le32_to_cpu(np->tx_ring[i].next_desc),
				le32_to_cpu(np->tx_ring[i].status),
				(le32_to_cpu(np->tx_ring[i].status) >> 2)
					& 0xff,
				le32_to_cpu(np->tx_ring[i].frag[0].addr),
				le32_to_cpu(np->tx_ring[i].frag[0].length));
		}
		printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
			readl(dev->base_addr + TxListPtr),
			netif_queue_stopped(dev));
		printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
			np->cur_tx, np->cur_tx % TX_RING_SIZE,
			np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
		printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
		printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
		printk(KERN_DEBUG "TxStatus=%04x\n", readw(ioaddr + TxStatus));
		return 0;
	}

	return rc;
}

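/* Take the interface down: stop the queue, mask interrupts, halt the chip's
   Tx/Rx engines, kill the tasklets, then release the IRQ, the timer, and
   every buffer still held in the rings. */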
static int netdev_close(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct netdev_private *np = dev->priv;
	struct sk_buff *skb;
	int i;

	netif_stop_queue(dev);

	if (netif_msg_ifdown(np)) {
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
			   "Rx %4.4x Int %2.2x.\n",
			   dev->name, readb(ioaddr + TxStatus),
			   readl(ioaddr + RxStatus), readw(ioaddr + IntrStatus));
		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
			   dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
	}

	/* Disable interrupts by clearing the interrupt mask. */
	writew(0x0000, ioaddr + IntrEnable);

	/* Stop the chip's Tx and Rx processes. */
	writew(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);

	/* Wait and kill tasklet */
	tasklet_kill(&np->rx_tasklet);
	tasklet_kill(&np->tx_tasklet);

#ifdef __i386__
	if (netif_msg_hw(np)) {
		printk(KERN_DEBUG "  Tx ring at %8.8x:\n",
			   (int)(np->tx_ring_dma));
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(" #%d desc. %4.4x %8.8x %8.8x.\n",
				   i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
				   np->tx_ring[i].frag[0].length);
		printk(KERN_DEBUG "  Rx ring %8.8x:\n",
			   (int)(np->rx_ring_dma));
		for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
			printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
				   i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
				   np->rx_ring[i].frag[0].length);
		}
	}
#endif /* __i386__ debugging only */

	free_irq(dev->irq, dev);

	del_timer_sync(&np->timer);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].status = 0;
		skb = np->rx_skbuff[i];
		if (skb) {
			pci_unmap_single(np->pci_dev,
				le32_to_cpu(np->rx_ring[i].frag[0].addr),
				np->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(skb);
			np->rx_skbuff[i] = NULL;
		}
		/* Poison the descriptor only after the buffer is unmapped. */
		np->rx_ring[i].frag[0].addr = 0xBADF00D0; /* An invalid address. */
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		skb = np->tx_skbuff[i];
		if (skb) {
			pci_unmap_single(np->pci_dev,
				le32_to_cpu(np->tx_ring[i].frag[0].addr),
				skb->len, PCI_DMA_TODEVICE);
			dev_kfree_skb(skb);
			np->tx_skbuff[i] = NULL;
		}
	}

	return 0;
}

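/* PCI removal callback: unregister the interface and release every resource
   acquired at probe time (descriptor rings, I/O regions, mappings and the
   net_device itself). */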
static void __devexit sundance_remove1(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	/* No need to check MOD_IN_USE, as sys_delete_module() checks. */
	if (dev) {
		struct netdev_private *np = dev->priv;

		unregister_netdev(dev);
		pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring,
			np->rx_ring_dma);
		pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
			np->tx_ring_dma);
		pci_release_regions(pdev);
#ifndef USE_IO_OPS
		iounmap((char *)(dev->base_addr));
#endif
		kfree(dev);
		pci_set_drvdata(pdev, NULL);
	}
}

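/* Glue that ties the probe/remove entry points and the PCI ID table to the
   PCI core. */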
static struct pci_driver sundance_driver = {
	.name		= DRV_NAME,
	.id_table	= sundance_pci_tbl,
	.probe		= sundance_probe1,
	.remove		= __devexit_p(sundance_remove1),
};

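/* Module bookkeeping: register with the PCI core on load, unregister on
   unload. */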
static int __init sundance_init(void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(version);
#endif
	return pci_module_init(&sundance_driver);
}

static void __exit sundance_exit(void)
{
	pci_unregister_driver(&sundance_driver);
}

module_init(sundance_init);
module_exit(sundance_exit);