/* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
/*
        Written 1999-2000 by Donald Becker.

        This software may be used and distributed according to the terms of
        the GNU General Public License (GPL), incorporated herein by reference.
        Drivers based on or derived from this code fall under the GPL and must
        retain the authorship, copyright and license notice.  This file is not
        a complete program and may only be used when the entire operating
        system is licensed under the GPL.

        The author may be reached as becker@scyld.com, or C/O
        Scyld Computing Corporation
        410 Severn Ave., Suite 210
        Annapolis MD 21403

        Support and updates available at
        http://www.scyld.com/network/sundance.html
        [link no longer provides useful info -jgarzik]
        Archives of the mailing list are still available at
        http://www.beowulf.org/pipermail/netdrivers/

*/

#define DRV_NAME        "sundance"
#define DRV_VERSION     "1.2"
#define DRV_RELDATE     "11-Sep-2006"


/* The user-configurable values.
   These may be modified when a driver module is loaded. */
static int debug = 1;                   /* 1 normal messages, 0 quiet .. 7 verbose. */
/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   Typical is a 64-element hash table based on the Ethernet CRC.  */
static const int multicast_filter_limit = 32;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature.
   This chip can receive into offset buffers, so the Alpha does not
   need a copy-align. */
static int rx_copybreak;
static int flowctrl = 1;

/* media[] specifies the media type the NIC operates at.
                 autosense      Autosensing active media.
                 10mbps_hd      10Mbps half duplex.
                 10mbps_fd      10Mbps full duplex.
                 100mbps_hd     100Mbps half duplex.
                 100mbps_fd     100Mbps full duplex.

                 1              10Mbps half duplex.
                 2              10Mbps full duplex.
                 3              100Mbps half duplex.
                 4              100Mbps full duplex.
*/
#define MAX_UNITS 8
static char *media[MAX_UNITS];


/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority, and more than 128 requires modifying the
   Tx error recovery.
   Large receive rings merely waste memory. */
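/* Example: with TX_RING_SIZE = 32, an index expression such as
   'np->cur_tx % TX_RING_SIZE' compiles down to 'np->cur_tx & 31',
   a single AND instruction rather than a division. */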
#define TX_RING_SIZE    32
#define TX_QUEUE_LEN    (TX_RING_SIZE - 1) /* Limit ring entries actually used.  */
#define RX_RING_SIZE    64
#define RX_BUDGET       32
#define TX_TOTAL_SIZE   TX_RING_SIZE*sizeof(struct netdev_desc)
#define RX_TOTAL_SIZE   RX_RING_SIZE*sizeof(struct netdev_desc)

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (4*HZ)
#define PKT_BUF_SZ              1536    /* Size of each temporary Rx buffer. */

/* Include files, designed to support most kernel versions 2.0.0 and later. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/uaccess.h>
#include <asm/processor.h>              /* Processor type for cache alignment. */
#include <asm/io.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#ifndef _COMPAT_WITH_OLD_KERNEL
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#else
#include "crc32.h"
#include "ethtool.h"
#include "mii.h"
#include "compat.h"
#endif

/* These identify the driver base version and may not be removed. */
static char version[] =
KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE "  Written by Donald Becker\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
MODULE_LICENSE("GPL");

module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param_array(media, charp, NULL, 0);
module_param(flowctrl, int, 0);
MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");

/*
                                Theory of Operation

I. Board Compatibility

This driver is designed for the Sundance Technologies "Alta" ST201 chip.

II. Board-specific settings

III. Driver operation

IIIa. Ring buffers

This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
Some chips explicitly use only 2^N sized rings, while others use a
'next descriptor' pointer that the driver forms into rings.
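This driver takes the latter approach: each struct netdev_desc (defined
below) carries a next_desc link, and init_ring() and tx_poll() chain the
descriptors into rings through it.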

IIIb/c. Transmit/Receive Structure

This driver uses a zero-copy receive and transmit scheme.
The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the chip as receive data
buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack.  Buffers consumed this way are replaced by newly allocated
skbuffs in a later phase of receives.

The RX_COPYBREAK value is chosen to trade off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames.  New boards are typically used in generously configured machines
and the underfilled buffers have negligible impact compared to the benefit of
a single allocation size, so the default value of zero results in never
copying packets.  When copying is done, the cost is usually mitigated by using
a combined copy/checksum routine.  Copying also preloads the cache, which is
most useful with small frames.
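
As implemented in rx_poll() below, the decision is simply:

        if (pkt_len < rx_copybreak && (skb = dev_alloc_skb(pkt_len + 2)) != NULL)
                ... copy the frame into the freshly allocated skb ...
        else
                ... pass the full-sized ring skb straight up the stack ...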

A subtle aspect of the operation is that the IP header at offset 14 in an
ethernet frame isn't longword aligned for further processing.
Unaligned buffers are permitted by the Sundance hardware, so
frames are received into the skbuff at an offset of "+2", placing the IP
header at offset 16, where it is longword aligned.
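In code this is simply a skb_reserve(skb, 2) on each freshly allocated
Rx skbuff; see init_ring() below.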

IIId. Synchronization

The driver runs as two independent, single-threaded flows of control.  One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag.  The other thread is the interrupt handler, which is
single-threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring and 'dev->tbusy'
flag.  It sets the tbusy flag whenever it's queuing a Tx packet.  If the next
queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
the 'lp->tx_full' flag.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark.  Iff the 'lp->tx_full' flag is set, it
clears both the tx_full and tbusy flags.
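(In this driver the tbusy/tx_full role described above is played by the
netif_stop_queue()/netif_wake_queue() pair; see start_tx() and
intr_handler().)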

IV. Notes

IVb. References

The Sundance ST201 datasheet, preliminary version.
The Kendin KS8723 datasheet, preliminary version.
The ICplus IP100 datasheet, preliminary version.
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html

IVc. Errata

*/

/* Work-around for Kendin chip bugs. */
#ifndef CONFIG_SUNDANCE_MMIO
#define USE_IO_OPS 1
#endif

static const struct pci_device_id sundance_pci_tbl[] = {
        { 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
        { 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
        { 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
        { 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
        { 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
        { 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
        { 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
        { }
};
MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);

enum {
        netdev_io_size = 128
};

struct pci_id_info {
        const char *name;
};
static const struct pci_id_info pci_id_tbl[] __devinitdata = {
        {"D-Link DFE-550TX FAST Ethernet Adapter"},
        {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
        {"D-Link DFE-580TX 4 port Server Adapter"},
        {"D-Link DFE-530TXS FAST Ethernet Adapter"},
        {"D-Link DL10050-based FAST Ethernet Adapter"},
        {"Sundance Technology Alta"},
        {"IC Plus Corporation IP100A FAST Ethernet Adapter"},
        { }     /* terminate list. */
};

/* This driver was written to use PCI memory space; however, x86-oriented
   hardware often uses I/O space accesses. */

/* Offsets to the device registers.
   Unlike software-only systems, device drivers interact with complex hardware.
   It's not useful to define symbolic names for every register bit in the
   device.  The name can only partially document the semantics and make
   the driver longer and more difficult to read.
   In general, only the important configuration values or bits changed
   multiple times should be defined symbolically.
*/
enum alta_offsets {
        DMACtrl = 0x00,
        TxListPtr = 0x04,
        TxDMABurstThresh = 0x08,
        TxDMAUrgentThresh = 0x09,
        TxDMAPollPeriod = 0x0a,
        RxDMAStatus = 0x0c,
        RxListPtr = 0x10,
        DebugCtrl0 = 0x1a,
        DebugCtrl1 = 0x1c,
        RxDMABurstThresh = 0x14,
        RxDMAUrgentThresh = 0x15,
        RxDMAPollPeriod = 0x16,
        LEDCtrl = 0x1a,
        ASICCtrl = 0x30,
        EEData = 0x34,
        EECtrl = 0x36,
        FlashAddr = 0x40,
        FlashData = 0x44,
        TxStatus = 0x46,
        TxFrameId = 0x47,
        DownCounter = 0x18,
        IntrClear = 0x4a,
        IntrEnable = 0x4c,
        IntrStatus = 0x4e,
        MACCtrl0 = 0x50,
        MACCtrl1 = 0x52,
        StationAddr = 0x54,
        MaxFrameSize = 0x5A,
        RxMode = 0x5c,
        MIICtrl = 0x5e,
        MulticastFilter0 = 0x60,
        MulticastFilter1 = 0x64,
        RxOctetsLow = 0x68,
        RxOctetsHigh = 0x6a,
        TxOctetsLow = 0x6c,
        TxOctetsHigh = 0x6e,
        TxFramesOK = 0x70,
        RxFramesOK = 0x72,
        StatsCarrierError = 0x74,
        StatsLateColl = 0x75,
        StatsMultiColl = 0x76,
        StatsOneColl = 0x77,
        StatsTxDefer = 0x78,
        RxMissed = 0x79,
        StatsTxXSDefer = 0x7a,
        StatsTxAbort = 0x7b,
        StatsBcastTx = 0x7c,
        StatsBcastRx = 0x7d,
        StatsMcastTx = 0x7e,
        StatsMcastRx = 0x7f,
        /* Aliased and bogus values! */
        RxStatus = 0x0c,
};
enum ASICCtrl_HiWord_bit {
        GlobalReset = 0x0001,
        RxReset = 0x0002,
        TxReset = 0x0004,
        DMAReset = 0x0008,
        FIFOReset = 0x0010,
        NetworkReset = 0x0020,
        HostReset = 0x0040,
        ResetBusy = 0x0400,
};

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
        IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
        IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
        IntrDrvRqst=0x0040,
        StatsMax=0x0080, LinkChange=0x0100,
        IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
};

/* Bits in the RxMode register. */
enum rx_mode_bits {
        AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
        AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
};
/* Bits in MACCtrl. */
enum mac_ctrl0_bits {
        EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
        EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
};
enum mac_ctrl1_bits {
        StatsEnable=0x0020,     StatsDisable=0x0040, StatsEnabled=0x0080,
        TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
        RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
};

/* The Rx and Tx buffer descriptors. */
/* Note that using only 32 bit fields simplifies conversion to big-endian
   architectures. */
struct netdev_desc {
        __le32 next_desc;
        __le32 status;
        struct desc_frag { __le32 addr, length; } frag[1];
};
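
/* Only frag[0] is used for both rings; its length field carries the buffer
   length OR'd with LastFrag (bit 31) on the final fragment of a packet.
   See init_ring() and start_tx(). */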

/* Bits in netdev_desc.status */
enum desc_status_bits {
        DescOwn=0x8000,
        DescEndPacket=0x4000,
        DescEndRing=0x2000,
        LastFrag=0x80000000,
        DescIntrOnTx=0x8000,
        DescIntrOnDMADone=0x80000000,
        DisableAlign = 0x00000001,
};

#define PRIV_ALIGN      15      /* Required alignment mask */
/* Use  __attribute__((aligned (L1_CACHE_BYTES)))  to maintain alignment
   within the structure. */
#define MII_CNT         4
struct netdev_private {
        /* Descriptor rings first for alignment. */
        struct netdev_desc *rx_ring;
        struct netdev_desc *tx_ring;
        struct sk_buff* rx_skbuff[RX_RING_SIZE];
        struct sk_buff* tx_skbuff[TX_RING_SIZE];
        dma_addr_t tx_ring_dma;
        dma_addr_t rx_ring_dma;
        struct net_device_stats stats;
        struct timer_list timer;                /* Media monitoring timer. */
        /* Frequently used values: keep some adjacent for cache effect. */
        spinlock_t lock;
        spinlock_t rx_lock;                     /* Group with Tx control cache line. */
        int msg_enable;
        int chip_id;
        unsigned int cur_rx, dirty_rx;          /* Producer/consumer ring indices */
        unsigned int rx_buf_sz;                 /* Based on MTU+slack. */
        struct netdev_desc *last_tx;            /* Last Tx descriptor used. */
        unsigned int cur_tx, dirty_tx;
        /* These values keep track of the transceiver/media in use. */
        unsigned int flowctrl:1;
        unsigned int default_port:4;            /* Last dev->if_port value. */
        unsigned int an_enable:1;
        unsigned int speed;
        struct tasklet_struct rx_tasklet;
        struct tasklet_struct tx_tasklet;
        int budget;
        int cur_task;
        /* Multicast and receive mode. */
        spinlock_t mcastlock;                   /* SMP lock multicast updates. */
        u16 mcast_filter[4];
        /* MII transceiver section. */
        struct mii_if_info mii_if;
        int mii_preamble_required;
        unsigned char phys[MII_CNT];            /* MII device addresses, only first one used. */
        struct pci_dev *pci_dev;
        void __iomem *base;
};

/* The station address location in the EEPROM. */
#define EEPROM_SA_OFFSET        0x10
#define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
                        IntrDrvRqst | IntrTxDone | StatsMax | \
                        LinkChange)

static int  change_mtu(struct net_device *dev, int new_mtu);
static int  eeprom_read(void __iomem *ioaddr, int location);
static int  mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int  netdev_open(struct net_device *dev);
static void check_duplex(struct net_device *dev);
static void netdev_timer(unsigned long data);
static void tx_timeout(struct net_device *dev);
static void init_ring(struct net_device *dev);
static int  start_tx(struct sk_buff *skb, struct net_device *dev);
static int reset_tx (struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
static void rx_poll(unsigned long data);
static void tx_poll(unsigned long data);
static void refill_rx (struct net_device *dev);
static void netdev_error(struct net_device *dev, int intr_status);
static void set_rx_mode(struct net_device *dev);
static int __set_mac_addr(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int  netdev_close(struct net_device *dev);
static const struct ethtool_ops ethtool_ops;

static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base + ASICCtrl;
        int countdown;

        /* ST201 documentation states ASICCtrl is a 32-bit register */
        iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
        /* ST201 documentation states reset can take up to 1 ms */
        countdown = 10 + 1;
        while (ioread32 (ioaddr) & (ResetBusy << 16)) {
                if (--countdown == 0) {
                        printk(KERN_WARNING "%s : reset not completed !!\n", dev->name);
                        break;
                }
                udelay(100);
        }
}

static int __devinit sundance_probe1 (struct pci_dev *pdev,
                                      const struct pci_device_id *ent)
{
        struct net_device *dev;
        struct netdev_private *np;
        static int card_idx;
        int chip_idx = ent->driver_data;
        int irq;
        int i;
        void __iomem *ioaddr;
        u16 mii_ctl;
        void *ring_space;
        dma_addr_t ring_dma;
#ifdef USE_IO_OPS
        int bar = 0;
#else
        int bar = 1;
#endif
        int phy, phy_end, phy_idx = 0;
        DECLARE_MAC_BUF(mac);

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
        static int printed_version;
        if (!printed_version++)
                printk(version);
#endif

        if (pci_enable_device(pdev))
                return -EIO;
        pci_set_master(pdev);

        irq = pdev->irq;

        dev = alloc_etherdev(sizeof(*np));
        if (!dev)
                return -ENOMEM;
        SET_NETDEV_DEV(dev, &pdev->dev);

        if (pci_request_regions(pdev, DRV_NAME))
                goto err_out_netdev;

        ioaddr = pci_iomap(pdev, bar, netdev_io_size);
        if (!ioaddr)
                goto err_out_res;

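        /* The station address is stored as three little-endian 16-bit
           words at EEPROM_SA_OFFSET in the EEPROM. */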
        for (i = 0; i < 3; i++)
                ((__le16 *)dev->dev_addr)[i] =
                        cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
        memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

        dev->base_addr = (unsigned long)ioaddr;
        dev->irq = irq;

        np = netdev_priv(dev);
        np->base = ioaddr;
        np->pci_dev = pdev;
        np->chip_id = chip_idx;
        np->msg_enable = (1 << debug) - 1;
        spin_lock_init(&np->lock);
        tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
        tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);

        ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
        if (!ring_space)
                goto err_out_cleardev;
        np->tx_ring = (struct netdev_desc *)ring_space;
        np->tx_ring_dma = ring_dma;

        ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
        if (!ring_space)
                goto err_out_unmap_tx;
        np->rx_ring = (struct netdev_desc *)ring_space;
        np->rx_ring_dma = ring_dma;

        np->mii_if.dev = dev;
        np->mii_if.mdio_read = mdio_read;
        np->mii_if.mdio_write = mdio_write;
        np->mii_if.phy_id_mask = 0x1f;
        np->mii_if.reg_num_mask = 0x1f;

        /* The chip-specific entries in the device structure. */
        dev->open = &netdev_open;
        dev->hard_start_xmit = &start_tx;
        dev->stop = &netdev_close;
        dev->get_stats = &get_stats;
        dev->set_multicast_list = &set_rx_mode;
        dev->do_ioctl = &netdev_ioctl;
        SET_ETHTOOL_OPS(dev, &ethtool_ops);
        dev->tx_timeout = &tx_timeout;
        dev->watchdog_timeo = TX_TIMEOUT;
        dev->change_mtu = &change_mtu;
        pci_set_drvdata(pdev, dev);

        i = register_netdev(dev);
        if (i)
                goto err_out_unmap_rx;

        printk(KERN_INFO "%s: %s at %p, %s, IRQ %d.\n",
               dev->name, pci_id_tbl[chip_idx].name, ioaddr,
               print_mac(mac, dev->dev_addr), irq);

        np->phys[0] = 1;         /* Default setting */
        np->mii_preamble_required++;

        /*
         * It seems some PHYs don't deal well with address 0 being accessed
         * first
         */
        if (sundance_pci_tbl[np->chip_id].device == 0x0200) {
                phy = 0;
                phy_end = 31;
        } else {
                phy = 1;
                phy_end = 32;   /* wraps to zero, due to 'phy & 0x1f' */
        }
        for (; phy <= phy_end && phy_idx < MII_CNT; phy++) {
                int phyx = phy & 0x1f;
                int mii_status = mdio_read(dev, phyx, MII_BMSR);
                if (mii_status != 0xffff  &&  mii_status != 0x0000) {
                        np->phys[phy_idx++] = phyx;
                        np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
                        if ((mii_status & 0x0040) == 0)
                                np->mii_preamble_required++;
                        printk(KERN_INFO "%s: MII PHY found at address %d, status "
                                   "0x%4.4x advertising %4.4x.\n",
                                   dev->name, phyx, mii_status, np->mii_if.advertising);
                }
        }
        np->mii_preamble_required--;

        if (phy_idx == 0) {
                printk(KERN_INFO "%s: No MII transceiver found, aborting.  ASIC status %x\n",
                           dev->name, ioread32(ioaddr + ASICCtrl));
                goto err_out_unregister;
        }

        np->mii_if.phy_id = np->phys[0];

        /* Parse override configuration */
        np->an_enable = 1;
        if (card_idx < MAX_UNITS) {
                if (media[card_idx] != NULL) {
                        np->an_enable = 0;
                        if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
                            strcmp (media[card_idx], "4") == 0) {
                                np->speed = 100;
                                np->mii_if.full_duplex = 1;
                        } else if (strcmp (media[card_idx], "100mbps_hd") == 0
                                   || strcmp (media[card_idx], "3") == 0) {
                                np->speed = 100;
                                np->mii_if.full_duplex = 0;
                        } else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
                                   strcmp (media[card_idx], "2") == 0) {
                                np->speed = 10;
                                np->mii_if.full_duplex = 1;
                        } else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
                                   strcmp (media[card_idx], "1") == 0) {
                                np->speed = 10;
                                np->mii_if.full_duplex = 0;
                        } else {
                                np->an_enable = 1;
                        }
                }
                if (flowctrl == 1)
                        np->flowctrl = 1;
        }

        /* Fibre PHY? */
        if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
                /* Default 100Mbps Full */
                if (np->an_enable) {
                        np->speed = 100;
                        np->mii_if.full_duplex = 1;
                        np->an_enable = 0;
                }
        }
        /* Reset PHY */
        mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
        mdelay (300);
        /* If flow control enabled, we need to advertise it. */
        if (np->flowctrl)
                mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
        mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
        /* Force media type */
        if (!np->an_enable) {
                mii_ctl = 0;
                mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
                mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
                mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
                printk (KERN_INFO "Override speed=%d, %s duplex\n",
                        np->speed, np->mii_if.full_duplex ? "Full" : "Half");
        }

        /* Perhaps move the reset here? */
        /* Reset the chip to erase previous misconfiguration. */
        if (netif_msg_hw(np))
                printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
        sundance_reset(dev, 0x00ff << 16);
        if (netif_msg_hw(np))
                printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));

        card_idx++;
        return 0;

err_out_unregister:
        unregister_netdev(dev);
err_out_unmap_rx:
        pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
err_out_unmap_tx:
        pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
err_out_cleardev:
        pci_set_drvdata(pdev, NULL);
        pci_iounmap(pdev, ioaddr);
err_out_res:
        pci_release_regions(pdev);
err_out_netdev:
        free_netdev (dev);
        return -ENODEV;
}

static int change_mtu(struct net_device *dev, int new_mtu)
{
        if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */
                return -EINVAL;
        if (netif_running(dev))
                return -EBUSY;
        dev->mtu = new_mtu;
        return 0;
}

#define eeprom_delay(ee_addr)   ioread32(ee_addr)
/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
static int __devinit eeprom_read(void __iomem *ioaddr, int location)
{
        int boguscnt = 10000;           /* Typical 1900 ticks. */
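        /* Issue a read command for this word, then poll the busy bit
           (0x8000) until the EEPROM cycle completes. */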
        iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
        do {
                eeprom_delay(ioaddr + EECtrl);
                if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
                        return ioread16(ioaddr + EEData);
                }
        } while (--boguscnt > 0);
        return 0;
}

/*  MII transceiver control section.
        Read and write the MII registers using software-generated serial
        MDIO protocol.  See the MII specifications or DP83840A data sheet
        for details.

        The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
        met by back-to-back 33 MHz PCI cycles. */
#define mdio_delay() ioread8(mdio_addr)

enum mii_reg_bits {
        MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
};
#define MDIO_EnbIn  (0)
#define MDIO_WRITE0 (MDIO_EnbOutput)
#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)

/* Generate the preamble required for initial synchronization and
   a few older transceivers. */
static void mdio_sync(void __iomem *mdio_addr)
{
        int bits = 32;

        /* Establish sync by sending at least 32 logic ones. */
        while (--bits >= 0) {
                iowrite8(MDIO_WRITE1, mdio_addr);
                mdio_delay();
                iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
                mdio_delay();
        }
}

static int mdio_read(struct net_device *dev, int phy_id, int location)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *mdio_addr = np->base + MIICtrl;
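        /* 16-bit MII read frame: the 0xf6 constant supplies the leading
           idle, start (01) and read-opcode (10) bits, followed by the
           5-bit PHY address and 5-bit register number. */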
        int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
        int i, retval = 0;

        if (np->mii_preamble_required)
                mdio_sync(mdio_addr);

        /* Shift the read command bits out. */
        for (i = 15; i >= 0; i--) {
                int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

                iowrite8(dataval, mdio_addr);
                mdio_delay();
                iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
                mdio_delay();
        }
        /* Read the two transition, 16 data, and wire-idle bits. */
        for (i = 19; i > 0; i--) {
                iowrite8(MDIO_EnbIn, mdio_addr);
                mdio_delay();
                retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
                iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
                mdio_delay();
        }
        return (retval>>1) & 0xffff;
}

static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *mdio_addr = np->base + MIICtrl;
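        /* 32-bit MII write frame: 0x5002 supplies the start (01), write
           opcode (01) and turnaround (10) bits; the PHY address, register
           number and 16 data bits are OR'd into place. */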
        int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
        int i;

        if (np->mii_preamble_required)
                mdio_sync(mdio_addr);

        /* Shift the command bits out. */
        for (i = 31; i >= 0; i--) {
                int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

                iowrite8(dataval, mdio_addr);
                mdio_delay();
                iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
                mdio_delay();
        }
        /* Clear out extra bits. */
        for (i = 2; i > 0; i--) {
                iowrite8(MDIO_EnbIn, mdio_addr);
                mdio_delay();
                iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
                mdio_delay();
        }
}

static int netdev_open(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base;
        unsigned long flags;
        int i;

        /* Do we need to reset the chip??? */

        i = request_irq(dev->irq, &intr_handler, IRQF_SHARED, dev->name, dev);
        if (i)
                return i;

        if (netif_msg_ifup(np))
                printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
                           dev->name, dev->irq);
        init_ring(dev);

        iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
        /* The Tx list pointer is written as packets are queued. */

        /* Initialize other registers. */
        __set_mac_addr(dev);
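        /* Maximum frame size is the MTU plus the 14-byte Ethernet header,
           plus 4 more for a VLAN tag when 802.1Q support is configured. */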
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
        iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
#else
        iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
#endif
        if (dev->mtu > 2047)
                iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);

        /* Configure the PCI bus bursts and FIFO thresholds. */

        if (dev->if_port == 0)
                dev->if_port = np->default_port;

        spin_lock_init(&np->mcastlock);

        set_rx_mode(dev);
        iowrite16(0, ioaddr + IntrEnable);
        iowrite16(0, ioaddr + DownCounter);
        /* Set the chip to poll every N*320nsec. */
        iowrite8(100, ioaddr + RxDMAPollPeriod);
        iowrite8(127, ioaddr + TxDMAPollPeriod);
        /* Fix DFE-580TX packet drop issue */
        if (np->pci_dev->revision >= 0x14)
                iowrite8(0x01, ioaddr + DebugCtrl1);
        netif_start_queue(dev);

        spin_lock_irqsave(&np->lock, flags);
        reset_tx(dev);
        spin_unlock_irqrestore(&np->lock, flags);

        iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);

        if (netif_msg_ifup(np))
                printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
                           "MAC Control %x, %4.4x %4.4x.\n",
                           dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
                           ioread32(ioaddr + MACCtrl0),
                           ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));

        /* Set the timer to check for link beat. */
        init_timer(&np->timer);
        np->timer.expires = jiffies + 3*HZ;
        np->timer.data = (unsigned long)dev;
        np->timer.function = &netdev_timer;                             /* timer handler */
        add_timer(&np->timer);

        /* Enable interrupts by setting the interrupt mask. */
        iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);

        return 0;
}

static void check_duplex(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base;
        int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
        int negotiated = mii_lpa & np->mii_if.advertising;
        int duplex;

        /* Force media */
        if (!np->an_enable || mii_lpa == 0xffff) {
                if (np->mii_if.full_duplex)
                        iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
                                ioaddr + MACCtrl0);
                return;
        }

        /* Autonegotiation */
        duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
        if (np->mii_if.full_duplex != duplex) {
                np->mii_if.full_duplex = duplex;
                if (netif_msg_link(np))
                        printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
                                   "negotiated capability %4.4x.\n", dev->name,
                                   duplex ? "full" : "half", np->phys[0], negotiated);
                iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0),
                        ioaddr + MACCtrl0);
        }
}

static void netdev_timer(unsigned long data)
{
        struct net_device *dev = (struct net_device *)data;
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base;
        int next_tick = 10*HZ;

        if (netif_msg_timer(np)) {
                printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
                           "Tx %x Rx %x.\n",
                           dev->name, ioread16(ioaddr + IntrEnable),
                           ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
        }
        check_duplex(dev);
        np->timer.expires = jiffies + next_tick;
        add_timer(&np->timer);
}

static void tx_timeout(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base;
        unsigned long flag;

        netif_stop_queue(dev);
        tasklet_disable(&np->tx_tasklet);
        iowrite16(0, ioaddr + IntrEnable);
        printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
                   "TxFrameId %2.2x,"
                   " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
                   ioread8(ioaddr + TxFrameId));

        {
                int i;
                for (i=0; i<TX_RING_SIZE; i++) {
                        printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
                                (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
                                le32_to_cpu(np->tx_ring[i].next_desc),
                                le32_to_cpu(np->tx_ring[i].status),
                                (le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
                                le32_to_cpu(np->tx_ring[i].frag[0].addr),
                                le32_to_cpu(np->tx_ring[i].frag[0].length));
                }
                printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
                        ioread32(np->base + TxListPtr),
                        netif_queue_stopped(dev));
                printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
                        np->cur_tx, np->cur_tx % TX_RING_SIZE,
                        np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
                printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
                printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
        }
        spin_lock_irqsave(&np->lock, flag);

        /* Stop and restart the chip's Tx processes. */
        reset_tx(dev);
        spin_unlock_irqrestore(&np->lock, flag);

        dev->if_port = 0;

        dev->trans_start = jiffies;
        np->stats.tx_errors++;
        if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
                netif_wake_queue(dev);
        }
        iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
        tasklet_enable(&np->tx_tasklet);
}


/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        int i;

        np->cur_rx = np->cur_tx = 0;
        np->dirty_rx = np->dirty_tx = 0;
        np->cur_task = 0;

        np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);

        /* Initialize all Rx descriptors. */
        for (i = 0; i < RX_RING_SIZE; i++) {
                np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
                        ((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
                np->rx_ring[i].status = 0;
                np->rx_ring[i].frag[0].length = 0;
                np->rx_skbuff[i] = NULL;
        }

        /* Fill in the Rx buffers.  Handle allocation failure gracefully. */
        for (i = 0; i < RX_RING_SIZE; i++) {
                struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
                np->rx_skbuff[i] = skb;
                if (skb == NULL)
                        break;
                skb->dev = dev;         /* Mark as being used by this device. */
                skb_reserve(skb, 2);    /* 16 byte align the IP header. */
                np->rx_ring[i].frag[0].addr = cpu_to_le32(
                        pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz,
                                PCI_DMA_FROMDEVICE));
                np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
        }
        np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

        for (i = 0; i < TX_RING_SIZE; i++) {
                np->tx_skbuff[i] = NULL;
                np->tx_ring[i].status = 0;
        }
}

static void tx_poll (unsigned long data)
{
        struct net_device *dev = (struct net_device *)data;
        struct netdev_private *np = netdev_priv(dev);
        unsigned head = np->cur_task % TX_RING_SIZE;
        struct netdev_desc *txdesc =
                &np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];

        /* Chain the next pointer */
        for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
                int entry = np->cur_task % TX_RING_SIZE;
                txdesc = &np->tx_ring[entry];
                if (np->last_tx) {
                        np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
                                entry*sizeof(struct netdev_desc));
                }
                np->last_tx = txdesc;
        }
        /* Indicate the latest descriptor of tx ring */
        txdesc->status |= cpu_to_le32(DescIntrOnTx);

        if (ioread32 (np->base + TxListPtr) == 0)
                iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
                        np->base + TxListPtr);
}

static int
start_tx (struct sk_buff *skb, struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        struct netdev_desc *txdesc;
        unsigned entry;

        /* Calculate the next Tx descriptor entry. */
        entry = np->cur_tx % TX_RING_SIZE;
        np->tx_skbuff[entry] = skb;
        txdesc = &np->tx_ring[entry];

        txdesc->next_desc = 0;
        txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
        txdesc->frag[0].addr = cpu_to_le32 (pci_map_single (np->pci_dev, skb->data,
                                                        skb->len,
                                                        PCI_DMA_TODEVICE));
        txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);

        /* Increment cur_tx before tasklet_schedule() */
        np->cur_tx++;
        mb();
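        /* The barrier orders the descriptor writes and the cur_tx update
           ahead of the tasklet body, which re-reads them in tx_poll(). */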
        /* Schedule a tx_poll() task */
        tasklet_schedule(&np->tx_tasklet);

        /* On some architectures: explicitly flush cache lines here. */
        if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1 ||
                        netif_queue_stopped(dev)) {
                netif_stop_queue (dev);
        }
        dev->trans_start = jiffies;
        if (netif_msg_tx_queued(np)) {
                printk (KERN_DEBUG
                        "%s: Transmit frame #%d queued in slot %d.\n",
                        dev->name, np->cur_tx, entry);
        }
        return 0;
}

/* Reset the hardware Tx and free all Tx buffers */
static int
reset_tx (struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base;
        struct sk_buff *skb;
        int i;
        int irq = in_interrupt();

        /* Reset tx logic, TxListPtr will be cleaned */
        iowrite16 (TxDisable, ioaddr + MACCtrl1);
        sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);

        /* free all tx skbuff */
        for (i = 0; i < TX_RING_SIZE; i++) {
                np->tx_ring[i].next_desc = 0;

                skb = np->tx_skbuff[i];
                if (skb) {
                        pci_unmap_single(np->pci_dev,
                                le32_to_cpu(np->tx_ring[i].frag[0].addr),
                                skb->len, PCI_DMA_TODEVICE);
                        if (irq)
                                dev_kfree_skb_irq (skb);
                        else
                                dev_kfree_skb (skb);
                        np->tx_skbuff[i] = NULL;
                        np->stats.tx_dropped++;
                }
        }
        np->cur_tx = np->dirty_tx = 0;
        np->cur_task = 0;

        np->last_tx = NULL;
        iowrite8(127, ioaddr + TxDMAPollPeriod);

        iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
        return 0;
}

/* The interrupt handler cleans up after the Tx thread
   and schedules the Rx polling work */
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
        struct net_device *dev = (struct net_device *)dev_instance;
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base;
        int hw_frame_id;
        int tx_cnt;
        int tx_status;
        int handled = 0;
        int i;

        do {
                int intr_status = ioread16(ioaddr + IntrStatus);
                iowrite16(intr_status, ioaddr + IntrStatus);

                if (netif_msg_intr(np))
                        printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
                                   dev->name, intr_status);

                if (!(intr_status & DEFAULT_INTR))
                        break;

                handled = 1;

                if (intr_status & (IntrRxDMADone)) {
                        iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
                                        ioaddr + IntrEnable);
                        if (np->budget < 0)
                                np->budget = RX_BUDGET;
                        tasklet_schedule(&np->rx_tasklet);
                }
                if (intr_status & (IntrTxDone | IntrDrvRqst)) {
                        tx_status = ioread16 (ioaddr + TxStatus);
                        for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
                                if (netif_msg_tx_done(np))
                                        printk("%s: Transmit status is %2.2x.\n",
                                               dev->name, tx_status);
                                if (tx_status & 0x1e) {
                                        if (netif_msg_tx_err(np))
                                                printk("%s: Transmit error status %4.4x.\n",
                                                           dev->name, tx_status);
                                        np->stats.tx_errors++;
                                        if (tx_status & 0x10)
                                                np->stats.tx_fifo_errors++;
                                        if (tx_status & 0x08)
                                                np->stats.collisions++;
                                        if (tx_status & 0x04)
                                                np->stats.tx_fifo_errors++;
                                        if (tx_status & 0x02)
                                                np->stats.tx_window_errors++;

                                        /*
                                        ** This reset has been verified on
                                        ** DFE-580TX boards ! phdm@macqel.be.
                                        */
                                        if (tx_status & 0x10) { /* TxUnderrun */
                                                /* Restart Tx FIFO and transmitter */
                                                sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
                                                /* No need to reset the Tx pointer here */
                                        }
                                        /* Restart the Tx. Need to make sure tx enabled */
                                        i = 10;
                                        do {
                                                iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
                                                if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
                                                        break;
                                                mdelay(1);
                                        } while (--i);
                                }
                                /* Yup, this is a documentation bug.  It cost me *hours*. */
                                iowrite16 (0, ioaddr + TxStatus);
                                if (tx_cnt < 0) {
                                        iowrite32(5000, ioaddr + DownCounter);
                                        break;
                                }
                                tx_status = ioread16 (ioaddr + TxStatus);
                        }
                        hw_frame_id = (tx_status >> 8) & 0xff;
                } else {
                        hw_frame_id = ioread8(ioaddr + TxFrameId);
                }

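                /* On rev >= 0x14 parts (e.g. the DFE-580TX), reclaim Tx
                   descriptors by matching the frame id the hardware reported
                   against the id stored in bits 9..2 of each descriptor's
                   status word. */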
                if (np->pci_dev->revision >= 0x14) {
                        spin_lock(&np->lock);
                        for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
                                int entry = np->dirty_tx % TX_RING_SIZE;
                                struct sk_buff *skb;
                                int sw_frame_id;
                                sw_frame_id = (le32_to_cpu(
                                        np->tx_ring[entry].status) >> 2) & 0xff;
                                if (sw_frame_id == hw_frame_id &&
                                        !(le32_to_cpu(np->tx_ring[entry].status)
                                        & 0x00010000))
                                                break;
                                if (sw_frame_id == (hw_frame_id + 1) %
                                        TX_RING_SIZE)
                                                break;
                                skb = np->tx_skbuff[entry];
                                /* Free the original skb. */
                                pci_unmap_single(np->pci_dev,
                                        le32_to_cpu(np->tx_ring[entry].frag[0].addr),
                                        skb->len, PCI_DMA_TODEVICE);
                                dev_kfree_skb_irq (np->tx_skbuff[entry]);
                                np->tx_skbuff[entry] = NULL;
                                np->tx_ring[entry].frag[0].addr = 0;
                                np->tx_ring[entry].frag[0].length = 0;
                        }
                        spin_unlock(&np->lock);
                } else {
                        spin_lock(&np->lock);
                        for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
                                int entry = np->dirty_tx % TX_RING_SIZE;
                                struct sk_buff *skb;
                                if (!(le32_to_cpu(np->tx_ring[entry].status)
                                                        & 0x00010000))
                                        break;
                                skb = np->tx_skbuff[entry];
                                /* Free the original skb. */
                                pci_unmap_single(np->pci_dev,
                                        le32_to_cpu(np->tx_ring[entry].frag[0].addr),
                                        skb->len, PCI_DMA_TODEVICE);
                                dev_kfree_skb_irq (np->tx_skbuff[entry]);
                                np->tx_skbuff[entry] = NULL;
                                np->tx_ring[entry].frag[0].addr = 0;
                                np->tx_ring[entry].frag[0].length = 0;
                        }
                        spin_unlock(&np->lock);
                }

                if (netif_queue_stopped(dev) &&
                        np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
                        /* The ring is no longer full, clear busy flag. */
                        netif_wake_queue (dev);
                }
                /* Abnormal error summary/uncommon events handlers. */
                if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
                        netdev_error(dev, intr_status);
        } while (0);
        if (netif_msg_intr(np))
                printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
                           dev->name, ioread16(ioaddr + IntrStatus));
        return IRQ_RETVAL(handled);
}
1260
 
1261
static void rx_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->cur_rx % RX_RING_SIZE;
	int boguscnt = np->budget;
	void __iomem *ioaddr = np->base;
	int received = 0;

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (1) {
		struct netdev_desc *desc = &(np->rx_ring[entry]);
		u32 frame_status = le32_to_cpu(desc->status);
		int pkt_len;

		if (--boguscnt < 0) {
			goto not_done;
		}
		if (!(frame_status & DescOwn))
			break;
		pkt_len = frame_status & 0x1fff;	/* Chip omits the CRC. */
		if (netif_msg_rx_status(np))
			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
				   frame_status);
		if (frame_status & 0x001f4000) {
			/* There was an error. */
			if (netif_msg_rx_err(np))
				printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
					   frame_status);
			np->stats.rx_errors++;
			if (frame_status & 0x00100000) np->stats.rx_length_errors++;
			if (frame_status & 0x00010000) np->stats.rx_fifo_errors++;
			if (frame_status & 0x00060000) np->stats.rx_frame_errors++;
			if (frame_status & 0x00080000) np->stats.rx_crc_errors++;
			if (frame_status & 0x00100000) {
				printk(KERN_WARNING "%s: Oversized Ethernet frame,"
					   " status %8.8x.\n",
					   dev->name, frame_status);
			}
		} else {
			struct sk_buff *skb;
#ifndef final_version
			if (netif_msg_rx_status(np))
				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
					   ", bogus_cnt %d.\n",
					   pkt_len, boguscnt);
#endif
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
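				/* The ring buffer stays mapped for DMA across the
				   copy, so hand it to the CPU before reading it and
				   back to the device afterwards. */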
				pci_dma_sync_single_for_cpu(np->pci_dev,
							    le32_to_cpu(desc->frag[0].addr),
							    np->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);

				skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
				pci_dma_sync_single_for_device(np->pci_dev,
							       le32_to_cpu(desc->frag[0].addr),
							       np->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
				skb_put(skb, pkt_len);
			} else {
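				/* Large packet: hand the ring buffer itself up the
				   stack; refill_rx() will allocate a replacement skb
				   for this slot. */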
				pci_unmap_single(np->pci_dev,
					le32_to_cpu(desc->frag[0].addr),
					np->rx_buf_sz,
					PCI_DMA_FROMDEVICE);
				skb_put(skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			/* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
			netif_rx(skb);
			dev->last_rx = jiffies;
		}
		entry = (entry + 1) % RX_RING_SIZE;
		received++;
	}
	np->cur_rx = entry;
	refill_rx(dev);
	np->budget -= received;
	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
	return;

not_done:
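	/* Budget exhausted with descriptors still pending: remember where we
	   stopped, top up the ring, and reschedule this tasklet. */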
	np->cur_rx = entry;
	refill_rx(dev);
	if (!received)
		received = 1;
	np->budget -= received;
	if (np->budget <= 0)
		np->budget = RX_BUDGET;
	tasklet_schedule(&np->rx_tasklet);
	return;
}

static void refill_rx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry;
	int cnt = 0;

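	/*
	 * dirty_rx trails cur_rx; every slot in between has been consumed by
	 * rx_poll() and needs a fresh buffer and a cleared status word before
	 * the chip may own it again.
	 */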
	/* Refill the Rx ring buffers. */
	for (; (np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
		np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
		struct sk_buff *skb;
		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(np->rx_buf_sz);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;		/* Better luck next round. */
			skb->dev = dev;		/* Mark as being used by this device. */
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			np->rx_ring[entry].frag[0].addr = cpu_to_le32(
				pci_map_single(np->pci_dev, skb->data,
					np->rx_buf_sz, PCI_DMA_FROMDEVICE));
		}
		/* Perhaps we need not reset this field. */
		np->rx_ring[entry].frag[0].length =
			cpu_to_le32(np->rx_buf_sz | LastFrag);
		np->rx_ring[entry].status = 0;
		cnt++;
	}
	return;
}

static void netdev_error(struct net_device *dev, int intr_status)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u16 mii_ctl, mii_advertise, mii_lpa;
	int speed;

	if (intr_status & LinkChange) {
		if (np->an_enable) {
			mii_advertise = mdio_read(dev, np->phys[0], MII_ADVERTISE);
			mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
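			/* The abilities both ends advertise, ANDed together, give
			   the highest common mode autonegotiation settled on. */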
			mii_advertise &= mii_lpa;
			printk(KERN_INFO "%s: Link changed: ", dev->name);
			if (mii_advertise & ADVERTISE_100FULL) {
				np->speed = 100;
				printk("100Mbps, full duplex\n");
			} else if (mii_advertise & ADVERTISE_100HALF) {
				np->speed = 100;
				printk("100Mbps, half duplex\n");
			} else if (mii_advertise & ADVERTISE_10FULL) {
				np->speed = 10;
				printk("10Mbps, full duplex\n");
			} else if (mii_advertise & ADVERTISE_10HALF) {
				np->speed = 10;
				printk("10Mbps, half duplex\n");
			} else
				printk("\n");

		} else {
			mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR);
			speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
			np->speed = speed;
			printk(KERN_INFO "%s: Link changed: %dMbps, ",
				dev->name, speed);
			printk("%s duplex.\n", (mii_ctl & BMCR_FULLDPLX) ?
				"full" : "half");
		}
		check_duplex(dev);
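		/* On a full-duplex link with flow control enabled, set a bit in
		   the multicast hash filter (presumably so MAC-control PAUSE
		   frames pass it) and turn on flow control in MACCtrl0. */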
		if (np->flowctrl && np->mii_if.full_duplex) {
			iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
				ioaddr + MulticastFilter1+2);
			iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
				ioaddr + MACCtrl0);
		}
	}
	if (intr_status & StatsMax) {
		get_stats(dev);
	}
	if (intr_status & IntrPCIErr) {
		printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
			   dev->name, intr_status);
		/* We must do a global reset of DMA to continue. */
	}
}

static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int i;

	/* We should lock this segment of code for SMP eventually, although
	   the vulnerability window is very small and statistics are
	   non-critical. */
	/* The chip only needs to report frames it silently dropped. */
	np->stats.rx_missed_errors	+= ioread8(ioaddr + RxMissed);
	np->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
	np->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
	np->stats.collisions += ioread8(ioaddr + StatsLateColl);
	np->stats.collisions += ioread8(ioaddr + StatsMultiColl);
	np->stats.collisions += ioread8(ioaddr + StatsOneColl);
	np->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
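	/* The remaining counters appear to be clear-on-read: read and discard
	   them so they do not saturate and keep StatsMax asserted. */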
	ioread8(ioaddr + StatsTxDefer);
	for (i = StatsTxDefer; i <= StatsMcastRx; i++)
		ioread8(ioaddr + i);
	np->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
	np->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
	np->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
	np->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;

	return &np->stats;
}

static void set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u16 mc_filter[4];			/* Multicast hash filter */
	u32 rx_mode;
	int i;

	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
	} else if ((dev->mc_count > multicast_filter_limit)
			   || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
	} else if (dev->mc_count) {
		struct dev_mc_list *mclist;
		int bit;
		int index;
		int crc;
		memset(mc_filter, 0, sizeof(mc_filter));
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
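			/* Hash each address: the six most significant bits of the
			   little-endian CRC, bit-reversed, index one of the 64
			   filter bits. */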
			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			for (index = 0, bit = 0; bit < 6; bit++, crc <<= 1)
				if (crc & 0x80000000) index |= 1 << bit;
			mc_filter[index/16] |= (1 << (index % 16));
		}
		rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
	} else {
		iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
		return;
	}
	if (np->mii_if.full_duplex && np->flowctrl)
		mc_filter[3] |= 0x0200;

	for (i = 0; i < 4; i++)
		iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
	iowrite8(rx_mode, ioaddr + RxMode);
}

static int __set_mac_addr(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	u16 addr16;

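	/* The station address is programmed as three little-endian 16-bit
	   words at StationAddr, StationAddr+2 and StationAddr+4. */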
	addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
	iowrite16(addr16, np->base + StationAddr);
	addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
	iowrite16(addr16, np->base + StationAddr+2);
	addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
	iowrite16(addr16, np->base + StationAddr+4);
	return 0;
}

static int check_if_running(struct net_device *dev)
{
	if (!netif_running(dev))
		return -EINVAL;
	return 0;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct netdev_private *np = netdev_priv(dev);
	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(np->pci_dev));
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	spin_lock_irq(&np->lock);
	mii_ethtool_gset(&np->mii_if, ecmd);
	spin_unlock_irq(&np->lock);
	return 0;
}

static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int res;
	spin_lock_irq(&np->lock);
	res = mii_ethtool_sset(&np->mii_if, ecmd);
	spin_unlock_irq(&np->lock);
	return res;
}

static int nway_reset(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_nway_restart(&np->mii_if);
}

static u32 get_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_link_ok(&np->mii_if);
}

static u32 get_msglevel(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return np->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct netdev_private *np = netdev_priv(dev);
	np->msg_enable = val;
}

static const struct ethtool_ops ethtool_ops = {
	.begin = check_if_running,
	.get_drvinfo = get_drvinfo,
	.get_settings = get_settings,
	.set_settings = set_settings,
	.nway_reset = nway_reset,
	.get_link = get_link,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
};

static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int rc;
	int i;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irq(&np->lock);
	rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
	spin_unlock_irq(&np->lock);
	switch (cmd) {
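	/* Private debugging hook: dump the Tx ring, queue pointers and chip
	   status to the kernel log. */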
	case SIOCDEVPRIVATE:
		for (i = 0; i < TX_RING_SIZE; i++) {
			printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
				(unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
				le32_to_cpu(np->tx_ring[i].next_desc),
				le32_to_cpu(np->tx_ring[i].status),
				(le32_to_cpu(np->tx_ring[i].status) >> 2)
					& 0xff,
				le32_to_cpu(np->tx_ring[i].frag[0].addr),
				le32_to_cpu(np->tx_ring[i].frag[0].length));
		}
		printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
			ioread32(np->base + TxListPtr),
			netif_queue_stopped(dev));
		printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
			np->cur_tx, np->cur_tx % TX_RING_SIZE,
			np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
		printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
		printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
		printk(KERN_DEBUG "TxStatus=%04x\n", ioread16(ioaddr + TxStatus));
		return 0;
	}

	return rc;
}

static int netdev_close(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	struct sk_buff *skb;
	int i;

	/* Wait for and kill the tasklets. */
	tasklet_kill(&np->rx_tasklet);
	tasklet_kill(&np->tx_tasklet);
	np->cur_tx = 0;
	np->dirty_tx = 0;
	np->cur_task = 0;
	np->last_tx = NULL;

	netif_stop_queue(dev);

	if (netif_msg_ifdown(np)) {
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
			   "Rx %4.4x Int %2.2x.\n",
			   dev->name, ioread8(ioaddr + TxStatus),
			   ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
			   dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
	}

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite16(0x0000, ioaddr + IntrEnable);

	/* Disable Rx and Tx DMA so resources can be released safely. */
	iowrite32(0x500, ioaddr + DMACtrl);

	/* Stop the chip's Tx and Rx processes. */
	iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);

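	/* Poll for up to two seconds for the Tx and Rx DMA engines to idle. */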
	for (i = 2000; i > 0; i--) {
		if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
			break;
		mdelay(1);
	}

	iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
			ioaddr + ASICCtrl + 2);

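	/* Wait for the global reset to complete; ResetBusy self-clears. */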
	for (i = 2000; i > 0; i--) {
		if ((ioread16(ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
			break;
		mdelay(1);
	}

#ifdef __i386__
	if (netif_msg_hw(np)) {
		printk("\n" KERN_DEBUG "  Tx ring at %8.8x:\n",
			   (int)(np->tx_ring_dma));
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(" #%d desc. %4.4x %8.8x %8.8x.\n",
				   i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
				   np->tx_ring[i].frag[0].length);
		printk("\n" KERN_DEBUG "  Rx ring %8.8x:\n",
			   (int)(np->rx_ring_dma));
		for (i = 0; i < /*RX_RING_SIZE*/4; i++) {
			printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
				   i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
				   np->rx_ring[i].frag[0].length);
		}
	}
#endif /* __i386__ debugging only */

	free_irq(dev->irq, dev);

	del_timer_sync(&np->timer);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].status = 0;
		skb = np->rx_skbuff[i];
		if (skb) {
			pci_unmap_single(np->pci_dev,
				le32_to_cpu(np->rx_ring[i].frag[0].addr),
				np->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(skb);
			np->rx_skbuff[i] = NULL;
		}
		np->rx_ring[i].frag[0].addr = cpu_to_le32(0xBADF00D0); /* poison */
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_ring[i].next_desc = 0;
		skb = np->tx_skbuff[i];
		if (skb) {
			pci_unmap_single(np->pci_dev,
				le32_to_cpu(np->tx_ring[i].frag[0].addr),
				skb->len, PCI_DMA_TODEVICE);
			dev_kfree_skb(skb);
			np->tx_skbuff[i] = NULL;
		}
	}

	return 0;
}

static void __devexit sundance_remove1(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct netdev_private *np = netdev_priv(dev);

		unregister_netdev(dev);
		pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring,
			np->rx_ring_dma);
		pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
			np->tx_ring_dma);
		pci_iounmap(pdev, np->base);
		pci_release_regions(pdev);
		free_netdev(dev);
		pci_set_drvdata(pdev, NULL);
	}
}

static struct pci_driver sundance_driver = {
	.name		= DRV_NAME,
	.id_table	= sundance_pci_tbl,
	.probe		= sundance_probe1,
	.remove		= __devexit_p(sundance_remove1),
};

static int __init sundance_init(void)
{
/* When a module, this is printed whether or not devices are found in probe. */
#ifdef MODULE
	printk(version);
#endif
	return pci_register_driver(&sundance_driver);
}

static void __exit sundance_exit(void)
{
	pci_unregister_driver(&sundance_driver);
}

module_init(sundance_init);
module_exit(sundance_exit);