OpenCores
URL https://opencores.org/ocsvn/test_project/test_project/trunk

Subversion Repositories test_project

linux_sd_driver/drivers/net/dl2k.c (rev 62)

/*  D-Link DL2000-based Gigabit Ethernet Adapter Linux driver */
/*
    Copyright (c) 2001, 2002 by D-Link Corporation
    Written by Edward Peng.<edward_peng@dlink.com.tw>
    Created 03-May-2001, based on Linux' sundance.c.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.
*/

#define DRV_NAME        "DL2000/TC902x-based linux driver"
#define DRV_VERSION     "v1.19"
#define DRV_RELDATE     "2007/08/12"
#include "dl2k.h"
#include <linux/dma-mapping.h>

static char version[] __devinitdata =
      KERN_INFO DRV_NAME " " DRV_VERSION " " DRV_RELDATE "\n";
#define MAX_UNITS 8
static int mtu[MAX_UNITS];
static int vlan[MAX_UNITS];
static int jumbo[MAX_UNITS];
static char *media[MAX_UNITS];
static int tx_flow = -1;
static int rx_flow = -1;
static int copy_thresh;
static int rx_coalesce = 10;    /* Rx frame count each interrupt */
static int rx_timeout = 200;    /* Rx DMA wait time in 640ns increments */
static int tx_coalesce = 16;    /* HW xmit count each TxDMAComplete */


MODULE_AUTHOR ("Edward Peng");
MODULE_DESCRIPTION ("D-Link DL2000-based Gigabit Ethernet Adapter");
MODULE_LICENSE("GPL");
module_param_array(mtu, int, NULL, 0);
module_param_array(media, charp, NULL, 0);
module_param_array(vlan, int, NULL, 0);
module_param_array(jumbo, int, NULL, 0);
module_param(tx_flow, int, 0);
module_param(rx_flow, int, 0);
module_param(copy_thresh, int, 0);
module_param(rx_coalesce, int, 0);       /* Rx frame count each interrupt */
module_param(rx_timeout, int, 0);        /* Rx DMA wait time in 640ns increments */
module_param(tx_coalesce, int, 0); /* HW xmit count each TxDMAComplete */
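/*
 * Example usage (illustrative, not part of the original source): loading
 * the module with two NICs, forcing 100 Mbps full duplex on the first
 * and autosense on the second:
 *
 *   modprobe dl2k media=100mbps_fd,auto
 *
 * mtu, media, vlan and jumbo are per-unit arrays indexed by probe order
 * (up to MAX_UNITS); the remaining parameters are global.
 */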
 

/* Enable the default interrupts */
#define DEFAULT_INTR (RxDMAComplete | HostError | IntRequested | TxDMAComplete | \
       UpdateStats | LinkEvent)
#define EnableInt() \
writew(DEFAULT_INTR, ioaddr + IntEnable)

static const int max_intrloop = 50;
static const int multicast_filter_limit = 0x40;

static int rio_open (struct net_device *dev);
static void rio_timer (unsigned long data);
static void rio_tx_timeout (struct net_device *dev);
static void alloc_list (struct net_device *dev);
static int start_xmit (struct sk_buff *skb, struct net_device *dev);
static irqreturn_t rio_interrupt (int irq, void *dev_instance);
static void rio_free_tx (struct net_device *dev, int irq);
static void tx_error (struct net_device *dev, int tx_status);
static int receive_packet (struct net_device *dev);
static void rio_error (struct net_device *dev, int int_status);
static int change_mtu (struct net_device *dev, int new_mtu);
static void set_multicast (struct net_device *dev);
static struct net_device_stats *get_stats (struct net_device *dev);
static int clear_stats (struct net_device *dev);
static int rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd);
static int rio_close (struct net_device *dev);
static int find_miiphy (struct net_device *dev);
static int parse_eeprom (struct net_device *dev);
static int read_eeprom (long ioaddr, int eep_addr);
static int mii_wait_link (struct net_device *dev, int wait);
static int mii_set_media (struct net_device *dev);
static int mii_get_media (struct net_device *dev);
static int mii_set_media_pcs (struct net_device *dev);
static int mii_get_media_pcs (struct net_device *dev);
static int mii_read (struct net_device *dev, int phy_addr, int reg_num);
static int mii_write (struct net_device *dev, int phy_addr, int reg_num,
                      u16 data);

static const struct ethtool_ops ethtool_ops;

static int __devinit
rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct net_device *dev;
        struct netdev_private *np;
        static int card_idx;
        int chip_idx = ent->driver_data;
        int err, irq;
        long ioaddr;
        static int version_printed;
        void *ring_space;
        dma_addr_t ring_dma;
        DECLARE_MAC_BUF(mac);

        if (!version_printed++)
                printk ("%s", version);

        err = pci_enable_device (pdev);
        if (err)
                return err;

        irq = pdev->irq;
        err = pci_request_regions (pdev, "dl2k");
        if (err)
                goto err_out_disable;

        pci_set_master (pdev);
        dev = alloc_etherdev (sizeof (*np));
        if (!dev) {
                err = -ENOMEM;
                goto err_out_res;
        }
        SET_NETDEV_DEV(dev, &pdev->dev);

#ifdef MEM_MAPPING
        ioaddr = pci_resource_start (pdev, 1);
        ioaddr = (long) ioremap (ioaddr, RIO_IO_SIZE);
        if (!ioaddr) {
                err = -ENOMEM;
                goto err_out_dev;
        }
#else
        ioaddr = pci_resource_start (pdev, 0);
#endif
        dev->base_addr = ioaddr;
        dev->irq = irq;
        np = netdev_priv(dev);
        np->chip_id = chip_idx;
        np->pdev = pdev;
        spin_lock_init (&np->tx_lock);
        spin_lock_init (&np->rx_lock);

        /* Parse manual configuration */
        np->an_enable = 1;
        np->tx_coalesce = 1;
        if (card_idx < MAX_UNITS) {
                if (media[card_idx] != NULL) {
                        np->an_enable = 0;
                        if (strcmp (media[card_idx], "auto") == 0 ||
                            strcmp (media[card_idx], "autosense") == 0 ||
                            strcmp (media[card_idx], "0") == 0 ) {
                                np->an_enable = 2;
                        } else if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
                            strcmp (media[card_idx], "4") == 0) {
                                np->speed = 100;
                                np->full_duplex = 1;
                        } else if (strcmp (media[card_idx], "100mbps_hd") == 0
                                   || strcmp (media[card_idx], "3") == 0) {
                                np->speed = 100;
                                np->full_duplex = 0;
                        } else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
                                   strcmp (media[card_idx], "2") == 0) {
                                np->speed = 10;
                                np->full_duplex = 1;
                        } else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
                                   strcmp (media[card_idx], "1") == 0) {
                                np->speed = 10;
                                np->full_duplex = 0;
                        } else if (strcmp (media[card_idx], "1000mbps_fd") == 0 ||
                                 strcmp (media[card_idx], "6") == 0) {
                                np->speed = 1000;
                                np->full_duplex = 1;
                        } else if (strcmp (media[card_idx], "1000mbps_hd") == 0 ||
                                 strcmp (media[card_idx], "5") == 0) {
                                np->speed = 1000;
                                np->full_duplex = 0;
                        } else {
                                np->an_enable = 1;
                        }
                }
                if (jumbo[card_idx] != 0) {
                        np->jumbo = 1;
                        dev->mtu = MAX_JUMBO;
                } else {
                        np->jumbo = 0;
                        if (mtu[card_idx] > 0 && mtu[card_idx] < PACKET_SIZE)
                                dev->mtu = mtu[card_idx];
                }
                np->vlan = (vlan[card_idx] > 0 && vlan[card_idx] < 4096) ?
                    vlan[card_idx] : 0;
                if (rx_coalesce > 0 && rx_timeout > 0) {
                        np->rx_coalesce = rx_coalesce;
                        np->rx_timeout = rx_timeout;
                        np->coalesce = 1;
                }
                np->tx_flow = (tx_flow == 0) ? 0 : 1;
                np->rx_flow = (rx_flow == 0) ? 0 : 1;

                if (tx_coalesce < 1)
                        tx_coalesce = 1;
                else if (tx_coalesce > TX_RING_SIZE-1)
                        tx_coalesce = TX_RING_SIZE - 1;
        }
        dev->open = &rio_open;
        dev->hard_start_xmit = &start_xmit;
        dev->stop = &rio_close;
        dev->get_stats = &get_stats;
        dev->set_multicast_list = &set_multicast;
        dev->do_ioctl = &rio_ioctl;
        dev->tx_timeout = &rio_tx_timeout;
        dev->watchdog_timeo = TX_TIMEOUT;
        dev->change_mtu = &change_mtu;
        SET_ETHTOOL_OPS(dev, &ethtool_ops);
#if 0
        dev->features = NETIF_F_IP_CSUM;
#endif
        pci_set_drvdata (pdev, dev);

        ring_space = pci_alloc_consistent (pdev, TX_TOTAL_SIZE, &ring_dma);
        if (!ring_space)
                goto err_out_iounmap;
        np->tx_ring = (struct netdev_desc *) ring_space;
        np->tx_ring_dma = ring_dma;

        ring_space = pci_alloc_consistent (pdev, RX_TOTAL_SIZE, &ring_dma);
        if (!ring_space)
                goto err_out_unmap_tx;
        np->rx_ring = (struct netdev_desc *) ring_space;
        np->rx_ring_dma = ring_dma;

        /* Parse eeprom data */
        parse_eeprom (dev);

        /* Find PHY address */
        err = find_miiphy (dev);
        if (err)
                goto err_out_unmap_rx;

        /* Fiber device? */
        np->phy_media = (readw(ioaddr + ASICCtrl) & PhyMedia) ? 1 : 0;
        np->link_status = 0;
        /* Set media and reset PHY */
        if (np->phy_media) {
                /* default Auto-Negotiation for fiber devices */
                if (np->an_enable == 2) {
                        np->an_enable = 1;
                }
                mii_set_media_pcs (dev);
        } else {
                /* Auto-Negotiation is mandatory for 1000BASE-T,
                   IEEE 802.3ab Annex 28D page 14 */
                if (np->speed == 1000)
                        np->an_enable = 1;
                mii_set_media (dev);
        }

        err = register_netdev (dev);
        if (err)
                goto err_out_unmap_rx;

        card_idx++;

        printk (KERN_INFO "%s: %s, %s, IRQ %d\n",
                dev->name, np->name, print_mac(mac, dev->dev_addr), irq);
        if (tx_coalesce > 1)
                printk(KERN_INFO "tx_coalesce:\t%d packets\n",
                                tx_coalesce);
        if (np->coalesce)
                printk(KERN_INFO "rx_coalesce:\t%d packets\n"
                       KERN_INFO "rx_timeout: \t%d ns\n",
                                np->rx_coalesce, np->rx_timeout*640);
        if (np->vlan)
                printk(KERN_INFO "vlan(id):\t%d\n", np->vlan);
        return 0;

      err_out_unmap_rx:
        pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
      err_out_unmap_tx:
        pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
      err_out_iounmap:
#ifdef MEM_MAPPING
        iounmap ((void *) ioaddr);

      err_out_dev:
#endif
        free_netdev (dev);

      err_out_res:
        pci_release_regions (pdev);

      err_out_disable:
        pci_disable_device (pdev);
        return err;
}

static int
find_miiphy (struct net_device *dev)
{
        int i, phy_found = 0;
        struct netdev_private *np;
        long ioaddr;
        np = netdev_priv(dev);
        ioaddr = dev->base_addr;
        np->phy_addr = 1;

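        /* Scan downward so that, when more than one PHY responds, the
           lowest address wins; 0xffff/0x0000 from mii_read() means no
           PHY at that address. */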
        for (i = 31; i >= 0; i--) {
                int mii_status = mii_read (dev, i, 1);
                if (mii_status != 0xffff && mii_status != 0x0000) {
                        np->phy_addr = i;
                        phy_found++;
                }
        }
        if (!phy_found) {
                printk (KERN_ERR "%s: No MII PHY found!\n", dev->name);
                return -ENODEV;
        }
        return 0;
}

static int
parse_eeprom (struct net_device *dev)
{
        int i, j;
        long ioaddr = dev->base_addr;
        u8 sromdata[256];
        u8 *psib;
        u32 crc;
        PSROM_t psrom = (PSROM_t) sromdata;
        struct netdev_private *np = netdev_priv(dev);

        int cid, next;

#ifdef  MEM_MAPPING
        ioaddr = pci_resource_start (np->pdev, 0);
#endif
        /* Read eeprom */
        for (i = 0; i < 128; i++) {
                ((__le16 *) sromdata)[i] = cpu_to_le16(read_eeprom (ioaddr, i));
        }
#ifdef  MEM_MAPPING
        ioaddr = dev->base_addr;
#endif
        if (np->pdev->vendor == PCI_VENDOR_ID_DLINK) {  /* D-Link Only */
                /* Check CRC */
                crc = ~ether_crc_le (256 - 4, sromdata);
                if (psrom->crc != crc) {
                        printk (KERN_ERR "%s: EEPROM data CRC error.\n",
                                        dev->name);
                        return -1;
                }
        }

        /* Set MAC address */
        for (i = 0; i < 6; i++)
                dev->dev_addr[i] = psrom->mac_addr[i];

        if (np->pdev->vendor != PCI_VENDOR_ID_DLINK) {
                return 0;
        }

        /* Parse Software Information Block */
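        /* The SIB at offset 0x30 is a chain of cells: one byte cell id,
           one byte offset of the next cell, then the cell payload. */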
        i = 0x30;
        psib = (u8 *) sromdata;
        do {
                cid = psib[i++];
                next = psib[i++];
                if ((cid == 0 && next == 0) || (cid == 0xff && next == 0xff)) {
                        printk (KERN_ERR "Cell data error\n");
                        return -1;
                }
                switch (cid) {
                case 0:  /* Format version */
                        break;
                case 1: /* End of cell */
                        return 0;
                case 2: /* Duplex Polarity */
                        np->duplex_polarity = psib[i];
                        writeb (readb (ioaddr + PhyCtrl) | psib[i],
                                ioaddr + PhyCtrl);
                        break;
                case 3: /* Wake Polarity */
                        np->wake_polarity = psib[i];
                        break;
                case 9: /* Adapter description */
                        j = (next - i > 255) ? 255 : next - i;
                        memcpy (np->name, &(psib[i]), j);
                        break;
                case 4:
                case 5:
                case 6:
                case 7:
                case 8: /* Reserved */
                        break;
                default:        /* Unknown cell */
                        return -1;
                }
                i = next;
        } while (1);

        return 0;
}

static int
rio_open (struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        long ioaddr = dev->base_addr;
        int i;
        u16 macctrl;

        i = request_irq (dev->irq, &rio_interrupt, IRQF_SHARED, dev->name, dev);
        if (i)
                return i;

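        /* The reset bits live in the upper 16 bits of the 32-bit ASICCtrl
           register, hence the 16-bit write at byte offset +2. */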
        /* Reset all logic functions */
        writew (GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset,
                ioaddr + ASICCtrl + 2);
        mdelay(10);

        /* DebugCtrl bits 4, 5, 9 must be set */
        writel (readl (ioaddr + DebugCtrl) | 0x0230, ioaddr + DebugCtrl);

        /* Jumbo frame */
        if (np->jumbo != 0)
                writew (MAX_JUMBO+14, ioaddr + MaxFrameSize);

        alloc_list (dev);

        /* Get station address */
        for (i = 0; i < 6; i++)
                writeb (dev->dev_addr[i], ioaddr + StationAddr0 + i);

        set_multicast (dev);
        if (np->coalesce) {
                writel (np->rx_coalesce | np->rx_timeout << 16,
                        ioaddr + RxDMAIntCtrl);
        }
        /* Set RIO to poll every N*320nsec. */
        writeb (0x20, ioaddr + RxDMAPollPeriod);
        writeb (0xff, ioaddr + TxDMAPollPeriod);
        writeb (0x30, ioaddr + RxDMABurstThresh);
        writeb (0x30, ioaddr + RxDMAUrgentThresh);
        writel (0x0007ffff, ioaddr + RmonStatMask);
        /* clear statistics */
        clear_stats (dev);

        /* VLAN supported */
        if (np->vlan) {
                /* priority field in RxDMAIntCtrl  */
                writel (readl(ioaddr + RxDMAIntCtrl) | 0x7 << 10,
                        ioaddr + RxDMAIntCtrl);
                /* VLANId */
                writew (np->vlan, ioaddr + VLANId);
                /* Length/Type should be 0x8100 */
                writel (0x8100 << 16 | np->vlan, ioaddr + VLANTag);
                /* Enable AutoVLANuntagging, but disable AutoVLANtagging.
                   VLAN information tagged by TFC's VID, CFI fields. */
                writel (readl (ioaddr + MACCtrl) | AutoVLANuntagging,
                        ioaddr + MACCtrl);
        }

        init_timer (&np->timer);
        np->timer.expires = jiffies + 1*HZ;
        np->timer.data = (unsigned long) dev;
        np->timer.function = &rio_timer;
        add_timer (&np->timer);

        /* Start Tx/Rx */
        writel (readl (ioaddr + MACCtrl) | StatsEnable | RxEnable | TxEnable,
                        ioaddr + MACCtrl);

        macctrl = 0;
        macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
        macctrl |= (np->full_duplex) ? DuplexSelect : 0;
        macctrl |= (np->tx_flow) ? TxFlowControlEnable : 0;
        macctrl |= (np->rx_flow) ? RxFlowControlEnable : 0;
        writew(macctrl, ioaddr + MACCtrl);

        netif_start_queue (dev);

        /* Enable default interrupts */
        EnableInt ();
        return 0;
}

static void
rio_timer (unsigned long data)
{
        struct net_device *dev = (struct net_device *)data;
        struct netdev_private *np = netdev_priv(dev);
        unsigned int entry;
        int next_tick = 1*HZ;
        unsigned long flags;

        spin_lock_irqsave(&np->rx_lock, flags);
        /* Recover rx ring exhausted error */
        if (np->cur_rx - np->old_rx >= RX_RING_SIZE) {
                printk(KERN_INFO "Try to recover rx ring exhausted...\n");
                /* Re-allocate skbuffs to fill the descriptor ring */
                for (; np->cur_rx - np->old_rx > 0; np->old_rx++) {
                        struct sk_buff *skb;
                        entry = np->old_rx % RX_RING_SIZE;
                        /* Dropped packets don't need to re-allocate */
                        if (np->rx_skbuff[entry] == NULL) {
                                skb = dev_alloc_skb (np->rx_buf_sz);
                                if (skb == NULL) {
                                        np->rx_ring[entry].fraginfo = 0;
                                        printk (KERN_INFO
                                                "%s: Still unable to re-allocate Rx skbuff.#%d\n",
                                                dev->name, entry);
                                        break;
                                }
                                np->rx_skbuff[entry] = skb;
                                /* 16 byte align the IP header */
                                skb_reserve (skb, 2);
                                np->rx_ring[entry].fraginfo =
                                    cpu_to_le64 (pci_map_single
                                         (np->pdev, skb->data, np->rx_buf_sz,
                                          PCI_DMA_FROMDEVICE));
                        }
                        np->rx_ring[entry].fraginfo |=
                            cpu_to_le64((u64)np->rx_buf_sz << 48);
                        np->rx_ring[entry].status = 0;
                } /* end for */
        } /* end if */
        spin_unlock_irqrestore (&np->rx_lock, flags);
        np->timer.expires = jiffies + next_tick;
        add_timer(&np->timer);
}

static void
rio_tx_timeout (struct net_device *dev)
{
        long ioaddr = dev->base_addr;

        printk (KERN_INFO "%s: Tx timed out (%4.4x), is buffer full?\n",
                dev->name, readl (ioaddr + TxStatus));
        rio_free_tx(dev, 0);
        dev->if_port = 0;
        dev->trans_start = jiffies;
}

 /* allocate and initialize Tx and Rx descriptors */
static void
alloc_list (struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        int i;

        np->cur_rx = np->cur_tx = 0;
        np->old_rx = np->old_tx = 0;
        np->rx_buf_sz = (dev->mtu <= 1500 ? PACKET_SIZE : dev->mtu + 32);

        /* Initialize Tx descriptors, TFDListPtr is set in start_xmit(). */
        for (i = 0; i < TX_RING_SIZE; i++) {
                np->tx_skbuff[i] = NULL;
                np->tx_ring[i].status = cpu_to_le64 (TFDDone);
                np->tx_ring[i].next_desc = cpu_to_le64 (np->tx_ring_dma +
                                              ((i+1)%TX_RING_SIZE) *
                                              sizeof (struct netdev_desc));
        }

        /* Initialize Rx descriptors */
        for (i = 0; i < RX_RING_SIZE; i++) {
                np->rx_ring[i].next_desc = cpu_to_le64 (np->rx_ring_dma +
                                                ((i + 1) % RX_RING_SIZE) *
                                                sizeof (struct netdev_desc));
                np->rx_ring[i].status = 0;
                np->rx_ring[i].fraginfo = 0;
                np->rx_skbuff[i] = NULL;
        }

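        /* Descriptor fraginfo layout used throughout this driver: bits
           0..47 carry the buffer bus address (see desc_to_dma() and
           DMA_48BIT_MASK), bits 48..63 carry the fragment length, hence
           the "<< 48" below. */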
        /* Allocate the rx buffers */
        for (i = 0; i < RX_RING_SIZE; i++) {
                /* Allocate fixed-size skbuffs */
                struct sk_buff *skb = dev_alloc_skb (np->rx_buf_sz);
                np->rx_skbuff[i] = skb;
                if (skb == NULL) {
                        printk (KERN_ERR
                                "%s: alloc_list: allocate Rx buffer error! ",
                                dev->name);
                        break;
                }
                skb_reserve (skb, 2);   /* 16 byte align the IP header. */
                /* Rubicon now supports 40 bits of addressing space. */
                np->rx_ring[i].fraginfo =
                    cpu_to_le64 ( pci_map_single (
                                  np->pdev, skb->data, np->rx_buf_sz,
                                  PCI_DMA_FROMDEVICE));
                np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48);
        }

        /* Set RFDListPtr */
        writel (np->rx_ring_dma, dev->base_addr + RFDListPtr0);
        writel (0, dev->base_addr + RFDListPtr1);

        return;
}

static int
start_xmit (struct sk_buff *skb, struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        struct netdev_desc *txdesc;
        unsigned entry;
        u32 ioaddr;
        u64 tfc_vlan_tag = 0;

        if (np->link_status == 0) {      /* Link Down */
                dev_kfree_skb(skb);
                return 0;
        }
        ioaddr = dev->base_addr;
        entry = np->cur_tx % TX_RING_SIZE;
        np->tx_skbuff[entry] = skb;
        txdesc = &np->tx_ring[entry];

#if 0
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                txdesc->status |=
                    cpu_to_le64 (TCPChecksumEnable | UDPChecksumEnable |
                                 IPChecksumEnable);
        }
#endif
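        /* TFC VLAN fields: VLANTagInsert enables tagging, the 12-bit VID
           sits at bits 32..43 and the user priority at bits 45..47 of the
           descriptor status word. */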
        if (np->vlan) {
                tfc_vlan_tag = VLANTagInsert |
                    ((u64)np->vlan << 32) |
                    ((u64)skb->priority << 45);
        }
        txdesc->fraginfo = cpu_to_le64 (pci_map_single (np->pdev, skb->data,
                                                        skb->len,
                                                        PCI_DMA_TODEVICE));
        txdesc->fraginfo |= cpu_to_le64((u64)skb->len << 48);

        /* DL2K bug: DMA fails to get next descriptor ptr in 10Mbps mode
         * Work around: Always use 1 descriptor in 10Mbps mode */
        if (entry % np->tx_coalesce == 0 || np->speed == 10)
                txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag |
                                              WordAlignDisable |
                                              TxDMAIndicate |
                                              (1 << FragCountShift));
        else
                txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag |
                                              WordAlignDisable |
                                              (1 << FragCountShift));

        /* TxDMAPollNow */
        writel (readl (ioaddr + DMACtrl) | 0x00001000, ioaddr + DMACtrl);
        /* Schedule ISR */
        writel(10000, ioaddr + CountDown);
        np->cur_tx = (np->cur_tx + 1) % TX_RING_SIZE;
        if ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
                        < TX_QUEUE_LEN - 1 && np->speed != 10) {
                /* do nothing */
        } else if (!netif_queue_stopped(dev)) {
                netif_stop_queue (dev);
        }

        /* The first TFDListPtr */
        if (readl (dev->base_addr + TFDListPtr0) == 0) {
                writel (np->tx_ring_dma + entry * sizeof (struct netdev_desc),
                        dev->base_addr + TFDListPtr0);
                writel (0, dev->base_addr + TFDListPtr1);
        }

        /* NETDEV WATCHDOG timer */
        dev->trans_start = jiffies;
        return 0;
}

static irqreturn_t
rio_interrupt (int irq, void *dev_instance)
{
        struct net_device *dev = dev_instance;
        struct netdev_private *np;
        unsigned int_status;
        long ioaddr;
        int cnt = max_intrloop;
        int handled = 0;

        ioaddr = dev->base_addr;
        np = netdev_priv(dev);
        while (1) {
                int_status = readw (ioaddr + IntStatus);
                writew (int_status, ioaddr + IntStatus);
                int_status &= DEFAULT_INTR;
                if (int_status == 0 || --cnt < 0)
                        break;
                handled = 1;
                /* Processing received packets */
                if (int_status & RxDMAComplete)
                        receive_packet (dev);
                /* TxDMAComplete interrupt */
                if ((int_status & (TxDMAComplete | IntRequested))) {
                        int tx_status;
                        tx_status = readl (ioaddr + TxStatus);
                        if (tx_status & 0x01)
                                tx_error (dev, tx_status);
                        /* Free used tx skbuffs */
                        rio_free_tx (dev, 1);
                }

                /* Handle uncommon events */
                if (int_status &
                    (HostError | LinkEvent | UpdateStats))
                        rio_error (dev, int_status);
        }
        if (np->cur_tx != np->old_tx)
                writel (100, ioaddr + CountDown);
        return IRQ_RETVAL(handled);
}

static inline dma_addr_t desc_to_dma(struct netdev_desc *desc)
{
        return le64_to_cpu(desc->fraginfo) & DMA_48BIT_MASK;
}

static void
rio_free_tx (struct net_device *dev, int irq)
{
        struct netdev_private *np = netdev_priv(dev);
        int entry = np->old_tx % TX_RING_SIZE;
        int tx_use = 0;
        unsigned long flag = 0;

        if (irq)
                spin_lock(&np->tx_lock);
        else
                spin_lock_irqsave(&np->tx_lock, flag);

        /* Free used tx skbuffs */
        while (entry != np->cur_tx) {
                struct sk_buff *skb;

                if (!(np->tx_ring[entry].status & cpu_to_le64(TFDDone)))
                        break;
                skb = np->tx_skbuff[entry];
                pci_unmap_single (np->pdev,
                                  desc_to_dma(&np->tx_ring[entry]),
                                  skb->len, PCI_DMA_TODEVICE);
                if (irq)
                        dev_kfree_skb_irq (skb);
                else
                        dev_kfree_skb (skb);

                np->tx_skbuff[entry] = NULL;
                entry = (entry + 1) % TX_RING_SIZE;
                tx_use++;
        }
        if (irq)
                spin_unlock(&np->tx_lock);
        else
                spin_unlock_irqrestore(&np->tx_lock, flag);
        np->old_tx = entry;

        /* If the ring is no longer full, clear tx_full and
           call netif_wake_queue() */

        if (netif_queue_stopped(dev) &&
            ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
            < TX_QUEUE_LEN - 1 || np->speed == 10)) {
                netif_wake_queue (dev);
        }
}

static void
tx_error (struct net_device *dev, int tx_status)
{
        struct netdev_private *np;
        long ioaddr = dev->base_addr;
        int frame_id;
        int i;

        np = netdev_priv(dev);

        frame_id = (tx_status & 0xffff0000);
        printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n",
                dev->name, tx_status, frame_id);
        np->stats.tx_errors++;
        /* Transmit Underrun */
        if (tx_status & 0x10) {
                np->stats.tx_fifo_errors++;
                writew (readw (ioaddr + TxStartThresh) + 0x10,
                        ioaddr + TxStartThresh);
                /* Transmit Underrun needs to set TxReset, DMAReset, FIFOReset */
                writew (TxReset | DMAReset | FIFOReset | NetworkReset,
                        ioaddr + ASICCtrl + 2);
                /* Wait for ResetBusy bit clear */
                for (i = 50; i > 0; i--) {
                        if ((readw (ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
                                break;
                        mdelay (1);
                }
                rio_free_tx (dev, 1);
                /* Reset TFDListPtr */
                writel (np->tx_ring_dma +
                        np->old_tx * sizeof (struct netdev_desc),
                        dev->base_addr + TFDListPtr0);
                writel (0, dev->base_addr + TFDListPtr1);

                /* Let TxStartThresh stay default value */
        }
        /* Late Collision */
        if (tx_status & 0x04) {
                np->stats.tx_fifo_errors++;
                /* TxReset and clear FIFO */
                writew (TxReset | FIFOReset, ioaddr + ASICCtrl + 2);
                /* Wait reset done */
                for (i = 50; i > 0; i--) {
                        if ((readw (ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
                                break;
                        mdelay (1);
                }
                /* Let TxStartThresh stay default value */
        }
        /* Maximum Collisions */
#ifdef ETHER_STATS
        if (tx_status & 0x08)
                np->stats.collisions16++;
#else
        if (tx_status & 0x08)
                np->stats.collisions++;
#endif
        /* Restart the Tx */
        writel (readw (dev->base_addr + MACCtrl) | TxEnable, ioaddr + MACCtrl);
}

static int
receive_packet (struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        int entry = np->cur_rx % RX_RING_SIZE;
        int cnt = 30;

        /* If RFDDone, FrameStart and FrameEnd set, there is a new packet in. */
        while (1) {
                struct netdev_desc *desc = &np->rx_ring[entry];
                int pkt_len;
                u64 frame_status;

                if (!(desc->status & cpu_to_le64(RFDDone)) ||
                    !(desc->status & cpu_to_le64(FrameStart)) ||
                    !(desc->status & cpu_to_le64(FrameEnd)))
                        break;

                /* Chip omits the CRC. */
                frame_status = le64_to_cpu(desc->status);
                pkt_len = frame_status & 0xffff;
                if (--cnt < 0)
                        break;
                /* Update rx error statistics, drop packet. */
                if (frame_status & RFS_Errors) {
                        np->stats.rx_errors++;
                        if (frame_status & (RxRuntFrame | RxLengthError))
                                np->stats.rx_length_errors++;
                        if (frame_status & RxFCSError)
                                np->stats.rx_crc_errors++;
                        if (frame_status & RxAlignmentError && np->speed != 1000)
                                np->stats.rx_frame_errors++;
                        if (frame_status & RxFIFOOverrun)
                                np->stats.rx_fifo_errors++;
                } else {
                        struct sk_buff *skb;

                        /* Small skbuffs for short packets */
                        if (pkt_len > copy_thresh) {
864
                                pci_unmap_single (np->pdev,
865
                                                  desc_to_dma(desc),
866
                                                  np->rx_buf_sz,
867
                                                  PCI_DMA_FROMDEVICE);
868
                                skb_put (skb = np->rx_skbuff[entry], pkt_len);
869
                                np->rx_skbuff[entry] = NULL;
870
                        } else if ((skb = dev_alloc_skb (pkt_len + 2)) != NULL) {
871
                                pci_dma_sync_single_for_cpu(np->pdev,
872
                                                            desc_to_dma(desc),
873
                                                            np->rx_buf_sz,
874
                                                            PCI_DMA_FROMDEVICE);
875
                                /* 16 byte align the IP header */
876
                                skb_reserve (skb, 2);
877
                                skb_copy_to_linear_data (skb,
878
                                                  np->rx_skbuff[entry]->data,
879
                                                  pkt_len);
880
                                skb_put (skb, pkt_len);
881
                                pci_dma_sync_single_for_device(np->pdev,
882
                                                               desc_to_dma(desc),
883
                                                               np->rx_buf_sz,
884
                                                               PCI_DMA_FROMDEVICE);
885
                        }
886
                        skb->protocol = eth_type_trans (skb, dev);
887
#if 0
888
                        /* Checksum done by hw, but csum value unavailable. */
889
                        if (np->pdev->pci_rev_id >= 0x0c &&
890
                                !(frame_status & (TCPError | UDPError | IPError))) {
891
                                skb->ip_summed = CHECKSUM_UNNECESSARY;
892
                        }
893
#endif
894
                        netif_rx (skb);
895
                        dev->last_rx = jiffies;
896
                }
897
                entry = (entry + 1) % RX_RING_SIZE;
898
        }
899
        spin_lock(&np->rx_lock);
900
        np->cur_rx = entry;
901
        /* Re-allocate skbuffs to fill the descriptor ring */
902
        entry = np->old_rx;
903
        while (entry != np->cur_rx) {
904
                struct sk_buff *skb;
905
                /* Dropped packets don't need to re-allocate */
906
                if (np->rx_skbuff[entry] == NULL) {
907
                        skb = dev_alloc_skb (np->rx_buf_sz);
908
                        if (skb == NULL) {
909
                                np->rx_ring[entry].fraginfo = 0;
910
                                printk (KERN_INFO
911
                                        "%s: receive_packet: "
912
                                        "Unable to re-allocate Rx skbuff.#%d\n",
913
                                        dev->name, entry);
914
                                break;
915
                        }
916
                        np->rx_skbuff[entry] = skb;
917
                        /* 16 byte align the IP header */
918
                        skb_reserve (skb, 2);
919
                        np->rx_ring[entry].fraginfo =
920
                            cpu_to_le64 (pci_map_single
921
                                         (np->pdev, skb->data, np->rx_buf_sz,
922
                                          PCI_DMA_FROMDEVICE));
923
                }
924
                np->rx_ring[entry].fraginfo |=
925
                    cpu_to_le64((u64)np->rx_buf_sz << 48);
926
                np->rx_ring[entry].status = 0;
927
                entry = (entry + 1) % RX_RING_SIZE;
928
        }
929
        np->old_rx = entry;
930
        spin_unlock(&np->rx_lock);
931
        return 0;
932
}
933
 
934
static void
935
rio_error (struct net_device *dev, int int_status)
936
{
937
        long ioaddr = dev->base_addr;
938
        struct netdev_private *np = netdev_priv(dev);
939
        u16 macctrl;
940
 
941
        /* Link change event */
942
        if (int_status & LinkEvent) {
943
                if (mii_wait_link (dev, 10) == 0) {
944
                        printk (KERN_INFO "%s: Link up\n", dev->name);
945
                        if (np->phy_media)
946
                                mii_get_media_pcs (dev);
947
                        else
948
                                mii_get_media (dev);
949
                        if (np->speed == 1000)
950
                                np->tx_coalesce = tx_coalesce;
951
                        else
952
                                np->tx_coalesce = 1;
953
                        macctrl = 0;
954
                        macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
955
                        macctrl |= (np->full_duplex) ? DuplexSelect : 0;
956
                        macctrl |= (np->tx_flow) ?
957
                                TxFlowControlEnable : 0;
958
                        macctrl |= (np->rx_flow) ?
959
                                RxFlowControlEnable : 0;
960
                        writew(macctrl, ioaddr + MACCtrl);
961
                        np->link_status = 1;
962
                        netif_carrier_on(dev);
963
                } else {
964
                        printk (KERN_INFO "%s: Link off\n", dev->name);
965
                        np->link_status = 0;
966
                        netif_carrier_off(dev);
967
                }
968
        }
969
 
970
        /* UpdateStats statistics registers */
971
        if (int_status & UpdateStats) {
972
                get_stats (dev);
973
        }
974
 
975
        /* PCI Error, a catastronphic error related to the bus interface
976
           occurs, set GlobalReset and HostReset to reset. */
977
        if (int_status & HostError) {
978
                printk (KERN_ERR "%s: HostError! IntStatus %4.4x.\n",
979
                        dev->name, int_status);
980
                writew (GlobalReset | HostReset, ioaddr + ASICCtrl + 2);
981
                mdelay (500);
982
        }
983
}
984
 
985
static struct net_device_stats *
986
get_stats (struct net_device *dev)
987
{
988
        long ioaddr = dev->base_addr;
989
        struct netdev_private *np = netdev_priv(dev);
990
#ifdef MEM_MAPPING
991
        int i;
992
#endif
993
        unsigned int stat_reg;
994
 
995
        /* All statistics registers need to be acknowledged,
996
           else statistic overflow could cause problems */
997
 
998
        np->stats.rx_packets += readl (ioaddr + FramesRcvOk);
999
        np->stats.tx_packets += readl (ioaddr + FramesXmtOk);
1000
        np->stats.rx_bytes += readl (ioaddr + OctetRcvOk);
1001
        np->stats.tx_bytes += readl (ioaddr + OctetXmtOk);
1002
 
1003
        np->stats.multicast = readl (ioaddr + McstFramesRcvdOk);
1004
        np->stats.collisions += readl (ioaddr + SingleColFrames)
1005
                             +  readl (ioaddr + MultiColFrames);
1006
 
1007
        /* detailed tx errors */
1008
        stat_reg = readw (ioaddr + FramesAbortXSColls);
1009
        np->stats.tx_aborted_errors += stat_reg;
1010
        np->stats.tx_errors += stat_reg;
1011
 
1012
        stat_reg = readw (ioaddr + CarrierSenseErrors);
1013
        np->stats.tx_carrier_errors += stat_reg;
1014
        np->stats.tx_errors += stat_reg;
1015
 
1016
        /* Clear all other statistic register. */
1017
        readl (ioaddr + McstOctetXmtOk);
1018
        readw (ioaddr + BcstFramesXmtdOk);
1019
        readl (ioaddr + McstFramesXmtdOk);
1020
        readw (ioaddr + BcstFramesRcvdOk);
1021
        readw (ioaddr + MacControlFramesRcvd);
1022
        readw (ioaddr + FrameTooLongErrors);
1023
        readw (ioaddr + InRangeLengthErrors);
1024
        readw (ioaddr + FramesCheckSeqErrors);
1025
        readw (ioaddr + FramesLostRxErrors);
1026
        readl (ioaddr + McstOctetXmtOk);
1027
        readl (ioaddr + BcstOctetXmtOk);
1028
        readl (ioaddr + McstFramesXmtdOk);
1029
        readl (ioaddr + FramesWDeferredXmt);
1030
        readl (ioaddr + LateCollisions);
1031
        readw (ioaddr + BcstFramesXmtdOk);
1032
        readw (ioaddr + MacControlFramesXmtd);
1033
        readw (ioaddr + FramesWEXDeferal);
1034
 
1035
#ifdef MEM_MAPPING
1036
        for (i = 0x100; i <= 0x150; i += 4)
1037
                readl (ioaddr + i);
1038
#endif
1039
        readw (ioaddr + TxJumboFrames);
1040
        readw (ioaddr + RxJumboFrames);
1041
        readw (ioaddr + TCPCheckSumErrors);
1042
        readw (ioaddr + UDPCheckSumErrors);
1043
        readw (ioaddr + IPCheckSumErrors);
1044
        return &np->stats;
1045
}
1046
 
1047
static int
1048
clear_stats (struct net_device *dev)
1049
{
1050
        long ioaddr = dev->base_addr;
1051
#ifdef MEM_MAPPING
1052
        int i;
1053
#endif
1054
 
1055
        /* All statistics registers need to be acknowledged,
1056
           else statistic overflow could cause problems */
1057
        readl (ioaddr + FramesRcvOk);
1058
        readl (ioaddr + FramesXmtOk);
1059
        readl (ioaddr + OctetRcvOk);
1060
        readl (ioaddr + OctetXmtOk);
1061
 
1062
        readl (ioaddr + McstFramesRcvdOk);
1063
        readl (ioaddr + SingleColFrames);
1064
        readl (ioaddr + MultiColFrames);
1065
        readl (ioaddr + LateCollisions);
1066
        /* detailed rx errors */
1067
        readw (ioaddr + FrameTooLongErrors);
1068
        readw (ioaddr + InRangeLengthErrors);
1069
        readw (ioaddr + FramesCheckSeqErrors);
1070
        readw (ioaddr + FramesLostRxErrors);
1071
 
1072
        /* detailed tx errors */
1073
        readw (ioaddr + FramesAbortXSColls);
1074
        readw (ioaddr + CarrierSenseErrors);
1075
 
1076
        /* Clear all other statistic register. */
1077
        readl (ioaddr + McstOctetXmtOk);
1078
        readw (ioaddr + BcstFramesXmtdOk);
1079
        readl (ioaddr + McstFramesXmtdOk);
1080
        readw (ioaddr + BcstFramesRcvdOk);
1081
        readw (ioaddr + MacControlFramesRcvd);
1082
        readl (ioaddr + McstOctetXmtOk);
1083
        readl (ioaddr + BcstOctetXmtOk);
1084
        readl (ioaddr + McstFramesXmtdOk);
1085
        readl (ioaddr + FramesWDeferredXmt);
1086
        readw (ioaddr + BcstFramesXmtdOk);
1087
        readw (ioaddr + MacControlFramesXmtd);
1088
        readw (ioaddr + FramesWEXDeferal);
1089
#ifdef MEM_MAPPING
1090
        for (i = 0x100; i <= 0x150; i += 4)
1091
                readl (ioaddr + i);
1092
#endif
1093
        readw (ioaddr + TxJumboFrames);
1094
        readw (ioaddr + RxJumboFrames);
1095
        readw (ioaddr + TCPCheckSumErrors);
1096
        readw (ioaddr + UDPCheckSumErrors);
1097
        readw (ioaddr + IPCheckSumErrors);
1098
        return 0;
1099
}
1100
 
1101
 
1102
static int
1103
change_mtu (struct net_device *dev, int new_mtu)
1104
{
1105
        struct netdev_private *np = netdev_priv(dev);
1106
        int max = (np->jumbo) ? MAX_JUMBO : 1536;
1107
 
1108
        if ((new_mtu < 68) || (new_mtu > max)) {
1109
                return -EINVAL;
1110
        }
1111
 
1112
        dev->mtu = new_mtu;
1113
 
1114
        return 0;
1115
}
1116
 
1117
static void
1118
set_multicast (struct net_device *dev)
1119
{
1120
        long ioaddr = dev->base_addr;
1121
        u32 hash_table[2];
1122
        u16 rx_mode = 0;
1123
        struct netdev_private *np = netdev_priv(dev);
1124
 
1125
        hash_table[0] = hash_table[1] = 0;
1126
        /* RxFlowcontrol DA: 01-80-C2-00-00-01. Hash index=0x39 */
1127
        hash_table[1] |= 0x02000000;
1128
        if (dev->flags & IFF_PROMISC) {
1129
                /* Receive all frames promiscuously. */
1130
                rx_mode = ReceiveAllFrames;
1131
        } else if ((dev->flags & IFF_ALLMULTI) ||
1132
                        (dev->mc_count > multicast_filter_limit)) {
1133
                /* Receive broadcast and multicast frames */
1134
                rx_mode = ReceiveBroadcast | ReceiveMulticast | ReceiveUnicast;
1135
        } else if (dev->mc_count > 0) {
1136
                int i;
1137
                struct dev_mc_list *mclist;
1138
                /* Receive broadcast frames and multicast frames filtering
1139
                   by Hashtable */
1140
                rx_mode =
1141
                    ReceiveBroadcast | ReceiveMulticastHash | ReceiveUnicast;
1142
                for (i=0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1143
                                i++, mclist=mclist->next)
1144
                {
1145
                        int bit, index = 0;
1146
                        int crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
1147
                        /* The inverted high significant 6 bits of CRC are
1148
                           used as an index to hashtable */
1149
                        for (bit = 0; bit < 6; bit++)
1150
                                if (crc & (1 << (31 - bit)))
1151
                                        index |= (1 << bit);
1152
                        hash_table[index / 32] |= (1 << (index % 32));
1153
                }
1154
        } else {
1155
                rx_mode = ReceiveBroadcast | ReceiveUnicast;
1156
        }
1157
        if (np->vlan) {
1158
                /* ReceiveVLANMatch field in ReceiveMode */
1159
                rx_mode |= ReceiveVLANMatch;
1160
        }
1161
 
1162
        writel (hash_table[0], ioaddr + HashTable0);
1163
        writel (hash_table[1], ioaddr + HashTable1);
1164
        writew (rx_mode, ioaddr + ReceiveMode);
1165
}
1166
 
1167
static void rio_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1168
{
1169
        struct netdev_private *np = netdev_priv(dev);
1170
        strcpy(info->driver, "dl2k");
1171
        strcpy(info->version, DRV_VERSION);
1172
        strcpy(info->bus_info, pci_name(np->pdev));
1173
}
1174
 
1175
static int rio_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1176
{
1177
        struct netdev_private *np = netdev_priv(dev);
1178
        if (np->phy_media) {
1179
                /* fiber device */
1180
                cmd->supported = SUPPORTED_Autoneg | SUPPORTED_FIBRE;
1181
                cmd->advertising= ADVERTISED_Autoneg | ADVERTISED_FIBRE;
1182
                cmd->port = PORT_FIBRE;
1183
                cmd->transceiver = XCVR_INTERNAL;
1184
        } else {
1185
                /* copper device */
1186
                cmd->supported = SUPPORTED_10baseT_Half |
1187
                        SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half
1188
                        | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full |
1189
                        SUPPORTED_Autoneg | SUPPORTED_MII;
1190
                cmd->advertising = ADVERTISED_10baseT_Half |
1191
                        ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half |
1192
                        ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full|
1193
                        ADVERTISED_Autoneg | ADVERTISED_MII;
1194
                cmd->port = PORT_MII;
1195
                cmd->transceiver = XCVR_INTERNAL;
1196
        }
1197
        if ( np->link_status ) {
1198
                cmd->speed = np->speed;
1199
                cmd->duplex = np->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
1200
        } else {
1201
                cmd->speed = -1;
1202
                cmd->duplex = -1;
1203
        }
1204
        if ( np->an_enable)
1205
                cmd->autoneg = AUTONEG_ENABLE;
1206
        else
1207
                cmd->autoneg = AUTONEG_DISABLE;
1208
 
1209
        cmd->phy_address = np->phy_addr;
1210
        return 0;
1211
}
1212
 
1213
static int rio_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1214
{
1215
        struct netdev_private *np = netdev_priv(dev);
1216
        netif_carrier_off(dev);
1217
        if (cmd->autoneg == AUTONEG_ENABLE) {
1218
                if (np->an_enable)
1219
                        return 0;
1220
                else {
1221
                        np->an_enable = 1;
1222
                        mii_set_media(dev);
1223
                        return 0;
1224
                }
1225
        } else {
1226
                np->an_enable = 0;
1227
                if (np->speed == 1000) {
1228
                        cmd->speed = SPEED_100;
1229
                        cmd->duplex = DUPLEX_FULL;
1230
                        printk("Warning!! Can't disable Auto negotiation in 1000Mbps, change to Manual 100Mbps, Full duplex.\n");
1231
                }
1232
                switch(cmd->speed + cmd->duplex) {
1233
 
1234
                case SPEED_10 + DUPLEX_HALF:
1235
                        np->speed = 10;
1236
                        np->full_duplex = 0;
1237
                        break;
1238
 
1239
                case SPEED_10 + DUPLEX_FULL:
1240
                        np->speed = 10;
1241
                        np->full_duplex = 1;
1242
                        break;
1243
                case SPEED_100 + DUPLEX_HALF:
1244
                        np->speed = 100;
1245
                        np->full_duplex = 0;
1246
                        break;
1247
                case SPEED_100 + DUPLEX_FULL:
1248
                        np->speed = 100;
1249
                        np->full_duplex = 1;
1250
                        break;
1251
                case SPEED_1000 + DUPLEX_HALF:/* not supported */
1252
                case SPEED_1000 + DUPLEX_FULL:/* not supported */
1253
                default:
1254
                        return -EINVAL;
1255
                }
1256
                mii_set_media(dev);
1257
        }
1258
        return 0;
1259
}
1260
 
1261
static u32 rio_get_link(struct net_device *dev)
1262
{
1263
        struct netdev_private *np = netdev_priv(dev);
1264
        return np->link_status;
1265
}
1266
 
1267
static const struct ethtool_ops ethtool_ops = {
1268
        .get_drvinfo = rio_get_drvinfo,
1269
        .get_settings = rio_get_settings,
1270
        .set_settings = rio_set_settings,
1271
        .get_link = rio_get_link,
1272
};
1273
 
1274
static int
1275
rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
1276
{
1277
        int phy_addr;
1278
        struct netdev_private *np = netdev_priv(dev);
1279
        struct mii_data *miidata = (struct mii_data *) &rq->ifr_ifru;
1280
 
1281
        struct netdev_desc *desc;
1282
        int i;
1283
 
1284
        phy_addr = np->phy_addr;
1285
        switch (cmd) {
1286
        case SIOCDEVPRIVATE:
1287
                break;
1288
 
1289
        case SIOCDEVPRIVATE + 1:
1290
                miidata->out_value = mii_read (dev, phy_addr, miidata->reg_num);
1291
                break;
1292
        case SIOCDEVPRIVATE + 2:
1293
                mii_write (dev, phy_addr, miidata->reg_num, miidata->in_value);
1294
                break;
1295
        case SIOCDEVPRIVATE + 3:
1296
                break;
1297
        case SIOCDEVPRIVATE + 4:
1298
                break;
1299
        case SIOCDEVPRIVATE + 5:
1300
                netif_stop_queue (dev);
1301
                break;
1302
        case SIOCDEVPRIVATE + 6:
1303
                netif_wake_queue (dev);
1304
                break;
1305
        case SIOCDEVPRIVATE + 7:
1306
                printk
1307
                    ("tx_full=%x cur_tx=%lx old_tx=%lx cur_rx=%lx old_rx=%lx\n",
1308
                     netif_queue_stopped(dev), np->cur_tx, np->old_tx, np->cur_rx,
1309
                     np->old_rx);
1310
                break;
1311
        case SIOCDEVPRIVATE + 8:
1312
                printk("TX ring:\n");
1313
                for (i = 0; i < TX_RING_SIZE; i++) {
1314
                        desc = &np->tx_ring[i];
1315
                        printk
1316
                            ("%02x:cur:%08x next:%08x status:%08x frag1:%08x frag0:%08x",
1317
                             i,
1318
                             (u32) (np->tx_ring_dma + i * sizeof (*desc)),
1319
                             (u32)le64_to_cpu(desc->next_desc),
1320
                             (u32)le64_to_cpu(desc->status),
1321
                             (u32)(le64_to_cpu(desc->fraginfo) >> 32),
1322
                             (u32)le64_to_cpu(desc->fraginfo));
1323
                        printk ("\n");
1324
                }
1325
                printk ("\n");
1326
                break;
1327
 
1328
        default:
1329
                return -EOPNOTSUPP;
1330
        }
1331
        return 0;
1332
}
1333
 
#define EEP_READ 0x0200
#define EEP_BUSY 0x8000
/* Read the EEPROM word */
/* We use I/O instructions to read and write the EEPROM, to avoid failures
   on some machines */
static int
read_eeprom (long ioaddr, int eep_addr)
{
        int i = 1000;
        outw (EEP_READ | (eep_addr & 0xff), ioaddr + EepromCtrl);
        while (i-- > 0) {
                if (!(inw (ioaddr + EepromCtrl) & EEP_BUSY)) {
                        return inw (ioaddr + EepromData);
                }
        }
        return 0;
}
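
/* Debug sketch (hypothetical): dumping the first few EEPROM words during
 * bring-up, given the card's I/O base.  Note that read_eeprom() returns 0
 * on a poll timeout, which is indistinguishable from a word that really is
 * zero, so treat a 0 result with care:
 *
 *      int w;
 *      for (w = 0; w < 4; w++)
 *              printk (KERN_DEBUG "eeprom[%d] = 0x%04x\n", w,
 *                      read_eeprom (ioaddr, w));
 */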
 
enum phy_ctrl_bits {
        MII_READ = 0x00, MII_CLK = 0x01, MII_DATA1 = 0x02, MII_WRITE = 0x04,
        MII_DUPLEX = 0x08,
};

#define mii_delay() readb(ioaddr)
static void
mii_sendbit (struct net_device *dev, u32 data)
{
        long ioaddr = dev->base_addr + PhyCtrl;
        data = (data) ? MII_DATA1 : 0;
        /* Keep the upper PhyCtrl bits intact and select MII write mode */
        data |= (readb (ioaddr) & 0xf8) | MII_WRITE;
        writeb (data, ioaddr);
        mii_delay ();
        writeb (data | MII_CLK, ioaddr);
        mii_delay ();
}

static int
mii_getbit (struct net_device *dev)
{
        long ioaddr = dev->base_addr + PhyCtrl;
        u8 data;

        data = (readb (ioaddr) & 0xf8) | MII_READ;
        writeb (data, ioaddr);
        mii_delay ();
        writeb (data | MII_CLK, ioaddr);
        mii_delay ();
        /* The MDIO input is sampled on the MII_DATA1 bit */
        return ((readb (ioaddr) >> 1) & 1);
}
 
static void
mii_send_bits (struct net_device *dev, u32 data, int len)
{
        int i;
        /* Shift the bits out MSB first */
        for (i = len - 1; i >= 0; i--) {
                mii_sendbit (dev, data & (1 << i));
        }
}
 
static int
mii_read (struct net_device *dev, int phy_addr, int reg_num)
{
        u32 cmd;
        int i;
        u32 retval = 0;

        /* Preamble */
        mii_send_bits (dev, 0xffffffff, 32);
        /* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
        /* ST,OP = 0110'b for read operation */
        cmd = (0x06 << 10 | phy_addr << 5 | reg_num);
        mii_send_bits (dev, cmd, 14);
        /* Turnaround */
        if (mii_getbit (dev))
                goto err_out;
        /* Read data */
        for (i = 0; i < 16; i++) {
                retval |= mii_getbit (dev);
                retval <<= 1;
        }
        /* End cycle */
        mii_getbit (dev);
        return (retval >> 1) & 0xffff;

      err_out:
        return 0;
}
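
/* Worked example for mii_read() above: mii_read (dev, 0x01, 0x02) shifts
 * out the 14-bit frame 01 10 00001 00010 (ST=01, OP=10 for read, PHY
 * address 1, register 2), i.e. cmd = (0x06 << 10) | (1 << 5) | 2 = 0x1822.
 * The turnaround bit and the 16 data bits are then clocked in through
 * mii_getbit().  The read loop shifts one time too many, which the final
 * "retval >> 1" undoes.
 */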
static int
mii_write (struct net_device *dev, int phy_addr, int reg_num, u16 data)
{
        u32 cmd;

        /* Preamble */
        mii_send_bits (dev, 0xffffffff, 32);
        /* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
        /* ST,OP,AAAAA,RRRRR,TA = 0101xxxxxxxxxx10'b = 0x5002 for write */
        cmd = (0x5002 << 16) | (phy_addr << 23) | (reg_num << 18) | data;
        mii_send_bits (dev, cmd, 32);
        /* End cycle */
        mii_getbit (dev);
        return 0;
}
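
/* Worked example for mii_write() above: mii_write (dev, 0x01, 0x00, 0x1200)
 * builds cmd = 0x50020000 | (1 << 23) | (0 << 18) | 0x1200 = 0x50821200:
 * ST=01, OP=01 (write), PHY address 1, register 0 (BMCR), TA=10, and the
 * data word 0x1200 (auto-negotiation enable + restart).
 */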
static int
mii_wait_link (struct net_device *dev, int wait)
{
        __u16 bmsr;
        int phy_addr;
        struct netdev_private *np;

        np = netdev_priv(dev);
        phy_addr = np->phy_addr;

        do {
                bmsr = mii_read (dev, phy_addr, MII_BMSR);
                if (bmsr & MII_BMSR_LINK_STATUS)
                        return 0;
                mdelay (1);
        } while (--wait > 0);
        return -1;
}
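
/* Usage note: each failed poll above costs one mdelay(1) plus the MDIO
 * read itself, so the "wait" argument is roughly a timeout in milliseconds;
 * e.g. mii_wait_link (dev, 100) blocks for up to about 100 ms waiting for
 * BMSR to report link up.
 */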
static int
mii_get_media (struct net_device *dev)
{
        __u16 negotiate;
        __u16 bmsr;
        __u16 mscr;
        __u16 mssr;
        int phy_addr;
        struct netdev_private *np;

        np = netdev_priv(dev);
        phy_addr = np->phy_addr;

        bmsr = mii_read (dev, phy_addr, MII_BMSR);
        if (np->an_enable) {
                if (!(bmsr & MII_BMSR_AN_COMPLETE)) {
                        /* Auto-Negotiation not completed */
                        return -1;
                }
                negotiate = mii_read (dev, phy_addr, MII_ANAR) &
                        mii_read (dev, phy_addr, MII_ANLPAR);
                mscr = mii_read (dev, phy_addr, MII_MSCR);
                mssr = mii_read (dev, phy_addr, MII_MSSR);
                if (mscr & MII_MSCR_1000BT_FD && mssr & MII_MSSR_LP_1000BT_FD) {
                        np->speed = 1000;
                        np->full_duplex = 1;
                        printk (KERN_INFO "Auto 1000 Mbps, Full duplex\n");
                } else if (mscr & MII_MSCR_1000BT_HD && mssr & MII_MSSR_LP_1000BT_HD) {
                        np->speed = 1000;
                        np->full_duplex = 0;
                        printk (KERN_INFO "Auto 1000 Mbps, Half duplex\n");
                } else if (negotiate & MII_ANAR_100BX_FD) {
                        np->speed = 100;
                        np->full_duplex = 1;
                        printk (KERN_INFO "Auto 100 Mbps, Full duplex\n");
                } else if (negotiate & MII_ANAR_100BX_HD) {
                        np->speed = 100;
                        np->full_duplex = 0;
                        printk (KERN_INFO "Auto 100 Mbps, Half duplex\n");
                } else if (negotiate & MII_ANAR_10BT_FD) {
                        np->speed = 10;
                        np->full_duplex = 1;
                        printk (KERN_INFO "Auto 10 Mbps, Full duplex\n");
                } else if (negotiate & MII_ANAR_10BT_HD) {
                        np->speed = 10;
                        np->full_duplex = 0;
                        printk (KERN_INFO "Auto 10 Mbps, Half duplex\n");
                }
                if (negotiate & MII_ANAR_PAUSE) {
                        np->tx_flow &= 1;
                        np->rx_flow &= 1;
                } else if (negotiate & MII_ANAR_ASYMMETRIC) {
                        np->tx_flow = 0;
                        np->rx_flow &= 1;
                }
                /* else tx_flow, rx_flow = user select */
        } else {
                __u16 bmcr = mii_read (dev, phy_addr, MII_BMCR);
                switch (bmcr & (MII_BMCR_SPEED_100 | MII_BMCR_SPEED_1000)) {
                case MII_BMCR_SPEED_1000:
                        printk (KERN_INFO "Operating at 1000 Mbps, ");
                        break;
                case MII_BMCR_SPEED_100:
                        printk (KERN_INFO "Operating at 100 Mbps, ");
                        break;
                case 0:
                        printk (KERN_INFO "Operating at 10 Mbps, ");
                }
                if (bmcr & MII_BMCR_DUPLEX_MODE) {
                        printk ("Full duplex\n");
                } else {
                        printk ("Half duplex\n");
                }
        }
        if (np->tx_flow)
                printk(KERN_INFO "Enable Tx Flow Control\n");
        else
                printk(KERN_INFO "Disable Tx Flow Control\n");
        if (np->rx_flow)
                printk(KERN_INFO "Enable Rx Flow Control\n");
        else
                printk(KERN_INFO "Disable Rx Flow Control\n");

        return 0;
}
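
/* Flow-control resolution above, in brief: if both ends advertise symmetric
 * PAUSE, the user's tx_flow/rx_flow requests survive (masked to 0/1); if
 * only asymmetric pause is negotiated, Tx flow control is forced off;
 * otherwise both stay exactly as the user selected.
 */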
 
static int
mii_set_media (struct net_device *dev)
{
        __u16 pscr;
        __u16 bmcr;
        __u16 bmsr;
        __u16 anar;
        int phy_addr;
        struct netdev_private *np;
        np = netdev_priv(dev);
        phy_addr = np->phy_addr;

        /* Auto-negotiation, or user-forced speed? */
        if (np->an_enable) {
                /* Advertise capabilities */
                bmsr = mii_read (dev, phy_addr, MII_BMSR);
                anar = mii_read (dev, phy_addr, MII_ANAR) &
                             ~MII_ANAR_100BX_FD &
                             ~MII_ANAR_100BX_HD &
                             ~MII_ANAR_100BT4 &
                             ~MII_ANAR_10BT_FD &
                             ~MII_ANAR_10BT_HD;
                if (bmsr & MII_BMSR_100BX_FD)
                        anar |= MII_ANAR_100BX_FD;
                if (bmsr & MII_BMSR_100BX_HD)
                        anar |= MII_ANAR_100BX_HD;
                if (bmsr & MII_BMSR_100BT4)
                        anar |= MII_ANAR_100BT4;
                if (bmsr & MII_BMSR_10BT_FD)
                        anar |= MII_ANAR_10BT_FD;
                if (bmsr & MII_BMSR_10BT_HD)
                        anar |= MII_ANAR_10BT_HD;
                anar |= MII_ANAR_PAUSE | MII_ANAR_ASYMMETRIC;
                mii_write (dev, phy_addr, MII_ANAR, anar);

                /* Enable Auto crossover */
                pscr = mii_read (dev, phy_addr, MII_PHY_SCR);
                pscr |= 3 << 5; /* 11'b */
                mii_write (dev, phy_addr, MII_PHY_SCR, pscr);

                /* Soft reset PHY */
                mii_write (dev, phy_addr, MII_BMCR, MII_BMCR_RESET);
                bmcr = MII_BMCR_AN_ENABLE | MII_BMCR_RESTART_AN | MII_BMCR_RESET;
                mii_write (dev, phy_addr, MII_BMCR, bmcr);
                mdelay(1);
        } else {
                /* Force speed setting */
                /* 1) Disable Auto crossover */
                pscr = mii_read (dev, phy_addr, MII_PHY_SCR);
                pscr &= ~(3 << 5);
                mii_write (dev, phy_addr, MII_PHY_SCR, pscr);

                /* 2) PHY Reset */
                bmcr = mii_read (dev, phy_addr, MII_BMCR);
                bmcr |= MII_BMCR_RESET;
                mii_write (dev, phy_addr, MII_BMCR, bmcr);

                /* 3) Power Down */
                bmcr = 0x1940;  /* must be 0x1940 */
                mii_write (dev, phy_addr, MII_BMCR, bmcr);
                mdelay (100);   /* wait a certain time */

                /* 4) Advertise nothing */
                mii_write (dev, phy_addr, MII_ANAR, 0);

                /* 5) Set media and Power Up */
                bmcr = MII_BMCR_POWER_DOWN;
                if (np->speed == 100) {
                        bmcr |= MII_BMCR_SPEED_100;
                        printk (KERN_INFO "Manual 100 Mbps, ");
                } else if (np->speed == 10) {
                        printk (KERN_INFO "Manual 10 Mbps, ");
                }
                if (np->full_duplex) {
                        bmcr |= MII_BMCR_DUPLEX_MODE;
                        printk ("Full duplex\n");
                } else {
                        printk ("Half duplex\n");
                }
#if 0
                /* Set 1000BaseT Master/Slave setting */
                mscr = mii_read (dev, phy_addr, MII_MSCR);
                mscr |= MII_MSCR_CFG_ENABLE;
                mscr &= ~MII_MSCR_CFG_VALUE;
#endif
                mii_write (dev, phy_addr, MII_BMCR, bmcr);
                mdelay(10);
        }
        return 0;
}
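
/* Note: the magic 0x1940 written in step 3 decodes, per the standard BMCR
 * bit layout, to Auto-Negotiation enable + Power Down + Full Duplex + the
 * 1000 Mbps speed-select bit; i.e. the PHY is parked powered-down until
 * the real media bits are applied in step 5.
 */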
 
static int
mii_get_media_pcs (struct net_device *dev)
{
        __u16 negotiate;
        __u16 bmsr;
        int phy_addr;
        struct netdev_private *np;

        np = netdev_priv(dev);
        phy_addr = np->phy_addr;

        bmsr = mii_read (dev, phy_addr, PCS_BMSR);
        if (np->an_enable) {
                if (!(bmsr & MII_BMSR_AN_COMPLETE)) {
                        /* Auto-Negotiation not completed */
                        return -1;
                }
                negotiate = mii_read (dev, phy_addr, PCS_ANAR) &
                        mii_read (dev, phy_addr, PCS_ANLPAR);
                np->speed = 1000;
                if (negotiate & PCS_ANAR_FULL_DUPLEX) {
                        printk (KERN_INFO "Auto 1000 Mbps, Full duplex\n");
                        np->full_duplex = 1;
                } else {
                        printk (KERN_INFO "Auto 1000 Mbps, Half duplex\n");
                        np->full_duplex = 0;
                }
                if (negotiate & PCS_ANAR_PAUSE) {
                        np->tx_flow &= 1;
                        np->rx_flow &= 1;
                } else if (negotiate & PCS_ANAR_ASYMMETRIC) {
                        np->tx_flow = 0;
                        np->rx_flow &= 1;
                }
                /* else tx_flow, rx_flow = user select */
        } else {
                __u16 bmcr = mii_read (dev, phy_addr, PCS_BMCR);
                printk (KERN_INFO "Operating at 1000 Mbps, ");
                if (bmcr & MII_BMCR_DUPLEX_MODE) {
                        printk ("Full duplex\n");
                } else {
                        printk ("Half duplex\n");
                }
        }
        if (np->tx_flow)
                printk(KERN_INFO "Enable Tx Flow Control\n");
        else
                printk(KERN_INFO "Disable Tx Flow Control\n");
        if (np->rx_flow)
                printk(KERN_INFO "Enable Rx Flow Control\n");
        else
                printk(KERN_INFO "Disable Rx Flow Control\n");

        return 0;
}
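
/* The PCS (fiber) variants above differ from their copper counterparts in
 * that the link speed is fixed at 1000 Mbps: auto-negotiation only settles
 * duplex and the pause bits, through the PCS_* register set instead of the
 * MII_* one.
 */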
 
static int
mii_set_media_pcs (struct net_device *dev)
{
        __u16 bmcr;
        __u16 esr;
        __u16 anar;
        int phy_addr;
        struct netdev_private *np;
        np = netdev_priv(dev);
        phy_addr = np->phy_addr;

        /* Auto-Negotiation? */
        if (np->an_enable) {
                /* Advertise capabilities */
                esr = mii_read (dev, phy_addr, PCS_ESR);
                anar = mii_read (dev, phy_addr, MII_ANAR) &
                        ~PCS_ANAR_HALF_DUPLEX &
                        ~PCS_ANAR_FULL_DUPLEX;
                if (esr & (MII_ESR_1000BT_HD | MII_ESR_1000BX_HD))
                        anar |= PCS_ANAR_HALF_DUPLEX;
                if (esr & (MII_ESR_1000BT_FD | MII_ESR_1000BX_FD))
                        anar |= PCS_ANAR_FULL_DUPLEX;
                anar |= PCS_ANAR_PAUSE | PCS_ANAR_ASYMMETRIC;
                mii_write (dev, phy_addr, MII_ANAR, anar);

                /* Soft reset PHY */
                mii_write (dev, phy_addr, MII_BMCR, MII_BMCR_RESET);
                bmcr = MII_BMCR_AN_ENABLE | MII_BMCR_RESTART_AN |
                       MII_BMCR_RESET;
                mii_write (dev, phy_addr, MII_BMCR, bmcr);
                mdelay(1);
        } else {
                /* Force speed setting */
                /* PHY Reset */
                bmcr = MII_BMCR_RESET;
                mii_write (dev, phy_addr, MII_BMCR, bmcr);
                mdelay(10);
                if (np->full_duplex) {
                        bmcr = MII_BMCR_DUPLEX_MODE;
                        printk (KERN_INFO "Manual full duplex\n");
                } else {
                        bmcr = 0;
                        printk (KERN_INFO "Manual half duplex\n");
                }
                mii_write (dev, phy_addr, MII_BMCR, bmcr);
                mdelay(10);

                /* Advertise nothing */
                mii_write (dev, phy_addr, MII_ANAR, 0);
        }
        return 0;
}

 
static int
rio_close (struct net_device *dev)
{
        long ioaddr = dev->base_addr;
        struct netdev_private *np = netdev_priv(dev);
        struct sk_buff *skb;
        int i;

        netif_stop_queue (dev);

        /* Disable interrupts */
        writew (0, ioaddr + IntEnable);

        /* Stop the Tx and Rx logic */
        writel (TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl);
        synchronize_irq (dev->irq);
        free_irq (dev->irq, dev);
        del_timer_sync (&np->timer);

        /* Free all the skbuffs in the queue. */
        for (i = 0; i < RX_RING_SIZE; i++) {
                np->rx_ring[i].status = 0;
                np->rx_ring[i].fraginfo = 0;
                skb = np->rx_skbuff[i];
                if (skb) {
                        pci_unmap_single(np->pdev,
                                         desc_to_dma(&np->rx_ring[i]),
                                         skb->len, PCI_DMA_FROMDEVICE);
                        dev_kfree_skb (skb);
                        np->rx_skbuff[i] = NULL;
                }
        }
        for (i = 0; i < TX_RING_SIZE; i++) {
                skb = np->tx_skbuff[i];
                if (skb) {
                        pci_unmap_single(np->pdev,
                                         desc_to_dma(&np->tx_ring[i]),
                                         skb->len, PCI_DMA_TODEVICE);
                        dev_kfree_skb (skb);
                        np->tx_skbuff[i] = NULL;
                }
        }

        return 0;
}
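
/* The teardown order above matters: interrupts are masked and the MAC's
 * Tx/Rx engines stopped before free_irq() and before the rings are torn
 * down, so neither DMA nor a late interrupt can touch an skb that is about
 * to be unmapped and freed.
 */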
 
static void __devexit
rio_remove1 (struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata (pdev);

        if (dev) {
                struct netdev_private *np = netdev_priv(dev);

                unregister_netdev (dev);
                pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring,
                                     np->rx_ring_dma);
                pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring,
                                     np->tx_ring_dma);
#ifdef MEM_MAPPING
                iounmap ((char *) (dev->base_addr));
#endif
                free_netdev (dev);
                pci_release_regions (pdev);
                pci_disable_device (pdev);
        }
        pci_set_drvdata (pdev, NULL);
}

static struct pci_driver rio_driver = {
        .name           = "dl2k",
        .id_table       = rio_pci_tbl,
        .probe          = rio_probe1,
        .remove         = __devexit_p(rio_remove1),
};

static int __init
rio_init (void)
{
        return pci_register_driver(&rio_driver);
}

static void __exit
rio_exit (void)
{
        pci_unregister_driver (&rio_driver);
}

module_init (rio_init);
module_exit (rio_exit);

/*

Compile command:

gcc -D__KERNEL__ -DMODULE -I/usr/src/linux/include -Wall -Wstrict-prototypes -O2 -c dl2k.c

Read Documentation/networking/dl2k.txt for details.

*/
 
