/* $Id: sungem.c,v 1.1.1.1 2004-04-15 01:40:09 phoenix Exp $
 * sungem.c: Sun GEM ethernet driver.
 *
 * Copyright (C) 2000, 2001, 2002 David S. Miller (davem@redhat.com)
 *
 * Support for Apple GMAC and assorted PHYs by
 * Benjamin Herrenschmidt (benh@kernel.crashing.org)
 *
 * TODO:
 *  - Get rid of all those nasty mdelay's and replace them
 *    with schedule_timeout.
 *  - Implement WOL
 *  - Currently, forced Gb mode is only supported on bcm54xx
 *    PHY for which I use the SPD2 bit of the control register.
 *    On m1011 PHY, I can't force as I don't have the specs, but
 *    I can at least detect gigabit with autoneg.
 */

#include <linux/config.h>

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/random.h>

#include <asm/system.h>
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>
#include <asm/irq.h>

#ifdef __sparc__
#include <asm/idprom.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/pbm.h>
#endif

#ifdef CONFIG_ALL_PPC
#include <asm/pci-bridge.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#endif

#include "sungem.h"

#define DEFAULT_MSG     (NETIF_MSG_DRV          | \
                         NETIF_MSG_PROBE        | \
                         NETIF_MSG_LINK)

#define DRV_NAME        "sungem"
#define DRV_VERSION     "0.97"
#define DRV_RELDATE     "3/20/02"
#define DRV_AUTHOR      "David S. Miller (davem@redhat.com)"

static char version[] __devinitdata =
        DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";

MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION("Sun GEM Gbit ethernet driver");
MODULE_LICENSE("GPL");

MODULE_PARM(gem_debug, "i");
MODULE_PARM_DESC(gem_debug, "bitmapped message enable number");
MODULE_PARM(link_mode, "i");
MODULE_PARM_DESC(link_mode, "default link mode");

int gem_debug = -1;
static int link_mode;

static u16 link_modes[] __devinitdata = {
        BMCR_ANENABLE,                  /* 0 : autoneg */
        0,                              /* 1 : 10bt half duplex */
        BMCR_SPEED100,                  /* 2 : 100bt half duplex */
        BMCR_SPD2, /* bcm54xx only */   /* 3 : 1000bt half duplex */
        BMCR_FULLDPLX,                  /* 4 : 10bt full duplex */
        BMCR_SPEED100|BMCR_FULLDPLX,    /* 5 : 100bt full duplex */
        BMCR_SPD2|BMCR_FULLDPLX         /* 6 : 1000bt full duplex */
};
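
/* Usage sketch (assumes the driver is built as a module; the parameter
 * values below are illustrative, not defaults):
 *
 *      insmod sungem.o link_mode=5 gem_debug=0x7
 *
 * link_mode indexes the table above (5 = forced 100bt full duplex);
 * gem_debug is a NETIF_MSG_* bitmask (0x7 = DRV | PROBE | LINK).
 */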

#define GEM_MODULE_NAME "gem"
#define PFX GEM_MODULE_NAME ": "

static struct pci_device_id gem_pci_tbl[] __devinitdata = {
        { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },

        /* These models only differ from the original GEM in
         * that their tx/rx fifos are of a different size and
         * they only support 10/100 speeds. -DaveM
         *
         * Apple's GMAC does support gigabit on machines with
         * the BCM54xx PHYs. -BenH
         */
        { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_RIO_GEM,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMACP,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        {0, }
};

MODULE_DEVICE_TABLE(pci, gem_pci_tbl);

static u16 __phy_read(struct gem *gp, int reg, int phy_addr)
{
        u32 cmd;
        int limit = 10000;

        cmd  = (1 << 30);
        cmd |= (2 << 28);
        cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD;
        cmd |= (reg << 18) & MIF_FRAME_REGAD;
        cmd |= (MIF_FRAME_TAMSB);
        writel(cmd, gp->regs + MIF_FRAME);

        while (limit--) {
                cmd = readl(gp->regs + MIF_FRAME);
                if (cmd & MIF_FRAME_TALSB)
                        break;

                udelay(10);
        }

        /* limit drops below zero only when the loop exhausts without
         * seeing TALSB; a plain !limit test would miss the timeout.
         */
        if (limit < 0)
                cmd = 0xffff;

        return cmd & MIF_FRAME_DATA;
}

static inline u16 phy_read(struct gem *gp, int reg)
{
        return __phy_read(gp, reg, gp->mii_phy_addr);
}

static void __phy_write(struct gem *gp, int reg, u16 val, int phy_addr)
{
        u32 cmd;
        int limit = 10000;

        cmd  = (1 << 30);
        cmd |= (1 << 28);
        cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD;
        cmd |= (reg << 18) & MIF_FRAME_REGAD;
        cmd |= (MIF_FRAME_TAMSB);
        cmd |= (val & MIF_FRAME_DATA);
        writel(cmd, gp->regs + MIF_FRAME);

        while (limit--) {
                cmd = readl(gp->regs + MIF_FRAME);
                if (cmd & MIF_FRAME_TALSB)
                        break;

                udelay(10);
        }
}

static inline void phy_write(struct gem *gp, int reg, u16 val)
{
        __phy_write(gp, reg, val, gp->mii_phy_addr);
}
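
#if 0
/* Usage sketch, not built: fetch the 32-bit PHY identifier through the
 * frame-mode accessors above.  MII_PHYSID1/MII_PHYSID2 come from
 * <linux/mii.h>; this assumes MIF polling is not active at the time.
 */
static u32 gem_phy_id(struct gem *gp)
{
        u32 id;

        id  = (u32) phy_read(gp, MII_PHYSID1) << 16;
        id |= phy_read(gp, MII_PHYSID2);
        return id;
}
#endif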

static void gem_handle_mif_event(struct gem *gp, u32 reg_val, u32 changed_bits)
{
        if (netif_msg_intr(gp))
                printk(KERN_DEBUG "%s: mif interrupt\n", gp->dev->name);
}

static int gem_pcs_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
        u32 pcs_istat = readl(gp->regs + PCS_ISTAT);
        u32 pcs_miistat;

        if (netif_msg_intr(gp))
                printk(KERN_DEBUG "%s: pcs interrupt, pcs_istat: 0x%x\n",
                        gp->dev->name, pcs_istat);

        if (!(pcs_istat & PCS_ISTAT_LSC)) {
                printk(KERN_ERR "%s: PCS irq but no link status change???\n",
                       dev->name);
                return 0;
        }

        /* The link status bit latches on zero, so you must
         * read it twice in such a case to see a transition
         * to the link being up.
         */
        pcs_miistat = readl(gp->regs + PCS_MIISTAT);
        if (!(pcs_miistat & PCS_MIISTAT_LS))
                pcs_miistat |=
                        (readl(gp->regs + PCS_MIISTAT) &
                         PCS_MIISTAT_LS);

        if (pcs_miistat & PCS_MIISTAT_ANC) {
                /* The remote-fault indication is only valid
                 * when autoneg has completed.
                 */
                if (pcs_miistat & PCS_MIISTAT_RF)
                        printk(KERN_INFO "%s: PCS AutoNEG complete, "
                               "RemoteFault\n", dev->name);
                else
                        printk(KERN_INFO "%s: PCS AutoNEG complete.\n",
                               dev->name);
        }

        if (pcs_miistat & PCS_MIISTAT_LS) {
                printk(KERN_INFO "%s: PCS link is now up.\n",
                       dev->name);
        } else {
                printk(KERN_INFO "%s: PCS link is now down.\n",
                       dev->name);

                /* If this happens and the link timer is not running,
                 * reset so we re-negotiate.
                 */
                if (!timer_pending(&gp->link_timer))
                        return 1;
        }

        return 0;
}

static int gem_txmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
        u32 txmac_stat = readl(gp->regs + MAC_TXSTAT);

        if (netif_msg_intr(gp))
                printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n",
                        gp->dev->name, txmac_stat);

        /* Deferred timer expiration is quite normal,
         * don't even log the event.
         */
        if ((txmac_stat & MAC_TXSTAT_DTE) &&
            !(txmac_stat & ~MAC_TXSTAT_DTE))
                return 0;

        if (txmac_stat & MAC_TXSTAT_URUN) {
                printk(KERN_ERR "%s: TX MAC xmit underrun.\n",
                       dev->name);
                gp->net_stats.tx_fifo_errors++;
        }

        if (txmac_stat & MAC_TXSTAT_MPE) {
                printk(KERN_ERR "%s: TX MAC max packet size error.\n",
                       dev->name);
                gp->net_stats.tx_errors++;
        }

        /* The rest are all cases of one of the 16-bit TX
         * counters expiring.
         */
        if (txmac_stat & MAC_TXSTAT_NCE)
                gp->net_stats.collisions += 0x10000;

        if (txmac_stat & MAC_TXSTAT_ECE) {
                gp->net_stats.tx_aborted_errors += 0x10000;
                gp->net_stats.collisions += 0x10000;
        }

        if (txmac_stat & MAC_TXSTAT_LCE) {
                gp->net_stats.tx_aborted_errors += 0x10000;
                gp->net_stats.collisions += 0x10000;
        }

        /* We do not keep track of MAC_TXSTAT_FCE and
         * MAC_TXSTAT_PCE events.
         */
        return 0;
}

/* When we get a RX fifo overflow, the RX unit in GEM is probably hung
 * so we do the following.
 *
 * If any part of the reset goes wrong, we return 1 and that causes the
 * whole chip to be reset.
 */
static int gem_rxmac_reset(struct gem *gp)
{
        struct net_device *dev = gp->dev;
        int limit, i;
        u64 desc_dma;
        u32 val;

        /* First, reset MAC RX. */
        writel(gp->mac_rx_cfg & ~MAC_RXCFG_ENAB,
               gp->regs + MAC_RXCFG);
        for (limit = 0; limit < 5000; limit++) {
                if (!(readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB))
                        break;
                udelay(10);
        }
        if (limit == 5000) {
                printk(KERN_ERR "%s: RX MAC will not disable, resetting whole "
                       "chip.\n", dev->name);
                return 1;
        }

        /* Second, disable RX DMA. */
        writel(0, gp->regs + RXDMA_CFG);
        for (limit = 0; limit < 5000; limit++) {
                if (!(readl(gp->regs + RXDMA_CFG) & RXDMA_CFG_ENABLE))
                        break;
                udelay(10);
        }
        if (limit == 5000) {
                printk(KERN_ERR "%s: RX DMA will not disable, resetting whole "
                       "chip.\n", dev->name);
                return 1;
        }

        udelay(5000);

        /* Execute RX reset command. */
        writel(gp->swrst_base | GREG_SWRST_RXRST,
               gp->regs + GREG_SWRST);
        for (limit = 0; limit < 5000; limit++) {
                if (!(readl(gp->regs + GREG_SWRST) & GREG_SWRST_RXRST))
                        break;
                udelay(10);
        }
        if (limit == 5000) {
                printk(KERN_ERR "%s: RX reset command will not execute, resetting "
                       "whole chip.\n", dev->name);
                return 1;
        }

        /* Refresh the RX ring. */
        for (i = 0; i < RX_RING_SIZE; i++) {
                struct gem_rxd *rxd = &gp->init_block->rxd[i];

                if (gp->rx_skbs[i] == NULL) {
                        printk(KERN_ERR "%s: Parts of RX ring empty, resetting "
                               "whole chip.\n", dev->name);
                        return 1;
                }

                rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
        }
        gp->rx_new = gp->rx_old = 0;

        /* Now we must reprogram the rest of RX unit. */
        desc_dma = (u64) gp->gblock_dvma;
        desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd));
        writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
        writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);
        writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
        val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
               ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
        writel(val, gp->regs + RXDMA_CFG);
        if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
                writel(((5 & RXDMA_BLANK_IPKTS) |
                        ((8 << 12) & RXDMA_BLANK_ITIME)),
                       gp->regs + RXDMA_BLANK);
        else
                writel(((5 & RXDMA_BLANK_IPKTS) |
                        ((4 << 12) & RXDMA_BLANK_ITIME)),
                       gp->regs + RXDMA_BLANK);
        val  = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF);
        val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON);
        writel(val, gp->regs + RXDMA_PTHRESH);
        val = readl(gp->regs + RXDMA_CFG);
        writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
        writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK);
        val = readl(gp->regs + MAC_RXCFG);
        writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);

        return 0;
}

static int gem_rxmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
        u32 rxmac_stat = readl(gp->regs + MAC_RXSTAT);
        int ret = 0;

        if (netif_msg_intr(gp))
                printk(KERN_DEBUG "%s: rxmac interrupt, rxmac_stat: 0x%x\n",
                        gp->dev->name, rxmac_stat);

        if (rxmac_stat & MAC_RXSTAT_OFLW) {
                gp->net_stats.rx_over_errors++;
                gp->net_stats.rx_fifo_errors++;

                ret = gem_rxmac_reset(gp);
        }

        if (rxmac_stat & MAC_RXSTAT_ACE)
                gp->net_stats.rx_frame_errors += 0x10000;

        if (rxmac_stat & MAC_RXSTAT_CCE)
                gp->net_stats.rx_crc_errors += 0x10000;

        if (rxmac_stat & MAC_RXSTAT_LCE)
                gp->net_stats.rx_length_errors += 0x10000;

        /* We do not track MAC_RXSTAT_FCE and MAC_RXSTAT_VCE
         * events.
         */
        return ret;
}

static int gem_mac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
        u32 mac_cstat = readl(gp->regs + MAC_CSTAT);

        if (netif_msg_intr(gp))
                printk(KERN_DEBUG "%s: mac interrupt, mac_cstat: 0x%x\n",
                        gp->dev->name, mac_cstat);

        /* This interrupt is just for pause frame and pause
         * tracking.  It is useful for diagnostics and debug
         * but probably by default we will mask these events.
         */
        if (mac_cstat & MAC_CSTAT_PS)
                gp->pause_entered++;

        if (mac_cstat & MAC_CSTAT_PRCV)
                gp->pause_last_time_recvd = (mac_cstat >> 16);

        return 0;
}

static int gem_mif_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
        u32 mif_status = readl(gp->regs + MIF_STATUS);
        u32 reg_val, changed_bits;

        reg_val = (mif_status & MIF_STATUS_DATA) >> 16;
        changed_bits = (mif_status & MIF_STATUS_STAT);

        gem_handle_mif_event(gp, reg_val, changed_bits);

        return 0;
}

static int gem_pci_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
        u32 pci_estat = readl(gp->regs + GREG_PCIESTAT);

        if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
            gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
                printk(KERN_ERR "%s: PCI error [%04x] ",
                       dev->name, pci_estat);

                if (pci_estat & GREG_PCIESTAT_BADACK)
                        printk("<No ACK64# during ABS64 cycle> ");
                if (pci_estat & GREG_PCIESTAT_DTRTO)
                        printk("<Delayed transaction timeout> ");
                if (pci_estat & GREG_PCIESTAT_OTHER)
                        printk("<other>");
                printk("\n");
        } else {
                pci_estat |= GREG_PCIESTAT_OTHER;
                printk(KERN_ERR "%s: PCI error\n", dev->name);
        }

        if (pci_estat & GREG_PCIESTAT_OTHER) {
                u16 pci_cfg_stat;

                /* Interrogate PCI config space for the
                 * true cause.
                 */
                pci_read_config_word(gp->pdev, PCI_STATUS,
                                     &pci_cfg_stat);
                printk(KERN_ERR "%s: Read PCI cfg space status [%04x]\n",
                       dev->name, pci_cfg_stat);
                if (pci_cfg_stat & PCI_STATUS_PARITY)
                        printk(KERN_ERR "%s: PCI parity error detected.\n",
                               dev->name);
                if (pci_cfg_stat & PCI_STATUS_SIG_TARGET_ABORT)
                        printk(KERN_ERR "%s: PCI target abort.\n",
                               dev->name);
                if (pci_cfg_stat & PCI_STATUS_REC_TARGET_ABORT)
                        printk(KERN_ERR "%s: PCI master acks target abort.\n",
                               dev->name);
                if (pci_cfg_stat & PCI_STATUS_REC_MASTER_ABORT)
                        printk(KERN_ERR "%s: PCI master abort.\n",
                               dev->name);
                if (pci_cfg_stat & PCI_STATUS_SIG_SYSTEM_ERROR)
                        printk(KERN_ERR "%s: PCI system error SERR#.\n",
                               dev->name);
                if (pci_cfg_stat & PCI_STATUS_DETECTED_PARITY)
                        printk(KERN_ERR "%s: PCI parity error.\n",
                               dev->name);

                /* Write the error bits back to clear them. */
                pci_cfg_stat &= (PCI_STATUS_PARITY |
                                 PCI_STATUS_SIG_TARGET_ABORT |
                                 PCI_STATUS_REC_TARGET_ABORT |
                                 PCI_STATUS_REC_MASTER_ABORT |
                                 PCI_STATUS_SIG_SYSTEM_ERROR |
                                 PCI_STATUS_DETECTED_PARITY);
                pci_write_config_word(gp->pdev,
                                      PCI_STATUS, pci_cfg_stat);
        }

        /* For all PCI errors, we should reset the chip. */
        return 1;
}

/* All non-normal interrupt conditions get serviced here.
 * Returns non-zero if we should just exit the interrupt
 * handler right now (i.e. if we reset the card, which invalidates
 * all of the other original irq status bits).
 */
static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_status)
{
        if (gem_status & GREG_STAT_RXNOBUF) {
                /* Frame arrived, no free RX buffers available. */
                if (netif_msg_rx_err(gp))
                        printk(KERN_DEBUG "%s: no buffer for rx frame\n",
                                gp->dev->name);
                gp->net_stats.rx_dropped++;
        }

        if (gem_status & GREG_STAT_RXTAGERR) {
                /* corrupt RX tag framing */
                if (netif_msg_rx_err(gp))
                        printk(KERN_DEBUG "%s: corrupt rx tag framing\n",
                                gp->dev->name);
                gp->net_stats.rx_errors++;

                goto do_reset;
        }

        if (gem_status & GREG_STAT_PCS) {
                if (gem_pcs_interrupt(dev, gp, gem_status))
                        goto do_reset;
        }

        if (gem_status & GREG_STAT_TXMAC) {
                if (gem_txmac_interrupt(dev, gp, gem_status))
                        goto do_reset;
        }

        if (gem_status & GREG_STAT_RXMAC) {
                if (gem_rxmac_interrupt(dev, gp, gem_status))
                        goto do_reset;
        }

        if (gem_status & GREG_STAT_MAC) {
                if (gem_mac_interrupt(dev, gp, gem_status))
                        goto do_reset;
        }

        if (gem_status & GREG_STAT_MIF) {
                if (gem_mif_interrupt(dev, gp, gem_status))
                        goto do_reset;
        }

        if (gem_status & GREG_STAT_PCIERR) {
                if (gem_pci_interrupt(dev, gp, gem_status))
                        goto do_reset;
        }

        return 0;

do_reset:
        gp->reset_task_pending = 2;
        schedule_task(&gp->reset_task);

        return 1;
}
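
/* Note: reset_task_pending doubles as a small command word for
 * gem_reset_task().  The value 2 (used here and by the TX timeout path)
 * appears to request a full re-init, while 1 (used by gem_change_mtu)
 * requests a lighter one -- the flag is tested as
 * (reset_task_pending == 2) when calling gem_init_hw().  This reading
 * is inferred from the call sites in this file, not from documentation.
 */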
586
 
587
static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_status)
588
{
589
        int entry, limit;
590
 
591
        if (netif_msg_intr(gp))
592
                printk(KERN_DEBUG "%s: tx interrupt, gem_status: 0x%x\n",
593
                        gp->dev->name, gem_status);
594
 
595
        entry = gp->tx_old;
596
        limit = ((gem_status & GREG_STAT_TXNR) >> GREG_STAT_TXNR_SHIFT);
597
        while (entry != limit) {
598
                struct sk_buff *skb;
599
                struct gem_txd *txd;
600
                dma_addr_t dma_addr;
601
                u32 dma_len;
602
                int frag;
603
 
604
                if (netif_msg_tx_done(gp))
605
                        printk(KERN_DEBUG "%s: tx done, slot %d\n",
606
                                gp->dev->name, entry);
607
                skb = gp->tx_skbs[entry];
608
                if (skb_shinfo(skb)->nr_frags) {
609
                        int last = entry + skb_shinfo(skb)->nr_frags;
610
                        int walk = entry;
611
                        int incomplete = 0;
612
 
613
                        last &= (TX_RING_SIZE - 1);
614
                        for (;;) {
615
                                walk = NEXT_TX(walk);
616
                                if (walk == limit)
617
                                        incomplete = 1;
618
                                if (walk == last)
619
                                        break;
620
                        }
621
                        if (incomplete)
622
                                break;
623
                }
624
                gp->tx_skbs[entry] = NULL;
625
                gp->net_stats.tx_bytes += skb->len;
626
 
627
                for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
628
                        txd = &gp->init_block->txd[entry];
629
 
630
                        dma_addr = le64_to_cpu(txd->buffer);
631
                        dma_len = le64_to_cpu(txd->control_word) & TXDCTRL_BUFSZ;
632
 
633
                        pci_unmap_page(gp->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE);
634
                        entry = NEXT_TX(entry);
635
                }
636
 
637
                gp->net_stats.tx_packets++;
638
                dev_kfree_skb_irq(skb);
639
        }
640
        gp->tx_old = entry;
641
 
642
        if (netif_queue_stopped(dev) &&
643
            TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))
644
                netif_wake_queue(dev);
645
}
646
 
647
static __inline__ void gem_post_rxds(struct gem *gp, int limit)
648
{
649
        int cluster_start, curr, count, kick;
650
 
651
        cluster_start = curr = (gp->rx_new & ~(4 - 1));
652
        count = 0;
653
        kick = -1;
654
        wmb();
655
        while (curr != limit) {
656
                curr = NEXT_RX(curr);
657
                if (++count == 4) {
658
                        struct gem_rxd *rxd =
659
                                &gp->init_block->rxd[cluster_start];
660
                        for (;;) {
661
                                rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
662
                                rxd++;
663
                                cluster_start = NEXT_RX(cluster_start);
664
                                if (cluster_start == curr)
665
                                        break;
666
                        }
667
                        kick = curr;
668
                        count = 0;
669
                }
670
        }
671
        if (kick >= 0) {
672
                mb();
673
                writel(kick, gp->regs + RXDMA_KICK);
674
        }
675
}
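
/* Note on gem_post_rxds(): descriptors are handed back to the chip in
 * aligned clusters of four, and RXDMA_KICK is only written once a full
 * cluster has been refreshed, which is why the loop rounds rx_new down
 * to a multiple of 4.  The wmb()/mb() pair orders the descriptor
 * updates against the kick that makes them visible to the chip.
 */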

static void gem_rx(struct gem *gp)
{
        int entry, drops;
        u32 done;

        if (netif_msg_intr(gp))
                printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n",
                        gp->dev->name, readl(gp->regs + RXDMA_DONE), gp->rx_new);

        entry = gp->rx_new;
        drops = 0;
        done = readl(gp->regs + RXDMA_DONE);
        for (;;) {
                struct gem_rxd *rxd = &gp->init_block->rxd[entry];
                struct sk_buff *skb;
                u64 status = le64_to_cpu(rxd->status_word);
                dma_addr_t dma_addr;
                int len;

                if ((status & RXDCTRL_OWN) != 0)
                        break;

                /* When writing back the RX descriptor, GEM writes the status
                 * then the buffer address, possibly in separate transactions.
                 * If we don't wait for the chip to write both, we could
                 * post a new buffer to this descriptor then have GEM spam
                 * on the buffer address.  We sync on the RX completion
                 * register to prevent this from happening.
                 */
                if (entry == done) {
                        done = readl(gp->regs + RXDMA_DONE);
                        if (entry == done)
                                break;
                }

                skb = gp->rx_skbs[entry];

                len = (status & RXDCTRL_BUFSZ) >> 16;
                if ((len < ETH_ZLEN) || (status & RXDCTRL_BAD)) {
                        gp->net_stats.rx_errors++;
                        if (len < ETH_ZLEN)
                                gp->net_stats.rx_length_errors++;
                        if (status & RXDCTRL_BAD)
                                gp->net_stats.rx_crc_errors++;

                        /* We'll just return it to GEM. */
                drop_it:
                        gp->net_stats.rx_dropped++;
                        goto next;
                }

                dma_addr = le64_to_cpu(rxd->buffer);
                if (len > RX_COPY_THRESHOLD) {
                        struct sk_buff *new_skb;

                        new_skb = gem_alloc_skb(RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC);
                        if (new_skb == NULL) {
                                drops++;
                                goto drop_it;
                        }
                        pci_unmap_page(gp->pdev, dma_addr,
                                       RX_BUF_ALLOC_SIZE(gp),
                                       PCI_DMA_FROMDEVICE);
                        gp->rx_skbs[entry] = new_skb;
                        new_skb->dev = gp->dev;
                        skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET));
                        rxd->buffer = cpu_to_le64(pci_map_page(gp->pdev,
                                                               virt_to_page(new_skb->data),
                                                               ((unsigned long) new_skb->data &
                                                                ~PAGE_MASK),
                                                               RX_BUF_ALLOC_SIZE(gp),
                                                               PCI_DMA_FROMDEVICE));
                        skb_reserve(new_skb, RX_OFFSET);

                        /* Trim the original skb for the netif. */
                        skb_trim(skb, len);
                } else {
                        struct sk_buff *copy_skb = dev_alloc_skb(len + 2);

                        if (copy_skb == NULL) {
                                drops++;
                                goto drop_it;
                        }

                        copy_skb->dev = gp->dev;
                        skb_reserve(copy_skb, 2);
                        skb_put(copy_skb, len);
                        pci_dma_sync_single(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
                        memcpy(copy_skb->data, skb->data, len);

                        /* We'll reuse the original ring buffer. */
                        skb = copy_skb;
                }

                skb->csum = ntohs((status & RXDCTRL_TCPCSUM) ^ 0xffff);
                skb->ip_summed = CHECKSUM_HW;
                skb->protocol = eth_type_trans(skb, gp->dev);
                netif_rx(skb);

                gp->net_stats.rx_packets++;
                gp->net_stats.rx_bytes += len;
                gp->dev->last_rx = jiffies;

        next:
                entry = NEXT_RX(entry);
        }

        gem_post_rxds(gp, entry);

        gp->rx_new = entry;

        if (drops)
                printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
                       gp->dev->name);
}

static void gem_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
        struct net_device *dev = dev_id;
        struct gem *gp = dev->priv;
        u32 gem_status = readl(gp->regs + GREG_STAT);

        spin_lock(&gp->lock);

        if (gem_status & GREG_STAT_ABNORMAL) {
                if (gem_abnormal_irq(dev, gp, gem_status))
                        goto out;
        }
        if (gem_status & (GREG_STAT_TXALL | GREG_STAT_TXINTME))
                gem_tx(dev, gp, gem_status);
        if (gem_status & GREG_STAT_RXDONE)
                gem_rx(gp);

out:
        spin_unlock(&gp->lock);
}

static void gem_tx_timeout(struct net_device *dev)
{
        struct gem *gp = dev->priv;

        printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
        if (!gp->hw_running) {
                printk(KERN_ERR "%s: hrm.. hw not running!\n", dev->name);
                return;
        }
        printk(KERN_ERR "%s: TX_STATE[%08x:%08x:%08x]\n",
               dev->name,
               readl(gp->regs + TXDMA_CFG),
               readl(gp->regs + MAC_TXSTAT),
               readl(gp->regs + MAC_TXCFG));
        printk(KERN_ERR "%s: RX_STATE[%08x:%08x:%08x]\n",
               dev->name,
               readl(gp->regs + RXDMA_CFG),
               readl(gp->regs + MAC_RXSTAT),
               readl(gp->regs + MAC_RXCFG));

        spin_lock_irq(&gp->lock);

        gp->reset_task_pending = 2;
        schedule_task(&gp->reset_task);

        spin_unlock_irq(&gp->lock);
}

static __inline__ int gem_intme(int entry)
{
        /* Algorithm: IRQ every 1/2 of descriptors. */
        if (!(entry & ((TX_RING_SIZE>>1)-1)))
                return 1;

        return 0;
}
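
/* For example, with TX_RING_SIZE == 128 (the value lives in sungem.h;
 * 128 is assumed here purely for illustration), gem_intme() returns 1
 * only for entries 0 and 64, so a TX-completion interrupt is requested
 * at most twice per trip around the ring.
 */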
850
 
851
static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev)
852
{
853
        struct gem *gp = dev->priv;
854
        int entry;
855
        u64 ctrl;
856
 
857
        ctrl = 0;
858
        if (skb->ip_summed == CHECKSUM_HW) {
859
                u64 csum_start_off, csum_stuff_off;
860
 
861
                csum_start_off = (u64) (skb->h.raw - skb->data);
862
                csum_stuff_off = (u64) ((skb->h.raw + skb->csum) - skb->data);
863
 
864
                ctrl = (TXDCTRL_CENAB |
865
                        (csum_start_off << 15) |
866
                        (csum_stuff_off << 21));
867
        }
868
 
869
        spin_lock_irq(&gp->lock);
870
 
871
        /* This is a hard error, log it. */
872
        if (TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1)) {
873
                netif_stop_queue(dev);
874
                spin_unlock_irq(&gp->lock);
875
                printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
876
                       dev->name);
877
                return 1;
878
        }
879
 
880
        entry = gp->tx_new;
881
        gp->tx_skbs[entry] = skb;
882
 
883
        if (skb_shinfo(skb)->nr_frags == 0) {
884
                struct gem_txd *txd = &gp->init_block->txd[entry];
885
                dma_addr_t mapping;
886
                u32 len;
887
 
888
                len = skb->len;
889
                mapping = pci_map_page(gp->pdev,
890
                                       virt_to_page(skb->data),
891
                                       ((unsigned long) skb->data &
892
                                        ~PAGE_MASK),
893
                                       len, PCI_DMA_TODEVICE);
894
                ctrl |= TXDCTRL_SOF | TXDCTRL_EOF | len;
895
                if (gem_intme(entry))
896
                        ctrl |= TXDCTRL_INTME;
897
                txd->buffer = cpu_to_le64(mapping);
898
                wmb();
899
                txd->control_word = cpu_to_le64(ctrl);
900
                entry = NEXT_TX(entry);
901
        } else {
902
                struct gem_txd *txd;
903
                u32 first_len;
904
                u64 intme;
905
                dma_addr_t first_mapping;
906
                int frag, first_entry = entry;
907
 
908
                intme = 0;
909
                if (gem_intme(entry))
910
                        intme |= TXDCTRL_INTME;
911
 
912
                /* We must give this initial chunk to the device last.
913
                 * Otherwise we could race with the device.
914
                 */
915
                first_len = skb->len - skb->data_len;
916
                first_mapping = pci_map_page(gp->pdev, virt_to_page(skb->data),
917
                                             ((unsigned long) skb->data & ~PAGE_MASK),
918
                                             first_len, PCI_DMA_TODEVICE);
919
                entry = NEXT_TX(entry);
920
 
921
                for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
922
                        skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
923
                        u32 len;
924
                        dma_addr_t mapping;
925
                        u64 this_ctrl;
926
 
927
                        len = this_frag->size;
928
                        mapping = pci_map_page(gp->pdev,
929
                                               this_frag->page,
930
                                               this_frag->page_offset,
931
                                               len, PCI_DMA_TODEVICE);
932
                        this_ctrl = ctrl;
933
                        if (frag == skb_shinfo(skb)->nr_frags - 1)
934
                                this_ctrl |= TXDCTRL_EOF;
935
 
936
                        txd = &gp->init_block->txd[entry];
937
                        txd->buffer = cpu_to_le64(mapping);
938
                        wmb();
939
                        txd->control_word = cpu_to_le64(this_ctrl | len);
940
 
941
                        if (gem_intme(entry))
942
                                intme |= TXDCTRL_INTME;
943
 
944
                        entry = NEXT_TX(entry);
945
                }
946
                txd = &gp->init_block->txd[first_entry];
947
                txd->buffer = cpu_to_le64(first_mapping);
948
                wmb();
949
                txd->control_word =
950
                        cpu_to_le64(ctrl | TXDCTRL_SOF | intme | first_len);
951
        }
952
 
953
        gp->tx_new = entry;
954
        if (TX_BUFFS_AVAIL(gp) <= (MAX_SKB_FRAGS + 1))
955
                netif_stop_queue(dev);
956
 
957
        if (netif_msg_tx_queued(gp))
958
                printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
959
                       dev->name, entry, skb->len);
960
        mb();
961
        writel(gp->tx_new, gp->regs + TXDMA_KICK);
962
        spin_unlock_irq(&gp->lock);
963
 
964
        dev->trans_start = jiffies;
965
 
966
        return 0;
967
}
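
/* The descriptor update discipline above is: write the buffer address,
 * wmb(), then write the control word that flips ownership to the chip.
 * For fragmented skbs the SOF descriptor is filled in last for the same
 * reason, so the chip never sees a valid start-of-frame before the rest
 * of the chain is in place.
 */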

/* Jumbo-grams don't seem to work :-( */
#define GEM_MIN_MTU     68
#if 1
#define GEM_MAX_MTU     1500
#else
#define GEM_MAX_MTU     9000
#endif

static int gem_change_mtu(struct net_device *dev, int new_mtu)
{
        struct gem *gp = dev->priv;

        if (new_mtu < GEM_MIN_MTU || new_mtu > GEM_MAX_MTU)
                return -EINVAL;

        if (!netif_running(dev) || !netif_device_present(dev)) {
                /* We'll just catch it later when the
                 * device is up'd or resumed.
                 */
                dev->mtu = new_mtu;
                return 0;
        }

        spin_lock_irq(&gp->lock);
        dev->mtu = new_mtu;
        gp->reset_task_pending = 1;
        schedule_task(&gp->reset_task);
        spin_unlock_irq(&gp->lock);

        flush_scheduled_tasks();

        return 0;
}

#define STOP_TRIES 32

/* Must be invoked under gp->lock. */
static void gem_stop(struct gem *gp)
{
        int limit;
        u32 val;

        /* Make sure we won't get any more interrupts */
        writel(0xffffffff, gp->regs + GREG_IMASK);

        /* Reset the chip */
        writel(gp->swrst_base | GREG_SWRST_TXRST | GREG_SWRST_RXRST,
               gp->regs + GREG_SWRST);

        limit = STOP_TRIES;

        do {
                udelay(20);
                val = readl(gp->regs + GREG_SWRST);
                if (limit-- <= 0)
                        break;
        } while (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST));

        if (limit <= 0)
                printk(KERN_ERR "gem: SW reset is ghetto.\n");
}
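
/* gem_stop() polls for the TX/RX reset bits to self-clear for up to
 * STOP_TRIES iterations at 20us apart, i.e. roughly 640us, before it
 * gives up and logs the failure.
 */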

/* Must be invoked under gp->lock. */
static void gem_start_dma(struct gem *gp)
{
        unsigned long val;

        /* We are ready to rock, turn everything on. */
        val = readl(gp->regs + TXDMA_CFG);
        writel(val | TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG);
        val = readl(gp->regs + RXDMA_CFG);
        writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
        val = readl(gp->regs + MAC_TXCFG);
        writel(val | MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG);
        val = readl(gp->regs + MAC_RXCFG);
        writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);

        (void) readl(gp->regs + MAC_RXCFG);
        udelay(100);

        writel(GREG_STAT_TXDONE, gp->regs + GREG_IMASK);

        writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
}

/* Link modes of the BCM5400 PHY, as reported in the AUXSTATUS link
 * mode field.  Each row is { full-duplex, 100Mb/s, gigabit }.
 */
static int phy_BCM5400_link_table[8][3] = {
        { 0, 0, 0 },    /* No link */
        { 0, 0, 0 },    /* 10BT Half Duplex */
        { 1, 0, 0 },    /* 10BT Full Duplex */
        { 0, 1, 0 },    /* 100BT Half Duplex */
        { 0, 1, 0 },    /* 100BT Half Duplex */
        { 1, 1, 0 },    /* 100BT Full Duplex */
        { 1, 0, 1 },    /* 1000BT */
        { 1, 0, 1 },    /* 1000BT */
};

/* Must be invoked under gp->lock. */
static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep)
{
        u16 ctl;

        /* Setup link parameters */
        if (!ep)
                goto start_aneg;
        if (ep->autoneg == AUTONEG_ENABLE) {
                /* TODO: parse ep->advertising */
                gp->link_advertise |= (ADVERTISE_10HALF | ADVERTISE_10FULL);
                gp->link_advertise |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
                /* Can I advertise gigabit here ? I'd need BCM PHY docs... */
                gp->link_cntl = BMCR_ANENABLE;
        } else {
                gp->link_cntl = 0;
                if (ep->speed == SPEED_100)
                        gp->link_cntl |= BMCR_SPEED100;
                else if (ep->speed == SPEED_1000 && gp->gigabit_capable)
                        /* Hrm... check if this is right... */
                        gp->link_cntl |= BMCR_SPD2;
                if (ep->duplex == DUPLEX_FULL)
                        gp->link_cntl |= BMCR_FULLDPLX;
        }

start_aneg:
        if (!gp->hw_running)
                return;

        /* Configure PHY & start aneg */
        ctl = phy_read(gp, MII_BMCR);
        ctl &= ~(BMCR_FULLDPLX|BMCR_SPEED100|BMCR_ANENABLE);
        ctl |= gp->link_cntl;
        if (ctl & BMCR_ANENABLE) {
                ctl |= BMCR_ANRESTART;
                gp->lstate = link_aneg;
        } else {
                gp->lstate = link_force_ok;
        }
        phy_write(gp, MII_BMCR, ctl);

        gp->timer_ticks = 0;
        mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
}
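
/* The link timer armed above fires every 12*HZ/10 jiffies, i.e. about
 * every 1.2 seconds; gem_link_timer() below re-arms it with the same
 * interval while it watches autonegotiation progress.
 */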

/* Must be invoked under gp->lock. */
static void gem_read_mii_link_mode(struct gem *gp, int *fd, int *spd, int *pause)
{
        u32 val;

        *fd = 0;
        *spd = 10;
        *pause = 0;

        if (gp->phy_mod == phymod_bcm5400 ||
            gp->phy_mod == phymod_bcm5401 ||
            gp->phy_mod == phymod_bcm5411) {
                int link_mode;

                val = phy_read(gp, MII_BCM5400_AUXSTATUS);
                link_mode = ((val & MII_BCM5400_AUXSTATUS_LINKMODE_MASK) >>
                             MII_BCM5400_AUXSTATUS_LINKMODE_SHIFT);
                *fd = phy_BCM5400_link_table[link_mode][0];
                *spd = phy_BCM5400_link_table[link_mode][2] ?
                        1000 :
                        (phy_BCM5400_link_table[link_mode][1] ? 100 : 10);
                val = phy_read(gp, MII_LPA);
                if (val & LPA_PAUSE)
                        *pause = 1;
        } else {
                val = phy_read(gp, MII_LPA);

                if (val & (LPA_10FULL | LPA_100FULL))
                        *fd = 1;
                if (val & (LPA_100FULL | LPA_100HALF))
                        *spd = 100;

                if (gp->phy_mod == phymod_m1011) {
                        /* Register 0x0a is the 1000BASE-T status register;
                         * bits 11:10 are the link partner's 1000FULL/1000HALF
                         * abilities.
                         */
                        val = phy_read(gp, 0x0a);
                        if (val & 0xc00)
                                *spd = 1000;
                        if (val & 0x800)
                                *fd = 1;
                }
        }
}

/* A link-up condition has occurred, initialize and enable the
 * rest of the chip.
 *
 * Must be invoked under gp->lock.
 */
static void gem_set_link_modes(struct gem *gp)
{
        u32 val;
        int full_duplex, speed, pause;

        full_duplex = 0;
        speed = 10;
        pause = 0;

        if (gp->phy_type == phy_mii_mdio0 ||
            gp->phy_type == phy_mii_mdio1) {
                val = phy_read(gp, MII_BMCR);
                if (val & BMCR_ANENABLE)
                        gem_read_mii_link_mode(gp, &full_duplex, &speed, &pause);
                else {
                        if (val & BMCR_FULLDPLX)
                                full_duplex = 1;
                        if (val & BMCR_SPEED100)
                                speed = 100;
                }
        } else {
                u32 pcs_lpa = readl(gp->regs + PCS_MIILP);

                if (pcs_lpa & PCS_MIIADV_FD)
                        full_duplex = 1;
                speed = 1000;
        }

        if (netif_msg_link(gp))
                printk(KERN_INFO "%s: Link is up at %d Mbps, %s-duplex.\n",
                        gp->dev->name, speed, (full_duplex ? "full" : "half"));

        val = (MAC_TXCFG_EIPG0 | MAC_TXCFG_NGU);
        if (full_duplex) {
                val |= (MAC_TXCFG_ICS | MAC_TXCFG_ICOLL);
        } else {
                /* MAC_TXCFG_NBO must be zero. */
        }
        writel(val, gp->regs + MAC_TXCFG);

        val = (MAC_XIFCFG_OE | MAC_XIFCFG_LLED);
        if (!full_duplex &&
            (gp->phy_type == phy_mii_mdio0 ||
             gp->phy_type == phy_mii_mdio1)) {
                val |= MAC_XIFCFG_DISE;
        } else if (full_duplex) {
                val |= MAC_XIFCFG_FLED;
        }

        if (speed == 1000)
                val |= (MAC_XIFCFG_GMII);

        writel(val, gp->regs + MAC_XIFCFG);

        /* If gigabit and half-duplex, enable carrier extension
         * mode.  Else, disable it.
         */
        if (speed == 1000 && !full_duplex) {
                val = readl(gp->regs + MAC_TXCFG);
                writel(val | MAC_TXCFG_TCE, gp->regs + MAC_TXCFG);

                val = readl(gp->regs + MAC_RXCFG);
                writel(val | MAC_RXCFG_RCE, gp->regs + MAC_RXCFG);
        } else {
                val = readl(gp->regs + MAC_TXCFG);
                writel(val & ~MAC_TXCFG_TCE, gp->regs + MAC_TXCFG);

                val = readl(gp->regs + MAC_RXCFG);
                writel(val & ~MAC_RXCFG_RCE, gp->regs + MAC_RXCFG);
        }

        if (gp->phy_type == phy_serialink ||
            gp->phy_type == phy_serdes) {
                u32 pcs_lpa = readl(gp->regs + PCS_MIILP);

                if (pcs_lpa & (PCS_MIIADV_SP | PCS_MIIADV_AP))
                        pause = 1;
        }

        if (netif_msg_link(gp)) {
                if (pause) {
                        printk(KERN_INFO "%s: Pause is enabled "
                               "(rxfifo: %d off: %d on: %d)\n",
                               gp->dev->name,
                               gp->rx_fifo_sz,
                               gp->rx_pause_off,
                               gp->rx_pause_on);
                } else {
                        printk(KERN_INFO "%s: Pause is disabled\n",
                               gp->dev->name);
                }
        }

        if (!full_duplex)
                writel(512, gp->regs + MAC_STIME);
        else
                writel(64, gp->regs + MAC_STIME);
        val = readl(gp->regs + MAC_MCCFG);
        if (pause)
                val |= (MAC_MCCFG_SPE | MAC_MCCFG_RPE);
        else
                val &= ~(MAC_MCCFG_SPE | MAC_MCCFG_RPE);
        writel(val, gp->regs + MAC_MCCFG);

        gem_start_dma(gp);
}

/* Must be invoked under gp->lock. */
static int gem_mdio_link_not_up(struct gem *gp)
{
        u16 val;

        if (gp->lstate == link_force_ret) {
                if (netif_msg_link(gp))
                        printk(KERN_INFO "%s: Autoneg failed again, keeping"
                                " forced mode\n", gp->dev->name);
                phy_write(gp, MII_BMCR, gp->link_fcntl);
                gp->timer_ticks = 5;
                gp->lstate = link_force_ok;
        } else if (gp->lstate == link_aneg) {
                val = phy_read(gp, MII_BMCR);

                if (netif_msg_link(gp))
                        printk(KERN_INFO "%s: switching to forced 100bt\n",
                                gp->dev->name);
                /* Try forced modes. */
                val &= ~(BMCR_ANRESTART | BMCR_ANENABLE);
                val &= ~(BMCR_FULLDPLX);
                val |= BMCR_SPEED100;
                phy_write(gp, MII_BMCR, val);
                gp->timer_ticks = 5;
                gp->lstate = link_force_try;
        } else {
                /* Downgrade from 100 to 10 Mbps if necessary.
                 * If already at 10Mbps, warn user about the
                 * situation every 10 ticks.
                 */
                val = phy_read(gp, MII_BMCR);
                if (val & BMCR_SPEED100) {
                        val &= ~BMCR_SPEED100;
                        phy_write(gp, MII_BMCR, val);
                        gp->timer_ticks = 5;
                        if (netif_msg_link(gp))
                                printk(KERN_INFO "%s: switching to forced 10bt\n",
                                        gp->dev->name);
                } else
                        return 1;
        }
        return 0;
}

static void gem_init_rings(struct gem *);
static void gem_init_hw(struct gem *, int);

static void gem_reset_task(void *data)
{
        struct gem *gp = (struct gem *) data;

        /* The link went down, we reset the ring, but keep
         * DMA stopped. Todo: Use this function for reset
         * on error as well.
         */

        spin_lock_irq(&gp->lock);

        if (gp->hw_running && gp->opened) {
                /* Make sure we don't get interrupts or tx packets */
                netif_stop_queue(gp->dev);

                writel(0xffffffff, gp->regs + GREG_IMASK);

                /* Reset the chip & rings */
                gem_stop(gp);
                gem_init_rings(gp);

                gem_init_hw(gp,
                            (gp->reset_task_pending == 2));

                netif_wake_queue(gp->dev);
        }
        gp->reset_task_pending = 0;

        spin_unlock_irq(&gp->lock);
}

static void gem_link_timer(unsigned long data)
{
        struct gem *gp = (struct gem *) data;

        if (!gp->hw_running)
                return;

        spin_lock_irq(&gp->lock);

        /* If the reset task is still pending, we just
         * reschedule the link timer
         */
        if (gp->reset_task_pending)
                goto restart;

        if (gp->phy_type == phy_mii_mdio0 ||
            gp->phy_type == phy_mii_mdio1) {
                u16 val = phy_read(gp, MII_BMSR);
                u16 cntl = phy_read(gp, MII_BMCR);
                int up;

                /* When using autoneg, we really need to wait for
                 * ANEGCOMPLETE, or we may get a transient incorrect
                 * link state
                 */
                if (cntl & BMCR_ANENABLE)
                        up = (val & (BMSR_ANEGCOMPLETE | BMSR_LSTATUS)) == (BMSR_ANEGCOMPLETE | BMSR_LSTATUS);
                else
                        up = (val & BMSR_LSTATUS) != 0;
                if (up) {
                        /* Ok, here we got a link. If we had it due to a forced
                         * fallback, and we were configured for autoneg, we do
                         * retry a short autoneg pass. If you know your hub is
                         * broken, use ethtool ;)
                         */
                        if (gp->lstate == link_force_try && (gp->link_cntl & BMCR_ANENABLE)) {
                                gp->lstate = link_force_ret;
                                gp->link_fcntl = phy_read(gp, MII_BMCR);
                                gp->timer_ticks = 5;
                                if (netif_msg_link(gp))
                                        printk(KERN_INFO "%s: Got link after fallback, retrying"
                                                " autoneg once...\n", gp->dev->name);
                                phy_write(gp, MII_BMCR,
                                          gp->link_fcntl | BMCR_ANENABLE | BMCR_ANRESTART);
                        } else if (gp->lstate != link_up) {
                                gp->lstate = link_up;
                                if (gp->opened)
                                        gem_set_link_modes(gp);
                        }
                } else {
                        int restart = 0;

                        /* If the link was previously up, we restart the
                         * whole process
                         */
                        if (gp->lstate == link_up) {
                                gp->lstate = link_down;
                                if (netif_msg_link(gp))
                                        printk(KERN_INFO "%s: Link down\n",
                                                gp->dev->name);
                                gp->reset_task_pending = 2;
                                schedule_task(&gp->reset_task);
                                restart = 1;
                        } else if (++gp->timer_ticks > 10)
                                restart = gem_mdio_link_not_up(gp);

                        if (restart) {
                                gem_begin_auto_negotiation(gp, NULL);
                                goto out_unlock;
                        }
                }
        } else {
                u32 val = readl(gp->regs + PCS_MIISTAT);

                if (!(val & PCS_MIISTAT_LS))
                        val = readl(gp->regs + PCS_MIISTAT);

                if ((val & PCS_MIISTAT_LS) != 0) {
                        gp->lstate = link_up;
                        if (gp->opened)
                                gem_set_link_modes(gp);
                }
        }

restart:
        mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
out_unlock:
        spin_unlock_irq(&gp->lock);
}
1432
 
/* Must be invoked under gp->lock. */
static void gem_clean_rings(struct gem *gp)
{
        struct gem_init_block *gb = gp->init_block;
        struct sk_buff *skb;
        int i;
        dma_addr_t dma_addr;

        for (i = 0; i < RX_RING_SIZE; i++) {
                struct gem_rxd *rxd;

                rxd = &gb->rxd[i];
                if (gp->rx_skbs[i] != NULL) {
                        skb = gp->rx_skbs[i];
                        dma_addr = le64_to_cpu(rxd->buffer);
                        pci_unmap_page(gp->pdev, dma_addr,
                                       RX_BUF_ALLOC_SIZE(gp),
                                       PCI_DMA_FROMDEVICE);
                        dev_kfree_skb_any(skb);
                        gp->rx_skbs[i] = NULL;
                }
                rxd->status_word = 0;
                wmb();
                rxd->buffer = 0;
        }

        for (i = 0; i < TX_RING_SIZE; i++) {
                if (gp->tx_skbs[i] != NULL) {
                        struct gem_txd *txd;
                        int frag;

                        skb = gp->tx_skbs[i];
                        gp->tx_skbs[i] = NULL;

                        for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
                                int ent = i & (TX_RING_SIZE - 1);

                                txd = &gb->txd[ent];
                                dma_addr = le64_to_cpu(txd->buffer);
                                pci_unmap_page(gp->pdev, dma_addr,
                                               le64_to_cpu(txd->control_word) &
                                               TXDCTRL_BUFSZ, PCI_DMA_TODEVICE);

                                if (frag != skb_shinfo(skb)->nr_frags)
                                        i++;
                        }
                        dev_kfree_skb_any(skb);
                }
        }
}
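
/* Rebuild both rings from a clean state.  The wmb() between writing
 * an RX descriptor's buffer address and its status word is what hands
 * the descriptor to the chip: the DMA address must be visible before
 * RXDCTRL_FRESH marks the entry as ready.
 */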
 
/* Must be invoked under gp->lock. */
static void gem_init_rings(struct gem *gp)
{
        struct gem_init_block *gb = gp->init_block;
        struct net_device *dev = gp->dev;
        int i;
        dma_addr_t dma_addr;

        gp->rx_new = gp->rx_old = gp->tx_new = gp->tx_old = 0;

        gem_clean_rings(gp);

        for (i = 0; i < RX_RING_SIZE; i++) {
                struct sk_buff *skb;
                struct gem_rxd *rxd = &gb->rxd[i];

                skb = gem_alloc_skb(RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC);
                if (!skb) {
                        rxd->buffer = 0;
                        rxd->status_word = 0;
                        continue;
                }

                gp->rx_skbs[i] = skb;
                skb->dev = dev;
                skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET));
                dma_addr = pci_map_page(gp->pdev,
                                        virt_to_page(skb->data),
                                        ((unsigned long) skb->data &
                                         ~PAGE_MASK),
                                        RX_BUF_ALLOC_SIZE(gp),
                                        PCI_DMA_FROMDEVICE);
                rxd->buffer = cpu_to_le64(dma_addr);
                wmb();
                rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
                skb_reserve(skb, RX_OFFSET);
        }

        for (i = 0; i < TX_RING_SIZE; i++) {
                struct gem_txd *txd = &gb->txd[i];

                txd->control_word = 0;
                wmb();
                txd->buffer = 0;
        }
        wmb();
}
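
/* BMCR_RESET is self clearing, so after requesting the reset we poll
 * the PHY until the bit drops (or the 10000 iteration budget runs
 * out).  Returns non-zero if the reset never completed.
 */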
 
/* Must be invoked under gp->lock. */
static int gem_reset_one_mii_phy(struct gem *gp, int phy_addr)
{
        u16 val;
        int limit = 10000;

        val = __phy_read(gp, MII_BMCR, phy_addr);
        val &= ~BMCR_ISOLATE;
        val |= BMCR_RESET;
        __phy_write(gp, MII_BMCR, val, phy_addr);

        udelay(100);

        while (limit--) {
                val = __phy_read(gp, MII_BMCR, phy_addr);
                if ((val & BMCR_RESET) == 0)
                        break;
                udelay(10);
        }
        if ((val & BMCR_ISOLATE) && limit > 0)
                __phy_write(gp, MII_BMCR, val & ~BMCR_ISOLATE, phy_addr);

        return (limit <= 0);
}
 
/* Must be invoked under gp->lock. */
static void gem_init_bcm5201_phy(struct gem *gp)
{
        u16 data;

        data = phy_read(gp, MII_BCM5201_MULTIPHY);
        data &= ~MII_BCM5201_MULTIPHY_SUPERISOLATE;
        phy_write(gp, MII_BCM5201_MULTIPHY, data);
}
 
/* Must be invoked under gp->lock. */
static void gem_init_bcm5400_phy(struct gem *gp)
{
        u16 data;

        /* Configure for gigabit full duplex */
        data = phy_read(gp, MII_BCM5400_AUXCONTROL);
        data |= MII_BCM5400_AUXCONTROL_PWR10BASET;
        phy_write(gp, MII_BCM5400_AUXCONTROL, data);

        data = phy_read(gp, MII_BCM5400_GB_CONTROL);
        data |= MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP;
        phy_write(gp, MII_BCM5400_GB_CONTROL, data);

        mdelay(10);

        /* Reset and configure cascaded 10/100 PHY */
        gem_reset_one_mii_phy(gp, 0x1f);

        data = __phy_read(gp, MII_BCM5201_MULTIPHY, 0x1f);
        data |= MII_BCM5201_MULTIPHY_SERIALMODE;
        __phy_write(gp, MII_BCM5201_MULTIPHY, data, 0x1f);

        data = phy_read(gp, MII_BCM5400_AUXCONTROL);
        data &= ~MII_BCM5400_AUXCONTROL_PWR10BASET;
        phy_write(gp, MII_BCM5400_AUXCONTROL, data);
}
 
/* Must be invoked under gp->lock. */
static void gem_init_bcm5401_phy(struct gem *gp)
{
        u16 data;
        int rev;

        rev = phy_read(gp, MII_PHYSID2) & 0x000f;
        if (rev == 0 || rev == 3) {
                /* Some revisions of 5401 appear to need this
                 * initialisation sequence to disable, according
                 * to OF, "tap power management"
                 *
                 * WARNING ! OF and Darwin don't agree on the
                 * register addresses. OF seems to interpret the
                 * register numbers below as decimal
                 *
                 * Note: This should (and does) match tg3_init_5401phy_dsp
                 *       in the tg3.c driver. -DaveM
                 */
                phy_write(gp, 0x18, 0x0c20);
                phy_write(gp, 0x17, 0x0012);
                phy_write(gp, 0x15, 0x1804);
                phy_write(gp, 0x17, 0x0013);
                phy_write(gp, 0x15, 0x1204);
                phy_write(gp, 0x17, 0x8006);
                phy_write(gp, 0x15, 0x0132);
                phy_write(gp, 0x17, 0x8006);
                phy_write(gp, 0x15, 0x0232);
                phy_write(gp, 0x17, 0x201f);
                phy_write(gp, 0x15, 0x0a20);
        }

        /* Configure for gigabit full duplex */
        data = phy_read(gp, MII_BCM5400_GB_CONTROL);
        data |= MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP;
        phy_write(gp, MII_BCM5400_GB_CONTROL, data);

        mdelay(1);

        /* Reset and configure cascaded 10/100 PHY */
        gem_reset_one_mii_phy(gp, 0x1f);

        data = __phy_read(gp, MII_BCM5201_MULTIPHY, 0x1f);
        data |= MII_BCM5201_MULTIPHY_SERIALMODE;
        __phy_write(gp, MII_BCM5201_MULTIPHY, data, 0x1f);
}
 
/* Must be invoked under gp->lock. */
static void gem_init_bcm5411_phy(struct gem *gp)
{
        u16 data;

        /* Here's some more Apple black magic to setup
         * some voltage-related settings.
         */
        phy_write(gp, 0x1c, 0x8c23);
        phy_write(gp, 0x1c, 0x8ca3);
        phy_write(gp, 0x1c, 0x8c23);

        /* Here, Apple seems to want to reset it, do
         * it as well
         */
        phy_write(gp, MII_BMCR, BMCR_RESET);

        /* Start autoneg */
        phy_write(gp, MII_BMCR,
                  (BMCR_ANENABLE | BMCR_FULLDPLX |
                   BMCR_ANRESTART | BMCR_SPD2));

        data = phy_read(gp, MII_BCM5400_GB_CONTROL);
        data |= MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP;
        phy_write(gp, MII_BCM5400_GB_CONTROL, data);
}
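
/* Main PHY/PCS bringup.  Roughly: undo what gem_stop_phy() did to the
 * MIF, find the PHY again on Apple hardware, program the datapath
 * mode, run the model specific init above, then set up either the MII
 * advertisement registers or the PCS unit (Serialink/SERDES case).
 */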
 
/* Must be invoked under gp->lock. */
static void gem_init_phy(struct gem *gp)
{
        u32 mifcfg;

        if (!gp->wake_on_lan && gp->phy_mod == phymod_bcm5201)
                phy_write(gp, MII_BCM5201_INTERRUPT, 0);

        /* Revert MIF CFG setting done on stop_phy */
        mifcfg = readl(gp->regs + MIF_CFG);
        mifcfg &= ~MIF_CFG_BBMODE;
        writel(mifcfg, gp->regs + MIF_CFG);

#ifdef CONFIG_ALL_PPC
        if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) {
                int i;

                pmac_call_feature(PMAC_FTR_GMAC_PHY_RESET, gp->of_node, 0, 0);
                for (i = 0; i < 32; i++) {
                        gp->mii_phy_addr = i;
                        if (phy_read(gp, MII_BMCR) != 0xffff)
                                break;
                }
                if (i == 32) {
                        printk(KERN_WARNING "%s: GMAC PHY not responding !\n",
                               gp->dev->name);
                        return;
                }
        }
#endif /* CONFIG_ALL_PPC */

        if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
            gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
                u32 val;

                /* Init datapath mode register. */
                if (gp->phy_type == phy_mii_mdio0 ||
                    gp->phy_type == phy_mii_mdio1) {
                        val = PCS_DMODE_MGM;
                } else if (gp->phy_type == phy_serialink) {
                        val = PCS_DMODE_SM | PCS_DMODE_GMOE;
                } else {
                        val = PCS_DMODE_ESM;
                }

                writel(val, gp->regs + PCS_DMODE);
        }

        if (gp->phy_type == phy_mii_mdio0 ||
            gp->phy_type == phy_mii_mdio1) {
                u32 phy_id;
                u16 val;

                /* Take PHY out of isolate mode and reset it. */
                gem_reset_one_mii_phy(gp, gp->mii_phy_addr);

                phy_id = (phy_read(gp, MII_PHYSID1) << 16 | phy_read(gp, MII_PHYSID2))
                                & 0xfffffff0;
                printk(KERN_INFO "%s: MII PHY ID: %x ", gp->dev->name, phy_id);
                switch(phy_id) {
                case 0x406210:
                        gp->phy_mod = phymod_bcm5201;
                        gem_init_bcm5201_phy(gp);
                        printk("BCM 5201\n");
                        break;

                case 0x4061e0:
                        printk("BCM 5221\n");
                        gp->phy_mod = phymod_bcm5221;
                        break;

                case 0x206040:
                        printk("BCM 5400\n");
                        gp->phy_mod = phymod_bcm5400;
                        gem_init_bcm5400_phy(gp);
                        gp->gigabit_capable = 1;
                        break;

                case 0x206050:
                        printk("BCM 5401\n");
                        gp->phy_mod = phymod_bcm5401;
                        gem_init_bcm5401_phy(gp);
                        gp->gigabit_capable = 1;
                        break;

                case 0x206070:
                        printk("BCM 5411\n");
                        gp->phy_mod = phymod_bcm5411;
                        gem_init_bcm5411_phy(gp);
                        gp->gigabit_capable = 1;
                        break;
                case 0x1410c60:
                        printk("M1011 (Marvell ?)\n");
                        gp->phy_mod = phymod_m1011;
                        gp->gigabit_capable = 1;
                        break;

                case 0x18074c0:
                        printk("Lucent\n");
                        gp->phy_mod = phymod_generic;
                        break;

                case 0x437420:
                        printk("Enable Semiconductor\n");
                        gp->phy_mod = phymod_generic;
                        break;

                default:
                        printk("Unknown (Using generic mode)\n");
                        gp->phy_mod = phymod_generic;
                        break;
                }

                /* Init advertisement and enable autonegotiation. */
                val = phy_read(gp, MII_BMCR);
                val &= ~BMCR_ANENABLE;
                phy_write(gp, MII_BMCR, val);
                udelay(10);

                phy_write(gp, MII_ADVERTISE,
                          phy_read(gp, MII_ADVERTISE) |
                          (ADVERTISE_10HALF | ADVERTISE_10FULL |
                           ADVERTISE_100HALF | ADVERTISE_100FULL));
        } else {
                u32 val;
                int limit;

                /* Reset PCS unit. */
                val = readl(gp->regs + PCS_MIICTRL);
                val |= PCS_MIICTRL_RST;
                writel(val, gp->regs + PCS_MIICTRL);
 
                limit = 32;
                while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) {
                        udelay(100);
                        if (limit-- <= 0)
                                break;
                }
                if (limit <= 0)
                        printk(KERN_WARNING "%s: PCS reset bit would not clear.\n",
                               gp->dev->name);

                /* Make sure PCS is disabled while changing advertisement
                 * configuration.
                 */
                val = readl(gp->regs + PCS_CFG);
                val &= ~(PCS_CFG_ENABLE | PCS_CFG_TO);
                writel(val, gp->regs + PCS_CFG);

                /* Advertise all capabilities except asymmetric
                 * pause.
                 */
                val = readl(gp->regs + PCS_MIIADV);
                val |= (PCS_MIIADV_FD | PCS_MIIADV_HD |
                        PCS_MIIADV_SP | PCS_MIIADV_AP);
                writel(val, gp->regs + PCS_MIIADV);

                /* Enable and restart auto-negotiation, disable wrapback/loopback,
                 * and re-enable PCS.
                 */
                val = readl(gp->regs + PCS_MIICTRL);
                val |= (PCS_MIICTRL_RAN | PCS_MIICTRL_ANE);
                val &= ~PCS_MIICTRL_WB;
                writel(val, gp->regs + PCS_MIICTRL);

                val = readl(gp->regs + PCS_CFG);
                val |= PCS_CFG_ENABLE;
                writel(val, gp->regs + PCS_CFG);

                /* Make sure serialink loopback is off.  The meaning
                 * of this bit is logically inverted based upon whether
                 * you are in Serialink or SERDES mode.
                 */
                val = readl(gp->regs + PCS_SCTRL);
                if (gp->phy_type == phy_serialink)
                        val &= ~PCS_SCTRL_LOOP;
                else
                        val |= PCS_SCTRL_LOOP;
                writel(val, gp->regs + PCS_SCTRL);
                gp->gigabit_capable = 1;
        }

        /* BMCR_SPD2 is a Broadcom 54xx specific thing afaik */
        if (gp->phy_mod != phymod_bcm5400 && gp->phy_mod != phymod_bcm5401 &&
            gp->phy_mod != phymod_bcm5411)
                gp->link_cntl &= ~BMCR_SPD2;
}
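
/* Program the DMA engines.  The TX and RX descriptor rings share one
 * DMA block (TX ring first), so the RX base below is simply the TX
 * base plus the size of the TX ring.  The initial RX kick leaves a
 * few entries of slack between the last posted descriptor and the
 * point where the chip would catch up with us.
 */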
 
/* Must be invoked under gp->lock. */
static void gem_init_dma(struct gem *gp)
{
        u64 desc_dma = (u64) gp->gblock_dvma;
        u32 val;

        val = (TXDMA_CFG_BASE | (0x7ff << 10) | TXDMA_CFG_PMODE);
        writel(val, gp->regs + TXDMA_CFG);

        writel(desc_dma >> 32, gp->regs + TXDMA_DBHI);
        writel(desc_dma & 0xffffffff, gp->regs + TXDMA_DBLOW);
        desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd));

        writel(0, gp->regs + TXDMA_KICK);

        val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
               ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
        writel(val, gp->regs + RXDMA_CFG);

        writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
        writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);

        writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);

        val  = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF);
        val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON);
        writel(val, gp->regs + RXDMA_PTHRESH);

        if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
                writel(((5 & RXDMA_BLANK_IPKTS) |
                        ((8 << 12) & RXDMA_BLANK_ITIME)),
                       gp->regs + RXDMA_BLANK);
        else
                writel(((5 & RXDMA_BLANK_IPKTS) |
                        ((4 << 12) & RXDMA_BLANK_ITIME)),
                       gp->regs + RXDMA_BLANK);
}
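
/* Compute the RX config bits for the current filtering mode.  The
 * hash filter maps each multicast address onto one of 256 bits: the
 * top byte of the little-endian CRC-32 of the address is the bit
 * number, its upper nibble picking one of the 16 hash registers and
 * its lower nibble the bit within that register.
 */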
 
/* Must be invoked under gp->lock. */
static u32
gem_setup_multicast(struct gem *gp)
{
        u32 rxcfg = 0;
        int i;

        if ((gp->dev->flags & IFF_ALLMULTI) ||
            (gp->dev->mc_count > 256)) {
                for (i=0; i<16; i++)
                        writel(0xffff, gp->regs + MAC_HASH0 + (i << 2));
                rxcfg |= MAC_RXCFG_HFE;
        } else if (gp->dev->flags & IFF_PROMISC) {
                rxcfg |= MAC_RXCFG_PROM;
        } else {
                u16 hash_table[16];
                u32 crc;
                struct dev_mc_list *dmi = gp->dev->mc_list;
                int i;

                for (i = 0; i < 16; i++)
                        hash_table[i] = 0;

                for (i = 0; i < gp->dev->mc_count; i++) {
                        char *addrs = dmi->dmi_addr;

                        dmi = dmi->next;

                        if (!(*addrs & 1))
                                continue;

                        crc = ether_crc_le(6, addrs);
                        crc >>= 24;
                        hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
                }
                for (i=0; i<16; i++)
                        writel(hash_table[i], gp->regs + MAC_HASH0 + (i << 2));
                rxcfg |= MAC_RXCFG_HFE;
        }

        return rxcfg;
}
 
/* Must be invoked under gp->lock. */
static void gem_init_mac(struct gem *gp)
{
        unsigned char *e = &gp->dev->dev_addr[0];

        if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
            gp->pdev->device == PCI_DEVICE_ID_SUN_GEM)
                writel(0x1bf0, gp->regs + MAC_SNDPAUSE);

        writel(0x00, gp->regs + MAC_IPG0);
        writel(0x08, gp->regs + MAC_IPG1);
        writel(0x04, gp->regs + MAC_IPG2);
        writel(0x40, gp->regs + MAC_STIME);
        writel(0x40, gp->regs + MAC_MINFSZ);

        /* Ethernet payload + header + FCS + optional VLAN tag. */
        writel(0x20000000 | (gp->dev->mtu + ETH_HLEN + 4 + 4), gp->regs + MAC_MAXFSZ);

        writel(0x07, gp->regs + MAC_PASIZE);
        writel(0x04, gp->regs + MAC_JAMSIZE);
        writel(0x10, gp->regs + MAC_ATTLIM);
        writel(0x8808, gp->regs + MAC_MCTYPE);

        writel((e[5] | (e[4] << 8)) & 0x3ff, gp->regs + MAC_RANDSEED);

        writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0);
        writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1);
        writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2);

        writel(0, gp->regs + MAC_ADDR3);
        writel(0, gp->regs + MAC_ADDR4);
        writel(0, gp->regs + MAC_ADDR5);

        writel(0x0001, gp->regs + MAC_ADDR6);
        writel(0xc200, gp->regs + MAC_ADDR7);
        writel(0x0180, gp->regs + MAC_ADDR8);

        writel(0, gp->regs + MAC_AFILT0);
        writel(0, gp->regs + MAC_AFILT1);
        writel(0, gp->regs + MAC_AFILT2);
        writel(0, gp->regs + MAC_AF21MSK);
        writel(0, gp->regs + MAC_AF0MSK);

        gp->mac_rx_cfg = gem_setup_multicast(gp);

        writel(0, gp->regs + MAC_NCOLL);
        writel(0, gp->regs + MAC_FASUCC);
        writel(0, gp->regs + MAC_ECOLL);
        writel(0, gp->regs + MAC_LCOLL);
        writel(0, gp->regs + MAC_DTIMER);
        writel(0, gp->regs + MAC_PATMPS);
        writel(0, gp->regs + MAC_RFCTR);
        writel(0, gp->regs + MAC_LERR);
        writel(0, gp->regs + MAC_AERR);
        writel(0, gp->regs + MAC_FCSERR);
        writel(0, gp->regs + MAC_RXCVERR);

        /* Clear RX/TX/MAC/XIF config, we will set these up and enable
         * them once a link is established.
         */
        writel(0, gp->regs + MAC_TXCFG);
        writel(gp->mac_rx_cfg, gp->regs + MAC_RXCFG);
        writel(0, gp->regs + MAC_MCCFG);
        writel(0, gp->regs + MAC_XIFCFG);

        /* Setup MAC interrupts.  We want to get all of the interesting
         * counter expiration events, but we do not want to hear about
         * normal rx/tx as the DMA engine tells us that.
         */
        writel(MAC_TXSTAT_XMIT, gp->regs + MAC_TXMASK);
        writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK);

        /* Don't enable even the PAUSE interrupts for now, we
         * make no use of those events other than to record them.
         */
        writel(0xffffffff, gp->regs + MAC_MCMASK);
}
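
/* The pause thresholds are programmed in units of 64 bytes of RX FIFO
 * occupancy.  max_frame below is a full MTU-sized frame (plus header,
 * FCS and VLAN tag) rounded up to a multiple of 64; the intent seems
 * to be to XOFF the link partner once less than two such frames of
 * FIFO space remain.
 */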
 
/* Must be invoked under gp->lock. */
static void gem_init_pause_thresholds(struct gem *gp)
{
        /* Calculate pause thresholds.  Setting the OFF threshold to the
         * full RX fifo size effectively disables PAUSE generation which
         * is what we do for 10/100 only GEMs which have FIFOs too small
         * to make real gains from PAUSE.
         */
        if (gp->rx_fifo_sz <= (2 * 1024)) {
                gp->rx_pause_off = gp->rx_pause_on = gp->rx_fifo_sz;
        } else {
                int max_frame = (gp->dev->mtu + ETH_HLEN + 4 + 4 + 64) & ~63;
                int off = (gp->rx_fifo_sz - (max_frame * 2));
                int on = off - max_frame;

                gp->rx_pause_off = off;
                gp->rx_pause_on = on;
        }

        {
                u32 cfg;

                cfg  = 0;
#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
                cfg |= GREG_CFG_IBURST;
#endif
                cfg |= ((31 << 1) & GREG_CFG_TXDMALIM);
                cfg |= ((31 << 6) & GREG_CFG_RXDMALIM);
                writel(cfg, gp->regs + GREG_CFG);
        }
}
 
static int gem_check_invariants(struct gem *gp)
{
        struct pci_dev *pdev = gp->pdev;
        u32 mif_cfg;

        /* On Apple's sungem, we can't rely on registers as the chip
         * has been powered down by the firmware. The PHY is looked
         * up later on.
         */
        if (pdev->vendor == PCI_VENDOR_ID_APPLE) {
                gp->phy_type = phy_mii_mdio0;
                gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64;
                gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64;
                gp->swrst_base = 0;
                return 0;
        }

        mif_cfg = readl(gp->regs + MIF_CFG);

        if (pdev->vendor == PCI_VENDOR_ID_SUN &&
            pdev->device == PCI_DEVICE_ID_SUN_RIO_GEM) {
                /* One of the MII PHYs _must_ be present
                 * as this chip has no gigabit PHY.
                 */
                if ((mif_cfg & (MIF_CFG_MDI0 | MIF_CFG_MDI1)) == 0) {
                        printk(KERN_ERR PFX "RIO GEM lacks MII phy, mif_cfg[%08x]\n",
                               mif_cfg);
                        return -1;
                }
        }

        /* Determine initial PHY interface type guess.  MDIO1 is the
         * external PHY and thus takes precedence over MDIO0.
         */

        if (mif_cfg & MIF_CFG_MDI1) {
                gp->phy_type = phy_mii_mdio1;
                mif_cfg |= MIF_CFG_PSELECT;
                writel(mif_cfg, gp->regs + MIF_CFG);
        } else if (mif_cfg & MIF_CFG_MDI0) {
                gp->phy_type = phy_mii_mdio0;
                mif_cfg &= ~MIF_CFG_PSELECT;
                writel(mif_cfg, gp->regs + MIF_CFG);
        } else {
                gp->phy_type = phy_serialink;
        }
        if (gp->phy_type == phy_mii_mdio1 ||
            gp->phy_type == phy_mii_mdio0) {
                int i;

                for (i = 0; i < 32; i++) {
                        gp->mii_phy_addr = i;
                        if (phy_read(gp, MII_BMCR) != 0xffff)
                                break;
                }
                if (i == 32) {
                        if (pdev->device != PCI_DEVICE_ID_SUN_GEM) {
                                printk(KERN_ERR PFX "RIO MII phy will not respond.\n");
                                return -1;
                        }
                        gp->phy_type = phy_serdes;
                }
        }

        /* Fetch the FIFO configurations now too. */
        gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64;
        gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64;

        if (pdev->vendor == PCI_VENDOR_ID_SUN) {
                if (pdev->device == PCI_DEVICE_ID_SUN_GEM) {
                        if (gp->tx_fifo_sz != (9 * 1024) ||
                            gp->rx_fifo_sz != (20 * 1024)) {
                                printk(KERN_ERR PFX "GEM has bogus fifo sizes tx(%d) rx(%d)\n",
                                       gp->tx_fifo_sz, gp->rx_fifo_sz);
                                return -1;
                        }
                        gp->swrst_base = 0;
                } else {
                        if (gp->tx_fifo_sz != (2 * 1024) ||
                            gp->rx_fifo_sz != (2 * 1024)) {
                                printk(KERN_ERR PFX "RIO GEM has bogus fifo sizes tx(%d) rx(%d)\n",
                                       gp->tx_fifo_sz, gp->rx_fifo_sz);
                                return -1;
                        }
                        gp->swrst_base = (64 / 4) << GREG_SWRST_CACHE_SHIFT;
                }
        }

        return 0;
}
 
/* Must be invoked under gp->lock. */
static void gem_init_hw(struct gem *gp, int restart_link)
{
        /* On Apple's gmac, I initialize the PHY only after
         * setting up the chip. It appears the gigabit PHYs
         * don't quite like being talked to on the GII when
         * the chip is not running, I suspect it might not
         * be clocked at that point. --BenH
         */
        if (restart_link)
                gem_init_phy(gp);
        gem_init_pause_thresholds(gp);
        gem_init_dma(gp);
        gem_init_mac(gp);

        if (restart_link) {
                /* Default aneg parameters */
                gp->timer_ticks = 0;
                gp->lstate = link_down;

                /* Can I advertise gigabit here ? I'd need BCM PHY docs... */
                gem_begin_auto_negotiation(gp, NULL);
        } else {
                if (gp->lstate == link_up)
                        gem_set_link_modes(gp);
        }
}
 
#ifdef CONFIG_ALL_PPC
/* Enable the chip's clock and make sure its config space is
 * set up properly. There appears to be no need to restore the
 * base addresses.
 */
static void gem_apple_powerup(struct gem *gp)
{
        u16 cmd;
        u32 mif_cfg;

        pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 1);

        current->state = TASK_UNINTERRUPTIBLE;
        schedule_timeout((21 * HZ) / 1000);

        pci_read_config_word(gp->pdev, PCI_COMMAND, &cmd);
        cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE;
        pci_write_config_word(gp->pdev, PCI_COMMAND, cmd);
        pci_write_config_byte(gp->pdev, PCI_LATENCY_TIMER, 6);
        pci_write_config_byte(gp->pdev, PCI_CACHE_LINE_SIZE, 8);

        mdelay(1);

        mif_cfg = readl(gp->regs + MIF_CFG);
        mif_cfg &= ~(MIF_CFG_PSELECT|MIF_CFG_POLL|MIF_CFG_BBMODE|MIF_CFG_MDI1);
        mif_cfg |= MIF_CFG_MDI0;
        writel(mif_cfg, gp->regs + MIF_CFG);
        writel(PCS_DMODE_MGM, gp->regs + PCS_DMODE);
        writel(MAC_XIFCFG_OE, gp->regs + MAC_XIFCFG);

        mdelay(1);
}

/* Turn off the chip's clock */
static void gem_apple_powerdown(struct gem *gp)
{
        pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 0);
}

#endif /* CONFIG_ALL_PPC */
 
/* Must be invoked under gp->lock. */
static void gem_stop_phy(struct gem *gp)
{
        u32 mifcfg;

        if (!gp->wake_on_lan && gp->phy_mod == phymod_bcm5201)
                phy_write(gp, MII_BCM5201_INTERRUPT, 0);

        /* Make sure we aren't polling PHY status change. We
         * don't currently use that feature though
         */
        mifcfg = readl(gp->regs + MIF_CFG);
        mifcfg &= ~MIF_CFG_POLL;
        writel(mifcfg, gp->regs + MIF_CFG);

        /* Here's a strange hack used by both MacOS 9 and X */
        phy_write(gp, MII_LPA, phy_read(gp, MII_LPA));

        if (gp->wake_on_lan) {
                /* Setup wake-on-lan */
        } else
                writel(0, gp->regs + MAC_RXCFG);
        writel(0, gp->regs + MAC_TXCFG);
        writel(0, gp->regs + MAC_XIFCFG);
        writel(0, gp->regs + TXDMA_CFG);
        writel(0, gp->regs + RXDMA_CFG);

        if (!gp->wake_on_lan) {
                gem_stop(gp);
                writel(MAC_TXRST_CMD, gp->regs + MAC_TXRST);
                writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST);
                if (gp->phy_mod == phymod_bcm5400 || gp->phy_mod == phymod_bcm5401 ||
                    gp->phy_mod == phymod_bcm5411) {
#if 0 /* Commented out in Darwin... someone has those Darwin docs ? */
                        phy_write(gp, MII_BMCR, BMCR_PDOWN);
#endif
                } else if (gp->phy_mod == phymod_bcm5201 || gp->phy_mod == phymod_bcm5221) {
#if 0 /* Commented out in Darwin... someone has those Darwin docs ? */
                        u16 val = phy_read(gp, MII_BCM5201_AUXMODE2);
                        phy_write(gp, MII_BCM5201_AUXMODE2,
                                  val & ~MII_BCM5201_AUXMODE2_LOWPOWER);
#endif
                        phy_write(gp, MII_BCM5201_MULTIPHY, MII_BCM5201_MULTIPHY_SUPERISOLATE);
                } else if (gp->phy_mod == phymod_m1011)
                        phy_write(gp, MII_BMCR, BMCR_PDOWN);

                /* According to Apple, we must set the MDIO pins to this benign
                 * state or we may 1) eat more current, 2) damage some PHYs
                 */
                writel(mifcfg | MIF_CFG_BBMODE, gp->regs + MIF_CFG);
                writel(0, gp->regs + MIF_BBCLK);
                writel(0, gp->regs + MIF_BBDATA);
                writel(0, gp->regs + MIF_BBOENAB);
                writel(MAC_XIFCFG_GMII | MAC_XIFCFG_LBCK, gp->regs + MAC_XIFCFG);
                (void) readl(gp->regs + MAC_XIFCFG);
        }
}
 
/* Shut down the chip, must be called with pm_sem held.  */
static void gem_shutdown(struct gem *gp)
{
        /* Make us not-running to avoid timers respawning */
        gp->hw_running = 0;

        /* Stop the link timer */
        del_timer_sync(&gp->link_timer);

        /* Stop the reset task */
        while (gp->reset_task_pending)
                schedule();

        /* Actually stop the chip */
        spin_lock_irq(&gp->lock);
        if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) {
                gem_stop_phy(gp);

                spin_unlock_irq(&gp->lock);

#ifdef CONFIG_ALL_PPC
                /* Power down the chip */
                gem_apple_powerdown(gp);
#endif /* CONFIG_ALL_PPC */
        } else {
                gem_stop(gp);

                spin_unlock_irq(&gp->lock);
        }
}
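
/* Power management plumbing: gem_close() (and a failed gem_open())
 * arms pm_timer roughly 10 seconds into the future; when it fires we
 * end up here via the task queue and, unless the device was reopened
 * in the meantime, power the chip all the way down.
 */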
 
static void gem_pm_task(void *data)
{
        struct gem *gp = (struct gem *) data;

        /* We assume if we can't lock the pm_sem, then open() was
         * called again (or suspend()), and we can safely ignore
         * the PM request
         */
        if (down_trylock(&gp->pm_sem))
                return;

        /* Driver was re-opened or already shut down */
        if (gp->opened || !gp->hw_running) {
                up(&gp->pm_sem);
                return;
        }

        gem_shutdown(gp);

        up(&gp->pm_sem);
}

static void gem_pm_timer(unsigned long data)
{
        struct gem *gp = (struct gem *) data;

        schedule_task(&gp->pm_task);
}
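
/* Bring the interface up: cancel any pending power-down, power the
 * chip back up if needed (Apple), reset it, grab the interrupt while
 * it is still masked, then build the rings and program the hardware.
 * Autonegotiation is only restarted when the chip really was down.
 */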
 
static int gem_open(struct net_device *dev)
{
        struct gem *gp = dev->priv;
        int hw_was_up;

        down(&gp->pm_sem);

        hw_was_up = gp->hw_running;

        /* Stop the PM timer/task */
        del_timer(&gp->pm_timer);
        flush_scheduled_tasks();

        /* The power-management semaphore protects the hw_running
         * etc. state so it is safe to do this bit without gp->lock
         */
        if (!gp->hw_running) {
#ifdef CONFIG_ALL_PPC
                /* First, we need to bring up the chip */
                if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) {
                        gem_apple_powerup(gp);
                        gem_check_invariants(gp);
                }
#endif /* CONFIG_ALL_PPC */

                /* Reset the chip */
                spin_lock_irq(&gp->lock);
                gem_stop(gp);
                spin_unlock_irq(&gp->lock);

                gp->hw_running = 1;
        }

        /* We can now request the interrupt as we know it's masked
         * on the controller
         */
        if (request_irq(gp->pdev->irq, gem_interrupt,
                        SA_SHIRQ, dev->name, (void *)dev)) {
                printk(KERN_ERR "%s: failed to request irq !\n", gp->dev->name);

                spin_lock_irq(&gp->lock);
#ifdef CONFIG_ALL_PPC
                if (!hw_was_up && gp->pdev->vendor == PCI_VENDOR_ID_APPLE)
                        gem_apple_powerdown(gp);
#endif /* CONFIG_ALL_PPC */
                /* Fire the PM timer that will shut us down in about 10 seconds */
                gp->pm_timer.expires = jiffies + 10*HZ;
                add_timer(&gp->pm_timer);
                up(&gp->pm_sem);
                spin_unlock_irq(&gp->lock);

                return -EAGAIN;
        }

        spin_lock_irq(&gp->lock);

        /* Allocate & setup ring buffers */
        gem_init_rings(gp);

        /* Init & setup chip hardware */
        gem_init_hw(gp, !hw_was_up);

        gp->opened = 1;

        spin_unlock_irq(&gp->lock);

        up(&gp->pm_sem);

        return 0;
}
 
static int gem_close(struct net_device *dev)
{
        struct gem *gp = dev->priv;

        /* Make sure we don't get distracted by suspend/resume */
        down(&gp->pm_sem);

        /* Stop traffic, mark us closed */
        spin_lock_irq(&gp->lock);

        gp->opened = 0;
        writel(0xffffffff, gp->regs + GREG_IMASK);
        netif_stop_queue(dev);

        /* Stop chip */
        gem_stop(gp);

        /* Get rid of rings */
        gem_clean_rings(gp);

        /* Bye, the pm timer will finish the job */
        free_irq(gp->pdev->irq, (void *) dev);

        spin_unlock_irq(&gp->lock);

        /* Fire the PM timer that will shut us down in about 10 seconds */
        gp->pm_timer.expires = jiffies + 10*HZ;
        add_timer(&gp->pm_timer);

        up(&gp->pm_sem);

        return 0;
}
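
/* Note that gem_suspend() deliberately returns with pm_sem still
 * held; everything that takes it (open/close/ioctl) therefore blocks
 * until gem_resume() releases it again.
 */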
 
#ifdef CONFIG_PM
static int gem_suspend(struct pci_dev *pdev, u32 state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct gem *gp = dev->priv;

        /* We hold the PM semaphore during entire driver
         * sleep time
         */
        down(&gp->pm_sem);

        printk(KERN_INFO "%s: suspending, WakeOnLan %s\n",
               dev->name, gp->wake_on_lan ? "enabled" : "disabled");

        /* If the driver is opened, we stop the DMA */
        if (gp->opened) {
                spin_lock_irq(&gp->lock);

                /* Stop traffic, mark us closed */
                netif_device_detach(dev);

                writel(0xffffffff, gp->regs + GREG_IMASK);

                /* Stop chip */
                gem_stop(gp);

                /* Get rid of ring buffers */
                gem_clean_rings(gp);

                spin_unlock_irq(&gp->lock);

                if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE)
                        disable_irq(gp->pdev->irq);
        }

        if (gp->hw_running) {
                /* Kill PM timer if any */
                del_timer_sync(&gp->pm_timer);
                flush_scheduled_tasks();

                gem_shutdown(gp);
        }

        return 0;
}

static int gem_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct gem *gp = dev->priv;

        printk(KERN_INFO "%s: resuming\n", dev->name);

        if (gp->opened) {
#ifdef CONFIG_ALL_PPC
                /* First, we need to bring up the chip */
                if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) {
                        gem_apple_powerup(gp);
                        gem_check_invariants(gp);
                }
#endif /* CONFIG_ALL_PPC */
                spin_lock_irq(&gp->lock);

                gem_stop(gp);
                gp->hw_running = 1;
                gem_init_rings(gp);
                gem_init_hw(gp, 1);

                spin_unlock_irq(&gp->lock);

                netif_device_attach(dev);
                if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE)
                        enable_irq(gp->pdev->irq);
        }
        up(&gp->pm_sem);

        return 0;
}
#endif /* CONFIG_PM */
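
/* The MAC error counters are folded into net_stats and then reset to
 * zero, so each read only ever accumulates the delta since the
 * previous one.
 */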
 
static struct net_device_stats *gem_get_stats(struct net_device *dev)
{
        struct gem *gp = dev->priv;
        struct net_device_stats *stats = &gp->net_stats;

        spin_lock_irq(&gp->lock);

        if (gp->hw_running) {
                stats->rx_crc_errors += readl(gp->regs + MAC_FCSERR);
                writel(0, gp->regs + MAC_FCSERR);

                stats->rx_frame_errors += readl(gp->regs + MAC_AERR);
                writel(0, gp->regs + MAC_AERR);

                stats->rx_length_errors += readl(gp->regs + MAC_LERR);
                writel(0, gp->regs + MAC_LERR);

                stats->tx_aborted_errors += readl(gp->regs + MAC_ECOLL);
                stats->collisions +=
                        (readl(gp->regs + MAC_ECOLL) +
                         readl(gp->regs + MAC_LCOLL));
                writel(0, gp->regs + MAC_ECOLL);
                writel(0, gp->regs + MAC_LCOLL);
        }

        spin_unlock_irq(&gp->lock);

        return &gp->net_stats;
}
 
static void gem_set_multicast(struct net_device *dev)
{
        struct gem *gp = dev->priv;
        u32 rxcfg, rxcfg_new;
        int limit = 10000;

        if (!gp->hw_running)
                return;

        spin_lock_irq(&gp->lock);

        netif_stop_queue(dev);

        rxcfg = readl(gp->regs + MAC_RXCFG);
        gp->mac_rx_cfg = rxcfg_new = gem_setup_multicast(gp);

        writel(rxcfg & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
        while (readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB) {
                if (!limit--)
                        break;
                udelay(10);
        }

        rxcfg &= ~(MAC_RXCFG_PROM | MAC_RXCFG_HFE);
        rxcfg |= rxcfg_new;

        writel(rxcfg, gp->regs + MAC_RXCFG);

        netif_wake_queue(dev);

        spin_unlock_irq(&gp->lock);
}
 
/* Eventually add support for changing the advertisement
 * on autoneg.
 */
static int gem_ethtool_ioctl(struct net_device *dev, void *ep_user)
{
        struct gem *gp = dev->priv;
        u16 bmcr;
        int full_duplex, speed, pause;
        struct ethtool_cmd ecmd;

        if (copy_from_user(&ecmd, ep_user, sizeof(ecmd)))
                return -EFAULT;

        switch(ecmd.cmd) {
        case ETHTOOL_GDRVINFO: {
                struct ethtool_drvinfo info = { cmd: ETHTOOL_GDRVINFO };

                strncpy(info.driver, DRV_NAME, ETHTOOL_BUSINFO_LEN);
                strncpy(info.version, DRV_VERSION, ETHTOOL_BUSINFO_LEN);
                info.fw_version[0] = '\0';
                strncpy(info.bus_info, gp->pdev->slot_name, ETHTOOL_BUSINFO_LEN);
                info.regdump_len = 0; /*SUNGEM_NREGS;*/

                if (copy_to_user(ep_user, &info, sizeof(info)))
                        return -EFAULT;

                return 0;
        }

        case ETHTOOL_GSET:
                ecmd.supported =
                        (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
                         SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
                         SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII);

                if (gp->gigabit_capable)
                        ecmd.supported |=
                                (SUPPORTED_1000baseT_Half |
                                 SUPPORTED_1000baseT_Full);

                /* XXX hardcoded stuff for now */
                ecmd.port = PORT_MII;
                ecmd.transceiver = XCVR_EXTERNAL;
                ecmd.phy_address = 0; /* XXX fixed PHYAD */

                /* Record PHY settings if HW is on. */
                spin_lock_irq(&gp->lock);
                if (gp->hw_running) {
                        bmcr = phy_read(gp, MII_BMCR);
                        gem_read_mii_link_mode(gp, &full_duplex, &speed, &pause);
                } else
                        bmcr = 0;
                spin_unlock_irq(&gp->lock);
                if (bmcr & BMCR_ANENABLE) {
                        ecmd.autoneg = AUTONEG_ENABLE;
                        ecmd.speed = speed == 10 ? SPEED_10 : (speed == 1000 ? SPEED_1000 : SPEED_100);
                        ecmd.duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
                } else {
                        ecmd.autoneg = AUTONEG_DISABLE;
                        ecmd.speed =
                                (bmcr & BMCR_SPEED100) ?
                                SPEED_100 : SPEED_10;
                        ecmd.duplex =
                                (bmcr & BMCR_FULLDPLX) ?
                                DUPLEX_FULL : DUPLEX_HALF;
                }
                if (copy_to_user(ep_user, &ecmd, sizeof(ecmd)))
                        return -EFAULT;
                return 0;

        case ETHTOOL_SSET:
                /* Verify the settings we care about. */
                if (ecmd.autoneg != AUTONEG_ENABLE &&
                    ecmd.autoneg != AUTONEG_DISABLE)
                        return -EINVAL;

                if (ecmd.autoneg == AUTONEG_DISABLE &&
                    ((ecmd.speed != SPEED_100 &&
                      ecmd.speed != SPEED_10) ||
                     (ecmd.duplex != DUPLEX_HALF &&
                      ecmd.duplex != DUPLEX_FULL)))
                        return -EINVAL;

                /* Apply settings and restart link process. */
                spin_lock_irq(&gp->lock);
                gem_begin_auto_negotiation(gp, &ecmd);
                spin_unlock_irq(&gp->lock);

                return 0;

        case ETHTOOL_NWAY_RST:
                if ((gp->link_cntl & BMCR_ANENABLE) == 0)
                        return -EINVAL;

                /* Restart link process. */
                spin_lock_irq(&gp->lock);
                gem_begin_auto_negotiation(gp, NULL);
                spin_unlock_irq(&gp->lock);

                return 0;

        case ETHTOOL_GWOL:
        case ETHTOOL_SWOL:
                break; /* todo */

        /* get link status */
        case ETHTOOL_GLINK: {
                struct ethtool_value edata = { cmd: ETHTOOL_GLINK };

                edata.data = (gp->lstate == link_up);
                if (copy_to_user(ep_user, &edata, sizeof(edata)))
                        return -EFAULT;
                return 0;
        }

        /* get message-level */
        case ETHTOOL_GMSGLVL: {
                struct ethtool_value edata = { cmd: ETHTOOL_GMSGLVL };

                edata.data = gp->msg_enable;
                if (copy_to_user(ep_user, &edata, sizeof(edata)))
                        return -EFAULT;
                return 0;
        }

        /* set message-level */
        case ETHTOOL_SMSGLVL: {
                struct ethtool_value edata;

                if (copy_from_user(&edata, ep_user, sizeof(edata)))
                        return -EFAULT;
                gp->msg_enable = edata.data;
                return 0;
        }

#if 0
        case ETHTOOL_GREGS: {
                struct ethtool_regs regs;
                u32 *regbuf;
                int r = 0;

                if (copy_from_user(&regs, ep_user, sizeof(regs)))
                        return -EFAULT;

                if (regs.len > SUNGEM_NREGS) {
                        regs.len = SUNGEM_NREGS;
                }
                regs.version = 0;
                if (copy_to_user(ep_user, &regs, sizeof(regs)))
                        return -EFAULT;

                if (!gp->hw_running)
                        return -ENODEV;
                ep_user += offsetof(struct ethtool_regs, data);

                /* Use kmalloc to avoid bloating the stack */
                regbuf = kmalloc(4 * SUNGEM_NREGS, GFP_KERNEL);
                if (!regbuf)
                        return -ENOMEM;
                spin_lock_irq(&gp->lock);
                gem_get_regs(gp, regbuf);
                spin_unlock_irq(&gp->lock);

                if (copy_to_user(ep_user, regbuf, regs.len*sizeof(u32)))
                        r = -EFAULT;
                kfree(regbuf);
                return r;
        }
#endif
        }

        return -EOPNOTSUPP;
}
 
static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct gem *gp = dev->priv;
        struct mii_ioctl_data *data = (struct mii_ioctl_data *)&ifr->ifr_data;
        int rc = -EOPNOTSUPP;

        /* Hold the PM semaphore while doing ioctl's or we may collide
         * with open/close and power management and oops.
         */
        down(&gp->pm_sem);

        switch (cmd) {
        case SIOCETHTOOL:
                rc = gem_ethtool_ioctl(dev, ifr->ifr_data);
                break;

        case SIOCGMIIPHY:               /* Get address of MII PHY in use. */
                data->phy_id = gp->mii_phy_addr;
                /* Fallthrough... */

        case SIOCGMIIREG:               /* Read MII PHY register. */
                data->val_out = __phy_read(gp, data->reg_num & 0x1f, data->phy_id & 0x1f);
                rc = 0;
                break;

        case SIOCSMIIREG:               /* Write MII PHY register. */
                if (!capable(CAP_NET_ADMIN)) {
                        rc = -EPERM;
                } else {
                        __phy_write(gp, data->reg_num & 0x1f, data->val_in, data->phy_id & 0x1f);
                        rc = 0;
                }
                break;
        }

        up(&gp->pm_sem);

        return rc;
}
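
/* With neither OpenBoot nor Open Firmware around, we dig the MAC
 * address out of the expansion ROM.  The 6 byte signature below looks
 * like a VPD entry for a Sun "NA" (network address) keyword of length
 * 6 (0x4e = 'N', 0x41 = 'A', 0x06 = length); the station address is
 * taken from the bytes that follow it.
 */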
 
#if (!defined(__sparc__) && !defined(CONFIG_ALL_PPC))
/* Fetch MAC address from vital product data of PCI ROM. */
static void find_eth_addr_in_vpd(void *rom_base, int len, unsigned char *dev_addr)
{
        int this_offset;

        for (this_offset = 0x20; this_offset < len; this_offset++) {
                void *p = rom_base + this_offset;
                int i;

                if (readb(p + 0) != 0x90 ||
                    readb(p + 1) != 0x00 ||
                    readb(p + 2) != 0x09 ||
                    readb(p + 3) != 0x4e ||
                    readb(p + 4) != 0x41 ||
                    readb(p + 5) != 0x06)
                        continue;

                this_offset += 6;
                p += 6;

                for (i = 0; i < 6; i++)
                        dev_addr[i] = readb(p + i);
                break;
        }
}

static void get_gem_mac_nonobp(struct pci_dev *pdev, unsigned char *dev_addr)
{
        u32 rom_reg_orig;
        void *p;

        if (pdev->resource[PCI_ROM_RESOURCE].parent == NULL) {
                if (pci_assign_resource(pdev, PCI_ROM_RESOURCE) < 0)
                        goto use_random;
        }

        pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_reg_orig);
        pci_write_config_dword(pdev, pdev->rom_base_reg,
                               rom_reg_orig | PCI_ROM_ADDRESS_ENABLE);

        p = ioremap(pci_resource_start(pdev, PCI_ROM_RESOURCE), (64 * 1024));
        if (p != NULL && readb(p) == 0x55 && readb(p + 1) == 0xaa)
                find_eth_addr_in_vpd(p, (64 * 1024), dev_addr);

        if (p != NULL)
                iounmap(p);

        pci_write_config_dword(pdev, pdev->rom_base_reg, rom_reg_orig);
        return;

use_random:
        /* Sun MAC prefix then 3 random bytes. */
        dev_addr[0] = 0x08;
        dev_addr[1] = 0x00;
        dev_addr[2] = 0x20;
        get_random_bytes(dev_addr + 3, 3);
        return;
}
#endif /* not Sparc and not PPC */
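
/* Fetch the station address from whatever the platform provides: the
 * "local-mac-address" property on sparc and PowerMac (falling back to
 * the IDPROM on sparc), or the ROM/VPD scan above everywhere else.
 */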
 
static int __devinit gem_get_device_address(struct gem *gp)
{
#if defined(__sparc__) || defined(CONFIG_ALL_PPC)
        struct net_device *dev = gp->dev;
#endif

#if defined(__sparc__)
        struct pci_dev *pdev = gp->pdev;
        struct pcidev_cookie *pcp = pdev->sysdata;
        int node = -1;

        if (pcp != NULL) {
                node = pcp->prom_node;
                if (prom_getproplen(node, "local-mac-address") == 6)
                        prom_getproperty(node, "local-mac-address",
                                         dev->dev_addr, 6);
                else
                        node = -1;
        }
        if (node == -1)
                memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
#elif defined(CONFIG_ALL_PPC)
        unsigned char *addr;

        addr = get_property(gp->of_node, "local-mac-address", NULL);
        if (addr == NULL) {
                printk("\n");
                printk(KERN_ERR "%s: can't get mac-address\n", dev->name);
                return -1;
        }
        memcpy(dev->dev_addr, addr, MAX_ADDR_LEN);
#else
        get_gem_mac_nonobp(gp->pdev, gp->dev->dev_addr);
#endif
        return 0;
}
 
2886
static int __devinit gem_init_one(struct pci_dev *pdev,
2887
                                  const struct pci_device_id *ent)
2888
{
2889
        static int gem_version_printed = 0;
2890
        unsigned long gemreg_base, gemreg_len;
2891
        struct net_device *dev;
2892
        struct gem *gp;
2893
        int i, err, pci_using_dac;
2894
 
2895
        if (gem_version_printed++ == 0)
2896
                printk(KERN_INFO "%s", version);
2897
 
2898
        /* Apple gmac note: during probe, the chip is powered up by
2899
         * the arch code to allow the code below to work (and to let
2900
         * the chip be probed on the config space. It won't stay powered
2901
         * up until the interface is brought up however, so we can't rely
2902
         * on register configuration done at this point.
2903
         */
2904
        err = pci_enable_device(pdev);
        if (err) {
                printk(KERN_ERR PFX "Cannot enable MMIO operation, "
                       "aborting.\n");
                return err;
        }
        pci_set_master(pdev);

        /* Configure DMA attributes. */

        /* All of the GEM documentation states that 64-bit DMA addressing
         * is fully supported and should work just fine.  However, the
         * front end for RIO-based GEMs is different and only supports
         * 32-bit addressing.
         *
         * For now we assume the various PPC GEMs are 32-bit only as well.
         */
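        /* pci_set_dma_mask() returns 0 on success, so a zero result in
         * the test below means the 64-bit mask was accepted and DAC
         * (dual address cycle) addressing may be used.
         */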
        if (pdev->vendor == PCI_VENDOR_ID_SUN &&
            pdev->device == PCI_DEVICE_ID_SUN_GEM &&
            !pci_set_dma_mask(pdev, 0xffffffffffffffffULL)) {
                pci_using_dac = 1;
        } else {
                err = pci_set_dma_mask(pdev, (u64) 0xffffffff);
                if (err) {
                        printk(KERN_ERR PFX "No usable DMA configuration, "
                               "aborting.\n");
                        return err;
                }
                pci_using_dac = 0;
        }

        gemreg_base = pci_resource_start(pdev, 0);
        gemreg_len = pci_resource_len(pdev, 0);

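        /* The GEM register block must be behind a memory BAR; if BAR 0
         * decodes as I/O space instead, this is not a device we know how
         * to drive.
         */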
        if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) {
                printk(KERN_ERR PFX "Cannot find proper PCI device "
                       "base address, aborting.\n");
                return -ENODEV;
        }

        dev = alloc_etherdev(sizeof(*gp));
        if (!dev) {
                printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
                return -ENOMEM;
        }
        SET_MODULE_OWNER(dev);

        gp = dev->priv;

        if (pci_request_regions(pdev, dev->name)) {
                printk(KERN_ERR PFX "Cannot obtain PCI resources, "
                       "aborting.\n");
                goto err_out_free_netdev;
        }

        gp->pdev = pdev;
        dev->base_addr = (long) pdev;
        gp->dev = dev;

        gp->msg_enable = (gem_debug < 0 ? DEFAULT_MSG : gem_debug);

        spin_lock_init(&gp->lock);
        init_MUTEX(&gp->pm_sem);

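        /* Two timers: link_timer drives the PHY/link state machine during
         * autonegotiation, while pm_timer powers the chip down when the
         * interface sits idle (it is armed at the end of this function).
         */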
        init_timer(&gp->link_timer);
        gp->link_timer.function = gem_link_timer;
        gp->link_timer.data = (unsigned long) gp;

        init_timer(&gp->pm_timer);
        gp->pm_timer.function = gem_pm_timer;
        gp->pm_timer.data = (unsigned long) gp;

        INIT_TQUEUE(&gp->pm_task, gem_pm_task, gp);
        INIT_TQUEUE(&gp->reset_task, gem_reset_task, gp);

        /* Default link parameters: honour the link_mode module parameter
         * when it names a valid entry (0-6), otherwise autonegotiate.
         */
        if (link_mode >= 0 && link_mode <= 6)
                gp->link_cntl = link_modes[link_mode];
        else
                gp->link_cntl = BMCR_ANENABLE;
        gp->lstate = link_down;
        gp->timer_ticks = 0;

        gp->regs = (unsigned long) ioremap(gemreg_base, gemreg_len);
        if (gp->regs == 0UL) {
                printk(KERN_ERR PFX "Cannot map device registers, "
                       "aborting.\n");
                goto err_out_free_res;
        }

        /* On Apple, we power the chip up now in order for the invariant
         * checks below to work, but also because the firmware might not
         * have properly shut down the PHY.
         */
#ifdef CONFIG_ALL_PPC
        if (pdev->vendor == PCI_VENDOR_ID_APPLE)
                gem_apple_powerup(gp);
#endif
        spin_lock_irq(&gp->lock);
        gem_stop(gp);
        spin_unlock_irq(&gp->lock);

        if (gem_check_invariants(gp))
                goto err_out_iounmap;

        spin_lock_irq(&gp->lock);
        gp->hw_running = 1;
        gem_init_phy(gp);
        gem_begin_auto_negotiation(gp, NULL);
        spin_unlock_irq(&gp->lock);

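        /* The init block holds the RX and TX descriptor rings, so it is
         * allocated with pci_alloc_consistent() to keep it coherent
         * between the CPU and the device.
         */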
        /* It is guaranteed that the returned buffer will be at least
         * PAGE_SIZE aligned.
         */
        gp->init_block = (struct gem_init_block *)
                pci_alloc_consistent(pdev, sizeof(struct gem_init_block),
                                     &gp->gblock_dvma);
        if (!gp->init_block) {
                printk(KERN_ERR PFX "Cannot allocate init block, "
                       "aborting.\n");
                goto err_out_iounmap;
        }

#ifdef CONFIG_ALL_PPC
        gp->of_node = pci_device_to_OF_node(pdev);
#endif
        if (gem_get_device_address(gp))
                goto err_out_free_consistent;

        if (register_netdev(dev)) {
                printk(KERN_ERR PFX "Cannot register net device, "
                       "aborting.\n");
                goto err_out_free_consistent;
        }

        printk(KERN_INFO "%s: Sun GEM (PCI) 10/100/1000BaseT Ethernet ",
               dev->name);

        for (i = 0; i < 6; i++)
                printk("%2.2x%c", dev->dev_addr[i],
                       i == 5 ? ' ' : ':');
        printk("\n");

        pci_set_drvdata(pdev, dev);

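        /* Wire up the net_device entry points; in 2.4 these are individual
         * function pointers on the net_device itself.
         */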
        dev->open = gem_open;
        dev->stop = gem_close;
        dev->hard_start_xmit = gem_start_xmit;
        dev->get_stats = gem_get_stats;
        dev->set_multicast_list = gem_set_multicast;
        dev->do_ioctl = gem_ioctl;
        dev->tx_timeout = gem_tx_timeout;
        dev->watchdog_timeo = 5 * HZ;
        dev->change_mtu = gem_change_mtu;
        dev->irq = pdev->irq;
        dev->dma = 0;

        /* GEM can do it all: scatter-gather and hardware checksumming,
         * plus highmem DMA when the 64-bit mask was accepted.
         */
        dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
        if (pci_using_dac)
                dev->features |= NETIF_F_HIGHDMA;

        /* Fire the PM timer that will shut the chip back down in about
         * 10 seconds unless the interface is brought up first.
         */
        gp->pm_timer.expires = jiffies + 10*HZ;
        add_timer(&gp->pm_timer);

        return 0;

err_out_free_consistent:
        pci_free_consistent(pdev,
                            sizeof(struct gem_init_block),
                            gp->init_block,
                            gp->gblock_dvma);

err_out_iounmap:
        down(&gp->pm_sem);
        /* Stop the PM timer & task */
        del_timer_sync(&gp->pm_timer);
        flush_scheduled_tasks();
        if (gp->hw_running)
                gem_shutdown(gp);
        up(&gp->pm_sem);

        iounmap((void *) gp->regs);

err_out_free_res:
        pci_release_regions(pdev);

err_out_free_netdev:
        kfree(dev);

        return -ENODEV;
}

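/* Teardown mirrors the probe error path: unregister the interface,
 * quiesce the PM timer and any queued tasks, shut the hardware down,
 * then release the DMA block, register mapping and PCI resources.
 */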
static void __devexit gem_remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);

        if (dev) {
                struct gem *gp = dev->priv;

                unregister_netdev(dev);

                down(&gp->pm_sem);
                /* Stop the PM timer & task */
                del_timer_sync(&gp->pm_timer);
                flush_scheduled_tasks();
                if (gp->hw_running)
                        gem_shutdown(gp);
                up(&gp->pm_sem);

                pci_free_consistent(pdev,
                                    sizeof(struct gem_init_block),
                                    gp->init_block,
                                    gp->gblock_dvma);
                iounmap((void *) gp->regs);
                pci_release_regions(pdev);
                kfree(dev);

                pci_set_drvdata(pdev, NULL);
        }
}

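/* The driver table uses GNU C labeled-element initializers (name:),
 * the prevailing style in 2.4-era kernel code; the suspend/resume hooks
 * are only compiled in when CONFIG_PM is set.
 */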
static struct pci_driver gem_driver = {
        name:           GEM_MODULE_NAME,
        id_table:       gem_pci_tbl,
        probe:          gem_init_one,
        remove:         __devexit_p(gem_remove_one),
#ifdef CONFIG_PM
        suspend:        gem_suspend,
        resume:         gem_resume,
#endif /* CONFIG_PM */
};

static int __init gem_init(void)
{
        return pci_module_init(&gem_driver);
}

static void __exit gem_cleanup(void)
{
        pci_unregister_driver(&gem_driver);
}

module_init(gem_init);
module_exit(gem_cleanup);
