/* linux_sd_driver/drivers/net/sky2.c - blame listing for rev 62
 * (OpenCores SVN: https://opencores.org/ocsvn/test_project/test_project/trunk)
 */
1 62 marcus.erl
/*
2
 * New driver for Marvell Yukon 2 chipset.
3
 * Based on the earlier sk98lin and skge drivers.
4
 *
5
 * This driver intentionally does not support all the features
6
 * of the original driver such as link fail-over and link management because
7
 * those should be done at higher levels.
8
 *
9
 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
10
 *
11
 * This program is free software; you can redistribute it and/or modify
12
 * it under the terms of the GNU General Public License as published by
13
 * the Free Software Foundation; either version 2 of the License.
14
 *
15
 * This program is distributed in the hope that it will be useful,
16
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18
 * GNU General Public License for more details.
19
 *
20
 * You should have received a copy of the GNU General Public License
21
 * along with this program; if not, write to the Free Software
22
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23
 */
24
 
25
#include <linux/crc32.h>
26
#include <linux/kernel.h>
27
#include <linux/version.h>
28
#include <linux/module.h>
29
#include <linux/netdevice.h>
30
#include <linux/dma-mapping.h>
31
#include <linux/etherdevice.h>
32
#include <linux/ethtool.h>
33
#include <linux/pci.h>
34
#include <linux/ip.h>
35
#include <net/ip.h>
36
#include <linux/tcp.h>
37
#include <linux/in.h>
38
#include <linux/delay.h>
39
#include <linux/workqueue.h>
40
#include <linux/if_vlan.h>
41
#include <linux/prefetch.h>
42
#include <linux/debugfs.h>
43
#include <linux/mii.h>
44
 
45
#include <asm/irq.h>
46
 
47
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
48
#define SKY2_VLAN_TAG_USED 1
49
#endif
50
 
51
#include "sky2.h"
52
 
53
#define DRV_NAME                "sky2"
54
#define DRV_VERSION             "1.20"
55
#define PFX                     DRV_NAME " "
56
 
57
/*
58
 * The Yukon II chipset takes 64 bit command blocks (called list elements)
59
 * that are organized into three (receive, transmit, status) different rings
60
 * similar to Tigon3.
61
 */
62
 
63
#define RX_LE_SIZE              1024
64
#define RX_LE_BYTES             (RX_LE_SIZE*sizeof(struct sky2_rx_le))
65
#define RX_MAX_PENDING          (RX_LE_SIZE/6 - 2)
66
#define RX_DEF_PENDING          RX_MAX_PENDING
67
#define RX_SKB_ALIGN            8
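/* For illustration: RX_MAX_PENDING follows from the worst case described
 * above sky2_rx_start() below, where a single receive buffer can need up to
 * 6 list elements, so 1024/6 - 2 = 168 buffers may be pending; the "- 2"
 * leaves room for the checksum-start element and the spare slot that avoids
 * ring wrap.
 */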
68
 
69
#define TX_RING_SIZE            512
70
#define TX_DEF_PENDING          (TX_RING_SIZE - 1)
71
#define TX_MIN_PENDING          64
72
#define MAX_SKB_TX_LE           (4 + (sizeof(dma_addr_t)/sizeof(u32))*MAX_SKB_FRAGS)
73
 
74
#define STATUS_RING_SIZE        2048    /* 2 ports * (TX + 2*RX) */
75
#define STATUS_LE_BYTES         (STATUS_RING_SIZE*sizeof(struct sky2_status_le))
76
#define TX_WATCHDOG             (5 * HZ)
77
#define NAPI_WEIGHT             64
78
#define PHY_RETRIES             1000
79
 
80
#define SKY2_EEPROM_MAGIC       0x9955aabb
81
 
82
 
83
#define RING_NEXT(x,s)  (((x)+1) & ((s)-1))
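/* For example, with TX_RING_SIZE == 512: RING_NEXT(5, 512) == 6 and
 * RING_NEXT(511, 512) == 0, i.e. the index wraps. The mask form only works
 * because every ring size used here is a power of two.
 */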
84
 
85
static const u32 default_msg =
86
    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
87
    | NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR
88
    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
89
 
90
static int debug = -1;          /* defaults above */
91
module_param(debug, int, 0);
92
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
93
 
94
static int copybreak __read_mostly = 128;
95
module_param(copybreak, int, 0);
96
MODULE_PARM_DESC(copybreak, "Receive copy threshold");
97
 
98
static int disable_msi = 0;
99
module_param(disable_msi, int, 0);
100
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
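/* Illustrative usage (parameter values are only examples):
 *
 *      modprobe sky2 debug=5 copybreak=256 disable_msi=1
 *
 * debug is a netif_msg bitmask (-1 keeps the defaults above), copybreak is
 * the receive copy threshold in bytes, and disable_msi=1 forces legacy
 * interrupts instead of MSI.
 */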
101
 
102
static const struct pci_device_id sky2_id_table[] = {
103
        { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, /* SK-9Sxx */
104
        { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, /* SK-9Exx */
105
        { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) },    /* DGE-560T */
106
        { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4001) },    /* DGE-550SX */
107
        { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B02) },    /* DGE-560SX */
108
        { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B03) },    /* DGE-550T */
109
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) }, /* 88E8021 */
110
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) }, /* 88E8022 */
111
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) }, /* 88E8061 */
112
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4343) }, /* 88E8062 */
113
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4344) }, /* 88E8021 */
114
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4345) }, /* 88E8022 */
115
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4346) }, /* 88E8061 */
116
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4347) }, /* 88E8062 */
117
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4350) }, /* 88E8035 */
118
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4351) }, /* 88E8036 */
119
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4352) }, /* 88E8038 */
120
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4353) }, /* 88E8039 */
121
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4354) }, /* 88E8040 */
122
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4356) }, /* 88EC033 */
123
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4357) }, /* 88E8042 */
124
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x435A) }, /* 88E8048 */
125
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4360) }, /* 88E8052 */
126
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4361) }, /* 88E8050 */
127
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) }, /* 88E8053 */
128
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4363) }, /* 88E8055 */
129
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4364) }, /* 88E8056 */
130
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4365) }, /* 88E8070 */
131
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4366) }, /* 88EC036 */
132
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4367) }, /* 88EC032 */
133
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) }, /* 88EC034 */
134
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4369) }, /* 88EC042 */
135
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436A) }, /* 88E8058 */
136
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436B) }, /* 88E8071 */
137
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436C) }, /* 88E8072 */
138
        { 0 }
139
};
140
 
141
MODULE_DEVICE_TABLE(pci, sky2_id_table);
142
 
143
/* Avoid conditionals by using array */
144
static const unsigned txqaddr[] = { Q_XA1, Q_XA2 };
145
static const unsigned rxqaddr[] = { Q_R1, Q_R2 };
146
static const u32 portirq_msk[] = { Y2_IS_PORT_1, Y2_IS_PORT_2 };
147
 
148
/* This driver supports yukon2 chipset only */
149
static const char *yukon2_name[] = {
150
        "XL",           /* 0xb3 */
151
        "EC Ultra",     /* 0xb4 */
152
        "Extreme",      /* 0xb5 */
153
        "EC",           /* 0xb6 */
154
        "FE",           /* 0xb7 */
155
        "FE+",          /* 0xb8 */
156
};
157
 
158
static void sky2_set_multicast(struct net_device *dev);
159
 
160
/* Access to PHY via serial interconnect */
161
static int gm_phy_write(struct sky2_hw *hw, unsigned port, u16 reg, u16 val)
162
{
163
        int i;
164
 
165
        gma_write16(hw, port, GM_SMI_DATA, val);
166
        gma_write16(hw, port, GM_SMI_CTRL,
167
                    GM_SMI_CT_PHY_AD(PHY_ADDR_MARV) | GM_SMI_CT_REG_AD(reg));
168
 
169
        for (i = 0; i < PHY_RETRIES; i++) {
170
                u16 ctrl = gma_read16(hw, port, GM_SMI_CTRL);
171
                if (ctrl == 0xffff)
172
                        goto io_error;
173
 
174
                if (!(ctrl & GM_SMI_CT_BUSY))
175
                        return 0;
176
 
177
                udelay(10);
178
        }
179
 
180
        dev_warn(&hw->pdev->dev,"%s: phy write timeout\n", hw->dev[port]->name);
181
        return -ETIMEDOUT;
182
 
183
io_error:
184
        dev_err(&hw->pdev->dev, "%s: phy I/O error\n", hw->dev[port]->name);
185
        return -EIO;
186
}
187
 
188
static int __gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg, u16 *val)
189
{
190
        int i;
191
 
192
        gma_write16(hw, port, GM_SMI_CTRL, GM_SMI_CT_PHY_AD(PHY_ADDR_MARV)
193
                    | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
194
 
195
        for (i = 0; i < PHY_RETRIES; i++) {
196
                u16 ctrl = gma_read16(hw, port, GM_SMI_CTRL);
197
                if (ctrl == 0xffff)
198
                        goto io_error;
199
 
200
                if (ctrl & GM_SMI_CT_RD_VAL) {
201
                        *val = gma_read16(hw, port, GM_SMI_DATA);
202
                        return 0;
203
                }
204
 
205
                udelay(10);
206
        }
207
 
208
        dev_warn(&hw->pdev->dev, "%s: phy read timeout\n", hw->dev[port]->name);
209
        return -ETIMEDOUT;
210
io_error:
211
        dev_err(&hw->pdev->dev, "%s: phy I/O error\n", hw->dev[port]->name);
212
        return -EIO;
213
}
214
 
215
static inline u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg)
216
{
217
        u16 v;
218
        __gm_phy_read(hw, port, reg, &v);
219
        return v;
220
}
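/* Minimal usage sketch (mirrors sky2_ioctl() and sky2_phy_reinit() below):
 * PHY accesses are serialized with sky2->phy_lock, e.g.
 *
 *      spin_lock_bh(&sky2->phy_lock);
 *      id = gm_phy_read(hw, port, PHY_MARV_ID0);
 *      spin_unlock_bh(&sky2->phy_lock);
 */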
221
 
222
 
223
static void sky2_power_on(struct sky2_hw *hw)
224
{
225
        /* switch power to VCC (WA for VAUX problem) */
226
        sky2_write8(hw, B0_POWER_CTRL,
227
                    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
228
 
229
        /* disable Core Clock Division, */
230
        sky2_write32(hw, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);
231
 
232
        if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
233
                /* enable bits are inverted */
234
                sky2_write8(hw, B2_Y2_CLK_GATE,
235
                            Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
236
                            Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
237
                            Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
238
        else
239
                sky2_write8(hw, B2_Y2_CLK_GATE, 0);
240
 
241
        if (hw->flags & SKY2_HW_ADV_POWER_CTL) {
242
                u32 reg;
243
 
244
                sky2_pci_write32(hw, PCI_DEV_REG3, 0);
245
 
246
                reg = sky2_pci_read32(hw, PCI_DEV_REG4);
247
                /* set all bits to 0 except bits 15..12 and 8 */
248
                reg &= P_ASPM_CONTROL_MSK;
249
                sky2_pci_write32(hw, PCI_DEV_REG4, reg);
250
 
251
                reg = sky2_pci_read32(hw, PCI_DEV_REG5);
252
                /* set all bits to 0 except bits 28 & 27 */
253
                reg &= P_CTL_TIM_VMAIN_AV_MSK;
254
                sky2_pci_write32(hw, PCI_DEV_REG5, reg);
255
 
256
                sky2_pci_write32(hw, PCI_CFG_REG_1, 0);
257
 
258
                /* Enable workaround for dev 4.107 on Yukon-Ultra & Extreme */
259
                reg = sky2_read32(hw, B2_GP_IO);
260
                reg |= GLB_GPIO_STAT_RACE_DIS;
261
                sky2_write32(hw, B2_GP_IO, reg);
262
 
263
                sky2_read32(hw, B2_GP_IO);
264
        }
265
}
266
 
267
static void sky2_power_aux(struct sky2_hw *hw)
268
{
269
        if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
270
                sky2_write8(hw, B2_Y2_CLK_GATE, 0);
271
        else
272
                /* enable bits are inverted */
273
                sky2_write8(hw, B2_Y2_CLK_GATE,
274
                            Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
275
                            Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
276
                            Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
277
 
278
        /* switch power to VAUX */
279
        if (sky2_read16(hw, B0_CTST) & Y2_VAUX_AVAIL)
280
                sky2_write8(hw, B0_POWER_CTRL,
281
                            (PC_VAUX_ENA | PC_VCC_ENA |
282
                             PC_VAUX_ON | PC_VCC_OFF));
283
}
284
 
285
static void sky2_gmac_reset(struct sky2_hw *hw, unsigned port)
286
{
287
        u16 reg;
288
 
289
        /* disable all GMAC IRQ's */
290
        sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
291
 
292
        gma_write16(hw, port, GM_MC_ADDR_H1, 0); /* clear MC hash */
293
        gma_write16(hw, port, GM_MC_ADDR_H2, 0);
294
        gma_write16(hw, port, GM_MC_ADDR_H3, 0);
295
        gma_write16(hw, port, GM_MC_ADDR_H4, 0);
296
 
297
        reg = gma_read16(hw, port, GM_RX_CTRL);
298
        reg |= GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA;
299
        gma_write16(hw, port, GM_RX_CTRL, reg);
300
}
301
 
302
/* flow control to advertise bits */
303
static const u16 copper_fc_adv[] = {
304
        [FC_NONE]       = 0,
305
        [FC_TX]         = PHY_M_AN_ASP,
306
        [FC_RX]         = PHY_M_AN_PC,
307
        [FC_BOTH]       = PHY_M_AN_PC | PHY_M_AN_ASP,
308
};
309
 
310
/* flow control to advertise bits when using 1000BaseX */
311
static const u16 fiber_fc_adv[] = {
312
        [FC_NONE] = PHY_M_P_NO_PAUSE_X,
313
        [FC_TX]   = PHY_M_P_ASYM_MD_X,
314
        [FC_RX]   = PHY_M_P_SYM_MD_X,
315
        [FC_BOTH] = PHY_M_P_BOTH_MD_X,
316
};
317
 
318
/* flow control to GMA disable bits */
319
static const u16 gm_fc_disable[] = {
320
        [FC_NONE] = GM_GPCR_FC_RX_DIS | GM_GPCR_FC_TX_DIS,
321
        [FC_TX]   = GM_GPCR_FC_RX_DIS,
322
        [FC_RX]   = GM_GPCR_FC_TX_DIS,
323
        [FC_BOTH] = 0,
324
};
325
 
326
 
327
static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
328
{
329
        struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
330
        u16 ctrl, ct1000, adv, pg, ledctrl, ledover, reg;
331
 
332
        if (sky2->autoneg == AUTONEG_ENABLE &&
333
            !(hw->flags & SKY2_HW_NEWER_PHY)) {
334
                u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
335
 
336
                ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
337
                           PHY_M_EC_MAC_S_MSK);
338
                ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ);
339
 
340
                /* on PHY 88E1040 Rev.D0 (and newer) downshift control changed */
341
                if (hw->chip_id == CHIP_ID_YUKON_EC)
342
                        /* set downshift counter to 3x and enable downshift */
343
                        ectrl |= PHY_M_EC_DSC_2(2) | PHY_M_EC_DOWN_S_ENA;
344
                else
345
                        /* set master & slave downshift counter to 1x */
346
                        ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1);
347
 
348
                gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl);
349
        }
350
 
351
        ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
352
        if (sky2_is_copper(hw)) {
353
                if (!(hw->flags & SKY2_HW_GIGABIT)) {
354
                        /* enable automatic crossover */
355
                        ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO) >> 1;
356
 
357
                        if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
358
                            hw->chip_rev == CHIP_REV_YU_FE2_A0) {
359
                                u16 spec;
360
 
361
                                /* Enable Class A driver for FE+ A0 */
362
                                spec = gm_phy_read(hw, port, PHY_MARV_FE_SPEC_2);
363
                                spec |= PHY_M_FESC_SEL_CL_A;
364
                                gm_phy_write(hw, port, PHY_MARV_FE_SPEC_2, spec);
365
                        }
366
                } else {
367
                        /* disable energy detect */
368
                        ctrl &= ~PHY_M_PC_EN_DET_MSK;
369
 
370
                        /* enable automatic crossover */
371
                        ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO);
372
 
373
                        /* downshift on PHY 88E1112 and 88E1149 is changed */
374
                        if (sky2->autoneg == AUTONEG_ENABLE
375
                            && (hw->flags & SKY2_HW_NEWER_PHY)) {
376
                                /* set downshift counter to 3x and enable downshift */
377
                                ctrl &= ~PHY_M_PC_DSC_MSK;
378
                                ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA;
379
                        }
380
                }
381
        } else {
382
                /* workaround for deviation #4.88 (CRC errors) */
383
                /* disable Automatic Crossover */
384
 
385
                ctrl &= ~PHY_M_PC_MDIX_MSK;
386
        }
387
 
388
        gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
389
 
390
        /* special setup for PHY 88E1112 Fiber */
391
        if (hw->chip_id == CHIP_ID_YUKON_XL && (hw->flags & SKY2_HW_FIBRE_PHY)) {
392
                pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
393
 
394
                /* Fiber: select 1000BASE-X only mode MAC Specific Ctrl Reg. */
395
                gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);
396
                ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
397
                ctrl &= ~PHY_M_MAC_MD_MSK;
398
                ctrl |= PHY_M_MAC_MODE_SEL(PHY_M_MAC_MD_1000BX);
399
                gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
400
 
401
                if (hw->pmd_type  == 'P') {
402
                        /* select page 1 to access Fiber registers */
403
                        gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 1);
404
 
405
                        /* for SFP-module set SIGDET polarity to low */
406
                        ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
407
                        ctrl |= PHY_M_FIB_SIGD_POL;
408
                        gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
409
                }
410
 
411
                gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
412
        }
413
 
414
        ctrl = PHY_CT_RESET;
415
        ct1000 = 0;
416
        adv = PHY_AN_CSMA;
417
        reg = 0;
418
 
419
        if (sky2->autoneg == AUTONEG_ENABLE) {
420
                if (sky2_is_copper(hw)) {
421
                        if (sky2->advertising & ADVERTISED_1000baseT_Full)
422
                                ct1000 |= PHY_M_1000C_AFD;
423
                        if (sky2->advertising & ADVERTISED_1000baseT_Half)
424
                                ct1000 |= PHY_M_1000C_AHD;
425
                        if (sky2->advertising & ADVERTISED_100baseT_Full)
426
                                adv |= PHY_M_AN_100_FD;
427
                        if (sky2->advertising & ADVERTISED_100baseT_Half)
428
                                adv |= PHY_M_AN_100_HD;
429
                        if (sky2->advertising & ADVERTISED_10baseT_Full)
430
                                adv |= PHY_M_AN_10_FD;
431
                        if (sky2->advertising & ADVERTISED_10baseT_Half)
432
                                adv |= PHY_M_AN_10_HD;
433
 
434
                        adv |= copper_fc_adv[sky2->flow_mode];
435
                } else {        /* special defines for FIBER (88E1040S only) */
436
                        if (sky2->advertising & ADVERTISED_1000baseT_Full)
437
                                adv |= PHY_M_AN_1000X_AFD;
438
                        if (sky2->advertising & ADVERTISED_1000baseT_Half)
439
                                adv |= PHY_M_AN_1000X_AHD;
440
 
441
                        adv |= fiber_fc_adv[sky2->flow_mode];
442
                }
443
 
444
                /* Restart Auto-negotiation */
445
                ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG;
446
        } else {
447
                /* forced speed/duplex settings */
448
                ct1000 = PHY_M_1000C_MSE;
449
 
450
                /* Disable auto update for duplex flow control and speed */
451
                reg |= GM_GPCR_AU_ALL_DIS;
452
 
453
                switch (sky2->speed) {
454
                case SPEED_1000:
455
                        ctrl |= PHY_CT_SP1000;
456
                        reg |= GM_GPCR_SPEED_1000;
457
                        break;
458
                case SPEED_100:
459
                        ctrl |= PHY_CT_SP100;
460
                        reg |= GM_GPCR_SPEED_100;
461
                        break;
462
                }
463
 
464
                if (sky2->duplex == DUPLEX_FULL) {
465
                        reg |= GM_GPCR_DUP_FULL;
466
                        ctrl |= PHY_CT_DUP_MD;
467
                } else if (sky2->speed < SPEED_1000)
468
                        sky2->flow_mode = FC_NONE;
469
 
470
 
471
                reg |= gm_fc_disable[sky2->flow_mode];
472
 
473
                /* Forward pause packets to GMAC? */
474
                if (sky2->flow_mode & FC_RX)
475
                        sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
476
                else
477
                        sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
478
        }
479
 
480
        gma_write16(hw, port, GM_GP_CTRL, reg);
481
 
482
        if (hw->flags & SKY2_HW_GIGABIT)
483
                gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000);
484
 
485
        gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv);
486
        gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
487
 
488
        /* Setup Phy LED's */
489
        ledctrl = PHY_M_LED_PULS_DUR(PULS_170MS);
490
        ledover = 0;
491
 
492
        switch (hw->chip_id) {
493
        case CHIP_ID_YUKON_FE:
494
                /* on 88E3082 these bits are at 11..9 (shifted left) */
495
                ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) << 1;
496
 
497
                ctrl = gm_phy_read(hw, port, PHY_MARV_FE_LED_PAR);
498
 
499
                /* delete ACT LED control bits */
500
                ctrl &= ~PHY_M_FELP_LED1_MSK;
501
                /* change ACT LED control to blink mode */
502
                ctrl |= PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_ACT_BL);
503
                gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, ctrl);
504
                break;
505
 
506
        case CHIP_ID_YUKON_FE_P:
507
                /* Enable Link Partner Next Page */
508
                ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
509
                ctrl |= PHY_M_PC_ENA_LIP_NP;
510
 
511
                /* disable Energy Detect and enable scrambler */
512
                ctrl &= ~(PHY_M_PC_ENA_ENE_DT | PHY_M_PC_DIS_SCRAMB);
513
                gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
514
 
515
                /* set LED2 -> ACT, LED1 -> LINK, LED0 -> SPEED */
516
                ctrl = PHY_M_FELP_LED2_CTRL(LED_PAR_CTRL_ACT_BL) |
517
                        PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_LINK) |
518
                        PHY_M_FELP_LED0_CTRL(LED_PAR_CTRL_SPEED);
519
 
520
                gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, ctrl);
521
                break;
522
 
523
        case CHIP_ID_YUKON_XL:
524
                pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
525
 
526
                /* select page 3 to access LED control register */
527
                gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
528
 
529
                /* set LED Function Control register */
530
                gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
531
                             (PHY_M_LEDC_LOS_CTRL(1) |  /* LINK/ACT */
532
                              PHY_M_LEDC_INIT_CTRL(7) | /* 10 Mbps */
533
                              PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */
534
                              PHY_M_LEDC_STA0_CTRL(7)));        /* 1000 Mbps */
535
 
536
                /* set Polarity Control register */
537
                gm_phy_write(hw, port, PHY_MARV_PHY_STAT,
538
                             (PHY_M_POLC_LS1_P_MIX(4) |
539
                              PHY_M_POLC_IS0_P_MIX(4) |
540
                              PHY_M_POLC_LOS_CTRL(2) |
541
                              PHY_M_POLC_INIT_CTRL(2) |
542
                              PHY_M_POLC_STA1_CTRL(2) |
543
                              PHY_M_POLC_STA0_CTRL(2)));
544
 
545
                /* restore page register */
546
                gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
547
                break;
548
 
549
        case CHIP_ID_YUKON_EC_U:
550
        case CHIP_ID_YUKON_EX:
551
                pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
552
 
553
                /* select page 3 to access LED control register */
554
                gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
555
 
556
                /* set LED Function Control register */
557
                gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
558
                             (PHY_M_LEDC_LOS_CTRL(1) |  /* LINK/ACT */
559
                              PHY_M_LEDC_INIT_CTRL(8) | /* 10 Mbps */
560
                              PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */
561
                              PHY_M_LEDC_STA0_CTRL(7)));/* 1000 Mbps */
562
 
563
                /* set Blink Rate in LED Timer Control Register */
564
                gm_phy_write(hw, port, PHY_MARV_INT_MASK,
565
                             ledctrl | PHY_M_LED_BLINK_RT(BLINK_84MS));
566
                /* restore page register */
567
                gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
568
                break;
569
 
570
        default:
571
                /* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity */
572
                ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) | PHY_M_LEDC_TX_CTRL;
573
                /* turn off the Rx LED (LED_RX) */
574
                ledover &= ~PHY_M_LED_MO_RX;
575
        }
576
 
577
        if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
578
            hw->chip_rev == CHIP_REV_YU_EC_U_A1) {
579
                /* apply fixes in PHY AFE */
580
                gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 255);
581
 
582
                /* increase differential signal amplitude in 10BASE-T */
583
                gm_phy_write(hw, port, 0x18, 0xaa99);
584
                gm_phy_write(hw, port, 0x17, 0x2011);
585
 
586
                /* fix for IEEE A/B Symmetry failure in 1000BASE-T */
587
                gm_phy_write(hw, port, 0x18, 0xa204);
588
                gm_phy_write(hw, port, 0x17, 0x2002);
589
 
590
                /* set page register to 0 */
591
                gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
592
        } else if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
593
                   hw->chip_rev == CHIP_REV_YU_FE2_A0) {
594
                /* apply workaround for integrated resistors calibration */
595
                gm_phy_write(hw, port, PHY_MARV_PAGE_ADDR, 17);
596
                gm_phy_write(hw, port, PHY_MARV_PAGE_DATA, 0x3f60);
597
        } else if (hw->chip_id != CHIP_ID_YUKON_EX) {
598
                /* no effect on Yukon-XL */
599
                gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
600
 
601
                if (sky2->autoneg == AUTONEG_DISABLE || sky2->speed == SPEED_100) {
602
                        /* turn on 100 Mbps LED (LED_LINK100) */
603
                        ledover |= PHY_M_LED_MO_100;
604
                }
605
 
606
                if (ledover)
607
                        gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);
608
 
609
        }
610
 
611
        /* Enable phy interrupt on auto-negotiation complete (or link up) */
612
        if (sky2->autoneg == AUTONEG_ENABLE)
613
                gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_COMPL);
614
        else
615
                gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
616
}
617
 
618
static void sky2_phy_power(struct sky2_hw *hw, unsigned port, int onoff)
619
{
620
        u32 reg1;
621
        static const u32 phy_power[] = { PCI_Y2_PHY1_POWD, PCI_Y2_PHY2_POWD };
622
        static const u32 coma_mode[] = { PCI_Y2_PHY1_COMA, PCI_Y2_PHY2_COMA };
623
 
624
        reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
625
        /* Turn on/off phy power saving */
626
        if (onoff)
627
                reg1 &= ~phy_power[port];
628
        else
629
                reg1 |= phy_power[port];
630
 
631
        if (onoff && hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
632
                reg1 |= coma_mode[port];
633
 
634
        sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
635
        reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
636
 
637
        udelay(100);
638
}
639
 
640
/* Force a renegotiation */
641
static void sky2_phy_reinit(struct sky2_port *sky2)
642
{
643
        spin_lock_bh(&sky2->phy_lock);
644
        sky2_phy_init(sky2->hw, sky2->port);
645
        spin_unlock_bh(&sky2->phy_lock);
646
}
647
 
648
/* Put device in a state to listen for Wake-on-LAN */
649
static void sky2_wol_init(struct sky2_port *sky2)
650
{
651
        struct sky2_hw *hw = sky2->hw;
652
        unsigned port = sky2->port;
653
        enum flow_control save_mode;
654
        u16 ctrl;
655
        u32 reg1;
656
 
657
        /* Bring hardware out of reset */
658
        sky2_write16(hw, B0_CTST, CS_RST_CLR);
659
        sky2_write16(hw, SK_REG(port, GMAC_LINK_CTRL), GMLC_RST_CLR);
660
 
661
        sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
662
        sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
663
 
664
        /* Force to 10/100
665
         * sky2_reset will re-enable on resume
666
         */
667
        save_mode = sky2->flow_mode;
668
        ctrl = sky2->advertising;
669
 
670
        sky2->advertising &= ~(ADVERTISED_1000baseT_Half|ADVERTISED_1000baseT_Full);
671
        sky2->flow_mode = FC_NONE;
672
        sky2_phy_power(hw, port, 1);
673
        sky2_phy_reinit(sky2);
674
 
675
        sky2->flow_mode = save_mode;
676
        sky2->advertising = ctrl;
677
 
678
        /* Set GMAC to no flow control and auto update for speed/duplex */
679
        gma_write16(hw, port, GM_GP_CTRL,
680
                    GM_GPCR_FC_TX_DIS|GM_GPCR_TX_ENA|GM_GPCR_RX_ENA|
681
                    GM_GPCR_DUP_FULL|GM_GPCR_FC_RX_DIS|GM_GPCR_AU_FCT_DIS);
682
 
683
        /* Set WOL address */
684
        memcpy_toio(hw->regs + WOL_REGS(port, WOL_MAC_ADDR),
685
                    sky2->netdev->dev_addr, ETH_ALEN);
686
 
687
        /* Turn on appropriate WOL control bits */
688
        sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), WOL_CTL_CLEAR_RESULT);
689
        ctrl = 0;
690
        if (sky2->wol & WAKE_PHY)
691
                ctrl |= WOL_CTL_ENA_PME_ON_LINK_CHG|WOL_CTL_ENA_LINK_CHG_UNIT;
692
        else
693
                ctrl |= WOL_CTL_DIS_PME_ON_LINK_CHG|WOL_CTL_DIS_LINK_CHG_UNIT;
694
 
695
        if (sky2->wol & WAKE_MAGIC)
696
                ctrl |= WOL_CTL_ENA_PME_ON_MAGIC_PKT|WOL_CTL_ENA_MAGIC_PKT_UNIT;
697
        else
698
                ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT|WOL_CTL_DIS_MAGIC_PKT_UNIT;
699
 
700
        ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT;
701
        sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl);
702
 
703
        /* Turn on legacy PCI-Express PME mode */
704
        reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
705
        reg1 |= PCI_Y2_PME_LEGACY;
706
        sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
707
 
708
        /* block receiver */
709
        sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
710
 
711
}
712
 
713
static void sky2_set_tx_stfwd(struct sky2_hw *hw, unsigned port)
714
{
715
        struct net_device *dev = hw->dev[port];
716
 
717
        if (dev->mtu <= ETH_DATA_LEN)
718
                sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
719
                             TX_JUMBO_DIS | TX_STFW_ENA);
720
 
721
        else if (hw->chip_id != CHIP_ID_YUKON_EC_U)
722
                sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
723
                             TX_STFW_ENA | TX_JUMBO_ENA);
724
        else {
725
                /* set Tx GMAC FIFO Almost Empty Threshold */
726
                sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR),
727
                             (ECU_JUMBO_WM << 16) | ECU_AE_THR);
728
 
729
                sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
730
                             TX_JUMBO_ENA | TX_STFW_DIS);
731
 
732
                /* Can't do offload because of lack of store/forward */
733
                dev->features &= ~(NETIF_F_TSO | NETIF_F_SG | NETIF_F_ALL_CSUM);
734
        }
735
}
736
 
737
static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
738
{
739
        struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
740
        u16 reg;
741
        u32 rx_reg;
742
        int i;
743
        const u8 *addr = hw->dev[port]->dev_addr;
744
 
745
        sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
746
        sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
747
 
748
        sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
749
 
750
        if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0 && port == 1) {
751
                /* WA DEV_472 -- looks like crossed wires on port 2 */
752
                /* clear GMAC 1 Control reset */
753
                sky2_write8(hw, SK_REG(0, GMAC_CTRL), GMC_RST_CLR);
754
                do {
755
                        sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_SET);
756
                        sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_CLR);
757
                } while (gm_phy_read(hw, 1, PHY_MARV_ID0) != PHY_MARV_ID0_VAL ||
758
                         gm_phy_read(hw, 1, PHY_MARV_ID1) != PHY_MARV_ID1_Y2 ||
759
                         gm_phy_read(hw, 1, PHY_MARV_INT_MASK) != 0);
760
        }
761
 
762
        sky2_read16(hw, SK_REG(port, GMAC_IRQ_SRC));
763
 
764
        /* Enable Transmit FIFO Underrun */
765
        sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK);
766
 
767
        spin_lock_bh(&sky2->phy_lock);
768
        sky2_phy_init(hw, port);
769
        spin_unlock_bh(&sky2->phy_lock);
770
 
771
        /* MIB clear */
772
        reg = gma_read16(hw, port, GM_PHY_ADDR);
773
        gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR);
774
 
775
        for (i = GM_MIB_CNT_BASE; i <= GM_MIB_CNT_END; i += 4)
776
                gma_read16(hw, port, i);
777
        gma_write16(hw, port, GM_PHY_ADDR, reg);
778
 
779
        /* transmit control */
780
        gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
781
 
782
        /* receive control reg: unicast + multicast + no FCS  */
783
        gma_write16(hw, port, GM_RX_CTRL,
784
                    GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA);
785
 
786
        /* transmit flow control */
787
        gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff);
788
 
789
        /* transmit parameter */
790
        gma_write16(hw, port, GM_TX_PARAM,
791
                    TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
792
                    TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
793
                    TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) |
794
                    TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));
795
 
796
        /* serial mode register */
797
        reg = DATA_BLIND_VAL(DATA_BLIND_DEF) |
798
                GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
799
 
800
        if (hw->dev[port]->mtu > ETH_DATA_LEN)
801
                reg |= GM_SMOD_JUMBO_ENA;
802
 
803
        gma_write16(hw, port, GM_SERIAL_MODE, reg);
804
 
805
        /* virtual address for data */
806
        gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr);
807
 
808
        /* physical address: used for pause frames */
809
        gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr);
810
 
811
        /* ignore counter overflows */
812
        gma_write16(hw, port, GM_TX_IRQ_MSK, 0);
813
        gma_write16(hw, port, GM_RX_IRQ_MSK, 0);
814
        gma_write16(hw, port, GM_TR_IRQ_MSK, 0);
815
 
816
        /* Configure Rx MAC FIFO */
817
        sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
818
        rx_reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
819
        if (hw->chip_id == CHIP_ID_YUKON_EX ||
820
            hw->chip_id == CHIP_ID_YUKON_FE_P)
821
                rx_reg |= GMF_RX_OVER_ON;
822
 
823
        sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), rx_reg);
824
 
825
        if (hw->chip_id == CHIP_ID_YUKON_XL) {
826
                /* Hardware errata - clear flush mask */
827
                sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), 0);
828
        } else {
829
                /* Flush Rx MAC FIFO on any flow control or error */
830
                sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), GMR_FS_ANY_ERR);
831
        }
832
 
833
        /* Set threshold to 0xa (64 bytes) + 1 to work around pause bug */
834
        reg = RX_GMF_FL_THR_DEF + 1;
835
        /* Another magic mystery workaround from sk98lin */
836
        if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
837
            hw->chip_rev == CHIP_REV_YU_FE2_A0)
838
                reg = 0x178;
839
        sky2_write16(hw, SK_REG(port, RX_GMF_FL_THR), reg);
840
 
841
        /* Configure Tx MAC FIFO */
842
        sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
843
        sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
844
 
845
        /* On chips without ram buffer, pause is controlled by MAC level */
846
        if (sky2_read8(hw, B2_E_0) == 0) {
847
                sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8);
848
                sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8);
849
 
850
                sky2_set_tx_stfwd(hw, port);
851
        }
852
 
853
        if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
854
            hw->chip_rev == CHIP_REV_YU_FE2_A0) {
855
                /* disable dynamic watermark */
856
                reg = sky2_read16(hw, SK_REG(port, TX_GMF_EA));
857
                reg &= ~TX_DYN_WM_ENA;
858
                sky2_write16(hw, SK_REG(port, TX_GMF_EA), reg);
859
        }
860
}
861
 
862
/* Assign Ram Buffer allocation to queue */
863
static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 space)
864
{
865
        u32 end;
866
 
867
        /* convert from K bytes to qwords used for hw register */
868
        start *= 1024/8;
869
        space *= 1024/8;
870
        end = start + space - 1;
871
 
872
        sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
873
        sky2_write32(hw, RB_ADDR(q, RB_START), start);
874
        sky2_write32(hw, RB_ADDR(q, RB_END), end);
875
        sky2_write32(hw, RB_ADDR(q, RB_WP), start);
876
        sky2_write32(hw, RB_ADDR(q, RB_RP), start);
877
 
878
        if (q == Q_R1 || q == Q_R2) {
879
                u32 tp = space - space/4;
880
 
881
                /* On receive queue's set the thresholds
882
                 * give receiver priority when > 3/4 full
883
                 * send pause when down to 2K
884
                 */
885
                sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp);
886
                sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2);
887
 
888
                tp = space - 2048/8;
889
                sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp);
890
                sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4);
891
        } else {
892
                /* Enable store & forward on Tx queue's because
893
                 * Tx FIFO is only 1K on Yukon
894
                 */
895
                sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD);
896
        }
897
 
898
        sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD);
899
        sky2_read8(hw, RB_ADDR(q, RB_CTRL));
900
}
901
 
902
/* Setup Bus Memory Interface */
903
static void sky2_qset(struct sky2_hw *hw, u16 q)
904
{
905
        sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_RESET);
906
        sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_OPER_INIT);
907
        sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_FIFO_OP_ON);
908
        sky2_write32(hw, Q_ADDR(q, Q_WM),  BMU_WM_DEFAULT);
909
}
910
 
911
/* Setup prefetch unit registers. This is the interface between
912
 * hardware and driver list elements
913
 */
914
static void sky2_prefetch_init(struct sky2_hw *hw, u32 qaddr,
915
                                      u64 addr, u32 last)
916
{
917
        sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
918
        sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_CLR);
919
        sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_HI), addr >> 32);
920
        sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_LO), (u32) addr);
921
        sky2_write16(hw, Y2_QADDR(qaddr, PREF_UNIT_LAST_IDX), last);
922
        sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_OP_ON);
923
 
924
        sky2_read32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL));
925
}
926
 
927
static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2)
928
{
929
        struct sky2_tx_le *le = sky2->tx_le + sky2->tx_prod;
930
 
931
        sky2->tx_prod = RING_NEXT(sky2->tx_prod, TX_RING_SIZE);
932
        le->ctrl = 0;
933
        return le;
934
}
935
 
936
static void tx_init(struct sky2_port *sky2)
937
{
938
        struct sky2_tx_le *le;
939
 
940
        sky2->tx_prod = sky2->tx_cons = 0;
941
        sky2->tx_tcpsum = 0;
942
        sky2->tx_last_mss = 0;
943
 
944
        le = get_tx_le(sky2);
945
        le->addr = 0;
946
        le->opcode = OP_ADDR64 | HW_OWNER;
947
}
948
 
949
static inline struct tx_ring_info *tx_le_re(struct sky2_port *sky2,
950
                                            struct sky2_tx_le *le)
951
{
952
        return sky2->tx_ring + (le - sky2->tx_le);
953
}
954
 
955
/* Update chip's next pointer */
956
static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx)
957
{
958
        /* Make sure writes to descriptors are complete before we tell hardware */
959
        wmb();
960
        sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);
961
 
962
        /* Synchronize I/O, since the next processor may write to the tail */
963
        mmiowb();
964
}
965
 
966
 
967
static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2)
968
{
969
        struct sky2_rx_le *le = sky2->rx_le + sky2->rx_put;
970
        sky2->rx_put = RING_NEXT(sky2->rx_put, RX_LE_SIZE);
971
        le->ctrl = 0;
972
        return le;
973
}
974
 
975
/* Build description to hardware for one receive segment */
976
static void sky2_rx_add(struct sky2_port *sky2,  u8 op,
977
                        dma_addr_t map, unsigned len)
978
{
979
        struct sky2_rx_le *le;
980
 
981
        if (sizeof(dma_addr_t) > sizeof(u32)) {
982
                le = sky2_next_rx(sky2);
983
                le->addr = cpu_to_le32(upper_32_bits(map));
984
                le->opcode = OP_ADDR64 | HW_OWNER;
985
        }
986
 
987
        le = sky2_next_rx(sky2);
988
        le->addr = cpu_to_le32((u32) map);
989
        le->length = cpu_to_le16(len);
990
        le->opcode = op | HW_OWNER;
991
}
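/* For illustration: when dma_addr_t is 64 bits wide, one mapped buffer costs
 * two list elements here (an OP_ADDR64 element carrying the upper 32 address
 * bits, then the OP_PACKET/OP_BUFFER element with the low bits and length);
 * with 32-bit DMA addresses only the second element is written.
 */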
992
 
993
/* Build description to hardware for one possibly fragmented skb */
994
static void sky2_rx_submit(struct sky2_port *sky2,
995
                           const struct rx_ring_info *re)
996
{
997
        int i;
998
 
999
        sky2_rx_add(sky2, OP_PACKET, re->data_addr, sky2->rx_data_size);
1000
 
1001
        for (i = 0; i < skb_shinfo(re->skb)->nr_frags; i++)
1002
                sky2_rx_add(sky2, OP_BUFFER, re->frag_addr[i], PAGE_SIZE);
1003
}
1004
 
1005
 
1006
static void sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re,
1007
                            unsigned size)
1008
{
1009
        struct sk_buff *skb = re->skb;
1010
        int i;
1011
 
1012
        re->data_addr = pci_map_single(pdev, skb->data, size, PCI_DMA_FROMDEVICE);
1013
        pci_unmap_len_set(re, data_size, size);
1014
 
1015
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1016
                re->frag_addr[i] = pci_map_page(pdev,
1017
                                                skb_shinfo(skb)->frags[i].page,
1018
                                                skb_shinfo(skb)->frags[i].page_offset,
1019
                                                skb_shinfo(skb)->frags[i].size,
1020
                                                PCI_DMA_FROMDEVICE);
1021
}
1022
 
1023
static void sky2_rx_unmap_skb(struct pci_dev *pdev, struct rx_ring_info *re)
1024
{
1025
        struct sk_buff *skb = re->skb;
1026
        int i;
1027
 
1028
        pci_unmap_single(pdev, re->data_addr, pci_unmap_len(re, data_size),
1029
                         PCI_DMA_FROMDEVICE);
1030
 
1031
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1032
                pci_unmap_page(pdev, re->frag_addr[i],
1033
                               skb_shinfo(skb)->frags[i].size,
1034
                               PCI_DMA_FROMDEVICE);
1035
}
1036
 
1037
/* Tell chip where to start receive checksum.
1038
 * Actually has two checksums, but set both same to avoid possible byte
1039
 * order problems.
1040
 */
1041
static void rx_set_checksum(struct sky2_port *sky2)
1042
{
1043
        struct sky2_rx_le *le = sky2_next_rx(sky2);
1044
 
1045
        le->addr = cpu_to_le32((ETH_HLEN << 16) | ETH_HLEN);
1046
        le->ctrl = 0;
1047
        le->opcode = OP_TCPSTART | HW_OWNER;
1048
 
1049
        sky2_write32(sky2->hw,
1050
                     Q_ADDR(rxqaddr[sky2->port], Q_CSR),
1051
                     sky2->rx_csum ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
1052
}
1053
 
1054
/*
1055
 * The RX Stop command will not work for Yukon-2 if the BMU does not
1056
 * reach the end of packet and since we can't make sure that we have
1057
 * incoming data, we must reset the BMU while it is not doing a DMA
1058
 * transfer. Since it is possible that the RX path is still active,
1059
 * the RX RAM buffer will be stopped first, so any possible incoming
1060
 * data will not trigger a DMA. After the RAM buffer is stopped, the
1061
 * BMU is polled until any DMA in progress is ended and only then it
1062
 * will be reset.
1063
 */
1064
static void sky2_rx_stop(struct sky2_port *sky2)
1065
{
1066
        struct sky2_hw *hw = sky2->hw;
1067
        unsigned rxq = rxqaddr[sky2->port];
1068
        int i;
1069
 
1070
        /* disable the RAM Buffer receive queue */
1071
        sky2_write8(hw, RB_ADDR(rxq, RB_CTRL), RB_DIS_OP_MD);
1072
 
1073
        for (i = 0; i < 0xffff; i++)
1074
                if (sky2_read8(hw, RB_ADDR(rxq, Q_RSL))
1075
                    == sky2_read8(hw, RB_ADDR(rxq, Q_RL)))
1076
                        goto stopped;
1077
 
1078
        printk(KERN_WARNING PFX "%s: receiver stop failed\n",
1079
               sky2->netdev->name);
1080
stopped:
1081
        sky2_write32(hw, Q_ADDR(rxq, Q_CSR), BMU_RST_SET | BMU_FIFO_RST);
1082
 
1083
        /* reset the Rx prefetch unit */
1084
        sky2_write32(hw, Y2_QADDR(rxq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
1085
        mmiowb();
1086
}
1087
 
1088
/* Clean out receive buffer area, assumes receiver hardware stopped */
1089
static void sky2_rx_clean(struct sky2_port *sky2)
1090
{
1091
        unsigned i;
1092
 
1093
        memset(sky2->rx_le, 0, RX_LE_BYTES);
1094
        for (i = 0; i < sky2->rx_pending; i++) {
1095
                struct rx_ring_info *re = sky2->rx_ring + i;
1096
 
1097
                if (re->skb) {
1098
                        sky2_rx_unmap_skb(sky2->hw->pdev, re);
1099
                        kfree_skb(re->skb);
1100
                        re->skb = NULL;
1101
                }
1102
        }
1103
}
1104
 
1105
/* Basic MII support */
1106
static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1107
{
1108
        struct mii_ioctl_data *data = if_mii(ifr);
1109
        struct sky2_port *sky2 = netdev_priv(dev);
1110
        struct sky2_hw *hw = sky2->hw;
1111
        int err = -EOPNOTSUPP;
1112
 
1113
        if (!netif_running(dev))
1114
                return -ENODEV; /* Phy still in reset */
1115
 
1116
        switch (cmd) {
1117
        case SIOCGMIIPHY:
1118
                data->phy_id = PHY_ADDR_MARV;
1119
 
1120
                /* fallthru */
1121
        case SIOCGMIIREG: {
1122
                u16 val = 0;
1123
 
1124
                spin_lock_bh(&sky2->phy_lock);
1125
                err = __gm_phy_read(hw, sky2->port, data->reg_num & 0x1f, &val);
1126
                spin_unlock_bh(&sky2->phy_lock);
1127
 
1128
                data->val_out = val;
1129
                break;
1130
        }
1131
 
1132
        case SIOCSMIIREG:
1133
                if (!capable(CAP_NET_ADMIN))
1134
                        return -EPERM;
1135
 
1136
                spin_lock_bh(&sky2->phy_lock);
1137
                err = gm_phy_write(hw, sky2->port, data->reg_num & 0x1f,
1138
                                   data->val_in);
1139
                spin_unlock_bh(&sky2->phy_lock);
1140
                break;
1141
        }
1142
        return err;
1143
}
1144
 
1145
#ifdef SKY2_VLAN_TAG_USED
1146
static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
1147
{
1148
        struct sky2_port *sky2 = netdev_priv(dev);
1149
        struct sky2_hw *hw = sky2->hw;
1150
        u16 port = sky2->port;
1151
 
1152
        netif_tx_lock_bh(dev);
1153
        napi_disable(&hw->napi);
1154
 
1155
        sky2->vlgrp = grp;
1156
        if (grp) {
1157
                sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
1158
                             RX_VLAN_STRIP_ON);
1159
                sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
1160
                             TX_VLAN_TAG_ON);
1161
        } else {
1162
                sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
1163
                             RX_VLAN_STRIP_OFF);
1164
                sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
1165
                             TX_VLAN_TAG_OFF);
1166
        }
1167
 
1168
        sky2_read32(hw, B0_Y2_SP_LISR);
1169
        napi_enable(&hw->napi);
1170
        netif_tx_unlock_bh(dev);
1171
}
1172
#endif
1173
 
1174
/*
1175
 * Allocate an skb for receiving. If the MTU is large enough
1176
 * make the skb non-linear with a fragment list of pages.
1177
 *
1178
 * It appears the hardware has a bug in the FIFO logic that
1179
 * causes it to hang if the FIFO gets overrun and the receive buffer
1180
 * is not 64 byte aligned. The buffer returned from netdev_alloc_skb is
1181
 * aligned except if slab debugging is enabled.
1182
 */
1183
static struct sk_buff *sky2_rx_alloc(struct sky2_port *sky2)
1184
{
1185
        struct sk_buff *skb;
1186
        unsigned long p;
1187
        int i;
1188
 
1189
        skb = netdev_alloc_skb(sky2->netdev, sky2->rx_data_size + RX_SKB_ALIGN);
1190
        if (!skb)
1191
                goto nomem;
1192
 
1193
        p = (unsigned long) skb->data;
1194
        skb_reserve(skb, ALIGN(p, RX_SKB_ALIGN) - p);
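        /* Example: if skb->data ended up at ...0x1235, ALIGN(p, 8) is ...0x1238,
         * so 3 bytes are reserved and the buffer starts 8-byte (RX_SKB_ALIGN)
         * aligned.
         */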
1195
 
1196
        for (i = 0; i < sky2->rx_nfrags; i++) {
1197
                struct page *page = alloc_page(GFP_ATOMIC);
1198
 
1199
                if (!page)
1200
                        goto free_partial;
1201
                skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);
1202
        }
1203
 
1204
        return skb;
1205
free_partial:
1206
        kfree_skb(skb);
1207
nomem:
1208
        return NULL;
1209
}
1210
 
1211
static inline void sky2_rx_update(struct sky2_port *sky2, unsigned rxq)
1212
{
1213
        sky2_put_idx(sky2->hw, rxq, sky2->rx_put);
1214
}
1215
 
1216
/*
1217
 * Allocate and setup receiver buffer pool.
1218
 * Normal case this ends up creating one list element for skb
1219
 * in the receive ring. Worst case if using large MTU and each
1220
 * allocation falls on a different 64 bit region, that results
1221
 * in 6 list elements per ring entry.
1222
 * One element is used for checksum enable/disable, and one
1223
 * extra to avoid wrap.
1224
 */
1225
static int sky2_rx_start(struct sky2_port *sky2)
1226
{
1227
        struct sky2_hw *hw = sky2->hw;
1228
        struct rx_ring_info *re;
1229
        unsigned rxq = rxqaddr[sky2->port];
1230
        unsigned i, size, space, thresh;
1231
 
1232
        sky2->rx_put = sky2->rx_next = 0;
1233
        sky2_qset(hw, rxq);
1234
 
1235
        /* On PCI express lowering the watermark gives better performance */
1236
        if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP))
1237
                sky2_write32(hw, Q_ADDR(rxq, Q_WM), BMU_WM_PEX);
1238
 
1239
        /* These chips have no ram buffer?
1240
         * MAC Rx RAM Read is controlled by hardware */
1241
        if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
1242
            (hw->chip_rev == CHIP_REV_YU_EC_U_A1
1243
             || hw->chip_rev == CHIP_REV_YU_EC_U_B0))
1244
                sky2_write32(hw, Q_ADDR(rxq, Q_TEST), F_M_RX_RAM_DIS);
1245
 
1246
        sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1);
1247
 
1248
        if (!(hw->flags & SKY2_HW_NEW_LE))
1249
                rx_set_checksum(sky2);
1250
 
1251
        /* Space needed for frame data + headers rounded up */
1252
        size = roundup(sky2->netdev->mtu + ETH_HLEN + VLAN_HLEN, 8);
1253
 
1254
        /* Stopping point for hardware truncation */
1255
        thresh = (size - 8) / sizeof(u32);
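        /* Worked example: with a standard 1500-byte MTU, size = roundup(1500 +
         * 14 + 4, 8) = 1520 and thresh = (1520 - 8) / 4 = 378, comfortably below
         * the 9-bit (0x1ff) limit of the truncation register checked further down.
         */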
1256
 
1257
        /* Account for overhead of skb - to avoid order > 0 allocation */
1258
        space = SKB_DATA_ALIGN(size) + NET_SKB_PAD
1259
                + sizeof(struct skb_shared_info);
1260
 
1261
        sky2->rx_nfrags = space >> PAGE_SHIFT;
1262
        BUG_ON(sky2->rx_nfrags > ARRAY_SIZE(re->frag_addr));
1263
 
1264
        if (sky2->rx_nfrags != 0) {
1265
                /* Compute residue after pages */
1266
                space = sky2->rx_nfrags << PAGE_SHIFT;
1267
 
1268
                if (space < size)
1269
                        size -= space;
1270
                else
1271
                        size = 0;
1272
 
1273
                /* Optimize to handle small packets and headers */
1274
                if (size < copybreak)
1275
                        size = copybreak;
1276
                if (size < ETH_HLEN)
1277
                        size = ETH_HLEN;
1278
        }
1279
        sky2->rx_data_size = size;
1280
 
1281
        /* Fill Rx ring */
1282
        for (i = 0; i < sky2->rx_pending; i++) {
1283
                re = sky2->rx_ring + i;
1284
 
1285
                re->skb = sky2_rx_alloc(sky2);
1286
                if (!re->skb)
1287
                        goto nomem;
1288
 
1289
                sky2_rx_map_skb(hw->pdev, re, sky2->rx_data_size);
1290
                sky2_rx_submit(sky2, re);
1291
        }
1292
 
1293
        /*
1294
         * The receiver hangs if it receives frames larger than the
1295
         * packet buffer. As a workaround, truncate oversize frames, but
1296
         * the register is limited to 9 bits, so if you do frames > 2052
1297
         * you better get the MTU right!
1298
         */
1299
        if (thresh > 0x1ff)
1300
                sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_OFF);
1301
        else {
1302
                sky2_write16(hw, SK_REG(sky2->port, RX_GMF_TR_THR), thresh);
1303
                sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_ON);
1304
        }
1305
 
1306
        /* Tell chip about available buffers */
1307
        sky2_rx_update(sky2, rxq);
1308
        return 0;
1309
nomem:
1310
        sky2_rx_clean(sky2);
1311
        return -ENOMEM;
1312
}
1313
 
1314
/* Bring up network interface. */
1315
static int sky2_up(struct net_device *dev)
1316
{
1317
        struct sky2_port *sky2 = netdev_priv(dev);
1318
        struct sky2_hw *hw = sky2->hw;
1319
        unsigned port = sky2->port;
1320
        u32 imask, ramsize;
1321
        int cap, err = -ENOMEM;
1322
        struct net_device *otherdev = hw->dev[sky2->port^1];
1323
 
1324
        /*
1325
         * On dual port PCI-X cards, there is a problem where status
1326
         * can be received out of order due to split transactions
1327
         */
1328
        if (otherdev && netif_running(otherdev) &&
1329
            (cap = pci_find_capability(hw->pdev, PCI_CAP_ID_PCIX))) {
1330
                u16 cmd;
1331
 
1332
                cmd = sky2_pci_read16(hw, cap + PCI_X_CMD);
1333
                cmd &= ~PCI_X_CMD_MAX_SPLIT;
1334
                sky2_pci_write16(hw, cap + PCI_X_CMD, cmd);
1335
 
1336
        }
1337
 
1338
        if (netif_msg_ifup(sky2))
1339
                printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
1340
 
1341
        netif_carrier_off(dev);
1342
 
1343
        /* must be power of 2 */
1344
        sky2->tx_le = pci_alloc_consistent(hw->pdev,
1345
                                           TX_RING_SIZE *
1346
                                           sizeof(struct sky2_tx_le),
1347
                                           &sky2->tx_le_map);
1348
        if (!sky2->tx_le)
1349
                goto err_out;
1350
 
1351
        sky2->tx_ring = kcalloc(TX_RING_SIZE, sizeof(struct tx_ring_info),
1352
                                GFP_KERNEL);
1353
        if (!sky2->tx_ring)
1354
                goto err_out;
1355
 
1356
        tx_init(sky2);
1357
 
1358
        sky2->rx_le = pci_alloc_consistent(hw->pdev, RX_LE_BYTES,
1359
                                           &sky2->rx_le_map);
1360
        if (!sky2->rx_le)
1361
                goto err_out;
1362
        memset(sky2->rx_le, 0, RX_LE_BYTES);
1363
 
1364
        sky2->rx_ring = kcalloc(sky2->rx_pending, sizeof(struct rx_ring_info),
1365
                                GFP_KERNEL);
1366
        if (!sky2->rx_ring)
1367
                goto err_out;
1368
 
1369
        sky2_phy_power(hw, port, 1);
1370
 
1371
        sky2_mac_init(hw, port);
1372
 
1373
        /* Register is number of 4K blocks on internal RAM buffer. */
1374
        ramsize = sky2_read8(hw, B2_E_0) * 4;
1375
        if (ramsize > 0) {
1376
                u32 rxspace;
1377
 
1378
                pr_debug(PFX "%s: ram buffer %dK\n", dev->name, ramsize);
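                /* Split the RAM buffer between receive and transmit: small
                 * buffers are divided evenly, larger ones give roughly two
                 * thirds of the space above 16K to the receive queue.
                 */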
1379
                if (ramsize < 16)
1380
                        rxspace = ramsize / 2;
1381
                else
1382
                        rxspace = 8 + (2*(ramsize - 16))/3;
1383
 
1384
                sky2_ramset(hw, rxqaddr[port], 0, rxspace);
1385
                sky2_ramset(hw, txqaddr[port], rxspace, ramsize - rxspace);
1386
 
1387
                /* Make sure SyncQ is disabled */
1388
                sky2_write8(hw, RB_ADDR(port == 0 ? Q_XS1 : Q_XS2, RB_CTRL),
1389
                            RB_RST_SET);
1390
        }
1391
 
1392
        sky2_qset(hw, txqaddr[port]);
1393
 
1394
        /* This is copied from sk98lin 10.0.5.3; no one tells me about errata */
1395
        if (hw->chip_id == CHIP_ID_YUKON_EX && hw->chip_rev == CHIP_REV_YU_EX_B0)
1396
                sky2_write32(hw, Q_ADDR(txqaddr[port], Q_TEST), F_TX_CHK_AUTO_OFF);
1397
 
1398
        /* Set almost empty threshold */
1399
        if (hw->chip_id == CHIP_ID_YUKON_EC_U
1400
            && hw->chip_rev == CHIP_REV_YU_EC_U_A0)
1401
                sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), ECU_TXFF_LEV);
1402
 
1403
        sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
1404
                           TX_RING_SIZE - 1);
1405
 
1406
        err = sky2_rx_start(sky2);
1407
        if (err)
1408
                goto err_out;
1409
 
1410
        /* Enable interrupts from phy/mac for port */
1411
        imask = sky2_read32(hw, B0_IMSK);
1412
        imask |= portirq_msk[port];
1413
        sky2_write32(hw, B0_IMSK, imask);
1414
 
1415
        return 0;
1416
 
1417
err_out:
1418
        if (sky2->rx_le) {
1419
                pci_free_consistent(hw->pdev, RX_LE_BYTES,
1420
                                    sky2->rx_le, sky2->rx_le_map);
1421
                sky2->rx_le = NULL;
1422
        }
1423
        if (sky2->tx_le) {
1424
                pci_free_consistent(hw->pdev,
1425
                                    TX_RING_SIZE * sizeof(struct sky2_tx_le),
1426
                                    sky2->tx_le, sky2->tx_le_map);
1427
                sky2->tx_le = NULL;
1428
        }
1429
        kfree(sky2->tx_ring);
1430
        kfree(sky2->rx_ring);
1431
 
1432
        sky2->tx_ring = NULL;
1433
        sky2->rx_ring = NULL;
1434
        return err;
1435
}
1436
 
1437
/* Modular subtraction in ring */
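/* Relies on TX_RING_SIZE being a power of two: masking with size - 1
 * yields the distance modulo the ring size.
 */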
1438
static inline int tx_dist(unsigned tail, unsigned head)
1439
{
1440
        return (head - tail) & (TX_RING_SIZE - 1);
1441
}
1442
 
1443
/* Number of list elements available for next tx */
1444
static inline int tx_avail(const struct sky2_port *sky2)
1445
{
1446
        return sky2->tx_pending - tx_dist(sky2->tx_cons, sky2->tx_prod);
1447
}
1448
 
1449
/* Estimate of number of transmit list elements required */
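/* One element is needed per buffer (the head and each fragment), or two
 * when a 64 bit DMA address takes an extra OP_ADDR64 element; GSO and
 * checksum offload each add one more element.
 */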
1450
static unsigned tx_le_req(const struct sk_buff *skb)
1451
{
1452
        unsigned count;
1453
 
1454
        count = sizeof(dma_addr_t) / sizeof(u32);
1455
        count += skb_shinfo(skb)->nr_frags * count;
1456
 
1457
        if (skb_is_gso(skb))
1458
                ++count;
1459
 
1460
        if (skb->ip_summed == CHECKSUM_PARTIAL)
1461
                ++count;
1462
 
1463
        return count;
1464
}
1465
 
1466
/*
1467
 * Put one packet in ring for transmit.
1468
 * A single packet can generate multiple list elements, and
1469
 * the number of ring elements will probably be less than the number
1470
 * of list elements used.
1471
 */
1472
static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
1473
{
1474
        struct sky2_port *sky2 = netdev_priv(dev);
1475
        struct sky2_hw *hw = sky2->hw;
1476
        struct sky2_tx_le *le = NULL;
1477
        struct tx_ring_info *re;
1478
        unsigned i, len;
1479
        dma_addr_t mapping;
1480
        u16 mss;
1481
        u8 ctrl;
1482
 
1483
        if (unlikely(tx_avail(sky2) < tx_le_req(skb)))
1484
                return NETDEV_TX_BUSY;
1485
 
1486
        if (unlikely(netif_msg_tx_queued(sky2)))
1487
                printk(KERN_DEBUG "%s: tx queued, slot %u, len %d\n",
1488
                       dev->name, sky2->tx_prod, skb->len);
1489
 
1490
        len = skb_headlen(skb);
1491
        mapping = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
1492
 
1493
        /* Send high bits if needed */
1494
        if (sizeof(dma_addr_t) > sizeof(u32)) {
1495
                le = get_tx_le(sky2);
1496
                le->addr = cpu_to_le32(upper_32_bits(mapping));
1497
                le->opcode = OP_ADDR64 | HW_OWNER;
1498
        }
1499
 
1500
        /* Check for TCP Segmentation Offload */
1501
        mss = skb_shinfo(skb)->gso_size;
1502
        if (mss != 0) {
1503
 
1504
                if (!(hw->flags & SKY2_HW_NEW_LE))
1505
                        mss += ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
1506
 
1507
                if (mss != sky2->tx_last_mss) {
1508
                        le = get_tx_le(sky2);
1509
                        le->addr = cpu_to_le32(mss);
1510
 
1511
                        if (hw->flags & SKY2_HW_NEW_LE)
1512
                                le->opcode = OP_MSS | HW_OWNER;
1513
                        else
1514
                                le->opcode = OP_LRGLEN | HW_OWNER;
1515
                        sky2->tx_last_mss = mss;
1516
                }
1517
        }
1518
 
1519
        ctrl = 0;
1520
#ifdef SKY2_VLAN_TAG_USED
1521
        /* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */
1522
        if (sky2->vlgrp && vlan_tx_tag_present(skb)) {
1523
                if (!le) {
1524
                        le = get_tx_le(sky2);
1525
                        le->addr = 0;
1526
                        le->opcode = OP_VLAN|HW_OWNER;
1527
                } else
1528
                        le->opcode |= OP_VLAN;
1529
                le->length = cpu_to_be16(vlan_tx_tag_get(skb));
1530
                ctrl |= INS_VLAN;
1531
        }
1532
#endif
1533
 
1534
        /* Handle TCP checksum offload */
1535
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
1536
                /* On some versions of Yukon EX, the checksum encoding changed. */
1537
                if (hw->flags & SKY2_HW_AUTO_TX_SUM)
1538
                        ctrl |= CALSUM; /* auto checksum */
1539
                else {
1540
                        const unsigned offset = skb_transport_offset(skb);
1541
                        u32 tcpsum;
1542
 
1543
                        tcpsum = offset << 16;                  /* sum start */
1544
                        tcpsum |= offset + skb->csum_offset;    /* sum write */
1545
 
1546
                        ctrl |= CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
1547
                        if (ip_hdr(skb)->protocol == IPPROTO_UDP)
1548
                                ctrl |= UDPTCP;
1549
 
1550
                        if (tcpsum != sky2->tx_tcpsum) {
1551
                                sky2->tx_tcpsum = tcpsum;
1552
 
1553
                                le = get_tx_le(sky2);
1554
                                le->addr = cpu_to_le32(tcpsum);
1555
                                le->length = 0;  /* initial checksum value */
1556
                                le->ctrl = 1;   /* one packet */
1557
                                le->opcode = OP_TCPLISW | HW_OWNER;
1558
                        }
1559
                }
1560
        }
1561
 
1562
        le = get_tx_le(sky2);
1563
        le->addr = cpu_to_le32((u32) mapping);
1564
        le->length = cpu_to_le16(len);
1565
        le->ctrl = ctrl;
1566
        le->opcode = mss ? (OP_LARGESEND | HW_OWNER) : (OP_PACKET | HW_OWNER);
1567
 
1568
        re = tx_le_re(sky2, le);
1569
        re->skb = skb;
1570
        pci_unmap_addr_set(re, mapaddr, mapping);
1571
        pci_unmap_len_set(re, maplen, len);
1572
 
1573
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1574
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1575
 
1576
                mapping = pci_map_page(hw->pdev, frag->page, frag->page_offset,
1577
                                       frag->size, PCI_DMA_TODEVICE);
1578
 
1579
                if (sizeof(dma_addr_t) > sizeof(u32)) {
1580
                        le = get_tx_le(sky2);
1581
                        le->addr = cpu_to_le32(upper_32_bits(mapping));
1582
                        le->ctrl = 0;
1583
                        le->opcode = OP_ADDR64 | HW_OWNER;
1584
                }
1585
 
1586
                le = get_tx_le(sky2);
1587
                le->addr = cpu_to_le32((u32) mapping);
1588
                le->length = cpu_to_le16(frag->size);
1589
                le->ctrl = ctrl;
1590
                le->opcode = OP_BUFFER | HW_OWNER;
1591
 
1592
                re = tx_le_re(sky2, le);
1593
                re->skb = skb;
1594
                pci_unmap_addr_set(re, mapaddr, mapping);
1595
                pci_unmap_len_set(re, maplen, frag->size);
1596
        }
1597
 
1598
        le->ctrl |= EOP;
1599
 
1600
        if (tx_avail(sky2) <= MAX_SKB_TX_LE)
1601
                netif_stop_queue(dev);
1602
 
1603
        sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod);
1604
 
1605
        dev->trans_start = jiffies;
1606
        return NETDEV_TX_OK;
1607
}
1608
 
1609
/*
1610
 * Free ring elements starting at tx_cons until "done"
1611
 *
1612
 * NB: the hardware will tell us about partial completion of multi-part
1613
 *     buffers, so make sure not to free the skb too early.
1614
 */
1615
static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
1616
{
1617
        struct net_device *dev = sky2->netdev;
1618
        struct pci_dev *pdev = sky2->hw->pdev;
1619
        unsigned idx;
1620
 
1621
        BUG_ON(done >= TX_RING_SIZE);
1622
 
1623
        for (idx = sky2->tx_cons; idx != done;
1624
             idx = RING_NEXT(idx, TX_RING_SIZE)) {
1625
                struct sky2_tx_le *le = sky2->tx_le + idx;
1626
                struct tx_ring_info *re = sky2->tx_ring + idx;
1627
 
1628
                switch(le->opcode & ~HW_OWNER) {
1629
                case OP_LARGESEND:
1630
                case OP_PACKET:
1631
                        pci_unmap_single(pdev,
1632
                                         pci_unmap_addr(re, mapaddr),
1633
                                         pci_unmap_len(re, maplen),
1634
                                         PCI_DMA_TODEVICE);
1635
                        break;
1636
                case OP_BUFFER:
1637
                        pci_unmap_page(pdev, pci_unmap_addr(re, mapaddr),
1638
                                       pci_unmap_len(re, maplen),
1639
                                       PCI_DMA_TODEVICE);
1640
                        break;
1641
                }
1642
 
1643
                if (le->ctrl & EOP) {
1644
                        if (unlikely(netif_msg_tx_done(sky2)))
1645
                                printk(KERN_DEBUG "%s: tx done %u\n",
1646
                                       dev->name, idx);
1647
 
1648
                        dev->stats.tx_packets++;
1649
                        dev->stats.tx_bytes += re->skb->len;
1650
 
1651
                        dev_kfree_skb_any(re->skb);
1652
                        sky2->tx_next = RING_NEXT(idx, TX_RING_SIZE);
1653
                }
1654
        }
1655
 
1656
        sky2->tx_cons = idx;
1657
        smp_mb();
1658
 
1659
        if (tx_avail(sky2) > MAX_SKB_TX_LE + 4)
1660
                netif_wake_queue(dev);
1661
}
1662
 
1663
/* Clean up all untransmitted buffers; assumes the transmitter is not running */
1664
static void sky2_tx_clean(struct net_device *dev)
1665
{
1666
        struct sky2_port *sky2 = netdev_priv(dev);
1667
 
1668
        netif_tx_lock_bh(dev);
1669
        sky2_tx_complete(sky2, sky2->tx_prod);
1670
        netif_tx_unlock_bh(dev);
1671
}
1672
 
1673
/* Network shutdown */
1674
static int sky2_down(struct net_device *dev)
1675
{
1676
        struct sky2_port *sky2 = netdev_priv(dev);
1677
        struct sky2_hw *hw = sky2->hw;
1678
        unsigned port = sky2->port;
1679
        u16 ctrl;
1680
        u32 imask;
1681
 
1682
        /* Never really got started! */
1683
        if (!sky2->tx_le)
1684
                return 0;
1685
 
1686
        if (netif_msg_ifdown(sky2))
1687
                printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);
1688
 
1689
        /* Stop more packets from being queued */
1690
        netif_stop_queue(dev);
1691
 
1692
        /* Disable port IRQ */
1693
        imask = sky2_read32(hw, B0_IMSK);
1694
        imask &= ~portirq_msk[port];
1695
        sky2_write32(hw, B0_IMSK, imask);
1696
 
1697
        synchronize_irq(hw->pdev->irq);
1698
 
1699
        sky2_gmac_reset(hw, port);
1700
 
1701
        /* Stop transmitter */
1702
        sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_STOP);
1703
        sky2_read32(hw, Q_ADDR(txqaddr[port], Q_CSR));
1704
 
1705
        sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
1706
                     RB_RST_SET | RB_DIS_OP_MD);
1707
 
1708
        ctrl = gma_read16(hw, port, GM_GP_CTRL);
1709
        ctrl &= ~(GM_GPCR_TX_ENA | GM_GPCR_RX_ENA);
1710
        gma_write16(hw, port, GM_GP_CTRL, ctrl);
1711
 
1712
        /* Make sure no packets are pending */
1713
        napi_synchronize(&hw->napi);
1714
 
1715
        sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
1716
 
1717
        /* Workaround shared GMAC reset */
1718
        if (!(hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0
1719
              && port == 0 && hw->dev[1] && netif_running(hw->dev[1])))
1720
                sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);
1721
 
1722
        /* Disable Force Sync bit and Alloc bit */
1723
        sky2_write8(hw, SK_REG(port, TXA_CTRL),
1724
                    TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
1725
 
1726
        /* Stop Interval Timer and Limit Counter of Tx Arbiter */
1727
        sky2_write32(hw, SK_REG(port, TXA_ITI_INI), 0L);
1728
        sky2_write32(hw, SK_REG(port, TXA_LIM_INI), 0L);
1729
 
1730
        /* Reset the PCI FIFO of the async Tx queue */
1731
        sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR),
1732
                     BMU_RST_SET | BMU_FIFO_RST);
1733
 
1734
        /* Reset the Tx prefetch units */
1735
        sky2_write32(hw, Y2_QADDR(txqaddr[port], PREF_UNIT_CTRL),
1736
                     PREF_UNIT_RST_SET);
1737
 
1738
        sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET);
1739
 
1740
        sky2_rx_stop(sky2);
1741
 
1742
        sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
1743
        sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
1744
 
1745
        sky2_phy_power(hw, port, 0);
1746
 
1747
        netif_carrier_off(dev);
1748
 
1749
        /* turn off LEDs */
1750
        sky2_write16(hw, B0_Y2LED, LED_STAT_OFF);
1751
 
1752
        sky2_tx_clean(dev);
1753
        sky2_rx_clean(sky2);
1754
 
1755
        pci_free_consistent(hw->pdev, RX_LE_BYTES,
1756
                            sky2->rx_le, sky2->rx_le_map);
1757
        kfree(sky2->rx_ring);
1758
 
1759
        pci_free_consistent(hw->pdev,
1760
                            TX_RING_SIZE * sizeof(struct sky2_tx_le),
1761
                            sky2->tx_le, sky2->tx_le_map);
1762
        kfree(sky2->tx_ring);
1763
 
1764
        sky2->tx_le = NULL;
1765
        sky2->rx_le = NULL;
1766
 
1767
        sky2->rx_ring = NULL;
1768
        sky2->tx_ring = NULL;
1769
 
1770
        return 0;
1771
}
1772
 
1773
static u16 sky2_phy_speed(const struct sky2_hw *hw, u16 aux)
1774
{
1775
        if (hw->flags & SKY2_HW_FIBRE_PHY)
1776
                return SPEED_1000;
1777
 
1778
        if (!(hw->flags & SKY2_HW_GIGABIT)) {
1779
                if (aux & PHY_M_PS_SPEED_100)
1780
                        return SPEED_100;
1781
                else
1782
                        return SPEED_10;
1783
        }
1784
 
1785
        switch (aux & PHY_M_PS_SPEED_MSK) {
1786
        case PHY_M_PS_SPEED_1000:
1787
                return SPEED_1000;
1788
        case PHY_M_PS_SPEED_100:
1789
                return SPEED_100;
1790
        default:
1791
                return SPEED_10;
1792
        }
1793
}
1794
 
1795
static void sky2_link_up(struct sky2_port *sky2)
1796
{
1797
        struct sky2_hw *hw = sky2->hw;
1798
        unsigned port = sky2->port;
1799
        u16 reg;
1800
        static const char *fc_name[] = {
1801
                [FC_NONE]       = "none",
1802
                [FC_TX]         = "tx",
1803
                [FC_RX]         = "rx",
1804
                [FC_BOTH]       = "both",
1805
        };
1806
 
1807
        /* enable Rx/Tx */
1808
        reg = gma_read16(hw, port, GM_GP_CTRL);
1809
        reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
1810
        gma_write16(hw, port, GM_GP_CTRL, reg);
1811
 
1812
        gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
1813
 
1814
        netif_carrier_on(sky2->netdev);
1815
 
1816
        mod_timer(&hw->watchdog_timer, jiffies + 1);
1817
 
1818
        /* Turn on link LED */
1819
        sky2_write8(hw, SK_REG(port, LNK_LED_REG),
1820
                    LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF);
1821
 
1822
        if (netif_msg_link(sky2))
1823
                printk(KERN_INFO PFX
1824
                       "%s: Link is up at %d Mbps, %s duplex, flow control %s\n",
1825
                       sky2->netdev->name, sky2->speed,
1826
                       sky2->duplex == DUPLEX_FULL ? "full" : "half",
1827
                       fc_name[sky2->flow_status]);
1828
}
1829
 
1830
static void sky2_link_down(struct sky2_port *sky2)
1831
{
1832
        struct sky2_hw *hw = sky2->hw;
1833
        unsigned port = sky2->port;
1834
        u16 reg;
1835
 
1836
        gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);
1837
 
1838
        reg = gma_read16(hw, port, GM_GP_CTRL);
1839
        reg &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
1840
        gma_write16(hw, port, GM_GP_CTRL, reg);
1841
 
1842
        netif_carrier_off(sky2->netdev);
1843
 
1844
        /* Turn off link LED */
1845
        sky2_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF);
1846
 
1847
        if (netif_msg_link(sky2))
1848
                printk(KERN_INFO PFX "%s: Link is down.\n", sky2->netdev->name);
1849
 
1850
        sky2_phy_init(hw, port);
1851
}
1852
 
1853
static enum flow_control sky2_flow(int rx, int tx)
1854
{
1855
        if (rx)
1856
                return tx ? FC_BOTH : FC_RX;
1857
        else
1858
                return tx ? FC_TX : FC_NONE;
1859
}
1860
 
1861
static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux)
1862
{
1863
        struct sky2_hw *hw = sky2->hw;
1864
        unsigned port = sky2->port;
1865
        u16 advert, lpa;
1866
 
1867
        advert = gm_phy_read(hw, port, PHY_MARV_AUNE_ADV);
1868
        lpa = gm_phy_read(hw, port, PHY_MARV_AUNE_LP);
1869
        if (lpa & PHY_M_AN_RF) {
1870
                printk(KERN_ERR PFX "%s: remote fault", sky2->netdev->name);
1871
                return -1;
1872
        }
1873
 
1874
        if (!(aux & PHY_M_PS_SPDUP_RES)) {
1875
                printk(KERN_ERR PFX "%s: speed/duplex mismatch",
1876
                       sky2->netdev->name);
1877
                return -1;
1878
        }
1879
 
1880
        sky2->speed = sky2_phy_speed(hw, aux);
1881
        sky2->duplex = (aux & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
1882
 
1883
        /* Since the pause result bits seem to be in different positions on
1884
         * different chips, look at the registers.
1885
         */
1886
        if (hw->flags & SKY2_HW_FIBRE_PHY) {
1887
                /* Shift for bits in fiber PHY */
1888
                advert &= ~(ADVERTISE_PAUSE_CAP|ADVERTISE_PAUSE_ASYM);
1889
                lpa &= ~(LPA_PAUSE_CAP|LPA_PAUSE_ASYM);
1890
 
1891
                if (advert & ADVERTISE_1000XPAUSE)
1892
                        advert |= ADVERTISE_PAUSE_CAP;
1893
                if (advert & ADVERTISE_1000XPSE_ASYM)
1894
                        advert |= ADVERTISE_PAUSE_ASYM;
1895
                if (lpa & LPA_1000XPAUSE)
1896
                        lpa |= LPA_PAUSE_CAP;
1897
                if (lpa & LPA_1000XPAUSE_ASYM)
1898
                        lpa |= LPA_PAUSE_ASYM;
1899
        }
1900
 
1901
        sky2->flow_status = FC_NONE;
1902
        if (advert & ADVERTISE_PAUSE_CAP) {
1903
                if (lpa & LPA_PAUSE_CAP)
1904
                        sky2->flow_status = FC_BOTH;
1905
                else if (advert & ADVERTISE_PAUSE_ASYM)
1906
                        sky2->flow_status = FC_RX;
1907
        } else if (advert & ADVERTISE_PAUSE_ASYM) {
1908
                if ((lpa & LPA_PAUSE_CAP) && (lpa & LPA_PAUSE_ASYM))
1909
                        sky2->flow_status = FC_TX;
1910
        }
1911
 
1912
        if (sky2->duplex == DUPLEX_HALF && sky2->speed < SPEED_1000
1913
            && !(hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX))
1914
                sky2->flow_status = FC_NONE;
1915
 
1916
        if (sky2->flow_status & FC_TX)
1917
                sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
1918
        else
1919
                sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
1920
 
1921
        return 0;
1922
}
1923
 
1924
/* Interrupt from PHY */
1925
static void sky2_phy_intr(struct sky2_hw *hw, unsigned port)
1926
{
1927
        struct net_device *dev = hw->dev[port];
1928
        struct sky2_port *sky2 = netdev_priv(dev);
1929
        u16 istatus, phystat;
1930
 
1931
        if (!netif_running(dev))
1932
                return;
1933
 
1934
        spin_lock(&sky2->phy_lock);
1935
        istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
1936
        phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);
1937
 
1938
        if (netif_msg_intr(sky2))
1939
                printk(KERN_INFO PFX "%s: phy interrupt status 0x%x 0x%x\n",
1940
                       sky2->netdev->name, istatus, phystat);
1941
 
1942
        if (sky2->autoneg == AUTONEG_ENABLE && (istatus & PHY_M_IS_AN_COMPL)) {
1943
                if (sky2_autoneg_done(sky2, phystat) == 0)
1944
                        sky2_link_up(sky2);
1945
                goto out;
1946
        }
1947
 
1948
        if (istatus & PHY_M_IS_LSP_CHANGE)
1949
                sky2->speed = sky2_phy_speed(hw, phystat);
1950
 
1951
        if (istatus & PHY_M_IS_DUP_CHANGE)
1952
                sky2->duplex =
1953
                    (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
1954
 
1955
        if (istatus & PHY_M_IS_LST_CHANGE) {
1956
                if (phystat & PHY_M_PS_LINK_UP)
1957
                        sky2_link_up(sky2);
1958
                else
1959
                        sky2_link_down(sky2);
1960
        }
1961
out:
1962
        spin_unlock(&sky2->phy_lock);
1963
}
1964
 
1965
/* Transmit timeout is only called if we are running, carrier is up
1966
 * and tx queue is full (stopped).
1967
 */
1968
static void sky2_tx_timeout(struct net_device *dev)
1969
{
1970
        struct sky2_port *sky2 = netdev_priv(dev);
1971
        struct sky2_hw *hw = sky2->hw;
1972
 
1973
        if (netif_msg_timer(sky2))
1974
                printk(KERN_ERR PFX "%s: tx timeout\n", dev->name);
1975
 
1976
        printk(KERN_DEBUG PFX "%s: transmit ring %u .. %u report=%u done=%u\n",
1977
               dev->name, sky2->tx_cons, sky2->tx_prod,
1978
               sky2_read16(hw, sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX),
1979
               sky2_read16(hw, Q_ADDR(txqaddr[sky2->port], Q_DONE)));
1980
 
1981
        /* can't restart safely under softirq */
1982
        schedule_work(&hw->restart_work);
1983
}
1984
 
1985
static int sky2_change_mtu(struct net_device *dev, int new_mtu)
1986
{
1987
        struct sky2_port *sky2 = netdev_priv(dev);
1988
        struct sky2_hw *hw = sky2->hw;
1989
        unsigned port = sky2->port;
1990
        int err;
1991
        u16 ctl, mode;
1992
        u32 imask;
1993
 
1994
        if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
1995
                return -EINVAL;
1996
 
1997
        if (new_mtu > ETH_DATA_LEN &&
1998
            (hw->chip_id == CHIP_ID_YUKON_FE ||
1999
             hw->chip_id == CHIP_ID_YUKON_FE_P))
2000
                return -EINVAL;
2001
 
2002
        if (!netif_running(dev)) {
2003
                dev->mtu = new_mtu;
2004
                return 0;
2005
        }
2006
 
2007
        imask = sky2_read32(hw, B0_IMSK);
2008
        sky2_write32(hw, B0_IMSK, 0);
2009
 
2010
        dev->trans_start = jiffies;     /* prevent tx timeout */
2011
        netif_stop_queue(dev);
2012
        napi_disable(&hw->napi);
2013
 
2014
        synchronize_irq(hw->pdev->irq);
2015
 
2016
        if (sky2_read8(hw, B2_E_0) == 0)
2017
                sky2_set_tx_stfwd(hw, port);
2018
 
2019
        ctl = gma_read16(hw, port, GM_GP_CTRL);
2020
        gma_write16(hw, port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA);
2021
        sky2_rx_stop(sky2);
2022
        sky2_rx_clean(sky2);
2023
 
2024
        dev->mtu = new_mtu;
2025
 
2026
        mode = DATA_BLIND_VAL(DATA_BLIND_DEF) |
2027
                GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
2028
 
2029
        if (dev->mtu > ETH_DATA_LEN)
2030
                mode |= GM_SMOD_JUMBO_ENA;
2031
 
2032
        gma_write16(hw, port, GM_SERIAL_MODE, mode);
2033
 
2034
        sky2_write8(hw, RB_ADDR(rxqaddr[port], RB_CTRL), RB_ENA_OP_MD);
2035
 
2036
        err = sky2_rx_start(sky2);
2037
        sky2_write32(hw, B0_IMSK, imask);
2038
 
2039
        sky2_read32(hw, B0_Y2_SP_LISR);
2040
        napi_enable(&hw->napi);
2041
 
2042
        if (err)
2043
                dev_close(dev);
2044
        else {
2045
                gma_write16(hw, port, GM_GP_CTRL, ctl);
2046
 
2047
                netif_wake_queue(dev);
2048
        }
2049
 
2050
        return err;
2051
}
2052
 
2053
/* For small packets, just reuse the existing skb for the next receive */
2054
static struct sk_buff *receive_copy(struct sky2_port *sky2,
2055
                                    const struct rx_ring_info *re,
2056
                                    unsigned length)
2057
{
2058
        struct sk_buff *skb;
2059
 
2060
        skb = netdev_alloc_skb(sky2->netdev, length + 2);
2061
        if (likely(skb)) {
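                /* Reserve two bytes so the IP header that follows the
                 * 14 byte Ethernet header ends up aligned.
                 */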
2062
                skb_reserve(skb, 2);
2063
                pci_dma_sync_single_for_cpu(sky2->hw->pdev, re->data_addr,
2064
                                            length, PCI_DMA_FROMDEVICE);
2065
                skb_copy_from_linear_data(re->skb, skb->data, length);
2066
                skb->ip_summed = re->skb->ip_summed;
2067
                skb->csum = re->skb->csum;
2068
                pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr,
2069
                                               length, PCI_DMA_FROMDEVICE);
2070
                re->skb->ip_summed = CHECKSUM_NONE;
2071
                skb_put(skb, length);
2072
        }
2073
        return skb;
2074
}
2075
 
2076
/* Adjust length of skb with fragments to match received data */
2077
static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
2078
                          unsigned int length)
2079
{
2080
        int i, num_frags;
2081
        unsigned int size;
2082
 
2083
        /* put header into skb */
2084
        size = min(length, hdr_space);
2085
        skb->tail += size;
2086
        skb->len += size;
2087
        length -= size;
2088
 
2089
        num_frags = skb_shinfo(skb)->nr_frags;
2090
        for (i = 0; i < num_frags; i++) {
2091
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2092
 
2093
                if (length == 0) {
2094
                        /* don't need this page */
2095
                        __free_page(frag->page);
2096
                        --skb_shinfo(skb)->nr_frags;
2097
                } else {
2098
                        size = min(length, (unsigned) PAGE_SIZE);
2099
 
2100
                        frag->size = size;
2101
                        skb->data_len += size;
2102
                        skb->truesize += size;
2103
                        skb->len += size;
2104
                        length -= size;
2105
                }
2106
        }
2107
}
2108
 
2109
/* Normal packet - take skb from ring element and put in a new one  */
2110
static struct sk_buff *receive_new(struct sky2_port *sky2,
2111
                                   struct rx_ring_info *re,
2112
                                   unsigned int length)
2113
{
2114
        struct sk_buff *skb, *nskb;
2115
        unsigned hdr_space = sky2->rx_data_size;
2116
 
2117
        /* Don't be tricky about reusing pages (yet) */
2118
        nskb = sky2_rx_alloc(sky2);
2119
        if (unlikely(!nskb))
2120
                return NULL;
2121
 
2122
        skb = re->skb;
2123
        sky2_rx_unmap_skb(sky2->hw->pdev, re);
2124
 
2125
        prefetch(skb->data);
2126
        re->skb = nskb;
2127
        sky2_rx_map_skb(sky2->hw->pdev, re, hdr_space);
2128
 
2129
        if (skb_shinfo(skb)->nr_frags)
2130
                skb_put_frags(skb, hdr_space, length);
2131
        else
2132
                skb_put(skb, length);
2133
        return skb;
2134
}
2135
 
2136
/*
2137
 * Receive one packet.
2138
 * For larger packets, get new buffer.
2139
 */
2140
static struct sk_buff *sky2_receive(struct net_device *dev,
2141
                                    u16 length, u32 status)
2142
{
2143
        struct sky2_port *sky2 = netdev_priv(dev);
2144
        struct rx_ring_info *re = sky2->rx_ring + sky2->rx_next;
2145
        struct sk_buff *skb = NULL;
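        /* Frame length as reported by the MAC, taken from the upper
         * half of the receive status word.
         */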
2146
        u16 count = (status & GMR_FS_LEN) >> 16;
2147
 
2148
#ifdef SKY2_VLAN_TAG_USED
2149
        /* Account for vlan tag */
2150
        if (sky2->vlgrp && (status & GMR_FS_VLAN))
2151
                count -= VLAN_HLEN;
2152
#endif
2153
 
2154
        if (unlikely(netif_msg_rx_status(sky2)))
2155
                printk(KERN_DEBUG PFX "%s: rx slot %u status 0x%x len %d\n",
2156
                       dev->name, sky2->rx_next, status, length);
2157
 
2158
        sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending;
2159
        prefetch(sky2->rx_ring + sky2->rx_next);
2160
 
2161
        /* This chip has hardware problems that generate bogus status.
2162
         * So do only marginal checking and expect higher level protocols
2163
         * to handle crap frames.
2164
         */
2165
        if (sky2->hw->chip_id == CHIP_ID_YUKON_FE_P &&
2166
            sky2->hw->chip_rev == CHIP_REV_YU_FE2_A0 &&
2167
            length != count)
2168
                goto okay;
2169
 
2170
        if (status & GMR_FS_ANY_ERR)
2171
                goto error;
2172
 
2173
        if (!(status & GMR_FS_RX_OK))
2174
                goto resubmit;
2175
 
2176
        /* if length reported by DMA does not match PHY, packet was truncated */
2177
        if (length != count)
2178
                goto len_error;
2179
 
2180
okay:
2181
        if (length < copybreak)
2182
                skb = receive_copy(sky2, re, length);
2183
        else
2184
                skb = receive_new(sky2, re, length);
2185
resubmit:
2186
        sky2_rx_submit(sky2, re);
2187
 
2188
        return skb;
2189
 
2190
len_error:
2191
        /* Truncation of overlength packets
2192
           causes PHY length to not match MAC length */
2193
        ++dev->stats.rx_length_errors;
2194
        if (netif_msg_rx_err(sky2) && net_ratelimit())
2195
                pr_info(PFX "%s: rx length error: status %#x length %d\n",
2196
                        dev->name, status, length);
2197
        goto resubmit;
2198
 
2199
error:
2200
        ++dev->stats.rx_errors;
2201
        if (status & GMR_FS_RX_FF_OV) {
2202
                dev->stats.rx_over_errors++;
2203
                goto resubmit;
2204
        }
2205
 
2206
        if (netif_msg_rx_err(sky2) && net_ratelimit())
2207
                printk(KERN_INFO PFX "%s: rx error, status 0x%x length %d\n",
2208
                       dev->name, status, length);
2209
 
2210
        if (status & (GMR_FS_LONG_ERR | GMR_FS_UN_SIZE))
2211
                dev->stats.rx_length_errors++;
2212
        if (status & GMR_FS_FRAGMENT)
2213
                dev->stats.rx_frame_errors++;
2214
        if (status & GMR_FS_CRC_ERR)
2215
                dev->stats.rx_crc_errors++;
2216
 
2217
        goto resubmit;
2218
}
2219
 
2220
/* Transmit complete */
2221
static inline void sky2_tx_done(struct net_device *dev, u16 last)
2222
{
2223
        struct sky2_port *sky2 = netdev_priv(dev);
2224
 
2225
        if (netif_running(dev)) {
2226
                netif_tx_lock(dev);
2227
                sky2_tx_complete(sky2, last);
2228
                netif_tx_unlock(dev);
2229
        }
2230
}
2231
 
2232
/* Process status response ring */
2233
static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
2234
{
2235
        int work_done = 0;
2236
        unsigned rx[2] = { 0, 0 };
2237
 
2238
        rmb();
2239
        do {
2240
                struct sky2_port *sky2;
2241
                struct sky2_status_le *le  = hw->st_le + hw->st_idx;
2242
                unsigned port;
2243
                struct net_device *dev;
2244
                struct sk_buff *skb;
2245
                u32 status;
2246
                u16 length;
2247
                u8 opcode = le->opcode;
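                /* The hardware sets HW_OWNER in each status element it has
                 * filled in; stop at the first element not yet written.
                 */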
2248
 
2249
                if (!(opcode & HW_OWNER))
2250
                        break;
2251
 
2252
                hw->st_idx = RING_NEXT(hw->st_idx, STATUS_RING_SIZE);
2253
 
2254
                port = le->css & CSS_LINK_BIT;
2255
                dev = hw->dev[port];
2256
                sky2 = netdev_priv(dev);
2257
                length = le16_to_cpu(le->length);
2258
                status = le32_to_cpu(le->status);
2259
 
2260
                le->opcode = 0;
2261
                switch (opcode & ~HW_OWNER) {
2262
                case OP_RXSTAT:
2263
                        ++rx[port];
2264
                        skb = sky2_receive(dev, length, status);
2265
                        if (unlikely(!skb)) {
2266
                                dev->stats.rx_dropped++;
2267
                                break;
2268
                        }
2269
 
2270
                        /* This chip reports checksum status differently */
2271
                        if (hw->flags & SKY2_HW_NEW_LE) {
2272
                                if (sky2->rx_csum &&
2273
                                    (le->css & (CSS_ISIPV4 | CSS_ISIPV6)) &&
2274
                                    (le->css & CSS_TCPUDPCSOK))
2275
                                        skb->ip_summed = CHECKSUM_UNNECESSARY;
2276
                                else
2277
                                        skb->ip_summed = CHECKSUM_NONE;
2278
                        }
2279
 
2280
                        skb->protocol = eth_type_trans(skb, dev);
2281
                        dev->stats.rx_packets++;
2282
                        dev->stats.rx_bytes += skb->len;
2283
                        dev->last_rx = jiffies;
2284
 
2285
#ifdef SKY2_VLAN_TAG_USED
2286
                        if (sky2->vlgrp && (status & GMR_FS_VLAN)) {
2287
                                vlan_hwaccel_receive_skb(skb,
2288
                                                         sky2->vlgrp,
2289
                                                         be16_to_cpu(sky2->rx_tag));
2290
                        } else
2291
#endif
2292
                                netif_receive_skb(skb);
2293
 
2294
                        /* Stop after net poll weight */
2295
                        if (++work_done >= to_do)
2296
                                goto exit_loop;
2297
                        break;
2298
 
2299
#ifdef SKY2_VLAN_TAG_USED
2300
                case OP_RXVLAN:
2301
                        sky2->rx_tag = length;
2302
                        break;
2303
 
2304
                case OP_RXCHKSVLAN:
2305
                        sky2->rx_tag = length;
2306
                        /* fall through */
2307
#endif
2308
                case OP_RXCHKS:
2309
                        if (!sky2->rx_csum)
2310
                                break;
2311
 
2312
                        /* If this happens, the driver is assuming the wrong format */
2313
                        if (unlikely(hw->flags & SKY2_HW_NEW_LE)) {
2314
                                if (net_ratelimit())
2315
                                        printk(KERN_NOTICE "%s: unexpected"
2316
                                               " checksum status\n",
2317
                                               dev->name);
2318
                                break;
2319
                        }
2320
 
2321
                        /* Both checksum counters are programmed to start at
2322
                         * the same offset, so unless there is a problem they
2323
                         * should match. This failure is an early indication that
2324
                         * hardware receive checksumming won't work.
2325
                         */
2326
                        if (likely(status >> 16 == (status & 0xffff))) {
2327
                                skb = sky2->rx_ring[sky2->rx_next].skb;
2328
                                skb->ip_summed = CHECKSUM_COMPLETE;
2329
                                skb->csum = status & 0xffff;
2330
                        } else {
2331
                                printk(KERN_NOTICE PFX "%s: hardware receive "
2332
                                       "checksum problem (status = %#x)\n",
2333
                                       dev->name, status);
2334
                                sky2->rx_csum = 0;
2335
                                sky2_write32(sky2->hw,
2336
                                             Q_ADDR(rxqaddr[port], Q_CSR),
2337
                                             BMU_DIS_RX_CHKSUM);
2338
                        }
2339
                        break;
2340
 
2341
                case OP_TXINDEXLE:
2342
                        /* TX index reports status for both ports */
2343
                        BUILD_BUG_ON(TX_RING_SIZE > 0x1000);
2344
                        sky2_tx_done(hw->dev[0], status & 0xfff);
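                        /* The port 1 index is split across two fields: bits
                         * 31:24 of status hold the low 8 bits and bits 3:0
                         * of length hold the high 4 bits.
                         */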
2345
                        if (hw->dev[1])
2346
                                sky2_tx_done(hw->dev[1],
2347
                                     ((status >> 24) & 0xff)
2348
                                             | (u16)(length & 0xf) << 8);
2349
                        break;
2350
 
2351
                default:
2352
                        if (net_ratelimit())
2353
                                printk(KERN_WARNING PFX
2354
                                       "unknown status opcode 0x%x\n", opcode);
2355
                }
2356
        } while (hw->st_idx != idx);
2357
 
2358
        /* Fully processed status ring so clear irq */
2359
        sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
2360
 
2361
exit_loop:
2362
        if (rx[0])
2363
                sky2_rx_update(netdev_priv(hw->dev[0]), Q_R1);
2364
 
2365
        if (rx[1])
2366
                sky2_rx_update(netdev_priv(hw->dev[1]), Q_R2);
2367
 
2368
        return work_done;
2369
}
2370
 
2371
static void sky2_hw_error(struct sky2_hw *hw, unsigned port, u32 status)
2372
{
2373
        struct net_device *dev = hw->dev[port];
2374
 
2375
        if (net_ratelimit())
2376
                printk(KERN_INFO PFX "%s: hw error interrupt status 0x%x\n",
2377
                       dev->name, status);
2378
 
2379
        if (status & Y2_IS_PAR_RD1) {
2380
                if (net_ratelimit())
2381
                        printk(KERN_ERR PFX "%s: ram data read parity error\n",
2382
                               dev->name);
2383
                /* Clear IRQ */
2384
                sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_RD_PERR);
2385
        }
2386
 
2387
        if (status & Y2_IS_PAR_WR1) {
2388
                if (net_ratelimit())
2389
                        printk(KERN_ERR PFX "%s: ram data write parity error\n",
2390
                               dev->name);
2391
 
2392
                sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_WR_PERR);
2393
        }
2394
 
2395
        if (status & Y2_IS_PAR_MAC1) {
2396
                if (net_ratelimit())
2397
                        printk(KERN_ERR PFX "%s: MAC parity error\n", dev->name);
2398
                sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_PE);
2399
        }
2400
 
2401
        if (status & Y2_IS_PAR_RX1) {
2402
                if (net_ratelimit())
2403
                        printk(KERN_ERR PFX "%s: RX parity error\n", dev->name);
2404
                sky2_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), BMU_CLR_IRQ_PAR);
2405
        }
2406
 
2407
        if (status & Y2_IS_TCP_TXA1) {
2408
                if (net_ratelimit())
2409
                        printk(KERN_ERR PFX "%s: TCP segmentation error\n",
2410
                               dev->name);
2411
                sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_CLR_IRQ_TCP);
2412
        }
2413
}
2414
 
2415
static void sky2_hw_intr(struct sky2_hw *hw)
2416
{
2417
        struct pci_dev *pdev = hw->pdev;
2418
        u32 status = sky2_read32(hw, B0_HWE_ISRC);
2419
        u32 hwmsk = sky2_read32(hw, B0_HWE_IMSK);
2420
 
2421
        status &= hwmsk;
2422
 
2423
        if (status & Y2_IS_TIST_OV)
2424
                sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
2425
 
2426
        if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) {
2427
                u16 pci_err;
2428
 
2429
                pci_err = sky2_pci_read16(hw, PCI_STATUS);
2430
                if (net_ratelimit())
2431
                        dev_err(&pdev->dev, "PCI hardware error (0x%x)\n",
2432
                                pci_err);
2433
 
2434
                sky2_pci_write16(hw, PCI_STATUS,
2435
                                      pci_err | PCI_STATUS_ERROR_BITS);
2436
        }
2437
 
2438
        if (status & Y2_IS_PCI_EXP) {
2439
                /* PCI-Express uncorrectable Error occurred */
2440
                u32 err;
2441
 
2442
                err = sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
2443
                sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS,
2444
                             0xfffffffful);
2445
                if (net_ratelimit())
2446
                        dev_err(&pdev->dev, "PCI Express error (0x%x)\n", err);
2447
 
2448
                sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
2449
        }
2450
 
2451
        if (status & Y2_HWE_L1_MASK)
2452
                sky2_hw_error(hw, 0, status);
2453
        status >>= 8;
2454
        if (status & Y2_HWE_L1_MASK)
2455
                sky2_hw_error(hw, 1, status);
2456
}
2457
 
2458
static void sky2_mac_intr(struct sky2_hw *hw, unsigned port)
2459
{
2460
        struct net_device *dev = hw->dev[port];
2461
        struct sky2_port *sky2 = netdev_priv(dev);
2462
        u8 status = sky2_read8(hw, SK_REG(port, GMAC_IRQ_SRC));
2463
 
2464
        if (netif_msg_intr(sky2))
2465
                printk(KERN_INFO PFX "%s: mac interrupt status 0x%x\n",
2466
                       dev->name, status);
2467
 
2468
        if (status & GM_IS_RX_CO_OV)
2469
                gma_read16(hw, port, GM_RX_IRQ_SRC);
2470
 
2471
        if (status & GM_IS_TX_CO_OV)
2472
                gma_read16(hw, port, GM_TX_IRQ_SRC);
2473
 
2474
        if (status & GM_IS_RX_FF_OR) {
2475
                ++dev->stats.rx_fifo_errors;
2476
                sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO);
2477
        }
2478
 
2479
        if (status & GM_IS_TX_FF_UR) {
2480
                ++dev->stats.tx_fifo_errors;
2481
                sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU);
2482
        }
2483
}
2484
 
2485
/* This should never happen; it is a bug. */
2486
static void sky2_le_error(struct sky2_hw *hw, unsigned port,
2487
                          u16 q, unsigned ring_size)
2488
{
2489
        struct net_device *dev = hw->dev[port];
2490
        struct sky2_port *sky2 = netdev_priv(dev);
2491
        unsigned idx;
2492
        const u64 *le = (q == Q_R1 || q == Q_R2)
2493
                ? (u64 *) sky2->rx_le : (u64 *) sky2->tx_le;
2494
 
2495
        idx = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX));
2496
        printk(KERN_ERR PFX "%s: descriptor error q=%#x get=%u [%llx] put=%u\n",
2497
               dev->name, (unsigned) q, idx, (unsigned long long) le[idx],
2498
               (unsigned) sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX)));
2499
 
2500
        sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_IRQ_CHK);
2501
}
2502
 
2503
static int sky2_rx_hung(struct net_device *dev)
2504
{
2505
        struct sky2_port *sky2 = netdev_priv(dev);
2506
        struct sky2_hw *hw = sky2->hw;
2507
        unsigned port = sky2->port;
2508
        unsigned rxq = rxqaddr[port];
2509
        u32 mac_rp = sky2_read32(hw, SK_REG(port, RX_GMF_RP));
2510
        u8 mac_lev = sky2_read8(hw, SK_REG(port, RX_GMF_RLEV));
2511
        u8 fifo_rp = sky2_read8(hw, Q_ADDR(rxq, Q_RP));
2512
        u8 fifo_lev = sky2_read8(hw, Q_ADDR(rxq, Q_RL));
2513
 
2514
        /* If idle and MAC or PCI is stuck */
2515
        if (sky2->check.last == dev->last_rx &&
2516
            ((mac_rp == sky2->check.mac_rp &&
2517
              mac_lev != 0 && mac_lev >= sky2->check.mac_lev) ||
2518
             /* Check if the PCI RX path is hung */
2519
             (fifo_rp == sky2->check.fifo_rp &&
2520
              fifo_lev != 0 && fifo_lev >= sky2->check.fifo_lev))) {
2521
                printk(KERN_DEBUG PFX "%s: hung mac %d:%d fifo %d (%d:%d)\n",
2522
                       dev->name, mac_lev, mac_rp, fifo_lev, fifo_rp,
2523
                       sky2_read8(hw, Q_ADDR(rxq, Q_WP)));
2524
                return 1;
2525
        } else {
2526
                sky2->check.last = dev->last_rx;
2527
                sky2->check.mac_rp = mac_rp;
2528
                sky2->check.mac_lev = mac_lev;
2529
                sky2->check.fifo_rp = fifo_rp;
2530
                sky2->check.fifo_lev = fifo_lev;
2531
                return 0;
2532
        }
2533
}
2534
 
2535
static void sky2_watchdog(unsigned long arg)
2536
{
2537
        struct sky2_hw *hw = (struct sky2_hw *) arg;
2538
 
2539
        /* Check for lost IRQ once a second */
2540
        if (sky2_read32(hw, B0_ISRC)) {
2541
                napi_schedule(&hw->napi);
2542
        } else {
2543
                int i, active = 0;
2544
 
2545
                for (i = 0; i < hw->ports; i++) {
2546
                        struct net_device *dev = hw->dev[i];
2547
                        if (!netif_running(dev))
2548
                                continue;
2549
                        ++active;
2550
 
2551
                        /* For chips with Rx FIFO, check if stuck */
2552
                        if ((hw->flags & SKY2_HW_FIFO_HANG_CHECK) &&
2553
                             sky2_rx_hung(dev)) {
2554
                                pr_info(PFX "%s: receiver hang detected\n",
2555
                                        dev->name);
2556
                                schedule_work(&hw->restart_work);
2557
                                return;
2558
                        }
2559
                }
2560
 
2561
                if (active == 0)
2562
                        return;
2563
        }
2564
 
2565
        mod_timer(&hw->watchdog_timer, round_jiffies(jiffies + HZ));
2566
}
2567
 
2568
/* Hardware/software error handling */
2569
static void sky2_err_intr(struct sky2_hw *hw, u32 status)
2570
{
2571
        if (net_ratelimit())
2572
                dev_warn(&hw->pdev->dev, "error interrupt status=%#x\n", status);
2573
 
2574
        if (status & Y2_IS_HW_ERR)
2575
                sky2_hw_intr(hw);
2576
 
2577
        if (status & Y2_IS_IRQ_MAC1)
2578
                sky2_mac_intr(hw, 0);
2579
 
2580
        if (status & Y2_IS_IRQ_MAC2)
2581
                sky2_mac_intr(hw, 1);
2582
 
2583
        if (status & Y2_IS_CHK_RX1)
2584
                sky2_le_error(hw, 0, Q_R1, RX_LE_SIZE);
2585
 
2586
        if (status & Y2_IS_CHK_RX2)
2587
                sky2_le_error(hw, 1, Q_R2, RX_LE_SIZE);
2588
 
2589
        if (status & Y2_IS_CHK_TXA1)
2590
                sky2_le_error(hw, 0, Q_XA1, TX_RING_SIZE);
2591
 
2592
        if (status & Y2_IS_CHK_TXA2)
2593
                sky2_le_error(hw, 1, Q_XA2, TX_RING_SIZE);
2594
}
2595
 
2596
static int sky2_poll(struct napi_struct *napi, int work_limit)
2597
{
2598
        struct sky2_hw *hw = container_of(napi, struct sky2_hw, napi);
2599
        u32 status = sky2_read32(hw, B0_Y2_SP_EISR);
2600
        int work_done = 0;
2601
        u16 idx;
2602
 
2603
        if (unlikely(status & Y2_IS_ERROR))
2604
                sky2_err_intr(hw, status);
2605
 
2606
        if (status & Y2_IS_IRQ_PHY1)
2607
                sky2_phy_intr(hw, 0);
2608
 
2609
        if (status & Y2_IS_IRQ_PHY2)
2610
                sky2_phy_intr(hw, 1);
2611
 
2612
        while ((idx = sky2_read16(hw, STAT_PUT_IDX)) != hw->st_idx) {
2613
                work_done += sky2_status_intr(hw, work_limit - work_done, idx);
2614
 
2615
                if (work_done >= work_limit)
2616
                        goto done;
2617
        }
2618
 
2619
        /* Bug/Errata workaround?
2620
         * Need to kick the TX irq moderation timer.
2621
         */
2622
        if (sky2_read8(hw, STAT_TX_TIMER_CTRL) == TIM_START) {
2623
                sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
2624
                sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
2625
        }
2626
        napi_complete(napi);
2627
        sky2_read32(hw, B0_Y2_SP_LISR);
2628
done:
2629
 
2630
        return work_done;
2631
}
2632
 
2633
static irqreturn_t sky2_intr(int irq, void *dev_id)
2634
{
2635
        struct sky2_hw *hw = dev_id;
2636
        u32 status;
2637
 
2638
        /* Reading this register masks further interrupts as a side effect */
2639
        status = sky2_read32(hw, B0_Y2_SP_ISRC2);
2640
        if (status == 0 || status == ~0)
2641
                return IRQ_NONE;
2642
 
2643
        prefetch(&hw->st_le[hw->st_idx]);
2644
 
2645
        napi_schedule(&hw->napi);
2646
 
2647
        return IRQ_HANDLED;
2648
}
2649
 
2650
#ifdef CONFIG_NET_POLL_CONTROLLER
2651
static void sky2_netpoll(struct net_device *dev)
2652
{
2653
        struct sky2_port *sky2 = netdev_priv(dev);
2654
 
2655
        napi_schedule(&sky2->hw->napi);
2656
}
2657
#endif
2658
 
2659
/* Chip internal frequency for clock calculations */
2660
static u32 sky2_mhz(const struct sky2_hw *hw)
2661
{
2662
        switch (hw->chip_id) {
2663
        case CHIP_ID_YUKON_EC:
2664
        case CHIP_ID_YUKON_EC_U:
2665
        case CHIP_ID_YUKON_EX:
2666
                return 125;
2667
 
2668
        case CHIP_ID_YUKON_FE:
2669
                return 100;
2670
 
2671
        case CHIP_ID_YUKON_FE_P:
2672
                return 50;
2673
 
2674
        case CHIP_ID_YUKON_XL:
2675
                return 156;
2676
 
2677
        default:
2678
                BUG();
2679
        }
2680
}
2681
 
2682
static inline u32 sky2_us2clk(const struct sky2_hw *hw, u32 us)
2683
{
2684
        return sky2_mhz(hw) * us;
2685
}
2686
 
2687
static inline u32 sky2_clk2us(const struct sky2_hw *hw, u32 clk)
2688
{
2689
        return clk / sky2_mhz(hw);
2690
}
2691
 
2692
 
2693
static int __devinit sky2_init(struct sky2_hw *hw)
2694
{
2695
        u8 t8;
2696
 
2697
        /* Enable all clocks and check for bad PCI access */
2698
        sky2_pci_write32(hw, PCI_DEV_REG3, 0);
2699
 
2700
        sky2_write8(hw, B0_CTST, CS_RST_CLR);
2701
 
2702
        hw->chip_id = sky2_read8(hw, B2_CHIP_ID);
2703
        hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4;
2704
 
2705
        switch(hw->chip_id) {
2706
        case CHIP_ID_YUKON_XL:
2707
                hw->flags = SKY2_HW_GIGABIT
2708
                        | SKY2_HW_NEWER_PHY;
2709
                if (hw->chip_rev < 3)
2710
                        hw->flags |= SKY2_HW_FIFO_HANG_CHECK;
2711
 
2712
                break;
2713
 
2714
        case CHIP_ID_YUKON_EC_U:
2715
                hw->flags = SKY2_HW_GIGABIT
2716
                        | SKY2_HW_NEWER_PHY
2717
                        | SKY2_HW_ADV_POWER_CTL;
2718
                break;
2719
 
2720
        case CHIP_ID_YUKON_EX:
2721
                hw->flags = SKY2_HW_GIGABIT
2722
                        | SKY2_HW_NEWER_PHY
2723
                        | SKY2_HW_NEW_LE
2724
                        | SKY2_HW_ADV_POWER_CTL;
2725
 
2726
                /* New transmit checksum */
2727
                if (hw->chip_rev != CHIP_REV_YU_EX_B0)
2728
                        hw->flags |= SKY2_HW_AUTO_TX_SUM;
2729
                break;
2730
 
2731
        case CHIP_ID_YUKON_EC:
2732
                /* This rev is really old, and requires untested workarounds */
2733
                if (hw->chip_rev == CHIP_REV_YU_EC_A1) {
2734
                        dev_err(&hw->pdev->dev, "unsupported revision Yukon-EC rev A1\n");
2735
                        return -EOPNOTSUPP;
2736
                }
2737
                hw->flags = SKY2_HW_GIGABIT | SKY2_HW_FIFO_HANG_CHECK;
2738
                break;
2739
 
2740
        case CHIP_ID_YUKON_FE:
2741
                break;
2742
 
2743
        case CHIP_ID_YUKON_FE_P:
2744
                hw->flags = SKY2_HW_NEWER_PHY
2745
                        | SKY2_HW_NEW_LE
2746
                        | SKY2_HW_AUTO_TX_SUM
2747
                        | SKY2_HW_ADV_POWER_CTL;
2748
                break;
2749
        default:
2750
                dev_err(&hw->pdev->dev, "unsupported chip type 0x%x\n",
2751
                        hw->chip_id);
2752
                return -EOPNOTSUPP;
2753
        }
2754
 
2755
        hw->pmd_type = sky2_read8(hw, B2_PMD_TYP);
2756
        if (hw->pmd_type == 'L' || hw->pmd_type == 'S' || hw->pmd_type == 'P')
2757
                hw->flags |= SKY2_HW_FIBRE_PHY;
2758
 
2759
 
2760
        hw->ports = 1;
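        /* Report a second port only when the dual MAC configuration bits
         * are set and the second link is not marked inactive.
         */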
2761
        t8 = sky2_read8(hw, B2_Y2_HW_RES);
2762
        if ((t8 & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) {
2763
                if (!(sky2_read8(hw, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
2764
                        ++hw->ports;
2765
        }
2766
 
2767
        return 0;
2768
}
2769
 
2770
static void sky2_reset(struct sky2_hw *hw)
2771
{
2772
        struct pci_dev *pdev = hw->pdev;
2773
        u16 status;
2774
        int i, cap;
2775
        u32 hwe_mask = Y2_HWE_ALL_MASK;
2776
 
2777
        /* disable ASF */
2778
        if (hw->chip_id == CHIP_ID_YUKON_EX) {
2779
                status = sky2_read16(hw, HCU_CCSR);
2780
                status &= ~(HCU_CCSR_AHB_RST | HCU_CCSR_CPU_RST_MODE |
2781
                            HCU_CCSR_UC_STATE_MSK);
2782
                sky2_write16(hw, HCU_CCSR, status);
2783
        } else
2784
                sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
2785
        sky2_write16(hw, B0_CTST, Y2_ASF_DISABLE);
2786
 
2787
        /* do a SW reset */
2788
        sky2_write8(hw, B0_CTST, CS_RST_SET);
2789
        sky2_write8(hw, B0_CTST, CS_RST_CLR);
2790
 
2791
        /* allow writes to PCI config */
2792
        sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2793
 
2794
        /* clear PCI errors, if any */
2795
        status = sky2_pci_read16(hw, PCI_STATUS);
2796
        status |= PCI_STATUS_ERROR_BITS;
2797
        sky2_pci_write16(hw, PCI_STATUS, status);
2798
 
2799
        sky2_write8(hw, B0_CTST, CS_MRST_CLR);
2800
 
2801
        cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
2802
        if (cap) {
2803
                sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS,
2804
                             0xfffffffful);
2805
 
2806
                /* If the error bit is stuck on, ignore it */
2807
                if (sky2_read32(hw, B0_HWE_ISRC) & Y2_IS_PCI_EXP)
2808
                        dev_info(&pdev->dev, "ignoring stuck error report bit\n");
2809
                else
2810
                        hwe_mask |= Y2_IS_PCI_EXP;
2811
        }
2812
 
2813
        sky2_power_on(hw);
2814
 
2815
        for (i = 0; i < hw->ports; i++) {
2816
                sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
2817
                sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);
2818
 
2819
                if (hw->chip_id == CHIP_ID_YUKON_EX)
2820
                        sky2_write16(hw, SK_REG(i, GMAC_CTRL),
2821
                                     GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON
2822
                                     | GMC_BYP_RETR_ON);
2823
        }
2824
 
2825
        /* Clear I2C IRQ noise */
2826
        sky2_write32(hw, B2_I2C_IRQ, 1);
2827
 
2828
        /* turn off hardware timer (unused) */
2829
        sky2_write8(hw, B2_TI_CTRL, TIM_STOP);
2830
        sky2_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ);
2831
 
2832
        sky2_write8(hw, B0_Y2LED, LED_STAT_ON);
2833
 
2834
        /* Turn off descriptor polling */
2835
        sky2_write32(hw, B28_DPT_CTRL, DPT_STOP);
2836
 
2837
        /* Turn off receive timestamp */
2838
        sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_STOP);
2839
        sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
2840
 
2841
        /* enable the Tx Arbiters */
2842
        for (i = 0; i < hw->ports; i++)
2843
                sky2_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB);
2844
 
2845
        /* Initialize ram interface */
2846
        for (i = 0; i < hw->ports; i++) {
2847
                sky2_write8(hw, RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);
2848
 
2849
                sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R1), SK_RI_TO_53);
2850
                sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA1), SK_RI_TO_53);
2851
                sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS1), SK_RI_TO_53);
2852
                sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R1), SK_RI_TO_53);
2853
                sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA1), SK_RI_TO_53);
2854
                sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS1), SK_RI_TO_53);
2855
                sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R2), SK_RI_TO_53);
2856
                sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA2), SK_RI_TO_53);
2857
                sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS2), SK_RI_TO_53);
2858
                sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R2), SK_RI_TO_53);
2859
                sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA2), SK_RI_TO_53);
2860
                sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS2), SK_RI_TO_53);
2861
        }
2862
 
2863
        sky2_write32(hw, B0_HWE_IMSK, hwe_mask);
2864
 
2865
        for (i = 0; i < hw->ports; i++)
2866
                sky2_gmac_reset(hw, i);
2867
 
2868
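        /* Set up the status ring: clear the list elements, reset the
         * status unit, and give it the bus address of the ring before
         * programming the watermarks and timers below.
         */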
        memset(hw->st_le, 0, STATUS_LE_BYTES);
2869
        hw->st_idx = 0;
2870
 
2871
        sky2_write32(hw, STAT_CTRL, SC_STAT_RST_SET);
2872
        sky2_write32(hw, STAT_CTRL, SC_STAT_RST_CLR);
2873
 
2874
        sky2_write32(hw, STAT_LIST_ADDR_LO, hw->st_dma);
2875
        sky2_write32(hw, STAT_LIST_ADDR_HI, (u64) hw->st_dma >> 32);
2876
 
2877
        /* Set the list last index */
2878
        sky2_write16(hw, STAT_LAST_IDX, STATUS_RING_SIZE - 1);
2879
 
2880
        sky2_write16(hw, STAT_TX_IDX_TH, 10);
2881
        sky2_write8(hw, STAT_FIFO_WM, 16);
2882
 
2883
        /* set Status-FIFO ISR watermark */
2884
        if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0)
2885
                sky2_write8(hw, STAT_FIFO_ISR_WM, 4);
2886
        else
2887
                sky2_write8(hw, STAT_FIFO_ISR_WM, 16);
2888
 
2889
        sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 1000));
2890
        sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, 20));
2891
        sky2_write32(hw, STAT_LEV_TIMER_INI, sky2_us2clk(hw, 100));
2892
 
2893
        /* enable status unit */
2894
        sky2_write32(hw, STAT_CTRL, SC_STAT_OP_ON);
2895
 
2896
        sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
2897
        sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START);
2898
        sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START);
2899
}
2900
 
2901
static void sky2_restart(struct work_struct *work)
2902
{
2903
        struct sky2_hw *hw = container_of(work, struct sky2_hw, restart_work);
2904
        struct net_device *dev;
2905
        int i, err;
2906
 
2907
        rtnl_lock();
2908
        for (i = 0; i < hw->ports; i++) {
2909
                dev = hw->dev[i];
2910
                if (netif_running(dev))
2911
                        sky2_down(dev);
2912
        }
2913
 
2914
        napi_disable(&hw->napi);
2915
        sky2_write32(hw, B0_IMSK, 0);
2916
        sky2_reset(hw);
2917
        sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
2918
        napi_enable(&hw->napi);
2919
 
2920
        for (i = 0; i < hw->ports; i++) {
2921
                dev = hw->dev[i];
2922
                if (netif_running(dev)) {
2923
                        err = sky2_up(dev);
2924
                        if (err) {
2925
                                printk(KERN_INFO PFX "%s: could not restart %d\n",
2926
                                       dev->name, err);
2927
                                dev_close(dev);
2928
                        }
2929
                }
2930
        }
2931
 
2932
        rtnl_unlock();
2933
}
2934
 
2935
static inline u8 sky2_wol_supported(const struct sky2_hw *hw)
2936
{
2937
        return sky2_is_copper(hw) ? (WAKE_PHY | WAKE_MAGIC) : 0;
2938
}
2939
 
2940
static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2941
{
2942
        const struct sky2_port *sky2 = netdev_priv(dev);
2943
 
2944
        wol->supported = sky2_wol_supported(sky2->hw);
2945
        wol->wolopts = sky2->wol;
2946
}
2947
 
2948
static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2949
{
2950
        struct sky2_port *sky2 = netdev_priv(dev);
2951
        struct sky2_hw *hw = sky2->hw;
2952
 
2953
        if (wol->wolopts & ~sky2_wol_supported(sky2->hw))
2954
                return -EOPNOTSUPP;
2955
 
2956
        sky2->wol = wol->wolopts;
2957
 
2958
        if (hw->chip_id == CHIP_ID_YUKON_EC_U ||
2959
            hw->chip_id == CHIP_ID_YUKON_EX ||
2960
            hw->chip_id == CHIP_ID_YUKON_FE_P)
2961
                sky2_write32(hw, B0_CTST, sky2->wol
2962
                             ? Y2_HW_WOL_ON : Y2_HW_WOL_OFF);
2963
 
2964
        if (!netif_running(dev))
2965
                sky2_wol_init(sky2);
2966
        return 0;
2967
}
2968
 
2969
static u32 sky2_supported_modes(const struct sky2_hw *hw)
2970
{
2971
        if (sky2_is_copper(hw)) {
2972
                u32 modes = SUPPORTED_10baseT_Half
2973
                        | SUPPORTED_10baseT_Full
2974
                        | SUPPORTED_100baseT_Half
2975
                        | SUPPORTED_100baseT_Full
2976
                        | SUPPORTED_Autoneg | SUPPORTED_TP;
2977
 
2978
                if (hw->flags & SKY2_HW_GIGABIT)
2979
                        modes |= SUPPORTED_1000baseT_Half
2980
                                | SUPPORTED_1000baseT_Full;
2981
                return modes;
2982
        } else
2983
                return  SUPPORTED_1000baseT_Half
2984
                        | SUPPORTED_1000baseT_Full
2985
                        | SUPPORTED_Autoneg
2986
                        | SUPPORTED_FIBRE;
2987
}
2988
 
2989
static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
2990
{
2991
        struct sky2_port *sky2 = netdev_priv(dev);
2992
        struct sky2_hw *hw = sky2->hw;
2993
 
2994
        ecmd->transceiver = XCVR_INTERNAL;
2995
        ecmd->supported = sky2_supported_modes(hw);
2996
        ecmd->phy_address = PHY_ADDR_MARV;
2997
        if (sky2_is_copper(hw)) {
2998
                ecmd->port = PORT_TP;
2999
                ecmd->speed = sky2->speed;
3000
        } else {
3001
                ecmd->speed = SPEED_1000;
3002
                ecmd->port = PORT_FIBRE;
3003
        }
3004
 
3005
        ecmd->advertising = sky2->advertising;
3006
        ecmd->autoneg = sky2->autoneg;
3007
        ecmd->duplex = sky2->duplex;
3008
        return 0;
3009
}
3010
 
3011
static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
3012
{
3013
        struct sky2_port *sky2 = netdev_priv(dev);
3014
        const struct sky2_hw *hw = sky2->hw;
3015
        u32 supported = sky2_supported_modes(hw);
3016
 
3017
        if (ecmd->autoneg == AUTONEG_ENABLE) {
3018
                ecmd->advertising = supported;
3019
                sky2->duplex = -1;
3020
                sky2->speed = -1;
3021
        } else {
3022
                u32 setting;
3023
 
3024
                switch (ecmd->speed) {
3025
                case SPEED_1000:
3026
                        if (ecmd->duplex == DUPLEX_FULL)
3027
                                setting = SUPPORTED_1000baseT_Full;
3028
                        else if (ecmd->duplex == DUPLEX_HALF)
3029
                                setting = SUPPORTED_1000baseT_Half;
3030
                        else
3031
                                return -EINVAL;
3032
                        break;
3033
                case SPEED_100:
3034
                        if (ecmd->duplex == DUPLEX_FULL)
3035
                                setting = SUPPORTED_100baseT_Full;
3036
                        else if (ecmd->duplex == DUPLEX_HALF)
3037
                                setting = SUPPORTED_100baseT_Half;
3038
                        else
3039
                                return -EINVAL;
3040
                        break;
3041
 
3042
                case SPEED_10:
3043
                        if (ecmd->duplex == DUPLEX_FULL)
3044
                                setting = SUPPORTED_10baseT_Full;
3045
                        else if (ecmd->duplex == DUPLEX_HALF)
3046
                                setting = SUPPORTED_10baseT_Half;
3047
                        else
3048
                                return -EINVAL;
3049
                        break;
3050
                default:
3051
                        return -EINVAL;
3052
                }
3053
 
3054
                if ((setting & supported) == 0)
3055
                        return -EINVAL;
3056
 
3057
                sky2->speed = ecmd->speed;
3058
                sky2->duplex = ecmd->duplex;
3059
        }
3060
 
3061
        sky2->autoneg = ecmd->autoneg;
3062
        sky2->advertising = ecmd->advertising;
3063
 
3064
        if (netif_running(dev)) {
3065
                sky2_phy_reinit(sky2);
3066
                sky2_set_multicast(dev);
3067
        }
3068
 
3069
        return 0;
3070
}
3071
 
3072
static void sky2_get_drvinfo(struct net_device *dev,
3073
                             struct ethtool_drvinfo *info)
3074
{
3075
        struct sky2_port *sky2 = netdev_priv(dev);
3076
 
3077
        strcpy(info->driver, DRV_NAME);
3078
        strcpy(info->version, DRV_VERSION);
3079
        strcpy(info->fw_version, "N/A");
3080
        strcpy(info->bus_info, pci_name(sky2->hw->pdev));
3081
}
3082
 
3083
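/* MAC statistics exported via ethtool -S; each entry names a GMAC MIB
 * counter register offset.
 */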
static const struct sky2_stat {
3084
        char name[ETH_GSTRING_LEN];
3085
        u16 offset;
3086
} sky2_stats[] = {
3087
        { "tx_bytes",      GM_TXO_OK_HI },
3088
        { "rx_bytes",      GM_RXO_OK_HI },
3089
        { "tx_broadcast",  GM_TXF_BC_OK },
3090
        { "rx_broadcast",  GM_RXF_BC_OK },
3091
        { "tx_multicast",  GM_TXF_MC_OK },
3092
        { "rx_multicast",  GM_RXF_MC_OK },
3093
        { "tx_unicast",    GM_TXF_UC_OK },
3094
        { "rx_unicast",    GM_RXF_UC_OK },
3095
        { "tx_mac_pause",  GM_TXF_MPAUSE },
3096
        { "rx_mac_pause",  GM_RXF_MPAUSE },
3097
        { "collisions",    GM_TXF_COL },
3098
        { "late_collision", GM_TXF_LAT_COL },
3099
        { "aborted",       GM_TXF_ABO_COL },
3100
        { "single_collisions", GM_TXF_SNG_COL },
3101
        { "multi_collisions", GM_TXF_MUL_COL },
3102
 
3103
        { "rx_short",      GM_RXF_SHT },
3104
        { "rx_runt",       GM_RXE_FRAG },
3105
        { "rx_64_byte_packets", GM_RXF_64B },
3106
        { "rx_65_to_127_byte_packets", GM_RXF_127B },
3107
        { "rx_128_to_255_byte_packets", GM_RXF_255B },
3108
        { "rx_256_to_511_byte_packets", GM_RXF_511B },
3109
        { "rx_512_to_1023_byte_packets", GM_RXF_1023B },
3110
        { "rx_1024_to_1518_byte_packets", GM_RXF_1518B },
3111
        { "rx_1518_to_max_byte_packets", GM_RXF_MAX_SZ },
3112
        { "rx_too_long",   GM_RXF_LNG_ERR },
3113
        { "rx_fifo_overflow", GM_RXE_FIFO_OV },
3114
        { "rx_jabber",     GM_RXF_JAB_PKT },
3115
        { "rx_fcs_error",   GM_RXF_FCS_ERR },
3116
 
3117
        { "tx_64_byte_packets", GM_TXF_64B },
3118
        { "tx_65_to_127_byte_packets", GM_TXF_127B },
3119
        { "tx_128_to_255_byte_packets", GM_TXF_255B },
3120
        { "tx_256_to_511_byte_packets", GM_TXF_511B },
3121
        { "tx_512_to_1023_byte_packets", GM_TXF_1023B },
3122
        { "tx_1024_to_1518_byte_packets", GM_TXF_1518B },
3123
        { "tx_1519_to_max_byte_packets", GM_TXF_MAX_SZ },
3124
        { "tx_fifo_underrun", GM_TXE_FIFO_UR },
3125
};
3126
 
3127
static u32 sky2_get_rx_csum(struct net_device *dev)
3128
{
3129
        struct sky2_port *sky2 = netdev_priv(dev);
3130
 
3131
        return sky2->rx_csum;
3132
}
3133
 
3134
static int sky2_set_rx_csum(struct net_device *dev, u32 data)
3135
{
3136
        struct sky2_port *sky2 = netdev_priv(dev);
3137
 
3138
        sky2->rx_csum = data;
3139
 
3140
        sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
3141
                     data ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
3142
 
3143
        return 0;
3144
}
3145
 
3146
static u32 sky2_get_msglevel(struct net_device *netdev)
3147
{
3148
        struct sky2_port *sky2 = netdev_priv(netdev);
3149
        return sky2->msg_enable;
3150
}
3151
 
3152
static int sky2_nway_reset(struct net_device *dev)
3153
{
3154
        struct sky2_port *sky2 = netdev_priv(dev);
3155
 
3156
        if (!netif_running(dev) || sky2->autoneg != AUTONEG_ENABLE)
3157
                return -EINVAL;
3158
 
3159
        sky2_phy_reinit(sky2);
3160
        sky2_set_multicast(dev);
3161
 
3162
        return 0;
3163
}
3164
 
3165
static void sky2_phy_stats(struct sky2_port *sky2, u64 * data, unsigned count)
3166
{
3167
        struct sky2_hw *hw = sky2->hw;
3168
        unsigned port = sky2->port;
3169
        int i;
3170
 
3171
        data[0] = (u64) gma_read32(hw, port, GM_TXO_OK_HI) << 32
3172
            | (u64) gma_read32(hw, port, GM_TXO_OK_LO);
3173
        data[1] = (u64) gma_read32(hw, port, GM_RXO_OK_HI) << 32
3174
            | (u64) gma_read32(hw, port, GM_RXO_OK_LO);
3175
 
3176
        for (i = 2; i < count; i++)
3177
                data[i] = (u64) gma_read32(hw, port, sky2_stats[i].offset);
3178
}
3179
 
3180
static void sky2_set_msglevel(struct net_device *netdev, u32 value)
3181
{
3182
        struct sky2_port *sky2 = netdev_priv(netdev);
3183
        sky2->msg_enable = value;
3184
}
3185
 
3186
static int sky2_get_sset_count(struct net_device *dev, int sset)
3187
{
3188
        switch (sset) {
3189
        case ETH_SS_STATS:
3190
                return ARRAY_SIZE(sky2_stats);
3191
        default:
3192
                return -EOPNOTSUPP;
3193
        }
3194
}
3195
 
3196
static void sky2_get_ethtool_stats(struct net_device *dev,
3197
                                   struct ethtool_stats *stats, u64 * data)
3198
{
3199
        struct sky2_port *sky2 = netdev_priv(dev);
3200
 
3201
        sky2_phy_stats(sky2, data, ARRAY_SIZE(sky2_stats));
3202
}
3203
 
3204
static void sky2_get_strings(struct net_device *dev, u32 stringset, u8 * data)
3205
{
3206
        int i;
3207
 
3208
        switch (stringset) {
3209
        case ETH_SS_STATS:
3210
                for (i = 0; i < ARRAY_SIZE(sky2_stats); i++)
3211
                        memcpy(data + i * ETH_GSTRING_LEN,
3212
                               sky2_stats[i].name, ETH_GSTRING_LEN);
3213
                break;
3214
        }
3215
}
3216
 
3217
static int sky2_set_mac_address(struct net_device *dev, void *p)
3218
{
3219
        struct sky2_port *sky2 = netdev_priv(dev);
3220
        struct sky2_hw *hw = sky2->hw;
3221
        unsigned port = sky2->port;
3222
        const struct sockaddr *addr = p;
3223
 
3224
        if (!is_valid_ether_addr(addr->sa_data))
3225
                return -EADDRNOTAVAIL;
3226
 
3227
        memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
3228
        memcpy_toio(hw->regs + B2_MAC_1 + port * 8,
3229
                    dev->dev_addr, ETH_ALEN);
3230
        memcpy_toio(hw->regs + B2_MAC_2 + port * 8,
3231
                    dev->dev_addr, ETH_ALEN);
3232
 
3233
        /* virtual address for data */
3234
        gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr);
3235
 
3236
        /* physical address: used for pause frames */
3237
        gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr);
3238
 
3239
        return 0;
3240
}
3241
 
3242
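/* Set the hash-filter bit for one multicast address: the low six bits of
 * the Ethernet CRC pick which of the 64 filter bits to set.
 */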
static inline void sky2_add_filter(u8 filter[8], const u8 *addr)
3243
{
3244
        u32 bit;
3245
 
3246
        bit = ether_crc(ETH_ALEN, addr) & 63;
3247
        filter[bit >> 3] |= 1 << (bit & 7);
3248
}
3249
 
3250
static void sky2_set_multicast(struct net_device *dev)
3251
{
3252
        struct sky2_port *sky2 = netdev_priv(dev);
3253
        struct sky2_hw *hw = sky2->hw;
3254
        unsigned port = sky2->port;
3255
        struct dev_mc_list *list = dev->mc_list;
3256
        u16 reg;
3257
        u8 filter[8];
3258
        int rx_pause;
3259
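        /* 01:80:c2:00:00:01 is the reserved MAC control (pause frame)
         * multicast address; it must pass the filter whenever receive
         * flow control is enabled.
         */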
        static const u8 pause_mc_addr[ETH_ALEN] = { 0x1, 0x80, 0xc2, 0x0, 0x0, 0x1 };
3260
 
3261
        rx_pause = (sky2->flow_status == FC_RX || sky2->flow_status == FC_BOTH);
3262
        memset(filter, 0, sizeof(filter));
3263
 
3264
        reg = gma_read16(hw, port, GM_RX_CTRL);
3265
        reg |= GM_RXCR_UCF_ENA;
3266
 
3267
        if (dev->flags & IFF_PROMISC)   /* promiscuous */
3268
                reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
3269
        else if (dev->flags & IFF_ALLMULTI)
3270
                memset(filter, 0xff, sizeof(filter));
3271
        else if (dev->mc_count == 0 && !rx_pause)
3272
                reg &= ~GM_RXCR_MCF_ENA;
3273
        else {
3274
                int i;
3275
                reg |= GM_RXCR_MCF_ENA;
3276
 
3277
                if (rx_pause)
3278
                        sky2_add_filter(filter, pause_mc_addr);
3279
 
3280
                for (i = 0; list && i < dev->mc_count; i++, list = list->next)
3281
                        sky2_add_filter(filter, list->dmi_addr);
3282
        }
3283
 
3284
        gma_write16(hw, port, GM_MC_ADDR_H1,
3285
                    (u16) filter[0] | ((u16) filter[1] << 8));
3286
        gma_write16(hw, port, GM_MC_ADDR_H2,
3287
                    (u16) filter[2] | ((u16) filter[3] << 8));
3288
        gma_write16(hw, port, GM_MC_ADDR_H3,
3289
                    (u16) filter[4] | ((u16) filter[5] << 8));
3290
        gma_write16(hw, port, GM_MC_ADDR_H4,
3291
                    (u16) filter[6] | ((u16) filter[7] << 8));
3292
 
3293
        gma_write16(hw, port, GM_RX_CTRL, reg);
3294
}
3295
 
3296
/* A single global blink state is enough because LED blinking is
 * controlled by ethtool, which always runs under the RTNL mutex.
 */
3299
static void sky2_led(struct sky2_hw *hw, unsigned port, int on)
3300
{
3301
        u16 pg;
3302
 
3303
        switch (hw->chip_id) {
3304
        case CHIP_ID_YUKON_XL:
3305
                pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
3306
                gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
3307
                gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
3308
                             on ? (PHY_M_LEDC_LOS_CTRL(1) |
3309
                                   PHY_M_LEDC_INIT_CTRL(7) |
3310
                                   PHY_M_LEDC_STA1_CTRL(7) |
3311
                                   PHY_M_LEDC_STA0_CTRL(7))
3312
                             : 0);
3313
 
3314
                gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
3315
                break;
3316
 
3317
        default:
3318
                gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
3319
                gm_phy_write(hw, port, PHY_MARV_LED_OVER,
3320
                             on ? PHY_M_LED_ALL : 0);
3321
        }
3322
}
3323
 
3324
/* Blink the LEDs to help locate the board */
3325
static int sky2_phys_id(struct net_device *dev, u32 data)
3326
{
3327
        struct sky2_port *sky2 = netdev_priv(dev);
3328
        struct sky2_hw *hw = sky2->hw;
3329
        unsigned port = sky2->port;
3330
        u16 ledctrl, ledover = 0;
3331
        long ms;
3332
        int interrupted;
3333
        int onoff = 1;
3334
 
3335
        if (!data || data > (u32) (MAX_SCHEDULE_TIMEOUT / HZ))
3336
                ms = jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT);
3337
        else
3338
                ms = data * 1000;
3339
 
3340
        /* save initial values */
3341
        spin_lock_bh(&sky2->phy_lock);
3342
        if (hw->chip_id == CHIP_ID_YUKON_XL) {
3343
                u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
3344
                gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
3345
                ledctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
3346
                gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
3347
        } else {
3348
                ledctrl = gm_phy_read(hw, port, PHY_MARV_LED_CTRL);
3349
                ledover = gm_phy_read(hw, port, PHY_MARV_LED_OVER);
3350
        }
3351
 
3352
        interrupted = 0;
3353
        while (!interrupted && ms > 0) {
3354
                sky2_led(hw, port, onoff);
3355
                onoff = !onoff;
3356
 
3357
                spin_unlock_bh(&sky2->phy_lock);
3358
                interrupted = msleep_interruptible(250);
3359
                spin_lock_bh(&sky2->phy_lock);
3360
 
3361
                ms -= 250;
3362
        }
3363
 
3364
        /* resume regularly scheduled programming */
3365
        if (hw->chip_id == CHIP_ID_YUKON_XL) {
3366
                u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
3367
                gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
3368
                gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ledctrl);
3369
                gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
3370
        } else {
3371
                gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
3372
                gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);
3373
        }
3374
        spin_unlock_bh(&sky2->phy_lock);
3375
 
3376
        return 0;
3377
}
3378
 
3379
static void sky2_get_pauseparam(struct net_device *dev,
3380
                                struct ethtool_pauseparam *ecmd)
3381
{
3382
        struct sky2_port *sky2 = netdev_priv(dev);
3383
 
3384
        switch (sky2->flow_mode) {
3385
        case FC_NONE:
3386
                ecmd->tx_pause = ecmd->rx_pause = 0;
3387
                break;
3388
        case FC_TX:
3389
                ecmd->tx_pause = 1, ecmd->rx_pause = 0;
3390
                break;
3391
        case FC_RX:
3392
                ecmd->tx_pause = 0, ecmd->rx_pause = 1;
3393
                break;
3394
        case FC_BOTH:
3395
                ecmd->tx_pause = ecmd->rx_pause = 1;
3396
        }
3397
 
3398
        ecmd->autoneg = sky2->autoneg;
3399
}
3400
 
3401
static int sky2_set_pauseparam(struct net_device *dev,
3402
                               struct ethtool_pauseparam *ecmd)
3403
{
3404
        struct sky2_port *sky2 = netdev_priv(dev);
3405
 
3406
        sky2->autoneg = ecmd->autoneg;
3407
        sky2->flow_mode = sky2_flow(ecmd->rx_pause, ecmd->tx_pause);
3408
 
3409
        if (netif_running(dev))
3410
                sky2_phy_reinit(sky2);
3411
 
3412
        return 0;
3413
}
3414
 
3415
static int sky2_get_coalesce(struct net_device *dev,
3416
                             struct ethtool_coalesce *ecmd)
3417
{
3418
        struct sky2_port *sky2 = netdev_priv(dev);
3419
        struct sky2_hw *hw = sky2->hw;
3420
 
3421
        if (sky2_read8(hw, STAT_TX_TIMER_CTRL) == TIM_STOP)
3422
                ecmd->tx_coalesce_usecs = 0;
3423
        else {
3424
                u32 clks = sky2_read32(hw, STAT_TX_TIMER_INI);
3425
                ecmd->tx_coalesce_usecs = sky2_clk2us(hw, clks);
3426
        }
3427
        ecmd->tx_max_coalesced_frames = sky2_read16(hw, STAT_TX_IDX_TH);
3428
 
3429
        if (sky2_read8(hw, STAT_LEV_TIMER_CTRL) == TIM_STOP)
3430
                ecmd->rx_coalesce_usecs = 0;
3431
        else {
3432
                u32 clks = sky2_read32(hw, STAT_LEV_TIMER_INI);
3433
                ecmd->rx_coalesce_usecs = sky2_clk2us(hw, clks);
3434
        }
3435
        ecmd->rx_max_coalesced_frames = sky2_read8(hw, STAT_FIFO_WM);
3436
 
3437
        if (sky2_read8(hw, STAT_ISR_TIMER_CTRL) == TIM_STOP)
3438
                ecmd->rx_coalesce_usecs_irq = 0;
3439
        else {
3440
                u32 clks = sky2_read32(hw, STAT_ISR_TIMER_INI);
3441
                ecmd->rx_coalesce_usecs_irq = sky2_clk2us(hw, clks);
3442
        }
3443
 
3444
        ecmd->rx_max_coalesced_frames_irq = sky2_read8(hw, STAT_FIFO_ISR_WM);
3445
 
3446
        return 0;
3447
}
3448
 
3449
/* Note: this affects both ports */
3450
static int sky2_set_coalesce(struct net_device *dev,
3451
                             struct ethtool_coalesce *ecmd)
3452
{
3453
        struct sky2_port *sky2 = netdev_priv(dev);
3454
        struct sky2_hw *hw = sky2->hw;
3455
        const u32 tmax = sky2_clk2us(hw, 0x0ffffff);
3456
 
3457
        if (ecmd->tx_coalesce_usecs > tmax ||
3458
            ecmd->rx_coalesce_usecs > tmax ||
3459
            ecmd->rx_coalesce_usecs_irq > tmax)
3460
                return -EINVAL;
3461
 
3462
        if (ecmd->tx_max_coalesced_frames >= TX_RING_SIZE-1)
3463
                return -EINVAL;
3464
        if (ecmd->rx_max_coalesced_frames > RX_MAX_PENDING)
3465
                return -EINVAL;
3466
        if (ecmd->rx_max_coalesced_frames_irq > RX_MAX_PENDING)
3467
                return -EINVAL;
3468
 
3469
        if (ecmd->tx_coalesce_usecs == 0)
3470
                sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
3471
        else {
3472
                sky2_write32(hw, STAT_TX_TIMER_INI,
3473
                             sky2_us2clk(hw, ecmd->tx_coalesce_usecs));
3474
                sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
3475
        }
3476
        sky2_write16(hw, STAT_TX_IDX_TH, ecmd->tx_max_coalesced_frames);
3477
 
3478
        if (ecmd->rx_coalesce_usecs == 0)
3479
                sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_STOP);
3480
        else {
3481
                sky2_write32(hw, STAT_LEV_TIMER_INI,
3482
                             sky2_us2clk(hw, ecmd->rx_coalesce_usecs));
3483
                sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START);
3484
        }
3485
        sky2_write8(hw, STAT_FIFO_WM, ecmd->rx_max_coalesced_frames);
3486
 
3487
        if (ecmd->rx_coalesce_usecs_irq == 0)
3488
                sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_STOP);
3489
        else {
3490
                sky2_write32(hw, STAT_ISR_TIMER_INI,
3491
                             sky2_us2clk(hw, ecmd->rx_coalesce_usecs_irq));
3492
                sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START);
3493
        }
3494
        sky2_write8(hw, STAT_FIFO_ISR_WM, ecmd->rx_max_coalesced_frames_irq);
3495
        return 0;
3496
}
3497
 
3498
static void sky2_get_ringparam(struct net_device *dev,
3499
                               struct ethtool_ringparam *ering)
3500
{
3501
        struct sky2_port *sky2 = netdev_priv(dev);
3502
 
3503
        ering->rx_max_pending = RX_MAX_PENDING;
3504
        ering->rx_mini_max_pending = 0;
3505
        ering->rx_jumbo_max_pending = 0;
3506
        ering->tx_max_pending = TX_RING_SIZE - 1;
3507
 
3508
        ering->rx_pending = sky2->rx_pending;
3509
        ering->rx_mini_pending = 0;
3510
        ering->rx_jumbo_pending = 0;
3511
        ering->tx_pending = sky2->tx_pending;
3512
}
3513
 
3514
static int sky2_set_ringparam(struct net_device *dev,
3515
                              struct ethtool_ringparam *ering)
3516
{
3517
        struct sky2_port *sky2 = netdev_priv(dev);
3518
        int err = 0;
3519
 
3520
        if (ering->rx_pending > RX_MAX_PENDING ||
3521
            ering->rx_pending < 8 ||
3522
            ering->tx_pending < MAX_SKB_TX_LE ||
3523
            ering->tx_pending > TX_RING_SIZE - 1)
3524
                return -EINVAL;
3525
 
3526
        if (netif_running(dev))
3527
                sky2_down(dev);
3528
 
3529
        sky2->rx_pending = ering->rx_pending;
3530
        sky2->tx_pending = ering->tx_pending;
3531
 
3532
        if (netif_running(dev)) {
3533
                err = sky2_up(dev);
3534
                if (err)
3535
                        dev_close(dev);
3536
                else
3537
                        sky2_set_multicast(dev);
3538
        }
3539
 
3540
        return err;
3541
}
3542
 
3543
static int sky2_get_regs_len(struct net_device *dev)
3544
{
3545
        return 0x4000;
3546
}
3547
 
3548
/*
 * Returns a copy of the control register region.
 * Note: ethtool_get_regs always provides a full-size (16k) buffer.
 */
3552
static void sky2_get_regs(struct net_device *dev, struct ethtool_regs *regs,
3553
                          void *p)
3554
{
3555
        const struct sky2_port *sky2 = netdev_priv(dev);
3556
        const void __iomem *io = sky2->hw->regs;
3557
        unsigned int b;
3558
 
3559
        regs->version = 1;
3560
 
3561
        for (b = 0; b < 128; b++) {
3562
                /* This complicated switch statement ensures that we only
                 * access regions that are unreserved.
                 * Some blocks are only valid on dual port cards,
                 * and block 3 has special diagnostic registers that
                 * are poisonous to read.
                 */
3568
                switch (b) {
3569
                case 3:
3570
                        /* skip diagnostic ram region */
3571
                        memcpy_fromio(p + 0x10, io + 0x10, 128 - 0x10);
3572
                        break;
3573
 
3574
                /* dual port cards only */
3575
                case 5:         /* Tx Arbiter 2 */
3576
                case 9:         /* RX2 */
3577
                case 14 ... 15: /* TX2 */
3578
                case 17: case 19: /* Ram Buffer 2 */
3579
                case 22 ... 23: /* Tx Ram Buffer 2 */
3580
                case 25:        /* Rx MAC Fifo 1 */
3581
                case 27:        /* Tx MAC Fifo 2 */
3582
                case 31:        /* GPHY 2 */
3583
                case 40 ... 47: /* Pattern Ram 2 */
3584
                case 52: case 54: /* TCP Segmentation 2 */
3585
                case 112 ... 116: /* GMAC 2 */
3586
                        if (sky2->hw->ports == 1)
3587
                                goto reserved;
3588
                        /* fall through */
3589
                case 0:          /* Control */
3590
                case 2:         /* Mac address */
3591
                case 4:         /* Tx Arbiter 1 */
3592
                case 7:         /* PCI express reg */
3593
                case 8:         /* RX1 */
3594
                case 12 ... 13: /* TX1 */
3595
                case 16: case 18:/* Rx Ram Buffer 1 */
3596
                case 20 ... 21: /* Tx Ram Buffer 1 */
3597
                case 24:        /* Rx MAC Fifo 1 */
3598
                case 26:        /* Tx MAC Fifo 1 */
3599
                case 28 ... 29: /* Descriptor and status unit */
3600
                case 30:        /* GPHY 1*/
3601
                case 32 ... 39: /* Pattern Ram 1 */
3602
                case 48: case 50: /* TCP Segmentation 1 */
3603
                case 56 ... 60: /* PCI space */
3604
                case 80 ... 84: /* GMAC 1 */
3605
                        memcpy_fromio(p, io, 128);
3606
                        break;
3607
                default:
3608
reserved:
3609
                        memset(p, 0, 128);
3610
                }
3611
 
3612
                p += 128;
3613
                io += 128;
3614
        }
3615
}
3616
 
3617
/* In order to do Jumbo packets on these chips, we need to turn off the
 * transmit store/forward. Therefore checksum offload won't work.
 */
3620
static int no_tx_offload(struct net_device *dev)
3621
{
3622
        const struct sky2_port *sky2 = netdev_priv(dev);
3623
        const struct sky2_hw *hw = sky2->hw;
3624
 
3625
        return dev->mtu > ETH_DATA_LEN && hw->chip_id == CHIP_ID_YUKON_EC_U;
3626
}
3627
 
3628
static int sky2_set_tx_csum(struct net_device *dev, u32 data)
3629
{
3630
        if (data && no_tx_offload(dev))
3631
                return -EINVAL;
3632
 
3633
        return ethtool_op_set_tx_csum(dev, data);
3634
}
3635
 
3636
 
3637
static int sky2_set_tso(struct net_device *dev, u32 data)
3638
{
3639
        if (data && no_tx_offload(dev))
3640
                return -EINVAL;
3641
 
3642
        return ethtool_op_set_tso(dev, data);
3643
}
3644
 
3645
static int sky2_get_eeprom_len(struct net_device *dev)
3646
{
3647
        struct sky2_port *sky2 = netdev_priv(dev);
3648
        struct sky2_hw *hw = sky2->hw;
3649
        u16 reg2;
3650
 
3651
        reg2 = sky2_pci_read16(hw, PCI_DEV_REG2);
3652
        return 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8);
3653
}
3654
 
3655
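/* Read one 32-bit word of VPD (EEPROM) data: write the offset to the VPD
 * address register, then poll until the hardware sets PCI_VPD_ADDR_F to
 * signal that the data register is valid.
 */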
static u32 sky2_vpd_read(struct sky2_hw *hw, int cap, u16 offset)
3656
{
3657
        u32 val;
3658
 
3659
        sky2_pci_write16(hw, cap + PCI_VPD_ADDR, offset);
3660
 
3661
        do {
3662
                offset = sky2_pci_read16(hw, cap + PCI_VPD_ADDR);
3663
        } while (!(offset & PCI_VPD_ADDR_F));
3664
 
3665
        val = sky2_pci_read32(hw, cap + PCI_VPD_DATA);
3666
        return val;
3667
}
3668
 
3669
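/* Write one 32-bit word of VPD data: load the data register, write the
 * offset with PCI_VPD_ADDR_F set, and poll until the hardware clears the
 * flag to signal completion.
 */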
static void sky2_vpd_write(struct sky2_hw *hw, int cap, u16 offset, u32 val)
3670
{
3671
        sky2_pci_write16(hw, cap + PCI_VPD_DATA, val);
3672
        sky2_pci_write32(hw, cap + PCI_VPD_ADDR, offset | PCI_VPD_ADDR_F);
3673
        do {
3674
                offset = sky2_pci_read16(hw, cap + PCI_VPD_ADDR);
3675
        } while (offset & PCI_VPD_ADDR_F);
3676
}
3677
 
3678
static int sky2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
3679
                           u8 *data)
3680
{
3681
        struct sky2_port *sky2 = netdev_priv(dev);
3682
        int cap = pci_find_capability(sky2->hw->pdev, PCI_CAP_ID_VPD);
3683
        int length = eeprom->len;
3684
        u16 offset = eeprom->offset;
3685
 
3686
        if (!cap)
3687
                return -EINVAL;
3688
 
3689
        eeprom->magic = SKY2_EEPROM_MAGIC;
3690
 
3691
        while (length > 0) {
3692
                u32 val = sky2_vpd_read(sky2->hw, cap, offset);
3693
                int n = min_t(int, length, sizeof(val));
3694
 
3695
                memcpy(data, &val, n);
3696
                length -= n;
3697
                data += n;
3698
                offset += n;
3699
        }
3700
        return 0;
3701
}
3702
 
3703
static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
3704
                           u8 *data)
3705
{
3706
        struct sky2_port *sky2 = netdev_priv(dev);
3707
        int cap = pci_find_capability(sky2->hw->pdev, PCI_CAP_ID_VPD);
3708
        int length = eeprom->len;
3709
        u16 offset = eeprom->offset;
3710
 
3711
        if (!cap)
3712
                return -EINVAL;
3713
 
3714
        if (eeprom->magic != SKY2_EEPROM_MAGIC)
3715
                return -EINVAL;
3716
 
3717
        while (length > 0) {
3718
                u32 val;
3719
                int n = min_t(int, length, sizeof(val));
3720
 
3721
                if (n < sizeof(val))
3722
                        val = sky2_vpd_read(sky2->hw, cap, offset);
3723
                memcpy(&val, data, n);
3724
 
3725
                sky2_vpd_write(sky2->hw, cap, offset, val);
3726
 
3727
                length -= n;
3728
                data += n;
3729
                offset += n;
3730
        }
3731
        return 0;
3732
}
3733
 
3734
 
3735
static const struct ethtool_ops sky2_ethtool_ops = {
3736
        .get_settings   = sky2_get_settings,
3737
        .set_settings   = sky2_set_settings,
3738
        .get_drvinfo    = sky2_get_drvinfo,
3739
        .get_wol        = sky2_get_wol,
3740
        .set_wol        = sky2_set_wol,
3741
        .get_msglevel   = sky2_get_msglevel,
3742
        .set_msglevel   = sky2_set_msglevel,
3743
        .nway_reset     = sky2_nway_reset,
3744
        .get_regs_len   = sky2_get_regs_len,
3745
        .get_regs       = sky2_get_regs,
3746
        .get_link       = ethtool_op_get_link,
3747
        .get_eeprom_len = sky2_get_eeprom_len,
3748
        .get_eeprom     = sky2_get_eeprom,
3749
        .set_eeprom     = sky2_set_eeprom,
3750
        .set_sg         = ethtool_op_set_sg,
3751
        .set_tx_csum    = sky2_set_tx_csum,
3752
        .set_tso        = sky2_set_tso,
3753
        .get_rx_csum    = sky2_get_rx_csum,
3754
        .set_rx_csum    = sky2_set_rx_csum,
3755
        .get_strings    = sky2_get_strings,
3756
        .get_coalesce   = sky2_get_coalesce,
3757
        .set_coalesce   = sky2_set_coalesce,
3758
        .get_ringparam  = sky2_get_ringparam,
3759
        .set_ringparam  = sky2_set_ringparam,
3760
        .get_pauseparam = sky2_get_pauseparam,
3761
        .set_pauseparam = sky2_set_pauseparam,
3762
        .phys_id        = sky2_phys_id,
3763
        .get_sset_count = sky2_get_sset_count,
3764
        .get_ethtool_stats = sky2_get_ethtool_stats,
3765
};
3766
 
3767
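/* Optional debugfs support: expose one read-only file per network device
 * that dumps interrupt state and the status, transmit and receive rings.
 */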
#ifdef CONFIG_SKY2_DEBUG
3768
 
3769
static struct dentry *sky2_debug;
3770
 
3771
static int sky2_debug_show(struct seq_file *seq, void *v)
3772
{
3773
        struct net_device *dev = seq->private;
3774
        const struct sky2_port *sky2 = netdev_priv(dev);
3775
        struct sky2_hw *hw = sky2->hw;
3776
        unsigned port = sky2->port;
3777
        unsigned idx, last;
3778
        int sop;
3779
 
3780
        if (!netif_running(dev))
3781
                return -ENETDOWN;
3782
 
3783
        seq_printf(seq, "IRQ src=%x mask=%x control=%x\n",
3784
                   sky2_read32(hw, B0_ISRC),
3785
                   sky2_read32(hw, B0_IMSK),
3786
                   sky2_read32(hw, B0_Y2_SP_ICR));
3787
 
3788
        napi_disable(&hw->napi);
3789
        last = sky2_read16(hw, STAT_PUT_IDX);
3790
 
3791
        if (hw->st_idx == last)
3792
                seq_puts(seq, "Status ring (empty)\n");
3793
        else {
3794
                seq_puts(seq, "Status ring\n");
3795
                for (idx = hw->st_idx; idx != last && idx < STATUS_RING_SIZE;
3796
                     idx = RING_NEXT(idx, STATUS_RING_SIZE)) {
3797
                        const struct sky2_status_le *le = hw->st_le + idx;
3798
                        seq_printf(seq, "[%d] %#x %d %#x\n",
3799
                                   idx, le->opcode, le->length, le->status);
3800
                }
3801
                seq_puts(seq, "\n");
3802
        }
3803
 
3804
        seq_printf(seq, "Tx ring pending=%u...%u report=%d done=%d\n",
3805
                   sky2->tx_cons, sky2->tx_prod,
3806
                   sky2_read16(hw, port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX),
3807
                   sky2_read16(hw, Q_ADDR(txqaddr[port], Q_DONE)));
3808
 
3809
        /* Dump contents of tx ring */
3810
        sop = 1;
3811
        for (idx = sky2->tx_next; idx != sky2->tx_prod && idx < TX_RING_SIZE;
3812
             idx = RING_NEXT(idx, TX_RING_SIZE)) {
3813
                const struct sky2_tx_le *le = sky2->tx_le + idx;
3814
                u32 a = le32_to_cpu(le->addr);
3815
 
3816
                if (sop)
3817
                        seq_printf(seq, "%u:", idx);
3818
                sop = 0;
3819
 
3820
                switch(le->opcode & ~HW_OWNER) {
3821
                case OP_ADDR64:
3822
                        seq_printf(seq, " %#x:", a);
3823
                        break;
3824
                case OP_LRGLEN:
3825
                        seq_printf(seq, " mtu=%d", a);
3826
                        break;
3827
                case OP_VLAN:
3828
                        seq_printf(seq, " vlan=%d", be16_to_cpu(le->length));
3829
                        break;
3830
                case OP_TCPLISW:
3831
                        seq_printf(seq, " csum=%#x", a);
3832
                        break;
3833
                case OP_LARGESEND:
3834
                        seq_printf(seq, " tso=%#x(%d)", a, le16_to_cpu(le->length));
3835
                        break;
3836
                case OP_PACKET:
3837
                        seq_printf(seq, " %#x(%d)", a, le16_to_cpu(le->length));
3838
                        break;
3839
                case OP_BUFFER:
3840
                        seq_printf(seq, " frag=%#x(%d)", a, le16_to_cpu(le->length));
3841
                        break;
3842
                default:
3843
                        seq_printf(seq, " op=%#x,%#x(%d)", le->opcode,
3844
                                   a, le16_to_cpu(le->length));
3845
                }
3846
 
3847
                if (le->ctrl & EOP) {
3848
                        seq_putc(seq, '\n');
3849
                        sop = 1;
3850
                }
3851
        }
3852
 
3853
        seq_printf(seq, "\nRx ring hw get=%d put=%d last=%d\n",
3854
                   sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_GET_IDX)),
3855
                   last = sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_PUT_IDX)),
3856
                   sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_LAST_IDX)));
3857
 
3858
        sky2_read32(hw, B0_Y2_SP_LISR);
3859
        napi_enable(&hw->napi);
3860
        return 0;
3861
}
3862
 
3863
static int sky2_debug_open(struct inode *inode, struct file *file)
3864
{
3865
        return single_open(file, sky2_debug_show, inode->i_private);
3866
}
3867
 
3868
static const struct file_operations sky2_debug_fops = {
3869
        .owner          = THIS_MODULE,
3870
        .open           = sky2_debug_open,
3871
        .read           = seq_read,
3872
        .llseek         = seq_lseek,
3873
        .release        = single_release,
3874
};
3875
 
3876
/*
3877
 * Use network device events to create/remove/rename
3878
 * debugfs file entries
3879
 */
3880
static int sky2_device_event(struct notifier_block *unused,
3881
                             unsigned long event, void *ptr)
3882
{
3883
        struct net_device *dev = ptr;
3884
        struct sky2_port *sky2 = netdev_priv(dev);
3885
 
3886
        if (dev->open != sky2_up || !sky2_debug)
3887
                return NOTIFY_DONE;
3888
 
3889
        switch(event) {
3890
        case NETDEV_CHANGENAME:
3891
                if (sky2->debugfs) {
3892
                        sky2->debugfs = debugfs_rename(sky2_debug, sky2->debugfs,
3893
                                                       sky2_debug, dev->name);
3894
                }
3895
                break;
3896
 
3897
        case NETDEV_GOING_DOWN:
3898
                if (sky2->debugfs) {
3899
                        printk(KERN_DEBUG PFX "%s: remove debugfs\n",
3900
                               dev->name);
3901
                        debugfs_remove(sky2->debugfs);
3902
                        sky2->debugfs = NULL;
3903
                }
3904
                break;
3905
 
3906
        case NETDEV_UP:
3907
                sky2->debugfs = debugfs_create_file(dev->name, S_IRUGO,
3908
                                                    sky2_debug, dev,
3909
                                                    &sky2_debug_fops);
3910
                if (IS_ERR(sky2->debugfs))
3911
                        sky2->debugfs = NULL;
3912
        }
3913
 
3914
        return NOTIFY_DONE;
3915
}
3916
 
3917
static struct notifier_block sky2_notifier = {
3918
        .notifier_call = sky2_device_event,
3919
};
3920
 
3921
 
3922
static __init void sky2_debug_init(void)
3923
{
3924
        struct dentry *ent;
3925
 
3926
        ent = debugfs_create_dir("sky2", NULL);
3927
        if (!ent || IS_ERR(ent))
3928
                return;
3929
 
3930
        sky2_debug = ent;
3931
        register_netdevice_notifier(&sky2_notifier);
3932
}
3933
 
3934
static __exit void sky2_debug_cleanup(void)
3935
{
3936
        if (sky2_debug) {
3937
                unregister_netdevice_notifier(&sky2_notifier);
3938
                debugfs_remove(sky2_debug);
3939
                sky2_debug = NULL;
3940
        }
3941
}
3942
 
3943
#else
3944
#define sky2_debug_init()
3945
#define sky2_debug_cleanup()
3946
#endif
3947
 
3948
 
3949
/* Initialize network device */
3950
static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
3951
                                                     unsigned port,
3952
                                                     int highmem, int wol)
3953
{
3954
        struct sky2_port *sky2;
3955
        struct net_device *dev = alloc_etherdev(sizeof(*sky2));
3956
 
3957
        if (!dev) {
3958
                dev_err(&hw->pdev->dev, "etherdev alloc failed\n");
3959
                return NULL;
3960
        }
3961
 
3962
        SET_NETDEV_DEV(dev, &hw->pdev->dev);
3963
        dev->irq = hw->pdev->irq;
3964
        dev->open = sky2_up;
3965
        dev->stop = sky2_down;
3966
        dev->do_ioctl = sky2_ioctl;
3967
        dev->hard_start_xmit = sky2_xmit_frame;
3968
        dev->set_multicast_list = sky2_set_multicast;
3969
        dev->set_mac_address = sky2_set_mac_address;
3970
        dev->change_mtu = sky2_change_mtu;
3971
        SET_ETHTOOL_OPS(dev, &sky2_ethtool_ops);
3972
        dev->tx_timeout = sky2_tx_timeout;
3973
        dev->watchdog_timeo = TX_WATCHDOG;
3974
#ifdef CONFIG_NET_POLL_CONTROLLER
3975
        if (port == 0)
3976
                dev->poll_controller = sky2_netpoll;
3977
#endif
3978
 
3979
        sky2 = netdev_priv(dev);
3980
        sky2->netdev = dev;
3981
        sky2->hw = hw;
3982
        sky2->msg_enable = netif_msg_init(debug, default_msg);
3983
 
3984
        /* Auto speed and flow control */
3985
        sky2->autoneg = AUTONEG_ENABLE;
3986
        sky2->flow_mode = FC_BOTH;
3987
 
3988
        sky2->duplex = -1;
3989
        sky2->speed = -1;
3990
        sky2->advertising = sky2_supported_modes(hw);
3991
        sky2->rx_csum = (hw->chip_id != CHIP_ID_YUKON_XL);
3992
        sky2->wol = wol;
3993
 
3994
        spin_lock_init(&sky2->phy_lock);
3995
        sky2->tx_pending = TX_DEF_PENDING;
3996
        sky2->rx_pending = RX_DEF_PENDING;
3997
 
3998
        hw->dev[port] = dev;
3999
 
4000
        sky2->port = port;
4001
 
4002
        dev->features |= NETIF_F_TSO | NETIF_F_IP_CSUM | NETIF_F_SG;
4003
        if (highmem)
4004
                dev->features |= NETIF_F_HIGHDMA;
4005
 
4006
#ifdef SKY2_VLAN_TAG_USED
4007
        /* The workaround for FE+ status conflicts with VLAN tag detection. */
4008
        if (!(sky2->hw->chip_id == CHIP_ID_YUKON_FE_P &&
4009
              sky2->hw->chip_rev == CHIP_REV_YU_FE2_A0)) {
4010
                dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
4011
                dev->vlan_rx_register = sky2_vlan_rx_register;
4012
        }
4013
#endif
4014
 
4015
        /* read the mac address */
4016
        memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN);
4017
        memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
4018
 
4019
        return dev;
4020
}
4021
 
4022
static void __devinit sky2_show_addr(struct net_device *dev)
4023
{
4024
        const struct sky2_port *sky2 = netdev_priv(dev);
4025
        DECLARE_MAC_BUF(mac);
4026
 
4027
        if (netif_msg_probe(sky2))
4028
                printk(KERN_INFO PFX "%s: addr %s\n",
4029
                       dev->name, print_mac(mac, dev->dev_addr));
4030
}
4031
 
4032
/* Handle software interrupt used during MSI test */
4033
static irqreturn_t __devinit sky2_test_intr(int irq, void *dev_id)
4034
{
4035
        struct sky2_hw *hw = dev_id;
4036
        u32 status = sky2_read32(hw, B0_Y2_SP_ISRC2);
4037
 
4038
        if (status == 0)
4039
                return IRQ_NONE;
4040
 
4041
        if (status & Y2_IS_IRQ_SW) {
4042
                hw->flags |= SKY2_HW_USE_MSI;
4043
                wake_up(&hw->msi_wait);
4044
                sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ);
4045
        }
4046
        sky2_write32(hw, B0_Y2_SP_ICR, 2);
4047
 
4048
        return IRQ_HANDLED;
4049
}
4050
 
4051
/* Test interrupt path by forcing a software IRQ */
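/* A software IRQ is raised and sky2_test_intr() is given up to 100 ms to
 * observe it; if it never fires, the driver falls back from MSI to INTx.
 */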
4052
static int __devinit sky2_test_msi(struct sky2_hw *hw)
4053
{
4054
        struct pci_dev *pdev = hw->pdev;
4055
        int err;
4056
 
4057
        init_waitqueue_head(&hw->msi_wait);
4058
 
4059
        sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW);
4060
 
4061
        err = request_irq(pdev->irq, sky2_test_intr, 0, DRV_NAME, hw);
4062
        if (err) {
4063
                dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
4064
                return err;
4065
        }
4066
 
4067
        sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ);
4068
        sky2_read8(hw, B0_CTST);
4069
 
4070
        wait_event_timeout(hw->msi_wait, (hw->flags & SKY2_HW_USE_MSI), HZ/10);
4071
 
4072
        if (!(hw->flags & SKY2_HW_USE_MSI)) {
4073
                /* MSI test failed, go back to INTx mode */
4074
                dev_info(&pdev->dev, "No interrupt generated using MSI, "
4075
                         "switching to INTx mode.\n");
4076
 
4077
                err = -EOPNOTSUPP;
4078
                sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ);
4079
        }
4080
 
4081
        sky2_write32(hw, B0_IMSK, 0);
4082
        sky2_read32(hw, B0_IMSK);
4083
 
4084
        free_irq(pdev->irq, hw);
4085
 
4086
        return err;
4087
}
4088
 
4089
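/* Check whether PME (wake) was left enabled in the PCI power management
 * control register; used to pick a sensible Wake-on-LAN default.
 */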
static int __devinit pci_wake_enabled(struct pci_dev *dev)
4090
{
4091
        int pm  = pci_find_capability(dev, PCI_CAP_ID_PM);
4092
        u16 value;
4093
 
4094
        if (!pm)
4095
                return 0;
4096
        if (pci_read_config_word(dev, pm + PCI_PM_CTRL, &value))
4097
                return 0;
4098
        return value & PCI_PM_CTRL_PME_ENABLE;
4099
}
4100
 
4101
static int __devinit sky2_probe(struct pci_dev *pdev,
4102
                                const struct pci_device_id *ent)
4103
{
4104
        struct net_device *dev;
4105
        struct sky2_hw *hw;
4106
        int err, using_dac = 0, wol_default;
4107
 
4108
        err = pci_enable_device(pdev);
4109
        if (err) {
4110
                dev_err(&pdev->dev, "cannot enable PCI device\n");
4111
                goto err_out;
4112
        }
4113
 
4114
        err = pci_request_regions(pdev, DRV_NAME);
4115
        if (err) {
4116
                dev_err(&pdev->dev, "cannot obtain PCI resources\n");
4117
                goto err_out_disable;
4118
        }
4119
 
4120
        pci_set_master(pdev);
4121
 
4122
        if (sizeof(dma_addr_t) > sizeof(u32) &&
4123
            !(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
4124
                using_dac = 1;
4125
                err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
4126
                if (err < 0) {
4127
                        dev_err(&pdev->dev, "unable to obtain 64 bit DMA "
4128
                                "for consistent allocations\n");
4129
                        goto err_out_free_regions;
4130
                }
4131
        } else {
4132
                err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
4133
                if (err) {
4134
                        dev_err(&pdev->dev, "no usable DMA configuration\n");
4135
                        goto err_out_free_regions;
4136
                }
4137
        }
4138
 
4139
        wol_default = pci_wake_enabled(pdev) ? WAKE_MAGIC : 0;
4140
 
4141
        err = -ENOMEM;
4142
        hw = kzalloc(sizeof(*hw), GFP_KERNEL);
4143
        if (!hw) {
4144
                dev_err(&pdev->dev, "cannot allocate hardware struct\n");
4145
                goto err_out_free_regions;
4146
        }
4147
 
4148
        hw->pdev = pdev;
4149
 
4150
        hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
4151
        if (!hw->regs) {
4152
                dev_err(&pdev->dev, "cannot map device registers\n");
4153
                goto err_out_free_hw;
4154
        }
4155
 
4156
#ifdef __BIG_ENDIAN
4157
        /* The sk98lin vendor driver uses hardware byte swapping but
4158
         * this driver uses software swapping.
4159
         */
4160
        {
4161
                u32 reg;
4162
                reg = sky2_pci_read32(hw, PCI_DEV_REG2);
4163
                reg &= ~PCI_REV_DESC;
4164
                sky2_pci_write32(hw, PCI_DEV_REG2, reg);
4165
        }
4166
#endif
4167
 
4168
        /* ring for status responses */
4169
        hw->st_le = pci_alloc_consistent(pdev, STATUS_LE_BYTES, &hw->st_dma);
4170
        if (!hw->st_le)
4171
                goto err_out_iounmap;
4172
 
4173
        err = sky2_init(hw);
4174
        if (err)
4175
                goto err_out_iounmap;
4176
 
4177
        dev_info(&pdev->dev, "v%s addr 0x%llx irq %d Yukon-%s (0x%x) rev %d\n",
4178
               DRV_VERSION, (unsigned long long)pci_resource_start(pdev, 0),
4179
               pdev->irq, yukon2_name[hw->chip_id - CHIP_ID_YUKON_XL],
4180
               hw->chip_id, hw->chip_rev);
4181
 
4182
        sky2_reset(hw);
4183
 
4184
        dev = sky2_init_netdev(hw, 0, using_dac, wol_default);
4185
        if (!dev) {
4186
                err = -ENOMEM;
4187
                goto err_out_free_pci;
4188
        }
4189
 
4190
        if (!disable_msi && pci_enable_msi(pdev) == 0) {
4191
                err = sky2_test_msi(hw);
4192
                if (err == -EOPNOTSUPP)
4193
                        pci_disable_msi(pdev);
4194
                else if (err)
4195
                        goto err_out_free_netdev;
4196
        }
4197
 
4198
        err = register_netdev(dev);
4199
        if (err) {
4200
                dev_err(&pdev->dev, "cannot register net device\n");
4201
                goto err_out_free_netdev;
4202
        }
4203
 
4204
        netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_WEIGHT);
4205
 
4206
        err = request_irq(pdev->irq, sky2_intr,
4207
                          (hw->flags & SKY2_HW_USE_MSI) ? 0 : IRQF_SHARED,
4208
                          dev->name, hw);
4209
        if (err) {
4210
                dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
4211
                goto err_out_unregister;
4212
        }
4213
        sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
4214
        napi_enable(&hw->napi);
4215
 
4216
        sky2_show_addr(dev);
4217
 
4218
        if (hw->ports > 1) {
4219
                struct net_device *dev1;
4220
 
4221
                dev1 = sky2_init_netdev(hw, 1, using_dac, wol_default);
4222
                if (!dev1)
4223
                        dev_warn(&pdev->dev, "allocation for second device failed\n");
4224
                else if ((err = register_netdev(dev1))) {
4225
                        dev_warn(&pdev->dev,
4226
                                 "register of second port failed (%d)\n", err);
4227
                        hw->dev[1] = NULL;
4228
                        free_netdev(dev1);
4229
                } else
4230
                        sky2_show_addr(dev1);
4231
        }
4232
 
4233
        setup_timer(&hw->watchdog_timer, sky2_watchdog, (unsigned long) hw);
4234
        INIT_WORK(&hw->restart_work, sky2_restart);
4235
 
4236
        pci_set_drvdata(pdev, hw);
4237
 
4238
        return 0;
4239
 
4240
err_out_unregister:
4241
        if (hw->flags & SKY2_HW_USE_MSI)
4242
                pci_disable_msi(pdev);
4243
        unregister_netdev(dev);
4244
err_out_free_netdev:
4245
        free_netdev(dev);
4246
err_out_free_pci:
4247
        sky2_write8(hw, B0_CTST, CS_RST_SET);
4248
        pci_free_consistent(pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma);
4249
err_out_iounmap:
4250
        iounmap(hw->regs);
4251
err_out_free_hw:
4252
        kfree(hw);
4253
err_out_free_regions:
4254
        pci_release_regions(pdev);
4255
err_out_disable:
4256
        pci_disable_device(pdev);
4257
err_out:
4258
        pci_set_drvdata(pdev, NULL);
4259
        return err;
4260
}
4261
 
4262
static void __devexit sky2_remove(struct pci_dev *pdev)
4263
{
4264
        struct sky2_hw *hw = pci_get_drvdata(pdev);
4265
        int i;
4266
 
4267
        if (!hw)
4268
                return;
4269
 
4270
        del_timer_sync(&hw->watchdog_timer);
4271
        cancel_work_sync(&hw->restart_work);
4272
 
4273
        for (i = hw->ports-1; i >= 0; --i)
4274
                unregister_netdev(hw->dev[i]);
4275
 
4276
        sky2_write32(hw, B0_IMSK, 0);
4277
 
4278
        sky2_power_aux(hw);
4279
 
4280
        sky2_write16(hw, B0_Y2LED, LED_STAT_OFF);
4281
        sky2_write8(hw, B0_CTST, CS_RST_SET);
4282
        sky2_read8(hw, B0_CTST);
4283
 
4284
        free_irq(pdev->irq, hw);
4285
        if (hw->flags & SKY2_HW_USE_MSI)
4286
                pci_disable_msi(pdev);
4287
        pci_free_consistent(pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma);
4288
        pci_release_regions(pdev);
4289
        pci_disable_device(pdev);
4290
 
4291
        for (i = hw->ports-1; i >= 0; --i)
4292
                free_netdev(hw->dev[i]);
4293
 
4294
        iounmap(hw->regs);
4295
        kfree(hw);
4296
 
4297
        pci_set_drvdata(pdev, NULL);
4298
}
4299
 
4300
#ifdef CONFIG_PM
static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct sky2_hw *hw = pci_get_drvdata(pdev);
        int i, wol = 0;

        if (!hw)
                return 0;

        for (i = 0; i < hw->ports; i++) {
                struct net_device *dev = hw->dev[i];
                struct sky2_port *sky2 = netdev_priv(dev);

                if (netif_running(dev))
                        sky2_down(dev);

                if (sky2->wol)
                        sky2_wol_init(sky2);

                wol |= sky2->wol;
        }

        sky2_write32(hw, B0_IMSK, 0);
        napi_disable(&hw->napi);
        sky2_power_aux(hw);

        pci_save_state(pdev);
        pci_enable_wake(pdev, pci_choose_state(pdev, state), wol);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));

        return 0;
}

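/* Resume: restore PCI config space, re-enable all clocks on the chips that
 * gate them (Yukon EX, EC Ultra, FE+), reset the hardware and bring every
 * port that was running at suspend time back up.
 */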
static int sky2_resume(struct pci_dev *pdev)
{
        struct sky2_hw *hw = pci_get_drvdata(pdev);
        int i, err;

        if (!hw)
                return 0;

        err = pci_set_power_state(pdev, PCI_D0);
        if (err)
                goto out;

        err = pci_restore_state(pdev);
        if (err)
                goto out;

        pci_enable_wake(pdev, PCI_D0, 0);

        /* Re-enable all clocks */
        if (hw->chip_id == CHIP_ID_YUKON_EX ||
            hw->chip_id == CHIP_ID_YUKON_EC_U ||
            hw->chip_id == CHIP_ID_YUKON_FE_P)
                sky2_pci_write32(hw, PCI_DEV_REG3, 0);

        sky2_reset(hw);
        sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
        napi_enable(&hw->napi);

        for (i = 0; i < hw->ports; i++) {
                struct net_device *dev = hw->dev[i];
                if (netif_running(dev)) {
                        err = sky2_up(dev);
                        if (err) {
                                printk(KERN_ERR PFX "%s: could not up: %d\n",
                                       dev->name, err);
                                dev_close(dev);
                                goto out;
                        }

                        sky2_set_multicast(dev);
                }
        }

        return 0;
out:
        dev_err(&pdev->dev, "resume failed (%d)\n", err);
        pci_disable_device(pdev);
        return err;
}
#endif

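/* Shutdown/reboot hook: arm Wake-on-LAN on any port that requested it and
 * leave the device in D3hot so a wake event can still power the machine
 * back up.
 */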
static void sky2_shutdown(struct pci_dev *pdev)
{
        struct sky2_hw *hw = pci_get_drvdata(pdev);
        int i, wol = 0;

        if (!hw)
                return;

        del_timer_sync(&hw->watchdog_timer);

        for (i = 0; i < hw->ports; i++) {
                struct net_device *dev = hw->dev[i];
                struct sky2_port *sky2 = netdev_priv(dev);

                if (sky2->wol) {
                        wol = 1;
                        sky2_wol_init(sky2);
                }
        }

        if (wol)
                sky2_power_aux(hw);

        pci_enable_wake(pdev, PCI_D3hot, wol);
        pci_enable_wake(pdev, PCI_D3cold, wol);

        pci_disable_device(pdev);
        pci_set_power_state(pdev, PCI_D3hot);
}

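/* Glue between the PCI core and the entry points above; the suspend and
 * resume callbacks are only provided when power management is configured.
 */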
static struct pci_driver sky2_driver = {
        .name = DRV_NAME,
        .id_table = sky2_id_table,
        .probe = sky2_probe,
        .remove = __devexit_p(sky2_remove),
#ifdef CONFIG_PM
        .suspend = sky2_suspend,
        .resume = sky2_resume,
#endif
        .shutdown = sky2_shutdown,
};

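/* Module load/unload: set up the optional debug hooks and register the
 * driver with the PCI core (and tear both down again on unload).
 */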
static int __init sky2_init_module(void)
{
        sky2_debug_init();
        return pci_register_driver(&sky2_driver);
}

static void __exit sky2_cleanup_module(void)
{
        pci_unregister_driver(&sky2_driver);
        sky2_debug_cleanup();
}

module_init(sky2_init_module);
module_exit(sky2_cleanup_module);

MODULE_DESCRIPTION("Marvell Yukon 2 Gigabit Ethernet driver");
MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
