/* 8139cp.c: A Linux PCI Ethernet driver for the RealTek 8139C+ chips. */
/*
	Copyright 2001-2004 Jeff Garzik <jgarzik@pobox.com>

	Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com) [tg3.c]
	Copyright (C) 2000, 2001 David S. Miller (davem@redhat.com) [sungem.c]
	Copyright 2001 Manfred Spraul                               [natsemi.c]
	Copyright 1999-2001 by Donald Becker.                       [natsemi.c]
	Written 1997-2001 by Donald Becker.                         [8139too.c]
	Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>. [acenic.c]

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	See the file COPYING in this distribution for more information.

	Contributors:

		Wake-on-LAN support - Felipe Damasio <felipewd@terra.com.br>
		PCI suspend/resume  - Felipe Damasio <felipewd@terra.com.br>
		LinkChg interrupt   - Felipe Damasio <felipewd@terra.com.br>

	TODO:
	* Test Tx checksumming thoroughly

	Low priority TODO:
	* Complete reset on PciErr
	* Consider Rx interrupt mitigation using TimerIntr
	* Investigate using skb->priority with h/w VLAN priority
	* Investigate using High Priority Tx Queue with skb->priority
	* Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
	* Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
	* Implement Tx software interrupt mitigation via
	  Tx descriptor bit
	* The real minimum of CP_MIN_MTU is 4 bytes.  However,
	  for this to be supported, one must(?) turn on packet padding.
	* Support external MII transceivers (patch available)

	NOTES:
	* TX checksumming is considered experimental.  It is off by
	  default, use ethtool to turn it on.

 */

#define DRV_NAME		"8139cp"
#define DRV_VERSION		"1.3"
#define DRV_RELDATE		"Mar 22, 2004"


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/cache.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

/* VLAN tagging feature enable/disable */
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define CP_VLAN_TAG_USED 1
#define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \
	do { (tx_desc)->opts2 = cpu_to_le32(vlan_tag_value); } while (0)
#else
#define CP_VLAN_TAG_USED 0
#define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \
	do { (tx_desc)->opts2 = 0; } while (0)
#endif
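
/* Editor's note: CP_VLAN_TX_TAG() funnels the optional 802.1Q tag into the
 * descriptor's opts2 word; when VLAN support is compiled out it degenerates
 * to storing 0, so the transmit path below can call it unconditionally.
 */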

/* These identify the driver base version and may not be removed. */
static char version[] =
KERN_INFO DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";

MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");

static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number");

/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
   The RTL chips use a 64 element hash table based on the Ethernet CRC.  */
static int multicast_filter_limit = 32;
module_param(multicast_filter_limit, int, 0);
MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");

#define PFX			DRV_NAME ": "

#define CP_DEF_MSG_ENABLE	(NETIF_MSG_DRV		| \
				 NETIF_MSG_PROBE 	| \
				 NETIF_MSG_LINK)
#define CP_NUM_STATS		14	/* struct cp_dma_stats, plus one */
#define CP_STATS_SIZE		64	/* size in bytes of DMA stats block */
#define CP_REGS_SIZE		(0xff + 1)
#define CP_REGS_VER		1		/* version 1 */
#define CP_RX_RING_SIZE		64
#define CP_TX_RING_SIZE		64
#define CP_RING_BYTES		\
		((sizeof(struct cp_desc) * CP_RX_RING_SIZE) +	\
		 (sizeof(struct cp_desc) * CP_TX_RING_SIZE) +	\
		 CP_STATS_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (CP_TX_RING_SIZE - 1))
#define NEXT_RX(N)		(((N) + 1) & (CP_RX_RING_SIZE - 1))
#define TX_BUFFS_AVAIL(CP)					\
	(((CP)->tx_tail <= (CP)->tx_head) ?			\
	  (CP)->tx_tail + (CP_TX_RING_SIZE - 1) - (CP)->tx_head :	\
	  (CP)->tx_tail - (CP)->tx_head - 1)
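
/* Editor's note (illustrative): NEXT_TX/NEXT_RX rely on the ring sizes being
 * powers of two, so "(n + 1) & (size - 1)" wraps without a modulo.
 * TX_BUFFS_AVAIL keeps one slot unused to distinguish a full ring from an
 * empty one: with tx_head == tx_tail == 0 (empty) it yields
 * 0 + (64 - 1) - 0 = 63 free slots, never 64.
 */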

#define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer.*/
#define RX_OFFSET		2
#define CP_INTERNAL_PHY		32

/* The following settings are log_2(bytes)-4:  0 == 16 bytes .. 6==1024, 7==end of packet. */
#define RX_FIFO_THRESH		5	/* Rx buffer level before first PCI xfer.  */
#define RX_DMA_BURST		4	/* Maximum PCI burst, '4' is 256 */
#define TX_DMA_BURST		6	/* Maximum PCI burst, '6' is 1024 */
#define TX_EARLY_THRESH		256	/* Early Tx threshold, in bytes */
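
/* Editor's note (illustrative): decoding the log_2(bytes)-4 encoding above,
 * a field value v corresponds to 2^(v + 4) bytes.  So RX_FIFO_THRESH = 5
 * means 512 bytes, RX_DMA_BURST = 4 means 256 and TX_DMA_BURST = 6 means
 * 1024, matching the comments on each define.
 */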

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT		(6*HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define CP_MIN_MTU		60	/* TODO: allow lower, but pad */
#define CP_MAX_MTU		4096

enum {
	/* NIC register offsets */
	MAC0		= 0x00,	/* Ethernet hardware address. */
	MAR0		= 0x08,	/* Multicast filter. */
	StatsAddr	= 0x10,	/* 64-bit start addr of 64-byte DMA stats blk */
	TxRingAddr	= 0x20, /* 64-bit start addr of Tx ring */
	HiTxRingAddr	= 0x28, /* 64-bit start addr of high priority Tx ring */
	Cmd		= 0x37, /* Command register */
	IntrMask	= 0x3C, /* Interrupt mask */
	IntrStatus	= 0x3E, /* Interrupt status */
	TxConfig	= 0x40, /* Tx configuration */
	ChipVersion	= 0x43, /* 8-bit chip version, inside TxConfig */
	RxConfig	= 0x44, /* Rx configuration */
	RxMissed	= 0x4C,	/* 24 bits valid, write clears */
	Cfg9346		= 0x50, /* EEPROM select/control; Cfg reg [un]lock */
	Config1		= 0x52, /* Config1 */
	Config3		= 0x59, /* Config3 */
	Config4		= 0x5A, /* Config4 */
	MultiIntr	= 0x5C, /* Multiple interrupt select */
	BasicModeCtrl	= 0x62,	/* MII BMCR */
	BasicModeStatus	= 0x64, /* MII BMSR */
	NWayAdvert	= 0x66, /* MII ADVERTISE */
	NWayLPAR	= 0x68, /* MII LPA */
	NWayExpansion	= 0x6A, /* MII Expansion */
	Config5		= 0xD8,	/* Config5 */
	TxPoll		= 0xD9,	/* Tell chip to check Tx descriptors for work */
	RxMaxSize	= 0xDA, /* Max size of an Rx packet (8169 only) */
	CpCmd		= 0xE0, /* C+ Command register (C+ mode only) */
	IntrMitigate	= 0xE2,	/* rx/tx interrupt mitigation control */
	RxRingAddr	= 0xE4, /* 64-bit start addr of Rx ring */
	TxThresh	= 0xEC, /* Early Tx threshold */
	OldRxBufAddr	= 0x30, /* DMA address of Rx ring buffer (C mode) */
	OldTSD0		= 0x10, /* DMA address of first Tx desc (C mode) */

	/* Tx and Rx status descriptors */
	DescOwn		= (1 << 31), /* Descriptor is owned by NIC */
	RingEnd		= (1 << 30), /* End of descriptor ring */
	FirstFrag	= (1 << 29), /* First segment of a packet */
	LastFrag	= (1 << 28), /* Final segment of a packet */
	LargeSend	= (1 << 27), /* TCP Large Send Offload (TSO) */
	MSSShift	= 16,	     /* MSS value position */
	MSSMask		= 0xfff,     /* MSS value: 11 bits */
	TxError		= (1 << 23), /* Tx error summary */
	RxError		= (1 << 20), /* Rx error summary */
	IPCS		= (1 << 18), /* Calculate IP checksum */
	UDPCS		= (1 << 17), /* Calculate UDP/IP checksum */
	TCPCS		= (1 << 16), /* Calculate TCP/IP checksum */
	TxVlanTag	= (1 << 17), /* Add VLAN tag */
	RxVlanTagged	= (1 << 16), /* Rx VLAN tag available */
	IPFail		= (1 << 15), /* IP checksum failed */
	UDPFail		= (1 << 14), /* UDP/IP checksum failed */
	TCPFail		= (1 << 13), /* TCP/IP checksum failed */
	NormalTxPoll	= (1 << 6),  /* One or more normal Tx packets to send */
	PID1		= (1 << 17), /* 2 protocol id bits:  0==non-IP, */
	PID0		= (1 << 16), /* 1==TCP/IP, 2==UDP/IP, 3==IP (see RxProto* below) */
	RxProtoTCP	= 1,
	RxProtoUDP	= 2,
	RxProtoIP	= 3,
	TxFIFOUnder	= (1 << 25), /* Tx FIFO underrun */
	TxOWC		= (1 << 22), /* Tx Out-of-window collision */
	TxLinkFail	= (1 << 21), /* Link failed during Tx of packet */
	TxMaxCol	= (1 << 20), /* Tx aborted due to excessive collisions */
	TxColCntShift	= 16,	     /* Shift, to get 4-bit Tx collision cnt */
	TxColCntMask	= 0x01 | 0x02 | 0x04 | 0x08, /* 4-bit collision count */
	RxErrFrame	= (1 << 27), /* Rx frame alignment error */
	RxMcast		= (1 << 26), /* Rx multicast packet rcv'd */
	RxErrCRC	= (1 << 18), /* Rx CRC error */
	RxErrRunt	= (1 << 19), /* Rx error, packet < 64 bytes */
	RxErrLong	= (1 << 21), /* Rx error, packet > 4096 bytes */
	RxErrFIFO	= (1 << 22), /* Rx error, FIFO overflowed, pkt bad */

	/* StatsAddr register */
	DumpStats	= (1 << 3),  /* Begin stats dump */

	/* RxConfig register */
	RxCfgFIFOShift	= 13,	     /* Shift, to get Rx FIFO thresh value */
	RxCfgDMAShift	= 8,	     /* Shift, to get Rx Max DMA value */
	AcceptErr	= 0x20,	     /* Accept packets with CRC errors */
	AcceptRunt	= 0x10,	     /* Accept runt (<64 bytes) packets */
	AcceptBroadcast	= 0x08,	     /* Accept broadcast packets */
	AcceptMulticast	= 0x04,	     /* Accept multicast packets */
	AcceptMyPhys	= 0x02,	     /* Accept pkts with our MAC as dest */
	AcceptAllPhys	= 0x01,	     /* Accept all pkts w/ physical dest */

	/* IntrMask / IntrStatus registers */
	PciErr		= (1 << 15), /* System error on the PCI bus */
	TimerIntr	= (1 << 14), /* Asserted when TCTR reaches TimerInt value */
	LenChg		= (1 << 13), /* Cable length change */
	SWInt		= (1 << 8),  /* Software-requested interrupt */
	TxEmpty		= (1 << 7),  /* No Tx descriptors available */
	RxFIFOOvr	= (1 << 6),  /* Rx FIFO Overflow */
	LinkChg		= (1 << 5),  /* Packet underrun, or link change */
	RxEmpty		= (1 << 4),  /* No Rx descriptors available */
	TxErr		= (1 << 3),  /* Tx error */
	TxOK		= (1 << 2),  /* Tx packet sent */
	RxErr		= (1 << 1),  /* Rx error */
	RxOK		= (1 << 0),  /* Rx packet received */
	IntrResvd	= (1 << 10), /* reserved, according to RealTek engineers,
					but hardware likes to raise it */

	IntrAll		= PciErr | TimerIntr | LenChg | SWInt | TxEmpty |
			  RxFIFOOvr | LinkChg | RxEmpty | TxErr | TxOK |
			  RxErr | RxOK | IntrResvd,

	/* C mode command register */
	CmdReset	= (1 << 4),  /* Enable to reset; self-clearing */
	RxOn		= (1 << 3),  /* Rx mode enable */
	TxOn		= (1 << 2),  /* Tx mode enable */

	/* C+ mode command register */
	RxVlanOn	= (1 << 6),  /* Rx VLAN de-tagging enable */
	RxChkSum	= (1 << 5),  /* Rx checksum offload enable */
	PCIDAC		= (1 << 4),  /* PCI Dual Address Cycle (64-bit PCI) */
	PCIMulRW	= (1 << 3),  /* Enable PCI read/write multiple */
	CpRxOn		= (1 << 1),  /* Rx mode enable */
	CpTxOn		= (1 << 0),  /* Tx mode enable */

	/* Cfg9346 EEPROM control register */
	Cfg9346_Lock	= 0x00,	     /* Lock ConfigX/MII register access */
	Cfg9346_Unlock	= 0xC0,	     /* Unlock ConfigX/MII register access */

	/* TxConfig register */
	IFG		= (1 << 25) | (1 << 24), /* standard IEEE interframe gap */
	TxDMAShift	= 8,	     /* DMA burst value (0-7) is shift this many bits */

	/* Early Tx Threshold register */
	TxThreshMask	= 0x3f,	     /* Mask bits 5-0 */
	TxThreshMax	= 2048,	     /* Max early Tx threshold */

	/* Config1 register */
	DriverLoaded	= (1 << 5),  /* Software marker, driver is loaded */
	LWACT		= (1 << 4),  /* LWAKE active mode */
	PMEnable	= (1 << 0),  /* Enable various PM features of chip */

	/* Config3 register */
	PARMEnable	= (1 << 6),  /* Enable auto-loading of PHY parms */
	MagicPacket	= (1 << 5),  /* Wake up when receives a Magic Packet */
	LinkUp		= (1 << 4),  /* Wake up when the cable connection is re-established */

	/* Config4 register */
	LWPTN		= (1 << 1),  /* LWAKE Pattern */
	LWPME		= (1 << 4),  /* LANWAKE vs PMEB */

	/* Config5 register */
	BWF		= (1 << 6),  /* Accept Broadcast wakeup frame */
	MWF		= (1 << 5),  /* Accept Multicast wakeup frame */
	UWF		= (1 << 4),  /* Accept Unicast wakeup frame */
	LANWake		= (1 << 1),  /* Enable LANWake signal */
	PMEStatus	= (1 << 0),  /* PME status can be reset by PCI RST# */

	cp_norx_intr_mask = PciErr | LinkChg | TxOK | TxErr | TxEmpty,
	cp_rx_intr_mask = RxOK | RxErr | RxEmpty | RxFIFOOvr,
	cp_intr_mask = cp_rx_intr_mask | cp_norx_intr_mask,
};
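
/* Editor's note (illustrative): a transmit descriptor's opts1 word packs
 * control bits and the buffer length together.  For a single-fragment,
 * 1000-byte TCP frame with checksum offload, cp_start_xmit() below builds
 * roughly:
 *     opts1 = DescOwn | FirstFrag | LastFrag | IPCS | TCPCS | 1000;
 * Bit 31 (DescOwn) hands the descriptor to the NIC; RingEnd is OR'd in only
 * on the last slot of the ring.
 */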

static const unsigned int cp_rx_config =
	  (RX_FIFO_THRESH << RxCfgFIFOShift) |
	  (RX_DMA_BURST << RxCfgDMAShift);

struct cp_desc {
	__le32		opts1;
	__le32		opts2;
	__le64		addr;
};
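
/* Editor's note: the __le32/__le64 types document that the NIC consumes
 * descriptors in little-endian byte order; every access below therefore
 * goes through cpu_to_le32()/le32_to_cpu() and friends, so the driver also
 * works on big-endian hosts.
 */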

struct cp_dma_stats {
	__le64			tx_ok;
	__le64			rx_ok;
	__le64			tx_err;
	__le32			rx_err;
	__le16			rx_fifo;
	__le16			frame_align;
	__le32			tx_ok_1col;
	__le32			tx_ok_mcol;
	__le64			rx_ok_phys;
	__le64			rx_ok_bcast;
	__le32			rx_ok_mcast;
	__le16			tx_abort;
	__le16			tx_underrun;
} __attribute__((packed));

struct cp_extra_stats {
	unsigned long		rx_frags;
};

struct cp_private {
	void			__iomem *regs;
	struct net_device	*dev;
	spinlock_t		lock;
	u32			msg_enable;

	struct napi_struct	napi;

	struct pci_dev		*pdev;
	u32			rx_config;
	u16			cpcmd;

	struct net_device_stats net_stats;
	struct cp_extra_stats	cp_stats;

	unsigned		rx_head		____cacheline_aligned;
	unsigned		rx_tail;
	struct cp_desc		*rx_ring;
	struct sk_buff		*rx_skb[CP_RX_RING_SIZE];

	unsigned		tx_head		____cacheline_aligned;
	unsigned		tx_tail;
	struct cp_desc		*tx_ring;
	struct sk_buff		*tx_skb[CP_TX_RING_SIZE];

	unsigned		rx_buf_sz;
	unsigned		wol_enabled : 1; /* Is Wake-on-LAN enabled? */

#if CP_VLAN_TAG_USED
	struct vlan_group	*vlgrp;
#endif
	dma_addr_t		ring_dma;

	struct mii_if_info	mii_if;
};
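
/* Editor's note: the ____cacheline_aligned annotations start the Rx and Tx
 * bookkeeping on separate cache lines, so the receive path (NAPI poll) and
 * the transmit path do not bounce the same cache line between CPUs.
 */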

#define cpr8(reg)	readb(cp->regs + (reg))
#define cpr16(reg)	readw(cp->regs + (reg))
#define cpr32(reg)	readl(cp->regs + (reg))
#define cpw8(reg,val)	writeb((val), cp->regs + (reg))
#define cpw16(reg,val)	writew((val), cp->regs + (reg))
#define cpw32(reg,val)	writel((val), cp->regs + (reg))
#define cpw8_f(reg,val) do {			\
	writeb((val), cp->regs + (reg));	\
	readb(cp->regs + (reg));		\
	} while (0)
#define cpw16_f(reg,val) do {			\
	writew((val), cp->regs + (reg));	\
	readw(cp->regs + (reg));		\
	} while (0)
#define cpw32_f(reg,val) do {			\
	writel((val), cp->regs + (reg));	\
	readl(cp->regs + (reg));		\
	} while (0)
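
/* Editor's note: the *_f ("flush") variants read the register straight back
 * after writing it.  MMIO writes over PCI may be posted (buffered); the
 * read-back forces the write to reach the chip before the macro returns,
 * which matters whenever the next step depends on the register having
 * taken effect.
 */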


static void __cp_set_rx_mode (struct net_device *dev);
static void cp_tx (struct cp_private *cp);
static void cp_clean_rings (struct cp_private *cp);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cp_poll_controller(struct net_device *dev);
#endif
static int cp_get_eeprom_len(struct net_device *dev);
static int cp_get_eeprom(struct net_device *dev,
			 struct ethtool_eeprom *eeprom, u8 *data);
static int cp_set_eeprom(struct net_device *dev,
			 struct ethtool_eeprom *eeprom, u8 *data);

static struct pci_device_id cp_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	PCI_DEVICE_ID_REALTEK_8139), },
	{ PCI_DEVICE(PCI_VENDOR_ID_TTTECH,	PCI_DEVICE_ID_TTTECH_MC322), },
	{ },
};
MODULE_DEVICE_TABLE(pci, cp_pci_tbl);

static struct {
	const char str[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "tx_ok" },
	{ "rx_ok" },
	{ "tx_err" },
	{ "rx_err" },
	{ "rx_fifo" },
	{ "frame_align" },
	{ "tx_ok_1col" },
	{ "tx_ok_mcol" },
	{ "rx_ok_phys" },
	{ "rx_ok_bcast" },
	{ "rx_ok_mcast" },
	{ "tx_abort" },
	{ "tx_underrun" },
	{ "rx_frags" },
};


#if CP_VLAN_TAG_USED
static void cp_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&cp->lock, flags);
	cp->vlgrp = grp;
	if (grp)
		cp->cpcmd |= RxVlanOn;
	else
		cp->cpcmd &= ~RxVlanOn;

	cpw16(CpCmd, cp->cpcmd);
	spin_unlock_irqrestore(&cp->lock, flags);
}
#endif /* CP_VLAN_TAG_USED */

static inline void cp_set_rxbufsize (struct cp_private *cp)
{
	unsigned int mtu = cp->dev->mtu;

	if (mtu > ETH_DATA_LEN)
		/* MTU + ethernet header + FCS + optional VLAN tag */
		cp->rx_buf_sz = mtu + ETH_HLEN + 8;
	else
		cp->rx_buf_sz = PKT_BUF_SZ;
}
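
/* Editor's note (illustrative): the "+ 8" above is 4 bytes of FCS plus a
 * 4-byte 802.1Q tag, matching the comment.  For a 4000-byte MTU this gives
 * rx_buf_sz = 4000 + 14 + 8 = 4022; standard-MTU interfaces just use the
 * fixed 1536-byte PKT_BUF_SZ.
 */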

static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
			      struct cp_desc *desc)
{
	skb->protocol = eth_type_trans (skb, cp->dev);

	cp->net_stats.rx_packets++;
	cp->net_stats.rx_bytes += skb->len;
	cp->dev->last_rx = jiffies;

#if CP_VLAN_TAG_USED
	if (cp->vlgrp && (desc->opts2 & cpu_to_le32(RxVlanTagged))) {
		vlan_hwaccel_receive_skb(skb, cp->vlgrp,
					 swab16(le32_to_cpu(desc->opts2) & 0xffff));
	} else
#endif
		netif_receive_skb(skb);
}
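
/* Editor's note: the swab16() on the VLAN tag suggests the chip keeps the
 * 802.1Q tag in network (big-endian) order inside the otherwise
 * little-endian opts2 word; note that the Tx path in cp_start_xmit()
 * applies the matching swab16() when building the outgoing tag.
 */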

static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
			    u32 status, u32 len)
{
	if (netif_msg_rx_err (cp))
		printk (KERN_DEBUG
			"%s: rx err, slot %d status 0x%x len %d\n",
			cp->dev->name, rx_tail, status, len);
	cp->net_stats.rx_errors++;
	if (status & RxErrFrame)
		cp->net_stats.rx_frame_errors++;
	if (status & RxErrCRC)
		cp->net_stats.rx_crc_errors++;
	if ((status & RxErrRunt) || (status & RxErrLong))
		cp->net_stats.rx_length_errors++;
	if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag))
		cp->net_stats.rx_length_errors++;
	if (status & RxErrFIFO)
		cp->net_stats.rx_fifo_errors++;
}

static inline unsigned int cp_rx_csum_ok (u32 status)
{
	unsigned int protocol = (status >> 16) & 0x3;

	if (likely((protocol == RxProtoTCP) && (!(status & TCPFail))))
		return 1;
	else if ((protocol == RxProtoUDP) && (!(status & UDPFail)))
		return 1;
	else if ((protocol == RxProtoIP) && (!(status & IPFail)))
		return 1;
	return 0;
}
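
/* Editor's note (illustrative): "(status >> 16) & 0x3" extracts the
 * PID0/PID1 protocol-id bits (bits 16-17) defined in the enum above,
 * yielding RxProtoTCP (1), RxProtoUDP (2), RxProtoIP (3) or 0 for non-IP.
 * A packet's checksum is trusted only when its protocol's *Fail bit is
 * clear.
 */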

static int cp_rx_poll(struct napi_struct *napi, int budget)
{
	struct cp_private *cp = container_of(napi, struct cp_private, napi);
	struct net_device *dev = cp->dev;
	unsigned int rx_tail = cp->rx_tail;
	int rx;

rx_status_loop:
	rx = 0;
	cpw16(IntrStatus, cp_rx_intr_mask);

	while (1) {
		u32 status, len;
		dma_addr_t mapping;
		struct sk_buff *skb, *new_skb;
		struct cp_desc *desc;
		unsigned buflen;

		skb = cp->rx_skb[rx_tail];
		BUG_ON(!skb);

		desc = &cp->rx_ring[rx_tail];
		status = le32_to_cpu(desc->opts1);
		if (status & DescOwn)
			break;

		len = (status & 0x1fff) - 4;
		mapping = le64_to_cpu(desc->addr);

		if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) {
			/* we don't support incoming fragmented frames.
			 * instead, we attempt to ensure that the
			 * pre-allocated RX skbs are properly sized such
			 * that RX fragments are never encountered
			 */
			cp_rx_err_acct(cp, rx_tail, status, len);
			cp->net_stats.rx_dropped++;
			cp->cp_stats.rx_frags++;
			goto rx_next;
		}

		if (status & (RxError | RxErrFIFO)) {
			cp_rx_err_acct(cp, rx_tail, status, len);
			goto rx_next;
		}

		if (netif_msg_rx_status(cp))
			printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d\n",
			       dev->name, rx_tail, status, len);

		buflen = cp->rx_buf_sz + RX_OFFSET;
		new_skb = dev_alloc_skb (buflen);
		if (!new_skb) {
			cp->net_stats.rx_dropped++;
			goto rx_next;
		}

		skb_reserve(new_skb, RX_OFFSET);

		dma_unmap_single(&cp->pdev->dev, mapping,
				 buflen, PCI_DMA_FROMDEVICE);

		/* Handle checksum offloading for incoming packets. */
		if (cp_rx_csum_ok(status))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb_put(skb, len);

		mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
					 PCI_DMA_FROMDEVICE);
		cp->rx_skb[rx_tail] = new_skb;

		cp_rx_skb(cp, skb, desc);
		rx++;

rx_next:
		cp->rx_ring[rx_tail].opts2 = 0;
		cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping);
		if (rx_tail == (CP_RX_RING_SIZE - 1))
			desc->opts1 = cpu_to_le32(DescOwn | RingEnd |
						  cp->rx_buf_sz);
		else
			desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
		rx_tail = NEXT_RX(rx_tail);

		if (rx >= budget)
			break;
	}

	cp->rx_tail = rx_tail;

	/* if we did not reach work limit, then we're done with
	 * this round of polling
	 */
	if (rx < budget) {
		unsigned long flags;

		if (cpr16(IntrStatus) & cp_rx_intr_mask)
			goto rx_status_loop;

		spin_lock_irqsave(&cp->lock, flags);
		cpw16_f(IntrMask, cp_intr_mask);
		__netif_rx_complete(dev, napi);
		spin_unlock_irqrestore(&cp->lock, flags);
	}

	return rx;
}
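
/* Editor's note: classic NAPI shape.  The loop stops either when the NIC
 * still owns the next descriptor or once "budget" packets have been
 * processed.  Before re-enabling Rx interrupts, IntrStatus is re-checked:
 * if more packets arrived in the window, polling restarts at
 * rx_status_loop rather than racing the interrupt re-arm.
 */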

static irqreturn_t cp_interrupt (int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct cp_private *cp;
	u16 status;

	if (unlikely(dev == NULL))
		return IRQ_NONE;
	cp = netdev_priv(dev);

	status = cpr16(IntrStatus);
	if (!status || (status == 0xFFFF))
		return IRQ_NONE;

	if (netif_msg_intr(cp))
		printk(KERN_DEBUG "%s: intr, status %04x cmd %02x cpcmd %04x\n",
			dev->name, status, cpr8(Cmd), cpr16(CpCmd));

	cpw16(IntrStatus, status & ~cp_rx_intr_mask);

	spin_lock(&cp->lock);

	/* close possible races with dev_close */
	if (unlikely(!netif_running(dev))) {
		cpw16(IntrMask, 0);
		spin_unlock(&cp->lock);
		return IRQ_HANDLED;
	}

	if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
		if (netif_rx_schedule_prep(dev, &cp->napi)) {
			cpw16_f(IntrMask, cp_norx_intr_mask);
			__netif_rx_schedule(dev, &cp->napi);
		}

	if (status & (TxOK | TxErr | TxEmpty | SWInt))
		cp_tx(cp);
	if (status & LinkChg)
		mii_check_media(&cp->mii_if, netif_msg_link(cp), false);

	spin_unlock(&cp->lock);

	if (status & PciErr) {
		u16 pci_status;

		pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status);
		pci_write_config_word(cp->pdev, PCI_STATUS, pci_status);
		printk(KERN_ERR "%s: PCI bus error, status=%04x, PCI status=%04x\n",
		       dev->name, status, pci_status);

		/* TODO: reset hardware */
	}

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void cp_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	cp_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

static void cp_tx (struct cp_private *cp)
{
	unsigned tx_head = cp->tx_head;
	unsigned tx_tail = cp->tx_tail;

	while (tx_tail != tx_head) {
		struct cp_desc *txd = cp->tx_ring + tx_tail;
		struct sk_buff *skb;
		u32 status;

		rmb();
		status = le32_to_cpu(txd->opts1);
		if (status & DescOwn)
			break;

		skb = cp->tx_skb[tx_tail];
		BUG_ON(!skb);

		dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
				 le32_to_cpu(txd->opts1) & 0xffff,
				 PCI_DMA_TODEVICE);

		if (status & LastFrag) {
			if (status & (TxError | TxFIFOUnder)) {
				if (netif_msg_tx_err(cp))
					printk(KERN_DEBUG "%s: tx err, status 0x%x\n",
					       cp->dev->name, status);
				cp->net_stats.tx_errors++;
				if (status & TxOWC)
					cp->net_stats.tx_window_errors++;
				if (status & TxMaxCol)
					cp->net_stats.tx_aborted_errors++;
				if (status & TxLinkFail)
					cp->net_stats.tx_carrier_errors++;
				if (status & TxFIFOUnder)
					cp->net_stats.tx_fifo_errors++;
			} else {
				cp->net_stats.collisions +=
					((status >> TxColCntShift) & TxColCntMask);
				cp->net_stats.tx_packets++;
				cp->net_stats.tx_bytes += skb->len;
				if (netif_msg_tx_done(cp))
					printk(KERN_DEBUG "%s: tx done, slot %d\n", cp->dev->name, tx_tail);
			}
			dev_kfree_skb_irq(skb);
		}

		cp->tx_skb[tx_tail] = NULL;

		tx_tail = NEXT_TX(tx_tail);
	}

	cp->tx_tail = tx_tail;

	if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
		netif_wake_queue(cp->dev);
}
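
/* Editor's note: the queue is only woken once more than MAX_SKB_FRAGS + 1
 * slots are free, i.e. enough room for a worst-case fully fragmented skb
 * (the linear head plus every page fragment each take one descriptor).
 * This mirrors the stop condition at the end of cp_start_xmit(), so the
 * queue never wakes into a state where the very next packet could not be
 * queued.
 */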

static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned entry;
	u32 eor, flags;
	unsigned long intr_flags;
#if CP_VLAN_TAG_USED
	u32 vlan_tag = 0;
#endif
	int mss = 0;

	spin_lock_irqsave(&cp->lock, intr_flags);

	/* This is a hard error, log it. */
	if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&cp->lock, intr_flags);
		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
		       dev->name);
		return 1;
	}

#if CP_VLAN_TAG_USED
	if (cp->vlgrp && vlan_tx_tag_present(skb))
		vlan_tag = TxVlanTag | swab16(vlan_tx_tag_get(skb));
#endif

	entry = cp->tx_head;
	eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
	if (dev->features & NETIF_F_TSO)
		mss = skb_shinfo(skb)->gso_size;

	if (skb_shinfo(skb)->nr_frags == 0) {
		struct cp_desc *txd = &cp->tx_ring[entry];
		u32 len;
		dma_addr_t mapping;

		len = skb->len;
		mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
		CP_VLAN_TX_TAG(txd, vlan_tag);
		txd->addr = cpu_to_le64(mapping);
		wmb();

		flags = eor | len | DescOwn | FirstFrag | LastFrag;

		if (mss)
			flags |= LargeSend | ((mss & MSSMask) << MSSShift);
		else if (skb->ip_summed == CHECKSUM_PARTIAL) {
			const struct iphdr *ip = ip_hdr(skb);
			if (ip->protocol == IPPROTO_TCP)
				flags |= IPCS | TCPCS;
			else if (ip->protocol == IPPROTO_UDP)
				flags |= IPCS | UDPCS;
			else
				WARN_ON(1);	/* we need a WARN() */
		}

		txd->opts1 = cpu_to_le32(flags);
		wmb();

		cp->tx_skb[entry] = skb;
		entry = NEXT_TX(entry);
	} else {
		struct cp_desc *txd;
		u32 first_len, first_eor;
		dma_addr_t first_mapping;
		int frag, first_entry = entry;
		const struct iphdr *ip = ip_hdr(skb);

		/* We must give this initial chunk to the device last.
		 * Otherwise we could race with the device.
		 */
		first_eor = eor;
		first_len = skb_headlen(skb);
		first_mapping = dma_map_single(&cp->pdev->dev, skb->data,
					       first_len, PCI_DMA_TODEVICE);
		cp->tx_skb[entry] = skb;
		entry = NEXT_TX(entry);

		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
			skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
			u32 len;
			u32 ctrl;
			dma_addr_t mapping;

			len = this_frag->size;
			mapping = dma_map_single(&cp->pdev->dev,
						 ((void *) page_address(this_frag->page) +
						  this_frag->page_offset),
						 len, PCI_DMA_TODEVICE);
			eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;

			ctrl = eor | len | DescOwn;

			if (mss)
				ctrl |= LargeSend |
					((mss & MSSMask) << MSSShift);
			else if (skb->ip_summed == CHECKSUM_PARTIAL) {
				if (ip->protocol == IPPROTO_TCP)
					ctrl |= IPCS | TCPCS;
				else if (ip->protocol == IPPROTO_UDP)
					ctrl |= IPCS | UDPCS;
				else
					BUG();
			}

			if (frag == skb_shinfo(skb)->nr_frags - 1)
				ctrl |= LastFrag;

			txd = &cp->tx_ring[entry];
			CP_VLAN_TX_TAG(txd, vlan_tag);
			txd->addr = cpu_to_le64(mapping);
			wmb();

			txd->opts1 = cpu_to_le32(ctrl);
			wmb();

			cp->tx_skb[entry] = skb;
			entry = NEXT_TX(entry);
		}

		txd = &cp->tx_ring[first_entry];
		CP_VLAN_TX_TAG(txd, vlan_tag);
		txd->addr = cpu_to_le64(first_mapping);
		wmb();

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			if (ip->protocol == IPPROTO_TCP)
				txd->opts1 = cpu_to_le32(first_eor | first_len |
							 FirstFrag | DescOwn |
							 IPCS | TCPCS);
			else if (ip->protocol == IPPROTO_UDP)
				txd->opts1 = cpu_to_le32(first_eor | first_len |
							 FirstFrag | DescOwn |
							 IPCS | UDPCS);
			else
				BUG();
		} else
			txd->opts1 = cpu_to_le32(first_eor | first_len |
						 FirstFrag | DescOwn);
		wmb();
	}
	cp->tx_head = entry;
	if (netif_msg_tx_queued(cp))
		printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
		       dev->name, entry, skb->len);
	if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&cp->lock, intr_flags);

	cpw8(TxPoll, NormalTxPoll);
	dev->trans_start = jiffies;

	return 0;
}
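
/* Editor's note: ordering is the subtle part above.  Each descriptor's DMA
 * address is written and made visible (wmb()) before opts1 sets DescOwn,
 * so the NIC never sees an owned descriptor with a stale address.  In the
 * fragmented case all follow-on descriptors are handed over first and the
 * head descriptor last, then TxPoll is poked so the chip rescans the ring.
 */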

/* Set or clear the multicast filter for this adaptor.
   This routine is not state sensitive and need not be SMP locked. */

static void __cp_set_rx_mode (struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	u32 mc_filter[2];	/* Multicast hash filter */
	int i, rx_mode;
	u32 tmp;

	/* Note: do not reorder, GCC is clever about common statements. */
	if (dev->flags & IFF_PROMISC) {
		/* Unconditionally log net taps. */
		rx_mode =
		    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
		    AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((dev->mc_count > multicast_filter_limit)
		   || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		struct dev_mc_list *mclist;
		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;

			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
			rx_mode |= AcceptMulticast;
		}
	}

	/* We can safely update without stopping the chip. */
	tmp = cp_rx_config | rx_mode;
	if (cp->rx_config != tmp) {
		cpw32_f (RxConfig, tmp);
		cp->rx_config = tmp;
	}
	cpw32_f (MAR0 + 0, mc_filter[0]);
	cpw32_f (MAR0 + 4, mc_filter[1]);
}
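
/* Editor's note (illustrative): the hash takes the top 6 bits of the
 * Ethernet CRC of each multicast address (>> 26 leaves a value 0..63) and
 * sets that bit in the 64-bit MAR0 filter: bit_nr >> 5 picks the 32-bit
 * word, 1 << (bit_nr & 31) the bit within it.  The chip accepts any frame
 * whose destination hashes to a set bit, so the filter is approximate and
 * may pass extra multicasts.
 */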

static void cp_set_rx_mode (struct net_device *dev)
{
	unsigned long flags;
	struct cp_private *cp = netdev_priv(dev);

	spin_lock_irqsave (&cp->lock, flags);
	__cp_set_rx_mode(dev);
	spin_unlock_irqrestore (&cp->lock, flags);
}

static void __cp_get_stats(struct cp_private *cp)
{
	/* only lower 24 bits valid; write any value to clear */
	cp->net_stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff);
	cpw32 (RxMissed, 0);
}

static struct net_device_stats *cp_get_stats(struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	/* The chip only needs to report frames it silently dropped. */
	spin_lock_irqsave(&cp->lock, flags);
	if (netif_running(dev) && netif_device_present(dev))
		__cp_get_stats(cp);
	spin_unlock_irqrestore(&cp->lock, flags);

	return &cp->net_stats;
}

static void cp_stop_hw (struct cp_private *cp)
{
	cpw16(IntrStatus, ~(cpr16(IntrStatus)));
	cpw16_f(IntrMask, 0);
	cpw8(Cmd, 0);
	cpw16_f(CpCmd, 0);
	cpw16_f(IntrStatus, ~(cpr16(IntrStatus)));

	cp->rx_tail = 0;
	cp->tx_head = cp->tx_tail = 0;
}

static void cp_reset_hw (struct cp_private *cp)
{
	unsigned work = 1000;

	cpw8(Cmd, CmdReset);

	while (work--) {
		if (!(cpr8(Cmd) & CmdReset))
			return;

		schedule_timeout_uninterruptible(10);
	}

	printk(KERN_ERR "%s: hardware reset timeout\n", cp->dev->name);
}

static inline void cp_start_hw (struct cp_private *cp)
{
	cpw16(CpCmd, cp->cpcmd);
	cpw8(Cmd, RxOn | TxOn);
}

static void cp_init_hw (struct cp_private *cp)
{
	struct net_device *dev = cp->dev;
	dma_addr_t ring_dma;

	cp_reset_hw(cp);

	cpw8_f (Cfg9346, Cfg9346_Unlock);

	/* Restore our idea of the MAC address. */
	cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
	cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));

	cp_start_hw(cp);
	cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */

	__cp_set_rx_mode(dev);
	cpw32_f (TxConfig, IFG | (TX_DMA_BURST << TxDMAShift));

	cpw8(Config1, cpr8(Config1) | DriverLoaded | PMEnable);
	/* Disable Wake-on-LAN. Can be turned on with ETHTOOL_SWOL */
	cpw8(Config3, PARMEnable);
	cp->wol_enabled = 0;

	cpw8(Config5, cpr8(Config5) & PMEStatus);

	cpw32_f(HiTxRingAddr, 0);
	cpw32_f(HiTxRingAddr + 4, 0);

	ring_dma = cp->ring_dma;
	cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
	cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);

	ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
	cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
	cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);

	cpw16(MultiIntr, 0);

	cpw16_f(IntrMask, cp_intr_mask);

	cpw8_f(Cfg9346, Cfg9346_Lock);
}
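
/* Editor's note: "(ring_dma >> 16) >> 16" extracts the high 32 bits of the
 * DMA address in two steps because dma_addr_t may be only 32 bits wide, in
 * which case a single ">> 32" would be undefined behaviour in C; done this
 * way the result is simply 0 on 32-bit configurations.
 */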

static int cp_refill_rx (struct cp_private *cp)
{
	unsigned i;

	for (i = 0; i < CP_RX_RING_SIZE; i++) {
		struct sk_buff *skb;
		dma_addr_t mapping;

		skb = dev_alloc_skb(cp->rx_buf_sz + RX_OFFSET);
		if (!skb)
			goto err_out;

		skb_reserve(skb, RX_OFFSET);

		mapping = dma_map_single(&cp->pdev->dev, skb->data,
					 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
		cp->rx_skb[i] = skb;

		cp->rx_ring[i].opts2 = 0;
		cp->rx_ring[i].addr = cpu_to_le64(mapping);
		if (i == (CP_RX_RING_SIZE - 1))
			cp->rx_ring[i].opts1 =
				cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz);
		else
			cp->rx_ring[i].opts1 =
				cpu_to_le32(DescOwn | cp->rx_buf_sz);
	}

	return 0;

err_out:
	cp_clean_rings(cp);
	return -ENOMEM;
}

static void cp_init_rings_index (struct cp_private *cp)
{
	cp->rx_tail = 0;
	cp->tx_head = cp->tx_tail = 0;
}

static int cp_init_rings (struct cp_private *cp)
{
	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
	cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);

	cp_init_rings_index(cp);

	return cp_refill_rx (cp);
}

static int cp_alloc_rings (struct cp_private *cp)
{
	void *mem;

	mem = dma_alloc_coherent(&cp->pdev->dev, CP_RING_BYTES,
				 &cp->ring_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	cp->rx_ring = mem;
	cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];

	return cp_init_rings(cp);
}
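
/* Editor's note: a single coherent allocation of CP_RING_BYTES backs all the
 * DMA structures, laid out as [64 Rx descriptors][64 Tx descriptors][stats
 * block].  cp_init_hw() programs RxRingAddr with ring_dma and TxRingAddr
 * with ring_dma plus the size of the Rx ring, matching this layout.
 */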

static void cp_clean_rings (struct cp_private *cp)
{
	struct cp_desc *desc;
	unsigned i;

	for (i = 0; i < CP_RX_RING_SIZE; i++) {
		if (cp->rx_skb[i]) {
			desc = cp->rx_ring + i;
			dma_unmap_single(&cp->pdev->dev, le64_to_cpu(desc->addr),
					 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(cp->rx_skb[i]);
		}
	}

	for (i = 0; i < CP_TX_RING_SIZE; i++) {
		if (cp->tx_skb[i]) {
			struct sk_buff *skb = cp->tx_skb[i];

			desc = cp->tx_ring + i;
			dma_unmap_single(&cp->pdev->dev, le64_to_cpu(desc->addr),
					 le32_to_cpu(desc->opts1) & 0xffff,
					 PCI_DMA_TODEVICE);
			if (le32_to_cpu(desc->opts1) & LastFrag)
				dev_kfree_skb(skb);
			cp->net_stats.tx_dropped++;
		}
	}

	memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);

	memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE);
	memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE);
}

static void cp_free_rings (struct cp_private *cp)
{
	cp_clean_rings(cp);
	dma_free_coherent(&cp->pdev->dev, CP_RING_BYTES, cp->rx_ring,
			  cp->ring_dma);
	cp->rx_ring = NULL;
	cp->tx_ring = NULL;
}

static int cp_open (struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	int rc;

	if (netif_msg_ifup(cp))
		printk(KERN_DEBUG "%s: enabling interface\n", dev->name);

	rc = cp_alloc_rings(cp);
	if (rc)
		return rc;

	napi_enable(&cp->napi);

	cp_init_hw(cp);

	rc = request_irq(dev->irq, cp_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc)
		goto err_out_hw;

	netif_carrier_off(dev);
	mii_check_media(&cp->mii_if, netif_msg_link(cp), true);
	netif_start_queue(dev);

	return 0;

err_out_hw:
	napi_disable(&cp->napi);
	cp_stop_hw(cp);
	cp_free_rings(cp);
	return rc;
}

static int cp_close (struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	napi_disable(&cp->napi);

	if (netif_msg_ifdown(cp))
		printk(KERN_DEBUG "%s: disabling interface\n", dev->name);

	spin_lock_irqsave(&cp->lock, flags);

	netif_stop_queue(dev);
	netif_carrier_off(dev);

	cp_stop_hw(cp);

	spin_unlock_irqrestore(&cp->lock, flags);

	synchronize_irq(dev->irq);
	free_irq(dev->irq, dev);

	cp_free_rings(cp);
	return 0;
}

static void cp_tx_timeout(struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;
	int rc;

	printk(KERN_WARNING "%s: Transmit timeout, status %2x %4x %4x %4x\n",
	       dev->name, cpr8(Cmd), cpr16(CpCmd),
	       cpr16(IntrStatus), cpr16(IntrMask));

	spin_lock_irqsave(&cp->lock, flags);

	cp_stop_hw(cp);
	cp_clean_rings(cp);
	rc = cp_init_rings(cp);
	cp_start_hw(cp);

	netif_wake_queue(dev);

	spin_unlock_irqrestore(&cp->lock, flags);

	return;
}

#ifdef BROKEN
static int cp_change_mtu(struct net_device *dev, int new_mtu)
{
	struct cp_private *cp = netdev_priv(dev);
	int rc;
	unsigned long flags;

	/* check for invalid MTU, according to hardware limits */
	if (new_mtu < CP_MIN_MTU || new_mtu > CP_MAX_MTU)
		return -EINVAL;

	/* if network interface not up, no need for complexity */
	if (!netif_running(dev)) {
		dev->mtu = new_mtu;
		cp_set_rxbufsize(cp);	/* set new rx buf size */
		return 0;
	}

	spin_lock_irqsave(&cp->lock, flags);

	cp_stop_hw(cp);			/* stop h/w and free rings */
	cp_clean_rings(cp);

	dev->mtu = new_mtu;
	cp_set_rxbufsize(cp);		/* set new rx buf size */

	rc = cp_init_rings(cp);		/* realloc and restart h/w */
	cp_start_hw(cp);

	spin_unlock_irqrestore(&cp->lock, flags);

	return rc;
}
#endif /* BROKEN */

static const char mii_2_8139_map[8] = {
	BasicModeCtrl,
	BasicModeStatus,
	0,
	0,
	NWayAdvert,
	NWayLPAR,
	NWayExpansion,
	0
};

static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct cp_private *cp = netdev_priv(dev);

	return location < 8 && mii_2_8139_map[location] ?
	       readw(cp->regs + mii_2_8139_map[location]) : 0;
}


static void mdio_write(struct net_device *dev, int phy_id, int location,
		       int value)
{
	struct cp_private *cp = netdev_priv(dev);

	if (location == 0) {
		cpw8(Cfg9346, Cfg9346_Unlock);
		cpw16(BasicModeCtrl, value);
		cpw8(Cfg9346, Cfg9346_Lock);
	} else if (location < 8 && mii_2_8139_map[location])
		cpw16(mii_2_8139_map[location], value);
}
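
/* Editor's note: the internal PHY is not behind a real MDIO bus here;
 * mii_2_8139_map aliases the standard MII register numbers (BMCR = 0,
 * BMSR = 1, ADVERTISE = 4, LPA = 5, EXPANSION = 6) onto the chip's own
 * BasicMode/NWay registers, and unmapped locations read back as 0.
 * Writes to BMCR additionally require the Cfg9346 unlock sequence.
 */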

/* Set the ethtool Wake-on-LAN settings */
static int netdev_set_wol (struct cp_private *cp,
			   const struct ethtool_wolinfo *wol)
{
	u8 options;

	options = cpr8 (Config3) & ~(LinkUp | MagicPacket);
	/* If WOL is being disabled, no need for complexity */
	if (wol->wolopts) {
		if (wol->wolopts & WAKE_PHY)	options |= LinkUp;
		if (wol->wolopts & WAKE_MAGIC)	options |= MagicPacket;
	}

	cpw8 (Cfg9346, Cfg9346_Unlock);
	cpw8 (Config3, options);
	cpw8 (Cfg9346, Cfg9346_Lock);

	options = 0; /* Paranoia setting */
	options = cpr8 (Config5) & ~(UWF | MWF | BWF);
	/* If WOL is being disabled, no need for complexity */
	if (wol->wolopts) {
		if (wol->wolopts & WAKE_UCAST)  options |= UWF;
		if (wol->wolopts & WAKE_BCAST)	options |= BWF;
		if (wol->wolopts & WAKE_MCAST)	options |= MWF;
	}

	cpw8 (Config5, options);

	cp->wol_enabled = (wol->wolopts) ? 1 : 0;

	return 0;
}

/* Get the ethtool Wake-on-LAN settings */
static void netdev_get_wol (struct cp_private *cp,
	             struct ethtool_wolinfo *wol)
{
	u8 options;

	wol->wolopts   = 0; /* Start from scratch */
	wol->supported = WAKE_PHY   | WAKE_BCAST | WAKE_MAGIC |
		         WAKE_MCAST | WAKE_UCAST;
	/* We don't need to go on if WOL is disabled */
	if (!cp->wol_enabled) return;

	options        = cpr8 (Config3);
	if (options & LinkUp)        wol->wolopts |= WAKE_PHY;
	if (options & MagicPacket)   wol->wolopts |= WAKE_MAGIC;

	options        = 0; /* Paranoia setting */
	options        = cpr8 (Config5);
	if (options & UWF)           wol->wolopts |= WAKE_UCAST;
	if (options & BWF)           wol->wolopts |= WAKE_BCAST;
	if (options & MWF)           wol->wolopts |= WAKE_MCAST;
}

static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct cp_private *cp = netdev_priv(dev);

        strcpy (info->driver, DRV_NAME);
        strcpy (info->version, DRV_VERSION);
        strcpy (info->bus_info, pci_name(cp->pdev));
}

static int cp_get_regs_len(struct net_device *dev)
{
        return CP_REGS_SIZE;
}

static int cp_get_sset_count (struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return CP_NUM_STATS;
        default:
                return -EOPNOTSUPP;
        }
}

static int cp_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct cp_private *cp = netdev_priv(dev);
        int rc;
        unsigned long flags;

        spin_lock_irqsave(&cp->lock, flags);
        rc = mii_ethtool_gset(&cp->mii_if, cmd);
        spin_unlock_irqrestore(&cp->lock, flags);

        return rc;
}

static int cp_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct cp_private *cp = netdev_priv(dev);
        int rc;
        unsigned long flags;

        spin_lock_irqsave(&cp->lock, flags);
        rc = mii_ethtool_sset(&cp->mii_if, cmd);
        spin_unlock_irqrestore(&cp->lock, flags);

        return rc;
}

static int cp_nway_reset(struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        return mii_nway_restart(&cp->mii_if);
}

static u32 cp_get_msglevel(struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        return cp->msg_enable;
}

static void cp_set_msglevel(struct net_device *dev, u32 value)
{
        struct cp_private *cp = netdev_priv(dev);
        cp->msg_enable = value;
}

static u32 cp_get_rx_csum(struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        return (cpr16(CpCmd) & RxChkSum) ? 1 : 0;
}

static int cp_set_rx_csum(struct net_device *dev, u32 data)
{
        struct cp_private *cp = netdev_priv(dev);
        u16 cmd = cp->cpcmd, newcmd;

        newcmd = cmd;

        if (data)
                newcmd |= RxChkSum;
        else
                newcmd &= ~RxChkSum;

        if (newcmd != cmd) {
                unsigned long flags;

                spin_lock_irqsave(&cp->lock, flags);
                cp->cpcmd = newcmd;
                cpw16_f(CpCmd, newcmd);
                spin_unlock_irqrestore(&cp->lock, flags);
        }

        return 0;
}

static void cp_get_regs(struct net_device *dev, struct ethtool_regs *regs,
                        void *p)
{
        struct cp_private *cp = netdev_priv(dev);
        unsigned long flags;

        if (regs->len < CP_REGS_SIZE)
                return /* -EINVAL */;

        regs->version = CP_REGS_VER;

        spin_lock_irqsave(&cp->lock, flags);
        memcpy_fromio(p, cp->regs, CP_REGS_SIZE);
        spin_unlock_irqrestore(&cp->lock, flags);
}

static void cp_get_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
{
        struct cp_private *cp = netdev_priv(dev);
        unsigned long flags;

        spin_lock_irqsave (&cp->lock, flags);
        netdev_get_wol (cp, wol);
        spin_unlock_irqrestore (&cp->lock, flags);
}

static int cp_set_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
{
        struct cp_private *cp = netdev_priv(dev);
        unsigned long flags;
        int rc;

        spin_lock_irqsave (&cp->lock, flags);
        rc = netdev_set_wol (cp, wol);
        spin_unlock_irqrestore (&cp->lock, flags);

        return rc;
}

static void cp_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
{
        switch (stringset) {
        case ETH_SS_STATS:
                memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
                break;
        default:
                BUG();
                break;
        }
}
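
/*
 * Hardware statistics arrive by DMA: cp_get_ethtool_stats() writes the
 * bus address of a cp_dma_stats buffer to StatsAddr with the DumpStats
 * command bit set, then polls up to 1000 times at 10us intervals for
 * the chip to clear the bit, signalling that the counters have landed
 * in the buffer.  The final entry, rx_frags, is a software counter.
 */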

static void cp_get_ethtool_stats (struct net_device *dev,
                                  struct ethtool_stats *estats, u64 *tmp_stats)
{
        struct cp_private *cp = netdev_priv(dev);
        struct cp_dma_stats *nic_stats;
        dma_addr_t dma;
        int i;

        nic_stats = dma_alloc_coherent(&cp->pdev->dev, sizeof(*nic_stats),
                                       &dma, GFP_KERNEL);
        if (!nic_stats)
                return;

        /* begin NIC statistics dump */
        cpw32(StatsAddr + 4, (u64)dma >> 32);
        cpw32(StatsAddr, ((u64)dma & DMA_32BIT_MASK) | DumpStats);
        cpr32(StatsAddr);

        for (i = 0; i < 1000; i++) {
                if ((cpr32(StatsAddr) & DumpStats) == 0)
                        break;
                udelay(10);
        }
        cpw32(StatsAddr, 0);
        cpw32(StatsAddr + 4, 0);
        cpr32(StatsAddr);

        i = 0;
        tmp_stats[i++] = le64_to_cpu(nic_stats->tx_ok);
        tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok);
        tmp_stats[i++] = le64_to_cpu(nic_stats->tx_err);
        tmp_stats[i++] = le32_to_cpu(nic_stats->rx_err);
        tmp_stats[i++] = le16_to_cpu(nic_stats->rx_fifo);
        tmp_stats[i++] = le16_to_cpu(nic_stats->frame_align);
        tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_1col);
        tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_mcol);
        tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_phys);
        tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_bcast);
        tmp_stats[i++] = le32_to_cpu(nic_stats->rx_ok_mcast);
        tmp_stats[i++] = le16_to_cpu(nic_stats->tx_abort);
        tmp_stats[i++] = le16_to_cpu(nic_stats->tx_underrun);
        tmp_stats[i++] = cp->cp_stats.rx_frags;
        BUG_ON(i != CP_NUM_STATS);

        dma_free_coherent(&cp->pdev->dev, sizeof(*nic_stats), nic_stats, dma);
}

static const struct ethtool_ops cp_ethtool_ops = {
        .get_drvinfo            = cp_get_drvinfo,
        .get_regs_len           = cp_get_regs_len,
        .get_sset_count         = cp_get_sset_count,
        .get_settings           = cp_get_settings,
        .set_settings           = cp_set_settings,
        .nway_reset             = cp_nway_reset,
        .get_link               = ethtool_op_get_link,
        .get_msglevel           = cp_get_msglevel,
        .set_msglevel           = cp_set_msglevel,
        .get_rx_csum            = cp_get_rx_csum,
        .set_rx_csum            = cp_set_rx_csum,
        .set_tx_csum            = ethtool_op_set_tx_csum, /* local! */
        .set_sg                 = ethtool_op_set_sg,
        .set_tso                = ethtool_op_set_tso,
        .get_regs               = cp_get_regs,
        .get_wol                = cp_get_wol,
        .set_wol                = cp_set_wol,
        .get_strings            = cp_get_strings,
        .get_ethtool_stats      = cp_get_ethtool_stats,
        .get_eeprom_len         = cp_get_eeprom_len,
        .get_eeprom             = cp_get_eeprom,
        .set_eeprom             = cp_set_eeprom,
};
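
/*
 * All of cp_ethtool_ops is reachable from userspace through the
 * ethtool utility.  Illustrative invocations (assuming the interface
 * is named eth0):
 *
 *      ethtool -e eth0         # dump the EEPROM via cp_get_eeprom()
 *      ethtool -s eth0 wol g   # arm Magic Packet wake via cp_set_wol()
 */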

static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
{
        struct cp_private *cp = netdev_priv(dev);
        int rc;
        unsigned long flags;

        if (!netif_running(dev))
                return -EINVAL;

        spin_lock_irqsave(&cp->lock, flags);
        rc = generic_mii_ioctl(&cp->mii_if, if_mii(rq), cmd, NULL);
        spin_unlock_irqrestore(&cp->lock, flags);
        return rc;
}

/* Serial EEPROM section. */

/*  EEPROM_Ctrl bits. */
#define EE_SHIFT_CLK    0x04    /* EEPROM shift clock. */
#define EE_CS           0x08    /* EEPROM chip select. */
#define EE_DATA_WRITE   0x02    /* EEPROM chip data in. */
#define EE_WRITE_0      0x00
#define EE_WRITE_1      0x02
#define EE_DATA_READ    0x01    /* EEPROM chip data out. */
#define EE_ENB          (0x80 | EE_CS)

/* Delay between EEPROM clock transitions.
   No extra delay is needed with 33 MHz PCI, but 66 MHz may change this.
 */

#define eeprom_delay()  readl(ee_addr)

/* The EEPROM commands include the always-set leading bit. */
#define EE_EXTEND_CMD   (4)
#define EE_WRITE_CMD    (5)
#define EE_READ_CMD     (6)
#define EE_ERASE_CMD    (7)

#define EE_EWDS_ADDR    (0)
#define EE_WRAL_ADDR    (1)
#define EE_ERAL_ADDR    (2)
#define EE_EWEN_ADDR    (3)

#define CP_EEPROM_MAGIC PCI_DEVICE_ID_REALTEK_8139
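
/*
 * Microwire (93CXX) commands are framed as a start bit, a two-bit
 * opcode and an address, shifted out MSB first; the EE_*_CMD values
 * above already include the always-set start bit.  A READ of word 5
 * from a part with 6 address bits is therefore the 9-bit pattern
 * (EE_READ_CMD << 6) | 5 == 1 10 000101 (binary).  Opcode 00
 * (EE_EXTEND_CMD) multiplexes EWDS/WRAL/ERAL/EWEN through the top two
 * address bits, which is what the EE_*_ADDR values select.
 */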

static void eeprom_cmd_start(void __iomem *ee_addr)
{
        writeb (EE_ENB & ~EE_CS, ee_addr);
        writeb (EE_ENB, ee_addr);
        eeprom_delay ();
}

static void eeprom_cmd(void __iomem *ee_addr, int cmd, int cmd_len)
{
        int i;

        /* Shift the command bits out. */
        for (i = cmd_len - 1; i >= 0; i--) {
                int dataval = (cmd & (1 << i)) ? EE_DATA_WRITE : 0;
                writeb (EE_ENB | dataval, ee_addr);
                eeprom_delay ();
                writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
                eeprom_delay ();
        }
        writeb (EE_ENB, ee_addr);
        eeprom_delay ();
}

static void eeprom_cmd_end(void __iomem *ee_addr)
{
        writeb (~EE_CS, ee_addr);
        eeprom_delay ();
}

static void eeprom_extend_cmd(void __iomem *ee_addr, int extend_cmd,
                              int addr_len)
{
        int cmd = (EE_EXTEND_CMD << addr_len) | (extend_cmd << (addr_len - 2));

        eeprom_cmd_start(ee_addr);
        eeprom_cmd(ee_addr, cmd, 3 + addr_len);
        eeprom_cmd_end(ee_addr);
}

static u16 read_eeprom (void __iomem *ioaddr, int location, int addr_len)
{
        int i;
        u16 retval = 0;
        void __iomem *ee_addr = ioaddr + Cfg9346;
        int read_cmd = location | (EE_READ_CMD << addr_len);

        eeprom_cmd_start(ee_addr);
        eeprom_cmd(ee_addr, read_cmd, 3 + addr_len);

        for (i = 16; i > 0; i--) {
                writeb (EE_ENB | EE_SHIFT_CLK, ee_addr);
                eeprom_delay ();
                retval = (retval << 1) |
                         ((readb (ee_addr) & EE_DATA_READ) ? 1 : 0);
                writeb (EE_ENB, ee_addr);
                eeprom_delay ();
        }

        eeprom_cmd_end(ee_addr);

        return retval;
}
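
/*
 * A word write is a four-step sequence: EWEN to lift write protection,
 * the WRITE command followed by 16 data bits, a bounded busy-wait for
 * the part to raise its data-out line when programming completes, and
 * EWDS to restore write protection.
 */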

static void write_eeprom(void __iomem *ioaddr, int location, u16 val,
                         int addr_len)
{
        int i;
        void __iomem *ee_addr = ioaddr + Cfg9346;
        int write_cmd = location | (EE_WRITE_CMD << addr_len);

        eeprom_extend_cmd(ee_addr, EE_EWEN_ADDR, addr_len);

        eeprom_cmd_start(ee_addr);
        eeprom_cmd(ee_addr, write_cmd, 3 + addr_len);
        eeprom_cmd(ee_addr, val, 16);
        eeprom_cmd_end(ee_addr);

        eeprom_cmd_start(ee_addr);
        for (i = 0; i < 20000; i++)
                if (readb(ee_addr) & EE_DATA_READ)
                        break;
        eeprom_cmd_end(ee_addr);

        eeprom_extend_cmd(ee_addr, EE_EWDS_ADDR, addr_len);
}
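
/*
 * Word 0 of the EEPROM is an ID; 0x8129 identifies the larger part
 * (256 bytes, 8 address bits), anything else the 128-byte part with 6
 * address bits.  The same probe picks addr_len in the EEPROM helpers
 * below and when the MAC address is read in cp_init_one().
 */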

static int cp_get_eeprom_len(struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        int size;

        spin_lock_irq(&cp->lock);
        size = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 256 : 128;
        spin_unlock_irq(&cp->lock);

        return size;
}

static int cp_get_eeprom(struct net_device *dev,
                         struct ethtool_eeprom *eeprom, u8 *data)
{
        struct cp_private *cp = netdev_priv(dev);
        unsigned int addr_len;
        u16 val;
        u32 offset = eeprom->offset >> 1;
        u32 len = eeprom->len;
        u32 i = 0;

        eeprom->magic = CP_EEPROM_MAGIC;

        spin_lock_irq(&cp->lock);

        addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;

        if (eeprom->offset & 1) {
                val = read_eeprom(cp->regs, offset, addr_len);
                data[i++] = (u8)(val >> 8);
                offset++;
        }

        while (i < len - 1) {
                val = read_eeprom(cp->regs, offset, addr_len);
                data[i++] = (u8)val;
                data[i++] = (u8)(val >> 8);
                offset++;
        }

        if (i < len) {
                val = read_eeprom(cp->regs, offset, addr_len);
                data[i] = (u8)val;
        }

        spin_unlock_irq(&cp->lock);
        return 0;
}

static int cp_set_eeprom(struct net_device *dev,
                         struct ethtool_eeprom *eeprom, u8 *data)
{
        struct cp_private *cp = netdev_priv(dev);
        unsigned int addr_len;
        u16 val;
        u32 offset = eeprom->offset >> 1;
        u32 len = eeprom->len;
        u32 i = 0;

        if (eeprom->magic != CP_EEPROM_MAGIC)
                return -EINVAL;

        spin_lock_irq(&cp->lock);

        addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;

        if (eeprom->offset & 1) {
                val = read_eeprom(cp->regs, offset, addr_len) & 0xff;
                val |= (u16)data[i++] << 8;
                write_eeprom(cp->regs, offset, val, addr_len);
                offset++;
        }

        while (i < len - 1) {
                val = (u16)data[i++];
                val |= (u16)data[i++] << 8;
                write_eeprom(cp->regs, offset, val, addr_len);
                offset++;
        }

        if (i < len) {
                val = read_eeprom(cp->regs, offset, addr_len) & 0xff00;
                val |= (u16)data[i];
                write_eeprom(cp->regs, offset, val, addr_len);
        }

        spin_unlock_irq(&cp->lock);
        return 0;
}

/* Put the board into a low-power PCI D3 state and wait for the WakeUp signal */
static void cp_set_d3_state (struct cp_private *cp)
{
        pci_enable_wake (cp->pdev, 0, 1); /* Enable PME# generation */
        pci_set_power_state (cp->pdev, PCI_D3hot);
}
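
/*
 * Probe path.  The ordering below matters: enable the PCI device, turn
 * on memory-write-invalidate, claim the regions, choose DMA masks, map
 * BAR 1, read the MAC address from the EEPROM, then register the
 * netdev.  Each err_out_* label unwinds exactly the steps completed
 * before the failure, in reverse order.
 */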

static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct net_device *dev;
        struct cp_private *cp;
        int rc;
        void __iomem *regs;
        resource_size_t pciaddr;
        unsigned int addr_len, i, pci_using_dac;
        DECLARE_MAC_BUF(mac);

#ifndef MODULE
        static int version_printed;
        if (version_printed++ == 0)
                printk("%s", version);
#endif

        if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
            pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision < 0x20) {
                dev_err(&pdev->dev,
                        "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip\n",
                        pdev->vendor, pdev->device, pdev->revision);
                dev_err(&pdev->dev, "Try the \"8139too\" driver instead.\n");
                return -ENODEV;
        }

        dev = alloc_etherdev(sizeof(struct cp_private));
        if (!dev)
                return -ENOMEM;
        SET_NETDEV_DEV(dev, &pdev->dev);

        cp = netdev_priv(dev);
        cp->pdev = pdev;
        cp->dev = dev;
        cp->msg_enable = (debug < 0 ? CP_DEF_MSG_ENABLE : debug);
        spin_lock_init (&cp->lock);
        cp->mii_if.dev = dev;
        cp->mii_if.mdio_read = mdio_read;
        cp->mii_if.mdio_write = mdio_write;
        cp->mii_if.phy_id = CP_INTERNAL_PHY;
        cp->mii_if.phy_id_mask = 0x1f;
        cp->mii_if.reg_num_mask = 0x1f;
        cp_set_rxbufsize(cp);

        rc = pci_enable_device(pdev);
        if (rc)
                goto err_out_free;

        rc = pci_set_mwi(pdev);
        if (rc)
                goto err_out_disable;

        rc = pci_request_regions(pdev, DRV_NAME);
        if (rc)
                goto err_out_mwi;

        pciaddr = pci_resource_start(pdev, 1);
        if (!pciaddr) {
                rc = -EIO;
                dev_err(&pdev->dev, "no MMIO resource\n");
                goto err_out_res;
        }
        if (pci_resource_len(pdev, 1) < CP_REGS_SIZE) {
                rc = -EIO;
                dev_err(&pdev->dev, "MMIO resource (%llx) too small\n",
                        (unsigned long long)pci_resource_len(pdev, 1));
                goto err_out_res;
        }

        /* Configure DMA attributes. */
        if ((sizeof(dma_addr_t) > 4) &&
            !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) &&
            !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
                pci_using_dac = 1;
        } else {
                pci_using_dac = 0;

                rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
                if (rc) {
                        dev_err(&pdev->dev,
                                "No usable DMA configuration, aborting.\n");
                        goto err_out_res;
                }
                rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
                if (rc) {
                        dev_err(&pdev->dev,
                                "No usable consistent DMA configuration, "
                                "aborting.\n");
                        goto err_out_res;
                }
        }

        cp->cpcmd = (pci_using_dac ? PCIDAC : 0) |
                    PCIMulRW | RxChkSum | CpRxOn | CpTxOn;

        regs = ioremap(pciaddr, CP_REGS_SIZE);
        if (!regs) {
                rc = -EIO;
                dev_err(&pdev->dev, "Cannot map PCI MMIO (%Lx@%Lx)\n",
                        (unsigned long long)pci_resource_len(pdev, 1),
                        (unsigned long long)pciaddr);
                goto err_out_res;
        }
        dev->base_addr = (unsigned long) regs;
        cp->regs = regs;

        cp_stop_hw(cp);

        /* read MAC address from EEPROM */
        addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 8 : 6;
        for (i = 0; i < 3; i++)
                ((__le16 *) (dev->dev_addr))[i] =
                    cpu_to_le16(read_eeprom (regs, i + 7, addr_len));
        memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

        dev->open = cp_open;
        dev->stop = cp_close;
        dev->set_multicast_list = cp_set_rx_mode;
        dev->hard_start_xmit = cp_start_xmit;
        dev->get_stats = cp_get_stats;
        dev->do_ioctl = cp_ioctl;
#ifdef CONFIG_NET_POLL_CONTROLLER
        dev->poll_controller = cp_poll_controller;
#endif
        netif_napi_add(dev, &cp->napi, cp_rx_poll, 16);
#ifdef BROKEN
        dev->change_mtu = cp_change_mtu;
#endif
        dev->ethtool_ops = &cp_ethtool_ops;
        dev->tx_timeout = cp_tx_timeout;
        dev->watchdog_timeo = TX_TIMEOUT;

#if CP_VLAN_TAG_USED
        dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
        dev->vlan_rx_register = cp_vlan_rx_register;
#endif

        if (pci_using_dac)
                dev->features |= NETIF_F_HIGHDMA;

#if 0 /* disabled by default until verified */
        dev->features |= NETIF_F_TSO;
#endif

        dev->irq = pdev->irq;

        rc = register_netdev(dev);
        if (rc)
                goto err_out_iomap;

        printk (KERN_INFO "%s: RTL-8139C+ at 0x%lx, "
                "%s, IRQ %d\n",
                dev->name,
                dev->base_addr,
                print_mac(mac, dev->dev_addr),
                dev->irq);

        pci_set_drvdata(pdev, dev);

        /* enable busmastering and memory-write-invalidate */
        pci_set_master(pdev);

        if (cp->wol_enabled)
                cp_set_d3_state (cp);

        return 0;

err_out_iomap:
        iounmap(regs);
err_out_res:
        pci_release_regions(pdev);
err_out_mwi:
        pci_clear_mwi(pdev);
err_out_disable:
        pci_disable_device(pdev);
err_out_free:
        free_netdev(dev);
        return rc;
}

static void cp_remove_one (struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct cp_private *cp = netdev_priv(dev);

        unregister_netdev(dev);
        iounmap(cp->regs);
        if (cp->wol_enabled)
                pci_set_power_state (pdev, PCI_D0);
        pci_release_regions(pdev);
        pci_clear_mwi(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        free_netdev(dev);
}
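
/*
 * Power management: cp_suspend() masks interrupts, stops the Rx/Tx
 * engines and arms PME# only if WOL was enabled through ethtool;
 * cp_resume() returns the device to D0 and reprograms the hardware
 * from scratch before re-checking the link via the MII layer.
 */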

#ifdef CONFIG_PM
static int cp_suspend (struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct cp_private *cp = netdev_priv(dev);
        unsigned long flags;

        if (!netif_running(dev))
                return 0;

        netif_device_detach (dev);
        netif_stop_queue (dev);

        spin_lock_irqsave (&cp->lock, flags);

        /* Disable Rx and Tx */
        cpw16 (IntrMask, 0);
        cpw8  (Cmd, cpr8 (Cmd) & ~(RxOn | TxOn));

        spin_unlock_irqrestore (&cp->lock, flags);

        pci_save_state(pdev);
        pci_enable_wake(pdev, pci_choose_state(pdev, state), cp->wol_enabled);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));

        return 0;
}
2045
 
2046
static int cp_resume (struct pci_dev *pdev)
2047
{
2048
        struct net_device *dev = pci_get_drvdata (pdev);
2049
        struct cp_private *cp = netdev_priv(dev);
2050
        unsigned long flags;
2051
 
2052
        if (!netif_running(dev))
2053
                return 0;
2054
 
2055
        netif_device_attach (dev);
2056
 
2057
        pci_set_power_state(pdev, PCI_D0);
2058
        pci_restore_state(pdev);
2059
        pci_enable_wake(pdev, PCI_D0, 0);
2060
 
2061
        /* FIXME: sh*t may happen if the Rx ring buffer is depleted */
2062
        cp_init_rings_index (cp);
2063
        cp_init_hw (cp);
2064
        netif_start_queue (dev);
2065
 
2066
        spin_lock_irqsave (&cp->lock, flags);
2067
 
2068
        mii_check_media(&cp->mii_if, netif_msg_link(cp), false);
2069
 
2070
        spin_unlock_irqrestore (&cp->lock, flags);
2071
 
2072
        return 0;
2073
}
2074
#endif /* CONFIG_PM */
2075
 
2076
static struct pci_driver cp_driver = {
2077
        .name         = DRV_NAME,
2078
        .id_table     = cp_pci_tbl,
2079
        .probe        = cp_init_one,
2080
        .remove       = cp_remove_one,
2081
#ifdef CONFIG_PM
2082
        .resume       = cp_resume,
2083
        .suspend      = cp_suspend,
2084
#endif
2085
};
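
/*
 * pci_register_driver() in cp_init() binds the driver to every device
 * matching cp_pci_tbl (defined earlier in this file) and calls
 * cp_init_one() once per match.  A quick manual test is simply
 * "modprobe 8139cp"; the driver's "debug" module parameter, consumed
 * above via cp->msg_enable, raises the message verbosity.
 */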

static int __init cp_init (void)
{
#ifdef MODULE
        printk("%s", version);
#endif
        return pci_register_driver(&cp_driver);
}

static void __exit cp_exit (void)
{
        pci_unregister_driver (&cp_driver);
}

module_init(cp_init);
module_exit(cp_exit);
