/* 8139cp.c: A Linux PCI Ethernet driver for the RealTek 8139C+ chips. */
/*
	Copyright 2001,2002 Jeff Garzik <jgarzik@pobox.com>

	Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com) [tg3.c]
	Copyright (C) 2000, 2001 David S. Miller (davem@redhat.com) [sungem.c]
	Copyright 2001 Manfred Spraul [natsemi.c]
	Copyright 1999-2001 by Donald Becker. [natsemi.c]
	Written 1997-2001 by Donald Becker. [8139too.c]
	Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>. [acenic.c]

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	See the file COPYING in this distribution for more information.

	Contributors:

		Wake-on-LAN support - Felipe Damasio <felipewd@terra.com.br>
		PCI suspend/resume  - Felipe Damasio <felipewd@terra.com.br>
		LinkChg interrupt   - Felipe Damasio <felipewd@terra.com.br>

	TODO:
	* Test Tx checksumming thoroughly
	* Implement dev->tx_timeout

	Low priority TODO:
	* Complete reset on PciErr
	* Consider Rx interrupt mitigation using TimerIntr
	* Investigate using skb->priority with h/w VLAN priority
	* Investigate using High Priority Tx Queue with skb->priority
	* Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
	* Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
	* Implement Tx software interrupt mitigation via
	  Tx descriptor bit
	* The real minimum of CP_MIN_MTU is 4 bytes.  However,
	  for this to be supported, one must(?) turn on packet padding.
	* Support external MII transceivers (patch available)

	NOTES:
	* TX checksumming is considered experimental.  It is off by
	  default, use ethtool to turn it on.

 */

#define DRV_NAME		"8139cp"
#define DRV_VERSION		"1.1"
#define DRV_RELDATE		"Aug 30, 2003"


#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <asm/io.h>
#include <asm/uaccess.h>

/* VLAN tagging feature enable/disable */
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define CP_VLAN_TAG_USED 1
#define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \
	do { (tx_desc)->opts2 = (vlan_tag_value); } while (0)
#else
#define CP_VLAN_TAG_USED 0
#define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \
	do { (tx_desc)->opts2 = 0; } while (0)
#endif
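
/* Note: CP_VLAN_TX_TAG stores its second argument in opts2 verbatim.
 * The caller (cp_start_xmit, below) builds that value as
 * TxVlanTag | cpu_to_be16(vlan_tx_tag_get(skb)), so the tag reaches the
 * descriptor already in on-wire byte order.
 */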

/* These identify the driver base version and may not be removed. */
static char version[] =
KERN_INFO DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";

MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver");
MODULE_LICENSE("GPL");

static int debug = -1;
MODULE_PARM (debug, "i");
MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number");

/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
   The RTL chips use a 64 element hash table based on the Ethernet CRC.  */
static int multicast_filter_limit = 32;
MODULE_PARM (multicast_filter_limit, "i");
MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");

#define PFX			DRV_NAME ": "

#ifndef TRUE
#define FALSE 0
#define TRUE (!FALSE)
#endif

#define CP_DEF_MSG_ENABLE	(NETIF_MSG_DRV		| \
				 NETIF_MSG_PROBE 	| \
				 NETIF_MSG_LINK)
#define CP_NUM_STATS		14	/* struct cp_dma_stats, plus one */
#define CP_STATS_SIZE		64	/* size in bytes of DMA stats block */
#define CP_REGS_SIZE		(0xff + 1)
#define CP_REGS_VER		1	/* version 1 */
#define CP_RX_RING_SIZE		64
#define CP_TX_RING_SIZE		64
#define CP_RING_BYTES		\
		((sizeof(struct cp_desc) * CP_RX_RING_SIZE) +	\
		 (sizeof(struct cp_desc) * CP_TX_RING_SIZE) +	\
		 CP_STATS_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (CP_TX_RING_SIZE - 1))
#define NEXT_RX(N)		(((N) + 1) & (CP_RX_RING_SIZE - 1))
#define TX_BUFFS_AVAIL(CP)					\
	(((CP)->tx_tail <= (CP)->tx_head) ?			\
	  (CP)->tx_tail + (CP_TX_RING_SIZE - 1) - (CP)->tx_head : \
	  (CP)->tx_tail - (CP)->tx_head - 1)
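
/* Worked example of the ring accounting above: with CP_TX_RING_SIZE = 64
 * and tx_head == tx_tail (empty ring), TX_BUFFS_AVAIL() yields 63 -- one
 * slot is always left unused so a completely full ring is never mistaken
 * for an empty one.  NEXT_TX/NEXT_RX depend on the ring sizes being
 * powers of two, which lets the "+ 1" wrap-around reduce to a bitwise AND.
 */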

#define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer.*/
#define RX_OFFSET		2
#define CP_INTERNAL_PHY		32

/* The following settings are log_2(bytes)-4:  0 == 16 bytes .. 6==1024, 7==end of packet. */
#define RX_FIFO_THRESH		5	/* Rx buffer level before first PCI xfer.  */
#define RX_DMA_BURST		4	/* Maximum PCI burst, '4' is 256 */
#define TX_DMA_BURST		6	/* Maximum PCI burst, '6' is 1024 */
#define TX_EARLY_THRESH		256	/* Early Tx threshold, in bytes */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT		(6*HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define CP_MIN_MTU		60	/* TODO: allow lower, but pad */
#define CP_MAX_MTU		4096

enum {
	/* NIC register offsets */
	MAC0		= 0x00,	/* Ethernet hardware address. */
	MAR0		= 0x08,	/* Multicast filter. */
	StatsAddr	= 0x10,	/* 64-bit start addr of 64-byte DMA stats blk */
	TxRingAddr	= 0x20, /* 64-bit start addr of Tx ring */
	HiTxRingAddr	= 0x28, /* 64-bit start addr of high priority Tx ring */
	Cmd		= 0x37, /* Command register */
	IntrMask	= 0x3C, /* Interrupt mask */
	IntrStatus	= 0x3E, /* Interrupt status */
	TxConfig	= 0x40, /* Tx configuration */
	ChipVersion	= 0x43, /* 8-bit chip version, inside TxConfig */
	RxConfig	= 0x44, /* Rx configuration */
	RxMissed	= 0x4C,	/* 24 bits valid, write clears */
	Cfg9346		= 0x50, /* EEPROM select/control; Cfg reg [un]lock */
	Config1		= 0x52, /* Config1 */
	Config3		= 0x59, /* Config3 */
	Config4		= 0x5A, /* Config4 */
	MultiIntr	= 0x5C, /* Multiple interrupt select */
	BasicModeCtrl	= 0x62,	/* MII BMCR */
	BasicModeStatus	= 0x64, /* MII BMSR */
	NWayAdvert	= 0x66, /* MII ADVERTISE */
	NWayLPAR	= 0x68, /* MII LPA */
	NWayExpansion	= 0x6A, /* MII Expansion */
	Config5		= 0xD8,	/* Config5 */
	TxPoll		= 0xD9,	/* Tell chip to check Tx descriptors for work */
	RxMaxSize	= 0xDA, /* Max size of an Rx packet (8169 only) */
	CpCmd		= 0xE0, /* C+ Command register (C+ mode only) */
	IntrMitigate	= 0xE2,	/* rx/tx interrupt mitigation control */
	RxRingAddr	= 0xE4, /* 64-bit start addr of Rx ring */
	TxThresh	= 0xEC, /* Early Tx threshold */
	OldRxBufAddr	= 0x30, /* DMA address of Rx ring buffer (C mode) */
	OldTSD0		= 0x10, /* DMA address of first Tx desc (C mode) */

	/* Tx and Rx status descriptors */
	DescOwn		= (1 << 31), /* Descriptor is owned by NIC */
	RingEnd		= (1 << 30), /* End of descriptor ring */
	FirstFrag	= (1 << 29), /* First segment of a packet */
	LastFrag	= (1 << 28), /* Final segment of a packet */
	TxError		= (1 << 23), /* Tx error summary */
	RxError		= (1 << 20), /* Rx error summary */
	IPCS		= (1 << 18), /* Calculate IP checksum */
	UDPCS		= (1 << 17), /* Calculate UDP/IP checksum */
	TCPCS		= (1 << 16), /* Calculate TCP/IP checksum */
	TxVlanTag	= (1 << 17), /* Add VLAN tag */
	RxVlanTagged	= (1 << 16), /* Rx VLAN tag available */
	IPFail		= (1 << 15), /* IP checksum failed */
	UDPFail		= (1 << 14), /* UDP/IP checksum failed */
	TCPFail		= (1 << 13), /* TCP/IP checksum failed */
	NormalTxPoll	= (1 << 6),  /* One or more normal Tx packets to send */
	PID1		= (1 << 17), /* 2 protocol id bits:  0==non-IP, */
	PID0		= (1 << 16), /* 1==UDP/IP, 2==TCP/IP, 3==IP */
	RxProtoTCP	= 1,
	RxProtoUDP	= 2,
	RxProtoIP	= 3,
	TxFIFOUnder	= (1 << 25), /* Tx FIFO underrun */
	TxOWC		= (1 << 22), /* Tx Out-of-window collision */
	TxLinkFail	= (1 << 21), /* Link failed during Tx of packet */
	TxMaxCol	= (1 << 20), /* Tx aborted due to excessive collisions */
	TxColCntShift	= 16,	     /* Shift, to get 4-bit Tx collision cnt */
	TxColCntMask	= 0x01 | 0x02 | 0x04 | 0x08, /* 4-bit collision count */
	RxErrFrame	= (1 << 27), /* Rx frame alignment error */
	RxMcast		= (1 << 26), /* Rx multicast packet rcv'd */
	RxErrCRC	= (1 << 18), /* Rx CRC error */
	RxErrRunt	= (1 << 19), /* Rx error, packet < 64 bytes */
	RxErrLong	= (1 << 21), /* Rx error, packet > 4096 bytes */
	RxErrFIFO	= (1 << 22), /* Rx error, FIFO overflowed, pkt bad */

	/* StatsAddr register */
	DumpStats	= (1 << 3),  /* Begin stats dump */

	/* RxConfig register */
	RxCfgFIFOShift	= 13,	     /* Shift, to get Rx FIFO thresh value */
	RxCfgDMAShift	= 8,	     /* Shift, to get Rx Max DMA value */
	AcceptErr	= 0x20,	     /* Accept packets with CRC errors */
	AcceptRunt	= 0x10,	     /* Accept runt (<64 bytes) packets */
	AcceptBroadcast	= 0x08,	     /* Accept broadcast packets */
	AcceptMulticast	= 0x04,	     /* Accept multicast packets */
	AcceptMyPhys	= 0x02,	     /* Accept pkts with our MAC as dest */
	AcceptAllPhys	= 0x01,	     /* Accept all pkts w/ physical dest */

	/* IntrMask / IntrStatus registers */
	PciErr		= (1 << 15), /* System error on the PCI bus */
	TimerIntr	= (1 << 14), /* Asserted when TCTR reaches TimerInt value */
	LenChg		= (1 << 13), /* Cable length change */
	SWInt		= (1 << 8),  /* Software-requested interrupt */
	TxEmpty		= (1 << 7),  /* No Tx descriptors available */
	RxFIFOOvr	= (1 << 6),  /* Rx FIFO Overflow */
	LinkChg		= (1 << 5),  /* Packet underrun, or link change */
	RxEmpty		= (1 << 4),  /* No Rx descriptors available */
	TxErr		= (1 << 3),  /* Tx error */
	TxOK		= (1 << 2),  /* Tx packet sent */
	RxErr		= (1 << 1),  /* Rx error */
	RxOK		= (1 << 0),  /* Rx packet received */
	IntrResvd	= (1 << 10), /* reserved, according to RealTek engineers,
					but hardware likes to raise it */

	IntrAll		= PciErr | TimerIntr | LenChg | SWInt | TxEmpty |
			  RxFIFOOvr | LinkChg | RxEmpty | TxErr | TxOK |
			  RxErr | RxOK | IntrResvd,

	/* C mode command register */
	CmdReset	= (1 << 4),  /* Enable to reset; self-clearing */
	RxOn		= (1 << 3),  /* Rx mode enable */
	TxOn		= (1 << 2),  /* Tx mode enable */

	/* C+ mode command register */
	RxVlanOn	= (1 << 6),  /* Rx VLAN de-tagging enable */
	RxChkSum	= (1 << 5),  /* Rx checksum offload enable */
	PCIDAC		= (1 << 4),  /* PCI Dual Address Cycle (64-bit PCI) */
	PCIMulRW	= (1 << 3),  /* Enable PCI read/write multiple */
	CpRxOn		= (1 << 1),  /* Rx mode enable */
	CpTxOn		= (1 << 0),  /* Tx mode enable */

	/* Cfg9346 EEPROM control register */
	Cfg9346_Lock	= 0x00,	     /* Lock ConfigX/MII register access */
	Cfg9346_Unlock	= 0xC0,	     /* Unlock ConfigX/MII register access */

	/* TxConfig register */
	IFG		= (1 << 25) | (1 << 24), /* standard IEEE interframe gap */
	TxDMAShift	= 8,	     /* DMA burst value (0-7) is shift this many bits */

	/* Early Tx Threshold register */
	TxThreshMask	= 0x3f,	     /* Mask bits 5-0 */
	TxThreshMax	= 2048,	     /* Max early Tx threshold */

	/* Config1 register */
	DriverLoaded	= (1 << 5),  /* Software marker, driver is loaded */
	LWACT		= (1 << 4),  /* LWAKE active mode */
	PMEnable	= (1 << 0),  /* Enable various PM features of chip */

	/* Config3 register */
	PARMEnable	= (1 << 6),  /* Enable auto-loading of PHY parms */
	MagicPacket	= (1 << 5),  /* Wake up when receives a Magic Packet */
	LinkUp		= (1 << 4),  /* Wake up when the cable connection is re-established */

	/* Config4 register */
	LWPTN		= (1 << 1),  /* LWAKE Pattern */
	LWPME		= (1 << 4),  /* LANWAKE vs PMEB */

	/* Config5 register */
	BWF		= (1 << 6),  /* Accept Broadcast wakeup frame */
	MWF		= (1 << 5),  /* Accept Multicast wakeup frame */
	UWF		= (1 << 4),  /* Accept Unicast wakeup frame */
	LANWake		= (1 << 1),  /* Enable LANWake signal */
	PMEStatus	= (1 << 0),  /* PME status can be reset by PCI RST# */

	cp_norx_intr_mask = PciErr | LinkChg | TxOK | TxErr | TxEmpty,
	cp_rx_intr_mask = RxOK | RxErr | RxEmpty | RxFIFOOvr,
	cp_intr_mask = cp_rx_intr_mask | cp_norx_intr_mask,
};

static const unsigned int cp_rx_config =
	  (RX_FIFO_THRESH << RxCfgFIFOShift) |
	  (RX_DMA_BURST << RxCfgDMAShift);
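
/* With the values above, cp_rx_config works out to
 * (5 << 13) | (4 << 8) = 0xa400: a 512-byte Rx FIFO threshold and a
 * 256-byte maximum Rx DMA burst, both encoded as log2(bytes) - 4 per the
 * comment at their definitions.
 */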

struct cp_desc {
	u32		opts1;
	u32		opts2;
	u64		addr;
};

struct ring_info {
	struct sk_buff		*skb;
	dma_addr_t		mapping;
	unsigned		frag;
};

struct cp_dma_stats {
	u64			tx_ok;
	u64			rx_ok;
	u64			tx_err;
	u32			rx_err;
	u16			rx_fifo;
	u16			frame_align;
	u32			tx_ok_1col;
	u32			tx_ok_mcol;
	u64			rx_ok_phys;
	u64			rx_ok_bcast;
	u32			rx_ok_mcast;
	u16			tx_abort;
	u16			tx_underrun;
} __attribute__((packed));

struct cp_extra_stats {
	unsigned long		rx_frags;
};

struct cp_private {
	unsigned		tx_head;
	unsigned		tx_tail;
	unsigned		rx_tail;

	void			*regs;
	struct net_device	*dev;
	spinlock_t		lock;

	struct cp_desc		*rx_ring;
	struct cp_desc		*tx_ring;
	struct ring_info	tx_skb[CP_TX_RING_SIZE];
	struct ring_info	rx_skb[CP_RX_RING_SIZE];
	unsigned		rx_buf_sz;
	dma_addr_t		ring_dma;

#if CP_VLAN_TAG_USED
	struct vlan_group	*vlgrp;
#endif

	u32			msg_enable;

	struct net_device_stats net_stats;
	struct cp_extra_stats	cp_stats;
	struct cp_dma_stats	*nic_stats;
	dma_addr_t		nic_stats_dma;

	struct pci_dev		*pdev;
	u32			rx_config;
	u16			cpcmd;

	unsigned int		wol_enabled : 1; /* Is Wake-on-LAN enabled? */
	u32			power_state[16];

	struct mii_if_info	mii_if;
};

#define cpr8(reg)	readb(cp->regs + (reg))
#define cpr16(reg)	readw(cp->regs + (reg))
#define cpr32(reg)	readl(cp->regs + (reg))
#define cpw8(reg,val)	writeb((val), cp->regs + (reg))
#define cpw16(reg,val)	writew((val), cp->regs + (reg))
#define cpw32(reg,val)	writel((val), cp->regs + (reg))
#define cpw8_f(reg,val) do {			\
	writeb((val), cp->regs + (reg));	\
	readb(cp->regs + (reg));		\
	} while (0)
#define cpw16_f(reg,val) do {			\
	writew((val), cp->regs + (reg));	\
	readw(cp->regs + (reg));		\
	} while (0)
#define cpw32_f(reg,val) do {			\
	writel((val), cp->regs + (reg));	\
	readl(cp->regs + (reg));		\
	} while (0)
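
/* The *_f ("flush") variants read the register straight back after
 * writing it.  PCI MMIO writes may be posted (buffered by host bridges);
 * the read-back forces the write out to the chip before the driver
 * proceeds, which matters whenever the next step assumes the write has
 * already taken effect.
 */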

static void __cp_set_rx_mode (struct net_device *dev);
static void cp_tx (struct cp_private *cp);
static void cp_clean_rings (struct cp_private *cp);

static struct pci_device_id cp_pci_tbl[] = {
	{ PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{ },
};
MODULE_DEVICE_TABLE(pci, cp_pci_tbl);

static struct {
	const char str[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "tx_ok" },
	{ "rx_ok" },
	{ "tx_err" },
	{ "rx_err" },
	{ "rx_fifo" },
	{ "frame_align" },
	{ "tx_ok_1col" },
	{ "tx_ok_mcol" },
	{ "rx_ok_phys" },
	{ "rx_ok_bcast" },
	{ "rx_ok_mcast" },
	{ "tx_abort" },
	{ "tx_underrun" },
	{ "rx_frags" },
};


#if CP_VLAN_TAG_USED
static void cp_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct cp_private *cp = dev->priv;

	spin_lock_irq(&cp->lock);
	cp->vlgrp = grp;
	cp->cpcmd |= RxVlanOn;
	cpw16(CpCmd, cp->cpcmd);
	spin_unlock_irq(&cp->lock);
}

static void cp_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct cp_private *cp = dev->priv;

	spin_lock_irq(&cp->lock);
	cp->cpcmd &= ~RxVlanOn;
	cpw16(CpCmd, cp->cpcmd);
	if (cp->vlgrp)
		cp->vlgrp->vlan_devices[vid] = NULL;
	spin_unlock_irq(&cp->lock);
}
#endif /* CP_VLAN_TAG_USED */

static inline void cp_set_rxbufsize (struct cp_private *cp)
{
	unsigned int mtu = cp->dev->mtu;

	if (mtu > ETH_DATA_LEN)
		/* MTU + ethernet header + FCS + optional VLAN tag */
		cp->rx_buf_sz = mtu + ETH_HLEN + 8;
	else
		cp->rx_buf_sz = PKT_BUF_SZ;
}

static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
			      struct cp_desc *desc)
{
	skb->protocol = eth_type_trans (skb, cp->dev);

	cp->net_stats.rx_packets++;
	cp->net_stats.rx_bytes += skb->len;
	cp->dev->last_rx = jiffies;

#if CP_VLAN_TAG_USED
	if (cp->vlgrp && (desc->opts2 & RxVlanTagged)) {
		vlan_hwaccel_receive_skb(skb, cp->vlgrp,
					 be16_to_cpu(desc->opts2 & 0xffff));
	} else
#endif
		netif_receive_skb(skb);
}

static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
			    u32 status, u32 len)
{
	if (netif_msg_rx_err (cp))
		printk (KERN_DEBUG
			"%s: rx err, slot %d status 0x%x len %d\n",
			cp->dev->name, rx_tail, status, len);
	cp->net_stats.rx_errors++;
	if (status & RxErrFrame)
		cp->net_stats.rx_frame_errors++;
	if (status & RxErrCRC)
		cp->net_stats.rx_crc_errors++;
	if ((status & RxErrRunt) || (status & RxErrLong))
		cp->net_stats.rx_length_errors++;
	if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag))
		cp->net_stats.rx_length_errors++;
	if (status & RxErrFIFO)
		cp->net_stats.rx_fifo_errors++;
}

static inline unsigned int cp_rx_csum_ok (u32 status)
{
	unsigned int protocol = (status >> 16) & 0x3;

	if (likely((protocol == RxProtoTCP) && (!(status & TCPFail))))
		return 1;
	else if ((protocol == RxProtoUDP) && (!(status & UDPFail)))
		return 1;
	else if ((protocol == RxProtoIP) && (!(status & IPFail)))
		return 1;
	return 0;
}
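
/* NAPI receive poll.  Consumes up to dev->quota completed Rx descriptors,
 * hands each good packet up the stack, replaces it with a freshly mapped
 * skb, and returns ownership of the slot to the NIC.  When a pass ends
 * under budget and no further Rx events are pending, Rx interrupts are
 * re-enabled and polling stops until the next interrupt.
 */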
static int cp_rx_poll (struct net_device *dev, int *budget)
{
	struct cp_private *cp = dev->priv;
	unsigned rx_tail = cp->rx_tail;
	unsigned rx_work = dev->quota;
	unsigned rx;

rx_status_loop:
	rx = 0;
	cpw16(IntrStatus, cp_rx_intr_mask);

	while (1) {
		u32 status, len;
		dma_addr_t mapping;
		struct sk_buff *skb, *new_skb;
		struct cp_desc *desc;
		unsigned buflen;

		skb = cp->rx_skb[rx_tail].skb;
		if (!skb)
			BUG();

		desc = &cp->rx_ring[rx_tail];
		status = le32_to_cpu(desc->opts1);
		if (status & DescOwn)
			break;

		len = (status & 0x1fff) - 4;
		mapping = cp->rx_skb[rx_tail].mapping;

		if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) {
			/* we don't support incoming fragmented frames.
			 * instead, we attempt to ensure that the
			 * pre-allocated RX skbs are properly sized such
			 * that RX fragments are never encountered
			 */
			cp_rx_err_acct(cp, rx_tail, status, len);
			cp->net_stats.rx_dropped++;
			cp->cp_stats.rx_frags++;
			goto rx_next;
		}

		if (status & (RxError | RxErrFIFO)) {
			cp_rx_err_acct(cp, rx_tail, status, len);
			goto rx_next;
		}

		if (netif_msg_rx_status(cp))
			printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d\n",
			       cp->dev->name, rx_tail, status, len);

		buflen = cp->rx_buf_sz + RX_OFFSET;
		new_skb = dev_alloc_skb (buflen);
		if (!new_skb) {
			cp->net_stats.rx_dropped++;
			goto rx_next;
		}

		skb_reserve(new_skb, RX_OFFSET);
		new_skb->dev = cp->dev;

		pci_unmap_single(cp->pdev, mapping,
				 buflen, PCI_DMA_FROMDEVICE);

		/* Handle checksum offloading for incoming packets. */
		if (cp_rx_csum_ok(status))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb_put(skb, len);

		mapping =
		cp->rx_skb[rx_tail].mapping =
			pci_map_single(cp->pdev, new_skb->tail,
				       buflen, PCI_DMA_FROMDEVICE);
		cp->rx_skb[rx_tail].skb = new_skb;

		cp_rx_skb(cp, skb, desc);
		rx++;

rx_next:
		cp->rx_ring[rx_tail].opts2 = 0;
		cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping);
		if (rx_tail == (CP_RX_RING_SIZE - 1))
			desc->opts1 = cpu_to_le32(DescOwn | RingEnd |
						  cp->rx_buf_sz);
		else
			desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
		rx_tail = NEXT_RX(rx_tail);

		if (!rx_work--)
			break;
	}

	cp->rx_tail = rx_tail;

	dev->quota -= rx;
	*budget -= rx;

	/* if we did not reach work limit, then we're done with
	 * this round of polling
	 */
	if (rx_work) {
		if (cpr16(IntrStatus) & cp_rx_intr_mask)
			goto rx_status_loop;

		local_irq_disable();
		cpw16_f(IntrMask, cp_intr_mask);
		__netif_rx_complete(dev);
		local_irq_enable();

		return 0;	/* done */
	}

	return 1;		/* not done */
}
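
/* The interrupt handler acknowledges everything except the Rx sources;
 * those bits are left set and their interrupts masked so that the NAPI
 * poll routine above can pick them up.  Tx completion, link changes and
 * PCI errors are handled here directly.
 */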
static irqreturn_t
cp_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = dev_instance;
	struct cp_private *cp = dev->priv;
	u16 status;

	status = cpr16(IntrStatus);
	if (!status || (status == 0xFFFF))
		return IRQ_NONE;

	if (netif_msg_intr(cp))
		printk(KERN_DEBUG "%s: intr, status %04x cmd %02x cpcmd %04x\n",
		       dev->name, status, cpr8(Cmd), cpr16(CpCmd));

	cpw16(IntrStatus, status & ~cp_rx_intr_mask);

	spin_lock(&cp->lock);

	/* close possible races with dev_close */
	if (unlikely(!netif_running(dev))) {
		cpw16(IntrMask, 0);
		goto out;
	}

	if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr)) {
		if (netif_rx_schedule_prep(dev)) {
			cpw16_f(IntrMask, cp_norx_intr_mask);
			__netif_rx_schedule(dev);
		}
	}
	if (status & (TxOK | TxErr | TxEmpty | SWInt))
		cp_tx(cp);
	if (status & LinkChg)
		mii_check_media(&cp->mii_if, netif_msg_link(cp), FALSE);

	if (status & PciErr) {
		u16 pci_status;

		pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status);
		pci_write_config_word(cp->pdev, PCI_STATUS, pci_status);
		printk(KERN_ERR "%s: PCI bus error, status=%04x, PCI status=%04x\n",
		       dev->name, status, pci_status);

		/* TODO: reset hardware */
	}
out:
	spin_unlock(&cp->lock);
	return IRQ_HANDLED;
}
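
/* Reclaim completed Tx descriptors.  Walk the ring from tx_tail until a
 * descriptor still owned by the NIC is found, unmapping each buffer as we
 * go; error and collision statistics come from the status word of the
 * final fragment, and the queue is woken once enough slots are free for
 * a maximally fragmented skb.
 */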
static void cp_tx (struct cp_private *cp)
{
	unsigned tx_head = cp->tx_head;
	unsigned tx_tail = cp->tx_tail;

	while (tx_tail != tx_head) {
		struct sk_buff *skb;
		u32 status;

		rmb();
		status = le32_to_cpu(cp->tx_ring[tx_tail].opts1);
		if (status & DescOwn)
			break;

		skb = cp->tx_skb[tx_tail].skb;
		if (!skb)
			BUG();

		pci_unmap_single(cp->pdev, cp->tx_skb[tx_tail].mapping,
				 skb->len, PCI_DMA_TODEVICE);

		if (status & LastFrag) {
			if (status & (TxError | TxFIFOUnder)) {
				if (netif_msg_tx_err(cp))
					printk(KERN_DEBUG "%s: tx err, status 0x%x\n",
					       cp->dev->name, status);
				cp->net_stats.tx_errors++;
				if (status & TxOWC)
					cp->net_stats.tx_window_errors++;
				if (status & TxMaxCol)
					cp->net_stats.tx_aborted_errors++;
				if (status & TxLinkFail)
					cp->net_stats.tx_carrier_errors++;
				if (status & TxFIFOUnder)
					cp->net_stats.tx_fifo_errors++;
			} else {
				cp->net_stats.collisions +=
					((status >> TxColCntShift) & TxColCntMask);
				cp->net_stats.tx_packets++;
				cp->net_stats.tx_bytes += skb->len;
				if (netif_msg_tx_done(cp))
					printk(KERN_DEBUG "%s: tx done, slot %d\n", cp->dev->name, tx_tail);
			}
			dev_kfree_skb_irq(skb);
		}

		cp->tx_skb[tx_tail].skb = NULL;

		tx_tail = NEXT_TX(tx_tail);
	}

	cp->tx_tail = tx_tail;

	if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
		netif_wake_queue(cp->dev);
}
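
/* Queue one skb for transmission.  A linear skb takes the fast path: one
 * descriptor marked FirstFrag | LastFrag.  A fragmented skb gets one
 * descriptor per fragment, and the first descriptor is handed to the NIC
 * (DescOwn) only after all the others are in place, so the chip can never
 * walk into a half-built chain.  A TxPoll write then kicks the NIC.
 */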
static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
{
	struct cp_private *cp = dev->priv;
	unsigned entry;
	u32 eor;
#if CP_VLAN_TAG_USED
	u32 vlan_tag = 0;
#endif

	spin_lock_irq(&cp->lock);

	/* This is a hard error, log it. */
	if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
		netif_stop_queue(dev);
		spin_unlock_irq(&cp->lock);
		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
		       dev->name);
		return 1;
	}

#if CP_VLAN_TAG_USED
	if (cp->vlgrp && vlan_tx_tag_present(skb))
		vlan_tag = TxVlanTag | cpu_to_be16(vlan_tx_tag_get(skb));
#endif

	entry = cp->tx_head;
	eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
	if (skb_shinfo(skb)->nr_frags == 0) {
		struct cp_desc *txd = &cp->tx_ring[entry];
		u32 len;
		dma_addr_t mapping;

		len = skb->len;
		mapping = pci_map_single(cp->pdev, skb->data, len, PCI_DMA_TODEVICE);
		CP_VLAN_TX_TAG(txd, vlan_tag);
		txd->addr = cpu_to_le64(mapping);
		wmb();

		if (skb->ip_summed == CHECKSUM_HW) {
			const struct iphdr *ip = skb->nh.iph;
			if (ip->protocol == IPPROTO_TCP)
				txd->opts1 = cpu_to_le32(eor | len | DescOwn |
							 FirstFrag | LastFrag |
							 IPCS | TCPCS);
			else if (ip->protocol == IPPROTO_UDP)
				txd->opts1 = cpu_to_le32(eor | len | DescOwn |
							 FirstFrag | LastFrag |
							 IPCS | UDPCS);
			else
				BUG();
		} else
			txd->opts1 = cpu_to_le32(eor | len | DescOwn |
						 FirstFrag | LastFrag);
		wmb();

		cp->tx_skb[entry].skb = skb;
		cp->tx_skb[entry].mapping = mapping;
		cp->tx_skb[entry].frag = 0;
		entry = NEXT_TX(entry);
	} else {
		struct cp_desc *txd;
		u32 first_len, first_eor;
		dma_addr_t first_mapping;
		int frag, first_entry = entry;
		const struct iphdr *ip = skb->nh.iph;

		/* We must give this initial chunk to the device last.
		 * Otherwise we could race with the device.
		 */
		first_eor = eor;
		first_len = skb_headlen(skb);
		first_mapping = pci_map_single(cp->pdev, skb->data,
					       first_len, PCI_DMA_TODEVICE);
		cp->tx_skb[entry].skb = skb;
		cp->tx_skb[entry].mapping = first_mapping;
		cp->tx_skb[entry].frag = 1;
		entry = NEXT_TX(entry);

		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
			skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
			u32 len;
			u32 ctrl;
			dma_addr_t mapping;

			len = this_frag->size;
			mapping = pci_map_single(cp->pdev,
						 ((void *) page_address(this_frag->page) +
						  this_frag->page_offset),
						 len, PCI_DMA_TODEVICE);
			eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;

			if (skb->ip_summed == CHECKSUM_HW) {
				ctrl = eor | len | DescOwn | IPCS;
				if (ip->protocol == IPPROTO_TCP)
					ctrl |= TCPCS;
				else if (ip->protocol == IPPROTO_UDP)
					ctrl |= UDPCS;
				else
					BUG();
			} else
				ctrl = eor | len | DescOwn;

			if (frag == skb_shinfo(skb)->nr_frags - 1)
				ctrl |= LastFrag;

			txd = &cp->tx_ring[entry];
			CP_VLAN_TX_TAG(txd, vlan_tag);
			txd->addr = cpu_to_le64(mapping);
			wmb();

			txd->opts1 = cpu_to_le32(ctrl);
			wmb();

			cp->tx_skb[entry].skb = skb;
			cp->tx_skb[entry].mapping = mapping;
			cp->tx_skb[entry].frag = frag + 2;
			entry = NEXT_TX(entry);
		}

		txd = &cp->tx_ring[first_entry];
		CP_VLAN_TX_TAG(txd, vlan_tag);
		txd->addr = cpu_to_le64(first_mapping);
		wmb();

		if (skb->ip_summed == CHECKSUM_HW) {
			if (ip->protocol == IPPROTO_TCP)
				txd->opts1 = cpu_to_le32(first_eor | first_len |
							 FirstFrag | DescOwn |
							 IPCS | TCPCS);
			else if (ip->protocol == IPPROTO_UDP)
				txd->opts1 = cpu_to_le32(first_eor | first_len |
							 FirstFrag | DescOwn |
							 IPCS | UDPCS);
			else
				BUG();
		} else
			txd->opts1 = cpu_to_le32(first_eor | first_len |
						 FirstFrag | DescOwn);
		wmb();
	}
	cp->tx_head = entry;
	if (netif_msg_tx_queued(cp))
		printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
		       dev->name, entry, skb->len);
	if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
		netif_stop_queue(dev);

	spin_unlock_irq(&cp->lock);

	cpw8(TxPoll, NormalTxPoll);
	dev->trans_start = jiffies;

	return 0;
}

/* Set or clear the multicast filter for this adaptor.
   This routine is not state sensitive and need not be SMP locked. */

static void __cp_set_rx_mode (struct net_device *dev)
{
	struct cp_private *cp = dev->priv;
	u32 mc_filter[2];	/* Multicast hash filter */
	int i, rx_mode;
	u32 tmp;

	/* Note: do not reorder, GCC is clever about common statements. */
	if (dev->flags & IFF_PROMISC) {
		/* Unconditionally log net taps. */
		printk (KERN_NOTICE "%s: Promiscuous mode enabled.\n",
			dev->name);
		rx_mode =
		    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
		    AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((dev->mc_count > multicast_filter_limit)
		   || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		struct dev_mc_list *mclist;
		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		/* The top six bits of the address CRC select one of the 64
		 * hash bins: bit_nr >> 5 picks the MAR register word and
		 * bit_nr & 31 the bit within it.
		 */
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;

			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
			rx_mode |= AcceptMulticast;
		}
	}

	/* We can safely update without stopping the chip. */
	tmp = cp_rx_config | rx_mode;
	if (cp->rx_config != tmp) {
		cpw32_f (RxConfig, tmp);
		cp->rx_config = tmp;
	}
	cpw32_f (MAR0 + 0, mc_filter[0]);
	cpw32_f (MAR0 + 4, mc_filter[1]);
}

static void cp_set_rx_mode (struct net_device *dev)
{
	unsigned long flags;
	struct cp_private *cp = dev->priv;

	spin_lock_irqsave (&cp->lock, flags);
	__cp_set_rx_mode(dev);
	spin_unlock_irqrestore (&cp->lock, flags);
}

static void __cp_get_stats(struct cp_private *cp)
{
	/* only lower 24 bits valid; write any value to clear */
	cp->net_stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff);
	cpw32 (RxMissed, 0);
}

static struct net_device_stats *cp_get_stats(struct net_device *dev)
{
	struct cp_private *cp = dev->priv;

	/* The chip only needs to report frames it silently dropped. */
	spin_lock_irq(&cp->lock);
	if (netif_running(dev) && netif_device_present(dev))
		__cp_get_stats(cp);
	spin_unlock_irq(&cp->lock);

	return &cp->net_stats;
}

static void cp_stop_hw (struct cp_private *cp)
{
	struct net_device *dev = cp->dev;

	cpw16(IntrStatus, ~(cpr16(IntrStatus)));
	cpw16_f(IntrMask, 0);
	cpw8(Cmd, 0);
	cpw16_f(CpCmd, 0);
	cpw16(IntrStatus, ~(cpr16(IntrStatus)));
	synchronize_irq();
	udelay(10);

	cp->rx_tail = 0;
	cp->tx_head = cp->tx_tail = 0;

	(void) dev; /* avoid compiler warning when synchronize_irq()
		     * disappears during !CONFIG_SMP
		     */
}
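
/* Soft-reset the chip.  CmdReset is self-clearing; poll for it to drop,
 * sleeping between checks, and complain if the hardware never comes back.
 */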
static void cp_reset_hw (struct cp_private *cp)
{
	unsigned work = 1000;

	cpw8(Cmd, CmdReset);

	while (work--) {
		if (!(cpr8(Cmd) & CmdReset))
			return;

		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(10);
	}

	printk(KERN_ERR "%s: hardware reset timeout\n", cp->dev->name);
}

static inline void cp_start_hw (struct cp_private *cp)
{
	cpw16(CpCmd, cp->cpcmd);
	cpw8(Cmd, RxOn | TxOn);
}

static void cp_init_hw (struct cp_private *cp)
{
	struct net_device *dev = cp->dev;

	cp_reset_hw(cp);

	cpw8_f (Cfg9346, Cfg9346_Unlock);

	/* Restore our idea of the MAC address. */
	cpw32_f (MAC0 + 0, cpu_to_le32 (*(u32 *) (dev->dev_addr + 0)));
	cpw32_f (MAC0 + 4, cpu_to_le32 (*(u32 *) (dev->dev_addr + 4)));

	cp_start_hw(cp);
	cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */

	__cp_set_rx_mode(dev);
	cpw32_f (TxConfig, IFG | (TX_DMA_BURST << TxDMAShift));

	cpw8(Config1, cpr8(Config1) | DriverLoaded | PMEnable);
	/* Disable Wake-on-LAN. Can be turned on with ETHTOOL_SWOL */
	cpw8(Config3, PARMEnable);
	cp->wol_enabled = 0;

	cpw8(Config5, cpr8(Config5) & PMEStatus);

	cpw32_f(HiTxRingAddr, 0);
	cpw32_f(HiTxRingAddr + 4, 0);

	cpw32_f(RxRingAddr, cp->ring_dma);
	cpw32_f(RxRingAddr + 4, 0); /* FIXME: 64-bit PCI */
	cpw32_f(TxRingAddr, cp->ring_dma + (sizeof(struct cp_desc) * CP_RX_RING_SIZE));
	cpw32_f(TxRingAddr + 4, 0); /* FIXME: 64-bit PCI */

	cpw16(MultiIntr, 0);

	cpw16_f(IntrMask, cp_intr_mask);

	cpw8_f(Cfg9346, Cfg9346_Lock);
}
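
/* Fill the Rx ring: allocate and DMA-map one skb per slot.  The RX_OFFSET
 * (2-byte) reservation is the usual trick that longword-aligns the IP
 * header behind the 14-byte Ethernet header.  Every descriptor is marked
 * DescOwn, the last one additionally RingEnd; on allocation failure the
 * whole ring is unwound via cp_clean_rings().
 */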
static int cp_refill_rx (struct cp_private *cp)
{
	unsigned i;

	for (i = 0; i < CP_RX_RING_SIZE; i++) {
		struct sk_buff *skb;

		skb = dev_alloc_skb(cp->rx_buf_sz + RX_OFFSET);
		if (!skb)
			goto err_out;

		skb->dev = cp->dev;
		skb_reserve(skb, RX_OFFSET);

		cp->rx_skb[i].mapping = pci_map_single(cp->pdev,
			skb->tail, cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
		cp->rx_skb[i].skb = skb;
		cp->rx_skb[i].frag = 0;

		cp->rx_ring[i].opts2 = 0;
		cp->rx_ring[i].addr = cpu_to_le64(cp->rx_skb[i].mapping);
		if (i == (CP_RX_RING_SIZE - 1))
			cp->rx_ring[i].opts1 =
				cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz);
		else
			cp->rx_ring[i].opts1 =
				cpu_to_le32(DescOwn | cp->rx_buf_sz);
	}

	return 0;

err_out:
	cp_clean_rings(cp);
	return -ENOMEM;
}

static int cp_init_rings (struct cp_private *cp)
{
	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
	cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);

	cp->rx_tail = 0;
	cp->tx_head = cp->tx_tail = 0;

	return cp_refill_rx (cp);
}
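
/* A single coherent DMA block backs everything, laid out exactly as
 * CP_RING_BYTES describes:
 *
 *   ring_dma: [64 Rx descriptors][64 Tx descriptors][64-byte stats block]
 *
 * tx_ring therefore points just past the Rx descriptors, and the NIC's
 * DMA statistics area sits at the very end of the allocation.
 */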
static int cp_alloc_rings (struct cp_private *cp)
{
	void *mem;

	mem = pci_alloc_consistent(cp->pdev, CP_RING_BYTES, &cp->ring_dma);
	if (!mem)
		return -ENOMEM;

	cp->rx_ring = mem;
	cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];

	mem += (CP_RING_BYTES - CP_STATS_SIZE);
	cp->nic_stats = mem;
	cp->nic_stats_dma = cp->ring_dma + (CP_RING_BYTES - CP_STATS_SIZE);

	return cp_init_rings(cp);
}

static void cp_clean_rings (struct cp_private *cp)
{
	unsigned i;

	memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);

	for (i = 0; i < CP_RX_RING_SIZE; i++) {
		if (cp->rx_skb[i].skb) {
			pci_unmap_single(cp->pdev, cp->rx_skb[i].mapping,
					 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(cp->rx_skb[i].skb);
		}
	}

	for (i = 0; i < CP_TX_RING_SIZE; i++) {
		if (cp->tx_skb[i].skb) {
			struct sk_buff *skb = cp->tx_skb[i].skb;
			pci_unmap_single(cp->pdev, cp->tx_skb[i].mapping,
					 skb->len, PCI_DMA_TODEVICE);
			dev_kfree_skb(skb);
			cp->net_stats.tx_dropped++;
		}
	}

	memset(&cp->rx_skb, 0, sizeof(struct ring_info) * CP_RX_RING_SIZE);
	memset(&cp->tx_skb, 0, sizeof(struct ring_info) * CP_TX_RING_SIZE);
}

static void cp_free_rings (struct cp_private *cp)
{
	cp_clean_rings(cp);
	pci_free_consistent(cp->pdev, CP_RING_BYTES, cp->rx_ring, cp->ring_dma);
	cp->rx_ring = NULL;
	cp->tx_ring = NULL;
	cp->nic_stats = NULL;
}

static int cp_open (struct net_device *dev)
{
	struct cp_private *cp = dev->priv;
	int rc;

	if (netif_msg_ifup(cp))
		printk(KERN_DEBUG "%s: enabling interface\n", dev->name);

	rc = cp_alloc_rings(cp);
	if (rc)
		return rc;

	cp_init_hw(cp);

	rc = request_irq(dev->irq, cp_interrupt, SA_SHIRQ, dev->name, dev);
	if (rc)
		goto err_out_hw;

	netif_carrier_off(dev);
	mii_check_media(&cp->mii_if, netif_msg_link(cp), TRUE);
	netif_start_queue(dev);

	return 0;

err_out_hw:
	cp_stop_hw(cp);
	cp_free_rings(cp);
	return rc;
}

static int cp_close (struct net_device *dev)
{
	struct cp_private *cp = dev->priv;

	if (netif_msg_ifdown(cp))
		printk(KERN_DEBUG "%s: disabling interface\n", dev->name);

	netif_stop_queue(dev);
	netif_carrier_off(dev);

	spin_lock_irq(&cp->lock);
	cp_stop_hw(cp);
	spin_unlock_irq(&cp->lock);

	free_irq(dev->irq, dev);
	cp_free_rings(cp);
	return 0;
}

#ifdef BROKEN
static int cp_change_mtu(struct net_device *dev, int new_mtu)
{
	struct cp_private *cp = dev->priv;
	int rc;

	/* check for invalid MTU, according to hardware limits */
	if (new_mtu < CP_MIN_MTU || new_mtu > CP_MAX_MTU)
		return -EINVAL;

	/* if network interface not up, no need for complexity */
	if (!netif_running(dev)) {
		dev->mtu = new_mtu;
		cp_set_rxbufsize(cp);	/* set new rx buf size */
		return 0;
	}

	spin_lock_irq(&cp->lock);

	cp_stop_hw(cp);			/* stop h/w and free rings */
	cp_clean_rings(cp);

	dev->mtu = new_mtu;
	cp_set_rxbufsize(cp);		/* set new rx buf size */

	rc = cp_init_rings(cp);		/* realloc and restart h/w */
	cp_start_hw(cp);

	spin_unlock_irq(&cp->lock);

	return rc;
}
#endif /* BROKEN */

static char mii_2_8139_map[8] = {
	BasicModeCtrl,
	BasicModeStatus,
	0,
	0,
	NWayAdvert,
	NWayLPAR,
	NWayExpansion,
	0
};
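
/* The table above maps the generic MII register numbers used by the mii
 * library (BMCR=0, BMSR=1, ADVERTISE=4, LPA=5, EXPANSION=6) onto 8139C+
 * register offsets.  The zero entries (PHYSID1/PHYSID2) have no window
 * onto the internal PHY, so mdio_read() below returns 0 for them.
 */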
|
1248 |
|
|
|
1249 |
|
|
static int mdio_read(struct net_device *dev, int phy_id, int location)
|
1250 |
|
|
{
|
1251 |
|
|
struct cp_private *cp = dev->priv;
|
1252 |
|
|
|
1253 |
|
|
return location < 8 && mii_2_8139_map[location] ?
|
1254 |
|
|
readw(cp->regs + mii_2_8139_map[location]) : 0;
|
1255 |
|
|
}
|
1256 |
|
|
|
1257 |
|
|
|
1258 |
|
|
static void mdio_write(struct net_device *dev, int phy_id, int location,
|
1259 |
|
|
int value)
|
1260 |
|
|
{
|
1261 |
|
|
struct cp_private *cp = dev->priv;
|
1262 |
|
|
|
1263 |
|
|
if (location == 0) {
|
1264 |
|
|
cpw8(Cfg9346, Cfg9346_Unlock);
|
1265 |
|
|
cpw16(BasicModeCtrl, value);
|
1266 |
|
|
cpw8(Cfg9346, Cfg9346_Lock);
|
1267 |
|
|
} else if (location < 8 && mii_2_8139_map[location])
|
1268 |
|
|
cpw16(mii_2_8139_map[location], value);
|
1269 |
|
|
}
|
1270 |
|
|
|
1271 |
|
|
/* Set the ethtool Wake-on-LAN settings */
|
1272 |
|
|
static int netdev_set_wol (struct cp_private *cp,
|
1273 |
|
|
const struct ethtool_wolinfo *wol)
|
1274 |
|
|
{
|
1275 |
|
|
u8 options;
|
1276 |
|
|
|
1277 |
|
|
options = cpr8 (Config3) & ~(LinkUp | MagicPacket);
|
1278 |
|
|
/* If WOL is being disabled, no need for complexity */
|
1279 |
|
|
if (wol->wolopts) {
|
1280 |
|
|
if (wol->wolopts & WAKE_PHY) options |= LinkUp;
|
1281 |
|
|
if (wol->wolopts & WAKE_MAGIC) options |= MagicPacket;
|
1282 |
|
|
}
|
1283 |
|
|
|
1284 |
|
|
cpw8 (Cfg9346, Cfg9346_Unlock);
|
1285 |
|
|
cpw8 (Config3, options);
|
1286 |
|
|
cpw8 (Cfg9346, Cfg9346_Lock);
|
1287 |
|
|
|
1288 |
|
|
options = 0; /* Paranoia setting */
|
1289 |
|
|
options = cpr8 (Config5) & ~(UWF | MWF | BWF);
|
1290 |
|
|
/* If WOL is being disabled, no need for complexity */
|
1291 |
|
|
if (wol->wolopts) {
|
1292 |
|
|
if (wol->wolopts & WAKE_UCAST) options |= UWF;
|
1293 |
|
|
if (wol->wolopts & WAKE_BCAST) options |= BWF;
|
1294 |
|
|
if (wol->wolopts & WAKE_MCAST) options |= MWF;
|
1295 |
|
|
}
|
1296 |
|
|
|
1297 |
|
|
cpw8 (Config5, options);
|
1298 |
|
|
|
1299 |
|
|
cp->wol_enabled = (wol->wolopts) ? 1 : 0;
|
1300 |
|
|
|
1301 |
|
|
return 0;
|
1302 |
|
|
}
|
1303 |
|
|
|
1304 |
|
|
/* Get the ethtool Wake-on-LAN settings */
|
1305 |
|
|
static void netdev_get_wol (struct cp_private *cp,
|
1306 |
|
|
struct ethtool_wolinfo *wol)
|
1307 |
|
|
{
|
1308 |
|
|
u8 options;
|
1309 |
|
|
|
1310 |
|
|
wol->wolopts = 0; /* Start from scratch */
|
1311 |
|
|
wol->supported = WAKE_PHY | WAKE_BCAST | WAKE_MAGIC |
|
1312 |
|
|
WAKE_MCAST | WAKE_UCAST;
|
1313 |
|
|
/* We don't need to go on if WOL is disabled */
|
1314 |
|
|
if (!cp->wol_enabled) return;
|
1315 |
|
|
|
1316 |
|
|
options = cpr8 (Config3);
|
1317 |
|
|
if (options & LinkUp) wol->wolopts |= WAKE_PHY;
|
1318 |
|
|
if (options & MagicPacket) wol->wolopts |= WAKE_MAGIC;
|
1319 |
|
|
|
1320 |
|
|
options = 0; /* Paranoia setting */
|
1321 |
|
|
options = cpr8 (Config5);
|
1322 |
|
|
if (options & UWF) wol->wolopts |= WAKE_UCAST;
|
1323 |
|
|
if (options & BWF) wol->wolopts |= WAKE_BCAST;
|
1324 |
|
|
if (options & MWF) wol->wolopts |= WAKE_MCAST;
|
1325 |
|
|
}
|
1326 |
|
|
|
1327 |
|
|
static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
|
1328 |
|
|
{
|
1329 |
|
|
struct cp_private *cp = dev->priv;
|
1330 |
|
|
|
1331 |
|
|
strcpy (info->driver, DRV_NAME);
|
1332 |
|
|
strcpy (info->version, DRV_VERSION);
|
1333 |
|
|
strcpy (info->bus_info, pci_name(cp->pdev));
|
1334 |
|
|
}
|
1335 |
|
|
|
1336 |
|
|
static int cp_get_regs_len(struct net_device *dev)
|
1337 |
|
|
{
|
1338 |
|
|
return CP_REGS_SIZE;
|
1339 |
|
|
}
|
1340 |
|
|
|
1341 |
|
|
static int cp_get_stats_count (struct net_device *dev)
|
1342 |
|
|
{
|
1343 |
|
|
return CP_NUM_STATS;
|
1344 |
|
|
}
|
1345 |
|
|
|
1346 |
|
|
static int cp_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
|
1347 |
|
|
{
|
1348 |
|
|
struct cp_private *cp = dev->priv;
|
1349 |
|
|
int rc;
|
1350 |
|
|
|
1351 |
|
|
spin_lock_irq(&cp->lock);
|
1352 |
|
|
rc = mii_ethtool_gset(&cp->mii_if, cmd);
|
1353 |
|
|
spin_unlock_irq(&cp->lock);
|
1354 |
|
|
|
1355 |
|
|
return rc;
|
1356 |
|
|
}
|
1357 |
|
|
|
1358 |
|
|
static int cp_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
|
1359 |
|
|
{
|
1360 |
|
|
struct cp_private *cp = dev->priv;
|
1361 |
|
|
int rc;
|
1362 |
|
|
|
1363 |
|
|
spin_lock_irq(&cp->lock);
|
1364 |
|
|
rc = mii_ethtool_sset(&cp->mii_if, cmd);
|
1365 |
|
|
spin_unlock_irq(&cp->lock);
|
1366 |
|
|
|
1367 |
|
|
return rc;
|
1368 |
|
|
}
|
1369 |
|
|
|
1370 |
|
|
static int cp_nway_reset(struct net_device *dev)
|
1371 |
|
|
{
|
1372 |
|
|
struct cp_private *cp = dev->priv;
|
1373 |
|
|
return mii_nway_restart(&cp->mii_if);
|
1374 |
|
|
}
|
1375 |
|
|
|
1376 |
|
|
static u32 cp_get_msglevel(struct net_device *dev)
|
1377 |
|
|
{
|
1378 |
|
|
struct cp_private *cp = dev->priv;
|
1379 |
|
|
return cp->msg_enable;
|
1380 |
|
|
}
|
1381 |
|
|
|
1382 |
|
|
static void cp_set_msglevel(struct net_device *dev, u32 value)
|
1383 |
|
|
{
|
1384 |
|
|
struct cp_private *cp = dev->priv;
|
1385 |
|
|
cp->msg_enable = value;
|
1386 |
|
|
}
|
1387 |
|
|
|
1388 |
|
|
static u32 cp_get_rx_csum(struct net_device *dev)
|
1389 |
|
|
{
|
1390 |
|
|
struct cp_private *cp = dev->priv;
|
1391 |
|
|
return (cpr16(CpCmd) & RxChkSum) ? 1 : 0;
|
1392 |
|
|
}
|
1393 |
|
|
|
1394 |
|
|
static int cp_set_rx_csum(struct net_device *dev, u32 data)
|
1395 |
|
|
{
|
1396 |
|
|
struct cp_private *cp = dev->priv;
|
1397 |
|
|
u16 cmd = cp->cpcmd, newcmd;
|
1398 |
|
|
|
1399 |
|
|
newcmd = cmd;
|
1400 |
|
|
|
1401 |
|
|
if (data)
|
1402 |
|
|
newcmd |= RxChkSum;
|
1403 |
|
|
else
|
1404 |
|
|
newcmd &= ~RxChkSum;
|
1405 |
|
|
|
1406 |
|
|
if (newcmd != cmd) {
|
1407 |
|
|
spin_lock_irq(&cp->lock);
|
1408 |
|
|
cp->cpcmd = newcmd;
|
1409 |
|
|
cpw16_f(CpCmd, newcmd);
|
1410 |
|
|
spin_unlock_irq(&cp->lock);
|
1411 |
|
|
}
|
1412 |
|
|
|
1413 |
|
|
return 0;
|
1414 |
|
|
}
|
1415 |
|
|
|
1416 |
|
|
static void cp_get_regs(struct net_device *dev, struct ethtool_regs *regs,
|
1417 |
|
|
void *p)
|
1418 |
|
|
{
|
1419 |
|
|
struct cp_private *cp = dev->priv;
|
1420 |
|
|
|
1421 |
|
|
if (regs->len < CP_REGS_SIZE)
|
1422 |
|
|
return /* -EINVAL */;
|
1423 |
|
|
|
1424 |
|
|
regs->version = CP_REGS_VER;
|
1425 |
|
|
|
1426 |
|
|
spin_lock_irq(&cp->lock);
|
1427 |
|
|
memcpy_fromio(p, cp->regs, CP_REGS_SIZE);
|
1428 |
|
|
spin_unlock_irq(&cp->lock);
|
1429 |
|
|
}
|
1430 |
|
|
|
1431 |
|
|
static void cp_get_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
|
1432 |
|
|
{
|
1433 |
|
|
struct cp_private *cp = dev->priv;
|
1434 |
|
|
|
1435 |
|
|
spin_lock_irq (&cp->lock);
|
1436 |
|
|
netdev_get_wol (cp, wol);
|
1437 |
|
|
spin_unlock_irq (&cp->lock);
|
1438 |
|
|
}
|
1439 |
|
|
|
1440 |
|
|
static int cp_set_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
|
1441 |
|
|
{
|
1442 |
|
|
struct cp_private *cp = dev->priv;
|
1443 |
|
|
int rc;
|
1444 |
|
|
|
1445 |
|
|
spin_lock_irq (&cp->lock);
|
1446 |
|
|
rc = netdev_set_wol (cp, wol);
|
1447 |
|
|
spin_unlock_irq (&cp->lock);
|
1448 |
|
|
|
1449 |
|
|
return rc;
|
1450 |
|
|
}
|
1451 |
|
|
|
1452 |
|
|
static void cp_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
|
1453 |
|
|
{
|
1454 |
|
|
switch (stringset) {
|
1455 |
|
|
case ETH_SS_STATS:
|
1456 |
|
|
memcpy(buf, ðtool_stats_keys, sizeof(ethtool_stats_keys));
|
1457 |
|
|
break;
|
1458 |
|
|
default:
|
1459 |
|
|
BUG();
|
1460 |
|
|
break;
|
1461 |
|
|
}
|
1462 |
|
|
}
|
1463 |
|
|
|
1464 |
|
|
static void cp_get_ethtool_stats (struct net_device *dev,
				  struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct cp_private *cp = dev->priv;
	unsigned int work = 100;
	int i;

	/* begin NIC statistics dump */
	cpw32(StatsAddr + 4, 0); /* FIXME: 64-bit PCI */
	cpw32(StatsAddr, cp->nic_stats_dma | DumpStats);
	cpr32(StatsAddr);

	while (work-- > 0) {
		if ((cpr32(StatsAddr) & DumpStats) == 0)
			break;
		cpu_relax();
	}

	if (cpr32(StatsAddr) & DumpStats)
		return /* -EIO */;

	i = 0;
	tmp_stats[i++] = le64_to_cpu(cp->nic_stats->tx_ok);
	tmp_stats[i++] = le64_to_cpu(cp->nic_stats->rx_ok);
	tmp_stats[i++] = le64_to_cpu(cp->nic_stats->tx_err);
	tmp_stats[i++] = le32_to_cpu(cp->nic_stats->rx_err);
	tmp_stats[i++] = le16_to_cpu(cp->nic_stats->rx_fifo);
	tmp_stats[i++] = le16_to_cpu(cp->nic_stats->frame_align);
	tmp_stats[i++] = le32_to_cpu(cp->nic_stats->tx_ok_1col);
	tmp_stats[i++] = le32_to_cpu(cp->nic_stats->tx_ok_mcol);
	tmp_stats[i++] = le64_to_cpu(cp->nic_stats->rx_ok_phys);
	tmp_stats[i++] = le64_to_cpu(cp->nic_stats->rx_ok_bcast);
	tmp_stats[i++] = le32_to_cpu(cp->nic_stats->rx_ok_mcast);
	tmp_stats[i++] = le16_to_cpu(cp->nic_stats->tx_abort);
	tmp_stats[i++] = le16_to_cpu(cp->nic_stats->tx_underrun);
	tmp_stats[i++] = cp->cp_stats.rx_frags;
	if (i != CP_NUM_STATS)
		BUG();
}

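/*
 * Note on the dump handshake above: writing the stats buffer's DMA
 * address with the DumpStats bit set asks the chip to DMA its hardware
 * counters into host memory; the chip clears DumpStats when the dump
 * completes, which is what the bounded polling loop waits for. If the
 * bit never clears we return early without filling tmp_stats, since
 * the ethtool stats callback has no error return.
 */
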
static struct ethtool_ops cp_ethtool_ops = {
	.get_drvinfo		= cp_get_drvinfo,
	.get_regs_len		= cp_get_regs_len,
	.get_stats_count	= cp_get_stats_count,
	.get_settings		= cp_get_settings,
	.set_settings		= cp_set_settings,
	.nway_reset		= cp_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= cp_get_msglevel,
	.set_msglevel		= cp_set_msglevel,
	.get_rx_csum		= cp_get_rx_csum,
	.set_rx_csum		= cp_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum, /* local! */
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_regs		= cp_get_regs,
	.get_wol		= cp_get_wol,
	.set_wol		= cp_set_wol,
	.get_strings		= cp_get_strings,
	.get_ethtool_stats	= cp_get_ethtool_stats,
};

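/*
 * These ops are dispatched by the ethtool core when userspace issues
 * SIOCETHTOOL against this interface; for example ("eth0" hypothetical):
 *
 *	ethtool -S eth0		-> get_stats_count/get_strings/get_ethtool_stats
 *	ethtool -d eth0		-> get_regs_len + get_regs
 *	ethtool -K eth0 sg on	-> set_sg
 */
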
static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct cp_private *cp = dev->priv;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *) &rq->ifr_data;
	int rc;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irq(&cp->lock);
	rc = generic_mii_ioctl(&cp->mii_if, mii, cmd, NULL);
	spin_unlock_irq(&cp->lock);
	return rc;
}

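/*
 * generic_mii_ioctl() services the standard MII ioctls (SIOCGMIIPHY,
 * SIOCGMIIREG, SIOCSMIIREG) through the mdio_read/mdio_write hooks
 * registered in cp_init_one(), so tools such as mii-tool work without
 * any driver-specific ioctl handling here.
 */
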
/* Serial EEPROM section. */

/* EEPROM_Ctrl bits. */
#define EE_SHIFT_CLK	0x04	/* EEPROM shift clock. */
#define EE_CS		0x08	/* EEPROM chip select. */
#define EE_DATA_WRITE	0x02	/* EEPROM chip data in. */
#define EE_WRITE_0	0x00
#define EE_WRITE_1	0x02
#define EE_DATA_READ	0x01	/* EEPROM chip data out. */
#define EE_ENB		(0x80 | EE_CS)

/* Delay between EEPROM clock transitions.
   No extra delay is needed with 33 MHz PCI, but 66 MHz may change this.
 */

#define eeprom_delay()	readl(ee_addr)

/* The EEPROM commands include the always-set leading bit. */
#define EE_WRITE_CMD	(5)
#define EE_READ_CMD	(6)
#define EE_ERASE_CMD	(7)

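/*
 * Protocol sketch (standard 93C46-style serial EEPROM, MSB first):
 * with chip select raised, shift out the start bit, opcode and address
 * (the read opcode is EE_READ_CMD merged above the address bits), then
 * clock in 16 data bits on EE_DATA_READ. read_eeprom() below bit-bangs
 * exactly this sequence through the Cfg9346 register.
 */
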
static int read_eeprom (void *ioaddr, int location, int addr_len)
{
	int i;
	unsigned retval = 0;
	void *ee_addr = ioaddr + Cfg9346;
	int read_cmd = location | (EE_READ_CMD << addr_len);

	writeb (EE_ENB & ~EE_CS, ee_addr);
	writeb (EE_ENB, ee_addr);
	eeprom_delay ();

	/* Shift the read command bits out. */
	for (i = 4 + addr_len; i >= 0; i--) {
		int dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
		writeb (EE_ENB | dataval, ee_addr);
		eeprom_delay ();
		writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
		eeprom_delay ();
	}
	writeb (EE_ENB, ee_addr);
	eeprom_delay ();

	for (i = 16; i > 0; i--) {
		writeb (EE_ENB | EE_SHIFT_CLK, ee_addr);
		eeprom_delay ();
		retval = (retval << 1) |
			 ((readb (ee_addr) & EE_DATA_READ) ? 1 : 0);
		writeb (EE_ENB, ee_addr);
		eeprom_delay ();
	}

	/* Terminate the EEPROM access. */
	writeb (~EE_CS, ee_addr);
	eeprom_delay ();

	return retval;
}

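/*
 * Usage note: each call returns one 16-bit EEPROM word. cp_init_one()
 * below first reads word 0 to guess the address width (0x8129 implies
 * 8-bit addressing), then pulls the station address out of words 7..9.
 */
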
/* Put the board into D3cold state and wait for WakeUp signal */
static void cp_set_d3_state (struct cp_private *cp)
{
	pci_enable_wake (cp->pdev, 0, 1); /* Enable PME# generation */
	pci_set_power_state (cp->pdev, 3);
}

static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct cp_private *cp;
	int rc;
	void *regs;
	long pciaddr;
	unsigned int addr_len, i, pci_using_dac;
	u8 pci_rev;

#ifndef MODULE
	static int version_printed;
	if (version_printed++ == 0)
		printk("%s", version);
#endif

	pci_read_config_byte(pdev, PCI_REVISION_ID, &pci_rev);

	if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
	    pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pci_rev < 0x20) {
		printk(KERN_ERR PFX "pci dev %s (id %04x:%04x rev %02x) is not an 8139C+ compatible chip\n",
		       pci_name(pdev), pdev->vendor, pdev->device, pci_rev);
		printk(KERN_ERR PFX "Try the \"8139too\" driver instead.\n");
		return -ENODEV;
	}

	dev = alloc_etherdev(sizeof(struct cp_private));
	if (!dev)
		return -ENOMEM;
	SET_MODULE_OWNER(dev);
	cp = dev->priv;
	cp->pdev = pdev;
	cp->dev = dev;
	cp->msg_enable = (debug < 0 ? CP_DEF_MSG_ENABLE : debug);
	spin_lock_init (&cp->lock);
	cp->mii_if.dev = dev;
	cp->mii_if.mdio_read = mdio_read;
	cp->mii_if.mdio_write = mdio_write;
	cp->mii_if.phy_id = CP_INTERNAL_PHY;
	cp->mii_if.phy_id_mask = 0x1f;
	cp->mii_if.reg_num_mask = 0x1f;
	cp_set_rxbufsize(cp);

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out_free;

	rc = pci_set_mwi(pdev);
	if (rc)
		goto err_out_disable;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_mwi;

	if (pdev->irq < 2) {
		rc = -EIO;
		printk(KERN_ERR PFX "invalid irq (%d) for pci dev %s\n",
		       pdev->irq, pci_name(pdev));
		goto err_out_res;
	}
	pciaddr = pci_resource_start(pdev, 1);
	if (!pciaddr) {
		rc = -EIO;
		printk(KERN_ERR PFX "no MMIO resource for pci dev %s\n",
		       pci_name(pdev));
		goto err_out_res;
	}
	if (pci_resource_len(pdev, 1) < CP_REGS_SIZE) {
		rc = -EIO;
		printk(KERN_ERR PFX "MMIO resource (%lx) too small on pci dev %s\n",
		       pci_resource_len(pdev, 1), pci_name(pdev));
		goto err_out_res;
	}

	/* Configure DMA attributes. sizeof() yields bytes, so compare
	 * against 4 to detect a 64-bit dma_addr_t. */
	if ((sizeof(dma_addr_t) > 4) &&
	    !pci_set_dma_mask(pdev, 0xffffffffffffffffULL)) {
		pci_using_dac = 1;
	} else {
		rc = pci_set_dma_mask(pdev, 0xffffffffULL);
		if (rc) {
			printk(KERN_ERR PFX "No usable DMA configuration, "
			       "aborting.\n");
			goto err_out_res;
		}
		pci_using_dac = 0;
	}

	cp->cpcmd = (pci_using_dac ? PCIDAC : 0) |
		    PCIMulRW | RxChkSum | CpRxOn | CpTxOn;

	regs = ioremap_nocache(pciaddr, CP_REGS_SIZE);
	if (!regs) {
		rc = -EIO;
		printk(KERN_ERR PFX "Cannot map PCI MMIO (%lx@%lx) on pci dev %s\n",
		       pci_resource_len(pdev, 1), pciaddr, pci_name(pdev));
		goto err_out_res;
	}
	dev->base_addr = (unsigned long) regs;
	cp->regs = regs;

	cp_stop_hw(cp);

	/* read MAC address from EEPROM */
	addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 8 : 6;
	for (i = 0; i < 3; i++)
		((u16 *) (dev->dev_addr))[i] =
			le16_to_cpu (read_eeprom (regs, i + 7, addr_len));

	dev->open = cp_open;
	dev->stop = cp_close;
	dev->set_multicast_list = cp_set_rx_mode;
	dev->hard_start_xmit = cp_start_xmit;
	dev->get_stats = cp_get_stats;
	dev->do_ioctl = cp_ioctl;
	dev->poll = cp_rx_poll;
	dev->weight = 16;	/* arbitrary? from NAPI_HOWTO.txt. */
#ifdef BROKEN
	dev->change_mtu = cp_change_mtu;
#endif
	dev->ethtool_ops = &cp_ethtool_ops;
#if 0
	dev->tx_timeout = cp_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#endif

#if CP_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = cp_vlan_rx_register;
	dev->vlan_rx_kill_vid = cp_vlan_rx_kill_vid;
#endif

	dev->irq = pdev->irq;

	rc = register_netdev(dev);
	if (rc)
		goto err_out_iomap;

	printk (KERN_INFO "%s: RTL-8139C+ at 0x%lx, "
		"%02x:%02x:%02x:%02x:%02x:%02x, "
		"IRQ %d\n",
		dev->name,
		dev->base_addr,
		dev->dev_addr[0], dev->dev_addr[1],
		dev->dev_addr[2], dev->dev_addr[3],
		dev->dev_addr[4], dev->dev_addr[5],
		dev->irq);

	pci_set_drvdata(pdev, dev);

	/* enable busmastering and memory-write-invalidate */
	pci_set_master(pdev);

	if (cp->wol_enabled)
		cp_set_d3_state (cp);

	return 0;

err_out_iomap:
	iounmap(regs);
err_out_res:
	pci_release_regions(pdev);
err_out_mwi:
	pci_clear_mwi(pdev);
err_out_disable:
	pci_disable_device(pdev);
err_out_free:
	kfree(dev);
	return rc;
}

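/*
 * The error labels in cp_init_one() unwind in exact reverse order of
 * acquisition; any new setup step should get a matching label inserted
 * at the corresponding point in that chain.
 */
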
static void cp_remove_one (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct cp_private *cp;

	if (!dev)
		BUG();
	cp = dev->priv;

	unregister_netdev(dev);
	iounmap(cp->regs);
	if (cp->wol_enabled)
		pci_set_power_state (pdev, 0);
	pci_release_regions(pdev);
	pci_clear_mwi(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	kfree(dev);
}

#ifdef CONFIG_PM
static int cp_suspend (struct pci_dev *pdev, u32 state)
{
	struct net_device *dev;
	struct cp_private *cp;
	unsigned long flags;

	dev = pci_get_drvdata (pdev);
	if (!dev || !netif_running (dev))
		return 0;

	cp = dev->priv;

	netif_device_detach (dev);
	netif_stop_queue (dev);

	spin_lock_irqsave (&cp->lock, flags);

	/* Disable Rx and Tx */
	cpw16 (IntrMask, 0);
	cpw8 (Cmd, cpr8 (Cmd) & ~(RxOn | TxOn));

	spin_unlock_irqrestore (&cp->lock, flags);

	if (cp->pdev && cp->wol_enabled) {
		pci_save_state (cp->pdev, cp->power_state);
		cp_set_d3_state (cp);
	}

	return 0;
}

static int cp_resume (struct pci_dev *pdev)
{
	struct net_device *dev;
	struct cp_private *cp;

	dev = pci_get_drvdata (pdev);
	cp = dev->priv;

	netif_device_attach (dev);

	if (cp->pdev && cp->wol_enabled) {
		pci_set_power_state (cp->pdev, 0);
		pci_restore_state (cp->pdev, cp->power_state);
	}

	cp_init_hw (cp);
	netif_start_queue (dev);

	return 0;
}
#endif /* CONFIG_PM */

static struct pci_driver cp_driver = {
	.name		= DRV_NAME,
	.id_table	= cp_pci_tbl,
	.probe		= cp_init_one,
	.remove		= cp_remove_one,
#ifdef CONFIG_PM
	.resume		= cp_resume,
	.suspend	= cp_suspend,
#endif
};

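/*
 * The PCI core matches devices against cp_pci_tbl (defined earlier in
 * this file) and invokes .probe for each hit; .suspend/.resume are only
 * wired up when power management is configured in.
 */
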
static int __init cp_init (void)
{
#ifdef MODULE
	printk("%s", version);
#endif
	return pci_module_init (&cp_driver);
}

static void __exit cp_exit (void)
{
	pci_unregister_driver (&cp_driver);
}

module_init(cp_init);
module_exit(cp_exit);