/* starfire.c: Linux device driver for the Adaptec Starfire network adapter. */
/*
        Written 1998-2000 by Donald Becker.

        Current maintainer is Ion Badulescu <ionut@cs.columbia.edu>. Please
        send all bug reports to me, and not to Donald Becker, as this code
        has been modified quite a bit from Donald's original version.

        This software may be used and distributed according to the terms of
        the GNU General Public License (GPL), incorporated herein by reference.
        Drivers based on or derived from this code fall under the GPL and must
        retain the authorship, copyright and license notice.  This file is not
        a complete program and may only be used when the entire operating
        system is licensed under the GPL.

        The author may be reached as becker@scyld.com, or C/O
        Scyld Computing Corporation
        410 Severn Ave., Suite 210
        Annapolis MD 21403

        Support and updates available at
        http://www.scyld.com/network/starfire.html

        -----------------------------------------------------------

        Linux kernel-specific changes:

        LK1.1.1 (jgarzik):
        - Use PCI driver interface
        - Fix MOD_xxx races
        - softnet fixups

        LK1.1.2 (jgarzik):
        - Merge Becker version 0.15

        LK1.1.3 (Andrew Morton)
        - Timer cleanups

        LK1.1.4 (jgarzik):
        - Merge Becker version 1.03

        LK1.2.1 (Ion Badulescu <ionut@cs.columbia.edu>)
        - Support hardware Rx/Tx checksumming
        - Use the GFP firmware taken from Adaptec's Netware driver

        LK1.2.2 (Ion Badulescu)
        - Backported to 2.2.x

        LK1.2.3 (Ion Badulescu)
        - Fix the flaky mdio interface
        - More compat clean-ups

        LK1.2.4 (Ion Badulescu)
        - More 2.2.x initialization fixes

        LK1.2.5 (Ion Badulescu)
        - Several fixes from Manfred Spraul

        LK1.2.6 (Ion Badulescu)
        - Fixed ifup/ifdown/ifup problem in 2.4.x

        LK1.2.7 (Ion Badulescu)
        - Removed unused code
        - Made more functions static and __init

        LK1.2.8 (Ion Badulescu)
        - Quell bogus error messages, inform about the Tx threshold
        - Removed #ifdef CONFIG_PCI, this driver is PCI only

        LK1.2.9 (Ion Badulescu)
        - Merged Jeff Garzik's changes from 2.4.4-pre5
        - Added 2.2.x compatibility stuff required by the above changes

        LK1.2.9a (Ion Badulescu)
        - More updates from Jeff Garzik

        LK1.3.0 (Ion Badulescu)
        - Merged zerocopy support

        LK1.3.1 (Ion Badulescu)
        - Added ethtool support
        - Added GPIO (media change) interrupt support

        LK1.3.2 (Ion Badulescu)
        - Fixed 2.2.x compatibility issues introduced in 1.3.1
        - Fixed ethtool ioctl returning uninitialized memory

        LK1.3.3 (Ion Badulescu)
        - Initialize the TxMode register properly
        - Don't dereference dev->priv after freeing it

        LK1.3.4 (Ion Badulescu)
        - Fixed initialization timing problems
        - Fixed interrupt mask definitions

        LK1.3.5 (jgarzik)
        - ethtool NWAY_RST, GLINK, [GS]MSGLVL support

        LK1.3.6:
        - Sparc64 support and fixes (Ion Badulescu)
        - Better stats and error handling (Ion Badulescu)
        - Use new pci_set_mwi() PCI API function (jgarzik)

        LK1.3.7 (Ion Badulescu)
        - minimal implementation of tx_timeout()
        - correctly shutdown the Rx/Tx engines in netdev_close()
        - added calls to netif_carrier_on/off
        (patch from Stefan Rompf <srompf@isg.de>)
        - VLAN support

        LK1.3.8 (Ion Badulescu)
        - adjust DMA burst size on sparc64
        - 64-bit support
        - reworked zerocopy support for 64-bit buffers
        - working and usable interrupt mitigation/latency
        - reduced Tx interrupt frequency for lower interrupt overhead

        LK1.3.9 (Ion Badulescu)
        - bugfix for mcast filter
        - enable the right kind of Tx interrupts (TxDMADone, not TxDone)

TODO:
        - full NAPI support
*/

#define DRV_NAME        "starfire"
#define DRV_VERSION     "1.03+LK1.3.9"
#define DRV_RELDATE     "December 13, 2002"

#include <linux/config.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <asm/processor.h>              /* Processor type for cache alignment. */
#include <asm/uaccess.h>
#include <asm/io.h>

/*
 * Adaptec's license for their Novell drivers (which is where I got the
 * firmware files) does not allow one to redistribute them. Thus, we can't
 * include the firmware with this driver.
 *
 * However, should a legal-to-use firmware become available,
 * the driver developer would need only to obtain the firmware in the
 * form of a C header file.
 * Once that's done, the #undef below must be changed into a #define
 * for this driver to really use the firmware. Note that Rx/Tx
 * hardware TCP checksumming is not possible without the firmware.
 *
 * WANTED: legal firmware to include with this GPL'd driver.
 */
#undef HAS_FIRMWARE
/*
 * The current frame processor firmware fails to checksum a fragment
 * of length 1. If and when this is fixed, the #define below can be removed.
 */
#define HAS_BROKEN_FIRMWARE
/*
 * Define this if using the driver with the zero-copy patch
 */
#if defined(HAS_FIRMWARE) && defined(MAX_SKB_FRAGS)
#define ZEROCOPY
#endif

#ifdef HAS_FIRMWARE
#include "starfire_firmware.h"
#endif /* HAS_FIRMWARE */

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define VLAN_SUPPORT
#endif

/* The user-configurable values.
   These may be modified when a driver module is loaded. */

/* Used for tuning interrupt latency vs. overhead. */
static int intr_latency;
static int small_frames;

static int debug = 1;                   /* 0 quiet, 1 normal messages, .. 7 verbose. */
static int max_interrupt_work = 20;
static int mtu;
/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   The Starfire has a 512 element hash table based on the Ethernet CRC. */
static int multicast_filter_limit = 512;
/* Whether to do TCP/UDP checksums in hardware */
#ifdef HAS_FIRMWARE
static int enable_hw_cksum = 1;
#else
static int enable_hw_cksum = 0;
#endif

#define PKT_BUF_SZ      1536            /* Size of each temporary Rx buffer. */
/*
 * Set the copy breakpoint for the copy-only-tiny-frames scheme.
 * Setting to > 1518 effectively disables this feature.
 *
 * NOTE:
 * The ia64 doesn't allow unaligned loads, even of integers misaligned
 * on a 2-byte boundary. Thus we always force copying of packets, as the
 * Starfire doesn't allow misaligned DMAs ;-(
 * 23/10/2000 - Jes
 *
 * The Alpha and the Sparc don't like unaligned loads, either. On Sparc64,
 * at least, having unaligned frames leads to a rather serious performance
 * penalty. -Ion
 */
#if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
static int rx_copybreak = PKT_BUF_SZ;
#else
static int rx_copybreak /* = 0 */;
#endif
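/*
 * Worked example: on the architectures above rx_copybreak is set to
 * PKT_BUF_SZ (1536), which exceeds the largest standard Ethernet frame
 * (1518 bytes), so every received frame falls below the breakpoint and
 * is copied into a fresh skbuff at the "+2" offset that longword-aligns
 * the IP header (see the Theory of Operation notes below).
 */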

/* PCI DMA burst size -- on sparc64 we want to force it to 64 bytes, on the others the default of 128 is fine. */
#ifdef __sparc__
#define DMA_BURST_SIZE 64
#else
#define DMA_BURST_SIZE 128
#endif
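/*
 * The Rx/Tx DMA control registers take the burst size divided by 32
 * (see netdev_open() below), so this programs a register value of
 * 64/32 = 2 on sparc64 and 128/32 = 4 elsewhere.
 */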

/* Used to pass the media type, etc.
   Both 'options[]' and 'full_duplex[]' exist for driver interoperability.
   The media type is usually passed in 'options[]'.
   These variables are deprecated, use ethtool instead. -Ion
*/
#define MAX_UNITS 8             /* More are supported, limit only on options */
static int options[MAX_UNITS] = {0, };
static int full_duplex[MAX_UNITS] = {0, };

/* Operational parameters that are set at compile time. */

/* The "native" ring sizes are either 256 or 2048.
   However in some modes a descriptor may be marked to wrap the ring earlier.
*/
#define RX_RING_SIZE    256
#define TX_RING_SIZE    32
/* The completion queues are fixed at 1024 entries i.e. 4K or 8KB. */
#define DONE_Q_SIZE     1024
/* All queues must be aligned on a 256-byte boundary */
#define QUEUE_ALIGN     256
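/*
 * Worked example of the round-up used in netdev_open():
 * ((size + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN rounds a queue
 * size up to the next multiple of 256.  The 4-byte tx_done_desc gives
 * 4 * 1024 = 4096 bytes, already aligned; a hypothetical 4100-byte
 * queue would round up to 4352.
 */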

#if RX_RING_SIZE > 256
#define RX_Q_ENTRIES Rx2048QEntries
#else
#define RX_Q_ENTRIES Rx256QEntries
#endif

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT      (2 * HZ)

/*
 * This SUCKS.
 * We need a much better method to determine if dma_addr_t is 64-bit.
 */
#if (defined(__i386__) && defined(CONFIG_HIGHMEM) && (LINUX_VERSION_CODE > 0x20500 || defined(CONFIG_HIGHMEM64G))) || defined(__x86_64__) || defined (__ia64__) || defined(__mips64__) || (defined(__mips__) && defined(CONFIG_HIGHMEM) && defined(CONFIG_64BIT_PHYS_ADDR))
/* 64-bit dma_addr_t */
#define ADDR_64BITS     /* This chip uses 64 bit addresses. */
#define cpu_to_dma(x) cpu_to_le64(x)
#define dma_to_cpu(x) le64_to_cpu(x)
#define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
#define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
#define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
#define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
#define RX_DESC_ADDR_SIZE RxDescAddr64bit
#else  /* 32-bit dma_addr_t */
#define cpu_to_dma(x) cpu_to_le32(x)
#define dma_to_cpu(x) le32_to_cpu(x)
#define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
#define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
#define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
#define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
#define RX_DESC_ADDR_SIZE RxDescAddr32bit
#endif
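/*
 * These macros let the descriptor-filling code stay width-agnostic;
 * e.g. init_ring() below stores Rx buffer addresses with
 *   np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
 * which compiles to a little-endian 64-bit or 32-bit store as needed.
 */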

#ifdef MAX_SKB_FRAGS
#define skb_first_frag_len(skb) skb_headlen(skb)
#define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)
#else  /* not MAX_SKB_FRAGS */
#define skb_first_frag_len(skb) (skb->len)
#define skb_num_frags(skb) 1
#endif /* not MAX_SKB_FRAGS */
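/*
 * Example: a linear skb reports skb_num_frags() == 1 (the header
 * fragment only), while a zerocopy skb carrying two page fragments
 * reports 2 + 1 = 3; start_tx() consumes one Tx descriptor slot per
 * fragment.
 */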

/* 2.2.x compatibility code */
#if LINUX_VERSION_CODE < 0x20300

#include "starfire-kcomp22.h"

#else  /* LINUX_VERSION_CODE > 0x20300 */

#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>

#include <linux/if_vlan.h>

#define COMPAT_MOD_INC_USE_COUNT
#define COMPAT_MOD_DEC_USE_COUNT

#define init_tx_timer(dev, func, timeout) \
        dev->tx_timeout = func; \
        dev->watchdog_timeo = timeout;
#define kick_tx_timer(dev, func, timeout)

#define netif_start_if(dev)
#define netif_stop_if(dev)

#define PCI_SLOT_NAME(pci_dev)  (pci_dev)->slot_name

#endif /* LINUX_VERSION_CODE > 0x20300 */
/* end of compatibility code */


/* These identify the driver base version and may not be removed. */
static char version[] __devinitdata =
KERN_INFO "starfire.c:v1.03 7/26/2000  Written by Donald Becker <becker@scyld.com>\n"
KERN_INFO " (unofficial 2.2/2.4 kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Adaptec Starfire Ethernet driver");
MODULE_LICENSE("GPL");

MODULE_PARM(max_interrupt_work, "i");
MODULE_PARM(mtu, "i");
MODULE_PARM(debug, "i");
MODULE_PARM(rx_copybreak, "i");
MODULE_PARM(intr_latency, "i");
MODULE_PARM(small_frames, "i");
MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(enable_hw_cksum, "i");
MODULE_PARM_DESC(max_interrupt_work, "Maximum events handled per interrupt");
MODULE_PARM_DESC(mtu, "MTU (all boards)");
MODULE_PARM_DESC(debug, "Debug level (0-6)");
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(intr_latency, "Maximum interrupt latency, in microseconds");
MODULE_PARM_DESC(small_frames, "Maximum size of receive frames that bypass interrupt latency (0,64,128,256,512)");
MODULE_PARM_DESC(options, "Deprecated: Bits 0-3: media type, bit 17: full duplex");
MODULE_PARM_DESC(full_duplex, "Deprecated: Forced full-duplex setting (0/1)");
MODULE_PARM_DESC(enable_hw_cksum, "Enable/disable hardware cksum support (0/1)");

/*
                                Theory of Operation

I. Board Compatibility

This driver is for the Adaptec 6915 "Starfire" 64 bit PCI Ethernet adapter.

II. Board-specific settings

III. Driver operation

IIIa. Ring buffers

The Starfire hardware uses multiple fixed-size descriptor queues/rings.  The
ring sizes are fixed by the hardware, but may optionally be wrapped
earlier by the END bit in the descriptor.
This driver uses that hardware queue size for the Rx ring, where a large
number of entries has no ill effect beyond increasing the potential backlog.
The Tx ring is wrapped with the END bit, since a large hardware Tx queue
disables the queue layer priority ordering and we have no mechanism to
utilize the hardware two-level priority queue.  When modifying
RX/TX_RING_SIZE, pay close attention to page sizes and the ring-empty warning
levels.

IIIb/c. Transmit/Receive Structure

See the Adaptec manual for the many possible structures, and options for
each structure.  There are far too many to document all of them here.

For transmit this driver uses type 0/1 transmit descriptors (depending
on the 32/64 bitness of the architecture), and relies on automatic
minimum-length padding.  It does not use the completion queue
consumer index, but instead checks for non-zero status entries.

For receive this driver uses type 0/1/2/3 receive descriptors.  The driver
allocates full frame size skbuffs for the Rx ring buffers, so all frames
should fit in a single descriptor.  The driver does not use the completion
queue consumer index, but instead checks for non-zero status entries.

When an incoming frame is less than RX_COPYBREAK bytes long, a fresh skbuff
is allocated and the frame is copied to the new skbuff.  When the incoming
frame is larger, the skbuff is passed directly up the protocol stack.
Buffers consumed this way are replaced by newly allocated skbuffs in a later
phase of receive.

A notable aspect of operation is that unaligned buffers are not permitted by
the Starfire hardware.  Thus the IP header at offset 14 in an ethernet frame
isn't longword aligned, which may cause problems on some machines,
e.g. Alphas and IA64. For these architectures, the driver is forced to copy
the frame into a new skbuff unconditionally. Copied frames are put into the
skbuff at an offset of "+2", thus 16-byte aligning the IP header.
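(For example, after copying to an offset of 2 the 14-byte Ethernet
header occupies bytes 2-15 of the skbuff, so the IP header starts at
byte 16, a longword-aligned offset.)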

IIId. Synchronization

The driver runs as two independent, single-threaded flows of control.  One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag.  The other thread is the interrupt handler, which is single
threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring and the netif_queue
status. If the number of free Tx slots in the ring falls below a certain number
(currently hardcoded to 4), it signals the upper layer to stop the queue.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark. Iff the netif_queue is stopped and the
number of free Tx slots is above the threshold, it signals the upper layer to
restart the queue.

IV. Notes

IVb. References

The Adaptec Starfire manuals, available only from Adaptec.
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html

IVc. Errata

- StopOnPerr is broken, don't enable
- Hardware ethernet padding exposes random data, perform software padding
  instead (unverified -- works correctly for all the hardware I have)

*/



enum chip_capability_flags {CanHaveMII=1, };

enum chipset {
        CH_6915 = 0,
};

static struct pci_device_id starfire_pci_tbl[] __devinitdata = {
        { 0x9004, 0x6915, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_6915 },
        { 0, }
};
MODULE_DEVICE_TABLE(pci, starfire_pci_tbl);

/* A chip capabilities table, matching the CH_xxx entries in xxx_pci_tbl[] above. */
static struct chip_info {
        const char *name;
        int drv_flags;
} netdrv_tbl[] __devinitdata = {
        { "Adaptec Starfire 6915", CanHaveMII },
};


/* Offsets to the device registers.
   Unlike software-only systems, device drivers interact with complex hardware.
   It's not useful to define symbolic names for every register bit in the
   device.  The name can only partially document the semantics and make
   the driver longer and more difficult to read.
   In general, only the important configuration values or bits changed
   multiple times should be defined symbolically.
*/
enum register_offsets {
        PCIDeviceConfig=0x50040, GenCtrl=0x50070, IntrTimerCtrl=0x50074,
        IntrClear=0x50080, IntrStatus=0x50084, IntrEnable=0x50088,
        MIICtrl=0x52000, TxStationAddr=0x50120, EEPROMCtrl=0x51000,
        GPIOCtrl=0x5008C, TxDescCtrl=0x50090,
        TxRingPtr=0x50098, HiPriTxRingPtr=0x50094, /* Low and High priority. */
        TxRingHiAddr=0x5009C,           /* 64 bit address extension. */
        TxProducerIdx=0x500A0, TxConsumerIdx=0x500A4,
        TxThreshold=0x500B0,
        CompletionHiAddr=0x500B4, TxCompletionAddr=0x500B8,
        RxCompletionAddr=0x500BC, RxCompletionQ2Addr=0x500C0,
        CompletionQConsumerIdx=0x500C4, RxDMACtrl=0x500D0,
        RxDescQCtrl=0x500D4, RxDescQHiAddr=0x500DC, RxDescQAddr=0x500E0,
        RxDescQIdx=0x500E8, RxDMAStatus=0x500F0, RxFilterMode=0x500F4,
        TxMode=0x55000, VlanType=0x55064,
        PerfFilterTable=0x56000, HashTable=0x56100,
        TxGfpMem=0x58000, RxGfpMem=0x5a000,
};

/*
 * Bits in the interrupt status/mask registers.
 * Warning: setting Intr[Ab]NormalSummary in the IntrEnable register
 * enables all the interrupt sources that are or'ed into those status bits.
 */
enum intr_status_bits {
        IntrLinkChange=0xf0000000, IntrStatsMax=0x08000000,
        IntrAbnormalSummary=0x02000000, IntrGeneralTimer=0x01000000,
        IntrSoftware=0x800000, IntrRxComplQ1Low=0x400000,
        IntrTxComplQLow=0x200000, IntrPCI=0x100000,
        IntrDMAErr=0x080000, IntrTxDataLow=0x040000,
        IntrRxComplQ2Low=0x020000, IntrRxDescQ1Low=0x010000,
        IntrNormalSummary=0x8000, IntrTxDone=0x4000,
        IntrTxDMADone=0x2000, IntrTxEmpty=0x1000,
        IntrEarlyRxQ2=0x0800, IntrEarlyRxQ1=0x0400,
        IntrRxQ2Done=0x0200, IntrRxQ1Done=0x0100,
        IntrRxGFPDead=0x80, IntrRxDescQ2Low=0x40,
        IntrNoTxCsum=0x20, IntrTxBadID=0x10,
        IntrHiPriTxBadID=0x08, IntrRxGfp=0x04,
        IntrTxGfp=0x02, IntrPCIPad=0x01,
        /* not quite bits */
        IntrRxDone=IntrRxQ2Done | IntrRxQ1Done,
        IntrRxEmpty=IntrRxDescQ1Low | IntrRxDescQ2Low,
        IntrNormalMask=0xff00, IntrAbnormalMask=0x3ff00fe,
};

/* Bits in the RxFilterMode register. */
enum rx_mode_bits {
        AcceptBroadcast=0x04, AcceptAllMulticast=0x02, AcceptAll=0x01,
        AcceptMulticast=0x10, PerfectFilter=0x40, HashFilter=0x30,
        PerfectFilterVlan=0x80, MinVLANPrio=0xE000, VlanMode=0x0200,
        WakeupOnGFP=0x0800,
};

/* Bits in the TxMode register */
enum tx_mode_bits {
        MiiSoftReset=0x8000, MIILoopback=0x4000,
        TxFlowEnable=0x0800, RxFlowEnable=0x0400,
        PadEnable=0x04, FullDuplex=0x02, HugeFrame=0x01,
};

/* Bits in the TxDescCtrl register. */
enum tx_ctrl_bits {
        TxDescSpaceUnlim=0x00, TxDescSpace32=0x10, TxDescSpace64=0x20,
        TxDescSpace128=0x30, TxDescSpace256=0x40,
        TxDescType0=0x00, TxDescType1=0x01, TxDescType2=0x02,
        TxDescType3=0x03, TxDescType4=0x04,
        TxNoDMACompletion=0x08,
        TxDescQAddr64bit=0x80, TxDescQAddr32bit=0,
        TxHiPriFIFOThreshShift=24, TxPadLenShift=16,
        TxDMABurstSizeShift=8,
};

/* Bits in the RxDescQCtrl register. */
enum rx_ctrl_bits {
        RxBufferLenShift=16, RxMinDescrThreshShift=0,
        RxPrefetchMode=0x8000, RxVariableQ=0x2000,
        Rx2048QEntries=0x4000, Rx256QEntries=0,
        RxDescAddr64bit=0x1000, RxDescAddr32bit=0,
        RxDescQAddr64bit=0x0100, RxDescQAddr32bit=0,
        RxDescSpace4=0x000, RxDescSpace8=0x100,
        RxDescSpace16=0x200, RxDescSpace32=0x300,
        RxDescSpace64=0x400, RxDescSpace128=0x500,
        RxConsumerWrEn=0x80,
};

/* Bits in the RxDMACtrl register. */
enum rx_dmactrl_bits {
        RxReportBadFrames=0x80000000, RxDMAShortFrames=0x40000000,
        RxDMABadFrames=0x20000000, RxDMACrcErrorFrames=0x10000000,
        RxDMAControlFrame=0x08000000, RxDMAPauseFrame=0x04000000,
        RxChecksumIgnore=0, RxChecksumRejectTCPUDP=0x02000000,
        RxChecksumRejectTCPOnly=0x01000000,
        RxCompletionQ2Enable=0x800000,
        RxDMAQ2Disable=0, RxDMAQ2FPOnly=0x100000,
        RxDMAQ2SmallPkt=0x200000, RxDMAQ2HighPrio=0x300000,
        RxDMAQ2NonIP=0x400000,
        RxUseBackupQueue=0x080000, RxDMACRC=0x040000,
        RxEarlyIntThreshShift=12, RxHighPrioThreshShift=8,
        RxBurstSizeShift=0,
};

/* Bits in the RxCompletionAddr register */
enum rx_compl_bits {
        RxComplQAddr64bit=0x80, RxComplQAddr32bit=0,
        RxComplProducerWrEn=0x40,
        RxComplType0=0x00, RxComplType1=0x10,
        RxComplType2=0x20, RxComplType3=0x30,
        RxComplThreshShift=0,
};

/* Bits in the TxCompletionAddr register */
enum tx_compl_bits {
        TxComplQAddr64bit=0x80, TxComplQAddr32bit=0,
        TxComplProducerWrEn=0x40,
        TxComplIntrStatus=0x20,
        CommonQueueMode=0x10,
        TxComplThreshShift=0,
};

/* Bits in the GenCtrl register */
enum gen_ctrl_bits {
        RxEnable=0x05, TxEnable=0x0a,
        RxGFPEnable=0x10, TxGFPEnable=0x20,
};

/* Bits in the IntrTimerCtrl register */
enum intr_ctrl_bits {
        Timer10X=0x800, EnableIntrMasking=0x60, SmallFrameBypass=0x100,
        SmallFrame64=0, SmallFrame128=0x200, SmallFrame256=0x400, SmallFrame512=0x600,
        IntrLatencyMask=0x1f,
};

/* The Rx and Tx buffer descriptors. */
struct starfire_rx_desc {
        dma_addr_t rxaddr;
};
enum rx_desc_bits {
        RxDescValid=1, RxDescEndRing=2,
};

/* Completion queue entry. */
struct short_rx_done_desc {
        u32 status;                     /* Low 16 bits is length. */
};
struct basic_rx_done_desc {
        u32 status;                     /* Low 16 bits is length. */
        u16 vlanid;
        u16 status2;
};
struct csum_rx_done_desc {
        u32 status;                     /* Low 16 bits is length. */
        u16 csum;                       /* Partial checksum */
        u16 status2;
};
struct full_rx_done_desc {
        u32 status;                     /* Low 16 bits is length. */
        u16 status3;
        u16 status2;
        u16 vlanid;
        u16 csum;                       /* partial checksum */
        u32 timestamp;
};
/* XXX: this is ugly and I'm not sure it's worth the trouble -Ion */
#ifdef HAS_FIRMWARE
#ifdef VLAN_SUPPORT
typedef struct full_rx_done_desc rx_done_desc;
#define RxComplType RxComplType3
#else  /* not VLAN_SUPPORT */
typedef struct csum_rx_done_desc rx_done_desc;
#define RxComplType RxComplType2
#endif /* not VLAN_SUPPORT */
#else  /* not HAS_FIRMWARE */
#ifdef VLAN_SUPPORT
typedef struct basic_rx_done_desc rx_done_desc;
#define RxComplType RxComplType1
#else  /* not VLAN_SUPPORT */
typedef struct short_rx_done_desc rx_done_desc;
#define RxComplType RxComplType0
#endif /* not VLAN_SUPPORT */
#endif /* not HAS_FIRMWARE */

enum rx_done_bits {
        RxOK=0x20000000, RxFIFOErr=0x10000000, RxBufQ2=0x08000000,
};

/* Type 1 Tx descriptor. */
struct starfire_tx_desc_1 {
        u32 status;                     /* Upper bits are status, lower 16 length. */
        u32 addr;
};

/* Type 2 Tx descriptor. */
struct starfire_tx_desc_2 {
        u32 status;                     /* Upper bits are status, lower 16 length. */
        u32 reserved;
        u64 addr;
};

#ifdef ADDR_64BITS
typedef struct starfire_tx_desc_2 starfire_tx_desc;
#define TX_DESC_TYPE TxDescType2
#else  /* not ADDR_64BITS */
typedef struct starfire_tx_desc_1 starfire_tx_desc;
#define TX_DESC_TYPE TxDescType1
#endif /* not ADDR_64BITS */
#define TX_DESC_SPACING TxDescSpaceUnlim

enum tx_desc_bits {
        TxDescID=0xB0000000,
        TxCRCEn=0x01000000, TxDescIntr=0x08000000,
        TxRingWrap=0x04000000, TxCalTCP=0x02000000,
};
struct tx_done_desc {
        u32 status;                     /* timestamp, index. */
#if 0
        u32 intrstatus;                 /* interrupt status */
#endif
};

struct rx_ring_info {
        struct sk_buff *skb;
        dma_addr_t mapping;
};
struct tx_ring_info {
        struct sk_buff *skb;
        dma_addr_t mapping;
        unsigned int used_slots;
};

#define PHY_CNT         2
struct netdev_private {
        /* Descriptor rings first for alignment. */
        struct starfire_rx_desc *rx_ring;
        starfire_tx_desc *tx_ring;
        dma_addr_t rx_ring_dma;
        dma_addr_t tx_ring_dma;
        /* The addresses of rx/tx-in-place skbuffs. */
        struct rx_ring_info rx_info[RX_RING_SIZE];
        struct tx_ring_info tx_info[TX_RING_SIZE];
        /* Pointers to completion queues (full pages). */
        rx_done_desc *rx_done_q;
        dma_addr_t rx_done_q_dma;
        unsigned int rx_done;
        struct tx_done_desc *tx_done_q;
        dma_addr_t tx_done_q_dma;
        unsigned int tx_done;
        struct net_device_stats stats;
        struct pci_dev *pci_dev;
#ifdef VLAN_SUPPORT
        struct vlan_group *vlgrp;
#endif
        void *queue_mem;
        dma_addr_t queue_mem_dma;
        size_t queue_mem_size;

        /* Frequently used values: keep some adjacent for cache effect. */
        spinlock_t lock;
        unsigned int cur_rx, dirty_rx;  /* Producer/consumer ring indices */
        unsigned int cur_tx, dirty_tx, reap_tx;
        unsigned int rx_buf_sz;         /* Based on MTU+slack. */
        /* These values keep track of the transceiver/media in use. */
        int speed100;                   /* Set if speed == 100MBit. */
        u32 tx_mode;
        u32 intr_timer_ctrl;
        u8 tx_threshold;
        /* MII transceiver section. */
        struct mii_if_info mii_if;              /* MII lib hooks/info */
        int phy_cnt;                    /* Number of MII PHYs found. */
        unsigned char phys[PHY_CNT];    /* MII device addresses. */
};


static int      mdio_read(struct net_device *dev, int phy_id, int location);
static void     mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int      netdev_open(struct net_device *dev);
static void     check_duplex(struct net_device *dev);
static void     tx_timeout(struct net_device *dev);
static void     init_ring(struct net_device *dev);
static int      start_tx(struct sk_buff *skb, struct net_device *dev);
static void     intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
static void     netdev_error(struct net_device *dev, int intr_status);
static int      netdev_rx(struct net_device *dev);
static void     set_rx_mode(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int      netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int      netdev_close(struct net_device *dev);
static void     netdev_media_change(struct net_device *dev);


#ifdef VLAN_SUPPORT
static void netdev_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
        struct netdev_private *np = dev->priv;

        spin_lock(&np->lock);
        if (debug > 2)
                printk("%s: Setting vlgrp to %p\n", dev->name, grp);
        np->vlgrp = grp;
        set_rx_mode(dev);
        spin_unlock(&np->lock);
}

static void netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
        struct netdev_private *np = dev->priv;

        spin_lock(&np->lock);
        if (debug > 1)
                printk("%s: Adding vlanid %d to vlan filter\n", dev->name, vid);
        set_rx_mode(dev);
        spin_unlock(&np->lock);
}

static void netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
        struct netdev_private *np = dev->priv;

        spin_lock(&np->lock);
        if (debug > 1)
                printk("%s: removing vlanid %d from vlan filter\n", dev->name, vid);
        if (np->vlgrp)
                np->vlgrp->vlan_devices[vid] = NULL;
        set_rx_mode(dev);
        spin_unlock(&np->lock);
}
#endif /* VLAN_SUPPORT */


static int __devinit starfire_init_one(struct pci_dev *pdev,
                                       const struct pci_device_id *ent)
{
        struct netdev_private *np;
        int i, irq, option, chip_idx = ent->driver_data;
        struct net_device *dev;
        static int card_idx = -1;
        long ioaddr;
        int drv_flags, io_size;
        int boguscnt;

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
        static int printed_version;
        if (!printed_version++)
                printk(version);
#endif

        card_idx++;

        if (pci_enable_device (pdev))
                return -EIO;

        ioaddr = pci_resource_start(pdev, 0);
        io_size = pci_resource_len(pdev, 0);
        if (!ioaddr || ((pci_resource_flags(pdev, 0) & IORESOURCE_MEM) == 0)) {
                printk(KERN_ERR DRV_NAME " %d: no PCI MEM resources, aborting\n", card_idx);
                return -ENODEV;
        }

        dev = alloc_etherdev(sizeof(*np));
        if (!dev) {
                printk(KERN_ERR DRV_NAME " %d: cannot alloc etherdev, aborting\n", card_idx);
                return -ENOMEM;
        }
        SET_MODULE_OWNER(dev);

        irq = pdev->irq;

        if (pci_request_regions (pdev, dev->name)) {
                printk(KERN_ERR DRV_NAME " %d: cannot reserve PCI resources, aborting\n", card_idx);
                goto err_out_free_netdev;
        }

        /* ioremap is broken in Linux-2.2.x/sparc64 */
#if !defined(CONFIG_SPARC64) || LINUX_VERSION_CODE > 0x20300
        ioaddr = (long) ioremap(ioaddr, io_size);
        if (!ioaddr) {
                printk(KERN_ERR DRV_NAME " %d: cannot remap %#x @ %#lx, aborting\n",
                        card_idx, io_size, ioaddr);
                goto err_out_free_res;
        }
#endif /* !CONFIG_SPARC64 || Linux 2.3.0+ */

        pci_set_master(pdev);

        /* enable MWI -- it vastly improves Rx performance on sparc64 */
        pci_set_mwi(pdev);

#ifdef MAX_SKB_FRAGS
        dev->features |= NETIF_F_SG;
#endif /* MAX_SKB_FRAGS */
#ifdef ZEROCOPY
        /* Starfire can do TCP/UDP checksumming */
        if (enable_hw_cksum)
                dev->features |= NETIF_F_IP_CSUM;
#endif /* ZEROCOPY */
#ifdef VLAN_SUPPORT
        dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
        dev->vlan_rx_register = netdev_vlan_rx_register;
        dev->vlan_rx_add_vid = netdev_vlan_rx_add_vid;
        dev->vlan_rx_kill_vid = netdev_vlan_rx_kill_vid;
#endif /* VLAN_SUPPORT */
#ifdef ADDR_64BITS
        dev->features |= NETIF_F_HIGHDMA;
#endif /* ADDR_64BITS */

        /* Serial EEPROM reads are hidden by the hardware. */
        for (i = 0; i < 6; i++)
                dev->dev_addr[i] = readb(ioaddr + EEPROMCtrl + 20 - i);

#if ! defined(final_version) /* Dump the EEPROM contents during development. */
        if (debug > 4)
                for (i = 0; i < 0x20; i++)
                        printk("%2.2x%s",
                               (unsigned int)readb(ioaddr + EEPROMCtrl + i),
                               i % 16 != 15 ? " " : "\n");
#endif

        /* Issue soft reset */
        writel(MiiSoftReset, ioaddr + TxMode);
        udelay(1000);
        writel(0, ioaddr + TxMode);

        /* Reset the chip to erase previous misconfiguration. */
        writel(1, ioaddr + PCIDeviceConfig);
        boguscnt = 1000;
        while (--boguscnt > 0) {
                udelay(10);
                if ((readl(ioaddr + PCIDeviceConfig) & 1) == 0)
                        break;
        }
        if (boguscnt == 0)
                printk("%s: chipset reset never completed!\n", dev->name);
        /* wait a little longer */
        udelay(1000);

        dev->base_addr = ioaddr;
        dev->irq = irq;

        np = dev->priv;
        spin_lock_init(&np->lock);
        pci_set_drvdata(pdev, dev);

        np->pci_dev = pdev;

        np->mii_if.dev = dev;
        np->mii_if.mdio_read = mdio_read;
        np->mii_if.mdio_write = mdio_write;
        np->mii_if.phy_id_mask = 0x1f;
        np->mii_if.reg_num_mask = 0x1f;

        drv_flags = netdrv_tbl[chip_idx].drv_flags;

        option = card_idx < MAX_UNITS ? options[card_idx] : 0;
        if (dev->mem_start)
                option = dev->mem_start;

        /* The lower four bits are the media type. */
        if (option & 0x200)
                np->mii_if.full_duplex = 1;

        if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
                np->mii_if.full_duplex = 1;

        if (np->mii_if.full_duplex)
                np->mii_if.force_media = 1;
        else
                np->mii_if.force_media = 0;
        np->speed100 = 1;

        /* timer resolution is 128 * 0.8us */
        np->intr_timer_ctrl = (((intr_latency * 10) / 1024) & IntrLatencyMask) |
                Timer10X | EnableIntrMasking;
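        /*
         * Worked example: one timer tick is 128 * 0.8us = 102.4us, and
         * (intr_latency * 10) / 1024 converts microseconds into ticks.
         * intr_latency=500 gives 5000/1024 = 4 ticks, an effective
         * latency of roughly 410us.
         */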

        if (small_frames > 0) {
                np->intr_timer_ctrl |= SmallFrameBypass;
                switch (small_frames) {
                case 1 ... 64:
                        np->intr_timer_ctrl |= SmallFrame64;
                        break;
                case 65 ... 128:
                        np->intr_timer_ctrl |= SmallFrame128;
                        break;
                case 129 ... 256:
                        np->intr_timer_ctrl |= SmallFrame256;
                        break;
                default:
                        np->intr_timer_ctrl |= SmallFrame512;
                        if (small_frames > 512)
                                printk("Adjusting small_frames down to 512\n");
                        break;
                }
        }

        /* The chip-specific entries in the device structure. */
        dev->open = &netdev_open;
        dev->hard_start_xmit = &start_tx;
        init_tx_timer(dev, tx_timeout, TX_TIMEOUT);
        dev->stop = &netdev_close;
        dev->get_stats = &get_stats;
        dev->set_multicast_list = &set_rx_mode;
        dev->do_ioctl = &netdev_ioctl;

        if (mtu)
                dev->mtu = mtu;

        if (register_netdev(dev))
                goto err_out_cleardev;

        printk(KERN_INFO "%s: %s at %#lx, ",
                   dev->name, netdrv_tbl[chip_idx].name, ioaddr);
        for (i = 0; i < 5; i++)
                printk("%2.2x:", dev->dev_addr[i]);
        printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);

        if (drv_flags & CanHaveMII) {
                int phy, phy_idx = 0;
                int mii_status;
                for (phy = 0; phy < 32 && phy_idx < PHY_CNT; phy++) {
                        mdio_write(dev, phy, MII_BMCR, BMCR_RESET);
                        mdelay(100);
                        boguscnt = 1000;
                        while (--boguscnt > 0)
                                if ((mdio_read(dev, phy, MII_BMCR) & BMCR_RESET) == 0)
                                        break;
                        if (boguscnt == 0) {
                                printk("%s: PHY reset never completed!\n", dev->name);
                                continue;
                        }
                        mii_status = mdio_read(dev, phy, MII_BMSR);
                        if (mii_status != 0) {
                                np->phys[phy_idx++] = phy;
                                np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
                                printk(KERN_INFO "%s: MII PHY found at address %d, status "
                                           "%#4.4x advertising %#4.4x.\n",
                                           dev->name, phy, mii_status, np->mii_if.advertising);
                                /* there can be only one PHY on-board */
                                break;
                        }
                }
                np->phy_cnt = phy_idx;
                if (np->phy_cnt > 0)
                        np->mii_if.phy_id = np->phys[0];
                else
                        memset(&np->mii_if, 0, sizeof(np->mii_if));
        }

        printk(KERN_INFO "%s: scatter-gather and hardware TCP cksumming %s.\n",
               dev->name, enable_hw_cksum ? "enabled" : "disabled");
        return 0;

err_out_cleardev:
        pci_set_drvdata(pdev, NULL);
        iounmap((void *)ioaddr);
err_out_free_res:
        pci_release_regions (pdev);
err_out_free_netdev:
        kfree(dev);
        return -ENODEV;
}


/* Read the MII Management Data I/O (MDIO) interfaces. */
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
        long mdio_addr = dev->base_addr + MIICtrl + (phy_id<<7) + (location<<2);
        int result, boguscnt=1000;
        /* ??? Should we add a busy-wait here? */
        do
                result = readl(mdio_addr);
        while ((result & 0xC0000000) != 0x80000000 && --boguscnt > 0);
        if (boguscnt == 0)
                return 0;
        if ((result & 0xffff) == 0xffff)
                return 0;
        return result & 0xffff;
}
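/*
 * Note on the poll above: the top two bits of the MII register window
 * appear to be a valid/busy handshake -- the loop spins until they read
 * back as 10 (0x80000000 under the 0xC0000000 mask) before trusting the
 * low 16 data bits, and an all-ones data field is taken to mean that no
 * PHY responded.
 */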


static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
        long mdio_addr = dev->base_addr + MIICtrl + (phy_id<<7) + (location<<2);
        writel(value, mdio_addr);
        /* The busy-wait will occur before a read. */
}


static int netdev_open(struct net_device *dev)
{
        struct netdev_private *np = dev->priv;
        long ioaddr = dev->base_addr;
        int i, retval;
        size_t tx_done_q_size, rx_done_q_size, tx_ring_size, rx_ring_size;

        /* Do we ever need to reset the chip??? */

        COMPAT_MOD_INC_USE_COUNT;

        retval = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
        if (retval) {
                COMPAT_MOD_DEC_USE_COUNT;
                return retval;
        }

        /* Disable the Rx and Tx, and reset the chip. */
        writel(0, ioaddr + GenCtrl);
        writel(1, ioaddr + PCIDeviceConfig);
        if (debug > 1)
                printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
                       dev->name, dev->irq);

        /* Allocate the various queues. */
        if (np->queue_mem == 0) {
                tx_done_q_size = ((sizeof(struct tx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
                rx_done_q_size = ((sizeof(rx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
                tx_ring_size = ((sizeof(starfire_tx_desc) * TX_RING_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
                rx_ring_size = sizeof(struct starfire_rx_desc) * RX_RING_SIZE;
                np->queue_mem_size = tx_done_q_size + rx_done_q_size + tx_ring_size + rx_ring_size;
                np->queue_mem = pci_alloc_consistent(np->pci_dev, np->queue_mem_size, &np->queue_mem_dma);
                if (np->queue_mem == 0) {
                        COMPAT_MOD_DEC_USE_COUNT;
                        return -ENOMEM;
                }

                np->tx_done_q     = np->queue_mem;
                np->tx_done_q_dma = np->queue_mem_dma;
                np->rx_done_q     = (void *) np->tx_done_q + tx_done_q_size;
                np->rx_done_q_dma = np->tx_done_q_dma + tx_done_q_size;
                np->tx_ring       = (void *) np->rx_done_q + rx_done_q_size;
                np->tx_ring_dma   = np->rx_done_q_dma + rx_done_q_size;
                np->rx_ring       = (void *) np->tx_ring + tx_ring_size;
                np->rx_ring_dma   = np->tx_ring_dma + tx_ring_size;
        }

        /* Start with no carrier, it gets adjusted later */
        netif_carrier_off(dev);
        init_ring(dev);
        /* Set the size of the Rx buffers. */
        writel((np->rx_buf_sz << RxBufferLenShift) |
               (0 << RxMinDescrThreshShift) |
               RxPrefetchMode | RxVariableQ |
               RX_Q_ENTRIES |
               RX_DESC_Q_ADDR_SIZE | RX_DESC_ADDR_SIZE |
               RxDescSpace4,
               ioaddr + RxDescQCtrl);

        /* Set up the Rx DMA controller. */
        writel(RxChecksumIgnore |
               (0 << RxEarlyIntThreshShift) |
               (6 << RxHighPrioThreshShift) |
               ((DMA_BURST_SIZE / 32) << RxBurstSizeShift),
               ioaddr + RxDMACtrl);

        /* Set Tx descriptor */
        writel((2 << TxHiPriFIFOThreshShift) |
               (0 << TxPadLenShift) |
               ((DMA_BURST_SIZE / 32) << TxDMABurstSizeShift) |
               TX_DESC_Q_ADDR_SIZE |
               TX_DESC_SPACING | TX_DESC_TYPE,
               ioaddr + TxDescCtrl);

        writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + RxDescQHiAddr);
        writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + TxRingHiAddr);
        writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + CompletionHiAddr);
        writel(np->rx_ring_dma, ioaddr + RxDescQAddr);
        writel(np->tx_ring_dma, ioaddr + TxRingPtr);

        writel(np->tx_done_q_dma, ioaddr + TxCompletionAddr);
        writel(np->rx_done_q_dma |
               RxComplType |
               (0 << RxComplThreshShift),
               ioaddr + RxCompletionAddr);

        if (debug > 1)
                printk(KERN_DEBUG "%s: Filling in the station address.\n", dev->name);

        /* Fill both the Tx SA register and the Rx perfect filter. */
        for (i = 0; i < 6; i++)
                writeb(dev->dev_addr[i], ioaddr + TxStationAddr + 5 - i);
        /* The first entry is special because it bypasses the VLAN filter.
           Don't use it. */
        writew(0, ioaddr + PerfFilterTable);
        writew(0, ioaddr + PerfFilterTable + 4);
        writew(0, ioaddr + PerfFilterTable + 8);
        for (i = 1; i < 16; i++) {
                u16 *eaddrs = (u16 *)dev->dev_addr;
                long setup_frm = ioaddr + PerfFilterTable + i * 16;
                writew(cpu_to_be16(eaddrs[2]), setup_frm); setup_frm += 4;
                writew(cpu_to_be16(eaddrs[1]), setup_frm); setup_frm += 4;
                writew(cpu_to_be16(eaddrs[0]), setup_frm); setup_frm += 8;
        }
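        /*
         * Each of the remaining fifteen 16-byte filter slots is loaded
         * with our own station address, written as three 16-bit words in
         * reverse word order and passed through cpu_to_be16() to match
         * the byte layout the chip expects.
         */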
1154
 
1155
        /* Initialize other registers. */
1156
        /* Configure the PCI bus bursts and FIFO thresholds. */
1157
        np->tx_mode = TxFlowEnable|RxFlowEnable|PadEnable;      /* modified when link is up. */
1158
        writel(MiiSoftReset | np->tx_mode, ioaddr + TxMode);
1159
        udelay(1000);
1160
        writel(np->tx_mode, ioaddr + TxMode);
1161
        np->tx_threshold = 4;
1162
        writel(np->tx_threshold, ioaddr + TxThreshold);
1163
 
1164
        writel(np->intr_timer_ctrl, ioaddr + IntrTimerCtrl);
1165
 
1166
        netif_start_if(dev);
1167
        netif_start_queue(dev);
1168
 
1169
        if (debug > 1)
1170
                printk(KERN_DEBUG "%s: Setting the Rx and Tx modes.\n", dev->name);
1171
        set_rx_mode(dev);
1172
 
1173
        np->mii_if.advertising = mdio_read(dev, np->phys[0], MII_ADVERTISE);
1174
        check_duplex(dev);
1175
 
1176
        /* Enable GPIO interrupts on link change */
1177
        writel(0x0f00ff00, ioaddr + GPIOCtrl);
1178
 
1179
        /* Set the interrupt mask */
1180
        writel(IntrRxDone | IntrRxEmpty | IntrDMAErr |
1181
               IntrTxDMADone | IntrStatsMax | IntrLinkChange |
1182
               IntrRxGFPDead | IntrNoTxCsum | IntrTxBadID,
1183
               ioaddr + IntrEnable);
1184
        /* Enable PCI interrupts. */
1185
        writel(0x00800000 | readl(ioaddr + PCIDeviceConfig),
1186
               ioaddr + PCIDeviceConfig);
1187
 
1188
#ifdef VLAN_SUPPORT
1189
        /* Set VLAN type to 802.1q */
1190
        writel(ETH_P_8021Q, ioaddr + VlanType);
1191
#endif /* VLAN_SUPPORT */
1192
 
1193
#ifdef HAS_FIRMWARE
1194
        /* Load Rx/Tx firmware into the frame processors */
1195
        for (i = 0; i < FIRMWARE_RX_SIZE * 2; i++)
1196
                writel(firmware_rx[i], ioaddr + RxGfpMem + i * 4);
1197
        for (i = 0; i < FIRMWARE_TX_SIZE * 2; i++)
1198
                writel(firmware_tx[i], ioaddr + TxGfpMem + i * 4);
1199
#endif /* HAS_FIRMWARE */
1200
        if (enable_hw_cksum)
1201
                /* Enable the Rx and Tx units, and the Rx/Tx frame processors. */
1202
                writel(TxEnable|TxGFPEnable|RxEnable|RxGFPEnable, ioaddr + GenCtrl);
1203
        else
1204
                /* Enable the Rx and Tx units only. */
1205
                writel(TxEnable|RxEnable, ioaddr + GenCtrl);
1206
 
1207
        if (debug > 1)
1208
                printk(KERN_DEBUG "%s: Done netdev_open().\n",
1209
                       dev->name);
1210
 
1211
        return 0;
1212
}
1213
 
1214
 
1215
static void check_duplex(struct net_device *dev)
1216
{
1217
        struct netdev_private *np = dev->priv;
1218
        u16 reg0;
1219
        int silly_count = 1000;
1220
 
1221
        mdio_write(dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising);
1222
        mdio_write(dev, np->phys[0], MII_BMCR, BMCR_RESET);
1223
        udelay(500);
1224
        while (--silly_count && mdio_read(dev, np->phys[0], MII_BMCR) & BMCR_RESET)
1225
                /* do nothing */;
1226
        if (!silly_count) {
1227
                printk("%s: MII reset failed!\n", dev->name);
1228
                return;
1229
        }
1230
 
1231
        reg0 = mdio_read(dev, np->phys[0], MII_BMCR);
1232
 
1233
        if (!np->mii_if.force_media) {
1234
                reg0 |= BMCR_ANENABLE | BMCR_ANRESTART;
1235
        } else {
1236
                reg0 &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
1237
                if (np->speed100)
1238
                        reg0 |= BMCR_SPEED100;
1239
                if (np->mii_if.full_duplex)
1240
                        reg0 |= BMCR_FULLDPLX;
1241
                printk(KERN_DEBUG "%s: Link forced to %sMbit %s-duplex\n",
1242
                       dev->name,
1243
                       np->speed100 ? "100" : "10",
1244
                       np->mii_if.full_duplex ? "full" : "half");
1245
        }
1246
        mdio_write(dev, np->phys[0], MII_BMCR, reg0);
1247
}
1248
 
1249
 
1250
static void tx_timeout(struct net_device *dev)
1251
{
1252
        struct netdev_private *np = dev->priv;
1253
        long ioaddr = dev->base_addr;
1254
        int old_debug;
1255
 
1256
        printk(KERN_WARNING "%s: Transmit timed out, status %#8.8x, "
1257
               "resetting...\n", dev->name, (int) readl(ioaddr + IntrStatus));
1258
 
1259
        /* Perhaps we should reinitialize the hardware here. */
1260
 
1261
        /*
1262
         * Stop and restart the interface.
1263
         * Cheat and increase the debug level temporarily.
1264
         */
1265
        old_debug = debug;
1266
        debug = 2;
1267
        netdev_close(dev);
1268
        netdev_open(dev);
1269
        debug = old_debug;
1270
 
1271
        /* Trigger an immediate transmit demand. */
1272
 
1273
        dev->trans_start = jiffies;
1274
        np->stats.tx_errors++;
1275
        netif_wake_queue(dev);
1276
}
1277
 
1278
 
1279
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1280
static void init_ring(struct net_device *dev)
1281
{
1282
        struct netdev_private *np = dev->priv;
1283
        int i;
1284
 
1285
        np->cur_rx = np->cur_tx = np->reap_tx = 0;
1286
        np->dirty_rx = np->dirty_tx = np->rx_done = np->tx_done = 0;
1287
 
1288
        np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1289
 
1290
        /* Fill in the Rx buffers.  Handle allocation failure gracefully. */
1291
        for (i = 0; i < RX_RING_SIZE; i++) {
1292
                struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
1293
                np->rx_info[i].skb = skb;
1294
                if (skb == NULL)
1295
                        break;
1296
                np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->tail, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1297
                skb->dev = dev;                 /* Mark as being used by this device. */
1298
                /* Grrr, we cannot offset to correctly align the IP header. */
1299
                np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
1300
        }
1301
        writew(i - 1, dev->base_addr + RxDescQIdx);
1302
        np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1303
 
1304
        /* Clear the remainder of the Rx buffer ring. */
1305
        for (  ; i < RX_RING_SIZE; i++) {
1306
                np->rx_ring[i].rxaddr = 0;
1307
                np->rx_info[i].skb = NULL;
1308
                np->rx_info[i].mapping = 0;
1309
        }
1310
        /* Mark the last entry as wrapping the ring. */
1311
        np->rx_ring[RX_RING_SIZE - 1].rxaddr |= cpu_to_dma(RxDescEndRing);
1312
 
1313
        /* Clear the completion rings. */
1314
        for (i = 0; i < DONE_Q_SIZE; i++) {
1315
                np->rx_done_q[i].status = 0;
1316
                np->tx_done_q[i].status = 0;
1317
        }
1318
 
1319
        for (i = 0; i < TX_RING_SIZE; i++)
1320
                memset(&np->tx_info[i], 0, sizeof(np->tx_info[i]));
1321
 
1322
        return;
1323
}
1324
 
1325
 
1326
static int start_tx(struct sk_buff *skb, struct net_device *dev)
1327
{
1328
        struct netdev_private *np = dev->priv;
1329
        unsigned int entry;
1330
        u32 status;
1331
        int i;
1332
 
1333
        kick_tx_timer(dev, tx_timeout, TX_TIMEOUT);
1334
 
1335
        /*
1336
         * be cautious here, wrapping the queue has weird semantics
1337
         * and we may not have enough slots even when it seems we do.
1338
         */
1339
        if ((np->cur_tx - np->dirty_tx) + skb_num_frags(skb) * 2 > TX_RING_SIZE) {
1340
                netif_stop_queue(dev);
1341
                return 1;
1342
        }
1343
 
1344
#if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
1345
        {
1346
                int has_bad_length = 0;
1347
 
1348
                if (skb_first_frag_len(skb) == 1)
1349
                        has_bad_length = 1;
1350
                else {
1351
                        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1352
                                if (skb_shinfo(skb)->frags[i].size == 1) {
1353
                                        has_bad_length = 1;
1354
                                        break;
1355
                                }
1356
                }
1357
 
1358
                if (has_bad_length)
1359
                        skb_checksum_help(skb);
1360
        }
1361
#endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */
1362
 
1363
        entry = np->cur_tx % TX_RING_SIZE;
1364
        for (i = 0; i < skb_num_frags(skb); i++) {
1365
                int wrap_ring = 0;
1366
                status = TxDescID;
1367
 
1368
                if (i == 0) {
1369
                        np->tx_info[entry].skb = skb;
1370
                        status |= TxCRCEn;
1371
                        if (entry >= TX_RING_SIZE - skb_num_frags(skb)) {
1372
                                status |= TxRingWrap;
1373
                                wrap_ring = 1;
1374
                        }
1375
                        if (np->reap_tx) {
1376
                                status |= TxDescIntr;
1377
                                np->reap_tx = 0;
1378
                        }
1379
                        if (skb->ip_summed == CHECKSUM_HW) {
1380
                                status |= TxCalTCP;
1381
                                np->stats.tx_compressed++;
1382
                        }
                        status |= skb_first_frag_len(skb) | (skb_num_frags(skb) << 16);

                        np->tx_info[entry].mapping =
                                pci_map_single(np->pci_dev, skb->data, skb_first_frag_len(skb), PCI_DMA_TODEVICE);
                } else {
#ifdef MAX_SKB_FRAGS
                        skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i - 1];
                        status |= this_frag->size;
                        np->tx_info[entry].mapping =
                                pci_map_single(np->pci_dev, page_address(this_frag->page) + this_frag->page_offset, this_frag->size, PCI_DMA_TODEVICE);
#endif /* MAX_SKB_FRAGS */
                }

                np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
                np->tx_ring[entry].status = cpu_to_le32(status);
                if (debug > 3)
                        printk(KERN_DEBUG "%s: Tx #%d/#%d slot %d status %#8.8x.\n",
                               dev->name, np->cur_tx, np->dirty_tx,
                               entry, status);
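                /* If this frame's descriptors reach the end of the ring,
                   the unused tail slots are charged to this entry via
                   used_slots so the dirty_tx accounting stays in step. */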
                if (wrap_ring) {
                        np->tx_info[entry].used_slots = TX_RING_SIZE - entry;
                        np->cur_tx += np->tx_info[entry].used_slots;
                        entry = 0;
                } else {
                        np->tx_info[entry].used_slots = 1;
                        np->cur_tx += np->tx_info[entry].used_slots;
                        entry++;
                }
                /* Scavenge the Tx descriptors about twice per ring traversal:
                   request a Tx-done interrupt every TX_RING_SIZE/2 descriptors. */
                if (np->cur_tx % (TX_RING_SIZE / 2) == 0)
                        np->reap_tx = 1;
        }

        /* Non-x86: explicitly flush descriptor cache lines here. */
        /* Ensure all descriptors are written back before the transmit is
           initiated. - Jes */
        wmb();
        /* Update the producer index. */
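        /* The producer register apparently takes the index in 8-byte units,
           hence the sizeof(starfire_tx_desc) / 8 scaling. */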
        writel(entry * (sizeof(starfire_tx_desc) / 8), dev->base_addr + TxProducerIdx);

        /* 4 is arbitrary, but should be ok */
        if ((np->cur_tx - np->dirty_tx) + 4 > TX_RING_SIZE)
                netif_stop_queue(dev);

        dev->trans_start = jiffies;

        return 0;
}


/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
{
        struct net_device *dev = dev_instance;
        struct netdev_private *np;
        long ioaddr;
        int boguscnt = max_interrupt_work;
        int consumer;
        int tx_status;

        ioaddr = dev->base_addr;
        np = dev->priv;
        do {
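                /* Reading IntrClear both returns and acknowledges the pending
                   interrupt sources.  Zero means the interrupt was not ours;
                   all-ones typically means the hardware is gone. */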
                u32 intr_status = readl(ioaddr + IntrClear);

                if (debug > 4)
                        printk(KERN_DEBUG "%s: Interrupt status %#8.8x.\n",
                               dev->name, intr_status);

                if (intr_status == 0 || intr_status == (u32) -1)
                        break;

                if (intr_status & (IntrRxDone | IntrRxEmpty))
                        netdev_rx(dev);

                /* Scavenge the skbuff list based on the Tx-done queue.
                   There are redundant checks here that may be cleaned up
                   after the driver has proven to be reliable. */
                consumer = readl(ioaddr + TxConsumerIdx);
                if (debug > 3)
                        printk(KERN_DEBUG "%s: Tx Consumer index is %d.\n",
                               dev->name, consumer);

                while ((tx_status = le32_to_cpu(np->tx_done_q[np->tx_done].status)) != 0) {
                        if (debug > 3)
                                printk(KERN_DEBUG "%s: Tx completion #%d entry %d is %#8.8x.\n",
                                       dev->name, np->dirty_tx, np->tx_done, tx_status);
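                        /* The top three status bits appear to encode the
                           completion type: 101 = frame transmitted, 100 =
                           Tx-done descriptor whose low 15 bits hold the byte
                           offset of the corresponding Tx ring entry. */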
                        if ((tx_status & 0xe0000000) == 0xa0000000) {
                                np->stats.tx_packets++;
                        } else if ((tx_status & 0xe0000000) == 0x80000000) {
                                u16 entry = (tx_status & 0x7fff) / sizeof(starfire_tx_desc);
                                struct sk_buff *skb = np->tx_info[entry].skb;
                                np->tx_info[entry].skb = NULL;
                                pci_unmap_single(np->pci_dev,
                                                 np->tx_info[entry].mapping,
                                                 skb_first_frag_len(skb),
                                                 PCI_DMA_TODEVICE);
                                np->tx_info[entry].mapping = 0;
                                np->dirty_tx += np->tx_info[entry].used_slots;
                                entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
#ifdef MAX_SKB_FRAGS
                                {
                                        int i;
                                        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                                                pci_unmap_single(np->pci_dev,
                                                                 np->tx_info[entry].mapping,
                                                                 skb_shinfo(skb)->frags[i].size,
                                                                 PCI_DMA_TODEVICE);
                                                np->dirty_tx++;
                                                entry++;
                                        }
                                }
#endif /* MAX_SKB_FRAGS */
                                dev_kfree_skb_irq(skb);
                        }
                        np->tx_done_q[np->tx_done].status = 0;
                        np->tx_done = (np->tx_done + 1) % DONE_Q_SIZE;
                }
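                /* The Tx consumer index presumably occupies the upper half of
                   the 32-bit CompletionQConsumerIdx register, hence the +2
                   byte offset for this 16-bit write. */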
                writew(np->tx_done, ioaddr + CompletionQConsumerIdx + 2);

                if (netif_queue_stopped(dev) &&
                    (np->cur_tx - np->dirty_tx + 4 < TX_RING_SIZE)) {
                        /* The ring is no longer full, wake the queue. */
                        netif_wake_queue(dev);
                }

                /* Stats overflow */
                if (intr_status & IntrStatsMax)
                        get_stats(dev);

                /* Media change interrupt. */
                if (intr_status & IntrLinkChange)
                        netdev_media_change(dev);

                /* Abnormal error summary/uncommon events handlers. */
                if (intr_status & IntrAbnormalSummary)
                        netdev_error(dev, intr_status);

                if (--boguscnt < 0) {
                        if (debug > 1)
                                printk(KERN_WARNING "%s: Too much work at interrupt, "
                                       "status=%#8.8x.\n",
                                       dev->name, intr_status);
                        break;
                }
        } while (1);

        if (debug > 4)
                printk(KERN_DEBUG "%s: exiting interrupt, status=%#8.8x.\n",
                       dev->name, (int) readl(ioaddr + IntrStatus));
}


/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation. */
static int netdev_rx(struct net_device *dev)
{
        struct netdev_private *np = dev->priv;
        int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
        u32 desc_status;

        /* If EOP is set on the next entry, it's a new packet. Send it up. */
        while ((desc_status = le32_to_cpu(np->rx_done_q[np->rx_done].status)) != 0) {
                struct sk_buff *skb;
                u16 pkt_len;
                int entry;
                rx_done_desc *desc = &np->rx_done_q[np->rx_done];

                if (debug > 4)
                        printk(KERN_DEBUG "  netdev_rx() status of %d was %#8.8x.\n", np->rx_done, desc_status);
                if (--boguscnt < 0)
                        break;
                if (!(desc_status & RxOK)) {
                        /* There was an error. */
                        if (debug > 2)
                                printk(KERN_DEBUG "  netdev_rx() Rx error was %#8.8x.\n", desc_status);
                        np->stats.rx_errors++;
                        if (desc_status & RxFIFOErr)
                                np->stats.rx_fifo_errors++;
                        goto next_rx;
                }
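
                /* A good completion packs the frame length in the low 16 bits
                   of the status word and the Rx ring entry index in bits
                   16-26. */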
                pkt_len = desc_status;  /* Implicitly truncated to 16 bits. */
                entry = (desc_status >> 16) & 0x7ff;

                if (debug > 4)
                        printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d, bogus_cnt %d.\n", pkt_len, boguscnt);
                /* Check if the packet is long enough to accept without copying
                   to a minimally-sized skbuff. */
                if (pkt_len < rx_copybreak
                    && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
                        skb->dev = dev;
                        skb_reserve(skb, 2);    /* 16 byte align the IP header */
                        pci_dma_sync_single(np->pci_dev,
                                            np->rx_info[entry].mapping,
                                            pkt_len, PCI_DMA_FROMDEVICE);
                        eth_copy_and_sum(skb, np->rx_info[entry].skb->tail, pkt_len, 0);
                        skb_put(skb, pkt_len);
                } else {
                        pci_unmap_single(np->pci_dev, np->rx_info[entry].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
                        skb = np->rx_info[entry].skb;
                        skb_put(skb, pkt_len);
                        np->rx_info[entry].skb = NULL;
                        np->rx_info[entry].mapping = 0;
                }
#ifndef final_version                   /* Remove after testing. */
                /* You will want this info for the initial debug. */
                if (debug > 5)
                        printk(KERN_DEBUG "  Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:"
                               "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x.\n",
                               skb->data[0], skb->data[1], skb->data[2], skb->data[3],
                               skb->data[4], skb->data[5], skb->data[6], skb->data[7],
                               skb->data[8], skb->data[9], skb->data[10],
                               skb->data[11], skb->data[12], skb->data[13]);
#endif

                skb->protocol = eth_type_trans(skb, dev);
#if defined(HAS_FIRMWARE) || defined(VLAN_SUPPORT)
                if (debug > 4)
                        printk(KERN_DEBUG "  netdev_rx() status2 of %d was %#4.4x.\n", np->rx_done, le16_to_cpu(desc->status2));
#endif
#ifdef HAS_FIRMWARE
                if (le16_to_cpu(desc->status2) & 0x0100) {
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        np->stats.rx_compressed++;
                }
                /*
                 * This feature doesn't seem to be working, at least
                 * with the two firmware versions I have. If the GFP sees
                 * an IP fragment, it either ignores it completely, or reports
                 * "bad checksum" on it.
                 *
                 * Maybe I missed something -- corrections are welcome.
                 * Until then, the printk stays. :-) -Ion
                 */
                else if (le16_to_cpu(desc->status2) & 0x0040) {
                        skb->ip_summed = CHECKSUM_HW;
                        skb->csum = le16_to_cpu(desc->csum);
                        printk(KERN_DEBUG "%s: checksum_hw, status2 = %#x\n", dev->name, le16_to_cpu(desc->status2));
                }
#endif /* HAS_FIRMWARE */
#ifdef VLAN_SUPPORT
                if (np->vlgrp && le16_to_cpu(desc->status2) & 0x0200) {
                        if (debug > 4)
                                printk(KERN_DEBUG "  netdev_rx() vlanid = %d\n", le16_to_cpu(desc->vlanid));
                        /* vlan_hwaccel_rx expects a packet with the VLAN tag stripped out */
                        vlan_hwaccel_rx(skb, np->vlgrp, le16_to_cpu(desc->vlanid) & VLAN_VID_MASK);
                } else
#endif /* VLAN_SUPPORT */
                        netif_rx(skb);
                dev->last_rx = jiffies;
                np->stats.rx_packets++;

        next_rx:
                np->cur_rx++;
                desc->status = 0;
                np->rx_done = (np->rx_done + 1) % DONE_Q_SIZE;
        }
        writew(np->rx_done, dev->base_addr + CompletionQConsumerIdx);

        /* Refill the Rx ring buffers. */
        for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
                struct sk_buff *skb;
                int entry = np->dirty_rx % RX_RING_SIZE;
                if (np->rx_info[entry].skb == NULL) {
                        skb = dev_alloc_skb(np->rx_buf_sz);
                        np->rx_info[entry].skb = skb;
                        if (skb == NULL)
                                break;  /* Better luck next round. */
                        np->rx_info[entry].mapping =
                                pci_map_single(np->pci_dev, skb->tail, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
                        skb->dev = dev; /* Mark as being used by this device. */
                        np->rx_ring[entry].rxaddr =
                                cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
                }
                if (entry == RX_RING_SIZE - 1)
                        np->rx_ring[entry].rxaddr |= cpu_to_dma(RxDescEndRing);
                /* We could defer this until later... */
                writew(entry, dev->base_addr + RxDescQIdx);
        }

        if (debug > 5)
                printk(KERN_DEBUG "  exiting netdev_rx() status of %d was %#8.8x.\n",
                       np->rx_done, desc_status);

        /* Restart Rx engine if stopped. */
        return 0;
}


static void netdev_media_change(struct net_device *dev)
{
        struct netdev_private *np = dev->priv;
        long ioaddr = dev->base_addr;
        u16 reg0, reg1, reg4, reg5;
        u32 new_tx_mode;
        u32 new_intr_timer_ctrl;

        /* reset status first */
        mdio_read(dev, np->phys[0], MII_BMCR);
        mdio_read(dev, np->phys[0], MII_BMSR);
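        /* The BMSR latches link-down events; the dummy reads above flush any
           stale latched state so the reads below reflect the current link
           status. */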

        reg0 = mdio_read(dev, np->phys[0], MII_BMCR);
        reg1 = mdio_read(dev, np->phys[0], MII_BMSR);

        if (reg1 & BMSR_LSTATUS) {
                /* link is up */
                if (reg0 & BMCR_ANENABLE) {
                        /* autonegotiation is enabled */
                        reg4 = mdio_read(dev, np->phys[0], MII_ADVERTISE);
                        reg5 = mdio_read(dev, np->phys[0], MII_LPA);
                        if (reg4 & ADVERTISE_100FULL && reg5 & LPA_100FULL) {
                                np->speed100 = 1;
                                np->mii_if.full_duplex = 1;
                        } else if (reg4 & ADVERTISE_100HALF && reg5 & LPA_100HALF) {
                                np->speed100 = 1;
                                np->mii_if.full_duplex = 0;
                        } else if (reg4 & ADVERTISE_10FULL && reg5 & LPA_10FULL) {
                                np->speed100 = 0;
                                np->mii_if.full_duplex = 1;
                        } else {
                                np->speed100 = 0;
                                np->mii_if.full_duplex = 0;
                        }
                } else {
                        /* autonegotiation is disabled */
                        if (reg0 & BMCR_SPEED100)
                                np->speed100 = 1;
                        else
                                np->speed100 = 0;
                        if (reg0 & BMCR_FULLDPLX)
                                np->mii_if.full_duplex = 1;
                        else
                                np->mii_if.full_duplex = 0;
                }
                netif_carrier_on(dev);
                printk(KERN_DEBUG "%s: Link is up, running at %sMbit %s-duplex\n",
                       dev->name,
                       np->speed100 ? "100" : "10",
                       np->mii_if.full_duplex ? "full" : "half");

                new_tx_mode = np->tx_mode & ~FullDuplex;        /* duplex setting */
                if (np->mii_if.full_duplex)
                        new_tx_mode |= FullDuplex;
                if (np->tx_mode != new_tx_mode) {
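                        /* Applying a duplex change apparently requires pulsing
                           MiiSoftReset around the TxMode update. */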
                        np->tx_mode = new_tx_mode;
                        writel(np->tx_mode | MiiSoftReset, ioaddr + TxMode);
                        udelay(1000);
                        writel(np->tx_mode, ioaddr + TxMode);
                }

                new_intr_timer_ctrl = np->intr_timer_ctrl & ~Timer10X;
                if (np->speed100)
                        new_intr_timer_ctrl |= Timer10X;
                if (np->intr_timer_ctrl != new_intr_timer_ctrl) {
                        np->intr_timer_ctrl = new_intr_timer_ctrl;
                        writel(new_intr_timer_ctrl, ioaddr + IntrTimerCtrl);
                }
        } else {
                netif_carrier_off(dev);
                printk(KERN_DEBUG "%s: Link is down\n", dev->name);
        }
}


static void netdev_error(struct net_device *dev, int intr_status)
{
        struct netdev_private *np = dev->priv;

        /* Came close to underrunning the Tx FIFO, increase threshold. */
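        /* The threshold register counts 16-byte units (hence the * 16 in the
           printk below); PKT_BUF_SZ / 16 caps it at roughly one full packet's
           worth of FIFO. */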
        if (intr_status & IntrTxDataLow) {
                if (np->tx_threshold <= PKT_BUF_SZ / 16) {
                        writel(++np->tx_threshold, dev->base_addr + TxThreshold);
                        printk(KERN_NOTICE "%s: PCI bus congestion, increasing Tx FIFO threshold to %d bytes\n",
                               dev->name, np->tx_threshold * 16);
                } else
                        printk(KERN_WARNING "%s: PCI Tx underflow -- adapter is probably malfunctioning\n", dev->name);
        }
        if (intr_status & IntrRxGFPDead) {
                np->stats.rx_fifo_errors++;
                np->stats.rx_errors++;
        }
        if (intr_status & (IntrNoTxCsum | IntrDMAErr)) {
                np->stats.tx_fifo_errors++;
                np->stats.tx_errors++;
        }
        if ((intr_status & ~(IntrNormalMask | IntrAbnormalSummary | IntrLinkChange | IntrStatsMax | IntrTxDataLow | IntrRxGFPDead | IntrNoTxCsum | IntrPCIPad)) && debug)
                printk(KERN_ERR "%s: Something Wicked happened! %#8.8x.\n",
                       dev->name, intr_status);
}


static struct net_device_stats *get_stats(struct net_device *dev)
{
        long ioaddr = dev->base_addr;
        struct netdev_private *np = dev->priv;

        /* This adapter architecture needs no SMP locks. */
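        /* The 0x570xx addresses are the chip's hardware statistics counters.
           Only rx_dropped accumulates with += because its source register
           (RxDMAStatus) is explicitly cleared after each read below. */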
        np->stats.tx_bytes = readl(ioaddr + 0x57010);
        np->stats.rx_bytes = readl(ioaddr + 0x57044);
        np->stats.tx_packets = readl(ioaddr + 0x57000);
        np->stats.tx_aborted_errors =
                readl(ioaddr + 0x57024) + readl(ioaddr + 0x57028);
        np->stats.tx_window_errors = readl(ioaddr + 0x57018);
        np->stats.collisions =
                readl(ioaddr + 0x57004) + readl(ioaddr + 0x57008);

        /* The chip only needs to report frames it silently dropped. */
        np->stats.rx_dropped += readw(ioaddr + RxDMAStatus);
        writew(0, ioaddr + RxDMAStatus);
        np->stats.rx_crc_errors = readl(ioaddr + 0x5703C);
        np->stats.rx_frame_errors = readl(ioaddr + 0x57040);
        np->stats.rx_length_errors = readl(ioaddr + 0x57058);
        np->stats.rx_missed_errors = readl(ioaddr + 0x5707C);

        return &np->stats;
}


/* Chips may use the upper or lower CRC bits, and may reverse and/or invert
   them.  Select the endianness that results in minimal calculations.
*/
static void set_rx_mode(struct net_device *dev)
{
        long ioaddr = dev->base_addr;
        u32 rx_mode = MinVLANPrio;
        struct dev_mc_list *mclist;
        int i;
#ifdef VLAN_SUPPORT
        struct netdev_private *np = dev->priv;

        rx_mode |= VlanMode;
        if (np->vlgrp) {
                int vlan_count = 0;
                long filter_addr = ioaddr + HashTable + 8;
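                /* Up to 32 VLAN ids fit in the perfect VLAN filter, which
                   apparently shares the hash-table register space: one
                   big-endian 16-bit VID per 16-byte slot. */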
                for (i = 0; i < VLAN_VID_MASK; i++) {
                        if (np->vlgrp->vlan_devices[i]) {
                                if (vlan_count >= 32)
                                        break;
                                writew(cpu_to_be16(i), filter_addr);
                                filter_addr += 16;
                                vlan_count++;
                        }
                }
                if (i == VLAN_VID_MASK) {
                        rx_mode |= PerfectFilterVlan;
                        while (vlan_count < 32) {
                                writew(0, filter_addr);
                                filter_addr += 16;
                                vlan_count++;
                        }
                }
        }
#endif /* VLAN_SUPPORT */

        if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
                rx_mode |= AcceptAll;
        } else if ((dev->mc_count > multicast_filter_limit)
                   || (dev->flags & IFF_ALLMULTI)) {
                /* Too many to match, or accept all multicasts. */
                rx_mode |= AcceptBroadcast|AcceptAllMulticast|PerfectFilter;
        } else if (dev->mc_count <= 14) {
                /* Use the 16 element perfect filter, skip first two entries. */
                long filter_addr = ioaddr + PerfFilterTable + 2 * 16;
                u16 *eaddrs;
                for (i = 2, mclist = dev->mc_list; mclist && i < dev->mc_count + 2;
                     i++, mclist = mclist->next) {
                        eaddrs = (u16 *)mclist->dmi_addr;
                        writew(cpu_to_be16(eaddrs[2]), filter_addr); filter_addr += 4;
                        writew(cpu_to_be16(eaddrs[1]), filter_addr); filter_addr += 4;
                        writew(cpu_to_be16(eaddrs[0]), filter_addr); filter_addr += 8;
                }
                eaddrs = (u16 *)dev->dev_addr;
                while (i++ < 16) {
                        writew(cpu_to_be16(eaddrs[0]), filter_addr); filter_addr += 4;
                        writew(cpu_to_be16(eaddrs[1]), filter_addr); filter_addr += 4;
                        writew(cpu_to_be16(eaddrs[2]), filter_addr); filter_addr += 8;
                }
                rx_mode |= AcceptBroadcast|PerfectFilter;
        } else {
                /* Must use a multicast hash table. */
                long filter_addr;
                u16 *eaddrs;
                u16 mc_filter[32] __attribute__ ((aligned(sizeof(long))));      /* Multicast hash filter */

                memset(mc_filter, 0, sizeof(mc_filter));
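                /* The hash filter is 512 bits (32 16-bit words): the top 9
                   bits of the little-endian CRC (>> 23) select one bit, and
                   fptr addresses the aligned 32-bit word containing that bit
                   so the cpu_to_le32() merge below is endian-safe. */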
                for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
                     i++, mclist = mclist->next) {
                        int bit_nr = ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 23;
                        __u32 *fptr = (__u32 *) &mc_filter[(bit_nr >> 4) & ~1];

                        *fptr |= cpu_to_le32(1 << (bit_nr & 31));
                }
                /* Clear the perfect filter list, skip first two entries. */
                filter_addr = ioaddr + PerfFilterTable + 2 * 16;
                eaddrs = (u16 *)dev->dev_addr;
                for (i = 2; i < 16; i++) {
                        writew(cpu_to_be16(eaddrs[0]), filter_addr); filter_addr += 4;
                        writew(cpu_to_be16(eaddrs[1]), filter_addr); filter_addr += 4;
                        writew(cpu_to_be16(eaddrs[2]), filter_addr); filter_addr += 8;
                }
                for (filter_addr = ioaddr + HashTable, i = 0; i < 32; filter_addr += 16, i++)
                        writew(mc_filter[i], filter_addr);
                rx_mode |= AcceptBroadcast|PerfectFilter|HashFilter;
        }
        writel(rx_mode, ioaddr + RxFilterMode);
}


static int netdev_ethtool_ioctl(struct net_device *dev, void *useraddr)
{
        struct ethtool_cmd ecmd;
        struct netdev_private *np = dev->priv;

        if (copy_from_user(&ecmd, useraddr, sizeof(ecmd)))
                return -EFAULT;
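
        /* Only ecmd.cmd is examined for dispatch; sub-commands that need the
           full user structure copy it in again themselves. */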
        switch (ecmd.cmd) {
        case ETHTOOL_GDRVINFO: {
                struct ethtool_drvinfo info;
                memset(&info, 0, sizeof(info));
                info.cmd = ecmd.cmd;
                strcpy(info.driver, DRV_NAME);
                strcpy(info.version, DRV_VERSION);
                *info.fw_version = 0;
                strcpy(info.bus_info, PCI_SLOT_NAME(np->pci_dev));
                if (copy_to_user(useraddr, &info, sizeof(info)))
                        return -EFAULT;
                return 0;
        }

        /* get settings */
        case ETHTOOL_GSET: {
                struct ethtool_cmd ecmd = { ETHTOOL_GSET };
                spin_lock_irq(&np->lock);
                mii_ethtool_gset(&np->mii_if, &ecmd);
                spin_unlock_irq(&np->lock);
                if (copy_to_user(useraddr, &ecmd, sizeof(ecmd)))
                        return -EFAULT;
                return 0;
        }
        /* set settings */
        case ETHTOOL_SSET: {
                int r;
                struct ethtool_cmd ecmd;
                if (copy_from_user(&ecmd, useraddr, sizeof(ecmd)))
                        return -EFAULT;
                spin_lock_irq(&np->lock);
                r = mii_ethtool_sset(&np->mii_if, &ecmd);
                spin_unlock_irq(&np->lock);
                check_duplex(dev);
                return r;
        }
        /* restart autonegotiation */
        case ETHTOOL_NWAY_RST: {
                return mii_nway_restart(&np->mii_if);
        }
        /* get link status */
        case ETHTOOL_GLINK: {
                struct ethtool_value edata = {ETHTOOL_GLINK};
                edata.data = mii_link_ok(&np->mii_if);
                if (copy_to_user(useraddr, &edata, sizeof(edata)))
                        return -EFAULT;
                return 0;
        }

        /* get message-level */
        case ETHTOOL_GMSGLVL: {
                struct ethtool_value edata = {ETHTOOL_GMSGLVL};
                edata.data = debug;
                if (copy_to_user(useraddr, &edata, sizeof(edata)))
                        return -EFAULT;
                return 0;
        }
        /* set message-level */
        case ETHTOOL_SMSGLVL: {
                struct ethtool_value edata;
                if (copy_from_user(&edata, useraddr, sizeof(edata)))
                        return -EFAULT;
                debug = edata.data;
                return 0;
        }
        default:
                return -EOPNOTSUPP;
        }
}


static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
        struct netdev_private *np = dev->priv;
        struct mii_ioctl_data *data = (struct mii_ioctl_data *) &rq->ifr_data;
        int rc;

        if (!netif_running(dev))
                return -EINVAL;

        if (cmd == SIOCETHTOOL)
                rc = netdev_ethtool_ioctl(dev, (void *) rq->ifr_data);

        else {
                spin_lock_irq(&np->lock);
                rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL);
                spin_unlock_irq(&np->lock);

                if ((cmd == SIOCSMIIREG) && (data->phy_id == np->phys[0]))
                        check_duplex(dev);
        }

        return rc;
}

static int netdev_close(struct net_device *dev)
{
        long ioaddr = dev->base_addr;
        struct netdev_private *np = dev->priv;
        int i;

        netif_stop_queue(dev);
        netif_stop_if(dev);

        if (debug > 1) {
                printk(KERN_DEBUG "%s: Shutting down ethercard, Intr status %#8.8x.\n",
                       dev->name, (int) readl(ioaddr + IntrStatus));
                printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
                       dev->name, np->cur_tx, np->dirty_tx,
                       np->cur_rx, np->dirty_rx);
        }

        /* Disable interrupts by clearing the interrupt mask. */
        writel(0, ioaddr + IntrEnable);

        /* Stop the chip's Tx and Rx processes. */
        writel(0, ioaddr + GenCtrl);
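        /* Read back to flush the posted write, so the stop has taken effect
           before teardown continues. */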
        readl(ioaddr + GenCtrl);

        if (debug > 5) {
                printk(KERN_DEBUG "  Tx ring at %#llx:\n",
                       (long long) np->tx_ring_dma);
                for (i = 0; i < 8 /* TX_RING_SIZE is huge! */; i++)
                        printk(KERN_DEBUG " #%d desc. %#8.8x %#llx -> %#8.8x.\n",
                               i, le32_to_cpu(np->tx_ring[i].status),
                               (long long) dma_to_cpu(np->tx_ring[i].addr),
                               le32_to_cpu(np->tx_done_q[i].status));
                printk(KERN_DEBUG "  Rx ring at %#llx -> %p:\n",
                       (long long) np->rx_ring_dma, np->rx_done_q);
                if (np->rx_done_q)
                        for (i = 0; i < 8 /* RX_RING_SIZE */; i++) {
                                printk(KERN_DEBUG " #%d desc. %#llx -> %#8.8x\n",
                                       i, (long long) dma_to_cpu(np->rx_ring[i].rxaddr), le32_to_cpu(np->rx_done_q[i].status));
                        }
        }

        free_irq(dev->irq, dev);

        /* Free all the skbuffs in the Rx queue. */
        for (i = 0; i < RX_RING_SIZE; i++) {
                np->rx_ring[i].rxaddr = cpu_to_dma(0xBADF00D0); /* An invalid address. */
                if (np->rx_info[i].skb != NULL) {
                        pci_unmap_single(np->pci_dev, np->rx_info[i].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
                        dev_kfree_skb(np->rx_info[i].skb);
                }
                np->rx_info[i].skb = NULL;
                np->rx_info[i].mapping = 0;
        }
        for (i = 0; i < TX_RING_SIZE; i++) {
                struct sk_buff *skb = np->tx_info[i].skb;
                if (skb == NULL)
                        continue;
                pci_unmap_single(np->pci_dev,
                                 np->tx_info[i].mapping,
                                 skb_first_frag_len(skb), PCI_DMA_TODEVICE);
                np->tx_info[i].mapping = 0;
                dev_kfree_skb(skb);
                np->tx_info[i].skb = NULL;
        }

        COMPAT_MOD_DEC_USE_COUNT;

        return 0;
}
 

static void __devexit starfire_remove_one (struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct netdev_private *np;

        if (!dev)
                BUG();

        np = dev->priv;
        if (np->queue_mem)
                pci_free_consistent(pdev, np->queue_mem_size, np->queue_mem, np->queue_mem_dma);

        unregister_netdev(dev);
        iounmap((char *)dev->base_addr);
        pci_release_regions(pdev);

        pci_set_drvdata(pdev, NULL);
        kfree(dev);                     /* Will also free np!! */
}


static struct pci_driver starfire_driver = {
        .name           = DRV_NAME,
        .probe          = starfire_init_one,
        .remove         = __devexit_p(starfire_remove_one),
        .id_table       = starfire_pci_tbl,
};


static int __init starfire_init (void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
        printk(version);
#endif
#ifndef ADDR_64BITS
        /* we can do this test only at run-time... sigh */
        if (sizeof(dma_addr_t) == sizeof(u64)) {
                printk("This driver has not been ported to this 64-bit architecture yet\n");
                return -ENODEV;
        }
#endif /* not ADDR_64BITS */
#ifndef HAS_FIRMWARE
        /* unconditionally disable hw cksums if firmware is not present */
        enable_hw_cksum = 0;
#endif /* not HAS_FIRMWARE */
        return pci_module_init (&starfire_driver);
}


static void __exit starfire_cleanup (void)
{
        pci_unregister_driver (&starfire_driver);
}


module_init(starfire_init);
module_exit(starfire_cleanup);


/*
 * Local variables:
 *  c-basic-offset: 8
 *  tab-width: 8
 * End:
 */
