OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

[/] [or1k/] [trunk/] [linux/] [linux-2.4/] [drivers/] [net/] [natsemi.c] - Blame information for rev 1765

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 1275 phoenix
/* natsemi.c: A Linux PCI Ethernet driver for the NatSemi DP8381x series. */
2
/*
3
        Written/copyright 1999-2001 by Donald Becker.
4
        Portions copyright (c) 2001,2002 Sun Microsystems (thockin@sun.com)
5
        Portions copyright 2001,2002 Manfred Spraul (manfred@colorfullife.com)
6
 
7
        This software may be used and distributed according to the terms of
8
        the GNU General Public License (GPL), incorporated herein by reference.
9
        Drivers based on or derived from this code fall under the GPL and must
10
        retain the authorship, copyright and license notice.  This file is not
11
        a complete program and may only be used when the entire operating
12
        system is licensed under the GPL.  License for use under other terms may be
13
        available.  Contact the original author for details.
14
 
15
        The original author may be reached as becker@scyld.com, or at
16
        Scyld Computing Corporation
17
        410 Severn Ave., Suite 210
18
        Annapolis MD 21403
19
 
20
        Support information and updates available at
21
        http://www.scyld.com/network/natsemi.html
22
 
23
 
24
        Linux kernel modifications:
25
 
26
        Version 1.0.1:
27
                - Spinlock fixes
28
                - Bug fixes and better intr performance (Tjeerd)
29
        Version 1.0.2:
30
                - Now reads correct MAC address from eeprom
31
        Version 1.0.3:
32
                - Eliminate redundant priv->tx_full flag
33
                - Call netif_start_queue from dev->tx_timeout
34
                - wmb() in start_tx() to flush data
35
                - Update Tx locking
36
                - Clean up PCI enable (davej)
37
        Version 1.0.4:
38
                - Merge Donald Becker's natsemi.c version 1.07
39
        Version 1.0.5:
40
                - { fill me in }
41
        Version 1.0.6:
42
                * ethtool support (jgarzik)
43
                * Proper initialization of the card (which sometimes
44
                fails to occur and leaves the card in a non-functional
45
                state). (uzi)
46
 
47
                * Some documented register settings to optimize some
48
                of the 100Mbit autodetection circuitry in rev C cards. (uzi)
49
 
50
                * Polling of the PHY intr for stuff like link state
51
                change and auto- negotiation to finally work properly. (uzi)
52
 
53
                * One-liner removal of a duplicate declaration of
54
                netdev_error(). (uzi)
55
 
56
        Version 1.0.7: (Manfred Spraul)
57
                * pci dma
58
                * SMP locking update
59
                * full reset added into tx_timeout
60
                * correct multicast hash generation (both big and little endian)
61
                        [copied from a natsemi driver version
62
                         from Myrio Corporation, Greg Smith]
63
                * suspend/resume
64
 
65
        version 1.0.8 (Tim Hockin <thockin@sun.com>)
66
                * ETHTOOL_* support
67
                * Wake on lan support (Erik Gilling)
68
                * MXDMA fixes for serverworks
69
                * EEPROM reload
70
 
71
        version 1.0.9 (Manfred Spraul)
72
                * Main change: fix lack of synchronize
73
                netif_close/netif_suspend against a last interrupt
74
                or packet.
75
                * do not enable superflous interrupts (e.g. the
76
                drivers relies on TxDone - TxIntr not needed)
77
                * wait that the hardware has really stopped in close
78
                and suspend.
79
                * workaround for the (at least) gcc-2.95.1 compiler
80
                problem. Also simplifies the code a bit.
81
                * disable_irq() in tx_timeout - needed to protect
82
                against rx interrupts.
83
                * stop the nic before switching into silent rx mode
84
                for wol (required according to docu).
85
 
86
        version 1.0.10:
87
                * use long for ee_addr (various)
88
                * print pointers properly (DaveM)
89
                * include asm/irq.h (?)
90
 
91
        version 1.0.11:
92
                * check and reset if PHY errors appear (Adrian Sun)
93
                * WoL cleanup (Tim Hockin)
94
                * Magic number cleanup (Tim Hockin)
95
                * Don't reload EEPROM on every reset (Tim Hockin)
96
                * Save and restore EEPROM state across reset (Tim Hockin)
97
                * MDIO Cleanup (Tim Hockin)
98
                * Reformat register offsets/bits (jgarzik)
99
 
100
        version 1.0.12:
101
                * ETHTOOL_* further support (Tim Hockin)
102
 
103
        version 1.0.13:
104
                * ETHTOOL_[G]EEPROM support (Tim Hockin)
105
 
106
        version 1.0.13:
107
                * crc cleanup (Matt Domsch <Matt_Domsch@dell.com>)
108
 
109
        version 1.0.14:
110
                * Cleanup some messages and autoneg in ethtool (Tim Hockin)
111
 
112
        version 1.0.15:
113
                * Get rid of cable_magic flag
114
                * use new (National provided) solution for cable magic issue
115
 
116
        version 1.0.16:
117
                * call netdev_rx() for RxErrors (Manfred Spraul)
118
                * formatting and cleanups
119
                * change options and full_duplex arrays to be zero
120
                  initialized
121
                * enable only the WoL and PHY interrupts in wol mode
122
 
123
        version 1.0.17:
124
                * only do cable_magic on 83815 and early 83816 (Tim Hockin)
125
                * create a function for rx refill (Manfred Spraul)
126
                * combine drain_ring and init_ring (Manfred Spraul)
127
                * oom handling (Manfred Spraul)
128
                * hands_off instead of playing with netif_device_{de,a}ttach
129
                  (Manfred Spraul)
130
                * be sure to write the MAC back to the chip (Manfred Spraul)
131
                * lengthen EEPROM timeout, and always warn about timeouts
132
                  (Manfred Spraul)
133
                * comments update (Manfred)
134
                * do the right thing on a phy-reset (Manfred and Tim)
135
 
136
        TODO:
137
        * big endian support with CFG:BEM instead of cpu_to_le32
138
        * support for an external PHY
139
        * NAPI
140
*/
141
 
142
#if !defined(__OPTIMIZE__)
143
#warning  You must compile this file with the correct options!
144
#warning  See the last lines of the source file.
145
#error You must compile this driver with "-O".
146
#endif
147
 
148
#include <linux/config.h>
149
#include <linux/module.h>
150
#include <linux/kernel.h>
151
#include <linux/string.h>
152
#include <linux/timer.h>
153
#include <linux/errno.h>
154
#include <linux/ioport.h>
155
#include <linux/slab.h>
156
#include <linux/interrupt.h>
157
#include <linux/pci.h>
158
#include <linux/netdevice.h>
159
#include <linux/etherdevice.h>
160
#include <linux/skbuff.h>
161
#include <linux/init.h>
162
#include <linux/spinlock.h>
163
#include <linux/ethtool.h>
164
#include <linux/delay.h>
165
#include <linux/rtnetlink.h>
166
#include <linux/mii.h>
167
#include <linux/crc32.h>
168
#include <asm/processor.h>      /* Processor type for cache alignment. */
169
#include <asm/bitops.h>
170
#include <asm/io.h>
171
#include <asm/irq.h>
172
#include <asm/uaccess.h>
173
 
174
/* Driver identification strings (runtime-visible; do not change lightly). */
#define DRV_NAME	"natsemi"
#define DRV_VERSION	"1.07+LK1.0.17"
#define DRV_RELDATE	"Sep 27, 2002"

/* Rx frames are placed at this byte offset so the IP header (after the
   14-byte ethernet header) ends up longword aligned — see IIIb/c above. */
#define RX_OFFSET	2

/* Updated to recommendations in pci-skeleton v2.03. */

/* The user-configurable values.
   These may be modified when a driver module is loaded.*/

/* default message-enable bits (NETIF_MSG_* from <linux/netdevice.h>) */
#define NATSEMI_DEF_MSG		(NETIF_MSG_DRV		| \
				 NETIF_MSG_LINK		| \
				 NETIF_MSG_WOL		| \
				 NETIF_MSG_RX_ERR	| \
				 NETIF_MSG_TX_ERR)
static int debug = -1;	/* -1: presumably "use NATSEMI_DEF_MSG" — confirm in probe */

/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static int max_interrupt_work = 20;
static int mtu;		/* 0: keep the interface default */

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   This chip uses a 512 element hash table based on the Ethernet CRC.  */
static int multicast_filter_limit = 100;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
static int rx_copybreak;

/* Used to pass the media type, etc.
   Both 'options[]' and 'full_duplex[]' should exist for driver
   interoperability.
   The media type is usually passed in 'options[]'.
*/
#define MAX_UNITS 8		/* More are supported, limit only on options */
static int options[MAX_UNITS];
static int full_duplex[MAX_UNITS];

/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE	16
#define TX_QUEUE_LEN	10	/* Limit ring entries actually used, min 4. */
#define RX_RING_SIZE	32

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(2*HZ)

#define NATSEMI_HW_TIMEOUT	400
/* Parenthesized: the former bare 3*HZ expansion could bind incorrectly
   next to a higher-precedence operator at a use site. */
#define NATSEMI_TIMER_FREQ	(3*HZ)
#define NATSEMI_PG0_NREGS	64
#define NATSEMI_RFDR_NREGS	8
#define NATSEMI_PG1_NREGS	4
#define NATSEMI_NREGS		(NATSEMI_PG0_NREGS + NATSEMI_RFDR_NREGS + \
				 NATSEMI_PG1_NREGS)
#define NATSEMI_REGS_VER	1 /* v1 added RFDR registers */
#define NATSEMI_REGS_SIZE	(NATSEMI_NREGS * sizeof(u32))
#define NATSEMI_EEPROM_SIZE	24 /* 12 16-bit values */

#define PKT_BUF_SZ		1536 /* Size of each temporary Rx buffer. */
240
 
241
/* These identify the driver base version and may not be removed. */
242
static char version[] __devinitdata =
243
  KERN_INFO DRV_NAME " dp8381x driver, version "
244
      DRV_VERSION ", " DRV_RELDATE "\n"
245
  KERN_INFO "  originally by Donald Becker <becker@scyld.com>\n"
246
  KERN_INFO "  http://www.scyld.com/network/natsemi.html\n"
247
  KERN_INFO "  2.4.x kernel port by Jeff Garzik, Tjeerd Mulder\n";
248
 
249
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
250
MODULE_DESCRIPTION("National Semiconductor DP8381x series PCI Ethernet driver");
251
MODULE_LICENSE("GPL");
252
 
253
MODULE_PARM(max_interrupt_work, "i");
254
MODULE_PARM(mtu, "i");
255
MODULE_PARM(debug, "i");
256
MODULE_PARM(rx_copybreak, "i");
257
MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
258
MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
259
MODULE_PARM_DESC(max_interrupt_work,
260
        "DP8381x maximum events handled per interrupt");
261
MODULE_PARM_DESC(mtu, "DP8381x MTU (all boards)");
262
MODULE_PARM_DESC(debug, "DP8381x default debug level");
263
MODULE_PARM_DESC(rx_copybreak,
264
        "DP8381x copy breakpoint for copy-only-tiny-frames");
265
MODULE_PARM_DESC(options,
266
        "DP8381x: Bits 0-3: media type, bit 17: full duplex");
267
MODULE_PARM_DESC(full_duplex, "DP8381x full duplex setting(s) (1)");
268
 
269
/*
270
                                Theory of Operation
271
 
272
I. Board Compatibility
273
 
274
This driver is designed for National Semiconductor DP83815 PCI Ethernet NIC.
275
It also works with other chips in the DP83810 series.
276
 
277
II. Board-specific settings
278
 
279
This driver requires the PCI interrupt line to be valid.
280
It honors the EEPROM-set values.
281
 
282
III. Driver operation
283
 
284
IIIa. Ring buffers
285
 
286
This driver uses two statically allocated fixed-size descriptor lists
287
formed into rings by a branch from the final descriptor to the beginning of
288
the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
289
The NatSemi design uses a 'next descriptor' pointer that the driver forms
290
into a list.
291
 
292
IIIb/c. Transmit/Receive Structure
293
 
294
This driver uses a zero-copy receive and transmit scheme.
295
The driver allocates full frame size skbuffs for the Rx ring buffers at
296
open() time and passes the skb->data field to the chip as receive data
297
buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
298
a fresh skbuff is allocated and the frame is copied to the new skbuff.
299
When the incoming frame is larger, the skbuff is passed directly up the
300
protocol stack.  Buffers consumed this way are replaced by newly allocated
301
skbuffs in a later phase of receives.
302
 
303
The RX_COPYBREAK value is chosen to trade-off the memory wasted by
304
using a full-sized skbuff for small frames vs. the copying costs of larger
305
frames.  New boards are typically used in generously configured machines
306
and the underfilled buffers have negligible impact compared to the benefit of
307
a single allocation size, so the default value of zero results in never
308
copying packets.  When copying is done, the cost is usually mitigated by using
309
a combined copy/checksum routine.  Copying also preloads the cache, which is
310
most useful with small frames.
311
 
312
A subtle aspect of the operation is that unaligned buffers are not permitted
313
by the hardware.  Thus the IP header at offset 14 in an ethernet frame isn't
314
longword aligned for further processing.  On copies frames are put into the
315
skbuff at an offset of "+2", 16-byte aligning the IP header.
316
 
317
IIId. Synchronization
318
 
319
Most operations are synchronized on the np->lock irq spinlock, except the
320
performance critical codepaths:
321
 
322
The rx process only runs in the interrupt handler. Access from outside
323
the interrupt handler is only permitted after disable_irq().
324
 
325
The rx process usually runs under the dev->xmit_lock. If np->intr_tx_reap
326
is set, then access is permitted under spin_lock_irq(&np->lock).
327
 
328
Thus configuration functions that want to access everything must call
329
        disable_irq(dev->irq);
330
        spin_lock_bh(dev->xmit_lock);
331
        spin_lock_irq(&np->lock);
332
 
333
IV. Notes
334
 
335
NatSemi PCI network controllers are very uncommon.
336
 
337
IVb. References
338
 
339
http://www.scyld.com/expert/100mbps.html
340
http://www.scyld.com/expert/NWay.html
341
Datasheet is available from:
342
http://www.national.com/pf/DP/DP83815.html
343
 
344
IVc. Errata
345
 
346
None characterised.
347
*/
348
 
349
 
350
 
351
/* Resource-usage flags for a supported board; kept in
 * natsemi_pci_info[].flags and checked during probe. */
enum pcistuff {
	PCI_USES_IO	= 0x01,	/* board is driven through an I/O-port BAR */
	PCI_USES_MEM	= 0x02,	/* board is driven through a memory BAR */
	PCI_USES_MASTER	= 0x04,	/* board needs PCI bus mastering enabled */
	PCI_ADDR0	= 0x08,	/* base address register 0 is used */
	PCI_ADDR1	= 0x10,	/* base address register 1 is used */
};

/* MMIO operations required */
#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR1)
361
 
362
 
363
/* Board data, directly indexed by pci_tbl[x].driver_data. */
static struct {
	const char *name;	/* printable chip name */
	unsigned long flags;	/* PCI_* resource flags (enum pcistuff) */
} natsemi_pci_info[] __devinitdata = {
	{ "NatSemi DP8381[56]", PCI_IOTYPE },
};
370
 
371
static struct pci_device_id natsemi_pci_tbl[] = {
372
        { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_83815, PCI_ANY_ID, PCI_ANY_ID, },
373
        { 0, },
374
};
375
MODULE_DEVICE_TABLE(pci, natsemi_pci_tbl);
376
 
377
/* Offsets to the device registers.
   Unlike software-only systems, device drivers interact with complex hardware.
   It's not useful to define symbolic names for every register bit in the
   device.
*/
enum register_offsets {
	ChipCmd			= 0x00,
	ChipConfig		= 0x04,
	EECtrl			= 0x08,
	PCIBusCfg		= 0x0C,
	IntrStatus		= 0x10,
	IntrMask		= 0x14,
	IntrEnable		= 0x18,
	/* DP83816 only.  The IHR register lives at 0x1C; the previous value
	 * 0x16 was not even a valid 32-bit-aligned register offset. */
	IntrHoldoff		= 0x1C,
	TxRingPtr		= 0x20,
	TxConfig		= 0x24,
	RxRingPtr		= 0x30,
	RxConfig		= 0x34,
	ClkRun			= 0x3C,
	WOLCmd			= 0x40,
	PauseCmd		= 0x44,
	RxFilterAddr		= 0x48,
	RxFilterData		= 0x4C,
	BootRomAddr		= 0x50,
	BootRomData		= 0x54,
	SiliconRev		= 0x58,
	StatsCtrl		= 0x5C,
	StatsData		= 0x60,
	RxPktErrs		= 0x60,
	RxMissed		= 0x68,
	RxCRCErrs		= 0x64,
	BasicControl		= 0x80,
	BasicStatus		= 0x84,
	AnegAdv			= 0x90,
	AnegPeer		= 0x94,
	PhyStatus		= 0xC0,
	MIntrCtrl		= 0xC4,
	MIntrStatus		= 0xC8,
	PhyCtrl			= 0xE4,

	/* These are from the spec, around page 78... on a separate table.
	 * The meaning of these registers depend on the value of PGSEL. */
	PGSEL			= 0xCC,
	PMDCSR			= 0xE4,
	TSTDAT			= 0xFC,
	DSPCFG			= 0xF4,
	SDCFG			= 0xF8
};
425
/* Values written to the 'magic' registers above while PGSEL=1. */
#define PMDCSR_VAL	0x189c	/* enable preferred adaptation circuitry */
#define TSTDAT_VAL	0x0
#define DSPCFG_VAL	0x5040
#define SDCFG_VAL	0x008c	/* set voltage thresholds for Signal Detect */
#define DSPCFG_LOCK	0x20	/* coefficient lock bit in DSPCFG */
#define TSTDAT_FIXED	0xe8	/* magic number for bad coefficients */
432
 
433
/* Miscellaneous registers in PCI configuration space. */
enum pci_register_offsets {
	PCIPM	= 0x44,	/* non-standard power-management control register */
};
437
 
438
/* Bits in the ChipCmd register. */
enum ChipCmd_bits {
	ChipReset	= 0x100,	/* full chip reset */
	RxReset		= 0x20,		/* receiver reset */
	TxReset		= 0x10,		/* transmitter reset */
	RxOff		= 0x08,
	RxOn		= 0x04,
	TxOff		= 0x02,
	TxOn		= 0x01,
};
447
 
448
/* Bits in the ChipConfig register. */
enum ChipConfig_bits {
	CfgPhyDis	= 0x200,
	CfgPhyRst	= 0x400,
	CfgExtPhy	= 0x1000,
	CfgAnegEnable	= 0x2000,	/* enable autonegotiation */
	CfgAneg100	= 0x4000,	/* advertise 100 Mbit */
	CfgAnegFull	= 0x8000,	/* advertise full duplex */
	CfgAnegDone	= 0x8000000,	/* autonegotiation complete */
	CfgFullDuplex	= 0x20000000,	/* current duplex (read) */
	CfgSpeed100	= 0x40000000,	/* current speed (read) */
	CfgLink		= 0x80000000,	/* link up (read) */
};
460
 
461
/* Bits in EECtrl: bit-banged serial EEPROM interface. */
enum EECtrl_bits {
	EE_ShiftClk	= 0x04,	/* serial clock */
	EE_DataIn	= 0x01,
	EE_ChipSelect	= 0x08,
	EE_DataOut	= 0x02,
};

/* Bits in PCIBusCfg. */
enum PCIBusCfg_bits {
	EepromReload	= 0x4,	/* trigger reload from the EEPROM */
};
471
 
472
/* Bits in the interrupt status/mask registers. */
enum IntrStatus_bits {
	IntrRxDone		= 0x0001,
	IntrRxIntr		= 0x0002,
	IntrRxErr		= 0x0004,
	IntrRxEarly		= 0x0008,
	IntrRxIdle		= 0x0010,
	IntrRxOverrun		= 0x0020,
	IntrTxDone		= 0x0040,
	IntrTxIntr		= 0x0080,
	IntrTxErr		= 0x0100,
	IntrTxIdle		= 0x0200,
	IntrTxUnderrun		= 0x0400,
	StatsMax		= 0x0800,
	SWInt			= 0x1000,
	WOLPkt			= 0x2000,
	LinkChange		= 0x4000,
	IntrHighBits		= 0x8000,
	RxStatusFIFOOver	= 0x10000,
	IntrPCIErr		= 0xf00000,	/* any PCI error condition */
	RxResetDone		= 0x1000000,
	TxResetDone		= 0x2000000,
	IntrAbnormalSummary	= 0xCD20,	/* mask of "abnormal" events */
};

/*
 * Default Interrupts:
 * Rx OK, Rx Packet Error, Rx Overrun,
 * Tx OK, Tx Packet Error, Tx Underrun,
 * MIB Service, Phy Interrupt, High Bits,
 * Rx Status FIFO overrun,
 * Received Target Abort, Received Master Abort,
 * Signalled System Error, Received Parity Error
 */
#define DEFAULT_INTR 0x00f1cd65
507
 
508
/* Bits in the TxConfig register. */
enum TxConfig_bits {
	TxDrthMask	= 0x3f,		/* Tx drain threshold field */
	TxFlthMask	= 0x3f00,	/* Tx fill threshold field */
	TxMxdmaMask	= 0x700000,	/* max DMA burst size field */
	TxMxdma_512	= 0x0,
	TxMxdma_4	= 0x100000,
	TxMxdma_8	= 0x200000,
	TxMxdma_16	= 0x300000,
	TxMxdma_32	= 0x400000,
	TxMxdma_64	= 0x500000,
	TxMxdma_128	= 0x600000,
	TxMxdma_256	= 0x700000,
	TxCollRetry	= 0x800000,
	TxAutoPad	= 0x10000000,
	TxMacLoop	= 0x20000000,
	TxHeartIgn	= 0x40000000,
	TxCarrierIgn	= 0x80000000
};
526
 
527
/* Bits in the RxConfig register. */
enum RxConfig_bits {
	RxDrthMask	= 0x3e,		/* Rx drain threshold field */
	RxMxdmaMask	= 0x700000,	/* max DMA burst size field */
	RxMxdma_512	= 0x0,
	RxMxdma_4	= 0x100000,
	RxMxdma_8	= 0x200000,
	RxMxdma_16	= 0x300000,
	RxMxdma_32	= 0x400000,
	RxMxdma_64	= 0x500000,
	RxMxdma_128	= 0x600000,
	RxMxdma_256	= 0x700000,
	RxAcceptLong	= 0x8000000,	/* accept oversized frames */
	RxAcceptTx	= 0x10000000,	/* accept own transmissions */
	RxAcceptRunt	= 0x40000000,	/* accept undersized frames */
	RxAcceptErr	= 0x80000000	/* accept errored frames */
};
543
 
544
/* Bits in the ClkRun register. */
enum ClkRun_bits {
	PMEEnable	= 0x100,	/* allow PME assertion */
	PMEStatus	= 0x8000,	/* PME event occurred */
};
548
 
549
/* Bits in the WOLCmd register: Wake* enables a wake source, the matching
 * Woke* bit reports that the source fired. */
enum WolCmd_bits {
	WakePhy		= 0x1,
	WakeUnicast	= 0x2,
	WakeMulticast	= 0x4,
	WakeBroadcast	= 0x8,
	WakeArp		= 0x10,
	WakePMatch0	= 0x20,
	WakePMatch1	= 0x40,
	WakePMatch2	= 0x80,
	WakePMatch3	= 0x100,
	WakeMagic	= 0x200,
	WakeMagicSecure	= 0x400,
	SecureHack	= 0x100000,
	WokePhy		= 0x400000,
	WokeUnicast	= 0x800000,
	WokeMulticast	= 0x1000000,
	WokeBroadcast	= 0x2000000,
	WokeArp		= 0x4000000,
	WokePMatch0	= 0x8000000,
	WokePMatch1	= 0x10000000,
	WokePMatch2	= 0x20000000,
	WokePMatch3	= 0x40000000,
	WokeMagic	= 0x80000000,
	WakeOptsSummary	= 0x7ff		/* all Wake* enable bits */
};
574
 
575
/* Bits in the RxFilterAddr register. */
enum RxFilterAddr_bits {
	RFCRAddressMask		= 0x3ff,	/* filter-memory address field */
	AcceptMulticast		= 0x00200000,
	AcceptMyPhys		= 0x08000000,
	AcceptAllPhys		= 0x10000000,
	AcceptAllMulticast	= 0x20000000,
	AcceptBroadcast		= 0x40000000,
	RxFilterEnable		= 0x80000000
};
584
 
585
/* Bits in the StatsCtrl register. */
enum StatsCtrl_bits {
	StatsWarn	= 0x1,
	StatsFreeze	= 0x2,
	StatsClear	= 0x4,
	StatsStrobe	= 0x8,
};
591
 
592
/* Bits in the MIntrCtrl register. */
enum MIntrCtrl_bits {
	MICRIntEn	= 0x2,	/* interrupt enable */
};

/* Bits in the PhyCtrl register. */
enum PhyCtrl_bits {
	PhyAddrMask	= 0xf,	/* PHY address field */
};
599
 
600
/* Values we might find in the silicon revision register. */
#define SRR_DP83815_C	0x0302
#define SRR_DP83815_D	0x0403
#define SRR_DP83816_A4	0x0504
#define SRR_DP83816_A5	0x0505
605
 
606
/* The Rx and Tx buffer descriptors. */
607
/* Note that using only 32 bit fields simplifies conversion to big-endian
608
   architectures. */
609
struct netdev_desc {
610
        u32 next_desc;
611
        s32 cmd_status;
612
        u32 addr;
613
        u32 software_use;
614
};
615
 
616
/* Bits in network_desc.status */
enum desc_status_bits {
	/* common to Rx and Tx */
	DescOwn=0x80000000, DescMore=0x40000000, DescIntr=0x20000000,
	DescNoCRC=0x10000000, DescPktOK=0x08000000,
	DescSizeMask=0xfff,

	/* Tx error/status bits */
	DescTxAbort=0x04000000, DescTxFIFO=0x02000000,
	DescTxCarrier=0x01000000, DescTxDefer=0x00800000,
	DescTxExcDefer=0x00400000, DescTxOOWCol=0x00200000,
	DescTxExcColl=0x00100000, DescTxCollCount=0x000f0000,

	/* Rx error/status bits */
	DescRxAbort=0x04000000, DescRxOver=0x02000000,
	DescRxDest=0x01800000, DescRxLong=0x00400000,
	DescRxRunt=0x00200000, DescRxInvalid=0x00100000,
	DescRxCRC=0x00080000, DescRxAlign=0x00040000,
	DescRxLoop=0x00020000, DesRxColl=0x00010000,	/* (sic) name kept for compatibility */
};
633
 
634
struct netdev_private {
635
        /* Descriptor rings first for alignment */
636
        dma_addr_t ring_dma;
637
        struct netdev_desc *rx_ring;
638
        struct netdev_desc *tx_ring;
639
        /* The addresses of receive-in-place skbuffs */
640
        struct sk_buff *rx_skbuff[RX_RING_SIZE];
641
        dma_addr_t rx_dma[RX_RING_SIZE];
642
        /* address of a sent-in-place packet/buffer, for later free() */
643
        struct sk_buff *tx_skbuff[TX_RING_SIZE];
644
        dma_addr_t tx_dma[TX_RING_SIZE];
645
        struct net_device_stats stats;
646
        /* Media monitoring timer */
647
        struct timer_list timer;
648
        /* Frequently used values: keep some adjacent for cache effect */
649
        struct pci_dev *pci_dev;
650
        struct netdev_desc *rx_head_desc;
651
        /* Producer/consumer ring indices */
652
        unsigned int cur_rx, dirty_rx;
653
        unsigned int cur_tx, dirty_tx;
654
        /* Based on MTU+slack. */
655
        unsigned int rx_buf_sz;
656
        int oom;
657
        /* Do not touch the nic registers */
658
        int hands_off;
659
        /* These values are keep track of the transceiver/media in use */
660
        unsigned int full_duplex;
661
        /* Rx filter */
662
        u32 cur_rx_mode;
663
        u32 rx_filter[16];
664
        /* FIFO and PCI burst thresholds */
665
        u32 tx_config, rx_config;
666
        /* original contents of ClkRun register */
667
        u32 SavedClkRun;
668
        /* silicon revision */
669
        u32 srr;
670
        /* expected DSPCFG value */
671
        u16 dspcfg;
672
        /* MII transceiver section */
673
        u16 advertising;
674
        unsigned int iosize;
675
        spinlock_t lock;
676
        u32 msg_enable;
677
};
678
 
679
static int eeprom_read(long ioaddr, int location);
680
static int mdio_read(struct net_device *dev, int phy_id, int reg);
681
static void mdio_write(struct net_device *dev, int phy_id, int reg, u16 data);
682
static void natsemi_reset(struct net_device *dev);
683
static void natsemi_reload_eeprom(struct net_device *dev);
684
static void natsemi_stop_rxtx(struct net_device *dev);
685
static int netdev_open(struct net_device *dev);
686
static void do_cable_magic(struct net_device *dev);
687
static void undo_cable_magic(struct net_device *dev);
688
static void check_link(struct net_device *dev);
689
static void netdev_timer(unsigned long data);
690
static void dump_ring(struct net_device *dev);
691
static void tx_timeout(struct net_device *dev);
692
static int alloc_ring(struct net_device *dev);
693
static void refill_rx(struct net_device *dev);
694
static void init_ring(struct net_device *dev);
695
static void drain_tx(struct net_device *dev);
696
static void drain_ring(struct net_device *dev);
697
static void free_ring(struct net_device *dev);
698
static void reinit_ring(struct net_device *dev);
699
static void init_registers(struct net_device *dev);
700
static int start_tx(struct sk_buff *skb, struct net_device *dev);
701
static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
702
static void netdev_error(struct net_device *dev, int intr_status);
703
static void netdev_rx(struct net_device *dev);
704
static void netdev_tx_done(struct net_device *dev);
705
static void __set_rx_mode(struct net_device *dev);
706
static void set_rx_mode(struct net_device *dev);
707
static void __get_stats(struct net_device *dev);
708
static struct net_device_stats *get_stats(struct net_device *dev);
709
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
710
static int netdev_set_wol(struct net_device *dev, u32 newval);
711
static int netdev_get_wol(struct net_device *dev, u32 *supported, u32 *cur);
712
static int netdev_set_sopass(struct net_device *dev, u8 *newval);
713
static int netdev_get_sopass(struct net_device *dev, u8 *data);
714
static int netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd);
715
static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd);
716
static void enable_wol_mode(struct net_device *dev, int enable_intr);
717
static int netdev_close(struct net_device *dev);
718
static int netdev_get_regs(struct net_device *dev, u8 *buf);
719
static int netdev_get_eeprom(struct net_device *dev, u8 *buf);
720
 
721
 
722
/*
 * Probe/attach one DP8381x PCI device.
 *
 * Enables the device, forces it out of any low-power PM state, maps the
 * memory-mapped register window (PCI BAR 1), reads the station address
 * from the EEPROM, resets the chip and registers the net_device.
 * Returns 0 on success or a negative errno; on failure all resources
 * acquired so far are released via the goto-unwind tail.
 */
static int __devinit natsemi_probe1 (struct pci_dev *pdev,
        const struct pci_device_id *ent)
{
        struct net_device *dev;
        struct netdev_private *np;
        int i, option, irq, chip_idx = ent->driver_data;
        static int find_cnt = -1;       /* index into options[]/full_duplex[] */
        unsigned long ioaddr, iosize;
        const int pcibar = 1; /* PCI base address register */
        int prev_eedata;
        u32 tmp;

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
        static int printed_version;
        if (!printed_version++)
                printk(version);
#endif

        i = pci_enable_device(pdev);
        if (i) return i;

        /* natsemi has a non-standard PM control register
         * in PCI config space.  Some boards apparently need
         * to be brought to D0 in this manner.
         */
        pci_read_config_dword(pdev, PCIPM, &tmp);
        if (tmp & PCI_PM_CTRL_STATE_MASK) {
                /* D0 state, disable PME assertion */
                u32 newtmp = tmp & ~PCI_PM_CTRL_STATE_MASK;
                pci_write_config_dword(pdev, PCIPM, newtmp);
        }

        find_cnt++;
        ioaddr = pci_resource_start(pdev, pcibar);
        iosize = pci_resource_len(pdev, pcibar);
        irq = pdev->irq;

        if (natsemi_pci_info[chip_idx].flags & PCI_USES_MASTER)
                pci_set_master(pdev);

        dev = alloc_etherdev(sizeof (struct netdev_private));
        if (!dev)
                return -ENOMEM;
        SET_MODULE_OWNER(dev);

        i = pci_request_regions(pdev, dev->name);
        if (i)
                goto err_pci_request_regions;

        /* ioaddr is reused: bus address in, remapped virtual address out */
        ioaddr = (unsigned long) ioremap (ioaddr, iosize);
        if (!ioaddr) {
                i = -ENOMEM;
                goto err_ioremap;
        }

        /* Work around the dropped serial bit. */
        /* Each MAC address byte is reassembled from two adjacent EEPROM
         * words (6..9): the stream is offset by one bit in the word. */
        prev_eedata = eeprom_read(ioaddr, 6);
        for (i = 0; i < 3; i++) {
                int eedata = eeprom_read(ioaddr, i + 7);
                dev->dev_addr[i*2] = (eedata << 1) + (prev_eedata >> 15);
                dev->dev_addr[i*2+1] = eedata >> 7;
                prev_eedata = eedata;
        }

        dev->base_addr = ioaddr;
        dev->irq = irq;

        np = dev->priv;

        np->pci_dev = pdev;
        pci_set_drvdata(pdev, dev);
        np->iosize = iosize;
        spin_lock_init(&np->lock);
        /* debug >= 0 selects a verbosity level; otherwise use the default mask */
        np->msg_enable = (debug >= 0) ? (1<<debug)-1 : NATSEMI_DEF_MSG;
        np->hands_off = 0;

        /* Reset the chip to erase previous misconfiguration. */
        natsemi_reload_eeprom(dev);
        natsemi_reset(dev);

        option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
        if (dev->mem_start)
                option = dev->mem_start;

        /* The lower four bits are the media type. */
        if (option) {
                if (option & 0x200)
                        np->full_duplex = 1;
                if (option & 15)
                        printk(KERN_INFO
                                "%s: ignoring user supplied media type %d",
                                dev->name, option & 15);
        }
        if (find_cnt < MAX_UNITS  &&  full_duplex[find_cnt])
                np->full_duplex = 1;

        /* The chip-specific entries in the device structure. */
        dev->open = &netdev_open;
        dev->hard_start_xmit = &start_tx;
        dev->stop = &netdev_close;
        dev->get_stats = &get_stats;
        dev->set_multicast_list = &set_rx_mode;
        dev->do_ioctl = &netdev_ioctl;
        dev->tx_timeout = &tx_timeout;
        dev->watchdog_timeo = TX_TIMEOUT;

        if (mtu)
                dev->mtu = mtu;

        i = register_netdev(dev);
        if (i)
                goto err_register_netdev;

        netif_carrier_off(dev);

        if (netif_msg_drv(np)) {
                printk(KERN_INFO "%s: %s at %#08lx, ",
                        dev->name, natsemi_pci_info[chip_idx].name, ioaddr);
                for (i = 0; i < ETH_ALEN-1; i++)
                                printk("%02x:", dev->dev_addr[i]);
                printk("%02x, IRQ %d.\n", dev->dev_addr[i], irq);
        }

        np->advertising = mdio_read(dev, 1, MII_ADVERTISE);
        /* Warn if the EEPROM did not program full autonegotiation defaults */
        if ((readl(ioaddr + ChipConfig) & 0xe000) != 0xe000
         && netif_msg_probe(np)) {
                u32 chip_config = readl(ioaddr + ChipConfig);
                printk(KERN_INFO "%s: Transceiver default autonegotiation %s "
                        "10%s %s duplex.\n",
                        dev->name,
                        chip_config & CfgAnegEnable ?
                          "enabled, advertise" : "disabled, force",
                        chip_config & CfgAneg100 ? "0" : "",
                        chip_config & CfgAnegFull ? "full" : "half");
        }
        if (netif_msg_probe(np))
                printk(KERN_INFO
                        "%s: Transceiver status %#04x advertising %#04x.\n",
                        dev->name, mdio_read(dev, 1, MII_BMSR),
                        np->advertising);

        /* save the silicon revision for later querying */
        np->srr = readl(ioaddr + SiliconRev);
        if (netif_msg_hw(np))
                printk(KERN_INFO "%s: silicon revision %#04x.\n",
                                dev->name, np->srr);


        return 0;

 err_register_netdev:
        iounmap ((void *) dev->base_addr);

 err_ioremap:
        pci_release_regions(pdev);
        pci_set_drvdata(pdev, NULL);

 err_pci_request_regions:
        free_netdev(dev);
        return i;
}
884
 
885
 
886
/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces.
   The EEPROM code is for the common 93c06/46 EEPROMs with 6 bit addresses. */

/* Delay between EEPROM clock transitions.
   No extra delay is needed with 33Mhz PCI, but future 66Mhz access may need
   a delay.  Note that pre-2.0.34 kernels had a cache-alignment bug that
   made udelay() unreliable.
   The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is
   deprecated.
*/
#define eeprom_delay(ee_addr)   readl(ee_addr)

#define EE_Write0 (EE_ChipSelect)
#define EE_Write1 (EE_ChipSelect | EE_DataIn)

/* The EEPROM commands include the always-set leading bit. */
enum EEPROM_Cmds {
        EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
};
905
 
906
static int eeprom_read(long addr, int location)
907
{
908
        int i;
909
        int retval = 0;
910
        long ee_addr = addr + EECtrl;
911
        int read_cmd = location | EE_ReadCmd;
912
        writel(EE_Write0, ee_addr);
913
 
914
        /* Shift the read command bits out. */
915
        for (i = 10; i >= 0; i--) {
916
                short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
917
                writel(dataval, ee_addr);
918
                eeprom_delay(ee_addr);
919
                writel(dataval | EE_ShiftClk, ee_addr);
920
                eeprom_delay(ee_addr);
921
        }
922
        writel(EE_ChipSelect, ee_addr);
923
        eeprom_delay(ee_addr);
924
 
925
        for (i = 0; i < 16; i++) {
926
                writel(EE_ChipSelect | EE_ShiftClk, ee_addr);
927
                eeprom_delay(ee_addr);
928
                retval |= (readl(ee_addr) & EE_DataOut) ? 1 << i : 0;
929
                writel(EE_ChipSelect, ee_addr);
930
                eeprom_delay(ee_addr);
931
        }
932
 
933
        /* Terminate the EEPROM access. */
934
        writel(EE_Write0, ee_addr);
935
        writel(0, ee_addr);
936
        return retval;
937
}
938
 
939
/* MII transceiver control section.
940
 * The 83815 series has an internal transceiver, and we present the
941
 * management registers as if they were MII connected. */
942
 
943
static int mdio_read(struct net_device *dev, int phy_id, int reg)
944
{
945
        if (phy_id == 1 && reg < 32)
946
                return readl(dev->base_addr+BasicControl+(reg<<2))&0xffff;
947
        else
948
                return 0xffff;
949
}
950
 
951
static void mdio_write(struct net_device *dev, int phy_id, int reg, u16 data)
{
        struct netdev_private *np = dev->priv;

        /* Only the internal transceiver (phy id 1, regs 0-31) is writable. */
        if (phy_id != 1 || reg >= 32)
                return;
        writew(data, dev->base_addr + BasicControl + (reg << 2));
        /* Keep the software copy of the advertising register in sync. */
        if (reg == MII_ADVERTISE)
                np->advertising = data;
}
961
 
962
/* Register bits that survive a soft reset and must be saved/restored by
 * hand around natsemi_reset(), since the EEPROM is not reloaded while the
 * chip is live. */
/* CFG bits [13:16] [18:23] */
#define CFG_RESET_SAVE 0xfde000
/* WCSR bits [0:4] [9:10] */
#define WCSR_RESET_SAVE 0x61f
/* RFCR bits [20] [22] [27:31] */
#define RFCR_RESET_SAVE 0xf8500000
968
 
969
/*
 * Soft-reset the chip while preserving the register state that would
 * normally only come back via an EEPROM reload: CFG, WCSR, RFCR, the
 * perfect-match MAC address and the SecureOn password.  Callers must
 * follow up with init_registers() to reprogram everything else.
 */
static void natsemi_reset(struct net_device *dev)
{
        int i;
        u32 cfg;
        u32 wcsr;
        u32 rfcr;
        u16 pmatch[3];  /* perfect-match filter (station address) words */
        u16 sopass[3];  /* SecureOn password words */
        struct netdev_private *np = dev->priv;

        /*
         * Resetting the chip causes some registers to be lost.
         * Natsemi suggests NOT reloading the EEPROM while live, so instead
         * we save the state that would have been loaded from EEPROM
         * on a normal power-up (see the spec EEPROM map).  This assumes
         * whoever calls this will follow up with init_registers() eventually.
         */

        /* CFG */
        cfg = readl(dev->base_addr + ChipConfig) & CFG_RESET_SAVE;
        /* WCSR */
        wcsr = readl(dev->base_addr + WOLCmd) & WCSR_RESET_SAVE;
        /* RFCR */
        rfcr = readl(dev->base_addr + RxFilterAddr) & RFCR_RESET_SAVE;
        /* PMATCH: the filter RAM is accessed indirectly - write the word
         * offset to RxFilterAddr, then read/write RxFilterData. */
        for (i = 0; i < 3; i++) {
                writel(i*2, dev->base_addr + RxFilterAddr);
                pmatch[i] = readw(dev->base_addr + RxFilterData);
        }
        /* SOPAS */
        for (i = 0; i < 3; i++) {
                writel(0xa+(i*2), dev->base_addr + RxFilterAddr);
                sopass[i] = readw(dev->base_addr + RxFilterData);
        }

        /* now whack the chip */
        writel(ChipReset, dev->base_addr + ChipCmd);
        /* the chip clears ChipReset when the reset has completed */
        for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
                if (!(readl(dev->base_addr + ChipCmd) & ChipReset))
                        break;
                udelay(5);
        }
        if (i==NATSEMI_HW_TIMEOUT) {
                printk(KERN_WARNING "%s: reset did not complete in %d usec.\n",
                        dev->name, i*5);
        } else if (netif_msg_hw(np)) {
                printk(KERN_DEBUG "%s: reset completed in %d usec.\n",
                        dev->name, i*5);
        }

        /* restore CFG */
        cfg |= readl(dev->base_addr + ChipConfig) & ~CFG_RESET_SAVE;
        writel(cfg, dev->base_addr + ChipConfig);
        /* restore WCSR */
        wcsr |= readl(dev->base_addr + WOLCmd) & ~WCSR_RESET_SAVE;
        writel(wcsr, dev->base_addr + WOLCmd);
        /* read RFCR */
        rfcr |= readl(dev->base_addr + RxFilterAddr) & ~RFCR_RESET_SAVE;
        /* restore PMATCH */
        for (i = 0; i < 3; i++) {
                writel(i*2, dev->base_addr + RxFilterAddr);
                writew(pmatch[i], dev->base_addr + RxFilterData);
        }
        for (i = 0; i < 3; i++) {
                writel(0xa+(i*2), dev->base_addr + RxFilterAddr);
                writew(sopass[i], dev->base_addr + RxFilterData);
        }
        /* restore RFCR last: the indirect accesses above clobbered the
         * address bits held in that register */
        writel(rfcr, dev->base_addr + RxFilterAddr);
}
1039
 
1040
static void natsemi_reload_eeprom(struct net_device *dev)
1041
{
1042
        struct netdev_private *np = dev->priv;
1043
        int i;
1044
 
1045
        writel(EepromReload, dev->base_addr + PCIBusCfg);
1046
        for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
1047
                udelay(50);
1048
                if (!(readl(dev->base_addr + PCIBusCfg) & EepromReload))
1049
                        break;
1050
        }
1051
        if (i==NATSEMI_HW_TIMEOUT) {
1052
                printk(KERN_WARNING "%s: EEPROM did not reload in %d usec.\n",
1053
                        dev->name, i*50);
1054
        } else if (netif_msg_hw(np)) {
1055
                printk(KERN_DEBUG "%s: EEPROM reloaded in %d usec.\n",
1056
                        dev->name, i*50);
1057
        }
1058
}
1059
 
1060
static void natsemi_stop_rxtx(struct net_device *dev)
1061
{
1062
        long ioaddr = dev->base_addr;
1063
        struct netdev_private *np = dev->priv;
1064
        int i;
1065
 
1066
        writel(RxOff | TxOff, ioaddr + ChipCmd);
1067
        for(i=0;i< NATSEMI_HW_TIMEOUT;i++) {
1068
                if ((readl(ioaddr + ChipCmd) & (TxOn|RxOn)) == 0)
1069
                        break;
1070
                udelay(5);
1071
        }
1072
        if (i==NATSEMI_HW_TIMEOUT) {
1073
                printk(KERN_WARNING "%s: Tx/Rx process did not stop in %d usec.\n",
1074
                        dev->name, i*5);
1075
        } else if (netif_msg_hw(np)) {
1076
                printk(KERN_DEBUG "%s: Tx/Rx process stopped in %d usec.\n",
1077
                        dev->name, i*5);
1078
        }
1079
}
1080
 
1081
/*
 * dev->open handler: reset the chip, grab the (shared) interrupt, allocate
 * and fill the descriptor rings, program the registers and the station
 * address, start the queue and arm the housekeeping timer.
 * Returns 0 on success or a negative errno.
 */
static int netdev_open(struct net_device *dev)
{
        struct netdev_private *np = dev->priv;
        long ioaddr = dev->base_addr;
        int i;

        /* Reset the chip, just in case. */
        natsemi_reset(dev);

        i = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
        if (i) return i;

        if (netif_msg_ifup(np))
                printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
                        dev->name, dev->irq);
        i = alloc_ring(dev);
        if (i < 0) {
                free_irq(dev->irq, dev);
                return i;
        }
        init_ring(dev);
        /* register setup must not race the interrupt handler or timer */
        spin_lock_irq(&np->lock);
        init_registers(dev);
        /* now set the MAC address according to dev->dev_addr */
        for (i = 0; i < 3; i++) {
                u16 mac = (dev->dev_addr[2*i+1]<<8) + dev->dev_addr[2*i];

                /* filter RAM is indirect: offset first, then 16-bit data */
                writel(i*2, ioaddr + RxFilterAddr);
                writew(mac, ioaddr + RxFilterData);
        }
        /* leave RxFilterAddr holding the current rx mode control bits */
        writel(np->cur_rx_mode, ioaddr + RxFilterAddr);
        spin_unlock_irq(&np->lock);

        netif_start_queue(dev);

        if (netif_msg_ifup(np))
                printk(KERN_DEBUG "%s: Done netdev_open(), status: %#08x.\n",
                        dev->name, (int)readl(ioaddr + ChipCmd));

        /* Set the timer to check for link beat. */
        init_timer(&np->timer);
        np->timer.expires = jiffies + NATSEMI_TIMER_FREQ;
        np->timer.data = (unsigned long)dev;
        np->timer.function = &netdev_timer; /* timer handler */
        add_timer(&np->timer);

        return 0;
}
1129
 
1130
static void do_cable_magic(struct net_device *dev)
1131
{
1132
        struct netdev_private *np = dev->priv;
1133
 
1134
        if (np->srr >= SRR_DP83816_A5)
1135
                return;
1136
 
1137
        /*
1138
         * 100 MBit links with short cables can trip an issue with the chip.
1139
         * The problem manifests as lots of CRC errors and/or flickering
1140
         * activity LED while idle.  This process is based on instructions
1141
         * from engineers at National.
1142
         */
1143
        if (readl(dev->base_addr + ChipConfig) & CfgSpeed100) {
1144
                u16 data;
1145
 
1146
                writew(1, dev->base_addr + PGSEL);
1147
                /*
1148
                 * coefficient visibility should already be enabled via
1149
                 * DSPCFG | 0x1000
1150
                 */
1151
                data = readw(dev->base_addr + TSTDAT) & 0xff;
1152
                /*
1153
                 * the value must be negative, and within certain values
1154
                 * (these values all come from National)
1155
                 */
1156
                if (!(data & 0x80) || ((data >= 0xd8) && (data <= 0xff))) {
1157
                        struct netdev_private *np = dev->priv;
1158
 
1159
                        /* the bug has been triggered - fix the coefficient */
1160
                        writew(TSTDAT_FIXED, dev->base_addr + TSTDAT);
1161
                        /* lock the value */
1162
                        data = readw(dev->base_addr + DSPCFG);
1163
                        np->dspcfg = data | DSPCFG_LOCK;
1164
                        writew(np->dspcfg, dev->base_addr + DSPCFG);
1165
                }
1166
                writew(0, dev->base_addr + PGSEL);
1167
        }
1168
}
1169
 
1170
static void undo_cable_magic(struct net_device *dev)
1171
{
1172
        u16 data;
1173
        struct netdev_private *np = dev->priv;
1174
 
1175
        if (np->srr >= SRR_DP83816_A5)
1176
                return;
1177
 
1178
        writew(1, dev->base_addr + PGSEL);
1179
        /* make sure the lock bit is clear */
1180
        data = readw(dev->base_addr + DSPCFG);
1181
        np->dspcfg = data & ~DSPCFG_LOCK;
1182
        writew(np->dspcfg, dev->base_addr + DSPCFG);
1183
        writew(0, dev->base_addr + PGSEL);
1184
}
1185
 
1186
static void check_link(struct net_device *dev)
1187
{
1188
        struct netdev_private *np = dev->priv;
1189
        long ioaddr = dev->base_addr;
1190
        int duplex;
1191
        int chipcfg = readl(ioaddr + ChipConfig);
1192
 
1193
        if (!(chipcfg & CfgLink)) {
1194
                if (netif_carrier_ok(dev)) {
1195
                        if (netif_msg_link(np))
1196
                                printk(KERN_NOTICE "%s: link down.\n",
1197
                                        dev->name);
1198
                        netif_carrier_off(dev);
1199
                        undo_cable_magic(dev);
1200
                }
1201
                return;
1202
        }
1203
        if (!netif_carrier_ok(dev)) {
1204
                if (netif_msg_link(np))
1205
                        printk(KERN_NOTICE "%s: link up.\n", dev->name);
1206
                netif_carrier_on(dev);
1207
                do_cable_magic(dev);
1208
        }
1209
 
1210
        duplex = np->full_duplex || (chipcfg & CfgFullDuplex ? 1 : 0);
1211
 
1212
        /* if duplex is set then bit 28 must be set, too */
1213
        if (duplex ^ !!(np->rx_config & RxAcceptTx)) {
1214
                if (netif_msg_link(np))
1215
                        printk(KERN_INFO
1216
                                "%s: Setting %s-duplex based on negotiated "
1217
                                "link capability.\n", dev->name,
1218
                                duplex ? "full" : "half");
1219
                if (duplex) {
1220
                        np->rx_config |= RxAcceptTx;
1221
                        np->tx_config |= TxCarrierIgn | TxHeartIgn;
1222
                } else {
1223
                        np->rx_config &= ~RxAcceptTx;
1224
                        np->tx_config &= ~(TxCarrierIgn | TxHeartIgn);
1225
                }
1226
                writel(np->tx_config, ioaddr + TxConfig);
1227
                writel(np->rx_config, ioaddr + RxConfig);
1228
        }
1229
}
1230
 
1231
/*
 * Program the full runtime register configuration after a reset: DSP
 * tuning values, PHY interrupts, ring pointers, Tx/Rx DMA parameters,
 * PME disable, rx filter mode and finally the interrupt mask, ending
 * with both DMA engines switched on.  Callers in this file invoke it
 * with np->lock held (see netdev_open / netdev_timer / tx_timeout).
 */
static void init_registers(struct net_device *dev)
{
        struct netdev_private *np = dev->priv;
        long ioaddr = dev->base_addr;
        int i;

        /* give autonegotiation a chance to finish before programming */
        for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
                if (readl(dev->base_addr + ChipConfig) & CfgAnegDone)
                        break;
                udelay(10);
        }
        if (i==NATSEMI_HW_TIMEOUT && netif_msg_link(np)) {
                printk(KERN_INFO
                        "%s: autonegotiation did not complete in %d usec.\n",
                        dev->name, i*10);
        }

        /* On page 78 of the spec, they recommend some settings for "optimum
           performance" to be done in sequence.  These settings optimize some
           of the 100Mbit autodetection circuitry.  They say we only want to
           do this for rev C of the chip, but engineers at NSC (Bradley
           Kennedy) recommends always setting them.  If you don't, you get
           errors on some autonegotiations that make the device unusable.
        */
        writew(1, ioaddr + PGSEL);
        writew(PMDCSR_VAL, ioaddr + PMDCSR);
        writew(TSTDAT_VAL, ioaddr + TSTDAT);
        writew(DSPCFG_VAL, ioaddr + DSPCFG);
        writew(SDCFG_VAL, ioaddr + SDCFG);
        writew(0, ioaddr + PGSEL);
        /* remember the value so netdev_timer can detect a phy reset */
        np->dspcfg = DSPCFG_VAL;

        /* Enable PHY Specific event based interrupts.  Link state change
           and Auto-Negotiation Completion are among the affected.
           Read the intr status to clear it (needed for wake events).
        */
        readw(ioaddr + MIntrStatus);
        writew(MICRIntEn, ioaddr + MIntrCtrl);

        /* clear any interrupts that are pending, such as wake events */
        readl(ioaddr + IntrStatus);

        /* Tx descriptors follow the Rx descriptors in one DMA region */
        writel(np->ring_dma, ioaddr + RxRingPtr);
        writel(np->ring_dma + RX_RING_SIZE * sizeof(struct netdev_desc),
                ioaddr + TxRingPtr);

        /* Initialize other registers.
         * Configure the PCI bus bursts and FIFO thresholds.
         * Configure for standard, in-spec Ethernet.
         * Start with half-duplex. check_link will update
         * to the correct settings.
         */

        /* DRTH: 2: start tx if 64 bytes are in the fifo
         * FLTH: 0x10: refill with next packet if 512 bytes are free
         * MXDMA: 0: up to 256 byte bursts.
         *      MXDMA must be <= FLTH
         * ECRETRY=1
         * ATP=1
         */
        np->tx_config = TxAutoPad | TxCollRetry | TxMxdma_256 | (0x1002);
        writel(np->tx_config, ioaddr + TxConfig);

        /* DRTH 0x10: start copying to memory if 128 bytes are in the fifo
         * MXDMA 0: up to 256 byte bursts
         */
        np->rx_config = RxMxdma_256 | 0x20;
        writel(np->rx_config, ioaddr + RxConfig);

        /* Disable PME:
         * The PME bit is initialized from the EEPROM contents.
         * PCI cards probably have PME disabled, but motherboard
         * implementations may have PME set to enable WakeOnLan.
         * With PME set the chip will scan incoming packets but
         * nothing will be written to memory. */
        np->SavedClkRun = readl(ioaddr + ClkRun);
        writel(np->SavedClkRun & ~PMEEnable, ioaddr + ClkRun);
        if (np->SavedClkRun & PMEStatus && netif_msg_wol(np)) {
                printk(KERN_NOTICE "%s: Wake-up event %#08x\n",
                        dev->name, readl(ioaddr + WOLCmd));
        }

        check_link(dev);
        __set_rx_mode(dev);

        /* Enable interrupts by setting the interrupt mask. */
        writel(DEFAULT_INTR, ioaddr + IntrMask);
        writel(1, ioaddr + IntrEnable);

        writel(RxOn | TxOn, ioaddr + ChipCmd);
        writel(StatsClear, ioaddr + StatsCtrl); /* Clear Stats */
}
1323
 
1324
/*
1325
 * netdev_timer:
1326
 * Purpose:
1327
 * 1) check for link changes. Usually they are handled by the MII interrupt
1328
 *    but it doesn't hurt to check twice.
1329
 * 2) check for sudden death of the NIC:
1330
 *    It seems that a reference set for this chip went out with incorrect info,
1331
 *    and there exist boards that aren't quite right.  An unexpected voltage
1332
 *    drop can cause the PHY to get itself in a weird state (basically reset).
1333
 *    NOTE: this only seems to affect revC chips.
1334
 * 3) check of death of the RX path due to OOM
1335
 */
1336
/*
 * Periodic housekeeping (see the comment block above for the rationale):
 * detect a spontaneous PHY reset via the saved DSPCFG value, re-check the
 * link, and retry the Rx refill if we previously ran out of memory.
 * Re-arms itself via mod_timer.
 */
static void netdev_timer(unsigned long data)
{
        struct net_device *dev = (struct net_device *)data;
        struct netdev_private *np = dev->priv;
        int next_tick = 5*HZ;   /* normal polling interval */
        long ioaddr = dev->base_addr;
        u16 dspcfg;

        if (netif_msg_timer(np)) {
                /* DO NOT read the IntrStatus register,
                 * a read clears any pending interrupts.
                 */
                printk(KERN_DEBUG "%s: Media selection timer tick.\n",
                        dev->name);
        }

        spin_lock_irq(&np->lock);

        /* check for a nasty random phy-reset - use dspcfg as a flag */
        writew(1, ioaddr+PGSEL);
        dspcfg = readw(ioaddr+DSPCFG);
        writew(0, ioaddr+PGSEL);
        if (dspcfg != np->dspcfg) {
                if (!netif_queue_stopped(dev)) {
                        /* drop the lock before disable_irq, then retake it
                         * for the full reinitialization */
                        spin_unlock_irq(&np->lock);
                        if (netif_msg_hw(np))
                                printk(KERN_NOTICE "%s: possible phy reset: "
                                        "re-initializing\n", dev->name);
                        disable_irq(dev->irq);
                        spin_lock_irq(&np->lock);
                        natsemi_stop_rxtx(dev);
                        dump_ring(dev);
                        reinit_ring(dev);
                        init_registers(dev);
                        spin_unlock_irq(&np->lock);
                        enable_irq(dev->irq);
                } else {
                        /* hurry back */
                        next_tick = HZ;
                        spin_unlock_irq(&np->lock);
                }
        } else {
                /* init_registers() calls check_link() for the above case */
                check_link(dev);
                spin_unlock_irq(&np->lock);
        }
        if (np->oom) {
                /* retry the Rx refill that failed earlier */
                disable_irq(dev->irq);
                np->oom = 0;
                refill_rx(dev);
                enable_irq(dev->irq);
                if (!np->oom) {
                        writel(RxOn, dev->base_addr + ChipCmd);
                } else {
                        /* still OOM: poll again as soon as possible */
                        next_tick = 1;
                }
        }
        mod_timer(&np->timer, jiffies + next_tick);
}
1395
 
1396
static void dump_ring(struct net_device *dev)
1397
{
1398
        struct netdev_private *np = dev->priv;
1399
 
1400
        if (netif_msg_pktdata(np)) {
1401
                int i;
1402
                printk(KERN_DEBUG "  Tx ring at %p:\n", np->tx_ring);
1403
                for (i = 0; i < TX_RING_SIZE; i++) {
1404
                        printk(KERN_DEBUG " #%d desc. %#08x %#08x %#08x.\n",
1405
                                i, np->tx_ring[i].next_desc,
1406
                                np->tx_ring[i].cmd_status,
1407
                                np->tx_ring[i].addr);
1408
                }
1409
                printk(KERN_DEBUG "  Rx ring %p:\n", np->rx_ring);
1410
                for (i = 0; i < RX_RING_SIZE; i++) {
1411
                        printk(KERN_DEBUG " #%d desc. %#08x %#08x %#08x.\n",
1412
                                i, np->rx_ring[i].next_desc,
1413
                                np->rx_ring[i].cmd_status,
1414
                                np->rx_ring[i].addr);
1415
                }
1416
        }
1417
}
1418
 
1419
/*
 * dev->tx_timeout handler: with the interrupt disabled and np->lock held,
 * dump the rings, reset the chip and rebuild the rings/registers, unless
 * another path has taken exclusive control of the hardware (hands_off).
 * Always counts a tx error and wakes the queue.
 */
static void tx_timeout(struct net_device *dev)
{
        struct netdev_private *np = dev->priv;
        long ioaddr = dev->base_addr;

        disable_irq(dev->irq);
        spin_lock_irq(&np->lock);
        if (!np->hands_off) {
                if (netif_msg_tx_err(np))
                        printk(KERN_WARNING
                                "%s: Transmit timed out, status %#08x,"
                                " resetting...\n",
                                dev->name, readl(ioaddr + IntrStatus));
                dump_ring(dev);

                natsemi_reset(dev);
                reinit_ring(dev);
                init_registers(dev);
        } else {
                printk(KERN_WARNING
                        "%s: tx_timeout while in hands_off state?\n",
                        dev->name);
        }
        spin_unlock_irq(&np->lock);
        enable_irq(dev->irq);

        dev->trans_start = jiffies;     /* restart the watchdog clock */
        np->stats.tx_errors++;
        netif_wake_queue(dev);
}
1449
 
1450
static int alloc_ring(struct net_device *dev)
1451
{
1452
        struct netdev_private *np = dev->priv;
1453
        np->rx_ring = pci_alloc_consistent(np->pci_dev,
1454
                sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE),
1455
                &np->ring_dma);
1456
        if (!np->rx_ring)
1457
                return -ENOMEM;
1458
        np->tx_ring = &np->rx_ring[RX_RING_SIZE];
1459
        return 0;
1460
}
1461
 
1462
static void refill_rx(struct net_device *dev)
1463
{
1464
        struct netdev_private *np = dev->priv;
1465
 
1466
        /* Refill the Rx ring buffers. */
1467
        for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
1468
                struct sk_buff *skb;
1469
                int entry = np->dirty_rx % RX_RING_SIZE;
1470
                if (np->rx_skbuff[entry] == NULL) {
1471
                        unsigned int buflen = np->rx_buf_sz + RX_OFFSET;
1472
                        skb = dev_alloc_skb(buflen);
1473
                        np->rx_skbuff[entry] = skb;
1474
                        if (skb == NULL)
1475
                                break; /* Better luck next round. */
1476
                        skb->dev = dev; /* Mark as being used by this device. */
1477
                        np->rx_dma[entry] = pci_map_single(np->pci_dev,
1478
                                skb->tail, buflen, PCI_DMA_FROMDEVICE);
1479
                        np->rx_ring[entry].addr = cpu_to_le32(np->rx_dma[entry]);
1480
                }
1481
                np->rx_ring[entry].cmd_status = cpu_to_le32(np->rx_buf_sz);
1482
        }
1483
        if (np->cur_rx - np->dirty_rx == RX_RING_SIZE) {
1484
                if (netif_msg_rx_err(np))
1485
                        printk(KERN_WARNING "%s: going OOM.\n", dev->name);
1486
                np->oom = 1;
1487
        }
1488
}
1489
 
1490
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
/* Builds both circular descriptor chains (next_desc links wrap around the
 * ring), resets the ring indices, then lets refill_rx() attach buffers. */
static void init_ring(struct net_device *dev)
{
        struct netdev_private *np = dev->priv;
        int i;

        /* 1) TX ring */
        np->dirty_tx = np->cur_tx = 0;
        for (i = 0; i < TX_RING_SIZE; i++) {
                np->tx_skbuff[i] = NULL;
                /* link to the next Tx descriptor; the Tx descriptors sit
                 * behind the RX_RING_SIZE Rx descriptors in the DMA region */
                np->tx_ring[i].next_desc = cpu_to_le32(np->ring_dma
                        +sizeof(struct netdev_desc)
                        *((i+1)%TX_RING_SIZE+RX_RING_SIZE));
                np->tx_ring[i].cmd_status = 0;
        }

        /* 2) RX ring */
        np->dirty_rx = 0;
        np->cur_rx = RX_RING_SIZE;
        np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
        np->oom = 0;
        np->rx_head_desc = &np->rx_ring[0];

        /* Please be careful before changing this loop - at least gcc-2.95.1
         * miscompiles it otherwise.
         */
        /* Initialize all Rx descriptors. */
        for (i = 0; i < RX_RING_SIZE; i++) {
                np->rx_ring[i].next_desc = cpu_to_le32(np->ring_dma
                                +sizeof(struct netdev_desc)
                                *((i+1)%RX_RING_SIZE));
                /* DescOwn = descriptor owned by the driver, not the chip */
                np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn);
                np->rx_skbuff[i] = NULL;
        }
        refill_rx(dev);
        dump_ring(dev);
}
1527
 
1528
static void drain_tx(struct net_device *dev)
1529
{
1530
        struct netdev_private *np = dev->priv;
1531
        int i;
1532
 
1533
        for (i = 0; i < TX_RING_SIZE; i++) {
1534
                if (np->tx_skbuff[i]) {
1535
                        pci_unmap_single(np->pci_dev,
1536
                                np->tx_dma[i], np->tx_skbuff[i]->len,
1537
                                PCI_DMA_TODEVICE);
1538
                        dev_kfree_skb(np->tx_skbuff[i]);
1539
                        np->stats.tx_dropped++;
1540
                }
1541
                np->tx_skbuff[i] = NULL;
1542
        }
1543
}
1544
 
1545
static void drain_ring(struct net_device *dev)
1546
{
1547
        struct netdev_private *np = dev->priv;
1548
        unsigned int buflen = np->rx_buf_sz + RX_OFFSET;
1549
        int i;
1550
 
1551
        /* Free all the skbuffs in the Rx queue. */
1552
        for (i = 0; i < RX_RING_SIZE; i++) {
1553
                np->rx_ring[i].cmd_status = 0;
1554
                np->rx_ring[i].addr = 0xBADF00D0; /* An invalid address. */
1555
                if (np->rx_skbuff[i]) {
1556
                        pci_unmap_single(np->pci_dev,
1557
                                np->rx_dma[i], buflen,
1558
                                PCI_DMA_FROMDEVICE);
1559
                        dev_kfree_skb(np->rx_skbuff[i]);
1560
                }
1561
                np->rx_skbuff[i] = NULL;
1562
        }
1563
        drain_tx(dev);
1564
}
1565
 
1566
/* Release the single DMA-coherent block that holds both descriptor
 * rings (RX_RING_SIZE + TX_RING_SIZE descriptors, based at
 * np->rx_ring / np->ring_dma). */
static void free_ring(struct net_device *dev)
{
        struct netdev_private *np = dev->priv;
        pci_free_consistent(np->pci_dev,
                sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE),
                np->rx_ring, np->ring_dma);
}
1573
 
1574
/* Reset both rings for reuse without freeing descriptor memory:
 * drop pending Tx frames, zero the Tx descriptors, reset the ring
 * indices, re-mark the Rx descriptors and let refill_rx() re-attach
 * buffers.  The next_desc chaining set up by init_ring() is kept. */
static void reinit_ring(struct net_device *dev)
{
        struct netdev_private *np = dev->priv;
        int i;

        /* drain TX ring */
        drain_tx(dev);
        np->dirty_tx = np->cur_tx = 0;
        for (i=0;i<TX_RING_SIZE;i++)
                np->tx_ring[i].cmd_status = 0;

        /* RX Ring */
        np->dirty_rx = 0;
        np->cur_rx = RX_RING_SIZE;
        np->rx_head_desc = &np->rx_ring[0];
        /* Initialize all Rx descriptors. */
        for (i = 0; i < RX_RING_SIZE; i++)
                np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn);

        refill_rx(dev);
}
1595
 
1596
/* hard_start_xmit handler: map the skb for DMA, fill the next Tx
 * descriptor, hand it to the chip and kick the Tx engine.  Always
 * consumes the skb and returns 0; when np->hands_off is set the frame
 * is dropped instead of touching the hardware. */
static int start_tx(struct sk_buff *skb, struct net_device *dev)
{
        struct netdev_private *np = dev->priv;
        unsigned entry;

        /* Note: Ordering is important here, set the field with the
           "ownership" bit last, and only then increment cur_tx. */

        /* Calculate the next Tx descriptor entry. */
        entry = np->cur_tx % TX_RING_SIZE;

        np->tx_skbuff[entry] = skb;
        np->tx_dma[entry] = pci_map_single(np->pci_dev,
                                skb->data,skb->len, PCI_DMA_TODEVICE);

        np->tx_ring[entry].addr = cpu_to_le32(np->tx_dma[entry]);

        spin_lock_irq(&np->lock);

        if (!np->hands_off) {
                /* Writing DescOwn gives this descriptor to the chip. */
                np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn | skb->len);
                /* StrongARM: Explicitly cache flush np->tx_ring and
                 * skb->data,skb->len. */
                wmb();
                np->cur_tx++;
                /* Ring nearly full: reclaim finished slots, and stop the
                 * queue if that did not free enough. */
                if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) {
                        netdev_tx_done(dev);
                        if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1)
                                netif_stop_queue(dev);
                }
                /* Wake the potentially-idle transmit channel. */
                writel(TxOn, dev->base_addr + ChipCmd);
        } else {
                /* Device is being reconfigured; drop the frame silently. */
                dev_kfree_skb_irq(skb);
                np->stats.tx_dropped++;
        }
        spin_unlock_irq(&np->lock);

        dev->trans_start = jiffies;

        if (netif_msg_tx_queued(np)) {
                printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
                        dev->name, np->cur_tx, entry);
        }
        return 0;
}
1642
 
1643
/* Reclaim Tx descriptors the chip has finished with: update stats,
 * unmap the DMA buffers and free the skbs.  Runs with np->lock held
 * (called from the interrupt handler under spin_lock and from
 * start_tx under spin_lock_irq). */
static void netdev_tx_done(struct net_device *dev)
{
        struct netdev_private *np = dev->priv;

        for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
                int entry = np->dirty_tx % TX_RING_SIZE;
                /* Stop at the first descriptor still owned by the chip. */
                if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescOwn))
                        break;
                if (netif_msg_tx_done(np))
                        printk(KERN_DEBUG
                                "%s: tx frame #%d finished, status %#08x.\n",
                                        dev->name, np->dirty_tx,
                                        le32_to_cpu(np->tx_ring[entry].cmd_status));
                if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescPktOK)) {
                        np->stats.tx_packets++;
                        np->stats.tx_bytes += np->tx_skbuff[entry]->len;
                } else { /* Various Tx errors */
                        int tx_status =
                                le32_to_cpu(np->tx_ring[entry].cmd_status);
                        if (tx_status & (DescTxAbort|DescTxExcColl))
                                np->stats.tx_aborted_errors++;
                        if (tx_status & DescTxFIFO)
                                np->stats.tx_fifo_errors++;
                        if (tx_status & DescTxCarrier)
                                np->stats.tx_carrier_errors++;
                        if (tx_status & DescTxOOWCol)
                                np->stats.tx_window_errors++;
                        np->stats.tx_errors++;
                }
                pci_unmap_single(np->pci_dev,np->tx_dma[entry],
                                        np->tx_skbuff[entry]->len,
                                        PCI_DMA_TODEVICE);
                /* Free the original skb. */
                dev_kfree_skb_irq(np->tx_skbuff[entry]);
                np->tx_skbuff[entry] = NULL;
        }
        if (netif_queue_stopped(dev)
                && np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
                /* The ring is no longer full, wake queue. */
                netif_wake_queue(dev);
        }
}
1685
 
1686
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
{
        struct net_device *dev = dev_instance;
        struct netdev_private *np = dev->priv;
        long ioaddr = dev->base_addr;
        int boguscnt = max_interrupt_work;
        unsigned int handled = 0;

        /* During reconfiguration the handler must not touch the chip. */
        if (np->hands_off)
                return IRQ_NONE;
        do {
                /* Reading automatically acknowledges all int sources. */
                u32 intr_status = readl(ioaddr + IntrStatus);

                if (netif_msg_intr(np))
                        printk(KERN_DEBUG
                                "%s: Interrupt, status %#08x, mask %#08x.\n",
                                dev->name, intr_status,
                                readl(ioaddr + IntrMask));

                if (intr_status == 0)
                        break;
                handled = 1;

                /* Rx work is done without the lock; refill happens there. */
                if (intr_status &
                   (IntrRxDone | IntrRxIntr | RxStatusFIFOOver |
                    IntrRxErr | IntrRxOverrun)) {
                        netdev_rx(dev);
                }

                /* Tx reclaim requires np->lock (shared with start_tx). */
                if (intr_status &
                   (IntrTxDone | IntrTxIntr | IntrTxIdle | IntrTxErr)) {
                        spin_lock(&np->lock);
                        netdev_tx_done(dev);
                        spin_unlock(&np->lock);
                }

                /* Abnormal error summary/uncommon events handlers. */
                if (intr_status & IntrAbnormalSummary)
                        netdev_error(dev, intr_status);

                /* Bound the work per interrupt to max_interrupt_work. */
                if (--boguscnt < 0) {
                        if (netif_msg_intr(np))
                                printk(KERN_WARNING
                                        "%s: Too much work at interrupt, "
                                        "status=%#08x.\n",
                                        dev->name, intr_status);
                        break;
                }
        } while (1);

        if (netif_msg_intr(np))
                printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name);

        return IRQ_RETVAL(handled);
}
1744
 
1745
/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation. */
static void netdev_rx(struct net_device *dev)
{
        struct netdev_private *np = dev->priv;
        int entry = np->cur_rx % RX_RING_SIZE;
        int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
        s32 desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
        unsigned int buflen = np->rx_buf_sz + RX_OFFSET;

        /* If the driver owns the next entry it's a new packet. Send it up. */
        while (desc_status < 0) { /* e.g. & DescOwn */
                if (netif_msg_rx_status(np))
                        printk(KERN_DEBUG
                                "  netdev_rx() entry %d status was %#08x.\n",
                                entry, desc_status);
                /* Never process more slots than were dirty on entry. */
                if (--boguscnt < 0)
                        break;
                if ((desc_status&(DescMore|DescPktOK|DescRxLong)) != DescPktOK){
                        if (desc_status & DescMore) {
                                if (netif_msg_rx_err(np))
                                        printk(KERN_WARNING
                                                "%s: Oversized(?) Ethernet "
                                                "frame spanned multiple "
                                                "buffers, entry %#08x "
                                                "status %#08x.\n", dev->name,
                                                np->cur_rx, desc_status);
                                np->stats.rx_length_errors++;
                        } else {
                                /* There was an error. */
                                np->stats.rx_errors++;
                                if (desc_status & (DescRxAbort|DescRxOver))
                                        np->stats.rx_over_errors++;
                                if (desc_status & (DescRxLong|DescRxRunt))
                                        np->stats.rx_length_errors++;
                                if (desc_status & (DescRxInvalid|DescRxAlign))
                                        np->stats.rx_frame_errors++;
                                if (desc_status & DescRxCRC)
                                        np->stats.rx_crc_errors++;
                        }
                } else {
                        struct sk_buff *skb;
                        /* Omit CRC size. */
                        int pkt_len = (desc_status & DescSizeMask) - 4;
                        /* Check if the packet is long enough to accept
                         * without copying to a minimally-sized skbuff.
                         * Small frames are copied into a fresh skb so the
                         * large mapped buffer stays in the ring. */
                        if (pkt_len < rx_copybreak
                            && (skb = dev_alloc_skb(pkt_len + RX_OFFSET)) != NULL) {
                                skb->dev = dev;
                                /* 16 byte align the IP header */
                                skb_reserve(skb, RX_OFFSET);
                                pci_dma_sync_single(np->pci_dev,
                                        np->rx_dma[entry],
                                        buflen,
                                        PCI_DMA_FROMDEVICE);
#if HAS_IP_COPYSUM
                                eth_copy_and_sum(skb,
                                        np->rx_skbuff[entry]->tail, pkt_len, 0);
                                skb_put(skb, pkt_len);
#else
                                memcpy(skb_put(skb, pkt_len),
                                        np->rx_skbuff[entry]->tail, pkt_len);
#endif
                        } else {
                                /* Hand the ring buffer itself up the stack;
                                 * refill_rx() will allocate a replacement. */
                                pci_unmap_single(np->pci_dev, np->rx_dma[entry],
                                        buflen, PCI_DMA_FROMDEVICE);
                                skb_put(skb = np->rx_skbuff[entry], pkt_len);
                                np->rx_skbuff[entry] = NULL;
                        }
                        skb->protocol = eth_type_trans(skb, dev);
                        netif_rx(skb);
                        dev->last_rx = jiffies;
                        np->stats.rx_packets++;
                        np->stats.rx_bytes += pkt_len;
                }
                entry = (++np->cur_rx) % RX_RING_SIZE;
                np->rx_head_desc = &np->rx_ring[entry];
                desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
        }
        refill_rx(dev);

        /* Restart Rx engine if stopped. */
        if (np->oom)
                /* Allocation failed in refill_rx(); retry from the timer. */
                mod_timer(&np->timer, jiffies + 1);
        else
                writel(RxOn, dev->base_addr + ChipCmd);
}
1832
 
1833
/* Handle the "abnormal interrupt summary" sources: link changes,
 * statistics overflow, Tx underrun (raising the drain threshold),
 * wake-on-LAN events, Rx status FIFO overrun and PCI errors.
 * Called from the interrupt handler; takes np->lock itself. */
static void netdev_error(struct net_device *dev, int intr_status)
{
        struct netdev_private *np = dev->priv;
        long ioaddr = dev->base_addr;

        spin_lock(&np->lock);
        if (intr_status & LinkChange) {
                u16 adv = mdio_read(dev, 1, MII_ADVERTISE);
                u16 lpa = mdio_read(dev, 1, MII_LPA);
                if (mdio_read(dev, 1, MII_BMCR) & BMCR_ANENABLE
                 && netif_msg_link(np)) {
                        printk(KERN_INFO
                                "%s: Autonegotiation advertising"
                                " %#04x  partner %#04x.\n", dev->name,
                                adv, lpa);
                }

                /* read MII int status to clear the flag */
                readw(ioaddr + MIntrStatus);
                check_link(dev);
        }
        if (intr_status & StatsMax) {
                /* Hardware stats counters near overflow: fold them in now. */
                __get_stats(dev);
        }
        if (intr_status & IntrTxUnderrun) {
                /* Raise the Tx drain threshold (txcfg low bits, max 62)
                 * so the FIFO fills further before transmission starts. */
                if ((np->tx_config & TxDrthMask) < 62)
                        np->tx_config += 2;
                if (netif_msg_tx_err(np))
                        printk(KERN_NOTICE
                                "%s: increased Tx threshold, txcfg %#08x.\n",
                                dev->name, np->tx_config);
                writel(np->tx_config, ioaddr + TxConfig);
        }
        if (intr_status & WOLPkt && netif_msg_wol(np)) {
                int wol_status = readl(ioaddr + WOLCmd);
                printk(KERN_NOTICE "%s: Link wake-up event %#08x\n",
                        dev->name, wol_status);
        }
        if (intr_status & RxStatusFIFOOver) {
                if (netif_msg_rx_err(np) && netif_msg_intr(np)) {
                        printk(KERN_NOTICE "%s: Rx status FIFO overrun\n",
                                dev->name);
                }
                np->stats.rx_fifo_errors++;
        }
        /* Hmmmmm, it's not clear how to recover from PCI faults. */
        if (intr_status & IntrPCIErr) {
                printk(KERN_NOTICE "%s: PCI error %#08x\n", dev->name,
                        intr_status & IntrPCIErr);
                np->stats.tx_fifo_errors++;
                np->stats.rx_fifo_errors++;
        }
        spin_unlock(&np->lock);
}
1887
 
1888
/* Fold the chip's clear-on-read drop counters into np->stats.
 * Lock-free core; callers (get_stats, netdev_error) hold np->lock. */
static void __get_stats(struct net_device *dev)
{
        long ioaddr = dev->base_addr;
        struct netdev_private *np = dev->priv;

        /* The chip only need report frame silently dropped. */
        np->stats.rx_crc_errors += readl(ioaddr + RxCRCErrs);
        np->stats.rx_missed_errors += readl(ioaddr + RxMissed);
}
1897
 
1898
/* dev->get_stats handler: refresh the hardware counters (only while
 * the interface is running and not in hands-off mode) and return the
 * accumulated statistics. */
static struct net_device_stats *get_stats(struct net_device *dev)
{
        struct netdev_private *np = dev->priv;

        /* The chip only need report frame silently dropped. */
        spin_lock_irq(&np->lock);
        if (netif_running(dev) && !np->hands_off)
                __get_stats(dev);
        spin_unlock_irq(&np->lock);

        return &np->stats;
}
1910
 
1911
#define HASH_TABLE      0x200
1912
static void __set_rx_mode(struct net_device *dev)
1913
{
1914
        long ioaddr = dev->base_addr;
1915
        struct netdev_private *np = dev->priv;
1916
        u8 mc_filter[64]; /* Multicast hash filter */
1917
        u32 rx_mode;
1918
 
1919
        if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1920
                /* Unconditionally log net taps. */
1921
                printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
1922
                        dev->name);
1923
                rx_mode = RxFilterEnable | AcceptBroadcast
1924
                        | AcceptAllMulticast | AcceptAllPhys | AcceptMyPhys;
1925
        } else if ((dev->mc_count > multicast_filter_limit)
1926
          || (dev->flags & IFF_ALLMULTI)) {
1927
                rx_mode = RxFilterEnable | AcceptBroadcast
1928
                        | AcceptAllMulticast | AcceptMyPhys;
1929
        } else {
1930
                struct dev_mc_list *mclist;
1931
                int i;
1932
                memset(mc_filter, 0, sizeof(mc_filter));
1933
                for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1934
                         i++, mclist = mclist->next) {
1935
                        int i = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 23) & 0x1ff;
1936
                        mc_filter[i/8] |= (1 << (i & 0x07));
1937
                }
1938
                rx_mode = RxFilterEnable | AcceptBroadcast
1939
                        | AcceptMulticast | AcceptMyPhys;
1940
                for (i = 0; i < 64; i += 2) {
1941
                        writew(HASH_TABLE + i, ioaddr + RxFilterAddr);
1942
                        writew((mc_filter[i+1]<<8) + mc_filter[i],
1943
                                ioaddr + RxFilterData);
1944
                }
1945
        }
1946
        writel(rx_mode, ioaddr + RxFilterAddr);
1947
        np->cur_rx_mode = rx_mode;
1948
}
1949
 
1950
/* Locked wrapper around __set_rx_mode(); skips the hardware update
 * while np->hands_off is set. */
static void set_rx_mode(struct net_device *dev)
{
        struct netdev_private *np = dev->priv;
        spin_lock_irq(&np->lock);
        if (!np->hands_off)
                __set_rx_mode(dev);
        spin_unlock_irq(&np->lock);
}
1958
 
1959
/* Dispatch ETHTOOL_* sub-ioctls.  'useraddr' is a userspace pointer;
 * every transfer goes through get_user/copy_{to,from}_user and returns
 * -EFAULT on fault, -EOPNOTSUPP for unknown commands. */
static int netdev_ethtool_ioctl(struct net_device *dev, void *useraddr)
{
        struct netdev_private *np = dev->priv;
        u32 cmd;

        if (get_user(cmd, (u32 *)useraddr))
                return -EFAULT;

        switch (cmd) {
        /* get driver info */
        case ETHTOOL_GDRVINFO: {
                struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO};
                /* NOTE(review): driver/version are bounded here with
                 * ETHTOOL_BUSINFO_LEN rather than the fields' own sizes --
                 * confirm the field widths match, and that strncpy leaves
                 * them NUL-terminated. */
                strncpy(info.driver, DRV_NAME, ETHTOOL_BUSINFO_LEN);
                strncpy(info.version, DRV_VERSION, ETHTOOL_BUSINFO_LEN);
                info.fw_version[0] = '\0';
                strncpy(info.bus_info, pci_name(np->pci_dev),
                        ETHTOOL_BUSINFO_LEN);
                info.eedump_len = NATSEMI_EEPROM_SIZE;
                info.regdump_len = NATSEMI_REGS_SIZE;
                if (copy_to_user(useraddr, &info, sizeof(info)))
                        return -EFAULT;
                return 0;
        }
        /* get settings */
        case ETHTOOL_GSET: {
                struct ethtool_cmd ecmd = { ETHTOOL_GSET };
                spin_lock_irq(&np->lock);
                netdev_get_ecmd(dev, &ecmd);
                spin_unlock_irq(&np->lock);
                if (copy_to_user(useraddr, &ecmd, sizeof(ecmd)))
                        return -EFAULT;
                return 0;
        }
        /* set settings */
        case ETHTOOL_SSET: {
                struct ethtool_cmd ecmd;
                int r;
                if (copy_from_user(&ecmd, useraddr, sizeof(ecmd)))
                        return -EFAULT;
                spin_lock_irq(&np->lock);
                r = netdev_set_ecmd(dev, &ecmd);
                spin_unlock_irq(&np->lock);
                return r;
        }
        /* get wake-on-lan */
        case ETHTOOL_GWOL: {
                struct ethtool_wolinfo wol = {ETHTOOL_GWOL};
                spin_lock_irq(&np->lock);
                netdev_get_wol(dev, &wol.supported, &wol.wolopts);
                netdev_get_sopass(dev, wol.sopass);
                spin_unlock_irq(&np->lock);
                if (copy_to_user(useraddr, &wol, sizeof(wol)))
                        return -EFAULT;
                return 0;
        }
        /* set wake-on-lan */
        case ETHTOOL_SWOL: {
                struct ethtool_wolinfo wol;
                int r;
                if (copy_from_user(&wol, useraddr, sizeof(wol)))
                        return -EFAULT;
                spin_lock_irq(&np->lock);
                netdev_set_wol(dev, wol.wolopts);
                r = netdev_set_sopass(dev, wol.sopass);
                spin_unlock_irq(&np->lock);
                return r;
        }
        /* get registers */
        case ETHTOOL_GREGS: {
                struct ethtool_regs regs;
                u8 regbuf[NATSEMI_REGS_SIZE];
                int r;

                if (copy_from_user(&regs, useraddr, sizeof(regs)))
                        return -EFAULT;

                /* Clamp the user-supplied length to our dump size. */
                if (regs.len > NATSEMI_REGS_SIZE) {
                        regs.len = NATSEMI_REGS_SIZE;
                }
                regs.version = NATSEMI_REGS_VER;
                if (copy_to_user(useraddr, &regs, sizeof(regs)))
                        return -EFAULT;

                useraddr += offsetof(struct ethtool_regs, data);

                spin_lock_irq(&np->lock);
                r = netdev_get_regs(dev, regbuf);
                spin_unlock_irq(&np->lock);

                if (r)
                        return r;
                if (copy_to_user(useraddr, regbuf, regs.len))
                        return -EFAULT;
                return 0;
        }
        /* get message-level */
        case ETHTOOL_GMSGLVL: {
                struct ethtool_value edata = {ETHTOOL_GMSGLVL};
                edata.data = np->msg_enable;
                if (copy_to_user(useraddr, &edata, sizeof(edata)))
                        return -EFAULT;
                return 0;
        }
        /* set message-level */
        case ETHTOOL_SMSGLVL: {
                struct ethtool_value edata;
                if (copy_from_user(&edata, useraddr, sizeof(edata)))
                        return -EFAULT;
                np->msg_enable = edata.data;
                return 0;
        }
        /* restart autonegotiation */
        case ETHTOOL_NWAY_RST: {
                int tmp;
                int r = -EINVAL;
                /* if autoneg is off, it's an error */
                tmp = mdio_read(dev, 1, MII_BMCR);
                if (tmp & BMCR_ANENABLE) {
                        tmp |= (BMCR_ANRESTART);
                        mdio_write(dev, 1, MII_BMCR, tmp);
                        r = 0;
                }
                return r;
        }
        /* get link status */
        case ETHTOOL_GLINK: {
                struct ethtool_value edata = {ETHTOOL_GLINK};
                /* LSTATUS is latched low until a read - so read twice */
                mdio_read(dev, 1, MII_BMSR);
                edata.data = (mdio_read(dev, 1, MII_BMSR)&BMSR_LSTATUS) ? 1:0;
                if (copy_to_user(useraddr, &edata, sizeof(edata)))
                        return -EFAULT;
                return 0;
        }
        /* get EEPROM */
        case ETHTOOL_GEEPROM: {
                struct ethtool_eeprom eeprom;
                u8 eebuf[NATSEMI_EEPROM_SIZE];
                int r;

                if (copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
                        return -EFAULT;

                /* Reject offset+len wrap-around. */
                if (eeprom.offset > eeprom.offset+eeprom.len)
                        return -EINVAL;

                /* Clamp the request to the EEPROM size. */
                if ((eeprom.offset+eeprom.len) > NATSEMI_EEPROM_SIZE) {
                        eeprom.len = NATSEMI_EEPROM_SIZE-eeprom.offset;
                }
                eeprom.magic = PCI_VENDOR_ID_NS | (PCI_DEVICE_ID_NS_83815<<16);
                if (copy_to_user(useraddr, &eeprom, sizeof(eeprom)))
                        return -EFAULT;

                useraddr += offsetof(struct ethtool_eeprom, data);

                spin_lock_irq(&np->lock);
                r = netdev_get_eeprom(dev, eebuf);
                spin_unlock_irq(&np->lock);

                if (r)
                        return r;
                if (copy_to_user(useraddr, eebuf+eeprom.offset, eeprom.len))
                        return -EFAULT;
                return 0;
        }

        }

        return -EOPNOTSUPP;
}
2129
 
2130
/* Translate generic ethtool WAKE_* flags into the chip's WOLCmd bits
 * and write them, preserving any bits outside WakeOptsSummary.
 * Always returns 0. */
static int netdev_set_wol(struct net_device *dev, u32 newval)
{
        struct netdev_private *np = dev->priv;
        u32 data = readl(dev->base_addr + WOLCmd) & ~WakeOptsSummary;

        /* translate to bitmasks this chip understands */
        if (newval & WAKE_PHY)
                data |= WakePhy;
        if (newval & WAKE_UCAST)
                data |= WakeUnicast;
        if (newval & WAKE_MCAST)
                data |= WakeMulticast;
        if (newval & WAKE_BCAST)
                data |= WakeBroadcast;
        if (newval & WAKE_ARP)
                data |= WakeArp;
        if (newval & WAKE_MAGIC)
                data |= WakeMagic;
        /* SecureOn magic packet is only honoured on revision D+. */
        if ((newval & WAKE_MAGICSECURE) && np->srr >= SRR_DP83815_D)
                data |= WakeMagicSecure;

        writel(data, dev->base_addr + WOLCmd);

        return 0;
}
2158
 
2159
/* Report the supported WAKE_* options and decode the chip's current
 * WOLCmd register into generic ethtool flags.  Always returns 0. */
static int netdev_get_wol(struct net_device *dev, u32 *supported, u32 *cur)
{
        struct netdev_private *np = dev->priv;
        u32 regval = readl(dev->base_addr + WOLCmd);
        u32 mode = 0;

        *supported = (WAKE_PHY | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST
                        | WAKE_ARP | WAKE_MAGIC);

        /* SOPASS works on revD and higher */
        if (np->srr >= SRR_DP83815_D)
                *supported |= WAKE_MAGICSECURE;

        /* translate from chip bitmasks */
        if (regval & WakePhy)
                mode |= WAKE_PHY;
        if (regval & WakeUnicast)
                mode |= WAKE_UCAST;
        if (regval & WakeMulticast)
                mode |= WAKE_MCAST;
        if (regval & WakeBroadcast)
                mode |= WAKE_BCAST;
        if (regval & WakeArp)
                mode |= WAKE_ARP;
        if (regval & WakeMagic)
                mode |= WAKE_MAGIC;
        /* This bit can be on in revC too, but it's broken there. */
        if (regval & WakeMagicSecure)
                mode |= WAKE_MAGICSECURE;

        *cur = mode;

        return 0;
}
2193
 
2194
/* Store the 6-byte SecureOn password (three 16-bit words) in the
 * chip's undocumented RFCR slots 0xa/0xc/0xe.  A no-op on silicon
 * before revision D.  Always returns 0. */
static int netdev_set_sopass(struct net_device *dev, u8 *newval)
{
        struct netdev_private *np = dev->priv;
        u16 *sval = (u16 *)newval;
        u32 addr;
        int i;

        if (np->srr < SRR_DP83815_D)
                return 0;

        /* enable writing to these registers by disabling the RX filter */
        addr = readl(dev->base_addr + RxFilterAddr) & ~RFCRAddressMask;
        addr &= ~RxFilterEnable;
        writel(addr, dev->base_addr + RxFilterAddr);

        /* write the three words to (undocumented) RFCR vals 0xa, 0xc, 0xe */
        for (i = 0; i < 3; i++) {
                writel(addr | (0xa + 2*i), dev->base_addr + RxFilterAddr);
                writew(sval[i], dev->base_addr + RxFilterData);
        }

        /* re-enable the RX filter */
        writel(addr | RxFilterEnable, dev->base_addr + RxFilterAddr);

        return 0;
}
2224
 
2225
/* Read back the 6-byte SecureOn password from RFCR slots 0xa/0xc/0xe.
 * Returns all-zero data on silicon before revision D.  Always 0. */
static int netdev_get_sopass(struct net_device *dev, u8 *data)
{
        struct netdev_private *np = dev->priv;
        u16 *sval = (u16 *)data;
        u32 addr;
        int i;

        if (np->srr < SRR_DP83815_D) {
                sval[0] = sval[1] = sval[2] = 0;
                return 0;
        }

        /* read the three words from (undocumented) RFCR vals 0xa, 0xc, 0xe */
        addr = readl(dev->base_addr + RxFilterAddr) & ~RFCRAddressMask;

        for (i = 0; i < 3; i++) {
                writel(addr | (0xa + 2*i), dev->base_addr + RxFilterAddr);
                sval[i] = readw(dev->base_addr + RxFilterData);
        }

        /* restore the previous RxFilterAddr selection */
        writel(addr, dev->base_addr + RxFilterAddr);

        return 0;
}
2252
 
2253
static int netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
2254
{
2255
        u32 tmp;
2256
 
2257
        ecmd->supported =
2258
                (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
2259
                SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
2260
                SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII);
2261
 
2262
        /* only supports twisted-pair or MII */
2263
        tmp = readl(dev->base_addr + ChipConfig);
2264
        if (tmp & CfgExtPhy)
2265
                ecmd->port = PORT_MII;
2266
        else
2267
                ecmd->port = PORT_TP;
2268
 
2269
        /* only supports internal transceiver */
2270
        ecmd->transceiver = XCVR_INTERNAL;
2271
 
2272
        /* not sure what this is for */
2273
        ecmd->phy_address = readw(dev->base_addr + PhyCtrl) & PhyAddrMask;
2274
 
2275
        ecmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
2276
        tmp = mdio_read(dev, 1, MII_ADVERTISE);
2277
        if (tmp & ADVERTISE_10HALF)
2278
                ecmd->advertising |= ADVERTISED_10baseT_Half;
2279
        if (tmp & ADVERTISE_10FULL)
2280
                ecmd->advertising |= ADVERTISED_10baseT_Full;
2281
        if (tmp & ADVERTISE_100HALF)
2282
                ecmd->advertising |= ADVERTISED_100baseT_Half;
2283
        if (tmp & ADVERTISE_100FULL)
2284
                ecmd->advertising |= ADVERTISED_100baseT_Full;
2285
 
2286
        tmp = mdio_read(dev, 1, MII_BMCR);
2287
        if (tmp & BMCR_ANENABLE) {
2288
                ecmd->advertising |= ADVERTISED_Autoneg;
2289
                ecmd->autoneg = AUTONEG_ENABLE;
2290
        } else {
2291
                ecmd->autoneg = AUTONEG_DISABLE;
2292
        }
2293
 
2294
        tmp = readl(dev->base_addr + ChipConfig);
2295
        if (tmp & CfgSpeed100) {
2296
                ecmd->speed = SPEED_100;
2297
        } else {
2298
                ecmd->speed = SPEED_10;
2299
        }
2300
 
2301
        if (tmp & CfgFullDuplex) {
2302
                ecmd->duplex = DUPLEX_FULL;
2303
        } else {
2304
                ecmd->duplex = DUPLEX_HALF;
2305
        }
2306
 
2307
        /* ignore maxtxpkt, maxrxpkt for now */
2308
 
2309
        return 0;
2310
}
2311
 
2312
static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
2313
{
2314
        struct netdev_private *np = dev->priv;
2315
        u32 tmp;
2316
 
2317
        if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
2318
                return -EINVAL;
2319
        if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
2320
                return -EINVAL;
2321
        if (ecmd->port != PORT_TP && ecmd->port != PORT_MII)
2322
                return -EINVAL;
2323
        if (ecmd->transceiver != XCVR_INTERNAL)
2324
                return -EINVAL;
2325
        if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE)
2326
                return -EINVAL;
2327
        /* ignore phy_address, maxtxpkt, maxrxpkt for now */
2328
 
2329
        /* WHEW! now lets bang some bits */
2330
 
2331
        tmp = mdio_read(dev, 1, MII_BMCR);
2332
        if (ecmd->autoneg == AUTONEG_ENABLE) {
2333
                /* turn on autonegotiation */
2334
                tmp |= BMCR_ANENABLE;
2335
                np->advertising = mdio_read(dev, 1, MII_ADVERTISE);
2336
        } else {
2337
                /* turn off auto negotiation, set speed and duplexity */
2338
                tmp &= ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX);
2339
                if (ecmd->speed == SPEED_100)
2340
                        tmp |= BMCR_SPEED100;
2341
                if (ecmd->duplex == DUPLEX_FULL)
2342
                        tmp |= BMCR_FULLDPLX;
2343
                else
2344
                        np->full_duplex = 0;
2345
        }
2346
        mdio_write(dev, 1, MII_BMCR, tmp);
2347
        return 0;
2348
}
2349
 
2350
/* ethtool register dump: copy all of register page 0, the four "magic"
 * page-1 registers (PMDCSR/TSTDAT/DSPCFG/SDCFG) and the RFCR-indexed
 * receive filter RAM into buf as 32-bit words.  The saved RxFilterAddr
 * value is restored afterwards.  Always returns 0. */
static int netdev_get_regs(struct net_device *dev, u8 *buf)
{
	long ioaddr = dev->base_addr;
	u32 *rbuf = (u32 *)buf;
	u32 rfcr;
	int pos, j;

	/* read all of page 0 of registers */
	for (pos = 0; pos < NATSEMI_PG0_NREGS; pos++)
		rbuf[pos] = readl(ioaddr + pos*4);

	/* read only the 'magic' registers from page 1 */
	writew(1, ioaddr + PGSEL);
	rbuf[pos++] = readw(ioaddr + PMDCSR);
	rbuf[pos++] = readw(ioaddr + TSTDAT);
	rbuf[pos++] = readw(ioaddr + DSPCFG);
	rbuf[pos++] = readw(ioaddr + SDCFG);
	writew(0, ioaddr + PGSEL);

	/* read RFCR indexed registers, restoring the address afterwards */
	rfcr = readl(ioaddr + RxFilterAddr);
	for (j = 0; j < NATSEMI_RFDR_NREGS; j++) {
		writel(j*2, ioaddr + RxFilterAddr);
		rbuf[pos++] = readw(ioaddr + RxFilterData);
	}
	writel(rfcr, ioaddr + RxFilterAddr);

	/* the interrupt status is clear-on-read - see if we missed any */
	if (rbuf[4] & rbuf[5])
		printk(KERN_WARNING
			"%s: shoot, we dropped an interrupt (%#08x)\n",
			dev->name, rbuf[4] & rbuf[5]);

	return 0;
}
2387
 
2388
/* Reverse the bit order of a 16-bit value (bit 0 <-> bit 15, etc.).
 * Used because the EEPROM stores data bit-swapped relative to what
 * eeprom_read() returns.
 * NOTE(review): x is evaluated many times - only pass side-effect-free
 * arguments. */
#define SWAP_BITS(x)	( (((x) & 0x0001) << 15) | (((x) & 0x0002) << 13) \
			| (((x) & 0x0004) << 11) | (((x) & 0x0008) << 9)  \
			| (((x) & 0x0010) << 7)  | (((x) & 0x0020) << 5)  \
			| (((x) & 0x0040) << 3)  | (((x) & 0x0080) << 1)  \
			| (((x) & 0x0100) >> 1)  | (((x) & 0x0200) >> 3)  \
			| (((x) & 0x0400) >> 5)  | (((x) & 0x0800) >> 7)  \
			| (((x) & 0x1000) >> 9)  | (((x) & 0x2000) >> 11) \
			| (((x) & 0x4000) >> 13) | (((x) & 0x8000) >> 15) )
2396
 
2397
/* ethtool EEPROM dump: read the whole EEPROM into buf as 16-bit words.
 * eeprom_read() returns the data in "sane" bit order, so each word is
 * bit-swapped back to present the raw stored layout to userland.
 * Always returns 0. */
static int netdev_get_eeprom(struct net_device *dev, u8 *buf)
{
	u16 *ebuf = (u16 *)buf;
	int word;

	/* eeprom_read reads 16 bits, and indexes by 16 bits */
	for (word = 0; word < NATSEMI_EEPROM_SIZE/2; word++) {
		/* The EEPROM itself stores data bit-swapped, but eeprom_read
		 * reads it back "sanely". So we swap it back here in order to
		 * present it to userland as it is stored.  Read into a
		 * temporary first: SWAP_BITS evaluates its argument several
		 * times, so it must not wrap the eeprom_read call. */
		u16 raw = eeprom_read(dev->base_addr, word);
		ebuf[word] = SWAP_BITS(raw);
	}
	return 0;
}
2412
 
2413
/* Device ioctl dispatcher: SIOCETHTOOL plus the standard MII ioctls.
 * The SIOCDEVPRIVATE variants are 2.4-era binary-compat aliases for the
 * same operations.  The chip's internal PHY is hardwired at MII
 * address 1, which is what the GMIIPHY case reports. */
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	/* MII requests carry their arguments inside ifr_data itself */
	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&rq->ifr_data;

	switch(cmd) {
	case SIOCETHTOOL:
		return netdev_ethtool_ioctl(dev, (void *) rq->ifr_data);
	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
	case SIOCDEVPRIVATE:		/* for binary compat, remove in 2.5 */
		data->phy_id = 1;
		/* Fall Through */

	case SIOCGMIIREG:		/* Read MII PHY register. */
	case SIOCDEVPRIVATE+1:		/* for binary compat, remove in 2.5 */
		data->val_out = mdio_read(dev, data->phy_id & 0x1f,
			data->reg_num & 0x1f);
		return 0;

	case SIOCSMIIREG:		/* Write MII PHY register. */
	case SIOCDEVPRIVATE+2:		/* for binary compat, remove in 2.5 */
		/* writes can disturb the link - privileged callers only */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		mdio_write(dev, data->phy_id & 0x1f, data->reg_num & 0x1f,
			data->val_in);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
2442
 
2443
/* Switch the stopped chip into wake-on-lan mode.  The caller must have
 * already stopped the rx/tx processes.  Rx is restarted with a NULL
 * ring pointer so the chip can match WOL patterns without delivering
 * packets, and PME is armed from the saved ClkRun value.  If
 * enable_intr is set, WOL/link events also raise a normal interrupt
 * (e.g. so a notification could be sent). */
static void enable_wol_mode(struct net_device *dev, int enable_intr)
{
	long ioaddr = dev->base_addr;
	struct netdev_private *np = dev->priv;

	if (netif_msg_wol(np))
		printk(KERN_INFO "%s: remaining active for wake-on-lan\n",
			dev->name);

	/* For WOL we must restart the rx process in silent mode.
	 * Write NULL to the RxRingPtr. Only possible if
	 * rx process is stopped
	 */
	writel(0, ioaddr + RxRingPtr);

	/* read WoL status to clear */
	readl(ioaddr + WOLCmd);

	/* PME on, clear status */
	writel(np->SavedClkRun | PMEEnable | PMEStatus, ioaddr + ClkRun);

	/* and restart the rx process */
	writel(RxOn, ioaddr + ChipCmd);

	if (enable_intr) {
		/* enable the WOL interrupt.
		 * Could be used to send a netlink message.
		 */
		writel(WOLPkt | LinkChange, ioaddr + IntrMask);
		writel(1, ioaddr + IntrEnable);
	}
}
2475
 
2476
/* dev->stop handler: shut the interface down in a strictly ordered
 * sequence - kill the timer, mask interrupts and release the irq (so
 * no async path can touch the hardware), stop rx/tx, harvest the final
 * stats, then free the rings.  If any wake-on-lan option is armed the
 * chip is restarted in silent WOL mode instead of being left idle.
 * Always returns 0. */
static int netdev_close(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct netdev_private *np = dev->priv;

	if (netif_msg_ifdown(np))
		printk(KERN_DEBUG
			"%s: Shutting down ethercard, status was %#04x.\n",
			dev->name, (int)readl(ioaddr + ChipCmd));
	if (netif_msg_pktdata(np))
		printk(KERN_DEBUG
			"%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
			dev->name, np->cur_tx, np->dirty_tx,
			np->cur_rx, np->dirty_rx);

	/*
	 * FIXME: what if someone tries to close a device
	 * that is suspended?
	 * Should we reenable the nic to switch to
	 * the final WOL settings?
	 */

	del_timer_sync(&np->timer);
	/* hands_off makes the irq handler bail out while we still share
	 * the irq line; set it under the lock with interrupts masked */
	disable_irq(dev->irq);
	spin_lock_irq(&np->lock);
	/* Disable interrupts, and flush posted writes */
	writel(0, ioaddr + IntrEnable);
	readl(ioaddr + IntrEnable);
	np->hands_off = 1;
	spin_unlock_irq(&np->lock);
	enable_irq(dev->irq);

	free_irq(dev->irq, dev);

	/* Interrupt disabled, interrupt handler released,
	 * queue stopped, timer deleted, rtnl_lock held
	 * All async codepaths that access the driver are disabled.
	 */
	spin_lock_irq(&np->lock);
	np->hands_off = 0;
	/* NOTE(review): presumably these reads flush/ack latched status
	 * before the stop - confirm against the DP83815 datasheet */
	readl(ioaddr + IntrMask);
	readw(ioaddr + MIntrStatus);

	/* Freeze Stats */
	writel(StatsFreeze, ioaddr + StatsCtrl);

	/* Stop the chip's Tx and Rx processes. */
	natsemi_stop_rxtx(dev);

	/* harvest the final counter values while holding the lock */
	__get_stats(dev);
	spin_unlock_irq(&np->lock);

	/* clear the carrier last - an interrupt could reenable it otherwise */
	netif_carrier_off(dev);
	netif_stop_queue(dev);

	dump_ring(dev);
	drain_ring(dev);
	free_ring(dev);

	{
		u32 wol = readl(ioaddr + WOLCmd) & WakeOptsSummary;
		if (wol) {
			/* restart the NIC in WOL mode.
			 * The nic must be stopped for this.
			 */
			enable_wol_mode(dev, 0);
		} else {
			/* Restore PME enable bit unmolested */
			writel(np->SavedClkRun, ioaddr + ClkRun);
		}
	}
	return 0;
}
2550
 
2551
 
2552
/* PCI hot-unplug / driver-unload: tear down one device instance.
 * Order matters: unregister first so no new users can appear, then
 * release the BAR and I/O mapping, and free the net_device last. */
static void __devexit natsemi_remove1 (struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	unregister_netdev(netdev);
	pci_release_regions(pdev);
	iounmap((char *)netdev->base_addr);
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
}
2562
 
2563
#ifdef CONFIG_PM
2564
 
2565
/*
2566
 * The ns83815 chip doesn't have explicit RxStop bits.
2567
 * Kicking the Rx or Tx process for a new packet reenables the Rx process
2568
 * of the nic, thus this function must be very careful:
2569
 *
2570
 * suspend/resume synchronization:
2571
 * entry points:
2572
 *   netdev_open, netdev_close, netdev_ioctl, set_rx_mode, intr_handler,
2573
 *   start_tx, tx_timeout
2574
 *
2575
 * No function accesses the hardware without checking np->hands_off.
2576
 *      the check occurs under spin_lock_irq(&np->lock);
2577
 * exceptions:
2578
 *      * netdev_ioctl: noncritical access.
2579
 *      * netdev_open: cannot happen due to the device_detach
2580
 *      * netdev_close: doesn't hurt.
2581
 *      * netdev_timer: timer stopped by natsemi_suspend.
2582
 *      * intr_handler: doesn't acquire the spinlock. suspend calls
2583
 *              disable_irq() to enforce synchronization.
2584
 *
2585
 * Interrupts must be disabled, otherwise hands_off can cause irq storms.
2586
 */
2587
 
2588
/* PCI suspend hook.  Under rtnl_lock: stop the timer, mask interrupts,
 * set hands_off so every other entry point backs away from the
 * hardware, stop rx/tx, harvest stats, drain the rings and either arm
 * WOL mode or restore the plain PME setting.  Finishes with
 * netif_device_detach so netdev_open cannot race with the suspended
 * state.  See the synchronization comment above.  Always returns 0. */
static int natsemi_suspend (struct pci_dev *pdev, u32 state)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct netdev_private *np = dev->priv;
	long ioaddr = dev->base_addr;

	rtnl_lock();
	if (netif_running (dev)) {
		del_timer_sync(&np->timer);

		disable_irq(dev->irq);
		spin_lock_irq(&np->lock);

		writel(0, ioaddr + IntrEnable);
		np->hands_off = 1;
		natsemi_stop_rxtx(dev);
		netif_stop_queue(dev);

		spin_unlock_irq(&np->lock);
		enable_irq(dev->irq);

		/* Update the error counts. */
		__get_stats(dev);

		/* pci_power_off(pdev, -1); */
		drain_ring(dev);
		{
			u32 wol = readl(ioaddr + WOLCmd) & WakeOptsSummary;
			/* Restore PME enable bit */
			if (wol) {
				/* restart the NIC in WOL mode.
				 * The nic must be stopped for this.
				 * FIXME: use the WOL interrupt
				 */
				enable_wol_mode(dev, 0);
			} else {
				/* Restore PME enable bit unmolested */
				writel(np->SavedClkRun, ioaddr + ClkRun);
			}
		}
	}
	netif_device_detach(dev);
	rtnl_unlock();
	return 0;
}
2633
 
2634
 
2635
/* PCI resume hook, mirror of natsemi_suspend.  Under rtnl_lock: if the
 * device is still marked present nothing was suspended and we bail out;
 * otherwise re-enable the PCI device, reset and reprogram the chip,
 * rebuild the rings, clear hands_off and restart the watchdog timer.
 * Always returns 0. */
static int natsemi_resume (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct netdev_private *np = dev->priv;

	rtnl_lock();
	if (netif_device_present(dev))
		goto out;
	if (netif_running(dev)) {
		/* suspend must have set hands_off for a running device */
		BUG_ON(!np->hands_off);
		pci_enable_device(pdev);
	/*	pci_power_on(pdev); */

		natsemi_reset(dev);
		init_ring(dev);
		disable_irq(dev->irq);
		spin_lock_irq(&np->lock);
		np->hands_off = 0;
		init_registers(dev);
		/* attach while still under the lock so the irq handler
		 * sees a fully initialized device */
		netif_device_attach(dev);
		spin_unlock_irq(&np->lock);
		enable_irq(dev->irq);

		mod_timer(&np->timer, jiffies + 1*HZ);
	}
	/* covers the !netif_running case; no-op if attached above */
	netif_device_attach(dev);
out:
	rtnl_unlock();
	return 0;
}
2665
 
2666
#endif /* CONFIG_PM */
2667
 
2668
/* PCI driver glue: probe/remove plus optional power-management hooks. */
static struct pci_driver natsemi_driver = {
	.name		= DRV_NAME,
	.id_table	= natsemi_pci_tbl,
	.probe		= natsemi_probe1,
	.remove		= __devexit_p(natsemi_remove1),
#ifdef CONFIG_PM
	.suspend	= natsemi_suspend,
	.resume		= natsemi_resume,
#endif
};
2678
 
2679
/* Module entry point: print the version banner (module builds only)
 * and register the PCI driver. */
static int __init natsemi_init_mod (void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(version);
#endif

	return pci_module_init (&natsemi_driver);
}
2688
 
2689
/* Module exit point: unregister the driver, detaching all devices. */
static void __exit natsemi_exit_mod (void)
{
	pci_unregister_driver (&natsemi_driver);
}
2693
 
2694
module_init(natsemi_init_mod);
2695
module_exit(natsemi_exit_mod);
2696
 

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.