/* drivers/net/eepro100.c: An Intel i82557 Ethernet driver for Linux. */
/*
   NOTICE: this version tested with kernels 1.3.72 and later only!
        Written 1996-1998 by Donald Becker.

        This software may be used and distributed according to the terms
        of the GNU Public License, incorporated herein by reference.

        This driver is for the Intel EtherExpress Pro 100B boards.
        It should work with other i82557 and i82558 boards.
        To use a built-in driver, install as drivers/net/eepro100.c.
        To use as a module, use the compile-command at the end of the file.

        The author may be reached as becker@CESDIS.usra.edu, or C/O
        Center of Excellence in Space Data and Information Sciences
           Code 930.5, NASA Goddard Space Flight Center, Greenbelt MD 20771
        For updates see
                http://cesdis.gsfc.nasa.gov/linux/drivers/eepro100.html
        There is also a mailing list based at
                linux-eepro100@cesdis.gsfc.nasa.gov
*/

static const char *version =
"eepro100.c:v1.05 10/16/98 Donald Becker http://cesdis.gsfc.nasa.gov/linux/drivers/eepro100.html\n";

/* A few user-configurable values that apply to all boards.
   The first set is undocumented and spelled per Intel recommendations. */

static int congenb = 0;         /* Enable congestion control in the DP83840. */
static int txfifo = 8;          /* Tx FIFO threshold in 4 byte units, 0-15 */
static int rxfifo = 8;          /* Rx FIFO threshold, default 32 bytes. */
/* Tx/Rx DMA burst length, 0-127, 0 == no preemption, tx==128 -> disabled. */
static int txdmacount = 128;
static int rxdmacount = 0;

/* Set the copy breakpoint for the copy-only-tiny-buffer Rx method.
   Lower values use more memory, but are faster. */
static int rx_copybreak = 200;

/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static int max_interrupt_work = 20;

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast) */
static int multicast_filter_limit = 64;

#include <linux/config.h>
#ifdef MODULE
#ifdef MODVERSIONS
#include <linux/modversions.h>
#endif
#include <linux/module.h>
#else
#define MOD_INC_USE_COUNT
#define MOD_DEC_USE_COUNT
#endif

#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/malloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#if LINUX_VERSION_CODE < 0x20155
#include <linux/bios32.h>               /* Ignore the bogus warning in 2.1.100+ */
#endif
#include <asm/bitops.h>
#include <asm/io.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>

/* Unused in the 2.0.* version, but retained for documentation. */
#if LINUX_VERSION_CODE > 0x20118  &&  defined(MODULE)
MODULE_AUTHOR("Donald Becker <becker@cesdis.gsfc.nasa.gov>");
MODULE_DESCRIPTION("Intel i82557/i82558 PCI EtherExpressPro driver");
MODULE_PARM(debug, "i");
MODULE_PARM(options, "1-" __MODULE_STRING(8) "i");
MODULE_PARM(full_duplex, "1-" __MODULE_STRING(8) "i");
MODULE_PARM(congenb, "i");
MODULE_PARM(txfifo, "i");
MODULE_PARM(rxfifo, "i");
MODULE_PARM(txdmacount, "i");
MODULE_PARM(rxdmacount, "i");
MODULE_PARM(rx_copybreak, "i");
MODULE_PARM(max_interrupt_work, "i");
MODULE_PARM(multicast_filter_limit, "i");
#endif
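
/* Illustrative module usage (not part of the original file): with the
   parameters declared above, a load line along the lines of
       insmod eepro100.o debug=2 options=0x30,0x30 full_duplex=1,1
   would raise the message level and force 100Mbps full-duplex on the first
   two boards (0x20 selects 100Mbps, 0x10 full duplex; see the 'options'
   comment further below).  The exact object name and per-card list syntax
   depend on the modutils in use. */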

#define RUN_AT(x) (jiffies + (x))

#if (LINUX_VERSION_CODE < 0x20123)
#define test_and_set_bit(val, addr) set_bit(val, addr)
#endif
#if LINUX_VERSION_CODE < 0x20159
#define dev_free_skb(skb) dev_kfree_skb(skb, FREE_WRITE);
#else
#define dev_free_skb(skb) dev_kfree_skb(skb);
#endif

/* The total I/O port extent of the board.
   The registers beyond 0x18 only exist on the i82558. */
#define SPEEDO3_TOTAL_SIZE 0x20

int speedo_debug = 1;

/*
                                Theory of Operation

I. Board Compatibility

This device driver is designed for the Intel i82557 "Speedo3" chip, Intel's
single-chip fast Ethernet controller for PCI, as used on the Intel
EtherExpress Pro 100 adapter.

II. Board-specific settings

PCI bus devices are configured by the system at boot time, so no jumpers
need to be set on the board.  The system BIOS should be set to assign the
PCI INTA signal to an otherwise unused system IRQ line.  While it's
possible to share PCI interrupt lines, it negatively impacts performance and
only recent kernels support it.

III. Driver operation

IIIA. General
The Speedo3 is very similar to other Intel network chips, that is to say
"apparently designed on a different planet".  This chip retains the complex
Rx and Tx descriptors and multiple buffer pointers of previous chips, but
also has simplified Tx and Rx buffer modes.  This driver uses the "flexible"
Tx mode, but in a simplified lower-overhead manner: it associates only a
single buffer descriptor with each frame descriptor.

Despite the extra space overhead in each receive skbuff, the driver must use
the simplified Rx buffer mode to assure that only a single data buffer is
associated with each RxFD. The driver implements this by reserving space
for the Rx descriptor at the head of each Rx skbuff.

The Speedo-3 has receive and command unit base addresses that are added to
almost all descriptor pointers.  The driver sets these to zero, so that all
pointer fields are absolute addresses.

The System Control Block (SCB) of some previous Intel chips exists on the
chip in both PCI I/O and memory space.  This driver uses the I/O space
registers, but might switch to memory mapped mode to better support non-x86
processors.

IIIB. Transmit structure

The driver must use the complex Tx command+descriptor mode in order to
have an indirect pointer to the skbuff data section.  Each Tx command block
(TxCB) is associated with two immediately appended Tx Buffer Descriptors
(TxBDs).  A fixed ring of these TxCB+TxBD pairs is kept as part of the
speedo_private data structure for each adapter instance.

The newer i82558 explicitly supports this structure, and can read the two
TxBDs in the same PCI burst as the TxCB.

This ring structure is used for all normal transmit packets, but the
transmit packet descriptors aren't long enough for most non-Tx commands such
as CmdConfigure.  This is complicated by the possibility that the chip has
already loaded the link address in the previous descriptor.  So for these
commands we convert the next free descriptor on the ring to a NoOp, and point
that descriptor's link to the complex command.

An additional complexity of these non-transmit commands is that they may be
added asynchronously to the normal transmit queue, so we disable interrupts
whenever the Tx descriptor ring is manipulated.

A notable aspect of these special configure commands is that they do
work with the normal Tx ring entry scavenge method.  The Tx ring scavenge
is done at interrupt time using the 'dirty_tx' index, and checking for the
command-complete bit.  The setup frames may have the NoOp command on the
Tx ring marked as complete before the setup command itself has completed,
but this is not a problem.  The tx_ring entry can still be safely reused, as
the tx_skbuff[] entry is always empty for config_cmd and mc_setup frames.

Commands may have bits set, e.g. CmdSuspend, in the command word to either
suspend or stop the transmit/command unit.  This driver always flags the last
command with CmdSuspend, erases the CmdSuspend in the previous command, and
then issues a CU_RESUME.
Note: Watch out for the potential race condition here: imagine
        erasing the previous suspend
                the chip processes the previous command
                the chip processes the final command, and suspends
        doing the CU_RESUME
                the chip processes the not-yet-valid post-final command.
So blindly sending a CU_RESUME is only safe if we do it immediately after
erasing the previous CmdSuspend, without the possibility of an
intervening delay.  Thus the resume command is always within the
interrupts-disabled region.  This is a timing dependence, but handling this
condition in a timing-independent way would considerably complicate the code.

Note: In previous generation Intel chips, restarting the command unit was a
notoriously slow process.  This is presumably no longer true.

IIIC. Receive structure

Because of the bus-master support on the Speedo3 this driver uses the new
SKBUFF_RX_COPYBREAK scheme, rather than a fixed intermediate receive buffer.
This scheme allocates full-sized skbuffs as receive buffers.  The value
SKBUFF_RX_COPYBREAK is used as the copying breakpoint: it is chosen to
trade-off the memory wasted by passing the full-sized skbuff to the queue
layer for all frames vs. the copying cost of copying a frame to a
correctly-sized skbuff.

For small frames the copying cost is negligible (esp. considering that we
are pre-loading the cache with immediately useful header information), so we
allocate a new, minimally-sized skbuff.  For large frames the copying cost
is non-trivial, and the larger copy might flush the cache of useful data, so
we pass up the skbuff the packet was received into.

IIID. Synchronization
The driver runs as two independent, single-threaded flows of control.  One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag.  The other thread is the interrupt handler, which is single
threaded by the hardware and other software.

The send packet thread has partial control over the Tx ring and 'dev->tbusy'
flag.  It sets the tbusy flag whenever it's queuing a Tx packet.  If the next
queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
the 'sp->tx_full' flag.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring.  (The Tx-done interrupt can't be selectively turned off, so
we can't avoid the interrupt overhead by having the Tx routine reap the Tx
stats.)  After reaping the stats, it marks the queue entry as empty by setting
the 'base' to zero.  Iff the 'sp->tx_full' flag is set, it clears both the
tx_full and tbusy flags.

IV. Notes

Thanks to Steve Williams of Intel for arranging the non-disclosure agreement
that stated that I could disclose the information.  But I still resent
having to sign an Intel NDA when I'm helping Intel sell their own product!

*/

/* A few values that may be tweaked. */
/* The ring sizes should be a power of two for efficiency. */
#define TX_RING_SIZE    16              /* Effectively 2 entries fewer. */
#define RX_RING_SIZE    16
/* Size of a pre-allocated Rx buffer: <Ethernet MTU> + slack. */
#define PKT_BUF_SZ              1536

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  ((800*HZ)/1000)

/* How to wait for the command unit to accept a command.
   Typically this takes 0 ticks. */
static inline void wait_for_cmd_done(long cmd_ioaddr)
{
        int wait = 100;
        do ;
        while (inb(cmd_ioaddr) && --wait >= 0);
}

/* Operational parameters that usually are not changed. */

/* The rest of these values should never change. */

/* Offsets to the various registers.
   Accesses need not be longword aligned. */
enum speedo_offsets {
        SCBStatus = 0, SCBCmd = 2,      /* Rx/Command Unit command and status. */
        SCBPointer = 4,                 /* General purpose pointer. */
        SCBPort = 8,                    /* Misc. commands and operands.  */
        SCBflash = 12, SCBeeprom = 14,  /* EEPROM and flash memory control. */
        SCBCtrlMDI = 16,                /* MDI interface control. */
        SCBEarlyRx = 20,                /* Early receive byte count. */
};
/* Commands that can be put in a command list entry. */
enum commands {
        CmdNOp = 0, CmdIASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
        CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7,
        CmdSuspend = 0x4000,            /* Suspend after completion. */
        CmdIntr = 0x2000,               /* Interrupt after completion. */
        CmdTxFlex = 0x0008,             /* Use "Flexible mode" for CmdTx command. */
};

/* The SCB accepts the following controls for the Tx and Rx units: */
#define  CU_START               0x0010
#define  CU_RESUME              0x0020
#define  CU_STATSADDR   0x0040
#define  CU_SHOWSTATS   0x0050  /* Dump statistics counters. */
#define  CU_CMD_BASE    0x0060  /* Base address to add to CU commands. */
#define  CU_DUMPSTATS   0x0070  /* Dump then reset stats counters. */

#define  RX_START       0x0001
#define  RX_RESUME      0x0002
#define  RX_ABORT       0x0004
#define  RX_ADDR_LOAD   0x0006
#define  RX_RESUMENR    0x0007
#define INT_MASK        0x0100
#define DRVR_INT        0x0200          /* Driver generated interrupt. */

/* The Speedo3 Rx and Tx frame/buffer descriptors. */
struct descriptor {                     /* A generic descriptor. */
        s16 status;                     /* Offset 0. */
        s16 command;                    /* Offset 2. */
        u32 link;                       /* struct descriptor *  */
        unsigned char params[0];
};

/* The Speedo3 Rx and Tx buffer descriptors. */
struct RxFD {                           /* Receive frame descriptor. */
        s32 status;
        u32 link;                       /* struct RxFD * */
        u32 rx_buf_addr;                /* void * */
        u16 count;
        u16 size;
};

/* Selected elements of the Tx/RxFD.status word. */
enum RxFD_bits {
        RxComplete=0x8000, RxOK=0x2000,
        RxErrCRC=0x0800, RxErrAlign=0x0400, RxErrTooBig=0x0200, RxErrSymbol=0x0010,
        RxEth2Type=0x0020, RxNoMatch=0x0004, RxNoIAMatch=0x0002,
        StatusComplete=0x8000,
};

struct TxFD {                           /* Transmit frame descriptor set. */
        s32 status;
        u32 link;                       /* void * */
        u32 tx_desc_addr;               /* Always points to the tx_buf_addr element. */
        s32 count;                      /* # of TBD (=1), Tx start thresh., etc. */
        /* This constitutes two "TBD" entries -- we only use one. */
        u32 tx_buf_addr0;               /* void *, frame to be transmitted.  */
        s32 tx_buf_size0;               /* Length of Tx frame. */
        u32 tx_buf_addr1;               /* void *, frame to be transmitted.  */
        s32 tx_buf_size1;               /* Length of Tx frame. */
};

/* Elements of the dump_statistics block. This block must be lword aligned. */
struct speedo_stats {
        u32 tx_good_frames;
        u32 tx_coll16_errs;
        u32 tx_late_colls;
        u32 tx_underruns;
        u32 tx_lost_carrier;
        u32 tx_deferred;
        u32 tx_one_colls;
        u32 tx_multi_colls;
        u32 tx_total_colls;
        u32 rx_good_frames;
        u32 rx_crc_errs;
        u32 rx_align_errs;
        u32 rx_resource_errs;
        u32 rx_overrun_errs;
        u32 rx_colls_errs;
        u32 rx_runt_errs;
        u32 done_marker;
};

struct speedo_private {
        char devname[8];                        /* Used only for kernel debugging. */
        const char *product_name;
        struct device *next_module;
        struct TxFD     tx_ring[TX_RING_SIZE];  /* Commands (usually CmdTxPacket). */
        /* The saved address of a sent-in-place packet/buffer, for skfree(). */
        struct sk_buff* tx_skbuff[TX_RING_SIZE];
        struct descriptor  *last_cmd;           /* Last command sent. */
        /* Rx descriptor ring & addresses of receive-in-place skbuffs. */
        struct RxFD *rx_ringp[RX_RING_SIZE];
        struct sk_buff* rx_skbuff[RX_RING_SIZE];
        struct RxFD *last_rxf;                  /* Last RxFD, i.e. the end of the Rx list. */
        struct enet_statistics stats;
        struct speedo_stats lstats;
        struct timer_list timer;                /* Media selection timer. */
        long last_rx_time;                      /* Last Rx, in jiffies, to handle Rx hang. */
        unsigned int cur_rx, cur_tx;            /* The next free ring entry */
        unsigned int dirty_rx, dirty_tx;        /* The ring entries to be free()ed. */
        int mc_setup_frm_len;                   /* The length of an allocated.. */
        struct descriptor *mc_setup_frm;        /* ..multicast setup frame. */
        int mc_setup_busy;                      /* Avoid double-use of setup frame. */
        int in_interrupt;                       /* Word-aligned dev->interrupt */
        char rx_mode;                           /* Current PROMISC/ALLMULTI setting. */
        unsigned int tx_full:1;                 /* The Tx queue is full. */
        unsigned int full_duplex:1;             /* Full-duplex operation requested. */
        unsigned int default_port:1;            /* Last dev->if_port value. */
        unsigned int rx_bug:1;                  /* Work around receiver hang errata. */
        unsigned int rx_bug10:1;                /* Receiver might hang at 10mbps. */
        unsigned int rx_bug100:1;               /* Receiver might hang at 100mbps. */
        unsigned short phy[2];                  /* PHY media interfaces available. */
};

/* The parameters for a CmdConfigure operation.
   There are so many options that it would be difficult to document each bit.
   We mostly use the default or recommended settings. */
const char i82557_config_cmd[22] = {
        22, 0x08, 0, 0,  0, 0x80, 0x32, 0x03,  1, /* 1=Use MII  0=Use AUI */
        0, 0x2E, 0,  0x60, 0,
        0xf2, 0x48,   0, 0x40, 0xf2, 0x80,      /* 0x40=Force full-duplex */
        0x3f, 0x05, };
const char i82558_config_cmd[22] = {
        22, 0x08, 0, 1,  0, 0x80, 0x22, 0x03,  1, /* 1=Use MII  0=Use AUI */
        0, 0x2E, 0,  0x60, 0x08, 0x88,
        0x68, 0, 0x40, 0xf2, 0xBD,              /* 0xBD->0xFD=Force full-duplex */
        0x31, 0x05, };

/* PHY media interface chips. */
static const char *phys[] = {
        "None", "i82553-A/B", "i82553-C", "i82503",
        "DP83840", "80c240", "80c24", "i82555",
        "unknown-8", "unknown-9", "DP83840A", "unknown-11",
        "unknown-12", "unknown-13", "unknown-14", "unknown-15", };
enum phy_chips { NonSuchPhy=0, I82553AB, I82553C, I82503, DP83840, S80C240,
                                         S80C24, I82555, DP83840A=10, };
static const char is_mii[] = { 0, 1, 1, 0, 1, 1, 0, 1 };

static void speedo_found1(struct device *dev, long ioaddr, int irq,
                                                  int card_idx);

static int read_eeprom(long ioaddr, int location, int addr_len);
static int mdio_read(long ioaddr, int phy_id, int location);
static int mdio_write(long ioaddr, int phy_id, int location, int value);
static int speedo_open(struct device *dev);
static void speedo_timer(unsigned long data);
static void speedo_init_rx_ring(struct device *dev);
static int speedo_start_xmit(struct sk_buff *skb, struct device *dev);
static int speedo_rx(struct device *dev);
static void speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
static int speedo_close(struct device *dev);
static struct enet_statistics *speedo_get_stats(struct device *dev);
static int speedo_ioctl(struct device *dev, struct ifreq *rq, int cmd);
static void set_rx_mode(struct device *dev);


/* The parameters that may be passed in... */
/* 'options' is used to pass a transceiver override or full-duplex flag
   e.g. "options=16" for FD, "options=32" for 100mbps-only. */
static int full_duplex[] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int options[] = {-1, -1, -1, -1, -1, -1, -1, -1};
#ifdef MODULE
static int debug = -1;                  /* The debug level */
#endif

#ifdef honor_default_port
/* Optional driver feature to allow forcing the transceiver setting.
   Not recommended. */
static int mii_ctrl[8] = { 0x3300, 0x3100, 0x0000, 0x0100,
                                                   0x2000, 0x2100, 0x0400, 0x3100};
#endif

/* A list of all installed Speedo devices, for removing the driver module. */
static struct device *root_speedo_dev = NULL;

int eepro100_init(struct device *dev)
{
        int cards_found = 0;
        static int pci_index = 0;

        if (! pcibios_present())
                return cards_found;

        for (; pci_index < 8; pci_index++) {
                unsigned char pci_bus, pci_device_fn, pci_latency;
                long ioaddr;
                int irq;

                u16 pci_command, new_command;

                if (pcibios_find_device(PCI_VENDOR_ID_INTEL,
                                                                PCI_DEVICE_ID_INTEL_82557,
                                                                pci_index, &pci_bus,
                                                                &pci_device_fn))
                        break;
#if LINUX_VERSION_CODE >= 0x20155  ||  PCI_SUPPORT_1
                {
                        struct pci_dev *pdev = pci_find_slot(pci_bus, pci_device_fn);
                        ioaddr = pdev->base_address[1];         /* Use [0] to mem-map */
                        irq = pdev->irq;
                }
#else
                {
                        u32 pci_ioaddr;
                        u8 pci_irq_line;
                        pcibios_read_config_byte(pci_bus, pci_device_fn,
                                                                         PCI_INTERRUPT_LINE, &pci_irq_line);
                        /* Note: BASE_ADDRESS_0 is for memory-mapping the registers. */
                        pcibios_read_config_dword(pci_bus, pci_device_fn,
                                                                          PCI_BASE_ADDRESS_1, &pci_ioaddr);
                        ioaddr = pci_ioaddr;
                        irq = pci_irq_line;
                }
#endif
                /* Remove I/O space marker in bit 0. */
                ioaddr &= ~3;
                if (speedo_debug > 2)
                        printk("Found Intel i82557 PCI Speedo at I/O %#lx, IRQ %d.\n",
                                   ioaddr, irq);

                /* Get and check the bus-master and latency values. */
                pcibios_read_config_word(pci_bus, pci_device_fn,
                                                                 PCI_COMMAND, &pci_command);
                new_command = pci_command | PCI_COMMAND_MASTER|PCI_COMMAND_IO;
                if (pci_command != new_command) {
                        printk(KERN_INFO "  The PCI BIOS has not enabled this"
                                   " device!  Updating PCI command %4.4x->%4.4x.\n",
                                   pci_command, new_command);
                        pcibios_write_config_word(pci_bus, pci_device_fn,
                                                                          PCI_COMMAND, new_command);
                }
                pcibios_read_config_byte(pci_bus, pci_device_fn,
                                                                 PCI_LATENCY_TIMER, &pci_latency);
                if (pci_latency < 32) {
                        printk("  PCI latency timer (CFLT) is unreasonably low at %d."
                                   "  Setting to 32 clocks.\n", pci_latency);
                        pcibios_write_config_byte(pci_bus, pci_device_fn,
                                                                          PCI_LATENCY_TIMER, 32);
                } else if (speedo_debug > 1)
                        printk("  PCI latency timer (CFLT) is %#x.\n", pci_latency);

                speedo_found1(dev, ioaddr, irq, cards_found);
                dev = NULL;
                cards_found++;
        }

        return cards_found;
}

static void speedo_found1(struct device *dev, long ioaddr, int irq,
                                                  int card_idx)
{
        static int did_version = 0;             /* Already printed version info. */
        struct speedo_private *sp;
        char *product;
        int i, option;
        u16 eeprom[0x40];

        if (speedo_debug > 0  &&  did_version++ == 0)
                printk(version);

        dev = init_etherdev(dev, sizeof(struct speedo_private));

        if (dev->mem_start > 0)
                option = dev->mem_start;
        else if (card_idx >= 0  &&  options[card_idx] >= 0)
                option = options[card_idx];
        else
                option = 0;

        /* Read the station address EEPROM before doing the reset.
           Perhaps this should even be done before accepting the device, but
           then we wouldn't have a device name with which to report the error. */
        {
                u16 sum = 0;
                int j;
                int addr_len = read_eeprom(ioaddr, 0, 6) == 0xffff ? 8 : 6;

                for (j = 0, i = 0; i < 0x40; i++) {
                        u16 value = read_eeprom(ioaddr, i, addr_len);
                        eeprom[i] = value;
                        sum += value;
                        if (i < 3) {
                                dev->dev_addr[j++] = value;
                                dev->dev_addr[j++] = value >> 8;
                        }
                }
                if (sum != 0xBABA)
                        printk(KERN_WARNING "%s: Invalid EEPROM checksum %#4.4x, "
                                   "check settings before activating this device!\n",
                                   dev->name, sum);
                /* Don't  unregister_netdev(dev);  as the EEPro may actually be
                   usable, especially if the MAC address is set later. */
        }

        /* Reset the chip: stop Tx and Rx processes and clear counters.
           This takes less than 10usec and will easily finish before the next
           action. */
        outl(0, ioaddr + SCBPort);

        if (eeprom[3] & 0x0100)
                product = "OEM i82557/i82558 10/100 Ethernet";
        else
                product = "Intel EtherExpress Pro 10/100";

        printk(KERN_INFO "%s: %s at %#3lx, ", dev->name, product, ioaddr);

        for (i = 0; i < 5; i++)
                printk("%2.2X:", dev->dev_addr[i]);
        printk("%2.2X, IRQ %d.\n", dev->dev_addr[i], irq);

#ifndef kernel_bloat
        /* OK, this is pure kernel bloat.  I don't like it when other drivers
           waste non-pageable kernel space to emit similar messages, but I need
           them for bug reports. */
        {
                const char *connectors[] = {" RJ45", " BNC", " AUI", " MII"};
                /* The self-test results must be paragraph aligned. */
                s32 str[6], *volatile self_test_results;
                int boguscnt = 16000;   /* Timeout for self-test. */
                if (eeprom[3] & 0x03)
                        printk(KERN_INFO "  Receiver lock-up bug exists -- enabling"
                                   " work-around.\n");
                printk(KERN_INFO "  Board assembly %4.4x%2.2x-%3.3d, Physical"
                           " connectors present:",
                           eeprom[8], eeprom[9]>>8, eeprom[9] & 0xff);
                for (i = 0; i < 4; i++)
                        if (eeprom[5] & (1<<i))
                                printk(connectors[i]);
                printk("\n"KERN_INFO"  Primary interface chip %s PHY #%d.\n",
                           phys[(eeprom[6]>>8)&15], eeprom[6] & 0x1f);
                if (eeprom[7] & 0x0700)
                        printk(KERN_INFO "    Secondary interface chip %s.\n",
                                   phys[(eeprom[7]>>8)&7]);
                if (((eeprom[6]>>8) & 0x3f) == DP83840
                        ||  ((eeprom[6]>>8) & 0x3f) == DP83840A) {
                        int mdi_reg23 = mdio_read(ioaddr, eeprom[6] & 0x1f, 23) | 0x0422;
                        if (congenb)
                          mdi_reg23 |= 0x0100;
                        printk(KERN_INFO"  DP83840 specific setup, setting register 23 to %4.4x.\n",
                                   mdi_reg23);
                        mdio_write(ioaddr, eeprom[6] & 0x1f, 23, mdi_reg23);
                }
                if ((option >= 0) && (option & 0x70)) {
                        printk(KERN_INFO "  Forcing %dMbs %s-duplex operation.\n",
                                   (option & 0x20 ? 100 : 10),
                                   (option & 0x10 ? "full" : "half"));
                        mdio_write(ioaddr, eeprom[6] & 0x1f, 0,
                                           ((option & 0x20) ? 0x2000 : 0) |    /* 100mbps? */
                                           ((option & 0x10) ? 0x0100 : 0));    /* Full duplex? */
                }

                /* Perform a system self-test. */
                self_test_results = (s32*) ((((long) str) + 15) & ~0xf);
                self_test_results[0] = 0;
                self_test_results[1] = -1;
                outl(virt_to_bus(self_test_results) | 1, ioaddr + SCBPort);
                do {
                        udelay(10);
                } while (self_test_results[1] == -1  &&  --boguscnt >= 0);

                if (boguscnt < 0) {             /* Test optimized out. */
                        printk(KERN_ERR "Self test failed, status %8.8x:\n"
                                   KERN_ERR " Failure to initialize the i82557.\n"
                                   KERN_ERR " Verify that the card is in a bus-master"
                                   " capable slot.\n",
                                   self_test_results[1]);
                } else
                        printk(KERN_INFO "  General self-test: %s.\n"
                                   KERN_INFO "  Serial sub-system self-test: %s.\n"
                                   KERN_INFO "  Internal registers self-test: %s.\n"
                                   KERN_INFO "  ROM checksum self-test: %s (%#8.8x).\n",
                                   self_test_results[1] & 0x1000 ? "failed" : "passed",
                                   self_test_results[1] & 0x0020 ? "failed" : "passed",
                                   self_test_results[1] & 0x0008 ? "failed" : "passed",
                                   self_test_results[1] & 0x0004 ? "failed" : "passed",
                                   self_test_results[0]);
        }
#endif  /* kernel_bloat */

        outl(0, ioaddr + SCBPort);

        /* We do a request_region() only to register /proc/ioports info. */
        request_region(ioaddr, SPEEDO3_TOTAL_SIZE, "Intel Speedo3 Ethernet");

        dev->base_addr = ioaddr;
        dev->irq = irq;

        if (dev->priv == NULL)
                dev->priv = kmalloc(sizeof(*sp), GFP_KERNEL);
        sp = dev->priv;
        memset(sp, 0, sizeof(*sp));
        sp->next_module = root_speedo_dev;
        root_speedo_dev = dev;

        sp->full_duplex = option >= 0 && (option & 0x10) ? 1 : 0;
        if (card_idx >= 0) {
                if (full_duplex[card_idx] >= 0)
                        sp->full_duplex = full_duplex[card_idx];
        }
        sp->default_port = option >= 0 ? (option & 0x0f) : 0;

        sp->phy[0] = eeprom[6];
        sp->phy[1] = eeprom[7];
        sp->rx_bug = (eeprom[3] & 0x03) == 3 ? 0 : 1;

        if (sp->rx_bug)
                printk(KERN_INFO "  Receiver lock-up workaround activated.\n");

        /* The Speedo-specific entries in the device structure. */
        dev->open = &speedo_open;
        dev->hard_start_xmit = &speedo_start_xmit;
        dev->stop = &speedo_close;
        dev->get_stats = &speedo_get_stats;
        dev->set_multicast_list = &set_rx_mode;
        dev->do_ioctl = &speedo_ioctl;

        return;
}

/* Serial EEPROM section.
   A "bit" grungy, but we work our way through bit-by-bit :->. */
/*  EEPROM_Ctrl bits. */
#define EE_SHIFT_CLK    0x01    /* EEPROM shift clock. */
#define EE_CS                   0x02    /* EEPROM chip select. */
#define EE_DATA_WRITE   0x04    /* EEPROM chip data in. */
#define EE_WRITE_0              0x01
#define EE_WRITE_1              0x05
#define EE_DATA_READ    0x08    /* EEPROM chip data out. */
#define EE_ENB                  (0x4800 | EE_CS)

/* Delay between EEPROM clock transitions.
   This will actually work with no delay on 33 MHz PCI.  */
#define eeprom_delay(nanosec)           udelay(1);

/* The EEPROM commands include the always-set leading bit. */
#define EE_WRITE_CMD    (5 << addr_len)
#define EE_READ_CMD             (6 << addr_len)
#define EE_ERASE_CMD    (7 << addr_len)

static int read_eeprom(long ioaddr, int location, int addr_len)
{
        unsigned short retval = 0;
        int ee_addr = ioaddr + SCBeeprom;
        int read_cmd = location | EE_READ_CMD;
        int i;

        outw(EE_ENB & ~EE_CS, ee_addr);
        outw(EE_ENB, ee_addr);

        /* Shift the read command bits out. */
        for (i = 12; i >= 0; i--) {
                short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
                outw(EE_ENB | dataval, ee_addr);
                eeprom_delay(100);
                outw(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
                eeprom_delay(150);
        }
        outw(EE_ENB, ee_addr);

        for (i = 15; i >= 0; i--) {
                outw(EE_ENB | EE_SHIFT_CLK, ee_addr);
                eeprom_delay(100);
                retval = (retval << 1) | ((inw(ee_addr) & EE_DATA_READ) ? 1 : 0);
                outw(EE_ENB, ee_addr);
                eeprom_delay(100);
        }

        /* Terminate the EEPROM access. */
        outw(EE_ENB & ~EE_CS, ee_addr);
        return retval;
}
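
/* Worked example of the command arithmetic above (informational only): for a
   64-word EEPROM (addr_len == 6), reading word 5 shifts out
   EE_READ_CMD | 5 == (6 << 6) | 5 == 0x185 over the 13 clock cycles of the
   first loop, MSB first -- a few leading zero bits, the start bit, the "10"
   read opcode, and the six address bits -- after which the second loop clocks
   in the 16 data bits. */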

static int mdio_read(long ioaddr, int phy_id, int location)
{
        int val, boguscnt = 64*10;              /* <64 usec. to complete, typ 27 ticks */
        outl(0x08000000 | (location<<16) | (phy_id<<21), ioaddr + SCBCtrlMDI);
        do {
                val = inl(ioaddr + SCBCtrlMDI);
                if (--boguscnt < 0) {
                        printk(KERN_ERR " mdio_read() timed out with val = %8.8x.\n", val);
                }
        } while (! (val & 0x10000000));
        return val & 0xffff;
}

static int mdio_write(long ioaddr, int phy_id, int location, int value)
{
        int val, boguscnt = 64*10;              /* <64 usec. to complete, typ 27 ticks */
        outl(0x04000000 | (location<<16) | (phy_id<<21) | value,
                 ioaddr + SCBCtrlMDI);
        do {
                val = inl(ioaddr + SCBCtrlMDI);
                if (--boguscnt < 0) {
                        printk(KERN_ERR" mdio_write() timed out with val = %8.8x.\n", val);
                }
        } while (! (val & 0x10000000));
        return val & 0xffff;
}
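
/* Informational note on the MDI control word used by the two routines above,
   inferred from the constants they use: bits 0-15 carry the data, bits 16-20
   the register number, bits 21-25 the PHY address, bits 26-27 the opcode
   (0x04000000 = write, 0x08000000 = read), and bit 28 (0x10000000) is the
   ready flag that both routines poll.  For example, mdio_read(ioaddr, 1, 0)
   issues 0x08200000 and returns the low 16 bits of the completed cycle. */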


static int
speedo_open(struct device *dev)
{
        struct speedo_private *sp = (struct speedo_private *)dev->priv;
        long ioaddr = dev->base_addr;

#ifdef notdef
        /* We could reset the chip, but should not need to. */
        outl(0, ioaddr + SCBPort);
        udelay(10);
#endif

        if (request_irq(dev->irq, &speedo_interrupt, SA_SHIRQ,
                                        "Intel EtherExpress Pro 10/100 Ethernet", dev)) {
                return -EAGAIN;
        }
        if (speedo_debug > 1)
                printk(KERN_DEBUG "%s: speedo_open() irq %d.\n", dev->name, dev->irq);

        MOD_INC_USE_COUNT;

        /* Retrigger negotiation to reset previous errors. */
        if ((sp->phy[0] & 0x8000) == 0) {
                int phy_addr = sp->phy[0] & 0x1f;
                /* Use 0x3300 for restarting NWay, other values to force xcvr:
                   0x0000 10-HD
                   0x0100 10-FD
                   0x2000 100-HD
                   0x2100 100-FD
                */
#ifdef honor_default_port
                mdio_write(ioaddr, phy_addr, 0, mii_ctrl[dev->default_port & 7]);
#else
                mdio_write(ioaddr, phy_addr, 0, 0x3300);
#endif
        }

        /* Load the statistics block address. */
        wait_for_cmd_done(ioaddr + SCBCmd);
        outl(virt_to_bus(&sp->lstats), ioaddr + SCBPointer);
        outw(INT_MASK | CU_STATSADDR, ioaddr + SCBCmd);
        sp->lstats.done_marker = 0;

        speedo_init_rx_ring(dev);
        wait_for_cmd_done(ioaddr + SCBCmd);
        outl(0, ioaddr + SCBPointer);
        outw(INT_MASK | RX_ADDR_LOAD, ioaddr + SCBCmd);

        /* Todo: verify that we must wait for previous command completion. */
        wait_for_cmd_done(ioaddr + SCBCmd);
        outl(virt_to_bus(sp->rx_ringp[0]), ioaddr + SCBPointer);
        outw(INT_MASK | RX_START, ioaddr + SCBCmd);

        /* Fill the first command with our physical address. */
        {
                u16 *eaddrs = (u16 *)dev->dev_addr;
                u16 *setup_frm = (u16 *)&(sp->tx_ring[0].tx_desc_addr);

                /* Avoid a bug(?!) here by marking the command already completed. */
                sp->tx_ring[0].status = ((CmdSuspend | CmdIASetup) << 16) | 0xa000;
                sp->tx_ring[0].link = virt_to_bus(&(sp->tx_ring[1]));
                *setup_frm++ = eaddrs[0];
                *setup_frm++ = eaddrs[1];
                *setup_frm++ = eaddrs[2];
        }
        sp->last_cmd = (struct descriptor *)&sp->tx_ring[0];
        sp->cur_tx = 1;
        sp->dirty_tx = 0;
        sp->tx_full = 0;

        wait_for_cmd_done(ioaddr + SCBCmd);
        outl(0, ioaddr + SCBPointer);
        outw(INT_MASK | CU_CMD_BASE, ioaddr + SCBCmd);

        dev->if_port = sp->default_port;

        sp->in_interrupt = 0;
        dev->tbusy = 0;
        dev->interrupt = 0;
        dev->start = 1;

        /* Start the chip's Tx process and unmask interrupts. */
        /* Todo: verify that we must wait for previous command completion. */
        wait_for_cmd_done(ioaddr + SCBCmd);
        outl(virt_to_bus(&sp->tx_ring[0]), ioaddr + SCBPointer);
        outw(CU_START, ioaddr + SCBCmd);

        /* Setup the chip and configure the multicast list. */
        sp->mc_setup_frm = NULL;
        sp->mc_setup_frm_len = 0;
        sp->mc_setup_busy = 0;
        sp->rx_mode = -1;                       /* Invalid -> always reset the mode. */
        set_rx_mode(dev);

        if (speedo_debug > 2) {
                printk(KERN_DEBUG "%s: Done speedo_open(), status %8.8x.\n",
                           dev->name, inw(ioaddr + SCBStatus));
        }
        /* Set the timer.  The timer serves a dual purpose:
           1) to monitor the media interface (e.g. link beat) and perhaps switch
           to an alternate media type
           2) to monitor Rx activity, and restart the Rx process if the receiver
           hangs. */
        init_timer(&sp->timer);
        sp->timer.expires = RUN_AT((24*HZ)/10);         /* 2.4 sec. */
        sp->timer.data = (unsigned long)dev;
        sp->timer.function = &speedo_timer;             /* timer handler */
        add_timer(&sp->timer);

        wait_for_cmd_done(ioaddr + SCBCmd);
        outw(CU_DUMPSTATS, ioaddr + SCBCmd);
        /* No need to wait for the command unit to accept here. */
        if ((sp->phy[0] & 0x8000) == 0)
                mdio_read(ioaddr, sp->phy[0] & 0x1f, 0);
        return 0;
}

/* Media monitoring and control. */
static void speedo_timer(unsigned long data)
{
        struct device *dev = (struct device *)data;
        struct speedo_private *sp = (struct speedo_private *)dev->priv;

        if (speedo_debug > 3) {
                long ioaddr = dev->base_addr;
                printk(KERN_DEBUG "%s: Media control tick, status %4.4x.\n",
                           dev->name, inw(ioaddr + SCBStatus));
        }
        if (sp->rx_mode < 0  ||
                (sp->rx_bug  && jiffies - sp->last_rx_time > 2*HZ)) {
                /* We haven't received a packet in a Long Time.  We might have been
                   bitten by the receiver hang bug.  This can be cleared by sending
                   a set multicast list command. */
                set_rx_mode(dev);
        }
        /* We must continue to monitor the media. */
        sp->timer.expires = RUN_AT(2*HZ);               /* 2.0 sec. */
        add_timer(&sp->timer);
}

/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void
speedo_init_rx_ring(struct device *dev)
{
        struct speedo_private *sp = (struct speedo_private *)dev->priv;
        struct RxFD *rxf, *last_rxf = NULL;
        int i;

        sp->cur_rx = 0;

        for (i = 0; i < RX_RING_SIZE; i++) {
                struct sk_buff *skb;
                skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
                sp->rx_skbuff[i] = skb;
                if (skb == NULL)
                        break;                  /* OK.  Just initially short of Rx bufs. */
                skb->dev = dev;                 /* Mark as being used by this device. */
                rxf = (struct RxFD *)skb->tail;
                sp->rx_ringp[i] = rxf;
                skb_reserve(skb, sizeof(struct RxFD));
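                /* Layout note: the skbuff was sized PKT_BUF_SZ + sizeof(struct RxFD)
                   above, the RxFD overlays the first bytes of the data area, and the
                   skb_reserve() just done moves skb->data past it, so the received
                   packet lands immediately after its own descriptor (the scheme
                   described in section IIIA of the Theory of Operation). */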
                if (last_rxf)
                        last_rxf->link = virt_to_bus(rxf);
                last_rxf = rxf;
                rxf->status = 0x00000001;               /* '1' is flag value only. */
                rxf->link = 0;                          /* None yet. */
                /* This field is unused by the i82557; we use it as a consistency check. */
#ifdef final_version
                rxf->rx_buf_addr = 0xffffffff;
#else
                rxf->rx_buf_addr = virt_to_bus(skb->tail);
#endif
                rxf->count = 0;
                rxf->size = PKT_BUF_SZ;
        }
        sp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
        /* Mark the last entry as end-of-list. */
        last_rxf->status = 0xC0000002;                  /* '2' is flag value only. */
        sp->last_rxf = last_rxf;
}

static void speedo_tx_timeout(struct device *dev)
{
        struct speedo_private *sp = (struct speedo_private *)dev->priv;
        long ioaddr = dev->base_addr;

        printk(KERN_WARNING "%s: Transmit timed out: status %4.4x "
                   " %4.4x at %d/%d command %8.8x.\n",
                   dev->name, inw(ioaddr + SCBStatus), inw(ioaddr + SCBCmd),
                   sp->dirty_tx, sp->cur_tx,
                   sp->tx_ring[sp->dirty_tx % TX_RING_SIZE].status);
        if ((inw(ioaddr + SCBStatus) & 0x00C0) != 0x0080) {
                printk(KERN_WARNING "%s: Trying to restart the transmitter...\n",
                           dev->name);
                outl(virt_to_bus(&sp->tx_ring[sp->dirty_tx % TX_RING_SIZE]),
                         ioaddr + SCBPointer);
                outw(CU_START, ioaddr + SCBCmd);
        } else {
                outw(DRVR_INT, ioaddr + SCBCmd);
        }
        /* Reset the MII transceiver, suggested by Fred Young @ scalable.com. */
        if ((sp->phy[0] & 0x8000) == 0) {
                int phy_addr = sp->phy[0] & 0x1f;
                mdio_write(ioaddr, phy_addr, 0, 0x0400);
                mdio_write(ioaddr, phy_addr, 1, 0x0000);
                mdio_write(ioaddr, phy_addr, 4, 0x0000);
                mdio_write(ioaddr, phy_addr, 0, 0x8000);
#ifdef honor_default_port
                mdio_write(ioaddr, phy_addr, 0, mii_ctrl[dev->default_port & 7]);
#endif
        }
        sp->stats.tx_errors++;
        dev->trans_start = jiffies;
        return;
}

static int
speedo_start_xmit(struct sk_buff *skb, struct device *dev)
{
        struct speedo_private *sp = (struct speedo_private *)dev->priv;
        long ioaddr = dev->base_addr;
        int entry;

        /* Block a timer-based transmit from overlapping.  This could better be
           done with atomic_swap(1, dev->tbusy), but set_bit() works as well.
           If this ever occurs the queue layer is doing something evil! */
        if (test_and_set_bit(0, (void*)&dev->tbusy) != 0) {
                int tickssofar = jiffies - dev->trans_start;
                if (tickssofar < TX_TIMEOUT - 2)
                        return 1;
                if (tickssofar < TX_TIMEOUT) {
                        /* Reap sent packets from the full Tx queue. */
                        outw(DRVR_INT, ioaddr + SCBCmd);
                        return 1;
                }
                speedo_tx_timeout(dev);
                return 1;
        }

        /* Caution: the write order is important here, set the base address
           with the "ownership" bits last. */

        {       /* Prevent interrupts from changing the Tx ring from underneath us. */
                unsigned long flags;

                save_flags(flags);
                cli();
                /* Calculate the Tx descriptor entry. */
                entry = sp->cur_tx++ % TX_RING_SIZE;

                sp->tx_skbuff[entry] = skb;
                /* Todo: be a little more clever about setting the interrupt bit. */
                sp->tx_ring[entry].status =
                        (CmdSuspend | CmdTx | CmdTxFlex) << 16;
                sp->tx_ring[entry].link =
                  virt_to_bus(&sp->tx_ring[sp->cur_tx % TX_RING_SIZE]);
                sp->tx_ring[entry].tx_desc_addr =
                  virt_to_bus(&sp->tx_ring[entry].tx_buf_addr0);
                /* The data region is always in one buffer descriptor, Tx FIFO
                   threshold of 256. */
                sp->tx_ring[entry].count = 0x01208000;
                sp->tx_ring[entry].tx_buf_addr0 = virt_to_bus(skb->data);
                sp->tx_ring[entry].tx_buf_size0 = skb->len;
                /* Todo: perhaps leave the interrupt bit set if the Tx queue is more
                   than half full.  Argument against: we should be receiving packets
                   and scavenging the queue.  Argument for: if so, it shouldn't
                   matter. */
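                /* Hand the CmdSuspend flag from the previous command to this one:
                   the entry above was queued with CmdSuspend already set, so clearing
                   it on the old last command lets the CU run into the new TxCB, and
                   the CU_RESUME below restarts the unit if it has already suspended
                   (see the race discussion in the Theory of Operation). */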
                sp->last_cmd->command &= ~(CmdSuspend | CmdIntr);
                sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
                restore_flags(flags);
                /* Trigger the command unit resume. */
                wait_for_cmd_done(ioaddr + SCBCmd);
                outw(CU_RESUME, ioaddr + SCBCmd);
        }

        /* Leave room for set_rx_mode() to fill two entries. */
        if (sp->cur_tx - sp->dirty_tx > TX_RING_SIZE - 3)
                sp->tx_full = 1;
        else
                clear_bit(0, (void*)&dev->tbusy);

        dev->trans_start = jiffies;

        return 0;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static void speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
        struct device *dev = (struct device *)dev_instance;
        struct speedo_private *sp;
        long ioaddr, boguscnt = max_interrupt_work;
        unsigned short status;

#ifndef final_version
        if (dev == NULL) {
                printk(KERN_ERR "speedo_interrupt(): irq %d for unknown device.\n", irq);
                return;
        }
#endif

        ioaddr = dev->base_addr;
        sp = (struct speedo_private *)dev->priv;
#ifndef final_version
        /* A lock to prevent simultaneous entry on SMP machines. */
        if (test_and_set_bit(0, (void*)&sp->in_interrupt)) {
                printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
                           dev->name);
                sp->in_interrupt = 0;   /* Avoid halting machine. */
                return;
        }
        dev->interrupt = 1;
#endif

        do {
                status = inw(ioaddr + SCBStatus);
                /* Acknowledge all of the current interrupt sources ASAP. */
                outw(status & 0xfc00, ioaddr + SCBStatus);

                if (speedo_debug > 4)
                        printk(KERN_DEBUG "%s: interrupt  status=%#4.4x.\n",
                                   dev->name, status);

                if ((status & 0xfc00) == 0)
                        break;

                if (status & 0x4000)    /* Packet received. */
                        speedo_rx(dev);

                if (status & 0x1000) {
                  if ((status & 0x003c) == 0x0028)      /* No more Rx buffers. */
                        outw(RX_RESUMENR, ioaddr + SCBCmd);
                  else if ((status & 0x003c) == 0x0008) { /* No resources (why?!) */
                        /* No idea of what went wrong.  Restart the receiver. */
                        outl(virt_to_bus(sp->rx_ringp[sp->cur_rx % RX_RING_SIZE]),
                                 ioaddr + SCBPointer);
                        outw(RX_START, ioaddr + SCBCmd);
                  }
                  sp->stats.rx_errors++;
                }

                /* User interrupt, Command/Tx unit interrupt or CU not active. */
                if (status & 0xA400) {
                        unsigned int dirty_tx = sp->dirty_tx;

                        while (sp->cur_tx - dirty_tx > 0) {
                                int entry = dirty_tx % TX_RING_SIZE;
                                int status = sp->tx_ring[entry].status;

                                if (speedo_debug > 5)
                                        printk(KERN_DEBUG " scavenge candidate %d status %4.4x.\n",
                                                   entry, status);
                                if ((status & StatusComplete) == 0)
                                        break;                  /* It still hasn't been processed. */
                                /* Free the original skb. */
                                if (sp->tx_skbuff[entry]) {
                                        sp->stats.tx_packets++; /* Count only user packets. */
                                        dev_free_skb(sp->tx_skbuff[entry]);
                                        sp->tx_skbuff[entry] = 0;
                                } else if ((sp->tx_ring[entry].status&0x70000) == CmdNOp << 16)
                                        sp->mc_setup_busy = 0;
                                dirty_tx++;
                        }

#ifndef final_version
                        if (sp->cur_tx - dirty_tx > TX_RING_SIZE) {
                                printk(KERN_ERR "out-of-sync dirty pointer, %d vs. %d,"
                                           " full=%d.\n",
                                           dirty_tx, sp->cur_tx, sp->tx_full);
                                dirty_tx += TX_RING_SIZE;
                        }
#endif

                        if (sp->tx_full && dev->tbusy
                                && dirty_tx > sp->cur_tx - TX_RING_SIZE + 2) {
                                /* The ring is no longer full, clear tbusy. */
                                sp->tx_full = 0;
                                clear_bit(0, (void*)&dev->tbusy);
                                mark_bh(NET_BH);
                        }

                        sp->dirty_tx = dirty_tx;
                }

                if (--boguscnt < 0) {
                        printk(KERN_ERR "%s: Too much work at interrupt, status=0x%4.4x.\n",
                                   dev->name, status);
                        /* Clear all interrupt sources. */
                        outl(0xfc00, ioaddr + SCBStatus);
                        break;
                }
        } while (1);

        if (speedo_debug > 3)
                printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
                           dev->name, inw(ioaddr + SCBStatus));

        dev->interrupt = 0;
        clear_bit(0, (void*)&sp->in_interrupt);
        return;
}
1180
 
static int
speedo_rx(struct device *dev)
{
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	int entry = sp->cur_rx % RX_RING_SIZE;
	int status;
	int rx_work_limit = sp->dirty_rx + RX_RING_SIZE - sp->cur_rx;

	if (speedo_debug > 4)
		printk(KERN_DEBUG " In speedo_rx().\n");
	/* If we own the next entry, it's a new packet. Send it up. */
	while (sp->rx_ringp[entry] != NULL &&
		   (status = sp->rx_ringp[entry]->status) & RxComplete) {

		if (--rx_work_limit < 0)
			break;
		if (speedo_debug > 4)
			printk(KERN_DEBUG "  speedo_rx() status %8.8x len %d.\n", status,
				   sp->rx_ringp[entry]->count & 0x3fff);
		if ((status & (RxErrTooBig|RxOK)) != RxOK) {
			if (status & RxErrTooBig)
				printk(KERN_ERR "%s: Ethernet frame overran the Rx buffer, "
					   "status %8.8x!\n", dev->name, status);
			else if ( ! (status & 0x2000)) {
				/* There was a fatal error.  This *should* be impossible. */
				sp->stats.rx_errors++;
				printk(KERN_ERR "%s: Anomalous event in speedo_rx(), "
					   "status %8.8x.\n",
					   dev->name, status);
			}
		} else {
			int pkt_len = sp->rx_ringp[entry]->count & 0x3fff;
			struct sk_buff *skb;

			/* Check if the packet is long enough to just accept without
			   copying to a properly sized skbuff. */
			if (pkt_len < rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != 0) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
				/* 'skb_put()' points to the start of sk_buff data area. */
#if 1 || USE_IP_CSUM
				/* Packet is in one chunk -- we can copy + cksum. */
				eth_copy_and_sum(skb,
								 bus_to_virt(sp->rx_ringp[entry]->rx_buf_addr),
								 pkt_len, 0);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len),
					   bus_to_virt(sp->rx_ringp[entry]->rx_buf_addr), pkt_len);
#endif
			} else {
				void *temp;
				/* Pass up the already-filled skbuff. */
				skb = sp->rx_skbuff[entry];
				if (skb == NULL) {
					printk(KERN_ERR "%s: Inconsistent Rx descriptor chain.\n",
						   dev->name);
					break;
				}
				sp->rx_skbuff[entry] = NULL;
				temp = skb_put(skb, pkt_len);
				if (bus_to_virt(sp->rx_ringp[entry]->rx_buf_addr) != temp)
					printk(KERN_ERR "%s: Rx consistency error -- the skbuff "
						   "addresses do not match in speedo_rx: %p vs. %p "
						   "/ %p.\n", dev->name,
						   bus_to_virt(sp->rx_ringp[entry]->rx_buf_addr),
						   skb->head, temp);
				sp->rx_ringp[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			sp->stats.rx_packets++;
		}
		entry = (++sp->cur_rx) % RX_RING_SIZE;
	}

	/* Refill the Rx ring buffers. */
	for (; sp->dirty_rx < sp->cur_rx; sp->dirty_rx++) {
		struct RxFD *rxf;
		entry = sp->dirty_rx % RX_RING_SIZE;
		if (sp->rx_skbuff[entry] == NULL) {
			struct sk_buff *skb;
			/* Get a fresh skbuff to replace the consumed one. */
			skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
			sp->rx_skbuff[entry] = skb;
			if (skb == NULL) {
				sp->rx_ringp[entry] = NULL;
				break;			/* Better luck next time!  */
			}
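			/* The RxFD is laid out at the head of the skb's data area; the
			   received packet data is placed immediately after it. */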
			rxf = sp->rx_ringp[entry] = (struct RxFD *)skb->tail;
			skb->dev = dev;
			skb_reserve(skb, sizeof(struct RxFD));
			rxf->rx_buf_addr = virt_to_bus(skb->tail);
		} else {
			rxf = sp->rx_ringp[entry];
		}
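		/* Presumably the top two bits written below are the end-of-list and
		   suspend bits of the RFD command word: the fresh descriptor becomes
		   the new tail of the Rx list, and clearing those bits on the old
		   tail lets the Rx unit advance onto it. */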
		rxf->status = 0xC0000001;	/* '1' for driver use only. */
		rxf->link = 0;			/* None yet. */
		rxf->count = 0;
		rxf->size = PKT_BUF_SZ;
		sp->last_rxf->link = virt_to_bus(rxf);
		sp->last_rxf->status &= ~0xC0000000;
		sp->last_rxf = rxf;
	}

	sp->last_rx_time = jiffies;
	return 0;
}

static int
speedo_close(struct device *dev)
{
	long ioaddr = dev->base_addr;
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	int i;

	dev->start = 0;
	dev->tbusy = 1;

	if (speedo_debug > 1)
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
			   dev->name, inw(ioaddr + SCBStatus));

	/* Shut off the media monitoring timer. */
	del_timer(&sp->timer);

	/* Disable interrupts, and stop the chip's Rx process. */
	outw(INT_MASK, ioaddr + SCBCmd);
	outw(INT_MASK | RX_ABORT, ioaddr + SCBCmd);

	free_irq(dev->irq, dev);

	/* Free all the skbuffs in the Rx and Tx queues. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = sp->rx_skbuff[i];
		sp->rx_skbuff[i] = 0;
		/* Clear the Rx descriptors. */
		if (skb)
			dev_free_skb(skb);
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		struct sk_buff *skb = sp->tx_skbuff[i];
		sp->tx_skbuff[i] = 0;
		/* Clear the Tx descriptors. */
		if (skb)
			dev_free_skb(skb);
	}
	if (sp->mc_setup_frm) {
		kfree(sp->mc_setup_frm);
		sp->mc_setup_frm_len = 0;
	}

	/* Print a few items for debugging. */
	if (speedo_debug > 3) {
		int phy_num = sp->phy[0] & 0x1f;
		printk(KERN_DEBUG "%s: Printing Rx ring (next to receive into %d).\n",
			   dev->name, sp->cur_rx);

		for (i = 0; i < RX_RING_SIZE; i++)
			printk(KERN_DEBUG "  Rx ring entry %d  %8.8x.\n",
				   i, (int)sp->rx_ringp[i]->status);

		for (i = 0; i < 5; i++)
			printk(KERN_DEBUG "  PHY index %d register %d is %4.4x.\n",
				   phy_num, i, mdio_read(ioaddr, phy_num, i));
		for (i = 21; i < 26; i++)
			printk(KERN_DEBUG "  PHY index %d register %d is %4.4x.\n",
				   phy_num, i, mdio_read(ioaddr, phy_num, i));
	}
	MOD_DEC_USE_COUNT;

	return 0;
}

/* The Speedo-3 has an especially awkward and unusable method of getting
   statistics out of the chip.  It takes an unpredictable length of time
   for the dump-stats command to complete.  To avoid a busy-wait loop we
   update the stats with the previous dump results, and then trigger a
   new dump.

   These problems are mitigated by the current /proc implementation, which
   calls this routine first to judge the output length, and then to emit the
   output.

   Oh, and incoming frames are dropped while executing dump-stats!
   */
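/* Note: the 0xA007 value tested below is assumed to be the completion
   signature the chip writes at the end of the statistics dump area when a
   dump-and-reset-statistics command has finished. */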
static struct enet_statistics *
speedo_get_stats(struct device *dev)
{
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	long ioaddr = dev->base_addr;

	if (sp->lstats.done_marker == 0xA007) {	/* Previous dump finished */
		sp->stats.tx_aborted_errors += sp->lstats.tx_coll16_errs;
		sp->stats.tx_window_errors += sp->lstats.tx_late_colls;
		sp->stats.tx_fifo_errors += sp->lstats.tx_underruns;
		sp->stats.tx_fifo_errors += sp->lstats.tx_lost_carrier;
		/*sp->stats.tx_deferred += sp->lstats.tx_deferred;*/
		sp->stats.collisions += sp->lstats.tx_total_colls;
		sp->stats.rx_crc_errors += sp->lstats.rx_crc_errs;
		sp->stats.rx_frame_errors += sp->lstats.rx_align_errs;
		sp->stats.rx_over_errors += sp->lstats.rx_resource_errs;
		sp->stats.rx_fifo_errors += sp->lstats.rx_overrun_errs;
		sp->stats.rx_length_errors += sp->lstats.rx_runt_errs;
		sp->lstats.done_marker = 0x0000;
		if (dev->start) {
			wait_for_cmd_done(ioaddr + SCBCmd);
			outw(CU_DUMPSTATS, ioaddr + SCBCmd);
		}
	}
	return &sp->stats;
}

static int speedo_ioctl(struct device *dev, struct ifreq *rq, int cmd)
{
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	long ioaddr = dev->base_addr;
	u16 *data = (u16 *)&rq->ifr_data;
	int phy = sp->phy[0] & 0x1f;

	switch (cmd) {
	case SIOCDEVPRIVATE:		/* Get the address of the PHY in use. */
		data[0] = phy;
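		/* Fall through and read the requested register as well. */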
	case SIOCDEVPRIVATE+1:		/* Read the specified MII register. */
		data[3] = mdio_read(ioaddr, data[0], data[1]);
		return 0;
	case SIOCDEVPRIVATE+2:		/* Write the specified MII register */
		if (!suser())
			return -EPERM;
		mdio_write(ioaddr, data[0], data[1], data[2]);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* Set or clear the multicast filter for this adaptor.
   This is very ugly with Intel chips -- we usually have to execute an
   entire configuration command, plus process a multicast command.
   This is complicated.  We must put a large configuration command and
   an arbitrarily-sized multicast command in the transmit list.
   To minimize the disruption -- the previous command might have already
   loaded the link -- we convert the current command block, normally a Tx
   command, into a no-op and link it to the new command.
*/
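/* Rx filter modes used below: 3 = promiscuous, 1 = accept all multicast,
   0 = station address plus an explicit multicast setup list. */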
static void
set_rx_mode(struct device *dev)
{
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	long ioaddr = dev->base_addr;
	struct descriptor *last_cmd;
	char new_rx_mode;
	unsigned long flags;
	int entry, i;

	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		new_rx_mode = 3;
	} else if ((dev->flags & IFF_ALLMULTI)  ||
			   dev->mc_count > multicast_filter_limit) {
		new_rx_mode = 1;
	} else
		new_rx_mode = 0;

	if (sp->cur_tx - sp->dirty_tx >= TX_RING_SIZE - 1) {
		/* The Tx ring is full -- don't add anything!  Presumably the new mode
		   is in config_cmd_data and will be added anyway. */
		sp->rx_mode = -1;
		return;
	}

	if (new_rx_mode != sp->rx_mode) {
		u8 *config_cmd_data;

		save_flags(flags);		/* Lock to protect sp->cur_tx. */
		cli();
		entry = sp->cur_tx++ % TX_RING_SIZE;
		last_cmd = sp->last_cmd;
		sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
		restore_flags(flags);

		sp->tx_skbuff[entry] = 0;			/* Redundant. */
		sp->tx_ring[entry].status = (CmdSuspend | CmdConfigure) << 16;
		sp->tx_ring[entry].link =
			virt_to_bus(&sp->tx_ring[(entry + 1) % TX_RING_SIZE]);
		config_cmd_data = (void *)&sp->tx_ring[entry].tx_desc_addr;
		/* Construct a full CmdConfig frame. */
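		/* The byte offsets patched below are assumed to follow the
		   i82557/i82558 configure-block layout: byte 1 the Tx/Rx FIFO
		   thresholds, bytes 4-5 the Rx/Tx DMA maximum byte counts (0x80
		   enables the Tx count), byte 15 the promiscuous bit, byte 19
		   full duplex, byte 21 accept-all-multicast. */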
		memcpy(config_cmd_data, i82558_config_cmd, sizeof(i82558_config_cmd));
		config_cmd_data[1] = (txfifo << 4) | rxfifo;
		config_cmd_data[4] = rxdmacount;
		config_cmd_data[5] = txdmacount + 0x80;
		config_cmd_data[15] |= (new_rx_mode & 2) ? 1 : 0;
		config_cmd_data[19] |= sp->full_duplex ? 0x40 : 0;
		config_cmd_data[21] = (new_rx_mode & 1) ? 0x0D : 0x05;
		if (sp->phy[0] & 0x8000) {			/* Use the AUI port instead. */
			config_cmd_data[15] |= 0x80;
			config_cmd_data[8] = 0;
		}
		/* Trigger the command unit resume. */
		last_cmd->command &= ~CmdSuspend;
		wait_for_cmd_done(ioaddr + SCBCmd);
		outw(CU_RESUME, ioaddr + SCBCmd);
	}

	if (new_rx_mode == 0  &&  dev->mc_count < 4) {
		/* The simple case of 0-3 multicast list entries occurs often, and
		   fits within one tx_ring[] entry. */
		struct dev_mc_list *mclist;
		u16 *setup_params, *eaddrs;

		save_flags(flags);		/* Lock to protect sp->cur_tx. */
		cli();
		entry = sp->cur_tx++ % TX_RING_SIZE;
		last_cmd = sp->last_cmd;
		sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
		restore_flags(flags);

		sp->tx_skbuff[entry] = 0;
		sp->tx_ring[entry].status = (CmdSuspend | CmdMulticastList) << 16;
		sp->tx_ring[entry].link =
			virt_to_bus(&sp->tx_ring[(entry + 1) % TX_RING_SIZE]);
		sp->tx_ring[entry].tx_desc_addr = 0; /* Really MC list count. */
		setup_params = (u16 *)&sp->tx_ring[entry].tx_desc_addr;
		*setup_params++ = dev->mc_count*6;
		/* Fill in the multicast addresses. */
		for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
			 i++, mclist = mclist->next) {
			eaddrs = (u16 *)mclist->dmi_addr;
			*setup_params++ = *eaddrs++;
			*setup_params++ = *eaddrs++;
			*setup_params++ = *eaddrs++;
		}

		last_cmd->command &= ~CmdSuspend;
		/* Immediately trigger the command unit resume. */
		wait_for_cmd_done(ioaddr + SCBCmd);
		outw(CU_RESUME, ioaddr + SCBCmd);
	} else if (new_rx_mode == 0) {
		struct dev_mc_list *mclist;
		u16 *setup_params, *eaddrs;
		struct descriptor *mc_setup_frm = sp->mc_setup_frm;
		int i;

		if (sp->mc_setup_frm_len < 10 + dev->mc_count*6
			|| sp->mc_setup_frm == NULL) {
			/* Allocate a full setup frame, 10 bytes + <max addrs>. */
			if (sp->mc_setup_frm)
				kfree(sp->mc_setup_frm);
			sp->mc_setup_busy = 0;
			sp->mc_setup_frm_len = 10 + multicast_filter_limit*6;
			sp->mc_setup_frm = kmalloc(sp->mc_setup_frm_len, GFP_ATOMIC);
			if (sp->mc_setup_frm == NULL) {
				printk(KERN_ERR "%s: Failed to allocate a setup frame.\n",
					   dev->name);
				sp->rx_mode = -1; /* We failed, try again. */
				return;
			}
		}
		/* If we are busy, someone might be quickly adding to the MC list.
		   Try again later, once the list has stopped changing. */
		if (sp->mc_setup_busy) {
			sp->rx_mode = -1;
			return;
		}
		mc_setup_frm = sp->mc_setup_frm;
		/* Fill the setup frame. */
		if (speedo_debug > 1)
			printk(KERN_DEBUG "%s: Constructing a setup frame at %p, "
				   "%d bytes.\n",
				   dev->name, sp->mc_setup_frm, sp->mc_setup_frm_len);
		mc_setup_frm->status = 0;
		mc_setup_frm->command = CmdSuspend | CmdIntr | CmdMulticastList;
		/* Link set below. */
		setup_params = (u16 *)&mc_setup_frm->params;
		*setup_params++ = dev->mc_count*6;
		/* Fill in the multicast addresses. */
		for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
			 i++, mclist = mclist->next) {
			eaddrs = (u16 *)mclist->dmi_addr;
			*setup_params++ = *eaddrs++;
			*setup_params++ = *eaddrs++;
			*setup_params++ = *eaddrs++;
		}

		/* Disable interrupts while playing with the Tx Cmd list. */
		save_flags(flags);
		cli();
		entry = sp->cur_tx++ % TX_RING_SIZE;
		last_cmd = sp->last_cmd;
		sp->last_cmd = mc_setup_frm;
		sp->mc_setup_busy++;
		restore_flags(flags);

		/* Change the command to a NoOp, pointing to the CmdMulti command. */
		sp->tx_skbuff[entry] = 0;
		sp->tx_ring[entry].status = CmdNOp << 16;
		sp->tx_ring[entry].link = virt_to_bus(mc_setup_frm);

		/* Set the link in the setup frame. */
		mc_setup_frm->link =
			virt_to_bus(&(sp->tx_ring[(entry+1) % TX_RING_SIZE]));

		last_cmd->command &= ~CmdSuspend;
		/* Immediately trigger the command unit resume. */
		wait_for_cmd_done(ioaddr + SCBCmd);
		outw(CU_RESUME, ioaddr + SCBCmd);
		if (speedo_debug > 5)
			printk(" CmdMCSetup frame with %d addresses in entry %d.\n",
				   dev->mc_count, entry);
	}

	sp->rx_mode = new_rx_mode;
}

#ifdef MODULE

int
init_module(void)
{
	int cards_found;

	if (debug >= 0)
		speedo_debug = debug;
	if (speedo_debug)
		printk(KERN_INFO "%s", version);

	root_speedo_dev = NULL;
	cards_found = eepro100_init(NULL);
	return cards_found ? 0 : -ENODEV;
}

void
cleanup_module(void)
{
	struct device *next_dev;

	/* No need to check MOD_IN_USE, as sys_delete_module() checks. */
	while (root_speedo_dev) {
		next_dev = ((struct speedo_private *)root_speedo_dev->priv)->next_module;
		unregister_netdev(root_speedo_dev);
		release_region(root_speedo_dev->base_addr, SPEEDO3_TOTAL_SIZE);
		kfree(root_speedo_dev);
		root_speedo_dev = next_dev;
	}
}
#else   /* not MODULE */
int eepro100_probe(struct device *dev)
{
	int cards_found = 0;

	cards_found = eepro100_init(dev);

	if (speedo_debug > 0  &&  cards_found)
		printk(version);

	return cards_found ? 0 : -ENODEV;
}
#endif  /* MODULE */

/*
 * Local variables:
 *  compile-command: "gcc -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c eepro100.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
 *  SMP-compile-command: "gcc -D__SMP__ -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c eepro100.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
 *  c-indent-level: 4
 *  c-basic-offset: 4
 *  tab-width: 4
 * End:
 */
