/*
 *  olympic.c (c) 1999 Peter De Schrijver All Rights Reserved
 *            1999/2000 Mike Phillips (mikep@linuxtr.net)
 *
 *  Linux driver for IBM PCI tokenring cards based on the Pit/Pit-Phy/Olympic
 *  chipset.
 *
 *  Base Driver Skeleton:
 *      Written 1993-94 by Donald Becker.
 *
 *      Copyright 1993 United States Government as represented by the
 *      Director, National Security Agency.
 *
 *  Thanks to Erik De Cock, Adrian Bridgett and Frank Fiene for their
 *  assistance and perseverance with the testing of this driver.
 *
 *  This software may be used and distributed according to the terms
 *  of the GNU General Public License, incorporated herein by reference.
 *
 *  4/27/99  - Alpha Release 0.1.0
 *             First release to the public
 *
 *  6/8/99   - Official Release 0.2.0
 *             Merged into the kernel code
 *  8/18/99  - Updated driver for 2.3.13 kernel to use new pci
 *             resource. Driver also reports the card name returned by
 *             the pci resource.
 *  1/11/00  - Added spinlocks for smp
 *  2/23/00  - Updated to dev_kfree_irq
 *  3/10/00  - Fixed FDX enable which triggered other bugs also
 *             squashed.
 *  5/20/00  - Changes to handle Olympic on LinuxPPC. Endian changes.
 *             The odd thing about the changes is that the fix for
 *             endian issues with the big-endian data in the arb, asb...
 *             was to always swab() the bytes, no matter what CPU.
 *             That's because the read[wl]() functions always swap the
 *             bytes on the way in on PPC.
 *             Fixing the hardware descriptors was another matter,
 *             because they weren't going through read[wl](), so all
 *             the results had to be in memory in le32 values. kdaaker
 *
 *  12/23/00 - Added minimal Cardbus support (Thanks Donald).
 *
 *  03/09/01 - Add new pci api, dev_base_lock, general clean up.
 *
 *  03/27/01 - Add new dma pci (Thanks to Kyle Lucke) and alloc_trdev
 *             Change proc_fs behaviour, now one entry per adapter.
 *
 *  04/09/01 - Couple of bug fixes to the dma unmaps, so that ejecting the
 *             adapter when live does not take the system down with it.
 *
 *  06/02/01 - Clean up, copy skb for small packets
 *
 *  06/22/01 - Add EISR error handling routines
 *
 *  07/19/01 - Improve bad LAA reporting, strip out freemem
 *             into a separate function, it's called from 3
 *             different places now.
 *  02/09/02 - Replaced sleep_on.
 *  03/01/02 - Replace access to several registers from 32 bit to
 *             16 bit. Fixes alignment errors on PPC 64 bit machines.
 *             Thanks to Al Trautman for this one.
 *  03/10/02 - Fix BUG in arb_cmd. Bug was there all along but was
 *             silently ignored until the error checking code
 *             went into version 1.0.0
 *  06/04/02 - Add correct start up sequence for the cardbus adapters.
 *             Required for strict compliance with pci power mgmt specs.
 *  To Do:
 *
 *  Wake on lan
 *
 *  If Problems do Occur
 *  Most problems can be rectified by either closing and opening the interface
 *  (ifconfig down and up) or rmmod and insmod'ing the driver (a bit difficult
 *  if compiled into the kernel).
 */

/* Change OLYMPIC_DEBUG to 1 to get verbose, and I mean really verbose, messages */

#define OLYMPIC_DEBUG 0


#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/in.h>
#include <linux/ioport.h>
#include <linux/string.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/trdevice.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/jiffies.h>

#include <net/checksum.h>
#include <net/net_namespace.h>

#include <asm/io.h>
#include <asm/system.h>

#include "olympic.h"

/* I've got to put some intelligence into the version number so that Peter and I know
 * which version of the code somebody has got.
 * Version Number = a.b.c.d where a.b.c is the level of code and d is the latest author.
 * So 0.0.1.pds = Peter, 0.0.1.mlp = Mike
 *
 * Official releases will only have an a.b.c version number format.
 */

static char version[] __devinitdata =
"Olympic.c v1.0.5 6/04/02 - Peter De Schrijver & Mike Phillips" ;

static char *open_maj_error[] = {"No error", "Lobe Media Test", "Physical Insertion",
                                 "Address Verification", "Neighbor Notification (Ring Poll)",
                                 "Request Parameters", "FDX Registration Request",
                                 "FDX Duplicate Address Check", "Station registration Query Wait",
                                 "Unknown stage"};

static char *open_min_error[] = {"No error", "Function Failure", "Signal Lost", "Wire Fault",
                                 "Ring Speed Mismatch", "Timeout", "Ring Failure", "Ring Beaconing",
                                 "Duplicate Node Address", "Request Parameters", "Remove Received",
                                 "Reserved", "Reserved", "No Monitor Detected for RPL",
                                 "Monitor Contention failure for RPL", "FDX Protocol Error"};
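
/* The OPEN SRB reports a failed open as a single error byte: the high
 * nibble indexes open_maj_error (the open stage that failed) and the low
 * nibble indexes open_min_error (the reason), so e.g. 0x23 decodes to
 * "Physical Insertion" - "Wire Fault". olympic_open() prints both strings.
 */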

/* Module parameters */

MODULE_AUTHOR("Mike Phillips <mikep@linuxtr.net>") ;
MODULE_DESCRIPTION("Olympic PCI/Cardbus Chipset Driver") ;

/* Ring Speed 0,4,16,100
 * 0 = Autosense
 * 4,16 = Selected speed only, no autosense
 * This allows the card to be the first on the ring
 * and become the active monitor.
 * 100 = Nothing at present, 100mbps is autodetected
 * if FDX is turned on. May be implemented in the future to
 * fail if 100mbps is not detected.
 *
 * WARNING: Some hubs will allow you to insert
 * at the wrong speed
 */

static int ringspeed[OLYMPIC_MAX_ADAPTERS] = {0,} ;
module_param_array(ringspeed, int, NULL, 0);

/* Packet buffer size */

static int pkt_buf_sz[OLYMPIC_MAX_ADAPTERS] = {0,} ;
module_param_array(pkt_buf_sz, int, NULL, 0) ;

/* Message Level */

static int message_level[OLYMPIC_MAX_ADAPTERS] = {0,} ;
module_param_array(message_level, int, NULL, 0) ;

/* Change network_monitor to receive mac frames through the arb channel.
 * Will also create a /proc/net/olympic_tr%d entry, where %d is the tr
 * device, i.e. tr0, tr1 etc.
 * Intended to be used to create a ring-error reporting network module
 * i.e. it will give you the source address of beaconers on the ring
 */
static int network_monitor[OLYMPIC_MAX_ADAPTERS] = {0,};
module_param_array(network_monitor, int, NULL, 0);
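
/* Example usage (illustrative only - the module is normally loaded as
 * "olympic" and each parameter is a per-adapter list indexed in probe order):
 *
 *   modprobe olympic ringspeed=16,4 pkt_buf_sz=4096,4096 message_level=1,1
 *
 * would ask the first adapter to insert at 16 Mbps and the second at 4 Mbps,
 * use 4096 byte receive buffers and turn on informational messages for both.
 */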
174 |
|
|
|
175 |
|
|
static struct pci_device_id olympic_pci_tbl[] = {
|
176 |
|
|
{PCI_VENDOR_ID_IBM,PCI_DEVICE_ID_IBM_TR_WAKE,PCI_ANY_ID,PCI_ANY_ID,},
|
177 |
|
|
{ } /* Terminating Entry */
|
178 |
|
|
};
|
179 |
|
|
MODULE_DEVICE_TABLE(pci,olympic_pci_tbl) ;
|
180 |
|
|
|
181 |
|
|
|
182 |
|
|
static int olympic_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
|
183 |
|
|
static int olympic_init(struct net_device *dev);
|
184 |
|
|
static int olympic_open(struct net_device *dev);
|
185 |
|
|
static int olympic_xmit(struct sk_buff *skb, struct net_device *dev);
|
186 |
|
|
static int olympic_close(struct net_device *dev);
|
187 |
|
|
static void olympic_set_rx_mode(struct net_device *dev);
|
188 |
|
|
static void olympic_freemem(struct net_device *dev) ;
|
189 |
|
|
static irqreturn_t olympic_interrupt(int irq, void *dev_id);
|
190 |
|
|
static struct net_device_stats * olympic_get_stats(struct net_device *dev);
|
191 |
|
|
static int olympic_set_mac_address(struct net_device *dev, void *addr) ;
|
192 |
|
|
static void olympic_arb_cmd(struct net_device *dev);
|
193 |
|
|
static int olympic_change_mtu(struct net_device *dev, int mtu);
|
194 |
|
|
static void olympic_srb_bh(struct net_device *dev) ;
|
195 |
|
|
static void olympic_asb_bh(struct net_device *dev) ;
|
196 |
|
|
static int olympic_proc_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data) ;
|
197 |
|
|
|
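/*
 * olympic_probe - called by the PCI core for each adapter matching
 * olympic_pci_tbl. Enables the device, claims its regions, allocates the
 * token ring net_device, maps the MMIO and LAP windows, applies the
 * per-adapter module parameters and registers the interface.
 */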
static int __devinit olympic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
    struct net_device *dev ;
    struct olympic_private *olympic_priv;
    static int card_no = -1 ;
    int i ;

    card_no++ ;

    if ((i = pci_enable_device(pdev))) {
        return i ;
    }

    pci_set_master(pdev);

    if ((i = pci_request_regions(pdev,"olympic"))) {
        goto op_disable_dev;
    }

    dev = alloc_trdev(sizeof(struct olympic_private)) ;
    if (!dev) {
        i = -ENOMEM;
        goto op_release_dev;
    }

    olympic_priv = netdev_priv(dev) ;

    spin_lock_init(&olympic_priv->olympic_lock) ;

    init_waitqueue_head(&olympic_priv->srb_wait);
    init_waitqueue_head(&olympic_priv->trb_wait);
#if OLYMPIC_DEBUG
    printk(KERN_INFO "pci_device: %p, dev:%p, dev->priv: %p\n", pdev, dev, netdev_priv(dev));
#endif
    dev->irq=pdev->irq;
    dev->base_addr=pci_resource_start(pdev, 0);
    olympic_priv->olympic_card_name = pci_name(pdev);
    olympic_priv->pdev = pdev;
    olympic_priv->olympic_mmio = ioremap(pci_resource_start(pdev,1),256);
    olympic_priv->olympic_lap = ioremap(pci_resource_start(pdev,2),2048);
    if (!olympic_priv->olympic_mmio || !olympic_priv->olympic_lap) {
        goto op_free_iomap;
    }

    if ((pkt_buf_sz[card_no] < 100) || (pkt_buf_sz[card_no] > 18000))
        olympic_priv->pkt_buf_sz = PKT_BUF_SZ ;
    else
        olympic_priv->pkt_buf_sz = pkt_buf_sz[card_no] ;

    dev->mtu = olympic_priv->pkt_buf_sz - TR_HLEN ;
    olympic_priv->olympic_ring_speed = ringspeed[card_no] ;
    olympic_priv->olympic_message_level = message_level[card_no] ;
    olympic_priv->olympic_network_monitor = network_monitor[card_no];

    if ((i = olympic_init(dev))) {
        goto op_free_iomap;
    }

    dev->open=&olympic_open;
    dev->hard_start_xmit=&olympic_xmit;
    dev->change_mtu=&olympic_change_mtu;
    dev->stop=&olympic_close;
    dev->do_ioctl=NULL;
    dev->set_multicast_list=&olympic_set_rx_mode;
    dev->get_stats=&olympic_get_stats ;
    dev->set_mac_address=&olympic_set_mac_address ;
    SET_NETDEV_DEV(dev, &pdev->dev);

    pci_set_drvdata(pdev,dev) ;
    register_netdev(dev) ;
    printk("Olympic: %s registered as: %s\n",olympic_priv->olympic_card_name,dev->name);
    if (olympic_priv->olympic_network_monitor) { /* Must go after register_netdev as we need the device name */
        char proc_name[20] ;
        strcpy(proc_name,"olympic_") ;
        strcat(proc_name,dev->name) ;
        create_proc_read_entry(proc_name,0,init_net.proc_net,olympic_proc_info,(void *)dev) ;
        printk("Olympic: Network Monitor information: /proc/%s\n",proc_name);
    }
    return 0 ;

op_free_iomap:
    if (olympic_priv->olympic_mmio)
        iounmap(olympic_priv->olympic_mmio);
    if (olympic_priv->olympic_lap)
        iounmap(olympic_priv->olympic_lap);

    free_netdev(dev);
op_release_dev:
    pci_release_regions(pdev);

op_disable_dev:
    pci_disable_device(pdev);
    return i;
}

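/*
 * olympic_init - take the adapter through its hardware reset and initial
 * SRB handshake: soft reset via BCTL, program the requested ring speed into
 * GPR, un-pause a cardbus adapter, wait for the SRB reply and then read the
 * burned-in address and the adapter table offsets from the init SRB.
 */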
static int __devinit olympic_init(struct net_device *dev)
{
    struct olympic_private *olympic_priv;
    u8 __iomem *olympic_mmio, *init_srb, *adapter_addr;
    unsigned long t;
    unsigned int uaa_addr;

    olympic_priv=netdev_priv(dev);
    olympic_mmio=olympic_priv->olympic_mmio;

    printk("%s \n", version);
    printk("%s. I/O at %hx, MMIO at %p, LAP at %p, using irq %d\n", olympic_priv->olympic_card_name, (unsigned int) dev->base_addr,olympic_priv->olympic_mmio, olympic_priv->olympic_lap, dev->irq);

    writel(readl(olympic_mmio+BCTL) | BCTL_SOFTRESET,olympic_mmio+BCTL);
    t=jiffies;
    while((readl(olympic_mmio+BCTL)) & BCTL_SOFTRESET) {
        schedule();
        if(time_after(jiffies, t + 40*HZ)) {
            printk(KERN_ERR "IBM PCI tokenring card not responding.\n");
            return -ENODEV;
        }
    }


    /* Needed for cardbus */
    if(!(readl(olympic_mmio+BCTL) & BCTL_MODE_INDICATOR)) {
        writel(readl(olympic_priv->olympic_mmio+FERMASK)|FERMASK_INT_BIT, olympic_mmio+FERMASK);
    }

#if OLYMPIC_DEBUG
    printk("BCTL: %x\n",readl(olympic_mmio+BCTL));
    printk("GPR: %x\n",readw(olympic_mmio+GPR));
    printk("SISRMASK: %x\n",readl(olympic_mmio+SISR_MASK));
#endif
    /* Aaaahhh, You have got to be real careful setting GPR, the card
       holds the previous values from flash memory, including autosense
       and ring speed */

    writel(readl(olympic_mmio+BCTL)|BCTL_MIMREB,olympic_mmio+BCTL);

    if (olympic_priv->olympic_ring_speed == 0) { /* Autosense */
        writew(readw(olympic_mmio+GPR)|GPR_AUTOSENSE,olympic_mmio+GPR);
        if (olympic_priv->olympic_message_level)
            printk(KERN_INFO "%s: Ringspeed autosense mode on\n",olympic_priv->olympic_card_name);
    } else if (olympic_priv->olympic_ring_speed == 16) {
        if (olympic_priv->olympic_message_level)
            printk(KERN_INFO "%s: Trying to open at 16 Mbps as requested\n", olympic_priv->olympic_card_name);
        writew(GPR_16MBPS, olympic_mmio+GPR);
    } else if (olympic_priv->olympic_ring_speed == 4) {
        if (olympic_priv->olympic_message_level)
            printk(KERN_INFO "%s: Trying to open at 4 Mbps as requested\n", olympic_priv->olympic_card_name) ;
        writew(0, olympic_mmio+GPR);
    }

    writew(readw(olympic_mmio+GPR)|GPR_NEPTUNE_BF,olympic_mmio+GPR);

#if OLYMPIC_DEBUG
    printk("GPR = %x\n",readw(olympic_mmio + GPR) ) ;
#endif
    /* Solo has been paused to meet the Cardbus power
     * specs if the adapter is cardbus. Check to
     * see if it has been paused and then restart solo. The
     * adapter should set the pause bit within 1 second.
     */

    if(!(readl(olympic_mmio+BCTL) & BCTL_MODE_INDICATOR)) {
        t=jiffies;
        while (!(readl(olympic_mmio+CLKCTL) & CLKCTL_PAUSE)) {
            schedule() ;
            if(time_after(jiffies, t + 2*HZ)) {
                printk(KERN_ERR "IBM Cardbus tokenring adapter not responding.\n") ;
                return -ENODEV;
            }
        }
        writel(readl(olympic_mmio+CLKCTL) & ~CLKCTL_PAUSE, olympic_mmio+CLKCTL) ;
    }

    /* start solo init */
    writel((1<<15),olympic_mmio+SISR_MASK_SUM);

    t=jiffies;
    while(!((readl(olympic_mmio+SISR_RR)) & SISR_SRB_REPLY)) {
        schedule();
        if(time_after(jiffies, t + 15*HZ)) {
            printk(KERN_ERR "IBM PCI tokenring card not responding.\n");
            return -ENODEV;
        }
    }

    writel(readw(olympic_mmio+LAPWWO),olympic_mmio+LAPA);

#if OLYMPIC_DEBUG
    printk("LAPWWO: %x, LAPA: %x\n",readl(olympic_mmio+LAPWWO), readl(olympic_mmio+LAPA));
#endif

    init_srb=olympic_priv->olympic_lap + ((readw(olympic_mmio+LAPWWO)) & (~0xf800));

#if OLYMPIC_DEBUG
    {
        int i;
        printk("init_srb(%p): ",init_srb);
        for(i=0;i<20;i++)
            printk("%x ",readb(init_srb+i));
        printk("\n");
    }
#endif
    if(readw(init_srb+6)) {
        printk(KERN_INFO "tokenring card initialization failed. errorcode : %x\n",readw(init_srb+6));
        return -ENODEV;
    }

    if (olympic_priv->olympic_message_level) {
        if (readb(init_srb+2) & 0x40) {
            printk(KERN_INFO "Olympic: Adapter is FDX capable.\n") ;
        } else {
            printk(KERN_INFO "Olympic: Adapter cannot do FDX.\n");
        }
    }

    uaa_addr=swab16(readw(init_srb+8));

#if OLYMPIC_DEBUG
    printk("UAA resides at %x\n",uaa_addr);
#endif

    writel(uaa_addr,olympic_mmio+LAPA);
    adapter_addr=olympic_priv->olympic_lap + (uaa_addr & (~0xf800));

    memcpy_fromio(&dev->dev_addr[0], adapter_addr,6);

#if OLYMPIC_DEBUG
    {
        DECLARE_MAC_BUF(mac);
        printk("adapter address: %s\n", print_mac(mac, dev->dev_addr));
    }
#endif

    olympic_priv->olympic_addr_table_addr = swab16(readw(init_srb + 12));
    olympic_priv->olympic_parms_addr = swab16(readw(init_srb + 14));

    return 0;

}

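/*
 * olympic_open - open the adapter onto the ring. Requests the interrupt,
 * issues the OPEN SRB (retrying once at a different speed when autosensing),
 * then builds and maps the rx and tx descriptor rings and enables the
 * receive/transmit channels and their interrupts.
 */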
static int olympic_open(struct net_device *dev)
{
    struct olympic_private *olympic_priv=netdev_priv(dev);
    u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio,*init_srb;
    unsigned long flags, t;
    int i, open_finished = 1 ;
    u8 resp, err;
    DECLARE_MAC_BUF(mac);

    DECLARE_WAITQUEUE(wait,current) ;

    olympic_init(dev);

    if(request_irq(dev->irq, &olympic_interrupt, IRQF_SHARED , "olympic", dev)) {
        return -EAGAIN;
    }

#if OLYMPIC_DEBUG
    printk("BMCTL: %x\n",readl(olympic_mmio+BMCTL_SUM));
    printk("pending ints: %x\n",readl(olympic_mmio+SISR_RR));
#endif

    writel(SISR_MI,olympic_mmio+SISR_MASK_SUM);

    writel(SISR_MI | SISR_SRB_REPLY, olympic_mmio+SISR_MASK); /* more ints later, doesn't stop arb cmd interrupt */

    writel(LISR_LIE,olympic_mmio+LISR); /* more ints later */

    /* adapter is closed, so SRB is pointed to by LAPWWO */

    writel(readw(olympic_mmio+LAPWWO),olympic_mmio+LAPA);
    init_srb=olympic_priv->olympic_lap + ((readw(olympic_mmio+LAPWWO)) & (~0xf800));

#if OLYMPIC_DEBUG
    printk("LAPWWO: %x, LAPA: %x\n",readw(olympic_mmio+LAPWWO), readl(olympic_mmio+LAPA));
    printk("SISR Mask = %04x\n", readl(olympic_mmio+SISR_MASK));
    printk("Before the open command \n");
#endif
    do {
        memset_io(init_srb,0,SRB_COMMAND_SIZE);

        writeb(SRB_OPEN_ADAPTER,init_srb) ;    /* open */
        writeb(OLYMPIC_CLEAR_RET_CODE,init_srb+2);

        /* If Network Monitor, instruct card to copy MAC frames through the ARB */
        if (olympic_priv->olympic_network_monitor)
            writew(swab16(OPEN_ADAPTER_ENABLE_FDX | OPEN_ADAPTER_PASS_ADC_MAC | OPEN_ADAPTER_PASS_ATT_MAC | OPEN_ADAPTER_PASS_BEACON), init_srb+8);
        else
            writew(swab16(OPEN_ADAPTER_ENABLE_FDX), init_srb+8);

        /* Test OR of first 3 bytes as its totally possible for
         * someone to set the first 2 bytes to be zero, although this
         * is an error, the first byte must have bit 6 set to 1 */

        if (olympic_priv->olympic_laa[0] | olympic_priv->olympic_laa[1] | olympic_priv->olympic_laa[2]) {
            writeb(olympic_priv->olympic_laa[0],init_srb+12);
            writeb(olympic_priv->olympic_laa[1],init_srb+13);
            writeb(olympic_priv->olympic_laa[2],init_srb+14);
            writeb(olympic_priv->olympic_laa[3],init_srb+15);
            writeb(olympic_priv->olympic_laa[4],init_srb+16);
            writeb(olympic_priv->olympic_laa[5],init_srb+17);
            memcpy(dev->dev_addr,olympic_priv->olympic_laa,dev->addr_len) ;
        }
        writeb(1,init_srb+30);

        spin_lock_irqsave(&olympic_priv->olympic_lock,flags);
        olympic_priv->srb_queued=1;

        writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
        spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);

        t = jiffies ;

        add_wait_queue(&olympic_priv->srb_wait,&wait) ;
        set_current_state(TASK_INTERRUPTIBLE) ;

        while(olympic_priv->srb_queued) {
            schedule() ;
            if(signal_pending(current)) {
                printk(KERN_WARNING "%s: Signal received in open.\n",
                    dev->name);
                printk(KERN_WARNING "SISR=%x LISR=%x\n",
                    readl(olympic_mmio+SISR),
                    readl(olympic_mmio+LISR));
                olympic_priv->srb_queued=0;
                break;
            }
            if (time_after(jiffies, t + 10*HZ)) {
                printk(KERN_WARNING "%s: SRB timed out. \n",dev->name) ;
                olympic_priv->srb_queued=0;
                break ;
            }
            set_current_state(TASK_INTERRUPTIBLE) ;
        }
        remove_wait_queue(&olympic_priv->srb_wait,&wait) ;
        set_current_state(TASK_RUNNING) ;
        olympic_priv->srb_queued = 0 ;
#if OLYMPIC_DEBUG
        printk("init_srb(%p): ",init_srb);
        for(i=0;i<20;i++)
            printk("%02x ",readb(init_srb+i));
        printk("\n");
#endif

        /* If we get the same return response as we set, the interrupt wasn't raised and the open
         * timed out.
         */

        switch (resp = readb(init_srb+2)) {
        case OLYMPIC_CLEAR_RET_CODE:
            printk(KERN_WARNING "%s: Adapter Open time out or error.\n", dev->name) ;
            goto out;
        case 0:
            open_finished = 1;
            break;
        case 0x07:
            if (!olympic_priv->olympic_ring_speed && open_finished) { /* Autosense , first time around */
                printk(KERN_WARNING "%s: Retrying at different ring speed \n", dev->name);
                open_finished = 0 ;
                continue;
            }

            err = readb(init_srb+7);

            if (!olympic_priv->olympic_ring_speed && ((err & 0x0f) == 0x0d)) {
                printk(KERN_WARNING "%s: Tried to autosense ring speed with no monitors present\n",dev->name);
                printk(KERN_WARNING "%s: Please try again with a specified ring speed \n",dev->name);
            } else {
                printk(KERN_WARNING "%s: %s - %s\n", dev->name,
                    open_maj_error[(err & 0xf0) >> 4],
                    open_min_error[(err & 0x0f)]);
            }
            goto out;

        case 0x32:
            printk(KERN_WARNING "%s: Invalid LAA: %s\n",
                dev->name, print_mac(mac, olympic_priv->olympic_laa));
            goto out;

        default:
            printk(KERN_WARNING "%s: Bad OPEN response: %x\n", dev->name, resp);
            goto out;

        }
    } while (!(open_finished)) ; /* Will only loop if ring speed mismatch re-open attempted && autosense is on */

    if (readb(init_srb+18) & (1<<3))
        if (olympic_priv->olympic_message_level)
            printk(KERN_INFO "%s: Opened in FDX Mode\n",dev->name);

    if (readb(init_srb+18) & (1<<1))
        olympic_priv->olympic_ring_speed = 100 ;
    else if (readb(init_srb+18) & 1)
        olympic_priv->olympic_ring_speed = 16 ;
    else
        olympic_priv->olympic_ring_speed = 4 ;

    if (olympic_priv->olympic_message_level)
        printk(KERN_INFO "%s: Opened in %d Mbps mode\n",dev->name, olympic_priv->olympic_ring_speed);

    olympic_priv->asb = swab16(readw(init_srb+8));
    olympic_priv->srb = swab16(readw(init_srb+10));
    olympic_priv->arb = swab16(readw(init_srb+12));
    olympic_priv->trb = swab16(readw(init_srb+16));

    olympic_priv->olympic_receive_options = 0x01 ;
    olympic_priv->olympic_copy_all_options = 0 ;

    /* setup rx ring */

    writel((3<<16),olympic_mmio+BMCTL_RWM); /* Ensure end of frame generated interrupts */

    writel(BMCTL_RX_DIS|3,olympic_mmio+BMCTL_RWM); /* Yes, this enables the RX channel */

    for(i=0;i<OLYMPIC_RX_RING_SIZE;i++) {

        struct sk_buff *skb;

        skb=dev_alloc_skb(olympic_priv->pkt_buf_sz);
        if(skb == NULL)
            break;

        skb->dev = dev;

        olympic_priv->olympic_rx_ring[i].buffer = cpu_to_le32(pci_map_single(olympic_priv->pdev,
            skb->data,olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE)) ;
        olympic_priv->olympic_rx_ring[i].res_length = cpu_to_le32(olympic_priv->pkt_buf_sz);
        olympic_priv->rx_ring_skb[i]=skb;
    }

    if (i==0) {
        printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers. Adapter disabled\n",dev->name);
        goto out;
    }

    olympic_priv->rx_ring_dma_addr = pci_map_single(olympic_priv->pdev,olympic_priv->olympic_rx_ring,
        sizeof(struct olympic_rx_desc) * OLYMPIC_RX_RING_SIZE, PCI_DMA_TODEVICE);
    writel(olympic_priv->rx_ring_dma_addr, olympic_mmio+RXDESCQ);
    writel(olympic_priv->rx_ring_dma_addr, olympic_mmio+RXCDA);
    writew(i, olympic_mmio+RXDESCQCNT);

    olympic_priv->rx_status_ring_dma_addr = pci_map_single(olympic_priv->pdev, olympic_priv->olympic_rx_status_ring,
        sizeof(struct olympic_rx_status) * OLYMPIC_RX_RING_SIZE, PCI_DMA_FROMDEVICE);
    writel(olympic_priv->rx_status_ring_dma_addr, olympic_mmio+RXSTATQ);
    writel(olympic_priv->rx_status_ring_dma_addr, olympic_mmio+RXCSA);

    olympic_priv->rx_ring_last_received = OLYMPIC_RX_RING_SIZE - 1;    /* last processed rx status */
    olympic_priv->rx_status_last_received = OLYMPIC_RX_RING_SIZE - 1;

    writew(i, olympic_mmio+RXSTATQCNT);

#if OLYMPIC_DEBUG
    printk("# of rx buffers: %d, RXENQ: %x\n",i, readw(olympic_mmio+RXENQ));
    printk("RXCSA: %x, rx_status_ring[0]: %p\n",readl(olympic_mmio+RXCSA),&olympic_priv->olympic_rx_status_ring[0]);
    printk(" stat_ring[1]: %p, stat_ring[2]: %p, stat_ring[3]: %p\n", &(olympic_priv->olympic_rx_status_ring[1]), &(olympic_priv->olympic_rx_status_ring[2]), &(olympic_priv->olympic_rx_status_ring[3]) );
    printk(" stat_ring[4]: %p, stat_ring[5]: %p, stat_ring[6]: %p\n", &(olympic_priv->olympic_rx_status_ring[4]), &(olympic_priv->olympic_rx_status_ring[5]), &(olympic_priv->olympic_rx_status_ring[6]) );
    printk(" stat_ring[7]: %p\n", &(olympic_priv->olympic_rx_status_ring[7]) );

    printk("RXCDA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCDA),&olympic_priv->olympic_rx_ring[0]);
    printk("Rx_ring_dma_addr = %08x, rx_status_dma_addr = %08x\n",
        olympic_priv->rx_ring_dma_addr,olympic_priv->rx_status_ring_dma_addr) ;
#endif

    writew((((readw(olympic_mmio+RXENQ)) & 0x8000) ^ 0x8000) | i,olympic_mmio+RXENQ);

#if OLYMPIC_DEBUG
    printk("# of rx buffers: %d, RXENQ: %x\n",i, readw(olympic_mmio+RXENQ));
    printk("RXCSA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCSA),&olympic_priv->olympic_rx_status_ring[0]);
    printk("RXCDA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCDA),&olympic_priv->olympic_rx_ring[0]);
#endif

    writel(SISR_RX_STATUS | SISR_RX_NOBUF,olympic_mmio+SISR_MASK_SUM);

    /* setup tx ring */

    writel(BMCTL_TX1_DIS,olympic_mmio+BMCTL_RWM); /* Yes, this enables TX channel 1 */
    for(i=0;i<OLYMPIC_TX_RING_SIZE;i++)
        olympic_priv->olympic_tx_ring[i].buffer=0xdeadbeef;

    olympic_priv->free_tx_ring_entries=OLYMPIC_TX_RING_SIZE;
    olympic_priv->tx_ring_dma_addr = pci_map_single(olympic_priv->pdev,olympic_priv->olympic_tx_ring,
        sizeof(struct olympic_tx_desc) * OLYMPIC_TX_RING_SIZE,PCI_DMA_TODEVICE) ;
    writel(olympic_priv->tx_ring_dma_addr, olympic_mmio+TXDESCQ_1);
    writel(olympic_priv->tx_ring_dma_addr, olympic_mmio+TXCDA_1);
    writew(OLYMPIC_TX_RING_SIZE, olympic_mmio+TXDESCQCNT_1);

    olympic_priv->tx_status_ring_dma_addr = pci_map_single(olympic_priv->pdev, olympic_priv->olympic_tx_status_ring,
        sizeof(struct olympic_tx_status) * OLYMPIC_TX_RING_SIZE, PCI_DMA_FROMDEVICE);
    writel(olympic_priv->tx_status_ring_dma_addr,olympic_mmio+TXSTATQ_1);
    writel(olympic_priv->tx_status_ring_dma_addr,olympic_mmio+TXCSA_1);
    writew(OLYMPIC_TX_RING_SIZE,olympic_mmio+TXSTATQCNT_1);

    olympic_priv->tx_ring_free=0; /* next entry in tx ring to use */
    olympic_priv->tx_ring_last_status=OLYMPIC_TX_RING_SIZE-1; /* last processed tx status */

    writel(0xffffffff, olympic_mmio+EISR_RWM) ; /* clean the eisr */
    writel(0,olympic_mmio+EISR) ;
    writel(EISR_MASK_OPTIONS,olympic_mmio+EISR_MASK) ; /* enables most of the TX error interrupts */
    writel(SISR_TX1_EOF | SISR_ADAPTER_CHECK | SISR_ARB_CMD | SISR_TRB_REPLY | SISR_ASB_FREE | SISR_ERR,olympic_mmio+SISR_MASK_SUM);

#if OLYMPIC_DEBUG
    printk("BMCTL: %x\n",readl(olympic_mmio+BMCTL_SUM));
    printk("SISR MASK: %x\n",readl(olympic_mmio+SISR_MASK));
#endif

    if (olympic_priv->olympic_network_monitor) {
        u8 __iomem *oat;
        u8 __iomem *opt;
        int i;
        u8 addr[6];
        DECLARE_MAC_BUF(mac);
        oat = (olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr);
        opt = (olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr);

        for (i = 0; i < 6; i++)
            addr[i] = readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+i);
        printk("%s: Node Address: %s\n",dev->name, print_mac(mac, addr));
        printk("%s: Functional Address: %02x:%02x:%02x:%02x\n",dev->name,
            readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)),
            readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+1),
            readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+2),
            readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+3));

        for (i = 0; i < 6; i++)
            addr[i] = readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+i);
        printk("%s: NAUN Address: %s\n",dev->name, print_mac(mac, addr));
    }

    netif_start_queue(dev);
    return 0;

out:
    free_irq(dev->irq, dev);
    return -EIO;
}

/*
 *  When we enter the rx routine we do not know how many frames have been
 *  queued on the rx channel. Therefore we start at the next rx status
 *  position and travel around the receive ring until we have completed
 *  all the frames.
 *
 *  This means that we may process the frame before we receive the end
 *  of frame interrupt. This is why we always test the status instead
 *  of blindly processing the next frame.
 *
 *  We also remove the last 4 bytes from the packet as well, these are
 *  just token ring trailer info and upset protocols that don't check
 *  their own length, i.e. SNA.
 *
 */
static void olympic_rx(struct net_device *dev)
{
    struct olympic_private *olympic_priv=netdev_priv(dev);
    u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
    struct olympic_rx_status *rx_status;
    struct olympic_rx_desc *rx_desc ;
    int rx_ring_last_received,length, buffer_cnt, cpy_length, frag_len;
    struct sk_buff *skb, *skb2;
    int i;

    rx_status=&(olympic_priv->olympic_rx_status_ring[(olympic_priv->rx_status_last_received + 1) & (OLYMPIC_RX_RING_SIZE - 1)]) ;

    while (rx_status->status_buffercnt) {
        u32 l_status_buffercnt;

        olympic_priv->rx_status_last_received++ ;
        olympic_priv->rx_status_last_received &= (OLYMPIC_RX_RING_SIZE -1);
#if OLYMPIC_DEBUG
        printk("rx status: %x rx len: %x \n", le32_to_cpu(rx_status->status_buffercnt), le32_to_cpu(rx_status->fragmentcnt_framelen));
#endif
        length = le32_to_cpu(rx_status->fragmentcnt_framelen) & 0xffff;
        buffer_cnt = le32_to_cpu(rx_status->status_buffercnt) & 0xffff;
        i = buffer_cnt ; /* Need buffer_cnt later for rxenq update */
        frag_len = le32_to_cpu(rx_status->fragmentcnt_framelen) >> 16;

#if OLYMPIC_DEBUG
        printk("length: %x, frag_len: %x, buffer_cnt: %x\n", length, frag_len, buffer_cnt);
#endif
        l_status_buffercnt = le32_to_cpu(rx_status->status_buffercnt);
        if(l_status_buffercnt & 0xC0000000) {
            if (l_status_buffercnt & 0x3B000000) {
                if (olympic_priv->olympic_message_level) {
                    if (l_status_buffercnt & (1<<29))  /* Rx Frame Truncated */
                        printk(KERN_WARNING "%s: Rx Frame Truncated \n",dev->name);
                    if (l_status_buffercnt & (1<<28)) /* Rx receive overrun */
                        printk(KERN_WARNING "%s: Rx Frame Receive overrun \n",dev->name);
                    if (l_status_buffercnt & (1<<27)) /* No receive buffers */
                        printk(KERN_WARNING "%s: No receive buffers \n",dev->name);
                    if (l_status_buffercnt & (1<<25)) /* Receive frame error detect */
                        printk(KERN_WARNING "%s: Receive frame error detect \n",dev->name);
                    if (l_status_buffercnt & (1<<24)) /* Received Error Detect */
                        printk(KERN_WARNING "%s: Received Error Detect \n",dev->name);
                }
                olympic_priv->rx_ring_last_received += i ;
                olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ;
                olympic_priv->olympic_stats.rx_errors++;
            } else {

                if (buffer_cnt == 1) {
                    skb = dev_alloc_skb(max_t(int, olympic_priv->pkt_buf_sz,length)) ;
                } else {
                    skb = dev_alloc_skb(length) ;
                }

                if (skb == NULL) {
                    printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. \n",dev->name) ;
                    olympic_priv->olympic_stats.rx_dropped++ ;
                    /* Update counters even though we don't transfer the frame */
                    olympic_priv->rx_ring_last_received += i ;
                    olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ;
                } else {
                    /* Optimise based upon number of buffers used.
                       If only one buffer is used we can simply swap the buffers around.
                       If more than one then we must use the new buffer and copy the information
                       first. Ideally all frames would be in a single buffer, this can be tuned by
                       altering the buffer size. If the length of the packet is less than
                       1500 bytes we're going to copy it over anyway to stop packets getting
                       dropped from sockets with buffers smaller than our pkt_buf_sz. */

                    if (buffer_cnt==1) {
                        olympic_priv->rx_ring_last_received++ ;
                        olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1);
                        rx_ring_last_received = olympic_priv->rx_ring_last_received ;
                        if (length > 1500) {
                            skb2=olympic_priv->rx_ring_skb[rx_ring_last_received] ;
                            /* unmap buffer */
                            pci_unmap_single(olympic_priv->pdev,
                                le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
                                olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
                            skb_put(skb2,length-4);
                            skb2->protocol = tr_type_trans(skb2,dev);
                            olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer =
                                cpu_to_le32(pci_map_single(olympic_priv->pdev, skb->data,
                                olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE));
                            olympic_priv->olympic_rx_ring[rx_ring_last_received].res_length =
                                cpu_to_le32(olympic_priv->pkt_buf_sz);
                            olympic_priv->rx_ring_skb[rx_ring_last_received] = skb ;
                            netif_rx(skb2) ;
                        } else {
                            pci_dma_sync_single_for_cpu(olympic_priv->pdev,
                                le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
                                olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
                            skb_copy_from_linear_data(olympic_priv->rx_ring_skb[rx_ring_last_received],
                                skb_put(skb,length - 4),
                                length - 4);
                            pci_dma_sync_single_for_device(olympic_priv->pdev,
                                le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
                                olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
                            skb->protocol = tr_type_trans(skb,dev) ;
                            netif_rx(skb) ;
                        }
                    } else {
                        do { /* Walk the buffers */
                            olympic_priv->rx_ring_last_received++ ;
                            olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1);
                            rx_ring_last_received = olympic_priv->rx_ring_last_received ;
                            pci_dma_sync_single_for_cpu(olympic_priv->pdev,
                                le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
                                olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
                            rx_desc = &(olympic_priv->olympic_rx_ring[rx_ring_last_received]);
                            cpy_length = (i == 1 ? frag_len : le32_to_cpu(rx_desc->res_length));
                            skb_copy_from_linear_data(olympic_priv->rx_ring_skb[rx_ring_last_received],
                                skb_put(skb, cpy_length),
                                cpy_length);
                            pci_dma_sync_single_for_device(olympic_priv->pdev,
                                le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
                                olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
                        } while (--i) ;
                        skb_trim(skb,skb->len-4) ;
                        skb->protocol = tr_type_trans(skb,dev);
                        netif_rx(skb) ;
                    }
                    dev->last_rx = jiffies ;
                    olympic_priv->olympic_stats.rx_packets++ ;
                    olympic_priv->olympic_stats.rx_bytes += length ;
                } /* if skb == null */
            } /* If status & 0x3b */

        } else { /* if buffercnt & 0xC */
            olympic_priv->rx_ring_last_received += i ;
            olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE - 1) ;
        }

        rx_status->fragmentcnt_framelen = 0 ;
        rx_status->status_buffercnt = 0 ;
        rx_status = &(olympic_priv->olympic_rx_status_ring[(olympic_priv->rx_status_last_received+1) & (OLYMPIC_RX_RING_SIZE -1) ]);

        writew((((readw(olympic_mmio+RXENQ)) & 0x8000) ^ 0x8000) | buffer_cnt , olympic_mmio+RXENQ);
    } /* while */

}

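/*
 * olympic_freemem - free every receive skb still on the rx ring and unmap
 * the rx/tx descriptor and status rings. Called from olympic_close() and
 * from the other adapter teardown paths.
 */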
static void olympic_freemem(struct net_device *dev)
{
    struct olympic_private *olympic_priv=netdev_priv(dev);
    int i;

    for(i=0;i<OLYMPIC_RX_RING_SIZE;i++) {
        if (olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received] != NULL) {
            dev_kfree_skb_irq(olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received]);
            olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received] = NULL;
        }
        if (olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer != 0xdeadbeef) {
            pci_unmap_single(olympic_priv->pdev,
                le32_to_cpu(olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer),
                olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE);
        }
        olympic_priv->rx_status_last_received++;
        olympic_priv->rx_status_last_received&=OLYMPIC_RX_RING_SIZE-1;
    }
    /* unmap rings */
    pci_unmap_single(olympic_priv->pdev, olympic_priv->rx_status_ring_dma_addr,
        sizeof(struct olympic_rx_status) * OLYMPIC_RX_RING_SIZE, PCI_DMA_FROMDEVICE);
    pci_unmap_single(olympic_priv->pdev, olympic_priv->rx_ring_dma_addr,
        sizeof(struct olympic_rx_desc) * OLYMPIC_RX_RING_SIZE, PCI_DMA_TODEVICE);

    pci_unmap_single(olympic_priv->pdev, olympic_priv->tx_status_ring_dma_addr,
        sizeof(struct olympic_tx_status) * OLYMPIC_TX_RING_SIZE, PCI_DMA_FROMDEVICE);
    pci_unmap_single(olympic_priv->pdev, olympic_priv->tx_ring_dma_addr,
        sizeof(struct olympic_tx_desc) * OLYMPIC_TX_RING_SIZE, PCI_DMA_TODEVICE);

    return ;
}

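/*
 * olympic_interrupt - main interrupt handler. Reads and resets the SISR,
 * then services whichever sources are pending: SRB/TRB/ASB replies, tx
 * completions, received frames, ARB commands and adapter check or EISR
 * error conditions.
 */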
static irqreturn_t olympic_interrupt(int irq, void *dev_id)
{
    struct net_device *dev= (struct net_device *)dev_id;
    struct olympic_private *olympic_priv=netdev_priv(dev);
    u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
    u32 sisr;
    u8 __iomem *adapter_check_area ;

    /*
     *  Read sisr but don't reset it yet.
     *  The indication bit may have been set but the interrupt latch
     *  bit may not be set, so we'd lose the interrupt later.
     */
    sisr=readl(olympic_mmio+SISR) ;
    if (!(sisr & SISR_MI)) /* Interrupt isn't for us */
        return IRQ_NONE;
    sisr=readl(olympic_mmio+SISR_RR) ;  /* Read & Reset sisr */

    spin_lock(&olympic_priv->olympic_lock);

    /* Hotswap gives us this on removal */
    if (sisr == 0xffffffff) {
        printk(KERN_WARNING "%s: Hotswap adapter removal.\n",dev->name) ;
        spin_unlock(&olympic_priv->olympic_lock) ;
        return IRQ_NONE;
    }

    if (sisr & (SISR_SRB_REPLY | SISR_TX1_EOF | SISR_RX_STATUS | SISR_ADAPTER_CHECK |
            SISR_ASB_FREE | SISR_ARB_CMD | SISR_TRB_REPLY | SISR_RX_NOBUF | SISR_ERR)) {

        /* If we ever get this the adapter is seriously dead. Only a reset is going to
         * bring it back to life. We're talking pci bus errors and such like :( */
        if((sisr & SISR_ERR) && (readl(olympic_mmio+EISR) & EISR_MASK_OPTIONS)) {
            printk(KERN_ERR "Olympic: EISR Error, EISR=%08x\n",readl(olympic_mmio+EISR)) ;
            printk(KERN_ERR "The adapter must be reset to clear this condition.\n") ;
            printk(KERN_ERR "Please report this error to the driver maintainer and/\n") ;
            printk(KERN_ERR "or the linux-tr mailing list.\n") ;
            wake_up_interruptible(&olympic_priv->srb_wait);
            spin_unlock(&olympic_priv->olympic_lock) ;
            return IRQ_HANDLED;
        } /* SISR_ERR */

        if(sisr & SISR_SRB_REPLY) {
            if(olympic_priv->srb_queued==1) {
                wake_up_interruptible(&olympic_priv->srb_wait);
            } else if (olympic_priv->srb_queued==2) {
                olympic_srb_bh(dev) ;
            }
            olympic_priv->srb_queued=0;
        } /* SISR_SRB_REPLY */

        /* We shouldn't ever miss the Tx interrupt, but you never know, hence the loop to ensure
           we get all tx completions. */
        if (sisr & SISR_TX1_EOF) {
            while(olympic_priv->olympic_tx_status_ring[(olympic_priv->tx_ring_last_status + 1) & (OLYMPIC_TX_RING_SIZE-1)].status) {
                olympic_priv->tx_ring_last_status++;
                olympic_priv->tx_ring_last_status &= (OLYMPIC_TX_RING_SIZE-1);
                olympic_priv->free_tx_ring_entries++;
                olympic_priv->olympic_stats.tx_bytes += olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len;
                olympic_priv->olympic_stats.tx_packets++ ;
                pci_unmap_single(olympic_priv->pdev,
                    le32_to_cpu(olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_last_status].buffer),
                    olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len,PCI_DMA_TODEVICE);
                dev_kfree_skb_irq(olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]);
                olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_last_status].buffer=0xdeadbeef;
                olympic_priv->olympic_tx_status_ring[olympic_priv->tx_ring_last_status].status=0;
            }
            netif_wake_queue(dev);
        } /* SISR_TX1_EOF */

        if (sisr & SISR_RX_STATUS) {
            olympic_rx(dev);
        } /* SISR_RX_STATUS */

        if (sisr & SISR_ADAPTER_CHECK) {
            netif_stop_queue(dev);
            printk(KERN_WARNING "%s: Adapter Check Interrupt Raised, 8 bytes of information follow:\n", dev->name);
            writel(readl(olympic_mmio+LAPWWC),olympic_mmio+LAPA);
            adapter_check_area = olympic_priv->olympic_lap + ((readl(olympic_mmio+LAPWWC)) & (~0xf800)) ;
            printk(KERN_WARNING "%s: Bytes %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",dev->name, readb(adapter_check_area+0), readb(adapter_check_area+1), readb(adapter_check_area+2), readb(adapter_check_area+3), readb(adapter_check_area+4), readb(adapter_check_area+5), readb(adapter_check_area+6), readb(adapter_check_area+7)) ;
            spin_unlock(&olympic_priv->olympic_lock) ;
            return IRQ_HANDLED;
        } /* SISR_ADAPTER_CHECK */

        if (sisr & SISR_ASB_FREE) {
            /* Wake up anything that is waiting for the asb response */
            if (olympic_priv->asb_queued) {
                olympic_asb_bh(dev) ;
            }
        } /* SISR_ASB_FREE */

        if (sisr & SISR_ARB_CMD) {
            olympic_arb_cmd(dev) ;
        } /* SISR_ARB_CMD */

        if (sisr & SISR_TRB_REPLY) {
            /* Wake up anything that is waiting for the trb response */
            if (olympic_priv->trb_queued) {
                wake_up_interruptible(&olympic_priv->trb_wait);
            }
            olympic_priv->trb_queued = 0 ;
        } /* SISR_TRB_REPLY */

        if (sisr & SISR_RX_NOBUF) {
            /* According to the documentation, we don't have to do anything, but trapping it keeps it out of
               /var/log/messages. */
        } /* SISR_RX_NOBUF */
    } else {
        printk(KERN_WARNING "%s: Unexpected interrupt: %x\n",dev->name, sisr);
        printk(KERN_WARNING "%s: SISR_MASK: %x\n",dev->name, readl(olympic_mmio+SISR_MASK)) ;
    } /* One of the interrupts we want */
    writel(SISR_MI,olympic_mmio+SISR_MASK_SUM);

    spin_unlock(&olympic_priv->olympic_lock) ;
    return IRQ_HANDLED;
}

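/*
 * olympic_xmit - queue one frame on tx channel 1. The skb is DMA mapped
 * into the next free descriptor and TXENQ_1 is written (flipping its high
 * bit) to tell the adapter another descriptor is ready. Returns non-zero
 * if the ring is full so the stack will retry later.
 */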
static int olympic_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct olympic_private *olympic_priv=netdev_priv(dev);
    u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
    unsigned long flags ;

    spin_lock_irqsave(&olympic_priv->olympic_lock, flags);

    netif_stop_queue(dev);

    if(olympic_priv->free_tx_ring_entries) {
        olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_free].buffer =
            cpu_to_le32(pci_map_single(olympic_priv->pdev, skb->data, skb->len,PCI_DMA_TODEVICE));
        olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_free].status_length = cpu_to_le32(skb->len | (0x80000000));
        olympic_priv->tx_ring_skb[olympic_priv->tx_ring_free]=skb;
        olympic_priv->free_tx_ring_entries--;

        olympic_priv->tx_ring_free++;
        olympic_priv->tx_ring_free &= (OLYMPIC_TX_RING_SIZE-1);
        writew((((readw(olympic_mmio+TXENQ_1)) & 0x8000) ^ 0x8000) | 1,olympic_mmio+TXENQ_1);
        netif_wake_queue(dev);
        spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
        return 0;
    } else {
        spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
        return 1;
    }

}

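/*
 * olympic_close - take the interface down. Issues the CLOSE SRB and waits
 * (with a timeout) for its completion, frees the rx/tx ring memory, resets
 * the tx/rx FIFOs and busmaster logic and releases the interrupt.
 */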
static int olympic_close(struct net_device *dev)
{
    struct olympic_private *olympic_priv=netdev_priv(dev);
    u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio,*srb;
    unsigned long t,flags;

    DECLARE_WAITQUEUE(wait,current) ;

    netif_stop_queue(dev);

    writel(olympic_priv->srb,olympic_mmio+LAPA);
    srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));

    writeb(SRB_CLOSE_ADAPTER,srb+0);
    writeb(0,srb+1);
    writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);

    add_wait_queue(&olympic_priv->srb_wait,&wait) ;
    set_current_state(TASK_INTERRUPTIBLE) ;

    spin_lock_irqsave(&olympic_priv->olympic_lock,flags);
    olympic_priv->srb_queued=1;

    writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
    spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);

    while(olympic_priv->srb_queued) {

        t = schedule_timeout_interruptible(60*HZ);

        if(signal_pending(current)) {
            printk(KERN_WARNING "%s: SRB timed out.\n",dev->name);
            printk(KERN_WARNING "SISR=%x LISR=%x\n",readl(olympic_mmio+SISR),readl(olympic_mmio+LISR));
            olympic_priv->srb_queued=0;
            break;
        }

        if (t == 0) {
            printk(KERN_WARNING "%s: SRB timed out. May not be fatal. \n",dev->name) ;
        }
        olympic_priv->srb_queued=0;
    }
    remove_wait_queue(&olympic_priv->srb_wait,&wait) ;

    olympic_priv->rx_status_last_received++;
    olympic_priv->rx_status_last_received&=OLYMPIC_RX_RING_SIZE-1;

    olympic_freemem(dev) ;

    /* reset tx/rx fifo's and busmaster logic */

    writel(readl(olympic_mmio+BCTL)|(3<<13),olympic_mmio+BCTL);
    udelay(1);
    writel(readl(olympic_mmio+BCTL)&~(3<<13),olympic_mmio+BCTL);

#if OLYMPIC_DEBUG
    {
        int i ;
        printk("srb(%p): ",srb);
        for(i=0;i<4;i++)
            printk("%x ",readb(srb+i));
        printk("\n");
    }
#endif
    free_irq(dev->irq,dev);

    return 0;

}

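/*
 * olympic_set_rx_mode - update the receive options. Promiscuous mode is
 * toggled with a MODIFY_RECEIVE_OPTIONS SRB; otherwise the multicast list
 * is folded into a 4 byte functional address mask and programmed with a
 * SET_FUNC_ADDRESS SRB. Runs in atomic context, so completion is handled
 * by olympic_srb_bh() (srb_queued == 2).
 */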
static void olympic_set_rx_mode(struct net_device *dev)
{
    struct olympic_private *olympic_priv = netdev_priv(dev);
    u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio ;
    u8 options = 0;
    u8 __iomem *srb;
    struct dev_mc_list *dmi ;
    unsigned char dev_mc_address[4] ;
    int i ;

    writel(olympic_priv->srb,olympic_mmio+LAPA);
    srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));
    options = olympic_priv->olympic_copy_all_options;

    if (dev->flags&IFF_PROMISC)
        options |= 0x61 ;
    else
        options &= ~0x61 ;

    /* Only issue the srb if there is a change in options */

    if ((options ^ olympic_priv->olympic_copy_all_options)) {

        /* Now to issue the srb command to alter the copy.all.options */

        writeb(SRB_MODIFY_RECEIVE_OPTIONS,srb);
        writeb(0,srb+1);
        writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
        writeb(0,srb+3);
        writeb(olympic_priv->olympic_receive_options,srb+4);
        writeb(options,srb+5);

        olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */

        writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);

        olympic_priv->olympic_copy_all_options = options ;

        return ;
    }

    /* Set the functional addresses we need for multicast */

    dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;

    for (i=0,dmi=dev->mc_list;i < dev->mc_count; i++,dmi = dmi->next) {
        dev_mc_address[0] |= dmi->dmi_addr[2] ;
        dev_mc_address[1] |= dmi->dmi_addr[3] ;
        dev_mc_address[2] |= dmi->dmi_addr[4] ;
        dev_mc_address[3] |= dmi->dmi_addr[5] ;
    }

    writeb(SRB_SET_FUNC_ADDRESS,srb+0);
    writeb(0,srb+1);
    writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
    writeb(0,srb+3);
    writeb(0,srb+4);
    writeb(0,srb+5);
    writeb(dev_mc_address[0],srb+6);
    writeb(dev_mc_address[1],srb+7);
    writeb(dev_mc_address[2],srb+8);
    writeb(dev_mc_address[3],srb+9);

    olympic_priv->srb_queued = 2 ;
    writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);

}

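/*
 * olympic_srb_bh - run from the interrupt handler when an SRB issued
 * without a waiter (srb_queued == 2) completes; decodes the return code
 * of the SRB command and logs anything unexpected.
 */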
static void olympic_srb_bh(struct net_device *dev)
{
	struct olympic_private *olympic_priv = netdev_priv(dev);
	u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio ;
	u8 __iomem *srb;

	writel(olympic_priv->srb,olympic_mmio+LAPA);
	srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));

	switch (readb(srb)) {

		/* SRB_MODIFY_RECEIVE_OPTIONS i.e. set_multicast_list options (promiscuous)
		 * At some point we should do something if we get an error, such as
		 * resetting the IFF_PROMISC flag in dev
		 */

		case SRB_MODIFY_RECEIVE_OPTIONS:
			switch (readb(srb+2)) {
				case 0x01:
					printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name) ;
					break ;
				case 0x04:
					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name);
					break ;
				default:
					if (olympic_priv->olympic_message_level)
						printk(KERN_WARNING "%s: Receive Options Modified to %x,%x\n",dev->name,olympic_priv->olympic_copy_all_options, olympic_priv->olympic_receive_options) ;
					break ;
			} /* switch srb[2] */
			break ;

		/* SRB_SET_GROUP_ADDRESS - Multicast group setting
		 */

		case SRB_SET_GROUP_ADDRESS:
			switch (readb(srb+2)) {
				case 0x00:
					break ;
				case 0x01:
					printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
					break ;
				case 0x04:
					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name);
					break ;
				case 0x3c:
					printk(KERN_WARNING "%s: Group/Functional address indicator bits not set correctly\n",dev->name) ;
					break ;
				case 0x3e: /* If we ever implement individual multicast addresses, will need to deal with this */
					printk(KERN_WARNING "%s: Group address registers full\n",dev->name) ;
					break ;
				case 0x55:
					printk(KERN_INFO "%s: Group Address already set.\n",dev->name) ;
					break ;
				default:
					break ;
			} /* switch srb[2] */
			break ;

		/* SRB_RESET_GROUP_ADDRESS - Remove a multicast address from group list
		 */

		case SRB_RESET_GROUP_ADDRESS:
			switch (readb(srb+2)) {
				case 0x00:
					break ;
				case 0x01:
					printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
					break ;
				case 0x04:
					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
					break ;
				case 0x39: /* Must deal with this if individual multicast addresses used */
					printk(KERN_INFO "%s: Group address not found \n",dev->name);
					break ;
				default:
					break ;
			} /* switch srb[2] */
			break ;

		/* SRB_SET_FUNC_ADDRESS - Called by the set_rx_mode
		 */

		case SRB_SET_FUNC_ADDRESS:
			switch (readb(srb+2)) {
				case 0x00:
					if (olympic_priv->olympic_message_level)
						printk(KERN_INFO "%s: Functional Address Mask Set \n",dev->name) ;
					break ;
				case 0x01:
					printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
					break ;
				case 0x04:
					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
					break ;
				default:
					break ;
			} /* switch srb[2] */
			break ;

		/* SRB_READ_LOG - Read and reset the adapter error counters
		 */

		case SRB_READ_LOG:
			switch (readb(srb+2)) {
				case 0x00:
					if (olympic_priv->olympic_message_level)
						printk(KERN_INFO "%s: Read Log issued\n",dev->name) ;
					break ;
				case 0x01:
					printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
					break ;
				case 0x04:
					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
					break ;

			} /* switch srb[2] */
			break ;

		/* SRB_READ_SR_COUNTERS - Read and reset the source routing bridge related counters */

		case SRB_READ_SR_COUNTERS:
			switch (readb(srb+2)) {
				case 0x00:
					if (olympic_priv->olympic_message_level)
						printk(KERN_INFO "%s: Read Source Routing Counters issued\n",dev->name) ;
					break ;
				case 0x01:
					printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
					break ;
				case 0x04:
					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
					break ;
				default:
					break ;
			} /* switch srb[2] */
			break ;

		default:
			printk(KERN_WARNING "%s: Unrecognized srb bh return value.\n",dev->name);
			break ;
	} /* switch srb[0] */

}

static struct net_device_stats * olympic_get_stats(struct net_device *dev)
{
	struct olympic_private *olympic_priv ;
	olympic_priv=netdev_priv(dev);
	return (struct net_device_stats *) &olympic_priv->olympic_stats;
}

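/*
 * Store a new locally administered address (LAA).  Refused with -EIO while
 * the interface is running; the address is kept in olympic_priv->olympic_laa.
 */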
static int olympic_set_mac_address (struct net_device *dev, void *addr)
{
	struct sockaddr *saddr = addr ;
	struct olympic_private *olympic_priv = netdev_priv(dev);

	if (netif_running(dev)) {
		printk(KERN_WARNING "%s: Cannot set mac/laa address while card is open\n", dev->name) ;
		return -EIO ;
	}

	memcpy(olympic_priv->olympic_laa, saddr->sa_data,dev->addr_len) ;

	if (olympic_priv->olympic_message_level) {
		printk(KERN_INFO "%s: MAC/LAA Set to = %x.%x.%x.%x.%x.%x\n",dev->name, olympic_priv->olympic_laa[0],
			olympic_priv->olympic_laa[1], olympic_priv->olympic_laa[2],
			olympic_priv->olympic_laa[3], olympic_priv->olympic_laa[4],
			olympic_priv->olympic_laa[5]);
	}

	return 0 ;
}

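/*
 * Service an ARB request from the adapter.  Two cases: a received MAC frame,
 * which is copied out of adapter memory, handed to netif_rx() and
 * acknowledged through the ASB, or a lan.change.status event, whose status
 * bits are reported and may trigger READ.LOG / READ.SR.COUNTERS SRBs.
 */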
static void olympic_arb_cmd(struct net_device *dev)
{
	struct olympic_private *olympic_priv = netdev_priv(dev);
	u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
	u8 __iomem *arb_block, *asb_block, *srb ;
	u8 header_len ;
	u16 frame_len, buffer_len ;
	struct sk_buff *mac_frame ;
	u8 __iomem *buf_ptr ;
	u8 __iomem *frame_data ;
	u16 buff_off ;
	u16 lan_status = 0, lan_status_diff ; /* Initialize to stop compiler warning */
	u8 fdx_prot_error ;
	u16 next_ptr;

	arb_block = (olympic_priv->olympic_lap + olympic_priv->arb) ;
	asb_block = (olympic_priv->olympic_lap + olympic_priv->asb) ;
	srb = (olympic_priv->olympic_lap + olympic_priv->srb) ;

	if (readb(arb_block+0) == ARB_RECEIVE_DATA) { /* Receive.data, MAC frames */

		header_len = readb(arb_block+8) ; /* 802.5 Token-Ring Header Length */
		frame_len = swab16(readw(arb_block + 10)) ;

		buff_off = swab16(readw(arb_block + 6)) ;

		buf_ptr = olympic_priv->olympic_lap + buff_off ;

#if OLYMPIC_DEBUG
		{
			int i;
			frame_data = buf_ptr+offsetof(struct mac_receive_buffer,frame_data) ;

			for (i=0 ; i < 14 ; i++) {
				printk("Loc %d = %02x\n",i,readb(frame_data + i));
			}

			printk("next %04x, fs %02x, len %04x \n",readw(buf_ptr+offsetof(struct mac_receive_buffer,next)), readb(buf_ptr+offsetof(struct mac_receive_buffer,frame_status)), readw(buf_ptr+offsetof(struct mac_receive_buffer,buffer_length)));
		}
#endif
		mac_frame = dev_alloc_skb(frame_len) ;
		if (!mac_frame) {
			printk(KERN_WARNING "%s: Memory squeeze, dropping frame.\n", dev->name);
			goto drop_frame;
		}

		/* Walk the buffer chain, creating the frame */

		do {
			frame_data = buf_ptr+offsetof(struct mac_receive_buffer,frame_data) ;
			buffer_len = swab16(readw(buf_ptr+offsetof(struct mac_receive_buffer,buffer_length)));
			memcpy_fromio(skb_put(mac_frame, buffer_len), frame_data , buffer_len ) ;
			next_ptr=readw(buf_ptr+offsetof(struct mac_receive_buffer,next));
		} while (next_ptr && (buf_ptr=olympic_priv->olympic_lap + ntohs(next_ptr)));

		mac_frame->protocol = tr_type_trans(mac_frame, dev);

		if (olympic_priv->olympic_network_monitor) {
			struct trh_hdr *mac_hdr;
			DECLARE_MAC_BUF(mac);
			printk(KERN_WARNING "%s: Received MAC Frame, details: \n",dev->name);
			mac_hdr = tr_hdr(mac_frame);
			printk(KERN_WARNING "%s: MAC Frame Dest. Addr: %s\n",
			       dev->name, print_mac(mac, mac_hdr->daddr));
			printk(KERN_WARNING "%s: MAC Frame Srce. Addr: %s\n",
			       dev->name, print_mac(mac, mac_hdr->saddr));
		}
		netif_rx(mac_frame);
		dev->last_rx = jiffies;

drop_frame:
		/* Now tell the card we have dealt with the received frame */

		/* Set LISR Bit 1 */
		writel(LISR_ARB_FREE,olympic_priv->olympic_mmio + LISR_SUM);

		/* Is the ASB free ? */

		if (readb(asb_block + 2) != 0xff) {
			olympic_priv->asb_queued = 1 ;
			writel(LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM);
			return ;
			/* Drop out and wait for the bottom half to be run */
		}

		writeb(ASB_RECEIVE_DATA,asb_block); /* Receive data */
		writeb(OLYMPIC_CLEAR_RET_CODE,asb_block+2); /* Necessary ?? */
		writeb(readb(arb_block+6),asb_block+6); /* Must send the address back to the adapter */
		writeb(readb(arb_block+7),asb_block+7); /* To let it know we have dealt with the data */

		writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM);

		olympic_priv->asb_queued = 2 ;

		return ;

	} else if (readb(arb_block) == ARB_LAN_CHANGE_STATUS) { /* Lan.change.status */
		lan_status = swab16(readw(arb_block+6));
		fdx_prot_error = readb(arb_block+8) ;

		/* Issue ARB Free */
		writel(LISR_ARB_FREE,olympic_priv->olympic_mmio+LISR_SUM);

		lan_status_diff = olympic_priv->olympic_lan_status ^ lan_status ;

		if (lan_status_diff & (LSC_LWF | LSC_ARW | LSC_FPE | LSC_RR) ) {
			if (lan_status_diff & LSC_LWF)
				printk(KERN_WARNING "%s: Short circuit detected on the lobe\n",dev->name);
			if (lan_status_diff & LSC_ARW)
				printk(KERN_WARNING "%s: Auto removal error\n",dev->name);
			if (lan_status_diff & LSC_FPE)
				printk(KERN_WARNING "%s: FDX Protocol Error\n",dev->name);
			if (lan_status_diff & LSC_RR)
				printk(KERN_WARNING "%s: Force remove MAC frame received\n",dev->name);

			/* Adapter has been closed by the hardware */

			/* reset tx/rx fifo's and busmaster logic */

			writel(readl(olympic_mmio+BCTL)|(3<<13),olympic_mmio+BCTL);
			udelay(1);
			writel(readl(olympic_mmio+BCTL)&~(3<<13),olympic_mmio+BCTL);
			netif_stop_queue(dev);
			olympic_priv->srb = readw(olympic_priv->olympic_lap + LAPWWO) ;
			printk(KERN_WARNING "%s: Adapter has been closed \n", dev->name) ;
		} /* If serious error */

		if (olympic_priv->olympic_message_level) {
			if (lan_status_diff & LSC_SIG_LOSS)
				printk(KERN_WARNING "%s: No receive signal detected \n", dev->name) ;
			if (lan_status_diff & LSC_HARD_ERR)
				printk(KERN_INFO "%s: Beaconing \n",dev->name);
			if (lan_status_diff & LSC_SOFT_ERR)
				printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame \n",dev->name);
			if (lan_status_diff & LSC_TRAN_BCN)
				printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n",dev->name);
			if (lan_status_diff & LSC_SS)
				printk(KERN_INFO "%s: Single Station on the ring \n", dev->name);
			if (lan_status_diff & LSC_RING_REC)
				printk(KERN_INFO "%s: Ring recovery ongoing\n",dev->name);
			if (lan_status_diff & LSC_FDX_MODE)
				printk(KERN_INFO "%s: Operating in FDX mode\n",dev->name);
		}

		if (lan_status_diff & LSC_CO) {

			if (olympic_priv->olympic_message_level)
				printk(KERN_INFO "%s: Counter Overflow \n", dev->name);

			/* Issue READ.LOG command */

			writeb(SRB_READ_LOG, srb);
			writeb(0,srb+1);
			writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
			writeb(0,srb+3);
			writeb(0,srb+4);
			writeb(0,srb+5);

			olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */

			writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);

		}

		if (lan_status_diff & LSC_SR_CO) {

			if (olympic_priv->olympic_message_level)
				printk(KERN_INFO "%s: Source routing counters overflow\n", dev->name);

			/* Issue a READ.SR.COUNTERS */

			writeb(SRB_READ_SR_COUNTERS,srb);
			writeb(0,srb+1);
			writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
			writeb(0,srb+3);

			olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */

			writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);

		}

		olympic_priv->olympic_lan_status = lan_status ;

	} /* Lan.change.status */
	else
		printk(KERN_WARNING "%s: Unknown arb command \n", dev->name);
}

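/*
 * ASB bottom half.  If the ASB was busy when a frame arrived (asb_queued == 1)
 * the deferred receive.data response is issued now; if a response was already
 * outstanding (asb_queued == 2) its return code is checked and reported.
 */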
static void olympic_asb_bh(struct net_device *dev)
{
	struct olympic_private *olympic_priv = netdev_priv(dev);
	u8 __iomem *arb_block, *asb_block ;

	arb_block = (olympic_priv->olympic_lap + olympic_priv->arb) ;
	asb_block = (olympic_priv->olympic_lap + olympic_priv->asb) ;

	if (olympic_priv->asb_queued == 1) {   /* Dropped through the first time */

		writeb(ASB_RECEIVE_DATA,asb_block); /* Receive data */
		writeb(OLYMPIC_CLEAR_RET_CODE,asb_block+2); /* Necessary ?? */
		writeb(readb(arb_block+6),asb_block+6); /* Must send the address back to the adapter */
		writeb(readb(arb_block+7),asb_block+7); /* To let it know we have dealt with the data */

		writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM);
		olympic_priv->asb_queued = 2 ;

		return ;
	}

	if (olympic_priv->asb_queued == 2) {
		switch (readb(asb_block+2)) {
			case 0x01:
				printk(KERN_WARNING "%s: Unrecognized command code \n", dev->name);
				break ;
			case 0x26:
				printk(KERN_WARNING "%s: Unrecognized buffer address \n", dev->name);
				break ;
			case 0xFF:
				/* Valid response, everything should be ok again */
				break ;
			default:
				printk(KERN_WARNING "%s: Invalid return code in asb\n",dev->name);
				break ;
		}
	}
	olympic_priv->asb_queued = 0 ;
}

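/*
 * Change the MTU.  The upper bound depends on the ring speed: 4500 bytes at
 * 4 Mbps, 18000 bytes otherwise; values below 100 are rejected.  pkt_buf_sz
 * is updated to mtu + TR_HLEN to match.
 */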
static int olympic_change_mtu(struct net_device *dev, int mtu)
{
	struct olympic_private *olympic_priv = netdev_priv(dev);
	u16 max_mtu ;

	if (olympic_priv->olympic_ring_speed == 4)
		max_mtu = 4500 ;
	else
		max_mtu = 18000 ;

	if (mtu > max_mtu)
		return -EINVAL ;
	if (mtu < 100)
		return -EINVAL ;

	dev->mtu = mtu ;
	olympic_priv->pkt_buf_sz = mtu + TR_HLEN ;

	return 0 ;
}

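/*
 * /proc read handler: formats the adapter address table and the token ring
 * parameters table, read directly from LAP memory, into the caller's buffer.
 */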
static int olympic_proc_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
{
	struct net_device *dev = (struct net_device *)data ;
	struct olympic_private *olympic_priv=netdev_priv(dev);
	u8 __iomem *oat = (olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr) ;
	u8 __iomem *opt = (olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr) ;
	int size = 0 ;
	int len=0;
	off_t begin=0;
	off_t pos=0;
	u8 addr[6];
	u8 addr2[6];
	int i;
	DECLARE_MAC_BUF(mac);
	DECLARE_MAC_BUF(mac2);

	size = sprintf(buffer,
		"IBM Pit/Pit-Phy/Olympic Chipset Token Ring Adapter %s\n",dev->name);
	size += sprintf(buffer+size, "\n%6s: Adapter Address : Node Address : Functional Addr\n",
		dev->name);

	for (i = 0 ; i < 6 ; i++)
		addr[i] = readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr) + i);

	size += sprintf(buffer+size, "%6s: %s : %s : %02x:%02x:%02x:%02x\n",
		dev->name,
		print_mac(mac, dev->dev_addr),
		print_mac(mac2, addr),
		readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)),
		readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+1),
		readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+2),
		readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+3));

	size += sprintf(buffer+size, "\n%6s: Token Ring Parameters Table:\n", dev->name);

	size += sprintf(buffer+size, "%6s: Physical Addr : Up Node Address : Poll Address : AccPri : Auth Src : Att Code :\n",
		dev->name) ;

	for (i = 0 ; i < 6 ; i++)
		addr[i] = readb(opt+offsetof(struct olympic_parameters_table, up_node_addr) + i);
	for (i = 0 ; i < 6 ; i++)
		addr2[i] = readb(opt+offsetof(struct olympic_parameters_table, poll_addr) + i);

	size += sprintf(buffer+size, "%6s: %02x:%02x:%02x:%02x : %s : %s : %04x : %04x : %04x :\n",
		dev->name,
		readb(opt+offsetof(struct olympic_parameters_table, phys_addr)),
		readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+1),
		readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+2),
		readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+3),
		print_mac(mac, addr),
		print_mac(mac2, addr2),
		swab16(readw(opt+offsetof(struct olympic_parameters_table, acc_priority))),
		swab16(readw(opt+offsetof(struct olympic_parameters_table, auth_source_class))),
		swab16(readw(opt+offsetof(struct olympic_parameters_table, att_code))));

	size += sprintf(buffer+size, "%6s: Source Address : Bcn T : Maj. V : Lan St : Lcl Rg : Mon Err : Frame Correl : \n",
		dev->name) ;

	for (i = 0 ; i < 6 ; i++)
		addr[i] = readb(opt+offsetof(struct olympic_parameters_table, source_addr) + i);
	size += sprintf(buffer+size, "%6s: %s : %04x : %04x : %04x : %04x : %04x : %04x : \n",
		dev->name,
		print_mac(mac, addr),
		swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_type))),
		swab16(readw(opt+offsetof(struct olympic_parameters_table, major_vector))),
		swab16(readw(opt+offsetof(struct olympic_parameters_table, lan_status))),
		swab16(readw(opt+offsetof(struct olympic_parameters_table, local_ring))),
		swab16(readw(opt+offsetof(struct olympic_parameters_table, mon_error))),
		swab16(readw(opt+offsetof(struct olympic_parameters_table, frame_correl))));

	size += sprintf(buffer+size, "%6s: Beacon Details : Tx : Rx : NAUN Node Address : NAUN Node Phys : \n",
		dev->name) ;

	for (i = 0 ; i < 6 ; i++)
		addr[i] = readb(opt+offsetof(struct olympic_parameters_table, beacon_naun) + i);
	size += sprintf(buffer+size, "%6s: : %02x : %02x : %s : %02x:%02x:%02x:%02x : \n",
		dev->name,
		swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_transmit))),
		swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_receive))),
		print_mac(mac, addr),
		readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)),
		readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+1),
		readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+2),
		readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+3));

	len=size;
	pos=begin+size;
	if (pos<offset) {
		len=0;
		begin=pos;
	}
	*start=buffer+(offset-begin);	/* Start of wanted data */
	len-=(offset-begin);		/* Start slop */
	if(len>length)
		len=length;		/* Ending slop */
	return len;
}

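/*
 * PCI removal: drop the per-adapter proc entry (if network monitoring was
 * enabled), unregister the net_device, unmap both MMIO regions and release
 * the PCI resources.
 */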
static void __devexit olympic_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev) ;
	struct olympic_private *olympic_priv=netdev_priv(dev);

	if (olympic_priv->olympic_network_monitor) {
		char proc_name[20] ;
		strcpy(proc_name,"olympic_") ;
		strcat(proc_name,dev->name) ;
		remove_proc_entry(proc_name,init_net.proc_net);
	}
	unregister_netdev(dev) ;
	iounmap(olympic_priv->olympic_mmio) ;
	iounmap(olympic_priv->olympic_lap) ;
	pci_release_regions(pdev) ;
	pci_set_drvdata(pdev,NULL) ;
	free_netdev(dev) ;
}

static struct pci_driver olympic_driver = {
	.name		= "olympic",
	.id_table	= olympic_pci_tbl,
	.probe		= olympic_probe,
	.remove		= __devexit_p(olympic_remove_one),
};

static int __init olympic_pci_init(void)
{
	return pci_register_driver(&olympic_driver) ;
}

static void __exit olympic_pci_cleanup(void)
{
	pci_unregister_driver(&olympic_driver) ;
}


module_init(olympic_pci_init) ;
module_exit(olympic_pci_cleanup) ;

MODULE_LICENSE("GPL");