/*
 * ni6510 (am7990 'lance' chip) driver for Linux-net-3
 * BETAcode v0.71 (96/09/29) for 2.0.0 (or later)
 * copyrights (c) 1994,1995,1996 by M.Hipp
 *
 * This driver can handle the old ni6510 board and the newer ni6510
 * EtherBlaster. (It probably also works with every fully NE2100
 * compatible card.)
 *
 * To compile as a module, type:
 *     gcc -O2 -fomit-frame-pointer -m486 -D__KERNEL__ -DMODULE -c ni65.c
 * driver probes: io: 0x360,0x300,0x320,0x340 / dma: 3,5,6,7
 *
 * This is an extension to the Linux operating system, and is covered by the
 * same GNU General Public License that covers the Linux kernel.
 *
 * comments/bugs/suggestions can be sent to:
 *   Michael Hipp
 *   email: hippm@informatik.uni-tuebingen.de
 *
 * sources:
 *   some things are from the 'ni6510-packet-driver for dos by Russ Nelson'
 *   and from the original drivers by D. Becker
 *
 * known problems:
 *   - on some PCI boards (including my own) the card/board/ISA-bridge has
 *     problems with bus master DMA. This results in lots of overruns.
 *     It may help to '#define RCV_PARANOIA_CHECK' or to #undef the
 *     XMT_VIA_SKB and RCV_VIA_SKB options .. this reduces driver performance.
 *     Or just play with your BIOS options to optimize ISA-DMA access.
 *     You may also want to play with the LOW_PERFORMANCE and MID_PERFORMANCE
 *     defines -> please report your experience to me then
 *   - Harald reported that on ASUS SP3G mainboards you should use
 *     the 'optimal settings' from the user's manual on page 3-12!
 *
 * credits:
 *   thanks to Jason Sullivan for sending me a ni6510 card!
 *   lots of debug runs with ASUS SP3G boards (Intel Saturn) by Harald Koenig
 *
 * simple performance test: (486DX-33/Ni6510-EB receives from 486DX4-100/Ni6510-EB)
 *    average: FTP -> 8384421 bytes received in 8.5 seconds
 *           (no RCV_VIA_SKB, no XMT_VIA_SKB, PARANOIA_CHECK, 4 XMIT BUFFS, 8 RCV BUFFS)
 *    peak: FTP -> 8384421 bytes received in 7.5 seconds
 *           (RCV_VIA_SKB, XMT_VIA_SKB, no PARANOIA_CHECK, 1(!) XMIT BUF, 16 RCV BUFFS)
 */

/*
 * 99.Jun.8: added support for /proc/net/dev byte count for xosview (HK)
 * 96.Sept.29: virt_to_bus stuff added for new memory model
 * 96.April.29: added Harald Koenig's patches (MH)
 * 96.April.13: enhanced error handling .. more tests (MH)
 * 96.April.5/6: a lot of performance tests. Got it stable now (hopefully) (MH)
 * 96.April.1: (no joke ;) .. added EtherBlaster and module support (MH)
 * 96.Feb.19: fixed a few bugs .. cleanups .. tested for 1.3.66 (MH)
 *            hopefully no more 16MB limit
 *
 * 95.Nov.18: multicast tweaked (AC).
 *
 * 94.Aug.22: changes in xmit_intr (ack more than one xmitted-packet), ni65_send_packet (p->lock) (MH)
 *
 * 94.July.16: fixed bugs in recv_skb and skb-alloc stuff (MH)
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <linux/bitops.h>

#include <asm/io.h>
#include <asm/dma.h>

#include "ni65.h"

/*
 * the current setting gives acceptable performance.
 * for 'RCV_PARANOIA_CHECK' read the 'known problems' part in
 * the header of this file.
 * 'invert' the defines for maximum performance. This may cause DMA problems
 * on some boards (e.g. on my ASUS SP3G)
 */
#undef XMT_VIA_SKB
#undef RCV_VIA_SKB
#define RCV_PARANOIA_CHECK

#define MID_PERFORMANCE

#if   defined( LOW_PERFORMANCE )
 static int isa0=7,isa1=7,csr80=0x0c10;
#elif defined( MID_PERFORMANCE )
 static int isa0=5,isa1=5,csr80=0x2810;
#else   /* high performance */
 static int isa0=4,isa1=4,csr80=0x0017;
#endif

/*
 * a few card/vendor specific defines
 */
#define NI65_ID0    0x00
#define NI65_ID1    0x55
#define NI65_EB_ID0 0x52
#define NI65_EB_ID1 0x44
#define NE2100_ID0  0x57
#define NE2100_ID1  0x57

#define PORT p->cmdr_addr

/*
 * buffer configuration
 */
#if 1
#define RMDNUM 16
#define RMDNUMMASK 0x80000000
#else
#define RMDNUM 8
#define RMDNUMMASK 0x60000000 /* log2(RMDNUM)<<29 */
#endif

#if 0
#define TMDNUM 1
#define TMDNUMMASK 0x00000000
#else
#define TMDNUM 4
#define TMDNUMMASK 0x40000000 /* log2(TMDNUM)<<29 */
#endif

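/*
 * the xxxNUMMASK values above encode log2(ring length) in the three
 * most significant bits of the init block ring pointers (see
 * ni65_init_lance() below, where they are or'ed into trp/rrp).
 */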
/* slightly oversized */
#define R_BUF_SIZE 1544
#define T_BUF_SIZE 1544

/*
 * lance register defines
 */
#define L_DATAREG 0x00
#define L_ADDRREG 0x02
#define L_RESET   0x04
#define L_CONFIG  0x05
#define L_BUSIF   0x06

/*
 * to access the lance/am7990 registers, you first write the register
 * number into L_ADDRREG; then you can access it through L_DATAREG
 */
#define CSR0  0x00
#define CSR1  0x01
#define CSR2  0x02
#define CSR3  0x03

#define INIT_RING_BEFORE_START  0x1
#define FULL_RESET_ON_ERROR     0x2

#if 0
#define writereg(val,reg) {outw(reg,PORT+L_ADDRREG);inw(PORT+L_ADDRREG); \
                           outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);}
#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_ADDRREG),\
                       inw(PORT+L_DATAREG))
#if 0
#define writedatareg(val) {outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);}
#else
#define writedatareg(val) {  writereg(val,CSR0); }
#endif
#else
#define writereg(val,reg) {outw(reg,PORT+L_ADDRREG);outw(val,PORT+L_DATAREG);}
#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_DATAREG))
#define writedatareg(val) { writereg(val,CSR0); }
#endif

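/*
 * example: readreg(CSR0) first selects the register by writing its
 * number to the address port (RAP, PORT+L_ADDRREG) and then reads it
 * through the data port (RDP, PORT+L_DATAREG). The disabled variant
 * above adds dummy inw() readbacks, presumably as an I/O settle delay
 * for slow boards.
 */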
static unsigned char ni_vendor[] = { 0x02,0x07,0x01 };

static struct card {
        unsigned char id0,id1;
        short id_offset;
        short total_size;
        short cmd_offset;
        short addr_offset;
        unsigned char *vendor_id;
        char *cardname;
        unsigned long config;
} cards[] = {
        {
                .id0         = NI65_ID0,
                .id1         = NI65_ID1,
                .id_offset   = 0x0e,
                .total_size  = 0x10,
                .cmd_offset  = 0x0,
                .addr_offset = 0x8,
                .vendor_id   = ni_vendor,
                .cardname    = "ni6510",
                .config      = 0x1,
        },
        {
                .id0         = NI65_EB_ID0,
                .id1         = NI65_EB_ID1,
                .id_offset   = 0x0e,
                .total_size  = 0x18,
                .cmd_offset  = 0x10,
                .addr_offset = 0x0,
                .vendor_id   = ni_vendor,
                .cardname    = "ni6510 EtherBlaster",
                .config      = 0x2,
        },
        {
                .id0         = NE2100_ID0,
                .id1         = NE2100_ID1,
                .id_offset   = 0x0e,
                .total_size  = 0x18,
                .cmd_offset  = 0x10,
                .addr_offset = 0x0,
                .vendor_id   = NULL,
                .cardname    = "generic NE2100",
                .config      = 0x0,
        },
};
#define NUM_CARDS 3

struct priv
{
        struct rmd rmdhead[RMDNUM];
        struct tmd tmdhead[TMDNUM];
        struct init_block ib;
        int rmdnum;
        int tmdnum,tmdlast;
#ifdef RCV_VIA_SKB
        struct sk_buff *recv_skb[RMDNUM];
#else
        void *recvbounce[RMDNUM];
#endif
#ifdef XMT_VIA_SKB
        struct sk_buff *tmd_skb[TMDNUM];
#endif
        void *tmdbounce[TMDNUM];
        int tmdbouncenum;
        int lock,xmit_queued;
        struct net_device_stats stats;
        void *self;
        int cmdr_addr;
        int cardno;
        int features;
        spinlock_t ring_lock;
};

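/*
 * ring bookkeeping:
 *   tmdnum  - next transmit descriptor to be filled by ni65_send_packet()
 *   tmdlast - oldest transmit descriptor not yet reaped by ni65_xmit_intr()
 *             (xmit_queued is set while descriptors are outstanding)
 *   rmdnum  - next receive descriptor to be checked by ni65_recv_intr()
 */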
static int  ni65_probe1(struct net_device *dev,int);
static irqreturn_t ni65_interrupt(int irq, void * dev_id);
static void ni65_recv_intr(struct net_device *dev,int);
static void ni65_xmit_intr(struct net_device *dev,int);
static int  ni65_open(struct net_device *dev);
static int  ni65_lance_reinit(struct net_device *dev);
static void ni65_init_lance(struct priv *p,unsigned char*,int,int);
static int  ni65_send_packet(struct sk_buff *skb, struct net_device *dev);
static void ni65_timeout(struct net_device *dev);
static int  ni65_close(struct net_device *dev);
static int  ni65_alloc_buffer(struct net_device *dev);
static void ni65_free_buffer(struct priv *p);
static struct net_device_stats *ni65_get_stats(struct net_device *);
static void set_multicast_list(struct net_device *dev);

static int irqtab[] __initdata = { 9,12,15,5 }; /* irq config-translate */
static int dmatab[] __initdata = { 0,3,5,6,7 }; /* dma config-translate and autodetect */

static int debuglevel = 1;

/*
 * set 'performance' registers .. we must STOP lance for that
 */
static void ni65_set_performance(struct priv *p)
{
        writereg(CSR0_STOP | CSR0_CLRALL,CSR0); /* STOP */

        if( !(cards[p->cardno].config & 0x02) )
                return;

        outw(80,PORT+L_ADDRREG);
        if(inw(PORT+L_ADDRREG) != 80)
                return;

        writereg( (csr80 & 0x3fff) ,80); /* FIFO watermarks */
        outw(0,PORT+L_ADDRREG);
        outw((short)isa0,PORT+L_BUSIF); /* write ISA 0: DMA_R : isa0 * 50ns */
        outw(1,PORT+L_ADDRREG);
        outw((short)isa1,PORT+L_BUSIF); /* write ISA 1: DMA_W : isa1 * 50ns */

        outw(CSR0,PORT+L_ADDRREG);      /* switch back to CSR0 */
}

/*
 * open interface (up)
 */
static int ni65_open(struct net_device *dev)
{
        struct priv *p = (struct priv *) dev->priv;
        int irqval = request_irq(dev->irq, &ni65_interrupt,0,
                        cards[p->cardno].cardname,dev);
        if (irqval) {
                printk(KERN_ERR "%s: unable to get IRQ %d (irqval=%d).\n",
                          dev->name,dev->irq, irqval);
                return -EAGAIN;
        }

        if(ni65_lance_reinit(dev))
        {
                netif_start_queue(dev);
                return 0;
        }
        else
        {
                free_irq(dev->irq,dev);
                return -EAGAIN;
        }
}

/*
 * close interface (down)
 */
static int ni65_close(struct net_device *dev)
{
        struct priv *p = (struct priv *) dev->priv;

        netif_stop_queue(dev);

        outw(inw(PORT+L_RESET),PORT+L_RESET); /* that's the hard way */

#ifdef XMT_VIA_SKB
        {
                int i;
                for(i=0;i<TMDNUM;i++)
                {
                        if(p->tmd_skb[i]) {
                                dev_kfree_skb(p->tmd_skb[i]);
                                p->tmd_skb[i] = NULL;
                        }
                }
        }
#endif
        free_irq(dev->irq,dev);
        return 0;
}

static void cleanup_card(struct net_device *dev)
{
        struct priv *p = (struct priv *) dev->priv;
        disable_dma(dev->dma);
        free_dma(dev->dma);
        release_region(dev->base_addr, cards[p->cardno].total_size);
        ni65_free_buffer(p);
}

/* set io, irq and dma here, or pass them as parameters when loading the module */
static int irq;
static int io;
static int dma;

/*
 * probe for the card (not the lance chip itself)
 */
struct net_device * __init ni65_probe(int unit)
{
        struct net_device *dev = alloc_etherdev(0);
        static int ports[] = {0x360,0x300,0x320,0x340, 0};
        int *port;
        int err = 0;

        if (!dev)
                return ERR_PTR(-ENOMEM);

        if (unit >= 0) {
                sprintf(dev->name, "eth%d", unit);
                netdev_boot_setup_check(dev);
                irq = dev->irq;
                dma = dev->dma;
        } else {
                dev->base_addr = io;
        }

        if (dev->base_addr > 0x1ff) { /* Check a single specified location. */
                err = ni65_probe1(dev, dev->base_addr);
        } else if (dev->base_addr > 0) { /* Don't probe at all. */
                err = -ENXIO;
        } else {
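                /* ni65_probe1() returns 0 on success, so this scan stops
                   at the first port where a card answers */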
                for (port = ports; *port && ni65_probe1(dev, *port); port++)
                        ;
                if (!*port)
                        err = -ENODEV;
        }
        if (err)
                goto out;

        err = register_netdev(dev);
        if (err)
                goto out1;
        return dev;
out1:
        cleanup_card(dev);
out:
        free_netdev(dev);
        return ERR_PTR(err);
}

/*
 * this is the real card probe ..
 */
static int __init ni65_probe1(struct net_device *dev,int ioaddr)
{
        int i,j;
        struct priv *p;
        unsigned long flags;

        dev->irq = irq;
        dev->dma = dma;

        for(i=0;i<NUM_CARDS;i++) {
                if(!request_region(ioaddr, cards[i].total_size, cards[i].cardname))
                        continue;
                if(cards[i].id_offset >= 0) {
                        if(inb(ioaddr+cards[i].id_offset+0) != cards[i].id0 ||
                           inb(ioaddr+cards[i].id_offset+1) != cards[i].id1) {
                                release_region(ioaddr, cards[i].total_size);
                                continue;
                        }
                }
                if(cards[i].vendor_id) {
                        for(j=0;j<3;j++)
                                if(inb(ioaddr+cards[i].addr_offset+j) != cards[i].vendor_id[j]) {
                                        release_region(ioaddr, cards[i].total_size);
                                        continue;
                                }
                }
                break;
        }
        if(i == NUM_CARDS)
                return -ENODEV;

        for(j=0;j<6;j++)
                dev->dev_addr[j] = inb(ioaddr+cards[i].addr_offset+j);

        if( (j=ni65_alloc_buffer(dev)) < 0) {
                release_region(ioaddr, cards[i].total_size);
                return j;
        }
        p = (struct priv *) dev->priv;
        p->cmdr_addr = ioaddr + cards[i].cmd_offset;
        p->cardno = i;
        spin_lock_init(&p->ring_lock);

        printk(KERN_INFO "%s: %s found at %#3x, ", dev->name, cards[p->cardno].cardname , ioaddr);

        outw(inw(PORT+L_RESET),PORT+L_RESET); /* first: reset the card */
        if( (j=readreg(CSR0)) != 0x4) {
                printk("failed.\n");
                printk(KERN_ERR "%s: Can't RESET card: %04x\n", dev->name, j);
                ni65_free_buffer(p);
                release_region(ioaddr, cards[p->cardno].total_size);
                return -EAGAIN;
        }

        outw(88,PORT+L_ADDRREG);
        if(inw(PORT+L_ADDRREG) == 88) {
                unsigned long v;
                v = inw(PORT+L_DATAREG);
                v <<= 16;
                outw(89,PORT+L_ADDRREG);
                v |= inw(PORT+L_DATAREG);
                printk("Version %#08lx, ",v);
                p->features = INIT_RING_BEFORE_START;
        }
        else {
                printk("ancient LANCE, ");
                p->features = 0x0;
        }

        if(test_bit(0,&cards[i].config)) {
                dev->irq = irqtab[(inw(ioaddr+L_CONFIG)>>2)&3];
                dev->dma = dmatab[inw(ioaddr+L_CONFIG)&3];
                printk("IRQ %d (from card), DMA %d (from card).\n",dev->irq,dev->dma);
        }
        else {
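                /*
                 * no DMA channel set: autoprobe below. Channels that look
                 * busy in the 8237 status registers are skipped; each
                 * remaining channel is put into cascade mode and the lance
                 * is told to fetch its init block - if CSR0_IDON appears,
                 * the chip really reached memory through that channel.
                 */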
                if(dev->dma == 0) {
                        /* 'stuck test' from lance.c */
                        long dma_channels = ((inb(DMA1_STAT_REG) >> 4) & 0x0f) |
                                            (inb(DMA2_STAT_REG) & 0xf0);
                        for(i=1;i<5;i++) {
                                int dma = dmatab[i];
                                if(test_bit(dma,&dma_channels) || request_dma(dma,"ni6510"))
                                        continue;

                                flags=claim_dma_lock();
                                disable_dma(dma);
                                set_dma_mode(dma,DMA_MODE_CASCADE);
                                enable_dma(dma);
                                release_dma_lock(flags);

                                ni65_init_lance(p,dev->dev_addr,0,0); /* trigger memory access */

                                flags=claim_dma_lock();
                                disable_dma(dma);
                                free_dma(dma);
                                release_dma_lock(flags);

                                if(readreg(CSR0) & CSR0_IDON)
                                        break;
                        }
                        if(i == 5) {
                                printk("failed.\n");
                                printk(KERN_ERR "%s: Can't detect DMA channel!\n", dev->name);
                                ni65_free_buffer(p);
                                release_region(ioaddr, cards[p->cardno].total_size);
                                return -EAGAIN;
                        }
                        dev->dma = dmatab[i];
                        printk("DMA %d (autodetected), ",dev->dma);
                }
                else
                        printk("DMA %d (assigned), ",dev->dma);

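                /*
                 * no IRQ set: let the lance raise its init-done interrupt
                 * and catch the line with probe_irq_on()/probe_irq_off().
                 */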
                if(dev->irq < 2)
                {
                        unsigned long irq_mask;

                        ni65_init_lance(p,dev->dev_addr,0,0);
                        irq_mask = probe_irq_on();
                        writereg(CSR0_INIT|CSR0_INEA,CSR0); /* trigger interrupt */
                        msleep(20);
                        dev->irq = probe_irq_off(irq_mask);
                        if(!dev->irq)
                        {
                                printk("Failed to detect IRQ line!\n");
                                ni65_free_buffer(p);
                                release_region(ioaddr, cards[p->cardno].total_size);
                                return -EAGAIN;
                        }
                        printk("IRQ %d (autodetected).\n",dev->irq);
                }
                else
                        printk("IRQ %d (assigned).\n",dev->irq);
        }

        if(request_dma(dev->dma, cards[p->cardno].cardname ) != 0)
        {
                printk(KERN_ERR "%s: Can't request dma-channel %d\n",dev->name,(int) dev->dma);
                ni65_free_buffer(p);
                release_region(ioaddr, cards[p->cardno].total_size);
                return -EAGAIN;
        }

        dev->base_addr = ioaddr;
        dev->open               = ni65_open;
        dev->stop               = ni65_close;
        dev->hard_start_xmit    = ni65_send_packet;
        dev->tx_timeout         = ni65_timeout;
        dev->watchdog_timeo     = HZ/2;
        dev->get_stats          = ni65_get_stats;
        dev->set_multicast_list = set_multicast_list;
        return 0; /* everything is OK */
}

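/*
 * the am7990 is configured through an 'init block' in memory holding the
 * ethernet address, the multicast filter, the mode word and the two
 * descriptor ring pointers (ring length encoded in the top three bits).
 * ni65_init_lance() writes its bus address to CSR1/CSR2, sets CSR0_INIT
 * and waits for CSR0_IDON (or CSR0_MERR).
 */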
/*
 * set lance registers and trigger init
 */
static void ni65_init_lance(struct priv *p,unsigned char *daddr,int filter,int mode)
{
        int i;
        u32 pib;

        writereg(CSR0_CLRALL|CSR0_STOP,CSR0);

        for(i=0;i<6;i++)
                p->ib.eaddr[i] = daddr[i];

        for(i=0;i<8;i++)
                p->ib.filter[i] = filter;
        p->ib.mode = mode;

        p->ib.trp = (u32) isa_virt_to_bus(p->tmdhead) | TMDNUMMASK;
        p->ib.rrp = (u32) isa_virt_to_bus(p->rmdhead) | RMDNUMMASK;
        writereg(0,CSR3);        /* busmaster/no word-swap */
        pib = (u32) isa_virt_to_bus(&p->ib);
        writereg(pib & 0xffff,CSR1);
        writereg(pib >> 16,CSR2);

        writereg(CSR0_INIT,CSR0); /* this changes L_ADDRREG to CSR0 */

        for(i=0;i<32;i++)
        {
                mdelay(4);
                if(inw(PORT+L_DATAREG) & (CSR0_IDON | CSR0_MERR) )
                        break; /* init ok ? */
        }
}

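/*
 * note: ISA bus-master DMA can only address the first 16MB of physical
 * memory, hence the GFP_DMA allocations and the virt_to_phys() check
 * in ni65_alloc_mem() below.
 */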
/*
 * allocate memory area and check the 16MB border
 */
static void *ni65_alloc_mem(struct net_device *dev,char *what,int size,int type)
{
        struct sk_buff *skb=NULL;
        unsigned char *ptr;
        void *ret;

        if(type) {
                ret = skb = alloc_skb(2+16+size,GFP_KERNEL|GFP_DMA);
                if(!skb) {
                        printk(KERN_WARNING "%s: unable to allocate %s memory.\n",dev->name,what);
                        return NULL;
                }
                skb_reserve(skb,2+16);
                skb_put(skb,R_BUF_SIZE);        /* grab the whole space .. (not necessary) */
                ptr = skb->data;
        }
        else {
                ret = ptr = kmalloc(T_BUF_SIZE,GFP_KERNEL | GFP_DMA);
                if(!ret) {
                        printk(KERN_WARNING "%s: unable to allocate %s memory.\n",dev->name,what);
                        return NULL;
                }
        }
        if( (u32) virt_to_phys(ptr+size) > 0x1000000) {
                printk(KERN_WARNING "%s: unable to allocate %s memory in lower 16MB!\n",dev->name,what);
                if(type)
                        kfree_skb(skb);
                else
                        kfree(ptr);
                return NULL;
        }
        return ret;
}

/*
 * allocate all memory structures .. send/recv buffers etc ...
 */
static int ni65_alloc_buffer(struct net_device *dev)
{
        unsigned char *ptr;
        struct priv *p;
        int i;

        /*
         * we need 8-byte aligned memory ..
         */
        ptr = ni65_alloc_mem(dev,"BUFFER",sizeof(struct priv)+8,0);
        if(!ptr)
                return -ENOMEM;

        p = dev->priv = (struct priv *) (((unsigned long) ptr + 7) & ~0x7);
        memset((char *) dev->priv,0,sizeof(struct priv));
        p->self = ptr;

        for(i=0;i<TMDNUM;i++)
        {
#ifdef XMT_VIA_SKB
                p->tmd_skb[i] = NULL;
#endif
                p->tmdbounce[i] = ni65_alloc_mem(dev,"XMIT",T_BUF_SIZE,0);
                if(!p->tmdbounce[i]) {
                        ni65_free_buffer(p);
                        return -ENOMEM;
                }
        }

        for(i=0;i<RMDNUM;i++)
        {
#ifdef RCV_VIA_SKB
                p->recv_skb[i] = ni65_alloc_mem(dev,"RECV",R_BUF_SIZE,1);
                if(!p->recv_skb[i]) {
                        ni65_free_buffer(p);
                        return -ENOMEM;
                }
#else
                p->recvbounce[i] = ni65_alloc_mem(dev,"RECV",R_BUF_SIZE,0);
                if(!p->recvbounce[i]) {
                        ni65_free_buffer(p);
                        return -ENOMEM;
                }
#endif
        }

        return 0; /* everything is OK */
}

/*
 * free buffers and private struct
 */
static void ni65_free_buffer(struct priv *p)
{
        int i;

        if(!p)
                return;

        for(i=0;i<TMDNUM;i++) {
                kfree(p->tmdbounce[i]);
#ifdef XMT_VIA_SKB
                if(p->tmd_skb[i])
                        dev_kfree_skb(p->tmd_skb[i]);
#endif
        }

        for(i=0;i<RMDNUM;i++)
        {
#ifdef RCV_VIA_SKB
                if(p->recv_skb[i])
                        dev_kfree_skb(p->recv_skb[i]);
#else
                kfree(p->recvbounce[i]);
#endif
        }
        kfree(p->self);
}


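/*
 * if the chip supports INIT_RING_BEFORE_START, ni65_stop_start() saves
 * the transmit descriptors, rebuilds both rings and re-queues the still
 * pending frames (rotated so that the oldest one is sent first).
 */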
/*
 * stop and (re)start lance .. e.g. after an error
 */
static void ni65_stop_start(struct net_device *dev,struct priv *p)
{
        int csr0 = CSR0_INEA;

        writedatareg(CSR0_STOP);

        if(debuglevel > 1)
                printk(KERN_DEBUG "ni65_stop_start\n");

        if(p->features & INIT_RING_BEFORE_START) {
                int i;
#ifdef XMT_VIA_SKB
                struct sk_buff *skb_save[TMDNUM];
#endif
                unsigned long buffer[TMDNUM];
                short blen[TMDNUM];

                if(p->xmit_queued) {
                        while(1) {
                                if((p->tmdhead[p->tmdlast].u.s.status & XMIT_OWN))
                                        break;
                                p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1);
                                if(p->tmdlast == p->tmdnum)
                                        break;
                        }
                }

                for(i=0;i<TMDNUM;i++) {
                        struct tmd *tmdp = p->tmdhead + i;
#ifdef XMT_VIA_SKB
                        skb_save[i] = p->tmd_skb[i];
#endif
                        buffer[i] = (u32) isa_bus_to_virt(tmdp->u.buffer);
                        blen[i] = tmdp->blen;
                        tmdp->u.s.status = 0x0;
                }

                for(i=0;i<RMDNUM;i++) {
                        struct rmd *rmdp = p->rmdhead + i;
                        rmdp->u.s.status = RCV_OWN;
                }
                p->tmdnum = p->xmit_queued = 0;
                writedatareg(CSR0_STRT | csr0);

                for(i=0;i<TMDNUM;i++) {
                        int num = (i + p->tmdlast) & (TMDNUM-1);
                        p->tmdhead[i].u.buffer = (u32) isa_virt_to_bus((char *)buffer[num]); /* status is part of buffer field */
                        p->tmdhead[i].blen = blen[num];
                        if(p->tmdhead[i].u.s.status & XMIT_OWN) {
                                p->tmdnum = (p->tmdnum + 1) & (TMDNUM-1);
                                p->xmit_queued = 1;
                                writedatareg(CSR0_TDMD | CSR0_INEA | csr0);
                        }
#ifdef XMT_VIA_SKB
                        p->tmd_skb[i] = skb_save[num];
#endif
                }
                p->rmdnum = p->tmdlast = 0;
                if(!p->lock)
                        if (p->tmdnum || !p->xmit_queued)
                                netif_wake_queue(dev);
                dev->trans_start = jiffies;
        }
        else
                writedatareg(CSR0_STRT | csr0);
}

/*
 * init lance (write init-values .. init-buffers) (open-helper)
 */
static int ni65_lance_reinit(struct net_device *dev)
{
        int i;
        struct priv *p = (struct priv *) dev->priv;
        unsigned long flags;

        p->lock = 0;
        p->xmit_queued = 0;

        flags=claim_dma_lock();
        disable_dma(dev->dma); /* I've never worked with dma, but we do it like the packetdriver */
        set_dma_mode(dev->dma,DMA_MODE_CASCADE);
        enable_dma(dev->dma);
        release_dma_lock(flags);

        outw(inw(PORT+L_RESET),PORT+L_RESET); /* first: reset the card */
        if( (i=readreg(CSR0) ) != 0x4)
        {
                printk(KERN_ERR "%s: can't RESET %s card: %04x\n",dev->name,
                                        cards[p->cardno].cardname,(int) i);
                flags=claim_dma_lock();
                disable_dma(dev->dma);
                release_dma_lock(flags);
                return 0;
        }

        p->rmdnum = p->tmdnum = p->tmdlast = p->tmdbouncenum = 0;
        for(i=0;i<TMDNUM;i++)
        {
                struct tmd *tmdp = p->tmdhead + i;
#ifdef XMT_VIA_SKB
                if(p->tmd_skb[i]) {
                        dev_kfree_skb(p->tmd_skb[i]);
                        p->tmd_skb[i] = NULL;
                }
#endif
                tmdp->u.buffer = 0x0;
                tmdp->u.s.status = XMIT_START | XMIT_END;
                tmdp->blen = tmdp->status2 = 0;
        }

        for(i=0;i<RMDNUM;i++)
        {
                struct rmd *rmdp = p->rmdhead + i;
#ifdef RCV_VIA_SKB
                rmdp->u.buffer = (u32) isa_virt_to_bus(p->recv_skb[i]->data);
#else
                rmdp->u.buffer = (u32) isa_virt_to_bus(p->recvbounce[i]);
#endif
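                /* buffer lengths are stored as negative (two's complement)
                   values, as the lance expects them */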
                rmdp->blen = -(R_BUF_SIZE-8);
                rmdp->mlen = 0;
                rmdp->u.s.status = RCV_OWN;
        }

        if(dev->flags & IFF_PROMISC)
                ni65_init_lance(p,dev->dev_addr,0x00,M_PROM);
        else if(dev->mc_count || dev->flags & IFF_ALLMULTI)
                ni65_init_lance(p,dev->dev_addr,0xff,0x0);
        else
                ni65_init_lance(p,dev->dev_addr,0x00,0x00);

        /*
         * ni65_init_lance() sets L_ADDRREG to CSR0.
         * NOW, WE WILL NEVER CHANGE THE L_ADDRREG, CSR0 IS ALWAYS SELECTED
         */

        if(inw(PORT+L_DATAREG) & CSR0_IDON)    {
                ni65_set_performance(p);
                                        /* init OK: start lance , enable interrupts */
                writedatareg(CSR0_CLRALL | CSR0_INEA | CSR0_STRT);
                return 1; /* ->OK */
        }
        printk(KERN_ERR "%s: can't init lance, status: %04x\n",dev->name,(int) inw(PORT+L_DATAREG));
        flags=claim_dma_lock();
        disable_dma(dev->dma);
        release_dma_lock(flags);
        return 0; /* ->Error */
}

/*
 * interrupt handler
 */
static irqreturn_t ni65_interrupt(int irq, void * dev_id)
{
        int csr0 = 0;
        struct net_device *dev = dev_id;
        struct priv *p;
        int bcnt = 32;

        p = (struct priv *) dev->priv;

        spin_lock(&p->ring_lock);

        while(--bcnt) {
                csr0 = inw(PORT+L_DATAREG);

#if 0
                writedatareg( (csr0 & CSR0_CLRALL) ); /* ack interrupts, disable int. */
#else
                writedatareg( (csr0 & CSR0_CLRALL) | CSR0_INEA ); /* ack interrupts, interrupts enabled */
#endif

                if(!(csr0 & (CSR0_ERR | CSR0_RINT | CSR0_TINT)))
                        break;

                if(csr0 & CSR0_RINT) /* RECV-int? */
                        ni65_recv_intr(dev,csr0);
                if(csr0 & CSR0_TINT) /* XMIT-int? */
                        ni65_xmit_intr(dev,csr0);

                if(csr0 & CSR0_ERR)
                {
                        struct priv *p = (struct priv *) dev->priv;
                        if(debuglevel > 1)
                                printk(KERN_ERR "%s: general error: %04x.\n",dev->name,csr0);
                        if(csr0 & CSR0_BABL)
                                p->stats.tx_errors++;
                        if(csr0 & CSR0_MISS) {
                                int i;
                                for(i=0;i<RMDNUM;i++)
                                        printk("%02x ",p->rmdhead[i].u.s.status);
                                printk("\n");
                                p->stats.rx_errors++;
                        }
                        if(csr0 & CSR0_MERR) {
                                if(debuglevel > 1)
                                        printk(KERN_ERR "%s: Ooops .. memory error: %04x.\n",dev->name,csr0);
                                ni65_stop_start(dev,p);
                        }
                }
        }

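/*
 * RCV_PARANOIA_CHECK: if descriptors beyond the current position are
 * already owned by the host while the current one is not, the ring got
 * out of sync (see 'known problems' above); resync p->rmdnum to the
 * first host-owned descriptor and process the ring again.
 */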
#ifdef RCV_PARANOIA_CHECK
{
 int j;
 for(j=0;j<RMDNUM;j++)
 {
        struct priv *p = (struct priv *) dev->priv;
        int i,k,num1,num2;
        for(i=RMDNUM-1;i>0;i--) {
                num2 = (p->rmdnum + i) & (RMDNUM-1);
                if(!(p->rmdhead[num2].u.s.status & RCV_OWN))
                        break;
        }

        if(i) {
                for(k=0;k<RMDNUM;k++) {
                        num1 = (p->rmdnum + k) & (RMDNUM-1);
                        if(!(p->rmdhead[num1].u.s.status & RCV_OWN))
                                break;
                }
                if(!k)
                        break;

                if(debuglevel > 0)
                {
                        char buf[256],*buf1;
                        int k;
                        buf1 = buf;
                        for(k=0;k<RMDNUM;k++) {
                                sprintf(buf1,"%02x ",(p->rmdhead[k].u.s.status)); /* & RCV_OWN) ); */
                                buf1 += 3;
                        }
                        *buf1 = 0;
                        printk(KERN_ERR "%s: Ooops, receive ring corrupted %2d %2d | %s\n",dev->name,p->rmdnum,i,buf);
                }

                p->rmdnum = num1;
                ni65_recv_intr(dev,csr0);
                if((p->rmdhead[num2].u.s.status & RCV_OWN))
                        break;  /* ok, we are 'in sync' again */
        }
        else
                break;
 }
}
#endif

        if( (csr0 & (CSR0_RXON | CSR0_TXON)) != (CSR0_RXON | CSR0_TXON) ) {
                printk(KERN_DEBUG "%s: RX or TX was offline -> restart\n",dev->name);
                ni65_stop_start(dev,p);
        }
        else
                writedatareg(CSR0_INEA);

        spin_unlock(&p->ring_lock);
        return IRQ_HANDLED;
}

/*
 * We have received an Xmit-Interrupt ..
 * send a new packet if necessary
 */
static void ni65_xmit_intr(struct net_device *dev,int csr0)
{
        struct priv *p = (struct priv *) dev->priv;

        while(p->xmit_queued)
        {
                struct tmd *tmdp = p->tmdhead + p->tmdlast;
                int tmdstat = tmdp->u.s.status;

                if(tmdstat & XMIT_OWN)
                        break;

                if(tmdstat & XMIT_ERR)
                {
#if 0
                        if(tmdp->status2 & XMIT_TDRMASK && debuglevel > 3)
                                printk(KERN_ERR "%s: tdr-problems (e.g. no resistor)\n",dev->name);
#endif
                        /* checking some errors */
                        if(tmdp->status2 & XMIT_RTRY)
                                p->stats.tx_aborted_errors++;
                        if(tmdp->status2 & XMIT_LCAR)
                                p->stats.tx_carrier_errors++;
                        if(tmdp->status2 & (XMIT_BUFF | XMIT_UFLO )) {
                                /* this stops the xmitter */
                                p->stats.tx_fifo_errors++;
                                if(debuglevel > 0)
                                        printk(KERN_ERR "%s: Xmit FIFO/BUFF error\n",dev->name);
                                if(p->features & INIT_RING_BEFORE_START) {
                                        tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END;    /* test: resend this frame */
                                        ni65_stop_start(dev,p);
                                        break;  /* no more Xmit processing .. */
                                }
                                else
                                        ni65_stop_start(dev,p);
                        }
                        if(debuglevel > 2)
                                printk(KERN_ERR "%s: xmit-error: %04x %02x-%04x\n",dev->name,csr0,(int) tmdstat,(int) tmdp->status2);
                        if(!(csr0 & CSR0_BABL)) /* don't count errors twice */
                                p->stats.tx_errors++;
                        tmdp->status2 = 0;
                }
                else {
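                        /* blen was stored as a negative value, so
                           subtracting it adds the frame length */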
                        p->stats.tx_bytes -= (short)(tmdp->blen);
                        p->stats.tx_packets++;
                }

#ifdef XMT_VIA_SKB
                if(p->tmd_skb[p->tmdlast]) {
                        dev_kfree_skb_irq(p->tmd_skb[p->tmdlast]);
                        p->tmd_skb[p->tmdlast] = NULL;
                }
#endif

                p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1);
                if(p->tmdlast == p->tmdnum)
                        p->xmit_queued = 0;
        }
        netif_wake_queue(dev);
}

/*
 * We have received a packet
 */
static void ni65_recv_intr(struct net_device *dev,int csr0)
{
        struct rmd *rmdp;
        int rmdstat,len;
        int cnt=0;
        struct priv *p = (struct priv *) dev->priv;

        rmdp = p->rmdhead + p->rmdnum;
        while(!( (rmdstat = rmdp->u.s.status) & RCV_OWN))
        {
                cnt++;
                if( (rmdstat & (RCV_START | RCV_END | RCV_ERR)) != (RCV_START | RCV_END) ) /* error or oversized? */
                {
                        if(!(rmdstat & RCV_ERR)) {
                                if(rmdstat & RCV_START)
                                {
                                        p->stats.rx_length_errors++;
                                        printk(KERN_ERR "%s: recv, packet too long: %d\n",dev->name,rmdp->mlen & 0x0fff);
                                }
                        }
                        else {
                                if(debuglevel > 2)
                                        printk(KERN_ERR "%s: receive-error: %04x, lance-status: %04x/%04x\n",
                                                        dev->name,(int) rmdstat,csr0,(int) inw(PORT+L_DATAREG) );
                                if(rmdstat & RCV_FRAM)
                                        p->stats.rx_frame_errors++;
                                if(rmdstat & RCV_OFLO)
                                        p->stats.rx_over_errors++;
                                if(rmdstat & RCV_CRC)
                                        p->stats.rx_crc_errors++;
                                if(rmdstat & RCV_BUF_ERR)
                                        p->stats.rx_fifo_errors++;
                        }
                        if(!(csr0 & CSR0_MISS)) /* don't count errors twice */
                                p->stats.rx_errors++;
                }
                else if( (len = (rmdp->mlen & 0x0fff) - 4) >= 60)
                {
#ifdef RCV_VIA_SKB
                        struct sk_buff *skb = alloc_skb(R_BUF_SIZE+2+16,GFP_ATOMIC);
                        if (skb)
                                skb_reserve(skb,16);
#else
                        struct sk_buff *skb = dev_alloc_skb(len+2);
#endif
                        if(skb)
                        {
                                skb_reserve(skb,2);
#ifdef RCV_VIA_SKB
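                                /* fresh skb above the 16MB ISA limit:
                                   copy the frame; otherwise swap the new
                                   skb into the ring and pass the old,
                                   already-filled one upstream */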
                                if( (unsigned long) (skb->data + R_BUF_SIZE) > 0x1000000) {
                                        skb_put(skb,len);
                                        skb_copy_to_linear_data(skb, (unsigned char *)(p->recv_skb[p->rmdnum]->data),len);
                                }
                                else {
                                        struct sk_buff *skb1 = p->recv_skb[p->rmdnum];
                                        skb_put(skb,R_BUF_SIZE);
                                        p->recv_skb[p->rmdnum] = skb;
                                        rmdp->u.buffer = (u32) isa_virt_to_bus(skb->data);
                                        skb = skb1;
                                        skb_trim(skb,len);
                                }
#else
                                skb_put(skb,len);
                                skb_copy_to_linear_data(skb, (unsigned char *) p->recvbounce[p->rmdnum],len);
#endif
                                p->stats.rx_packets++;
                                p->stats.rx_bytes += len;
                                skb->protocol=eth_type_trans(skb,dev);
                                netif_rx(skb);
                                dev->last_rx = jiffies;
                        }
                        else
                        {
                                printk(KERN_ERR "%s: can't alloc new sk_buff\n",dev->name);
                                p->stats.rx_dropped++;
                        }
                }
                else {
                        printk(KERN_INFO "%s: received runt packet\n",dev->name);
                        p->stats.rx_errors++;
                }
                rmdp->blen = -(R_BUF_SIZE-8);
                rmdp->mlen = 0;
                rmdp->u.s.status = RCV_OWN; /* change owner */
                p->rmdnum = (p->rmdnum + 1) & (RMDNUM-1);
                rmdp = p->rmdhead + p->rmdnum;
        }
}

/*
 * kick the xmitter ..
 */

static void ni65_timeout(struct net_device *dev)
{
        int i;
        struct priv *p = (struct priv *) dev->priv;

        printk(KERN_ERR "%s: xmitter timed out, trying to restart!\n",dev->name);
        for(i=0;i<TMDNUM;i++)
                printk("%02x ",p->tmdhead[i].u.s.status);
        printk("\n");
        ni65_lance_reinit(dev);
        dev->trans_start = jiffies;
        netif_wake_queue(dev);
}

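/*
 * transmit path: without XMT_VIA_SKB, or when the skb's data lies above
 * the 16MB ISA limit, the frame is copied into a pre-allocated bounce
 * buffer; otherwise the skb itself is handed to the descriptor and
 * freed later in ni65_xmit_intr().
 */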
/*
 *      Send a packet
 */

static int ni65_send_packet(struct sk_buff *skb, struct net_device *dev)
{
        struct priv *p = (struct priv *) dev->priv;

        netif_stop_queue(dev);

        if (test_and_set_bit(0, (void*)&p->lock)) {
                printk(KERN_ERR "%s: Queue was locked.\n", dev->name);
                return 1;
        }

        {
                short len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
                struct tmd *tmdp;
                unsigned long flags;

#ifdef XMT_VIA_SKB
                if( (unsigned long) (skb->data + skb->len) > 0x1000000) {
#endif

                        skb_copy_from_linear_data(skb, p->tmdbounce[p->tmdbouncenum],
                                      skb->len > T_BUF_SIZE ? T_BUF_SIZE :
                                                              skb->len);
                        if (len > skb->len)
                                memset((char *)p->tmdbounce[p->tmdbouncenum]+skb->len, 0, len-skb->len);
                        dev_kfree_skb (skb);

                        spin_lock_irqsave(&p->ring_lock, flags);
                        tmdp = p->tmdhead + p->tmdnum;
                        tmdp->u.buffer = (u32) isa_virt_to_bus(p->tmdbounce[p->tmdbouncenum]);
                        p->tmdbouncenum = (p->tmdbouncenum + 1) & (TMDNUM - 1);

#ifdef XMT_VIA_SKB
                }
                else {
                        spin_lock_irqsave(&p->ring_lock, flags);

                        tmdp = p->tmdhead + p->tmdnum;
                        tmdp->u.buffer = (u32) isa_virt_to_bus(skb->data);
                        p->tmd_skb[p->tmdnum] = skb;
                }
#endif
                tmdp->blen = -len;

                tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END;
                writedatareg(CSR0_TDMD | CSR0_INEA); /* enable xmit & interrupt */

                p->xmit_queued = 1;
                p->tmdnum = (p->tmdnum + 1) & (TMDNUM-1);

                if(p->tmdnum != p->tmdlast)
                        netif_wake_queue(dev);

                p->lock = 0;
                dev->trans_start = jiffies;

                spin_unlock_irqrestore(&p->ring_lock, flags);
        }

        return 0;
}

static struct net_device_stats *ni65_get_stats(struct net_device *dev)
{

#if 0
        int i;
        struct priv *p = (struct priv *) dev->priv;
        for(i=0;i<RMDNUM;i++)
        {
                struct rmd *rmdp = p->rmdhead + ((p->rmdnum + i) & (RMDNUM-1));
                printk("%02x ",rmdp->u.s.status);
        }
        printk("\n");
#endif

        return &((struct priv *) dev->priv)->stats;
}

static void set_multicast_list(struct net_device *dev)
{
        if(!ni65_lance_reinit(dev))
                printk(KERN_ERR "%s: Can't switch card into MC mode!\n",dev->name);
        netif_wake_queue(dev);
}

#ifdef MODULE
static struct net_device *dev_ni65;

module_param(irq, int, 0);
module_param(io, int, 0);
module_param(dma, int, 0);
MODULE_PARM_DESC(irq, "ni6510 IRQ number (ignored for some cards)");
MODULE_PARM_DESC(io, "ni6510 I/O base address");
MODULE_PARM_DESC(dma, "ni6510 ISA DMA channel (ignored for some cards)");

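/*
 * typical usage (values assumed for illustration):
 *   modprobe ni65 io=0x360 irq=9 dma=5
 */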
int __init init_module(void)
{
        dev_ni65 = ni65_probe(-1);
        return IS_ERR(dev_ni65) ? PTR_ERR(dev_ni65) : 0;
}

void __exit cleanup_module(void)
{
        unregister_netdev(dev_ni65);
        cleanup_card(dev_ni65);
        free_netdev(dev_ni65);
}
#endif /* MODULE */

MODULE_LICENSE("GPL");

/*
 * END of ni65.c
 */
