OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

[/] [or1k/] [trunk/] [linux/] [linux-2.4/] [drivers/] [net/] [82596.c] - Blame information for rev 1275

Go to most recent revision | Details | Compare with Previous | View Log

Line No. Rev Author Line
1 1275 phoenix
/* 82596.c: A generic 82596 ethernet driver for linux. */
2
/*
3
   Based on Apricot.c
4
   Written 1994 by Mark Evans.
5
   This driver is for the Apricot 82596 bus-master interface
6
 
7
   Modularised 12/94 Mark Evans
8
 
9
 
10
   Modified to support the 82596 ethernet chips on 680x0 VME boards.
11
   by Richard Hirst <richard@sleepie.demon.co.uk>
12
   Renamed to be 82596.c
13
 
14
   980825:  Changed to receive directly in to sk_buffs which are
15
   allocated at open() time.  Eliminates copy on incoming frames
16
   (small ones are still copied).  Shared data now held in a
17
   non-cached page, so we can run on 68060 in copyback mode.
18
 
19
   TBD:
20
   * look at deferring rx frames rather than discarding (as per tulip)
21
   * handle tx ring full as per tulip
22
   * performace test to tune rx_copybreak
23
 
24
   Most of my modifications relate to the braindead big-endian
25
   implementation by Intel.  When the i596 is operating in
26
   'big-endian' mode, it thinks a 32 bit value of 0x12345678
27
   should be stored as 0x56781234.  This is a real pain, when
28
   you have linked lists which are shared by the 680x0 and the
29
   i596.
30
 
31
   Driver skeleton
32
   Written 1993 by Donald Becker.
33
   Copyright 1993 United States Government as represented by the Director,
34
   National Security Agency. This software may only be used and distributed
35
   according to the terms of the GNU General Public License as modified by SRC,
36
   incorporated herein by reference.
37
 
38
   The author may be reached as becker@scyld.com, or C/O
39
   Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403
40
 
41
 */
42
 
43
#include <linux/config.h>
44
#include <linux/module.h>
45
 
46
#include <linux/kernel.h>
47
#include <linux/sched.h>
48
#include <linux/string.h>
49
#include <linux/ptrace.h>
50
#include <linux/errno.h>
51
#include <linux/ioport.h>
52
#include <linux/slab.h>
53
#include <linux/interrupt.h>
54
#include <linux/delay.h>
55
#include <linux/netdevice.h>
56
#include <linux/etherdevice.h>
57
#include <linux/skbuff.h>
58
#include <linux/init.h>
59
 
60
#include <asm/bitops.h>
61
#include <asm/io.h>
62
#include <asm/dma.h>
63
#include <asm/pgtable.h>
64
#include <asm/pgalloc.h>
65
 
66
static char version[] __initdata =
67
        "82596.c $Revision: 1.1.1.1 $\n";
68
 
69
/* DEBUG flags
70
 */
71
 
72
#define DEB_INIT        0x0001
73
#define DEB_PROBE       0x0002
74
#define DEB_SERIOUS     0x0004
75
#define DEB_ERRORS      0x0008
76
#define DEB_MULTI       0x0010
77
#define DEB_TDR         0x0020
78
#define DEB_OPEN        0x0040
79
#define DEB_RESET       0x0080
80
#define DEB_ADDCMD      0x0100
81
#define DEB_STATUS      0x0200
82
#define DEB_STARTTX     0x0400
83
#define DEB_RXADDR      0x0800
84
#define DEB_TXADDR      0x1000
85
#define DEB_RXFRAME     0x2000
86
#define DEB_INTS        0x4000
87
#define DEB_STRUCT      0x8000
88
#define DEB_ANY         0xffff
89
 
90
 
91
/* Execute debug statement y only when debug class x is enabled in
 * i596_debug.  Wrapped in do { } while (0) so the macro expands to a
 * single statement and is safe inside unbraced if/else bodies (the
 * original bare `if` form created a dangling-else hazard).
 */
#define DEB(x,y)        do { if (i596_debug & (x)) y; } while (0)
92
 
93
 
94
#if defined(CONFIG_MVME16x_NET) || defined(CONFIG_MVME16x_NET_MODULE)
95
#define ENABLE_MVME16x_NET
96
#endif
97
#if defined(CONFIG_BVME6000_NET) || defined(CONFIG_BVME6000_NET_MODULE)
98
#define ENABLE_BVME6000_NET
99
#endif
100
#if defined(CONFIG_APRICOT) || defined(CONFIG_APRICOT_MODULE)
101
#define ENABLE_APRICOT
102
#endif
103
 
104
#ifdef ENABLE_MVME16x_NET
105
#include <asm/mvme16xhw.h>
106
#endif
107
#ifdef ENABLE_BVME6000_NET
108
#include <asm/bvme6000hw.h>
109
#endif
110
 
111
/*
112
 * Define various macros for Channel Attention, word swapping etc., dependent
113
 * on architecture.  MVME and BVME are 680x0 based, otherwise it is Intel.
114
 */
115
 
116
#ifdef __mc68000__
117
#define WSWAPrfd(x)  ((struct i596_rfd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
118
#define WSWAPrbd(x)  ((struct i596_rbd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
119
#define WSWAPiscp(x) ((struct i596_iscp *)(((u32)(x)<<16) | ((((u32)(x)))>>16)))
120
#define WSWAPscb(x)  ((struct i596_scb *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
121
#define WSWAPcmd(x)  ((struct i596_cmd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
122
#define WSWAPtbd(x)  ((struct i596_tbd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
123
#define WSWAPchar(x) ((char *)            (((u32)(x)<<16) | ((((u32)(x)))>>16)))
124
#define ISCP_BUSY       0x00010000
125
#define MACH_IS_APRICOT 0
126
#else
127
#define WSWAPrfd(x)     ((struct i596_rfd *)(x))
128
#define WSWAPrbd(x)     ((struct i596_rbd *)(x))
129
#define WSWAPiscp(x)    ((struct i596_iscp *)(x))
130
#define WSWAPscb(x)     ((struct i596_scb *)(x))
131
#define WSWAPcmd(x)     ((struct i596_cmd *)(x))
132
#define WSWAPtbd(x)     ((struct i596_tbd *)(x))
133
#define WSWAPchar(x)    ((char *)(x))
134
#define ISCP_BUSY       0x0001
135
#define MACH_IS_APRICOT 1
136
#endif
137
 
138
/*
139
 * The MPU_PORT command allows direct access to the 82596. With PORT access
140
 * the following commands are available (p5-18). The 32-bit port command
141
 * must be word-swapped with the most significant word written first.
142
 * This only applies to VME boards.
143
 */
144
#define PORT_RESET              0x00    /* reset 82596 */
145
#define PORT_SELFTEST           0x01    /* selftest */
146
#define PORT_ALTSCP             0x02    /* alternate SCB address */
147
#define PORT_ALTDUMP            0x03    /* Alternate DUMP address */
148
 
149
static int i596_debug = (DEB_SERIOUS|DEB_PROBE);
150
 
151
MODULE_AUTHOR("Richard Hirst");
152
MODULE_DESCRIPTION("i82596 driver");
153
MODULE_LICENSE("GPL");
154
 
155
MODULE_PARM(i596_debug, "i");
156
MODULE_PARM_DESC(i596_debug, "i82596 debug mask");
157
 
158
 
159
/* Copy frames shorter than rx_copybreak, otherwise pass on up in
160
 * a full sized sk_buff.  Value of 100 stolen from tulip.c (!alpha).
161
 */
162
static int rx_copybreak = 100;
163
 
164
#define PKT_BUF_SZ      1536
165
#define MAX_MC_CNT      64
166
 
167
#define I596_TOTAL_SIZE 17
168
 
169
#define I596_NULL ((void *)0xffffffff)
170
 
171
#define CMD_EOL         0x8000  /* The last command of the list, stop. */
172
#define CMD_SUSP        0x4000  /* Suspend after doing cmd. */
173
#define CMD_INTR        0x2000  /* Interrupt after doing cmd. */
174
 
175
#define CMD_FLEX        0x0008  /* Enable flexible memory model */
176
 
177
enum commands {
178
        CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
179
        CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7
180
};
181
 
182
#define STAT_C          0x8000  /* Set to 0 after execution */
183
#define STAT_B          0x4000  /* Command being executed */
184
#define STAT_OK         0x2000  /* Command executed ok */
185
#define STAT_A          0x1000  /* Command aborted */
186
 
187
#define  CUC_START      0x0100
188
#define  CUC_RESUME     0x0200
189
#define  CUC_SUSPEND    0x0300
190
#define  CUC_ABORT      0x0400
191
#define  RX_START       0x0010
192
#define  RX_RESUME      0x0020
193
#define  RX_SUSPEND     0x0030
194
#define  RX_ABORT       0x0040
195
 
196
#define TX_TIMEOUT      5
197
 
198
 
199
/* Register layout used to drive the 82596 on VME boards: the PORT command
 * is written as two halves (porthi/portlo, most significant word first per
 * MPU_PORT below), and a write to ca asserts Channel Attention.
 */
struct i596_reg {
        unsigned short porthi;
        unsigned short portlo;
        unsigned long ca;
};

/* NOTE(review): EOF shadows the stdio EOF macro — kernel code, so harmless
 * here, but confirm nothing in this file relies on the stdio meaning.
 */
#define EOF             0x8000
#define SIZE_MASK       0x3fff

/* Transmit buffer descriptor: describes one chunk of outgoing frame data
 * (size has EOF/SIZE_MASK encoded in it; data is a bus address).
 */
struct i596_tbd {
        unsigned short size;
        unsigned short pad;
        struct i596_tbd *next;
        char *data;
};

/* The command structure has two 'next' pointers; v_next is the address of
 * the next command as seen by the CPU, b_next is the address of the next
 * command as seen by the 82596.  The b_next pointer, as used by the 82596
 * always references the status field of the next command, rather than the
 * v_next field, because the 82596 is unaware of v_next.  It may seem more
 * logical to put v_next at the end of the structure, but we cannot do that
 * because the 82596 expects other fields to be there, depending on command
 * type.
 */

struct i596_cmd {
        struct i596_cmd *v_next;        /* Address from CPUs viewpoint */
        unsigned short status;
        unsigned short command;
        struct i596_cmd *b_next;        /* Address from i596 viewpoint */
};

/* CmdTx action: common header plus tbd chain and the originating skb. */
struct tx_cmd {
        struct i596_cmd cmd;
        struct i596_tbd *tbd;
        unsigned short size;
        unsigned short pad;
        struct sk_buff *skb;    /* So we can free it after tx */
};

/* CmdTDR action: Time Domain Reflectometry (cable fault test) result. */
struct tdr_cmd {
        struct i596_cmd cmd;
        unsigned short status;
        unsigned short pad;
};

/* CmdMulticastList action: count plus packed 6-byte hardware addresses. */
struct mc_cmd {
        struct i596_cmd cmd;
        short mc_cnt;
        char mc_addrs[MAX_MC_CNT*6];
};

/* CmdSASetup action: programs the station (individual) MAC address. */
struct sa_cmd {
        struct i596_cmd cmd;
        char eth_addr[8];
};

/* CmdConfigure action: carries the chip parameter bytes (see init_setup). */
struct cf_cmd {
        struct i596_cmd cmd;
        char i596_config[16];
};

/* Receive frame descriptor: one per received frame; the chip fills stat,
 * count and rbd, while v_next/v_prev are CPU-only ring links.
 */
struct i596_rfd {
        unsigned short stat;
        unsigned short cmd;
        struct i596_rfd *b_next;        /* Address from i596 viewpoint */
        struct i596_rbd *rbd;
        unsigned short count;
        unsigned short size;
        struct i596_rfd *v_next;        /* Address from CPUs viewpoint */
        struct i596_rfd *v_prev;
};

/* Receive buffer descriptor: one per rx sk_buff; b_* fields are bus
 * addresses for the chip, v_* fields are CPU pointers.
 */
struct i596_rbd {
    unsigned short count;
    unsigned short zero1;
    struct i596_rbd *b_next;
    unsigned char *b_data;              /* Address from i596 viewpoint */
    unsigned short size;
    unsigned short zero2;
    struct sk_buff *skb;
    struct i596_rbd *v_next;
    struct i596_rbd *b_addr;            /* This rbd addr from i596 view */
    unsigned char *v_data;              /* Address from CPUs viewpoint */
};

#define TX_RING_SIZE 64
#define RX_RING_SIZE 16

/* System Control Block: the main mailbox between CPU and 82596 — command
 * and status words plus chip-maintained error counters.
 */
struct i596_scb {
        unsigned short status;
        unsigned short command;
        struct i596_cmd *cmd;
        struct i596_rfd *rfd;
        unsigned long crc_err;
        unsigned long align_err;
        unsigned long resource_err;
        unsigned long over_err;
        unsigned long rcvdt_err;
        unsigned long short_err;
        unsigned short t_on;
        unsigned short t_off;
};

/* Intermediate SCP: stat is set busy by the CPU and cleared by the chip
 * once initialisation completes (see wait_istat); scb is a bus address.
 */
struct i596_iscp {
        unsigned long stat;
        struct i596_scb *scb;
};

/* System Configuration Pointer: first structure the chip reads; sysbus
 * selects bus/endian operating mode, iscp is a bus address.
 */
struct i596_scp {
        unsigned long sysbus;
        unsigned long pad;
        struct i596_iscp *iscp;
};

/* Per-device driver state.  The leading scp/iscp/scb members are shared
 * with the 82596 and are marked volatile; the remainder is CPU-only
 * bookkeeping plus the statically sized rx/tx descriptor rings.
 */
struct i596_private {
        volatile struct i596_scp scp;
        volatile struct i596_iscp iscp;
        volatile struct i596_scb scb;
        struct sa_cmd sa_cmd;
        struct cf_cmd cf_cmd;
        struct tdr_cmd tdr_cmd;
        struct mc_cmd mc_cmd;
        unsigned long stat;
        int last_restart __attribute__((aligned(4)));
        struct i596_rfd *rfd_head;
        struct i596_rbd *rbd_head;
        struct i596_cmd *cmd_tail;
        struct i596_cmd *cmd_head;
        int cmd_backlog;
        unsigned long last_cmd;
        struct net_device_stats stats;
        struct i596_rfd rfds[RX_RING_SIZE];
        struct i596_rbd rbds[RX_RING_SIZE];
        struct tx_cmd tx_cmds[TX_RING_SIZE];
        struct i596_tbd tbds[TX_RING_SIZE];
        int next_tx_cmd;
        spinlock_t lock;
};
339
 
340
/* Parameter bytes for the CmdConfigure action.  Only the first 14 bytes
 * are copied into the configure command (see the memcpy of 14 in
 * init_i596_mem); byte meanings follow the 82596 configure format.
 */
static char init_setup[] =
{
        0x8E,                   /* length, prefetch on */
        0xC8,                   /* fifo to 8, monitor off */
#ifdef CONFIG_VME
        0xc0,                   /* don't save bad frames */
#else
        0x80,                   /* don't save bad frames */
#endif
        0x2E,                   /* No source address insertion, 8 byte preamble */
        0x00,                   /* priority and backoff defaults */
        0x60,                   /* interframe spacing */
        0x00,                   /* slot time LSB */
        0xf2,                   /* slot time and retries */
        0x00,                   /* promiscuous mode */
        0x00,                   /* collision detect */
        0x40,                   /* minimum frame length */
        0xff,
        0x00,
        0x7f /*  *multi IA */ };
360
 
361
static int i596_open(struct net_device *dev);
362
static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
363
static void i596_interrupt(int irq, void *dev_id, struct pt_regs *regs);
364
static int i596_close(struct net_device *dev);
365
static struct net_device_stats *i596_get_stats(struct net_device *dev);
366
static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
367
static void i596_tx_timeout (struct net_device *dev);
368
static void print_eth(unsigned char *buf, char *str);
369
static void set_multicast_list(struct net_device *dev);
370
 
371
static int rx_ring_size = RX_RING_SIZE;
372
static int ticks_limit = 25;
373
static int max_cmd_backlog = TX_RING_SIZE-1;
374
 
375
 
376
/* Assert Channel Attention on the 82596, telling it to re-examine the
 * SCB.  The trigger mechanism is board-specific: a register write on
 * MVME16x, a dummy read of the base address on BVME6000, and an I/O
 * port write on the Apricot.
 */
static inline void CA(struct net_device *dev)
{
#ifdef ENABLE_MVME16x_NET
        if (MACH_IS_MVME16x)
                ((struct i596_reg *) dev->base_addr)->ca = 1;
#endif
#ifdef ENABLE_BVME6000_NET
        if (MACH_IS_BVME6000) {
                volatile u32 dummy;

                /* The read itself is the CA trigger on this board. */
                dummy = *(volatile u32 *) (dev->base_addr);
        }
#endif
#ifdef ENABLE_APRICOT
        if (MACH_IS_APRICOT)
                outw(0, (short) (dev->base_addr) + 4);
#endif
}
396
 
397
 
398
/* Issue a PORT command (PORT_RESET / PORT_SELFTEST / PORT_ALTSCP /
 * PORT_ALTDUMP) directly to the 82596.  The 32-bit value is presented
 * with the most significant word first; this access method only exists
 * on the VME boards (see the comment above the PORT_* defines).
 */
static inline void MPU_PORT(struct net_device *dev, int c, volatile void *x)
{
#ifdef ENABLE_MVME16x_NET
        if (MACH_IS_MVME16x) {
                struct i596_reg *p = (struct i596_reg *) (dev->base_addr);
                u32 v = (c) | (u32) (x);

                p->porthi = v & 0xffff;
                p->portlo = v >> 16;
        }
#endif
#ifdef ENABLE_BVME6000_NET
        if (MACH_IS_BVME6000) {
                u32 v = (u32) (c) | (u32) (x);

                /* Swap the halves, then write twice with a short pause. */
                v = ((u32) (v) << 16) | ((u32) (v) >> 16);
                *(volatile u32 *) dev->base_addr = v;
                udelay(1);
                *(volatile u32 *) dev->base_addr = v;
        }
#endif
}
417
 
418
 
419
static inline int wait_istat(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
420
{
421
        while (--delcnt && lp->iscp.stat)
422
                udelay(10);
423
        if (!delcnt) {
424
                printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n",
425
                     dev->name, str, lp->scb.status, lp->scb.command);
426
                return -1;
427
        }
428
        else
429
                return 0;
430
}
431
 
432
 
433
static inline int wait_cmd(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
434
{
435
        while (--delcnt && lp->scb.command)
436
                udelay(10);
437
        if (!delcnt) {
438
                printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n",
439
                     dev->name, str, lp->scb.status, lp->scb.command);
440
                return -1;
441
        }
442
        else
443
                return 0;
444
}
445
 
446
 
447
static inline int wait_cfg(struct net_device *dev, struct i596_cmd *cmd, int delcnt, char *str)
448
{
449
        volatile struct i596_cmd *c = cmd;
450
 
451
        while (--delcnt && c->command)
452
                udelay(10);
453
        if (!delcnt) {
454
                printk(KERN_ERR "%s: %s.\n", dev->name, str);
455
                return -1;
456
        }
457
        else
458
                return 0;
459
}
460
 
461
 
462
/* Dump the complete shared-memory state (SCP, ISCP, SCB, the pending
 * command list and both receive rings) to the kernel log.  Diagnostic
 * only; called from the error interrupt path.
 */
static void i596_display_data(struct net_device *dev)
{
        struct i596_private *lp = (struct i596_private *) dev->priv;
        struct i596_cmd *cmd;
        struct i596_rfd *rfd;
        struct i596_rbd *rbd;

        printk(KERN_ERR "lp and scp at %p, .sysbus = %08lx, .iscp = %p\n",
               &lp->scp, lp->scp.sysbus, lp->scp.iscp);
        printk(KERN_ERR "iscp at %p, iscp.stat = %08lx, .scb = %p\n",
               &lp->iscp, lp->iscp.stat, lp->iscp.scb);
        printk(KERN_ERR "scb at %p, scb.status = %04x, .command = %04x,"
                " .cmd = %p, .rfd = %p\n",
               &lp->scb, lp->scb.status, lp->scb.command,
                lp->scb.cmd, lp->scb.rfd);
        printk(KERN_ERR "   errors: crc %lx, align %lx, resource %lx,"
               " over %lx, rcvdt %lx, short %lx\n",
                lp->scb.crc_err, lp->scb.align_err, lp->scb.resource_err,
                lp->scb.over_err, lp->scb.rcvdt_err, lp->scb.short_err);
        /* Walk the singly linked pending-command list to its I596_NULL end. */
        cmd = lp->cmd_head;
        while (cmd != I596_NULL) {
                printk(KERN_ERR "cmd at %p, .status = %04x, .command = %04x, .b_next = %p\n",
                  cmd, cmd->status, cmd->command, cmd->b_next);
                cmd = cmd->v_next;
        }
        /* The rfd and rbd lists are circular, so walk until back at the head. */
        rfd = lp->rfd_head;
        printk(KERN_ERR "rfd_head = %p\n", rfd);
        do {
                printk(KERN_ERR "   %p .stat %04x, .cmd %04x, b_next %p, rbd %p,"
                        " count %04x\n",
                        rfd, rfd->stat, rfd->cmd, rfd->b_next, rfd->rbd,
                        rfd->count);
                rfd = rfd->v_next;
        } while (rfd != lp->rfd_head);
        rbd = lp->rbd_head;
        printk(KERN_ERR "rbd_head = %p\n", rbd);
        do {
                printk(KERN_ERR "   %p .count %04x, b_next %p, b_data %p, size %04x\n",
                        rbd, rbd->count, rbd->b_next, rbd->b_data, rbd->size);
                rbd = rbd->v_next;
        } while (rbd != lp->rbd_head);
}
504
 
505
 
506
#if defined(ENABLE_MVME16x_NET) || defined(ENABLE_BVME6000_NET)
507
static void i596_error(int irq, void *dev_id, struct pt_regs *regs)
508
{
509
        struct net_device *dev = dev_id;
510
#ifdef ENABLE_MVME16x_NET
511
        if (MACH_IS_MVME16x) {
512
                volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;
513
 
514
                pcc2[0x28] = 1;
515
                pcc2[0x2b] = 0x1d;
516
        }
517
#endif
518
#ifdef ENABLE_BVME6000_NET
519
        if (MACH_IS_BVME6000) {
520
                volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;
521
 
522
                *ethirq = 1;
523
                *ethirq = 3;
524
        }
525
#endif
526
        printk(KERN_ERR "%s: Error interrupt\n", dev->name);
527
        i596_display_data(dev);
528
}
529
#endif
530
 
531
/* Allocate the receive sk_buffs and wire up both circular receive lists:
 * the buffer descriptors (rbds), each owning one PKT_BUF_SZ sk_buff, and
 * the frame descriptors (rfds).  Only the first rfd is given an rbd and
 * the last is marked CMD_EOL, matching what rebuild_rx_bufs re-creates.
 * Panics on allocation failure (called once at open time).
 */
static inline void init_rx_bufs(struct net_device *dev)
{
        struct i596_private *lp = (struct i596_private *)dev->priv;
        int i;
        struct i596_rfd *rfd;
        struct i596_rbd *rbd;

        /* First build the Receive Buffer Descriptor List */

        for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
                struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ);

                if (skb == NULL)
                        panic("82596: alloc_skb() failed");
                skb->dev = dev;
                /* Link to the following rbd; the last entry is re-pointed
                 * at the head after the loop to close the ring.
                 */
                rbd->v_next = rbd+1;
                rbd->b_next = WSWAPrbd(virt_to_bus(rbd+1));
                rbd->b_addr = WSWAPrbd(virt_to_bus(rbd));
                rbd->skb = skb;
                rbd->v_data = skb->tail;
                rbd->b_data = WSWAPchar(virt_to_bus(skb->tail));
                rbd->size = PKT_BUF_SZ;
#ifdef __mc68000__
                /* Drop any cached copies so the chip's DMA data is seen. */
                cache_clear(virt_to_phys(skb->tail), PKT_BUF_SZ);
#endif
        }
        lp->rbd_head = lp->rbds;
        /* Close the rbd ring: last entry points back to the first. */
        rbd = lp->rbds + rx_ring_size - 1;
        rbd->v_next = lp->rbds;
        rbd->b_next = WSWAPrbd(virt_to_bus(lp->rbds));

        /* Now build the Receive Frame Descriptor List */

        for (i = 0, rfd = lp->rfds; i < rx_ring_size; i++, rfd++) {
                rfd->rbd = I596_NULL;
                rfd->v_next = rfd+1;
                rfd->v_prev = rfd-1;
                rfd->b_next = WSWAPrfd(virt_to_bus(rfd+1));
                rfd->cmd = CMD_FLEX;
        }
        lp->rfd_head = lp->rfds;
        lp->scb.rfd = WSWAPrfd(virt_to_bus(lp->rfds));
        /* Patch up the ring ends: first rfd gets the rbd list, first and
         * last are cross-linked, last is marked end-of-list.
         */
        rfd = lp->rfds;
        rfd->rbd = lp->rbd_head;
        rfd->v_prev = lp->rfds + rx_ring_size - 1;
        rfd = lp->rfds + rx_ring_size - 1;
        rfd->v_next = lp->rfds;
        rfd->b_next = WSWAPrfd(virt_to_bus(lp->rfds));
        rfd->cmd = CMD_EOL|CMD_FLEX;
}
581
 
582
static inline void remove_rx_bufs(struct net_device *dev)
583
{
584
        struct i596_private *lp = (struct i596_private *)dev->priv;
585
        struct i596_rbd *rbd;
586
        int i;
587
 
588
        for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
589
                if (rbd->skb == NULL)
590
                        break;
591
                dev_kfree_skb(rbd->skb);
592
        }
593
}
594
 
595
 
596
static void rebuild_rx_bufs(struct net_device *dev)
597
{
598
        struct i596_private *lp = (struct i596_private *) dev->priv;
599
        int i;
600
 
601
        /* Ensure rx frame/buffer descriptors are tidy */
602
 
603
        for (i = 0; i < rx_ring_size; i++) {
604
                lp->rfds[i].rbd = I596_NULL;
605
                lp->rfds[i].cmd = CMD_FLEX;
606
        }
607
        lp->rfds[rx_ring_size-1].cmd = CMD_EOL|CMD_FLEX;
608
        lp->rfd_head = lp->rfds;
609
        lp->scb.rfd = WSWAPrfd(virt_to_bus(lp->rfds));
610
        lp->rbd_head = lp->rbds;
611
        lp->rfds[0].rbd = WSWAPrbd(virt_to_bus(lp->rbds));
612
}
613
 
614
 
615
/* Full chip bring-up: reset the 82596, point it at our SCP/ISCP/SCB
 * chain, wait for initialisation, re-arm board interrupts, then queue
 * the configure / station-address / TDR commands and start the receive
 * unit.  Returns 0 on success, -1 (chip left in reset) on any timeout.
 * The statement order follows the 82596 initialisation procedure and
 * should not be rearranged.
 */
static int init_i596_mem(struct net_device *dev)
{
        struct i596_private *lp = (struct i596_private *) dev->priv;
#if !defined(ENABLE_MVME16x_NET) && !defined(ENABLE_BVME6000_NET)
        short ioaddr = dev->base_addr;
#endif
        unsigned long flags;

        MPU_PORT(dev, PORT_RESET, 0);

        udelay(100);            /* Wait 100us - seems to help */

#if defined(ENABLE_MVME16x_NET) || defined(ENABLE_BVME6000_NET)
#ifdef ENABLE_MVME16x_NET
        if (MACH_IS_MVME16x) {
                volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;

                /* Disable all ints for now */
                pcc2[0x28] = 1;
                pcc2[0x2a] = 0x48;
                /* Following disables snooping.  Snooping is not required
                 * as we make appropriate use of non-cached pages for
                 * shared data, and cache_push/cache_clear.
                 */
                pcc2[0x2b] = 0x08;
        }
#endif
#ifdef ENABLE_BVME6000_NET
        if (MACH_IS_BVME6000) {
                volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;

                *ethirq = 1;
        }
#endif

        /* change the scp address */

        MPU_PORT(dev, PORT_ALTSCP, (void *)virt_to_bus((void *)&lp->scp));

#elif defined(ENABLE_APRICOT)

        {
                u32 scp = virt_to_bus(&lp->scp);

                /* change the scp address */
                outw(0, ioaddr);
                outw(0, ioaddr);
                outb(4, ioaddr + 0xf);
                outw(scp | 2, ioaddr);
                outw(scp >> 16, ioaddr);
        }
#endif

        lp->last_cmd = jiffies;

        /* sysbus selects the chip's bus operating mode; the value is
         * board-specific.
         */
#ifdef ENABLE_MVME16x_NET
        if (MACH_IS_MVME16x)
                lp->scp.sysbus = 0x00000054;
#endif
#ifdef ENABLE_BVME6000_NET
        if (MACH_IS_BVME6000)
                lp->scp.sysbus = 0x0000004c;
#endif
#ifdef ENABLE_APRICOT
        if (MACH_IS_APRICOT)
                lp->scp.sysbus = 0x00440000;
#endif

        /* Chain SCP -> ISCP -> SCB using bus addresses (word-swapped on
         * 680x0, see the WSWAP* macros), then mark the ISCP busy; the
         * chip clears it when initialisation is done (wait_istat below).
         */
        lp->scp.iscp = WSWAPiscp(virt_to_bus((void *)&lp->iscp));
        lp->iscp.scb = WSWAPscb(virt_to_bus((void *)&lp->scb));
        lp->iscp.stat = ISCP_BUSY;
        lp->cmd_backlog = 0;

        lp->cmd_head = lp->scb.cmd = I596_NULL;

#ifdef ENABLE_BVME6000_NET
        if (MACH_IS_BVME6000) {
                lp->scb.t_on  = 7 * 25;
                lp->scb.t_off = 1 * 25;
        }
#endif

        DEB(DEB_INIT,printk(KERN_DEBUG "%s: starting i82596.\n", dev->name));

#if defined(ENABLE_APRICOT)
        (void) inb(ioaddr + 0x10);
        outb(4, ioaddr + 0xf);
#endif
        /* Channel Attention kicks the chip into reading the SCP chain. */
        CA(dev);

        if (wait_istat(dev,lp,1000,"initialization timed out"))
                goto failed;
        DEB(DEB_INIT,printk(KERN_DEBUG "%s: i82596 initialization successful\n", dev->name));

        /* Ensure rx frame/buffer descriptors are tidy */
        rebuild_rx_bufs(dev);
        lp->scb.command = 0;

#ifdef ENABLE_MVME16x_NET
        if (MACH_IS_MVME16x) {
                volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;

                /* Enable ints, etc. now */
                pcc2[0x2a] = 0x55;      /* Edge sensitive */
                pcc2[0x2b] = 0x15;
        }
#endif
#ifdef ENABLE_BVME6000_NET
        if (MACH_IS_BVME6000) {
                volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;

                *ethirq = 3;
        }
#endif


        DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdConfigure\n", dev->name));
        memcpy(lp->cf_cmd.i596_config, init_setup, 14);
        lp->cf_cmd.cmd.command = CmdConfigure;
        i596_add_cmd(dev, &lp->cf_cmd.cmd);

        DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
        memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, 6);
        lp->sa_cmd.cmd.command = CmdSASetup;
        i596_add_cmd(dev, &lp->sa_cmd.cmd);

        DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdTDR\n", dev->name));
        lp->tdr_cmd.cmd.command = CmdTDR;
        i596_add_cmd(dev, &lp->tdr_cmd.cmd);

        spin_lock_irqsave (&lp->lock, flags);

        if (wait_cmd(dev,lp,1000,"timed out waiting to issue RX_START")) {
                spin_unlock_irqrestore (&lp->lock, flags);
                goto failed;
        }
        DEB(DEB_INIT,printk(KERN_DEBUG "%s: Issuing RX_START\n", dev->name));
        lp->scb.command = RX_START;
        CA(dev);

        spin_unlock_irqrestore (&lp->lock, flags);

        if (wait_cmd(dev,lp,1000,"RX_START not processed"))
                goto failed;
        DEB(DEB_INIT,printk(KERN_DEBUG "%s: Receive unit started OK\n", dev->name));
        return 0;

failed:
        printk(KERN_CRIT "%s: Failed to initialise 82596\n", dev->name);
        MPU_PORT(dev, PORT_RESET, 0);
        return -1;
}
767
 
768
static inline int i596_rx(struct net_device *dev)
769
{
770
        struct i596_private *lp = (struct i596_private *)dev->priv;
771
        struct i596_rfd *rfd;
772
        struct i596_rbd *rbd;
773
        int frames = 0;
774
 
775
        DEB(DEB_RXFRAME,printk(KERN_DEBUG "i596_rx(), rfd_head %p, rbd_head %p\n",
776
                        lp->rfd_head, lp->rbd_head));
777
 
778
        rfd = lp->rfd_head;             /* Ref next frame to check */
779
 
780
        while ((rfd->stat) & STAT_C) {  /* Loop while complete frames */
781
                if (rfd->rbd == I596_NULL)
782
                        rbd = I596_NULL;
783
                else if (rfd->rbd == lp->rbd_head->b_addr)
784
                        rbd = lp->rbd_head;
785
                else {
786
                        printk(KERN_CRIT "%s: rbd chain broken!\n", dev->name);
787
                        /* XXX Now what? */
788
                        rbd = I596_NULL;
789
                }
790
                DEB(DEB_RXFRAME, printk(KERN_DEBUG "  rfd %p, rfd.rbd %p, rfd.stat %04x\n",
791
                        rfd, rfd->rbd, rfd->stat));
792
 
793
                if (rbd != I596_NULL && ((rfd->stat) & STAT_OK)) {
794
                        /* a good frame */
795
                        int pkt_len = rbd->count & 0x3fff;
796
                        struct sk_buff *skb = rbd->skb;
797
                        int rx_in_place = 0;
798
 
799
                        DEB(DEB_RXADDR,print_eth(rbd->v_data, "received"));
800
                        frames++;
801
 
802
                        /* Check if the packet is long enough to just accept
803
                         * without copying to a properly sized skbuff.
804
                         */
805
 
806
                        if (pkt_len > rx_copybreak) {
807
                                struct sk_buff *newskb;
808
 
809
                                /* Get fresh skbuff to replace filled one. */
810
                                newskb = dev_alloc_skb(PKT_BUF_SZ);
811
                                if (newskb == NULL) {
812
                                        skb = NULL;     /* drop pkt */
813
                                        goto memory_squeeze;
814
                                }
815
                                /* Pass up the skb already on the Rx ring. */
816
                                skb_put(skb, pkt_len);
817
                                rx_in_place = 1;
818
                                rbd->skb = newskb;
819
                                newskb->dev = dev;
820
                                rbd->v_data = newskb->tail;
821
                                rbd->b_data = WSWAPchar(virt_to_bus(newskb->tail));
822
#ifdef __mc68000__
823
                                cache_clear(virt_to_phys(newskb->tail), PKT_BUF_SZ);
824
#endif
825
                        }
826
                        else
827
                                skb = dev_alloc_skb(pkt_len + 2);
828
memory_squeeze:
829
                        if (skb == NULL) {
830
                                /* XXX tulip.c can defer packets here!! */
831
                                printk(KERN_WARNING "%s: i596_rx Memory squeeze, dropping packet.\n", dev->name);
832
                                lp->stats.rx_dropped++;
833
                        }
834
                        else {
835
                                skb->dev = dev;
836
                                if (!rx_in_place) {
837
                                        /* 16 byte align the data fields */
838
                                        skb_reserve(skb, 2);
839
                                        memcpy(skb_put(skb,pkt_len), rbd->v_data, pkt_len);
840
                                }
841
                                skb->protocol=eth_type_trans(skb,dev);
842
                                skb->len = pkt_len;
843
#ifdef __mc68000__
844
                                cache_clear(virt_to_phys(rbd->skb->tail),
845
                                                pkt_len);
846
#endif
847
                                netif_rx(skb);
848
                                dev->last_rx = jiffies;
849
                                lp->stats.rx_packets++;
850
                                lp->stats.rx_bytes+=pkt_len;
851
                        }
852
                }
853
                else {
854
                        DEB(DEB_ERRORS, printk(KERN_DEBUG "%s: Error, rfd.stat = 0x%04x\n",
855
                                        dev->name, rfd->stat));
856
                        lp->stats.rx_errors++;
857
                        if ((rfd->stat) & 0x0001)
858
                                lp->stats.collisions++;
859
                        if ((rfd->stat) & 0x0080)
860
                                lp->stats.rx_length_errors++;
861
                        if ((rfd->stat) & 0x0100)
862
                                lp->stats.rx_over_errors++;
863
                        if ((rfd->stat) & 0x0200)
864
                                lp->stats.rx_fifo_errors++;
865
                        if ((rfd->stat) & 0x0400)
866
                                lp->stats.rx_frame_errors++;
867
                        if ((rfd->stat) & 0x0800)
868
                                lp->stats.rx_crc_errors++;
869
                        if ((rfd->stat) & 0x1000)
870
                                lp->stats.rx_length_errors++;
871
                }
872
 
873
                /* Clear the buffer descriptor count and EOF + F flags */
874
 
875
                if (rbd != I596_NULL && (rbd->count & 0x4000)) {
876
                        rbd->count = 0;
877
                        lp->rbd_head = rbd->v_next;
878
                }
879
 
880
                /* Tidy the frame descriptor, marking it as end of list */
881
 
882
                rfd->rbd = I596_NULL;
883
                rfd->stat = 0;
884
                rfd->cmd = CMD_EOL|CMD_FLEX;
885
                rfd->count = 0;
886
 
887
                /* Remove end-of-list from old end descriptor */
888
 
889
                rfd->v_prev->cmd = CMD_FLEX;
890
 
891
                /* Update record of next frame descriptor to process */
892
 
893
                lp->scb.rfd = rfd->b_next;
894
                lp->rfd_head = rfd->v_next;
895
                rfd = lp->rfd_head;
896
        }
897
 
898
        DEB(DEB_RXFRAME,printk(KERN_DEBUG "frames %d\n", frames));
899
 
900
        return 0;
901
}
902
 
903
 
904
/*
 * Discard every command still queued to the chip.
 *
 * Walks the software command list from cmd_head, unlinking each entry.
 * Queued Tx commands have their sk_buff freed and are accounted as
 * aborted tx errors; every entry is unlinked (v_next/b_next set to the
 * I596_NULL sentinel) and Tx slots are marked free (command = 0) so the
 * xmit ring can reuse them.  Finally waits for the SCB to go idle and
 * clears the SCB command pointer so the chip has nothing left to fetch.
 * Used from the reset and close paths.
 */
static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
{
	struct i596_cmd *ptr;

	while (lp->cmd_head != I596_NULL) {
		ptr = lp->cmd_head;
		lp->cmd_head = ptr->v_next;
		lp->cmd_backlog--;

		switch ((ptr->command) & 0x7) {		/* low 3 bits = command opcode */
		case CmdTx:
			{
				struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
				struct sk_buff *skb = tx_cmd->skb;

				dev_kfree_skb(skb);

				/* Frame never made it out: count as aborted */
				lp->stats.tx_errors++;
				lp->stats.tx_aborted_errors++;

				ptr->v_next = ptr->b_next = I596_NULL;
				tx_cmd->cmd.command = 0;  /* Mark as free */
				break;
			}
		default:
			ptr->v_next = ptr->b_next = I596_NULL;
		}
	}

	wait_cmd(dev,lp,100,"i596_cleanup_cmd timed out");
	lp->scb.cmd = I596_NULL;	/* chip's command chain is now empty */
}
936
 
937
/*
 * Hard-restart the adapter after a command timeout or fatal error.
 *
 * Aborts both the command and receive units, discards all queued
 * commands, drains any frames already received, then re-runs the full
 * shared-memory initialisation.  ioaddr is unused in this body but kept
 * for the historical call signature.
 */
static inline void i596_reset(struct net_device *dev, struct i596_private *lp, int ioaddr)
{
	unsigned long flags;

	DEB(DEB_RESET,printk(KERN_DEBUG "i596_reset\n"));

	spin_lock_irqsave (&lp->lock, flags);

	/* Let any in-flight SCB command finish before issuing the abort */
	wait_cmd(dev,lp,100,"i596_reset timed out");

	netif_stop_queue(dev);

	lp->scb.command = CUC_ABORT | RX_ABORT;
	CA(dev);	/* channel attention: make the chip fetch the command */

	/* wait for shutdown */
	wait_cmd(dev,lp,1000,"i596_reset 2 timed out");
	spin_unlock_irqrestore (&lp->lock, flags);

	/* Throw away everything that was queued, pick up pending rx frames */
	i596_cleanup_cmd(dev,lp);
	i596_rx(dev);

	netif_start_queue(dev);
	init_i596_mem(dev);	/* full re-initialisation of chip + shared memory */
}
962
 
963
/*
 * Append a command block to the chip's command chain.
 *
 * The command is marked end-of-list with interrupt-on-completion.  If
 * the chain was empty the SCB is pointed at it and CUC_START is issued;
 * otherwise it is linked after the current tail and the chip reaches it
 * on its own.  If the backlog exceeds max_cmd_backlog and the oldest
 * completion is older than ticks_limit jiffies, the adapter is assumed
 * wedged and is reset.
 */
static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
{
	struct i596_private *lp = (struct i596_private *) dev->priv;
	int ioaddr = dev->base_addr;
	unsigned long flags;

	DEB(DEB_ADDCMD,printk(KERN_DEBUG "i596_add_cmd\n"));

	cmd->status = 0;
	cmd->command |= (CMD_EOL | CMD_INTR);	/* new tail: end-of-list + irq */
	cmd->v_next = cmd->b_next = I596_NULL;

	spin_lock_irqsave (&lp->lock, flags);

	if (lp->cmd_head != I596_NULL) {
		/* Link after the tail: virtual pointer for us, bus pointer
		 * (byte-swapped as the chip expects) for the i596 */
		lp->cmd_tail->v_next = cmd;
		lp->cmd_tail->b_next = WSWAPcmd(virt_to_bus(&cmd->status));
	} else {
		lp->cmd_head = cmd;
		wait_cmd(dev,lp,100,"i596_add_cmd timed out");
		lp->scb.cmd = WSWAPcmd(virt_to_bus(&cmd->status));
		lp->scb.command = CUC_START;
		CA(dev);
	}
	lp->cmd_tail = cmd;
	lp->cmd_backlog++;

	spin_unlock_irqrestore (&lp->lock, flags);

	if (lp->cmd_backlog > max_cmd_backlog) {
		unsigned long tickssofar = jiffies - lp->last_cmd;

		if (tickssofar < ticks_limit)
			return;	/* chip is slow but still making progress */

		printk(KERN_NOTICE "%s: command unit timed out, status resetting.\n", dev->name);

		i596_reset(dev, lp, ioaddr);
	}
}
1003
 
1004
static int i596_open(struct net_device *dev)
1005
{
1006
        int res = 0;
1007
 
1008
        DEB(DEB_OPEN,printk(KERN_DEBUG "%s: i596_open() irq %d.\n", dev->name, dev->irq));
1009
 
1010
        if (request_irq(dev->irq, &i596_interrupt, 0, "i82596", dev)) {
1011
                printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
1012
                return -EAGAIN;
1013
        }
1014
#ifdef ENABLE_MVME16x_NET
1015
        if (MACH_IS_MVME16x) {
1016
                if (request_irq(0x56, &i596_error, 0, "i82596_error", dev))
1017
                        return -EAGAIN;
1018
        }
1019
#endif
1020
        init_rx_bufs(dev);
1021
 
1022
        netif_start_queue(dev);
1023
 
1024
        MOD_INC_USE_COUNT;
1025
 
1026
        /* Initialize the 82596 memory */
1027
        if (init_i596_mem(dev)) {
1028
                res = -EAGAIN;
1029
                free_irq(dev->irq, dev);
1030
        }
1031
 
1032
        return res;
1033
}
1034
 
1035
/*
 * Transmit watchdog, called by the network core after TX_TIMEOUT
 * jiffies with no transmit progress.
 *
 * First occurrence: kick the chip with CUC_START | RX_START and record
 * the current tx_packets count.  If the count has not moved since the
 * last kick, the gentle approach failed and the board is fully reset.
 */
static void i596_tx_timeout (struct net_device *dev)
{
	struct i596_private *lp = (struct i596_private *) dev->priv;
	int ioaddr = dev->base_addr;

	/* Transmitter timeout, serious problems. */
	DEB(DEB_ERRORS,printk(KERN_ERR "%s: transmit timed out, status resetting.\n",
			dev->name));

	lp->stats.tx_errors++;

	/* Try to restart the adaptor */
	if (lp->last_restart == lp->stats.tx_packets) {
		/* No packets went out since the previous kick: escalate */
		DEB(DEB_ERRORS,printk(KERN_ERR "Resetting board.\n"));
		/* Shutdown and restart */
		i596_reset (dev, lp, ioaddr);
	} else {
		/* Issue a channel attention signal */
		DEB(DEB_ERRORS,printk(KERN_ERR "Kicking board.\n"));
		lp->scb.command = CUC_START | RX_START;
		CA (dev);
		lp->last_restart = lp->stats.tx_packets;
	}

	dev->trans_start = jiffies;
	netif_wake_queue (dev);
}
1062
 
1063
 
1064
/*
 * Queue one sk_buff for transmission.
 *
 * Frames shorter than ETH_ZLEN are padded first.  Each frame uses one
 * tx_cmd + one tbd pair taken round-robin from the rings; if the slot
 * at next_tx_cmd still has a non-zero command word the chip has not
 * completed it yet, i.e. the ring is full, and the packet is dropped
 * (TBD in the file header: defer instead, as tulip does).  The skb is
 * freed by the interrupt handler once the chip signals completion.
 * Always returns 0.
 */
static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct i596_private *lp = (struct i596_private *) dev->priv;
	struct tx_cmd *tx_cmd;
	struct i596_tbd *tbd;
	short length = skb->len;
	dev->trans_start = jiffies;

	DEB(DEB_STARTTX,printk(KERN_DEBUG "%s: i596_start_xmit(%x,%x) called\n", dev->name,
				skb->len, (unsigned int)skb->data));

	if(skb->len < ETH_ZLEN)
	{
		/* NOTE(review): assumes skb_padto() releases the skb on
		 * failure — nothing on this path frees it.  Confirm against
		 * the 2.4 skb_padto() semantics. */
		skb = skb_padto(skb, ETH_ZLEN);
		if(skb == NULL)
			return 0;
		length = ETH_ZLEN;
	}
	netif_stop_queue(dev);

	tx_cmd = lp->tx_cmds + lp->next_tx_cmd;
	tbd = lp->tbds + lp->next_tx_cmd;

	if (tx_cmd->cmd.command) {
		/* Slot still owned by the chip: ring full, drop the frame */
		printk(KERN_NOTICE "%s: xmit ring full, dropping packet.\n",
				dev->name);
		lp->stats.tx_dropped++;

		dev_kfree_skb(skb);
	} else {
		if (++lp->next_tx_cmd == TX_RING_SIZE)
			lp->next_tx_cmd = 0;
		tx_cmd->tbd = WSWAPtbd(virt_to_bus(tbd));
		tbd->next = I596_NULL;

		tx_cmd->cmd.command = CMD_FLEX | CmdTx;
		tx_cmd->skb = skb;	/* freed in the irq handler on completion */

		tx_cmd->pad = 0;
		tx_cmd->size = 0;
		tbd->pad = 0;
		tbd->size = EOF | length;	/* single buffer holds whole frame */

		tbd->data = WSWAPchar(virt_to_bus(skb->data));

#ifdef __mc68000__
		/* Push CPU cache so the bus-mastering chip sees the data */
		cache_push(virt_to_phys(skb->data), length);
#endif
		DEB(DEB_TXADDR,print_eth(skb->data, "tx-queued"));
		i596_add_cmd(dev, &tx_cmd->cmd);

		lp->stats.tx_packets++;
		lp->stats.tx_bytes += length;
	}

	netif_start_queue(dev);

	return 0;
}
1123
 
1124
static void print_eth(unsigned char *add, char *str)
1125
{
1126
        int i;
1127
 
1128
        printk(KERN_DEBUG "i596 0x%p, ", add);
1129
        for (i = 0; i < 6; i++)
1130
                printk(" %02X", add[i + 6]);
1131
        printk(" -->");
1132
        for (i = 0; i < 6; i++)
1133
                printk(" %02X", add[i]);
1134
        printk(" %02X%02X, %s\n", add[12], add[13], str);
1135
}
1136
 
1137
/*
 * Probe for a supported 82596 board and fill in the net_device.
 *
 * Supports (compile-time selected): MVME16x and BVME6000 680x0 VME
 * boards (MAC address read from NOVRAM / RTC RAM, fixed base/irq) and
 * the Apricot ISA interface (fixed I/O at 0x300, MAC read from I/O
 * ports and sanity-checked).  A single page is allocated for the
 * driver's private/shared-memory area and the device methods are wired
 * up.  Only ever probes once (static 'probed' guard).
 * Returns 0 on success or a negative errno.
 */
int __init i82596_probe(struct net_device *dev)
{
	int i;
	struct i596_private *lp;
	char eth_addr[8];
	static int probed;

	if (probed)
		return -ENODEV;
	probed++;
#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		if (mvme16x_config & MVME16x_CONFIG_NO_ETHERNET) {
			printk(KERN_NOTICE "Ethernet probe disabled - chip not present\n");
			return -ENODEV;
		}
		memcpy(eth_addr, (void *) 0xfffc1f2c, 6);	/* YUCK! Get addr from NOVRAM */
		dev->base_addr = MVME_I596_BASE;
		dev->irq = (unsigned) MVME16x_IRQ_I596;
	}
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		volatile unsigned char *rtc = (unsigned char *) BVME_RTC_BASE;
		unsigned char msr = rtc[3];	/* save mode/control register */
		int i;

		rtc[3] |= 0x80;		/* enable RAM access while we read */
		for (i = 0; i < 6; i++)
			eth_addr[i] = rtc[i * 4 + 7];	/* Stored in RTC RAM at offset 1 */
		rtc[3] = msr;		/* restore original register */
		dev->base_addr = BVME_I596_BASE;
		dev->irq = (unsigned) BVME_IRQ_I596;
	}
#endif
#ifdef ENABLE_APRICOT
	{
		int checksum = 0;
		int ioaddr = 0x300;

		/* this is easy the ethernet interface can only be at 0x300 */
		/* first check nothing is already registered here */

		if (!request_region(ioaddr, I596_TOTAL_SIZE, dev->name)) {
			printk(KERN_ERR "82596: IO address 0x%04x in use\n", ioaddr);
			return -EBUSY;
		}

		for (i = 0; i < 8; i++) {
			eth_addr[i] = inb(ioaddr + 8 + i);
			checksum += eth_addr[i];
		}

		/* checksum is a multiple of 0x100, got this wrong first time
		   some machines have 0x100, some 0x200. The DOS driver doesn't
		   even bother with the checksum.
		   Some other boards trip the checksum.. but then appear as
		   ether address 0. Trap these - AC */

		if ((checksum % 0x100) ||
		    (memcmp(eth_addr, "\x00\x00\x49", 3) != 0)) {
			release_region(ioaddr, I596_TOTAL_SIZE);
			return -ENODEV;
		}

		dev->base_addr = ioaddr;
		dev->irq = 10;
	}
#endif
	/* One page holds struct i596_private incl. the chip's shared data */
	dev->mem_start = (int)__get_free_pages(GFP_ATOMIC, 0);
	if (!dev->mem_start) {
#ifdef ENABLE_APRICOT
		release_region(dev->base_addr, I596_TOTAL_SIZE);
#endif
		return -ENOMEM;
	}

	ether_setup(dev);
	DEB(DEB_PROBE,printk(KERN_INFO "%s: 82596 at %#3lx,", dev->name, dev->base_addr));

	/* Copy the probed MAC into the device while printing it */
	for (i = 0; i < 6; i++)
		DEB(DEB_PROBE,printk(" %2.2X", dev->dev_addr[i] = eth_addr[i]));

	DEB(DEB_PROBE,printk(" IRQ %d.\n", dev->irq));

	DEB(DEB_PROBE,printk(KERN_INFO "%s", version));

	/* The 82596-specific entries in the device structure. */
	dev->open = i596_open;
	dev->stop = i596_close;
	dev->hard_start_xmit = i596_start_xmit;
	dev->get_stats = i596_get_stats;
	dev->set_multicast_list = set_multicast_list;
	dev->tx_timeout = i596_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;

	dev->priv = (void *)(dev->mem_start);

	lp = (struct i596_private *) dev->priv;
	DEB(DEB_INIT,printk(KERN_DEBUG "%s: lp at 0x%08lx (%d bytes), lp->scb at 0x%08lx\n",
			dev->name, (unsigned long)lp,
			sizeof(struct i596_private), (unsigned long)&lp->scb));
	memset((void *) lp, 0, sizeof(struct i596_private));

#ifdef __mc68000__
	/* Shared page must be uncached: the chip DMAs into it directly */
	cache_push(virt_to_phys((void *)(dev->mem_start)), 4096);
	cache_clear(virt_to_phys((void *)(dev->mem_start)), 4096);
	kernel_set_cachemode((void *)(dev->mem_start), 4096, IOMAP_NOCACHE_SER);
#endif
	lp->scb.command = 0;
	lp->scb.cmd = I596_NULL;
	lp->scb.rfd = I596_NULL;
	lp->lock = SPIN_LOCK_UNLOCKED;

	return 0;
}
1253
 
1254
/*
 * Interrupt handler.
 *
 * Reads the SCB status and acknowledges the raised bits (the per-printk
 * text below documents them: 0x8000 = command completed, 0x2000 =
 * command unit went inactive, 0x4000 = frame received, 0x1000 = receive
 * unit went inactive).  Completed commands are reaped from the chain —
 * Tx completions free the skb and update error stats, TDR results are
 * reported, Configure/MulticastList slots are marked free.  Received
 * frames are handed to i596_rx(), and a stalled receive unit is
 * restarted with fresh buffers.  Board-specific interrupt-ack pokes
 * follow, and a final CA() pushes the ack command to the chip.
 */
static void i596_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct i596_private *lp;
	short ioaddr;
	unsigned short status, ack_cmd = 0;

#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		/* Shared vector: bus-error interrupts go to i596_error() */
		if (*(char *) BVME_LOCAL_IRQ_STAT & BVME_ETHERR) {
			i596_error(irq, dev_id, regs);
			return;
		}
	}
#endif
	if (dev == NULL) {
		printk(KERN_ERR "i596_interrupt(): irq %d for unknown device.\n", irq);
		return;
	}

	ioaddr = dev->base_addr;
	lp = (struct i596_private *) dev->priv;

	spin_lock (&lp->lock);

	wait_cmd(dev,lp,100,"i596 interrupt, timeout");
	status = lp->scb.status;

	DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt, IRQ %d, status %4.4x.\n",
			dev->name, irq, status));

	/* Writing the raised status bits back acknowledges them */
	ack_cmd = status & 0xf000;

	if ((status & 0x8000) || (status & 0x2000)) {
		struct i596_cmd *ptr;

		if ((status & 0x8000))
			DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt completed command.\n", dev->name));
		if ((status & 0x2000))
			DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt command unit inactive %x.\n", dev->name, status & 0x0700));

		/* Reap every command the chip has marked complete (STAT_C) */
		while ((lp->cmd_head != I596_NULL) && (lp->cmd_head->status & STAT_C)) {
			ptr = lp->cmd_head;

			DEB(DEB_STATUS,printk(KERN_DEBUG "cmd_head->status = %04x, ->command = %04x\n",
				       lp->cmd_head->status, lp->cmd_head->command));
			lp->cmd_head = ptr->v_next;
			lp->cmd_backlog--;

			switch ((ptr->command) & 0x7) {	/* low 3 bits = opcode */
			case CmdTx:
			    {
				struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
				struct sk_buff *skb = tx_cmd->skb;

				if ((ptr->status) & STAT_OK) {
					DEB(DEB_TXADDR,print_eth(skb->data, "tx-done"));
				} else {
					/* Decode the Tx status error bits */
					lp->stats.tx_errors++;
					if ((ptr->status) & 0x0020)
						lp->stats.collisions++;
					if (!((ptr->status) & 0x0040))
						lp->stats.tx_heartbeat_errors++;
					if ((ptr->status) & 0x0400)
						lp->stats.tx_carrier_errors++;
					if ((ptr->status) & 0x0800)
						lp->stats.collisions++;
					if ((ptr->status) & 0x1000)
						lp->stats.tx_aborted_errors++;
				}

				dev_kfree_skb_irq(skb);

				tx_cmd->cmd.command = 0; /* Mark free */
				break;
			    }
			case CmdTDR:
			    {
				/* Time-domain-reflectometer (cable test) result */
				unsigned short status = ((struct tdr_cmd *)ptr)->status;

				if (status & 0x8000) {
					DEB(DEB_TDR,printk(KERN_INFO "%s: link ok.\n", dev->name));
				} else {
					if (status & 0x4000)
						printk(KERN_ERR "%s: Transceiver problem.\n", dev->name);
					if (status & 0x2000)
						printk(KERN_ERR "%s: Termination problem.\n", dev->name);
					if (status & 0x1000)
						printk(KERN_ERR "%s: Short circuit.\n", dev->name);

					DEB(DEB_TDR,printk(KERN_INFO "%s: Time %d.\n", dev->name, status & 0x07ff));
				}
				break;
			    }
			case CmdConfigure:
			case CmdMulticastList:
				/* Zap command so set_multicast_list() knows it is free */
				ptr->command = 0;
				break;
			}
			ptr->v_next = ptr->b_next = I596_NULL;
			lp->last_cmd = jiffies;	/* timestamp for the wedge detector */
		}

		/* Clear EOL/INTR on all but the tail so the chip runs through
		 * the remaining chain without stopping */
		ptr = lp->cmd_head;
		while ((ptr != I596_NULL) && (ptr != lp->cmd_tail)) {
			ptr->command &= 0x1fff;
			ptr = ptr->v_next;
		}

		if ((lp->cmd_head != I596_NULL))
			ack_cmd |= CUC_START;	/* restart CU on remaining work */
		lp->scb.cmd = WSWAPcmd(virt_to_bus(&lp->cmd_head->status));
	}
	if ((status & 0x1000) || (status & 0x4000)) {
		if ((status & 0x4000))
			DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt received a frame.\n", dev->name));
		i596_rx(dev);
		/* Only RX_START if stopped - RGH 07-07-96 */
		if (status & 0x1000) {
			if (netif_running(dev)) {
				DEB(DEB_ERRORS,printk(KERN_ERR "%s: i596 interrupt receive unit inactive, status 0x%x\n", dev->name, status));
				ack_cmd |= RX_START;
				lp->stats.rx_errors++;
				lp->stats.rx_fifo_errors++;
				rebuild_rx_bufs(dev);	/* rebuild ring before restart */
			}
		}
	}
	wait_cmd(dev,lp,100,"i596 interrupt, timeout");
	lp->scb.command = ack_cmd;

#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		/* Ack the interrupt */

		volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;

		pcc2[0x2a] |= 0x08;
	}
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;

		*ethirq = 1;
		*ethirq = 3;
	}
#endif
#ifdef ENABLE_APRICOT
	(void) inb(ioaddr + 0x10);
	outb(4, ioaddr + 0xf);
#endif
	CA(dev);	/* hand the ack command to the chip */

	DEB(DEB_INTS,printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name));

	spin_unlock (&lp->lock);
	return;
}
1414
 
1415
/*
 * Close the interface: abort both chip units, discard queued commands,
 * disable board-level interrupt sources, and release the IRQ and
 * receive buffers.  Mirrors the acquisitions made in i596_open().
 */
static int i596_close(struct net_device *dev)
{
	struct i596_private *lp = (struct i596_private *) dev->priv;
	unsigned long flags;

	netif_stop_queue(dev);

	DEB(DEB_INIT,printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
		       dev->name, lp->scb.status));

	/* Old-style irq disable around the shutdown handshake */
	save_flags(flags);
	cli();

	wait_cmd(dev,lp,100,"close1 timed out");
	lp->scb.command = CUC_ABORT | RX_ABORT;
	CA(dev);

	wait_cmd(dev,lp,100,"close2 timed out");
	restore_flags(flags);
	DEB(DEB_STRUCT,i596_display_data(dev));
	i596_cleanup_cmd(dev,lp);	/* free anything still queued */

#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;

		/* Disable all ints */
		pcc2[0x28] = 1;
		pcc2[0x2a] = 0x40;
		pcc2[0x2b] = 0x40;	/* Set snooping bits now! */
	}
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;

		*ethirq = 1;	/* disable the board's ethernet interrupt */
	}
#endif

	free_irq(dev->irq, dev);
	remove_rx_bufs(dev);
	MOD_DEC_USE_COUNT;

	return 0;
}
1461
 
1462
static struct net_device_stats *
1463
 i596_get_stats(struct net_device *dev)
1464
{
1465
        struct i596_private *lp = (struct i596_private *) dev->priv;
1466
 
1467
        return &lp->stats;
1468
}
1469
 
1470
/*
1471
 *    Set or clear the multicast filter for this adaptor.
1472
 */
1473
 
1474
static void set_multicast_list(struct net_device *dev)
1475
{
1476
        struct i596_private *lp = (struct i596_private *) dev->priv;
1477
        int config = 0, cnt;
1478
 
1479
        DEB(DEB_MULTI,printk(KERN_DEBUG "%s: set multicast list, %d entries, promisc %s, allmulti %s\n",
1480
                dev->name, dev->mc_count,
1481
                dev->flags & IFF_PROMISC  ? "ON" : "OFF",
1482
                dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));
1483
 
1484
        if (wait_cfg(dev, &lp->cf_cmd.cmd, 1000, "config change request timed out"))
1485
                return;
1486
 
1487
        if ((dev->flags & IFF_PROMISC) && !(lp->cf_cmd.i596_config[8] & 0x01)) {
1488
                lp->cf_cmd.i596_config[8] |= 0x01;
1489
                config = 1;
1490
        }
1491
        if (!(dev->flags & IFF_PROMISC) && (lp->cf_cmd.i596_config[8] & 0x01)) {
1492
                lp->cf_cmd.i596_config[8] &= ~0x01;
1493
                config = 1;
1494
        }
1495
        if ((dev->flags & IFF_ALLMULTI) && (lp->cf_cmd.i596_config[11] & 0x20)) {
1496
                lp->cf_cmd.i596_config[11] &= ~0x20;
1497
                config = 1;
1498
        }
1499
        if (!(dev->flags & IFF_ALLMULTI) && !(lp->cf_cmd.i596_config[11] & 0x20)) {
1500
                lp->cf_cmd.i596_config[11] |= 0x20;
1501
                config = 1;
1502
        }
1503
        if (config) {
1504
                lp->cf_cmd.cmd.command = CmdConfigure;
1505
                i596_add_cmd(dev, &lp->cf_cmd.cmd);
1506
        }
1507
 
1508
        cnt = dev->mc_count;
1509
        if (cnt > MAX_MC_CNT)
1510
        {
1511
                cnt = MAX_MC_CNT;
1512
                printk(KERN_ERR "%s: Only %d multicast addresses supported",
1513
                        dev->name, cnt);
1514
        }
1515
 
1516
        if (dev->mc_count > 0) {
1517
                struct dev_mc_list *dmi;
1518
                unsigned char *cp;
1519
                struct mc_cmd *cmd;
1520
 
1521
                if (wait_cfg(dev, &lp->mc_cmd.cmd, 1000, "multicast list change request timed out"))
1522
                        return;
1523
                cmd = &lp->mc_cmd;
1524
                cmd->cmd.command = CmdMulticastList;
1525
                cmd->mc_cnt = dev->mc_count * 6;
1526
                cp = cmd->mc_addrs;
1527
                for (dmi = dev->mc_list; cnt && dmi != NULL; dmi = dmi->next, cnt--, cp += 6) {
1528
                        memcpy(cp, dmi->dmi_addr, 6);
1529
                        if (i596_debug > 1)
1530
                                DEB(DEB_MULTI,printk(KERN_INFO "%s: Adding address %02x:%02x:%02x:%02x:%02x:%02x\n",
1531
                                                dev->name, cp[0],cp[1],cp[2],cp[3],cp[4],cp[5]));
1532
                }
1533
                i596_add_cmd(dev, &cmd->cmd);
1534
        }
1535
}
1536
 
1537
#ifdef MODULE
1538
static struct net_device dev_82596 = { init: i82596_probe };
1539
 
1540
#ifdef ENABLE_APRICOT
1541
static int io = 0x300;
1542
static int irq = 10;
1543
MODULE_PARM(irq, "i");
1544
MODULE_PARM_DESC(irq, "Apricot IRQ number");
1545
#endif
1546
 
1547
MODULE_PARM(debug, "i");
1548
MODULE_PARM_DESC(debug, "i82596 debug mask");
1549
static int debug = -1;
1550
 
1551
int init_module(void)
1552
{
1553
#ifdef ENABLE_APRICOT
1554
        dev_82596.base_addr = io;
1555
        dev_82596.irq = irq;
1556
#endif
1557
        if (debug >= 0)
1558
                i596_debug = debug;
1559
        if (register_netdev(&dev_82596) != 0)
1560
                return -EIO;
1561
        return 0;
1562
}
1563
 
1564
void cleanup_module(void)
1565
{
1566
        unregister_netdev(&dev_82596);
1567
#ifdef __mc68000__
1568
        /* XXX This assumes default cache mode to be IOMAP_FULL_CACHING,
1569
         * XXX which may be invalid (CONFIG_060_WRITETHROUGH)
1570
         */
1571
 
1572
        kernel_set_cachemode((void *)(dev_82596.mem_start), 4096,
1573
                        IOMAP_FULL_CACHING);
1574
#endif
1575
        free_page ((u32)(dev_82596.mem_start));
1576
        dev_82596.priv = NULL;
1577
#ifdef ENABLE_APRICOT
1578
        /* If we don't do this, we can't re-insmod it later. */
1579
        release_region(dev_82596.base_addr, I596_TOTAL_SIZE);
1580
#endif
1581
}
1582
 
1583
#endif                          /* MODULE */
1584
 
1585
/*
1586
 * Local variables:
1587
 *  compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c 82596.c"
1588
 * End:
1589
 */

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.