OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

[/] [or1k/] [trunk/] [linux/] [linux-2.4/] [drivers/] [net/] [bmac.c] - Blame information for rev 1275

Go to most recent revision | Details | Compare with Previous | View Log

Line No. Rev Author Line
1 1275 phoenix
/*
2
 * Network device driver for the BMAC ethernet controller on
3
 * Apple Powermacs.  Assumes it's under a DBDMA controller.
4
 *
5
 * Copyright (C) 1998 Randy Gobbel.
6
 *
7
 * May 1999, Al Viro: proper release of /proc/net/bmac entry, switched to
8
 * dynamic procfs inode.
9
 */
10
#include <linux/config.h>
11
#include <linux/module.h>
12
#include <linux/kernel.h>
13
#include <linux/netdevice.h>
14
#include <linux/etherdevice.h>
15
#include <linux/delay.h>
16
#include <linux/string.h>
17
#include <linux/timer.h>
18
#include <linux/proc_fs.h>
19
#include <linux/init.h>
20
#include <linux/crc32.h>
21
#include <linux/ethtool.h>
22
#include <asm/uaccess.h>
23
#include <asm/prom.h>
24
#include <asm/dbdma.h>
25
#include <asm/io.h>
26
#include <asm/page.h>
27
#include <asm/pgtable.h>
28
#include <asm/machdep.h>
29
#include <asm/pmac_feature.h>
30
#include <asm/irq.h>
31
#ifdef CONFIG_PMAC_PBOOK
32
#include <linux/adb.h>
33
#include <linux/pmu.h>
34
#endif /* CONFIG_PMAC_PBOOK */
35
#include "bmac.h"
36
 
37
#define trunc_page(x)   ((void *)(((unsigned long)(x)) & ~((unsigned long)(PAGE_SIZE - 1))))
38
#define round_page(x)   trunc_page(((unsigned long)(x)) + ((unsigned long)(PAGE_SIZE - 1)))
39
 
40
/*
41
 * CRC polynomial - used in working out multicast filter bits.
42
 */
43
#define ENET_CRCPOLY 0x04c11db7
44
 
45
/* switch to use multicast code lifted from sunhme driver */
46
#define SUNHME_MULTICAST
47
 
48
#define N_RX_RING       64
49
#define N_TX_RING       32
50
#define MAX_TX_ACTIVE   1
51
#define ETHERCRC        4
52
#define ETHERMINPACKET  64
53
#define ETHERMTU        1500
54
#define RX_BUFLEN       (ETHERMTU + 14 + ETHERCRC + 2)
55
#define TX_TIMEOUT      HZ      /* 1 second */
56
 
57
/* Bits in transmit DMA status */
58
#define TX_DMA_ERR      0x80
59
 
60
#define XXDEBUG(args)
61
 
62
/*
 * Per-device private state for one BMAC instance (hung off dev->priv).
 * Holds the two DBDMA channels (tx/rx), their descriptor rings, the
 * sk_buff arrays that shadow the rings, and multicast filter state.
 */
struct bmac_data {
        /* volatile struct bmac *bmac; */
        struct sk_buff_head *queue;
        volatile struct dbdma_regs *tx_dma;     /* transmit DBDMA channel registers */
        int tx_dma_intr;                        /* irq number of the tx DMA channel */
        volatile struct dbdma_regs *rx_dma;     /* receive DBDMA channel registers */
        int rx_dma_intr;                        /* irq number of the rx DMA channel */
        volatile struct dbdma_cmd *tx_cmds;     /* xmit dma command list */
        volatile struct dbdma_cmd *rx_cmds;     /* recv dma command list */
        struct device_node *node;               /* OF device node (used for feature calls) */
        struct sk_buff *rx_bufs[N_RX_RING];     /* skb attached to each rx descriptor */
        int rx_fill;                            /* index of last filled rx slot */
        int rx_empty;                           /* index of next rx slot to harvest */
        struct sk_buff *tx_bufs[N_TX_RING];     /* skb attached to each tx descriptor */
        int tx_fill;                            /* index of next free tx slot */
        int tx_empty;                           /* index of next tx slot to reap */
        unsigned char tx_fullup;                /* nonzero when the tx ring is full */
        struct net_device_stats stats;
        struct timer_list tx_timeout;           /* watchdog for stuck transmits */
        int timeout_active;                     /* nonzero while tx_timeout is armed */
        int sleeping;                           /* set during PMU sleep (PBOOK) */
        int opened;                             /* nonzero between open() and close() */
        int is_bmac_plus;                       /* BMAC+ variant (has MII PHY) */
        u32 device_id;
        unsigned short hash_use_count[64];      /* per-hash-bit multicast refcounts */
        unsigned short hash_table_mask[4];      /* soft copy of the 64-bit hw hash filter */
        struct net_device *next_bmac;           /* linked list of probed devices */
};
90
 
91
/* Name/offset pair used to dump chip registers through /proc. */
typedef struct bmac_reg_entry {
        char *name;                     /* human-readable register name */
        unsigned short reg_offset;      /* offset from dev->base_addr */
} bmac_reg_entry_t;
95
 
96
#define N_REG_ENTRIES 31
97
 
98
/* Table of the BMAC registers reported by the /proc dump (bmac_proc_info). */
static bmac_reg_entry_t reg_entries[N_REG_ENTRIES] = {
        {"MEMADD", MEMADD},
        {"MEMDATAHI", MEMDATAHI},
        {"MEMDATALO", MEMDATALO},
        {"TXPNTR", TXPNTR},
        {"RXPNTR", RXPNTR},
        {"IPG1", IPG1},
        {"IPG2", IPG2},
        {"ALIMIT", ALIMIT},
        {"SLOT", SLOT},
        {"PALEN", PALEN},
        {"PAPAT", PAPAT},
        {"TXSFD", TXSFD},
        {"JAM", JAM},
        {"TXCFG", TXCFG},
        {"TXMAX", TXMAX},
        {"TXMIN", TXMIN},
        {"PAREG", PAREG},
        {"DCNT", DCNT},
        {"NCCNT", NCCNT},
        {"NTCNT", NTCNT},
        {"EXCNT", EXCNT},
        {"LTCNT", LTCNT},
        {"TXSM", TXSM},
        {"RXCFG", RXCFG},
        {"RXMAX", RXMAX},
        {"RXMIN", RXMIN},
        {"FRCNT", FRCNT},
        {"AECNT", AECNT},
        {"FECNT", FECNT},
        {"RXSM", RXSM},
        {"RXCV", RXCV}
};
131
 
132
static struct net_device *bmac_devs;
133
static unsigned char *bmac_emergency_rxbuf;
134
 
135
#ifdef CONFIG_PMAC_PBOOK
136
static int bmac_sleep_notify(struct pmu_sleep_notifier *self, int when);
137
static struct pmu_sleep_notifier bmac_sleep_notifier = {
138
        bmac_sleep_notify, SLEEP_LEVEL_NET,
139
};
140
#endif
141
 
142
/*
143
 * Number of bytes of private data per BMAC: allow enough for
144
 * the rx and tx dma commands plus a branch dma command each,
145
 * and another 16 bytes to allow us to align the dma command
146
 * buffers on a 16 byte boundary.
147
 */
148
#define PRIV_BYTES      (sizeof(struct bmac_data) \
149
        + (N_RX_RING + N_TX_RING + 4) * sizeof(struct dbdma_cmd) \
150
        + sizeof(struct sk_buff_head))
151
 
152
static unsigned char bitrev(unsigned char b);
153
static void bmac_probe1(struct device_node *bmac, int is_bmac_plus);
154
static int bmac_open(struct net_device *dev);
155
static int bmac_close(struct net_device *dev);
156
static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev);
157
static struct net_device_stats *bmac_stats(struct net_device *dev);
158
static void bmac_set_multicast(struct net_device *dev);
159
static int bmac_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
160
static void bmac_reset_and_enable(struct net_device *dev);
161
static void bmac_start_chip(struct net_device *dev);
162
static void bmac_init_chip(struct net_device *dev);
163
static void bmac_init_registers(struct net_device *dev);
164
static void bmac_enable_and_reset_chip(struct net_device *dev);
165
static int bmac_set_address(struct net_device *dev, void *addr);
166
static void bmac_misc_intr(int irq, void *dev_id, struct pt_regs *regs);
167
static void bmac_txdma_intr(int irq, void *dev_id, struct pt_regs *regs);
168
static void bmac_rxdma_intr(int irq, void *dev_id, struct pt_regs *regs);
169
static void bmac_set_timeout(struct net_device *dev);
170
static void bmac_tx_timeout(unsigned long data);
171
static int bmac_proc_info ( char *buffer, char **start, off_t offset, int length);
172
static int bmac_output(struct sk_buff *skb, struct net_device *dev);
173
static void bmac_start(struct net_device *dev);
174
 
175
#define DBDMA_SET(x)    ( ((x) | (x) << 16) )
176
#define DBDMA_CLEAR(x)  ( (x) << 16)
177
 
178
/*
 * Byte-reversed 32-bit store (stwbrx): DBDMA registers are little-endian,
 * so writes from the big-endian PPC CPU go through a swapping store.
 * The "memory" clobber keeps the compiler from reordering around MMIO.
 */
static inline void
dbdma_st32(volatile unsigned long *a, unsigned long x)
{
        __asm__ volatile( "stwbrx %0,0,%1" : : "r" (x), "r" (a) : "memory");
        return;
}
184
 
185
/*
 * Byte-reversed 32-bit load (lwbrx): counterpart of dbdma_st32 for
 * reading the little-endian DBDMA registers on big-endian PPC.
 */
static inline unsigned long
dbdma_ld32(volatile unsigned long *a)
{
        unsigned long swap;
        __asm__ volatile ("lwbrx %0,0,%1" :  "=r" (swap) : "r" (a));
        return swap;
}
192
 
193
/*
 * Kick a DBDMA channel: set RUN|WAKE and clear PAUSE|DEAD in the control
 * register so the channel (re)starts processing its command list.
 */
static void
dbdma_continue(volatile struct dbdma_regs *dmap)
{
        dbdma_st32((volatile unsigned long *)&dmap->control,
                   DBDMA_SET(RUN|WAKE) | DBDMA_CLEAR(PAUSE|DEAD));
        eieio();        /* order the MMIO write before anything that follows */
}
200
 
201
/*
 * Stop a DBDMA channel: clear every run/state bit, then spin until the
 * channel acknowledges by dropping RUN in its status register.
 */
static void
dbdma_reset(volatile struct dbdma_regs *dmap)
{
        dbdma_st32((volatile unsigned long *)&dmap->control,
                   DBDMA_CLEAR(ACTIVE|DEAD|WAKE|FLUSH|PAUSE|RUN));
        eieio();
        /* busy-wait for the channel to actually stop */
        while (dbdma_ld32((volatile unsigned long *)&dmap->status) & RUN)
                eieio();
}
210
 
211
/*
 * Fill in one DBDMA command descriptor (all fields little-endian).
 * xfer_status/res_count are zeroed so completion state starts clean.
 */
static void
dbdma_setcmd(volatile struct dbdma_cmd *cp,
             unsigned short cmd, unsigned count, unsigned long addr,
             unsigned long cmd_dep)
{
        out_le16(&cp->command, cmd);
        out_le16(&cp->req_count, count);
        out_le32(&cp->phy_addr, addr);
        out_le32(&cp->cmd_dep, cmd_dep);
        out_le16(&cp->xfer_status, 0);
        out_le16(&cp->res_count, 0);
}
223
 
224
/* Write a 16-bit BMAC register at dev->base_addr + reg_offset (little-endian MMIO). */
static inline
void bmwrite(struct net_device *dev, unsigned long reg_offset, unsigned data )
{
        out_le16((void *)dev->base_addr + reg_offset, data);
}
229
 
230
 
231
/* Read a 16-bit BMAC register at dev->base_addr + reg_offset (little-endian MMIO). */
static inline
volatile unsigned short bmread(struct net_device *dev, unsigned long reg_offset )
{
        return in_le16((void *)dev->base_addr + reg_offset);
}
236
 
237
/*
 * Quiesce both DBDMA channels (if already set up), then ask the PMac
 * feature layer to power/clock the BMAC cell on and reset it.
 */
static void
bmac_enable_and_reset_chip(struct net_device *dev)
{
        struct bmac_data *bp = (struct bmac_data *) dev->priv;
        volatile struct dbdma_regs *rd = bp->rx_dma;
        volatile struct dbdma_regs *td = bp->tx_dma;

        if (rd)
                dbdma_reset(rd);
        if (td)
                dbdma_reset(td);

        pmac_call_feature(PMAC_FTR_BMAC_ENABLE, bp->node, 0, 1);
}
251
 
252
#define MIFDELAY        udelay(10)
253
 
254
/*
 * Bit-bang nb bits in from the MII management interface, MSB first.
 * Each bit is clocked by writing MIFCSR 0 then 1; the data-in line is
 * sampled on bit 0x8 of MIFCSR.  Returns the bits right-justified.
 * A trailing extra clock cycle is issued after the last bit.
 */
static unsigned int
bmac_mif_readbits(struct net_device *dev, int nb)
{
        unsigned int val = 0;

        while (--nb >= 0) {
                bmwrite(dev, MIFCSR, 0);
                MIFDELAY;
                if (bmread(dev, MIFCSR) & 8)
                        val |= 1 << nb;
                bmwrite(dev, MIFCSR, 1);
                MIFDELAY;
        }
        /* one extra clock edge to finish the frame */
        bmwrite(dev, MIFCSR, 0);
        MIFDELAY;
        bmwrite(dev, MIFCSR, 1);
        MIFDELAY;
        return val;
}
273
 
274
/*
 * Bit-bang the low nb bits of val out on the MII management interface,
 * MSB first.  For each bit: 6 = output-enable + data-high, 4 = output-
 * enable + data-low; or-ing in 1 raises the clock for the second phase.
 */
static void
bmac_mif_writebits(struct net_device *dev, unsigned int val, int nb)
{
        int b;

        while (--nb >= 0) {
                b = (val & (1 << nb))? 6: 4;
                bmwrite(dev, MIFCSR, b);
                MIFDELAY;
                bmwrite(dev, MIFCSR, b|1);
                MIFDELAY;
        }
}
287
 
288
/*
 * Read one 16-bit PHY register over the bit-banged MII management bus.
 * Sequence: 32-bit preamble of ones, start+read opcode (0110), 10 address
 * bits (PHY addr + register), bus turnaround, then clock in 17 bits
 * (turnaround bit + 16 data bits).  addr packs both PHY and register
 * number; typical callers pass just the register number (PHY addr 0).
 */
static unsigned int
bmac_mif_read(struct net_device *dev, unsigned int addr)
{
        unsigned int val;

        bmwrite(dev, MIFCSR, 4);
        MIFDELAY;
        bmac_mif_writebits(dev, ~0U, 32);       /* preamble: 32 ones */
        bmac_mif_writebits(dev, 6, 4);          /* start + read opcode */
        bmac_mif_writebits(dev, addr, 10);      /* PHY + register address */
        bmwrite(dev, MIFCSR, 2);                /* release the data line (turnaround) */
        MIFDELAY;
        bmwrite(dev, MIFCSR, 1);
        MIFDELAY;
        val = bmac_mif_readbits(dev, 17);       /* TA bit + 16 data bits */
        bmwrite(dev, MIFCSR, 4);
        MIFDELAY;
        return val;
}
307
 
308
/*
 * Write one 16-bit PHY register over the bit-banged MII management bus.
 * Sequence: 32-bit preamble, start+write opcode (0101), 10 address bits,
 * turnaround pattern (10), 16 data bits, then idle bits.
 */
static void
bmac_mif_write(struct net_device *dev, unsigned int addr, unsigned int val)
{
        bmwrite(dev, MIFCSR, 4);
        MIFDELAY;
        bmac_mif_writebits(dev, ~0U, 32);       /* preamble: 32 ones */
        bmac_mif_writebits(dev, 5, 4);          /* start + write opcode */
        bmac_mif_writebits(dev, addr, 10);      /* PHY + register address */
        bmac_mif_writebits(dev, 2, 2);          /* turnaround */
        bmac_mif_writebits(dev, val, 16);       /* data */
        bmac_mif_writebits(dev, 3, 2);          /* idle */
}
320
 
321
/*
 * Program the BMAC register file from scratch: reset rx/tx, configure
 * the transceiver interface (plain BMAC only), clear statistics counters,
 * enable the FIFOs, clear the multicast hash filter, load the station
 * address, and enable reception with hash filtering.
 */
static void
bmac_init_registers(struct net_device *dev)
{
        struct bmac_data *bp = (struct bmac_data *) dev->priv;
        volatile unsigned short regValue;
        unsigned short *pWord16;
        int i;

        /* XXDEBUG(("bmac: enter init_registers\n")); */

        bmwrite(dev, RXRST, RxResetValue);
        bmwrite(dev, TXRST, TxResetBit);

        /* poll (up to ~1s) for the tx reset bit to self-clear */
        i = 100;
        do {
                --i;
                udelay(10000);
                regValue = bmread(dev, TXRST); /* wait for reset to clear..acknowledge */
        } while ((regValue & TxResetBit) && i > 0);

        /* plain BMAC: set up the serial transceiver interface */
        if (!bp->is_bmac_plus) {
                regValue = bmread(dev, XCVRIF);
                regValue |= ClkBit | SerialMode | COLActiveLow;
                bmwrite(dev, XCVRIF, regValue);
                udelay(10000);
        }

        /* random seed for the backoff generator */
        bmwrite(dev, RSEED, (unsigned short)0x1968);

        regValue = bmread(dev, XIFC);
        regValue |= TxOutputEnable;
        bmwrite(dev, XIFC, regValue);

        bmread(dev, PAREG);

        /* set collision counters to 0 */
        bmwrite(dev, NCCNT, 0);
        bmwrite(dev, NTCNT, 0);
        bmwrite(dev, EXCNT, 0);
        bmwrite(dev, LTCNT, 0);

        /* set rx counters to 0 */
        bmwrite(dev, FRCNT, 0);
        bmwrite(dev, LECNT, 0);
        bmwrite(dev, AECNT, 0);
        bmwrite(dev, FECNT, 0);
        bmwrite(dev, RXCV, 0);

        /* set tx fifo information */
        bmwrite(dev, TXTH, 4);  /* 4 octets before tx starts */

        bmwrite(dev, TXFIFOCSR, 0);      /* first disable txFIFO */
        bmwrite(dev, TXFIFOCSR, TxFIFOEnable );

        /* set rx fifo information */
        bmwrite(dev, RXFIFOCSR, 0);      /* first disable rxFIFO */
        bmwrite(dev, RXFIFOCSR, RxFIFOEnable );

        //bmwrite(dev, TXCFG, TxMACEnable);             /* TxNeverGiveUp maybe later */
        bmread(dev, STATUS);            /* read it just to clear it */

        /* zero out the chip Hash Filter registers */
        for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
        bmwrite(dev, BHASH3, bp->hash_table_mask[0]);    /* bits 15 - 0 */
        bmwrite(dev, BHASH2, bp->hash_table_mask[1]);   /* bits 31 - 16 */
        bmwrite(dev, BHASH1, bp->hash_table_mask[2]);   /* bits 47 - 32 */
        bmwrite(dev, BHASH0, bp->hash_table_mask[3]);   /* bits 63 - 48 */

        /* load the station address as three 16-bit words */
        pWord16 = (unsigned short *)dev->dev_addr;
        bmwrite(dev, MADD0, *pWord16++);
        bmwrite(dev, MADD1, *pWord16++);
        bmwrite(dev, MADD2, *pWord16);

        bmwrite(dev, RXCFG, RxCRCNoStrip | RxHashFilterEnable | RxRejectOwnPackets);

        bmwrite(dev, INTDISABLE, EnableNormal);

        return;
}
400
 
401
#if 0
402
static void
403
bmac_disable_interrupts(struct net_device *dev)
404
{
405
        bmwrite(dev, INTDISABLE, DisableAll);
406
}
407
 
408
static void
409
bmac_enable_interrupts(struct net_device *dev)
410
{
411
        bmwrite(dev, INTDISABLE, EnableNormal);
412
}
413
#endif
414
 
415
 
416
/*
 * Start packet flow: kick the rx DBDMA channel, then enable the MAC
 * transmitter and receiver (preserving any other config bits already set,
 * e.g. promiscuous mode).
 */
static void
bmac_start_chip(struct net_device *dev)
{
        struct bmac_data *bp = (struct bmac_data *) dev->priv;
        volatile struct dbdma_regs *rd = bp->rx_dma;
        unsigned short  oldConfig;

        /* enable rx dma channel */
        dbdma_continue(rd);

        oldConfig = bmread(dev, TXCFG);
        bmwrite(dev, TXCFG, oldConfig | TxMACEnable );

        /* turn on rx plus any other bits already on (promiscuous possibly) */
        oldConfig = bmread(dev, RXCFG);
        bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
        udelay(20000);
}
434
 
435
/*
 * Dump the 32 PHY registers to the kernel log, then (BMAC+ only) start
 * autonegotiation: advertise the abilities read from PHY register 1 and
 * write control register 0 with 0x1200 (autoneg enable + restart) if the
 * advertisement needs updating, otherwise just 0x1000 (autoneg enable).
 */
static void
bmac_init_phy(struct net_device *dev)
{
        unsigned int addr;
        struct bmac_data *bp = (struct bmac_data *) dev->priv;

        printk(KERN_DEBUG "phy registers:");
        for (addr = 0; addr < 32; ++addr) {
                if ((addr & 7) == 0)
                        printk("\n" KERN_DEBUG);
                printk(" %.4x", bmac_mif_read(dev, addr));
        }
        printk("\n");
        if (bp->is_bmac_plus) {
                unsigned int capable, ctrl;

                ctrl = bmac_mif_read(dev, 0);
                /* translate status reg ability bits into advertisement bits */
                capable = ((bmac_mif_read(dev, 1) & 0xf800) >> 6) | 1;
                if (bmac_mif_read(dev, 4) != capable
                    || (ctrl & 0x1000) == 0) {
                        bmac_mif_write(dev, 4, capable);
                        bmac_mif_write(dev, 0, 0x1200);
                } else
                        bmac_mif_write(dev, 0, 0x1000);
        }
}
461
 
462
/* Full chip bring-up: PHY first, then the BMAC register file. */
static void
bmac_init_chip(struct net_device *dev)
{
        bmac_init_phy(dev);
        bmac_init_registers(dev);
}
468
 
469
#ifdef CONFIG_PMAC_PBOOK
470
/*
 * PMU sleep notifier (PowerBook suspend/resume).
 *
 * On PBOOK_SLEEP_NOW: detach the interface, kill the tx watchdog and all
 * three irqs, stop the MAC and both DMA channels, free the ring skbs,
 * and power the cell down via the feature layer.
 * On PBOOK_WAKE: re-init the chip (if it was open), re-enable irqs and
 * reattach.  Always returns PBOOK_SLEEP_OK.
 *
 * NOTE(review): only bmac_devs (the first probed device) is handled here.
 */
static int
bmac_sleep_notify(struct pmu_sleep_notifier *self, int when)
{
        struct bmac_data *bp;
        unsigned long flags;
        unsigned short config;
        struct net_device* dev = bmac_devs;
        int i;

        if (bmac_devs == 0)
                return PBOOK_SLEEP_OK;

        bp = (struct bmac_data *) dev->priv;

        switch (when) {
        case PBOOK_SLEEP_REQUEST:
                break;
        case PBOOK_SLEEP_REJECT:
                break;
        case PBOOK_SLEEP_NOW:
                netif_device_detach(dev);
                /* prolly should wait for dma to finish & turn off the chip */
                save_flags(flags); cli();
                if (bp->timeout_active) {
                        del_timer(&bp->tx_timeout);
                        bp->timeout_active = 0;
                }
                disable_irq(dev->irq);
                disable_irq(bp->tx_dma_intr);
                disable_irq(bp->rx_dma_intr);
                bp->sleeping = 1;
                restore_flags(flags);
                if (bp->opened) {
                        volatile struct dbdma_regs *rd = bp->rx_dma;
                        volatile struct dbdma_regs *td = bp->tx_dma;

                        /* stop the MAC before touching DMA */
                        config = bmread(dev, RXCFG);
                        bmwrite(dev, RXCFG, (config & ~RxMACEnable));
                        config = bmread(dev, TXCFG);
                        bmwrite(dev, TXCFG, (config & ~TxMACEnable));
                        bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */
                        /* disable rx and tx dma */
                        st_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));       /* clear run bit */
                        st_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));       /* clear run bit */
                        /* free some skb's */
                        for (i=0; i<N_RX_RING; i++) {
                                if (bp->rx_bufs[i] != NULL) {
                                        dev_kfree_skb(bp->rx_bufs[i]);
                                        bp->rx_bufs[i] = NULL;
                                }
                        }
                        for (i = 0; i<N_TX_RING; i++) {
                                if (bp->tx_bufs[i] != NULL) {
                                        dev_kfree_skb(bp->tx_bufs[i]);
                                        bp->tx_bufs[i] = NULL;
                                }
                        }
                }
                pmac_call_feature(PMAC_FTR_BMAC_ENABLE, bp->node, 0, 0);
                break;
        case PBOOK_WAKE:
                /* see if this is enough */
                if (bp->opened)
                        bmac_reset_and_enable(dev);
                enable_irq(dev->irq);
                enable_irq(bp->tx_dma_intr);
                enable_irq(bp->rx_dma_intr);
                netif_device_attach(dev);
                break;
        }
        return PBOOK_SLEEP_OK;
}
542
#endif
543
 
544
/*
 * set_mac_address hook: copy the 6-byte address into dev->dev_addr and
 * load it into the chip's MADD0..2 registers (three 16-bit words), with
 * interrupts blocked so the update is atomic.  Always returns 0.
 */
static int bmac_set_address(struct net_device *dev, void *addr)
{
        unsigned char *p = addr;
        unsigned short *pWord16;
        unsigned long flags;
        int i;

        XXDEBUG(("bmac: enter set_address\n"));
        save_flags(flags); cli();

        for (i = 0; i < 6; ++i) {
                dev->dev_addr[i] = p[i];
        }
        /* load up the hardware address */
        pWord16  = (unsigned short *)dev->dev_addr;
        bmwrite(dev, MADD0, *pWord16++);
        bmwrite(dev, MADD1, *pWord16++);
        bmwrite(dev, MADD2, *pWord16);

        restore_flags(flags);
        XXDEBUG(("bmac: exit set_address\n"));
        return 0;
}
567
 
568
/*
 * (Re)arm the transmit watchdog to fire TX_TIMEOUT (1s) from now,
 * cancelling any previously pending instance first.  Runs with
 * interrupts off so timer state and timeout_active stay consistent.
 */
static inline void bmac_set_timeout(struct net_device *dev)
{
        struct bmac_data *bp = (struct bmac_data *) dev->priv;
        unsigned long flags;

        save_flags(flags);
        cli();
        if (bp->timeout_active)
                del_timer(&bp->tx_timeout);
        bp->tx_timeout.expires = jiffies + TX_TIMEOUT;
        bp->tx_timeout.function = bmac_tx_timeout;
        bp->tx_timeout.data = (unsigned long) dev;
        add_timer(&bp->tx_timeout);
        bp->timeout_active = 1;
        restore_flags(flags);
}
584
 
585
static void
586
bmac_construct_xmt(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
587
{
588
        void *vaddr;
589
        unsigned long baddr;
590
        unsigned long len;
591
 
592
        len = skb->len;
593
        vaddr = skb->data;
594
        baddr = virt_to_bus(vaddr);
595
 
596
        dbdma_setcmd(cp, (OUTPUT_LAST | INTR_ALWAYS | WAIT_IFCLR), len, baddr, 0);
597
}
598
 
599
/*
 * Build the DBDMA descriptor for one receive slot.  If no skb could be
 * allocated, point the descriptor at the shared emergency buffer so the
 * ring keeps running (the packet will be dropped).
 */
static void
bmac_construct_rxbuff(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
{
        unsigned char *addr = skb? skb->data: bmac_emergency_rxbuf;

        dbdma_setcmd(cp, (INPUT_LAST | INTR_ALWAYS), RX_BUFLEN,
                     virt_to_bus(addr), 0);
}
607
 
608
/* Bit-reverse one byte of an ethernet hardware address. */
609
/* Reverse the bit order of one byte (bit 0 becomes bit 7, etc.). */
static unsigned char
bitrev(unsigned char b)
{
        unsigned char reversed = 0;
        int bit;

        for (bit = 0; bit < 8; ++bit) {
                reversed = (reversed << 1) | (b & 1);
                b >>= 1;
        }
        return reversed;
}
618
 
619
 
620
/*
 * Initialize the transmit descriptor ring: zero all N_TX_RING+1 commands,
 * reset the fill/empty indices, append a branch-back command so the ring
 * wraps, then reset the tx DBDMA channel and point it at the ring.
 */
static void
bmac_init_tx_ring(struct bmac_data *bp)
{
        volatile struct dbdma_regs *td = bp->tx_dma;

        memset((char *)bp->tx_cmds, 0, (N_TX_RING+1) * sizeof(struct dbdma_cmd));

        bp->tx_empty = 0;
        bp->tx_fill = 0;
        bp->tx_fullup = 0;

        /* put a branch at the end of the tx command list */
        dbdma_setcmd(&bp->tx_cmds[N_TX_RING],
                     (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->tx_cmds));

        /* reset tx dma */
        dbdma_reset(td);
        out_le32(&td->wait_sel, 0x00200020);
        out_le32(&td->cmdptr, virt_to_bus(bp->tx_cmds));
}
640
 
641
/*
 * Initialize the receive descriptor ring: allocate an skb for every slot
 * that doesn't already have one (2-byte reserved so the IP header lands
 * aligned), build a descriptor per slot, append the wrap-around branch,
 * and point the (reset) rx DBDMA channel at the ring.  Returns 1.
 */
static int
bmac_init_rx_ring(struct bmac_data *bp)
{
        volatile struct dbdma_regs *rd = bp->rx_dma;
        int i;
        struct sk_buff *skb;

        /* initialize list of sk_buffs for receiving and set up recv dma */
        memset((char *)bp->rx_cmds, 0,
               (N_RX_RING + 1) * sizeof(struct dbdma_cmd));
        for (i = 0; i < N_RX_RING; i++) {
                if ((skb = bp->rx_bufs[i]) == NULL) {
                        bp->rx_bufs[i] = skb = dev_alloc_skb(RX_BUFLEN+2);
                        if (skb != NULL)
                                skb_reserve(skb, 2);
                }
                /* a NULL skb here makes the slot use the emergency buffer */
                bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
        }

        bp->rx_empty = 0;
        bp->rx_fill = i;

        /* Put a branch back to the beginning of the receive command list */
        dbdma_setcmd(&bp->rx_cmds[N_RX_RING],
                     (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->rx_cmds));

        /* start rx dma */
        dbdma_reset(rd);
        out_le32(&rd->cmdptr, virt_to_bus(bp->rx_cmds));

        return 1;
}
673
 
674
 
675
/*
 * Queue one skb on the tx DBDMA ring and kick the channel.
 * A STOP command is written into the slot *after* the new packet first,
 * so the channel cannot run past descriptors that aren't ready yet.
 * Returns 0 on success, -1 (with the queue stopped) when the ring is full.
 * The skb is freed later by bmac_txdma_intr once transmitted.
 */
static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev)
{
        struct bmac_data *bp = (struct bmac_data *) dev->priv;
        volatile struct dbdma_regs *td = bp->tx_dma;
        int i;

        /* see if there's a free slot in the tx ring */
        /* XXDEBUG(("bmac_xmit_start: empty=%d fill=%d\n", */
        /*           bp->tx_empty, bp->tx_fill)); */
        i = bp->tx_fill + 1;
        if (i >= N_TX_RING)
                i = 0;
        if (i == bp->tx_empty) {
                netif_stop_queue(dev);
                bp->tx_fullup = 1;
                XXDEBUG(("bmac_transmit_packet: tx ring full\n"));
                return -1;              /* can't take it at the moment */
        }

        /* fence the ring before exposing the new descriptor */
        dbdma_setcmd(&bp->tx_cmds[i], DBDMA_STOP, 0, 0, 0);

        bmac_construct_xmt(skb, &bp->tx_cmds[bp->tx_fill]);

        bp->tx_bufs[bp->tx_fill] = skb;
        bp->tx_fill = i;

        bp->stats.tx_bytes += skb->len;

        dbdma_continue(td);

        return 0;
}
707
 
708
static int rxintcount;
709
 
710
static void bmac_rxdma_intr(int irq, void *dev_id, struct pt_regs *regs)
711
{
712
        struct net_device *dev = (struct net_device *) dev_id;
713
        struct bmac_data *bp = (struct bmac_data *) dev->priv;
714
        volatile struct dbdma_regs *rd = bp->rx_dma;
715
        volatile struct dbdma_cmd *cp;
716
        int i, nb, stat;
717
        struct sk_buff *skb;
718
        unsigned int residual;
719
        int last;
720
        unsigned long flags;
721
 
722
        save_flags(flags); cli();
723
 
724
        if (++rxintcount < 10) {
725
                XXDEBUG(("bmac_rxdma_intr\n"));
726
        }
727
 
728
        last = -1;
729
        i = bp->rx_empty;
730
 
731
        while (1) {
732
                cp = &bp->rx_cmds[i];
733
                stat = ld_le16(&cp->xfer_status);
734
                residual = ld_le16(&cp->res_count);
735
                if ((stat & ACTIVE) == 0)
736
                        break;
737
                nb = RX_BUFLEN - residual - 2;
738
                if (nb < (ETHERMINPACKET - ETHERCRC)) {
739
                        skb = NULL;
740
                        bp->stats.rx_length_errors++;
741
                        bp->stats.rx_errors++;
742
                } else {
743
                        skb = bp->rx_bufs[i];
744
                        bp->rx_bufs[i] = NULL;
745
                }
746
                if (skb != NULL) {
747
                        nb -= ETHERCRC;
748
                        skb_put(skb, nb);
749
                        skb->dev = dev;
750
                        skb->protocol = eth_type_trans(skb, dev);
751
                        netif_rx(skb);
752
                        dev->last_rx = jiffies;
753
                        ++bp->stats.rx_packets;
754
                        bp->stats.rx_bytes += nb;
755
                } else {
756
                        ++bp->stats.rx_dropped;
757
                }
758
                dev->last_rx = jiffies;
759
                if ((skb = bp->rx_bufs[i]) == NULL) {
760
                        bp->rx_bufs[i] = skb = dev_alloc_skb(RX_BUFLEN+2);
761
                        if (skb != NULL)
762
                                skb_reserve(bp->rx_bufs[i], 2);
763
                }
764
                bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
765
                st_le16(&cp->res_count, 0);
766
                st_le16(&cp->xfer_status, 0);
767
                last = i;
768
                if (++i >= N_RX_RING) i = 0;
769
        }
770
 
771
        if (last != -1) {
772
                bp->rx_fill = last;
773
                bp->rx_empty = i;
774
        }
775
 
776
        restore_flags(flags);
777
 
778
        dbdma_continue(rd);
779
 
780
        if (rxintcount < 10) {
781
                XXDEBUG(("bmac_rxdma_intr done\n"));
782
        }
783
}
784
 
785
static int txintcount;
786
 
787
/*
 * Transmit-DMA interrupt handler.  Reaps completed descriptors from
 * tx_empty forward: frees the transmitted skbs, bumps tx_packets, clears
 * the full flag and wakes the queue.  A descriptor whose ACTIVE status
 * bit is clear is only treated as "still pending" if the channel's
 * command pointer is currently parked on it (the status word may not
 * have been written back by the DBDMA engine yet).  Finishes by calling
 * bmac_start() to push any queued packets into the ring.
 */
static void bmac_txdma_intr(int irq, void *dev_id, struct pt_regs *regs)
{
        struct net_device *dev = (struct net_device *) dev_id;
        struct bmac_data *bp = (struct bmac_data *) dev->priv;
        volatile struct dbdma_cmd *cp;
        int stat;
        unsigned long flags;

        save_flags(flags); cli();

        if (txintcount++ < 10) {
                XXDEBUG(("bmac_txdma_intr\n"));
        }

        /*     del_timer(&bp->tx_timeout); */
        /*     bp->timeout_active = 0; */

        while (1) {
                cp = &bp->tx_cmds[bp->tx_empty];
                stat = ld_le16(&cp->xfer_status);
                if (txintcount < 10) {
                        XXDEBUG(("bmac_txdma_xfer_stat=%#0x\n", stat));
                }
                if (!(stat & ACTIVE)) {
                        /*
                         * status field might not have been filled by DBDMA
                         */
                        if (cp == bus_to_virt(in_le32(&bp->tx_dma->cmdptr)))
                                break;
                }

                if (bp->tx_bufs[bp->tx_empty]) {
                        ++bp->stats.tx_packets;
                        dev_kfree_skb_irq(bp->tx_bufs[bp->tx_empty]);
                }
                bp->tx_bufs[bp->tx_empty] = NULL;
                bp->tx_fullup = 0;
                netif_wake_queue(dev);
                if (++bp->tx_empty >= N_TX_RING)
                        bp->tx_empty = 0;
                if (bp->tx_empty == bp->tx_fill)
                        break;  /* ring fully drained */
        }

        restore_flags(flags);

        if (txintcount < 10) {
                XXDEBUG(("bmac_txdma_intr done->bmac_start\n"));
        }

        bmac_start(dev);
}
839
 
840
static struct net_device_stats *bmac_stats(struct net_device *dev)
841
{
842
        struct bmac_data *p = (struct bmac_data *) dev->priv;
843
 
844
        return &p->stats;
845
}
846
 
847
#ifndef SUNHME_MULTICAST
848
/* Real fast bit-reversal algorithm, 6-bit values */
849
/* Lookup table: reverse6[v] is the 6-bit bit-reversal of v (0..63).
 * Note: this whole #ifndef SUNHME_MULTICAST section is compiled out,
 * since SUNHME_MULTICAST is #defined above. */
static int reverse6[64] = {
        0x0,0x20,0x10,0x30,0x8,0x28,0x18,0x38,
        0x4,0x24,0x14,0x34,0xc,0x2c,0x1c,0x3c,
        0x2,0x22,0x12,0x32,0xa,0x2a,0x1a,0x3a,
        0x6,0x26,0x16,0x36,0xe,0x2e,0x1e,0x3e,
        0x1,0x21,0x11,0x31,0x9,0x29,0x19,0x39,
        0x5,0x25,0x15,0x35,0xd,0x2d,0x1d,0x3d,
        0x3,0x23,0x13,0x33,0xb,0x2b,0x1b,0x3b,
        0x7,0x27,0x17,0x37,0xf,0x2f,0x1f,0x3f
};
859
 
860
/*
 * Fold one 16-bit value into a running CRC-32 (polynomial ENET_CRCPOLY),
 * bit by bit.  The input word is byte-swapped first so the bytes are
 * processed in wire order.  Returns the updated CRC accumulator.
 */
static unsigned int
crc416(unsigned int curval, unsigned short nxtval)
{
        register unsigned int counter, cur = curval, next = nxtval;
        register int high_crc_set, low_data_set;

        /* Swap bytes */
        next = ((next & 0x00FF) << 8) | (next >> 8);

        /* Compute bit-by-bit */
        for (counter = 0; counter < 16; ++counter) {
                /* is high CRC bit set? */
                if ((cur & 0x80000000) == 0) high_crc_set = 0;
                else high_crc_set = 1;

                cur = cur << 1;

                if ((next & 0x0001) == 0) low_data_set = 0;
                else low_data_set = 1;

                next = next >> 1;

                /* do the XOR */
                if (high_crc_set ^ low_data_set) cur = cur ^ ENET_CRCPOLY;
        }
        return cur;
}
887
 
888
/*
 * CRC-32 of a 6-byte ethernet address, taken as three big-endian 16-bit
 * words, starting from the standard 0xffffffff seed.  The caller uses the
 * low 6 bits to select a multicast hash filter bit.
 */
static unsigned int
bmac_crc(unsigned short *address)
{
        unsigned int newcrc;

        XXDEBUG(("bmac_crc: addr=%#04x, %#04x, %#04x\n", *address, address[1], address[2]));
        newcrc = crc416(0xffffffff, *address);  /* address bits 47 - 32 */
        newcrc = crc416(newcrc, address[1]);    /* address bits 31 - 16 */
        newcrc = crc416(newcrc, address[2]);    /* address bits 15 - 0  */

        return(newcrc);
}
900
 
901
/*
902
 * Add requested mcast addr to BMac's hash table filter.
903
 *
904
 */
905
 
906
static void
907
bmac_addhash(struct bmac_data *bp, unsigned char *addr)
908
{
909
        unsigned int     crc;
910
        unsigned short   mask;
911
 
912
        if (!(*addr)) return;
913
        crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
914
        crc = reverse6[crc];    /* Hyperfast bit-reversing algorithm */
915
        if (bp->hash_use_count[crc]++) return; /* This bit is already set */
916
        mask = crc % 16;
917
        mask = (unsigned char)1 << mask;
918
        bp->hash_use_count[crc/16] |= mask;
919
}
920
 
921
static void
922
bmac_removehash(struct bmac_data *bp, unsigned char *addr)
923
{
924
        unsigned int crc;
925
        unsigned char mask;
926
 
927
        /* Now, delete the address from the filter copy, as indicated */
928
        crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
929
        crc = reverse6[crc];    /* Hyperfast bit-reversing algorithm */
930
        if (bp->hash_use_count[crc] == 0) return; /* That bit wasn't in use! */
931
        if (--bp->hash_use_count[crc]) return; /* That bit is still in use */
932
        mask = crc % 16;
933
        mask = ((unsigned char)1 << mask) ^ 0xffff; /* To turn off bit */
934
        bp->hash_table_mask[crc/16] &= mask;
935
}
936
 
937
/*
938
 * Sync the adapter with the software copy of the multicast mask
939
 *  (logical address filter).
940
 */
941
 
942
static void
943
bmac_rx_off(struct net_device *dev)
944
{
945
        unsigned short rx_cfg;
946
 
947
        rx_cfg = bmread(dev, RXCFG);
948
        rx_cfg &= ~RxMACEnable;
949
        bmwrite(dev, RXCFG, rx_cfg);
950
        do {
951
                rx_cfg = bmread(dev, RXCFG);
952
        }  while (rx_cfg & RxMACEnable);
953
}
954
 
955
unsigned short
956
bmac_rx_on(struct net_device *dev, int hash_enable, int promisc_enable)
957
{
958
        unsigned short rx_cfg;
959
 
960
        rx_cfg = bmread(dev, RXCFG);
961
        rx_cfg |= RxMACEnable;
962
        if (hash_enable) rx_cfg |= RxHashFilterEnable;
963
        else rx_cfg &= ~RxHashFilterEnable;
964
        if (promisc_enable) rx_cfg |= RxPromiscEnable;
965
        else rx_cfg &= ~RxPromiscEnable;
966
        bmwrite(dev, RXRST, RxResetValue);
967
        bmwrite(dev, RXFIFOCSR, 0);      /* first disable rxFIFO */
968
        bmwrite(dev, RXFIFOCSR, RxFIFOEnable );
969
        bmwrite(dev, RXCFG, rx_cfg );
970
        return rx_cfg;
971
}
972
 
973
/* Copy the software 64-bit hash filter mask into the chip's four
 * 16-bit BHASH registers.  Note the reversed register order: BHASH3
 * takes the low word, BHASH0 the high word. */
static void
bmac_update_hash_table_mask(struct net_device *dev, struct bmac_data *bp)
{
        bmwrite(dev, BHASH3, bp->hash_table_mask[0]); /* bits 15 - 0 */
        bmwrite(dev, BHASH2, bp->hash_table_mask[1]); /* bits 31 - 16 */
        bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */
        bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */
}
981
 
982
#if 0
983
static void
984
bmac_add_multi(struct net_device *dev,
985
               struct bmac_data *bp, unsigned char *addr)
986
{
987
        /* XXDEBUG(("bmac: enter bmac_add_multi\n")); */
988
        bmac_addhash(bp, addr);
989
        bmac_rx_off(dev);
990
        bmac_update_hash_table_mask(dev, bp);
991
        bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0);
992
        /* XXDEBUG(("bmac: exit bmac_add_multi\n")); */
993
}
994
 
995
static void
996
bmac_remove_multi(struct net_device *dev,
997
                  struct bmac_data *bp, unsigned char *addr)
998
{
999
        bmac_removehash(bp, addr);
1000
        bmac_rx_off(dev);
1001
        bmac_update_hash_table_mask(dev, bp);
1002
        bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0);
1003
}
1004
#endif
1005
 
1006
/* Set or clear the multicast filter for this adaptor.
1007
    num_addrs == -1     Promiscuous mode, receive all packets
1008
    num_addrs == 0      Normal mode, clear multicast list
1009
    num_addrs > 0       Multicast mode, receive normal and MC packets, and do
1010
                        best-effort filtering.
1011
 */
1012
static void bmac_set_multicast(struct net_device *dev)
1013
{
1014
        struct dev_mc_list *dmi;
1015
        struct bmac_data *bp = (struct bmac_data *) dev->priv;
1016
        int num_addrs = dev->mc_count;
1017
        unsigned short rx_cfg;
1018
        int i;
1019
 
1020
        if (bp->sleeping)
1021
                return;
1022
 
1023
        XXDEBUG(("bmac: enter bmac_set_multicast, n_addrs=%d\n", num_addrs));
1024
 
1025
        if((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
1026
                for (i=0; i<4; i++) bp->hash_table_mask[i] = 0xffff;
1027
                bmac_update_hash_table_mask(dev, bp);
1028
                rx_cfg = bmac_rx_on(dev, 1, 0);
1029
                XXDEBUG(("bmac: all multi, rx_cfg=%#08x\n"));
1030
        } else if ((dev->flags & IFF_PROMISC) || (num_addrs < 0)) {
1031
                rx_cfg = bmread(dev, RXCFG);
1032
                rx_cfg |= RxPromiscEnable;
1033
                bmwrite(dev, RXCFG, rx_cfg);
1034
                rx_cfg = bmac_rx_on(dev, 0, 1);
1035
                XXDEBUG(("bmac: promisc mode enabled, rx_cfg=%#08x\n", rx_cfg));
1036
        } else {
1037
                for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
1038
                for (i=0; i<64; i++) bp->hash_use_count[i] = 0;
1039
                if (num_addrs == 0) {
1040
                        rx_cfg = bmac_rx_on(dev, 0, 0);
1041
                        XXDEBUG(("bmac: multi disabled, rx_cfg=%#08x\n", rx_cfg));
1042
                } else {
1043
                        for (dmi=dev->mc_list; dmi!=NULL; dmi=dmi->next)
1044
                                bmac_addhash(bp, dmi->dmi_addr);
1045
                        bmac_update_hash_table_mask(dev, bp);
1046
                        rx_cfg = bmac_rx_on(dev, 1, 0);
1047
                        XXDEBUG(("bmac: multi enabled, rx_cfg=%#08x\n", rx_cfg));
1048
                }
1049
        }
1050
        /* XXDEBUG(("bmac: exit bmac_set_multicast\n")); */
1051
}
1052
#else /* ifdef SUNHME_MULTICAST */
1053
 
1054
/* The version of set_multicast below was lifted from sunhme.c */
1055
 
1056
static void bmac_set_multicast(struct net_device *dev)
1057
{
1058
        struct dev_mc_list *dmi = dev->mc_list;
1059
        char *addrs;
1060
        int i;
1061
        unsigned short rx_cfg;
1062
        u32 crc;
1063
 
1064
        if((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
1065
                bmwrite(dev, BHASH0, 0xffff);
1066
                bmwrite(dev, BHASH1, 0xffff);
1067
                bmwrite(dev, BHASH2, 0xffff);
1068
                bmwrite(dev, BHASH3, 0xffff);
1069
        } else if(dev->flags & IFF_PROMISC) {
1070
                rx_cfg = bmread(dev, RXCFG);
1071
                rx_cfg |= RxPromiscEnable;
1072
                bmwrite(dev, RXCFG, rx_cfg);
1073
        } else {
1074
                u16 hash_table[4];
1075
 
1076
                rx_cfg = bmread(dev, RXCFG);
1077
                rx_cfg &= ~RxPromiscEnable;
1078
                bmwrite(dev, RXCFG, rx_cfg);
1079
 
1080
                for(i = 0; i < 4; i++) hash_table[i] = 0;
1081
 
1082
                for(i = 0; i < dev->mc_count; i++) {
1083
                        addrs = dmi->dmi_addr;
1084
                        dmi = dmi->next;
1085
 
1086
                        if(!(*addrs & 1))
1087
                                continue;
1088
 
1089
                        crc = ether_crc_le(6, addrs);
1090
                        crc >>= 26;
1091
                        hash_table[crc >> 4] |= 1 << (crc & 0xf);
1092
                }
1093
                bmwrite(dev, BHASH0, hash_table[0]);
1094
                bmwrite(dev, BHASH1, hash_table[1]);
1095
                bmwrite(dev, BHASH2, hash_table[2]);
1096
                bmwrite(dev, BHASH3, hash_table[3]);
1097
        }
1098
}
1099
#endif /* SUNHME_MULTICAST */
1100
 
1101
/* Count of misc interrupts seen; used only to throttle debug output. */
static int miscintcount;

/*
 * "Misc" interrupt handler: services chip status events (as opposed to
 * the TX/RX DMA interrupts).  Reads STATUS and folds any error bits
 * into the interface statistics.
 * NOTE(review): assumes reading STATUS acknowledges the interrupt —
 * confirm against the BMAC register documentation.
 */
static void bmac_misc_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct bmac_data *bp = (struct bmac_data *)dev->priv;
	unsigned int status = bmread(dev, STATUS);
	if (miscintcount++ < 10) {
		/* only log the first ten occurrences */
		XXDEBUG(("bmac_misc_intr\n"));
	}
	/* XXDEBUG(("bmac_misc_intr, status=%#08x\n", status)); */
	/*     bmac_txdma_intr_inner(irq, dev_id, regs); */
	/*   if (status & FrameReceived) bp->stats.rx_dropped++; */
	if (status & RxErrorMask) bp->stats.rx_errors++;
	if (status & RxCRCCntExp) bp->stats.rx_crc_errors++;
	if (status & RxLenCntExp) bp->stats.rx_length_errors++;
	if (status & RxOverFlow) bp->stats.rx_over_errors++;
	if (status & RxAlignCntExp) bp->stats.rx_frame_errors++;

	/*   if (status & FrameSent) bp->stats.tx_dropped++; */
	if (status & TxErrorMask) bp->stats.tx_errors++;
	if (status & TxUnderrun) bp->stats.tx_fifo_errors++;
	if (status & TxNormalCollExp) bp->stats.collisions++;
}
1125
 
1126
/*
1127
 * Procedure for reading EEPROM
1128
 */
1129
#define SROMAddressLength       5
1130
#define DataInOn                0x0008
1131
#define DataInOff               0x0000
1132
#define Clk                     0x0002
1133
#define ChipSelect              0x0001
1134
#define SDIShiftCount           3
1135
#define SD0ShiftCount           2
1136
#define DelayValue              1000    /* number of microseconds */
1137
#define SROMStartOffset         10      /* this is in words */
1138
#define SROMReadCount           3       /* number of words to read from SROM */
1139
#define SROMAddressBits         6
1140
#define EnetAddressOffset       20
1141
 
1142
static unsigned char
1143
bmac_clock_out_bit(struct net_device *dev)
1144
{
1145
        unsigned short         data;
1146
        unsigned short         val;
1147
 
1148
        bmwrite(dev, SROMCSR, ChipSelect | Clk);
1149
        udelay(DelayValue);
1150
 
1151
        data = bmread(dev, SROMCSR);
1152
        udelay(DelayValue);
1153
        val = (data >> SD0ShiftCount) & 1;
1154
 
1155
        bmwrite(dev, SROMCSR, ChipSelect);
1156
        udelay(DelayValue);
1157
 
1158
        return val;
1159
}
1160
 
1161
static void
1162
bmac_clock_in_bit(struct net_device *dev, unsigned int val)
1163
{
1164
        unsigned short data;
1165
 
1166
        if (val != 0 && val != 1) return;
1167
 
1168
        data = (val << SDIShiftCount);
1169
        bmwrite(dev, SROMCSR, data | ChipSelect  );
1170
        udelay(DelayValue);
1171
 
1172
        bmwrite(dev, SROMCSR, data | ChipSelect | Clk );
1173
        udelay(DelayValue);
1174
 
1175
        bmwrite(dev, SROMCSR, data | ChipSelect);
1176
        udelay(DelayValue);
1177
}
1178
 
1179
/*
 * Reset the SROM interface, then shift in the three-bit READ opcode
 * (binary 110), leaving the device selected and expecting an address.
 */
static void
reset_and_select_srom(struct net_device *dev)
{
	/* first reset */
	bmwrite(dev, SROMCSR, 0);
	udelay(DelayValue);

	/* send it the read command (110) */
	bmac_clock_in_bit(dev, 1);
	bmac_clock_in_bit(dev, 1);
	bmac_clock_in_bit(dev, 0);
}
1191
 
1192
static unsigned short
1193
read_srom(struct net_device *dev, unsigned int addr, unsigned int addr_len)
1194
{
1195
        unsigned short data, val;
1196
        unsigned int i;
1197
 
1198
        /* send out the address we want to read from */
1199
        for (i = 0; i < addr_len; i++)   {
1200
                val = addr >> (addr_len-i-1);
1201
                bmac_clock_in_bit(dev, val & 1);
1202
        }
1203
 
1204
        /* Now read in the 16-bit data */
1205
        data = 0;
1206
        for (i = 0; i < 16; i++) {
1207
                val = bmac_clock_out_bit(dev);
1208
                data <<= 1;
1209
                data |= val;
1210
        }
1211
        bmwrite(dev, SROMCSR, 0);
1212
 
1213
        return data;
1214
}
1215
 
1216
/*
 * It looks like Cogent and SMC use different methods for calculating
 * checksums. What a pain..
 */

/*
 * Read the stored checksum word from the SROM (byte-swapped into
 * storedCS).  NOTE(review): no verification is actually performed —
 * storedCS is computed and then discarded, and the function
 * unconditionally returns 0 (success).  Callers treat non-zero as
 * failure, so this is effectively a stub.
 */
static int
bmac_verify_checksum(struct net_device *dev)
{
	unsigned short data, storedCS;

	reset_and_select_srom(dev);
	data = read_srom(dev, 3, SROMAddressBits);
	storedCS = ((data >> 8) & 0x0ff) | ((data << 8) & 0xff00);

	return 0;
}
1232
 
1233
 
1234
static void
1235
bmac_get_station_address(struct net_device *dev, unsigned char *ea)
1236
{
1237
        int i;
1238
        unsigned short data;
1239
 
1240
        for (i = 0; i < 6; i++)
1241
                {
1242
                        reset_and_select_srom(dev);
1243
                        data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits);
1244
                        ea[2*i]   = bitrev(data & 0x0ff);
1245
                        ea[2*i+1] = bitrev((data >> 8) & 0x0ff);
1246
                }
1247
}
1248
 
1249
/*
 * Full (re)initialisation: reset the chip, rebuild both DMA rings,
 * reprogram and start the chip, and unmask its interrupts.  The whole
 * sequence runs with local interrupts disabled.
 */
static void bmac_reset_and_enable(struct net_device *dev)
{
	struct bmac_data *bp = dev->priv;
	unsigned long flags;
	struct sk_buff *skb;
	unsigned char *data;

	save_flags(flags); cli();
	bmac_enable_and_reset_chip(dev);
	bmac_init_tx_ring(bp);
	bmac_init_rx_ring(bp);
	bmac_init_chip(dev);
	bmac_start_chip(dev);
	bmwrite(dev, INTDISABLE, EnableNormal);
	bp->sleeping = 0;

	/*
	 * It seems that the bmac can't receive until it's transmitted
	 * a packet.  So we give it a dummy packet to transmit.
	 */
	skb = dev_alloc_skb(ETHERMINPACKET);
	if (skb != NULL) {
		/* minimal frame: dst == src == our own MAC, rest zeroed */
		data = skb_put(skb, ETHERMINPACKET);
		memset(data, 0, ETHERMINPACKET);
		memcpy(data, dev->dev_addr, 6);
		memcpy(data+6, dev->dev_addr, 6);
		bmac_transmit_packet(skb, dev);
	}
	restore_flags(flags);
}
1279
 
1280
/*
 * Module entry point: probe every "bmac" node and every "bmac+"
 * compatible network node in the Open Firmware device tree, then
 * register the /proc/net/bmac entry (and, on PowerBooks, a PMU sleep
 * notifier) if at least one device was found.  Returns 0 on success,
 * -ENODEV if no device was detected.
 */
static int __init bmac_probe(void)
{
	struct device_node *bmac;

	/* hold the module across the probe so it can't unload mid-way */
	MOD_INC_USE_COUNT;

	for (bmac = find_devices("bmac"); bmac != 0; bmac = bmac->next)
		bmac_probe1(bmac, 0);
	for (bmac = find_compatible_devices("network", "bmac+"); bmac != 0;
	     bmac = bmac->next)
		bmac_probe1(bmac, 1);

	if (bmac_devs != 0) {
		proc_net_create ("bmac", 0, bmac_proc_info);
#ifdef CONFIG_PMAC_PBOOK
		pmu_register_sleep_notifier(&bmac_sleep_notifier);
#endif
	}

	MOD_DEC_USE_COUNT;

	return bmac_devs? 0: -ENODEV;
}
1303
 
1304
/*
 * Probe and register one BMAC device described by OF node `bmac`.
 * is_bmac_plus distinguishes the "bmac+" variant.  On success the new
 * net_device is linked onto the bmac_devs list; on failure everything
 * acquired so far is torn down and the function returns silently.
 */
static void __init bmac_probe1(struct device_node *bmac, int is_bmac_plus)
{
	int j, rev, ret;
	struct bmac_data *bp;
	unsigned char *addr;
	struct net_device *dev;
	u32 *deviceid;

	/* expect register block + TX DMA + RX DMA, with one intr each */
	if (bmac->n_addrs != 3 || bmac->n_intrs != 3) {
		printk(KERN_ERR "can't use BMAC %s: need 3 addrs and 3 intrs\n",
		       bmac->full_name);
		return;
	}
	addr = get_property(bmac, "mac-address", NULL);
	if (addr == NULL) {
		addr = get_property(bmac, "local-mac-address", NULL);
		if (addr == NULL) {
			printk(KERN_ERR "Can't get mac-address for BMAC %s\n",
			       bmac->full_name);
			return;
		}
	}

	/* shared fallback RX buffer, allocated once for all devices */
	if (bmac_emergency_rxbuf == NULL) {
		bmac_emergency_rxbuf = kmalloc(RX_BUFLEN, GFP_KERNEL);
		if (bmac_emergency_rxbuf == NULL) {
			printk(KERN_ERR "BMAC: can't allocate emergency RX buffer\n");
			return;
		}
	}

	dev = init_etherdev(NULL, PRIV_BYTES);
	if (!dev) {
		printk(KERN_ERR "init_etherdev failed, out of memory for BMAC %s\n",
		       bmac->full_name);
		return;
	}
	bp = (struct bmac_data *) dev->priv;
	SET_MODULE_OWNER(dev);
	bp->node = bmac;

	if (!request_OF_resource(bmac, 0, " (bmac)")) {
		printk(KERN_ERR "BMAC: can't request IO resource !\n");
		goto err_out;
	}
	if (!request_OF_resource(bmac, 1, " (bmac tx dma)")) {
		printk(KERN_ERR "BMAC: can't request TX DMA resource !\n");
		goto err_out;
	}

	if (!request_OF_resource(bmac, 2, " (bmac rx dma)")) {
		printk(KERN_ERR "BMAC: can't request RX DMA resource !\n");
		goto err_out;
	}
	dev->base_addr = (unsigned long)
		ioremap(bmac->addrs[0].address, bmac->addrs[0].size);
	if (!dev->base_addr)
		goto err_out;
	dev->irq = bmac->intrs[0].line;

	deviceid = (u32 *)get_property(bmac, "device-id", NULL);
	if (deviceid)
		bp->device_id = *deviceid;

	bmac_enable_and_reset_chip(dev);
	bmwrite(dev, INTDISABLE, DisableAll);

	printk(KERN_INFO "%s: BMAC%s at", dev->name, (is_bmac_plus? "+": ""));
	/* addresses beginning 00:A0 are stored bit-reversed in OF */
	rev = addr[0] == 0 && addr[1] == 0xA0;
	for (j = 0; j < 6; ++j) {
		dev->dev_addr[j] = rev? bitrev(addr[j]): addr[j];
		printk("%c%.2x", (j? ':': ' '), dev->dev_addr[j]);
	}
	XXDEBUG((", base_addr=%#0lx", dev->base_addr));
	printk("\n");

	/* Enable chip without interrupts for now */
	bmac_enable_and_reset_chip(dev);
	bmwrite(dev, INTDISABLE, DisableAll);

	dev->open = bmac_open;
	dev->stop = bmac_close;
	dev->hard_start_xmit = bmac_output;
	dev->get_stats = bmac_stats;
	dev->set_multicast_list = bmac_set_multicast;
	dev->set_mac_address = bmac_set_address;
	dev->do_ioctl = bmac_do_ioctl;

	bmac_get_station_address(dev, addr);
	if (bmac_verify_checksum(dev) != 0)
		goto err_out_iounmap;

	bp->is_bmac_plus = is_bmac_plus;
	bp->tx_dma = (volatile struct dbdma_regs *)
		ioremap(bmac->addrs[1].address, bmac->addrs[1].size);
	if (!bp->tx_dma)
		goto err_out_iounmap;
	bp->tx_dma_intr = bmac->intrs[1].line;
	bp->rx_dma = (volatile struct dbdma_regs *)
		ioremap(bmac->addrs[2].address, bmac->addrs[2].size);
	if (!bp->rx_dma)
		goto err_out_iounmap_tx;
	bp->rx_dma_intr = bmac->intrs[2].line;

	/* DBDMA command rings live in the tail of the priv area */
	bp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(bp + 1);
	bp->rx_cmds = bp->tx_cmds + N_TX_RING + 1;

	bp->queue = (struct sk_buff_head *)(bp->rx_cmds + N_RX_RING + 1);
	skb_queue_head_init(bp->queue);

	init_timer(&bp->tx_timeout);
	/*     bp->timeout_active = 0; */

	ret = request_irq(dev->irq, bmac_misc_intr, 0, "BMAC-misc", dev);
	if (ret) {
		printk(KERN_ERR "BMAC: can't get irq %d\n", dev->irq);
		goto err_out_iounmap_rx;
	}
	ret = request_irq(bmac->intrs[1].line, bmac_txdma_intr, 0, "BMAC-txdma", dev);
	if (ret) {
		printk(KERN_ERR "BMAC: can't get irq %d\n", bmac->intrs[1].line);
		goto err_out_irq0;
	}
	ret = request_irq(bmac->intrs[2].line, bmac_rxdma_intr, 0, "BMAC-rxdma", dev);
	if (ret) {
		printk(KERN_ERR "BMAC: can't get irq %d\n", bmac->intrs[2].line);
		goto err_out_irq1;
	}

	/* Mask chip interrupts and disable chip, will be
	 * re-enabled on open()
	 */
	disable_irq(dev->irq);
	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, bp->node, 0, 0);

	bp->next_bmac = bmac_devs;
	bmac_devs = dev;
	return;

err_out_irq1:
	free_irq(bmac->intrs[1].line, dev);
err_out_irq0:
	free_irq(dev->irq, dev);
err_out_iounmap_rx:
	iounmap((void *)bp->rx_dma);
err_out_iounmap_tx:
	iounmap((void *)bp->tx_dma);
err_out_iounmap:
	iounmap((void *)dev->base_addr);
err_out:
	/* NOTE(review): this path releases all three OF resources even
	 * when an early request_OF_resource() failure means some were
	 * never acquired — presumably release_OF_resource() tolerates
	 * that; confirm.  unregister_netdev() is likewise called on a
	 * device that may be only partially set up. */
	if (bp->node) {
		release_OF_resource(bp->node, 0);
		release_OF_resource(bp->node, 1);
		release_OF_resource(bp->node, 2);
		pmac_call_feature(PMAC_FTR_BMAC_ENABLE, bp->node, 0, 0);
	}
	unregister_netdev(dev);
	kfree(dev);
}
1463
 
1464
static int bmac_ethtool_ioctl(struct net_device *dev, void *useraddr)
1465
{
1466
        struct bmac_data *bp = (struct bmac_data *) dev->priv;
1467
        u32 ethcmd;
1468
 
1469
        if (get_user(ethcmd, (u32 *)useraddr))
1470
                return -EFAULT;
1471
 
1472
        switch (ethcmd) {
1473
        case ETHTOOL_GDRVINFO: {
1474
                struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO };
1475
                strcpy (info.driver, "bmac");
1476
                info.version[0] = '\0';
1477
                snprintf(info.fw_version, 31, "chip id %x", bp->device_id);
1478
                if (copy_to_user (useraddr, &info, sizeof (info)))
1479
                        return -EFAULT;
1480
                return 0;
1481
        }
1482
 
1483
        case ETHTOOL_GSET:
1484
        case ETHTOOL_SSET:
1485
        case ETHTOOL_NWAY_RST:
1486
        case ETHTOOL_GLINK:
1487
        case ETHTOOL_GMSGLVL:
1488
        case ETHTOOL_SMSGLVL:
1489
        default:
1490
                ;
1491
        }
1492
 
1493
        return -EOPNOTSUPP;
1494
}
1495
 
1496
static int bmac_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1497
{
1498
        switch(cmd) {
1499
        case SIOCETHTOOL:
1500
                return bmac_ethtool_ioctl(dev, (void *) ifr->ifr_data);
1501
 
1502
        case SIOCGMIIPHY:
1503
        case SIOCDEVPRIVATE:
1504
        case SIOCGMIIREG:
1505
        case SIOCDEVPRIVATE+1:
1506
        case SIOCSMIIREG:
1507
        case SIOCDEVPRIVATE+2:
1508
        default:
1509
                ;
1510
        }
1511
        return -EOPNOTSUPP;
1512
}
1513
 
1514
static int bmac_open(struct net_device *dev)
1515
{
1516
        struct bmac_data *bp = (struct bmac_data *) dev->priv;
1517
        /* XXDEBUG(("bmac: enter open\n")); */
1518
        /* reset the chip */
1519
        bp->opened = 1;
1520
        bmac_reset_and_enable(dev);
1521
        enable_irq(dev->irq);
1522
        dev->flags |= IFF_RUNNING;
1523
        return 0;
1524
}
1525
 
1526
/*
 * net_device stop(): quiesce the MAC and both DMA engines, free all
 * ring buffers, then mask the interrupt and power the cell down.
 */
static int bmac_close(struct net_device *dev)
{
	struct bmac_data *bp = (struct bmac_data *) dev->priv;
	volatile struct dbdma_regs *rd = bp->rx_dma;
	volatile struct dbdma_regs *td = bp->tx_dma;
	unsigned short config;
	int i;

	/* stop bmac_start()/set_multicast() from touching the hardware */
	bp->sleeping = 1;
	dev->flags &= ~(IFF_UP | IFF_RUNNING);

	/* disable rx and tx */
	config = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, (config & ~RxMACEnable));

	config = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, (config & ~TxMACEnable));

	bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */

	/* disable rx and tx dma */
	st_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */
	st_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */

	/* free some skb's */
	XXDEBUG(("bmac: free rx bufs\n"));
	for (i=0; i<N_RX_RING; i++) {
		if (bp->rx_bufs[i] != NULL) {
			dev_kfree_skb(bp->rx_bufs[i]);
			bp->rx_bufs[i] = NULL;
		}
	}
	XXDEBUG(("bmac: free tx bufs\n"));
	for (i = 0; i<N_TX_RING; i++) {
		if (bp->tx_bufs[i] != NULL) {
			dev_kfree_skb(bp->tx_bufs[i]);
			bp->tx_bufs[i] = NULL;
		}
	}
	XXDEBUG(("bmac: all bufs freed\n"));

	bp->opened = 0;
	disable_irq(dev->irq);
	/* power the BMAC cell down */
	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, bp->node, 0, 0);

	return 0;
}
1573
 
1574
/*
 * Drain queued packets from bp->queue into the TX DMA ring until the
 * ring is full (one slot is always kept free to distinguish full from
 * empty) or the queue is exhausted.  Runs with interrupts disabled.
 */
static void
bmac_start(struct net_device *dev)
{
	struct bmac_data *bp = dev->priv;
	int i;
	struct sk_buff *skb;
	unsigned long flags;

	if (bp->sleeping)
		return;

	save_flags(flags); cli();
	while (1) {
		/* next fill slot, with wraparound */
		i = bp->tx_fill + 1;
		if (i >= N_TX_RING)
			i = 0;
		/* ring full: filling this slot would collide with tx_empty */
		if (i == bp->tx_empty)
			break;
		skb = skb_dequeue(bp->queue);
		if (skb == NULL)
			break;
		bmac_transmit_packet(skb, dev);
	}
	restore_flags(flags);
}
1599
 
1600
static int
1601
bmac_output(struct sk_buff *skb, struct net_device *dev)
1602
{
1603
        struct bmac_data *bp = dev->priv;
1604
        skb_queue_tail(bp->queue, skb);
1605
        bmac_start(dev);
1606
        return 0;
1607
}
1608
 
1609
/*
 * Transmit-timeout timer handler (set up on bp->tx_timeout): the chip
 * has stalled mid-transmit.  Stop MAC and DMA, reset the chip, restart
 * RX DMA where it left off, drop the stuck TX frame, restart the TX
 * ring from the next descriptor, and re-enable the MAC.  Runs with
 * interrupts disabled throughout.
 */
static void bmac_tx_timeout(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct bmac_data *bp = (struct bmac_data *) dev->priv;
	volatile struct dbdma_regs *td = bp->tx_dma;
	volatile struct dbdma_regs *rd = bp->rx_dma;
	volatile struct dbdma_cmd *cp;
	unsigned long flags;
	unsigned short config, oldConfig;
	int i;

	XXDEBUG(("bmac: tx_timeout called\n"));
	save_flags(flags); cli();
	bp->timeout_active = 0;

	/* update various counters */
/*     	bmac_handle_misc_intrs(bp, 0); */

	cp = &bp->tx_cmds[bp->tx_empty];
/*	XXDEBUG((KERN_DEBUG "bmac: tx dmastat=%x %x runt=%d pr=%x fs=%x fc=%x\n", */
/* 	   ld_le32(&td->status), ld_le16(&cp->xfer_status), bp->tx_bad_runt, */
/* 	   mb->pr, mb->xmtfs, mb->fifofc)); */

	/* turn off both tx and rx and reset the chip */
	config = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, (config & ~RxMACEnable));
	config = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, (config & ~TxMACEnable));
	out_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
	printk(KERN_ERR "bmac: transmit timeout - resetting\n");
	bmac_enable_and_reset_chip(dev);

	/* restart rx dma */
	cp = bus_to_virt(ld_le32(&rd->cmdptr));
	out_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
	out_le16(&cp->xfer_status, 0);
	out_le32(&rd->cmdptr, virt_to_bus(cp));
	out_le32(&rd->control, DBDMA_SET(RUN|WAKE));

	/* fix up the transmit side */
	XXDEBUG((KERN_DEBUG "bmac: tx empty=%d fill=%d fullup=%d\n",
		 bp->tx_empty, bp->tx_fill, bp->tx_fullup));
	i = bp->tx_empty;
	++bp->stats.tx_errors;
	/* discard the frame that timed out and advance the ring tail */
	if (i != bp->tx_fill) {
		dev_kfree_skb(bp->tx_bufs[i]);
		bp->tx_bufs[i] = NULL;
		if (++i >= N_TX_RING) i = 0;
		bp->tx_empty = i;
	}
	bp->tx_fullup = 0;
	netif_wake_queue(dev);
	/* if more frames are pending, restart the TX DMA at the next one */
	if (i != bp->tx_fill) {
		cp = &bp->tx_cmds[i];
		out_le16(&cp->xfer_status, 0);
		out_le16(&cp->command, OUTPUT_LAST);
		out_le32(&td->cmdptr, virt_to_bus(cp));
		out_le32(&td->control, DBDMA_SET(RUN));
		/*	bmac_set_timeout(dev); */
		XXDEBUG((KERN_DEBUG "bmac: starting %d\n", i));
	}

	/* turn it back on */
	oldConfig = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
	oldConfig = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, oldConfig | TxMACEnable );

	restore_flags(flags);
}
1679
 
1680
#if 0
1681
static void dump_dbdma(volatile struct dbdma_cmd *cp,int count)
1682
{
1683
        int i,*ip;
1684
 
1685
        for (i=0;i< count;i++) {
1686
                ip = (int*)(cp+i);
1687
 
1688
                printk("dbdma req 0x%x addr 0x%x baddr 0x%x xfer/res 0x%x\n",
1689
                       ld_le32(ip+0),
1690
                       ld_le32(ip+1),
1691
                       ld_le32(ip+2),
1692
                       ld_le32(ip+3));
1693
        }
1694
 
1695
}
1696
#endif
1697
 
1698
/*
 * /proc/net/bmac read handler (classic get_info pagination protocol:
 * `offset`/`length` select a window, *start and the return value tell
 * procfs how much of the window was produced).  Dumps the register
 * table of the first device on the bmac_devs list only.
 */
static int
bmac_proc_info(char *buffer, char **start, off_t offset, int length)
{
	int len = 0;
	off_t pos   = 0;
	off_t begin = 0;
	int i;

	if (bmac_devs == NULL)
		return (-ENOSYS);

	len += sprintf(buffer, "BMAC counters & registers\n");

	for (i = 0; i<N_REG_ENTRIES; i++) {
		len += sprintf(buffer + len, "%s: %#08x\n",
			       reg_entries[i].name,
			       bmread(bmac_devs, reg_entries[i].reg_offset));
		pos = begin + len;

		/* everything so far lies before the requested window:
		 * restart the buffer at the current position */
		if (pos < offset) {
			len = 0;
			begin = pos;
		}

		/* window filled — stop generating output */
		if (pos > offset+length) break;
	}

	*start = buffer + (offset - begin);
	len -= (offset - begin);

	if (len > length) len = length;

	return len;
}
1732
 
1733
 
1734
MODULE_AUTHOR("Randy Gobbel/Paul Mackerras");
1735
MODULE_DESCRIPTION("PowerMac BMAC ethernet driver.");
1736
MODULE_LICENSE("GPL");
1737
EXPORT_NO_SYMBOLS;
1738
 
1739
/*
 * Module exit: free the shared emergency RX buffer, drop the proc
 * entry and sleep notifier, then unregister and free every device on
 * the bmac_devs list, releasing its OF resources and IRQs.
 * NOTE(review): the ioremap()ed regions (dev->base_addr, bp->tx_dma,
 * bp->rx_dma, mapped in bmac_probe1) are never iounmap()ed here —
 * looks like a leak on unload; confirm.
 */
static void __exit bmac_cleanup (void)
{
	struct bmac_data *bp;
	struct net_device *dev;

	if (bmac_emergency_rxbuf != NULL) {
		kfree(bmac_emergency_rxbuf);
		bmac_emergency_rxbuf = NULL;
	}

	if (bmac_devs == 0)
		return;
#ifdef CONFIG_PMAC_PBOOK
	pmu_unregister_sleep_notifier(&bmac_sleep_notifier);
#endif
	proc_net_remove("bmac");

	do {
		/* unlink the list head before tearing it down */
		dev = bmac_devs;
		bp = (struct bmac_data *) dev->priv;
		bmac_devs = bp->next_bmac;

		unregister_netdev(dev);

		release_OF_resource(bp->node, 0);
		release_OF_resource(bp->node, 1);
		release_OF_resource(bp->node, 2);
		free_irq(dev->irq, dev);
		free_irq(bp->tx_dma_intr, dev);
		free_irq(bp->rx_dma_intr, dev);

		kfree(dev);
	} while (bmac_devs != NULL);
}
1773
 
1774
module_init(bmac_probe);
1775
module_exit(bmac_cleanup);

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.