OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

[/] [or1k/] [trunk/] [linux/] [linux-2.4/] [drivers/] [net/] [ether00.c] - Blame information for rev 1765

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 1275 phoenix
/*
2
 *  drivers/net/ether00.c
3
 *
4
 *  Copyright (C) 2001 Altera Corporation
5
 *
6
 * This program is free software; you can redistribute it and/or modify
7
 * it under the terms of the GNU General Public License as published by
8
 * the Free Software Foundation; either version 2 of the License, or
9
 * (at your option) any later version.
10
 *
11
 * This program is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14
 * GNU General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU General Public License
17
 * along with this program; if not, write to the Free Software
18
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19
 */
20
 
21
/* includes */
22
 
23
#include <linux/pci.h>
24
#include <linux/netdevice.h>
25
#include <linux/sched.h>
26
#include <linux/netdevice.h>
27
#include <linux/skbuff.h>
28
#include <linux/etherdevice.h>
29
#include <linux/module.h>
30
#include <linux/tqueue.h>
31
#include <linux/mtd/mtd.h>
32
#include <asm/arch/excalibur.h>
33
#include <asm/arch/hardware.h>
34
#include <asm/irq.h>
35
#include <asm/io.h>
36
#include <asm/sizes.h>
37
 
38
#include <asm/arch/ether00.h>
39
#include <asm/arch/tdkphy.h>
40
 
41
 
42
MODULE_AUTHOR("Clive Davies");
43
MODULE_DESCRIPTION("Altera Ether00 IP core driver");
44
MODULE_LICENSE("GPL");
45
 
46
/* Module parameters: MMIO base of the MAC register window, the MAC
 * interrupt line and the PHY interrupt line.  Defaults suit the
 * Excalibur epxa10db board; overridable at module load time. */
static long base=0x60000000;
static int irq=0x1;
static int phy_irq=0x2;
MODULE_PARM(base,"l");
MODULE_PARM(irq,"i");
MODULE_PARM(phy_irq,"i");

#define TX_TIMEOUT  (400*HZ/1000)	/* 400 ms, expressed in jiffies */
#define PKT_BUF_SZ 1540 /* Size of each rx buffer */

/* Debug printks compiled out by default */
#undef  DEBUG
#define DEBUG(x)

/* Convert between the CPU's uncached virtual mapping of the on-chip
 * SPSRAM block (priv->dma_data) and the physical addresses the MAC's
 * DMA engine uses.  Both macros assume a local `priv` is in scope. */
#define __dma_va(x) (unsigned int)((unsigned int)priv->dma_data+(((unsigned int)(x))&(EXC_SPSRAM_BLOCK0_SIZE-1)))
#define __dma_pa(x) (unsigned int)(EXC_SPSRAM_BLOCK0_BASE+(((unsigned int)(x))-(unsigned int)priv->dma_data))

#define ETHER00_BASE    0
#define ETHER00_TYPE
#define ETHER00_NAME "ether00"
#define MAC_REG_SIZE 0x400 /* size of MAC register area */

/* typedefs */

/* The definition of the driver control structure */

/* Numbers of RX buffers and RX/TX frame descriptors carved out of the
 * shared SPSRAM block by ether00_mem_init() */
#define RX_NUM_BUFF     10
#define RX_NUM_FDESC    10
#define TX_NUM_FDESC    10
77
 
78
/* One TX frame-descriptor entry: a frame descriptor plus one buffer
 * descriptor, padded so all entries are the same size. */
struct tx_fda_ent{
        FDA_DESC  fd;
        BUF_DESC  bd;
        BUF_DESC  pad;
};
/* One RX frame-descriptor entry (same layout as TX). */
struct rx_fda_ent{
        FDA_DESC  fd;
        BUF_DESC  bd;
        BUF_DESC  pad;
};
/* One RX buffer-list entry handed to the MAC to receive into. */
struct rx_blist_ent{
        FDA_DESC  fd;
        BUF_DESC  bd;
        BUF_DESC  pad;
};
/* Per-device driver state, hung off dev->priv. */
struct net_priv
{
        struct net_device_stats stats;
        struct sk_buff* skb;
        void* dma_data;                   /* uncached mapping of on-chip SPSRAM */
        struct rx_blist_ent*  rx_blist_vp; /* RX buffer list (virtual) */
        struct rx_fda_ent* rx_fda_ptr;     /* RX frame descriptor area */
        struct tx_fda_ent* tx_fdalist_vp;  /* TX frame descriptor ring */
        struct tq_struct  tq_memupdate;    /* task to replenish rx buffers */
        unsigned char   rx_disabled;       /* rx halted pending buffers */
        unsigned char   memupdate_scheduled;
        unsigned char   queue_stopped;     /* netif queue stopped by tx path */
        spinlock_t dma_lock;               /* guards the descriptor areas */
        unsigned int tx_head;              /* next tx descriptor to fill */
        unsigned int tx_tail;              /* oldest unreaped tx descriptor */
};

/* NOTE(review): presumably the vendor OUI prefix used to recognise the
 * adapter's MAC address — its use is not visible in this chunk; confirm. */
static const char vendor_id[2]={0x07,0xed};
111
 
112
#ifdef ETHER00_DEBUG
113
 
114
/* Dump (most) registers for debugging puposes */
115
 
116
/* Debug aid (compiled only under ETHER00_DEBUG): dump the three DMA
 * descriptor areas (RX frame descriptors, RX buffer list, TX frame
 * descriptors) and most MAC registers to the kernel log. */
static void dump_regs(struct net_device *dev){
        struct net_priv* priv=dev->priv;
        unsigned int* i;

        printk("\n RX free descriptor area:\n");

        /* Each descriptor entry is printed as four 32-bit words per line */
        for(i=(unsigned int*)priv->rx_fda_ptr;
            i<((unsigned int*)(priv->rx_fda_ptr+RX_NUM_FDESC));){
                printk("%#8x %#8x %#8x %#8x\n",*i,*(i+1),*(i+2),*(i+3));
                i+=4;
        }

        printk("\n RX buffer list:\n");

        for(i=(unsigned int*)priv->rx_blist_vp;
            i<((unsigned int*)(priv->rx_blist_vp+RX_NUM_BUFF));){
                printk("%#8x %#8x %#8x %#8x\n",*i,*(i+1),*(i+2),*(i+3));
                i+=4;
        }

        printk("\n TX frame descriptor list:\n");

        for(i=(unsigned int*)priv->tx_fdalist_vp;
            i<((unsigned int*)(priv->tx_fdalist_vp+TX_NUM_FDESC));){
                printk("%#8x %#8x %#8x %#8x\n",*i,*(i+1),*(i+2),*(i+3));
                i+=4;
        }

        /* MAC register snapshot (reads only; none of these reads should
         * have side effects on this controller) */
        printk("\ndma ctl=%#x\n",readw(ETHER_DMA_CTL(dev->base_addr)));
        printk("txfrmptr=%#x\n",readw(ETHER_TXFRMPTR(dev->base_addr)));
        printk("txthrsh=%#x\n",readw(ETHER_TXTHRSH(dev->base_addr)));
        printk("txpollctr=%#x\n",readw(ETHER_TXPOLLCTR(dev->base_addr)));
        printk("blfrmptr=%#x\n",readw(ETHER_BLFRMPTR(dev->base_addr)));
        printk("rxfragsize=%#x\n",readw(ETHER_RXFRAGSIZE(dev->base_addr)));
        printk("tx_int_en=%#x\n",readw(ETHER_INT_EN(dev->base_addr)));
        printk("fda_bas=%#x\n",readw(ETHER_FDA_BAS(dev->base_addr)));
        printk("fda_lim=%#x\n",readw(ETHER_FDA_LIM(dev->base_addr)));
        printk("int_src=%#x\n",readw(ETHER_INT_SRC(dev->base_addr)));
        printk("pausecnt=%#x\n",readw(ETHER_PAUSECNT(dev->base_addr)));
        printk("rempaucnt=%#x\n",readw(ETHER_REMPAUCNT(dev->base_addr)));
        printk("txconfrmstat=%#x\n",readw(ETHER_TXCONFRMSTAT(dev->base_addr)));
        printk("mac_ctl=%#x\n",readw(ETHER_MAC_CTL(dev->base_addr)));
        printk("arc_ctl=%#x\n",readw(ETHER_ARC_CTL(dev->base_addr)));
        printk("tx_ctl=%#x\n",readw(ETHER_TX_CTL(dev->base_addr)));
}
161
#endif /* ETHER00_DEBUG */
162
 
163
 
164
static int ether00_write_phy(struct net_device *dev, short address, short value)
165
{
166
        volatile int count = 1024;
167
        writew(value,ETHER_MD_DATA(dev->base_addr));
168
        writew( ETHER_MD_CA_BUSY_MSK |
169
                ETHER_MD_CA_WR_MSK |
170
                (address & ETHER_MD_CA_ADDR_MSK),
171
                ETHER_MD_CA(dev->base_addr));
172
 
173
        /* Wait for the command to complete */
174
        while((readw(ETHER_MD_CA(dev->base_addr)) & ETHER_MD_CA_BUSY_MSK)&&count){
175
                count--;
176
        }
177
        if (!count){
178
                printk("Write to phy failed, addr=%#x, data=%#x\n",address, value);
179
                return -EIO;
180
        }
181
        return 0;
182
}
183
 
184
static int ether00_read_phy(struct net_device *dev, short address)
185
{
186
        volatile int count = 1024;
187
        writew( ETHER_MD_CA_BUSY_MSK |
188
                (address & ETHER_MD_CA_ADDR_MSK),
189
                ETHER_MD_CA(dev->base_addr));
190
 
191
        /* Wait for the command to complete */
192
        while((readw(ETHER_MD_CA(dev->base_addr)) & ETHER_MD_CA_BUSY_MSK)&&count){
193
                count--;
194
        }
195
        if (!count){
196
                printk(KERN_WARNING "Read from phy timed out\n");
197
                return -EIO;
198
        }
199
        return readw(ETHER_MD_DATA(dev->base_addr));
200
}
201
 
202
static void ether00_phy_int(int irq_num, void* dev_id, struct pt_regs* regs)
203
{
204
        struct net_device* dev=dev_id;
205
        int irq_status;
206
 
207
        irq_status=ether00_read_phy(dev, PHY_IRQ_CONTROL);
208
 
209
        if(irq_status & PHY_IRQ_CONTROL_ANEG_COMP_INT_MSK){
210
                /*
211
                 * Autonegotiation complete on epxa10db. The mac doesn't
212
                 * twig if we're in full duplex so we need to check the
213
                 * phy diagnostic register and configure the mac accordingly
214
                 */
215
          if(ether00_read_phy(dev, PHY_DIAGNOSTIC)&PHY_DIAGNOSTIC_DPLX_MSK){
216
                  int tmp;
217
                  tmp=readl(ETHER_MAC_CTL(dev->base_addr));
218
                  writel(tmp|ETHER_MAC_CTL_FULLDUP_MSK,
219
                         ETHER_MAC_CTL(dev->base_addr));
220
                }
221
                else
222
                  {
223
                    int tmp;
224
                    tmp=readl(ETHER_MAC_CTL(dev->base_addr));
225
                    writel(tmp&(~ETHER_MAC_CTL_FULLDUP_MSK),
226
                           ETHER_MAC_CTL(dev->base_addr));
227
 
228
                  }
229
        }
230
 
231
        if(irq_status&PHY_IRQ_CONTROL_LS_CHG_INT_MSK){
232
 
233
                if(ether00_read_phy(dev, PHY_STATUS)& PHY_STATUS_LINK_MSK){
234
                        /* Link is up */
235
                        netif_carrier_on(dev);
236
                }else{
237
                        netif_carrier_off(dev);
238
                }
239
        }
240
 
241
}
242
 
243
/* Attach a freshly allocated skb to one RX buffer-list entry and hand
 * the entry back to the controller.  FDSystem keeps the skb pointer so
 * the ISR can find it again when the frame completes. */
static void setup_blist_entry(struct sk_buff* skb,struct rx_blist_ent* blist_ent_ptr){
        /* Make the buffer consistent with the cache as the mac is going to write
         * directly into it*/
        blist_ent_ptr->fd.FDSystem=(unsigned int)skb;
        blist_ent_ptr->bd.BuffData=(char*)__pa(skb->data);
        consistent_sync(skb->data,PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
        /* align IP on 16 Byte (DMA_CTL set to skip 2 bytes) */
        skb_reserve(skb,2);
        blist_ent_ptr->bd.BuffLength=PKT_BUF_SZ-2;
        blist_ent_ptr->fd.FDLength=1;
        /* Give ownership of frame and buffer descriptor to the controller */
        blist_ent_ptr->fd.FDCtl=FDCTL_COWNSFD_MSK;
        blist_ent_ptr->bd.BDCtl=BDCTL_COWNSBD_MSK;
}
256
 
257
 
258
static int ether00_mem_init(struct net_device* dev)
259
{
260
        struct net_priv* priv=dev->priv;
261
        struct tx_fda_ent *tx_fd_ptr,*tx_end_ptr;
262
        struct rx_blist_ent* blist_ent_ptr;
263
        int i;
264
 
265
        /*
266
         * Grab a block of on chip SRAM to contain the control stuctures for
267
         * the ethernet MAC. This uncached becuase it needs to be accesses by both
268
         * bus masters (cpu + mac). However, it shouldn't matter too much in terms
269
         * of speed as its on chip memory
270
         */
271
        priv->dma_data=ioremap_nocache(EXC_SPSRAM_BLOCK0_BASE,EXC_SPSRAM_BLOCK0_SIZE );
272
        if (!priv->dma_data)
273
                return -ENOMEM;
274
 
275
        priv->rx_fda_ptr=(struct rx_fda_ent*)priv->dma_data;
276
        /*
277
         * Now share it out amongst the Frame descriptors and the buffer list
278
         */
279
        priv->rx_blist_vp=(struct rx_blist_ent*)((unsigned int)priv->dma_data+RX_NUM_FDESC*sizeof(struct rx_fda_ent));
280
 
281
        /*
282
         *Initalise the FDA list
283
         */
284
        /* set ownership to the controller */
285
        memset(priv->rx_fda_ptr,0x80,RX_NUM_FDESC*sizeof(struct rx_fda_ent));
286
 
287
        /*
288
         *Initialise the buffer list
289
         */
290
        blist_ent_ptr=priv->rx_blist_vp;
291
        i=0;
292
        while(blist_ent_ptr<(priv->rx_blist_vp+RX_NUM_BUFF)){
293
                struct sk_buff *skb;
294
                blist_ent_ptr->fd.FDLength=1;
295
                skb=dev_alloc_skb(PKT_BUF_SZ);
296
                if(skb){
297
                        setup_blist_entry(skb,blist_ent_ptr);
298
                        blist_ent_ptr->fd.FDNext=(FDA_DESC*)__dma_pa(blist_ent_ptr+1);
299
                        blist_ent_ptr->bd.BDStat=i++;
300
                        blist_ent_ptr++;
301
                }
302
                else
303
                {
304
                        printk("Failed to initalise buffer list\n");
305
                }
306
 
307
        }
308
        blist_ent_ptr--;
309
        blist_ent_ptr->fd.FDNext=(FDA_DESC*)__dma_pa(priv->rx_blist_vp);
310
 
311
        priv->tx_fdalist_vp=(struct tx_fda_ent*)(priv->rx_blist_vp+RX_NUM_BUFF);
312
 
313
        /* Initialise the buffers to be a circular list. The mac will then go poll
314
         * the list until it finds a frame ready to transmit */
315
        tx_end_ptr=priv->tx_fdalist_vp+TX_NUM_FDESC;
316
        for(tx_fd_ptr=priv->tx_fdalist_vp;tx_fd_ptr<tx_end_ptr;tx_fd_ptr++){
317
                tx_fd_ptr->fd.FDNext=(FDA_DESC*)__dma_pa((tx_fd_ptr+1));
318
                tx_fd_ptr->fd.FDCtl=1;
319
                tx_fd_ptr->fd.FDStat=0;
320
                tx_fd_ptr->fd.FDSystem=0;
321
                tx_fd_ptr->fd.FDLength=1;
322
 
323
        }
324
        /* Change the last FDNext pointer to make a circular list */
325
        tx_fd_ptr--;
326
        tx_fd_ptr->fd.FDNext=(FDA_DESC*)__dma_pa(priv->tx_fdalist_vp);
327
 
328
        /* Point the device at the chain of Rx and Tx Buffers */
329
        writel((unsigned int)__dma_pa(priv->rx_fda_ptr),ETHER_FDA_BAS(dev->base_addr));
330
        writel((RX_NUM_FDESC-1)*sizeof(struct rx_fda_ent),ETHER_FDA_LIM(dev->base_addr));
331
        writel((unsigned int)__dma_pa(priv->rx_blist_vp),ETHER_BLFRMPTR(dev->base_addr));
332
 
333
        priv->tx_tail = priv->tx_head = (unsigned int) priv->tx_fdalist_vp;
334
        writel((unsigned int) __dma_pa(priv->tx_head), ETHER_TXFRMPTR(dev->base_addr));
335
 
336
        return 0;
337
}
338
 
339
 
340
void ether00_mem_update(void* dev_id)
341
{
342
        struct net_device* dev=dev_id;
343
        struct net_priv* priv=dev->priv;
344
        struct rx_blist_ent* blist_ent_ptr;
345
        unsigned long flags;
346
        int enable_rx = 0;
347
 
348
        priv->tq_memupdate.sync=0;
349
        priv->memupdate_scheduled=0;
350
 
351
        /* Fill in any missing buffers from the received queue */
352
        blist_ent_ptr=priv->rx_blist_vp;
353
        while(blist_ent_ptr<(priv->rx_blist_vp+RX_NUM_BUFF)){
354
                spin_lock_irqsave(&priv->dma_lock,flags);
355
                /* fd.FDSystem of 0 indicates we failed to allocate the buffer in the ISR */
356
                if(!blist_ent_ptr->fd.FDSystem){
357
                        struct sk_buff *skb;
358
                        skb=dev_alloc_skb(PKT_BUF_SZ);
359
                        blist_ent_ptr->fd.FDSystem=(unsigned int)skb;
360
                        if(skb){
361
                                setup_blist_entry(skb,blist_ent_ptr);
362
                                enable_rx = 1;
363
                        }
364
                        else
365
                        {
366
                                /*
367
                                 * reschedule the clean up, since we
368
                                 * didn't patch up all the buffers
369
                                 */
370
 
371
                                 if(!priv->memupdate_scheduled){
372
                                   schedule_task(&priv->tq_memupdate);
373
                                   priv->memupdate_scheduled=1;
374
                                 }
375
                                 spin_unlock_irqrestore(&priv->dma_lock,flags);
376
                                 break;
377
                        }
378
                }
379
                spin_unlock_irqrestore(&priv->dma_lock,flags);
380
                blist_ent_ptr++;
381
        }
382
 
383
        if(enable_rx){
384
          if (!priv->rx_disabled){
385
                priv->rx_disabled = 0;
386
                writel(ETHER_RX_CTL_RXEN_MSK,ETHER_RX_CTL(dev->base_addr));
387
          }
388
        }
389
}
390
 
391
 
392
static void ether00_int( int irq_num, void* dev_id, struct pt_regs* regs)
393
{
394
        struct net_device* dev=dev_id;
395
        struct net_priv* priv=dev->priv;
396
 
397
        unsigned int   interruptValue;
398
 
399
        int enable_tx = 0;
400
        struct tx_fda_ent *fda_ptr;
401
        struct sk_buff* skb;
402
 
403
        interruptValue=readl(ETHER_INT_SRC(dev->base_addr));
404
 
405
        if(!(readl(ETHER_INT_SRC(dev->base_addr)) & ETHER_INT_SRC_IRQ_MSK))
406
        {
407
                return;         /* Interrupt wasn't caused by us!! */
408
        }
409
 
410
        if(readl(ETHER_INT_SRC(dev->base_addr))&
411
           (ETHER_INT_SRC_INTMACRX_MSK |
412
            ETHER_INT_SRC_FDAEX_MSK |
413
            ETHER_INT_SRC_BLEX_MSK)) {
414
                struct rx_blist_ent* blist_ent_ptr;
415
                struct rx_fda_ent* fda_ent_ptr;
416
                struct sk_buff* skb;
417
 
418
                fda_ent_ptr=priv->rx_fda_ptr;
419
                spin_lock(&priv->dma_lock);
420
                while(fda_ent_ptr<(priv->rx_fda_ptr+RX_NUM_FDESC)){
421
                        int result;
422
 
423
                        if(!(fda_ent_ptr->fd.FDCtl&FDCTL_COWNSFD_MSK))
424
                        {
425
                                /* This frame is ready for processing */
426
                                /*find the corresponding buffer in the bufferlist */
427
                                blist_ent_ptr=priv->rx_blist_vp+fda_ent_ptr->bd.BDStat;
428
                                skb=(struct sk_buff*)blist_ent_ptr->fd.FDSystem;
429
 
430
                                /* Pass this skb up the stack */
431
                                skb->dev=dev;
432
                                skb_put(skb,fda_ent_ptr->fd.FDLength);
433
                                skb->protocol=eth_type_trans(skb,dev);
434
                                skb->ip_summed=CHECKSUM_UNNECESSARY;
435
                                result=netif_rx(skb);
436
                                /* Update statistics */
437
                                priv->stats.rx_packets++;
438
                                priv->stats.rx_bytes+=fda_ent_ptr->fd.FDLength;
439
 
440
                                /* Free the FDA entry */
441
                                fda_ent_ptr->bd.BDStat=0xff;
442
                                fda_ent_ptr->fd.FDCtl=FDCTL_COWNSFD_MSK;
443
 
444
                                /* Allocate a new skb and point the bd entry to it */
445
                                blist_ent_ptr->fd.FDSystem=0;
446
                                skb=dev_alloc_skb(PKT_BUF_SZ);
447
                                if(skb){
448
                                        setup_blist_entry(skb,blist_ent_ptr);
449
 
450
                                }
451
                                else if(!priv->memupdate_scheduled){
452
                                        int tmp;
453
                                        /* There are no buffers at the moment, so schedule */
454
                                        /* the background task to sort this out */
455
                                        schedule_task(&priv->tq_memupdate);
456
                                        priv->memupdate_scheduled=1;
457
                                        printk(KERN_DEBUG "%s:No buffers",dev->name);
458
                                        /* If this interrupt was due to a lack of buffers then
459
                                         * we'd better stop the receiver too */
460
                                        if(interruptValue&ETHER_INT_SRC_BLEX_MSK){
461
                                                priv->rx_disabled=1;
462
                                                tmp=readl(ETHER_INT_SRC(dev->base_addr));
463
                                                writel(tmp&~ETHER_RX_CTL_RXEN_MSK,ETHER_RX_CTL(dev->base_addr));
464
                                                printk(KERN_DEBUG "%s:Halting rx",dev->name);
465
                                        }
466
 
467
                                }
468
 
469
                        }
470
                        fda_ent_ptr++;
471
                }
472
                spin_unlock(&priv->dma_lock);
473
 
474
                /* Clear the  interrupts */
475
                writel(ETHER_INT_SRC_INTMACRX_MSK | ETHER_INT_SRC_FDAEX_MSK
476
                       | ETHER_INT_SRC_BLEX_MSK,ETHER_INT_SRC(dev->base_addr));
477
 
478
        }
479
 
480
        if(readl(ETHER_INT_SRC(dev->base_addr))&ETHER_INT_SRC_INTMACTX_MSK){
481
 
482
          /* Transmit interrupt */
483
 
484
          fda_ptr=(struct tx_fda_ent*) priv->tx_tail;
485
 
486
          /* free up all completed frames */
487
 
488
          while(!(FDCTL_COWNSFD_MSK&fda_ptr->fd.FDCtl) && fda_ptr->fd.FDSystem){
489
            priv->stats.tx_packets++;
490
            priv->stats.tx_bytes+=fda_ptr->bd.BuffLength;
491
            skb=(struct sk_buff*)fda_ptr->fd.FDSystem;
492
            dev_kfree_skb_irq(skb);
493
            fda_ptr->fd.FDSystem=0;
494
            fda_ptr->fd.FDStat=0;
495
            fda_ptr->fd.FDCtl=0;
496
            fda_ptr = (struct tx_fda_ent *)__dma_va(fda_ptr->fd.FDNext);
497
            enable_tx = 1;
498
          }
499
          priv->tx_tail = (unsigned int) fda_ptr;
500
 
501
          if(priv->queue_stopped && enable_tx){
502
            priv->queue_stopped=0;
503
            netif_wake_queue(dev);
504
          }
505
 
506
          /* Clear the interrupt */
507
          writel(ETHER_INT_SRC_INTMACTX_MSK,ETHER_INT_SRC(dev->base_addr));
508
        }
509
 
510
        if (readl(ETHER_INT_SRC(dev->base_addr)) & (ETHER_INT_SRC_SWINT_MSK|
511
                                                    ETHER_INT_SRC_INTEARNOT_MSK|
512
                                                    ETHER_INT_SRC_INTLINK_MSK|
513
                                                    ETHER_INT_SRC_INTEXBD_MSK|
514
                                                    ETHER_INT_SRC_INTTXCTLCMP_MSK))
515
        {
516
                /*
517
                 *      Not using any of these so they shouldn't happen
518
                 *
519
                 *      In the cased of INTEXBD - if you allocate more
520
                 *      than 28 decsriptors you may need to think about this
521
                 */
522
                printk("Not using this interrupt\n");
523
        }
524
 
525
        if (readl(ETHER_INT_SRC(dev->base_addr)) &
526
            (ETHER_INT_SRC_INTSBUS_MSK |
527
             ETHER_INT_SRC_INTNRABT_MSK
528
             |ETHER_INT_SRC_DMPARERR_MSK))
529
        {
530
                /*
531
                 * Hardware errors, we can either ignore them and hope they go away
532
                 *or reset the device, I'll try the first for now to see if they happen
533
                 */
534
                printk("Hardware error\n");
535
        }
536
}
537
 
538
/* Program dev->dev_addr into the first entry of the controller's ARC
 * (address recognition) table and enable that entry.  The 6-byte MAC
 * address spans one and a half 32-bit ARC words, so the second word is
 * read-modified-written to preserve its other half. */
static void ether00_setup_ethernet_address(struct net_device* dev)
{
        int tmp;

        dev->addr_len=6;
        /* First ARC word: bytes 0-3 of the station address */
        writew(0,ETHER_ARC_ADR(dev->base_addr));
        writel((dev->dev_addr[0]<<24) |
                (dev->dev_addr[1]<<16) |
                (dev->dev_addr[2]<<8) |
                dev->dev_addr[3],
                ETHER_ARC_DATA(dev->base_addr));

        /* Second ARC word: bytes 4-5 in the top half; keep the low half */
        writew(4,ETHER_ARC_ADR(dev->base_addr));
        tmp=readl(ETHER_ARC_DATA(dev->base_addr));
        tmp&=0xffff;
        tmp|=(dev->dev_addr[4]<<24) | (dev->dev_addr[5]<<16);
        writel(tmp, ETHER_ARC_DATA(dev->base_addr));
        /* Enable this entry in the ARC */

        writel(1,ETHER_ARC_ENA(dev->base_addr));

        return;
}
561
 
562
 
563
/* Put the controller into a known quiescent state: soft-reset the MAC,
 * halt both tx and rx, mask interrupts, and program the DMA burst size,
 * RX alignment skip, tx threshold and tx poll counter.  Reception is
 * left halted; the caller enables it later.  The write ordering below
 * is deliberate — do not reorder MMIO accesses. */
static void ether00_reset(struct net_device *dev)
{
        /* reset the controller */
        writew(ETHER_MAC_CTL_RESET_MSK,ETHER_MAC_CTL(dev->base_addr));

        /*
         * Make sure we're not going to send anything
         */

        writew(ETHER_TX_CTL_TXHALT_MSK,ETHER_TX_CTL(dev->base_addr));

        /*
         * Make sure we're not going to receive anything
         */
        writew(ETHER_RX_CTL_RXHALT_MSK,ETHER_RX_CTL(dev->base_addr));

        /*
         * Disable Interrupts for now, and set the burst size to 8 bytes
         * (the 2-byte RX alignment skip makes the IP header 16-byte
         * aligned in the rx buffers — see setup_blist_entry)
         */

        writel(ETHER_DMA_CTL_INTMASK_MSK |
               ((8 << ETHER_DMA_CTL_DMBURST_OFST) & ETHER_DMA_CTL_DMBURST_MSK)
               |(2<<ETHER_DMA_CTL_RXALIGN_OFST),
               ETHER_DMA_CTL(dev->base_addr));


        /*
         * Set TxThrsh - start transmitting a packet after 1514
         * bytes or when a packet is complete, whichever comes first
         */
         writew(1514,ETHER_TXTHRSH(dev->base_addr));

        /*
         * Set TxPollCtr.  Each cycle is
         * 61.44 microseconds with a 33 MHz bus
         */
         writew(1,ETHER_TXPOLLCTR(dev->base_addr));

        /*
         * Set Rx_Ctl - Turn off reception and let RxData turn it
         * on later
         */
         writew(ETHER_RX_CTL_RXHALT_MSK,ETHER_RX_CTL(dev->base_addr));

}
608
 
609
 
610
/* Program the controller's ARC (address recognition) table from the
 * device's multicast list, or fall back to promiscuous / all-multicast
 * modes when requested or when the list won't fit.  Multicast entries
 * are packed after the station address, which occupies the first six
 * bytes of the ARC, so addresses straddle 32-bit ARC words in an
 * alternating pattern (hence the two half-unrolled cases in the loop). */
static void ether00_set_multicast(struct net_device* dev)
{
        int count=dev->mc_count;

        /* Set promiscuous mode if it's asked for. */

        if (dev->flags&IFF_PROMISC){

                writew( ETHER_ARC_CTL_COMPEN_MSK |
                        ETHER_ARC_CTL_BROADACC_MSK |
                        ETHER_ARC_CTL_GROUPACC_MSK |
                        ETHER_ARC_CTL_STATIONACC_MSK,
                        ETHER_ARC_CTL(dev->base_addr));
                return;
        }

        /*
         * Get all multicast packets if required, or if there are too
         * many addresses to fit in hardware
         */
        if (dev->flags & IFF_ALLMULTI){
                writew( ETHER_ARC_CTL_COMPEN_MSK |
                        ETHER_ARC_CTL_GROUPACC_MSK |
                        ETHER_ARC_CTL_BROADACC_MSK,
                        ETHER_ARC_CTL(dev->base_addr));
                return;
        }
        if (dev->mc_count > (ETHER_ARC_SIZE - 1)){

                printk(KERN_WARNING "Too many multicast addresses for hardware to filter - receiving all multicast packets\n");
                writew( ETHER_ARC_CTL_COMPEN_MSK |
                        ETHER_ARC_CTL_GROUPACC_MSK |
                        ETHER_ARC_CTL_BROADACC_MSK,
                        ETHER_ARC_CTL(dev->base_addr));
                return;
        }

        if(dev->mc_count){
                struct dev_mc_list *mc_list_ent=dev->mc_list;
                unsigned int temp,i;
                DEBUG(printk("mc_count=%d mc_list=%#x\n",dev-> mc_count, dev->mc_list));
                DEBUG(printk("mc addr=%02#x%02x%02x%02x%02x%02x\n",
                             mc_list_ent->dmi_addr[5],
                             mc_list_ent->dmi_addr[4],
                             mc_list_ent->dmi_addr[3],
                             mc_list_ent->dmi_addr[2],
                             mc_list_ent->dmi_addr[1],
                             mc_list_ent->dmi_addr[0]);)

                /*
                 * The first 6 bytes are the MAC address, so
                 * don't change them!
                 */
                writew(4,ETHER_ARC_ADR(dev->base_addr));
                temp=readl(ETHER_ARC_DATA(dev->base_addr));
                temp&=0xffff0000;

                /* Disable the current multicast stuff */
                /* NOTE(review): this writes 1 (station entry only) to
                 * ARC_ENA; the final enable mask is rewritten at the end
                 * of this function — confirm this intermediate value is
                 * the intended "disable" state. */
                writel(1,ETHER_ARC_ENA(dev->base_addr));

                for(;;){
                        /* Case A: address starts in the low half of an ARC
                         * word — 2 bytes here, 4 in the next word */
                        temp|=mc_list_ent->dmi_addr[1] |
                                mc_list_ent->dmi_addr[0]<<8;
                        writel(temp,ETHER_ARC_DATA(dev->base_addr));

                        i=readl(ETHER_ARC_ADR(dev->base_addr));
                        writew(i+4,ETHER_ARC_ADR(dev->base_addr));

                        temp=mc_list_ent->dmi_addr[5]|
                                mc_list_ent->dmi_addr[4]<<8 |
                                mc_list_ent->dmi_addr[3]<<16 |
                                mc_list_ent->dmi_addr[2]<<24;
                        writel(temp,ETHER_ARC_DATA(dev->base_addr));

                        count--;
                        if(!mc_list_ent->next || !count){
                                break;
                        }
                        DEBUG(printk("mc_list_next=%#x\n",mc_list_ent->next);)
                        mc_list_ent=mc_list_ent->next;

                        /* Case B: address starts on an ARC word boundary —
                         * 4 bytes here, 2 in the top of the next word */
                        /* NOTE(review): ARC_ADR is written with writew in
                         * case A but writel here and below — confirm
                         * whether the register is 16- or 32-bit and make
                         * the accessors consistent. */
                        i=readl(ETHER_ARC_ADR(dev->base_addr));
                        writel(i+4,ETHER_ARC_ADR(dev->base_addr));

                        temp=mc_list_ent->dmi_addr[3]|
                                mc_list_ent->dmi_addr[2]<<8 |
                                mc_list_ent->dmi_addr[1]<<16 |
                                mc_list_ent->dmi_addr[0]<<24;
                        writel(temp,ETHER_ARC_DATA(dev->base_addr));

                        i=readl(ETHER_ARC_ADR(dev->base_addr));
                        writel(i+4,ETHER_ARC_ADR(dev->base_addr));

                        temp=mc_list_ent->dmi_addr[4]<<16 |
                                mc_list_ent->dmi_addr[5]<<24;

                        writel(temp,ETHER_ARC_DATA(dev->base_addr));

                        count--;
                        if(!mc_list_ent->next || !count){
                                break;
                        }
                        mc_list_ent=mc_list_ent->next;
                }


                if(count)
                        printk(KERN_WARNING "Multicast list size error\n");


                writew( ETHER_ARC_CTL_BROADACC_MSK|
                        ETHER_ARC_CTL_COMPEN_MSK,
                        ETHER_ARC_CTL(dev->base_addr));

        }

        /* enable the active ARC enties */
        writew((1<<(dev->mc_count+2))-1,ETHER_ARC_ENA(dev->base_addr));
}
730
 
731
 
732
static int ether00_open(struct net_device* dev)
733
{
734
        int result,tmp;
735
        struct net_priv* priv;
736
 
737
        if (!is_valid_ether_addr(dev->dev_addr))
738
                return -EINVAL;
739
 
740
        dev->base_addr=(unsigned int)ioremap_nocache(base,SZ_4K);
741
 
742
        dev->irq=irq;
743
 
744
        /* Allocate private memory */
745
        dev->priv=kmalloc(sizeof(struct net_priv),GFP_KERNEL);
746
        if(!dev->priv)
747
                return -ENOMEM;
748
        memset(dev->priv,0,sizeof(struct net_priv));
749
        priv=(struct net_priv*)dev->priv;
750
        priv->tq_memupdate.routine=ether00_mem_update;
751
        priv->tq_memupdate.data=(void*) dev;
752
        spin_lock_init(&priv->dma_lock);
753
 
754
        /* Install interrupt handlers */
755
        result=request_irq(dev->irq,ether00_int,0,"ether00",dev);
756
        if(result)
757
                goto open_err1;
758
 
759
        result=request_irq(phy_irq,ether00_phy_int,0,"ether00_phy",dev);
760
        if(result)
761
                goto open_err2;
762
 
763
        ether00_reset(dev);
764
        result=ether00_mem_init(dev);
765
        if(result)
766
                goto open_err3;
767
 
768
 
769
        ether00_setup_ethernet_address(dev);
770
 
771
        ether00_set_multicast(dev);
772
 
773
        result=ether00_write_phy(dev,PHY_CONTROL, PHY_CONTROL_ANEGEN_MSK | PHY_CONTROL_RANEG_MSK);
774
        if(result)
775
                goto open_err4;
776
        result=ether00_write_phy(dev,PHY_IRQ_CONTROL, PHY_IRQ_CONTROL_LS_CHG_IE_MSK |
777
                                 PHY_IRQ_CONTROL_ANEG_COMP_IE_MSK);
778
        if(result)
779
                goto open_err4;
780
 
781
        /* Start the device enable interrupts */
782
        writew(ETHER_RX_CTL_RXEN_MSK
783
//             | ETHER_RX_CTL_STRIPCRC_MSK
784
               | ETHER_RX_CTL_ENGOOD_MSK
785
               | ETHER_RX_CTL_ENRXPAR_MSK| ETHER_RX_CTL_ENLONGERR_MSK
786
               | ETHER_RX_CTL_ENOVER_MSK| ETHER_RX_CTL_ENCRCERR_MSK,
787
               ETHER_RX_CTL(dev->base_addr));
788
 
789
        writew(ETHER_TX_CTL_TXEN_MSK|
790
               ETHER_TX_CTL_ENEXDEFER_MSK|
791
               ETHER_TX_CTL_ENLCARR_MSK|
792
               ETHER_TX_CTL_ENEXCOLL_MSK|
793
               ETHER_TX_CTL_ENLATECOLL_MSK|
794
               ETHER_TX_CTL_ENTXPAR_MSK|
795
               ETHER_TX_CTL_ENCOMP_MSK,
796
               ETHER_TX_CTL(dev->base_addr));
797
 
798
        tmp=readl(ETHER_DMA_CTL(dev->base_addr));
799
        writel(tmp&~ETHER_DMA_CTL_INTMASK_MSK,ETHER_DMA_CTL(dev->base_addr));
800
 
801
        return 0;
802
 
803
 open_err4:
804
        ether00_reset(dev);
805
 open_err3:
806
        free_irq(2,dev);
807
 open_err2:
808
        free_irq(dev->irq,dev);
809
 open_err1:
810
        iounmap((void*)dev->base_addr);
811
        kfree(dev->priv);
812
        return result;
813
 
814
}
815
 
816
 
817
/*
 * hard_start_xmit handler: hand one skb to the DMA engine.
 *
 * The tx descriptors (tx_fda_ent) form a chain linked by fd.FDNext
 * (physical addresses, converted back with __dma_va).  fd.FDSystem
 * doubles as both the saved skb pointer and the "slot in use" flag:
 * a non-zero FDSystem on the next slot means the ring is full.
 * Returns 0 (frame accepted) in all paths.
 */
static int ether00_tx(struct sk_buff* skb, struct net_device* dev)
{
        struct net_priv *priv=dev->priv;
        struct tx_fda_ent *fda_ptr;
        unsigned long flags;
        int retcode = 0;

        /*
         *      Find an empty slot in which to stick the frame
         */

        /* dma_lock serialises ring access against the interrupt handler */
        spin_lock_irqsave(&priv->dma_lock,flags);

        fda_ptr=(struct tx_fda_ent*) priv->tx_head;

        /* Advance tx_head to the next descriptor in the chain */
        priv->tx_head =(unsigned int) __dma_va(fda_ptr->fd.FDNext);

        /* Write the skb data from the cache*/
        consistent_sync(skb->data,skb->len,PCI_DMA_TODEVICE);
        /* Buffer descriptor takes the physical address of the payload */
        fda_ptr->bd.BuffData=(char*)__pa(skb->data);
        fda_ptr->bd.BuffLength=(unsigned short)skb->len;
        /* Save the pointer to the skb for freeing later */
        fda_ptr->fd.FDSystem=(unsigned int)skb;
        fda_ptr->fd.FDStat=0;
        /* Pass ownership of the buffers to the controller */
        /* NOTE(review): COWNSFD is set last, presumably so the controller
         * only sees a fully populated descriptor - do not reorder.
         * NOTE(review): dev->trans_start is not updated here - TODO confirm
         * whether the watchdog relies on it. */
        fda_ptr->fd.FDCtl=1;
        fda_ptr->fd.FDCtl|=FDCTL_COWNSFD_MSK;

        /* Re-read: fda_ptr now refers to the NEXT slot in the ring */
        fda_ptr=(struct tx_fda_ent*) priv->tx_head;

        /* If the next buffer in the list is full, stop the queue */
        if (fda_ptr->fd.FDSystem){
                netif_stop_queue(dev);
                priv->queue_stopped=1;
        }

        spin_unlock_irqrestore(&priv->dma_lock,flags);
        return retcode;
}
856
 
857
static void ether00_tx_timeout(struct net_device* dev)
858
{
859
         /*
860
          * Something really bad has happened here. This
861
          * SHOULD never happen. Given that, it's difficult
862
          * to know what to do to recover. For now we'll just
863
          * count the error, and restart the queue.
864
          */
865
 
866
        struct net_priv *priv=dev->priv;
867
 
868
        priv->stats.tx_errors++;
869
 
870
        priv->queue_stopped=0;
871
        netif_wake_queue(dev);
872
 
873
}
874
 
875
static struct net_device_stats *ether00_stats(struct net_device* dev)
876
{
877
        struct net_priv *priv=dev->priv;
878
        return &priv->stats;
879
}
880
 
881
 
882
static int ether00_stop(struct net_device* dev)
883
{
884
        struct net_priv *priv=dev->priv;
885
        int tmp;
886
 
887
        /* Stop/disable the device. */
888
        tmp=readw(ETHER_RX_CTL(dev->base_addr));
889
        tmp&=~(ETHER_RX_CTL_RXEN_MSK | ETHER_RX_CTL_ENGOOD_MSK);
890
        tmp|=ETHER_RX_CTL_RXHALT_MSK;
891
        writew(tmp,ETHER_RX_CTL(dev->base_addr));
892
 
893
        tmp=readl(ETHER_TX_CTL(dev->base_addr));
894
        tmp&=~ETHER_TX_CTL_TXEN_MSK;
895
        tmp|=ETHER_TX_CTL_TXHALT_MSK;
896
        writel(tmp,ETHER_TX_CTL(dev->base_addr));
897
 
898
        /* Free up system resources */
899
        free_irq(dev->irq,dev);
900
        free_irq(2,dev);
901
        iounmap(priv->dma_data);
902
        iounmap((void*)dev->base_addr);
903
        kfree(priv);
904
 
905
        return 0;
906
}
907
 
908
 
909
static void ether00_get_ethernet_address(struct net_device* dev)
910
{
911
        struct mtd_info *mymtd=NULL;
912
        int i;
913
        size_t retlen;
914
 
915
        /*
916
         * For the Epxa10 dev board (camelot), the ethernet MAC
917
         * address is of the form  00:aa:aa:00:xx:xx where
918
         * 00:aa:aa is the Altera vendor ID and xx:xx is the
919
         * last 2 bytes of the board serial number, as programmed
920
         * into the OTP area of the flash device on EBI1. If this
921
         * isn't an expa10 dev board, or there's no mtd support to
922
         * read the serial number from flash then we'll force the
923
         * use to set their own mac address using ifconfig.
924
         */
925
 
926
#ifdef CONFIG_ARCH_CAMELOT
927
#ifdef CONFIG_MTD
928
        /* get the mtd_info structure for the first mtd device*/
929
        for(i=0;i<MAX_MTD_DEVICES;i++){
930
                mymtd=get_mtd_device(NULL,i);
931
                if(!mymtd||!strcmp(mymtd->name,"EPXA10DB flash"))
932
                        break;
933
        }
934
 
935
        if(!mymtd || !mymtd->read_user_prot_reg){
936
                printk(KERN_WARNING "%s: Failed to read MAC address from flash\n",dev->name);
937
        }else{
938
                mymtd->read_user_prot_reg(mymtd,2,1,&retlen,&dev->dev_addr[5]);
939
                mymtd->read_user_prot_reg(mymtd,3,1,&retlen,&dev->dev_addr[4]);
940
                dev->dev_addr[3]=0;
941
                dev->dev_addr[2]=vendor_id[1];
942
                dev->dev_addr[1]=vendor_id[0];
943
                dev->dev_addr[0]=0;
944
        }
945
#else
946
        printk(KERN_WARNING "%s: MTD support required to read MAC address from EPXA10 dev board\n", dev->name);
947
#endif
948
#endif
949
 
950
        if (!is_valid_ether_addr(dev->dev_addr))
951
                printk("%s: Invalid ethernet MAC address.  Please set using "
952
                        "ifconfig\n", dev->name);
953
 
954
}
955
 
956
static int ether00_init(struct net_device* dev)
957
{
958
 
959
        ether_setup(dev);
960
 
961
        dev->open=ether00_open;
962
        dev->stop=ether00_stop;
963
        dev->set_multicast_list=ether00_set_multicast;
964
        dev->hard_start_xmit=ether00_tx;
965
        dev->get_stats=ether00_stats;
966
        dev->tx_timeout=ether00_tx_timeout;
967
        dev->watchdog_timeo=TX_TIMEOUT;
968
 
969
        ether00_get_ethernet_address(dev);
970
 
971
        SET_MODULE_OWNER(dev);
972
        return 0;
973
}
974
 
975
 
976
struct net_device ether00_dev={
977
        init:ether00_init,
978
        name:"eth%d",
979
};
980
 
981
 
982
/* Module unload: unregister the device (the stack calls our stop
 * handler if the interface is still up). */
static void __exit ether00_cleanup_module(void)
{
        unregister_netdev(&ether00_dev);
}
module_exit(ether00_cleanup_module);
987
 
988
 
989
/*
 * Module load: register the net_device; a non-zero result from
 * register_netdev is logged and propagated unchanged.
 */
static int __init ether00_mod_init(void)
{
        int err = register_netdev(&ether00_dev);

        if (err != 0)
                printk("Ether00: Error %i registering driver\n",err);
        return err;
}

module_init(ether00_mod_init);
1000
 
1001
 

powered by: WebSVN 2.1.0

© copyright 1999-2026 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.