/*
 * ibm_ocp_mal.c
 *
 *      Armin Kuster akuster@mvista.com
 *      June, 2002
 *
 * Copyright 2002 MontaVista Software Inc.
 *
 * This program is free software; you can redistribute  it and/or modify it
 *  under  the terms of  the GNU General  Public License as published by the
 *  Free Software Foundation;  either version 2 of the  License, or (at your
 *  option) any later version.
 *
 *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR   IMPLIED
 *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT,  INDIRECT,
 *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
 *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
 *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *  You should have received a copy of the  GNU General Public License along
 *  with this program; if not, write  to the Free Software Foundation, Inc.,
 *  675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *  TODO: Move to a separate module
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/init.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/ocp.h>

#include "ibm_ocp_mal.h"

// Locking: Should we share a lock with the client? The client could provide
// a lock pointer (optionally) in the commac structure... I don't think this is
// really necessary though


/* This lock protects the commac list. On today's UP implementations, it's
 * really only used as IRQ protection in mal_{register,unregister}_commac()
 */
static rwlock_t mal_list_lock = RW_LOCK_UNLOCKED;

int
mal_register_commac(struct ibm_ocp_mal *mal, struct mal_commac *commac)
{
        unsigned long flags;

        write_lock_irqsave(&mal_list_lock, flags);

        /* Don't let multiple commacs claim the same channel */
        if ( (mal->tx_chan_mask & commac->tx_chan_mask) ||
             (mal->rx_chan_mask & commac->rx_chan_mask) ) {
                write_unlock_irqrestore(&mal_list_lock, flags);
                return -EBUSY;
        }

        mal->tx_chan_mask |= commac->tx_chan_mask;
        mal->rx_chan_mask |= commac->rx_chan_mask;

        list_add(&commac->list, &mal->commac);

        write_unlock_irqrestore(&mal_list_lock, flags);

        MOD_INC_USE_COUNT;

        return 0;
}

int
mal_unregister_commac(struct ibm_ocp_mal *mal, struct mal_commac *commac)
{
        unsigned long flags;

        write_lock_irqsave(&mal_list_lock, flags);

        mal->tx_chan_mask &= ~commac->tx_chan_mask;
        mal->rx_chan_mask &= ~commac->rx_chan_mask;

        list_del_init(&commac->list);

        write_unlock_irqrestore(&mal_list_lock, flags);

        MOD_DEC_USE_COUNT;

        return 0;
}
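
/*
 * How a client such as the EMAC driver hooks in (a sketch only; names not
 * defined in this file -- emac_commac_ops, MAL_CHAN_MASK(), the callbacks --
 * are assumptions for illustration):
 *
 *      commac->ops          = &emac_commac_ops;   -- txeob/rxeob/txde/rxde
 *      commac->dev          = dev;                 -- handed back to callbacks
 *      commac->tx_chan_mask = MAL_CHAN_MASK(tx_chan);
 *      commac->rx_chan_mask = MAL_CHAN_MASK(rx_chan);
 *      err = mal_register_commac(mal, commac);
 *
 * mal_register_commac() refuses overlapping channel masks with -EBUSY, and
 * mal_unregister_commac() must be called before the client is torn down.
 */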

int mal_set_rcbs(struct ibm_ocp_mal *mal, int channel, unsigned long size)
{
        switch (channel) {
        case 0:
                set_mal_dcrn(mal, DCRN_MALRCBS0, size);
                break;
#ifdef DCRN_MALRCBS1
        case 1:
                set_mal_dcrn(mal, DCRN_MALRCBS1, size);
                break;
#endif
#ifdef DCRN_MALRCBS2
        case 2:
                set_mal_dcrn(mal, DCRN_MALRCBS2, size);
                break;
#endif
#ifdef DCRN_MALRCBS3
        case 3:
                set_mal_dcrn(mal, DCRN_MALRCBS3, size);
                break;
#endif
        default:
                return -EINVAL;
        }

        return 0;
}
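
/*
 * mal_set_rcbs() programs the per-channel Receive Channel Buffer Size DCR
 * (MALRCBSn) for the given RX channel.  A client would typically call it once
 * per RX channel before enabling that channel; 'size' must already be in the
 * encoding the MAL expects in this register (on the 4xx parts this is the
 * receive buffer size in 16-byte units -- an assumption here, check the chip
 * manual), so no conversion is done on the caller's behalf.
 */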

static void
mal_serr(int irq, void *dev_instance, struct pt_regs *regs)
{
        struct ibm_ocp_mal *mal = dev_instance;
        unsigned long mal_error;

        /*
         * This SERR applies to one of the devices on the MAL, here we charge
         * it against the first EMAC registered for the MAL.
         */

        mal_error = get_mal_dcrn(mal, DCRN_MALESR);

        printk(KERN_ERR "%s: System Error (MALESR=%lx)\n",
               "MAL" /* FIXME: get the name right */, mal_error);

        /* FIXME: decipher error */
        /* FIXME: distribute to commacs, if possible */

        /* Clear the error status register */
        set_mal_dcrn(mal, DCRN_MALESR, mal_error);
}

static void
mal_txeob(int irq, void *dev_instance, struct pt_regs *regs)
{
        struct ibm_ocp_mal *mal = dev_instance;
        struct list_head *l;
        unsigned long isr;

        isr = get_mal_dcrn(mal, DCRN_MALTXEOBISR);
        set_mal_dcrn(mal, DCRN_MALTXEOBISR, isr);

        read_lock(&mal_list_lock);
        list_for_each(l, &mal->commac) {
                struct mal_commac *mc = list_entry(l, struct mal_commac, list);

                if (isr & mc->tx_chan_mask) {
                        mc->ops->txeob(mc->dev, isr & mc->tx_chan_mask);
                }
        }
        read_unlock(&mal_list_lock);
}

static void
mal_rxeob(int irq, void *dev_instance, struct pt_regs *regs)
{
        struct ibm_ocp_mal *mal = dev_instance;
        struct list_head *l;
        unsigned long isr;

        isr = get_mal_dcrn(mal, DCRN_MALRXEOBISR);
        set_mal_dcrn(mal, DCRN_MALRXEOBISR, isr);

        read_lock(&mal_list_lock);
        list_for_each(l, &mal->commac) {
                struct mal_commac *mc = list_entry(l, struct mal_commac, list);

                if (isr & mc->rx_chan_mask) {
                        mc->ops->rxeob(mc->dev, isr & mc->rx_chan_mask);
                }
        }
        read_unlock(&mal_list_lock);
}

static void
mal_txde(int irq, void *dev_instance, struct pt_regs *regs)
{
        struct ibm_ocp_mal *mal = dev_instance;
        struct list_head *l;
        unsigned long deir;

        deir = get_mal_dcrn(mal, DCRN_MALTXDEIR);

        /* FIXME: print which MAL correctly */
        printk(KERN_WARNING "%s: Tx descriptor error (MALTXDEIR=%lx)\n",
               "MAL", deir);

        read_lock(&mal_list_lock);
        list_for_each(l, &mal->commac) {
                struct mal_commac *mc = list_entry(l, struct mal_commac, list);

                if (deir & mc->tx_chan_mask) {
                        mc->ops->txde(mc->dev, deir & mc->tx_chan_mask);
                }
        }
        read_unlock(&mal_list_lock);
}

/*
 * This interrupt should be very rare at best.  It occurs when
 * the hardware has a problem with the receive descriptors.  The manual
 * states that it is raised when the hardware goes to use a receive
 * descriptor whose empty bit is not set.  The recovery mechanism will be to
 * traverse through the descriptors, handle any that are marked to be
 * handled and reinitialize each along the way.  At that point the driver
 * will be restarted.
 */
static void
mal_rxde(int irq, void *dev_instance, struct pt_regs *regs)
{
        struct ibm_ocp_mal *mal = dev_instance;
        struct list_head *l;
        unsigned long deir;

        deir = get_mal_dcrn(mal, DCRN_MALRXDEIR);

        /*
         * This really is needed.  This case was encountered in stress testing.
         */
        if (deir == 0)
                return;

        /* FIXME: print which MAL correctly */
        printk(KERN_WARNING "%s: Rx descriptor error (MALRXDEIR=%lx)\n",
               "MAL", deir);

        read_lock(&mal_list_lock);
        list_for_each(l, &mal->commac) {
                struct mal_commac *mc = list_entry(l, struct mal_commac, list);

                if (deir & mc->rx_chan_mask) {
                        mc->ops->rxde(mc->dev, deir & mc->rx_chan_mask);
                }
        }
        read_unlock(&mal_list_lock);
}
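
/*
 * Note that the descriptor walk described above is left to each client's
 * rxde() callback; this handler only demultiplexes MALRXDEIR to the commacs
 * whose RX channels are flagged.
 */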

static int __init
mal_probe(struct ocp_device *ocpdev)
{
        struct ibm_ocp_mal *mal = NULL;
        struct ocp_func_mal_data *maldata;
        int err = 0;

        maldata = (struct ocp_func_mal_data *)ocpdev->def->additions;
        if (maldata == NULL) {
                printk(KERN_ERR "mal%d: Missing additional data!\n", ocpdev->def->index);
                return -ENODEV;
        }

        mal = kmalloc(sizeof(struct ibm_ocp_mal), GFP_KERNEL);
        if (mal == NULL) {
                printk(KERN_ERR "mal%d: Out of memory allocating MAL structure!\n",
                        ocpdev->def->index);
                return -ENOMEM;
        }
        memset(mal, 0, sizeof(*mal));

        switch (ocpdev->def->index) {
        case 0:
                mal->dcrbase = DCRN_MAL_BASE;
                break;
#ifdef DCRN_MAL1_BASE
        case 1:
                mal->dcrbase = DCRN_MAL1_BASE;
                break;
#endif
        default:
                BUG();
        }
        mal->serr_irq = BL_MAL_SERR;
        mal->txde_irq = BL_MAL_TXDE;
        mal->txeob_irq = BL_MAL_TXEOB;
        mal->rxde_irq = BL_MAL_RXDE;
        mal->rxeob_irq = BL_MAL_RXEOB;

        mal->num_tx_channels = maldata->num_tx_chans;
        mal->num_rx_channels = maldata->num_rx_chans;

        /**************************/

        INIT_LIST_HEAD(&mal->commac);

        set_mal_dcrn(mal, DCRN_MALRXCARR, 0xFFFFFFFF);
        set_mal_dcrn(mal, DCRN_MALTXCARR, 0xFFFFFFFF);

        set_mal_dcrn(mal, DCRN_MALCR, MALCR_MMSR);      /* 384 */
        /* FIXME: Add delay */

        /* Set the MAL configuration register */
        set_mal_dcrn(mal, DCRN_MALCR,
                     MALCR_PLBB | MALCR_OPBBL | MALCR_LEA |
                     MALCR_PLBLT_DEFAULT);

        /* It would be nice to allocate buffers separately for each
         * channel, but we can't because the channels share the upper
         * 13 bits of address lines.  Each channel's buffer must also
         * be 4k aligned, so we allocate 4k for each channel.  This is
         * inefficient.  FIXME: do better, if possible */

        mal->tx_virt_addr = consistent_alloc(GFP_KERNEL,
                                             MAL_DT_ALIGN * mal->num_tx_channels,
                                             &mal->tx_phys_addr);
        if (mal->tx_virt_addr == NULL) {
                printk(KERN_ERR "mal%d: Out of memory allocating MAL descriptors!\n",
                        ocpdev->def->index);
                err = -ENOMEM;
                goto fail;
        }

        /* God, oh, god, I hate DCRs */
        set_mal_dcrn(mal, DCRN_MALTXCTP0R, mal->tx_phys_addr);
#ifdef DCRN_MALTXCTP1R
        if (mal->num_tx_channels > 1)
                set_mal_dcrn(mal, DCRN_MALTXCTP1R,
                             mal->tx_phys_addr + MAL_DT_ALIGN);
#endif /* DCRN_MALTXCTP1R */
#ifdef DCRN_MALTXCTP2R
        if (mal->num_tx_channels > 2)
                set_mal_dcrn(mal, DCRN_MALTXCTP2R,
                             mal->tx_phys_addr + 2*MAL_DT_ALIGN);
#endif /* DCRN_MALTXCTP2R */
#ifdef DCRN_MALTXCTP3R
        if (mal->num_tx_channels > 3)
                set_mal_dcrn(mal, DCRN_MALTXCTP3R,
                             mal->tx_phys_addr + 3*MAL_DT_ALIGN);
#endif /* DCRN_MALTXCTP3R */
#ifdef DCRN_MALTXCTP4R
        if (mal->num_tx_channels > 4)
                set_mal_dcrn(mal, DCRN_MALTXCTP4R,
                             mal->tx_phys_addr + 4*MAL_DT_ALIGN);
#endif /* DCRN_MALTXCTP4R */
#ifdef DCRN_MALTXCTP5R
        if (mal->num_tx_channels > 5)
                set_mal_dcrn(mal, DCRN_MALTXCTP5R,
                             mal->tx_phys_addr + 5*MAL_DT_ALIGN);
#endif /* DCRN_MALTXCTP5R */
#ifdef DCRN_MALTXCTP6R
        if (mal->num_tx_channels > 6)
                set_mal_dcrn(mal, DCRN_MALTXCTP6R,
                             mal->tx_phys_addr + 6*MAL_DT_ALIGN);
#endif /* DCRN_MALTXCTP6R */
#ifdef DCRN_MALTXCTP7R
        if (mal->num_tx_channels > 7)
                set_mal_dcrn(mal, DCRN_MALTXCTP7R,
                             mal->tx_phys_addr + 7*MAL_DT_ALIGN);
#endif /* DCRN_MALTXCTP7R */
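
        /*
         * Each TX channel i thus gets its descriptor table at
         * tx_phys_addr + i * MAL_DT_ALIGN, and that address is programmed
         * into the corresponding MALTXCTPiR channel table pointer DCR (only
         * for the channels the platform actually defines).  The RX channels
         * below follow the same scheme.
         */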

        mal->rx_virt_addr = consistent_alloc(GFP_KERNEL,
                                             MAL_DT_ALIGN * mal->num_rx_channels,
                                             &mal->rx_phys_addr);

        set_mal_dcrn(mal, DCRN_MALRXCTP0R, mal->rx_phys_addr);
#ifdef DCRN_MALRXCTP1R
        if (mal->num_rx_channels > 1)
                set_mal_dcrn(mal, DCRN_MALRXCTP1R,
                             mal->rx_phys_addr + MAL_DT_ALIGN);
#endif /* DCRN_MALRXCTP1R */
#ifdef DCRN_MALRXCTP2R
        if (mal->num_rx_channels > 2)
                set_mal_dcrn(mal, DCRN_MALRXCTP2R,
                             mal->rx_phys_addr + 2*MAL_DT_ALIGN);
#endif /* DCRN_MALRXCTP2R */
#ifdef DCRN_MALRXCTP3R
        if (mal->num_rx_channels > 3)
                set_mal_dcrn(mal, DCRN_MALRXCTP3R,
                             mal->rx_phys_addr + 3*MAL_DT_ALIGN);
#endif /* DCRN_MALRXCTP3R */

        err = request_irq(mal->serr_irq, mal_serr, 0, "MAL SERR", mal);
        if (err)
                goto fail;
        err = request_irq(mal->txde_irq, mal_txde, 0, "MAL TX DE", mal);
        if (err)
                goto fail;
        err = request_irq(mal->txeob_irq, mal_txeob, 0, "MAL TX EOB", mal);
        if (err)
                goto fail;
        err = request_irq(mal->rxde_irq, mal_rxde, 0, "MAL RX DE", mal);
        if (err)
                goto fail;
        err = request_irq(mal->rxeob_irq, mal_rxeob, 0, "MAL RX EOB", mal);
        if (err)
                goto fail;

        set_mal_dcrn(mal, DCRN_MALIER,
                     MALIER_DE | MALIER_NE | MALIER_TE |
                     MALIER_OPBE | MALIER_PLBE);

        /* Advertise me to the rest of the world */
        ocp_set_drvdata(ocpdev, mal);

        printk(KERN_INFO "mal%d: Initialized, %d tx channels, %d rx channels\n",
                        ocpdev->def->index, mal->num_tx_channels, mal->num_rx_channels);

        return 0;

 fail:
        /* FIXME: dispose requested IRQs! */
        if (err && mal)
                kfree(mal);
        return err;
}
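
/*
 * The failure path above leaks any IRQs already requested and the descriptor
 * memory (hence the FIXME).  A minimal sketch of the missing unwind, using
 * only calls already made in this file (the label names are illustrative,
 * not part of the driver):
 *
 *      fail_rxeob: free_irq(mal->rxde_irq, mal);
 *      fail_rxde:  free_irq(mal->txeob_irq, mal);
 *      fail_txeob: free_irq(mal->txde_irq, mal);
 *      fail_txde:  free_irq(mal->serr_irq, mal);
 *      fail_serr:  consistent_free(mal->rx_virt_addr);
 *                  consistent_free(mal->tx_virt_addr);
 *      fail:       kfree(mal);
 *                  return err;
 *
 * with each request_irq() failure jumping to the label that releases
 * everything acquired before it.
 */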

static void __exit
mal_remove(struct ocp_device *ocpdev)
{
        struct ibm_ocp_mal *mal = ocp_get_drvdata(ocpdev);

        ocp_set_drvdata(ocpdev, NULL);

        /* FIXME: shut down the MAL, deal with dependency with emac */
        free_irq(mal->serr_irq, mal);
        free_irq(mal->txde_irq, mal);
        free_irq(mal->txeob_irq, mal);
        free_irq(mal->rxde_irq, mal);
        free_irq(mal->rxeob_irq, mal);

        if (mal->tx_virt_addr)
                consistent_free(mal->tx_virt_addr);
        if (mal->rx_virt_addr)
                consistent_free(mal->rx_virt_addr);

        kfree(mal);
}

/* Structure for a device driver */
static struct ocp_device_id mal_ids[] =
{
        { .vendor = OCP_ANY_ID, .function = OCP_FUNC_MAL },
        { .vendor = OCP_VENDOR_INVALID }
};

static struct ocp_driver mal_driver =
{
        .name           = "mal",
        .id_table       = mal_ids,

        .probe          = mal_probe,
        .remove         = mal_remove,
};
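
/*
 * The OCP core matches mal_ids against the devices described in the board's
 * OCP table and calls mal_probe() for each MAL function it finds.  Assuming
 * the 2.4 ibm_ocp core follows the old PCI convention of returning the number
 * of devices bound from ocp_register_driver(), a zero return in init_mals()
 * below means no MAL was found, so the driver unregisters itself and fails
 * with -ENODEV.
 */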

static int __init
init_mals(void)
{
        int rc;

        rc = ocp_register_driver(&mal_driver);
        if (rc == 0) {
                ocp_unregister_driver(&mal_driver);
                return -ENODEV;
        }

        return 0;
}

static void __exit
exit_mals(void)
{
        ocp_unregister_driver(&mal_driver);
}

module_init(init_mals);
module_exit(exit_mals);
