OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

[/] [or1k/] [trunk/] [linux/] [linux-2.4/] [arch/] [sparc64/] [kernel/] [sbus.c] - Blame information for rev 1765

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 1275 phoenix
/* $Id: sbus.c,v 1.1.1.1 2004-04-15 01:34:38 phoenix Exp $
2
 * sbus.c: UltraSparc SBUS controller support.
3
 *
4
 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
5
 */
6
 
7
#include <linux/kernel.h>
8
#include <linux/types.h>
9
#include <linux/mm.h>
10
#include <linux/spinlock.h>
11
#include <linux/slab.h>
12
#include <linux/init.h>
13
 
14
#include <asm/page.h>
15
#include <asm/sbus.h>
16
#include <asm/io.h>
17
#include <asm/upa.h>
18
#include <asm/cache.h>
19
#include <asm/dma.h>
20
#include <asm/irq.h>
21
#include <asm/starfire.h>
22
 
23
#include "iommu_common.h"
24
 
25
/* These should be allocated on an SMP_CACHE_BYTES
26
 * aligned boundary for optimal performance.
27
 *
28
 * On SYSIO, using an 8K page size we have 1GB of SBUS
29
 * DMA space mapped.  We divide this space into equally
30
 * sized clusters.  Currently we allow clusters up to a
31
 * size of 1MB.  If anything begins to generate DMA
32
 * mapping requests larger than this we will need to
33
 * increase things a bit.
34
 */
35
 
36
#define NCLUSTERS       8UL
37
#define ONE_GIG         (1UL * 1024UL * 1024UL * 1024UL)
38
#define CLUSTER_SIZE    (ONE_GIG / NCLUSTERS)
39
#define CLUSTER_MASK    (CLUSTER_SIZE - 1)
40
#define CLUSTER_NPAGES  (CLUSTER_SIZE >> IO_PAGE_SHIFT)
41
#define MAP_BASE        ((u32)0xc0000000)
42
 
43
/* Software state for one SBUS IOMMU instance.  The hex markers on the
 * left record each member's byte offset within the structure; keep
 * them in sync if the layout ever changes.
 */
struct sbus_iommu {
/*0x00*/spinlock_t              lock;		/* Protects page_table and alloc_info */

/*0x08*/iopte_t                 *page_table;	/* IOMMU TSB: array of IO-PTEs */
/*0x10*/unsigned long           strbuf_regs;	/* Streaming buffer register base */
/*0x18*/unsigned long           iommu_regs;	/* IOMMU register base */
/*0x20*/unsigned long           sbus_control_reg; /* SYSIO SBUS control register */

	/* DMA-visible flag; the streaming buffer DMA-writes a non-zero
	 * value here when a flush-sync completes (see strbuf_flush()).
	 */
/*0x28*/volatile unsigned long  strbuf_flushflag;

        /* If NCLUSTERS is ever decreased to 4 or lower,
         * you must increase the size of the type of
         * these counters.  You have been duly warned. -DaveM
         */
/*0x30*/struct {
                u16     next;	/* Allocation rover index within this cluster */
                u16     flush;	/* Rover index at which a full TLB flush is due */
        } alloc_info[NCLUSTERS];

        /* The lowest used consistent mapping entry.  Since
         * we allocate consistent maps out of cluster 0 this
         * is relative to the beginning of cluster 0.
         */
/*0x50*/u32             lowest_consistent_map;
};
68
 
69
/* Offsets from iommu_regs */
70
#define SYSIO_IOMMUREG_BASE     0x2400UL
71
#define IOMMU_CONTROL   (0x2400UL - 0x2400UL)   /* IOMMU control register */
72
#define IOMMU_TSBBASE   (0x2408UL - 0x2400UL)   /* TSB base address register */
73
#define IOMMU_FLUSH     (0x2410UL - 0x2400UL)   /* IOMMU flush register */
74
#define IOMMU_VADIAG    (0x4400UL - 0x2400UL)   /* SBUS virtual address diagnostic */
75
#define IOMMU_TAGCMP    (0x4408UL - 0x2400UL)   /* TLB tag compare diagnostics */
76
#define IOMMU_LRUDIAG   (0x4500UL - 0x2400UL)   /* IOMMU LRU queue diagnostics */
77
#define IOMMU_TAGDIAG   (0x4580UL - 0x2400UL)   /* TLB tag diagnostics */
78
#define IOMMU_DRAMDIAG  (0x4600UL - 0x2400UL)   /* TLB data RAM diagnostics */
79
 
80
#define IOMMU_DRAM_VALID        (1UL << 30UL)
81
 
82
static void __iommu_flushall(struct sbus_iommu *iommu)
83
{
84
        unsigned long tag = iommu->iommu_regs + IOMMU_TAGDIAG;
85
        int entry;
86
 
87
        for (entry = 0; entry < 16; entry++) {
88
                upa_writeq(0, tag);
89
                tag += 8UL;
90
        }
91
        upa_readq(iommu->sbus_control_reg);
92
 
93
        for (entry = 0; entry < NCLUSTERS; entry++) {
94
                iommu->alloc_info[entry].flush =
95
                        iommu->alloc_info[entry].next;
96
        }
97
}
98
 
99
/* Flush the IOMMU TLB entries covering npages IO pages starting at
 * DVMA address base, highest page first.
 */
static void iommu_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages)
{
	unsigned long n;

	for (n = npages; n != 0; n--)
		upa_writeq(base + ((n - 1) << IO_PAGE_SHIFT),
			   iommu->iommu_regs + IOMMU_FLUSH);

	/* Read back to force completion of the flush writes. */
	upa_readq(iommu->sbus_control_reg);
}
106
 
107
/* Offsets from strbuf_regs */
108
#define SYSIO_STRBUFREG_BASE    0x2800UL
109
#define STRBUF_CONTROL  (0x2800UL - 0x2800UL)   /* Control */
110
#define STRBUF_PFLUSH   (0x2808UL - 0x2800UL)   /* Page flush/invalidate */
111
#define STRBUF_FSYNC    (0x2810UL - 0x2800UL)   /* Flush synchronization */
112
#define STRBUF_DRAMDIAG (0x5000UL - 0x2800UL)   /* data RAM diagnostic */
113
#define STRBUF_ERRDIAG  (0x5400UL - 0x2800UL)   /* error status diagnostics */
114
#define STRBUF_PTAGDIAG (0x5800UL - 0x2800UL)   /* Page tag diagnostics */
115
#define STRBUF_LTAGDIAG (0x5900UL - 0x2800UL)   /* Line tag diagnostics */
116
 
117
#define STRBUF_TAG_VALID        0x02UL
118
 
119
/* Flush the streaming buffer over npages IO pages starting at DVMA
 * address base, then wait for the hardware to acknowledge completion.
 */
static void strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages)
{
	iommu->strbuf_flushflag = 0UL;
	while (npages--)
		upa_writeq(base + (npages << IO_PAGE_SHIFT),
			   iommu->strbuf_regs + STRBUF_PFLUSH);

	/* Whoopee cushion!  Handing the flag's physical address to the
	 * FSYNC register makes the streaming buffer DMA-write a
	 * non-zero value there once the queued page flushes drain.
	 */
	upa_writeq(__pa(&iommu->strbuf_flushflag),
		   iommu->strbuf_regs + STRBUF_FSYNC);
	upa_readq(iommu->sbus_control_reg);
	/* Busy-wait for the completion DMA write. */
	while (iommu->strbuf_flushflag == 0UL)
		membar("#LoadLoad");
}
133
 
134
/* Allocate a run of IOPTEs big enough to map npages streaming pages.
 *
 * The page table is split into NCLUSTERS equally-sized clusters;
 * cluster N serves requests of 2^N pages, allocated with a circular
 * rover (alloc_info[cnum].next).  Returns the first IOPTE of the run,
 * or NULL when the cluster is full.  Caller must hold iommu->lock.
 */
static iopte_t *alloc_streaming_cluster(struct sbus_iommu *iommu, unsigned long npages)
{
	iopte_t *iopte, *limit, *first;
	unsigned long cnum, ent, flush_point;

	/* Pick the smallest cluster whose slot size (2^cnum pages)
	 * can hold the request.
	 */
	cnum = 0;
	while ((1UL << cnum) < npages)
		cnum++;
	iopte  = iommu->page_table + (cnum * CLUSTER_NPAGES);

	/* Cluster 0 is shared with consistent mappings, which grow
	 * down from its end; stop the search before colliding with them.
	 */
	if (cnum == 0)
		limit = (iommu->page_table +
			 iommu->lowest_consistent_map);
	else
		limit = (iopte + CLUSTER_NPAGES);

	/* Resume the scan where the previous allocation left off. */
	iopte += ((ent = iommu->alloc_info[cnum].next) << cnum);
	flush_point = iommu->alloc_info[cnum].flush;

	first = iopte;
	for (;;) {
		if (iopte_val(*iopte) == 0UL) {
			/* Free slot found: advance the rover, wrapping
			 * at the cluster limit.
			 */
			if ((iopte + (1 << cnum)) >= limit)
				ent = 0;
			else
				ent = ent + 1;
			iommu->alloc_info[cnum].next = ent;
			/* Crossing the flush point means previously
			 * freed entries may still be in the TLB.
			 */
			if (ent == flush_point)
				__iommu_flushall(iommu);
			break;
		}
		iopte += (1 << cnum);
		ent++;
		if (iopte >= limit) {
			iopte = (iommu->page_table + (cnum * CLUSTER_NPAGES));
			ent = 0;
		}
		if (ent == flush_point)
			__iommu_flushall(iommu);
		/* Wrapped all the way around: cluster is full. */
		if (iopte == first)
			goto bad;
	}

	/* I've got your streaming cluster right here buddy boy... */
	return iopte;

bad:
	printk(KERN_EMERG "sbus: alloc_streaming_cluster of npages(%ld) failed!\n",
	       npages);
	return NULL;
}
185
 
186
/* Release a streaming mapping previously handed out by
 * alloc_streaming_cluster().  Caller must hold iommu->lock.
 */
static void free_streaming_cluster(struct sbus_iommu *iommu, u32 base, unsigned long npages)
{
	unsigned long cnum, ent;
	iopte_t *iopte;

	/* Recompute which cluster served this request size. */
	cnum = 0;
	while ((1UL << cnum) < npages)
		cnum++;
	/* Slot index within the cluster, and the IOPTE to clear. */
	ent = (base & CLUSTER_MASK) >> (IO_PAGE_SHIFT + cnum);
	iopte = iommu->page_table + ((base - MAP_BASE) >> IO_PAGE_SHIFT);
	iopte_val(*iopte) = 0UL;

	/* If the global flush might not have caught this entry,
	 * adjust the flush point such that we will flush before
	 * ever trying to reuse it.
	 */
#define between(X,Y,Z)	(((Z) - (Y)) >= ((X) - (Y)))
	if (between(ent, iommu->alloc_info[cnum].next, iommu->alloc_info[cnum].flush))
		iommu->alloc_info[cnum].flush = ent;
#undef between
}
207
 
208
/* We allocate consistent mappings from the end of cluster zero. */
/* Scan backward from the end of cluster 0 for npages contiguous free
 * IOPTEs.  Updates lowest_consistent_map so the streaming allocator
 * knows where its share of cluster 0 ends.  Returns the first IOPTE
 * of the run, or NULL if no room.  Caller must hold iommu->lock.
 */
static iopte_t *alloc_consistent_cluster(struct sbus_iommu *iommu, unsigned long npages)
{
	iopte_t *iopte;

	iopte = iommu->page_table + (1 * CLUSTER_NPAGES);
	while (iopte > iommu->page_table) {
		iopte--;
		if (!(iopte_val(*iopte) & IOPTE_VALID)) {
			unsigned long tmp = npages;

			/* Walk backward over (npages - 1) more entries,
			 * stopping early if any is already in use.
			 */
			while (--tmp) {
				iopte--;
				if (iopte_val(*iopte) & IOPTE_VALID)
					break;
			}
			/* tmp reaches zero only if the whole run was free. */
			if (tmp == 0) {
				u32 entry = (iopte - iommu->page_table);

				if (entry < iommu->lowest_consistent_map)
					iommu->lowest_consistent_map = entry;
				return iopte;
			}
		}
	}
	return NULL;
}
235
 
236
/* Clear the IOPTEs of a consistent mapping and, when it was the
 * lowest one, advance lowest_consistent_map past any free entries
 * above it.  Caller must hold iommu->lock.
 */
static void free_consistent_cluster(struct sbus_iommu *iommu, u32 base, unsigned long npages)
{
	iopte_t *iopte = iommu->page_table + ((base - MAP_BASE) >> IO_PAGE_SHIFT);

	if ((iopte - iommu->page_table) == iommu->lowest_consistent_map) {
		iopte_t *walk = iopte + npages;
		iopte_t *limit;

		/* Scan upward for the next in-use entry; that becomes
		 * the new low-water mark.
		 */
		limit = iommu->page_table + CLUSTER_NPAGES;
		while (walk < limit) {
			if (iopte_val(*walk) != 0UL)
				break;
			walk++;
		}
		iommu->lowest_consistent_map =
			(walk - iommu->page_table);
	}

	while (npages--)
		*iopte++ = __iopte(0UL);
}
257
 
258
/* Allocate 'size' bytes of zeroed, DMA-consistent memory and map it
 * in the IOMMU.  Returns the CPU virtual address and stores the
 * device (DVMA) address in *dvma_addr; returns NULL on failure.
 */
void *sbus_alloc_consistent(struct sbus_dev *sdev, size_t size, dma_addr_t *dvma_addr)
{
	unsigned long order, first_page, flags;
	struct sbus_iommu *iommu;
	iopte_t *iopte;
	void *ret;
	int npages;

	if (size <= 0 || sdev == NULL || dvma_addr == NULL)
		return NULL;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	/* Refuse absurdly large requests (>= 2^10 pages). */
	if (order >= 10)
		return NULL;
	first_page = __get_free_pages(GFP_KERNEL, order);
	if (first_page == 0UL)
		return NULL;
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = sdev->bus->iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	iopte = alloc_consistent_cluster(iommu, size >> IO_PAGE_SHIFT);
	if (iopte == NULL) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		free_pages(first_page, order);
		return NULL;
	}

	/* Ok, we're committed at this point. */
	*dvma_addr = MAP_BASE + ((iopte - iommu->page_table) << IO_PAGE_SHIFT);
	ret = (void *) first_page;
	npages = size >> IO_PAGE_SHIFT;
	/* One writable, cacheable IOPTE per IO page of the buffer. */
	while (npages--) {
		*iopte++ = __iopte(IOPTE_VALID | IOPTE_CACHE | IOPTE_WRITE |
				   (__pa(first_page) & IOPTE_PAGE));
		first_page += IO_PAGE_SIZE;
	}
	iommu_flush(iommu, *dvma_addr, size >> IO_PAGE_SHIFT);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return ret;
}
302
 
303
/* Release a consistent DMA mapping obtained from
 * sbus_alloc_consistent(): unmap the IOMMU pages, flush the TLB, and
 * free the backing memory.
 */
void sbus_free_consistent(struct sbus_dev *sdev, size_t size, void *cpu, dma_addr_t dvma)
{
	struct sbus_iommu *iommu;
	unsigned long npages, order;

	if (sdev == NULL || cpu == NULL || size <= 0)
		return;

	iommu = sdev->bus->iommu;
	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;

	/* Tear down the IOPTEs and flush the IOMMU under the lock. */
	spin_lock_irq(&iommu->lock);
	free_consistent_cluster(iommu, dvma, npages);
	iommu_flush(iommu, dvma, npages);
	spin_unlock_irq(&iommu->lock);

	/* Mirror the allocation-side sanity limit on page order. */
	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}
323
 
324
/* Map a single CPU buffer for streaming DVMA.  Returns the device
 * address (page-aligned DVMA base plus the buffer's sub-page offset).
 * A dir of SBUS_DMA_NONE is a caller bug; allocation failure is fatal.
 */
dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *ptr, size_t size, int dir)
{
	struct sbus_iommu *iommu = sdev->bus->iommu;
	unsigned long npages, pbase, flags;
	iopte_t *iopte;
	u32 dma_base, offset;
	unsigned long iopte_bits;

	if (dir == SBUS_DMA_NONE)
		BUG();

	/* Split into an IO-page-aligned physical base, a sub-page
	 * offset, and a page-rounded size.
	 */
	pbase = (unsigned long) ptr;
	offset = (u32) (pbase & ~IO_PAGE_MASK);
	size = (IO_PAGE_ALIGN(pbase + size) - (pbase & IO_PAGE_MASK));
	pbase = (unsigned long) __pa(pbase & IO_PAGE_MASK);

	spin_lock_irqsave(&iommu->lock, flags);
	npages = size >> IO_PAGE_SHIFT;
	iopte = alloc_streaming_cluster(iommu, npages);
	if (iopte == NULL)
		goto bad;
	dma_base = MAP_BASE + ((iopte - iommu->page_table) << IO_PAGE_SHIFT);
	npages = size >> IO_PAGE_SHIFT;
	/* Streaming mappings go through the streaming buffer; only
	 * grant write permission when the device may write to memory.
	 */
	iopte_bits = IOPTE_VALID | IOPTE_STBUF | IOPTE_CACHE;
	if (dir != SBUS_DMA_TODEVICE)
		iopte_bits |= IOPTE_WRITE;
	while (npages--) {
		*iopte++ = __iopte(iopte_bits | (pbase & IOPTE_PAGE));
		pbase += IO_PAGE_SIZE;
	}
	npages = size >> IO_PAGE_SHIFT;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return (dma_base | offset);

bad:
	spin_unlock_irqrestore(&iommu->lock, flags);
	BUG();
	return 0;
}
364
 
365
/* Undo a mapping created by sbus_map_single().  The streaming buffer
 * is flushed so buffered device writes reach memory, then the IOPTEs
 * are released.
 */
void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t dma_addr, size_t size, int direction)
{
	struct sbus_iommu *iommu = sdev->bus->iommu;
	u32 base = dma_addr & IO_PAGE_MASK;
	unsigned long npages, flags;

	/* Round the range out to whole IO pages. */
	npages = (IO_PAGE_ALIGN(dma_addr + size) - base) >> IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	free_streaming_cluster(iommu, base, npages);
	strbuf_flush(iommu, base, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);
}
378
 
379
/* Physical address of a scatterlist entry, whether given as a direct
 * kernel virtual address or as a page + offset pair.
 */
#define SG_ENT_PHYS_ADDRESS(SG) \
	((SG)->address ? \
	 __pa((SG)->address) : \
	 (__pa(page_address((SG)->page)) + (SG)->offset))

/* Populate IOPTEs for a prepared scatterlist.  nused is the number of
 * coalesced DVMA segments (dma_address/dma_length filled in earlier by
 * prepare_sg()), nelems the number of original entries.  Physically
 * contiguous entries are merged into a single run of IOPTEs; pteval of
 * ~0UL marks "no current page" between runs.
 */
static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg, int nused, int nelems, unsigned long iopte_bits)
{
	struct scatterlist *dma_sg = sg;
	struct scatterlist *sg_end = sg + nelems;
	int i;

	for (i = 0; i < nused; i++) {
		unsigned long pteval = ~0UL;
		u32 dma_npages;

		/* Number of IO pages this DVMA segment spans. */
		dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
			      dma_sg->dma_length +
			      ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
		do {
			unsigned long offset;
			signed int len;

			/* If we are here, we know we have at least one
			 * more page to map.  So walk forward until we
			 * hit a page crossing, and begin creating new
			 * mappings from that spot.
			 */
			for (;;) {
				unsigned long tmp;

				tmp = (unsigned long) SG_ENT_PHYS_ADDRESS(sg);
				len = sg->length;
				/* Entry starts on a different page than
				 * the one currently being mapped.
				 */
				if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = tmp & IO_PAGE_MASK;
					offset = tmp & (IO_PAGE_SIZE - 1UL);
					break;
				}
				/* Entry crosses into the next page. */
				if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
					offset = 0UL;
					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
					break;
				}
				sg++;
			}

			/* Emit IOPTEs until this entry's bytes are mapped. */
			pteval = ((pteval & IOPTE_PAGE) | iopte_bits);
			while (len > 0) {
				*iopte++ = __iopte(pteval);
				pteval += IO_PAGE_SIZE;
				len -= (IO_PAGE_SIZE - offset);
				offset = 0;
				dma_npages--;
			}

			pteval = (pteval & IOPTE_PAGE) + len;
			sg++;

			/* Skip over any tail mappings we've fully mapped,
			 * adjusting pteval along the way.  Stop when we
			 * detect a page crossing event.
			 */
			while (sg < sg_end &&
			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
			       ((pteval ^
				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
				pteval += sg->length;
				sg++;
			}
			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
				pteval = ~0UL;
		} while (dma_npages != 0);
		dma_sg++;
	}
}
455
 
456
/* Map a scatterlist for streaming DVMA, coalescing entries where
 * possible.  Returns the number of DVMA segments actually used.  A
 * dir of SBUS_DMA_NONE is a caller bug; allocation failure is fatal.
 */
int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int dir)
{
	struct sbus_iommu *iommu = sdev->bus->iommu;
	unsigned long flags, npages;
	iopte_t *iopte;
	u32 dma_base;
	struct scatterlist *sgtmp;
	int used;
	unsigned long iopte_bits;

	if (dir == SBUS_DMA_NONE)
		BUG();

	/* Fast path single entry scatterlists. */
	if (nents == 1) {
		sg->dma_address =
			sbus_map_single(sdev,
					(sg->address ?
					 sg->address :
					 (page_address(sg->page) + sg->offset)),
					sg->length, dir);
		sg->dma_length = sg->length;
		return 1;
	}

	/* prepare_sg() coalesces entries into segments, storing
	 * relative DVMA addresses/lengths, and returns the total
	 * page count needed.
	 */
	npages = prepare_sg(sg, nents);

	spin_lock_irqsave(&iommu->lock, flags);
	iopte = alloc_streaming_cluster(iommu, npages);
	if (iopte == NULL)
		goto bad;
	dma_base = MAP_BASE + ((iopte - iommu->page_table) << IO_PAGE_SHIFT);

	/* Normalize DVMA addresses. */
	sgtmp = sg;
	used = nents;

	/* Segments with dma_length set are the ones in use; rebase
	 * their relative addresses onto the allocated DVMA range.
	 */
	while (used && sgtmp->dma_length) {
		sgtmp->dma_address += dma_base;
		sgtmp++;
		used--;
	}
	used = nents - used;

	/* Only grant write permission when the device may write. */
	iopte_bits = IOPTE_VALID | IOPTE_STBUF | IOPTE_CACHE;
	if (dir != SBUS_DMA_TODEVICE)
		iopte_bits |= IOPTE_WRITE;

	fill_sg(iopte, sg, used, nents, iopte_bits);
#ifdef VERIFY_SG
	verify_sglist(sg, nents, iopte, npages);
#endif
	spin_unlock_irqrestore(&iommu->lock, flags);

	return used;

bad:
	spin_unlock_irqrestore(&iommu->lock, flags);
	BUG();
	return 0;
}
517
 
518
void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int direction)
519
{
520
        unsigned long size, flags;
521
        struct sbus_iommu *iommu;
522
        u32 dvma_base;
523
        int i;
524
 
525
        /* Fast path single entry scatterlists. */
526
        if (nents == 1) {
527
                sbus_unmap_single(sdev, sg->dma_address, sg->dma_length, direction);
528
                return;
529
        }
530
 
531
        dvma_base = sg[0].dma_address & IO_PAGE_MASK;
532
        for (i = 0; i < nents; i++) {
533
                if (sg[i].dma_length == 0)
534
                        break;
535
        }
536
        i--;
537
        size = IO_PAGE_ALIGN(sg[i].dma_address + sg[i].dma_length) - dvma_base;
538
 
539
        iommu = sdev->bus->iommu;
540
        spin_lock_irqsave(&iommu->lock, flags);
541
        free_streaming_cluster(iommu, dvma_base, size >> IO_PAGE_SHIFT);
542
        strbuf_flush(iommu, dvma_base, size >> IO_PAGE_SHIFT);
543
        spin_unlock_irqrestore(&iommu->lock, flags);
544
}
545
 
546
/* Make device-written data for a single mapping visible to the CPU by
 * flushing the streaming buffer over the mapped range.
 */
void sbus_dma_sync_single(struct sbus_dev *sdev, dma_addr_t base, size_t size, int direction)
{
	struct sbus_iommu *iommu = sdev->bus->iommu;
	u32 page_base = base & IO_PAGE_MASK;
	unsigned long npages, flags;

	/* Expand the range out to whole IO pages. */
	npages = (IO_PAGE_ALIGN(base + size) - page_base) >> IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	strbuf_flush(iommu, page_base, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);
}
557
 
558
void sbus_dma_sync_sg(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int direction)
559
{
560
        struct sbus_iommu *iommu = sdev->bus->iommu;
561
        unsigned long flags, size;
562
        u32 base;
563
        int i;
564
 
565
        base = sg[0].dma_address & IO_PAGE_MASK;
566
        for (i = 0; i < nents; i++) {
567
                if (sg[i].dma_length == 0)
568
                        break;
569
        }
570
        i--;
571
        size = IO_PAGE_ALIGN(sg[i].dma_address + sg[i].dma_length) - base;
572
 
573
        spin_lock_irqsave(&iommu->lock, flags);
574
        strbuf_flush(iommu, base, size >> IO_PAGE_SHIFT);
575
        spin_unlock_irqrestore(&iommu->lock, flags);
576
}
577
 
578
/* Enable 64-bit DVMA mode for the given device. */
579
void sbus_set_sbus64(struct sbus_dev *sdev, int bursts)
580
{
581
        struct sbus_iommu *iommu = sdev->bus->iommu;
582
        int slot = sdev->slot;
583
        unsigned long cfg_reg;
584
        u64 val;
585
 
586
        cfg_reg = iommu->sbus_control_reg;
587
        switch (slot) {
588
        case 0:
589
                cfg_reg += 0x20UL;
590
                break;
591
        case 1:
592
                cfg_reg += 0x28UL;
593
                break;
594
        case 2:
595
                cfg_reg += 0x30UL;
596
                break;
597
        case 3:
598
                cfg_reg += 0x38UL;
599
                break;
600
        case 13:
601
                cfg_reg += 0x40UL;
602
                break;
603
        case 14:
604
                cfg_reg += 0x48UL;
605
                break;
606
        case 15:
607
                cfg_reg += 0x50UL;
608
                break;
609
 
610
        default:
611
                return;
612
        };
613
 
614
        val = upa_readq(cfg_reg);
615
        if (val & (1UL << 14UL)) {
616
                /* Extended transfer mode already enabled. */
617
                return;
618
        }
619
 
620
        val |= (1UL << 14UL);
621
 
622
        if (bursts & DMA_BURST8)
623
                val |= (1UL << 1UL);
624
        if (bursts & DMA_BURST16)
625
                val |= (1UL << 2UL);
626
        if (bursts & DMA_BURST32)
627
                val |= (1UL << 3UL);
628
        if (bursts & DMA_BURST64)
629
                val |= (1UL << 4UL);
630
        upa_writeq(val, cfg_reg);
631
}
632
 
633
/* SBUS SYSIO INO number to Sparc PIL level.  Indexed by INO; a zero
 * entry marks an invalid ("bogon") interrupt number.
 */
static unsigned char sysio_ino_to_pil[] = {
        0, 4, 4, 7, 5, 7, 8, 9,          /* SBUS slot 0 */
        0, 4, 4, 7, 5, 7, 8, 9,          /* SBUS slot 1 */
        0, 4, 4, 7, 5, 7, 8, 9,          /* SBUS slot 2 */
        0, 4, 4, 7, 5, 7, 8, 9,          /* SBUS slot 3 */
        4, /* Onboard SCSI */
        5, /* Onboard Ethernet */
/*XXX*/ 8, /* Onboard BPP */
        0, /* Bogon */
       13, /* Audio */
/*XXX*/15, /* PowerFail */
        0, /* Bogon */
        0, /* Bogon */
       12, /* Zilog Serial Channels (incl. Keyboard/Mouse lines) */
       11, /* Floppy */
        0, /* Spare Hardware (bogon for now) */
        0, /* Keyboard (bogon for now) */
        0, /* Mouse (bogon for now) */
        0, /* Serial (bogon for now) */
     0, 0, /* Bogon, Bogon */
       10, /* Timer 0 */
       11, /* Timer 1 */
     0, 0, /* Bogon, Bogon */
       15, /* Uncorrectable SBUS Error */
       15, /* Correctable SBUS Error */
       15, /* SBUS Error */
/*XXX*/ 0, /* Power Management (bogon for now) */
};
662
 
663
/* INO number to IMAP register offset for SYSIO external IRQ's.
664
 * This should conform to both Sunfire/Wildfire server and Fusion
665
 * desktop designs.
666
 */
667
#define SYSIO_IMAP_SLOT0        0x2c04UL
668
#define SYSIO_IMAP_SLOT1        0x2c0cUL
669
#define SYSIO_IMAP_SLOT2        0x2c14UL
670
#define SYSIO_IMAP_SLOT3        0x2c1cUL
671
#define SYSIO_IMAP_SCSI         0x3004UL
672
#define SYSIO_IMAP_ETH          0x300cUL
673
#define SYSIO_IMAP_BPP          0x3014UL
674
#define SYSIO_IMAP_AUDIO        0x301cUL
675
#define SYSIO_IMAP_PFAIL        0x3024UL
676
#define SYSIO_IMAP_KMS          0x302cUL
677
#define SYSIO_IMAP_FLPY         0x3034UL
678
#define SYSIO_IMAP_SHW          0x303cUL
679
#define SYSIO_IMAP_KBD          0x3044UL
680
#define SYSIO_IMAP_MS           0x304cUL
681
#define SYSIO_IMAP_SER          0x3054UL
682
#define SYSIO_IMAP_TIM0         0x3064UL
683
#define SYSIO_IMAP_TIM1         0x306cUL
684
#define SYSIO_IMAP_UE           0x3074UL
685
#define SYSIO_IMAP_CE           0x307cUL
686
#define SYSIO_IMAP_SBERR        0x3084UL
687
#define SYSIO_IMAP_PMGMT        0x308cUL
688
#define SYSIO_IMAP_GFX          0x3094UL
689
#define SYSIO_IMAP_EUPA         0x309cUL
690
 
691
#define bogon     ((unsigned long) -1)
/* INO number to IMAP register offset.  Indexed by INO; 'bogon' marks
 * INOs with no interrupt mapping register.
 */
static unsigned long sysio_irq_offsets[] = {
        /* SBUS Slot 0 --> 3, level 1 --> 7 */
        SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0,
        SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0,
        SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1,
        SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1,
        SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2,
        SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2,
        SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3,
        SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3,

        /* Onboard devices (not relevant/used on SunFire). */
        SYSIO_IMAP_SCSI,
        SYSIO_IMAP_ETH,
        SYSIO_IMAP_BPP,
        bogon,
        SYSIO_IMAP_AUDIO,
        SYSIO_IMAP_PFAIL,
        bogon,
        bogon,
        SYSIO_IMAP_KMS,
        SYSIO_IMAP_FLPY,
        SYSIO_IMAP_SHW,
        SYSIO_IMAP_KBD,
        SYSIO_IMAP_MS,
        SYSIO_IMAP_SER,
        bogon,
        bogon,
        SYSIO_IMAP_TIM0,
        SYSIO_IMAP_TIM1,
        bogon,
        bogon,
        SYSIO_IMAP_UE,
        SYSIO_IMAP_CE,
        SYSIO_IMAP_SBERR,
        SYSIO_IMAP_PMGMT,
};

#undef bogon
731
 
732
#define NUM_SYSIO_OFFSETS (sizeof(sysio_irq_offsets) / sizeof(sysio_irq_offsets[0]))
733
 
734
/* Convert Interrupt Mapping register pointer to associated
735
 * Interrupt Clear register pointer, SYSIO specific version.
736
 */
737
#define SYSIO_ICLR_UNUSED0      0x3400UL
738
#define SYSIO_ICLR_SLOT0        0x340cUL
739
#define SYSIO_ICLR_SLOT1        0x344cUL
740
#define SYSIO_ICLR_SLOT2        0x348cUL
741
#define SYSIO_ICLR_SLOT3        0x34ccUL
742
static unsigned long sysio_imap_to_iclr(unsigned long imap)
743
{
744
        unsigned long diff = SYSIO_ICLR_UNUSED0 - SYSIO_IMAP_SLOT0;
745
        return imap + diff;
746
}
747
 
748
/* Translate a SYSIO INO into a fully-formed kernel IRQ: look up the
 * PIL, compute the IMAP and ICLR register addresses, and hand them to
 * build_irq().  Panics on an INO that has no valid translation.
 */
unsigned int sbus_build_irq(void *buscookie, unsigned int ino)
{
	struct sbus_bus *sbus = (struct sbus_bus *)buscookie;
	struct sbus_iommu *iommu = sbus->iommu;
	/* SYSIO register block starts 0x2000 below the control reg. */
	unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
	unsigned long imap, iclr;
	int pil, sbus_level = 0;

	pil = sysio_ino_to_pil[ino];
	if (!pil) {
		printk("sbus_irq_build: Bad SYSIO INO[%x]\n", ino);
		panic("Bad SYSIO IRQ translations...");
	}

	if (PIL_RESERVED(pil))
		BUG();

	imap = sysio_irq_offsets[ino];
	if (imap == ((unsigned long)-1)) {
		prom_printf("get_irq_translations: Bad SYSIO INO[%x] cpu[%d]\n",
			    ino, pil);
		prom_halt();
	}
	imap += reg_base;

	/* SYSIO inconsistency.  For external SLOTS, we have to select
	 * the right ICLR register based upon the lower SBUS irq level
	 * bits.
	 */
	if (ino >= 0x20) {
		iclr = sysio_imap_to_iclr(imap);
	} else {
		/* INO encodes slot in bits 3-4, level in bits 0-2. */
		int sbus_slot = (ino & 0x18)>>3;

		sbus_level = ino & 0x7;

		switch(sbus_slot) {
		case 0:
			iclr = reg_base + SYSIO_ICLR_SLOT0;
			break;
		case 1:
			iclr = reg_base + SYSIO_ICLR_SLOT1;
			break;
		case 2:
			iclr = reg_base + SYSIO_ICLR_SLOT2;
			break;
		default:
		case 3:
			iclr = reg_base + SYSIO_ICLR_SLOT3;
			break;
		};

		/* Each SBUS level has its own ICLR register, 8 bytes apart. */
		iclr += ((unsigned long)sbus_level - 1UL) * 8UL;
	}
	return build_irq(pil, sbus_level, iclr, imap);
}
804
 
805
/* Error interrupt handling. */
806
#define SYSIO_UE_AFSR   0x0030UL
807
#define SYSIO_UE_AFAR   0x0038UL
808
#define  SYSIO_UEAFSR_PPIO      0x8000000000000000 /* Primary PIO is cause         */
809
#define  SYSIO_UEAFSR_PDRD      0x4000000000000000 /* Primary DVMA read is cause   */
810
#define  SYSIO_UEAFSR_PDWR      0x2000000000000000 /* Primary DVMA write is cause  */
811
#define  SYSIO_UEAFSR_SPIO      0x1000000000000000 /* Secondary PIO is cause       */
812
#define  SYSIO_UEAFSR_SDRD      0x0800000000000000 /* Secondary DVMA read is cause */
813
#define  SYSIO_UEAFSR_SDWR      0x0400000000000000 /* Secondary DVMA write is cause*/
814
#define  SYSIO_UEAFSR_RESV1     0x03ff000000000000 /* Reserved                     */
815
#define  SYSIO_UEAFSR_DOFF      0x0000e00000000000 /* Doubleword Offset            */
816
#define  SYSIO_UEAFSR_SIZE      0x00001c0000000000 /* Bad transfer size is 2**SIZE */
817
#define  SYSIO_UEAFSR_MID       0x000003e000000000 /* UPA MID causing the fault    */
818
#define  SYSIO_UEAFSR_RESV2     0x0000001fffffffff /* Reserved                     */
819
static void sysio_ue_handler(int irq, void *dev_id, struct pt_regs *regs)
820
{
821
        struct sbus_bus *sbus = dev_id;
822
        struct sbus_iommu *iommu = sbus->iommu;
823
        unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
824
        unsigned long afsr_reg, afar_reg;
825
        unsigned long afsr, afar, error_bits;
826
        int reported;
827
 
828
        afsr_reg = reg_base + SYSIO_UE_AFSR;
829
        afar_reg = reg_base + SYSIO_UE_AFAR;
830
 
831
        /* Latch error status. */
832
        afsr = upa_readq(afsr_reg);
833
        afar = upa_readq(afar_reg);
834
 
835
        /* Clear primary/secondary error status bits. */
836
        error_bits = afsr &
837
                (SYSIO_UEAFSR_PPIO | SYSIO_UEAFSR_PDRD | SYSIO_UEAFSR_PDWR |
838
                 SYSIO_UEAFSR_SPIO | SYSIO_UEAFSR_SDRD | SYSIO_UEAFSR_SDWR);
839
        upa_writeq(error_bits, afsr_reg);
840
 
841
        /* Log the error. */
842
        printk("SYSIO[%x]: Uncorrectable ECC Error, primary error type[%s]\n",
843
               sbus->portid,
844
               (((error_bits & SYSIO_UEAFSR_PPIO) ?
845
                 "PIO" :
846
                 ((error_bits & SYSIO_UEAFSR_PDRD) ?
847
                  "DVMA Read" :
848
                  ((error_bits & SYSIO_UEAFSR_PDWR) ?
849
                   "DVMA Write" : "???")))));
850
        printk("SYSIO[%x]: DOFF[%lx] SIZE[%lx] MID[%lx]\n",
851
               sbus->portid,
852
               (afsr & SYSIO_UEAFSR_DOFF) >> 45UL,
853
               (afsr & SYSIO_UEAFSR_SIZE) >> 42UL,
854
               (afsr & SYSIO_UEAFSR_MID) >> 37UL);
855
        printk("SYSIO[%x]: AFAR[%016lx]\n", sbus->portid, afar);
856
        printk("SYSIO[%x]: Secondary UE errors [", sbus->portid);
857
        reported = 0;
858
        if (afsr & SYSIO_UEAFSR_SPIO) {
859
                reported++;
860
                printk("(PIO)");
861
        }
862
        if (afsr & SYSIO_UEAFSR_SDRD) {
863
                reported++;
864
                printk("(DVMA Read)");
865
        }
866
        if (afsr & SYSIO_UEAFSR_SDWR) {
867
                reported++;
868
                printk("(DVMA Write)");
869
        }
870
        if (!reported)
871
                printk("(none)");
872
        printk("]\n");
873
}
874
 
875
#define SYSIO_CE_AFSR   0x0040UL
876
#define SYSIO_CE_AFAR   0x0048UL
877
#define  SYSIO_CEAFSR_PPIO      0x8000000000000000 /* Primary PIO is cause         */
878
#define  SYSIO_CEAFSR_PDRD      0x4000000000000000 /* Primary DVMA read is cause   */
879
#define  SYSIO_CEAFSR_PDWR      0x2000000000000000 /* Primary DVMA write is cause  */
880
#define  SYSIO_CEAFSR_SPIO      0x1000000000000000 /* Secondary PIO is cause       */
881
#define  SYSIO_CEAFSR_SDRD      0x0800000000000000 /* Secondary DVMA read is cause */
882
#define  SYSIO_CEAFSR_SDWR      0x0400000000000000 /* Secondary DVMA write is cause*/
883
#define  SYSIO_CEAFSR_RESV1     0x0300000000000000 /* Reserved                     */
884
#define  SYSIO_CEAFSR_ESYND     0x00ff000000000000 /* Syndrome Bits                */
885
#define  SYSIO_CEAFSR_DOFF      0x0000e00000000000 /* Double Offset                */
886
#define  SYSIO_CEAFSR_SIZE      0x00001c0000000000 /* Bad transfer size is 2**SIZE */
887
#define  SYSIO_CEAFSR_MID       0x000003e000000000 /* UPA MID causing the fault    */
888
#define  SYSIO_CEAFSR_RESV2     0x0000001fffffffff /* Reserved                     */
889
static void sysio_ce_handler(int irq, void *dev_id, struct pt_regs *regs)
890
{
891
        struct sbus_bus *sbus = dev_id;
892
        struct sbus_iommu *iommu = sbus->iommu;
893
        unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
894
        unsigned long afsr_reg, afar_reg;
895
        unsigned long afsr, afar, error_bits;
896
        int reported;
897
 
898
        afsr_reg = reg_base + SYSIO_CE_AFSR;
899
        afar_reg = reg_base + SYSIO_CE_AFAR;
900
 
901
        /* Latch error status. */
902
        afsr = upa_readq(afsr_reg);
903
        afar = upa_readq(afar_reg);
904
 
905
        /* Clear primary/secondary error status bits. */
906
        error_bits = afsr &
907
                (SYSIO_CEAFSR_PPIO | SYSIO_CEAFSR_PDRD | SYSIO_CEAFSR_PDWR |
908
                 SYSIO_CEAFSR_SPIO | SYSIO_CEAFSR_SDRD | SYSIO_CEAFSR_SDWR);
909
        upa_writeq(error_bits, afsr_reg);
910
 
911
        printk("SYSIO[%x]: Correctable ECC Error, primary error type[%s]\n",
912
               sbus->portid,
913
               (((error_bits & SYSIO_CEAFSR_PPIO) ?
914
                 "PIO" :
915
                 ((error_bits & SYSIO_CEAFSR_PDRD) ?
916
                  "DVMA Read" :
917
                  ((error_bits & SYSIO_CEAFSR_PDWR) ?
918
                   "DVMA Write" : "???")))));
919
 
920
        /* XXX Use syndrome and afar to print out module string just like
921
         * XXX UDB CE trap handler does... -DaveM
922
         */
923
        printk("SYSIO[%x]: DOFF[%lx] ECC Syndrome[%lx] Size[%lx] MID[%lx]\n",
924
               sbus->portid,
925
               (afsr & SYSIO_CEAFSR_DOFF) >> 45UL,
926
               (afsr & SYSIO_CEAFSR_ESYND) >> 48UL,
927
               (afsr & SYSIO_CEAFSR_SIZE) >> 42UL,
928
               (afsr & SYSIO_CEAFSR_MID) >> 37UL);
929
        printk("SYSIO[%x]: AFAR[%016lx]\n", sbus->portid, afar);
930
 
931
        printk("SYSIO[%x]: Secondary CE errors [", sbus->portid);
932
        reported = 0;
933
        if (afsr & SYSIO_CEAFSR_SPIO) {
934
                reported++;
935
                printk("(PIO)");
936
        }
937
        if (afsr & SYSIO_CEAFSR_SDRD) {
938
                reported++;
939
                printk("(DVMA Read)");
940
        }
941
        if (afsr & SYSIO_CEAFSR_SDWR) {
942
                reported++;
943
                printk("(DVMA Write)");
944
        }
945
        if (!reported)
946
                printk("(none)");
947
        printk("]\n");
948
}
949
 
950
#define SYSIO_SBUS_AFSR         0x2010UL
951
#define SYSIO_SBUS_AFAR         0x2018UL
952
#define  SYSIO_SBAFSR_PLE       0x8000000000000000 /* Primary Late PIO Error       */
953
#define  SYSIO_SBAFSR_PTO       0x4000000000000000 /* Primary SBUS Timeout         */
954
#define  SYSIO_SBAFSR_PBERR     0x2000000000000000 /* Primary SBUS Error ACK       */
955
#define  SYSIO_SBAFSR_SLE       0x1000000000000000 /* Secondary Late PIO Error     */
956
#define  SYSIO_SBAFSR_STO       0x0800000000000000 /* Secondary SBUS Timeout       */
957
#define  SYSIO_SBAFSR_SBERR     0x0400000000000000 /* Secondary SBUS Error ACK     */
958
#define  SYSIO_SBAFSR_RESV1     0x03ff000000000000 /* Reserved                     */
959
#define  SYSIO_SBAFSR_RD        0x0000800000000000 /* Primary was late PIO read    */
960
#define  SYSIO_SBAFSR_RESV2     0x0000600000000000 /* Reserved                     */
961
#define  SYSIO_SBAFSR_SIZE      0x00001c0000000000 /* Size of transfer             */
962
#define  SYSIO_SBAFSR_MID       0x000003e000000000 /* MID causing the error        */
963
#define  SYSIO_SBAFSR_RESV3     0x0000001fffffffff /* Reserved                     */
964
static void sysio_sbus_error_handler(int irq, void *dev_id, struct pt_regs *regs)
965
{
966
        struct sbus_bus *sbus = dev_id;
967
        struct sbus_iommu *iommu = sbus->iommu;
968
        unsigned long afsr_reg, afar_reg, reg_base;
969
        unsigned long afsr, afar, error_bits;
970
        int reported;
971
 
972
        reg_base = iommu->sbus_control_reg - 0x2000UL;
973
        afsr_reg = reg_base + SYSIO_SBUS_AFSR;
974
        afar_reg = reg_base + SYSIO_SBUS_AFAR;
975
 
976
        afsr = upa_readq(afsr_reg);
977
        afar = upa_readq(afar_reg);
978
 
979
        /* Clear primary/secondary error status bits. */
980
        error_bits = afsr &
981
                (SYSIO_SBAFSR_PLE | SYSIO_SBAFSR_PTO | SYSIO_SBAFSR_PBERR |
982
                 SYSIO_SBAFSR_SLE | SYSIO_SBAFSR_STO | SYSIO_SBAFSR_SBERR);
983
        upa_writeq(error_bits, afsr_reg);
984
 
985
        /* Log the error. */
986
        printk("SYSIO[%x]: SBUS Error, primary error type[%s] read(%d)\n",
987
               sbus->portid,
988
               (((error_bits & SYSIO_SBAFSR_PLE) ?
989
                 "Late PIO Error" :
990
                 ((error_bits & SYSIO_SBAFSR_PTO) ?
991
                  "Time Out" :
992
                  ((error_bits & SYSIO_SBAFSR_PBERR) ?
993
                   "Error Ack" : "???")))),
994
               (afsr & SYSIO_SBAFSR_RD) ? 1 : 0);
995
        printk("SYSIO[%x]: size[%lx] MID[%lx]\n",
996
               sbus->portid,
997
               (afsr & SYSIO_SBAFSR_SIZE) >> 42UL,
998
               (afsr & SYSIO_SBAFSR_MID) >> 37UL);
999
        printk("SYSIO[%x]: AFAR[%016lx]\n", sbus->portid, afar);
1000
        printk("SYSIO[%x]: Secondary SBUS errors [", sbus->portid);
1001
        reported = 0;
1002
        if (afsr & SYSIO_SBAFSR_SLE) {
1003
                reported++;
1004
                printk("(Late PIO Error)");
1005
        }
1006
        if (afsr & SYSIO_SBAFSR_STO) {
1007
                reported++;
1008
                printk("(Time Out)");
1009
        }
1010
        if (afsr & SYSIO_SBAFSR_SBERR) {
1011
                reported++;
1012
                printk("(Error Ack)");
1013
        }
1014
        if (!reported)
1015
                printk("(none)");
1016
        printk("]\n");
1017
 
1018
        /* XXX check iommu/strbuf for further error status XXX */
1019
}
1020
 
1021
#define ECC_CONTROL     0x0020UL
1022
#define  SYSIO_ECNTRL_ECCEN     0x8000000000000000 /* Enable ECC Checking          */
1023
#define  SYSIO_ECNTRL_UEEN      0x4000000000000000 /* Enable UE Interrupts         */
1024
#define  SYSIO_ECNTRL_CEEN      0x2000000000000000 /* Enable CE Interrupts         */
1025
 
1026
#define SYSIO_UE_INO            0x34
1027
#define SYSIO_CE_INO            0x35
1028
#define SYSIO_SBUSERR_INO       0x36
1029
 
1030
static void __init sysio_register_error_handlers(struct sbus_bus *sbus)
1031
{
1032
        struct sbus_iommu *iommu = sbus->iommu;
1033
        unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
1034
        unsigned int irq;
1035
        u64 control;
1036
 
1037
        irq = sbus_build_irq(sbus, SYSIO_UE_INO);
1038
        if (request_irq(irq, sysio_ue_handler,
1039
                        SA_SHIRQ, "SYSIO UE", sbus) < 0) {
1040
                prom_printf("SYSIO[%x]: Cannot register UE interrupt.\n",
1041
                            sbus->portid);
1042
                prom_halt();
1043
        }
1044
 
1045
        irq = sbus_build_irq(sbus, SYSIO_CE_INO);
1046
        if (request_irq(irq, sysio_ce_handler,
1047
                        SA_SHIRQ, "SYSIO CE", sbus) < 0) {
1048
                prom_printf("SYSIO[%x]: Cannot register CE interrupt.\n",
1049
                            sbus->portid);
1050
                prom_halt();
1051
        }
1052
 
1053
        irq = sbus_build_irq(sbus, SYSIO_SBUSERR_INO);
1054
        if (request_irq(irq, sysio_sbus_error_handler,
1055
                        SA_SHIRQ, "SYSIO SBUS Error", sbus) < 0) {
1056
                prom_printf("SYSIO[%x]: Cannot register SBUS Error interrupt.\n",
1057
                            sbus->portid);
1058
                prom_halt();
1059
        }
1060
 
1061
        /* Now turn the error interrupts on and also enable ECC checking. */
1062
        upa_writeq((SYSIO_ECNTRL_ECCEN |
1063
                    SYSIO_ECNTRL_UEEN  |
1064
                    SYSIO_ECNTRL_CEEN),
1065
                   reg_base + ECC_CONTROL);
1066
 
1067
        control = upa_readq(iommu->sbus_control_reg);
1068
        control |= 0x100UL; /* SBUS Error Interrupt Enable */
1069
        upa_writeq(control, iommu->sbus_control_reg);
1070
}
1071
 
1072
/* Boot time initialization. */
/* Probe one SYSIO SBUS controller and bring up its IOMMU and streaming
 * buffer: read the "reg" property for the register base, allocate and
 * zero the software state and the 1MB IOMMU TSB, flush the IOMMU and
 * streaming-buffer tags via diagnostic registers, enable DVMA
 * arbitration, and finally hook up the error interrupts.  Any failure
 * is fatal at boot (prom_halt).  The register write ordering below is
 * deliberate; do not reorder.
 */
void __init sbus_iommu_init(int prom_node, struct sbus_bus *sbus)
{
        struct linux_prom64_registers rprop;
        struct sbus_iommu *iommu;
        unsigned long regs, tsb_base;
        u64 control;
        int err, i;

        /* NOTE(review): this uses sbus->prom_node while the lookups
         * below use the prom_node argument -- presumably they refer to
         * the same node; verify against the caller.
         */
        sbus->portid = prom_getintdefault(sbus->prom_node,
                                          "upa-portid", -1);

        err = prom_getproperty(prom_node, "reg",
                               (char *)&rprop, sizeof(rprop));
        if (err < 0) {
                prom_printf("sbus_iommu_init: Cannot map SYSIO control registers.\n");
                prom_halt();
        }
        regs = rprop.phys_addr;

        /* Over-allocate by SMP_CACHE_BYTES so the struct can be aligned
         * below.  The unaligned pointer is discarded, so this memory is
         * never kfree()d -- acceptable for boot-time, lives forever.
         */
        iommu = kmalloc(sizeof(*iommu) + SMP_CACHE_BYTES, GFP_ATOMIC);
        if (iommu == NULL) {
                prom_printf("sbus_iommu_init: Fatal error, kmalloc(iommu) failed\n");
                prom_halt();
        }

        /* Align on E$ line boundry. */
        iommu = (struct sbus_iommu *)
                (((unsigned long)iommu + (SMP_CACHE_BYTES - 1UL)) &
                 ~(SMP_CACHE_BYTES - 1UL));

        memset(iommu, 0, sizeof(*iommu));

        /* We start with no consistent mappings. */
        iommu->lowest_consistent_map = CLUSTER_NPAGES;

        for (i = 0; i < NCLUSTERS; i++) {
                iommu->alloc_info[i].flush = 0;
                iommu->alloc_info[i].next = 0;
        }

        /* Setup spinlock. */
        spin_lock_init(&iommu->lock);

        /* Init register offsets. */
        iommu->iommu_regs = regs + SYSIO_IOMMUREG_BASE;
        iommu->strbuf_regs = regs + SYSIO_STRBUFREG_BASE;

        /* The SYSIO SBUS control register is used for dummy reads
         * in order to ensure write completion.
         */
        iommu->sbus_control_reg = regs + 0x2000UL;

        /* Link into SYSIO software state. */
        sbus->iommu = iommu;

        printk("SYSIO: UPA portID %x, at %016lx\n",
               sbus->portid, regs);

        /* Setup for TSB_SIZE=7, TBW_SIZE=0, MMU_DE=1, MMU_EN=1 */
        /* NOTE(review): the value read here is immediately overwritten;
         * the read itself may be intentional (register access side
         * effect) -- confirm before removing.
         */
        control = upa_readq(iommu->iommu_regs + IOMMU_CONTROL);
        control = ((7UL << 16UL)        |
                   (0UL << 2UL)         |
                   (1UL << 1UL)         |
                   (1UL << 0UL));

        /* Using the above configuration we need 1MB iommu page
         * table (128K ioptes * 8 bytes per iopte).  This is
         * page order 7 on UltraSparc.
         */
        tsb_base = __get_free_pages(GFP_ATOMIC, get_order(IO_TSB_SIZE));
        if (tsb_base == 0UL) {
                prom_printf("sbus_iommu_init: Fatal error, cannot alloc TSB table.\n");
                prom_halt();
        }

        iommu->page_table = (iopte_t *) tsb_base;
        memset(iommu->page_table, 0, IO_TSB_SIZE);

        upa_writeq(control, iommu->iommu_regs + IOMMU_CONTROL);

        /* Clean out any cruft in the IOMMU using
         * diagnostic accesses.
         */
        for (i = 0; i < 16; i++) {
                unsigned long dram = iommu->iommu_regs + IOMMU_DRAMDIAG;
                unsigned long tag = iommu->iommu_regs + IOMMU_TAGDIAG;

                /* Each diagnostic entry is 8 bytes wide. */
                dram += (unsigned long)i * 8UL;
                tag += (unsigned long)i * 8UL;
                upa_writeq(0, dram);
                upa_writeq(0, tag);
        }
        /* Dummy read to ensure the diagnostic writes completed. */
        upa_readq(iommu->sbus_control_reg);

        /* Give the TSB to SYSIO. */
        upa_writeq(__pa(tsb_base), iommu->iommu_regs + IOMMU_TSBBASE);

        /* Setup streaming buffer, DE=1 SB_EN=1 */
        control = (1UL << 1UL) | (1UL << 0UL);
        upa_writeq(control, iommu->strbuf_regs + STRBUF_CONTROL);

        /* Clear out the tags using diagnostics. */
        for (i = 0; i < 16; i++) {
                unsigned long ptag, ltag;

                ptag = iommu->strbuf_regs + STRBUF_PTAGDIAG;
                ltag = iommu->strbuf_regs + STRBUF_LTAGDIAG;
                ptag += (unsigned long)i * 8UL;
                ltag += (unsigned long)i * 8UL;

                upa_writeq(0UL, ptag);
                upa_writeq(0UL, ltag);
        }

        /* Enable DVMA arbitration for all devices/slots. */
        control = upa_readq(iommu->sbus_control_reg);
        control |= 0x3fUL;
        upa_writeq(control, iommu->sbus_control_reg);

        /* Now some Xfire specific grot... */
        if (this_is_starfire)
                sbus->starfire_cookie = starfire_hookup(sbus->portid);
        else
                sbus->starfire_cookie = NULL;

        sysio_register_error_handlers(sbus);
}

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.