#ifndef __ALPHA_T2__H__
#define __ALPHA_T2__H__

#include <linux/config.h>
#include <linux/types.h>

/*
 * T2 is the internal name for the core logic chipset which provides
 * memory controller and PCI access for the SABLE-based systems.
 *
 * This file is based on:
 *
 * SABLE I/O Specification
 * Revision/Update Information: 1.3
 *
 * jestabro@amt.tay1.dec.com Initial Version.
 *
 */

#define BYTE_ENABLE_SHIFT 5
#define TRANSFER_LENGTH_SHIFT 3
#define MEM_R1_MASK 0x03ffffff /* Mem sparse space region 1 mask is 26 bits */

#ifdef CONFIG_ALPHA_SRM_SETUP
/* if we are using the SRM PCI setup, we'll need to use variables instead */
#define T2_DMA_WIN_BASE_DEFAULT (1024*1024*1024)
#define T2_DMA_WIN_SIZE_DEFAULT (1024*1024*1024)

extern unsigned int T2_DMA_WIN_BASE;
extern unsigned int T2_DMA_WIN_SIZE;

#else /* SRM_SETUP */
#define T2_DMA_WIN_BASE (1024*1024*1024)
#define T2_DMA_WIN_SIZE (1024*1024*1024)
#endif /* SRM_SETUP */

/* GAMMA-SABLE is a SABLE with EV5-based CPUs */
#ifdef CONFIG_ALPHA_GAMMA
# define GAMMA_BIAS 0x8000000000UL
#else /* GAMMA */
# define GAMMA_BIAS 0x0000000000UL
#endif /* GAMMA */

/*
 * Memory spaces:
 */
#define T2_CONF (IDENT_ADDR + GAMMA_BIAS + 0x390000000UL)
#define T2_IO (IDENT_ADDR + GAMMA_BIAS + 0x3a0000000UL)
#define T2_SPARSE_MEM (IDENT_ADDR + GAMMA_BIAS + 0x200000000UL)
#define T2_DENSE_MEM (IDENT_ADDR + GAMMA_BIAS + 0x3c0000000UL)

#define T2_IOCSR (IDENT_ADDR + GAMMA_BIAS + 0x38e000000UL)
#define T2_CERR1 (IDENT_ADDR + GAMMA_BIAS + 0x38e000020UL)
#define T2_CERR2 (IDENT_ADDR + GAMMA_BIAS + 0x38e000040UL)
#define T2_CERR3 (IDENT_ADDR + GAMMA_BIAS + 0x38e000060UL)
#define T2_PERR1 (IDENT_ADDR + GAMMA_BIAS + 0x38e000080UL)
#define T2_PERR2 (IDENT_ADDR + GAMMA_BIAS + 0x38e0000a0UL)
#define T2_PSCR (IDENT_ADDR + GAMMA_BIAS + 0x38e0000c0UL)
#define T2_HAE_1 (IDENT_ADDR + GAMMA_BIAS + 0x38e0000e0UL)
#define T2_HAE_2 (IDENT_ADDR + GAMMA_BIAS + 0x38e000100UL)
#define T2_HBASE (IDENT_ADDR + GAMMA_BIAS + 0x38e000120UL)
#define T2_WBASE1 (IDENT_ADDR + GAMMA_BIAS + 0x38e000140UL)
#define T2_WMASK1 (IDENT_ADDR + GAMMA_BIAS + 0x38e000160UL)
#define T2_TBASE1 (IDENT_ADDR + GAMMA_BIAS + 0x38e000180UL)
#define T2_WBASE2 (IDENT_ADDR + GAMMA_BIAS + 0x38e0001a0UL)
#define T2_WMASK2 (IDENT_ADDR + GAMMA_BIAS + 0x38e0001c0UL)
#define T2_TBASE2 (IDENT_ADDR + GAMMA_BIAS + 0x38e0001e0UL)
#define T2_TLBBR (IDENT_ADDR + GAMMA_BIAS + 0x38e000200UL)

#define T2_HAE_3 (IDENT_ADDR + GAMMA_BIAS + 0x38e000240UL)
#define T2_HAE_4 (IDENT_ADDR + GAMMA_BIAS + 0x38e000260UL)

#define HAE_ADDRESS T2_HAE_1

/* T2 CSRs are in the non-cachable primary IO space from 3.8000.0000 to
 * 3.8fff.ffff
 *
 *  +--------------+ 3 8000 0000
 *  | CPU 0 CSRs   |
 *  +--------------+ 3 8100 0000
 *  | CPU 1 CSRs   |
 *  +--------------+ 3 8200 0000
 *  | CPU 2 CSRs   |
 *  +--------------+ 3 8300 0000
 *  | CPU 3 CSRs   |
 *  +--------------+ 3 8400 0000
 *  | CPU Reserved |
 *  +--------------+ 3 8700 0000
 *  | Mem Reserved |
 *  +--------------+ 3 8800 0000
 *  | Mem 0 CSRs   |
 *  +--------------+ 3 8900 0000
 *  | Mem 1 CSRs   |
 *  +--------------+ 3 8a00 0000
 *  | Mem 2 CSRs   |
 *  +--------------+ 3 8b00 0000
 *  | Mem 3 CSRs   |
 *  +--------------+ 3 8c00 0000
 *  | Mem Reserved |
 *  +--------------+ 3 8e00 0000
 *  | PCI Bridge   |
 *  +--------------+ 3 8f00 0000
 *  | Expansion IO |
 *  +--------------+ 3 9000 0000
 *
 */
#define CPU0_BASE (IDENT_ADDR + GAMMA_BIAS + 0x380000000L)
#define CPU1_BASE (IDENT_ADDR + GAMMA_BIAS + 0x381000000L)
#define CPU2_BASE (IDENT_ADDR + GAMMA_BIAS + 0x382000000L)
#define CPU3_BASE (IDENT_ADDR + GAMMA_BIAS + 0x383000000L)
#define MEM0_BASE (IDENT_ADDR + GAMMA_BIAS + 0x388000000L)
#define MEM1_BASE (IDENT_ADDR + GAMMA_BIAS + 0x389000000L)
#define MEM2_BASE (IDENT_ADDR + GAMMA_BIAS + 0x38a000000L)
#define MEM3_BASE (IDENT_ADDR + GAMMA_BIAS + 0x38b000000L)
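
/* Each CPU and memory module above gets a 16MB (0x01000000) slice of this
 * CSR space, which is where the 3 8x00 0000 boundaries in the map come from. */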

#ifdef __KERNEL__

/*
 * Translate physical memory address as seen on (PCI) bus into
 * a kernel virtual address and vice versa.
 */
extern inline unsigned long virt_to_bus(void * address)
{
        return virt_to_phys(address) + T2_DMA_WIN_BASE;
}

extern inline void * bus_to_virt(unsigned long address)
{
        return phys_to_virt(address - T2_DMA_WIN_BASE);
}
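
/* For example, with the 1GB (0x40000000) DMA window defined above, a buffer
 * at physical address 0x1000 shows up to PCI bus masters at bus address
 * 0x40001000, and bus_to_virt() undoes the same offset. Illustration only;
 * the window base can differ when the SRM console sets it up. */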

/*
 * I/O functions:
 *
 * T2 (the core logic PCI/memory support chipset for the SABLE
 * series of processors) uses a sparse address mapping scheme to
 * get at PCI memory and I/O.
 */
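
/* In the routines below a port address is shifted left by BYTE_ENABLE_SHIFT
 * (5) and added to the sparse space base; the low offset selects the
 * transfer length (0x00 byte, 0x08 word, 0x18 longword, i.e. a length code
 * at TRANSFER_LENGTH_SHIFT), and (addr & 3) selects the byte lane, see
 * __inb/__outb. */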

#define vuip volatile unsigned int *

extern inline unsigned int __inb(unsigned long addr)
{
        long result = *(vuip) ((addr << 5) + T2_IO + 0x00);
        result >>= (addr & 3) * 8;
        return 0xffUL & result;
}

extern inline void __outb(unsigned char b, unsigned long addr)
{
        unsigned int w;

        asm ("insbl %2,%1,%0" : "=r"(w) : "ri"(addr & 0x3), "r"(b));
        *(vuip) ((addr << 5) + T2_IO + 0x00) = w;
        mb();
}

extern inline unsigned int __inw(unsigned long addr)
{
        long result = *(vuip) ((addr << 5) + T2_IO + 0x08);
        result >>= (addr & 3) * 8;
        return 0xffffUL & result;
}

extern inline void __outw(unsigned short b, unsigned long addr)
{
        unsigned int w;

        asm ("inswl %2,%1,%0" : "=r"(w) : "ri"(addr & 0x3), "r"(b));
        *(vuip) ((addr << 5) + T2_IO + 0x08) = w;
        mb();
}

extern inline unsigned int __inl(unsigned long addr)
{
        return *(vuip) ((addr << 5) + T2_IO + 0x18);
}

extern inline void __outl(unsigned int b, unsigned long addr)
{
        *(vuip) ((addr << 5) + T2_IO + 0x18) = b;
        mb();
}

/*
 * Memory functions. 64-bit and 32-bit accesses are done through
 * dense memory space, everything else through sparse space.
 *
 * For reading and writing 8 and 16 bit quantities we need to
 * go through one of the three sparse address mapping regions
 * and use the HAE_MEM CSR to provide some bits of the address.
 * The following few routines use only sparse address region 1
 * which gives 1Gbyte of accessible space which relates exactly
 * to the amount of PCI memory mapped *into* system address space.
 * See p 6-17 of the specification but it looks something like this:
 *
 * 21164 Address:
 *
 *  3         2         1
 *  9876543210987654321098765432109876543210
 *  1ZZZZ0.PCI.QW.Address............BBLL
 *
 *  ZZ = SBZ
 *  BB = Byte offset
 *  LL = Transfer length
 *
 * PCI Address:
 *
 *  3         2         1
 *  10987654321098765432109876543210
 *  HHH....PCI.QW.Address........ 00
 *
 *  HHH = 31:29 HAE_MEM CSR
 *
 */
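
/* A worked example of the scheme above, as implemented by __readb() below:
 * for a byte read of sparse region 1 offset 0x00123456, HAE_MEM supplies
 * address bits <31:29>, the CPU issues a load from
 * T2_SPARSE_MEM + ((0x00123456 & MEM_R1_MASK) << 5) + 0x00 (length code 00
 * = byte), and the byte lane is selected by shifting the returned longword
 * right by (0x00123456 & 3) * 8 = 16 bits. Illustration only. */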
#ifdef CONFIG_ALPHA_SRM_SETUP

extern unsigned long t2_sm_base;
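
/* When the SRM console has set up PCI, the HAE is left alone: the routines
 * below only accept bus addresses that fall inside the sparse region 1
 * window starting at t2_sm_base (MEM_R1_MASK + 1 bytes long) or inside the
 * 512KB-1MB hole; reads of anything else return all ones and writes are
 * silently dropped (the printk diagnostics are compiled out with #if 0). */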

extern inline unsigned long __readb(unsigned long addr)
{
        unsigned long result, shift, work;

        if ((addr >= t2_sm_base) && (addr <= (t2_sm_base + MEM_R1_MASK)))
                work = (((addr & MEM_R1_MASK) << 5) + T2_SPARSE_MEM + 0x00);
        else if ((addr >= 512*1024) && (addr < 1024*1024)) /* check HOLE */
                work = (((addr & MEM_R1_MASK) << 5) + T2_SPARSE_MEM + 0x00);
        else {
#if 0
                printk("__readb: address 0x%lx not covered by HAE\n", addr);
#endif
                return 0x0ffUL;
        }
        shift = (addr & 0x3) << 3;
        result = *(vuip) work;
        result >>= shift;
        return 0x0ffUL & result;
}

extern inline unsigned long __readw(unsigned long addr)
{
        unsigned long result, shift, work;

        if ((addr >= t2_sm_base) && (addr <= (t2_sm_base + MEM_R1_MASK)))
                work = (((addr & MEM_R1_MASK) << 5) + T2_SPARSE_MEM + 0x08);
        else if ((addr >= 512*1024) && (addr < 1024*1024)) /* check HOLE */
                work = (((addr & MEM_R1_MASK) << 5) + T2_SPARSE_MEM + 0x08);
        else {
#if 0
                printk("__readw: address 0x%lx not covered by HAE\n", addr);
#endif
                return 0x0ffffUL;
        }
        shift = (addr & 0x3) << 3;
        result = *(vuip) work;
        result >>= shift;
        return 0x0ffffUL & result;
}

/* on SABLE with T2, we must use SPARSE memory even for 32-bit access */
extern inline unsigned long __readl(unsigned long addr)
{
        unsigned long result, work;

        if ((addr >= t2_sm_base) && (addr <= (t2_sm_base + MEM_R1_MASK)))
                work = (((addr & MEM_R1_MASK) << 5) + T2_SPARSE_MEM + 0x18);
        else if ((addr >= 512*1024) && (addr < 1024*1024)) /* check HOLE */
                work = (((addr & MEM_R1_MASK) << 5) + T2_SPARSE_MEM + 0x18);
        else {
#if 0
                printk("__readl: address 0x%lx not covered by HAE\n", addr);
#endif
                return 0x0ffffffffUL;
        }
        result = *(vuip) work;
        return 0xffffffffUL & result;
}

extern inline void __writeb(unsigned char b, unsigned long addr)
{
        unsigned long work;

        if ((addr >= t2_sm_base) && (addr <= (t2_sm_base + MEM_R1_MASK)))
                work = (((addr & MEM_R1_MASK) << 5) + T2_SPARSE_MEM + 0x00);
        else if ((addr >= 512*1024) && (addr < 1024*1024)) /* check HOLE */
                work = (((addr & MEM_R1_MASK) << 5) + T2_SPARSE_MEM + 0x00);
        else {
#if 0
                printk("__writeb: address 0x%lx not covered by HAE\n", addr);
#endif
                return;
        }
        *(vuip) work = b * 0x01010101;
}

extern inline void __writew(unsigned short b, unsigned long addr)
{
        unsigned long work;

        if ((addr >= t2_sm_base) && (addr <= (t2_sm_base + MEM_R1_MASK)))
                work = (((addr & MEM_R1_MASK) << 5) + T2_SPARSE_MEM + 0x08);
        else if ((addr >= 512*1024) && (addr < 1024*1024)) /* check HOLE */
                work = (((addr & MEM_R1_MASK) << 5) + T2_SPARSE_MEM + 0x08);
        else {
#if 0
                printk("__writew: address 0x%lx not covered by HAE\n", addr);
#endif
                return;
        }
        *(vuip) work = b * 0x00010001;
}

/* on SABLE with T2, we must use SPARSE memory even for 32-bit access */
extern inline void __writel(unsigned int b, unsigned long addr)
{
        unsigned long work;

        if ((addr >= t2_sm_base) && (addr <= (t2_sm_base + MEM_R1_MASK)))
                work = (((addr & MEM_R1_MASK) << 5) + T2_SPARSE_MEM + 0x18);
        else if ((addr >= 512*1024) && (addr < 1024*1024)) /* check HOLE */
                work = (((addr & MEM_R1_MASK) << 5) + T2_SPARSE_MEM + 0x18);
        else {
#if 0
                printk("__writel: address 0x%lx not covered by HAE\n", addr);
#endif
                return;
        }
        *(vuip) work = b;
}

#else /* SRM_SETUP */
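
/* Without the SRM setup, the routines below manage the HAE themselves:
 * address bits <31:29> (addr & 0xE0000000) are compared with the cached
 * HAE value and set_hae() is called only when they change; the low bits
 * (addr & MEM_R1_MASK) are then pushed through sparse region 1. */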

extern inline unsigned long __readb(unsigned long addr)
{
        unsigned long result, shift, msb;

        shift = (addr & 0x3) * 8;
        msb = addr & 0xE0000000;
        addr &= MEM_R1_MASK;
        if (msb != hae.cache) {
                set_hae(msb);
        }
        result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x00);
        result >>= shift;
        return 0xffUL & result;
}

extern inline unsigned long __readw(unsigned long addr)
{
        unsigned long result, shift, msb;

        shift = (addr & 0x3) * 8;
        msb = addr & 0xE0000000;
        addr &= MEM_R1_MASK;
        if (msb != hae.cache) {
                set_hae(msb);
        }
        result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08);
        result >>= shift;
        return 0xffffUL & result;
}

/* on SABLE with T2, we must use SPARSE memory even for 32-bit access */
extern inline unsigned long __readl(unsigned long addr)
{
        unsigned long result, msb;

        msb = addr & 0xE0000000;
        addr &= MEM_R1_MASK;
        if (msb != hae.cache) {
                set_hae(msb);
        }
        result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18);
        return 0xffffffffUL & result;
}

extern inline void __writeb(unsigned char b, unsigned long addr)
{
        unsigned long msb;

        msb = addr & 0xE0000000;
        addr &= MEM_R1_MASK;
        if (msb != hae.cache) {
                set_hae(msb);
        }
        *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x00) = b * 0x01010101;
}

extern inline void __writew(unsigned short b, unsigned long addr)
{
        unsigned long msb;

        msb = addr & 0xE0000000;
        addr &= MEM_R1_MASK;
        if (msb != hae.cache) {
                set_hae(msb);
        }
        *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08) = b * 0x00010001;
}

/* on SABLE with T2, we must use SPARSE memory even for 32-bit access */
extern inline void __writel(unsigned int b, unsigned long addr)
{
        unsigned long msb;

        msb = addr & 0xE0000000;
        addr &= MEM_R1_MASK;
        if (msb != hae.cache) {
                set_hae(msb);
        }
        *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18) = b;
}

#endif /* SRM_SETUP */

#define inb(port) \
(__builtin_constant_p((port))?__inb(port):_inb(port))

#define outb(x, port) \
(__builtin_constant_p((port))?__outb((x),(port)):_outb((x),(port)))
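
/* Compile-time constant ports go through the inline __inb/__outb above;
 * variable ports fall back to _inb/_outb, presumably the out-of-line
 * versions provided elsewhere in the Alpha I/O code. */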

#define readl(a) __readl((unsigned long)(a))
#define writel(v,a) __writel((v),(unsigned long)(a))

#undef vuip

extern unsigned long t2_init (unsigned long mem_start,
                              unsigned long mem_end);

#endif /* __KERNEL__ */

/*
 * Sable CPU Module CSRS
 *
 * These are CSRs for hardware other than the CPU chip on the CPU module.
 * The CPU module has Backup Cache control logic, Cbus control logic, and
 * interrupt control logic on it. There is a duplicate tag store to speed
 * up maintaining cache coherency.
 */

struct sable_cpu_csr {
        unsigned long bcc;     long fill_00[3]; /* Backup Cache Control */
        unsigned long bcce;    long fill_01[3]; /* Backup Cache Correctable Error */
        unsigned long bccea;   long fill_02[3]; /* B-Cache Corr Err Address Latch */
        unsigned long bcue;    long fill_03[3]; /* B-Cache Uncorrectable Error */
        unsigned long bcuea;   long fill_04[3]; /* B-Cache Uncorr Err Addr Latch */
        unsigned long dter;    long fill_05[3]; /* Duplicate Tag Error */
        unsigned long cbctl;   long fill_06[3]; /* CBus Control */
        unsigned long cbe;     long fill_07[3]; /* CBus Error */
        unsigned long cbeal;   long fill_08[3]; /* CBus Error Addr Latch low */
        unsigned long cbeah;   long fill_09[3]; /* CBus Error Addr Latch high */
        unsigned long pmbx;    long fill_10[3]; /* Processor Mailbox */
        unsigned long ipir;    long fill_11[3]; /* Inter-Processor Int Request */
        unsigned long sic;     long fill_12[3]; /* System Interrupt Clear */
        unsigned long adlk;    long fill_13[3]; /* Address Lock (LDxL/STxC) */
        unsigned long madrl;   long fill_14[3]; /* CBus Miss Address */
        unsigned long rev;     long fill_15[3]; /* CMIC Revision */
};
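
/* Each CSR above is one quadword followed by three pad quadwords, so
 * successive members sit 0x20 bytes apart, matching the spacing of the
 * T2_* CSR addresses earlier in this file. A sketch of how such a register
 * might be touched (illustration only, not necessarily how the kernel
 * actually accesses it):
 *
 *      ((volatile struct sable_cpu_csr *)CPU0_BASE)->sic = value;
 */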

/*
 * Data structure for handling T2 machine checks:
 */
struct el_t2_frame_header {
        unsigned int elcf_fid;          /* Frame ID (from above) */
        unsigned int elcf_size;         /* Size of frame in bytes */
};

struct el_t2_procdata_mcheck {
        unsigned long elfmc_paltemp[32];   /* PAL TEMP REGS. */
        /* EV4-specific fields */
        unsigned long elfmc_exc_addr;      /* Addr of excepting insn. */
        unsigned long elfmc_exc_sum;       /* Summary of arith traps. */
        unsigned long elfmc_exc_mask;      /* Exception mask (from exc_sum). */
        unsigned long elfmc_iccsr;         /* IBox hardware enables. */
        unsigned long elfmc_pal_base;      /* Base address for PALcode. */
        unsigned long elfmc_hier;          /* Hardware Interrupt Enable. */
        unsigned long elfmc_hirr;          /* Hardware Interrupt Request. */
        unsigned long elfmc_mm_csr;        /* D-stream fault info. */
        unsigned long elfmc_dc_stat;       /* D-cache status (ECC/Parity Err). */
        unsigned long elfmc_dc_addr;       /* EV3 Phys Addr for ECC/DPERR. */
        unsigned long elfmc_abox_ctl;      /* ABox Control Register. */
        unsigned long elfmc_biu_stat;      /* BIU Status. */
        unsigned long elfmc_biu_addr;      /* BIU Address. */
        unsigned long elfmc_biu_ctl;       /* BIU Control. */
        unsigned long elfmc_fill_syndrome; /* For correcting ECC errors. */
        unsigned long elfmc_fill_addr;     /* Cache block which was being read. */
        unsigned long elfmc_va;            /* Effective VA of fault or miss. */
        unsigned long elfmc_bc_tag;        /* Backup Cache Tag Probe Results. */
};

/*
 * Sable processor specific Machine Check Data segment.
 */

struct el_t2_logout_header {
        unsigned int elfl_size;         /* size in bytes of logout area. */
        int elfl_sbz1:31;               /* Should be zero. */
        char elfl_retry:1;              /* Retry flag. */
        unsigned int elfl_procoffset;   /* Processor-specific offset. */
        unsigned int elfl_sysoffset;    /* Offset of system-specific. */
        unsigned int elfl_error_type;   /* PAL error type code. */
        unsigned int elfl_frame_rev;    /* PAL Frame revision. */
};

struct el_t2_sysdata_mcheck {
        unsigned long elcmc_bcc;        /* CSR 0 */
        unsigned long elcmc_bcce;       /* CSR 1 */
        unsigned long elcmc_bccea;      /* CSR 2 */
        unsigned long elcmc_bcue;       /* CSR 3 */
        unsigned long elcmc_bcuea;      /* CSR 4 */
        unsigned long elcmc_dter;       /* CSR 5 */
        unsigned long elcmc_cbctl;      /* CSR 6 */
        unsigned long elcmc_cbe;        /* CSR 7 */
        unsigned long elcmc_cbeal;      /* CSR 8 */
        unsigned long elcmc_cbeah;      /* CSR 9 */
        unsigned long elcmc_pmbx;       /* CSR 10 */
        unsigned long elcmc_ipir;       /* CSR 11 */
        unsigned long elcmc_sic;        /* CSR 12 */
        unsigned long elcmc_adlk;       /* CSR 13 */
        unsigned long elcmc_madrl;      /* CSR 14 */
        unsigned long elcmc_crrev4;     /* CSR 15 */
};

/*
 * Sable memory error frame - sable pfms section 3.42
 */
struct el_t2_data_memory {
        struct el_t2_frame_header elcm_hdr; /* ID$MEM-FERR = 0x08 */
        unsigned int elcm_module;       /* Module id. */
        unsigned int elcm_res04;        /* Reserved. */
        unsigned long elcm_merr;        /* CSR0: Error Reg 1. */
        unsigned long elcm_mcmd1;       /* CSR1: Command Trap 1. */
        unsigned long elcm_mcmd2;       /* CSR2: Command Trap 2. */
        unsigned long elcm_mconf;       /* CSR3: Configuration. */
        unsigned long elcm_medc1;       /* CSR4: EDC Status 1. */
        unsigned long elcm_medc2;       /* CSR5: EDC Status 2. */
        unsigned long elcm_medcc;       /* CSR6: EDC Control. */
        unsigned long elcm_msctl;       /* CSR7: Stream Buffer Control. */
        unsigned long elcm_mref;        /* CSR8: Refresh Control. */
        unsigned long elcm_filter;      /* CSR9: CRD Filter Control. */
};

/*
 * Sable other cpu error frame - sable pfms section 3.43
 */
struct el_t2_data_other_cpu {
        short elco_cpuid;               /* CPU ID */
        short elco_res02[3];
        unsigned long elco_bcc;         /* CSR 0 */
        unsigned long elco_bcce;        /* CSR 1 */
        unsigned long elco_bccea;       /* CSR 2 */
        unsigned long elco_bcue;        /* CSR 3 */
        unsigned long elco_bcuea;       /* CSR 4 */
        unsigned long elco_dter;        /* CSR 5 */
        unsigned long elco_cbctl;       /* CSR 6 */
        unsigned long elco_cbe;         /* CSR 7 */
        unsigned long elco_cbeal;       /* CSR 8 */
        unsigned long elco_cbeah;       /* CSR 9 */
        unsigned long elco_pmbx;        /* CSR 10 */
        unsigned long elco_ipir;        /* CSR 11 */
        unsigned long elco_sic;         /* CSR 12 */
        unsigned long elco_adlk;        /* CSR 13 */
        unsigned long elco_madrl;       /* CSR 14 */
        unsigned long elco_crrev4;      /* CSR 15 */
};

/*
 * Sable T2 error frame - sable pfms section 3.44
 */
struct el_t2_data_t2 {
        struct el_t2_frame_header elct_hdr; /* ID$T2-FRAME */
        unsigned long elct_iocsr;       /* IO Control and Status Register */
        unsigned long elct_cerr1;       /* Cbus Error Register 1 */
        unsigned long elct_cerr2;       /* Cbus Error Register 2 */
        unsigned long elct_cerr3;       /* Cbus Error Register 3 */
        unsigned long elct_perr1;       /* PCI Error Register 1 */
        unsigned long elct_perr2;       /* PCI Error Register 2 */
        unsigned long elct_hae0_1;      /* High Address Extension Register 1 */
        unsigned long elct_hae0_2;      /* High Address Extension Register 2 */
        unsigned long elct_hbase;       /* High Base Register */
        unsigned long elct_wbase1;      /* Window Base Register 1 */
        unsigned long elct_wmask1;      /* Window Mask Register 1 */
        unsigned long elct_tbase1;      /* Translated Base Register 1 */
        unsigned long elct_wbase2;      /* Window Base Register 2 */
        unsigned long elct_wmask2;      /* Window Mask Register 2 */
        unsigned long elct_tbase2;      /* Translated Base Register 2 */
        unsigned long elct_tdr0;        /* TLB Data Register 0 */
        unsigned long elct_tdr1;        /* TLB Data Register 1 */
        unsigned long elct_tdr2;        /* TLB Data Register 2 */
        unsigned long elct_tdr3;        /* TLB Data Register 3 */
        unsigned long elct_tdr4;        /* TLB Data Register 4 */
        unsigned long elct_tdr5;        /* TLB Data Register 5 */
        unsigned long elct_tdr6;        /* TLB Data Register 6 */
        unsigned long elct_tdr7;        /* TLB Data Register 7 */
};

/*
 * Sable error log data structure - sable pfms section 3.40
 */
struct el_t2_data_corrected {
        unsigned long elcpb_biu_stat;
        unsigned long elcpb_biu_addr;
        unsigned long elcpb_biu_ctl;
        unsigned long elcpb_fill_syndrome;
        unsigned long elcpb_fill_addr;
        unsigned long elcpb_bc_tag;
};

/*
 * Sable error log data structure
 * Note there are 4 memory slots on sable (see t2.h)
 */
struct el_t2_frame_mcheck {
        struct el_t2_frame_header elfmc_header; /* ID$P-FRAME_MCHECK */
        struct el_t2_logout_header elfmc_hdr;
        struct el_t2_procdata_mcheck elfmc_procdata;
        struct el_t2_sysdata_mcheck elfmc_sysdata;
        struct el_t2_data_t2 elfmc_t2data;
        struct el_t2_data_memory elfmc_memdata[4];
        struct el_t2_frame_header elfmc_footer; /* empty */
};

/*
 * Sable error log data structures on memory errors
 */
struct el_t2_frame_corrected {
        struct el_t2_frame_header elfcc_header; /* ID$P-BC-COR */
        struct el_t2_logout_header elfcc_hdr;
        struct el_t2_data_corrected elfcc_procdata;
        /* struct el_t2_data_t2 elfcc_t2data; */
        /* struct el_t2_data_memory elfcc_memdata[4]; */
        struct el_t2_frame_header elfcc_footer; /* empty */
};

#define RTC_PORT(x) (0x70 + (x))
#define RTC_ADDR(x) (0x80 | (x))
#define RTC_ALWAYS_BCD 0

#endif /* __ALPHA_T2__H__ */