OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

Compare Revisions

  • This comparison shows the changes necessary to convert path
    /
    from Rev 1631 to Rev 1632
    Reverse comparison

Rev 1631 → Rev 1632

/trunk/rc203soc/sw/uClinux/include/asm-alpha/termbits.h
0,0 → 1,177
#ifndef _ALPHA_TERMBITS_H
#define _ALPHA_TERMBITS_H
 
#include <linux/posix_types.h>
 
typedef unsigned char cc_t;
typedef unsigned int speed_t;
typedef unsigned int tcflag_t;
 
/*
* termios type and macro definitions. Be careful about adding stuff
* to this file since it's used in GNU libc and there are strict rules
* concerning namespace pollution.
*/
 
#define NCCS 19
/*
 * Kernel view of terminal state: four mode-flag words, the
 * control-character table, the line discipline byte and split
 * input/output speeds.  NCCS (19) sizes the c_cc array.
 */
struct termios {
tcflag_t c_iflag; /* input mode flags */
tcflag_t c_oflag; /* output mode flags */
tcflag_t c_cflag; /* control mode flags */
tcflag_t c_lflag; /* local mode flags */
cc_t c_cc[NCCS]; /* control characters */
cc_t c_line; /* line discipline (== c_cc[19]) */
speed_t c_ispeed; /* input speed */
speed_t c_ospeed; /* output speed */
};
 
/* c_cc characters */
#define VEOF 0
#define VEOL 1
#define VEOL2 2
#define VERASE 3
#define VWERASE 4
#define VKILL 5
#define VREPRINT 6
#define VSWTC 7
#define VINTR 8
#define VQUIT 9
#define VSUSP 10
#define VSTART 12
#define VSTOP 13
#define VLNEXT 14
#define VDISCARD 15
#define VMIN 16
#define VTIME 17
 
/* c_iflag bits */
#define IGNBRK 0000001
#define BRKINT 0000002
#define IGNPAR 0000004
#define PARMRK 0000010
#define INPCK 0000020
#define ISTRIP 0000040
#define INLCR 0000100
#define IGNCR 0000200
#define ICRNL 0000400
#define IXON 0001000
#define IXOFF 0002000
#if !defined(KERNEL) || defined(__USE_BSD)
/* POSIX.1 doesn't want these... */
# define IXANY 0004000
# define IUCLC 0010000
# define IMAXBEL 0020000
#endif
 
/* c_oflag bits */
#define OPOST 0000001
#define ONLCR 0000002
#define OLCUC 0000004
 
#define OCRNL 0000010
#define ONOCR 0000020
#define ONLRET 0000040
 
#define OFILL 00000100
#define OFDEL 00000200
#define NLDLY 00001400
#define NL0 00000000
#define NL1 00000400
#define NL2 00001000
#define NL3 00001400
#define TABDLY 00006000
#define TAB0 00000000
#define TAB1 00002000
#define TAB2 00004000
#define TAB3 00006000
#define CRDLY 00030000
#define CR0 00000000
#define CR1 00010000
#define CR2 00020000
#define CR3 00030000
#define FFDLY 00040000
#define FF0 00000000
#define FF1 00040000
#define BSDLY 00100000
#define BS0 00000000
#define BS1 00100000
#define VTDLY 00200000
#define VT0 00000000
#define VT1 00200000
#define XTABS 01000000 /* Hmm.. Linux/i386 considers this part of TABDLY.. */
 
/* c_cflag bit meaning */
#define CBAUD 0000017
#define B0 0000000 /* hang up */
#define B50 0000001
#define B75 0000002
#define B110 0000003
#define B134 0000004
#define B150 0000005
#define B200 0000006
#define B300 0000007
#define B600 0000010
#define B1200 0000011
#define B1800 0000012
#define B2400 0000013
#define B4800 0000014
#define B9600 0000015
#define B19200 0000016
#define B38400 0000017
#define EXTA B19200
#define EXTB B38400
#define CBAUDEX 0000020
#define B57600 00020
#define B115200 00021
#define B230400 00022
#define B460800 00023
 
#define CSIZE 00001400
#define CS5 00000000
#define CS6 00000400
#define CS7 00001000
#define CS8 00001400
 
#define CSTOPB 00002000
#define CREAD 00004000
#define PARENB 00010000
#define PARODD 00020000
#define HUPCL 00040000
 
#define CLOCAL 00100000
#define CRTSCTS 020000000000 /* flow control */
 
/* c_lflag bits */
#define ISIG 0x00000080
#define ICANON 0x00000100
#define XCASE 0x00004000
#define ECHO 0x00000008
#define ECHOE 0x00000002
#define ECHOK 0x00000004
#define ECHONL 0x00000010
#define NOFLSH 0x80000000
#define TOSTOP 0x00400000
#define ECHOCTL 0x00000040
#define ECHOPRT 0x00000020
#define ECHOKE 0x00000001
#define FLUSHO 0x00800000
#define PENDIN 0x20000000
#define IEXTEN 0x00000400
 
/* Values for the ACTION argument to `tcflow'. */
#define TCOOFF 0
#define TCOON 1
#define TCIOFF 2
#define TCION 3
 
/* Values for the QUEUE_SELECTOR argument to `tcflush'. */
#define TCIFLUSH 0
#define TCOFLUSH 1
#define TCIOFLUSH 2
 
/* Values for the OPTIONAL_ACTIONS argument to `tcsetattr'. */
#define TCSANOW 0
#define TCSADRAIN 1
#define TCSAFLUSH 2
 
#endif /* _ALPHA_TERMBITS_H */
/trunk/rc203soc/sw/uClinux/include/asm-alpha/apecs.h
0,0 → 1,573
#ifndef __ALPHA_APECS__H__
#define __ALPHA_APECS__H__
 
#include <linux/types.h>
 
/*
* APECS is the internal name for the 2107x chipset which provides
* memory controller and PCI access for the 21064 chip based systems.
*
* This file is based on:
*
* DECchip 21071-AA and DECchip 21072-AA Core Logic Chipsets
* Data Sheet
*
* EC-N0648-72
*
*
* david.rusling@reo.mts.dec.com Initial Version.
*
*/
#include <linux/config.h>
 
#ifdef CONFIG_ALPHA_XL
/*
An AVANTI *might* be an XL, and an XL has only 27 bits of ISA address
that get passed through the PCI<->ISA bridge chip. So we've gotta use
both windows to max out the physical memory we can DMA to. Sigh...
 
If we try a window at 0 for 1GB as a work-around, we run into conflicts
with ISA/PCI bus memory which can't be relocated, like VGA aperture and
BIOS ROMs. So we must put the windows high enough to avoid these areas.
 
We put window 1 at BUS 64Mb for 64Mb, mapping physical 0 to 64Mb-1,
and window 2 at BUS 1Gb for 1Gb, mapping physical 0 to 1Gb-1.
Yes, this does map 0 to 64Mb-1 twice, but only window 1 will actually
be used for that range (via virt_to_bus()).
 
Note that we actually fudge the window 1 maximum as 48Mb instead of 64Mb,
to keep virt_to_bus() from returning an address in the first window, for
a data area that goes beyond the 64Mb first DMA window. Sigh...
The fudge factor MUST match with <asm/dma.h> MAX_DMA_ADDRESS, but
we can't just use that here, because of header file looping... :-(
 
Window 1 will be used for all DMA from the ISA bus; yes, that does
limit what memory an ISA floppy or soundcard or Ethernet can touch, but
it's also a known limitation on other platforms as well. We use the
same technique that is used on INTEL platforms with similar limitation:
set MAX_DMA_ADDRESS and clear some pages' DMAable flags during mem_init().
We trust that any ISA bus device drivers will *always* ask for DMAable
memory explicitly via kmalloc()/get_free_pages() flags arguments.
 
Note that most PCI bus devices' drivers do *not* explicitly ask for
DMAable memory; they count on being able to DMA to any memory they
get from kmalloc()/get_free_pages(). They will also use window 1 for
any physical memory accesses below 64Mb; the rest will be handled by
window 2, maxing out at 1Gb of memory. I trust this is enough... :-)
 
We hope that the area before the first window is large enough so that
there will be no overlap at the top end (64Mb). We *must* locate the
PCI cards' memory just below window 1, so that there's still the
possibility of being able to access it via SPARSE space. This is
important for cards such as the Matrox Millennium, whose Xserver
wants to access memory-mapped registers in byte and short lengths.
 
Note that the XL is treated differently from the AVANTI, even though
for most other things they are identical. It didn't seem reasonable to
make the AVANTI support pay for the limitations of the XL. It is true,
however, that an XL kernel will run on an AVANTI without problems.
 
*/
#define APECS_XL_DMA_WIN1_BASE (64*1024*1024)
#define APECS_XL_DMA_WIN1_SIZE (64*1024*1024)
#define APECS_XL_DMA_WIN1_SIZE_PARANOID (48*1024*1024)
#define APECS_XL_DMA_WIN2_BASE (1024*1024*1024)
#define APECS_XL_DMA_WIN2_SIZE (1024*1024*1024)
 
#else /* CONFIG_ALPHA_XL */
 
/* these are for normal APECS family machines, AVANTI/MUSTANG/EB64/PC64 */
#ifdef CONFIG_ALPHA_SRM_SETUP
/* if we are using the SRM PCI setup, we'll need to use variables instead */
#define APECS_DMA_WIN_BASE_DEFAULT (1024*1024*1024)
#define APECS_DMA_WIN_SIZE_DEFAULT (1024*1024*1024)
 
extern unsigned int APECS_DMA_WIN_BASE;
extern unsigned int APECS_DMA_WIN_SIZE;
 
#else /* SRM_SETUP */
#define APECS_DMA_WIN_BASE (1024*1024*1024)
#define APECS_DMA_WIN_SIZE (1024*1024*1024)
#endif /* SRM_SETUP */
 
#endif /* CONFIG_ALPHA_XL */
 
/*
* 21071-DA Control and Status registers.
* These are used for PCI memory access.
*/
#define APECS_IOC_DCSR (IDENT_ADDR + 0x1A0000000UL)
#define APECS_IOC_PEAR (IDENT_ADDR + 0x1A0000020UL)
#define APECS_IOC_SEAR (IDENT_ADDR + 0x1A0000040UL)
#define APECS_IOC_DR1 (IDENT_ADDR + 0x1A0000060UL)
#define APECS_IOC_DR2 (IDENT_ADDR + 0x1A0000080UL)
#define APECS_IOC_DR3 (IDENT_ADDR + 0x1A00000A0UL)
 
#define APECS_IOC_TB1R (IDENT_ADDR + 0x1A00000C0UL)
#define APECS_IOC_TB2R (IDENT_ADDR + 0x1A00000E0UL)
 
#define APECS_IOC_PB1R (IDENT_ADDR + 0x1A0000100UL)
#define APECS_IOC_PB2R (IDENT_ADDR + 0x1A0000120UL)
 
#define APECS_IOC_PM1R (IDENT_ADDR + 0x1A0000140UL)
#define APECS_IOC_PM2R (IDENT_ADDR + 0x1A0000160UL)
 
#define APECS_IOC_HAXR0 (IDENT_ADDR + 0x1A0000180UL)
#define APECS_IOC_HAXR1 (IDENT_ADDR + 0x1A00001A0UL)
#define APECS_IOC_HAXR2 (IDENT_ADDR + 0x1A00001C0UL)
 
#define APECS_IOC_PMLT (IDENT_ADDR + 0x1A00001E0UL)
 
#define APECS_IOC_TLBTAG0 (IDENT_ADDR + 0x1A0000200UL)
#define APECS_IOC_TLBTAG1 (IDENT_ADDR + 0x1A0000220UL)
#define APECS_IOC_TLBTAG2 (IDENT_ADDR + 0x1A0000240UL)
#define APECS_IOC_TLBTAG3 (IDENT_ADDR + 0x1A0000260UL)
#define APECS_IOC_TLBTAG4 (IDENT_ADDR + 0x1A0000280UL)
#define APECS_IOC_TLBTAG5 (IDENT_ADDR + 0x1A00002A0UL)
#define APECS_IOC_TLBTAG6 (IDENT_ADDR + 0x1A00002C0UL)
#define APECS_IOC_TLBTAG7 (IDENT_ADDR + 0x1A00002E0UL)
 
#define APECS_IOC_TLBDATA0 (IDENT_ADDR + 0x1A0000300UL)
#define APECS_IOC_TLBDATA1 (IDENT_ADDR + 0x1A0000320UL)
#define APECS_IOC_TLBDATA2 (IDENT_ADDR + 0x1A0000340UL)
#define APECS_IOC_TLBDATA3 (IDENT_ADDR + 0x1A0000360UL)
#define APECS_IOC_TLBDATA4 (IDENT_ADDR + 0x1A0000380UL)
#define APECS_IOC_TLBDATA5 (IDENT_ADDR + 0x1A00003A0UL)
#define APECS_IOC_TLBDATA6 (IDENT_ADDR + 0x1A00003C0UL)
#define APECS_IOC_TLBDATA7 (IDENT_ADDR + 0x1A00003E0UL)
 
#define APECS_IOC_TBIA (IDENT_ADDR + 0x1A0000400UL)
 
 
/*
* 21071-CA Control and Status registers.
* These are used to program memory timing,
* configure memory and initialise the B-Cache.
*/
#define APECS_MEM_GCR (IDENT_ADDR + 0x180000000UL)
#define APECS_MEM_EDSR (IDENT_ADDR + 0x180000040UL)
#define APECS_MEM_TAR (IDENT_ADDR + 0x180000060UL)
#define APECS_MEM_ELAR (IDENT_ADDR + 0x180000080UL)
#define APECS_MEM_EHAR (IDENT_ADDR + 0x1800000a0UL)
#define APECS_MEM_SFT_RST (IDENT_ADDR + 0x1800000c0UL)
#define APECS_MEM_LDxLAR (IDENT_ADDR + 0x1800000e0UL)
#define APECS_MEM_LDxHAR (IDENT_ADDR + 0x180000100UL)
#define APECS_MEM_GTR (IDENT_ADDR + 0x180000200UL)
#define APECS_MEM_RTR (IDENT_ADDR + 0x180000220UL)
#define APECS_MEM_VFPR (IDENT_ADDR + 0x180000240UL)
#define APECS_MEM_PDLDR (IDENT_ADDR + 0x180000260UL)
#define APECS_MEM_PDhDR (IDENT_ADDR + 0x180000280UL)
 
/* Bank x Base Address Register */
#define APECS_MEM_B0BAR (IDENT_ADDR + 0x180000800UL)
#define APECS_MEM_B1BAR (IDENT_ADDR + 0x180000820UL)
#define APECS_MEM_B2BAR (IDENT_ADDR + 0x180000840UL)
#define APECS_MEM_B3BAR (IDENT_ADDR + 0x180000860UL)
#define APECS_MEM_B4BAR (IDENT_ADDR + 0x180000880UL)
#define APECS_MEM_B5BAR (IDENT_ADDR + 0x1800008A0UL)
#define APECS_MEM_B6BAR (IDENT_ADDR + 0x1800008C0UL)
#define APECS_MEM_B7BAR (IDENT_ADDR + 0x1800008E0UL)
#define APECS_MEM_B8BAR (IDENT_ADDR + 0x180000900UL)
 
/* Bank x Configuration Register */
#define APECS_MEM_B0BCR (IDENT_ADDR + 0x180000A00UL)
#define APECS_MEM_B1BCR (IDENT_ADDR + 0x180000A20UL)
#define APECS_MEM_B2BCR (IDENT_ADDR + 0x180000A40UL)
#define APECS_MEM_B3BCR (IDENT_ADDR + 0x180000A60UL)
#define APECS_MEM_B4BCR (IDENT_ADDR + 0x180000A80UL)
#define APECS_MEM_B5BCR (IDENT_ADDR + 0x180000AA0UL)
#define APECS_MEM_B6BCR (IDENT_ADDR + 0x180000AC0UL)
#define APECS_MEM_B7BCR (IDENT_ADDR + 0x180000AE0UL)
#define APECS_MEM_B8BCR (IDENT_ADDR + 0x180000B00UL)
 
/* Bank x Timing Register A */
#define APECS_MEM_B0TRA (IDENT_ADDR + 0x180000C00UL)
#define APECS_MEM_B1TRA (IDENT_ADDR + 0x180000C20UL)
#define APECS_MEM_B2TRA (IDENT_ADDR + 0x180000C40UL)
#define APECS_MEM_B3TRA (IDENT_ADDR + 0x180000C60UL)
#define APECS_MEM_B4TRA (IDENT_ADDR + 0x180000C80UL)
#define APECS_MEM_B5TRA (IDENT_ADDR + 0x180000CA0UL)
#define APECS_MEM_B6TRA (IDENT_ADDR + 0x180000CC0UL)
#define APECS_MEM_B7TRA (IDENT_ADDR + 0x180000CE0UL)
#define APECS_MEM_B8TRA (IDENT_ADDR + 0x180000D00UL)
 
/* Bank x Timing Register B */
#define APECS_MEM_B0TRB (IDENT_ADDR + 0x180000E00UL)
#define APECS_MEM_B1TRB (IDENT_ADDR + 0x180000E20UL)
#define APECS_MEM_B2TRB (IDENT_ADDR + 0x180000E40UL)
#define APECS_MEM_B3TRB (IDENT_ADDR + 0x180000E60UL)
#define APECS_MEM_B4TRB (IDENT_ADDR + 0x180000E80UL)
#define APECS_MEM_B5TRB (IDENT_ADDR + 0x180000EA0UL)
#define APECS_MEM_B6TRB (IDENT_ADDR + 0x180000EC0UL)
#define APECS_MEM_B7TRB (IDENT_ADDR + 0x180000EE0UL)
#define APECS_MEM_B8TRB (IDENT_ADDR + 0x180000F00UL)
 
 
/*
* Memory spaces:
*/
#define APECS_IACK_SC (IDENT_ADDR + 0x1b0000000UL)
#define APECS_CONF (IDENT_ADDR + 0x1e0000000UL)
#define APECS_IO (IDENT_ADDR + 0x1c0000000UL)
#define APECS_SPARSE_MEM (IDENT_ADDR + 0x200000000UL)
#define APECS_DENSE_MEM (IDENT_ADDR + 0x300000000UL)
 
/*
* Bit definitions for I/O Controller status register 0:
*/
#define APECS_IOC_STAT0_CMD 0xf
#define APECS_IOC_STAT0_ERR (1<<4)
#define APECS_IOC_STAT0_LOST (1<<5)
#define APECS_IOC_STAT0_THIT (1<<6)
#define APECS_IOC_STAT0_TREF (1<<7)
#define APECS_IOC_STAT0_CODE_SHIFT 8
#define APECS_IOC_STAT0_CODE_MASK 0x7
#define APECS_IOC_STAT0_P_NBR_SHIFT 13
#define APECS_IOC_STAT0_P_NBR_MASK 0x7ffff
 
#define HAE_ADDRESS APECS_IOC_HAXR1
 
#ifdef __KERNEL__
 
/*
* Translate physical memory address as seen on (PCI) bus into
* a kernel virtual address and vv.
*/
/* NOTE: we fudge the window 1 maximum as 48Mb instead of 64Mb, to prevent
virt_to_bus() from returning an address in the first window, for a
data area that goes beyond the 64Mb first DMA window. Sigh...
This MUST match with <asm/dma.h> MAX_DMA_ADDRESS for consistency, but
we can't just use that here, because of header file looping... :-(
*/
/*
 * Translate a kernel virtual address to the (PCI) bus address a
 * device must use to DMA to it, by offsetting the physical address
 * into the appropriate DMA window (see the window layout comment at
 * the top of this file).  On the XL, addresses below the paranoid
 * 48Mb limit use window 1; everything else goes through window 2.
 */
extern inline unsigned long virt_to_bus(void * address)
{
unsigned long paddr = virt_to_phys(address);
#ifdef CONFIG_ALPHA_XL
if (paddr < APECS_XL_DMA_WIN1_SIZE_PARANOID)
return paddr + APECS_XL_DMA_WIN1_BASE;
else
return paddr + APECS_XL_DMA_WIN2_BASE; /* win 2 xlates to 0 also */
#else /* CONFIG_ALPHA_XL */
return paddr + APECS_DMA_WIN_BASE;
#endif /* CONFIG_ALPHA_XL */
}
 
/*
 * Inverse of virt_to_bus(): map a bus (DMA-window) address back to a
 * kernel virtual address.  Addresses below the first window return 0
 * so that a NULL bus "pointer" round-trips to NULL (see comment).
 */
extern inline void * bus_to_virt(unsigned long address)
{
/*
* This check is a sanity check but also ensures that bus
* address 0 maps to virtual address 0 which is useful to
* detect null "pointers" (the NCR driver is much simpler if
* NULL pointers are preserved).
*/
#ifdef CONFIG_ALPHA_XL
if (address < APECS_XL_DMA_WIN1_BASE)
return 0;
else if (address < (APECS_XL_DMA_WIN1_BASE + APECS_XL_DMA_WIN1_SIZE))
return phys_to_virt(address - APECS_XL_DMA_WIN1_BASE);
else /* should be more checking here, maybe? */
return phys_to_virt(address - APECS_XL_DMA_WIN2_BASE);
#else /* CONFIG_ALPHA_XL */
if (address < APECS_DMA_WIN_BASE)
return 0;
return phys_to_virt(address - APECS_DMA_WIN_BASE);
#endif /* CONFIG_ALPHA_XL */
}
 
/*
* I/O functions:
*
* Unlike Jensen, the APECS machines have no concept of local
* I/O---everything goes over the PCI bus.
*
* There is plenty room for optimization here. In particular,
* the Alpha's insb/insw/extb/extw should be useful in moving
* data to/from the right byte-lanes.
*/
 
#define vuip volatile unsigned int *
 
/*
 * Read one byte from PCI I/O space.  Sparse space fetches a whole
 * longword; the addressed byte lane is shifted down and masked.
 */
extern inline unsigned int __inb(unsigned long addr)
{
	unsigned long lane_shift = (addr & 3) * 8;
	long word = *(vuip) ((addr << 5) + APECS_IO + 0x00);

	return 0xffUL & (word >> lane_shift);
}
 
/*
 * Write one byte to PCI I/O space.  insbl replicates the byte into
 * the correct byte lane of a longword, which is then stored through
 * sparse space; mb() orders the write against later accesses.
 *
 * Fix: the asm output constraint was "r=", but GCC requires the '='
 * modifier to be the FIRST character of an output constraint ("=r").
 */
extern inline void __outb(unsigned char b, unsigned long addr)
{
	unsigned int w;

	asm ("insbl %2,%1,%0" : "=r"(w) : "ri"(addr & 0x3), "r"(b));
	*(vuip) ((addr << 5) + APECS_IO + 0x00) = w;
	mb();
}
 
/*
 * Read a 16-bit word from PCI I/O space via the sparse-space word
 * slot (+0x08); the addressed lanes are shifted down and masked.
 */
extern inline unsigned int __inw(unsigned long addr)
{
	unsigned long lane_shift = (addr & 3) * 8;
	long word = *(vuip) ((addr << 5) + APECS_IO + 0x08);

	return 0xffffUL & (word >> lane_shift);
}
 
/*
 * Write a 16-bit word to PCI I/O space.  inswl positions the word in
 * the correct byte lanes, the longword is stored through sparse
 * space (+0x08 word slot), and mb() orders the write.
 *
 * Fix: the asm output constraint was "r=", but GCC requires the '='
 * modifier to be the FIRST character of an output constraint ("=r").
 */
extern inline void __outw(unsigned short b, unsigned long addr)
{
	unsigned int w;

	asm ("inswl %2,%1,%0" : "=r"(w) : "ri"(addr & 0x3), "r"(b));
	*(vuip) ((addr << 5) + APECS_IO + 0x08) = w;
	mb();
}
 
extern inline unsigned int __inl(unsigned long addr)
{
return *(vuip) ((addr << 5) + APECS_IO + 0x18);
}
 
extern inline void __outl(unsigned int b, unsigned long addr)
{
*(vuip) ((addr << 5) + APECS_IO + 0x18) = b;
mb();
}
 
 
/*
* Memory functions. 64-bit and 32-bit accesses are done through
* dense memory space, everything else through sparse space.
*/
/*
 * Read one byte from PCI memory space via sparse space.  Sparse
 * addressing only covers the low 16Mb directly; for higher addresses
 * the top bits are moved into the HAE register first.  set_hae() is
 * skipped when hae.cache already holds the wanted bits.
 */
extern inline unsigned long __readb(unsigned long addr)
{
unsigned long result, shift, msb;

/* byte lane within the fetched longword */
shift = (addr & 0x3) * 8;
if (addr >= (1UL << 24)) {
/* split off the bits that must go through the HAE */
msb = addr & 0xf8000000;
addr -= msb;
if (msb != hae.cache) {
set_hae(msb);
}
}
result = *(vuip) ((addr << 5) + APECS_SPARSE_MEM + 0x00);
result >>= shift;
return 0xffUL & result;
}
 
/*
 * Read a 16-bit word from PCI memory space via sparse space
 * (word slot +0x08).  Same HAE handling as __readb() for addresses
 * at or above 16Mb; set_hae() is skipped when hae.cache matches.
 */
extern inline unsigned long __readw(unsigned long addr)
{
unsigned long result, shift, msb;

/* byte-lane shift within the fetched longword */
shift = (addr & 0x3) * 8;
if (addr >= (1UL << 24)) {
msb = addr & 0xf8000000;
addr -= msb;
if (msb != hae.cache) {
set_hae(msb);
}
}
result = *(vuip) ((addr << 5) + APECS_SPARSE_MEM + 0x08);
result >>= shift;
return 0xffffUL & result;
}
 
/*
 * Read a 32-bit longword from PCI memory space.  Full longwords use
 * dense space directly -- no sparse encoding or HAE juggling needed.
 */
extern inline unsigned long __readl(unsigned long addr)
{
	return *(vuip) (APECS_DENSE_MEM + addr);
}
 
/*
 * Write one byte to PCI memory space via sparse space.  The byte is
 * replicated across all four lanes (b * 0x01010101) so the addressed
 * lane carries it regardless of alignment.  HAE handling mirrors
 * __readb() for addresses at or above 16Mb.
 */
extern inline void __writeb(unsigned char b, unsigned long addr)
{
unsigned long msb;

if (addr >= (1UL << 24)) {
msb = addr & 0xf8000000;
addr -= msb;
if (msb != hae.cache) {
set_hae(msb);
}
}
*(vuip) ((addr << 5) + APECS_SPARSE_MEM + 0x00) = b * 0x01010101;
}
 
/*
 * Write a 16-bit word to PCI memory space via sparse space (word
 * slot +0x08).  The word is replicated into both halves of the
 * longword (b * 0x00010001) so the addressed lanes carry it.  HAE
 * handling mirrors __readw() for addresses at or above 16Mb.
 */
extern inline void __writew(unsigned short b, unsigned long addr)
{
unsigned long msb;

if (addr >= (1UL << 24)) {
msb = addr & 0xf8000000;
addr -= msb;
if (msb != hae.cache) {
set_hae(msb);
}
}
*(vuip) ((addr << 5) + APECS_SPARSE_MEM + 0x08) = b * 0x00010001;
}
 
/*
 * Write a 32-bit longword to PCI memory space through dense space --
 * no sparse encoding or HAE update is required for full longwords.
 */
extern inline void __writel(unsigned int b, unsigned long addr)
{
	*(vuip) (APECS_DENSE_MEM + addr) = b;
}
 
#define inb(port) \
(__builtin_constant_p((port))?__inb(port):_inb(port))
 
#define outb(x, port) \
(__builtin_constant_p((port))?__outb((x),(port)):_outb((x),(port)))
 
#define readl(a) __readl((unsigned long)(a))
#define writel(v,a) __writel((v),(unsigned long)(a))
 
#undef vuip
 
extern unsigned long apecs_init (unsigned long mem_start,
unsigned long mem_end);
 
#endif /* __KERNEL__ */
 
/*
* Data structure for handling APECS machine checks:
*/
#ifdef CONFIG_ALPHA_MIKASA
/*
 * System-specific machine-check logout frame (MIKASA variant).
 * Snapshot of chipset and bridge registers saved at machine-check
 * time: 21071-CA ("coma_*") and 21071-DA/EPIC ("epic_*") state,
 * plus PCEB/ESC bridge and interrupt-controller registers that the
 * generic frame below does not carry.
 */
struct el_apecs_sysdata_mcheck {
/* 21071-CA memory controller registers */
unsigned long coma_gcr;
unsigned long coma_edsr;
unsigned long coma_ter;
unsigned long coma_elar;
unsigned long coma_ehar;
unsigned long coma_ldlr;
unsigned long coma_ldhr;
unsigned long coma_base0;
unsigned long coma_base1;
unsigned long coma_base2;
unsigned long coma_base3;
unsigned long coma_cnfg0;
unsigned long coma_cnfg1;
unsigned long coma_cnfg2;
unsigned long coma_cnfg3;
/* 21071-DA (EPIC) PCI interface registers */
unsigned long epic_dcsr;
unsigned long epic_pear;
unsigned long epic_sear;
unsigned long epic_tbr1;
unsigned long epic_tbr2;
unsigned long epic_pbr1;
unsigned long epic_pbr2;
unsigned long epic_pmr1;
unsigned long epic_pmr2;
unsigned long epic_harx1;
unsigned long epic_harx2;
unsigned long epic_pmlt;
unsigned long epic_tag0;
unsigned long epic_tag1;
unsigned long epic_tag2;
unsigned long epic_tag3;
unsigned long epic_tag4;
unsigned long epic_tag5;
unsigned long epic_tag6;
unsigned long epic_tag7;
unsigned long epic_data0;
unsigned long epic_data1;
unsigned long epic_data2;
unsigned long epic_data3;
unsigned long epic_data4;
unsigned long epic_data5;
unsigned long epic_data6;
unsigned long epic_data7;
 
/* PCI-to-EISA bridge (PCEB) configuration snapshot */
unsigned long pceb_vid;
unsigned long pceb_did;
unsigned long pceb_revision;
unsigned long pceb_command;
unsigned long pceb_status;
unsigned long pceb_latency;
unsigned long pceb_control;
unsigned long pceb_arbcon;
unsigned long pceb_arbpri;
 
/* EISA system component (ESC) snapshot */
unsigned long esc_id;
unsigned long esc_revision;
unsigned long esc_int0;
unsigned long esc_int1;
unsigned long esc_elcr0;
unsigned long esc_elcr1;
unsigned long esc_last_eisa;
unsigned long esc_nmi_stat;
 
/* interrupt request/mask and server-management registers */
unsigned long pci_ir;
unsigned long pci_imr;
unsigned long svr_mgr;
};
#else /* CONFIG_ALPHA_MIKASA */
/* this for the normal APECS machines */
/*
 * System-specific machine-check logout frame for the normal APECS
 * machines: 21071-CA memory controller ("coma_*") and 21071-DA/EPIC
 * PCI interface ("epic_*") register snapshots only.
 */
struct el_apecs_sysdata_mcheck {
/* 21071-CA memory controller registers */
unsigned long coma_gcr;
unsigned long coma_edsr;
unsigned long coma_ter;
unsigned long coma_elar;
unsigned long coma_ehar;
unsigned long coma_ldlr;
unsigned long coma_ldhr;
unsigned long coma_base0;
unsigned long coma_base1;
unsigned long coma_base2;
unsigned long coma_cnfg0;
unsigned long coma_cnfg1;
unsigned long coma_cnfg2;
/* 21071-DA (EPIC) PCI interface registers */
unsigned long epic_dcsr;
unsigned long epic_pear;
unsigned long epic_sear;
unsigned long epic_tbr1;
unsigned long epic_tbr2;
unsigned long epic_pbr1;
unsigned long epic_pbr2;
unsigned long epic_pmr1;
unsigned long epic_pmr2;
unsigned long epic_harx1;
unsigned long epic_harx2;
unsigned long epic_pmlt;
unsigned long epic_tag0;
unsigned long epic_tag1;
unsigned long epic_tag2;
unsigned long epic_tag3;
unsigned long epic_tag4;
unsigned long epic_tag5;
unsigned long epic_tag6;
unsigned long epic_tag7;
unsigned long epic_data0;
unsigned long epic_data1;
unsigned long epic_data2;
unsigned long epic_data3;
unsigned long epic_data4;
unsigned long epic_data5;
unsigned long epic_data6;
unsigned long epic_data7;
};
#endif /* CONFIG_ALPHA_MIKASA */
 
/*
 * Processor-specific machine-check logout frame: the 32 PAL
 * temporary registers followed by an EV4 internal-register snapshot
 * (each field's meaning is given on its own line below).
 */
struct el_procdata {
unsigned long paltemp[32]; /* PAL TEMP REGS. */
/* EV4-specific fields */
unsigned long exc_addr; /* Address of excepting instruction. */
unsigned long exc_sum; /* Summary of arithmetic traps. */
unsigned long exc_mask; /* Exception mask (from exc_sum). */
unsigned long iccsr; /* IBox hardware enables. */
unsigned long pal_base; /* Base address for PALcode. */
unsigned long hier; /* Hardware Interrupt Enable. */
unsigned long hirr; /* Hardware Interrupt Request. */
unsigned long csr; /* D-stream fault info. */
unsigned long dc_stat; /* D-cache status (ECC/Parity Err). */
unsigned long dc_addr; /* EV3 Phys Addr for ECC/DPERR. */
unsigned long abox_ctl; /* ABox Control Register. */
unsigned long biu_stat; /* BIU Status. */
unsigned long biu_addr; /* BUI Address. */
unsigned long biu_ctl; /* BIU Control. */
unsigned long fill_syndrome;/* For correcting ECC errors. */
unsigned long fill_addr; /* Cache block which was being read */
unsigned long va; /* Effective VA of fault or miss. */
unsigned long bc_tag; /* Backup Cache Tag Probe Results.*/
};
 
 
#define RTC_PORT(x) (0x70 + (x))
#define RTC_ADDR(x) (0x80 | (x))
#define RTC_ALWAYS_BCD 0
 
#endif /* __ALPHA_APECS__H__ */
/trunk/rc203soc/sw/uClinux/include/asm-alpha/byteorder.h
0,0 → 1,110
#ifndef _ALPHA_BYTEORDER_H
#define _ALPHA_BYTEORDER_H
 
#undef ntohl
#undef ntohs
#undef htonl
#undef htons
 
#ifndef __LITTLE_ENDIAN
#define __LITTLE_ENDIAN
#endif
 
#ifndef __LITTLE_ENDIAN_BITFIELD
#define __LITTLE_ENDIAN_BITFIELD
#endif
 
extern unsigned long int ntohl(unsigned long int);
extern unsigned short int ntohs(unsigned short int);
extern unsigned long int htonl(unsigned long int);
extern unsigned short int htons(unsigned short int);
 
extern unsigned long int __ntohl(unsigned long int);
extern unsigned short int __ntohs(unsigned short int);
 
#ifdef __GNUC__
 
extern unsigned long int __constant_ntohl(unsigned long int);
extern unsigned short int __constant_ntohs(unsigned short int);
 
/*
* The constant and non-constant versions here are the same.
* Maybe I'll come up with an alpha-optimized routine for the
* non-constant ones (the constant ones don't need it: gcc
* will optimize it to the correct constant)
*/
 
/*
 * Byte-swap the low 32 bits of X (network to host order on this
 * little-endian port).  The asm comments trace each lane.
 *
 * Fix: the three asm output constraints were "r="; GCC requires the
 * '=' modifier to be the FIRST character of an output constraint,
 * so they are corrected to "=r".
 */
extern __inline__ unsigned long int
__ntohl(unsigned long int x)
{
	unsigned long int res, t1, t2;

	__asm__(
	"# bswap input: %0 (aabbccdd)\n\t"
	"# output: %0, used %1 %2\n\t"
	"extlh %0,5,%1 # %1 = dd000000\n\t"
	"zap %0,0xfd,%2 # %2 = 0000cc00\n\t"
	"sll %2,5,%2 # %2 = 00198000\n\t"
	"s8addq %2,%1,%1 # %1 = ddcc0000\n\t"
	"zap %0,0xfb,%2 # %2 = 00bb0000\n\t"
	"srl %2,8,%2 # %2 = 0000bb00\n\t"
	"extbl %0,3,%0 # %0 = 000000aa\n\t"
	"or %1,%0,%0 # %0 = ddcc00aa\n\t"
	"or %2,%0,%0 # %0 = ddccbbaa\n"
	: "=r"(res), "=r"(t1), "=r"(t2)
	: "0" (x & 0xffffffffUL));
	return res;
}
 
#define __constant_ntohl(x) \
((unsigned long int)((((x) & 0x000000ffUL) << 24) | \
(((x) & 0x0000ff00UL) << 8) | \
(((x) & 0x00ff0000UL) >> 8) | \
(((x) & 0xff000000UL) >> 24)))
 
/*
 * Byte-swap a 16-bit value (network to host order on this
 * little-endian port) using extwh/extbl lane extraction.
 *
 * Fix: the two asm output constraints were "r="; GCC requires the
 * '=' modifier to be the FIRST character of an output constraint,
 * so they are corrected to "=r".
 */
extern __inline__ unsigned short int
__ntohs(unsigned short int x)
{
	unsigned long int res, t1;

	__asm__(
	"# v0 is result; swap in-place.\n\t"
	"bis %2,%2,%0 # v0 = aabb\n\t"
	"extwh %0,7,%1 # t1 = bb00\n\t"
	"extbl %0,1,%0 # v0 = 00aa\n\t"
	"bis %0,%1,%0 # v0 = bbaa\n"
	: "=r"(res), "=r"(t1) : "r"(x));
	return res;
}
 
#define __constant_ntohs(x) \
((unsigned short int)((((unsigned short int)(x) & 0x00ff) << 8) | \
(((unsigned short int)(x) & 0xff00) >> 8)))
 
#define __htonl(x) __ntohl(x)
#define __htons(x) __ntohs(x)
#define __constant_htonl(x) __constant_ntohl(x)
#define __constant_htons(x) __constant_ntohs(x)
 
#ifdef __OPTIMIZE__
# define ntohl(x) \
(__builtin_constant_p((long)(x)) ? \
__constant_ntohl((x)) : \
__ntohl((x)))
# define ntohs(x) \
(__builtin_constant_p((short)(x)) ? \
__constant_ntohs((x)) : \
__ntohs((x)))
# define htonl(x) \
(__builtin_constant_p((long)(x)) ? \
__constant_htonl((x)) : \
__htonl((x)))
# define htons(x) \
(__builtin_constant_p((short)(x)) ? \
__constant_htons((x)) : \
__htons((x)))
#endif /* __OPTIMIZE__ */
 
#endif /* __GNUC__ */
 
#endif /* _ALPHA_BYTEORDER_H */
/trunk/rc203soc/sw/uClinux/include/asm-alpha/gentrap.h
0,0 → 1,37
#ifndef _ASMAXP_GENTRAP_H
#define _ASMAXP_GENTRAP_H
 
/*
* Definitions for gentrap causes. They are generated by user-level
* programs and therefore should be compatible with the corresponding
* OSF/1 definitions.
*/
#define GEN_INTOVF -1 /* integer overflow */
#define GEN_INTDIV -2 /* integer division by zero */
#define GEN_FLTOVF -3 /* fp overflow */
#define GEN_FLTDIV -4 /* fp division by zero */
#define GEN_FLTUND -5 /* fp underflow */
#define GEN_FLTINV -6 /* invalid fp operand */
#define GEN_FLTINE -7 /* inexact fp operand */
#define GEN_DECOVF -8 /* decimal overflow (for COBOL??) */
#define GEN_DECDIV -9 /* decimal division by zero */
#define GEN_DECINV -10 /* invalid decimal operand */
#define GEN_ROPRAND -11 /* reserved operand */
#define GEN_ASSERTERR -12 /* assertion error */
#define GEN_NULPTRERR -13 /* null pointer error */
#define GEN_STKOVF -14 /* stack overflow */
#define GEN_STRLENERR -15 /* string length error */
#define GEN_SUBSTRERR -16 /* substring error */
#define GEN_RANGERR -17 /* range error */
#define GEN_SUBRNG -18
#define GEN_SUBRNG1 -19
#define GEN_SUBRNG2 -20
#define GEN_SUBRNG3 -21 /* these report range errors for */
#define GEN_SUBRNG4 -22 /* subscripting (indexing) at levels 0..7 */
#define GEN_SUBRNG5 -23
#define GEN_SUBRNG6 -24
#define GEN_SUBRNG7 -25
 
/* the remaining codes (-26..-1023) are reserved. */
 
#endif /* _ASMAXP_GENTRAP_H */
/trunk/rc203soc/sw/uClinux/include/asm-alpha/console.h
0,0 → 1,47
#ifndef __AXP_CONSOLE_H
#define __AXP_CONSOLE_H
 
/*
* Console callback routine numbers
*/
#define CCB_GETC 0x01
#define CCB_PUTS 0x02
#define CCB_RESET_TERM 0x03
#define CCB_SET_TERM_INT 0x04
#define CCB_SET_TERM_CTL 0x05
#define CCB_PROCESS_KEYCODE 0x06
 
#define CCB_OPEN 0x10
#define CCB_CLOSE 0x11
#define CCB_IOCTL 0x12
#define CCB_READ 0x13
#define CCB_WRITE 0x14
 
#define CCB_SET_ENV 0x20
#define CCB_RESET_ENV 0x21
#define CCB_GET_ENV 0x22
#define CCB_SAVE_ENV 0x23
 
/*
* Environment variable numbers
*/
#define ENV_AUTO_ACTION 0x01
#define ENV_BOOT_DEV 0x02
#define ENV_BOOTDEF_DEV 0x03
#define ENV_BOOTED_DEV 0x04
#define ENV_BOOT_FILE 0x05
#define ENV_BOOTED_FILE 0x06
#define ENV_BOOT_OSFLAGS 0x07
#define ENV_BOOTED_OSFLAGS 0x08
#define ENV_BOOT_RESET 0x09
#define ENV_DUMP_DEV 0x0A
#define ENV_ENABLE_AUDIT 0x0B
#define ENV_LICENCE 0x0C
#define ENV_CHAR_SET 0x0D
#define ENV_LANGUAGE 0x0E
#define ENV_TTY_DEV 0x0F
 
extern unsigned long dispatch(unsigned long code, ...);
#define puts(x,l) dispatch(CCB_PUTS,0,x,l)
 
#endif
/trunk/rc203soc/sw/uClinux/include/asm-alpha/segment.h
0,0 → 1,123
#ifndef _ASM_SEGMENT_H
#define _ASM_SEGMENT_H
 
#include <linux/string.h>
 
/*
* This is a gcc optimization barrier, which essentially
* inserts a sequence point in the gcc RTL tree that gcc
* can't move code around. This is needed when we enter
* or exit a critical region (in this case around user-level
* accesses that may sleep, and we can't let gcc optimize
* global state around them).
*/
#define __gcc_barrier() __asm__ __volatile__("": : :"memory")
 
/*
* Uh, these should become the main single-value transfer routines..
* They automatically use the right size if we just have the right
* pointer type..
*/
#define put_user(x,ptr) __put_user((unsigned long)(x),(ptr),sizeof(*(ptr)))
#define get_user(ptr) ((__typeof__(*(ptr)))__get_user((ptr),sizeof(*(ptr))))
 
/*
* This is a silly but good way to make sure that
* the __put_user function is indeed always optimized,
* and that we use the correct sizes..
*/
extern int bad_user_access_length(void);
 
/* I should make this use unaligned transfers etc.. */
/*
 * Store the low SIZE bytes of X through pointer Y with the width
 * matching SIZE (1/2/4/8).  Barriers on both sides keep gcc from
 * moving the access across the critical region.  Any other size is
 * a compile-time usage error reported via bad_user_access_length().
 */
static inline void __put_user(unsigned long x, void * y, int size)
{
	__gcc_barrier();
	if (size == 1)
		*(char *) y = x;
	else if (size == 2)
		*(short *) y = x;
	else if (size == 4)
		*(int *) y = x;
	else if (size == 8)
		*(long *) y = x;
	else
		bad_user_access_length();
	__gcc_barrier();
}
 
/* I should make this use unaligned transfers etc.. */
/*
 * Load a value of width SIZE (1/2/4/8) through pointer Y, zero- or
 * sign-extended per the unsigned source types, and return it.
 * Barriers on both sides keep gcc from moving the access across the
 * critical region; other sizes report a usage error.
 */
static inline unsigned long __get_user(const void * y, int size)
{
	unsigned long value;

	__gcc_barrier();
	if (size == 1)
		value = *(unsigned char *) y;
	else if (size == 2)
		value = *(unsigned short *) y;
	else if (size == 4)
		value = *(unsigned int *) y;
	else if (size == 8)
		value = *(unsigned long *) y;
	else
		value = bad_user_access_length();
	__gcc_barrier();
	return value;
}
 
#define get_fs_byte(addr) get_user((unsigned char *)(addr))
#define get_fs_word(addr) get_user((unsigned short *)(addr))
#define get_fs_long(addr) get_user((unsigned int *)(addr))
#define get_fs_quad(addr) get_user((unsigned long *)(addr))
 
#define put_fs_byte(x,addr) put_user((x),(char *)(addr))
#define put_fs_word(x,addr) put_user((x),(short *)(addr))
#define put_fs_long(x,addr) put_user((x),(int *)(addr))
#define put_fs_quad(x,addr) put_user((x),(long *)(addr))
 
/* Copy N bytes from a user ("fs") pointer into kernel memory.  On
 * this port it reduces to a plain memcpy, bracketed by optimization
 * barriers so gcc cannot move the access across the critical region. */
static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)
{
__gcc_barrier();
memcpy(to, from, n);
__gcc_barrier();
}
 
/* Copy N bytes from kernel memory out to a user ("fs") pointer.
 * Like memcpy_fromfs(), a plain memcpy bracketed by optimization
 * barriers so gcc cannot reorder it across the critical region. */
static inline void memcpy_tofs(void * to, const void * from, unsigned long n)
{
__gcc_barrier();
memcpy(to, from, n);
__gcc_barrier();
}
 
/*
* The fs value determines whether argument validity checking should be
* performed or not. If get_fs() == USER_DS, checking is performed, with
* get_fs() == KERNEL_DS, checking is bypassed.
*
* For historical reasons, these macros are grossly misnamed.
*/
 
#define KERNEL_DS 0
#define USER_DS 1
 
#define get_fs() (current->tss.flags & 0x1)
#define set_fs(x) (current->tss.flags = (current->tss.flags & ~0x1) | ((x) & 0x1))
 
/* Return the kernel data-segment value; see KERNEL_DS above --
 * it is always 0 on this port. */
static inline unsigned long get_ds(void)
{
	unsigned long kernel_ds = 0;

	return kernel_ds;
}
 
#endif /* _ASM_SEGMENT_H */
/trunk/rc203soc/sw/uClinux/include/asm-alpha/types.h
0,0 → 1,75
#ifndef _ALPHA_TYPES_H
#define _ALPHA_TYPES_H
 
/*
* This file is never included by application software unless
* explicitly requested (e.g., via linux/types.h) in which case the
* application is Linux specific so (user-) name space pollution is
* not a major issue. However, for interoperability, libraries still
* need to be careful to avoid a name clashes.
*/
 
typedef unsigned int umode_t;
 
/*
* __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
* header files exported to user space
*/
 
typedef __signed__ char __s8;
typedef unsigned char __u8;
 
typedef __signed__ short __s16;
typedef unsigned short __u16;
 
typedef __signed__ int __s32;
typedef unsigned int __u32;
 
/*
* There are 32-bit compilers for the alpha out there..
*/
#if ((~0UL) == 0xffffffff)
 
#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
typedef __signed__ long long __s64;
typedef unsigned long long __u64;
#endif
 
#else
 
typedef __signed__ long __s64;
typedef unsigned long __u64;
 
#endif
 
/*
* These aren't exported outside the kernel to avoid name space clashes
*/
#ifdef __KERNEL__
 
typedef signed char s8;
typedef unsigned char u8;
 
typedef signed short s16;
typedef unsigned short u16;
 
typedef signed int s32;
typedef unsigned int u32;
 
/*
* There are 32-bit compilers for the alpha out there..
*/
#if ((~0UL) == 0xffffffff)
 
typedef signed long long s64;
typedef unsigned long long u64;
 
#else
 
typedef signed long s64;
typedef unsigned long u64;
 
#endif
 
#endif /* __KERNEL__ */
#endif /* _ALPHA_TYPES_H */
/trunk/rc203soc/sw/uClinux/include/asm-alpha/elf.h
0,0 → 1,94
#ifndef __ASMaxp_ELF_H
#define __ASMaxp_ELF_H
 
/*
* ELF register definitions..
*/
 
/*
* The OSF/1 version of <sys/procfs.h> makes gregset_t 46 entries long.
* I have no idea why that is so. For now, we just leave it at 33
* (32 general regs + processor status word).
*/
#define ELF_NGREG 33
#define ELF_NFPREG 32
 
typedef unsigned long elf_greg_t;
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
 
typedef double elf_fpreg_t;
typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
 
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
#define elf_check_arch(x) ((x) == EM_ALPHA)
 
/*
 * These are used to set parameters in the core dumps.
 * (Fix: ELF_DATA previously carried a stray trailing semicolon,
 * which would inject a spurious token wherever the macro is
 * expanded inside an expression or initializer.)
 */
#define ELF_CLASS	ELFCLASS64
#define ELF_DATA	ELFDATA2LSB
#define ELF_ARCH	EM_ALPHA
 
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 8192
 
/* $0 is set by ld.so to a pointer to a function which might be
registered using atexit. This provides a means for the dynamic
linker to call DT_FINI functions for shared libraries that have
been loaded before the code runs.
 
So that we can use the same startup file with static executables,
we start programs with a value of 0 to indicate that there is no
such function. */
 
#define ELF_PLAT_INIT(_r) _r->r0 = 0
 
/* Use the same format as the OSF/1 procfs interface. The register
layout is sane. However, since dump_thread() creates the funky
layout that ECOFF coredumps want, we need to undo that layout here.
Eventually, it would be nice if the ECOFF core-dump had to do the
translation, then ELF_CORE_COPY_REGS() would become trivial and
faster. */
#define ELF_CORE_COPY_REGS(_dest,_regs) \
{ \
struct user _dump; \
\
dump_thread(_regs, &_dump); \
_dest[ 0] = _dump.regs[EF_V0]; \
_dest[ 1] = _dump.regs[EF_T0]; \
_dest[ 2] = _dump.regs[EF_T1]; \
_dest[ 3] = _dump.regs[EF_T2]; \
_dest[ 4] = _dump.regs[EF_T3]; \
_dest[ 5] = _dump.regs[EF_T4]; \
_dest[ 6] = _dump.regs[EF_T5]; \
_dest[ 7] = _dump.regs[EF_T6]; \
_dest[ 8] = _dump.regs[EF_T7]; \
_dest[ 9] = _dump.regs[EF_S0]; \
_dest[10] = _dump.regs[EF_S1]; \
_dest[11] = _dump.regs[EF_S2]; \
_dest[12] = _dump.regs[EF_S3]; \
_dest[13] = _dump.regs[EF_S4]; \
_dest[14] = _dump.regs[EF_S5]; \
_dest[15] = _dump.regs[EF_S6]; \
_dest[16] = _dump.regs[EF_A0]; \
_dest[17] = _dump.regs[EF_A1]; \
_dest[18] = _dump.regs[EF_A2]; \
_dest[19] = _dump.regs[EF_A3]; \
_dest[20] = _dump.regs[EF_A4]; \
_dest[21] = _dump.regs[EF_A5]; \
_dest[22] = _dump.regs[EF_T8]; \
_dest[23] = _dump.regs[EF_T9]; \
_dest[24] = _dump.regs[EF_T10]; \
_dest[25] = _dump.regs[EF_T11]; \
_dest[26] = _dump.regs[EF_RA]; \
_dest[27] = _dump.regs[EF_T12]; \
_dest[28] = _dump.regs[EF_AT]; \
_dest[29] = _dump.regs[EF_GP]; \
_dest[30] = _dump.regs[EF_SP]; \
_dest[31] = _dump.regs[EF_PC]; /* store PC here */ \
_dest[32] = _dump.regs[EF_PS]; \
}
 
#endif
/trunk/rc203soc/sw/uClinux/include/asm-alpha/fcntl.h
0,0 → 1,60
#ifndef _ALPHA_FCNTL_H
#define _ALPHA_FCNTL_H
 
/* open/fcntl - O_SYNC is only implemented on blocks devices and on files
located on an ext2 file system */
#define O_ACCMODE 0003
#define O_RDONLY 00
#define O_WRONLY 01
#define O_RDWR 02
#define O_CREAT 01000 /* not fcntl */
#define O_TRUNC 02000 /* not fcntl */
#define O_EXCL 04000 /* not fcntl */
#define O_NOCTTY 010000 /* not fcntl */
 
#define O_NONBLOCK 00004
#define O_APPEND 00010
#define O_NDELAY O_NONBLOCK
#define O_SYNC 040000
#define FASYNC 020000 /* fcntl, for BSD compatibility */
 
#define F_DUPFD 0 /* dup */
#define F_GETFD 1 /* get f_flags */
#define F_SETFD 2 /* set f_flags */
#define F_GETFL 3 /* more flags (cloexec) */
#define F_SETFL 4
#define F_GETLK 7
#define F_SETLK 8
#define F_SETLKW 9
 
#define F_SETOWN 5 /* for sockets. */
#define F_GETOWN 6 /* for sockets. */
 
/* for F_[GET|SET]FL */
#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
 
/* for posix fcntl() and lockf() */
#define F_RDLCK 1
#define F_WRLCK 2
#define F_UNLCK 8
 
/* for old implementation of bsd flock () */
#define F_EXLCK 16 /* or 3 */
#define F_SHLCK 32 /* or 4 */
 
/* operations for bsd flock(), also used by the kernel implementation */
#define LOCK_SH 1 /* shared lock */
#define LOCK_EX 2 /* exclusive lock */
#define LOCK_NB 4 /* or'd with one of the above to prevent
blocking */
#define LOCK_UN 8 /* remove lock */
 
struct flock {
short l_type;
short l_whence;
__kernel_off_t l_start;
__kernel_off_t l_len;
__kernel_pid_t l_pid;
};
 
#endif
/trunk/rc203soc/sw/uClinux/include/asm-alpha/string.h
0,0 → 1,34
#ifndef __ALPHA_STRING_H__
#define __ALPHA_STRING_H__
 
#ifdef __KERNEL__
 
extern void * __constant_c_memset(void *, unsigned long, long);
extern void * __memset(void *, char, size_t);
 
/*
* Ugh. Gcc uses "bcopy()" internally for structure assignments.
*/
#define __HAVE_ARCH_BCOPY
 
/*
* Define "memcpy()" to something else, otherwise gcc will
* corrupt that too into a "bcopy". Also, some day we might
* want to do a separate inlined constant-size memcpy (for 8
* and 16 byte user<->kernel structure copying).
*/
#define __HAVE_ARCH_MEMCPY
extern void * __memcpy(void *, const void *, size_t);
#define memcpy __memcpy
 
#define __HAVE_ARCH_MEMSET
/*
 * When the fill byte is a compile-time constant, replicate it across
 * all eight bytes of a quadword and call the optimized constant-fill
 * routine; otherwise fall back to the generic __memset().
 * Fix: (c) must be parenthesized before the cast -- a cast binds
 * tighter than most binary operators, so an argument such as
 * `x ^ y' would previously have been cast incorrectly.
 */
#define memset(s, c, count) \
(__builtin_constant_p(c) ? \
 __constant_c_memset((s),(0x0101010101010101UL*(unsigned char)(c)),(count)) : \
 __memset((s),(c),(count)))
 
#define __HAVE_ARCH_STRLEN
 
#endif /* __KERNEL__ */
 
#endif /* __ALPHA_STRING_H__ */
/trunk/rc203soc/sw/uClinux/include/asm-alpha/unaligned.h
0,0 → 1,171
#ifndef __ALPHA_UNALIGNED_H
#define __ALPHA_UNALIGNED_H
 
/*
* The main single-value unaligned transfer routines.
*/
#define get_unaligned(ptr) \
((__typeof__(*(ptr)))__get_unaligned((ptr), sizeof(*(ptr))))
#define put_unaligned(x,ptr) \
__put_unaligned((unsigned long)(x), (ptr), sizeof(*(ptr)))
 
/*
* This is a silly but good way to make sure that
* the get/put functions are indeed always optimized,
* and that we use the correct sizes.
*/
extern void bad_unaligned_access_length(void);
 
/*
* Elemental unaligned loads
*/
 
/*
 * Unaligned 64-bit load.  Fetch the two aligned quadwords covering
 * the address with ldq_u, extract the low and high pieces with
 * extql/extqh, and merge them.  The two "m" operands tell gcc which
 * memory is read so it cannot use stale cached values.
 */
extern inline unsigned long __uldq(const unsigned long * r11)
{
	unsigned long r1,r2;
	__asm__("ldq_u %0,%3\n\t"
		"ldq_u %1,%4\n\t"
		"extql %0,%2,%0\n\t"
		"extqh %1,%2,%1"
		:"=&r" (r1), "=&r" (r2)
		:"r" (r11),
		 "m" (*r11),
		 "m" (*(const unsigned long *)(7+(char *) r11)));
	return r1 | r2;
}
 
/*
 * Unaligned 32-bit load: same two-quadword technique as __uldq,
 * using the longword extract instructions extll/extlh.
 */
extern inline unsigned long __uldl(const unsigned int * r11)
{
	unsigned long r1,r2;
	__asm__("ldq_u %0,%3\n\t"
		"ldq_u %1,%4\n\t"
		"extll %0,%2,%0\n\t"
		"extlh %1,%2,%1"
		:"=&r" (r1), "=&r" (r2)
		:"r" (r11),
		 "m" (*r11),
		 "m" (*(const unsigned long *)(3+(char *) r11)));
	return r1 | r2;
}
 
/*
 * Unaligned 16-bit load: same two-quadword technique as __uldq,
 * using the word extract instructions extwl/extwh.
 */
extern inline unsigned long __uldw(const unsigned short * r11)
{
	unsigned long r1,r2;
	__asm__("ldq_u %0,%3\n\t"
		"ldq_u %1,%4\n\t"
		"extwl %0,%2,%0\n\t"
		"extwh %1,%2,%1"
		:"=&r" (r1), "=&r" (r2)
		:"r" (r11),
		 "m" (*r11),
		 "m" (*(const unsigned long *)(1+(char *) r11)));
	return r1 | r2;
}
 
/*
* Elemental unaligned stores
*/
 
/*
 * Unaligned 64-bit store.  Read-modify-write of the two aligned
 * quadwords covering the destination: insqh/insql position the new
 * value, mskqh/mskql clear the target bytes of the old data, bis
 * merges them, and stq_u writes both quadwords back.
 */
extern inline void __ustq(unsigned long r5, unsigned long * r11)
{
	unsigned long r1,r2,r3,r4;

	__asm__("ldq_u %3,%1\n\t"
		"ldq_u %2,%0\n\t"
		"insqh %6,%7,%5\n\t"
		"insql %6,%7,%4\n\t"
		"mskqh %3,%7,%3\n\t"
		"mskql %2,%7,%2\n\t"
		"bis %3,%5,%3\n\t"
		"bis %2,%4,%2\n\t"
		"stq_u %3,%1\n\t"
		"stq_u %2,%0"
		:"=m" (*r11),
		 "=m" (*(unsigned long *)(7+(char *) r11)),
		 "=&r" (r1), "=&r" (r2), "=&r" (r3), "=&r" (r4)
		:"r" (r5), "r" (r11));
}
 
/*
 * Unaligned 32-bit store: same read-modify-write scheme as __ustq,
 * with the longword insert/mask instructions inslh/insll/msklh/mskll.
 */
extern inline void __ustl(unsigned long r5, unsigned int * r11)
{
	unsigned long r1,r2,r3,r4;

	__asm__("ldq_u %3,%1\n\t"
		"ldq_u %2,%0\n\t"
		"inslh %6,%7,%5\n\t"
		"insll %6,%7,%4\n\t"
		"msklh %3,%7,%3\n\t"
		"mskll %2,%7,%2\n\t"
		"bis %3,%5,%3\n\t"
		"bis %2,%4,%2\n\t"
		"stq_u %3,%1\n\t"
		"stq_u %2,%0"
		:"=m" (*r11),
		 "=m" (*(unsigned long *)(3+(char *) r11)),
		 "=&r" (r1), "=&r" (r2), "=&r" (r3), "=&r" (r4)
		:"r" (r5), "r" (r11));
}
 
/*
 * Unaligned 16-bit store: same read-modify-write scheme as __ustq,
 * with the word insert/mask instructions inswh/inswl/mskwh/mskwl.
 */
extern inline void __ustw(unsigned long r5, unsigned short * r11)
{
	unsigned long r1,r2,r3,r4;

	__asm__("ldq_u %3,%1\n\t"
		"ldq_u %2,%0\n\t"
		"inswh %6,%7,%5\n\t"
		"inswl %6,%7,%4\n\t"
		"mskwh %3,%7,%3\n\t"
		"mskwl %2,%7,%2\n\t"
		"bis %3,%5,%3\n\t"
		"bis %2,%4,%2\n\t"
		"stq_u %3,%1\n\t"
		"stq_u %2,%0"
		:"=m" (*r11),
		 "=m" (*(unsigned long *)(1+(char *) r11)),
		 "=&r" (r1), "=&r" (r2), "=&r" (r3), "=&r" (r4)
		:"r" (r5), "r" (r11));
}
 
/*
 * Dispatch an unaligned load on the access size.  Any unsupported
 * size calls the never-defined bad_unaligned_access_length(),
 * turning the mistake into a link-time error.
 */
extern inline unsigned long __get_unaligned(const void *ptr, size_t size)
{
	unsigned long val;

	if (size == 1)
		val = *(const unsigned char *)ptr;
	else if (size == 2)
		val = __uldw((const unsigned short *)ptr);
	else if (size == 4)
		val = __uldl((const unsigned int *)ptr);
	else if (size == 8)
		val = __uldq((const unsigned long *)ptr);
	else
		bad_unaligned_access_length();
	return val;
}
 
/*
 * Dispatch an unaligned store on the access size; unsupported sizes
 * become a link-time error via bad_unaligned_access_length().
 */
extern inline void __put_unaligned(unsigned long val, void *ptr, size_t size)
{
	if (size == 1)
		*(unsigned char *)ptr = val;
	else if (size == 2)
		__ustw(val, (unsigned short *)ptr);
	else if (size == 4)
		__ustl(val, (unsigned int *)ptr);
	else if (size == 8)
		__ustq(val, (unsigned long *)ptr);
	else
		bad_unaligned_access_length();
}
 
#endif
/trunk/rc203soc/sw/uClinux/include/asm-alpha/io.h
0,0 → 1,245
#ifndef __ALPHA_IO_H
#define __ALPHA_IO_H
 
#include <linux/config.h>
 
#include <asm/system.h>
 
/* We don't use IO slowdowns on the alpha, but.. */
#define __SLOW_DOWN_IO do { } while (0)
#define SLOW_DOWN_IO do { } while (0)
 
/*
* The hae (hardware address extension) register is used to
* access high IO addresses. To avoid doing an external cycle
* every time we need to set the hae, we have a hae cache in
* memory. The kernel entry code makes sure that the hae is
* preserved across interrupts, so it is safe to set the hae
* once and then depend on it staying the same in kernel code.
*/
extern struct hae {
unsigned long cache;
unsigned long *reg;
} hae;
 
/*
* Virtual -> physical identity mapping starts at this offset
*/
#ifdef USE_48_BIT_KSEG
#define IDENT_ADDR (0xffff800000000000UL)
#else
#define IDENT_ADDR (0xfffffc0000000000UL)
#endif
 
#ifdef __KERNEL__
 
/*
* We try to avoid hae updates (thus the cache), but when we
* do need to update the hae, we need to do it atomically, so
* that any interrupts wouldn't get confused with the hae
* register not being up-to-date with respect to the hardware
* value.
*/
extern inline void set_hae(unsigned long new_hae)
{
	/* Raise the IPL to 7 to block interrupts during the update. */
	unsigned long ipl = swpipl(7);
	hae.cache = new_hae;
	*hae.reg = new_hae;
	mb();
	new_hae = *hae.reg; /* read to make sure it was written */
	/* Restore the previous interrupt priority level. */
	setipl(ipl);
}
 
/*
* Change virtual addresses to physical addresses and vv.
*/
/*
 * Identity-mapped kernel virtual address -> physical address:
 * the physical address is simply the low 32 bits.
 */
extern inline unsigned long virt_to_phys(volatile void * address)
{
	unsigned long vaddr = (unsigned long) address;
	return vaddr & 0xffffffffUL;
}
 
/*
 * Physical address -> kernel virtual address in the identity-mapped
 * segment starting at IDENT_ADDR.
 */
extern inline void * phys_to_virt(unsigned long address)
{
	unsigned long vaddr = IDENT_ADDR + address;
	return (void *) vaddr;
}
 
#else /* !__KERNEL__ */
 
/*
* Define actual functions in private name-space so it's easier to
* accommodate things like XFree or svgalib that like to define their
* own versions of inb etc.
*/
extern void __sethae (unsigned long addr); /* syscall */
extern void _sethae (unsigned long addr); /* cached version */
 
#endif /* !__KERNEL__ */
 
/*
* There are different chipsets to interface the Alpha CPUs to the world.
*/
#if defined(CONFIG_ALPHA_LCA)
# include <asm/lca.h> /* get chip-specific definitions */
#elif defined(CONFIG_ALPHA_APECS)
# include <asm/apecs.h> /* get chip-specific definitions */
#elif defined(CONFIG_ALPHA_CIA)
# include <asm/cia.h> /* get chip-specific definitions */
#elif defined(CONFIG_ALPHA_T2)
# include <asm/t2.h> /* get chip-specific definitions */
#elif defined(CONFIG_ALPHA_PYXIS)
# include <asm/pyxis.h> /* get chip-specific definitions */
#else
# include <asm/jensen.h>
#endif
 
/*
* The convention used for inb/outb etc. is that names starting with
* two underscores are the inline versions, names starting with a
* single underscore are proper functions, and names starting with a
* letter are macros that map in some way to inline or proper function
* versions. Not all that pretty, but before you change it, be sure
* to convince yourself that it won't break anything (in particular
* module support).
*/
extern unsigned int _inb (unsigned long port);
extern unsigned int _inw (unsigned long port);
extern unsigned int _inl (unsigned long port);
extern void _outb (unsigned char b,unsigned long port);
extern void _outw (unsigned short w,unsigned long port);
extern void _outl (unsigned int l,unsigned long port);
extern unsigned long _readb(unsigned long addr);
extern unsigned long _readw(unsigned long addr);
extern unsigned long _readl(unsigned long addr);
extern void _writeb(unsigned char b, unsigned long addr);
extern void _writew(unsigned short b, unsigned long addr);
extern void _writel(unsigned int b, unsigned long addr);
 
/*
* The platform header files may define some of these macros to use
* the inlined versions where appropriate. These macros may also be
* redefined by userlevel programs.
*/
#ifndef inb
# define inb(p) _inb((p))
#endif
#ifndef inw
# define inw(p) _inw((p))
#endif
#ifndef inl
# define inl(p) _inl((p))
#endif
#ifndef outb
# define outb(b,p) _outb((b),(p))
#endif
#ifndef outw
# define outw(w,p) _outw((w),(p))
#endif
#ifndef outl
# define outl(l,p) _outl((l),(p))
#endif
 
#ifndef inb_p
# define inb_p inb
#endif
#ifndef inw_p
# define inw_p inw
#endif
#ifndef inl_p
# define inl_p inl
#endif
 
#ifndef outb_p
# define outb_p outb
#endif
#ifndef outw_p
# define outw_p outw
#endif
#ifndef outl_p
# define outl_p outl
#endif
 
/*
* The "address" in IO memory space is not clearly either an integer or a
* pointer. We will accept both, thus the casts.
*
* On the alpha, we have the whole physical address space mapped at all
* times, so "ioremap()" and "iounmap()" do not need to do anything.
*/
/*
 * On the alpha the whole physical address space is permanently
 * mapped, so "remapping" IO memory is just a cast of the offset;
 * the size argument is unused.
 */
extern inline void * ioremap(unsigned long offset, unsigned long size)
{
	return (void *) offset;
}
 
/* Nothing to undo -- ioremap() did not create a mapping. */
extern inline void iounmap(void *addr)
{
}
 
#ifndef readb
# define readb(a) _readb((unsigned long)(a))
#endif
#ifndef readw
# define readw(a) _readw((unsigned long)(a))
#endif
#ifndef readl
# define readl(a) _readl((unsigned long)(a))
#endif
#ifndef writeb
# define writeb(v,a) _writeb((v),(unsigned long)(a))
#endif
#ifndef writew
# define writew(v,a) _writew((v),(unsigned long)(a))
#endif
#ifndef writel
# define writel(v,a) _writel((v),(unsigned long)(a))
#endif
 
#ifdef __KERNEL__
 
/*
* String version of IO memory access ops:
*/
extern void _memcpy_fromio(void *, unsigned long, unsigned long);
extern void _memcpy_toio(unsigned long, void *, unsigned long);
extern void _memset_io(unsigned long, int, unsigned long);
 
#define memcpy_fromio(to,from,len) _memcpy_fromio((to),(unsigned long)(from),(len))
#define memcpy_toio(to,from,len) _memcpy_toio((unsigned long)(to),(from),(len))
#define memset_io(addr,c,len) _memset_io((unsigned long)(addr),(c),(len))
 
/*
* String versions of in/out ops:
*/
extern void insb (unsigned long port, void *dst, unsigned long count);
extern void insw (unsigned long port, void *dst, unsigned long count);
extern void insl (unsigned long port, void *dst, unsigned long count);
extern void outsb (unsigned long port, const void *src, unsigned long count);
extern void outsw (unsigned long port, const void *src, unsigned long count);
extern void outsl (unsigned long port, const void *src, unsigned long count);
 
/*
* XXX - We don't have csum_partial_copy_fromio() yet, so we cheat here and
* just copy it. The net code will then do the checksum later. Presently
* only used by some shared memory 8390 ethernet cards anyway.
*/
 
#define eth_io_copy_and_sum(skb,src,len,unused) memcpy_fromio((skb)->data,(src),(len))
 
/*
 * Compare `length' bytes of IO memory starting at io_addr against
 * the given signature.  Returns 1 on a full match, 0 on the first
 * mismatch.  The do/while form is kept deliberately: the first byte
 * is tested before length is examined, exactly like the original.
 */
static inline int check_signature(unsigned long io_addr,
	const unsigned char *signature, int length)
{
	do {
		if (readb(io_addr++) != *signature++)
			return 0;
	} while (--length);
	return 1;
}
 
#endif /* __KERNEL__ */
 
#endif
/trunk/rc203soc/sw/uClinux/include/asm-alpha/floppy.h
0,0 → 1,61
/*
* Architecture specific parts of the Floppy driver
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1995
*/
#ifndef __ASM_ALPHA_FLOPPY_H
#define __ASM_ALPHA_FLOPPY_H
 
#include <linux/config.h>
 
#define fd_inb(port) inb_p(port)
#define fd_outb(port,value) outb_p(port,value)
 
#define fd_enable_dma() enable_dma(FLOPPY_DMA)
#define fd_disable_dma() disable_dma(FLOPPY_DMA)
#define fd_request_dma() request_dma(FLOPPY_DMA,"floppy")
#define fd_free_dma() free_dma(FLOPPY_DMA)
#define fd_clear_dma_ff() clear_dma_ff(FLOPPY_DMA)
#define fd_set_dma_mode(mode) set_dma_mode(FLOPPY_DMA,mode)
#define fd_set_dma_addr(addr) set_dma_addr(FLOPPY_DMA,addr)
#define fd_set_dma_count(count) set_dma_count(FLOPPY_DMA,count)
#define fd_enable_irq() enable_irq(FLOPPY_IRQ)
#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
#define fd_cacheflush(addr,size) /* nothing */
#define fd_request_irq() request_irq(FLOPPY_IRQ, floppy_interrupt, \
SA_INTERRUPT|SA_SAMPLE_RANDOM, \
"floppy", NULL)
#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL);
 
/* Per-architecture hook for virtual-DMA setup; empty on the alpha. */
__inline__ void virtual_dma_init(void)
{
	/* Nothing to do on an Alpha */
}
 
static int FDC1 = 0x3f0;
static int FDC2 = -1;
 
/*
* Again, the CMOS information doesn't work on the alpha..
*/
#define FLOPPY0_TYPE 6
#define FLOPPY1_TYPE 0
 
#define N_FDC 2
#define N_DRIVE 8
 
/*
* Most Alphas have no problems with floppy DMA crossing 64k borders. Sigh...
*/
#if defined(CONFIG_ALPHA_XL) || defined(CONFIG_ALPHA_RUFFIAN)
#define CROSS_64KB(a,s) \
((unsigned long)(a)/0x10000 != ((unsigned long)(a) + (s) - 1) / 0x10000)
#else /* XL || RUFFIAN */
#define CROSS_64KB(a,s) (0)
#endif /* XL || RUFFIAN */
 
#endif /* __ASM_ALPHA_FLOPPY_H */
/trunk/rc203soc/sw/uClinux/include/asm-alpha/ioctl.h
0,0 → 1,66
#ifndef _ALPHA_IOCTL_H
#define _ALPHA_IOCTL_H
 
/*
* The original linux ioctl numbering scheme was just a general
* "anything goes" setup, where more or less random numbers were
* assigned. Sorry, I was clueless when I started out on this.
*
* On the alpha, we'll try to clean it up a bit, using a more sane
* ioctl numbering, and also trying to be compatible with OSF/1 in
* the process. I'd like to clean it up for the i386 as well, but
* it's so painful recognizing both the new and the old numbers..
*/
 
#define _IOC_NRBITS 8
#define _IOC_TYPEBITS 8
#define _IOC_SIZEBITS 13
#define _IOC_DIRBITS 3
 
#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1)
#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1)
#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1)
#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1)
 
#define _IOC_NRSHIFT 0
#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS)
#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS)
#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS)
 
/*
* Direction bits _IOC_NONE could be 0, but OSF/1 gives it a bit.
* And this turns out useful to catch old ioctl numbers in header
* files for us.
*/
#define _IOC_NONE 1U
#define _IOC_READ 2U
#define _IOC_WRITE 4U
 
#define _IOC(dir,type,nr,size) \
((unsigned int) \
(((dir) << _IOC_DIRSHIFT) | \
((type) << _IOC_TYPESHIFT) | \
((nr) << _IOC_NRSHIFT) | \
((size) << _IOC_SIZESHIFT)))
 
/* used to create numbers */
#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0)
#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size))
#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size))
#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
 
/* used to decode them.. */
#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
 
/* ...and for the drivers/sound files... */
 
#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT)
#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT)
#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT)
#define IOCSIZE_SHIFT (_IOC_SIZESHIFT)
 
#endif /* _ALPHA_IOCTL_H */
/trunk/rc203soc/sw/uClinux/include/asm-alpha/stat.h
0,0 → 1,40
#ifndef _ALPHA_STAT_H
#define _ALPHA_STAT_H
 
struct old_stat {
unsigned int st_dev;
unsigned int st_ino;
unsigned int st_mode;
unsigned int st_nlink;
unsigned int st_uid;
unsigned int st_gid;
unsigned int st_rdev;
long st_size;
unsigned long st_atime;
unsigned long st_mtime;
unsigned long st_ctime;
unsigned int st_blksize;
int st_blocks;
unsigned int st_flags;
unsigned int st_gen;
};
 
struct new_stat {
unsigned int st_dev;
unsigned int st_ino;
unsigned int st_mode;
unsigned int st_nlink;
unsigned int st_uid;
unsigned int st_gid;
unsigned int st_rdev;
long st_size;
unsigned long st_atime;
unsigned long st_mtime;
unsigned long st_ctime;
unsigned int st_blksize;
int st_blocks;
unsigned int st_flags;
unsigned int st_gen;
};
 
#endif
/trunk/rc203soc/sw/uClinux/include/asm-alpha/page.h
0,0 → 1,59
#ifndef _ALPHA_PAGE_H
#define _ALPHA_PAGE_H
 
/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT 13
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
 
#ifdef __KERNEL__
 
#define STRICT_MM_TYPECHECKS
 
#ifdef STRICT_MM_TYPECHECKS
/*
* These are used to make use of C type-checking..
*/
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
 
#define pte_val(x) ((x).pte)
#define pmd_val(x) ((x).pmd)
#define pgd_val(x) ((x).pgd)
#define pgprot_val(x) ((x).pgprot)
 
#define __pte(x) ((pte_t) { (x) } )
#define __pgd(x) ((pgd_t) { (x) } )
#define __pgprot(x) ((pgprot_t) { (x) } )
 
#else
/*
* .. while these make it easier on the compiler
*/
typedef unsigned long pte_t;
typedef unsigned long pmd_t;
typedef unsigned long pgd_t;
typedef unsigned long pgprot_t;
 
#define pte_val(x) (x)
#define pmd_val(x) (x)
#define pgd_val(x) (x)
#define pgprot_val(x) (x)
 
#define __pte(x) (x)
#define __pgd(x) (x)
#define __pgprot(x) (x)
 
#endif
 
/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
 
#define PAGE_OFFSET 0xFFFFFC0000000000UL
#define MAP_NR(addr) ((((unsigned long) (addr)) - PAGE_OFFSET) >> PAGE_SHIFT)
 
#endif /* __KERNEL__ */
 
#endif /* _ALPHA_PAGE_H */
/trunk/rc203soc/sw/uClinux/include/asm-alpha/pyxis.h
0,0 → 1,742
#ifndef __ALPHA_PYXIS__H__
#define __ALPHA_PYXIS__H__
 
#include <linux/config.h>
#include <linux/types.h>
 
/*
* PYXIS is the internal name for a core logic chipset which provides
* memory controller and PCI access for the 21164A chip based systems.
*
* This file is based on:
*
* Pyxis Chipset Spec
* 14-Jun-96
* Rev. X2.0
*
*/
 
/*------------------------------------------------------------------------**
** **
** I/O procedures **
** **
** inport[b|w|t|l], outport[b|w|t|l] 8:16:24:32 IO xfers **
** inportbxt: 8 bits only **
** inport: alias of inportw **
** outport: alias of outportw **
** **
** inmem[b|w|t|l], outmem[b|w|t|l] 8:16:24:32 ISA memory xfers **
** inmembxt: 8 bits only **
** inmem: alias of inmemw **
** outmem: alias of outmemw **
** **
**------------------------------------------------------------------------*/
 
 
/* PYXIS ADDRESS BIT DEFINITIONS (layout inherited from the CIA chipset)
*
* 3 3 3 3|3 3 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
* 9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* |1| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |0|0|0|
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | \_/ \_/
* | | |
* +-- IO space, not cached. Byte Enable --+ |
* Transfer Length --+
*
*
*
* Byte Transfer
* Enable Length Transfer Byte Address
* adr<6:5> adr<4:3> Length Enable Adder
* ---------------------------------------------
* 00 00 Byte 1110 0x000
* 01 00 Byte 1101 0x020
* 10 00 Byte 1011 0x040
* 11 00 Byte 0111 0x060
*
* 00 01 Word 1100 0x008
* 01 01 Word 1001 0x028 <= Not supported in this code.
* 10 01 Word 0011 0x048
*
* 00 10 Tribyte 1000 0x010
* 01 10 Tribyte 0001 0x030
*
* 10 11 Longword 0000 0x058
*
* Note that byte enables are asserted low.
*
*/
 
#define BYTE_ENABLE_SHIFT 5
#define TRANSFER_LENGTH_SHIFT 3
 
#define MEM_R1_MASK 0x1fffffff /* SPARSE Mem region 1 mask is 29 bits */
#define MEM_R2_MASK 0x07ffffff /* SPARSE Mem region 2 mask is 27 bits */
#define MEM_R3_MASK 0x03ffffff /* SPARSE Mem region 3 mask is 26 bits */
 
#ifdef CONFIG_ALPHA_SRM_SETUP
/* if we are using the SRM PCI setup, we'll need to use variables instead */
#define PYXIS_DMA_WIN_BASE_DEFAULT (1024*1024*1024)
#define PYXIS_DMA_WIN_SIZE_DEFAULT (1024*1024*1024)
 
extern unsigned int PYXIS_DMA_WIN_BASE;
extern unsigned int PYXIS_DMA_WIN_SIZE;
 
#else /* SRM_SETUP */
#define PYXIS_DMA_WIN_BASE (1024*1024*1024)
#define PYXIS_DMA_WIN_SIZE (1024*1024*1024)
#endif /* SRM_SETUP */
 
/*
* General Registers
*/
#define PYXIS_REV (IDENT_ADDR + 0x8740000080UL)
#define PYXIS_PCI_LAT (IDENT_ADDR + 0x87400000C0UL)
#define PYXIS_CTRL (IDENT_ADDR + 0x8740000100UL)
#define PYXIS_CTRL1 (IDENT_ADDR + 0x8740000140UL)
#define PYXIS_FLASH_CTRL (IDENT_ADDR + 0x8740000200UL)
 
#define PYXIS_HAE_MEM (IDENT_ADDR + 0x8740000400UL)
#define PYXIS_HAE_IO (IDENT_ADDR + 0x8740000440UL)
#define PYXIS_CFG (IDENT_ADDR + 0x8740000480UL)
 
/*
* Diagnostic Registers
*/
#define PYXIS_DIAG (IDENT_ADDR + 0x8740002000UL)
#define PYXIS_DIAG_CHECK (IDENT_ADDR + 0x8740003000UL)
 
/*
* Performance Monitor registers
*/
#define PYXIS_PERF_MONITOR (IDENT_ADDR + 0x8740004000UL)
#define PYXIS_PERF_CONTROL (IDENT_ADDR + 0x8740004040UL)
 
/*
* Error registers
*/
#define PYXIS_ERR (IDENT_ADDR + 0x8740008200UL)
#define PYXIS_STAT (IDENT_ADDR + 0x8740008240UL)
#define PYXIS_ERR_MASK (IDENT_ADDR + 0x8740008280UL)
#define PYXIS_SYN (IDENT_ADDR + 0x8740008300UL)
#define PYXIS_ERR_DATA (IDENT_ADDR + 0x8740008308UL)
 
#define PYXIS_MEAR (IDENT_ADDR + 0x8740008400UL)
#define PYXIS_MESR (IDENT_ADDR + 0x8740008440UL)
#define PYXIS_PCI_ERR0 (IDENT_ADDR + 0x8740008800UL)
#define PYXIS_PCI_ERR1 (IDENT_ADDR + 0x8740008840UL)
#define PYXIS_PCI_ERR2 (IDENT_ADDR + 0x8740008880UL)
 
/*
* PCI Address Translation Registers.
*/
#define PYXIS_TBIA (IDENT_ADDR + 0x8760000100UL)
 
#define PYXIS_W0_BASE (IDENT_ADDR + 0x8760000400UL)
#define PYXIS_W0_MASK (IDENT_ADDR + 0x8760000440UL)
#define PYXIS_T0_BASE (IDENT_ADDR + 0x8760000480UL)
 
#define PYXIS_W1_BASE (IDENT_ADDR + 0x8760000500UL)
#define PYXIS_W1_MASK (IDENT_ADDR + 0x8760000540UL)
#define PYXIS_T1_BASE (IDENT_ADDR + 0x8760000580UL)
 
#define PYXIS_W2_BASE (IDENT_ADDR + 0x8760000600UL)
#define PYXIS_W2_MASK (IDENT_ADDR + 0x8760000640UL)
#define PYXIS_T2_BASE (IDENT_ADDR + 0x8760000680UL)
 
#define PYXIS_W3_BASE (IDENT_ADDR + 0x8760000700UL)
#define PYXIS_W3_MASK (IDENT_ADDR + 0x8760000740UL)
#define PYXIS_T3_BASE (IDENT_ADDR + 0x8760000780UL)
 
/*
* Memory Control registers
*/
#define PYXIS_MCR (IDENT_ADDR + 0x8750000000UL)
 
/*
* Memory spaces:
*/
#define PYXIS_IACK_SC (IDENT_ADDR + 0x8720000000UL)
#define PYXIS_CONF (IDENT_ADDR + 0x8700000000UL)
#define PYXIS_IO (IDENT_ADDR + 0x8580000000UL)
#define PYXIS_SPARSE_MEM (IDENT_ADDR + 0x8000000000UL)
#define PYXIS_SPARSE_MEM_R2 (IDENT_ADDR + 0x8400000000UL)
#define PYXIS_SPARSE_MEM_R3 (IDENT_ADDR + 0x8500000000UL)
#define PYXIS_DENSE_MEM (IDENT_ADDR + 0x8600000000UL)
 
/*
* Byte/Word PCI Memory Spaces:
*/
#define PYXIS_BW_MEM (IDENT_ADDR + 0x8800000000UL)
#define PYXIS_BW_IO (IDENT_ADDR + 0x8900000000UL)
#define PYXIS_BW_CFG_0 (IDENT_ADDR + 0x8a00000000UL)
#define PYXIS_BW_CFG_1 (IDENT_ADDR + 0x8b00000000UL)
 
/*
* Interrupt Control registers
*/
#define PYXIS_INT_REQ (IDENT_ADDR + 0x87A0000000UL)
#define PYXIS_INT_MASK (IDENT_ADDR + 0x87A0000040UL)
#define PYXIS_INT_HILO (IDENT_ADDR + 0x87A00000C0UL)
#define PYXIS_INT_ROUTE (IDENT_ADDR + 0x87A0000140UL)
#define PYXIS_GPO (IDENT_ADDR + 0x87A0000180UL)
#define PYXIS_INT_CNFG (IDENT_ADDR + 0x87A00001C0UL)
#define PYXIS_RT_COUNT (IDENT_ADDR + 0x87A0000200UL)
#define PYXIS_INT_TIME (IDENT_ADDR + 0x87A0000240UL)
#define PYXIS_IIC_CTRL (IDENT_ADDR + 0x87A00002C0UL)
 
/*
* Bit definitions for I/O Controller status register 0:
*/
#define PYXIS_STAT0_CMD 0xf
#define PYXIS_STAT0_ERR (1<<4)
#define PYXIS_STAT0_LOST (1<<5)
#define PYXIS_STAT0_THIT (1<<6)
#define PYXIS_STAT0_TREF (1<<7)
#define PYXIS_STAT0_CODE_SHIFT 8
#define PYXIS_STAT0_CODE_MASK 0x7
#define PYXIS_STAT0_P_NBR_SHIFT 13
#define PYXIS_STAT0_P_NBR_MASK 0x7ffff
 
#define HAE_ADDRESS PYXIS_HAE_MEM
 
#ifdef __KERNEL__
 
/*
* Translate physical memory address as seen on (PCI) bus into
* a kernel virtual address and vv.
*/
/*
 * The PCI bus address is the physical address offset by the base of
 * the PYXIS DMA window through which devices see system memory.
 * The Ruffian variant kept here under "#if 0" assumed no 1G window;
 * current firmware does provide one, so Ruffian uses the common form.
 */
#if defined(CONFIG_ALPHA_RUFFIAN)
#if 0
/* Ruffian doesn't do 1G PCI window */
extern inline unsigned long virt_to_bus(void * address)
{
	return virt_to_phys(address);
}

extern inline void * bus_to_virt(unsigned long address)
{
	return phys_to_virt(address);
}
#else
/* Oh, yes, it does (at least with the latest FW) */
extern inline unsigned long virt_to_bus(void * address)
{
	return virt_to_phys(address) + PYXIS_DMA_WIN_BASE;
}

extern inline void * bus_to_virt(unsigned long address)
{
	return phys_to_virt(address - PYXIS_DMA_WIN_BASE);
}
#endif
#else /* RUFFIAN */
extern inline unsigned long virt_to_bus(void * address)
{
	return virt_to_phys(address) + PYXIS_DMA_WIN_BASE;
}

extern inline void * bus_to_virt(unsigned long address)
{
	return phys_to_virt(address - PYXIS_DMA_WIN_BASE);
}
#endif /* RUFFIAN */
 
/*
* I/O functions:
*
* PYXIS, the 21174 PCI/memory support chipset for the EV56 (21164)
* and PCA56 (21164PC) processors, can use either a sparse address
* mapping scheme, or the so-called byte-word PCI address space, to
* get at PCI memory and I/O.
*/
 
#define vuip volatile unsigned int *
 
#if defined(BWIO_ENABLED)
# if defined(CONFIG_ALPHA_LX164) || \
defined(CONFIG_ALPHA_SX164)
/* only for the above platforms can we be sure this will work */
# define BWIO_REALLY_ENABLED
# else
# undef BWIO_REALLY_ENABLED
# endif
#else
# undef BWIO_REALLY_ENABLED
#endif
 
#ifdef BWIO_REALLY_ENABLED
 
/*
 * Byte/word-space I/O accessors: direct loads/stores through the
 * PYXIS byte/word I/O window using the byte and word instructions
 * (ldbu/stb/ldwu/stw).  Each write is followed by a memory barrier
 * so stores reach the device in program order.
 */
extern inline unsigned int __inb(unsigned long addr)
{
	register unsigned long result;

	__asm__ __volatile__ (
		"ldbu %0,%1"
		: "=r" (result)
		: "m" (*(unsigned char *)(addr+PYXIS_BW_IO)));

	return result;
}

/* Write one byte to I/O space, then barrier. */
extern inline void __outb(unsigned char b, unsigned long addr)
{
	__asm__ __volatile__ (
		"stb %1,%0\n\t"
		"mb"
		: : "m" (*(unsigned char *)(addr+PYXIS_BW_IO)), "r" (b));
}

/* Read one 16-bit word from I/O space. */
extern inline unsigned int __inw(unsigned long addr)
{
	register unsigned long result;

	__asm__ __volatile__ (
		"ldwu %0,%1"
		: "=r" (result)
		: "m" (*(unsigned short *)(addr+PYXIS_BW_IO)));

	return result;
}

/* Write one 16-bit word to I/O space, then barrier. */
extern inline void __outw(unsigned short b, unsigned long addr)
{
	__asm__ __volatile__ (
		"stw %1,%0\n\t"
		"mb"
		: : "m" (*(unsigned short *)(addr+PYXIS_BW_IO)), "r" (b));
}

/* Read one 32-bit longword from I/O space. */
extern inline unsigned int __inl(unsigned long addr)
{
	register unsigned long result;

	__asm__ __volatile__ (
		"ldl %0,%1"
		: "=r" (result)
		: "m" (*(unsigned int *)(addr+PYXIS_BW_IO)));

	return result;
}

/* Write one 32-bit longword to I/O space, then barrier. */
extern inline void __outl(unsigned int b, unsigned long addr)
{
	__asm__ __volatile__ (
		"stl %1,%0\n\t"
		"mb"
		: : "m" (*(unsigned int *)(addr+PYXIS_BW_IO)), "r" (b));
}
 
#define inb(port) __inb((port))
#define inw(port) __inw((port))
#define inl(port) __inl((port))
 
#define outb(x, port) __outb((x),(port))
#define outw(x, port) __outw((x),(port))
#define outl(x, port) __outl((x),(port))
 
#else /* BWIO_REALLY_ENABLED */
 
/*
 * Sparse-space byte read: load the longword at the sparse-encoded
 * address; the wanted byte sits in lane (addr & 3), so shift it
 * down and mask to 8 bits.
 */
extern inline unsigned int __inb(unsigned long addr)
{
	long result = *(vuip) ((addr << 5) + PYXIS_IO + 0x00);
	result >>= (addr & 3) * 8;
	return 0xffUL & result;
}
 
/*
 * Sparse-space byte write: insbl positions the byte in lane
 * (addr & 3) of a longword, which is then stored at the sparse-
 * encoded address, followed by a memory barrier.
 * Fix: the asm output constraint must be "=r" -- in gcc extended
 * asm the '=' modifier has to come first; "r=" is malformed.
 */
extern inline void __outb(unsigned char b, unsigned long addr)
{
	unsigned int w;

	asm ("insbl %2,%1,%0" : "=r"(w) : "ri"(addr & 0x3), "r"(b));
	*(vuip) ((addr << 5) + PYXIS_IO + 0x00) = w;
	mb();
}
 
/* Word read from PCI I/O via sparse space: word-length (+0x08)
   sparse access, then extract the addressed 16-bit lane.  */
extern inline unsigned int __inw(unsigned long addr)
{
	unsigned long lane_shift = (addr & 3) * 8;
	long data;

	data = *(vuip) ((addr << 5) + PYXIS_IO + 0x08);
	return (data >> lane_shift) & 0xffffUL;
}
 
/* Word write to PCI I/O via sparse space: inswl places b into the
   word lane selected by addr bits 1:0, then a word-length (+0x08)
   sparse store pushes it out; mb orders the store.
   Fix: the asm output constraint was written "r=" — the '=' modifier
   must be the first character of the constraint string ("=r").  */
extern inline void __outw(unsigned short b, unsigned long addr)
{
	unsigned int w;

	asm ("inswl %2,%1,%0" : "=r"(w) : "ri"(addr & 0x3), "r"(b));
	*(vuip) ((addr << 5) + PYXIS_IO + 0x08) = w;
	mb();
}
 
extern inline unsigned int __inl(unsigned long addr)
{
return *(vuip) ((addr << 5) + PYXIS_IO + 0x18);
}
 
extern inline void __outl(unsigned int b, unsigned long addr)
{
*(vuip) ((addr << 5) + PYXIS_IO + 0x18) = b;
mb();
}
 
#define inb(port) \
(__builtin_constant_p((port))?__inb(port):_inb(port))
 
#define outb(x, port) \
(__builtin_constant_p((port))?__outb((x),(port)):_outb((x),(port)))
 
#endif /* BWIO_REALLY_ENABLED */
 
 
/*
* Memory functions. 64-bit and 32-bit accesses are done through
* dense memory space, everything else through sparse space.
*
* For reading and writing 8 and 16 bit quantities we need to
* go through one of the three sparse address mapping regions
* and use the HAE_MEM CSR to provide some bits of the address.
* The following few routines use only sparse address region 1
* which gives 1Gbyte of accessible space which relates exactly
* to the amount of PCI memory mapping *into* system address space.
* See p 6-17 of the specification but it looks something like this:
*
* 21164 Address:
*
* 3 2 1
* 9876543210987654321098765432109876543210
* 1ZZZZ0.PCI.QW.Address............BBLL
*
* ZZ = SBZ
* BB = Byte offset
* LL = Transfer length
*
* PCI Address:
*
* 3 2 1
* 10987654321098765432109876543210
* HHH....PCI.QW.Address........ 00
*
* HHH = 31:29 HAE_MEM CSR
*
*/
 
#ifdef BWIO_REALLY_ENABLED
 
/* Read one byte from PCI memory space through the PYXIS byte/word
   window (ldbu via PYXIS_BW_MEM = true byte access).  */
extern inline unsigned long __readb(unsigned long addr)
{
	register unsigned long result;

	__asm__ __volatile__ (
		"ldbu %0,%1"
		: "=r" (result)
		: "m" (*(unsigned char *)(addr+PYXIS_BW_MEM)));

	return result;
}
 
/* Read one 16-bit word from PCI memory space through the
   byte/word window.  */
extern inline unsigned long __readw(unsigned long addr)
{
	register unsigned long result;

	__asm__ __volatile__ (
		"ldwu %0,%1"
		: "=r" (result)
		: "m" (*(unsigned short *)(addr+PYXIS_BW_MEM)));

	return result;
}
 
/* Read one 32-bit longword from PCI memory space through the
   byte/word window.  */
extern inline unsigned long __readl(unsigned long addr)
{
	register unsigned long result;

	__asm__ __volatile__ (
		"ldl %0,%1"
		: "=r" (result)
		: "m" (*(unsigned int *)(addr+PYXIS_BW_MEM)));

	return result;
}
 
/* Write one byte to PCI memory space through the byte/word window;
   mb orders the store before subsequent accesses.  */
extern inline void __writeb(unsigned char b, unsigned long addr)
{
	__asm__ __volatile__ (
		"stb %1,%0\n\t"
		"mb"
		: : "m" (*(unsigned char *)(addr+PYXIS_BW_MEM)), "r" (b));
}
 
/* Write one 16-bit word to PCI memory space through the byte/word
   window; mb orders the store.  */
extern inline void __writew(unsigned short b, unsigned long addr)
{
	__asm__ __volatile__ (
		"stw %1,%0\n\t"
		"mb"
		: : "m" (*(unsigned short *)(addr+PYXIS_BW_MEM)), "r" (b));
}
 
/* Write one 32-bit longword to PCI memory space through the
   byte/word window; mb orders the store.  */
extern inline void __writel(unsigned int b, unsigned long addr)
{
	__asm__ __volatile__ (
		"stl %1,%0\n\t"
		"mb"
		: : "m" (*(unsigned int *)(addr+PYXIS_BW_MEM)), "r" (b));
}
 
#define readb(addr) __readb((addr))
#define readw(addr) __readw((addr))
 
#define writeb(b, addr) __writeb((b),(addr))
#define writew(b, addr) __writew((b),(addr))
 
#else /* BWIO_REALLY_ENABLED */
 
#ifdef CONFIG_ALPHA_SRM_SETUP
 
extern unsigned long pyxis_sm_base_r1, pyxis_sm_base_r2, pyxis_sm_base_r3;
 
/* Sparse-space byte read for SRM-style fixed HAE setup: choose the
   sparse mapping region (1..3) whose window covers 'addr', perform a
   byte-length (+0x00) sparse access, and extract the byte lane
   selected by addr bits 1:0.  Addresses outside all three windows
   fail soft, reading back as all-ones.  */
extern inline unsigned long __readb(unsigned long addr)
{
	unsigned long result, shift, work;

	if ((addr >= pyxis_sm_base_r1) &&
	    (addr <= (pyxis_sm_base_r1 + MEM_R1_MASK)))
		work = (((addr & MEM_R1_MASK) << 5) + PYXIS_SPARSE_MEM + 0x00);
	else
	if ((addr >= pyxis_sm_base_r2) &&
	    (addr <= (pyxis_sm_base_r2 + MEM_R2_MASK)))
		work = (((addr & MEM_R2_MASK) << 5) + PYXIS_SPARSE_MEM_R2 + 0x00);
	else
	if ((addr >= pyxis_sm_base_r3) &&
	    (addr <= (pyxis_sm_base_r3 + MEM_R3_MASK)))
		work = (((addr & MEM_R3_MASK) << 5) + PYXIS_SPARSE_MEM_R3 + 0x00);
	else
	{
#if 0
		printk("__readb: address 0x%lx not covered by HAE\n", addr);
#endif
		return 0x0ffUL;	/* fail soft with all-ones data */
	}
	shift = (addr & 0x3) << 3;	/* byte lane -> bit offset */
	result = *(vuip) work;
	result >>= shift;
	return 0x0ffUL & result;
}
 
/* Sparse-space word (16-bit) read for SRM-style fixed HAE setup:
   choose the sparse mapping region (1..3) whose window covers 'addr',
   perform a word-length (+0x08) sparse access, and extract the word
   lane selected by addr bits 1:0.  Addresses outside all three
   windows fail soft, reading back as all-ones.
   Fix: the out-of-range path returned 0x0ffUL (a byte mask); a word
   read should fail soft as 16 bits of ones, 0x0ffffUL, matching the
   mask applied on the success path.  */
extern inline unsigned long __readw(unsigned long addr)
{
	unsigned long result, shift, work;

	if ((addr >= pyxis_sm_base_r1) &&
	    (addr <= (pyxis_sm_base_r1 + MEM_R1_MASK)))
		work = (((addr & MEM_R1_MASK) << 5) + PYXIS_SPARSE_MEM + 0x08);
	else
	if ((addr >= pyxis_sm_base_r2) &&
	    (addr <= (pyxis_sm_base_r2 + MEM_R2_MASK)))
		work = (((addr & MEM_R2_MASK) << 5) + PYXIS_SPARSE_MEM_R2 + 0x08);
	else
	if ((addr >= pyxis_sm_base_r3) &&
	    (addr <= (pyxis_sm_base_r3 + MEM_R3_MASK)))
		work = (((addr & MEM_R3_MASK) << 5) + PYXIS_SPARSE_MEM_R3 + 0x08);
	else
	{
#if 0
		printk("__readw: address 0x%lx not covered by HAE\n", addr);
#endif
		return 0x0ffffUL;	/* fail soft with all-ones word */
	}
	shift = (addr & 0x3) << 3;	/* word lane -> bit offset */
	result = *(vuip) work;
	result >>= shift;
	return 0x0ffffUL & result;
}
 
/* Sparse-space byte write for SRM-style fixed HAE setup: choose the
   covering sparse region, then issue a byte-length (+0x00) sparse
   store.  The byte is replicated across all four lanes; the address
   BB bits select which lane reaches the device.  Writes to addresses
   outside all three windows are silently dropped.  */
extern inline void __writeb(unsigned char b, unsigned long addr)
{
	unsigned long work;

	if ((addr >= pyxis_sm_base_r1) &&
	    (addr <= (pyxis_sm_base_r1 + MEM_R1_MASK)))
		work = (((addr & MEM_R1_MASK) << 5) + PYXIS_SPARSE_MEM + 0x00);
	else
	if ((addr >= pyxis_sm_base_r2) &&
	    (addr <= (pyxis_sm_base_r2 + MEM_R2_MASK)))
		work = (((addr & MEM_R2_MASK) << 5) + PYXIS_SPARSE_MEM_R2 + 0x00);
	else
	if ((addr >= pyxis_sm_base_r3) &&
	    (addr <= (pyxis_sm_base_r3 + MEM_R3_MASK)))
		work = (((addr & MEM_R3_MASK) << 5) + PYXIS_SPARSE_MEM_R3 + 0x00);
	else
	{
#if 0
		printk("__writeb: address 0x%lx not covered by HAE\n", addr);
#endif
		return;
	}
	*(vuip) work = b * 0x01010101;	/* replicate byte into every lane */
}
 
/* Sparse-space word (16-bit) write for SRM-style fixed HAE setup:
   choose the covering sparse region, then issue a word-length sparse
   store.  The word is replicated into both lanes; the address BB bits
   select which lane reaches the device.  Writes outside all three
   windows are silently dropped.
   Fix: the sparse offsets used +0x00, the byte transfer-length
   encoding.  Word transfers use +0x08, as __readw above and the
   non-SRM __writew variant already do.  */
extern inline void __writew(unsigned short b, unsigned long addr)
{
	unsigned long work;

	if ((addr >= pyxis_sm_base_r1) &&
	    (addr <= (pyxis_sm_base_r1 + MEM_R1_MASK)))
		work = (((addr & MEM_R1_MASK) << 5) + PYXIS_SPARSE_MEM + 0x08);
	else
	if ((addr >= pyxis_sm_base_r2) &&
	    (addr <= (pyxis_sm_base_r2 + MEM_R2_MASK)))
		work = (((addr & MEM_R2_MASK) << 5) + PYXIS_SPARSE_MEM_R2 + 0x08);
	else
	if ((addr >= pyxis_sm_base_r3) &&
	    (addr <= (pyxis_sm_base_r3 + MEM_R3_MASK)))
		work = (((addr & MEM_R3_MASK) << 5) + PYXIS_SPARSE_MEM_R3 + 0x08);
	else
	{
#if 0
		printk("__writew: address 0x%lx not covered by HAE\n", addr);
#endif
		return;
	}
	*(vuip) work = b * 0x00010001;	/* replicate word into both lanes */
}
 
#else /* SRM_SETUP */
 
/* Sparse-space byte read, dynamic-HAE variant: reprogram HAE_MEM
   when the high address bits change, then do a byte-length (+0x00)
   sparse access through region 1 and extract the addressed lane.  */
extern inline unsigned long __readb(unsigned long addr)
{
	unsigned long hae_bits = addr & 0xE0000000UL;
	unsigned long offset = addr & MEM_R1_MASK;
	unsigned long data;

	/* Only touch the HAE_MEM CSR when the window actually moves. */
	if (hae_bits != hae.cache)
		set_hae(hae_bits);

	data = *(vuip) ((offset << 5) + PYXIS_SPARSE_MEM + 0x00);
	return (data >> ((addr & 0x3) << 3)) & 0x0ffUL;
}
 
/* Sparse-space word read, dynamic-HAE variant: reprogram HAE_MEM
   when the high address bits change, then do a word-length (+0x08)
   sparse access through region 1 and extract the addressed lane.  */
extern inline unsigned long __readw(unsigned long addr)
{
	unsigned long hae_bits = addr & 0xE0000000UL;
	unsigned long offset = addr & MEM_R1_MASK;
	unsigned long data;

	/* Only touch the HAE_MEM CSR when the window actually moves. */
	if (hae_bits != hae.cache)
		set_hae(hae_bits);

	data = *(vuip) ((offset << 5) + PYXIS_SPARSE_MEM + 0x08);
	return (data >> ((addr & 0x3) << 3)) & 0x0ffffUL;
}
 
/* Sparse-space byte write, dynamic-HAE variant: reprogram HAE_MEM if
   needed, replicate the byte into every lane, and issue a byte-length
   (+0x00) sparse store through region 1.  */
extern inline void __writeb(unsigned char b, unsigned long addr)
{
	unsigned long hae_bits = addr & 0xE0000000;

	if (hae_bits != hae.cache)
		set_hae(hae_bits);

	*(vuip) (((addr & MEM_R1_MASK) << 5) + PYXIS_SPARSE_MEM + 0x00) =
		b * 0x01010101;
}
 
/* Sparse-space word write, dynamic-HAE variant: reprogram HAE_MEM if
   needed, replicate the word into both lanes, and issue a word-length
   (+0x08) sparse store through region 1.  */
extern inline void __writew(unsigned short b, unsigned long addr)
{
	unsigned long hae_bits = addr & 0xE0000000;

	if (hae_bits != hae.cache)
		set_hae(hae_bits);

	*(vuip) (((addr & MEM_R1_MASK) << 5) + PYXIS_SPARSE_MEM + 0x08) =
		b * 0x00010001;
}
#endif /* SRM_SETUP */
 
extern inline unsigned long __readl(unsigned long addr)
{
return *(vuip) (addr + PYXIS_DENSE_MEM);
}
 
extern inline void __writel(unsigned int b, unsigned long addr)
{
*(vuip) (addr + PYXIS_DENSE_MEM) = b;
}
 
#endif /* BWIO_REALLY_ENABLED */
 
#define readl(a) __readl((unsigned long)(a))
#define writel(v,a) __writel((v),(unsigned long)(a))
 
#undef vuip
 
extern unsigned long pyxis_init (unsigned long mem_start,
unsigned long mem_end);
 
#endif /* __KERNEL__ */
 
/*
* Data structure for handling PYXIS machine checks:
*/
struct el_PYXIS_sysdata_mcheck {
u_long coma_gcr;
u_long coma_edsr;
u_long coma_ter;
u_long coma_elar;
u_long coma_ehar;
u_long coma_ldlr;
u_long coma_ldhr;
u_long coma_base0;
u_long coma_base1;
u_long coma_base2;
u_long coma_cnfg0;
u_long coma_cnfg1;
u_long coma_cnfg2;
u_long epic_dcsr;
u_long epic_pear;
u_long epic_sear;
u_long epic_tbr1;
u_long epic_tbr2;
u_long epic_pbr1;
u_long epic_pbr2;
u_long epic_pmr1;
u_long epic_pmr2;
u_long epic_harx1;
u_long epic_harx2;
u_long epic_pmlt;
u_long epic_tag0;
u_long epic_tag1;
u_long epic_tag2;
u_long epic_tag3;
u_long epic_tag4;
u_long epic_tag5;
u_long epic_tag6;
u_long epic_tag7;
u_long epic_data0;
u_long epic_data1;
u_long epic_data2;
u_long epic_data3;
u_long epic_data4;
u_long epic_data5;
u_long epic_data6;
u_long epic_data7;
};
 
#define RTC_PORT(x) (0x70 + (x))
#define RTC_ADDR(x) (0x80 | (x))
#ifdef CONFIG_ALPHA_RUFFIAN
#define RTC_ALWAYS_BCD 1
#else /* RUFFIAN */
#define RTC_ALWAYS_BCD 0
#endif /* RUFFIAN */
#endif /* __ALPHA_PYXIS__H__ */
/trunk/rc203soc/sw/uClinux/include/asm-alpha/user.h
0,0 → 1,52
#ifndef _ALPHA_USER_H
#define _ALPHA_USER_H
 
#include <linux/ptrace.h>
 
#include <asm/page.h>
#include <asm/reg.h>
 
/*
* Core file format: The core file is written in such a way that gdb
* can understand it and provide useful information to the user (under
* linux we use the `trad-core' bfd, NOT the osf-core). The file contents
* are as follows:
*
* upage: 1 page consisting of a user struct that tells gdb
* what is present in the file. Directly after this is a
* copy of the task_struct, which is currently not used by gdb,
* but it may come in handy at some point. All of the registers
* are stored as part of the upage. The upage should always be
* only one page long.
* data: The data segment follows next. We use current->end_text to
* current->brk to pick up all of the user variables, plus any memory
* that may have been sbrk'ed. No attempt is made to determine if a
* page is demand-zero or if a page is totally unused, we just cover
* the entire range. All of the addresses are rounded in such a way
* that an integral number of pages is written.
* stack: We need the stack information in order to get a meaningful
* backtrace. We need to write the data from usp to
* current->start_stack, so we round each of these in order to be able
* to write an integer number of pages.
*/
struct user {
unsigned long regs[EF_SIZE/8+32]; /* integer and fp regs */
size_t u_tsize; /* text size (pages) */
size_t u_dsize; /* data size (pages) */
size_t u_ssize; /* stack size (pages) */
unsigned long start_code; /* text starting address */
unsigned long start_data; /* data starting address */
unsigned long start_stack; /* stack starting address */
long int signal; /* signal causing core dump */
struct regs * u_ar0; /* help gdb find registers */
unsigned long magic; /* identifies a core file */
char u_comm[32]; /* user command name */
};
 
#define NBPG PAGE_SIZE
#define UPAGES 1
#define HOST_TEXT_START_ADDR (u.start_code)
#define HOST_DATA_START_ADDR (u.start_data)
#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
 
#endif /* _ALPHA_USER_H */
/trunk/rc203soc/sw/uClinux/include/asm-alpha/processor.h
0,0 → 1,97
/*
* include/asm-alpha/processor.h
*
* Copyright (C) 1994 Linus Torvalds
*/
 
#ifndef __ASM_ALPHA_PROCESSOR_H
#define __ASM_ALPHA_PROCESSOR_H
 
/*
* We have a 41-bit user address space: 2TB user VM.
* Under certain circumstances (e.g. when emulating 32-bit code)
* we may want to voluntarily limit this...
*/
#define TASK_SIZE (0x40000000000UL)
#define MAX_USER_ADDR ((current->personality&ADDR_MAX_32BIT) ? 0x100000000UL : \
((current->personality & ADDR_MAX_31BIT) ? 0x80000000UL : \
0x40000000000UL))
#define MMAP_SEARCH_START ((current->personality & ADDR_MAX_31BIT) ? \
(MAX_USER_ADDR/2) : (MAX_USER_ADDR/3))
 
 
/*
* Bus types
*/
#define EISA_bus 1
#define EISA_bus__is_a_macro /* for versions in ksyms.c */
#define MCA_bus 0
#define MCA_bus__is_a_macro /* for versions in ksyms.c */
 
/*
* The alpha has no problems with write protection
*/
#define wp_works_ok 1
#define wp_works_ok__is_a_macro /* for versions in ksyms.c */
 
struct thread_struct {
/* the fields below are used by PALcode and must match struct pcb: */
unsigned long ksp;
unsigned long usp;
unsigned long ptbr;
unsigned int pcc;
unsigned int asn;
unsigned long unique;
/*
* bit 0: floating point enable
* bit 62: performance monitor enable
*/
unsigned long pal_flags;
unsigned long res1, res2;
 
/* the fields below are Linux-specific: */
/*
* bit 0: perform syscall argument validation (get/set_fs)
* bit 1..5: IEEE_TRAP_ENABLE bits (see fpu.h)
* bit 17..21: IEEE_STATUS bits (see fpu.h)
*/
unsigned long flags;
};
 
#define INIT_MMAP { &init_mm, 0xfffffc0000000000, 0xfffffc0010000000, \
PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC }
 
#define INIT_TSS { \
0, 0, 0, \
0, 0, 0, \
0, 0, 0, \
0 \
}
 
#define alloc_kernel_stack() __get_free_page(GFP_KERNEL)
#define free_kernel_stack(page) free_page((page))
 
#include <asm/ptrace.h>
 
/*
* Return saved PC of a blocked thread. This assumes the frame
* pointer is the 6th saved long on the kernel stack and that the
* saved return address is the first long in the frame. This all
* holds provided the thread blocked through a call to schedule() ($15
* is the frame pointer in schedule() and $15 is saved at offset 48 by
* entry.S:do_switch_stack).
*/
extern inline unsigned long thread_saved_pc(struct thread_struct *t)
{
	/* $15 (the frame pointer) is the 7th long saved on the kernel
	   stack; the saved return address is the first long in that
	   frame (see the comment block above).  */
	unsigned long *frame = (unsigned long *) ((unsigned long *) t->ksp)[6];

	return frame[0];
}
 
/*
* Do necessary setup to start up a newly executed thread.
*/
extern void start_thread(struct pt_regs *, unsigned long, unsigned long);
 
#endif /* __ASM_ALPHA_PROCESSOR_H */
/trunk/rc203soc/sw/uClinux/include/asm-alpha/hwrpb.h
0,0 → 1,187
#ifndef _HWRPB_H
#define _HWRPB_H
 
#define INIT_HWRPB ((struct hwrpb_struct *) 0x10000000)
 
/*
* DEC processor types for Alpha systems. Found in HWRPB.
* These values are architected.
*/
 
#define EV3_CPU 1 /* EV3 */
#define EV4_CPU 2 /* EV4 (21064) */
#define LCA4_CPU 4 /* LCA4 (21066/21068) */
#define EV5_CPU 5 /* EV5 (21164) */
#define EV45_CPU 6 /* EV4.5 (21064/xxx) */
#define EV56_CPU 7 /* EV5.6 (21164) */
#define EV6_CPU 8 /* EV6 (21264) */
#define PCA56_CPU 9 /* PCA56 (21164PC) */
#define PCA57_CPU 10 /* PCA57 (21164??) */
 
/*
* DEC system types for Alpha systems. Found in HWRPB.
* These values are architected.
*/
 
#define ST_ADU 1 /* Alpha ADU systype */
#define ST_DEC_4000 2 /* Cobra systype */
#define ST_DEC_7000 3 /* Ruby systype */
#define ST_DEC_3000_500 4 /* Flamingo systype */
#define ST_DEC_2000_300 6 /* Jensen systype */
#define ST_DEC_3000_300 7 /* Pelican systype */
#define ST_DEC_2100_A500 9 /* Sable systype */
#define ST_DEC_AXPVME_64 10 /* AXPvme system type */
#define ST_DEC_AXPPCI_33 11 /* NoName system type */
#define ST_DEC_TLASER 12 /* Turbolaser systype */
#define ST_DEC_2100_A50 13 /* Avanti systype */
#define ST_DEC_ALCOR 15 /* Alcor (EV5) systype */
#define ST_DEC_1000 17 /* Mikasa systype */
#define ST_DEC_EB64 18 /* EB64 systype */
#define ST_DEC_EB66 19 /* EB66 systype */
#define ST_DEC_EB64P 20 /* EB64+ systype */
#define ST_DEC_BURNS 21 /* laptop systype */
#define ST_DEC_RAWHIDE 22 /* Rawhide systype */
#define ST_DEC_K2 23 /* K2 systype */
#define ST_DEC_LYNX 24 /* Lynx systype */
#define ST_DEC_XL 25 /* Alpha XL systype */
#define ST_DEC_EB164 26 /* EB164 systype */
#define ST_DEC_NORITAKE 27 /* Noritake systype */
#define ST_DEC_CORTEX 28 /* Cortex systype */
#define ST_DEC_MIATA 30 /* Miata systype */
#define ST_DEC_XXM 31 /* XXM systype */
#define ST_DEC_TAKARA 32 /* Takara systype */
#define ST_DEC_YUKON 33 /* Yukon systype */
#define ST_DEC_TSUNAMI 34 /* Tsunami systype */
#define ST_DEC_WILDFIRE 35 /* Wildfire systype */
#define ST_DEC_CUSCO 36 /* CUSCO systype */
 
/* UNOFFICIAL!!! */
#define ST_UNOFFICIAL_BIAS 100
#define ST_DTI_RUFFIAN 101 /* RUFFIAN systype */
 
struct pcb_struct {
unsigned long ksp;
unsigned long usp;
unsigned long ptbr;
unsigned int pcc;
unsigned int asn;
unsigned long unique;
unsigned long flags;
unsigned long res1, res2;
};
 
struct percpu_struct {
unsigned long hwpcb[16];
unsigned long flags;
unsigned long pal_mem_size;
unsigned long pal_scratch_size;
unsigned long pal_mem_pa;
unsigned long pal_scratch_pa;
unsigned long pal_revision;
unsigned long type;
unsigned long variation;
unsigned long revision;
unsigned long serial_no[2];
unsigned long logout_area_pa;
unsigned long logout_area_len;
unsigned long halt_PCBB;
unsigned long halt_PC;
unsigned long halt_PS;
unsigned long halt_arg;
unsigned long halt_ra;
unsigned long halt_pv;
unsigned long halt_reason;
unsigned long res;
unsigned long ipc_buffer[21];
unsigned long palcode_avail[16];
unsigned long compatibility;
};
 
struct procdesc_struct {
unsigned long weird_vms_stuff;
unsigned long address;
};
 
struct vf_map_struct {
unsigned long va;
unsigned long pa;
unsigned long count;
};
 
struct crb_struct {
struct procdesc_struct * dispatch_va;
struct procdesc_struct * dispatch_pa;
struct procdesc_struct * fixup_va;
struct procdesc_struct * fixup_pa;
/* virtual->physical map */
unsigned long map_entries;
unsigned long map_pages;
struct vf_map_struct map[1];
};
 
struct memclust_struct {
unsigned long start_pfn;
unsigned long numpages;
unsigned long numtested;
unsigned long bitmap_va;
unsigned long bitmap_pa;
unsigned long bitmap_chksum;
unsigned long usage;
};
 
struct memdesc_struct {
unsigned long chksum;
unsigned long optional_pa;
unsigned long numclusters;
struct memclust_struct cluster[0];
};
 
struct dsr_struct {
long smm; /* SMM number used by LMF */
unsigned long lurt_off; /* offset to LURT table */
unsigned long sysname_off; /* offset to sysname char count */
};
 
struct hwrpb_struct {
unsigned long phys_addr; /* check: physical address of the hwrpb */
unsigned long id; /* check: "HWRPB\0\0\0" */
unsigned long revision;
unsigned long size; /* size of hwrpb */
unsigned long cpuid;
unsigned long pagesize; /* 8192, I hope */
unsigned long pa_bits; /* number of physical address bits */
unsigned long max_asn;
unsigned char ssn[16]; /* system serial number: big brother is watching */
unsigned long sys_type;
unsigned long sys_variation;
unsigned long sys_revision;
unsigned long intr_freq; /* interval clock frequency * 4096 */
unsigned long cycle_freq; /* cycle counter frequency */
unsigned long vptb; /* Virtual Page Table Base address */
unsigned long res1;
unsigned long tbhb_offset; /* Translation Buffer Hint Block */
unsigned long nr_processors;
unsigned long processor_size;
unsigned long processor_offset;
unsigned long ctb_nr;
unsigned long ctb_size; /* console terminal block size */
unsigned long ctbt_offset; /* console terminal block table offset */
unsigned long crb_offset; /* console callback routine block */
unsigned long mddt_offset; /* memory data descriptor table */
unsigned long cdb_offset; /* configuration data block (or NULL) */
unsigned long frut_offset; /* FRU table (or NULL) */
void (*save_terminal)(unsigned long);
unsigned long save_terminal_data;
void (*restore_terminal)(unsigned long);
unsigned long restore_terminal_data;
void (*CPU_restart)(unsigned long);
unsigned long CPU_restart_data;
unsigned long res2;
unsigned long res3;
unsigned long chksum;
unsigned long rxrdy;
unsigned long txrdy;
unsigned long dsr_offset; /* "Dynamic System Recognition Data Block Table" */
};
 
#endif
/trunk/rc203soc/sw/uClinux/include/asm-alpha/semaphore.h
0,0 → 1,68
#ifndef _ALPHA_SEMAPHORE_H
#define _ALPHA_SEMAPHORE_H
 
/*
* SMP- and interrupt-safe semaphores..
*
* (C) Copyright 1996 Linus Torvalds
*/
 
#include <asm/atomic.h>
#include <asm/system.h>
 
struct semaphore {
atomic_t count;
atomic_t waking;
int lock; /* to make waking testing atomic */
struct wait_queue * wait;
};
 
#define MUTEX ((struct semaphore) { 1, 0, 0, NULL })
#define MUTEX_LOCKED ((struct semaphore) { 0, 0, 0, NULL })
 
extern void __down(struct semaphore * sem);
extern int __down_interruptible(struct semaphore * sem);
extern void __up(struct semaphore * sem);
 
/*
* This isn't quite as clever as the x86 side, but the gp register
* makes things a bit more complicated on the alpha..
*/
/* Acquire the semaphore: atomically drop the count and take the
   slow path (__down) only when it goes negative, i.e. on contention. */
extern inline void down(struct semaphore * sem)
{
	int newcount = atomic_dec_return(&sem->count);

	if (newcount < 0)
		__down(sem);
}
 
/*
* Primitives to spin on a lock. Needed only for SMP version.
*/
/* Busy-wait until we own the lock word (SMP builds only; compiles
   to a no-op on uniprocessor kernels).  */
extern inline void get_buzz_lock(int *lock_ptr)
{
#ifdef __SMP__
	while (xchg(lock_ptr,1) != 0) ;
#endif
} /* get_buzz_lock */
 
/* Release the lock word by clearing it (SMP builds only; no-op on
   uniprocessor kernels).  */
extern inline void give_buzz_lock(int *lock_ptr)
{
#ifdef __SMP__
	*lock_ptr = 0 ;
#endif
} /* give_buzz_lock */
 
/* Acquire the semaphore, allowing the sleep to be interrupted by a
   signal.  Returns 0 on success; on contention, returns whatever
   __down_interruptible reports.  */
extern inline int down_interruptible(struct semaphore * sem)
{
	if (atomic_dec_return(&sem->count) >= 0)
		return 0;
	return __down_interruptible(sem);
}
 
/* Release the semaphore: atomically raise the count and wake a
   waiter (__up) only if someone was blocked (count was negative).  */
extern inline void up(struct semaphore * sem)
{
	int newcount = atomic_inc_return(&sem->count);

	if (newcount <= 0)
		__up(sem);
}
 
#endif
/trunk/rc203soc/sw/uClinux/include/asm-alpha/system.h
0,0 → 1,179
#ifndef __ALPHA_SYSTEM_H
#define __ALPHA_SYSTEM_H
 
#include <asm/pal.h> /* for backwards compatibility... */
 
/*
* System defines.. Note that this is included both from .c and .S
* files, so it does only defines, not any C code.
*/
 
/*
* We leave one page for the initial stack page, and one page for
* the initial process structure. Also, the console eats 3 MB for
* the initial bootloader (one of which we can reclaim later).
* With a few other pages for various reasons, we'll use an initial
* load address of 0xfffffc0000310000UL
*/
#define BOOT_PCB 0x20000000
#define BOOT_ADDR 0x20000000
/* Remove when official MILO sources have ELF support: */
#define BOOT_SIZE (16*1024)
 
#define KERNEL_START 0xfffffc0000300000
#define SWAPPER_PGD 0xfffffc0000300000
#define INIT_STACK 0xfffffc0000302000
#define EMPTY_PGT 0xfffffc0000304000
#define EMPTY_PGE 0xfffffc0000308000
#define ZERO_PGE 0xfffffc000030A000
 
#define START_ADDR 0xfffffc0000310000
/* Remove when official MILO sources have ELF support: */
#define START_SIZE (2*1024*1024)
 
#ifndef __ASSEMBLY__
 
/*
* This is the logout header that should be common to all platforms
* (assuming they are running OSF/1 PALcode, I guess).
*/
struct el_common {
unsigned int size; /* size in bytes of logout area */
int sbz1 : 31; /* should be zero */
char retry : 1; /* retry flag */
unsigned int proc_offset; /* processor-specific offset */
unsigned int sys_offset; /* system-specific offset */
unsigned long code; /* machine check code */
};
 
extern void wrent(void *, unsigned long);
extern void wrkgp(unsigned long);
extern void wrusp(unsigned long);
extern unsigned long rdusp(void);
extern unsigned long rdmces (void);
extern void wrmces (unsigned long);
extern unsigned long whami(void);
extern void wripir(unsigned long);
 
#define halt() __asm__ __volatile__ ("call_pal %0" : : "i" (PAL_halt) : "memory")
 
#define switch_to(prev,next) do { \
current_set[0] = next; \
alpha_switch_to((unsigned long) &(next)->tss - 0xfffffc0000000000); \
} while (0)
 
extern void alpha_switch_to(unsigned long pctxp);
 
extern void imb(void);
 
#define mb() \
__asm__ __volatile__("mb": : :"memory")
 
#define draina() \
__asm__ __volatile__ ("call_pal %0" : : "i" (PAL_draina) : "memory")
 
#define getipl() \
({ unsigned long __old_ipl; \
__asm__ __volatile__( \
"call_pal 54\n\t" \
"bis $0,$0,%0" \
: "=r" (__old_ipl) \
: : "$0", "$1", "$16", "$22", "$23", "$24", "$25"); \
__old_ipl; })
 
#define setipl(__new_ipl) \
__asm__ __volatile__( \
"bis %0,%0,$16\n\t" \
"call_pal 53" \
: : "r" (__new_ipl) \
: "$0", "$1", "$16", "$22", "$23", "$24", "$25", "memory")
 
#define swpipl(__new_ipl) \
({ unsigned long __old_ipl; \
__asm__ __volatile__( \
"bis %1,%1,$16\n\t" \
"call_pal 53\n\t" \
"bis $0,$0,%0" \
: "=r" (__old_ipl) \
: "r" (__new_ipl) \
: "$0", "$1", "$16", "$22", "$23", "$24", "$25", "memory"); \
__old_ipl; })
 
#define cli() setipl(7)
#define sti() setipl(0)
#define save_flags(flags) do { flags = getipl(); } while (0)
#define restore_flags(flags) setipl(flags)
 
/*
* TB routines..
*/
extern void tbi(long type, ...);
 
#define tbisi(x) tbi(1,(x))
#define tbisd(x) tbi(2,(x))
#define tbis(x) tbi(3,(x))
#define tbiap() tbi(-1)
#define tbia() tbi(-2)
 
/*
* Give prototypes to shut up gcc.
*/
extern __inline__ unsigned long xchg_u32 (volatile int * m, unsigned long val);
extern __inline__ unsigned long xchg_u64 (volatile long * m, unsigned long val);
 
/* Atomically exchange the 32-bit value at *m with val using an
   ldl_l/stl_c (load-locked/store-conditional) loop; the beq retries
   from the load if the conditional store failed.  Returns the
   previous value.  */
extern __inline__ unsigned long xchg_u32(volatile int * m, unsigned long val)
{
	unsigned long dummy;
	__asm__ __volatile__(
		"\n1:\t"
		"ldl_l %0,%2\n\t"
		"bis %3,%3,%1\n\t"
		"stl_c %1,%2\n\t"
		"beq %1,1b\n"
		: "=&r" (val), "=&r" (dummy), "=m" (*m)
		: "r" (val), "m" (*m));
	return val;
}
 
/* Atomically exchange the 64-bit value at *m with val using an
   ldq_l/stq_c (load-locked/store-conditional) loop; the beq retries
   from the load if the conditional store failed.  Returns the
   previous value.  */
extern __inline__ unsigned long xchg_u64(volatile long * m, unsigned long val)
{
	unsigned long dummy;
	__asm__ __volatile__(
		"\n1:\t"
		"ldq_l %0,%2\n\t"
		"bis %3,%3,%1\n\t"
		"stq_c %1,%2\n\t"
		"beq %1,1b\n"
		: "=&r" (val), "=&r" (dummy), "=m" (*m)
		: "r" (val), "m" (*m));
	return val;
}
 
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))
 
/*
* This function doesn't exist, so you'll get a linker error
* if something tries to do an invalid xchg().
*
* This only works if the compiler isn't horribly bad at optimizing.
* gcc-2.5.8 reportedly can't handle this, but as that doesn't work
* too well on the alpha anyway..
*/
extern void __xchg_called_with_bad_pointer(void);
 
/* Dispatch an atomic exchange by operand size.  Any size other than
   4 or 8 falls through to the deliberately-undefined function below,
   turning an invalid xchg() into a link-time error.  */
static __inline__ unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	if (size == 4)
		return xchg_u32(ptr, x);
	if (size == 8)
		return xchg_u64(ptr, x);
	__xchg_called_with_bad_pointer();
	return x;
}
 
#endif /* __ASSEMBLY__ */
 
#endif
/trunk/rc203soc/sw/uClinux/include/asm-alpha/a.out.h
0,0 → 1,98
#ifndef __ALPHA_A_OUT_H__
#define __ALPHA_A_OUT_H__
 
#include <linux/types.h>
 
/*
* OSF/1 ECOFF header structs. ECOFF files consist of:
* - a file header (struct filehdr),
* - an a.out header (struct aouthdr),
* - one or more section headers (struct scnhdr).
* The filhdr's "f_nscns" field contains the
* number of section headers.
*/
 
struct filehdr
{
/* OSF/1 "file" header */
__u16 f_magic, f_nscns;
__u32 f_timdat;
__u64 f_symptr;
__u32 f_nsyms;
__u16 f_opthdr, f_flags;
};
 
struct aouthdr
{
__u64 info; /* after that it looks quite normal.. */
__u64 tsize;
__u64 dsize;
__u64 bsize;
__u64 entry;
__u64 text_start; /* with a few additions that actually make sense */
__u64 data_start;
__u64 bss_start;
__u32 gprmask, fprmask; /* bitmask of general & floating point regs used in binary */
__u64 gpvalue;
};
 
struct scnhdr
{
char s_name[8];
__u64 s_paddr;
__u64 s_vaddr;
__u64 s_size;
__u64 s_scnptr;
__u64 s_relptr;
__u64 s_lnnoptr;
__u16 s_nreloc;
__u16 s_nlnno;
__u32 s_flags;
};
 
struct exec
{
/* OSF/1 "file" header */
struct filehdr fh;
struct aouthdr ah;
};
 
/*
* Define's so that the kernel exec code can access the a.out header
* fields...
*/
#define a_info ah.info
#define a_text ah.tsize
#define a_data ah.dsize
#define a_bss ah.bsize
#define a_entry ah.entry
#define a_textstart ah.text_start
#define a_datastart ah.data_start
#define a_bssstart ah.bss_start
#define a_gprmask ah.gprmask
#define a_fprmask ah.fprmask
#define a_gpvalue ah.gpvalue
 
#define N_TXTADDR(x) ((x).a_textstart)
#define N_DATADDR(x) ((x).a_datastart)
#define N_BSSADDR(x) ((x).a_bssstart)
#define N_DRSIZE(x) 0
#define N_TRSIZE(x) 0
#define N_SYMSIZE(x) 0
 
#define AOUTHSZ sizeof(struct aouthdr)
#define SCNHSZ sizeof(struct scnhdr)
#define SCNROUND 16
 
#define N_TXTOFF(x) \
((long) N_MAGIC(x) == ZMAGIC ? 0 : \
(sizeof(struct exec) + (x).fh.f_nscns*SCNHSZ + SCNROUND - 1) & ~(SCNROUND - 1))
 
#ifdef __KERNEL__
 
#define STACK_TOP ((current->personality & ADDR_MAX_32BIT) ? 0x100000000UL : \
((current->personality&ADDR_MAX_31BIT) ? 0x80000000UL : 0x00120000000UL))
 
#endif
 
#endif /* __ALPHA_A_OUT_H__ */
/trunk/rc203soc/sw/uClinux/include/asm-alpha/resource.h
0,0 → 1,39
#ifndef _ALPHA_RESOURCE_H
#define _ALPHA_RESOURCE_H
 
/*
* Resource limits
*/
 
#define RLIMIT_CPU 0 /* CPU time in ms */
#define RLIMIT_FSIZE 1 /* Maximum filesize */
#define RLIMIT_DATA 2 /* max data size */
#define RLIMIT_STACK 3 /* max stack size */
#define RLIMIT_CORE 4 /* max core file size */
#define RLIMIT_RSS 5 /* max resident set size */
#define RLIMIT_NOFILE 6 /* max number of open files */
#define RLIMIT_AS 7 /* address space limit(?) */
#define RLIMIT_NPROC 8 /* max number of processes */
#define RLIMIT_MEMLOCK 9 /* max locked-in-memory address space */
 
#define RLIM_NLIMITS 10
 
#ifdef __KERNEL__
 
#define INIT_RLIMITS \
{ \
{LONG_MAX, LONG_MAX}, /* RLIMIT_CPU */ \
{LONG_MAX, LONG_MAX}, /* RLIMIT_FSIZE */ \
{LONG_MAX, LONG_MAX}, /* RLIMIT_DATA */ \
{_STK_LIM, _STK_LIM}, /* RLIMIT_STACK */ \
{ 0, LONG_MAX}, /* RLIMIT_CORE */ \
{LONG_MAX, LONG_MAX}, /* RLIMIT_RSS */ \
{ NR_OPEN, NR_OPEN}, /* RLIMIT_NOFILE */ \
{LONG_MAX, LONG_MAX}, /* RLIMIT_AS */ \
{MAX_TASKS_PER_USER, MAX_TASKS_PER_USER}, /* RLIMIT_NPROC */ \
{LONG_MAX, LONG_MAX}, /* RLIMIT_MEMLOCK */ \
}
 
#endif /* __KERNEL__ */
 
#endif /* _ALPHA_RESOURCE_H */
/trunk/rc203soc/sw/uClinux/include/asm-alpha/fpu.h
0,0 → 1,85
#ifndef __ASM_ALPHA_FPU_H
#define __ASM_ALPHA_FPU_H
 
/*
* Alpha floating-point control register defines:
*/
#define FPCR_INVD (1UL<<49) /* invalid op disable (opt.) */
#define FPCR_DZED (1UL<<50) /* division by zero disable (opt.) */
#define FPCR_OVFD (1UL<<51) /* overflow disable (optional) */
#define FPCR_INV (1UL<<52) /* invalid operation */
#define FPCR_DZE (1UL<<53) /* division by zero */
#define FPCR_OVF (1UL<<54) /* overflow */
#define FPCR_UNF (1UL<<55) /* underflow */
#define FPCR_INE (1UL<<56) /* inexact */
#define FPCR_IOV (1UL<<57) /* integer overflow */
#define FPCR_UNDZ (1UL<<60) /* underflow to zero (opt.) */
#define FPCR_UNFD (1UL<<61) /* underflow disable (opt.) */
#define FPCR_INED (1UL<<62) /* inexact disable (opt.) */
#define FPCR_SUM (1UL<<63) /* summary bit */
 
#define FPCR_DYN_SHIFT 58 /* first dynamic rounding mode bit */
#define FPCR_DYN_CHOPPED (0x0UL << FPCR_DYN_SHIFT) /* towards 0 */
#define FPCR_DYN_MINUS (0x1UL << FPCR_DYN_SHIFT) /* towards -INF */
#define FPCR_DYN_NORMAL (0x2UL << FPCR_DYN_SHIFT) /* towards nearest */
#define FPCR_DYN_PLUS (0x3UL << FPCR_DYN_SHIFT) /* towards +INF */
#define FPCR_DYN_MASK (0x3UL << FPCR_DYN_SHIFT)
 
#define FPCR_MASK 0xfffe000000000000
 
/*
* IEEE trap enables are implemented in software. These per-thread
* bits are stored in the "flags" field of "struct thread_struct".
* Thus, the bits are defined so as not to conflict with the
* floating-point enable bit (which is architected). On top of that,
* we want to make these bits compatible with OSF/1 so
* ieee_set_fp_control() etc. can be implemented easily and
* compatibly. The corresponding definitions are in
* /usr/include/machine/fpu.h under OSF/1.
*/
#define IEEE_TRAP_ENABLE_INV (1<<1) /* invalid op */
#define IEEE_TRAP_ENABLE_DZE (1<<2) /* division by zero */
#define IEEE_TRAP_ENABLE_OVF (1<<3) /* overflow */
#define IEEE_TRAP_ENABLE_UNF (1<<4) /* underflow */
#define IEEE_TRAP_ENABLE_INE (1<<5) /* inexact */
#define IEEE_TRAP_ENABLE_MASK (IEEE_TRAP_ENABLE_INV | IEEE_TRAP_ENABLE_DZE |\
IEEE_TRAP_ENABLE_OVF | IEEE_TRAP_ENABLE_UNF |\
IEEE_TRAP_ENABLE_INE)
 
/* status bits coming from fpcr: */
#define IEEE_STATUS_INV (1<<17)
#define IEEE_STATUS_DZE (1<<18)
#define IEEE_STATUS_OVF (1<<19)
#define IEEE_STATUS_UNF (1<<20)
#define IEEE_STATUS_INE (1<<21)
 
#define IEEE_STATUS_MASK (IEEE_STATUS_INV | IEEE_STATUS_DZE | \
IEEE_STATUS_OVF | IEEE_STATUS_UNF | \
IEEE_STATUS_INE)
 
#define IEEE_SW_MASK (IEEE_TRAP_ENABLE_MASK | IEEE_STATUS_MASK)
 
#define IEEE_STATUS_TO_EXCSUM_SHIFT 16
 
#define IEEE_INHERIT (1UL<<63) /* inherit on thread create? */
 
/*
* Convert the software IEEE trap enables and status bits into the
* hardware fpcr format.
*/
 
static inline unsigned long
ieee_sw_to_fpcr(unsigned long sw)
{
	/* Status bits move up 35 places into their FPCR positions;
	   FPCR_SUM is set whenever any status bit is set.  */
	unsigned long status = sw & IEEE_STATUS_MASK;
	unsigned long fpcw = status << 35;

	if (status)
		fpcw |= FPCR_SUM;
	/* The software bits are trap *enables*, the hardware bits are
	   trap *disables* — hence the complement before shifting.  */
	fpcw |= (~sw & (IEEE_TRAP_ENABLE_INV
			| IEEE_TRAP_ENABLE_DZE
			| IEEE_TRAP_ENABLE_OVF)) << 48;
	fpcw |= (~sw & (IEEE_TRAP_ENABLE_UNF | IEEE_TRAP_ENABLE_INE)) << 57;
	return fpcw;
}
 
 
#endif /* __ASM_ALPHA_FPU_H */
/trunk/rc203soc/sw/uClinux/include/asm-alpha/irq.h
0,0 → 1,93
#ifndef _ALPHA_IRQ_H
#define _ALPHA_IRQ_H
 
/*
* linux/include/alpha/irq.h
*
* (C) 1994 Linus Torvalds
*/
 
#include <linux/linkage.h>
#include <linux/config.h>
 
#if defined(CONFIG_ALPHA_CABRIOLET) || \
defined(CONFIG_ALPHA_EB66P) || \
defined(CONFIG_ALPHA_EB164) || \
defined(CONFIG_ALPHA_PC164) || \
defined(CONFIG_ALPHA_LX164)
 
# define NR_IRQS 35
 
#elif defined(CONFIG_ALPHA_EB66) || \
defined(CONFIG_ALPHA_EB64P) || \
defined(CONFIG_ALPHA_MIKASA)
 
# define NR_IRQS 32
 
#elif defined(CONFIG_ALPHA_ALCOR) || \
defined(CONFIG_ALPHA_XLT) || \
defined(CONFIG_ALPHA_MIATA) || \
defined(CONFIG_ALPHA_RUFFIAN) || \
defined(CONFIG_ALPHA_NORITAKE)
 
# define NR_IRQS 48
 
#elif defined(CONFIG_ALPHA_SABLE) || \
defined(CONFIG_ALPHA_SX164)
 
# define NR_IRQS 40
 
#elif defined(CONFIG_ALPHA_TAKARA)
 
# define NR_IRQS 20
 
#else /* everyone else */
 
# define NR_IRQS 16
 
#endif
 
 
extern void disable_irq(unsigned int);
extern void enable_irq(unsigned int);
 
#define __STR(x) #x
#define STR(x) __STR(x)
#define SAVE_ALL "xx"
 
/*
* SAVE_MOST/RESTORE_MOST is used for the faster version of IRQ handlers,
* installed by using the SA_INTERRUPT flag. These kinds of IRQ's don't
* call the routines that do signal handling etc on return, and can have
* more relaxed register-saving etc. They are also atomic, and are thus
* suited for small, fast interrupts like the serial lines or the harddisk
* drivers, which don't actually need signal handling etc.
*
* Also note that we actually save only those registers that are used in
* C subroutines, so if you do something weird, you're on your own.
*/
#define SAVE_MOST "yy"
 
#define RESTORE_MOST "zz"
 
#define ACK_FIRST(mask) "aa"
 
#define ACK_SECOND(mask) "dummy"
 
#define UNBLK_FIRST(mask) "dummy"
 
#define UNBLK_SECOND(mask) "dummy"
 
#define IRQ_NAME2(nr) nr##_interrupt(void)
#define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)
#define FAST_IRQ_NAME(nr) IRQ_NAME2(fast_IRQ##nr)
#define BAD_IRQ_NAME(nr) IRQ_NAME2(bad_IRQ##nr)
#define BUILD_IRQ(chip,nr,mask) \
asmlinkage void IRQ_NAME(nr); \
asmlinkage void FAST_IRQ_NAME(nr); \
asmlinkage void BAD_IRQ_NAME(nr); \
asm code comes here
 
#endif
/trunk/rc203soc/sw/uClinux/include/asm-alpha/cia.h
0,0 → 1,591
#ifndef __ALPHA_CIA__H__
#define __ALPHA_CIA__H__
 
#include <linux/config.h>
#include <linux/types.h>
 
/*
* CIA is the internal name for the 2117x chipset which provides
* memory controller and PCI access for the 21164 chip based systems.
*
* This file is based on:
*
* DECchip 21171 Core Logic Chipset
* Technical Reference Manual
*
* EC-QE18B-TE
*
* david.rusling@reo.mts.dec.com Initial Version.
*
*/
 
/*------------------------------------------------------------------------**
** **
** EB164 I/O procedures **
** **
** inport[b|w|t|l], outport[b|w|t|l] 8:16:24:32 IO xfers **
** inportbxt: 8 bits only **
** inport: alias of inportw **
** outport: alias of outportw **
** **
** inmem[b|w|t|l], outmem[b|w|t|l] 8:16:24:32 ISA memory xfers **
** inmembxt: 8 bits only **
** inmem: alias of inmemw **
** outmem: alias of outmemw **
** **
**------------------------------------------------------------------------*/
 
 
/* CIA ADDRESS BIT DEFINITIONS
*
* 3 3 3 3|3 3 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
* 9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* |1| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |0|0|0|
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | \_/ \_/
* | | |
* +-- IO space, not cached. Byte Enable --+ |
* Transfer Length --+
*
*
*
* Byte Transfer
* Enable Length Transfer Byte Address
* adr<6:5> adr<4:3> Length Enable Adder
* ---------------------------------------------
* 00 00 Byte 1110 0x000
* 01 00 Byte 1101 0x020
* 10 00 Byte 1011 0x040
* 11 00 Byte 0111 0x060
*
* 00 01 Word 1100 0x008
* 01 01 Word 1001 0x028 <= Not supported in this code.
* 10 01 Word 0011 0x048
*
* 00 10 Tribyte 1000 0x010
* 01 10 Tribyte 0001 0x030
*
* 10 11 Longword 0000 0x058
*
* Note that byte enables are asserted low.
*
*/
 
#define BYTE_ENABLE_SHIFT 5
#define TRANSFER_LENGTH_SHIFT 3
 
#define MEM_R1_MASK 0x1fffffff /* SPARSE Mem region 1 mask is 29 bits */
#define MEM_R2_MASK 0x07ffffff /* SPARSE Mem region 2 mask is 27 bits */
#define MEM_R3_MASK 0x03ffffff /* SPARSE Mem region 3 mask is 26 bits */
 
#ifdef CONFIG_ALPHA_SRM_SETUP
/* if we are using the SRM PCI setup, we'll need to use variables instead */
#define CIA_DMA_WIN_BASE_DEFAULT (1024*1024*1024)
#define CIA_DMA_WIN_SIZE_DEFAULT (1024*1024*1024)
 
extern unsigned int CIA_DMA_WIN_BASE;
extern unsigned int CIA_DMA_WIN_SIZE;
 
#else /* SRM_SETUP */
#define CIA_DMA_WIN_BASE (1024*1024*1024)
#define CIA_DMA_WIN_SIZE (1024*1024*1024)
#endif /* SRM_SETUP */
 
/*
* 21171-CA Control and Status Registers (p4-1)
*/
#define CIA_IOC_CIA_REV (IDENT_ADDR + 0x8740000080UL)
#define CIA_IOC_PCI_LAT (IDENT_ADDR + 0x87400000C0UL)
#define CIA_IOC_CIA_CTRL (IDENT_ADDR + 0x8740000100UL)
#define CIA_IOC_CIA_CNFG (IDENT_ADDR + 0x8740000140UL)
#define CIA_IOC_HAE_MEM (IDENT_ADDR + 0x8740000400UL)
#define CIA_IOC_HAE_IO (IDENT_ADDR + 0x8740000440UL)
#define CIA_IOC_CFG (IDENT_ADDR + 0x8740000480UL)
#define CIA_IOC_CACK_EN (IDENT_ADDR + 0x8740000600UL)
 
/*
* 21171-CA Diagnostic Registers (p4-2)
*/
#define CIA_IOC_CIA_DIAG (IDENT_ADDR + 0x8740002000UL)
#define CIA_IOC_DIAG_CHECK (IDENT_ADDR + 0x8740003000UL)
 
/*
* 21171-CA Performance Monitor registers (p4-3)
*/
#define CIA_IOC_PERF_MONITOR (IDENT_ADDR + 0x8740004000UL)
#define CIA_IOC_PERF_CONTROL (IDENT_ADDR + 0x8740004040UL)
 
/*
* 21171-CA Error registers (p4-3)
*/
#define CIA_IOC_CPU_ERR0 (IDENT_ADDR + 0x8740008000UL)
#define CIA_IOC_CPU_ERR1 (IDENT_ADDR + 0x8740008040UL)
#define CIA_IOC_CIA_ERR (IDENT_ADDR + 0x8740008200UL)
#define CIA_IOC_CIA_STAT (IDENT_ADDR + 0x8740008240UL)
#define CIA_IOC_ERR_MASK (IDENT_ADDR + 0x8740008280UL)
#define CIA_IOC_CIA_SYN (IDENT_ADDR + 0x8740008300UL)
#define CIA_IOC_MEM_ERR0 (IDENT_ADDR + 0x8740008400UL)
#define CIA_IOC_MEM_ERR1 (IDENT_ADDR + 0x8740008440UL)
#define CIA_IOC_PCI_ERR0 (IDENT_ADDR + 0x8740008800UL)
#define CIA_IOC_PCI_ERR1 (IDENT_ADDR + 0x8740008840UL)
#define CIA_IOC_PCI_ERR3 (IDENT_ADDR + 0x8740008880UL)
 
/*
* 2117A-CA PCI Address Translation Registers.
*/
#define CIA_IOC_PCI_TBIA (IDENT_ADDR + 0x8760000100UL)
 
#define CIA_IOC_PCI_W0_BASE (IDENT_ADDR + 0x8760000400UL)
#define CIA_IOC_PCI_W0_MASK (IDENT_ADDR + 0x8760000440UL)
#define CIA_IOC_PCI_T0_BASE (IDENT_ADDR + 0x8760000480UL)
 
#define CIA_IOC_PCI_W1_BASE (IDENT_ADDR + 0x8760000500UL)
#define CIA_IOC_PCI_W1_MASK (IDENT_ADDR + 0x8760000540UL)
#define CIA_IOC_PCI_T1_BASE (IDENT_ADDR + 0x8760000580UL)
 
#define CIA_IOC_PCI_W2_BASE (IDENT_ADDR + 0x8760000600UL)
#define CIA_IOC_PCI_W2_MASK (IDENT_ADDR + 0x8760000640UL)
#define CIA_IOC_PCI_T2_BASE (IDENT_ADDR + 0x8760000680UL)
 
#define CIA_IOC_PCI_W3_BASE (IDENT_ADDR + 0x8760000700UL)
#define CIA_IOC_PCI_W3_MASK (IDENT_ADDR + 0x8760000740UL)
#define CIA_IOC_PCI_T3_BASE (IDENT_ADDR + 0x8760000780UL)
 
/*
* 21171-CA System configuration registers (p4-3)
*/
#define CIA_IOC_MCR (IDENT_ADDR + 0x8750000000UL)
#define CIA_IOC_MBA0 (IDENT_ADDR + 0x8750000600UL)
#define CIA_IOC_MBA2 (IDENT_ADDR + 0x8750000680UL)
#define CIA_IOC_MBA4 (IDENT_ADDR + 0x8750000700UL)
#define CIA_IOC_MBA6 (IDENT_ADDR + 0x8750000780UL)
#define CIA_IOC_MBA8 (IDENT_ADDR + 0x8750000800UL)
#define CIA_IOC_MBAA (IDENT_ADDR + 0x8750000880UL)
#define CIA_IOC_MBAC (IDENT_ADDR + 0x8750000900UL)
#define CIA_IOC_MBAE (IDENT_ADDR + 0x8750000980UL)
#define CIA_IOC_TMG0 (IDENT_ADDR + 0x8750000B00UL)
#define CIA_IOC_TMG1 (IDENT_ADDR + 0x8750000B40UL)
#define CIA_IOC_TMG2 (IDENT_ADDR + 0x8750000B80UL)
 
/*
* Memory spaces:
*/
#define CIA_IACK_SC (IDENT_ADDR + 0x8720000000UL)
#define CIA_CONF (IDENT_ADDR + 0x8700000000UL)
#define CIA_IO (IDENT_ADDR + 0x8580000000UL)
#define CIA_SPARSE_MEM (IDENT_ADDR + 0x8000000000UL)
#define CIA_SPARSE_MEM_R2 (IDENT_ADDR + 0x8400000000UL)
#define CIA_SPARSE_MEM_R3 (IDENT_ADDR + 0x8500000000UL)
#define CIA_DENSE_MEM (IDENT_ADDR + 0x8600000000UL)
 
/*
* ALCOR's GRU ASIC registers
*/
#define GRU_INT_REQ (IDENT_ADDR + 0x8780000000UL)
#define GRU_INT_MASK (IDENT_ADDR + 0x8780000040UL)
#define GRU_INT_EDGE (IDENT_ADDR + 0x8780000080UL)
#define GRU_INT_HILO (IDENT_ADDR + 0x87800000C0UL)
#define GRU_INT_CLEAR (IDENT_ADDR + 0x8780000100UL)
 
#define GRU_CACHE_CNFG (IDENT_ADDR + 0x8780000200UL)
#define GRU_SCR (IDENT_ADDR + 0x8780000300UL)
#define GRU_LED (IDENT_ADDR + 0x8780000800UL)
#define GRU_RESET (IDENT_ADDR + 0x8780000900UL)
 
#if defined(CONFIG_ALPHA_ALCOR)
#define GRU_INT_REQ_BITS 0x800fffffUL
#elif defined(CONFIG_ALPHA_XLT)
#define GRU_INT_REQ_BITS 0x80003fffUL
#else
#define GRU_INT_REQ_BITS 0xffffffffUL
#endif
 
/*
* Bit definitions for I/O Controller status register 0:
*/
#define CIA_IOC_STAT0_CMD 0xf
#define CIA_IOC_STAT0_ERR (1<<4)
#define CIA_IOC_STAT0_LOST (1<<5)
#define CIA_IOC_STAT0_THIT (1<<6)
#define CIA_IOC_STAT0_TREF (1<<7)
#define CIA_IOC_STAT0_CODE_SHIFT 8
#define CIA_IOC_STAT0_CODE_MASK 0x7
#define CIA_IOC_STAT0_P_NBR_SHIFT 13
#define CIA_IOC_STAT0_P_NBR_MASK 0x7ffff
 
#define HAE_ADDRESS CIA_IOC_HAE_MEM
 
#ifdef __KERNEL__
 
/*
* Translate physical memory address as seen on (PCI) bus into
* a kernel virtual address and vv.
*/
/*
 * Map a kernel virtual address to the address a PCI bus master must
 * use: physical address plus the base of the CIA direct-mapped DMA
 * window.
 */
extern inline unsigned long virt_to_bus(void * address)
{
	unsigned long phys = virt_to_phys(address);

	return phys + CIA_DMA_WIN_BASE;
}
 
/*
 * Inverse of virt_to_bus(): strip the DMA window base off a bus
 * address and map the remaining physical address back to a kernel
 * virtual address.
 */
extern inline void * bus_to_virt(unsigned long address)
{
	unsigned long phys = address - CIA_DMA_WIN_BASE;

	return phys_to_virt(phys);
}
 
/*
* I/O functions:
*
 * CIA (the 2117x PCI/memory support chipset for the EV5 (21164)
 * series of processors) uses a sparse address mapping scheme to
* get at PCI memory and I/O.
*/
 
#define vuip volatile unsigned int *
 
/*
 * Read one byte from sparse PCI I/O space.  The 32-bit sparse read
 * returns all four byte lanes; shift the addressed lane down and
 * mask to 8 bits.
 */
extern inline unsigned int __inb(unsigned long addr)
{
	long lanes = *(vuip) ((addr << 5) + CIA_IO + 0x00);

	return 0xffUL & (lanes >> ((addr & 3) * 8));
}
 
/*
 * Write one byte to sparse PCI I/O space.  insbl replicates the byte
 * into the byte lane selected by the low address bits; the 32-bit
 * sparse store then drives the proper lane.  mb() orders the store
 * before subsequent accesses.
 *
 * BUGFIX: the output constraint was written "r=", which is invalid
 * extended-asm syntax -- the '=' modifier must precede the
 * constraint letter ("=r").
 */
extern inline void __outb(unsigned char b, unsigned long addr)
{
	unsigned int w;

	asm ("insbl %2,%1,%0" : "=r"(w) : "ri"(addr & 0x3), "r"(b));
	*(vuip) ((addr << 5) + CIA_IO + 0x00) = w;
	mb();
}
 
/*
 * Read one 16-bit word from sparse PCI I/O space (+0x08 selects the
 * word transfer-length encoding).  Shift the addressed lanes down
 * and mask to 16 bits.
 */
extern inline unsigned int __inw(unsigned long addr)
{
	long lanes = *(vuip) ((addr << 5) + CIA_IO + 0x08);

	return 0xffffUL & (lanes >> ((addr & 3) * 8));
}
 
/*
 * Write one 16-bit word to sparse PCI I/O space (+0x08 = word
 * transfer length).  inswl places the word into the proper byte
 * lanes.  mb() orders the store before subsequent accesses.
 *
 * BUGFIX: the output constraint was written "r=", which is invalid
 * extended-asm syntax -- the '=' modifier must precede the
 * constraint letter ("=r").
 */
extern inline void __outw(unsigned short b, unsigned long addr)
{
	unsigned int w;

	asm ("inswl %2,%1,%0" : "=r"(w) : "ri"(addr & 0x3), "r"(b));
	*(vuip) ((addr << 5) + CIA_IO + 0x08) = w;
	mb();
}
 
/*
 * Read one 32-bit longword from sparse PCI I/O space
 * (+0x18 selects the longword transfer-length encoding).
 * No lane shifting needed: the full 32 bits come back as-is.
 */
extern inline unsigned int __inl(unsigned long addr)
{
return *(vuip) ((addr << 5) + CIA_IO + 0x18);
}
 
/*
 * Write one 32-bit longword to sparse PCI I/O space (+0x18 =
 * longword transfer length).  mb() orders the store before
 * subsequent accesses.
 */
extern inline void __outl(unsigned int b, unsigned long addr)
{
*(vuip) ((addr << 5) + CIA_IO + 0x18) = b;
mb();
}
 
 
/*
* Memory functions. 64-bit and 32-bit accesses are done through
* dense memory space, everything else through sparse space.
*
* For reading and writing 8 and 16 bit quantities we need to
* go through one of the three sparse address mapping regions
* and use the HAE_MEM CSR to provide some bits of the address.
* The following few routines use only sparse address region 1
* which gives 1Gbyte of accessible space which relates exactly
* to the amount of PCI memory mapping *into* system address space.
* See p 6-17 of the specification but it looks something like this:
*
* 21164 Address:
*
* 3 2 1
* 9876543210987654321098765432109876543210
* 1ZZZZ0.PCI.QW.Address............BBLL
*
* ZZ = SBZ
* BB = Byte offset
* LL = Transfer length
*
* PCI Address:
*
* 3 2 1
* 10987654321098765432109876543210
* HHH....PCI.QW.Address........ 00
*
* HHH = 31:29 HAE_MEM CSR
*
*/
 
#ifdef CONFIG_ALPHA_SRM_SETUP
 
extern unsigned long cia_sm_base_r1, cia_sm_base_r2, cia_sm_base_r3;
 
/*
 * SRM-setup variant: read one byte from sparse PCI memory space.
 * The address is matched against the three sparse memory regions
 * (bases set up by SRM and exported as cia_sm_base_r[123]); the
 * matching region's mask and window base form the sparse-space
 * access address.  An address covered by none of the regions cannot
 * be reached without reprogramming HAE, so all-ones is returned as
 * an error value.
 */
extern inline unsigned long __readb(unsigned long addr)
{
unsigned long result, shift, work;

if ((addr >= cia_sm_base_r1) &&
(addr <= (cia_sm_base_r1 + MEM_R1_MASK)))
work = (((addr & MEM_R1_MASK) << 5) + CIA_SPARSE_MEM + 0x00);
else
if ((addr >= cia_sm_base_r2) &&
(addr <= (cia_sm_base_r2 + MEM_R2_MASK)))
work = (((addr & MEM_R2_MASK) << 5) + CIA_SPARSE_MEM_R2 + 0x00);
else
if ((addr >= cia_sm_base_r3) &&
(addr <= (cia_sm_base_r3 + MEM_R3_MASK)))
work = (((addr & MEM_R3_MASK) << 5) + CIA_SPARSE_MEM_R3 + 0x00);
else
{
#if 0
printk("__readb: address 0x%lx not covered by HAE\n", addr);
#endif
/* Address unreachable through the current HAE regions. */
return 0x0ffUL;
}
/* Extract the addressed byte lane from the 32-bit sparse read. */
shift = (addr & 0x3) << 3;
result = *(vuip) work;
result >>= shift;
return 0x0ffUL & result;
}
 
/*
 * SRM-setup variant: read one 16-bit word from sparse PCI memory
 * space (+0x08 = word transfer length).  The address is matched
 * against the three SRM-programmed sparse memory regions; an
 * address covered by none of them returns all-ones.
 *
 * BUGFIX: the error path returned 0x0ffUL (a byte-wide all-ones),
 * inconsistent with the 16-bit result this function produces; it
 * now returns 0x0ffffUL to match the success-path mask.
 */
extern inline unsigned long __readw(unsigned long addr)
{
	unsigned long result, shift, work;

	if ((addr >= cia_sm_base_r1) &&
	    (addr <= (cia_sm_base_r1 + MEM_R1_MASK)))
		work = (((addr & MEM_R1_MASK) << 5) + CIA_SPARSE_MEM + 0x08);
	else if ((addr >= cia_sm_base_r2) &&
		 (addr <= (cia_sm_base_r2 + MEM_R2_MASK)))
		work = (((addr & MEM_R2_MASK) << 5) + CIA_SPARSE_MEM_R2 + 0x08);
	else if ((addr >= cia_sm_base_r3) &&
		 (addr <= (cia_sm_base_r3 + MEM_R3_MASK)))
		work = (((addr & MEM_R3_MASK) << 5) + CIA_SPARSE_MEM_R3 + 0x08);
	else {
#if 0
		printk("__readw: address 0x%lx not covered by HAE\n", addr);
#endif
		/* Unreachable address: word-wide all-ones error value. */
		return 0x0ffffUL;
	}
	/* Extract the addressed lanes from the 32-bit sparse read. */
	shift = (addr & 0x3) << 3;
	result = *(vuip) work;
	result >>= shift;
	return 0x0ffffUL & result;
}
 
/*
 * SRM-setup variant: write one byte to sparse PCI memory space.
 * The address is matched against the three SRM-programmed sparse
 * memory regions; unreachable addresses are silently dropped
 * (diagnostic printk compiled out below).  The byte is replicated
 * into all four lanes; the sparse-space byte-enable bits select the
 * lane that is actually written.
 */
extern inline void __writeb(unsigned char b, unsigned long addr)
{
unsigned long work;

if ((addr >= cia_sm_base_r1) &&
(addr <= (cia_sm_base_r1 + MEM_R1_MASK)))
work = (((addr & MEM_R1_MASK) << 5) + CIA_SPARSE_MEM + 0x00);
else
if ((addr >= cia_sm_base_r2) &&
(addr <= (cia_sm_base_r2 + MEM_R2_MASK)))
work = (((addr & MEM_R2_MASK) << 5) + CIA_SPARSE_MEM_R2 + 0x00);
else
if ((addr >= cia_sm_base_r3) &&
(addr <= (cia_sm_base_r3 + MEM_R3_MASK)))
work = (((addr & MEM_R3_MASK) << 5) + CIA_SPARSE_MEM_R3 + 0x00);
else
{
#if 0
printk("__writeb: address 0x%lx not covered by HAE\n", addr);
#endif
/* Address unreachable through the current HAE regions. */
return;
}
/* Replicate the byte into every lane of the 32-bit sparse store. */
*(vuip) work = b * 0x01010101;
}
 
/*
 * SRM-setup variant: write one 16-bit word to sparse PCI memory
 * space.  The address is matched against the three SRM-programmed
 * sparse memory regions; unreachable addresses are silently dropped.
 * The word is replicated into both halves of the 32-bit sparse
 * store.
 *
 * BUGFIX: the sparse-space offset was +0x00 (byte transfer-length
 * encoding) in all three regions; word transfers require +0x08, as
 * the CIA byte-enable/transfer-length table and the non-SRM
 * __writew already use.  With +0x00 only a single byte was written.
 */
extern inline void __writew(unsigned short b, unsigned long addr)
{
	unsigned long work;

	if ((addr >= cia_sm_base_r1) &&
	    (addr <= (cia_sm_base_r1 + MEM_R1_MASK)))
		work = (((addr & MEM_R1_MASK) << 5) + CIA_SPARSE_MEM + 0x08);
	else if ((addr >= cia_sm_base_r2) &&
		 (addr <= (cia_sm_base_r2 + MEM_R2_MASK)))
		work = (((addr & MEM_R2_MASK) << 5) + CIA_SPARSE_MEM_R2 + 0x08);
	else if ((addr >= cia_sm_base_r3) &&
		 (addr <= (cia_sm_base_r3 + MEM_R3_MASK)))
		work = (((addr & MEM_R3_MASK) << 5) + CIA_SPARSE_MEM_R3 + 0x08);
	else {
#if 0
		printk("__writew: address 0x%lx not covered by HAE\n", addr);
#endif
		/* Address unreachable through the current HAE regions. */
		return;
	}
	/* Replicate the word into both halves of the sparse store. */
	*(vuip) work = b * 0x00010001;
}
 
#else /* SRM_SETUP */
 
/*
 * Read one byte from sparse PCI memory space through region 1.
 * The top address bits go into the HAE_MEM CSR (updated only when
 * they differ from the cached value); the rest forms the sparse
 * access address.  The addressed byte lane is shifted down from the
 * 32-bit sparse read.
 */
extern inline unsigned long __readb(unsigned long addr)
{
	unsigned long lane_shift, hae_bits, raw;

	lane_shift = (addr & 0x3) * 8;
	hae_bits = addr & 0xE0000000;
	addr &= MEM_R1_MASK;
	if (hae_bits != hae.cache)
		set_hae(hae_bits);
	raw = *(vuip) ((addr << 5) + CIA_SPARSE_MEM + 0x00);
	return (raw >> lane_shift) & 0xffUL;
}
 
/*
 * Read one 16-bit word from sparse PCI memory space through region 1
 * (+0x08 = word transfer length).  HAE_MEM is updated only when the
 * top address bits differ from the cached value.
 */
extern inline unsigned long __readw(unsigned long addr)
{
	unsigned long lane_shift, hae_bits, raw;

	lane_shift = (addr & 0x3) * 8;
	hae_bits = addr & 0xE0000000;
	addr &= MEM_R1_MASK;
	if (hae_bits != hae.cache)
		set_hae(hae_bits);
	raw = *(vuip) ((addr << 5) + CIA_SPARSE_MEM + 0x08);
	return (raw >> lane_shift) & 0xffffUL;
}
 
/*
 * Write one byte to sparse PCI memory space through region 1.
 * HAE_MEM is updated only when the top address bits differ from the
 * cached value.  The byte is replicated into all four lanes; the
 * sparse-space byte enables pick the one actually written.
 */
extern inline void __writeb(unsigned char b, unsigned long addr)
{
	unsigned long hae_bits = addr & 0xE0000000;

	addr &= MEM_R1_MASK;
	if (hae_bits != hae.cache)
		set_hae(hae_bits);
	*(vuip) ((addr << 5) + CIA_SPARSE_MEM + 0x00) = b * 0x01010101;
}
 
/*
 * Write one 16-bit word to sparse PCI memory space through region 1
 * (+0x08 = word transfer length).  HAE_MEM is updated only when the
 * top address bits differ from the cached value.  The word is
 * replicated into both halves of the 32-bit sparse store.
 */
extern inline void __writew(unsigned short b, unsigned long addr)
{
	unsigned long hae_bits = addr & 0xE0000000;

	addr &= MEM_R1_MASK;
	if (hae_bits != hae.cache)
		set_hae(hae_bits);
	*(vuip) ((addr << 5) + CIA_SPARSE_MEM + 0x08) = b * 0x00010001;
}
 
#endif /* SRM_SETUP */
 
/*
 * Read one 32-bit longword via dense memory space -- no HAE, no
 * sparse lane shifting; the address maps directly.
 */
extern inline unsigned long __readl(unsigned long addr)
{
return *(vuip) (addr + CIA_DENSE_MEM);
}
 
/*
 * Write one 32-bit longword via dense memory space.  Note: no
 * memory barrier here -- callers must order accesses themselves
 * (see the NOTE block in lca.h with the same convention).
 */
extern inline void __writel(unsigned int b, unsigned long addr)
{
*(vuip) (addr + CIA_DENSE_MEM) = b;
}
 
#define inb(port) \
(__builtin_constant_p((port))?__inb(port):_inb(port))
 
#define outb(x, port) \
(__builtin_constant_p((port))?__outb((x),(port)):_outb((x),(port)))
 
#define readl(a) __readl((unsigned long)(a))
#define writel(v,a) __writel((v),(unsigned long)(a))
 
#undef vuip
 
extern unsigned long cia_init (unsigned long mem_start,
unsigned long mem_end);
 
#endif /* __KERNEL__ */
 
/*
* Data structure for handling CIA machine checks:
*/
/* ev5-specific info: */
/*
 * Per-CPU (EV5/21164) state captured in the machine-check logout
 * frame.  Field names follow the EV5 internal processor register
 * set -- NOTE(review): exact register semantics should be confirmed
 * against the 21164 hardware reference manual.
 */
struct el_procdata {
unsigned long shadow[8]; /* PALmode shadow registers */
unsigned long paltemp[24]; /* PAL temporary registers */
/* EV5-specific fields */
unsigned long exc_addr; /* Address of excepting instruction. */
unsigned long exc_sum; /* Summary of arithmetic traps. */
unsigned long exc_mask; /* Exception mask (from exc_sum). */
unsigned long exc_base; /* PALbase at time of exception. */
unsigned long isr; /* Interrupt summary register. */
unsigned long icsr; /* Ibox control register. */
unsigned long ic_perr_stat; /* Icache parity error status. */
unsigned long dc_perr_stat; /* Dcache parity error status. */
unsigned long va; /* Effective VA of fault or miss. */
unsigned long mm_stat; /* Memory-management fault status. */
unsigned long sc_addr; /* Scache address. */
unsigned long sc_stat; /* Scache status. */
unsigned long bc_tag_addr; /* Backup-cache tag address. */
unsigned long ei_addr; /* External interface address. */
unsigned long fill_syn; /* Fill syndrome (ECC). */
unsigned long ei_stat; /* External interface status. */
unsigned long ld_lock; /* Load-locked address register. */
};
 
/* system-specific info: */
/*
 * CIA chipset state captured in the machine-check logout frame.
 * NOTE(review): the coma_* fields appear to be memory-controller
 * CSRs and the epic_* fields PCI-interface CSRs (including the
 * eight scatter/gather TLB tag/data pairs) -- confirm against the
 * 21171 technical reference manual.
 */
struct el_CIA_sysdata_mcheck {
unsigned long coma_gcr;
unsigned long coma_edsr;
unsigned long coma_ter;
unsigned long coma_elar;
unsigned long coma_ehar;
unsigned long coma_ldlr;
unsigned long coma_ldhr;
unsigned long coma_base0;
unsigned long coma_base1;
unsigned long coma_base2;
unsigned long coma_cnfg0;
unsigned long coma_cnfg1;
unsigned long coma_cnfg2;
unsigned long epic_dcsr;
unsigned long epic_pear;
unsigned long epic_sear;
unsigned long epic_tbr1;
unsigned long epic_tbr2;
unsigned long epic_pbr1;
unsigned long epic_pbr2;
unsigned long epic_pmr1;
unsigned long epic_pmr2;
unsigned long epic_harx1;
unsigned long epic_harx2;
unsigned long epic_pmlt;
unsigned long epic_tag0; /* scatter/gather TLB tags 0..7 */
unsigned long epic_tag1;
unsigned long epic_tag2;
unsigned long epic_tag3;
unsigned long epic_tag4;
unsigned long epic_tag5;
unsigned long epic_tag6;
unsigned long epic_tag7;
unsigned long epic_data0; /* scatter/gather TLB data 0..7 */
unsigned long epic_data1;
unsigned long epic_data2;
unsigned long epic_data3;
unsigned long epic_data4;
unsigned long epic_data5;
unsigned long epic_data6;
unsigned long epic_data7;
};
 
#define RTC_PORT(x) (0x70 + (x))
#define RTC_ADDR(x) (0x80 | (x))
#define RTC_ALWAYS_BCD 0
 
#endif /* __ALPHA_CIA__H__ */
/trunk/rc203soc/sw/uClinux/include/asm-alpha/ipsum.h
0,0 → 1,45
#ifndef __ASM_IPSUM_H
#define __ASM_IPSUM_H
 
/*
* This routine computes a UDP checksum.
*/
/* STUB: always returns 0 ("checksum valid") -- no UDP checksum is
 * actually computed for this port yet. */
extern inline unsigned short udp_check(struct udphdr *uh, int len, u32 saddr, u32 daddr)
{
/* uhh.. eventually */
return 0;
}
 
/*
* This routine computes a TCP checksum.
*/
/* STUB: always returns 0 ("checksum valid") -- no TCP checksum is
 * actually computed for this port yet. */
extern inline unsigned short tcp_check(struct tcphdr *th, int len, u32 saddr, u32 daddr)
{
/* uhh.. eventually */
return 0;
}
 
 
/*
* This routine does all the checksum computations that don't
* require anything special (like copying or special headers).
*/
 
/* STUB: always returns 0 -- the generic Internet checksum over
 * buff[0..len) is not implemented for this port yet. */
extern inline unsigned short ip_compute_csum(unsigned char * buff, int len)
{
/* uhh.. eventually */
return 0;
}
 
/*
* This is a version of ip_compute_csum() optimized for IP headers, which
* always checksum on 4 octet boundaries.
*/
 
/* STUB: always returns 0 -- the 4-octet-aligned IP-header checksum
 * (wlen counts 32-bit words) is not implemented for this port yet. */
static inline unsigned short ip_fast_csum(unsigned char * buff, int wlen)
{
/* uhh.. eventually */
return 0;
}
 
#endif
/trunk/rc203soc/sw/uClinux/include/asm-alpha/ioctls.h
0,0 → 1,104
#ifndef _ASM_ALPHA_IOCTLS_H
#define _ASM_ALPHA_IOCTLS_H
 
#include <asm/ioctl.h>
 
#define FIOCLEX _IO('f', 1)
#define FIONCLEX _IO('f', 2)
#define FIOASYNC _IOW('f', 125, int)
#define FIONBIO _IOW('f', 126, int)
#define FIONREAD _IOR('f', 127, int)
#define TIOCINQ FIONREAD
 
#define TIOCGETP _IOR('t', 8, struct sgttyb)
#define TIOCSETP _IOW('t', 9, struct sgttyb)
#define TIOCSETN _IOW('t', 10, struct sgttyb) /* TIOCSETP wo flush */
 
#define TIOCSETC _IOW('t', 17, struct tchars)
#define TIOCGETC _IOR('t', 18, struct tchars)
#define TCGETS _IOR('t', 19, struct termios)
#define TCSETS _IOW('t', 20, struct termios)
#define TCSETSW _IOW('t', 21, struct termios)
#define TCSETSF _IOW('t', 22, struct termios)
 
#define TCGETA _IOR('t', 23, struct termio)
#define TCSETA _IOW('t', 24, struct termio)
#define TCSETAW _IOW('t', 25, struct termio)
#define TCSETAF _IOW('t', 28, struct termio)
 
#define TCSBRK _IO('t', 29)
#define TCXONC _IO('t', 30)
#define TCFLSH _IO('t', 31)
 
#define TIOCSWINSZ _IOW('t', 103, struct winsize)
#define TIOCGWINSZ _IOR('t', 104, struct winsize)
#define TIOCSTART _IO('t', 110) /* start output, like ^Q */
#define TIOCSTOP _IO('t', 111) /* stop output, like ^S */
#define TIOCOUTQ _IOR('t', 115, int) /* output queue size */
 
#define TIOCGLTC _IOR('t', 116, struct ltchars)
#define TIOCSLTC _IOW('t', 117, struct ltchars)
#define TIOCSPGRP _IOW('t', 118, int)
#define TIOCGPGRP _IOR('t', 119, int)
 
#define TIOCEXCL 0x540C
#define TIOCNXCL 0x540D
#define TIOCSCTTY 0x540E
 
#define TIOCSTI 0x5412
#define TIOCMGET 0x5415
#define TIOCMBIS 0x5416
#define TIOCMBIC 0x5417
#define TIOCMSET 0x5418
# define TIOCM_LE 0x001
# define TIOCM_DTR 0x002
# define TIOCM_RTS 0x004
# define TIOCM_ST 0x008
# define TIOCM_SR 0x010
# define TIOCM_CTS 0x020
# define TIOCM_CAR 0x040
# define TIOCM_RNG 0x080
# define TIOCM_DSR 0x100
# define TIOCM_CD TIOCM_CAR
# define TIOCM_RI TIOCM_RNG
 
#define TIOCGSOFTCAR 0x5419
#define TIOCSSOFTCAR 0x541A
#define TIOCLINUX 0x541C
#define TIOCCONS 0x541D
#define TIOCGSERIAL 0x541E
#define TIOCSSERIAL 0x541F
#define TIOCPKT 0x5420
# define TIOCPKT_DATA 0
# define TIOCPKT_FLUSHREAD 1
# define TIOCPKT_FLUSHWRITE 2
# define TIOCPKT_STOP 4
# define TIOCPKT_START 8
# define TIOCPKT_NOSTOP 16
# define TIOCPKT_DOSTOP 32
 
 
#define TIOCNOTTY 0x5422
#define TIOCSETD 0x5423
#define TIOCGETD 0x5424
#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
#define TIOCTTYGSTRUCT 0x5426 /* For debugging only */
#define TIOCSBRK 0x5427 /* BSD compatibility */
#define TIOCCBRK 0x5428 /* BSD compatibility */
 
#define TIOCSERCONFIG 0x5453
#define TIOCSERGWILD 0x5454
#define TIOCSERSWILD 0x5455
#define TIOCGLCKTRMIOS 0x5456
#define TIOCSLCKTRMIOS 0x5457
#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
#define TIOCSERGETLSR 0x5459 /* Get line status register */
/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
# define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
#define TIOCSERGETMULTI 0x545A /* Get multiport config */
#define TIOCSERSETMULTI 0x545B /* Set multiport config */
 
#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
 
#endif /* _ASM_ALPHA_IOCTLS_H */
/trunk/rc203soc/sw/uClinux/include/asm-alpha/delay.h
0,0 → 1,47
#ifndef __ALPHA_DELAY_H
#define __ALPHA_DELAY_H
 
extern unsigned long loops_per_sec;
 
/*
* Copyright (C) 1993 Linus Torvalds
*
* Delay routines, using a pre-computed "loops_per_second" value.
*/
 
/*
 * Busy-wait for `loops` iterations of a two-instruction
 * subtract-and-branch loop (aligned to an 8-byte boundary so the
 * loop timing is stable).  The loop count is calibrated elsewhere
 * via loops_per_sec.
 */
extern __inline__ void __delay(unsigned long loops)
{
__asm__ __volatile__(".align 3\n"
"1:\tsubq %0,1,%0\n\t"
"bge %0,1b": "=r" (loops) : "0" (loops));
}
 
/*
* division by multiplication: you don't have to worry about
* loss of precision.
*
* Use only for very small delays ( < 1 msec). Should probably use a
* lookup table, really, as the multiplications take much too long with
* short delays. This is a "reasonable" implementation, though (and the
* first constant multiplications gets optimized away if the delay is
* a constant)
*/
/*
 * Delay for approximately `usecs` microseconds.  The multiply by
 * 2**64/1000000 followed by umulh (high 64 bits of the 128-bit
 * product with loops_per_sec) computes
 * usecs * loops_per_sec / 1000000 without division, i.e. the loop
 * count for __delay().  Use only for very small delays (< 1 msec).
 */
extern __inline__ void udelay(unsigned long usecs)
{
usecs *= 0x000010c6f7a0b5edUL; /* 2**64 / 1000000 */
__asm__("umulh %1,%2,%0"
:"=r" (usecs)
:"r" (usecs),"r" (loops_per_sec));
__delay(usecs);
}
 
/*
* 64-bit integers means we don't have to worry about overflow as
* on some other architectures..
*/
/*
 * Compute (a * b) / c in 64-bit arithmetic.  On Alpha the 64-bit
 * product does not need special overflow handling for the intended
 * (small) operand ranges, unlike some 32-bit architectures.
 */
extern __inline__ unsigned long muldiv(unsigned long a, unsigned long b, unsigned long c)
{
	unsigned long product = a * b;

	return product / c;
}
 
#endif /* defined(__ALPHA_DELAY_H) */
/trunk/rc203soc/sw/uClinux/include/asm-alpha/lca.h
0,0 → 1,399
#include <linux/config.h> /* CONFIG_ALPHA_SRM_SETUP */
#ifndef __ALPHA_LCA__H__
#define __ALPHA_LCA__H__
 
/*
* Low Cost Alpha (LCA) definitions (these apply to 21066 and 21068,
* for example).
*
* This file is based on:
*
* DECchip 21066 and DECchip 21068 Alpha AXP Microprocessors
* Hardware Reference Manual; Digital Equipment Corp.; May 1994;
* Maynard, MA; Order Number: EC-N2681-71.
*/
 
/*
* NOTE: The LCA uses a Host Address Extension (HAE) register to access
* PCI addresses that are beyond the first 27 bits of address
* space. Updating the HAE requires an external cycle (and
* a memory barrier), which tends to be slow. Instead of updating
* it on each sparse memory access, we keep the current HAE value
* cached in variable cache_hae. Only if the cached HAE differs
* from the desired HAE value do we actually updated HAE register.
* The HAE register is preserved by the interrupt handler entry/exit
* code, so this scheme works even in the presence of interrupts.
*
* Dense memory space doesn't require the HAE, but is restricted to
* aligned 32 and 64 bit accesses. Special Cycle and Interrupt
* Acknowledge cycles may also require the use of the HAE. The LCA
* limits I/O address space to the bottom 24 bits of address space,
* but this easily covers the 16 bit ISA I/O address space.
*/
 
/*
* NOTE 2! The memory operations do not set any memory barriers, as
* it's not needed for cases like a frame buffer that is essentially
* memory-like. You need to do them by hand if the operations depend
* on ordering.
*
* Similarly, the port I/O operations do a "mb" only after a write
* operation: if an mb is needed before (as in the case of doing
* memory mapped I/O first, and then a port I/O operation to the same
* device), it needs to be done by hand.
*
* After the above has bitten me 100 times, I'll give up and just do
* the mb all the time, but right now I'm hoping this will work out.
* Avoiding mb's may potentially be a noticeable speed improvement,
* but I can't honestly say I've tested it.
*
* Handling interrupts that need to do mb's to synchronize to
* non-interrupts is another fun race area. Don't do it (because if
* you do, I'll have to do *everything* with interrupts disabled,
* ugh).
*/
 
#include <asm/system.h>
 
#ifdef CONFIG_ALPHA_SRM_SETUP
/* if we are using the SRM PCI setup, we'll need to use variables instead */
#define LCA_DMA_WIN_BASE_DEFAULT (1024*1024*1024)
#define LCA_DMA_WIN_SIZE_DEFAULT (1024*1024*1024)
 
extern unsigned int LCA_DMA_WIN_BASE;
extern unsigned int LCA_DMA_WIN_SIZE;
 
#else /* SRM_SETUP */
#define LCA_DMA_WIN_BASE (1024*1024*1024)
#define LCA_DMA_WIN_SIZE (1024*1024*1024)
#endif /* SRM_SETUP */
 
/*
* Memory Controller registers:
*/
#define LCA_MEM_BCR0 (IDENT_ADDR + 0x120000000UL)
#define LCA_MEM_BCR1 (IDENT_ADDR + 0x120000008UL)
#define LCA_MEM_BCR2 (IDENT_ADDR + 0x120000010UL)
#define LCA_MEM_BCR3 (IDENT_ADDR + 0x120000018UL)
#define LCA_MEM_BMR0 (IDENT_ADDR + 0x120000020UL)
#define LCA_MEM_BMR1 (IDENT_ADDR + 0x120000028UL)
#define LCA_MEM_BMR2 (IDENT_ADDR + 0x120000030UL)
#define LCA_MEM_BMR3 (IDENT_ADDR + 0x120000038UL)
#define LCA_MEM_BTR0 (IDENT_ADDR + 0x120000040UL)
#define LCA_MEM_BTR1 (IDENT_ADDR + 0x120000048UL)
#define LCA_MEM_BTR2 (IDENT_ADDR + 0x120000050UL)
#define LCA_MEM_BTR3 (IDENT_ADDR + 0x120000058UL)
#define LCA_MEM_GTR (IDENT_ADDR + 0x120000060UL)
#define LCA_MEM_ESR (IDENT_ADDR + 0x120000068UL)
#define LCA_MEM_EAR (IDENT_ADDR + 0x120000070UL)
#define LCA_MEM_CAR (IDENT_ADDR + 0x120000078UL)
#define LCA_MEM_VGR (IDENT_ADDR + 0x120000080UL)
#define LCA_MEM_PLM (IDENT_ADDR + 0x120000088UL)
#define LCA_MEM_FOR (IDENT_ADDR + 0x120000090UL)
 
/*
* I/O Controller registers:
*/
#define LCA_IOC_HAE (IDENT_ADDR + 0x180000000UL)
#define LCA_IOC_CONF (IDENT_ADDR + 0x180000020UL)
#define LCA_IOC_STAT0 (IDENT_ADDR + 0x180000040UL)
#define LCA_IOC_STAT1 (IDENT_ADDR + 0x180000060UL)
#define LCA_IOC_TBIA (IDENT_ADDR + 0x180000080UL)
#define LCA_IOC_TB_ENA (IDENT_ADDR + 0x1800000a0UL)
#define LCA_IOC_SFT_RST (IDENT_ADDR + 0x1800000c0UL)
#define LCA_IOC_PAR_DIS (IDENT_ADDR + 0x1800000e0UL)
#define LCA_IOC_W_BASE0 (IDENT_ADDR + 0x180000100UL)
#define LCA_IOC_W_BASE1 (IDENT_ADDR + 0x180000120UL)
#define LCA_IOC_W_MASK0 (IDENT_ADDR + 0x180000140UL)
#define LCA_IOC_W_MASK1 (IDENT_ADDR + 0x180000160UL)
#define LCA_IOC_T_BASE0 (IDENT_ADDR + 0x180000180UL)
#define LCA_IOC_T_BASE1 (IDENT_ADDR + 0x1800001a0UL)
#define LCA_IOC_TB_TAG0 (IDENT_ADDR + 0x188000000UL)
#define LCA_IOC_TB_TAG1 (IDENT_ADDR + 0x188000020UL)
#define LCA_IOC_TB_TAG2 (IDENT_ADDR + 0x188000040UL)
#define LCA_IOC_TB_TAG3 (IDENT_ADDR + 0x188000060UL)
#define LCA_IOC_TB_TAG4 (IDENT_ADDR + 0x188000070UL)
#define LCA_IOC_TB_TAG5 (IDENT_ADDR + 0x1880000a0UL)
#define LCA_IOC_TB_TAG6 (IDENT_ADDR + 0x1880000c0UL)
#define LCA_IOC_TB_TAG7 (IDENT_ADDR + 0x1880000e0UL)
 
/*
* Memory spaces:
*/
#define LCA_IACK_SC (IDENT_ADDR + 0x1a0000000UL)
#define LCA_CONF (IDENT_ADDR + 0x1e0000000UL)
#define LCA_IO (IDENT_ADDR + 0x1c0000000UL)
#define LCA_SPARSE_MEM (IDENT_ADDR + 0x200000000UL)
#define LCA_DENSE_MEM (IDENT_ADDR + 0x300000000UL)
 
/*
* Bit definitions for I/O Controller status register 0:
*/
#define LCA_IOC_STAT0_CMD 0xf
#define LCA_IOC_STAT0_ERR (1<<4)
#define LCA_IOC_STAT0_LOST (1<<5)
#define LCA_IOC_STAT0_THIT (1<<6)
#define LCA_IOC_STAT0_TREF (1<<7)
#define LCA_IOC_STAT0_CODE_SHIFT 8
#define LCA_IOC_STAT0_CODE_MASK 0x7
#define LCA_IOC_STAT0_P_NBR_SHIFT 13
#define LCA_IOC_STAT0_P_NBR_MASK 0x7ffff
 
#define HAE_ADDRESS LCA_IOC_HAE
 
/* LCA PMR Power Management register defines */
#define LCA_PMR_ADDR (IDENT_ADDR + 0x120000098UL)
#define LCA_PMR_PDIV 0x7 /* Primary clock divisor */
#define LCA_PMR_ODIV 0x38 /* Override clock divisor */
#define LCA_PMR_INTO 0x40 /* Interrupt override */
#define LCA_PMR_DMAO 0x80 /* DMA override */
#define LCA_PMR_OCCEB 0xffff0000L /* Override cycle counter - even
bits */
#define LCA_PMR_OCCOB 0xffff000000000000L /* Override cycle counter - even
bits */
#define LCA_PMR_PRIMARY_MASK 0xfffffffffffffff8
/* LCA PMR Macros */
 
#define READ_PMR (*(volatile unsigned long *)LCA_PMR_ADDR)
#define WRITE_PMR(d) (*((volatile unsigned long *)LCA_PMR_ADDR) = (d))
 
#define GET_PRIMARY(r) ((r) & LCA_PMR_PDIV)
#define GET_OVERRIDE(r) (((r) >> 3) & LCA_PMR_PDIV)
#define SET_PRIMARY_CLOCK(r, c) ((r) = (((r) & LCA_PMR_PRIMARY_MASK) | (c)))
 
/* LCA PMR Divisor values */
#define DIV_1 0x0
#define DIV_1_5 0x1
#define DIV_2 0x2
#define DIV_4 0x3
#define DIV_8 0x4
#define DIV_16 0x5
#define DIV_MIN DIV_1
#define DIV_MAX DIV_16
 
 
#ifdef __KERNEL__
 
/*
* Translate physical memory address as seen on (PCI) bus into
* a kernel virtual address and vv.
*/
/*
 * Map a kernel virtual address to the address a PCI bus master must
 * use: physical address plus the base of the LCA direct-mapped DMA
 * window.
 */
extern inline unsigned long virt_to_bus(void * address)
{
	unsigned long phys = virt_to_phys(address);

	return phys + LCA_DMA_WIN_BASE;
}
 
/*
 * Inverse of virt_to_bus().  Bus addresses below the DMA window
 * base map to NULL: this sanity check also preserves null
 * "pointers" across the round trip, which the NCR driver relies on
 * for simplicity.
 */
extern inline void * bus_to_virt(unsigned long address)
{
	return (address < LCA_DMA_WIN_BASE)
		? 0
		: phys_to_virt(address - LCA_DMA_WIN_BASE);
}
 
/*
* I/O functions:
*
* Unlike Jensen, the Noname machines have no concept of local
* I/O---everything goes over the PCI bus.
*
* There is plenty room for optimization here. In particular,
* the Alpha's insb/insw/extb/extw should be useful in moving
* data to/from the right byte-lanes.
*/
 
#define vuip volatile unsigned int *
 
/*
 * Read one byte from sparse PCI I/O space.  The 32-bit sparse read
 * returns all four byte lanes; shift the addressed lane down and
 * mask to 8 bits.
 */
extern inline unsigned int __inb(unsigned long addr)
{
	long lanes = *(vuip) ((addr << 5) + LCA_IO + 0x00);

	return 0xffUL & (lanes >> ((addr & 3) * 8));
}
 
/*
 * Write one byte to sparse PCI I/O space.  insbl places the byte
 * into the lane selected by the low address bits.  mb() orders the
 * store before subsequent accesses.
 *
 * BUGFIX: the output constraint was written "r=", which is invalid
 * extended-asm syntax -- the '=' modifier must precede the
 * constraint letter ("=r").
 */
extern inline void __outb(unsigned char b, unsigned long addr)
{
	unsigned int w;

	asm ("insbl %2,%1,%0" : "=r"(w) : "ri"(addr & 0x3), "r"(b));
	*(vuip) ((addr << 5) + LCA_IO + 0x00) = w;
	mb();
}
 
/*
 * Read one 16-bit word from sparse PCI I/O space (+0x08 = word
 * transfer length).  Shift the addressed lanes down and mask to 16
 * bits.
 */
extern inline unsigned int __inw(unsigned long addr)
{
	long lanes = *(vuip) ((addr << 5) + LCA_IO + 0x08);

	return 0xffffUL & (lanes >> ((addr & 3) * 8));
}
 
/*
 * Write one 16-bit word to sparse PCI I/O space (+0x08 = word
 * transfer length).  mb() orders the store before subsequent
 * accesses.
 *
 * BUGFIX: the output constraint was written "r=", which is invalid
 * extended-asm syntax -- the '=' modifier must precede the
 * constraint letter ("=r").
 */
extern inline void __outw(unsigned short b, unsigned long addr)
{
	unsigned int w;

	asm ("inswl %2,%1,%0" : "=r"(w) : "ri"(addr & 0x3), "r"(b));
	*(vuip) ((addr << 5) + LCA_IO + 0x08) = w;
	mb();
}
 
extern inline unsigned int __inl(unsigned long addr)
{
return *(vuip) ((addr << 5) + LCA_IO + 0x18);
}
 
extern inline void __outl(unsigned int b, unsigned long addr)
{
*(vuip) ((addr << 5) + LCA_IO + 0x18) = b;
mb();
}
 
 
/*
* Memory functions. 64-bit and 32-bit accesses are done through
* dense memory space, everything else through sparse space.
*/
/*
 * Byte read from sparse memory space.  Addresses at or above 16MB need
 * the HAE (Hardware Address Extension) register updated to select the
 * right region; hae.cache avoids redundant (presumably slow) HAE writes.
 * NOTE(review): callers apparently must serialize HAE usage -- confirm
 * against the callers' locking.
 */
extern inline unsigned long __readb(unsigned long addr)
{
	unsigned long result, shift, msb;

	shift = (addr & 0x3) * 8;	/* byte lane within the longword */
	if (addr >= (1UL << 24)) {
		msb = addr & 0xf8000000;
		addr -= msb;		/* keep only the offset below the HAE window */
		if (msb != hae.cache) {
			set_hae(msb);
		}
	}
	result = *(vuip) ((addr << 5) + LCA_SPARSE_MEM + 0x00);
	result >>= shift;
	return 0xffUL & result;
}
 
/*
 * Word (16-bit) read from sparse memory space; same HAE handling as
 * __readb, +0x08 selects word-sized transfers.
 */
extern inline unsigned long __readw(unsigned long addr)
{
	unsigned long result, shift, msb;

	shift = (addr & 0x3) * 8;	/* byte lane within the longword */
	if (addr >= (1UL << 24)) {
		msb = addr & 0xf8000000;
		addr -= msb;		/* keep only the offset below the HAE window */
		if (msb != hae.cache) {
			set_hae(msb);
		}
	}
	result = *(vuip) ((addr << 5) + LCA_SPARSE_MEM + 0x08);
	result >>= shift;
	return 0xffffUL & result;
}
 
/* 32-bit reads go through dense space: no shifting and no HAE needed. */
extern inline unsigned long __readl(unsigned long addr)
{
	return *(vuip) (LCA_DENSE_MEM + addr);
}
 
/*
 * Byte write to sparse memory space, updating the HAE register first
 * when the target is at or above 16MB (same scheme as __readb).
 *
 * Fix: output constraint "r=" corrected to "=r" (modifier must come
 * first; modern gcc rejects the old spelling).
 */
extern inline void __writeb(unsigned char b, unsigned long addr)
{
	unsigned long msb;
	unsigned int w;

	if (addr >= (1UL << 24)) {
		msb = addr & 0xf8000000;
		addr -= msb;		/* keep only the offset below the HAE window */
		if (msb != hae.cache) {
			set_hae(msb);
		}
	}
	asm ("insbl %2,%1,%0" : "=r"(w) : "ri"(addr & 0x3), "r"(b));
	*(vuip) ((addr << 5) + LCA_SPARSE_MEM + 0x00) = w;
}
 
/*
 * Word (16-bit) write to sparse memory space, with HAE update as in
 * __writeb.
 *
 * Fix: output constraint "r=" corrected to "=r" (modifier must come
 * first; modern gcc rejects the old spelling).
 */
extern inline void __writew(unsigned short b, unsigned long addr)
{
	unsigned long msb;
	unsigned int w;

	if (addr >= (1UL << 24)) {
		msb = addr & 0xf8000000;
		addr -= msb;		/* keep only the offset below the HAE window */
		if (msb != hae.cache) {
			set_hae(msb);
		}
	}
	asm ("inswl %2,%1,%0" : "=r"(w) : "ri"(addr & 0x3), "r"(b));
	*(vuip) ((addr << 5) + LCA_SPARSE_MEM + 0x08) = w;
}
 
extern inline void __writel(unsigned int b, unsigned long addr)
{
*(vuip) (addr + LCA_DENSE_MEM) = b;
}
 
/*
* Most of the above have so much overhead that it probably doesn't
* make sense to have them inlined (better icache behavior).
*/
 
/*
 * For compile-time-constant port numbers the address arithmetic folds
 * away, so inline directly; otherwise call the out-of-line _inb/_outb
 * for better icache behavior (see the comment above).
 */
#define inb(port) \
(__builtin_constant_p((port))?__inb(port):_inb(port))

#define outb(x, port) \
(__builtin_constant_p((port))?__outb((x),(port)):_outb((x),(port)))

#define readl(a)	__readl((unsigned long)(a))
#define writel(v,a)	__writel((v),(unsigned long)(a))

#undef vuip
 
extern unsigned long lca_init (unsigned long mem_start, unsigned long mem_end);
 
#endif /* __KERNEL__ */
 
/*
* Data structure for handling LCA machine checks. Correctable errors
* result in a short logout frame, uncorrectable ones in a long one.
*/
/* Logout frame written by PALcode for a correctable (short) machine check. */
struct el_lca_mcheck_short {
	struct el_common	h;		/* common logout header */
	unsigned long		esr;		/* error-status register */
	unsigned long		ear;		/* error-address register */
	unsigned long		dc_stat;	/* dcache status register */
	unsigned long		ioc_stat0;	/* I/O controller status register 0 */
	unsigned long		ioc_stat1;	/* I/O controller status register 1 */
};

/* Logout frame for an uncorrectable (long) machine check. */
struct el_lca_mcheck_long {
	struct el_common	h;		/* common logout header */
	unsigned long		pt[31];		/* PAL temps */
	unsigned long		exc_addr;	/* exception address */
	unsigned long		pad1[3];
	unsigned long		pal_base;	/* PALcode base address */
	unsigned long		hier;		/* hw interrupt enable */
	unsigned long		hirr;		/* hw interrupt request */
	unsigned long		mm_csr;		/* MMU control & status */
	unsigned long		dc_stat;	/* data cache status */
	unsigned long		dc_addr;	/* data cache addr register */
	unsigned long		abox_ctl;	/* address box control register */
	unsigned long		esr;		/* error status register */
	unsigned long		ear;		/* error address register */
	unsigned long		car;		/* cache control register */
	unsigned long		ioc_stat0;	/* I/O controller status register 0 */
	unsigned long		ioc_stat1;	/* I/O controller status register 1 */
	unsigned long		va;		/* virtual address register */
};

/* Convenient views of a logout area before the frame type is known. */
union el_lca {
	struct el_common *		c;	/* common logout header */
	struct el_lca_mcheck_long *	l;	/* uncorrectable frame */
	struct el_lca_mcheck_short *	s;	/* correctable frame */
};
 
/* ISA-style real-time clock access (index/data port pair at 0x70). */
#define RTC_PORT(x)	(0x70 + (x))
#define RTC_ADDR(x)	(0x80 | (x))	/* NOTE(review): bit 7 presumably keeps NMI disabled -- confirm */
#define RTC_ALWAYS_BCD	0
 
#endif /* __ALPHA_LCA__H__ */
/trunk/rc203soc/sw/uClinux/include/asm-alpha/param.h
0,0 → 1,20
#ifndef _ASM_ALPHA_PARAM_H
#define _ASM_ALPHA_PARAM_H
 
#ifndef HZ
# define HZ 1024
#endif
 
#define EXEC_PAGESIZE 8192
 
#ifndef NGROUPS
#define NGROUPS 32
#endif
 
#ifndef NOGROUP
#define NOGROUP (-1)
#endif
 
#define MAXHOSTNAMELEN 64 /* max length of hostname */
 
#endif /* _ASM_ALPHA_PARAM_H */
/trunk/rc203soc/sw/uClinux/include/asm-alpha/bitops.h
0,0 → 1,163
#ifndef _ALPHA_BITOPS_H
#define _ALPHA_BITOPS_H
 
/*
* Copyright 1994, Linus Torvalds.
*/
 
/*
* These have to be done with inline assembly: that way the bit-setting
* is guaranteed to be atomic. All bit operations return 0 if the bit
* was cleared before the operation and != 0 if it was not.
*
* bit 0 is the LSB of addr; bit 64 is the LSB of (addr+1).
*/
 
/*
 * Atomically set bit 'nr' and return nonzero iff it was already set.
 * Uses a load-locked/store-conditional (ldl_l/stl_c) loop on the
 * containing 32-bit word: if the bit is already set (bne to 2:) the
 * store is skipped entirely; otherwise xor sets it and a failed stl_c
 * (beq back to 1:) retries on contention.
 */
extern __inline__ unsigned long set_bit(unsigned long nr, void * addr)
{
	unsigned long oldbit;
	unsigned long temp;
	unsigned int * m = ((unsigned int *) addr) + (nr >> 5);	/* word holding the bit */

	__asm__ __volatile__(
		"\n1:\t"
		"ldl_l %0,%1\n\t"
		"and %0,%3,%2\n\t"
		"bne %2,2f\n\t"
		"xor %0,%3,%0\n\t"
		"stl_c %0,%1\n\t"
		"beq %0,1b\n"
		"2:"
		:"=&r" (temp),
		"=m" (*m),
		"=&r" (oldbit)
		:"Ir" (1UL << (nr & 31)),
		"m" (*m));
	return oldbit != 0;
}
 
/*
 * Atomically clear bit 'nr' and return nonzero iff it was previously
 * set.  Mirror image of set_bit: if the bit is already clear
 * (beq to 2:) the store is skipped; otherwise xor clears it and the
 * stl_c retries on contention.
 */
extern __inline__ unsigned long clear_bit(unsigned long nr, void * addr)
{
	unsigned long oldbit;
	unsigned long temp;
	unsigned int * m = ((unsigned int *) addr) + (nr >> 5);	/* word holding the bit */

	__asm__ __volatile__(
		"\n1:\t"
		"ldl_l %0,%1\n\t"
		"and %0,%3,%2\n\t"
		"beq %2,2f\n\t"
		"xor %0,%3,%0\n\t"
		"stl_c %0,%1\n\t"
		"beq %0,1b\n"
		"2:"
		:"=&r" (temp),
		"=m" (*m),
		"=&r" (oldbit)
		:"Ir" (1UL << (nr & 31)),
		"m" (*m));
	return oldbit != 0;
}
 
/*
 * Atomically toggle bit 'nr' and return nonzero iff it was set before.
 * Unlike set/clear, the toggle is unconditional, so there is no skip
 * branch: oldbit is sampled (and) before the xor flips the bit.
 */
extern __inline__ unsigned long change_bit(unsigned long nr, void * addr)
{
	unsigned long oldbit;
	unsigned long temp;
	unsigned int * m = ((unsigned int *) addr) + (nr >> 5);	/* word holding the bit */

	__asm__ __volatile__(
		"\n1:\t"
		"ldl_l %0,%1\n\t"
		"and %0,%3,%2\n\t"
		"xor %0,%3,%0\n\t"
		"stl_c %0,%1\n\t"
		"beq %0,1b\n"
		:"=&r" (temp),
		"=m" (*m),
		"=&r" (oldbit)
		:"Ir" (1UL << (nr & 31)),
		"m" (*m));
	return oldbit != 0;
}
 
/* Non-atomically test bit 'nr'; returns 1 if set, 0 if clear. */
extern __inline__ unsigned long test_bit(int nr, const void * addr)
{
	const int * word = (const int *) addr;

	return (word[nr >> 5] >> (nr & 31)) & 1UL;
}
 
/*
* ffz = Find First Zero in word. Undefined if no zero exists,
* so code should check against ~0UL first..
*
* Do a binary search on the bits. Due to the nature of large
* constants on the alpha, it is worthwhile to split the search.
*/
/*
 * Position of the first zero bit in the low byte of x (undefined if
 * none).  ~x & (x + 1) isolates the lowest clear bit as a single set
 * bit, whose position is then read off with three mask tests.
 */
extern inline unsigned long ffz_b(unsigned long x)
{
	unsigned long bit = ~x & (x + 1);	/* lowest clear bit, isolated */
	unsigned long pos = 0;

	if (bit & 0xF0) pos += 4;
	if (bit & 0xCC) pos += 2;
	if (bit & 0xAA) pos += 1;

	return pos;
}
 
/*
 * Find first zero bit in a 64-bit word (undefined for ~0UL).
 * cmpbge against ~0UL yields one bit per byte that is all-ones, so
 * ffz_b on that map picks the first byte containing a zero; extbl
 * pulls that byte out and a second ffz_b finds the zero within it.
 */
extern inline unsigned long ffz(unsigned long word)
{
	unsigned long bits, qofs, bofs;

	__asm__("cmpbge %1,%2,%0" : "=r"(bits) : "r"(word), "r"(~0UL));
	qofs = ffz_b(bits);		/* index of first non-all-ones byte */
	__asm__("extbl %1,%2,%0" : "=r"(bits) : "r"(word), "r"(qofs));
	bofs = ffz_b(bits);		/* zero bit within that byte */

	return qofs*8 + bofs;
}
 
/*
* Find next zero bit in a bitmap reasonably efficiently..
*/
/*
 * Find the first zero bit at or after 'offset' in a bitmap of 'size'
 * bits.  Returns 'size' when offset is already past the end.
 * NOTE(review): if no zero exists below 'size' the result comes from
 * ffz(~0UL), which is undefined -- callers presumably compare the
 * return value against 'size'; confirm at the call sites.
 */
extern inline unsigned long find_next_zero_bit(void * addr, unsigned long size, unsigned long offset)
{
	unsigned long * p = ((unsigned long *) addr) + (offset >> 6);	/* first 64-bit word to scan */
	unsigned long result = offset & ~63UL;	/* bit index of that word's bit 0 */
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 63UL;
	if (offset) {
		/* Partial first word: force the bits below 'offset' to ones. */
		tmp = *(p++);
		tmp |= ~0UL >> (64-offset);
		if (size < 64)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 64;
		result += 64;
	}
	/* Scan full words until one contains a zero bit. */
	while (size & ~63UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 64;
		size -= 64;
	}
	if (!size)
		return result;
	tmp = *p;
found_first:
	tmp |= ~0UL << size;	/* mask out bits beyond 'size' so they don't match */
found_middle:
	return result + ffz(tmp);
}
 
/*
* The optimizer actually does good code for this case..
*/
#define find_first_zero_bit(addr, size) \
find_next_zero_bit((addr), (size), 0)
 
#endif /* _ALPHA_BITOPS_H */
/trunk/rc203soc/sw/uClinux/include/asm-alpha/bugs.h
0,0 → 1,20
/*
* include/asm-alpha/bugs.h
*
* Copyright (C) 1994 Linus Torvalds
*/
 
/*
* This is included by init/main.c to check for architecture-dependent bugs.
*
* Needs:
* void check_bugs(void);
*/
 
/*
* I don't know of any alpha bugs yet.. Nice chip
*/
 
/* Architecture bug check run from init/main.c -- no known alpha bugs. */
static void check_bugs(void)
{
	/* intentionally empty */
}
/trunk/rc203soc/sw/uClinux/include/asm-alpha/dma.h
0,0 → 1,322
/*
* include/asm-alpha/dma.h
*
* This is essentially the same as the i386 DMA stuff, as the AlphaPCs
* use ISA-compatible dma. The only extension is support for high-page
* registers that allow to set the top 8 bits of a 32-bit DMA address.
* This register should be written last when setting up a DMA address
* as this will also enable DMA across 64 KB boundaries.
*/
 
/* $Id: dma.h,v 1.1 2005-12-20 11:20:29 jcastillo Exp $
* linux/include/asm/dma.h: Defines for using and allocating dma channels.
* Written by Hennus Bergman, 1992.
* High DMA channel support & info by Hannu Savolainen
* and John Boyd, Nov. 1992.
*/
 
#ifndef _ASM_DMA_H
#define _ASM_DMA_H
 
#include <linux/config.h>
 
#include <asm/io.h> /* need byte IO */
 
#define dma_outb outb
#define dma_inb inb
 
/*
* NOTES about DMA transfers:
*
* controller 1: channels 0-3, byte operations, ports 00-1F
* controller 2: channels 4-7, word operations, ports C0-DF
*
* - ALL registers are 8 bits only, regardless of transfer size
* - channel 4 is not used - cascades 1 into 2.
* - channels 0-3 are byte - addresses/counts are for physical bytes
* - channels 5-7 are word - addresses/counts are for physical words
* - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries
* - transfer count loaded to registers is 1 less than actual count
* - controller 2 offsets are all even (2x offsets for controller 1)
* - page registers for 5-7 don't use data bit 0, represent 128K pages
* - page registers for 0-3 use bit 0, represent 64K pages
*
* DMA transfers are limited to the lower 16MB of _physical_ memory.
* Note that addresses loaded into registers must be _physical_ addresses,
* not logical addresses (which may differ if paging is active).
*
* Address mapping for channels 0-3:
*
* A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses)
* | ... | | ... | | ... |
* | ... | | ... | | ... |
* | ... | | ... | | ... |
* P7 ... P0 A7 ... A0 A7 ... A0
* | Page | Addr MSB | Addr LSB | (DMA registers)
*
* Address mapping for channels 5-7:
*
* A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses)
* | ... | \ \ ... \ \ \ ... \ \
* | ... | \ \ ... \ \ \ ... \ (not used)
* | ... | \ \ ... \ \ \ ... \
* P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0
* | Page | Addr MSB | Addr LSB | (DMA registers)
*
* Again, channels 5-7 transfer _physical_ words (16 bits), so addresses
* and counts _must_ be word-aligned (the lowest address bit is _ignored_ at
* the hardware level, so odd-byte transfers aren't possible).
*
* Transfer count (_not # bytes_) is limited to 64K, represented as actual
* count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more,
* and up to 128K bytes may be transferred on channels 5-7 in one operation.
*
*/
 
#define MAX_DMA_CHANNELS 8
 
#if defined(CONFIG_ALPHA_RUFFIAN)
#define MAX_DMA_ADDRESS (0xfffffc0001000000UL) /* yup, 16Mb :-( */
#elif defined(CONFIG_ALPHA_XL)
/* The maximum address that we can perform a DMA transfer to on Alpha XL,
due to a hardware SIO (PCI<->ISA bus bridge) chip limitation, is 64MB.
See <asm/apecs.h> for more info.
*/
/* NOTE: we must define the maximum as something less than 64Mb, to prevent
virt_to_bus() from returning an address in the first window, for a
data area that goes beyond the 64Mb first DMA window. Sigh...
We MUST coordinate the maximum with <asm/apecs.h> for consistency.
For now, this limit is set to 48Mb...
*/
#define MAX_DMA_ADDRESS (0xfffffc0003000000UL)
#else
/*
* The maximum address that we can perform a DMA transfer to on
* normal Alpha platforms
*/
#define MAX_DMA_ADDRESS (~0UL)
#endif
 
/* 8237 DMA controllers */
#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */
#define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */
 
/* DMA controller registers */
#define DMA1_CMD_REG 0x08 /* command register (w) */
#define DMA1_STAT_REG 0x08 /* status register (r) */
#define DMA1_REQ_REG 0x09 /* request register (w) */
#define DMA1_MASK_REG 0x0A /* single-channel mask (w) */
#define DMA1_MODE_REG 0x0B /* mode register (w) */
#define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */
#define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */
#define DMA1_RESET_REG 0x0D /* Master Clear (w) */
#define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */
#define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */
#define DMA1_EXT_MODE_REG (0x400 | DMA1_MODE_REG)
 
#define DMA2_CMD_REG 0xD0 /* command register (w) */
#define DMA2_STAT_REG 0xD0 /* status register (r) */
#define DMA2_REQ_REG 0xD2 /* request register (w) */
#define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */
#define DMA2_MODE_REG 0xD6 /* mode register (w) */
#define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */
#define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */
#define DMA2_RESET_REG 0xDA /* Master Clear (w) */
#define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */
#define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */
#define DMA2_EXT_MODE_REG (0x400 | DMA2_MODE_REG)
 
#define DMA_ADDR_0 0x00 /* DMA address registers */
#define DMA_ADDR_1 0x02
#define DMA_ADDR_2 0x04
#define DMA_ADDR_3 0x06
#define DMA_ADDR_4 0xC0
#define DMA_ADDR_5 0xC4
#define DMA_ADDR_6 0xC8
#define DMA_ADDR_7 0xCC
 
#define DMA_CNT_0 0x01 /* DMA count registers */
#define DMA_CNT_1 0x03
#define DMA_CNT_2 0x05
#define DMA_CNT_3 0x07
#define DMA_CNT_4 0xC2
#define DMA_CNT_5 0xC6
#define DMA_CNT_6 0xCA
#define DMA_CNT_7 0xCE
 
#define DMA_PAGE_0 0x87 /* DMA page registers */
#define DMA_PAGE_1 0x83
#define DMA_PAGE_2 0x81
#define DMA_PAGE_3 0x82
#define DMA_PAGE_5 0x8B
#define DMA_PAGE_6 0x89
#define DMA_PAGE_7 0x8A
 
/*
 * High-page registers: set bits 24..31 of the DMA address; writing
 * them also enables transfers across 64KB boundaries (write last).
 * NOTE(review): DMA_HIPAGE_4 expands to DMA_PAGE_4, which is never
 * defined above (channel 4 is the cascade channel and has no page
 * register) -- any actual use of DMA_HIPAGE_4 will fail to compile.
 */
#define DMA_HIPAGE_0 (0x400 | DMA_PAGE_0)
#define DMA_HIPAGE_1 (0x400 | DMA_PAGE_1)
#define DMA_HIPAGE_2 (0x400 | DMA_PAGE_2)
#define DMA_HIPAGE_3 (0x400 | DMA_PAGE_3)
#define DMA_HIPAGE_4 (0x400 | DMA_PAGE_4)
#define DMA_HIPAGE_5 (0x400 | DMA_PAGE_5)
#define DMA_HIPAGE_6 (0x400 | DMA_PAGE_6)
#define DMA_HIPAGE_7 (0x400 | DMA_PAGE_7)
 
#define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */
#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */
#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */
 
/* enable/disable a specific DMA channel */
/* Enable a DMA channel: write its number with the mask bit (bit 2) clear. */
static __inline__ void enable_dma(unsigned int dmanr)
{
	unsigned int mask_reg = (dmanr <= 3) ? DMA1_MASK_REG : DMA2_MASK_REG;

	dma_outb(dmanr & 3, mask_reg);
}
 
/* Disable a DMA channel: write its number with the mask bit (bit 2) set. */
static __inline__ void disable_dma(unsigned int dmanr)
{
	unsigned int mask_reg = (dmanr <= 3) ? DMA1_MASK_REG : DMA2_MASK_REG;

	dma_outb((dmanr & 3) | 4, mask_reg);
}
 
/* Clear the 'DMA Pointer Flip Flop'.
* Write 0 for LSB/MSB, 1 for MSB/LSB access.
* Use this once to initialize the FF to a known state.
* After that, keep track of it. :-)
* --- In order to do that, the DMA routines below should ---
* --- only be used while interrupts are disabled! ---
*/
/*
 * Clear the 'DMA Pointer Flip Flop' of the controller owning the
 * channel; any write to the clear-FF port resets it to LSB-first.
 * Use once to put the FF in a known state, then keep track of it
 * (only safe with interrupts disabled -- see the comment above).
 */
static __inline__ void clear_dma_ff(unsigned int dmanr)
{
	dma_outb(0, (dmanr <= 3) ? DMA1_CLEAR_FF_REG : DMA2_CLEAR_FF_REG);
}
 
/* set mode (above) for a specific DMA channel */
/* Program the transfer mode (DMA_MODE_*) for a specific DMA channel. */
static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
{
	unsigned int mode_reg = (dmanr <= 3) ? DMA1_MODE_REG : DMA2_MODE_REG;

	dma_outb(mode | (dmanr & 3), mode_reg);
}
 
/* set extended mode for a specific DMA channel */
/* Program the extended-mode register for a specific DMA channel. */
static __inline__ void set_dma_ext_mode(unsigned int dmanr, char ext_mode)
{
	unsigned int ext_reg = (dmanr <= 3) ? DMA1_EXT_MODE_REG : DMA2_EXT_MODE_REG;

	dma_outb(ext_mode | (dmanr & 3), ext_reg);
}
 
/* Set only the page register bits of the transfer address.
* This is used for successive transfers when we know the contents of
* the lower 16 bits of the DMA current address register.
*/
/* Program low- and high-page registers for the given channel.  The
 * high-page write comes second: it enables addressing beyond 16MB /
 * across 64KB boundaries.  Channel 4 (cascade) has no page register
 * and is deliberately absent from the switch. */
static __inline__ void set_dma_page(unsigned int dmanr, unsigned int pagenr)
{
	switch(dmanr) {
		case 0:
			dma_outb(pagenr, DMA_PAGE_0);
			dma_outb((pagenr >> 8), DMA_HIPAGE_0);
			break;
		case 1:
			dma_outb(pagenr, DMA_PAGE_1);
			dma_outb((pagenr >> 8), DMA_HIPAGE_1);
			break;
		case 2:
			dma_outb(pagenr, DMA_PAGE_2);
			dma_outb((pagenr >> 8), DMA_HIPAGE_2);
			break;
		case 3:
			dma_outb(pagenr, DMA_PAGE_3);
			dma_outb((pagenr >> 8), DMA_HIPAGE_3);
			break;
		case 5:
			/* word channels: page bit 0 unused (128K pages) */
			dma_outb(pagenr & 0xfe, DMA_PAGE_5);
			dma_outb((pagenr >> 8), DMA_HIPAGE_5);
			break;
		case 6:
			dma_outb(pagenr & 0xfe, DMA_PAGE_6);
			dma_outb((pagenr >> 8), DMA_HIPAGE_6);
			break;
		case 7:
			dma_outb(pagenr & 0xfe, DMA_PAGE_7);
			dma_outb((pagenr >> 8), DMA_HIPAGE_7);
			break;
	}
}
 
 
/* Set transfer address & page bits for specific DMA channel.
* Assumes dma flipflop is clear.
*/
static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
{
	if (dmanr <= 3) {
		/* Two writes to the same port: the flip-flop routes LSB then MSB. */
		dma_outb( a & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
		dma_outb( (a>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
	} else {
		/* Word channels (5-7) address physical words, so bit 0 is dropped. */
		dma_outb( (a>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
		dma_outb( (a>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
	}
	set_dma_page(dmanr, a>>16);	/* set hipage last to enable 32-bit mode */
}
 
 
/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for
* a specific DMA channel.
* You must ensure the parameters are valid.
* NOTE: from a manual: "the number of transfers is one more
* than the initial word count"! This is taken into account.
* Assumes dma flip-flop is clear.
* NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
*/
static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
{
	count--;	/* hardware counts "transfers - 1" */
	if (dmanr <= 3) {
		/* Byte channels: flip-flop routes count LSB then MSB. */
		dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
		dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
	} else {
		/* Word channels: convert byte count to word count first. */
		dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
		dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
	}
}
 
 
/* Get DMA residue count. After a DMA transfer, this
* should return zero. Reading this while a DMA transfer is
* still in progress will return unpredictable results.
* If called before the channel has been used, it may return 1.
* Otherwise, it returns the number of _bytes_ left to transfer.
*
* Assumes DMA flip-flop is clear.
*/
static __inline__ int get_dma_residue(unsigned int dmanr)
{
	/* Count register of the channel (same port read twice: LSB, MSB). */
	unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE
		: ((dmanr&3)<<2) + 2 + IO_DMA2_BASE;

	/* using short to get 16-bit wrap around */
	unsigned short count;

	count = 1 + dma_inb(io_port);	/* +1 undoes the "count - 1" encoding */
	count += dma_inb(io_port) << 8;
	return (dmanr<=3)? count : (count<<1);	/* word channels: words -> bytes */
}
 
 
/* These are in kernel/dma.c: */
extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */
extern void free_dma(unsigned int dmanr); /* release it again */
 
 
#endif /* _ASM_DMA_H */
/trunk/rc203soc/sw/uClinux/include/asm-alpha/mmu_context.h
0,0 → 1,90
#ifndef __ALPHA_MMU_CONTEXT_H
#define __ALPHA_MMU_CONTEXT_H
 
/*
* get a new mmu context..
*
* Copyright (C) 1996, Linus Torvalds
*/
 
#include <linux/config.h>
#include <asm/system.h>
 
/*
* The maximum ASN's the processor supports. On the EV4 this is 63
* but the PAL-code doesn't actually use this information. On the
* EV5 this is 127.
*
* On the EV4, the ASNs are more-or-less useless anyway, as they are
* only used as a icache tag, not for TB entries. On the EV5 ASN's
* also validate the TB entries, and thus make a lot more sense.
*
* The EV4 ASN's don't even match the architecture manual, ugh. And
* I quote: "If a processor implements address space numbers (ASNs),
* and the old PTE has the Address Space Match (ASM) bit clear (ASNs
* in use) and the Valid bit set, then entries can also effectively be
* made coherent by assigning a new, unused ASN to the currently
* running process and not reusing the previous ASN before calling the
* appropriate PALcode routine to invalidate the translation buffer
* (TB)".
*
* In short, the EV4 has a "kind of" ASN capability, but it doesn't actually
* work correctly and can thus not be used (explaining the lack of PAL-code
* support).
*/
#ifdef CONFIG_ALPHA_EV5
#define MAX_ASN 127
#else
#define MAX_ASN 63
#define BROKEN_ASN 1
#endif
 
extern unsigned long asn_cache;
 
#define ASN_VERSION_SHIFT 16
#define ASN_VERSION_MASK ((~0UL) << ASN_VERSION_SHIFT)
#define ASN_FIRST_VERSION (1UL << ASN_VERSION_SHIFT)
 
/*
 * Hand 'mm' a fresh ASN.  'asn' is the current asn_cache value:
 * version bits in the high part (ASN_VERSION_MASK), ASN number in the
 * low part.  When the ASN numbers are exhausted, flush the user TLB
 * (tbiap) and bump the version, which implicitly invalidates every
 * other process's cached ASN.
 */
extern inline void get_new_mmu_context(struct task_struct *p,
	struct mm_struct *mm,
	unsigned long asn)
{
	/* check if it's legal.. */
	if ((asn & ~ASN_VERSION_MASK) > MAX_ASN) {
		/* start a new version, invalidate all old asn's */
		tbiap(); imb();
		asn = (asn & ASN_VERSION_MASK) + ASN_FIRST_VERSION;
		if (!asn)	/* version counter wrapped: 0 is reserved as "invalid" */
			asn = ASN_FIRST_VERSION;
	}
	asn_cache = asn + 1;
	mm->context = asn;			/* full version + asn */
	p->tss.asn = asn & ~ASN_VERSION_MASK;	/* just asn */
}
 
/*
* NOTE! The way this is set up, the high bits of the "asn_cache" (and
* the "mm->context") are the ASN _version_ code. A version of 0 is
* always considered invalid, so to invalidate another process you only
* need to do "p->mm->context = 0".
*
* If we need more ASN's than the processor has, we invalidate the old
* user TLB's (tbiap()) and start a new ASN version. That will automatically
* force a new asn for any other processes the next time they want to
* run.
*/
/*
 * Ensure task 'p' has an ASN of the current version before it runs.
 * On EV4 (BROKEN_ASN) this is a no-op, since its ASNs are unusable
 * (see the comment at the top of this file).
 */
extern inline void get_mmu_context(struct task_struct *p)
{
#ifndef BROKEN_ASN
	struct mm_struct * mm = p->mm;

	if (mm) {
		unsigned long asn = asn_cache;
		/* Check if our ASN is of an older version and thus invalid */
		if ((mm->context ^ asn) & ASN_VERSION_MASK)
			get_new_mmu_context(p, mm, asn);
	}
#endif
}
 
#endif
/trunk/rc203soc/sw/uClinux/include/asm-alpha/checksum.h
0,0 → 1,69
#ifndef _ALPHA_CHECKSUM_H
#define _ALPHA_CHECKSUM_H
 
 
/*
* This is a version of ip_compute_csum() optimized for IP headers,
* which always checksum on 4 octet boundaries.
*/
extern unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl);
 
/*
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
extern unsigned short int csum_tcpudp_magic(unsigned long saddr,
unsigned long daddr,
unsigned short len,
unsigned short proto,
unsigned int sum);
 
/*
* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
*
* returns a 32-bit number suitable for feeding into itself
* or csum_tcpudp_magic
*
* this function must be called with even lengths, except
* for the last fragment, which may be odd
*
* it's best to have buff aligned on a 32-bit boundary
*/
extern unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
 
/*
* the same as csum_partial, but copies from src while it
* checksums
*
* here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary
*/
unsigned int csum_partial_copy( const char *src, char *dst, int len, int sum);
 
/*
* the same as csum_partial, but copies from user space (but on the alpha
* we have just one address space, so this is identical to the above)
*/
#define csum_partial_copy_fromuser csum_partial_copy
 
 
/*
* this routine is used for miscellaneous IP-like checksums, mainly
* in icmp.c
*/
 
extern unsigned short ip_compute_csum(unsigned char * buff, int len);
 
/*
* Fold a partial checksum without adding pseudo headers
*/
 
/*
 * Fold a 32-bit partial checksum down to 16 bits and complement it.
 * Two folding rounds are enough: the first can carry out at most once.
 */
static inline unsigned short csum_fold(unsigned int sum)
{
	unsigned int folded = (sum >> 16) + (sum & 0xffff);

	folded = (folded >> 16) + (folded & 0xffff);
	return ~folded;
}
 
#endif
/trunk/rc203soc/sw/uClinux/include/asm-alpha/statfs.h
0,0 → 1,25
#ifndef _ALPHA_STATFS_H
#define _ALPHA_STATFS_H
 
#ifndef __KERNEL_STRICT_NAMES
 
#include <linux/types.h>
 
typedef __kernel_fsid_t fsid_t;
 
#endif
 
/* Filesystem statistics as returned by statfs(2)/fstatfs(2) on alpha. */
struct statfs {
	int	f_type;			/* filesystem type magic */
	int	f_bsize;		/* optimal transfer block size */
	int	f_blocks;		/* total data blocks */
	int	f_bfree;		/* free blocks */
	int	f_bavail;		/* free blocks available to unprivileged users */
	int	f_files;		/* total inodes */
	int	f_ffree;		/* free inodes */
	__kernel_fsid_t f_fsid;		/* filesystem id */
	int	f_namelen;		/* maximum filename length */
	int	f_spare[6];		/* padding for future expansion */
};
 
#endif
/trunk/rc203soc/sw/uClinux/include/asm-alpha/unistd.h
0,0 → 1,326
#ifndef _ALPHA_UNISTD_H
#define _ALPHA_UNISTD_H
 
#define __NR_exit 1
#define __NR_fork 2
#define __NR_read 3
#define __NR_write 4
#define __NR_close 6
#define __NR_wait4 7
#define __NR_link 9
#define __NR_unlink 10
#define __NR_chdir 12
#define __NR_fchdir 13
#define __NR_mknod 14
#define __NR_chmod 15
#define __NR_chown 16
#define __NR_brk 17
#define __NR_lseek 19
#define __NR_getxpid 20
#define __NR_setuid 23
#define __NR_getxuid 24
#define __NR_ptrace 26
#define __NR_access 33
#define __NR_sync 36
#define __NR_kill 37
#define __NR_setpgid 39
#define __NR_dup 41
#define __NR_pipe 42
#define __NR_open 45
#define __NR_getxgid 47
#define __NR_acct 51
#define __NR_sigpending 52
#define __NR_ioctl 54
#define __NR_symlink 57
#define __NR_readlink 58
#define __NR_execve 59
#define __NR_umask 60
#define __NR_chroot 61
#define __NR_getpgrp 63
#define __NR_getpagesize 64
#define __NR_stat 67
#define __NR_lstat 68
#define __NR_mmap 71 /* OSF/1 mmap is superset of Linux */
#define __NR_munmap 73
#define __NR_mprotect 74
#define __NR_madvise 75
#define __NR_vhangup 76
#define __NR_getgroups 79
#define __NR_setgroups 80
#define __NR_setpgrp 82 /* BSD alias for setpgid */
#define __NR_setitimer 83
#define __NR_getitimer 86
#define __NR_gethostname 87
#define __NR_sethostname 88
#define __NR_getdtablesize 89
#define __NR_dup2 90
#define __NR_fstat 91
#define __NR_fcntl 92
#define __NR_select 93
#define __NR_fsync 95
#define __NR_setpriority 96
#define __NR_socket 97
#define __NR_connect 98
#define __NR_accept 99
#define __NR_getpriority 100
#define __NR_send 101
#define __NR_recv 102
#define __NR_sigreturn 103
#define __NR_bind 104
#define __NR_setsockopt 105
#define __NR_listen 106
#define __NR_sigsuspend 111
#define __NR_recvmsg 113
#define __NR_sendmsg 114
#define __NR_gettimeofday 116
#define __NR_getrusage 117
#define __NR_getsockopt 118
#define __NR_readv 120
#define __NR_writev 121
#define __NR_settimeofday 122
#define __NR_fchown 123
#define __NR_fchmod 124
#define __NR_recvfrom 125
#define __NR_setreuid 126
#define __NR_setregid 127
#define __NR_rename 128
#define __NR_truncate 129
#define __NR_ftruncate 130
#define __NR_flock 131
#define __NR_setgid 132
#define __NR_sendto 133
#define __NR_shutdown 134
#define __NR_socketpair 135
#define __NR_mkdir 136
#define __NR_rmdir 137
#define __NR_utimes 138
#define __NR_getpeername 141
#define __NR_getrlimit 144
#define __NR_setrlimit 145
#define __NR_setsid 147
#define __NR_quotactl 148
#define __NR_getsockname 150
#define __NR_sigaction 156
#define __NR_setdomainname 166
#define __NR_msgctl 200
#define __NR_msgget 201
#define __NR_msgrcv 202
#define __NR_msgsnd 203
#define __NR_semctl 204
#define __NR_semget 205
#define __NR_semop 206
#define __NR_shmctl 210
#define __NR_shmdt 211
#define __NR_shmget 212
 
#define __NR_msync 217
 
#define __NR_getpgid 233
#define __NR_getsid 234
 
#define __NR_sysfs 254
 
/*
* Linux-specific system calls begin at 300
*/
#define __NR_bdflush 300
#define __NR_sethae 301
#define __NR_mount 302
#define __NR_adjtimex 303
#define __NR_swapoff 304
#define __NR_getdents 305
#define __NR_create_module 306
#define __NR_init_module 307
#define __NR_delete_module 308
#define __NR_get_kernel_syms 309
#define __NR_syslog 310
#define __NR_reboot 311
#define __NR_clone 312
#define __NR_uselib 313
#define __NR_mlock 314
#define __NR_munlock 315
#define __NR_mlockall 316
#define __NR_munlockall 317
#define __NR_sysinfo 318
#define __NR__sysctl 319
#define __NR_idle 320
#define __NR_umount 321
#define __NR_swapon 322
#define __NR_times 323
#define __NR_personality 324
#define __NR_setfsuid 325
#define __NR_setfsgid 326
#define __NR_ustat 327
#define __NR_statfs 328
#define __NR_fstatfs 329
#define __NR_sched_setparam 330
#define __NR_sched_getparam 331
#define __NR_sched_setscheduler 332
#define __NR_sched_getscheduler 333
#define __NR_sched_yield 334
#define __NR_sched_get_priority_max 335
#define __NR_sched_get_priority_min 336
#define __NR_sched_rr_get_interval 337
#define __NR_afs_syscall 338
#define __NR_uname 339
#define __NR_nanosleep 340
#define __NR_mremap 341
#define __NR_nfsctl 342
#define __NR_setresuid 343
#define __NR_getresuid 344
#define __NR_pciconfig_read 345
#define __NR_pciconfig_write 346
 
#if defined(__LIBRARY__) && defined(__GNUC__)
 
/* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. */
/*
 * Zero-argument syscall stub generator.
 * Fix: the return statement carried an unbalanced extra ')' --
 * "return syscall(__NR_##name));" -- so every _syscall0() expansion
 * was a guaranteed syntax error.
 */
#define _syscall0(type, name) \
type name(void) \
{ \
	extern long syscall (int, ...); \
	return syscall(__NR_##name); \
}
 
/* 1..5-argument syscall stub generators: each expands to a function
 * that funnels its arguments through the C library's syscall(). */
#define _syscall1(type,name,type1,arg1) \
type name(type1 arg1) \
{ \
	extern long syscall (int, ...); \
	return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
type name(type1 arg1,type2 arg2) \
{ \
	extern long syscall (int, ...); \
	return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
type name(type1 arg1,type2 arg2,type3 arg3) \
{ \
	extern long syscall (int, ...); \
	return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
	extern long syscall (int, ...); \
	return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
	  type5,arg5) \
type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
	extern long syscall (int, ...); \
	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}
 
#endif /* __LIBRARY__ && __GNUC__ */
 
#ifdef __KERNEL_SYSCALLS__
 
#include <linux/string.h>
#include <linux/signal.h>
 
extern long __kernel_thread(unsigned long, int (*)(void *), void *);

/* Spawn a kernel thread running fn(arg); CLONE_VM is forced since
 * kernel threads share the kernel address space. */
static inline long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
	return __kernel_thread(flags | CLONE_VM, fn, arg);
}

extern void sys_idle(void);
/* Enter the idle loop via the idle system call. */
static inline void idle(void)
{
	sys_idle();
}

extern int sys_setup(void);
/* One-shot kernel initialization syscall wrapper. */
static inline int setup(void)
{
	return sys_setup();
}

extern int sys_open(const char *, int, int);
/* In-kernel open().  NOTE(review): the parameters are named
 * (name, mode, flags) but passed straight through, so the second
 * argument is presumably the open flags and the third the creation
 * mode -- the names look swapped; verify against sys_open. */
static inline int open(const char * name, int mode, int flags)
{
	return sys_open(name, mode, flags);
}

extern int sys_dup(int);
/* Duplicate a file descriptor. */
static inline int dup(int fd)
{
	return sys_dup(fd);
}
 
extern int sys_close(int);

/*
 * In-kernel close().  Fix: sys_close was the only sys_* function used
 * here without a prototype, relying on an implicit declaration (an
 * error in C99 and later); declare it like its siblings.
 */
static inline int close(int fd)
{
	return sys_close(fd);
}
 
extern int sys_exit(int);
/* Terminate the current task with the given exit code. */
static inline int _exit(int value)
{
	return sys_exit(value);
}

#define exit(x) _exit(x)

extern int sys_write(int, const char *, int);
/* In-kernel write(2); returns the count written (negative on error, presumably -errno). */
static inline int write(int fd, const char * buf, int nr)
{
	return sys_write(fd, buf, nr);
}

extern int sys_read(unsigned int, char *, int);
/* In-kernel read(2); returns the count read (negative on error, presumably -errno). */
static inline int read(unsigned int fd, char * buf, int nr)
{
	return sys_read(fd, buf, nr);
}
 
extern int do_execve(char *, char **, char **, struct pt_regs *);
extern void ret_from_sys_call(void);
/*
 * In-kernel execve.  Builds a zeroed pt_regs frame on the stack and,
 * if do_execve succeeds, switches to it by hand: $30 (sp) is pointed
 * at the frame, $26 (ra) at ret_from_sys_call, and the ret "returns"
 * into the freshly exec'ed context -- so on success this never
 * returns.  On failure it returns -1 (do_execve's error code is
 * discarded).
 */
static inline int execve(char * file, char ** argvp, char ** envp)
{
	int i;
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));
	i = do_execve(file, argvp, envp, &regs);
	if (!i) {
		__asm__ __volatile__("bis %0,%0,$30\n\t"
			"bis %1,%1,$26\n\t"
			"ret $31,($26),1\n\t"
			: :"r" (&regs), "r" (ret_from_sys_call));
	}
	return -1;
}
 
extern int sys_setsid(void);
/* Start a new session (no controlling terminal). */
static inline int setsid(void)
{
	return sys_setsid();
}

extern int sys_sync(void);
/* Flush dirty buffers out to disk. */
static inline int sync(void)
{
	return sys_sync();
}

extern int sys_wait4(int, int *, int, struct rusage *);
/* waitpid() expressed as wait4 with no rusage reporting. */
static inline pid_t waitpid(int pid, int * wait_stat, int flags)
{
	return sys_wait4(pid, wait_stat, flags, NULL);
}

/* wait(): wait for any child (pid -1), no options. */
static inline pid_t wait(int * wait_stat)
{
	return waitpid(-1,wait_stat,0);
}
 
#endif
 
#endif /* _ALPHA_UNISTD_H */
/trunk/rc203soc/sw/uClinux/include/asm-alpha/shmparam.h
0,0 → 1,46
#ifndef _ASMAXP_SHMPARAM_H
#define _ASMAXP_SHMPARAM_H
 
/*
* Address range for shared memory attaches if no address passed to shmat().
*/
#define SHM_RANGE_START 0x14000000000
#define SHM_RANGE_END 0x15000000000
 
/*
* Format of a swap-entry for shared memory pages currently out in
* swap space (see also mm/swap.c).
*
* SWP_TYPE = SHM_SWP_TYPE
* SWP_OFFSET is used as follows:
*
* bits 0..6 : id of shared memory segment page belongs to (SHM_ID)
* bits 7..21: index of page within shared memory segment (SHM_IDX)
* (actually fewer bits get used since SHMMAX is so low)
*/
 
/*
* Keep _SHM_ID_BITS as low as possible since SHMMNI depends on it and
* there is a static array of size SHMMNI.
*/
#define _SHM_ID_BITS 7
#define SHM_ID_MASK ((1<<_SHM_ID_BITS)-1)
 
#define SHM_IDX_SHIFT (_SHM_ID_BITS)
#define _SHM_IDX_BITS 15
#define SHM_IDX_MASK ((1<<_SHM_IDX_BITS)-1)
 
/*
* _SHM_ID_BITS + _SHM_IDX_BITS must be <= 24 on the Alpha and
* SHMMAX <= (PAGE_SIZE << _SHM_IDX_BITS).
*/
 
#define SHMMAX 0x3fa000 /* max shared seg size (bytes) */
#define SHMMIN 1 /* really PAGE_SIZE */ /* min shared seg size (bytes) */
#define SHMMNI (1<<_SHM_ID_BITS) /* max num of segs system wide */
#define SHMALL /* max shm system wide (pages) */ \
(1<<(_SHM_IDX_BITS+_SHM_ID_BITS))
#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
#define SHMSEG SHMMNI /* max shared segs per process */
 
#endif /* _ASMAXP_SHMPARAM_H */
/trunk/rc203soc/sw/uClinux/include/asm-alpha/sockios.h
0,0 → 1,15
#ifndef _ASM_ALPHA_SOCKIOS_H
#define _ASM_ALPHA_SOCKIOS_H
 
/* Socket-level I/O control calls. */
 
#define FIOGETOWN _IOR('f', 123, int)
#define FIOSETOWN _IOW('f', 124, int)
 
#define SIOCATMARK _IOR('s', 7, int)
#define SIOCSPGRP _IOW('s', 8, pid_t)
#define SIOCGPGRP _IOR('s', 9, pid_t)
 
#define SIOCGSTAMP 0x8906 /* Get stamp - linux-specific */
 
#endif /* _ASM_ALPHA_SOCKIOS_H */
/trunk/rc203soc/sw/uClinux/include/asm-alpha/pal.h
0,0 → 1,51
#ifndef __ALPHA_PAL_H
#define __ALPHA_PAL_H
 
/*
* Common PAL-code
*/
#define PAL_halt 0
#define PAL_cflush 1
#define PAL_draina 2
#define PAL_bpt 128
#define PAL_bugchk 129
#define PAL_chmk 131
#define PAL_callsys 131
#define PAL_imb 134
#define PAL_rduniq 158
#define PAL_wruniq 159
#define PAL_gentrap 170
#define PAL_nphalt 190
 
/*
* VMS specific PAL-code
*/
#define PAL_swppal 10
#define PAL_mfpr_vptb 41
 
/*
* OSF specific PAL-code
*/
#define PAL_cserve 9
#define PAL_wripir 13
#define PAL_rdmces 16
#define PAL_wrmces 17
#define PAL_wrfen 43
#define PAL_wrvptptr 45
#define PAL_jtopal 46
#define PAL_swpctx 48
#define PAL_wrval 49
#define PAL_rdval 50
#define PAL_tbi 51
#define PAL_wrent 52
#define PAL_swpipl 53
#define PAL_rdps 54
#define PAL_wrkgp 55
#define PAL_wrusp 56
#define PAL_wrperfmon 57
#define PAL_rdusp 58
#define PAL_whami 60
#define PAL_rtsys 61
#define PAL_rti 63
 
#endif /* __ALPHA_PAL_H */
/trunk/rc203soc/sw/uClinux/include/asm-alpha/atomic.h
0,0 → 1,103
#ifndef __ARCH_ALPHA_ATOMIC__
#define __ARCH_ALPHA_ATOMIC__
 
/*
* Atomic operations that C can't guarantee us. Useful for
* resource counting etc...
*
* But use these as seldom as possible since they are much more slower
* than regular operations.
*/
 
/*
* Make sure gcc doesn't try to be clever and move things around
* on us. We need to use _exactly_ the address the user gave us,
* not some alias that contains the same information.
*/
#define __atomic_fool_gcc(x) (*(struct { int a[100]; } *)x)
 
typedef int atomic_t;
 
/*
 * Atomically add 'i' to *v.
 *
 * Uses the Alpha load-locked / store-conditional pair: ldl_l loads
 * the counter and sets the lock flag, stl_c stores the sum and
 * writes its success status back into %0, and beq retries the whole
 * sequence if the conditional store lost the reservation.
 * The "2:" label is unused.  __atomic_fool_gcc() forces gcc to use
 * exactly the caller's address as the memory operand.
 */
extern __inline__ void atomic_add(atomic_t i, atomic_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
		"\n1:\t"
		"ldl_l %0,%1\n\t"	/* load-locked the counter */
		"addl %0,%2,%0\n\t"
		"stl_c %0,%1\n\t"	/* %0 <- 1 on success, 0 on failure */
		"beq %0,1b\n"		/* lost the reservation: retry */
		"2:"
		:"=&r" (temp),
		"=m" (__atomic_fool_gcc(v))
		:"Ir" (i),
		"m" (__atomic_fool_gcc(v)));
}
 
/*
 * Atomically subtract 'i' from *v.
 * Same ldl_l/stl_c retry loop as atomic_add(), using subl.
 */
extern __inline__ void atomic_sub(atomic_t i, atomic_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
		"\n1:\t"
		"ldl_l %0,%1\n\t"	/* load-locked the counter */
		"subl %0,%2,%0\n\t"
		"stl_c %0,%1\n\t"	/* %0 <- 1 on success, 0 on failure */
		"beq %0,1b\n"		/* lost the reservation: retry */
		"2:"
		:"=&r" (temp),
		"=m" (__atomic_fool_gcc(v))
		:"Ir" (i),
		"m" (__atomic_fool_gcc(v)));
}
 
/*
* Same as above, but return the result value
*/
/*
 * Atomically add 'i' to *v and return the new value.
 * The sum is copied into 'result' (bis = register move) before the
 * conditional store clobbers %0 with its success flag.
 */
extern __inline__ long atomic_add_return(atomic_t i, atomic_t * v)
{
	long temp, result;
	__asm__ __volatile__(
		"\n1:\t"
		"ldl_l %0,%1\n\t"
		"addl %0,%3,%0\n\t"
		"bis %0,%0,%2\n\t"	/* save the new value */
		"stl_c %0,%1\n\t"
		"beq %0,1b\n"		/* retry on lost reservation */
		"2:"
		:"=&r" (temp),
		"=m" (__atomic_fool_gcc(v)),
		"=&r" (result)
		:"Ir" (i),
		"m" (__atomic_fool_gcc(v)));
	return result;
}
 
/*
 * Atomically subtract 'i' from *v and return the new value.
 * Mirror image of atomic_add_return() using subl.
 */
extern __inline__ long atomic_sub_return(atomic_t i, atomic_t * v)
{
	long temp, result;
	__asm__ __volatile__(
		"\n1:\t"
		"ldl_l %0,%1\n\t"
		"subl %0,%3,%0\n\t"
		"bis %0,%0,%2\n\t"	/* save the new value */
		"stl_c %0,%1\n\t"
		"beq %0,1b\n"		/* retry on lost reservation */
		"2:"
		:"=&r" (temp),
		"=m" (__atomic_fool_gcc(v)),
		"=&r" (result)
		:"Ir" (i),
		"m" (__atomic_fool_gcc(v)));
	return result;
}
 
#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic_inc_return(v) atomic_add_return(1,(v))
 
#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
 
#define atomic_inc(v) atomic_add(1,(v))
#define atomic_dec(v) atomic_sub(1,(v))
 
#endif
/trunk/rc203soc/sw/uClinux/include/asm-alpha/reg.h
0,0 → 1,52
#ifndef __reg_h__
#define __reg_h__
 
/*
* Exception frame offsets.
*/
#define EF_V0 0
#define EF_T0 1
#define EF_T1 2
#define EF_T2 3
#define EF_T3 4
#define EF_T4 5
#define EF_T5 6
#define EF_T6 7
#define EF_T7 8
#define EF_S0 9
#define EF_S1 10
#define EF_S2 11
#define EF_S3 12
#define EF_S4 13
#define EF_S5 14
#define EF_S6 15
#define EF_A3 16
#define EF_A4 17
#define EF_A5 18
#define EF_T8 19
#define EF_T9 20
#define EF_T10 21
#define EF_T11 22
#define EF_RA 23
#define EF_T12 24
#define EF_AT 25
#define EF_SP 26
#define EF_PS 27
#define EF_PC 28
#define EF_GP 29
#define EF_A0 30
#define EF_A1 31
#define EF_A2 32
 
#define EF_SIZE (33*8)
#define HWEF_SIZE (6*8) /* size of PAL frame (PS-A2) */
 
#define EF_SSIZE (EF_SIZE - HWEF_SIZE)
 
/*
* Map register number into core file offset.
*/
#define CORE_REG(reg, ubase) \
(((unsigned long *)((unsigned long)(ubase)))[reg])
 
#endif /* __reg_h__ */
/trunk/rc203soc/sw/uClinux/include/asm-alpha/signal.h
0,0 → 1,100
#ifndef _ASMAXP_SIGNAL_H
#define _ASMAXP_SIGNAL_H
 
typedef unsigned long sigset_t; /* at least 32 bits */
 
#define _NSIG 32
#define NSIG _NSIG
 
/*
* Linux/AXP has different signal numbers that Linux/i386: I'm trying
* to make it OSF/1 binary compatible, at least for normal binaries.
*/
#define SIGHUP 1
#define SIGINT 2
#define SIGQUIT 3
#define SIGILL 4
#define SIGTRAP 5
#define SIGABRT 6
#define SIGEMT 7
#define SIGFPE 8
#define SIGKILL 9
#define SIGBUS 10
#define SIGSEGV 11
#define SIGSYS 12
#define SIGPIPE 13
#define SIGALRM 14
#define SIGTERM 15
#define SIGURG 16
#define SIGSTOP 17
#define SIGTSTP 18
#define SIGCONT 19
#define SIGCHLD 20
#define SIGTTIN 21
#define SIGTTOU 22
#define SIGIO 23
#define SIGXCPU 24
#define SIGXFSZ 25
#define SIGVTALRM 26
#define SIGPROF 27
#define SIGWINCH 28
#define SIGINFO 29
#define SIGUSR1 30
#define SIGUSR2 31
 
#define SIGPOLL SIGIO
#define SIGPWR SIGINFO
#define SIGIOT SIGABRT
 
/*
* sa_flags values: SA_STACK is not currently supported, but will allow the
* usage of signal stacks by using the (now obsolete) sa_restorer field in
* the sigaction structure as a stack pointer. This is now possible due to
* the changes in signal handling. LBT 010493.
* SA_INTERRUPT is a no-op, but left due to historical reasons. Use the
* SA_RESTART flag to get restarting signals (which were the default long ago)
* SA_SHIRQ flag is for shared interrupt support on PCI and EISA.
*/
#define SA_NOCLDSTOP 0x00000004
 
#define SA_STACK 0x00000001
#define SA_RESTART 0x00000002
#define SA_INTERRUPT 0x20000000
#define SA_NOMASK 0x00000008
#define SA_ONESHOT 0x00000010
#define SA_SHIRQ 0x00000020
 
#ifdef __KERNEL__
/*
* These values of sa_flags are used only by the kernel as part of the
* irq handling routines.
*
* SA_INTERRUPT is also used by the irq handling routines.
*/
#define SA_PROBE SA_ONESHOT
#define SA_SAMPLE_RANDOM SA_RESTART
#endif
 
 
#define SIG_BLOCK 1 /* for blocking signals */
#define SIG_UNBLOCK 2 /* for unblocking signals */
#define SIG_SETMASK 3 /* for setting the signal mask */
 
/* Type of a signal handler. */
typedef void (*__sighandler_t)(int);
 
#define SIG_DFL ((__sighandler_t)0) /* default signal handling */
#define SIG_IGN ((__sighandler_t)1) /* ignore signal */
#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */
 
struct sigaction {
__sighandler_t sa_handler;
sigset_t sa_mask;
unsigned int sa_flags;
};
 
#ifdef __KERNEL__
#include <asm/sigcontext.h>
#endif
 
#endif
/trunk/rc203soc/sw/uClinux/include/asm-alpha/pgtable.h
0,0 → 1,516
#ifndef _ALPHA_PGTABLE_H
#define _ALPHA_PGTABLE_H
 
/*
* This file contains the functions and defines necessary to modify and use
* the alpha page table tree.
*
* This hopefully works with any standard alpha page-size, as defined
* in <asm/page.h> (currently 8192).
*/
 
#include <asm/system.h>
#include <asm/mmu_context.h>
 
/* Caches aren't brain-dead on the alpha. */
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_range(mm, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr) do { } while (0)
#define flush_page_to_ram(page) do { } while (0)
 
/*
* Force a context reload. This is needed when we
* change the page table pointer or when we update
* the ASN of the current process.
*/
static inline void reload_context(struct task_struct *task)
{
	/*
	 * Pass &task->tss in $16 (the PALcode argument register) and
	 * issue the OSF swpctx PALcall to switch hardware context.
	 * The clobber list covers the registers PALcode may trash.
	 */
	__asm__ __volatile__(
		"bis %0,%0,$16\n\t"
		"call_pal %1"
		: /* no outputs */
		: "r" (&task->tss), "i" (PAL_swpctx)
		: "$0", "$1", "$16", "$22", "$23", "$24", "$25");
}
 
/*
* Use a few helper functions to hide the ugly broken ASN
* numbers on early alpha's (ev4 and ev45)
*/
#ifdef BROKEN_ASN
 
#define flush_tlb_current(x) tbiap()
#define flush_tlb_other(x) do { } while (0)
 
#else
 
extern void get_new_asn_and_reload(struct task_struct *, struct mm_struct *);
 
#define flush_tlb_current(mm) get_new_asn_and_reload(current, mm)
#define flush_tlb_other(mm) do { (mm)->context = 0; } while (0)
 
#endif
 
/*
* Flush just one page in the current TLB set.
* We need to be very careful about the icache here, there
* is no way to invalidate a specific icache page..
*/
/*
 * Flush one page from the current TLB set.
 * Executable mappings force a full per-ASN flush (no way to
 * invalidate a single icache page); data pages use a single
 * tbi type-2 (dtlb entry) invalidate.  With BROKEN_ASN the tbi
 * type is 2 for data or 3 (both I+D) for executable mappings.
 */
static inline void flush_tlb_current_page(struct mm_struct * mm,
	struct vm_area_struct *vma,
	unsigned long addr)
{
#ifdef BROKEN_ASN
	tbi(2 + ((vma->vm_flags & VM_EXEC) != 0), addr);
#else
	if (vma->vm_flags & VM_EXEC)
		flush_tlb_current(mm);
	else
		tbi(2, addr);
#endif
}
 
/*
* Flush current user mapping.
*/
/* Flush the current process's user TLB mappings. */
static inline void flush_tlb(void)
{
	flush_tlb_current(current->mm);
}
 
/*
* Flush everything (kernel mapping may also have
* changed due to vmalloc/vfree)
*/
/* Flush every TLB entry (tbia = translation buffer invalidate all). */
static inline void flush_tlb_all(void)
{
	tbia();
}
 
/*
* Flush a specified user mapping
*/
/*
 * Flush a specified user mapping.  The active mm is flushed in
 * hardware; a non-current mm is merely marked stale so it gets a
 * fresh ASN when it next runs.
 */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->mm) {
		flush_tlb_current(mm);
		return;
	}
	flush_tlb_other(mm);
}
 
/*
* Page-granular tlb flush.
*
* do a tbisd (type = 2) normally, and a tbis (type = 3)
* if it is an executable mapping. We want to avoid the
* itlb flush, because that potentially also does a
* icache flush.
*/
/*
 * Page-granular TLB flush.  Only the owning mm matters: if it is
 * the active mm, invalidate the single page (avoiding the icache
 * flush when possible); otherwise just invalidate the mm's context.
 */
static inline void flush_tlb_page(struct vm_area_struct *vma,
	unsigned long addr)
{
	struct mm_struct * owner = vma->vm_mm;

	if (owner == current->mm) {
		flush_tlb_current_page(owner, vma, addr);
		return;
	}
	flush_tlb_other(owner);
}
 
/*
* Flush a specified range of user mapping: on the
* alpha we flush the whole user tlb
*/
/*
 * Range flush: the alpha has no ranged invalidate, so flush the
 * whole user mapping ('start'/'end' are intentionally ignored).
 */
static inline void flush_tlb_range(struct mm_struct *mm,
	unsigned long start, unsigned long end)
{
	flush_tlb_mm(mm);
}
 
/* Certain architectures need to do special things when pte's
* within a page table are directly modified. Thus, the following
* hook is made available.
*/
#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
 
/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3))
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
 
/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT (PAGE_SHIFT + 2*(PAGE_SHIFT-3))
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
 
/*
* entries per page directory level: the alpha is three-level, with
* all levels having a one-page page table.
*
* The PGD is special: the last entry is reserved for self-mapping.
*/
#define PTRS_PER_PTE (1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PMD (1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PGD ((1UL << (PAGE_SHIFT-3))-1)
 
/* the no. of pointers that fit on a page: this will go away */
#define PTRS_PER_PAGE (1UL << (PAGE_SHIFT-3))
 
#define VMALLOC_START 0xFFFFFE0000000000
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
 
/*
* OSF/1 PAL-code-imposed page table bits
*/
#define _PAGE_VALID 0x0001
#define _PAGE_FOR 0x0002 /* used for page protection (fault on read) */
#define _PAGE_FOW 0x0004 /* used for page protection (fault on write) */
#define _PAGE_FOE 0x0008 /* used for page protection (fault on exec) */
#define _PAGE_ASM 0x0010
#define _PAGE_KRE 0x0100 /* xxx - see below on the "accessed" bit */
#define _PAGE_URE 0x0200 /* xxx */
#define _PAGE_KWE 0x1000 /* used to do the dirty bit in software */
#define _PAGE_UWE 0x2000 /* used to do the dirty bit in software */
 
/* .. and these are ours ... */
#define _PAGE_DIRTY 0x20000
#define _PAGE_ACCESSED 0x40000
 
/*
* NOTE! The "accessed" bit isn't necessarily exact: it can be kept exactly
* by software (use the KRE/URE/KWE/UWE bits appropriately), but I'll fake it.
* Under Linux/AXP, the "accessed" bit just means "read", and I'll just use
* the KRE/URE bits to watch for it. That way we don't need to overload the
* KWE/UWE bits with both handling dirty and accessed.
*
* Note that the kernel uses the accessed bit just to check whether to page
* out a page or not, so it doesn't have to be exact anyway.
*/
 
#define __DIRTY_BITS (_PAGE_DIRTY | _PAGE_KWE | _PAGE_UWE)
#define __ACCESS_BITS (_PAGE_ACCESSED | _PAGE_KRE | _PAGE_URE)
 
#define _PFN_MASK 0xFFFFFFFF00000000
 
#define _PAGE_TABLE (_PAGE_VALID | __DIRTY_BITS | __ACCESS_BITS)
#define _PAGE_CHG_MASK (_PFN_MASK | __DIRTY_BITS | __ACCESS_BITS)
 
/*
* All the normal masks have the "page accessed" bits on, as any time they are used,
* the page is accessed. They are cleared only by the page-out routines
*/
#define PAGE_NONE __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOR | _PAGE_FOW | _PAGE_FOE)
#define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
#define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
#define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
#define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
 
#define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
 
#define _PAGE_P(x) _PAGE_NORMAL((x) | (((x) & _PAGE_FOW)?0:_PAGE_FOW))
#define _PAGE_S(x) _PAGE_NORMAL(x)
 
/*
* The hardware can handle write-only mappings, but as the alpha
* architecture does byte-wide writes with a read-modify-write
* sequence, it's not practical to have write-without-read privs.
* Thus the "-w- -> rw-" and "-wx -> rwx" mapping here (and in
* arch/alpha/mm/fault.c)
*/
/* xwr */
#define __P000 _PAGE_P(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
#define __P001 _PAGE_P(_PAGE_FOE | _PAGE_FOW)
#define __P010 _PAGE_P(_PAGE_FOE)
#define __P011 _PAGE_P(_PAGE_FOE)
#define __P100 _PAGE_P(_PAGE_FOW | _PAGE_FOR)
#define __P101 _PAGE_P(_PAGE_FOW)
#define __P110 _PAGE_P(0)
#define __P111 _PAGE_P(0)
 
#define __S000 _PAGE_S(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
#define __S001 _PAGE_S(_PAGE_FOE | _PAGE_FOW)
#define __S010 _PAGE_S(_PAGE_FOE)
#define __S011 _PAGE_S(_PAGE_FOE)
#define __S100 _PAGE_S(_PAGE_FOW | _PAGE_FOR)
#define __S101 _PAGE_S(_PAGE_FOW)
#define __S110 _PAGE_S(0)
#define __S111 _PAGE_S(0)
 
/*
* BAD_PAGETABLE is used when we need a bogus page-table, while
* BAD_PAGE is used for a bogus page.
*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
*/
extern pte_t __bad_page(void);
extern pmd_t * __bad_pagetable(void);
 
extern unsigned long __zero_page(void);
 
#define BAD_PAGETABLE __bad_pagetable()
#define BAD_PAGE __bad_page()
#define ZERO_PAGE 0xfffffc000030A000
 
/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR (8*sizeof(unsigned long))
 
/* to align the pointer to a pointer address */
#define PTR_MASK (~(sizeof(void*)-1))
 
/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
#define SIZEOF_PTR_LOG2 3
 
/* to find an entry in a page-table */
#define PAGE_PTR(address) \
((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)
 
extern unsigned long high_memory;
 
/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*/
/* Build a pte: PFN (virt minus PAGE_OFFSET, shifted into bits 32+) | protection. */
extern inline pte_t mk_pte(unsigned long page, pgprot_t pgprot)
{ pte_t pte; pte_val(pte) = ((page-PAGE_OFFSET) << (32-PAGE_SHIFT)) | pgprot_val(pgprot); return pte; }

/* Replace a pte's protection bits, keeping PFN + dirty/accessed bits. */
extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }

/* Point a pmd entry at a pte page (stored as PFN in the high bits). */
extern inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
{ pmd_val(*pmdp) = _PAGE_TABLE | ((((unsigned long) ptep) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }

/* Point a pgd entry at a pmd page. */
extern inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
{ pgd_val(*pgdp) = _PAGE_TABLE | ((((unsigned long) pmdp) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }

/* Recover the kernel virtual address of the page a pte/pmd/pgd maps. */
extern inline unsigned long pte_page(pte_t pte)
{ return PAGE_OFFSET + ((pte_val(pte) & _PFN_MASK) >> (32-PAGE_SHIFT)); }

extern inline unsigned long pmd_page(pmd_t pmd)
{ return PAGE_OFFSET + ((pmd_val(pmd) & _PFN_MASK) >> (32-PAGE_SHIFT)); }

extern inline unsigned long pgd_page(pgd_t pgd)
{ return PAGE_OFFSET + ((pgd_val(pgd) & _PFN_MASK) >> (32-PAGE_SHIFT)); }
 
/*
 * Table-entry predicates and clearers.  "none" = entry is zero,
 * "present" = valid bit set, "bad" = anything outside the PFN
 * other than _PAGE_TABLE, or a page address above high_memory.
 */
extern inline int pte_none(pte_t pte) { return !pte_val(pte); }
extern inline int pte_present(pte_t pte) { return pte_val(pte) & _PAGE_VALID; }
extern inline void pte_clear(pte_t *ptep) { pte_val(*ptep) = 0; }

extern inline int pmd_none(pmd_t pmd) { return !pmd_val(pmd); }
extern inline int pmd_bad(pmd_t pmd) { return (pmd_val(pmd) & ~_PFN_MASK) != _PAGE_TABLE || pmd_page(pmd) > high_memory; }
extern inline int pmd_present(pmd_t pmd) { return pmd_val(pmd) & _PAGE_VALID; }
extern inline void pmd_clear(pmd_t * pmdp) { pmd_val(*pmdp) = 0; }

extern inline int pgd_none(pgd_t pgd) { return !pgd_val(pgd); }
extern inline int pgd_bad(pgd_t pgd) { return (pgd_val(pgd) & ~_PFN_MASK) != _PAGE_TABLE || pgd_page(pgd) > high_memory; }
extern inline int pgd_present(pgd_t pgd) { return pgd_val(pgd) & _PAGE_VALID; }
extern inline void pgd_clear(pgd_t * pgdp) { pgd_val(*pgdp) = 0; }
 
/*
* The following only work if pte_present() is true.
* Undefined behaviour if not..
*/
/*
 * Per-pte attribute tests and modifiers.  Note the inverted sense:
 * the hardware stores fault-on-read/write/exec bits, so "readable"
 * means FOR is clear, protecting means setting the fault bit.
 * Dirty and accessed are tracked in software via __DIRTY_BITS /
 * __ACCESS_BITS (see the comment block above their definitions).
 */
extern inline int pte_read(pte_t pte) { return !(pte_val(pte) & _PAGE_FOR); }
extern inline int pte_write(pte_t pte) { return !(pte_val(pte) & _PAGE_FOW); }
extern inline int pte_exec(pte_t pte) { return !(pte_val(pte) & _PAGE_FOE); }
extern inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }

extern inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) |= _PAGE_FOW; return pte; }
extern inline pte_t pte_rdprotect(pte_t pte) { pte_val(pte) |= _PAGE_FOR; return pte; }
extern inline pte_t pte_exprotect(pte_t pte) { pte_val(pte) |= _PAGE_FOE; return pte; }
extern inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~(__DIRTY_BITS); return pte; }
extern inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~(__ACCESS_BITS); return pte; }
extern inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) &= ~_PAGE_FOW; return pte; }
extern inline pte_t pte_mkread(pte_t pte) { pte_val(pte) &= ~_PAGE_FOR; return pte; }
extern inline pte_t pte_mkexec(pte_t pte) { pte_val(pte) &= ~_PAGE_FOE; return pte; }
extern inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= __DIRTY_BITS; return pte; }
extern inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= __ACCESS_BITS; return pte; }
 
/*
* To set the page-dir. Note the self-mapping in the last entry
*
* Also note that if we update the current process ptbr, we need to
* update the PAL-cached ptbr value as well.. There doesn't seem to
* be any "wrptbr" PAL-insn, but we can do a dummy swpctx to ourself
* instead.
*/
extern inline void SET_PAGE_DIR(struct task_struct * tsk, pgd_t * pgdir)
{
	/* Last pgd slot maps the pgd itself (PTRS_PER_PGD excludes it). */
	pgd_val(pgdir[PTRS_PER_PGD]) = pte_val(mk_pte((unsigned long) pgdir, PAGE_KERNEL));
	/* ptbr holds the page-frame number of the directory. */
	tsk->tss.ptbr = ((unsigned long) pgdir - PAGE_OFFSET) >> PAGE_SHIFT;
	/* A dummy swpctx refreshes the PAL-cached ptbr for the running task. */
	if (tsk == current)
		reload_context(tsk);
}
 
#define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address))
 
/* to find an entry in a page-table-directory. */
extern inline pgd_t * pgd_offset(struct mm_struct * mm, unsigned long address)
{
	/* Index the top-level directory by bits above PGDIR_SHIFT. */
	return mm->pgd + ((address >> PGDIR_SHIFT) & (PTRS_PER_PAGE - 1));
}

/* Find an entry in the second-level page table.. */
extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
	return (pmd_t *) pgd_page(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1));
}

/* Find an entry in the third-level page table.. */
extern inline pte_t * pte_offset(pmd_t * dir, unsigned long address)
{
	return (pte_t *) pmd_page(*dir) + ((address >> PAGE_SHIFT) & (PTRS_PER_PAGE - 1));
}
 
/*
* Allocate and free page tables. The xxx_kernel() versions are
* used to allocate a kernel page table - this turns on ASN bits
* if any.
*/
/* Release a kernel pte page allocated by pte_alloc_kernel(). */
extern inline void pte_free_kernel(pte_t * pte)
{
	free_page((unsigned long) pte);
}
 
/*
 * Return the kernel pte slot for 'address' under *pmd, allocating a
 * fresh pte page if the pmd is empty.  On allocation failure the pmd
 * is pointed at BAD_PAGETABLE and NULL is returned; a corrupt pmd is
 * reported and likewise replaced.
 * NOTE(review): the inner pmd_none() re-check guards against the
 * entry being filled while get_free_page() blocked — confirm.
 */
extern inline pte_t * pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
	address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	if (pmd_none(*pmd)) {
		pte_t *page = (pte_t *) get_free_page(GFP_KERNEL);
		if (pmd_none(*pmd)) {
			if (page) {
				pmd_set(pmd, page);
				return page + address;
			}
			pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
			return NULL;
		}
		/* someone else installed a table meanwhile: drop ours */
		free_page((unsigned long) page);
	}
	if (pmd_bad(*pmd)) {
		printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
		pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + address;
}
 
/* Release a kernel pmd page allocated by pmd_alloc_kernel(). */
extern inline void pmd_free_kernel(pmd_t * pmd)
{
	free_page((unsigned long) pmd);
}
 
/*
 * Return the kernel pmd slot for 'address' under *pgd, allocating a
 * fresh pmd page if the pgd is empty.  Same double-check / failure
 * handling pattern as pte_alloc_kernel(), one level up.
 */
extern inline pmd_t * pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
{
	address = (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
	if (pgd_none(*pgd)) {
		pmd_t *page = (pmd_t *) get_free_page(GFP_KERNEL);
		if (pgd_none(*pgd)) {
			if (page) {
				pgd_set(pgd, page);
				return page + address;
			}
			pgd_set(pgd, BAD_PAGETABLE);
			return NULL;
		}
		/* entry got filled while we allocated: drop ours */
		free_page((unsigned long) page);
	}
	if (pgd_bad(*pgd)) {
		printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
		pgd_set(pgd, BAD_PAGETABLE);
		return NULL;
	}
	return (pmd_t *) pgd_page(*pgd) + address;
}
 
/* Release a user pte page allocated by pte_alloc(). */
extern inline void pte_free(pte_t * pte)
{
	free_page((unsigned long) pte);
}
 
/*
 * User-space counterpart of pte_alloc_kernel(): return the pte slot
 * for 'address' under *pmd, allocating a pte page if needed.  On
 * failure the pmd is pointed at BAD_PAGETABLE and NULL is returned.
 */
extern inline pte_t * pte_alloc(pmd_t *pmd, unsigned long address)
{
	address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	if (pmd_none(*pmd)) {
		pte_t *page = (pte_t *) get_free_page(GFP_KERNEL);
		if (pmd_none(*pmd)) {	/* re-check: may have been filled meanwhile */
			if (page) {
				pmd_set(pmd, page);
				return page + address;
			}
			pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
			return NULL;
		}
		free_page((unsigned long) page);
	}
	if (pmd_bad(*pmd)) {
		printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
		pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + address;
}
 
/* Release a user pmd page allocated by pmd_alloc(). */
extern inline void pmd_free(pmd_t * pmd)
{
	free_page((unsigned long) pmd);
}
 
/*
 * User-space counterpart of pmd_alloc_kernel(): return the pmd slot
 * for 'address' under *pgd, allocating a pmd page if needed.
 */
extern inline pmd_t * pmd_alloc(pgd_t *pgd, unsigned long address)
{
	address = (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
	if (pgd_none(*pgd)) {
		pmd_t *page = (pmd_t *) get_free_page(GFP_KERNEL);
		if (pgd_none(*pgd)) {	/* re-check: may have been filled meanwhile */
			if (page) {
				pgd_set(pgd, page);
				return page + address;
			}
			pgd_set(pgd, BAD_PAGETABLE);
			return NULL;
		}
		free_page((unsigned long) page);
	}
	if (pgd_bad(*pgd)) {
		printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
		pgd_set(pgd, BAD_PAGETABLE);
		return NULL;
	}
	return (pmd_t *) pgd_page(*pgd) + address;
}
 
/* Release a page directory allocated by pgd_alloc(). */
extern inline void pgd_free(pgd_t * pgd)
{
	free_page((unsigned long) pgd);
}

/* Allocate a zero-filled page to serve as a page directory. */
extern inline pgd_t * pgd_alloc(void)
{
	return (pgd_t *) get_free_page(GFP_KERNEL);
}
 
extern pgd_t swapper_pg_dir[1024];
 
/*
* The alpha doesn't have any external MMU info: the kernel page
* tables contain all the necessary information.
*/
/* No-op: the alpha MMU walks the in-memory page tables directly. */
extern inline void update_mmu_cache(struct vm_area_struct * vma,
	unsigned long address, pte_t pte)
{
}
 
/*
* Non-present pages: high 24 bits are offset, next 8 bits type,
* low 32 bits zero..
*/
/* Encode a swap entry: type in bits 32-39, offset in bits 40+ (valid bit clear). */
extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{ pte_t pte; pte_val(pte) = (type << 32) | (offset << 40); return pte; }
 
#define SWP_TYPE(entry) (((entry) >> 32) & 0xff)
#define SWP_OFFSET(entry) ((entry) >> 40)
#define SWP_ENTRY(type,offset) pte_val(mk_swap_pte((type),(offset)))
 
#endif /* _ALPHA_PGTABLE_H */
/trunk/rc203soc/sw/uClinux/include/asm-alpha/ptrace.h
0,0 → 1,75
#ifndef _ASMAXP_PTRACE_H
#define _ASMAXP_PTRACE_H
 
 
/*
* This struct defines the way the registers are stored on the
* kernel stack during a system call or other kernel entry
*
* NOTE! I want to minimize the overhead of system calls, so this
* struct has as little information as possible. I does not have
*
* - floating point regs: the kernel doesn't change those
* - r9-15: saved by the C compiler
*
* This makes "fork()" and "exec()" a bit more complex, but should
* give us low system call latency.
*/
 
struct pt_regs {
unsigned long r0;
unsigned long r1;
unsigned long r2;
unsigned long r3;
unsigned long r4;
unsigned long r5;
unsigned long r6;
unsigned long r7;
unsigned long r8;
unsigned long r19;
unsigned long r20;
unsigned long r21;
unsigned long r22;
unsigned long r23;
unsigned long r24;
unsigned long r25;
unsigned long r26;
unsigned long r27;
unsigned long r28;
unsigned long hae;
/* JRP - These are the values provided to a0-a2 by PALcode */
unsigned long trap_a0;
unsigned long trap_a1;
unsigned long trap_a2;
/* These are saved by PAL-code: */
unsigned long ps;
unsigned long pc;
unsigned long gp;
unsigned long r16;
unsigned long r17;
unsigned long r18;
};
 
/*
* This is the extended stack used by signal handlers and the context
* switcher: it's pushed after the normal "struct pt_regs".
*/
struct switch_stack {
unsigned long r9;
unsigned long r10;
unsigned long r11;
unsigned long r12;
unsigned long r13;
unsigned long r14;
unsigned long r15;
unsigned long r26;
unsigned long fp[32]; /* fp[31] is fpcr */
};
 
#ifdef __KERNEL__
#define user_mode(regs) ((regs)->ps & 8)
#define instruction_pointer(regs) ((regs)->pc)
extern void show_regs(struct pt_regs *);
#endif
 
#endif
/trunk/rc203soc/sw/uClinux/include/asm-alpha/jensen.h
0,0 → 1,273
#ifndef __ALPHA_JENSEN_H
#define __ALPHA_JENSEN_H
 
/*
* Defines for the AlphaPC EISA IO and memory address space.
*/
 
/*
* NOTE! The memory operations do not set any memory barriers, as it's
* not needed for cases like a frame buffer that is essentially memory-like.
* You need to do them by hand if the operations depend on ordering.
*
* Similarly, the port IO operations do a "mb" only after a write operation:
* if an mb is needed before (as in the case of doing memory mapped IO
* first, and then a port IO operation to the same device), it needs to be
* done by hand.
*
* After the above has bitten me 100 times, I'll give up and just do the
* mb all the time, but right now I'm hoping this will work out. Avoiding
* mb's may potentially be a noticeable speed improvement, but I can't
* honestly say I've tested it.
*
* Handling interrupts that need to do mb's to synchronize to non-interrupts
* is another fun race area. Don't do it (because if you do, I'll have to
* do *everything* with interrupts disabled, ugh).
*/
 
/*
* EISA Interrupt Acknowledge address
*/
#define EISA_INTA (IDENT_ADDR + 0x100000000UL)
 
/*
* FEPROM addresses
*/
#define EISA_FEPROM0 (IDENT_ADDR + 0x180000000UL)
#define EISA_FEPROM1 (IDENT_ADDR + 0x1A0000000UL)
 
/*
* VL82C106 base address
*/
#define EISA_VL82C106 (IDENT_ADDR + 0x1C0000000UL)
 
/*
* EISA "Host Address Extension" address (bits 25-31 of the EISA address)
*/
#define EISA_HAE (IDENT_ADDR + 0x1D0000000UL)
 
/*
* "SYSCTL" register address
*/
#define EISA_SYSCTL (IDENT_ADDR + 0x1E0000000UL)
 
/*
* "spare" register address
*/
#define EISA_SPARE (IDENT_ADDR + 0x1F0000000UL)
 
/*
* EISA memory address offset
*/
#define EISA_MEM (IDENT_ADDR + 0x200000000UL)
 
/*
* EISA IO address offset
*/
#define EISA_IO (IDENT_ADDR + 0x300000000UL)
 
/*
* Change virtual addresses to bus addresses and vv.
*
* NOTE! On the Jensen, the physical address is the same
* as the bus address, but this is not necessarily true on
* other alpha hardware.
*/
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
 
#define HAE_ADDRESS EISA_HAE
 
/*
* Handle the "host address register". This needs to be set
* to the high 7 bits of the EISA address. This is also needed
* for EISA IO addresses, which are only 16 bits wide (the
* hae needs to be set to 0).
*
* HAE isn't needed for the local IO operations, though.
*/
#define __HAE_MASK 0x1ffffff
extern inline void __set_hae(unsigned long addr)
{
	/* hae on the Jensen is bits 31:25 shifted right */
	addr >>= 25;
	/* skip the expensive register write if the cached value matches */
	if (addr != hae.cache)
		set_hae(addr);
}
 
/*
* IO functions
*
* The "local" functions are those that don't go out to the EISA bus,
* but instead act on the VL82C106 chip directly.. This is mainly the
* keyboard, RTC, printer and first two serial lines..
*
* The local stuff makes for some complications, but it seems to be
* gone in the PCI version. I hope I can get DEC suckered^H^H^H^H^H^H^H^H
* convinced that I need one of the newer machines.
*/
/*
 * Byte read from the on-board VL82C106 chip (keyboard, RTC, LPT,
 * COM1/2).  Local ports are sparse-mapped at a 512-byte stride.
 */
extern inline unsigned int __local_inb(unsigned long addr)
{
	long result = *(volatile int *) ((addr << 9) + EISA_VL82C106);
	return 0xffUL & result;
}
 
/* Byte write to the VL82C106; mb() after the write per the file header. */
extern inline void __local_outb(unsigned char b, unsigned long addr)
{
	*(volatile unsigned int *) ((addr << 9) + EISA_VL82C106) = b;
	mb();
}
 
extern unsigned int _bus_inb(unsigned long addr);
 
/*
 * Byte read from the EISA bus.  IO addresses are only 16 bits, so
 * HAE must be zero.  The sparse mapping shifts the port by 7; the
 * wanted byte lane is selected by (addr & 3) after the load.
 */
extern inline unsigned int __bus_inb(unsigned long addr)
{
	long result;

	__set_hae(0);
	result = *(volatile int *) ((addr << 7) + EISA_IO + 0x00);
	result >>= (addr & 3) * 8;
	return 0xffUL & result;
}
 
extern void _bus_outb(unsigned char b, unsigned long addr);
 
/*
 * Byte write to the EISA bus: the byte is replicated into all four
 * lanes (b * 0x01010101) so the addressed lane carries the value.
 */
extern inline void __bus_outb(unsigned char b, unsigned long addr)
{
	__set_hae(0);
	*(volatile unsigned int *) ((addr << 7) + EISA_IO + 0x00) = b * 0x01010101;
	mb();
}
 
/*
* It seems gcc is not very good at optimizing away logical
* operations that result in operations across inline functions.
* Which is why this is a macro.
*/
/* True for ports served by the on-board VL82C106 rather than the EISA bus. */
#define __is_local(addr) ( \
/* keyboard */ (addr == 0x60 || addr == 0x64) || \
/* RTC */ (addr == 0x170 || addr == 0x171) || \
/* mb COM2 */ (addr >= 0x2f8 && addr <= 0x2ff) || \
/* mb LPT1 */ (addr >= 0x3bc && addr <= 0x3be) || \
/* mb COM1 */ (addr >= 0x3f8 && addr <= 0x3ff))
 
/* Byte port read: dispatch to the VL82C106 for local ports, EISA otherwise. */
extern inline unsigned int __inb(unsigned long addr)
{
	return __is_local(addr) ? __local_inb(addr) : _bus_inb(addr);
}
 
/* Byte port write: dispatch to the VL82C106 for local ports, EISA otherwise. */
extern inline void __outb(unsigned char b, unsigned long addr)
{
	if (!__is_local(addr)) {
		_bus_outb(b, addr);
		return;
	}
	__local_outb(b, addr);
}
 
/* 16-bit EISA port read (sparse offset 0x20 selects word width). */
extern inline unsigned int __inw(unsigned long addr)
{
	long result;

	__set_hae(0);
	result = *(volatile int *) ((addr << 7) + EISA_IO + 0x20);
	result >>= (addr & 3) * 8;
	return 0xffffUL & result;
}
 
/* 32-bit EISA port read (sparse offset 0x60 selects longword width). */
extern inline unsigned int __inl(unsigned long addr)
{
	__set_hae(0);
	return *(volatile unsigned int *) ((addr << 7) + EISA_IO + 0x60);
}
 
/* 16-bit EISA port write; value replicated into both word lanes. */
extern inline void __outw(unsigned short b, unsigned long addr)
{
	__set_hae(0);
	*(volatile unsigned int *) ((addr << 7) + EISA_IO + 0x20) = b * 0x00010001;
	mb();
}
 
/* 32-bit EISA port write. */
extern inline void __outl(unsigned int b, unsigned long addr)
{
	__set_hae(0);
	*(volatile unsigned int *) ((addr << 7) + EISA_IO + 0x60) = b;
	mb();
}
 
/*
* Memory functions.
*/
/*
 * Byte read from EISA memory space.  Unlike IO ports, memory
 * addresses exceed 25 bits, so HAE carries the upper bits and only
 * the low 25 bits index the sparse window.  No memory barrier —
 * see the header comment at the top of this file.
 */
extern inline unsigned long __readb(unsigned long addr)
{
	long result;

	__set_hae(addr);
	addr &= __HAE_MASK;
	result = *(volatile int *) ((addr << 7) + EISA_MEM + 0x00);
	result >>= (addr & 3) * 8;
	return 0xffUL & result;
}
 
/* 16-bit read from EISA memory space (see __readb for the HAE scheme). */
extern inline unsigned long __readw(unsigned long addr)
{
	long result;

	__set_hae(addr);
	addr &= __HAE_MASK;
	result = *(volatile int *) ((addr << 7) + EISA_MEM + 0x20);
	result >>= (addr & 3) * 8;
	return 0xffffUL & result;
}
 
/* 32-bit read from EISA memory space. */
extern inline unsigned long __readl(unsigned long addr)
{
	__set_hae(addr);
	addr &= __HAE_MASK;
	return *(volatile unsigned int *) ((addr << 7) + EISA_MEM + 0x60);
}
 
/*
 * Byte write to EISA memory space; the byte is replicated into all
 * four lanes.  NOTE(review): the parameter is typed unsigned short,
 * so values above 0xff would replicate incorrectly — historical
 * signature, confirm before changing.
 */
extern inline void __writeb(unsigned short b, unsigned long addr)
{
	__set_hae(addr);
	addr &= __HAE_MASK;
	*(volatile unsigned int *) ((addr << 7) + EISA_MEM + 0x00) = b * 0x01010101;
}
 
/* 16-bit write to EISA memory space; value replicated into both word lanes. */
extern inline void __writew(unsigned short b, unsigned long addr)
{
	__set_hae(addr);
	addr &= __HAE_MASK;
	*(volatile unsigned int *) ((addr << 7) + EISA_MEM + 0x20) = b * 0x00010001;
}
 
/* 32-bit write to EISA memory space. */
extern inline void __writel(unsigned int b, unsigned long addr)
{
	__set_hae(addr);
	addr &= __HAE_MASK;
	*(volatile unsigned int *) ((addr << 7) + EISA_MEM + 0x60) = b;
}
 
/*
* The above have so much overhead that it probably doesn't make
* sense to have them inlined (better icache behaviour).
*/
#define inb(port) \
(__builtin_constant_p((port))?__inb(port):_inb(port))
 
#define outb(x, port) \
(__builtin_constant_p((port))?__outb((x),(port)):_outb((x),(port)))
 
/*
* The Alpha Jensen hardware for some rather strange reason puts
* the RTC clock at 0x170 instead of 0x70. Probably due to some
* misguided idea about using 0x70 for NMI stuff.
*
* These defines will override the defaults when doing RTC queries
*/
#define RTC_PORT(x) (0x170+(x))
#define RTC_ADDR(x) (x)
#define RTC_ALWAYS_BCD 0
 
#endif
/trunk/rc203soc/sw/uClinux/include/asm-alpha/termios.h
0,0 → 1,138
#ifndef _ALPHA_TERMIOS_H
#define _ALPHA_TERMIOS_H
 
#include <asm/ioctls.h>
#include <asm/termbits.h>
 
/* Old BSD-style tty parameter block (sgtty compatibility).
 * NOTE(review): field semantics follow the classic BSD sgtty
 * convention -- confirm against the ioctl users. */
struct sgttyb {
char sg_ispeed; /* input speed */
char sg_ospeed; /* output speed */
char sg_erase; /* erase character */
char sg_kill; /* kill character */
short sg_flags; /* mode flags */
};
 
/* Old BSD terminal special characters (TIOCGETC/TIOCSETC compatibility). */
struct tchars {
char t_intrc; /* interrupt */
char t_quitc; /* quit */
char t_startc; /* start output */
char t_stopc; /* stop output */
char t_eofc; /* end-of-file */
char t_brkc; /* input delimiter */
};
 
/* Old BSD local special characters (TIOCGLTC/TIOCSLTC compatibility). */
struct ltchars {
char t_suspc; /* stop process */
char t_dsuspc; /* delayed stop process */
char t_rprntc; /* reprint line */
char t_flushc; /* flush output */
char t_werasc; /* word erase */
char t_lnextc; /* literal next character */
};
 
/* Terminal window size (TIOCGWINSZ/TIOCSWINSZ). */
struct winsize {
unsigned short ws_row; /* rows, in characters */
unsigned short ws_col; /* columns, in characters */
unsigned short ws_xpixel; /* horizontal size, in pixels */
unsigned short ws_ypixel; /* vertical size, in pixels */
};
 
#define NCC 8
/* Legacy SysV-style "termio" structure; note the flag words are only
 * 16 bits wide here, versus the full tcflag_t in struct termios. */
struct termio {
unsigned short c_iflag; /* input mode flags */
unsigned short c_oflag; /* output mode flags */
unsigned short c_cflag; /* control mode flags */
unsigned short c_lflag; /* local mode flags */
unsigned char c_line; /* line discipline */
unsigned char c_cc[NCC]; /* control characters */
};
 
/*
* c_cc characters in the termio structure. Oh, how I love being
* backwardly compatible. Notice that character 4 and 5 are
* interpreted differently depending on whether ICANON is set in
* c_lflag. If it's set, they are used as _VEOF and _VEOL, otherwise
* as _VMIN and _VTIME. This is for compatibility with OSF/1 (which
* is compatible with sysV)...
*/
#define _VINTR 0
#define _VQUIT 1
#define _VERASE 2
#define _VKILL 3
#define _VEOF 4
#define _VMIN 4
#define _VEOL 5
#define _VTIME 5
#define _VEOL2 6
#define _VSWTC 7
 
/* line disciplines */
#define N_TTY 0
#define N_SLIP 1
#define N_MOUSE 2
#define N_PPP 3
#define N_AX25 5
 
#ifdef __KERNEL__
/* eof=^D eol=\0 eol2=\0 erase=del
werase=^W kill=^U reprint=^R sxtc=\0
intr=^C quit=^\ susp=^Z <OSF/1 VDSUSP>
start=^Q stop=^S lnext=^V discard=^U
vmin=\1 vtime=\0
*/
#define INIT_C_CC "\004\000\000\177\027\025\022\000\003\034\032\000\021\023\026\025\001\000"
 
/*
* Translate a "termio" structure into a "termios". Ugh.
*/
extern inline void trans_from_termio(struct termio * termio,
struct termios * termios)
{
/* Only the low 16 bits of each flag word come from the termio;
 * the upper 16 bits already present in the termios are preserved. */
#define SET_LOW_BITS(x,y) ((x) = (0xffff0000 & (x)) | (y))
SET_LOW_BITS(termios->c_iflag, termio->c_iflag);
SET_LOW_BITS(termios->c_oflag, termio->c_oflag);
SET_LOW_BITS(termios->c_cflag, termio->c_cflag);
SET_LOW_BITS(termios->c_lflag, termio->c_lflag);
#undef SET_LOW_BITS
/* Map the SysV c_cc slots onto the Alpha termios slots. _VEOF/_VMIN
 * and _VEOL/_VTIME share indices 4 and 5, so both targets get copied.
 * NOTE(review): termio->c_line is not copied here -- confirm intended. */
termios->c_cc[VINTR] = termio->c_cc[_VINTR];
termios->c_cc[VQUIT] = termio->c_cc[_VQUIT];
termios->c_cc[VERASE]= termio->c_cc[_VERASE];
termios->c_cc[VKILL] = termio->c_cc[_VKILL];
termios->c_cc[VEOF] = termio->c_cc[_VEOF];
termios->c_cc[VMIN] = termio->c_cc[_VMIN];
termios->c_cc[VEOL] = termio->c_cc[_VEOL];
termios->c_cc[VTIME] = termio->c_cc[_VTIME];
termios->c_cc[VEOL2] = termio->c_cc[_VEOL2];
termios->c_cc[VSWTC] = termio->c_cc[_VSWTC];
}
 
/*
* Translate a "termios" structure into a "termio". Ugh.
*
* Note the "fun" _VMIN overloading.
*/
extern inline void trans_to_termio(struct termios * termios,
struct termio * termio)
{
/* Flag words are silently truncated to the termio's 16 bits. */
termio->c_iflag = termios->c_iflag;
termio->c_oflag = termios->c_oflag;
termio->c_cflag = termios->c_cflag;
termio->c_lflag = termios->c_lflag;
termio->c_line = termios->c_line;
termio->c_cc[_VINTR] = termios->c_cc[VINTR];
termio->c_cc[_VQUIT] = termios->c_cc[VQUIT];
termio->c_cc[_VERASE]= termios->c_cc[VERASE];
termio->c_cc[_VKILL] = termios->c_cc[VKILL];
termio->c_cc[_VEOF] = termios->c_cc[VEOF];
termio->c_cc[_VEOL] = termios->c_cc[VEOL];
termio->c_cc[_VEOL2] = termios->c_cc[VEOL2];
termio->c_cc[_VSWTC] = termios->c_cc[VSWTC];
/* _VMIN/_VTIME overlay _VEOF/_VEOL (slots 4/5): only overwrite them
 * in raw mode, where they are interpreted as MIN/TIME. */
if (!(termios->c_lflag & ICANON)) {
termio->c_cc[_VMIN] = termios->c_cc[VMIN];
termio->c_cc[_VTIME] = termios->c_cc[VTIME];
}
}
 
#endif /* __KERNEL__ */
 
#endif /* _ALPHA_TERMIOS_H */
/trunk/rc203soc/sw/uClinux/include/asm-alpha/errno.h
0,0 → 1,142
#ifndef _ALPHA_ERRNO_H
#define _ALPHA_ERRNO_H
 
#define EPERM 1 /* Operation not permitted */
#define ENOENT 2 /* No such file or directory */
#define ESRCH 3 /* No such process */
#define EINTR 4 /* Interrupted system call */
#define EIO 5 /* I/O error */
#define ENXIO 6 /* No such device or address */
#define E2BIG 7 /* Arg list too long */
#define ENOEXEC 8 /* Exec format error */
#define EBADF 9 /* Bad file number */
#define ECHILD 10 /* No child processes */
#define EDEADLK 11 /* Resource deadlock would occur */
#define ENOMEM 12 /* Out of memory */
#define EACCES 13 /* Permission denied */
#define EFAULT 14 /* Bad address */
#define ENOTBLK 15 /* Block device required */
#define EBUSY 16 /* Device or resource busy */
#define EEXIST 17 /* File exists */
#define EXDEV 18 /* Cross-device link */
#define ENODEV 19 /* No such device */
#define ENOTDIR 20 /* Not a directory */
#define EISDIR 21 /* Is a directory */
#define EINVAL 22 /* Invalid argument */
#define ENFILE 23 /* File table overflow */
#define EMFILE 24 /* Too many open files */
#define ENOTTY 25 /* Not a typewriter */
#define ETXTBSY 26 /* Text file busy */
#define EFBIG 27 /* File too large */
#define ENOSPC 28 /* No space left on device */
#define ESPIPE 29 /* Illegal seek */
#define EROFS 30 /* Read-only file system */
#define EMLINK 31 /* Too many links */
#define EPIPE 32 /* Broken pipe */
#define EDOM 33 /* Math argument out of domain of func */
#define ERANGE 34 /* Math result not representable */
#define EAGAIN 35 /* Try again */
#define EWOULDBLOCK EAGAIN /* Operation would block */
#define EINPROGRESS 36 /* Operation now in progress */
#define EALREADY 37 /* Operation already in progress */
#define ENOTSOCK 38 /* Socket operation on non-socket */
#define EDESTADDRREQ 39 /* Destination address required */
#define EMSGSIZE 40 /* Message too long */
#define EPROTOTYPE 41 /* Protocol wrong type for socket */
#define ENOPROTOOPT 42 /* Protocol not available */
#define EPROTONOSUPPORT 43 /* Protocol not supported */
#define ESOCKTNOSUPPORT 44 /* Socket type not supported */
#define EOPNOTSUPP 45 /* Operation not supported on transport endpoint */
#define EPFNOSUPPORT 46 /* Protocol family not supported */
#define EAFNOSUPPORT 47 /* Address family not supported by protocol */
#define EADDRINUSE 48 /* Address already in use */
#define EADDRNOTAVAIL 49 /* Cannot assign requested address */
#define ENETDOWN 50 /* Network is down */
#define ENETUNREACH 51 /* Network is unreachable */
#define ENETRESET 52 /* Network dropped connection because of reset */
#define ECONNABORTED 53 /* Software caused connection abort */
#define ECONNRESET 54 /* Connection reset by peer */
#define ENOBUFS 55 /* No buffer space available */
#define EISCONN 56 /* Transport endpoint is already connected */
#define ENOTCONN 57 /* Transport endpoint is not connected */
#define ESHUTDOWN 58 /* Cannot send after transport endpoint shutdown */
#define ETOOMANYREFS 59 /* Too many references: cannot splice */
#define ETIMEDOUT 60 /* Connection timed out */
#define ECONNREFUSED 61 /* Connection refused */
#define ELOOP 62 /* Too many symbolic links encountered */
#define ENAMETOOLONG 63 /* File name too long */
#define EHOSTDOWN 64 /* Host is down */
#define EHOSTUNREACH 65 /* No route to host */
#define ENOTEMPTY 66 /* Directory not empty */
 
#define EUSERS 68 /* Too many users */
#define EDQUOT 69 /* Quota exceeded */
#define ESTALE 70 /* Stale NFS file handle */
#define EREMOTE 71 /* Object is remote */
 
#define ENOLCK 77 /* No record locks available */
#define ENOSYS 78 /* Function not implemented */
 
#define ENOMSG 80 /* No message of desired type */
#define EIDRM 81 /* Identifier removed */
#define ENOSR 82 /* Out of streams resources */
#define ETIME 83 /* Timer expired */
#define EBADMSG 84 /* Not a data message */
#define EPROTO 85 /* Protocol error */
#define ENODATA 86 /* No data available */
#define ENOSTR 87 /* Device not a stream */
 
#define ENOPKG 92 /* Package not installed */
 
#define EILSEQ 116 /* Illegal byte sequence */
 
/* The following are just random noise.. */
#define ECHRNG 88 /* Channel number out of range */
#define EL2NSYNC 89 /* Level 2 not synchronized */
#define EL3HLT 90 /* Level 3 halted */
#define EL3RST 91 /* Level 3 reset */
 
#define ELNRNG 93 /* Link number out of range */
#define EUNATCH 94 /* Protocol driver not attached */
#define ENOCSI 95 /* No CSI structure available */
#define EL2HLT 96 /* Level 2 halted */
#define EBADE 97 /* Invalid exchange */
#define EBADR 98 /* Invalid request descriptor */
#define EXFULL 99 /* Exchange full */
#define ENOANO 100 /* No anode */
#define EBADRQC 101 /* Invalid request code */
#define EBADSLT 102 /* Invalid slot */
 
#define EDEADLOCK EDEADLK
 
#define EBFONT 104 /* Bad font file format */
#define ENONET 105 /* Machine is not on the network */
#define ENOLINK 106 /* Link has been severed */
#define EADV 107 /* Advertise error */
#define ESRMNT 108 /* Srmount error */
#define ECOMM 109 /* Communication error on send */
#define EMULTIHOP 110 /* Multihop attempted */
#define EDOTDOT 111 /* RFS specific error */
#define EOVERFLOW 112 /* Value too large for defined data type */
#define ENOTUNIQ 113 /* Name not unique on network */
#define EBADFD 114 /* File descriptor in bad state */
#define EREMCHG 115 /* Remote address changed */
 
#define EUCLEAN 117 /* Structure needs cleaning */
#define ENOTNAM 118 /* Not a XENIX named type file */
#define ENAVAIL 119 /* No XENIX semaphores available */
#define EISNAM 120 /* Is a named type file */
#define EREMOTEIO 121 /* Remote I/O error */
 
#define ELIBACC 122 /* Can not access a needed shared library */
#define ELIBBAD 123 /* Accessing a corrupted shared library */
#define ELIBSCN 124 /* .lib section in a.out corrupted */
#define ELIBMAX 125 /* Attempting to link in too many shared libraries */
#define ELIBEXEC 126 /* Cannot exec a shared library directly */
#define ERESTART 127 /* Interrupted system call should be restarted */
#define ESTRPIPE 128 /* Streams pipe error */
 
#define ENOMEDIUM 129 /* No medium found */
#define EMEDIUMTYPE 130 /* Wrong medium type */
 
#endif
/trunk/rc203soc/sw/uClinux/include/asm-alpha/t2.h
0,0 → 1,654
#ifndef __ALPHA_T2__H__
#define __ALPHA_T2__H__
 
#include <linux/config.h>
#include <linux/types.h>
 
/*
* T2 is the internal name for the core logic chipset which provides
* memory controller and PCI access for the SABLE-based systems.
*
* This file is based on:
*
* SABLE I/O Specification
* Revision/Update Information: 1.3
*
* jestabro@amt.tay1.dec.com Initial Version.
*
*/
 
#define BYTE_ENABLE_SHIFT 5
#define TRANSFER_LENGTH_SHIFT 3
#define MEM_R1_MASK 0x03ffffff /* Mem sparse space region 1 mask is 26 bits */
 
#ifdef CONFIG_ALPHA_SRM_SETUP
/* if we are using the SRM PCI setup, we'll need to use variables instead */
#define T2_DMA_WIN_BASE_DEFAULT (1024*1024*1024)
#define T2_DMA_WIN_SIZE_DEFAULT (1024*1024*1024)
 
extern unsigned int T2_DMA_WIN_BASE;
extern unsigned int T2_DMA_WIN_SIZE;
 
#else /* SRM_SETUP */
#define T2_DMA_WIN_BASE (1024*1024*1024)
#define T2_DMA_WIN_SIZE (1024*1024*1024)
#endif /* SRM_SETUP */
 
/* GAMMA-SABLE is a SABLE with EV5-based CPUs */
#ifdef CONFIG_ALPHA_GAMMA
# define GAMMA_BIAS 0x8000000000UL
#else /* GAMMA */
# define GAMMA_BIAS 0x0000000000UL
#endif /* GAMMA */
 
/*
* Memory spaces:
*/
#define T2_CONF (IDENT_ADDR + GAMMA_BIAS + 0x390000000UL)
#define T2_IO (IDENT_ADDR + GAMMA_BIAS + 0x3a0000000UL)
#define T2_SPARSE_MEM (IDENT_ADDR + GAMMA_BIAS + 0x200000000UL)
#define T2_DENSE_MEM (IDENT_ADDR + GAMMA_BIAS + 0x3c0000000UL)
 
#define T2_IOCSR (IDENT_ADDR + GAMMA_BIAS + 0x38e000000UL)
#define T2_CERR1 (IDENT_ADDR + GAMMA_BIAS + 0x38e000020UL)
#define T2_CERR2 (IDENT_ADDR + GAMMA_BIAS + 0x38e000040UL)
#define T2_CERR3 (IDENT_ADDR + GAMMA_BIAS + 0x38e000060UL)
#define T2_PERR1 (IDENT_ADDR + GAMMA_BIAS + 0x38e000080UL)
#define T2_PERR2 (IDENT_ADDR + GAMMA_BIAS + 0x38e0000a0UL)
#define T2_PSCR (IDENT_ADDR + GAMMA_BIAS + 0x38e0000c0UL)
#define T2_HAE_1 (IDENT_ADDR + GAMMA_BIAS + 0x38e0000e0UL)
#define T2_HAE_2 (IDENT_ADDR + GAMMA_BIAS + 0x38e000100UL)
#define T2_HBASE (IDENT_ADDR + GAMMA_BIAS + 0x38e000120UL)
#define T2_WBASE1 (IDENT_ADDR + GAMMA_BIAS + 0x38e000140UL)
#define T2_WMASK1 (IDENT_ADDR + GAMMA_BIAS + 0x38e000160UL)
#define T2_TBASE1 (IDENT_ADDR + GAMMA_BIAS + 0x38e000180UL)
#define T2_WBASE2 (IDENT_ADDR + GAMMA_BIAS + 0x38e0001a0UL)
#define T2_WMASK2 (IDENT_ADDR + GAMMA_BIAS + 0x38e0001c0UL)
#define T2_TBASE2 (IDENT_ADDR + GAMMA_BIAS + 0x38e0001e0UL)
#define T2_TLBBR (IDENT_ADDR + GAMMA_BIAS + 0x38e000200UL)
 
#define T2_HAE_3 (IDENT_ADDR + GAMMA_BIAS + 0x38e000240UL)
#define T2_HAE_4 (IDENT_ADDR + GAMMA_BIAS + 0x38e000260UL)
 
#define HAE_ADDRESS T2_HAE_1
 
/* T2 CSRs are in the non-cachable primary IO space from 3.8000.0000 to
3.8fff.ffff
*
* +--------------+ 3 8000 0000
* | CPU 0 CSRs |
* +--------------+ 3 8100 0000
* | CPU 1 CSRs |
* +--------------+ 3 8200 0000
* | CPU 2 CSRs |
* +--------------+ 3 8300 0000
* | CPU 3 CSRs |
* +--------------+ 3 8400 0000
* | CPU Reserved |
* +--------------+ 3 8700 0000
* | Mem Reserved |
* +--------------+ 3 8800 0000
* | Mem 0 CSRs |
* +--------------+ 3 8900 0000
* | Mem 1 CSRs |
* +--------------+ 3 8a00 0000
* | Mem 2 CSRs |
* +--------------+ 3 8b00 0000
* | Mem 3 CSRs |
* +--------------+ 3 8c00 0000
* | Mem Reserved |
* +--------------+ 3 8e00 0000
* | PCI Bridge |
* +--------------+ 3 8f00 0000
* | Expansion IO |
* +--------------+ 3 9000 0000
*
*
*/
#define CPU0_BASE (IDENT_ADDR + GAMMA_BIAS + 0x380000000L)
#define CPU1_BASE (IDENT_ADDR + GAMMA_BIAS + 0x381000000L)
#define CPU2_BASE (IDENT_ADDR + GAMMA_BIAS + 0x382000000L)
#define CPU3_BASE (IDENT_ADDR + GAMMA_BIAS + 0x383000000L)
#define MEM0_BASE (IDENT_ADDR + GAMMA_BIAS + 0x388000000L)
#define MEM1_BASE (IDENT_ADDR + GAMMA_BIAS + 0x389000000L)
#define MEM2_BASE (IDENT_ADDR + GAMMA_BIAS + 0x38a000000L)
#define MEM3_BASE (IDENT_ADDR + GAMMA_BIAS + 0x38b000000L)
 
#ifdef __KERNEL__
 
/*
* Translate physical memory address as seen on (PCI) bus into
* a kernel virtual address and vv.
*/
/* Kernel virtual address -> (PCI) bus address: physical address plus
 * the base of the T2 DMA window. */
extern inline unsigned long virt_to_bus(void * address)
{
return virt_to_phys(address) + T2_DMA_WIN_BASE;
}
 
/* (PCI) bus address -> kernel virtual address; inverse of virt_to_bus. */
extern inline void * bus_to_virt(unsigned long address)
{
return phys_to_virt(address - T2_DMA_WIN_BASE);
}
 
/*
* I/O functions:
*
* T2 (the core logic PCI/memory support chipset for the SABLE
* series of processors) uses a sparse address mapping scheme to
* get at PCI memory and I/O.
*/
 
#define vuip volatile unsigned int *
 
/*
 * Read one byte from a T2 sparse I/O port: addr << 5 maps the port
 * into sparse space (offset 0x00 = byte transfer) and the byte lane
 * is shifted down by (addr & 3) * 8.
 */
extern inline unsigned int __inb(unsigned long addr)
{
long result = *(vuip) ((addr << 5) + T2_IO + 0x00);
result >>= (addr & 3) * 8;
return 0xffUL & result;
}
 
/*
 * Write one byte to a T2 sparse I/O port.
 *
 * The Alpha "insbl" instruction positions b into the byte lane
 * selected by addr & 3; the resulting longword is then stored to
 * the sparse-space address (offset 0x00 = byte-sized transfer).
 *
 * Fix: the asm output constraint was written "r=" -- in GNU extended
 * asm the '=' modifier must precede the register letter, i.e. "=r".
 */
extern inline void __outb(unsigned char b, unsigned long addr)
{
	unsigned int w;

	asm ("insbl %2,%1,%0" : "=r"(w) : "ri"(addr & 0x3), "r"(b));
	*(vuip) ((addr << 5) + T2_IO + 0x00) = w;
	mb();	/* order the MMIO store before subsequent accesses */
}
 
/*
 * Read a 16-bit value from a T2 sparse I/O port (offset 0x08 = word
 * transfer); byte lanes shifted down as in __inb.
 */
extern inline unsigned int __inw(unsigned long addr)
{
long result = *(vuip) ((addr << 5) + T2_IO + 0x08);
result >>= (addr & 3) * 8;
return 0xffffUL & result;
}
 
/*
 * Write a 16-bit value to a T2 sparse I/O port.
 *
 * "inswl" positions b into the word lane selected by addr & 3;
 * the result is stored to sparse space (offset 0x08 = word transfer).
 *
 * Fix: the asm output constraint was written "r=" -- in GNU extended
 * asm the '=' modifier must precede the register letter, i.e. "=r".
 */
extern inline void __outw(unsigned short b, unsigned long addr)
{
	unsigned int w;

	asm ("inswl %2,%1,%0" : "=r"(w) : "ri"(addr & 0x3), "r"(b));
	*(vuip) ((addr << 5) + T2_IO + 0x08) = w;
	mb();	/* order the MMIO store before subsequent accesses */
}
 
/* Read a 32-bit value from a T2 sparse I/O port (offset 0x18 = longword). */
extern inline unsigned int __inl(unsigned long addr)
{
return *(vuip) ((addr << 5) + T2_IO + 0x18);
}
 
/* Write a 32-bit value to a T2 sparse I/O port (offset 0x18 = longword). */
extern inline void __outl(unsigned int b, unsigned long addr)
{
*(vuip) ((addr << 5) + T2_IO + 0x18) = b;
mb(); /* order the MMIO store before subsequent accesses */
}
 
 
/*
* Memory functions. 64-bit and 32-bit accesses are done through
* dense memory space, everything else through sparse space.
*
* For reading and writing 8 and 16 bit quantities we need to
* go through one of the three sparse address mapping regions
* and use the HAE_MEM CSR to provide some bits of the address.
* The following few routines use only sparse address region 1
* which gives 1Gbyte of accessible space which relates exactly
* to the amount of PCI memory mapping *into* system address space.
* See p 6-17 of the specification but it looks something like this:
*
* 21164 Address:
*
* 3 2 1
* 9876543210987654321098765432109876543210
* 1ZZZZ0.PCI.QW.Address............BBLL
*
* ZZ = SBZ
* BB = Byte offset
* LL = Transfer length
*
* PCI Address:
*
* 3 2 1
* 10987654321098765432109876543210
* HHH....PCI.QW.Address........ 00
*
* HHH = 31:29 HAE_MEM CSR
*
*/
#ifdef CONFIG_ALPHA_SRM_SETUP
 
extern unsigned long t2_sm_base;
 
/*
 * Read one byte from PCI sparse memory (SRM-setup variant).
 * The address must be inside the HAE-covered window
 * [t2_sm_base, t2_sm_base + MEM_R1_MASK] or in the 512K-1M ISA
 * hole; otherwise return all-ones without touching the bus.
 */
extern inline unsigned long __readb(unsigned long addr)
{
unsigned long result, shift, work;

if ((addr >= t2_sm_base) && (addr <= (t2_sm_base + MEM_R1_MASK)))
work = (((addr & MEM_R1_MASK) << 5) + T2_SPARSE_MEM + 0x00);
else
if ((addr >= 512*1024) && (addr < 1024*1024)) /* check HOLE */
work = (((addr & MEM_R1_MASK) << 5) + T2_SPARSE_MEM + 0x00);
else
{
#if 0
printk("__readb: address 0x%lx not covered by HAE\n", addr);
#endif
return 0x0ffUL; /* out-of-window read: fail silently with all-ones */
}
shift = (addr & 0x3) << 3;
result = *(vuip) work;
result >>= shift;
return 0x0ffUL & result;
}
 
/*
 * Read a 16-bit value from PCI sparse memory (SRM-setup variant).
 * Same window checks as __readb; offset 0x08 selects a word transfer.
 */
extern inline unsigned long __readw(unsigned long addr)
{
unsigned long result, shift, work;

if ((addr >= t2_sm_base) && (addr <= (t2_sm_base + MEM_R1_MASK)))
work = (((addr & MEM_R1_MASK) << 5) + T2_SPARSE_MEM + 0x08);
else
if ((addr >= 512*1024) && (addr < 1024*1024)) /* check HOLE */
work = (((addr & MEM_R1_MASK) << 5) + T2_SPARSE_MEM + 0x08);
else
{
#if 0
printk("__readw: address 0x%lx not covered by HAE\n", addr);
#endif
return 0x0ffffUL; /* out-of-window read: fail silently with all-ones */
}
shift = (addr & 0x3) << 3;
result = *(vuip) work;
result >>= shift;
return 0x0ffffUL & result;
}
 
/* on SABLE with T2, we must use SPARSE memory even for 32-bit access */
/* on SABLE with T2, we must use SPARSE memory even for 32-bit access */
/*
 * Read a 32-bit value from PCI sparse memory (SRM-setup variant).
 * Same window checks as __readb; offset 0x18 selects a longword transfer.
 */
extern inline unsigned long __readl(unsigned long addr)
{
unsigned long result, work;

if ((addr >= t2_sm_base) && (addr <= (t2_sm_base + MEM_R1_MASK)))
work = (((addr & MEM_R1_MASK) << 5) + T2_SPARSE_MEM + 0x18);
else
if ((addr >= 512*1024) && (addr < 1024*1024)) /* check HOLE */
work = (((addr & MEM_R1_MASK) << 5) + T2_SPARSE_MEM + 0x18);
else
{
#if 0
printk("__readl: address 0x%lx not covered by HAE\n", addr);
#endif
return 0x0ffffffffUL; /* out-of-window read: fail silently with all-ones */
}
result = *(vuip) work;
return 0xffffffffUL & result;
}
 
/*
 * Write one byte to PCI sparse memory (SRM-setup variant).
 * Same window checks as __readb; the byte is replicated into all
 * four lanes (b * 0x01010101) and the sparse byte-enables pick one.
 */
extern inline void __writeb(unsigned char b, unsigned long addr)
{
unsigned long work;

if ((addr >= t2_sm_base) && (addr <= (t2_sm_base + MEM_R1_MASK)))
work = (((addr & MEM_R1_MASK) << 5) + T2_SPARSE_MEM + 0x00);
else
if ((addr >= 512*1024) && (addr < 1024*1024)) /* check HOLE */
work = (((addr & MEM_R1_MASK) << 5) + T2_SPARSE_MEM + 0x00);
else
{
#if 0
printk("__writeb: address 0x%lx not covered by HAE\n", addr);
#endif
return; /* out-of-window write: silently dropped */
}
*(vuip) work = b * 0x01010101;
}
 
/*
 * Write a 16-bit value to PCI sparse memory (SRM-setup variant).
 * Same window checks as __readb; the value is replicated into both
 * halves (b * 0x00010001); offset 0x08 selects a word transfer.
 */
extern inline void __writew(unsigned short b, unsigned long addr)
{
unsigned long work;

if ((addr >= t2_sm_base) && (addr <= (t2_sm_base + MEM_R1_MASK)))
work = (((addr & MEM_R1_MASK) << 5) + T2_SPARSE_MEM + 0x08);
else
if ((addr >= 512*1024) && (addr < 1024*1024)) /* check HOLE */
work = (((addr & MEM_R1_MASK) << 5) + T2_SPARSE_MEM + 0x08);
else
{
#if 0
printk("__writew: address 0x%lx not covered by HAE\n", addr);
#endif
return; /* out-of-window write: silently dropped */
}
*(vuip) work = b * 0x00010001;
}
 
/* on SABLE with T2, we must use SPARSE memory even for 32-bit access */
/* on SABLE with T2, we must use SPARSE memory even for 32-bit access */
/*
 * Write a 32-bit value to PCI sparse memory (SRM-setup variant).
 * Same window checks as the other SRM accessors; offset 0x18
 * selects a longword transfer.
 *
 * Fix: the "else" before the error block was missing (compare
 * __writew/__readl above). Without it the { ... return; } block was a
 * free-standing compound statement executed on every call, so the
 * function always returned before performing the store.
 */
extern inline void __writel(unsigned int b, unsigned long addr)
{
	unsigned long work;

	if ((addr >= t2_sm_base) && (addr <= (t2_sm_base + MEM_R1_MASK)))
		work = (((addr & MEM_R1_MASK) << 5) + T2_SPARSE_MEM + 0x18);
	else if ((addr >= 512*1024) && (addr < 1024*1024)) /* check HOLE */
		work = (((addr & MEM_R1_MASK) << 5) + T2_SPARSE_MEM + 0x18);
	else
	{
#if 0
		printk("__writel: address 0x%lx not covered by HAE\n", addr);
#endif
		return;	/* out-of-window write: silently dropped */
	}
	*(vuip) work = b;
}
 
#else /* SRM_SETUP */
 
/*
 * Read one byte from PCI sparse memory (non-SRM variant).
 * The top address bits are programmed into the HAE register (only
 * when they differ from the cached value) and the low bits select
 * the sparse-space location.
 */
extern inline unsigned long __readb(unsigned long addr)
{
unsigned long result, shift, msb;

shift = (addr & 0x3) * 8 ;
msb = addr & 0xE0000000 ;
addr &= MEM_R1_MASK ;
if (msb != hae.cache) { /* avoid the HAE write when already current */
set_hae(msb);
}
result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x00) ;
result >>= shift;
return 0xffUL & result;
}
 
/*
 * Read a 16-bit value from PCI sparse memory (non-SRM variant);
 * see __readb for the HAE handling. Offset 0x08 = word transfer.
 */
extern inline unsigned long __readw(unsigned long addr)
{
unsigned long result, shift, msb;

shift = (addr & 0x3) * 8;
msb = addr & 0xE0000000 ;
addr &= MEM_R1_MASK ;
if (msb != hae.cache) { /* avoid the HAE write when already current */
set_hae(msb);
}
result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08);
result >>= shift;
return 0xffffUL & result;
}
 
/* on SABLE with T2, we must use SPARSE memory even for 32-bit access */
/* on SABLE with T2, we must use SPARSE memory even for 32-bit access */
/*
 * Read a 32-bit value from PCI sparse memory (non-SRM variant);
 * see __readb for the HAE handling. Offset 0x18 = longword transfer.
 */
extern inline unsigned long __readl(unsigned long addr)
{
unsigned long result, msb;

msb = addr & 0xE0000000 ;
addr &= MEM_R1_MASK ;
if (msb != hae.cache) { /* avoid the HAE write when already current */
set_hae(msb);
}
result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18);
return 0xffffffffUL & result;
}
 
/*
 * Write one byte to PCI sparse memory (non-SRM variant); the byte is
 * replicated into all four lanes (b * 0x01010101). See __readb for
 * the HAE handling.
 */
extern inline void __writeb(unsigned char b, unsigned long addr)
{
unsigned long msb ;

msb = addr & 0xE0000000 ;
addr &= MEM_R1_MASK ;
if (msb != hae.cache) { /* avoid the HAE write when already current */
set_hae(msb);
}
*(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x00) = b * 0x01010101;
}
 
/*
 * Write a 16-bit value to PCI sparse memory (non-SRM variant); the
 * value is replicated into both halves (b * 0x00010001). See __readb
 * for the HAE handling.
 */
extern inline void __writew(unsigned short b, unsigned long addr)
{
unsigned long msb ;

msb = addr & 0xE0000000 ;
addr &= MEM_R1_MASK ;
if (msb != hae.cache) { /* avoid the HAE write when already current */
set_hae(msb);
}
*(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08) = b * 0x00010001;
}
 
/* on SABLE with T2, we must use SPARSE memory even for 32-bit access */
/* on SABLE with T2, we must use SPARSE memory even for 32-bit access */
/*
 * Write a 32-bit value to PCI sparse memory (non-SRM variant);
 * offset 0x18 = longword transfer. See __readb for the HAE handling.
 */
extern inline void __writel(unsigned int b, unsigned long addr)
{
unsigned long msb ;

msb = addr & 0xE0000000 ;
addr &= MEM_R1_MASK ;
if (msb != hae.cache) { /* avoid the HAE write when already current */
set_hae(msb);
}
*(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18) = b;
}
 
#endif /* SRM_SETUP */
 
#define inb(port) \
(__builtin_constant_p((port))?__inb(port):_inb(port))
 
#define outb(x, port) \
(__builtin_constant_p((port))?__outb((x),(port)):_outb((x),(port)))
 
#define readl(a) __readl((unsigned long)(a))
#define writel(v,a) __writel((v),(unsigned long)(a))
 
#undef vuip
 
extern unsigned long t2_init (unsigned long mem_start,
unsigned long mem_end);
 
#endif /* __KERNEL__ */
 
/*
* Sable CPU Module CSRS
*
* These are CSRs for hardware other than the CPU chip on the CPU module.
* The CPU module has Backup Cache control logic, Cbus control logic, and
* interrupt control logic on it. There is a duplicate tag store to speed
* up maintaining cache coherency.
*/
 
/* Per-CPU-module CSR layout: each register occupies one quadword
 * followed by three quadwords of padding (32-byte register stride). */
struct sable_cpu_csr {
unsigned long bcc; long fill_00[3]; /* Backup Cache Control */
unsigned long bcce; long fill_01[3]; /* Backup Cache Correctable Error */
unsigned long bccea; long fill_02[3]; /* B-Cache Corr Err Address Latch */
unsigned long bcue; long fill_03[3]; /* B-Cache Uncorrectable Error */
unsigned long bcuea; long fill_04[3]; /* B-Cache Uncorr Err Addr Latch */
unsigned long dter; long fill_05[3]; /* Duplicate Tag Error */
unsigned long cbctl; long fill_06[3]; /* CBus Control */
unsigned long cbe; long fill_07[3]; /* CBus Error */
unsigned long cbeal; long fill_08[3]; /* CBus Error Addr Latch low */
unsigned long cbeah; long fill_09[3]; /* CBus Error Addr Latch high */
unsigned long pmbx; long fill_10[3]; /* Processor Mailbox */
unsigned long ipir; long fill_11[3]; /* Inter-Processor Int Request */
unsigned long sic; long fill_12[3]; /* System Interrupt Clear */
unsigned long adlk; long fill_13[3]; /* Address Lock (LDxL/STxC) */
unsigned long madrl; long fill_14[3]; /* CBus Miss Address */
unsigned long rev; long fill_15[3]; /* CMIC Revision */
};
 
/*
* Data structure for handling T2 machine checks:
*/
/* Common header that prefixes each T2 machine-check log frame. */
struct el_t2_frame_header {
unsigned int elcf_fid; /* Frame ID (from above) */
unsigned int elcf_size; /* Size of frame in bytes */
};
 
/* Processor-specific portion of a T2 machine-check frame. */
struct el_t2_procdata_mcheck {
unsigned long elfmc_paltemp[32]; /* PAL TEMP REGS. */
/* EV4-specific fields */
unsigned long elfmc_exc_addr; /* Addr of excepting insn. */
unsigned long elfmc_exc_sum; /* Summary of arith traps. */
unsigned long elfmc_exc_mask; /* Exception mask (from exc_sum). */
unsigned long elfmc_iccsr; /* IBox hardware enables. */
unsigned long elfmc_pal_base; /* Base address for PALcode. */
unsigned long elfmc_hier; /* Hardware Interrupt Enable. */
unsigned long elfmc_hirr; /* Hardware Interrupt Request. */
unsigned long elfmc_mm_csr; /* D-stream fault info. */
unsigned long elfmc_dc_stat; /* D-cache status (ECC/Parity Err). */
unsigned long elfmc_dc_addr; /* EV3 Phys Addr for ECC/DPERR. */
unsigned long elfmc_abox_ctl; /* ABox Control Register. */
unsigned long elfmc_biu_stat; /* BIU Status. */
unsigned long elfmc_biu_addr; /* BIU Address. */
unsigned long elfmc_biu_ctl; /* BIU Control. */
unsigned long elfmc_fill_syndrome; /* For correcting ECC errors. */
unsigned long elfmc_fill_addr;/* Cache block which was being read. */
unsigned long elfmc_va; /* Effective VA of fault or miss. */
unsigned long elfmc_bc_tag; /* Backup Cache Tag Probe Results. */
};
 
/*
* Sable processor specific Machine Check Data segment.
*/
 
/* PAL logout-area header: the sbz1/retry bitfields pack a 32-bit word. */
struct el_t2_logout_header {
unsigned int elfl_size; /* size in bytes of logout area. */
int elfl_sbz1:31; /* Should be zero. */
char elfl_retry:1; /* Retry flag. */
unsigned int elfl_procoffset; /* Processor-specific offset. */
unsigned int elfl_sysoffset; /* Offset of system-specific. */
unsigned int elfl_error_type; /* PAL error type code. */
unsigned int elfl_frame_rev; /* PAL Frame revision. */
};
/* System-specific machine-check data: snapshot of the CPU-module
 * CSRs (mirrors struct sable_cpu_csr, without the padding). */
struct el_t2_sysdata_mcheck {
unsigned long elcmc_bcc; /* CSR 0 */
unsigned long elcmc_bcce; /* CSR 1 */
unsigned long elcmc_bccea; /* CSR 2 */
unsigned long elcmc_bcue; /* CSR 3 */
unsigned long elcmc_bcuea; /* CSR 4 */
unsigned long elcmc_dter; /* CSR 5 */
unsigned long elcmc_cbctl; /* CSR 6 */
unsigned long elcmc_cbe; /* CSR 7 */
unsigned long elcmc_cbeal; /* CSR 8 */
unsigned long elcmc_cbeah; /* CSR 9 */
unsigned long elcmc_pmbx; /* CSR 10 */
unsigned long elcmc_ipir; /* CSR 11 */
unsigned long elcmc_sic; /* CSR 12 */
unsigned long elcmc_adlk; /* CSR 13 */
unsigned long elcmc_madrl; /* CSR 14 */
unsigned long elcmc_crrev4; /* CSR 15 */
};
 
/*
* Sable memory error frame - sable pfms section 3.42
*/
/* Snapshot of one memory module's CSRs for the error log. */
struct el_t2_data_memory {
struct el_t2_frame_header elcm_hdr; /* ID$MEM-FERR = 0x08 */
unsigned int elcm_module; /* Module id. */
unsigned int elcm_res04; /* Reserved. */
unsigned long elcm_merr; /* CSR0: Error Reg 1. */
unsigned long elcm_mcmd1; /* CSR1: Command Trap 1. */
unsigned long elcm_mcmd2; /* CSR2: Command Trap 2. */
unsigned long elcm_mconf; /* CSR3: Configuration. */
unsigned long elcm_medc1; /* CSR4: EDC Status 1. */
unsigned long elcm_medc2; /* CSR5: EDC Status 2. */
unsigned long elcm_medcc; /* CSR6: EDC Control. */
unsigned long elcm_msctl; /* CSR7: Stream Buffer Control. */
unsigned long elcm_mref; /* CSR8: Refresh Control. */
unsigned long elcm_filter; /* CSR9: CRD Filter Control. */
};
 
 
/*
* Sable other cpu error frame - sable pfms section 3.43
*/
/* Snapshot of another CPU module's CSRs for the error log. */
struct el_t2_data_other_cpu {
short elco_cpuid; /* CPU ID */
short elco_res02[3]; /* Reserved / padding. */
unsigned long elco_bcc; /* CSR 0 */
unsigned long elco_bcce; /* CSR 1 */
unsigned long elco_bccea; /* CSR 2 */
unsigned long elco_bcue; /* CSR 3 */
unsigned long elco_bcuea; /* CSR 4 */
unsigned long elco_dter; /* CSR 5 */
unsigned long elco_cbctl; /* CSR 6 */
unsigned long elco_cbe; /* CSR 7 */
unsigned long elco_cbeal; /* CSR 8 */
unsigned long elco_cbeah; /* CSR 9 */
unsigned long elco_pmbx; /* CSR 10 */
unsigned long elco_ipir; /* CSR 11 */
unsigned long elco_sic; /* CSR 12 */
unsigned long elco_adlk; /* CSR 13 */
unsigned long elco_madrl; /* CSR 14 */
unsigned long elco_crrev4; /* CSR 15 */
};
 
/*
* Sable T2 chipset error frame - sable pfms section 3.44
*/
/* Snapshot of the T2 chipset CSRs for the error log. */
struct el_t2_data_t2{
struct el_t2_frame_header elct_hdr; /* ID$T2-FRAME */
unsigned long elct_iocsr; /* IO Control and Status Register */
unsigned long elct_cerr1; /* Cbus Error Register 1 */
unsigned long elct_cerr2; /* Cbus Error Register 2 */
unsigned long elct_cerr3; /* Cbus Error Register 3 */
unsigned long elct_perr1; /* PCI Error Register 1 */
unsigned long elct_perr2; /* PCI Error Register 2 */
unsigned long elct_hae0_1; /* High Address Extension Register 1 */
unsigned long elct_hae0_2; /* High Address Extension Register 2 */
unsigned long elct_hbase; /* High Base Register */
unsigned long elct_wbase1; /* Window Base Register 1 */
unsigned long elct_wmask1; /* Window Mask Register 1 */
unsigned long elct_tbase1; /* Translated Base Register 1 */
unsigned long elct_wbase2; /* Window Base Register 2 */
unsigned long elct_wmask2; /* Window Mask Register 2 */
unsigned long elct_tbase2; /* Translated Base Register 2 */
unsigned long elct_tdr0; /* TLB Data Register 0 */
unsigned long elct_tdr1; /* TLB Data Register 1 */
unsigned long elct_tdr2; /* TLB Data Register 2 */
unsigned long elct_tdr3; /* TLB Data Register 3 */
unsigned long elct_tdr4; /* TLB Data Register 4 */
unsigned long elct_tdr5; /* TLB Data Register 5 */
unsigned long elct_tdr6; /* TLB Data Register 6 */
unsigned long elct_tdr7; /* TLB Data Register 7 */
};
 
/*
* Sable error log data structure - sable pfms section 3.40
*/
/* Processor data logged for a corrected (recoverable) B-cache error. */
struct el_t2_data_corrected {
unsigned long elcpb_biu_stat; /* BIU Status. */
unsigned long elcpb_biu_addr; /* BIU Address. */
unsigned long elcpb_biu_ctl; /* BIU Control. */
unsigned long elcpb_fill_syndrome; /* ECC fill syndrome. */
unsigned long elcpb_fill_addr; /* Cache block being read. */
unsigned long elcpb_bc_tag; /* Backup Cache tag probe results. */
};
 
/*
* Sable error log data structure
* Note there are 4 memory slots on sable (see t2.h)
*/
/* Complete machine-check log frame: headers, per-processor and
 * system data, T2 CSRs, and one entry per memory slot. */
struct el_t2_frame_mcheck {
struct el_t2_frame_header elfmc_header; /* ID$P-FRAME_MCHECK */
struct el_t2_logout_header elfmc_hdr;
struct el_t2_procdata_mcheck elfmc_procdata;
struct el_t2_sysdata_mcheck elfmc_sysdata;
struct el_t2_data_t2 elfmc_t2data;
struct el_t2_data_memory elfmc_memdata[4]; /* 4 memory slots on sable */
struct el_t2_frame_header elfmc_footer; /* empty */
};
 
 
/*
* Sable error log data structures on memory errors
*/
/* Log frame for a corrected B-cache error (T2/memory data omitted). */
struct el_t2_frame_corrected {
struct el_t2_frame_header elfcc_header; /* ID$P-BC-COR */
struct el_t2_logout_header elfcc_hdr;
struct el_t2_data_corrected elfcc_procdata;
/* struct el_t2_data_t2 elfcc_t2data; */
/* struct el_t2_data_memory elfcc_memdata[4]; */
struct el_t2_frame_header elfcc_footer; /* empty */
};
 
 
#define RTC_PORT(x) (0x70 + (x))
#define RTC_ADDR(x) (0x80 | (x))
#define RTC_ALWAYS_BCD 0
 
#endif /* __ALPHA_T2__H__ */
/trunk/rc203soc/sw/uClinux/include/asm-alpha/posix_types.h
0,0 → 1,110
#ifndef _ALPHA_POSIX_TYPES_H
#define _ALPHA_POSIX_TYPES_H
 
/*
* This file is generally used by user-level software, so you need to
* be a little careful about namespace pollution etc. Also, we cannot
* assume GCC is being used.
*/
 
typedef unsigned int __kernel_dev_t;
typedef unsigned int __kernel_ino_t;
typedef unsigned int __kernel_mode_t;
typedef unsigned int __kernel_nlink_t;
typedef long __kernel_off_t;
typedef int __kernel_pid_t;
typedef unsigned int __kernel_uid_t;
typedef unsigned int __kernel_gid_t;
typedef unsigned long __kernel_size_t;
typedef long __kernel_ssize_t;
typedef long __kernel_ptrdiff_t;
typedef long __kernel_time_t;
typedef long __kernel_clock_t;
typedef int __kernel_daddr_t;
typedef char * __kernel_caddr_t;
typedef unsigned long __kernel_sigset_t; /* at least 32 bits */
 
#ifdef __GNUC__
typedef long long __kernel_loff_t;
#endif
 
typedef struct {
int val[2];
} __kernel_fsid_t;
 
#ifndef __GNUC__
 
#define __FD_SET(d, set) ((set)->fds_bits[__FDELT(d)] |= __FDMASK(d))
#define __FD_CLR(d, set) ((set)->fds_bits[__FDELT(d)] &= ~__FDMASK(d))
#define __FD_ISSET(d, set) ((set)->fds_bits[__FDELT(d)] & __FDMASK(d))
#define __FD_ZERO(set) \
((void) memset ((__ptr_t) (set), 0, sizeof (__kernel_fd_set)))
 
#else /* __GNUC__ */
 
/* With GNU C, use inline functions instead so args are evaluated only once: */
 
#undef __FD_SET
/* Set bit `fd` in the fd_set: word index is fd / __NFDBITS,
 * bit position within that word is fd % __NFDBITS. */
static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
{
	unsigned long word = fd / __NFDBITS;
	unsigned long bit  = fd % __NFDBITS;

	fdsetp->fds_bits[word] |= 1UL << bit;
}
 
#undef __FD_CLR
/* Clear bit `fd` in the fd_set (inverse of __FD_SET). */
static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
{
	unsigned long word = fd / __NFDBITS;
	unsigned long bit  = fd % __NFDBITS;

	fdsetp->fds_bits[word] &= ~(1UL << bit);
}
 
#undef __FD_ISSET
/* Test bit `fd` in the fd_set; returns 1 if set, 0 otherwise. */
static __inline__ int __FD_ISSET(unsigned long fd, const __kernel_fd_set *p)
{
	unsigned long word = fd / __NFDBITS;
	unsigned long mask = 1UL << (fd % __NFDBITS);

	return (p->fds_bits[word] & mask) ? 1 : 0;
}
 
/*
* This will unroll the loop for the normal constant case (8 ints,
* for a 256-bit fd_set)
*/
#undef __FD_ZERO
/* Zero every word of the fd_set pointed to by `p'. */
static __inline__ void __FD_ZERO(__kernel_fd_set *p)
{
	unsigned long *word = p->fds_bits;
	int n;

	/*
	 * For the usual compile-time-constant sizes, emit straight-line
	 * stores instead of a loop (8 longs == the normal 256-bit fd_set).
	 */
	if (__builtin_constant_p(__FDSET_LONGS)) {
		switch (__FDSET_LONGS) {
		case 16:
			word[15] = 0; word[14] = 0; word[13] = 0; word[12] = 0;
			word[11] = 0; word[10] = 0; word[ 9] = 0; word[ 8] = 0;
			word[ 7] = 0; word[ 6] = 0; word[ 5] = 0; word[ 4] = 0;
			word[ 3] = 0; word[ 2] = 0; word[ 1] = 0; word[ 0] = 0;
			return;

		case 8:
			word[ 7] = 0; word[ 6] = 0; word[ 5] = 0; word[ 4] = 0;
			word[ 3] = 0; word[ 2] = 0; word[ 1] = 0; word[ 0] = 0;
			return;

		case 4:
			word[ 3] = 0; word[ 2] = 0; word[ 1] = 0; word[ 0] = 0;
			return;
		}
	}
	/* Generic fallback for any other set size. */
	for (n = __FDSET_LONGS; n > 0; n--)
		*word++ = 0;
}
 
#endif /* __GNUC__ */
 
#endif /* _ALPHA_POSIX_TYPES_H */
/trunk/rc203soc/sw/uClinux/include/asm-alpha/sigcontext.h
0,0 → 1,34
#ifndef _ASMAXP_SIGCONTEXT_H
#define _ASMAXP_SIGCONTEXT_H
 
struct sigcontext_struct {
/*
* what should we have here? I'd probably better use the same
* stack layout as OSF/1, just in case we ever want to try
* running their binaries..
*
* This is the basic layout, but I don't know if we'll ever
* actually fill in all the values..
*/
long sc_onstack; /* NOTE(review): OSF/1-style sigstack flag -- confirm */
long sc_mask; /* signal mask to restore, presumably */
long sc_pc; /* program counter at interruption */
long sc_ps; /* processor status */
long sc_regs[32]; /* integer register file */
long sc_ownedfp; /* presumably non-zero when FP state below is live */
long sc_fpregs[32]; /* floating-point register file */
unsigned long sc_fpcr; /* FP control register */
unsigned long sc_fp_control; /* user FP control word, presumably */
unsigned long sc_reserved1, sc_reserved2; /* padding for OSF/1 layout */
unsigned long sc_ssize; /* signal-stack size -- TODO confirm */
char * sc_sbase; /* signal-stack base -- TODO confirm */
unsigned long sc_traparg_a0; /* trap arguments a0..a2 */
unsigned long sc_traparg_a1;
unsigned long sc_traparg_a2;
unsigned long sc_fp_trap_pc; /* FP-trap bookkeeping fields */
unsigned long sc_fp_trigger_sum;
unsigned long sc_fp_trigger_inst;
unsigned long sc_retcode[2]; /* room for signal-return trampoline code */
};
 
#endif
/trunk/rc203soc/sw/uClinux/include/asm-alpha/mman.h
0,0 → 1,37
#ifndef __ALPHA_MMAN_H__
#define __ALPHA_MMAN_H__

/* mmap()/mprotect() protection bits. */
#define PROT_READ 0x1 /* page can be read */
#define PROT_WRITE 0x2 /* page can be written */
#define PROT_EXEC 0x4 /* page can be executed */
#define PROT_NONE 0x0 /* page can not be accessed */

/* mmap() flags; the low nibble carries the sharing type. */
#define MAP_SHARED 0x01 /* Share changes */
#define MAP_PRIVATE 0x02 /* Changes are private */
#define MAP_TYPE 0x0f /* Mask for type of mapping (OSF/1 is _wrong_) */
#define MAP_FIXED 0x100 /* Interpret addr exactly */
#define MAP_ANONYMOUS 0x10 /* don't use a file */

/* not used by linux, but here to make sure we don't clash with OSF/1 defines */
#define MAP_HASSEMAPHORE 0x0200
#define MAP_INHERIT 0x0400
#define MAP_UNALIGNED 0x0800

/* These are linux-specific */
#define MAP_GROWSDOWN 0x1000 /* stack-like segment */
#define MAP_DENYWRITE 0x2000 /* ETXTBSY */
#define MAP_EXECUTABLE 0x4000 /* mark it as a executable */
#define MAP_LOCKED 0x8000 /* lock the mapping */

/* msync() flags. */
#define MS_ASYNC 1 /* sync memory asynchronously */
#define MS_SYNC 2 /* synchronous memory sync */
#define MS_INVALIDATE 4 /* invalidate the caches */

/* mlockall() flags. */
#define MCL_CURRENT 8192 /* lock all currently mapped pages */
#define MCL_FUTURE 16384 /* lock all additions to address space */

/* compatibility flags */
#define MAP_ANON MAP_ANONYMOUS
#define MAP_FILE 0

#endif /* __ALPHA_MMAN_H__ */
/trunk/rc203soc/sw/uClinux/include/asm-alpha/socket.h
0,0 → 1,36
#ifndef _ASM_SOCKET_H
#define _ASM_SOCKET_H

#include <asm/sockios.h>

/* For setsockoptions(2) */
/*
* Note: we only bother about making the SOL_SOCKET options
* same as OSF/1, as that's all that "normal" programs are
* likely to set. We don't necessarily want to be binary
* compatible with _everything_.
*/
#define SOL_SOCKET 0xffff

/* Option values below deliberately mirror OSF/1 (see note above). */
#define SO_DEBUG 0x0001
#define SO_REUSEADDR 0x0004
#define SO_KEEPALIVE 0x0008
#define SO_DONTROUTE 0x0010
#define SO_BROADCAST 0x0020
#define SO_LINGER 0x0080
#define SO_OOBINLINE 0x0100
/* To add :#define SO_REUSEPORT 0x0200 */

/* Read-only/buffer options; OSF/1 puts these in a separate 0x1000 range. */
#define SO_TYPE 0x1008
#define SO_ERROR 0x1007
#define SO_SNDBUF 0x1001
#define SO_RCVBUF 0x1002

/* linux-specific, might as well be the same as on i386 */
#define SO_NO_CHECK 11
#define SO_PRIORITY 12
#define SO_BSDCOMPAT 14

#define SO_BINDTODEVICE 25

#endif /* _ASM_SOCKET_H */

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.