URL
https://opencores.org/ocsvn/or1k_old/or1k_old/trunk
Subversion Repositories or1k_old
Compare Revisions
- This comparison shows the changes necessary to convert path
/or1k_old/trunk/rc203soc/sw/uClinux/include/asm-i386
- from Rev 1765 to Rev 1782
- ↔ Reverse comparison
Rev 1765 → Rev 1782
/termbits.h
0,0 → 1,160
#ifndef __ARCH_I386_TERMBITS_H__ |
#define __ARCH_I386_TERMBITS_H__ |
|
#include <linux/posix_types.h> |
|
typedef unsigned char cc_t; |
typedef unsigned int speed_t; |
typedef unsigned int tcflag_t; |
|
#define NCCS 19 |
/* POSIX-style terminal state, i386 kernel layout (NCCS = 19 control chars). */ |
struct termios { |
tcflag_t c_iflag; /* input mode flags */ |
tcflag_t c_oflag; /* output mode flags */ |
tcflag_t c_cflag; /* control mode flags */ |
tcflag_t c_lflag; /* local mode flags */ |
cc_t c_line; /* line discipline */ |
cc_t c_cc[NCCS]; /* control characters, indexed by VINTR..VEOL2 below */ |
}; |
|
/* c_cc characters */ |
#define VINTR 0 |
#define VQUIT 1 |
#define VERASE 2 |
#define VKILL 3 |
#define VEOF 4 |
#define VTIME 5 |
#define VMIN 6 |
#define VSWTC 7 |
#define VSTART 8 |
#define VSTOP 9 |
#define VSUSP 10 |
#define VEOL 11 |
#define VREPRINT 12 |
#define VDISCARD 13 |
#define VWERASE 14 |
#define VLNEXT 15 |
#define VEOL2 16 |
|
/* c_iflag bits */ |
#define IGNBRK 0000001 |
#define BRKINT 0000002 |
#define IGNPAR 0000004 |
#define PARMRK 0000010 |
#define INPCK 0000020 |
#define ISTRIP 0000040 |
#define INLCR 0000100 |
#define IGNCR 0000200 |
#define ICRNL 0000400 |
#define IUCLC 0001000 |
#define IXON 0002000 |
#define IXANY 0004000 |
#define IXOFF 0010000 |
#define IMAXBEL 0020000 |
|
/* c_oflag bits */ |
#define OPOST 0000001 |
#define OLCUC 0000002 |
#define ONLCR 0000004 |
#define OCRNL 0000010 |
#define ONOCR 0000020 |
#define ONLRET 0000040 |
#define OFILL 0000100 |
#define OFDEL 0000200 |
#define NLDLY 0000400 |
#define NL0 0000000 |
#define NL1 0000400 |
#define CRDLY 0003000 |
#define CR0 0000000 |
#define CR1 0001000 |
#define CR2 0002000 |
#define CR3 0003000 |
#define TABDLY 0014000 |
#define TAB0 0000000 |
#define TAB1 0004000 |
#define TAB2 0010000 |
#define TAB3 0014000 |
#define XTABS 0014000 |
#define BSDLY 0020000 |
#define BS0 0000000 |
#define BS1 0020000 |
#define VTDLY 0040000 |
#define VT0 0000000 |
#define VT1 0040000 |
#define FFDLY 0100000 |
#define FF0 0000000 |
#define FF1 0100000 |
|
/* c_cflag bit meaning */ |
#define CBAUD 0010017 |
#define B0 0000000 /* hang up */ |
#define B50 0000001 |
#define B75 0000002 |
#define B110 0000003 |
#define B134 0000004 |
#define B150 0000005 |
#define B200 0000006 |
#define B300 0000007 |
#define B600 0000010 |
#define B1200 0000011 |
#define B1800 0000012 |
#define B2400 0000013 |
#define B4800 0000014 |
#define B9600 0000015 |
#define B19200 0000016 |
#define B38400 0000017 |
#define EXTA B19200 |
#define EXTB B38400 |
#define CSIZE 0000060 |
#define CS5 0000000 |
#define CS6 0000020 |
#define CS7 0000040 |
#define CS8 0000060 |
#define CSTOPB 0000100 |
#define CREAD 0000200 |
#define PARENB 0000400 |
#define PARODD 0001000 |
#define HUPCL 0002000 |
#define CLOCAL 0004000 |
#define CBAUDEX 0010000 |
#define B57600 0010001 |
#define B115200 0010002 |
#define B230400 0010003 |
#define B460800 0010004 |
#define CIBAUD 002003600000 /* input baud rate (not used) */ |
#define CRTSCTS 020000000000 /* flow control */ |
|
/* c_lflag bits */ |
#define ISIG 0000001 |
#define ICANON 0000002 |
#define XCASE 0000004 |
#define ECHO 0000010 |
#define ECHOE 0000020 |
#define ECHOK 0000040 |
#define ECHONL 0000100 |
#define NOFLSH 0000200 |
#define TOSTOP 0000400 |
#define ECHOCTL 0001000 |
#define ECHOPRT 0002000 |
#define ECHOKE 0004000 |
#define FLUSHO 0010000 |
#define PENDIN 0040000 |
#define IEXTEN 0100000 |
|
/* tcflow() and TCXONC use these */ |
#define TCOOFF 0 |
#define TCOON 1 |
#define TCIOFF 2 |
#define TCION 3 |
|
/* tcflush() and TCFLSH use these */ |
#define TCIFLUSH 0 |
#define TCOFLUSH 1 |
#define TCIOFLUSH 2 |
|
/* tcsetattr uses these */ |
#define TCSANOW 0 |
#define TCSADRAIN 1 |
#define TCSAFLUSH 2 |
|
#endif |
/math_emu.h
0,0 → 1,57
#ifndef _I386_MATH_EMU_H |
#define _I386_MATH_EMU_H |
|
#include <asm/sigcontext.h> |
|
void restore_i387_soft(struct _fpstate *buf); |
struct _fpstate * save_i387_soft(struct _fpstate * buf); |
|
/* One FPU register in the software math emulator's unpacked format |
 * (sign/tag/exponent kept separately from the 64-bit significand). */ |
struct fpu_reg { |
char sign; |
char tag; /* register tag -- encoding not shown here; see emulator source */ |
long exp; /* exponent */ |
unsigned sigl; /* significand, low 32 bits -- presumably; verify in emulator */ |
unsigned sigh; /* significand, high 32 bits */ |
}; |
|
|
/* This structure matches the layout of the data saved to the stack |
following a device-not-present interrupt, part of it saved |
automatically by the 80386/80486. |
*/ |
struct info { |
long ___orig_eip; |
long ___ret_from_system_call; |
long ___ebx; |
long ___ecx; |
long ___edx; |
long ___esi; |
long ___edi; |
long ___ebp; |
long ___eax; |
long ___ds; |
long ___es; |
long ___fs; |
long ___gs; |
long ___orig_eax; |
long ___eip; |
long ___cs; |
long ___eflags; |
long ___esp; |
long ___ss; |
long ___vm86_es; /* This and the following only in vm86 mode */ |
long ___vm86_ds; |
long ___vm86_fs; |
long ___vm86_gs; |
}; |
|
/* Interface for converting data between the emulator format |
* and the hardware format. Used for core dumping and for |
* ptrace(2) */ |
void hardreg_to_softreg(const char hardreg[10], |
struct fpu_reg *soft_reg); |
|
void softreg_to_hardreg(const struct fpu_reg *rp, char d[10], |
long int control_word); |
|
#endif |
/byteorder.h
0,0 → 1,90
#ifndef _I386_BYTEORDER_H |
#define _I386_BYTEORDER_H |
|
#undef ntohl |
#undef ntohs |
#undef htonl |
#undef htons |
|
#ifndef __LITTLE_ENDIAN |
#define __LITTLE_ENDIAN 1234 |
#endif |
|
#ifndef __LITTLE_ENDIAN_BITFIELD |
#define __LITTLE_ENDIAN_BITFIELD |
#endif |
|
/* For avoiding bswap on i386 */ |
#ifdef __KERNEL__ |
#include <linux/config.h> |
#endif |
|
extern unsigned long int ntohl(unsigned long int); |
extern unsigned short int ntohs(unsigned short int); |
extern unsigned long int htonl(unsigned long int); |
extern unsigned short int htons(unsigned short int); |
|
extern __inline__ unsigned long int __ntohl(unsigned long int); |
extern __inline__ unsigned short int __ntohs(unsigned short int); |
extern __inline__ unsigned long int __constant_ntohl(unsigned long int); |
extern __inline__ unsigned short int __constant_ntohs(unsigned short int); |
|
/* 32-bit byte swap (network <-> host order; i386 is little-endian). */ |
extern __inline__ unsigned long int |
__ntohl(unsigned long int x) |
{ |
#if defined(__KERNEL__) && !defined(CONFIG_M386) |
/* 486+ kernels: single BSWAP (instruction absent on the plain 386). */ |
__asm__("bswap %0" : "=r" (x) : "0" (x)); |
#else |
__asm__("xchgb %b0,%h0\n\t" /* swap lower bytes */ |
"rorl $16,%0\n\t" /* swap words */ |
"xchgb %b0,%h0" /* swap higher bytes */ |
:"=q" (x) /* "q": need a/b/c/d so %b0/%h0 sub-registers exist */ |
: "0" (x)); |
#endif |
return x; |
} |
|
#define __constant_ntohl(x) \ |
((unsigned long int)((((unsigned long int)(x) & 0x000000ffU) << 24) | \ |
(((unsigned long int)(x) & 0x0000ff00U) << 8) | \ |
(((unsigned long int)(x) & 0x00ff0000U) >> 8) | \ |
(((unsigned long int)(x) & 0xff000000U) >> 24))) |
|
/* 16-bit byte swap via XCHG of the low/high byte halves of the register. */ |
extern __inline__ unsigned short int |
__ntohs(unsigned short int x) |
{ |
__asm__("xchgb %b0,%h0" /* swap bytes */ |
: "=q" (x) /* "q": register with addressable byte halves */ |
: "0" (x)); |
return x; |
} |
|
/*
 * Compile-time 16-bit byte swap for constant arguments.
 * Fix: the original macro ended with a stray line-continuation '\' after the
 * final ')))', which silently appended the following source line to the macro
 * body; the trailing backslash has been removed.
 */
#define __constant_ntohs(x) \
((unsigned short int)((((unsigned short int)(x) & 0x00ff) << 8) | \
(((unsigned short int)(x) & 0xff00) >> 8)))
|
#define __htonl(x) __ntohl(x) |
#define __htons(x) __ntohs(x) |
#define __constant_htonl(x) __constant_ntohl(x) |
#define __constant_htons(x) __constant_ntohs(x) |
|
#ifdef __OPTIMIZE__ |
# define ntohl(x) \ |
(__builtin_constant_p((long)(x)) ? \ |
__constant_ntohl((x)) : \ |
__ntohl((x))) |
# define ntohs(x) \ |
(__builtin_constant_p((short)(x)) ? \ |
__constant_ntohs((x)) : \ |
__ntohs((x))) |
# define htonl(x) \ |
(__builtin_constant_p((long)(x)) ? \ |
__constant_htonl((x)) : \ |
__htonl((x))) |
# define htons(x) \ |
(__builtin_constant_p((short)(x)) ? \ |
__constant_htons((x)) : \ |
__htons((x))) |
#endif |
|
#endif |
/smp.h
0,0 → 1,259
#ifndef __ASM_SMP_H |
#define __ASM_SMP_H |
|
#ifdef __SMP__ |
#ifndef ASSEMBLY |
|
#include <asm/i82489.h> |
#include <asm/bitops.h> |
#include <linux/tasks.h> |
#include <linux/ptrace.h> |
|
/* |
* Support definitions for SMP machines following the intel multiprocessing |
* specification |
*/ |
|
/* |
* This tag identifies where the SMP configuration |
* information is. |
*/ |
|
#define SMP_MAGIC_IDENT (('_'<<24)|('P'<<16)|('M'<<8)|'_') |
|
struct intel_mp_floating |
{ |
char mpf_signature[4]; /* "_MP_" */ |
unsigned long mpf_physptr; /* Configuration table address */ |
unsigned char mpf_length; /* Our length (paragraphs) */ |
unsigned char mpf_specification;/* Specification version */ |
unsigned char mpf_checksum; /* Checksum (makes sum 0) */ |
unsigned char mpf_feature1; /* Standard or configuration ? */ |
unsigned char mpf_feature2; /* Bit7 set for IMCR|PIC */ |
unsigned char mpf_feature3; /* Unused (0) */ |
unsigned char mpf_feature4; /* Unused (0) */ |
unsigned char mpf_feature5; /* Unused (0) */ |
}; |
|
/* MP configuration table header (signature "PCMP"); immediately followed |
 * in memory by mpc_oemcount entries of the MP_* types defined below. */ |
struct mp_config_table |
{ |
char mpc_signature[4]; |
#define MPC_SIGNATURE "PCMP" |
unsigned short mpc_length; /* Size of table */ |
char mpc_spec; /* 0x01 */ |
char mpc_checksum; |
char mpc_oem[8]; |
char mpc_productid[12]; |
unsigned long mpc_oemptr; /* 0 if not present */ |
unsigned short mpc_oemsize; /* 0 if not present */ |
unsigned short mpc_oemcount; |
unsigned long mpc_lapic; /* APIC address */ |
unsigned long reserved; |
}; |
|
/* Followed by entries */ |
|
#define MP_PROCESSOR 0 |
#define MP_BUS 1 |
#define MP_IOAPIC 2 |
#define MP_INTSRC 3 |
#define MP_LINTSRC 4 |
|
struct mpc_config_processor |
{ |
unsigned char mpc_type; |
unsigned char mpc_apicid; /* Local APIC number */ |
unsigned char mpc_apicver; /* Its versions */ |
unsigned char mpc_cpuflag; |
#define CPU_ENABLED 1 /* Processor is available */ |
#define CPU_BOOTPROCESSOR 2 /* Processor is the BP */ |
unsigned long mpc_cpufeature; |
#define CPU_STEPPING_MASK 0x0F |
#define CPU_MODEL_MASK 0xF0 |
#define CPU_FAMILY_MASK 0xF00 |
unsigned long mpc_featureflag; /* CPUID feature value */ |
unsigned long mpc_reserved[2]; |
}; |
|
/* MP-table bus entry; packed so the 6-byte type string gets no padding. */ |
struct mpc_config_bus |
{ |
unsigned char mpc_type; |
unsigned char mpc_busid; |
unsigned char mpc_bustype[6] __attribute((packed)); /* e.g. "ISA", "PCI" -- see BUSTYPE_* below */ |
}; |
|
#define BUSTYPE_EISA "EISA" |
#define BUSTYPE_ISA "ISA" |
#define BUSTYPE_INTERN "INTERN" /* Internal BUS */ |
#define BUSTYPE_MCA "MCA" |
#define BUSTYPE_VL "VL" /* Local bus */ |
#define BUSTYPE_PCI "PCI" |
#define BUSTYPE_PCMCIA "PCMCIA" |
|
/* We don't understand the others */ |
|
struct mpc_config_ioapic |
{ |
unsigned char mpc_type; |
unsigned char mpc_apicid; |
unsigned char mpc_apicver; |
unsigned char mpc_flags; |
#define MPC_APIC_USABLE 0x01 |
unsigned long mpc_apicaddr; |
}; |
|
struct mpc_config_intsrc |
{ |
unsigned char mpc_type; |
unsigned char mpc_irqtype; |
unsigned short mpc_irqflag; |
unsigned char mpc_srcbus; |
unsigned char mpc_srcbusirq; |
unsigned char mpc_dstapic; |
unsigned char mpc_dstirq; |
}; |
|
#define MP_INT_VECTORED 0 |
#define MP_INT_NMI 1 |
#define MP_INT_SMI 2 |
#define MP_INT_EXTINT 3 |
|
#define MP_IRQDIR_DEFAULT 0 |
#define MP_IRQDIR_HIGH 1 |
#define MP_IRQDIR_LOW 3 |
|
|
struct mpc_config_intlocal |
{ |
unsigned char mpc_type; |
unsigned char mpc_irqtype; |
unsigned short mpc_irqflag; |
unsigned char mpc_srcbusid; |
unsigned char mpc_srcbusirq; |
unsigned char mpc_destapic; |
#define MP_APIC_ALL 0xFF |
unsigned char mpc_destapiclint; |
}; |
|
|
/* |
* Default configurations |
* |
* 1 2 CPU ISA 82489DX |
* 2 2 CPU EISA 82489DX no IRQ 8 or timer chaining |
* 3 2 CPU EISA 82489DX |
* 4 2 CPU MCA 82489DX |
* 5 2 CPU ISA+PCI |
* 6 2 CPU EISA+PCI |
* 7 2 CPU MCA+PCI |
*/ |
|
/* |
* Per process x86 parameters |
*/ |
|
/* Per-CPU identification data; one slot per CPU in cpu_data[NR_CPUS], |
 * filled at boot (see smp_store_cpu_info below). */ |
struct cpuinfo_x86 |
{ |
char hard_math; /* nonzero if an FPU is present -- TODO confirm against setup code */ |
char x86; /* CPU family */ |
char x86_model; |
char x86_mask; /* presumably the stepping -- verify against boot code */ |
char x86_vendor_id[16]; |
int x86_capability; /* CPUID feature bits -- presumably; verify */ |
int x86_ext_capability; |
int fdiv_bug; /* Pentium FDIV erratum detected */ |
int have_cpuid; /* CPUID instruction available */ |
char wp_works_ok; |
char hlt_works_ok; |
unsigned long udelay_val; /* calibrated delay-loop value -- presumably for udelay() */ |
}; |
|
|
extern struct cpuinfo_x86 cpu_data[NR_CPUS]; |
|
/* |
* Private routines/data |
*/ |
|
extern int smp_found_config; |
extern int smp_scan_config(unsigned long, unsigned long); |
extern unsigned long smp_alloc_memory(unsigned long mem_base); |
extern unsigned char *apic_reg; |
extern unsigned char *kernel_stacks[NR_CPUS]; |
extern unsigned char boot_cpu_id; |
extern unsigned long cpu_present_map; |
extern volatile int cpu_number_map[NR_CPUS]; |
extern volatile int cpu_logical_map[NR_CPUS]; |
extern volatile unsigned long smp_invalidate_needed; |
extern void smp_flush_tlb(void); |
extern volatile unsigned long kernel_flag, kernel_counter; |
extern volatile unsigned long cpu_callin_map[NR_CPUS]; |
extern volatile unsigned char active_kernel_processor; |
extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs); |
extern void smp_reschedule_irq(int cpl, struct pt_regs *regs); |
extern unsigned long ipi_count; |
extern void smp_invalidate_rcv(void); /* Process an NMI */ |
extern volatile unsigned long kernel_counter; |
extern volatile unsigned long syscall_count; |
|
/* |
* General functions that each host system must provide. |
*/ |
|
extern void smp_callin(void); |
extern void smp_boot_cpus(void); |
extern void smp_store_cpu_info(int id); /* Store per cpu info (like the initial udelay numbers */ |
|
extern volatile unsigned long smp_proc_in_lock[NR_CPUS]; /* for computing process time */ |
extern volatile unsigned long smp_process_available; |
|
/* |
* APIC handlers: Note according to the Intel specification update |
* you should put reads between APIC writes. |
* Intel Pentium processor specification update [11AP, pg 64] |
* "Back to Back Assertions of HOLD May Cause Lost APIC Write Cycle" |
*/ |
|
/* Write v to the local APIC register at byte offset reg; volatile MMIO |
 * store through the apic_reg mapping (see 11AP erratum note above). */ |
extern __inline void apic_write(unsigned long reg, unsigned long v) |
{ |
*((volatile unsigned long *)(apic_reg+reg))=v; |
} |
|
/* Read the local APIC register at byte offset reg (volatile MMIO load). */ |
extern __inline unsigned long apic_read(unsigned long reg) |
{ |
return *((volatile unsigned long *)(apic_reg+reg)); |
} |
|
/* |
* This function is needed by all SMP systems. It must _always_ be valid from the initial |
* startup. This may require magic on some systems (in the i86 case we dig out the boot |
* cpu id from the config and set up a fake apic_reg pointer so that before we activate |
* the apic we get the right answer). Hopefully other processors are more sensible 8) |
*/ |
|
/* Current CPU's id, extracted from its local APIC ID register. */ |
extern __inline int smp_processor_id(void) |
{ |
return GET_APIC_ID(apic_read(APIC_ID)); |
} |
|
#endif /* !ASSEMBLY */ |
|
#define NO_PROC_ID 0xFF /* No processor magic marker */ |
|
/* |
* This magic constant controls our willingness to transfer |
* a process across CPUs. Such a transfer incurs misses on the L1 |
* cache, and on a P6 or P5 with multiple L2 caches L2 hits. My |
* gut feeling is this will vary by board in value. For a board |
* with separate L2 cache it probably depends also on the RSS, and |
* for a board with shared L2 cache it ought to decay fast as other |
* processes are run. |
*/ |
|
#define PROC_CHANGE_PENALTY 20 /* Schedule penalty */ |
|
#define SMP_FROM_INT 1 |
#define SMP_FROM_SYSCALL 2 |
|
#endif |
#endif |
/setup.h
0,0 → 1,4
#ifndef _I386_SETUP_H |
#define _I386_SETUP_H |
|
#endif /* _I386_SETUP_H */ |
/segment.h
0,0 → 1,339
#ifndef _ASM_SEGMENT_H |
#define _ASM_SEGMENT_H |
|
#define KERNEL_CS 0x10 |
#define KERNEL_DS 0x18 |
|
#define USER_CS 0x23 |
#define USER_DS 0x2B |
|
#ifndef __ASSEMBLY__ |
|
/* |
* Uh, these should become the main single-value transfer routines.. |
* They automatically use the right size if we just have the right |
* pointer type.. |
*/ |
#define put_user(x,ptr) __put_user((unsigned long)(x),(ptr),sizeof(*(ptr))) |
#define get_user(ptr) ((__typeof__(*(ptr)))__get_user((ptr),sizeof(*(ptr)))) |
|
/* |
* This is a silly but good way to make sure that |
* the __put_user function is indeed always optimized, |
* and that we use the correct sizes.. |
*/ |
extern int bad_user_access_length(void); |
|
/* |
* dummy pointer type structure.. gcc won't try to do something strange |
* this way.. |
*/ |
struct __segment_dummy { unsigned long a[100]; }; |
#define __sd(x) ((struct __segment_dummy *) (x)) |
#define __const_sd(x) ((const struct __segment_dummy *) (x)) |
|
/* |
 * Store the low `size` bytes of x at address y through the %fs segment |
 * (the user-data segment selected via set_fs below). Only sizes 1/2/4 |
 * are supported; any other size calls the undefined extern |
 * bad_user_access_length(), turning the mistake into a link error. |
 * No fault handling -- a bad user pointer simply faults. |
 */ |
static inline void __put_user(unsigned long x, void * y, int size) |
{ |
switch (size) { |
case 1: |
__asm__ ("movb %b1,%%fs:%0" |
:"=m" (*__sd(y)) |
:"iq" ((unsigned char) x), "m" (*__sd(y))); |
break; |
case 2: |
__asm__ ("movw %w1,%%fs:%0" |
:"=m" (*__sd(y)) |
:"ir" ((unsigned short) x), "m" (*__sd(y))); |
break; |
case 4: |
__asm__ ("movl %1,%%fs:%0" |
:"=m" (*__sd(y)) |
:"ir" (x), "m" (*__sd(y))); |
break; |
default: |
bad_user_access_length(); |
} |
} |
|
/* |
 * Load `size` bytes (1/2/4) from address y through %fs and zero-extend |
 * to unsigned long. Other sizes become a link error via the undefined |
 * bad_user_access_length(). Counterpart of __put_user above. |
 */ |
static inline unsigned long __get_user(const void * y, int size) |
{ |
unsigned long result; |
 |
switch (size) { |
case 1: |
__asm__ ("movb %%fs:%1,%b0" |
:"=q" (result) |
:"m" (*__const_sd(y))); |
return (unsigned char) result; |
case 2: |
__asm__ ("movw %%fs:%1,%w0" |
:"=r" (result) |
:"m" (*__const_sd(y))); |
return (unsigned short) result; |
case 4: |
__asm__ ("movl %%fs:%1,%0" |
:"=r" (result) |
:"m" (*__const_sd(y))); |
return result; |
default: |
return bad_user_access_length(); |
} |
} |
|
/* |
 * Copy n bytes from kernel space (%ds:from) to the %fs segment (to): |
 * %es is saved, loaded from %fs (push fs / pop es), the destination is |
 * aligned to 4 bytes with movsb, the bulk moved with rep movsl, the |
 * 0-3 tail bytes with movsb, then %es is restored. |
 * NOTE(review): the multi-line string literal is an old GCC extension, |
 * and unlike the fromfs variant below there is no "memory" clobber -- |
 * looks intentional for this era, but worth confirming. |
 */ |
static inline void __generic_memcpy_tofs(void * to, const void * from, unsigned long n) |
{ |
__asm__ volatile |
(" cld |
push %%es |
push %%fs |
cmpl $3,%0 |
pop %%es |
jbe 1f |
movl %%edi,%%ecx |
negl %%ecx |
andl $3,%%ecx |
subl %%ecx,%0 |
rep; movsb |
movl %0,%%ecx |
shrl $2,%%ecx |
rep; movsl |
andl $3,%0 |
1: movl %0,%%ecx |
rep; movsb |
pop %%es" |
:"=abd" (n) |
:"0" (n),"D" ((long) to),"S" ((long) from) |
:"cx","di","si"); |
} |
|
/* |
 * memcpy_tofs for a compile-time-constant n: small and word-multiple |
 * sizes (0-4, 8, 12, 16) unroll into direct __put_user stores; anything |
 * else falls through to the COMMON() bulk-copy asm below. |
 */ |
static inline void __constant_memcpy_tofs(void * to, const void * from, unsigned long n) |
{ |
switch (n) { |
case 0: |
return; |
case 1: |
__put_user(*(const char *) from, (char *) to, 1); |
return; |
case 2: |
__put_user(*(const short *) from, (short *) to, 2); |
return; |
case 3: |
__put_user(*(const short *) from, (short *) to, 2); |
__put_user(*(2+(const char *) from), 2+(char *) to, 1); |
return; |
case 4: |
__put_user(*(const int *) from, (int *) to, 4); |
return; |
case 8: |
__put_user(*(const int *) from, (int *) to, 4); |
__put_user(*(1+(const int *) from), 1+(int *) to, 4); |
return; |
case 12: |
__put_user(*(const int *) from, (int *) to, 4); |
__put_user(*(1+(const int *) from), 1+(int *) to, 4); |
__put_user(*(2+(const int *) from), 2+(int *) to, 4); |
return; |
case 16: |
__put_user(*(const int *) from, (int *) to, 4); |
__put_user(*(1+(const int *) from), 1+(int *) to, 4); |
__put_user(*(2+(const int *) from), 2+(int *) to, 4); |
__put_user(*(3+(const int *) from), 3+(int *) to, 4); |
return; |
} |
/* Bulk path: %es := %fs, rep movsl for n/4 dwords, then `x` copies the |
 * 0-3 tail bytes, and %es is restored. */ |
#define COMMON(x) \ |
__asm__("cld\n\t" \ |
"push %%es\n\t" \ |
"push %%fs\n\t" \ |
"pop %%es\n\t" \ |
"rep ; movsl\n\t" \ |
x \ |
"pop %%es" \ |
: /* no outputs */ \ |
:"c" (n/4),"D" ((long) to),"S" ((long) from) \ |
:"cx","di","si") |
 |
switch (n % 4) { |
case 0: |
COMMON(""); |
return; |
case 1: |
COMMON("movsb\n\t"); |
return; |
case 2: |
COMMON("movsw\n\t"); |
return; |
case 3: |
COMMON("movsw\n\tmovsb\n\t"); |
return; |
} |
#undef COMMON |
} |
|
/* |
 * Copy n bytes from the %fs segment (from) to kernel space (to) using |
 * fs-prefixed string moves: align the destination with movsb, bulk with |
 * rep movsl, 0-3 tail bytes with movsb. "memory" clobber tells the |
 * compiler the destination memory changed. Multi-line string literal is |
 * an old GCC extension. |
 */ |
static inline void __generic_memcpy_fromfs(void * to, const void * from, unsigned long n) |
{ |
__asm__ volatile |
(" cld |
cmpl $3,%0 |
jbe 1f |
movl %%edi,%%ecx |
negl %%ecx |
andl $3,%%ecx |
subl %%ecx,%0 |
fs; rep; movsb |
movl %0,%%ecx |
shrl $2,%%ecx |
fs; rep; movsl |
andl $3,%0 |
1: movl %0,%%ecx |
fs; rep; movsb" |
:"=abd" (n) |
:"0" (n),"D" ((long) to),"S" ((long) from) |
:"cx","di","si", "memory"); |
} |
|
/* |
 * memcpy_fromfs for a compile-time-constant n: sizes 0-4, 8, 12, 16 |
 * unroll into direct __get_user loads; anything else falls through to |
 * the COMMON() fs-prefixed bulk copy below. |
 */ |
static inline void __constant_memcpy_fromfs(void * to, const void * from, unsigned long n) |
{ |
switch (n) { |
case 0: |
return; |
case 1: |
*(char *)to = __get_user((const char *) from, 1); |
return; |
case 2: |
*(short *)to = __get_user((const short *) from, 2); |
return; |
case 3: |
*(short *) to = __get_user((const short *) from, 2); |
*((char *) to + 2) = __get_user(2+(const char *) from, 1); |
return; |
case 4: |
*(int *) to = __get_user((const int *) from, 4); |
return; |
case 8: |
*(int *) to = __get_user((const int *) from, 4); |
*(1+(int *) to) = __get_user(1+(const int *) from, 4); |
return; |
case 12: |
*(int *) to = __get_user((const int *) from, 4); |
*(1+(int *) to) = __get_user(1+(const int *) from, 4); |
*(2+(int *) to) = __get_user(2+(const int *) from, 4); |
return; |
case 16: |
*(int *) to = __get_user((const int *) from, 4); |
*(1+(int *) to) = __get_user(1+(const int *) from, 4); |
*(2+(int *) to) = __get_user(2+(const int *) from, 4); |
*(3+(int *) to) = __get_user(3+(const int *) from, 4); |
return; |
} |
/* Bulk path: rep fs movsl for n/4 dwords, then `x` copies the tail. */ |
#define COMMON(x) \ |
__asm__("cld\n\t" \ |
"rep ; fs ; movsl\n\t" \ |
x \ |
: /* no outputs */ \ |
:"c" (n/4),"D" ((long) to),"S" ((long) from) \ |
:"cx","di","si","memory") |
 |
switch (n % 4) { |
case 0: |
COMMON(""); |
return; |
case 1: |
COMMON("fs ; movsb"); |
return; |
case 2: |
COMMON("fs ; movsw"); |
return; |
case 3: |
COMMON("fs ; movsw\n\tfs ; movsb"); |
return; |
} |
#undef COMMON |
} |
|
#define memcpy_fromfs(to, from, n) \ |
(__builtin_constant_p(n) ? \ |
__constant_memcpy_fromfs((to),(from),(n)) : \ |
__generic_memcpy_fromfs((to),(from),(n))) |
|
#define memcpy_tofs(to, from, n) \ |
(__builtin_constant_p(n) ? \ |
__constant_memcpy_tofs((to),(from),(n)) : \ |
__generic_memcpy_tofs((to),(from),(n))) |
|
/* |
* These are deprecated.. |
* |
* Use "put_user()" and "get_user()" with the proper pointer types instead. |
*/ |
|
#define get_fs_byte(addr) __get_user((const unsigned char *)(addr),1) |
#define get_fs_word(addr) __get_user((const unsigned short *)(addr),2) |
#define get_fs_long(addr) __get_user((const unsigned int *)(addr),4) |
|
#define put_fs_byte(x,addr) __put_user((x),(unsigned char *)(addr),1) |
#define put_fs_word(x,addr) __put_user((x),(unsigned short *)(addr),2) |
#define put_fs_long(x,addr) __put_user((x),(unsigned int *)(addr),4) |
|
#ifdef WE_REALLY_WANT_TO_USE_A_BROKEN_INTERFACE |
|
/* Deprecated typed accessors (behind WE_REALLY_WANT_TO_USE_A_BROKEN_INTERFACE): |
 * thin wrappers over __get_user/__put_user with fixed sizes. */ |
static inline unsigned short get_user_word(const short *addr) |
{ |
return __get_user(addr, 2); |
} |
 |
static inline unsigned char get_user_byte(const char * addr) |
{ |
return __get_user(addr,1); |
} |
 |
static inline unsigned long get_user_long(const int *addr) |
{ |
return __get_user(addr, 4); |
} |
 |
static inline void put_user_byte(char val,char *addr) |
{ |
__put_user(val, addr, 1); |
} |
 |
static inline void put_user_word(short val,short * addr) |
{ |
__put_user(val, addr, 2); |
} |
 |
static inline void put_user_long(unsigned long val,int * addr) |
{ |
__put_user(val, addr, 4); |
} |
|
#endif |
|
/* |
* Someone who knows GNU asm better than I should double check the following. |
* It seems to work, but I don't know if I'm doing something subtly wrong. |
* --- TYT, 11/24/91 |
* [ nothing wrong here, Linus: I just changed the ax to be any reg ] |
*/ |
|
/* Read the current %fs selector (the user-copy segment); the "0" (0) |
 * input pre-zeroes the register so the upper bits of _v are clean. */ |
static inline unsigned long get_fs(void) |
{ |
unsigned long _v; |
__asm__("mov %%fs,%w0":"=r" (_v):"0" (0)); |
return _v; |
} |
 |
/* Read the current %ds selector, zero-extended the same way. */ |
static inline unsigned long get_ds(void) |
{ |
unsigned long _v; |
__asm__("mov %%ds,%w0":"=r" (_v):"0" (0)); |
return _v; |
} |
 |
/* Load %fs with val (e.g. KERNEL_DS or USER_DS), redirecting the |
 * __get_user/__put_user/memcpy_*fs primitives above. */ |
static inline void set_fs(unsigned long val) |
{ |
__asm__ __volatile__("mov %w0,%%fs": /* no output */ :"r" (val)); |
} |
|
#endif /* __ASSEMBLY__ */ |
|
#endif /* _ASM_SEGMENT_H */ |
/types.h
0,0 → 1,44
#ifndef _I386_TYPES_H |
#define _I386_TYPES_H |
|
typedef unsigned short umode_t; |
|
/* |
* __xx is ok: it doesn't pollute the POSIX namespace. Use these in the |
* header files exported to user space |
*/ |
|
typedef __signed__ char __s8; |
typedef unsigned char __u8; |
|
typedef __signed__ short __s16; |
typedef unsigned short __u16; |
|
typedef __signed__ int __s32; |
typedef unsigned int __u32; |
|
#if defined(__GNUC__) && !defined(__STRICT_ANSI__) |
typedef __signed__ long long __s64; |
typedef unsigned long long __u64; |
#endif |
|
/* |
* These aren't exported outside the kernel to avoid name space clashes |
*/ |
#ifdef __KERNEL__ |
|
typedef signed char s8; |
typedef unsigned char u8; |
|
typedef signed short s16; |
typedef unsigned short u16; |
|
typedef signed int s32; |
typedef unsigned int u32; |
|
typedef signed long long s64; |
typedef unsigned long long u64; |
|
#endif /* __KERNEL__ */ |
|
#endif |
/elf.h
0,0 → 1,41
#ifndef __ASMi386_ELF_H |
#define __ASMi386_ELF_H |
|
/* |
* ELF register definitions.. |
*/ |
|
#include <asm/ptrace.h> |
|
typedef unsigned long elf_greg_t; |
|
#define ELF_NGREG (sizeof (struct pt_regs) / sizeof(elf_greg_t)) |
typedef elf_greg_t elf_gregset_t[ELF_NGREG]; |
|
typedef struct user_i387_struct elf_fpregset_t; |
|
/* |
* This is used to ensure we don't load something for the wrong architecture. |
*/ |
#define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) ) |
|
/* |
* These are used to set parameters in the core dumps. |
*/ |
/* Core-dump parameters: 32-bit class, little-endian data, i386 machine. */
#define ELF_CLASS ELFCLASS32
/* Fix: ELF_DATA was defined with a stray trailing ';', which breaks any
 * expression use of the macro (initializers, comparisons). Removed. */
#define ELF_DATA ELFDATA2LSB
#define ELF_ARCH EM_386
|
/* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program |
starts %edx contains a pointer to a function which might be |
registered using `atexit'. This provides a mean for the |
dynamic linker to call DT_FINI functions for shared libraries |
that have been loaded before the code runs. |
|
A value of 0 tells we have no such handler. */ |
#define ELF_PLAT_INIT(_r) _r->edx = 0 |
|
#define USE_ELF_CORE_DUMP |
#define ELF_EXEC_PAGESIZE 4096 |
|
#endif |
/fcntl.h
0,0 → 1,59
#ifndef _I386_FCNTL_H |
#define _I386_FCNTL_H |
|
/* open/fcntl - O_SYNC is only implemented on blocks devices and on files |
located on an ext2 file system */ |
#define O_ACCMODE 0003 |
#define O_RDONLY 00 |
#define O_WRONLY 01 |
#define O_RDWR 02 |
#define O_CREAT 0100 /* not fcntl */ |
#define O_EXCL 0200 /* not fcntl */ |
#define O_NOCTTY 0400 /* not fcntl */ |
#define O_TRUNC 01000 /* not fcntl */ |
#define O_APPEND 02000 |
#define O_NONBLOCK 04000 |
#define O_NDELAY O_NONBLOCK |
#define O_SYNC 010000 |
#define FASYNC 020000 /* fcntl, for BSD compatibility */ |
|
#define F_DUPFD 0 /* dup */ |
#define F_GETFD 1 /* get f_flags */ |
#define F_SETFD 2 /* set f_flags */ |
#define F_GETFL 3 /* more flags (cloexec) */ |
#define F_SETFL 4 |
#define F_GETLK 5 |
#define F_SETLK 6 |
#define F_SETLKW 7 |
|
#define F_SETOWN 8 /* for sockets. */ |
#define F_GETOWN 9 /* for sockets. */ |
|
/* for F_[GET|SET]FL */ |
#define FD_CLOEXEC 1 /* actually anything with low bit set goes */ |
|
/* for posix fcntl() and lockf() */ |
#define F_RDLCK 0 |
#define F_WRLCK 1 |
#define F_UNLCK 2 |
|
/* for old implementation of bsd flock () */ |
#define F_EXLCK 4 /* or 3 */ |
#define F_SHLCK 8 /* or 4 */ |
|
/* operations for bsd flock(), also used by the kernel implementation */ |
#define LOCK_SH 1 /* shared lock */ |
#define LOCK_EX 2 /* exclusive lock */ |
#define LOCK_NB 4 /* or'd with one of the above to prevent |
blocking */ |
#define LOCK_UN 8 /* remove lock */ |
|
/* Record-lock description used by fcntl() F_GETLK/F_SETLK/F_SETLKW. */ |
struct flock { |
short l_type; /* F_RDLCK, F_WRLCK or F_UNLCK (see above) */ |
short l_whence; /* base for l_start -- presumably SEEK_SET/CUR/END */ |
off_t l_start; /* start offset of the locked region */ |
off_t l_len; /* region length -- 0 presumably means "to EOF"; TODO confirm */ |
pid_t l_pid; /* owning process, filled in by F_GETLK */ |
}; |
|
#endif |
/string.h
0,0 → 1,631
#ifndef _I386_STRING_H_ |
#define _I386_STRING_H_ |
|
/* |
* On a 486 or Pentium, we are better off not using the |
* byte string operations. But on a 386 or a PPro the |
* byte string ops are faster than doing it by hand |
* (MUCH faster on a Pentium). |
* |
* Also, the byte strings actually work correctly. Forget |
* the i486 routines for now as they may be broken.. |
*/ |
#if FIXED_486_STRING && (CPU == 486 || CPU == 586) |
#include <asm/string-486.h> |
#else |
|
/* |
* This string-include defines all string functions as inline |
* functions. Use gcc. It also assumes ds=es=data space, this should be |
* normal. Most of the string-functions are rather heavily hand-optimized, |
* see especially strtok,strstr,str[c]spn. They should work, but are not |
* very easy to understand. Everything is done entirely within the register |
* set, making the functions fast and clean. String instructions have been |
* used through-out, making for "slightly" unclear code :-) |
* |
* Copyright (C) 1991, 1992 Linus Torvalds |
*/ |
|
#define __HAVE_ARCH_STRCPY |
/* Copy NUL-terminated src to dest (lodsb/stosb until the byte copied is 0). */ |
extern inline char * strcpy(char * dest,const char *src) |
{ |
__asm__ __volatile__( |
"cld\n" |
"1:\tlodsb\n\t" |
"stosb\n\t" |
"testb %%al,%%al\n\t" |
"jne 1b" |
: /* no output */ |
:"S" (src),"D" (dest):"si","di","ax","memory"); |
return dest; |
} |
|
#define __HAVE_ARCH_STRNCPY |
/* Copy at most count bytes of src to dest. Like libc strncpy: if src is |
 * shorter, the remainder is zero-filled (the rep stosb with %al == 0); if |
 * src is longer, dest is NOT NUL-terminated (the js 2f early exit). */ |
extern inline char * strncpy(char * dest,const char *src,size_t count) |
{ |
__asm__ __volatile__( |
"cld\n" |
"1:\tdecl %2\n\t" |
"js 2f\n\t" |
"lodsb\n\t" |
"stosb\n\t" |
"testb %%al,%%al\n\t" |
"jne 1b\n\t" |
"rep\n\t" |
"stosb\n" |
"2:" |
: /* no output */ |
:"S" (src),"D" (dest),"c" (count):"si","di","ax","cx","memory"); |
return dest; |
} |
|
#define __HAVE_ARCH_STRCAT |
/* Append src to dest: scasb (%al=0, %ecx=-1) finds dest's terminator, |
 * decl backs onto it, then src is copied including its NUL. */ |
extern inline char * strcat(char * dest,const char * src) |
{ |
__asm__ __volatile__( |
"cld\n\t" |
"repne\n\t" |
"scasb\n\t" |
"decl %1\n" |
"1:\tlodsb\n\t" |
"stosb\n\t" |
"testb %%al,%%al\n\t" |
"jne 1b" |
: /* no output */ |
:"S" (src),"D" (dest),"a" (0),"c" (0xffffffff):"si","di","ax","cx"); |
return dest; |
} |
|
#define __HAVE_ARCH_STRNCAT |
/* Append at most count bytes of src to dest; always NUL-terminates |
 * (the final xorl + stosb writes the terminator on either exit path). */ |
extern inline char * strncat(char * dest,const char * src,size_t count) |
{ |
__asm__ __volatile__( |
"cld\n\t" |
"repne\n\t" |
"scasb\n\t" |
"decl %1\n\t" |
"movl %4,%3\n" |
"1:\tdecl %3\n\t" |
"js 2f\n\t" |
"lodsb\n\t" |
"stosb\n\t" |
"testb %%al,%%al\n\t" |
"jne 1b\n" |
"2:\txorl %2,%2\n\t" |
"stosb" |
: /* no output */ |
:"S" (src),"D" (dest),"a" (0),"c" (0xffffffff),"g" (count) |
:"si","di","ax","cx","memory"); |
return dest; |
} |
|
#define __HAVE_ARCH_STRCMP
/*
 * Compare NUL-terminated strings cs and ct; returns -1, 0 or 1.
 * On mismatch, "sbbl %eax,%eax" turns the borrow into 0 or -1 and the
 * or forces the result to +1 or -1.
 *
 * Fix: the mismatch path used "orb $1,%%eax" -- an invalid operand-size
 * combination (orb takes an 8-bit operand). Changed to "orb $1,%%al",
 * matching the identical trick in strncmp below.
 */
extern inline int strcmp(const char * cs,const char * ct)
{
register int __res;
__asm__ __volatile__(
"cld\n"
"1:\tlodsb\n\t"
"scasb\n\t"
"jne 2f\n\t"
"testb %%al,%%al\n\t"
"jne 1b\n\t"
"xorl %%eax,%%eax\n\t"
"jmp 3f\n"
"2:\tsbbl %%eax,%%eax\n\t"
"orb $1,%%al\n"
"3:"
:"=a" (__res):"S" (cs),"D" (ct):"si","di");
return __res;
}
|
#define __HAVE_ARCH_STRNCMP |
/* Compare at most count bytes of cs and ct; returns -1, 0 or 1 |
 * (sbb/or sign trick, same as strcmp above). */ |
extern inline int strncmp(const char * cs,const char * ct,size_t count) |
{ |
register int __res; |
__asm__ __volatile__( |
"cld\n" |
"1:\tdecl %3\n\t" |
"js 2f\n\t" |
"lodsb\n\t" |
"scasb\n\t" |
"jne 3f\n\t" |
"testb %%al,%%al\n\t" |
"jne 1b\n" |
"2:\txorl %%eax,%%eax\n\t" |
"jmp 4f\n" |
"3:\tsbbl %%eax,%%eax\n\t" |
"orb $1,%%al\n" |
"4:" |
:"=a" (__res):"S" (cs),"D" (ct),"c" (count):"si","di","cx"); |
return __res; |
} |
|
#define __HAVE_ARCH_STRCHR |
/* First occurrence of c in s, or NULL. On a miss, %esi is forced to 1 so |
 * the final "decl" turns the copied pointer into 0 (NULL); on a hit, the |
 * decrement undoes lodsb's post-increment. */ |
extern inline char * strchr(const char * s, int c) |
{ |
register char * __res; |
__asm__ __volatile__( |
"cld\n\t" |
"movb %%al,%%ah\n" |
"1:\tlodsb\n\t" |
"cmpb %%ah,%%al\n\t" |
"je 2f\n\t" |
"testb %%al,%%al\n\t" |
"jne 1b\n\t" |
"movl $1,%1\n" |
"2:\tmovl %1,%0\n\t" |
"decl %0" |
:"=a" (__res):"S" (s),"0" (c):"si"); |
return __res; |
} |
|
#define __HAVE_ARCH_STRRCHR |
/* Last occurrence of c in s, or NULL: scans forward, recording %esi-1 |
 * into the result (initialized to 0) on every match. */ |
extern inline char * strrchr(const char * s, int c) |
{ |
register char * __res; |
__asm__ __volatile__( |
"cld\n\t" |
"movb %%al,%%ah\n" |
"1:\tlodsb\n\t" |
"cmpb %%ah,%%al\n\t" |
"jne 2f\n\t" |
"leal -1(%%esi),%0\n" |
"2:\ttestb %%al,%%al\n\t" |
"jne 1b" |
:"=d" (__res):"0" (0),"S" (s),"a" (c):"ax","si"); |
return __res; |
} |
|
#define __HAVE_ARCH_STRSPN |
/* Length of the initial segment of cs containing only bytes from ct: |
 * first measures strlen(ct) via scasb (kept in %edx), then rescans ct |
 * for each byte of cs, advancing while a match ("je 1b") is found. */ |
extern inline size_t strspn(const char * cs, const char * ct) |
{ |
register char * __res; |
__asm__ __volatile__( |
"cld\n\t" |
"movl %4,%%edi\n\t" |
"repne\n\t" |
"scasb\n\t" |
"notl %%ecx\n\t" |
"decl %%ecx\n\t" |
"movl %%ecx,%%edx\n" |
"1:\tlodsb\n\t" |
"testb %%al,%%al\n\t" |
"je 2f\n\t" |
"movl %4,%%edi\n\t" |
"movl %%edx,%%ecx\n\t" |
"repne\n\t" |
"scasb\n\t" |
"je 1b\n" |
"2:\tdecl %0" |
:"=S" (__res):"a" (0),"c" (0xffffffff),"0" (cs),"g" (ct) |
:"ax","cx","dx","di"); |
return __res-cs; |
} |
|
#define __HAVE_ARCH_STRCSPN |
/* Length of the initial segment of cs containing NO bytes from ct. |
 * Identical structure to strspn above, except it loops while the scasb |
 * finds no match ("jne 1b" instead of "je 1b"). */ |
extern inline size_t strcspn(const char * cs, const char * ct) |
{ |
register char * __res; |
__asm__ __volatile__( |
"cld\n\t" |
"movl %4,%%edi\n\t" |
"repne\n\t" |
"scasb\n\t" |
"notl %%ecx\n\t" |
"decl %%ecx\n\t" |
"movl %%ecx,%%edx\n" |
"1:\tlodsb\n\t" |
"testb %%al,%%al\n\t" |
"je 2f\n\t" |
"movl %4,%%edi\n\t" |
"movl %%edx,%%ecx\n\t" |
"repne\n\t" |
"scasb\n\t" |
"jne 1b\n" |
"2:\tdecl %0" |
:"=S" (__res):"a" (0),"c" (0xffffffff),"0" (cs),"g" (ct) |
:"ax","cx","dx","di"); |
return __res-cs; |
} |
|
#define __HAVE_ARCH_STRPBRK |
/*
 * strpbrk - find the first byte of 'cs' that also occurs in 'ct'.
 * Returns a pointer to that byte, or NULL if 'cs' contains none of
 * the bytes in 'ct'.
 */
extern inline char * strpbrk(const char * cs,const char * ct)
{
register char * __res;
__asm__ __volatile__(
	"cld\n\t"
	"movl %4,%%edi\n\t"
	"repne\n\t"
	"scasb\n\t"		/* scan ct for its terminating NUL */
	"notl %%ecx\n\t"
	"decl %%ecx\n\t"	/* %ecx = strlen(ct) */
	"movl %%ecx,%%edx\n"
	"1:\tlodsb\n\t"		/* next byte of cs */
	"testb %%al,%%al\n\t"
	"je 2f\n\t"		/* end of cs: no match */
	"movl %4,%%edi\n\t"
	"movl %%edx,%%ecx\n\t"
	"repne\n\t"
	"scasb\n\t"
	"jne 1b\n\t"		/* byte not in ct: try the next one */
	"decl %0\n\t"		/* found: back up to the byte itself */
	"jmp 3f\n"
	"2:\txorl %0,%0\n"	/* return NULL */
	"3:"
	:"=S" (__res):"a" (0),"c" (0xffffffff),"0" (cs),"g" (ct)
	:"ax","cx","dx","di");
return __res;
}
|
#define __HAVE_ARCH_STRSTR |
/*
 * strstr - find the first occurrence of the substring 'ct' within
 * 'cs'.  Returns a pointer to its start, or NULL if it is absent; an
 * empty 'ct' matches at the beginning of 'cs'.  Naive O(n*m) search:
 * compares ct against cs at every successive starting offset.
 */
extern inline char * strstr(const char * cs,const char * ct)
{
register char * __res;
__asm__ __volatile__(
	"cld\n\t" \
	"movl %4,%%edi\n\t"
	"repne\n\t"
	"scasb\n\t"		/* compute strlen(ct) */
	"notl %%ecx\n\t"
	"decl %%ecx\n\t"	/* NOTE! This also sets Z if searchstring='' */
	"movl %%ecx,%%edx\n"
	"1:\tmovl %4,%%edi\n\t"
	"movl %%esi,%%eax\n\t"	/* %eax = current candidate position */
	"movl %%edx,%%ecx\n\t"
	"repe\n\t"
	"cmpsb\n\t"		/* compare ct against cs at this offset */
	"je 2f\n\t"		/* also works for empty string, see above */
	"xchgl %%eax,%%esi\n\t"
	"incl %%esi\n\t"	/* advance the start position by one */
	"cmpb $0,-1(%%eax)\n\t"
	"jne 1b\n\t"		/* more of cs left to try */
	"xorl %%eax,%%eax\n\t"	/* ran off the end: return NULL */
	"2:"
	:"=a" (__res):"0" (0),"c" (0xffffffff),"S" (cs),"g" (ct)
	:"cx","dx","di","si");
return __res;
}
|
#define __HAVE_ARCH_STRLEN |
/*
 * strlen - number of bytes in 's' up to, but not including, the
 * terminating NUL.
 */
extern inline size_t strlen(const char * s)
{
register int __res;
__asm__ __volatile__(
	"cld\n\t"
	"repne\n\t"
	"scasb\n\t"	/* scan for the NUL; %ecx counts down from -1 */
	"notl %0\n\t"
	"decl %0"	/* bytes scanned, minus the NUL itself */
	:"=c" (__res):"D" (s),"a" (0),"0" (0xffffffff):"di");
return __res;
}
|
#define __HAVE_ARCH_STRTOK |
/*
 * strtok - split 's' into tokens separated by any of the bytes in
 * 'ct'.  The scan position is kept in the file-scope variable
 * ___strtok between calls (pass s == NULL to continue a previous
 * scan), so this is NOT reentrant.  Each returned token is
 * NUL-terminated in place; returns NULL when no tokens remain.
 */
extern inline char * strtok(char * s,const char * ct)
{
register char * __res;
__asm__ __volatile__(
	"testl %1,%1\n\t"	/* have a saved position? */
	"jne 1f\n\t"
	"testl %0,%0\n\t"	/* no: is s NULL too? */
	"je 8f\n\t"		/* nothing to scan at all */
	"movl %0,%1\n"		/* start a fresh scan at s */
	"1:\txorl %0,%0\n\t"
	"movl $-1,%%ecx\n\t"
	"xorl %%eax,%%eax\n\t"
	"cld\n\t"
	"movl %4,%%edi\n\t"	/* compute strlen(ct) into %edx */
	"repne\n\t"
	"scasb\n\t"
	"notl %%ecx\n\t"
	"decl %%ecx\n\t"
	"je 7f\n\t"		/* empty delimiter-string */
	"movl %%ecx,%%edx\n"
	"2:\tlodsb\n\t"		/* skip leading delimiters */
	"testb %%al,%%al\n\t"
	"je 7f\n\t"
	"movl %4,%%edi\n\t"
	"movl %%edx,%%ecx\n\t"
	"repne\n\t"
	"scasb\n\t"
	"je 2b\n\t"
	"decl %1\n\t"		/* back up to the token start */
	"cmpb $0,(%1)\n\t"
	"je 7f\n\t"
	"movl %1,%0\n"		/* token found: remember its start */
	"3:\tlodsb\n\t"		/* scan for the token's end */
	"testb %%al,%%al\n\t"
	"je 5f\n\t"
	"movl %4,%%edi\n\t"
	"movl %%edx,%%ecx\n\t"
	"repne\n\t"
	"scasb\n\t"
	"jne 3b\n\t"
	"decl %1\n\t"
	"cmpb $0,(%1)\n\t"
	"je 5f\n\t"
	"movb $0,(%1)\n\t"	/* terminate the token in place */
	"incl %1\n\t"		/* resume after the delimiter next time */
	"jmp 6f\n"
	"5:\txorl %1,%1\n"	/* hit end of string */
	"6:\tcmpb $0,(%0)\n\t"
	"jne 7f\n\t"
	"xorl %0,%0\n"
	"7:\ttestl %0,%0\n\t"
	"jne 8f\n\t"
	"movl %0,%1\n"		/* no token: clear the saved position */
	"8:"
	:"=b" (__res),"=S" (___strtok)
	:"0" (___strtok),"1" (s),"g" (ct)
	:"ax","cx","dx","di","memory");
return __res;
}
|
/*
 * __memcpy - copy 'n' bytes from 'from' to 'to'; the areas must not
 * overlap.  Bulk-copies n/4 dwords, then moves the leftover word
 * and/or byte according to the low two bits of n.  Returns 'to'.
 */
extern inline void * __memcpy(void * to, const void * from, size_t n)
{
__asm__ __volatile__(
	"cld\n\t"
	"rep ; movsl\n\t"	/* copy 4 bytes at a time */
	"testb $2,%b1\n\t"	/* leftover word? */
	"je 1f\n\t"
	"movsw\n"
	"1:\ttestb $1,%b1\n\t"	/* leftover byte? */
	"je 2f\n\t"
	"movsb\n"
	"2:"
	: /* no output */
	:"c" (n/4), "q" (n),"D" ((long) to),"S" ((long) from)
	: "cx","di","si","memory");
return (to);
}
|
/* |
* This looks horribly ugly, but the compiler can optimize it totally, |
* as the count is constant. |
*/ |
/*
 * __constant_memcpy - memcpy for a compile-time-constant length 'n'.
 * Small and common sizes become direct loads/stores; everything else
 * falls through to a rep;movsl plus a size-dependent tail chosen at
 * compile time via the COMMON() macro.  Returns 'to'.
 */
extern inline void * __constant_memcpy(void * to, const void * from, size_t n)
{
	switch (n) {
		case 0:
			return to;
		case 1:
			*(unsigned char *)to = *(const unsigned char *)from;
			return to;
		case 2:
			*(unsigned short *)to = *(const unsigned short *)from;
			return to;
		case 3:
			*(unsigned short *)to = *(const unsigned short *)from;
			*(2+(unsigned char *)to) = *(2+(const unsigned char *)from);
			return to;
		case 4:
			*(unsigned long *)to = *(const unsigned long *)from;
			return to;
		case 8:
			*(unsigned long *)to = *(const unsigned long *)from;
			*(1+(unsigned long *)to) = *(1+(const unsigned long *)from);
			return to;
		case 12:
			*(unsigned long *)to = *(const unsigned long *)from;
			*(1+(unsigned long *)to) = *(1+(const unsigned long *)from);
			*(2+(unsigned long *)to) = *(2+(const unsigned long *)from);
			return to;
		case 16:
			*(unsigned long *)to = *(const unsigned long *)from;
			*(1+(unsigned long *)to) = *(1+(const unsigned long *)from);
			*(2+(unsigned long *)to) = *(2+(const unsigned long *)from);
			*(3+(unsigned long *)to) = *(3+(const unsigned long *)from);
			return to;
		case 20:
			*(unsigned long *)to = *(const unsigned long *)from;
			*(1+(unsigned long *)to) = *(1+(const unsigned long *)from);
			*(2+(unsigned long *)to) = *(2+(const unsigned long *)from);
			*(3+(unsigned long *)to) = *(3+(const unsigned long *)from);
			*(4+(unsigned long *)to) = *(4+(const unsigned long *)from);
			return to;
	}
/* Dword copy plus the tail instruction(s) 'x' for the n%4 remainder. */
#define COMMON(x) \
__asm__("cld\n\t" \
	"rep ; movsl" \
	x \
	: /* no outputs */ \
	: "c" (n/4),"D" ((long) to),"S" ((long) from) \
	: "cx","di","si","memory");

	switch (n % 4) {
		case 0: COMMON(""); return to;
		case 1: COMMON("\n\tmovsb"); return to;
		case 2: COMMON("\n\tmovsw"); return to;
		case 3: COMMON("\n\tmovsw\n\tmovsb"); return to;
	}
#undef COMMON
}
|
#define __HAVE_ARCH_MEMCPY |
#define memcpy(t, f, n) \ |
(__builtin_constant_p(n) ? \ |
__constant_memcpy((t),(f),(n)) : \ |
__memcpy((t),(f),(n))) |
|
#define __HAVE_ARCH_MEMMOVE |
/*
 * memmove - copy 'n' bytes from 'src' to 'dest'; safe for overlapping
 * areas.  Copies forwards when dest is below src, otherwise backwards
 * from the last byte with the direction flag set (and cleared again
 * before returning).  Returns 'dest'.
 */
extern inline void * memmove(void * dest,const void * src, size_t n)
{
if (dest<src)
__asm__ __volatile__(
	"cld\n\t"
	"rep\n\t"
	"movsb"
	: /* no output */
	:"c" (n),"S" (src),"D" (dest)
	:"cx","si","di");
else
__asm__ __volatile__(
	"std\n\t"		/* copy backwards to handle overlap */
	"rep\n\t"
	"movsb\n\t"
	"cld"
	: /* no output */
	:"c" (n),
	"S" (n-1+(const char *)src),
	"D" (n-1+(char *)dest)
	:"cx","si","di","memory");
return dest;
}
|
#define memcmp __builtin_memcmp |
|
#define __HAVE_ARCH_MEMCHR |
/*
 * memchr - find the first occurrence of byte 'c' in the first 'count'
 * bytes of 'cs'.  Returns a pointer to the match, or NULL.
 */
extern inline void * memchr(const void * cs,int c,size_t count)
{
register void * __res;
if (!count)
	return NULL;
__asm__ __volatile__(
	"cld\n\t"
	"repne\n\t"
	"scasb\n\t"
	"je 1f\n\t"		/* found: %edi is one past the match */
	"movl $1,%0\n"		/* missed: make the decl below yield NULL */
	"1:\tdecl %0"
	:"=D" (__res):"a" (c),"D" (cs),"c" (count)
	:"cx");
return __res;
}
|
/*
 * __memset_generic - fill 'count' bytes at 's' with the byte 'c',
 * one byte at a time via rep;stosb.  Returns 's'.
 */
extern inline void * __memset_generic(void * s, char c,size_t count)
{
__asm__ __volatile__(
	"cld\n\t"
	"rep\n\t"
	"stosb"
	: /* no output */
	:"a" (c),"D" (s),"c" (count)
	:"cx","di","memory");
return s;
}
|
/* we might want to write optimized versions of these later */ |
#define __constant_count_memset(s,c,count) __memset_generic((s),(c),(count)) |
|
/* |
* memset(x,0,y) is a reasonably common thing to do, so we want to fill |
* things 32 bits at a time even when we don't know the size of the |
* area at compile-time.. |
*/ |
/*
 * __constant_c_memset - fill memory when the byte value is known at
 * compile time.  'c' must already be the fill byte replicated into
 * all four byte lanes (0x01010101 * byte), as the memset() macro in
 * this file arranges.  Stores count/4 dwords, then the leftover
 * word/byte per the low two bits of count.  Returns 's'.
 */
extern inline void * __constant_c_memset(void * s, unsigned long c, size_t count)
{
__asm__ __volatile__(
	"cld\n\t"
	"rep ; stosl\n\t"	/* fill 4 bytes at a time */
	"testb $2,%b1\n\t"	/* leftover word? */
	"je 1f\n\t"
	"stosw\n"
	"1:\ttestb $1,%b1\n\t"	/* leftover byte? */
	"je 2f\n\t"
	"stosb\n"
	"2:"
	: /* no output */
	:"a" (c), "q" (count), "c" (count/4), "D" ((long) s)
	:"cx","di","memory");
return (s);
}
|
/* Added by Gertjan van Wingerde to make minix and sysv module work */ |
#define __HAVE_ARCH_STRNLEN |
/*
 * strnlen - like strlen(), but examine no more than 'count' bytes.
 * Returns min(strlen(s), count).
 */
extern inline size_t strnlen(const char * s, size_t count)
{
register int __res;
__asm__ __volatile__(
	"movl %1,%0\n\t"
	"jmp 2f\n"
	"1:\tcmpb $0,(%0)\n\t"	/* hit the terminating NUL? */
	"je 3f\n\t"
	"incl %0\n"
	"2:\tdecl %2\n\t"	/* NOTE(review): this modifies the "d"
				 * input operand in place; old gcc
				 * accepted the matching "dx" clobber. */
	"cmpl $-1,%2\n\t"
	"jne 1b\n"
	"3:\tsubl %1,%0"	/* current position - start = length */
	:"=a" (__res)
	:"c" (s),"d" (count)
	:"dx");
return __res;
}
/* end of additional stuff */ |
|
/* |
* This looks horribly ugly, but the compiler can optimize it totally, |
* as we by now know that both pattern and count is constant.. |
*/ |
/*
 * __constant_c_and_count_memset - memset where both the (replicated)
 * fill pattern and the byte count are compile-time constants, so the
 * switch and the COMMON() expansion collapse to straight-line code.
 * Returns 's'.
 */
extern inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count)
{
	switch (count) {
		case 0:
			return s;
		case 1:
			*(unsigned char *)s = pattern;
			return s;
		case 2:
			*(unsigned short *)s = pattern;
			return s;
		case 3:
			*(unsigned short *)s = pattern;
			*(2+(unsigned char *)s) = pattern;
			return s;
		case 4:
			*(unsigned long *)s = pattern;
			return s;
	}
/* Dword fill plus the tail instruction(s) 'x' for the count%4 remainder. */
#define COMMON(x) \
__asm__("cld\n\t" \
	"rep ; stosl" \
	x \
	: /* no outputs */ \
	: "a" (pattern),"c" (count/4),"D" ((long) s) \
	: "cx","di","memory")

	switch (count % 4) {
		case 0: COMMON(""); return s;
		case 1: COMMON("\n\tstosb"); return s;
		case 2: COMMON("\n\tstosw"); return s;
		case 3: COMMON("\n\tstosw\n\tstosb"); return s;
	}
#undef COMMON
}
|
#define __constant_c_x_memset(s, c, count) \ |
(__builtin_constant_p(count) ? \ |
__constant_c_and_count_memset((s),(c),(count)) : \ |
__constant_c_memset((s),(c),(count))) |
|
#define __memset(s, c, count) \ |
(__builtin_constant_p(count) ? \ |
__constant_count_memset((s),(c),(count)) : \ |
__memset_generic((s),(c),(count))) |
|
#define __HAVE_ARCH_MEMSET |
#define memset(s, c, count) \ |
(__builtin_constant_p(c) ? \ |
__constant_c_x_memset((s),(0x01010101UL*(unsigned char)c),(count)) : \ |
__memset((s),(c),(count))) |
|
/* |
* find the first occurrence of byte 'c', or 1 past the area if none |
*/ |
#define __HAVE_ARCH_MEMSCAN |
/*
 * memscan - scan 'size' bytes at 'addr' for the first occurrence of
 * byte 'c'.  Returns a pointer to the matching byte, or one past the
 * end of the area if 'c' is not found (note: NOT NULL, unlike
 * memchr).
 *
 * The asm template used to be one string literal containing raw
 * newlines, which ISO C forbids; it is now written as adjacent
 * string constants with explicit separators (same instructions).
 */
extern inline void * memscan(void * addr, int c, size_t size)
{
	if (!size)
		return addr;
	__asm__("cld\n\t"
		"repnz; scasb\n\t"
		"jnz 1f\n\t"
		"dec %%edi\n"		/* scasb overshoots by one on a hit */
		"1:"
		: "=D" (addr), "=c" (size)
		: "0" (addr), "1" (size), "a" (c));
	return addr;
}
|
#endif |
#endif |
/unaligned.h
0,0 → 1,16
#ifndef __I386_UNALIGNED_H |
#define __I386_UNALIGNED_H |
|
/* |
* The i386 can do unaligned accesses itself. |
* |
* The strange macros are there to make sure these can't |
* be misused in a way that makes them not work on other |
* architectures where unaligned accesses aren't as simple. |
*/ |
|
#define get_unaligned(ptr) (*(ptr)) |
|
#define put_unaligned(val, ptr) ((void)( *(ptr) = (val) )) |
|
#endif |
/io.h
0,0 → 1,213
#ifndef _ASM_IO_H |
#define _ASM_IO_H |
|
/* |
* This file contains the definitions for the x86 IO instructions |
* inb/inw/inl/outb/outw/outl and the "string versions" of the same |
* (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing" |
* versions of the single-IO instructions (inb_p/inw_p/..). |
* |
* This file is not meant to be obfuscating: it's just complicated |
* to (a) handle it all in a way that makes gcc able to optimize it |
* as well as possible and (b) trying to avoid writing the same thing |
* over and over again with slight variations and possibly making a |
* mistake somewhere. |
*/ |
|
/* |
* Thanks to James van Artsdalen for a better timing-fix than |
* the two short jumps: using outb's to a nonexistent port seems |
* to guarantee better timings even on fast machines. |
* |
* On the other hand, I'd like to be sure of a non-existent port: |
* I feel a bit unsafe about using 0x80 (should be safe, though) |
* |
* Linus |
*/ |
|
#ifdef SLOW_IO_BY_JUMPING |
#define __SLOW_DOWN_IO __asm__ __volatile__("jmp 1f\n1:\tjmp 1f\n1:") |
#else |
#define __SLOW_DOWN_IO __asm__ __volatile__("outb %al,$0x80") |
#endif |
|
#ifdef REALLY_SLOW_IO |
#define SLOW_DOWN_IO { __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; } |
#else |
#define SLOW_DOWN_IO __SLOW_DOWN_IO |
#endif |
|
/* |
* Change virtual addresses to physical addresses and vv. |
* These are trivial on the 1:1 Linux/i386 mapping (but if we ever |
* make the kernel segment mapped at 0, we need to do translation |
* on the i386 as well) |
*/ |
/*
 * virt_to_phys - translate a kernel virtual address to a physical
 * address.  The Linux/i386 kernel mapping is 1:1, so this is just a
 * cast.
 */
extern inline unsigned long virt_to_phys(volatile void * address)
{
	unsigned long phys = (unsigned long) address;
	return phys;
}
|
/*
 * phys_to_virt - translate a physical address back to a kernel
 * virtual address; the inverse of virt_to_phys() under the 1:1
 * Linux/i386 mapping.
 */
extern inline void * phys_to_virt(unsigned long address)
{
	void *virt = (void *) address;
	return virt;
}
|
/* |
* IO bus memory addresses are also 1:1 with the physical address |
*/ |
#define virt_to_bus virt_to_phys |
#define bus_to_virt phys_to_virt |
|
/* |
* readX/writeX() are used to access memory mapped devices. On some |
* architectures the memory mapped IO stuff needs to be accessed |
* differently. On the x86 architecture, we just read/write the |
* memory location directly. |
*/ |
#define readb(addr) (*(volatile unsigned char *) (addr)) |
#define readw(addr) (*(volatile unsigned short *) (addr)) |
#define readl(addr) (*(volatile unsigned int *) (addr)) |
|
#define writeb(b,addr) ((*(volatile unsigned char *) (addr)) = (b)) |
#define writew(b,addr) ((*(volatile unsigned short *) (addr)) = (b)) |
#define writel(b,addr) ((*(volatile unsigned int *) (addr)) = (b)) |
|
#define memset_io(a,b,c) memset((void *)(a),(b),(c)) |
#define memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c)) |
#define memcpy_toio(a,b,c) memcpy((void *)(a),(b),(c)) |
|
/* |
* Again, i386 does not require mem IO specific function. |
*/ |
|
#define eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(void *)(b),(c),(d)) |
|
/* |
* Talk about misusing macros.. |
*/ |
|
#define __OUT1(s,x) \ |
extern inline void __out##s(unsigned x value, unsigned short port) { |
|
#define __OUT2(s,s1,s2) \ |
__asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1" |
|
#define __OUT(s,s1,x) \ |
__OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "d" (port)); } \ |
__OUT1(s##c,x) __OUT2(s,s1,"") : : "a" (value), "id" (port)); } \ |
__OUT1(s##_p,x) __OUT2(s,s1,"w") : : "a" (value), "d" (port)); SLOW_DOWN_IO; } \ |
__OUT1(s##c_p,x) __OUT2(s,s1,"") : : "a" (value), "id" (port)); SLOW_DOWN_IO; } |
|
#define __IN1(s) \ |
extern inline RETURN_TYPE __in##s(unsigned short port) { RETURN_TYPE _v; |
|
#define __IN2(s,s1,s2) \ |
__asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0" |
|
#define __IN(s,s1,i...) \ |
__IN1(s) __IN2(s,s1,"w") : "=a" (_v) : "d" (port) ,##i ); return _v; } \ |
__IN1(s##c) __IN2(s,s1,"") : "=a" (_v) : "id" (port) ,##i ); return _v; } \ |
__IN1(s##_p) __IN2(s,s1,"w") : "=a" (_v) : "d" (port) ,##i ); SLOW_DOWN_IO; return _v; } \ |
__IN1(s##c_p) __IN2(s,s1,"") : "=a" (_v) : "id" (port) ,##i ); SLOW_DOWN_IO; return _v; } |
|
#define __INS(s) \ |
extern inline void ins##s(unsigned short port, void * addr, unsigned long count) \ |
{ __asm__ __volatile__ ("cld ; rep ; ins" #s \ |
: "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); } |
|
#define __OUTS(s) \ |
extern inline void outs##s(unsigned short port, const void * addr, unsigned long count) \ |
{ __asm__ __volatile__ ("cld ; rep ; outs" #s \ |
: "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); } |
|
#define RETURN_TYPE unsigned char |
/* __IN(b,"b","0" (0)) */ |
__IN(b,"") |
#undef RETURN_TYPE |
#define RETURN_TYPE unsigned short |
/* __IN(w,"w","0" (0)) */ |
__IN(w,"") |
#undef RETURN_TYPE |
#define RETURN_TYPE unsigned int |
__IN(l,"") |
#undef RETURN_TYPE |
|
__OUT(b,"b",char) |
__OUT(w,"w",short) |
__OUT(l,,int) |
|
__INS(b) |
__INS(w) |
__INS(l) |
|
__OUTS(b) |
__OUTS(w) |
__OUTS(l) |
|
/* |
* Note that due to the way __builtin_constant_p() works, you |
* - can't use it inside a inline function (it will never be true) |
* - you don't have to worry about side effects within the __builtin.. |
*/ |
#define outb(val,port) \ |
((__builtin_constant_p((port)) && (port) < 256) ? \ |
__outbc((val),(port)) : \ |
__outb((val),(port))) |
|
#define inb(port) \ |
((__builtin_constant_p((port)) && (port) < 256) ? \ |
__inbc(port) : \ |
__inb(port)) |
|
#define outb_p(val,port) \ |
((__builtin_constant_p((port)) && (port) < 256) ? \ |
__outbc_p((val),(port)) : \ |
__outb_p((val),(port))) |
|
#define inb_p(port) \ |
((__builtin_constant_p((port)) && (port) < 256) ? \ |
__inbc_p(port) : \ |
__inb_p(port)) |
|
#define outw(val,port) \ |
((__builtin_constant_p((port)) && (port) < 256) ? \ |
__outwc((val),(port)) : \ |
__outw((val),(port))) |
|
#define inw(port) \ |
((__builtin_constant_p((port)) && (port) < 256) ? \ |
__inwc(port) : \ |
__inw(port)) |
|
#define outw_p(val,port) \ |
((__builtin_constant_p((port)) && (port) < 256) ? \ |
__outwc_p((val),(port)) : \ |
__outw_p((val),(port))) |
|
#define inw_p(port) \ |
((__builtin_constant_p((port)) && (port) < 256) ? \ |
__inwc_p(port) : \ |
__inw_p(port)) |
|
#define outl(val,port) \ |
((__builtin_constant_p((port)) && (port) < 256) ? \ |
__outlc((val),(port)) : \ |
__outl((val),(port))) |
|
#define inl(port) \ |
((__builtin_constant_p((port)) && (port) < 256) ? \ |
__inlc(port) : \ |
__inl(port)) |
|
#define outl_p(val,port) \ |
((__builtin_constant_p((port)) && (port) < 256) ? \ |
__outlc_p((val),(port)) : \ |
__outl_p((val),(port))) |
|
#define inl_p(port) \ |
((__builtin_constant_p((port)) && (port) < 256) ? \ |
__inlc_p(port) : \ |
__inl_p(port)) |
|
#endif |
/floppy.h
0,0 → 1,290
/* |
* Architecture specific parts of the Floppy driver |
* |
* This file is subject to the terms and conditions of the GNU General Public |
* License. See the file "COPYING" in the main directory of this archive |
* for more details. |
* |
* Copyright (C) 1995 |
*/ |
#ifndef __ASM_I386_FLOPPY_H |
#define __ASM_I386_FLOPPY_H |
|
|
#define SW fd_routine[use_virtual_dma&1] |
|
|
#define fd_inb(port) inb_p(port) |
#define fd_outb(port,value) outb_p(port,value) |
|
#define fd_enable_dma() SW._enable_dma(FLOPPY_DMA) |
#define fd_disable_dma() SW._disable_dma(FLOPPY_DMA) |
#define fd_request_dma() SW._request_dma(FLOPPY_DMA,"floppy") |
#define fd_free_dma() SW._free_dma(FLOPPY_DMA) |
#define fd_clear_dma_ff() SW._clear_dma_ff(FLOPPY_DMA) |
#define fd_set_dma_mode(mode) SW._set_dma_mode(FLOPPY_DMA,mode) |
#define fd_set_dma_addr(addr) SW._set_dma_addr(FLOPPY_DMA,addr) |
#define fd_set_dma_count(count) SW._set_dma_count(FLOPPY_DMA,count) |
#define fd_enable_irq() enable_irq(FLOPPY_IRQ) |
#define fd_disable_irq() disable_irq(FLOPPY_IRQ) |
#define fd_cacheflush(addr,size) /* nothing */ |
#define fd_request_irq() SW._request_irq(FLOPPY_IRQ, floppy_interrupt, \ |
SA_INTERRUPT|SA_SAMPLE_RANDOM, \ |
"floppy", NULL) |
#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL) |
#define fd_get_dma_residue() SW._get_dma_residue(FLOPPY_DMA) |
#define fd_dma_mem_alloc(size) SW._dma_mem_alloc(size) |
#define fd_dma_mem_free(addr,size) SW._dma_mem_free(addr,size) |
|
static int virtual_dma_count=0; |
static int virtual_dma_residue=0; |
static unsigned long virtual_dma_addr=0; |
static int virtual_dma_mode=0; |
static int doing_pdma=0; |
|
static void floppy_hardint(int irq, void *dev_id, struct pt_regs * regs) |
{ |
register unsigned char st; |
|
#undef TRACE_FLPY_INT |
#undef NO_FLOPPY_ASSEMBLER |
|
#ifdef TRACE_FLPY_INT |
static int calls=0; |
static int bytes=0; |
static int dma_wait=0; |
#endif |
if(!doing_pdma) { |
floppy_interrupt(irq, dev_id, regs); |
return; |
} |
|
#ifdef TRACE_FLPY_INT |
if(!calls) |
bytes = virtual_dma_count; |
#endif |
|
#ifndef NO_FLOPPY_ASSEMBLER |
__asm__ ( |
"testl %1,%1 |
je 3f |
1: inb %w4,%b0 |
andb $160,%b0 |
cmpb $160,%b0 |
jne 2f |
incw %w4 |
testl %3,%3 |
jne 4f |
inb %w4,%b0 |
movb %0,(%2) |
jmp 5f |
4: movb (%2),%0 |
outb %b0,%w4 |
5: decw %w4 |
outb %0,$0x80 |
decl %1 |
incl %2 |
testl %1,%1 |
jne 1b |
3: inb %w4,%b0 |
2: " |
: "=a" ((char) st), |
"=c" ((long) virtual_dma_count), |
"=S" ((long) virtual_dma_addr) |
: "b" ((long) virtual_dma_mode), |
"d" ((short) virtual_dma_port+4), |
"1" ((long) virtual_dma_count), |
"2" ((long) virtual_dma_addr)); |
#else |
{ |
register int lcount; |
register char *lptr; |
|
st = 1; |
for(lcount=virtual_dma_count, lptr=(char *)virtual_dma_addr; |
lcount; lcount--, lptr++) { |
st=inb(virtual_dma_port+4) & 0xa0 ; |
if(st != 0xa0) |
break; |
if(virtual_dma_mode) |
outb_p(*lptr, virtual_dma_port+5); |
else |
*lptr = inb_p(virtual_dma_port+5); |
st = inb(virtual_dma_port+4); |
} |
virtual_dma_count = lcount; |
virtual_dma_addr = (int) lptr; |
} |
#endif |
|
#ifdef TRACE_FLPY_INT |
calls++; |
#endif |
if(st == 0x20) |
return; |
if(!(st & 0x20)) { |
virtual_dma_residue += virtual_dma_count; |
virtual_dma_count=0; |
#ifdef TRACE_FLPY_INT |
printk("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n", |
virtual_dma_count, virtual_dma_residue, calls, bytes, |
dma_wait); |
calls = 0; |
dma_wait=0; |
#endif |
doing_pdma = 0; |
floppy_interrupt(irq, dev_id, regs); |
return; |
} |
#ifdef TRACE_FLPY_INT |
if(!virtual_dma_count) |
dma_wait++; |
#endif |
} |
|
/* Start a "virtual DMA" transfer: arm the PIO interrupt handler. */
static void vdma_enable_dma(unsigned int dummy)
{
	doing_pdma = 1;
}
|
/*
 * Stop a virtual-DMA transfer.  Any untransferred bytes are folded
 * into the residue count so vdma_get_dma_residue() still sees them.
 */
static void vdma_disable_dma(unsigned int dummy)
{
	doing_pdma = 0;
	virtual_dma_residue += virtual_dma_count;
	virtual_dma_count=0;
}
|
/* No real DMA channel to claim in virtual-DMA mode; always succeeds. */
static int vdma_request_dma(unsigned int dmanr, const char * device_id)
{
	return 0;
}
|
/* Placeholder for operations that need no work in virtual-DMA mode. */
static void vdma_nop(unsigned int dummy)
{
}
|
/* Record the transfer direction: nonzero means memory -> drive. */
static void vdma_set_dma_mode(unsigned int dummy,char mode)
{
	virtual_dma_mode = (mode == DMA_MODE_WRITE);
}
|
/* Remember the buffer address for the next PIO transfer. */
static void vdma_set_dma_addr(unsigned int dummy,unsigned int addr)
{
	virtual_dma_addr = addr;
}
|
/* Set the byte count for the next PIO transfer and reset the residue. */
static void vdma_set_dma_count(unsigned int dummy,unsigned int count)
{
	virtual_dma_count = count;
	virtual_dma_residue = 0;
}
|
/* Bytes not yet transferred: pending count plus accumulated residue. */
static int vdma_get_dma_residue(unsigned int dummy)
{
	return virtual_dma_count + virtual_dma_residue;
}
|
|
/*
 * Install floppy_hardint() as the floppy IRQ handler.  The caller's
 * 'handler' is deliberately ignored: floppy_hardint performs the PIO
 * transfer itself and calls floppy_interrupt() on real completion.
 */
static int vdma_request_irq(unsigned int irq,
			    void (*handler)(int, void *, struct pt_regs *),
			    unsigned long flags,
			    const char *device,
			    void *dev_id)
{
	return request_irq(irq, floppy_hardint,SA_INTERRUPT,device, dev_id);

}
|
/* Allocate a DMA-capable buffer for the real DMA-controller path. */
static unsigned long dma_mem_alloc(unsigned long size)
{
	return __get_dma_pages(GFP_KERNEL,__get_order(size));
}
|
/* Release a buffer obtained from dma_mem_alloc(). */
static void dma_mem_free(unsigned long addr, unsigned long size)
{
	free_pages(addr, __get_order(size));
}
|
/* Virtual DMA copies by PIO, so any kernel memory will do: vmalloc(). */
static unsigned long vdma_mem_alloc(unsigned long size)
{
	return (unsigned long) vmalloc(size);
}
|
/*
 * Release a buffer obtained from vdma_mem_alloc().  The original body
 * used "return vfree(...)": returning a void expression from a void
 * function is an ISO C constraint violation, so the call is now made
 * as a plain statement (behavior unchanged).
 */
static void vdma_mem_free(unsigned long addr, unsigned long size)
{
	vfree((void *)addr);
}
|
/*
 * Dispatch table for the two floppy transfer strategies.  Entry 0
 * uses the real DMA controller; entry 1 uses the "virtual DMA"
 * (programmed I/O) routines above.  The SW macro at the top of this
 * file selects an entry via (use_virtual_dma & 1).
 */
struct fd_routine_l {
	void (*_enable_dma)(unsigned int dummy);
	void (*_disable_dma)(unsigned int dummy);
	int (*_request_dma)(unsigned int dmanr, const char * device_id);
	void (*_free_dma)(unsigned int dmanr);
	void (*_clear_dma_ff)(unsigned int dummy);
	void (*_set_dma_mode)(unsigned int dummy, char mode);
	void (*_set_dma_addr)(unsigned int dummy, unsigned int addr);
	void (*_set_dma_count)(unsigned int dummy, unsigned int count);
	int (*_get_dma_residue)(unsigned int dummy);
	int (*_request_irq)(unsigned int irq,
			    void (*handler)(int, void *, struct pt_regs *),
			    unsigned long flags,
			    const char *device,
			    void *dev_id);
	unsigned long (*_dma_mem_alloc) (unsigned long size);
	void (*_dma_mem_free)(unsigned long addr, unsigned long size);
} fd_routine[] = {
	{			/* [0]: real DMA controller */
		enable_dma,
		disable_dma,
		request_dma,
		free_dma,
		clear_dma_ff,
		set_dma_mode,
		set_dma_addr,
		set_dma_count,
		get_dma_residue,
		request_irq,
		dma_mem_alloc,
		dma_mem_free
	},
	{			/* [1]: virtual DMA (programmed I/O) */
		vdma_enable_dma,
		vdma_disable_dma,
		vdma_request_dma,
		vdma_nop,
		vdma_nop,
		vdma_set_dma_mode,
		vdma_set_dma_addr,
		vdma_set_dma_count,
		vdma_get_dma_residue,
		vdma_request_irq,
		vdma_mem_alloc,
		vdma_mem_free
	}
};
|
/* Per-architecture virtual-DMA setup hook; a no-op on the i386. */
__inline__ void virtual_dma_init(void)
{
	/* Nothing to do on an i386 */
}
|
static int FDC1 = 0x3f0; |
static int FDC2 = -1; |
|
#define FLOPPY0_TYPE ((CMOS_READ(0x10) >> 4) & 15) |
#define FLOPPY1_TYPE (CMOS_READ(0x10) & 15) |
|
#define N_FDC 2 |
#define N_DRIVE 8 |
|
/* |
* The DMA channel used by the floppy controller cannot access data at |
* addresses >= 16MB |
* |
* Went back to the 1MB limit, as some people had problems with the floppy |
* driver otherwise. It doesn't matter much for performance anyway, as most |
* floppy accesses go through the track buffer. |
*/ |
#define CROSS_64KB(a,s) (((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64) && ! (use_virtual_dma & 1)) |
|
#endif /* __ASM_I386_FLOPPY_H */ |
/ioctl.h
0,0 → 1,75
/* $Id: ioctl.h,v 1.1 2005-12-20 11:35:33 jcastillo Exp $ |
* |
* linux/ioctl.h for Linux by H.H. Bergman. |
*/ |
|
#ifndef _ASMI386_IOCTL_H |
#define _ASMI386_IOCTL_H |
|
/* ioctl command encoding: 32 bits total, command in lower 16 bits, |
* size of the parameter structure in the lower 14 bits of the |
* upper 16 bits. |
* Encoding the size of the parameter structure in the ioctl request |
* is useful for catching programs compiled with old versions |
* and to avoid overwriting user space outside the user buffer area. |
* The highest 2 bits are reserved for indicating the ``access mode''. |
* NOTE: This limits the max parameter size to 16kB -1 ! |
*/ |
|
/* |
* The following is for compatibility across the various Linux |
* platforms. The i386 ioctl numbering scheme doesn't really enforce |
* a type field. De facto, however, the top 8 bits of the lower 16 |
* bits are indeed used as a type field, so we might just as well make |
* this explicit here. Please be sure to use the decoding macros |
* below from now on. |
*/ |
#define _IOC_NRBITS 8 |
#define _IOC_TYPEBITS 8 |
#define _IOC_SIZEBITS 14 |
#define _IOC_DIRBITS 2 |
|
#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1) |
#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1) |
#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1) |
#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1) |
|
#define _IOC_NRSHIFT 0 |
#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS) |
#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS) |
#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS) |
|
/* |
* Direction bits. |
*/ |
#define _IOC_NONE 0U |
#define _IOC_WRITE 1U |
#define _IOC_READ 2U |
|
#define _IOC(dir,type,nr,size) \ |
(((dir) << _IOC_DIRSHIFT) | \ |
((type) << _IOC_TYPESHIFT) | \ |
((nr) << _IOC_NRSHIFT) | \ |
((size) << _IOC_SIZESHIFT)) |
|
/* used to create numbers */ |
#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0) |
#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size)) |
#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size)) |
#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size)) |
|
/* used to decode ioctl numbers.. */ |
#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK) |
#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK) |
#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK) |
#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK) |
|
/* ...and for the drivers/sound files... */ |
|
#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT) |
#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT) |
#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT) |
#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT) |
#define IOCSIZE_SHIFT (_IOC_SIZESHIFT) |
|
#endif /* _ASMI386_IOCTL_H */ |
/stat.h
0,0 → 1,41
#ifndef _I386_STAT_H |
#define _I386_STAT_H |
|
/*
 * Original i386 stat structure (16-bit dev/ino fields), kept for the
 * old stat system calls.
 */
struct old_stat {
	unsigned short st_dev;		/* device */
	unsigned short st_ino;		/* inode number */
	unsigned short st_mode;		/* file type and permissions */
	unsigned short st_nlink;	/* number of hard links */
	unsigned short st_uid;		/* owner's user id */
	unsigned short st_gid;		/* owner's group id */
	unsigned short st_rdev;		/* device number, if device file */
	unsigned long st_size;		/* size in bytes */
	unsigned long st_atime;		/* last access time */
	unsigned long st_mtime;		/* last modification time */
	unsigned long st_ctime;		/* last status-change time */
};
|
/*
 * Newer stat structure with 32-bit inode/size and block information;
 * the padding and __unused fields keep the binary layout stable.
 */
struct new_stat {
	unsigned short st_dev;		/* device */
	unsigned short __pad1;
	unsigned long st_ino;		/* inode number */
	unsigned short st_mode;		/* file type and permissions */
	unsigned short st_nlink;	/* number of hard links */
	unsigned short st_uid;		/* owner's user id */
	unsigned short st_gid;		/* owner's group id */
	unsigned short st_rdev;		/* device number, if device file */
	unsigned short __pad2;
	unsigned long st_size;		/* size in bytes */
	unsigned long st_blksize;	/* preferred I/O block size */
	unsigned long st_blocks;	/* number of blocks allocated */
	unsigned long st_atime;		/* last access time */
	unsigned long __unused1;
	unsigned long st_mtime;		/* last modification time */
	unsigned long __unused2;
	unsigned long st_ctime;		/* last status-change time */
	unsigned long __unused3;
	unsigned long __unused4;
	unsigned long __unused5;
};
|
#endif |
/page.h
0,0 → 1,65
#ifndef _I386_PAGE_H |
#define _I386_PAGE_H |
|
/* PAGE_SHIFT determines the page size */ |
#define PAGE_SHIFT 12 |
#define PAGE_SIZE (1UL << PAGE_SHIFT) |
#define PAGE_MASK (~(PAGE_SIZE-1)) |
|
#ifdef __KERNEL__ |
#ifndef __ASSEMBLY__ |
|
#define STRICT_MM_TYPECHECKS |
|
#ifdef STRICT_MM_TYPECHECKS |
/* |
* These are used to make use of C type-checking.. |
*/ |
typedef struct { unsigned long pte; } pte_t; |
typedef struct { unsigned long pmd; } pmd_t; |
typedef struct { unsigned long pgd; } pgd_t; |
typedef struct { unsigned long pgprot; } pgprot_t; |
|
#define pte_val(x) ((x).pte) |
#define pmd_val(x) ((x).pmd) |
#define pgd_val(x) ((x).pgd) |
#define pgprot_val(x) ((x).pgprot) |
|
#define __pte(x) ((pte_t) { (x) } ) |
#define __pmd(x) ((pmd_t) { (x) } ) |
#define __pgd(x) ((pgd_t) { (x) } ) |
#define __pgprot(x) ((pgprot_t) { (x) } ) |
|
#else |
/* |
* .. while these make it easier on the compiler |
*/ |
typedef unsigned long pte_t; |
typedef unsigned long pmd_t; |
typedef unsigned long pgd_t; |
typedef unsigned long pgprot_t; |
|
#define pte_val(x) (x) |
#define pmd_val(x) (x) |
#define pgd_val(x) (x) |
#define pgprot_val(x) (x) |
|
#define __pte(x) (x) |
#define __pmd(x) (x) |
#define __pgd(x) (x) |
#define __pgprot(x) (x) |
|
#endif |
#endif /* !__ASSEMBLY__ */ |
|
/* to align the pointer to the (next) page boundary */ |
#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) |
|
/* This handles the memory map.. */ |
#define __PAGE_OFFSET ((0x1000-CONFIG_MAX_MEMSIZE)<<20) |
#define PAGE_OFFSET (0) |
#define MAP_NR(addr) (((unsigned long)(addr)) >> PAGE_SHIFT) |
|
#endif /* __KERNEL__ */ |
|
#endif /* _I386_PAGE_H */ |
/user.h
0,0 → 1,78
#ifndef _I386_USER_H |
#define _I386_USER_H |
|
#include <asm/page.h> |
#include <linux/ptrace.h> |
/* Core file format: The core file is written in such a way that gdb |
can understand it and provide useful information to the user (under |
linux we use the 'trad-core' bfd). There are quite a number of |
obstacles to being able to view the contents of the floating point |
registers, and until these are solved you will not be able to view the |
contents of them. Actually, you can read in the core file and look at |
the contents of the user struct to find out what the floating point |
registers contain. |
The actual file contents are as follows: |
UPAGE: 1 page consisting of a user struct that tells gdb what is present |
in the file. Directly after this is a copy of the task_struct, which |
is currently not used by gdb, but it may come in useful at some point. |
All of the registers are stored as part of the upage. The upage should |
always be only one page. |
DATA: The data area is stored. We use current->end_text to |
current->brk to pick up all of the user variables, plus any memory |
that may have been malloced. No attempt is made to determine if a page |
is demand-zero or if a page is totally unused, we just cover the entire |
range. All of the addresses are rounded in such a way that an integral |
number of pages is written. |
STACK: We need the stack information in order to get a meaningful |
backtrace. We need to write the data from (esp) to |
current->start_stack, so we round each of these off in order to be able |
to write an integer number of pages. |
The minimum core file size is 3 pages, or 12288 bytes. |
*/ |
|
/*
 * 80387 floating-point state: control/status/tag words, the saved
 * instruction and operand pointers, and the eight 10-byte registers.
 */
struct user_i387_struct {
	long cwd;		/* control word */
	long swd;		/* status word */
	long twd;		/* tag word */
	long fip;		/* fp instruction pointer offset */
	long fcs;		/* fp instruction pointer selector */
	long foo;		/* fp operand pointer offset */
	long fos;		/* fp operand pointer selector */
	long st_space[20];	/* 8*10 bytes for each FP-reg = 80 bytes */
};
|
/* When the kernel dumps core, it starts by dumping the user struct - |
this will be used by gdb to figure out where the data and stack segments |
are within the file, and what virtual addresses to use. */ |
struct user{
/* We start with the registers, to mimic the way that "memory" is returned
   from the ptrace(3,...) function.  */
  struct pt_regs regs;		/* Where the registers are actually stored */
/* ptrace does not yet supply these.  Someday.... */
  int u_fpvalid;		/* True if math co-processor being used. */
                                /* for this mess. Not yet used. */
  struct user_i387_struct i387;	/* Math Co-processor registers. */
/* The rest of this junk is to help gdb figure out what goes where */
  unsigned long int u_tsize;	/* Text segment size (pages). */
  unsigned long int u_dsize;	/* Data segment size (pages). */
  unsigned long int u_ssize;	/* Stack segment size (pages). */
  unsigned long start_code;     /* Starting virtual address of text. */
  unsigned long start_stack;	/* Starting virtual address of stack area.
				   This is actually the bottom of the stack,
				   the top of the stack is always found in the
				   esp register.  */
  long int signal;     		/* Signal that caused the core dump. */
  int reserved;			/* No longer used */
  struct pt_regs * u_ar0;	/* Used by gdb to help find the values for */
				/* the registers. */
  struct user_i387_struct* u_fpstate;	/* Math Co-processor pointer. */
  unsigned long magic;		/* To uniquely identify a core file */
  char u_comm[32];		/* User command that was responsible */
  int u_debugreg[8];		/* Hardware debug registers DR0-DR7. */
};
/* Bytes per page, as used by the core-dump code. */
#define NBPG PAGE_SIZE |
/* Number of pages of `struct user` data written at the start of a core file. */
#define UPAGES 1 |
/* The following two expect a variable `u` of type struct user in scope
   at the point of use (see the field references below). */
#define HOST_TEXT_START_ADDR (u.start_code) |
#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG) |
|
#endif /* _I386_USER_H */ |
/processor.h
0,0 → 1,201
/* |
* include/asm-i386/processor.h |
* |
* Copyright (C) 1994 Linus Torvalds |
*/ |
|
#ifndef __ASM_I386_PROCESSOR_H |
#define __ASM_I386_PROCESSOR_H |
|
#include <asm/vm86.h> |
#include <asm/math_emu.h> |
|
/* |
* System setup and hardware bug flags.. |
* [Note we don't test the 386 multiply bug or popad bug] |
*/ |
|
extern char hard_math; |
extern char x86; /* lower 4 bits */ |
extern char x86_vendor_id[13]; |
extern char x86_model; /* lower 4 bits */ |
extern char x86_mask; /* lower 4 bits */ |
extern int x86_capability; /* field of flags */ |
extern int fdiv_bug; |
extern char ignore_irq13; |
extern char wp_works_ok; /* doesn't work on a 386 */ |
extern char hlt_works_ok; /* problems on some 486Dx4's and old 386's */ |
extern int have_cpuid; /* We have a CPUID */ |
|
extern unsigned long cpu_hz; /* CPU clock frequency from time.c */ |
|
/* |
* Detection of CPU model (CPUID). |
*/ |
/*
 * Execute CPUID with %eax = op and return the resulting
 * %eax/%ebx/%ecx/%edx values through the four output pointers.
 * Condition codes are listed as clobbered defensively.
 */
extern inline void cpuid(int op, int *eax, int *ebx, int *ecx, int *edx) |
{ |
__asm__("cpuid" |
: "=a" (*eax), |
"=b" (*ebx), |
"=c" (*ecx), |
"=d" (*edx) |
: "a" (op) |
: "cc"); |
} |
|
/* |
* Cyrix CPU register indexes (use special macros to access these) |
*/ |
#define CX86_CCR2 0xc2 |
#define CX86_CCR3 0xc3 |
#define CX86_CCR4 0xe8 |
#define CX86_CCR5 0xe9 |
#define CX86_DIR0 0xfe |
#define CX86_DIR1 0xff |
|
/* |
* Cyrix CPU register access macros |
*/ |
|
/*
 * Read a Cyrix configuration register: write the register index
 * (see the CX86_* constants above) to I/O port 0x22, then read the
 * register's value back from port 0x23.
 */
extern inline unsigned char getCx86(unsigned char reg) |
{ |
unsigned char data; |
 |
__asm__ __volatile__("movb %1,%%al\n\t" |
"outb %%al,$0x22\n\t" |
"inb $0x23,%%al" : "=a" (data) : "q" (reg)); |
return data; |
} |
|
/*
 * Write a Cyrix configuration register: %al starts out holding reg
 * (the "a" input), which is written to port 0x22 to select the
 * register; the data byte is then moved into %al and written to
 * port 0x23.
 */
extern inline void setCx86(unsigned char reg, unsigned char data) |
{ |
__asm__ __volatile__("outb %%al,$0x22\n\t" |
"movb %1,%%al\n\t" |
"outb %%al,$0x23" : : "a" (reg), "q" (data)); |
} |
|
/* |
* Bus types (default is ISA, but people can check others with these..) |
* MCA_bus hardcoded to 0 for now. |
*/ |
extern int EISA_bus; |
#define MCA_bus 0 |
#define MCA_bus__is_a_macro /* for versions in ksyms.c */ |
|
/* |
* User space process size: 3GB (default). |
*/ |
#define TASK_SIZE ((unsigned long)__PAGE_OFFSET) |
#define MAX_USER_ADDR TASK_SIZE |
#define MMAP_SEARCH_START (TASK_SIZE/3) |
|
/* |
* Size of io_bitmap in longwords: 32 is ports 0-0x3ff. |
*/ |
#define IO_BITMAP_SIZE 32 |
|
/* FPU state as saved by the hardware fnsave instruction (see switch_to()
   in system.h), plus a kernel-software status word at the end. */
struct i387_hard_struct { |
long cwd; |
long swd; |
long twd; |
long fip; |
long fcs; |
long foo; |
long fos; |
long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */ |
long status; /* software status information */ |
}; |
 |
/* FPU state kept by the math emulator (asm/math_emu.h supplies
   struct fpu_reg). */
struct i387_soft_struct { |
long cwd; |
long swd; |
long twd; |
long fip; |
long fcs; |
long foo; |
long fos; |
long top; |
struct fpu_reg regs[8]; /* 8*16 bytes for each FP-reg = 128 bytes */ |
unsigned char lookahead; |
struct info *info; |
unsigned long entry_eip; |
}; |
 |
/* A task has either hardware or emulated FPU state, never both. */
union i387_union { |
struct i387_hard_struct hard; |
struct i387_soft_struct soft; |
}; |
|
/*
 * Per-task CPU state.  The fields from back_link through io_bitmap
 * follow the 386 hardware TSS layout: this struct is installed as a
 * TSS via set_tss_desc()/load_TR() (see system.h and INIT_TSS below),
 * and switch_to() task-switches by ljmp-ing to its selector (tss.tr).
 * The fields after io_bitmap are software-only.
 */
struct thread_struct { |
unsigned short back_link,__blh; |
unsigned long esp0; /* kernel stack pointer; INIT_TSS points it at init_kernel_stack */ |
unsigned short ss0,__ss0h; /* kernel stack segment (KERNEL_DS in INIT_TSS) */ |
unsigned long esp1; |
unsigned short ss1,__ss1h; |
unsigned long esp2; |
unsigned short ss2,__ss2h; |
unsigned long cr3; /* page directory base (swapper_pg_dir in INIT_TSS) */ |
unsigned long eip; |
unsigned long eflags; |
unsigned long eax,ecx,edx,ebx; |
unsigned long esp; |
unsigned long ebp; |
unsigned long esi; |
unsigned long edi; |
unsigned short es, __esh; |
unsigned short cs, __csh; |
unsigned short ss, __ssh; |
unsigned short ds, __dsh; |
unsigned short fs, __fsh; |
unsigned short gs, __gsh; |
unsigned short ldt, __ldth; |
unsigned short trace, bitmap; /* bitmap = offset of io_bitmap (0x8000 in INIT_TSS => none within limit) */ |
unsigned long io_bitmap[IO_BITMAP_SIZE+1]; |
/* --- software state below this line --- */
unsigned long tr; /* this task's TSS selector (_TSS(n)); used as ljmp target in switch_to() */ |
unsigned long cr2, trap_no, error_code; |
/* floating point info */ |
union i387_union i387; |
/* virtual 86 mode info */ |
struct vm86_struct * vm86_info; |
unsigned long screen_bitmap; |
unsigned long v86flags, v86mask, v86mode; |
}; |
|
/* Initial mmap for the init task: [0, 1GB) read/write/exec, shared pages. */
#define INIT_MMAP { &init_mm, 0, 0x40000000, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC } |
 |
/* Positional initializer for the init task's thread_struct above.
   The comments map each group to its fields. */
#define INIT_TSS  { \ |
0,0, /* back_link,__blh */ \ |
sizeof(init_kernel_stack) + (long) &init_kernel_stack, /* esp0: top of init kernel stack */ \ |
KERNEL_DS, 0, /* ss0,__ss0h */ \ |
0,0,0,0,0,0, /* esp1,ss1,__ss1h,esp2,ss2,__ss2h */ \ |
(long) &swapper_pg_dir, /* cr3 */ \ |
0,0,0,0,0,0,0,0,0,0, /* eip,eflags,eax,ecx,edx,ebx,esp,ebp,esi,edi */ \ |
USER_DS,0,USER_DS,0,USER_DS,0,USER_DS,0,USER_DS,0,USER_DS,0, /* es..gs + pad halves */ \ |
_LDT(0),0, /* ldt,__ldth */ \ |
0, 0x8000, /* trace, bitmap (I/O bitmap beyond TSS limit) */ \ |
{~0, }, /* ioperm */ \ |
_TSS(0), 0, 0,0, /* tr, cr2, trap_no, error_code */ \ |
{ { 0, }, }, /* 387 state */ \ |
NULL, 0, 0, 0, 0 /* vm86_info */ \ |
} |
|
#define alloc_kernel_stack() __get_free_page(GFP_KERNEL) |
#define free_kernel_stack(page) free_page((page)) |
|
static inline void start_thread(struct pt_regs * regs, unsigned long eip, unsigned long esp) |
{ |
regs->cs = USER_CS; |
regs->ds = regs->es = regs->ss = regs->fs = regs->gs = USER_DS; |
regs->eip = eip; |
regs->esp = esp; |
} |
|
/* |
* Return saved PC of a blocked thread. |
*/ |
/* Return the saved PC of a blocked (switched-out) thread: the 4th
   dword on its saved kernel stack at t->esp.  NOTE(review): the [3]
   offset is assumed to be where the return EIP sits in the frame laid
   down before the switch -- confirm against schedule()/switch_to(). */
extern inline unsigned long thread_saved_pc(struct thread_struct *t) |
{ |
return ((unsigned long *)t->esp)[3]; |
} |
|
#endif /* __ASM_I386_PROCESSOR_H */ |
/semaphore.h
0,0 → 1,127
#ifndef _I386_SEMAPHORE_H |
#define _I386_SEMAPHORE_H |
|
#include <linux/linkage.h> |
#include <asm/system.h> |
|
/* |
* SMP- and interrupt-safe semaphores.. |
* |
* (C) Copyright 1996 Linus Torvalds |
* |
* Modified 1996-12-23 by Dave Grothe <dave@gcom.com> to fix bugs in |
* the original code and to make semaphore waits |
* interruptible so that processes waiting on |
* semaphores can be killed. |
* |
* If you would like to see an analysis of this implementation, please |
* ftp to gcom.com and download the file |
* /pub/linux/src/semaphore/semaphore-2.0.24.tar.gz. |
* |
*/ |
|
struct semaphore { |
int count; /* >0: free units; decremented by down(), incremented by up() */ |
int waking; /* pending wakeups, manipulated by the __down/__up C code */ |
int lock ; /* to make waking testing atomic (see the buzz-lock helpers below) */ |
struct wait_queue * wait; /* tasks sleeping on this semaphore */ |
}; |
 |
/* Static initializers: an unlocked and a locked mutex-style semaphore. */
#define MUTEX ((struct semaphore) { 1, 0, 0, NULL }) |
#define MUTEX_LOCKED ((struct semaphore) { 0, 0, 0, NULL }) |
 |
/* Assembler glue (arch/i386/lib/semaphore.S); entered by the inline asm
   below with the resume address in %eax and the semaphore in %ecx. */
asmlinkage void down_failed(void /* special register calling convention */); |
asmlinkage void up_wakeup(void /* special register calling convention */); |
 |
/* Slow paths, called from the assembler glue. */
extern void __down(struct semaphore * sem); |
extern void __up(struct semaphore * sem); |
|
/* |
* This is ugly, but we want the default case to fall through. |
* "down_failed" is a special asm handler that calls the C |
* routine that actually waits. See arch/i386/lib/semaphore.S |
*/ |
/*
 * Acquire the semaphore.  Atomically decrements sem->count (the first
 * word of *sem, addressed as 0(%ecx)); if the result went negative we
 * jump to the down_failed stub with the resume address (label 1)
 * preloaded in %eax -- the "special register calling convention" of
 * the declarations above.  No jump at all in the uncontended case.
 */
extern inline void down(struct semaphore * sem) |
{ |
__asm__ __volatile__( |
"# atomic down operation\n\t" |
"movl $1f,%%eax\n\t" |
#ifdef __SMP__ |
"lock ; " |
#endif |
"decl 0(%0)\n\t" |
"js " SYMBOL_NAME_STR(down_failed) "\n" |
"1:\n" |
:/* no outputs */ |
:"c" (sem) |
:"ax","dx","memory"); |
} |
|
/* |
* Primitives to spin on a lock. Needed only for SMP version. |
*/ |
/* Busy-wait until the lock word is claimed (atomic xchg of 1; a prior
   value of 0 means we got it).  Compiles to nothing on UP kernels. */
extern inline void get_buzz_lock(int *lock_ptr) |
{ |
#ifdef __SMP__ |
while (xchg(lock_ptr,1) != 0) ; |
#endif |
} /* get_buzz_lock */ |
 |
/* Release a buzz lock by storing 0.  No-op on UP kernels. */
extern inline void give_buzz_lock(int *lock_ptr) |
{ |
#ifdef __SMP__ |
*lock_ptr = 0 ; |
#endif |
} /* give_buzz_lock */ |
|
asmlinkage int down_failed_interruptible(void); /* params in registers */ |
|
/* |
* This version waits in interruptible state so that the waiting |
* process can be killed. The down_failed_interruptible routine |
* returns negative for signalled and zero for semaphore acquired. |
*/ |
/*
 * Interruptible down: same fast path as down(), but on contention it
 * enters down_failed_interruptible (resume address = label 2 in %eax).
 * Returns 0 when the semaphore was acquired (the fast path zeroes
 * %eax), negative when the wait was interrupted by a signal (value
 * produced by the slow path -- see the comment above).
 */
extern inline int down_interruptible(struct semaphore * sem) |
{ |
int ret ; |
 |
__asm__ __volatile__( |
"# atomic interruptible down operation\n\t" |
"movl $2f,%%eax\n\t" |
#ifdef __SMP__ |
"lock ; " |
#endif |
"decl 0(%1)\n\t" |
"js " SYMBOL_NAME_STR(down_failed_interruptible) "\n\t" |
"xorl %%eax,%%eax\n" |
"2:\n" |
:"=a" (ret) |
:"c" (sem) |
:"ax","dx","memory"); |
 |
return(ret) ; |
} |
|
/* |
* Note! This is subtle. We jump to wake people up only if |
* the semaphore was negative (== somebody was waiting on it). |
* The default case (no contention) will result in NO |
* jumps for both down() and up(). |
*/ |
/*
 * Release the semaphore.  Atomically increments sem->count; if the
 * result is still <= 0 somebody was waiting, so jump to the up_wakeup
 * stub (resume address = label 1 in %eax).  No jump when there is no
 * contention -- see the comment above.
 */
extern inline void up(struct semaphore * sem) |
{ |
__asm__ __volatile__( |
"# atomic up operation\n\t" |
"movl $1f,%%eax\n\t" |
#ifdef __SMP__ |
"lock ; " |
#endif |
"incl 0(%0)\n\t" |
"jle " SYMBOL_NAME_STR(up_wakeup) |
"\n1:" |
:/* no outputs */ |
:"c" (sem) |
:"ax", "dx", "memory"); |
} |
|
#endif |
/system.h
0,0 → 1,334
#ifndef __ASM_SYSTEM_H |
#define __ASM_SYSTEM_H |
|
#include <asm/segment.h> |
|
/* |
* Entry into gdt where to find first TSS. GDT layout: |
* 0 - null |
* 1 - not used |
* 2 - kernel code segment |
* 3 - kernel data segment |
* 4 - user code segment |
* 5 - user data segment |
* ... |
* 8 - TSS #0 |
* 9 - LDT #0 |
* 10 - TSS #1 |
* 11 - LDT #1 |
*/ |
/* Each task owns a consecutive TSS + LDT descriptor pair, so task n's
   descriptors sit 16*n bytes (n<<4 = two 8-byte descriptors) past the
   first entries; _TSS()/_LDT() yield the GDT selector values. */
#define FIRST_TSS_ENTRY 8 |
#define FIRST_LDT_ENTRY (FIRST_TSS_ENTRY+1) |
#define _TSS(n) ((((unsigned long) n)<<4)+(FIRST_TSS_ENTRY<<3)) |
#define _LDT(n) ((((unsigned long) n)<<4)+(FIRST_LDT_ENTRY<<3)) |
/* Load the task register / LDT register with task n's selector. */
#define load_TR(n) __asm__("ltr %%ax": /* no output */ :"a" (_TSS(n))) |
#define load_ldt(n) __asm__("lldt %%ax": /* no output */ :"a" (_LDT(n))) |
/* Inverse of _TSS: read the task register and recover the task number
   ((selector - first TSS selector) >> 4). */
#define store_TR(n) \ |
__asm__("str %%ax\n\t" \ |
"subl %2,%%eax\n\t" \ |
"shrl $4,%%eax" \ |
:"=a" (n) \ |
:"0" (0),"i" (FIRST_TSS_ENTRY<<3)) |
|
/* This special macro can be used to load a debugging register */ |
|
/*
 * Load hardware debug register %db<regno> from tsk->debugreg[regno].
 * Goes through %edx because the dbN registers can only be loaded from
 * a general-purpose register.
 *
 * Fixes versus the previous version:
 *  - no trailing semicolon in the macro body: callers supply their own
 *    (all call sites below already do), so the macro is now safe inside
 *    an unbraced if/else;
 *  - the parameter is named `regno` instead of the C keyword `register`.
 * Call-site expansion is otherwise unchanged.
 */
#define loaddebug(tsk,regno) \
__asm__("movl %0,%%edx\n\t" \
	"movl %%edx,%%db" #regno "\n\t" \
	: /* no output */ \
	:"m" (tsk->debugreg[regno]) \
	:"dx")
|
|
/* |
* switch_to(n) should switch tasks to task nr n, first |
* checking that n isn't the current task, in which case it does nothing. |
* This also clears the TS-flag if the task we switched to has used |
* the math co-processor latest. |
* |
* It also reloads the debug regs if necessary.. |
*/ |
|
|
#ifdef __SMP__ |
/* |
* Keep the lock depth straight. If we switch on an interrupt from |
* kernel->user task we need to lose a depth, and if we switch the |
* other way we need to gain a depth. Same layer switches come out |
* the same. |
* |
* We spot a switch in user mode because the kernel counter is the |
* same as the interrupt counter depth. (We never switch during the |
* message/invalidate IPI). |
* |
* We fsave/fwait so that an exception goes off at the right time |
* (as a call from the fsave or fwait in effect) rather than to |
* the wrong process. |
*/ |
|
/*
 * SMP context switch:
 *  1. fnsave/fwait the FPU state into prev if it used the FPU (the
 *     fwait makes any pending FP exception fire here, not in the
 *     wrong process -- see the comment above).
 *  2. Fix up the kernel-lock depth bookkeeping (comment above).
 *  3. Derive this CPU's current_set slot from the local APIC ID
 *     (register at offset 0x20 of apic_reg; shr $22 / and $0x3C
 *     leaves id*4 as a byte offset) and store next there.
 *  4. Hardware task switch: ljmp through a 6-byte far pointer whose
 *     selector word is next->tss.tr (the memory operand starts 4
 *     bytes below tr so tr lands in the selector slot).
 *  5. Reload debug registers if prev has any enabled (dr7 != 0).
 */
#define switch_to(prev,next) do { \ |
cli();\ |
if(prev->flags&PF_USEDFPU) \ |
{ \ |
__asm__ __volatile__("fnsave %0":"=m" (prev->tss.i387.hard)); \ |
__asm__ __volatile__("fwait"); \ |
prev->flags&=~PF_USEDFPU; \ |
} \ |
prev->lock_depth=syscall_count; \ |
kernel_counter+=next->lock_depth-prev->lock_depth; \ |
syscall_count=next->lock_depth; \ |
__asm__("pushl %%edx\n\t" \ |
"movl "SYMBOL_NAME_STR(apic_reg)",%%edx\n\t" \ |
"movl 0x20(%%edx), %%edx\n\t" \ |
"shrl $22,%%edx\n\t" \ |
"and $0x3C,%%edx\n\t" \ |
"movl %%ecx,"SYMBOL_NAME_STR(current_set)"(,%%edx)\n\t" \ |
"popl %%edx\n\t" \ |
"ljmp %0\n\t" \ |
"sti\n\t" \ |
: /* no output */ \ |
:"m" (*(((char *)&next->tss.tr)-4)), \ |
"c" (next)); \ |
/* Now maybe reload the debug registers */ \ |
if(prev->debugreg[7]){ \ |
loaddebug(prev,0); \ |
loaddebug(prev,1); \ |
loaddebug(prev,2); \ |
loaddebug(prev,3); \ |
loaddebug(prev,6); \ |
} \ |
} while (0) |
|
#else |
/*
 * UP context switch: record next as the current task, hardware
 * task-switch via ljmp to next's TSS selector (same far-pointer trick
 * as the SMP version), then clear CR0.TS (clts) if next was the last
 * task to use the FPU so it can use it without faulting.  Finally
 * reload the debug registers if prev has any enabled (dr7 != 0).
 */
#define switch_to(prev,next) do { \ |
__asm__("movl %2,"SYMBOL_NAME_STR(current_set)"\n\t" \ |
"ljmp %0\n\t" \ |
"cmpl %1,"SYMBOL_NAME_STR(last_task_used_math)"\n\t" \ |
"jne 1f\n\t" \ |
"clts\n" \ |
"1:" \ |
: /* no outputs */ \ |
:"m" (*(((char *)&next->tss.tr)-4)), \ |
"r" (prev), "r" (next)); \ |
/* Now maybe reload the debug registers */ \ |
if(prev->debugreg[7]){ \ |
loaddebug(prev,0); \ |
loaddebug(prev,1); \ |
loaddebug(prev,2); \ |
loaddebug(prev,3); \ |
loaddebug(prev,6); \ |
} \ |
} while (0) |
#endif |
|
/* Scatter a 32-bit base into a segment descriptor at addr:
   low word -> bytes 2-3, middle byte -> byte 4, high byte -> byte 7. */
#define _set_base(addr,base) \ |
__asm__("movw %%dx,%0\n\t" \ |
"rorl $16,%%edx\n\t" \ |
"movb %%dl,%1\n\t" \ |
"movb %%dh,%2" \ |
: /* no output */ \ |
:"m" (*((addr)+2)), \ |
"m" (*((addr)+4)), \ |
"m" (*((addr)+7)), \ |
"d" (base) \ |
:"dx") |
 |
/* Store a 20-bit limit: low word -> bytes 0-1, top nibble merged into
   the low nibble of byte 6 (the andb $0xf0 keeps that byte's flag
   nibble intact). */
#define _set_limit(addr,limit) \ |
__asm__("movw %%dx,%0\n\t" \ |
"rorl $16,%%edx\n\t" \ |
"movb %1,%%dh\n\t" \ |
"andb $0xf0,%%dh\n\t" \ |
"orb %%dh,%%dl\n\t" \ |
"movb %%dl,%1" \ |
: /* no output */ \ |
:"m" (*(addr)), \ |
"m" (*((addr)+6)), \ |
"d" (limit) \ |
:"dx") |
|
/* Convenience wrappers taking a descriptor lvalue.  set_limit()
 * converts a byte size into a 4K-granular descriptor limit via
 * (limit-1)>>12.  Both arguments are now fully parenthesized so that
 * expression arguments (e.g. set_limit(d, a ? b : c)) expand
 * correctly; expansion for existing callers is unchanged. */
#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , (((limit)-1)>>12) )
|
/* Rebuild a descriptor's 32-bit base from bytes 7 (high), 4 (middle)
   and 2-3 (low word) -- the inverse of _set_base above.  The "=&d"
   early-clobber keeps gcc from placing an input in %edx. */
static inline unsigned long _get_base(char * addr) |
{ |
unsigned long __base; |
__asm__("movb %3,%%dh\n\t" |
"movb %2,%%dl\n\t" |
"shll $16,%%edx\n\t" |
"movw %1,%%dx" |
:"=&d" (__base) |
:"m" (*((addr)+2)), |
"m" (*((addr)+4)), |
"m" (*((addr)+7))); |
return __base; |
} |
 |
#define get_base(ldt) _get_base( ((char *)&(ldt)) ) |
 |
/* Read a segment's limit with lsl and convert it to a size (+1). */
static inline unsigned long get_limit(unsigned long segment) |
{ |
unsigned long __limit; |
__asm__("lsll %1,%0" |
:"=r" (__limit):"r" (segment)); |
return __limit+1; |
} |
|
#define nop() __asm__ __volatile__ ("nop") |
 |
/* |
* Clear and set 'TS' bit respectively |
*/ |
#define clts() __asm__ __volatile__ ("clts") |
/* Set CR0.TS by OR-ing in bit 3 (value 8) via %eax. */
#define stts() \ |
__asm__ __volatile__ ( \ |
"movl %%cr0,%%eax\n\t" \ |
"orl $8,%%eax\n\t" \ |
"movl %%eax,%%cr0" \ |
: /* no outputs */ \ |
: /* no inputs */ \ |
:"ax") |
|
|
/* Type-generic atomic exchange: dispatches on sizeof(*(ptr)) and casts
   the result back to the pointed-to type. */
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) |
#define tas(ptr) (xchg((ptr),1)) |
 |
/* Dummy oversized type: casting ptr to it in the "m" constraint tells
   gcc the asm may touch the whole object, not just one word. */
struct __xchg_dummy { unsigned long a[100]; }; |
#define __xg(x) ((struct __xchg_dummy *)(x)) |
 |
/* Exchange x with *ptr for operand sizes 1, 2 or 4; returns the old
   value.  No explicit lock prefix: per the x86 ISA, xchg with a memory
   operand is implicitly locked.  NOTE(review): any other size silently
   returns x without exchanging -- no default case diagnoses misuse. */
static inline unsigned long __xchg(unsigned long x, void * ptr, int size) |
{ |
switch (size) { |
case 1: |
__asm__("xchgb %b0,%1" |
:"=q" (x) |
:"m" (*__xg(ptr)), "0" (x) |
:"memory"); |
break; |
case 2: |
__asm__("xchgw %w0,%1" |
:"=r" (x) |
:"m" (*__xg(ptr)), "0" (x) |
:"memory"); |
break; |
case 4: |
__asm__("xchgl %0,%1" |
:"=r" (x) |
:"m" (*__xg(ptr)), "0" (x) |
:"memory"); |
break; |
} |
return x; |
} |
|
/* Compiler-level memory barrier (no instruction; the "memory" clobber
   stops gcc reordering/caching memory accesses across it). */
#define mb() __asm__ __volatile__ ("" : : :"memory") |
/* Enable / disable interrupts on this CPU. */
#define sti() __asm__ __volatile__ ("sti": : :"memory") |
#define cli() __asm__ __volatile__ ("cli": : :"memory") |
 |
/* Capture EFLAGS into x / restore EFLAGS from x via the stack. */
#define save_flags(x) \ |
__asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */ :"memory") |
 |
#define restore_flags(x) \ |
__asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory") |
 |
#define iret() __asm__ __volatile__ ("iret": : :"memory") |
|
/* Build an IDT gate descriptor in place: low dword = KERNEL_CS<<16 |
   low 16 bits of handler address; high dword = high 16 bits of the
   address | 0x8000 (present) | dpl<<13 | type<<8. */
#define _set_gate(gate_addr,type,dpl,addr) \ |
__asm__ __volatile__ ("movw %%dx,%%ax\n\t" \ |
"movw %2,%%dx\n\t" \ |
"movl %%eax,%0\n\t" \ |
"movl %%edx,%1" \ |
:"=m" (*((long *) (gate_addr))), \ |
"=m" (*(1+(long *) (gate_addr))) \ |
:"i" ((short) (0x8000+(dpl<<13)+(type<<8))), \ |
"d" ((char *) (addr)),"a" (KERNEL_CS << 16) \ |
:"ax","dx") |
 |
/* type 14 = interrupt gate, dpl 0. */
#define set_intr_gate(n,addr) \ |
_set_gate(&idt[n],14,0,addr) |
 |
/* type 15 = trap gate, dpl 0. */
#define set_trap_gate(n,addr) \ |
_set_gate(&idt[n],15,0,addr) |
 |
/* trap gate callable from user mode (dpl 3), e.g. system calls. */
#define set_system_gate(n,addr) \ |
_set_gate(&idt[n],15,3,addr) |
 |
/* type 12 = call gate, dpl 3, at an arbitrary descriptor address. */
#define set_call_gate(a,addr) \ |
_set_gate(a,12,3,addr) |
|
/* Assemble a code/data segment descriptor from base/limit/type/dpl;
   0x00408000 sets the present and 32-bit (D/B) bits. */
#define _set_seg_desc(gate_addr,type,dpl,base,limit) {\ |
*((gate_addr)+1) = ((base) & 0xff000000) | \ |
(((base) & 0x00ff0000)>>16) | \ |
((limit) & 0xf0000) | \ |
((dpl)<<13) | \ |
(0x00408000) | \ |
((type)<<8); \ |
*(gate_addr) = (((base) & 0x0000ffff)<<16) | \ |
((limit) & 0x0ffff); } |
 |
/* Write a TSS/LDT system descriptor byte by byte at n: limit word,
   base low word (bytes 2-3), base byte 4, the type byte, a zero byte,
   then base byte 7.  Note addr is a physical-style address with
   __PAGE_OFFSET added back. */
#define _set_tssldt_desc(n,addr,limit,type) \ |
__asm__ __volatile__ ("movw $" #limit ",%1\n\t" \ |
"movw %%ax,%2\n\t" \ |
"rorl $16,%%eax\n\t" \ |
"movb %%al,%3\n\t" \ |
"movb $" type ",%4\n\t" \ |
"movb $0x00,%5\n\t" \ |
"movb %%ah,%6\n\t" \ |
"rorl $16,%%eax" \ |
: /* no output */ \ |
:"a" (addr+__PAGE_OFFSET), "m" (*(n)), "m" (*(n+2)), "m" (*(n+4)), \ |
"m" (*(n+5)), "m" (*(n+6)), "m" (*(n+7)) \ |
) |
 |
/* limit 235: NOTE(review) presumably sizeof(TSS)-1 for this layout;
   type 0x89 = available 32-bit TSS, 0x82 = LDT (descriptor type codes). */
#define set_tss_desc(n,addr) _set_tssldt_desc(((char *) (n)),((int)(addr)),235,"0x89") |
#define set_ldt_desc(n,addr,size) \ |
_set_tssldt_desc(((char *) (n)),((int)(addr)),((size << 3) - 1),"0x82") |
|
/* |
* This is the ldt that every process will get unless we need |
* something other than this. |
*/ |
extern struct desc_struct default_ldt; |
|
/* |
* disable hlt during certain critical i/o operations |
*/ |
#define HAVE_DISABLE_HLT |
void disable_hlt(void); |
void enable_hlt(void); |
|
/* Read model-specific register `msr`; the "=A" constraint returns the
   64-bit %edx:%eax pair. */
static __inline__ unsigned long long rdmsr(unsigned int msr) |
{ |
unsigned long long ret; |
__asm__ __volatile__("rdmsr" |
: "=A" (ret) |
: "c" (msr)); |
return ret; |
} |
 |
/* Write the 64-bit value (passed in %edx:%eax via "A") to MSR `msr`. */
static __inline__ void wrmsr(unsigned int msr,unsigned long long val) |
{ |
__asm__ __volatile__("wrmsr" |
: /* no Outputs */ |
: "c" (msr), "A" (val)); |
} |
 |
 |
/* Read the 64-bit time-stamp counter. */
static __inline__ unsigned long long rdtsc(void) |
{ |
unsigned long long ret; |
__asm__ __volatile__("rdtsc" |
: "=A" (ret) |
: /* no inputs */); |
return ret; |
} |
 |
/* Read performance-monitoring counter `counter` (index in %ecx). */
static __inline__ unsigned long long rdpmc(unsigned int counter) |
{ |
unsigned long long ret; |
__asm__ __volatile__("rdpmc" |
: "=A" (ret) |
: "c" (counter)); |
return ret; |
} |
|
#endif |
/a.out.h
0,0 → 1,26
#ifndef __I386_A_OUT_H__ |
#define __I386_A_OUT_H__ |
|
/* a.out executable file header; a_info carries the magic number
   (accessed with the N_MAGIC-style macros, per the comment below). */
struct exec |
{ |
unsigned long a_info; /* Use macros N_MAGIC, etc for access */ |
unsigned a_text; /* length of text, in bytes */ |
unsigned a_data; /* length of data, in bytes */ |
unsigned a_bss; /* length of uninitialized data area for file, in bytes */ |
unsigned a_syms; /* length of symbol table data in file, in bytes */ |
unsigned a_entry; /* start address */ |
unsigned a_trsize; /* length of relocation info for text, in bytes */ |
unsigned a_drsize; /* length of relocation info for data, in bytes */ |
}; |
|
#define N_TRSIZE(a) ((a).a_trsize) |
#define N_DRSIZE(a) ((a).a_drsize) |
#define N_SYMSIZE(a) ((a).a_syms) |
|
#ifdef __KERNEL__ |
|
#define STACK_TOP TASK_SIZE |
|
#endif |
|
#endif /* __I386_A_OUT_H__ */ |
/resource.h
0,0 → 1,39
#ifndef _I386_RESOURCE_H |
#define _I386_RESOURCE_H |
|
/* |
* Resource limits |
*/ |
|
#define RLIMIT_CPU 0 /* CPU time in ms */ |
#define RLIMIT_FSIZE 1 /* Maximum filesize */ |
#define RLIMIT_DATA 2 /* max data size */ |
#define RLIMIT_STACK 3 /* max stack size */ |
#define RLIMIT_CORE 4 /* max core file size */ |
#define RLIMIT_RSS 5 /* max resident set size */ |
#define RLIMIT_NPROC 6 /* max number of processes */ |
#define RLIMIT_NOFILE 7 /* max number of open files */ |
#define RLIMIT_MEMLOCK 8 /* max locked-in-memory address space */ |
#define RLIMIT_AS 9 /* address space limit */ |
|
#define RLIM_NLIMITS 10 |
|
#ifdef __KERNEL__ |
|
/* Initial {current, max} resource limits for the init task, in the
   RLIMIT_* index order defined above. */
#define INIT_RLIMITS \ |
{ \ |
{ LONG_MAX, LONG_MAX }, /* RLIMIT_CPU */ \ |
{ LONG_MAX, LONG_MAX }, /* RLIMIT_FSIZE */ \ |
{ LONG_MAX, LONG_MAX }, /* RLIMIT_DATA */ \ |
{ _STK_LIM, _STK_LIM }, /* RLIMIT_STACK */ \ |
{ 0, LONG_MAX }, /* RLIMIT_CORE: no core dumps by default */ \ |
{ LONG_MAX, LONG_MAX }, /* RLIMIT_RSS */ \ |
{ MAX_TASKS_PER_USER, MAX_TASKS_PER_USER }, /* RLIMIT_NPROC */ \ |
{ NR_OPEN, NR_OPEN }, /* RLIMIT_NOFILE */ \ |
{ LONG_MAX, LONG_MAX }, /* RLIMIT_MEMLOCK */ \ |
{ LONG_MAX, LONG_MAX }, /* RLIMIT_AS */ \ |
} |
|
#endif /* __KERNEL__ */ |
|
#endif |
/irq.h
0,0 → 1,420
#ifndef _ASM_IRQ_H |
#define _ASM_IRQ_H |
|
/* |
* linux/include/asm/irq.h |
* |
* (C) 1992, 1993 Linus Torvalds |
* |
* IRQ/IPI changes taken from work by Thomas Radke <tomsoft@informatik.tu-chemnitz.de> |
*/ |
|
#include <linux/linkage.h> |
#include <asm/segment.h> |
|
#define NR_IRQS 16 |
|
#define TIMER_IRQ 0 |
|
extern void disable_irq(unsigned int); |
extern void enable_irq(unsigned int); |
|
#define __STR(x) #x |
#define STR(x) __STR(x) |
|
/* Full interrupt entry frame: push all segment and general registers
   (the pt_regs layout), load kernel %ds/%es and USER_DS into %fs,
   then clear %db7 to disable hardware breakpoints while in the
   kernel.  Emitted as a string into the BUILD_*IRQ asm below. */
#define SAVE_ALL \ |
"cld\n\t" \ |
"push %gs\n\t" \ |
"push %fs\n\t" \ |
"push %es\n\t" \ |
"push %ds\n\t" \ |
"pushl %eax\n\t" \ |
"pushl %ebp\n\t" \ |
"pushl %edi\n\t" \ |
"pushl %esi\n\t" \ |
"pushl %edx\n\t" \ |
"pushl %ecx\n\t" \ |
"pushl %ebx\n\t" \ |
"movl $" STR(KERNEL_DS) ",%edx\n\t" \ |
"mov %dx,%ds\n\t" \ |
"mov %dx,%es\n\t" \ |
"movl $" STR(USER_DS) ",%edx\n\t" \ |
"mov %dx,%fs\n\t" \ |
"movl $0,%edx\n\t" \ |
"movl %edx,%db7\n\t" |
|
/* |
* SAVE_MOST/RESTORE_MOST is used for the faster version of IRQ handlers, |
* installed by using the SA_INTERRUPT flag. These kinds of IRQ's don't |
* call the routines that do signal handling etc on return, and can have |
* more relaxed register-saving etc. They are also atomic, and are thus |
* suited for small, fast interrupts like the serial lines or the harddisk |
* drivers, which don't actually need signal handling etc. |
* |
* Also note that we actually save only those registers that are used in |
* C subroutines (%eax, %edx and %ecx), so if you do something weird, |
* you're on your own. The only segments that are saved (not counting the |
* automatic stack and code segment handling) are %ds and %es, and they |
* point to kernel space. No messing around with %fs here. |
*/ |
/* Reduced entry frame for SA_INTERRUPT (fast) handlers: only the
   C-clobbered registers %eax/%edx/%ecx plus %ds/%es, loaded with
   kernel segments -- see the comment above. */
#define SAVE_MOST \ |
"cld\n\t" \ |
"push %es\n\t" \ |
"push %ds\n\t" \ |
"pushl %eax\n\t" \ |
"pushl %edx\n\t" \ |
"pushl %ecx\n\t" \ |
"movl $" STR(KERNEL_DS) ",%edx\n\t" \ |
"mov %dx,%ds\n\t" \ |
"mov %dx,%es\n\t" |
 |
/* Exact inverse of SAVE_MOST, finishing with iret. */
#define RESTORE_MOST \ |
"popl %ecx\n\t" \ |
"popl %edx\n\t" \ |
"popl %eax\n\t" \ |
"pop %ds\n\t" \ |
"pop %es\n\t" \ |
"iret" |
|
/* |
* The "inb" instructions are not needed, but seem to change the timings |
* a bit - without them it seems that the harddisk driver won't work on |
* all hardware. Arghh. |
*/ |
/* Acknowledge an IRQ on the first (master) 8259 PIC: set the IRQ's bit
   in the cached mask (cache_21), write the mask to port 0x21, then
   send EOI (0x20) to port 0x20.  The jmp 1f pairs are I/O delays; the
   leading inb is the timing hack described above. */
#define ACK_FIRST(mask,nr) \ |
"inb $0x21,%al\n\t" \ |
"jmp 1f\n" \ |
"1:\tjmp 1f\n" \ |
"1:\torb $" #mask ","SYMBOL_NAME_STR(cache_21)"\n\t" \ |
"movb "SYMBOL_NAME_STR(cache_21)",%al\n\t" \ |
"outb %al,$0x21\n\t" \ |
"jmp 1f\n" \ |
"1:\tjmp 1f\n" \ |
"1:\tmovb $0x20,%al\n\t" \ |
"outb %al,$0x20\n\t" |
 |
/* Same for the second (slave) PIC at ports 0xA0/0xA1 (mask cache
   cache_A1); EOI goes to both the slave and the master. */
#define ACK_SECOND(mask,nr) \ |
"inb $0xA1,%al\n\t" \ |
"jmp 1f\n" \ |
"1:\tjmp 1f\n" \ |
"1:\torb $" #mask ","SYMBOL_NAME_STR(cache_A1)"\n\t" \ |
"movb "SYMBOL_NAME_STR(cache_A1)",%al\n\t" \ |
"outb %al,$0xA1\n\t" \ |
"jmp 1f\n" \ |
"1:\tjmp 1f\n" \ |
"1:\tmovb $0x20,%al\n\t" \ |
"outb %al,$0xA0\n\t" \ |
"jmp 1f\n" \ |
"1:\tjmp 1f\n" \ |
"1:\toutb %al,$0x20\n\t" |
 |
/* do not modify the ISR nor the cache_A1 variable */ |
#define MSGACK_SECOND(mask,nr) \ |
"inb $0xA1,%al\n\t" \ |
"jmp 1f\n" \ |
"1:\tjmp 1f\n" \ |
"1:\tmovb $0x20,%al\n\t" \ |
"outb %al,$0xA0\n\t" \ |
"jmp 1f\n" \ |
"1:\tjmp 1f\n" \ |
"1:\toutb %al,$0x20\n\t" |
 |
/* Re-enable an IRQ: clear its bit in the cached mask and write the
   mask back to the master (0x21) / slave (0xA1) PIC. */
#define UNBLK_FIRST(mask) \ |
"inb $0x21,%al\n\t" \ |
"jmp 1f\n" \ |
"1:\tjmp 1f\n" \ |
"1:\tandb $~(" #mask "),"SYMBOL_NAME_STR(cache_21)"\n\t" \ |
"movb "SYMBOL_NAME_STR(cache_21)",%al\n\t" \ |
"outb %al,$0x21\n\t" |
 |
#define UNBLK_SECOND(mask) \ |
"inb $0xA1,%al\n\t" \ |
"jmp 1f\n" \ |
"1:\tjmp 1f\n" \ |
"1:\tandb $~(" #mask "),"SYMBOL_NAME_STR(cache_A1)"\n\t" \ |
"movb "SYMBOL_NAME_STR(cache_A1)",%al\n\t" \ |
"outb %al,$0xA1\n\t" |
 |
/* Token-paste helpers producing the handler prototypes/names
   IRQ<n>_interrupt, fast_IRQ<n>_interrupt, bad_IRQ<n>_interrupt. */
#define IRQ_NAME2(nr) nr##_interrupt(void) |
#define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr) |
#define FAST_IRQ_NAME(nr) IRQ_NAME2(fast_IRQ##nr) |
#define BAD_IRQ_NAME(nr) IRQ_NAME2(bad_IRQ##nr) |
|
#ifdef __SMP__ |
|
#ifndef __SMP_PROF__ |
#define SMP_PROF_INT_SPINS |
#define SMP_PROF_IPI_CNT |
#else |
#define SMP_PROF_INT_SPINS "incl "SYMBOL_NAME_STR(smp_spins)"(,%eax,4)\n\t" |
#define SMP_PROF_IPI_CNT "incl "SYMBOL_NAME_STR(ipi_count)"\n\t" |
#endif |
|
/* Read this CPU's local APIC ID into %eax: byte 3 of the register at
   offset 32 of the memory-mapped APIC (apic_reg), masked to 4 bits.
   Clobbers %edx. */
#define GET_PROCESSOR_ID \ |
"movl "SYMBOL_NAME_STR(apic_reg)", %edx\n\t" \ |
"movl 32(%edx), %eax\n\t" \ |
"shrl $24,%eax\n\t" \ |
"andl $0x0F,%eax\n" \ |
 |
/* Take the big kernel lock (kernel_flag bit 0) on interrupt entry.
   While spinning it services pending TLB-flush requests
   (smp_invalidate_needed -> reload %cr3), lets the boot CPU keep
   jiffies ticking, and calls irq_deadlock_detected() after
   6000 * smp_loops_per_tick fruitless spins.  On success it records
   this CPU in active_kernel_processor, bumps kernel_counter, and
   marks the CPU as in-lock-from-interrupt (SMP_FROM_INT bit of
   smp_proc_in_lock).  Recursive entry by the lock holder falls
   straight through to label 4. */
#define ENTER_KERNEL \ |
"pushl %eax\n\t" \ |
"pushl %ebx\n\t" \ |
"pushl %ecx\n\t" \ |
"pushl %edx\n\t" \ |
"pushfl\n\t" \ |
"cli\n\t" \ |
"movl $6000, %ebx\n\t" \ |
"movl "SYMBOL_NAME_STR(smp_loops_per_tick)", %ecx\n\t" \ |
GET_PROCESSOR_ID \ |
"btsl $" STR(SMP_FROM_INT) ","SYMBOL_NAME_STR(smp_proc_in_lock)"(,%eax,4)\n\t" \ |
"1: " \ |
"lock\n\t" \ |
"btsl $0, "SYMBOL_NAME_STR(kernel_flag)"\n\t" \ |
"jnc 3f\n\t" \ |
"cmpb "SYMBOL_NAME_STR(active_kernel_processor)", %al\n\t" \ |
"je 4f\n\t" \ |
"cmpb "SYMBOL_NAME_STR(boot_cpu_id)", %al\n\t" \ |
"jne 2f\n\t" \ |
"movb $1, "SYMBOL_NAME_STR(smp_blocked_interrupt_pending)"\n\t" \ |
"2: " \ |
SMP_PROF_INT_SPINS \ |
"btl %eax, "SYMBOL_NAME_STR(smp_invalidate_needed)"\n\t" \ |
"jnc 5f\n\t" \ |
"lock\n\t" \ |
"btrl %eax, "SYMBOL_NAME_STR(smp_invalidate_needed)"\n\t" \ |
"jnc 5f\n\t" \ |
"movl %cr3,%edx\n\t" \ |
"movl %edx,%cr3\n" \ |
"5: btl $0, "SYMBOL_NAME_STR(kernel_flag)"\n\t" \ |
"jnc 1b\n\t" \ |
"cmpb "SYMBOL_NAME_STR(active_kernel_processor)", %al\n\t" \ |
"je 4f\n\t" \ |
"decl %ecx\n\t" \ |
"jne 2b\n\t" \ |
"decl %ebx\n\t" \ |
"jne 6f\n\t" \ |
"call "SYMBOL_NAME_STR(irq_deadlock_detected)"\n\t" \ |
"6: movl "SYMBOL_NAME_STR(smp_loops_per_tick)", %ecx\n\t" \ |
"cmpb "SYMBOL_NAME_STR(boot_cpu_id)", %al\n\t" \ |
"jne 2b\n\t" \ |
"incl "SYMBOL_NAME_STR(jiffies)"\n\t" \ |
"jmp 2b\n\t" \ |
"3: " \ |
"movb %al, "SYMBOL_NAME_STR(active_kernel_processor)"\n\t" \ |
"4: " \ |
"incl "SYMBOL_NAME_STR(kernel_counter)"\n\t" \ |
"cmpb "SYMBOL_NAME_STR(boot_cpu_id)", %al\n\t" \ |
"jne 7f\n\t" \ |
"movb $0, "SYMBOL_NAME_STR(smp_blocked_interrupt_pending)"\n\t" \ |
"7: " \ |
"popfl\n\t" \ |
"popl %edx\n\t" \ |
"popl %ecx\n\t" \ |
"popl %ebx\n\t" \ |
"popl %eax\n\t" |
 |
/* Drop one level of the kernel lock: clear the FROM_INT bit, decrement
   kernel_counter, and when it reaches zero restore the saved active
   processor and (if none) release kernel_flag bit 0. */
#define LEAVE_KERNEL \ |
GET_PROCESSOR_ID \ |
"btrl $" STR(SMP_FROM_INT) ","SYMBOL_NAME_STR(smp_proc_in_lock)"(,%eax,4)\n\t" \ |
"pushfl\n\t" \ |
"cli\n\t" \ |
"decl "SYMBOL_NAME_STR(kernel_counter)"\n\t" \ |
"jnz 1f\n\t" \ |
"movb "SYMBOL_NAME_STR(saved_active_kernel_processor)",%al\n\t" \ |
"movb %al,"SYMBOL_NAME_STR(active_kernel_processor)"\n\t" \ |
"cmpb $" STR (NO_PROC_ID) ",%al\n\t" \ |
"jne 1f\n\t" \ |
"lock\n\t" \ |
"btrl $0, "SYMBOL_NAME_STR(kernel_flag)"\n\t" \ |
"1: " \ |
"popfl\n\t" |
|
|
/* |
* the syscall count inc is a gross hack because ret_from_syscall is used by both irq and |
* syscall return paths (urghh). |
*/ |
|
/* SMP: emit three asm entry points for IRQ nr -- IRQ<nr>_interrupt
   (full frame, calls do_IRQ, exits via ret_from_sys_call),
   fast_IRQ<nr>_interrupt (reduced frame, calls do_fast_IRQ, irets)
   and bad_IRQ<nr>_interrupt (ack only).  `chip` selects the
   ACK_/UNBLK_ macro pair (FIRST or SECOND PIC). */
#define BUILD_IRQ(chip,nr,mask) \ |
asmlinkage void IRQ_NAME(nr); \ |
asmlinkage void FAST_IRQ_NAME(nr); \ |
asmlinkage void BAD_IRQ_NAME(nr); \ |
__asm__( \ |
"\n"__ALIGN_STR"\n" \ |
SYMBOL_NAME_STR(IRQ) #nr "_interrupt:\n\t" \ |
"pushl $-"#nr"-2\n\t" \ |
SAVE_ALL \ |
ENTER_KERNEL \ |
ACK_##chip(mask,(nr&7)) \ |
"incl "SYMBOL_NAME_STR(intr_count)"\n\t"\ |
"sti\n\t" \ |
"movl %esp,%ebx\n\t" \ |
"pushl %ebx\n\t" \ |
"pushl $" #nr "\n\t" \ |
"call "SYMBOL_NAME_STR(do_IRQ)"\n\t" \ |
"addl $8,%esp\n\t" \ |
"cli\n\t" \ |
UNBLK_##chip(mask) \ |
"decl "SYMBOL_NAME_STR(intr_count)"\n\t" \ |
"incl "SYMBOL_NAME_STR(syscall_count)"\n\t" \ |
"jmp ret_from_sys_call\n" \ |
"\n"__ALIGN_STR"\n" \ |
SYMBOL_NAME_STR(fast_IRQ) #nr "_interrupt:\n\t" \ |
SAVE_MOST \ |
ENTER_KERNEL \ |
ACK_##chip(mask,(nr&7)) \ |
"incl "SYMBOL_NAME_STR(intr_count)"\n\t" \ |
"pushl $" #nr "\n\t" \ |
"call "SYMBOL_NAME_STR(do_fast_IRQ)"\n\t" \ |
"addl $4,%esp\n\t" \ |
"cli\n\t" \ |
UNBLK_##chip(mask) \ |
"decl "SYMBOL_NAME_STR(intr_count)"\n\t" \ |
LEAVE_KERNEL \ |
RESTORE_MOST \ |
"\n"__ALIGN_STR"\n" \ |
SYMBOL_NAME_STR(bad_IRQ) #nr "_interrupt:\n\t" \ |
SAVE_MOST \ |
ENTER_KERNEL \ |
ACK_##chip(mask,(nr&7)) \ |
LEAVE_KERNEL \ |
RESTORE_MOST); |
 |
 |
/* SMP timer IRQ: all three entry names alias one full-frame handler
   that calls do_IRQ with interrupts kept disabled. */
#define BUILD_TIMER_IRQ(chip,nr,mask) \ |
asmlinkage void IRQ_NAME(nr); \ |
asmlinkage void FAST_IRQ_NAME(nr); \ |
asmlinkage void BAD_IRQ_NAME(nr); \ |
__asm__( \ |
"\n"__ALIGN_STR"\n" \ |
SYMBOL_NAME_STR(fast_IRQ) #nr "_interrupt:\n\t" \ |
SYMBOL_NAME_STR(bad_IRQ) #nr "_interrupt:\n\t" \ |
SYMBOL_NAME_STR(IRQ) #nr "_interrupt:\n\t" \ |
"pushl $-"#nr"-2\n\t" \ |
SAVE_ALL \ |
ENTER_KERNEL \ |
ACK_##chip(mask,(nr&7)) \ |
"incl "SYMBOL_NAME_STR(intr_count)"\n\t"\ |
"movl %esp,%ebx\n\t" \ |
"pushl %ebx\n\t" \ |
"pushl $" #nr "\n\t" \ |
"call "SYMBOL_NAME_STR(do_IRQ)"\n\t" \ |
"addl $8,%esp\n\t" \ |
"cli\n\t" \ |
UNBLK_##chip(mask) \ |
"decl "SYMBOL_NAME_STR(intr_count)"\n\t" \ |
"incl "SYMBOL_NAME_STR(syscall_count)"\n\t" \ |
"jmp ret_from_sys_call\n"); |
 |
 |
/* |
* Message pass must be a fast IRQ.. |
*/ |
 |
/* IPI message handler: fast path only, uses MSGACK (which leaves the
   PIC mask cache alone) and never takes the kernel lock. */
#define BUILD_MSGIRQ(chip,nr,mask) \ |
asmlinkage void IRQ_NAME(nr); \ |
asmlinkage void FAST_IRQ_NAME(nr); \ |
asmlinkage void BAD_IRQ_NAME(nr); \ |
__asm__( \ |
"\n"__ALIGN_STR"\n" \ |
SYMBOL_NAME_STR(IRQ) #nr "_interrupt:\n\t" \ |
SYMBOL_NAME_STR(fast_IRQ) #nr "_interrupt:\n\t" \ |
SAVE_MOST \ |
MSGACK_##chip(mask,(nr&7)) \ |
SMP_PROF_IPI_CNT \ |
"pushl $" #nr "\n\t" \ |
"call "SYMBOL_NAME_STR(do_fast_IRQ)"\n\t" \ |
"addl $4,%esp\n\t" \ |
"cli\n\t" \ |
RESTORE_MOST \ |
"\n"__ALIGN_STR"\n" \ |
SYMBOL_NAME_STR(bad_IRQ) #nr "_interrupt:\n\t" \ |
SAVE_MOST \ |
ACK_##chip(mask,(nr&7)) \ |
RESTORE_MOST); |
 |
/* Reschedule IPI: full frame, no PIC ack (APIC-delivered), calls
   smp_reschedule_irq and exits via ret_from_sys_call. */
#define BUILD_RESCHEDIRQ(nr) \ |
asmlinkage void IRQ_NAME(nr); \ |
__asm__( \ |
"\n"__ALIGN_STR"\n" \ |
SYMBOL_NAME_STR(IRQ) #nr "_interrupt:\n\t" \ |
"pushl $-"#nr"-2\n\t" \ |
SAVE_ALL \ |
ENTER_KERNEL \ |
"incl "SYMBOL_NAME_STR(intr_count)"\n\t"\ |
"sti\n\t" \ |
"movl %esp,%ebx\n\t" \ |
"pushl %ebx\n\t" \ |
"pushl $" #nr "\n\t" \ |
"call "SYMBOL_NAME_STR(smp_reschedule_irq)"\n\t" \ |
"addl $8,%esp\n\t" \ |
"cli\n\t" \ |
"decl "SYMBOL_NAME_STR(intr_count)"\n\t" \ |
"incl "SYMBOL_NAME_STR(syscall_count)"\n\t" \ |
"jmp ret_from_sys_call\n"); |
#else |
|
/* UP variants of the IRQ stub builders: identical to the SMP versions
   above minus the ENTER_KERNEL/LEAVE_KERNEL locking and the
   syscall_count hack. */
#define BUILD_IRQ(chip,nr,mask) \ |
asmlinkage void IRQ_NAME(nr); \ |
asmlinkage void FAST_IRQ_NAME(nr); \ |
asmlinkage void BAD_IRQ_NAME(nr); \ |
__asm__( \ |
"\n"__ALIGN_STR"\n" \ |
SYMBOL_NAME_STR(IRQ) #nr "_interrupt:\n\t" \ |
"pushl $-"#nr"-2\n\t" \ |
SAVE_ALL \ |
ACK_##chip(mask,(nr&7)) \ |
"incl "SYMBOL_NAME_STR(intr_count)"\n\t"\ |
"sti\n\t" \ |
"movl %esp,%ebx\n\t" \ |
"pushl %ebx\n\t" \ |
"pushl $" #nr "\n\t" \ |
"call "SYMBOL_NAME_STR(do_IRQ)"\n\t" \ |
"addl $8,%esp\n\t" \ |
"cli\n\t" \ |
UNBLK_##chip(mask) \ |
"decl "SYMBOL_NAME_STR(intr_count)"\n\t" \ |
"jmp ret_from_sys_call\n" \ |
"\n"__ALIGN_STR"\n" \ |
SYMBOL_NAME_STR(fast_IRQ) #nr "_interrupt:\n\t" \ |
SAVE_MOST \ |
ACK_##chip(mask,(nr&7)) \ |
"incl "SYMBOL_NAME_STR(intr_count)"\n\t" \ |
"pushl $" #nr "\n\t" \ |
"call "SYMBOL_NAME_STR(do_fast_IRQ)"\n\t" \ |
"addl $4,%esp\n\t" \ |
"cli\n\t" \ |
UNBLK_##chip(mask) \ |
"decl "SYMBOL_NAME_STR(intr_count)"\n\t" \ |
RESTORE_MOST \ |
"\n"__ALIGN_STR"\n" \ |
SYMBOL_NAME_STR(bad_IRQ) #nr "_interrupt:\n\t" \ |
SAVE_MOST \ |
ACK_##chip(mask,(nr&7)) \ |
RESTORE_MOST); |
 |
/* UP timer IRQ: all three entry names alias one full-frame handler. */
#define BUILD_TIMER_IRQ(chip,nr,mask) \ |
asmlinkage void IRQ_NAME(nr); \ |
asmlinkage void FAST_IRQ_NAME(nr); \ |
asmlinkage void BAD_IRQ_NAME(nr); \ |
__asm__( \ |
"\n"__ALIGN_STR"\n" \ |
SYMBOL_NAME_STR(fast_IRQ) #nr "_interrupt:\n\t" \ |
SYMBOL_NAME_STR(bad_IRQ) #nr "_interrupt:\n\t" \ |
SYMBOL_NAME_STR(IRQ) #nr "_interrupt:\n\t" \ |
"pushl $-"#nr"-2\n\t" \ |
SAVE_ALL \ |
ACK_##chip(mask,(nr&7)) \ |
"incl "SYMBOL_NAME_STR(intr_count)"\n\t"\ |
"movl %esp,%ebx\n\t" \ |
"pushl %ebx\n\t" \ |
"pushl $" #nr "\n\t" \ |
"call "SYMBOL_NAME_STR(do_IRQ)"\n\t" \ |
"addl $8,%esp\n\t" \ |
"cli\n\t" \ |
UNBLK_##chip(mask) \ |
"decl "SYMBOL_NAME_STR(intr_count)"\n\t" \ |
"jmp ret_from_sys_call\n"); |
|
#endif |
#endif |
/ioctls.h
0,0 → 1,76
#ifndef __ARCH_I386_IOCTLS_H__
#define __ARCH_I386_IOCTLS_H__

#include <asm/ioctl.h>

/* 0x54 is just a magic number to make these relatively unique ('T') */

/* termios/termio get/set (S variants use struct termios, A variants
 * struct termio) and POSIX break/flow/flush control */
#define TCGETS		0x5401
#define TCSETS		0x5402
#define TCSETSW		0x5403
#define TCSETSF		0x5404
#define TCGETA		0x5405
#define TCSETA		0x5406
#define TCSETAW		0x5407
#define TCSETAF		0x5408
#define TCSBRK		0x5409
#define TCXONC		0x540A
#define TCFLSH		0x540B

/* tty control: exclusive mode, controlling tty, process group, queues,
 * window size, modem lines, line discipline */
#define TIOCEXCL	0x540C
#define TIOCNXCL	0x540D
#define TIOCSCTTY	0x540E
#define TIOCGPGRP	0x540F
#define TIOCSPGRP	0x5410
#define TIOCOUTQ	0x5411
#define TIOCSTI		0x5412
#define TIOCGWINSZ	0x5413
#define TIOCSWINSZ	0x5414
#define TIOCMGET	0x5415
#define TIOCMBIS	0x5416
#define TIOCMBIC	0x5417
#define TIOCMSET	0x5418
#define TIOCGSOFTCAR	0x5419
#define TIOCSSOFTCAR	0x541A
#define FIONREAD	0x541B
#define TIOCINQ		FIONREAD
#define TIOCLINUX	0x541C
#define TIOCCONS	0x541D
#define TIOCGSERIAL	0x541E
#define TIOCSSERIAL	0x541F
#define TIOCPKT		0x5420
#define FIONBIO		0x5421
#define TIOCNOTTY	0x5422
#define TIOCSETD	0x5423
#define TIOCGETD	0x5424
#define TCSBRKP		0x5425	/* Needed for POSIX tcsendbreak() */
#define TIOCTTYGSTRUCT	0x5426  /* For debugging only */
#define TIOCSBRK	0x5427  /* BSD compatibility */
#define TIOCCBRK	0x5428  /* BSD compatibility */

/* file and serial-driver ioctls (0x5450 range) */
#define FIONCLEX	0x5450  /* these numbers need to be adjusted. */
#define FIOCLEX		0x5451
#define FIOASYNC	0x5452
#define TIOCSERCONFIG	0x5453
#define TIOCSERGWILD	0x5454
#define TIOCSERSWILD	0x5455
#define TIOCGLCKTRMIOS	0x5456
#define TIOCSLCKTRMIOS	0x5457
#define TIOCSERGSTRUCT	0x5458 /* For debugging only */
#define TIOCSERGETLSR   0x5459 /* Get line status register */
#define TIOCSERGETMULTI 0x545A /* Get multiport config */
#define TIOCSERSETMULTI 0x545B /* Set multiport config */

#define TIOCMIWAIT	0x545C	/* wait for a change on serial input line(s) */
#define TIOCGICOUNT	0x545D	/* read serial port inline interrupt counts */

/* Used for packet mode (values reported in the TIOCPKT status byte) */
#define TIOCPKT_DATA		 0
#define TIOCPKT_FLUSHREAD	 1
#define TIOCPKT_FLUSHWRITE	 2
#define TIOCPKT_STOP		 4
#define TIOCPKT_START		 8
#define TIOCPKT_NOSTOP		16
#define TIOCPKT_DOSTOP		32

#define TIOCSER_TEMT    0x01	/* Transmitter physically empty */

#endif
/delay.h
0,0 → 1,63
#ifndef _I386_DELAY_H |
#define _I386_DELAY_H |
|
/* |
* Copyright (C) 1993 Linus Torvalds |
* |
* Delay routines, using a pre-computed "loops_per_second" value. |
*/ |
|
#include <linux/linkage.h> |
|
#ifdef __SMP__ |
#include <asm/smp.h> |
#endif |
|
extern void __do_delay(void); /* Special register call calling convention */ |
|
extern __inline__ void __delay(int loops) |
{ |
__asm__ __volatile__( |
"call " SYMBOL_NAME_STR(__do_delay) |
:/* no outputs */ |
:"a" (loops) |
:"ax"); |
} |
|
/* |
* division by multiplication: you don't have to worry about |
* loss of precision. |
* |
* Use only for very small delays ( < 1 msec). Should probably use a |
* lookup table, really, as the multiplications take much too long with |
* short delays. This is a "reasonable" implementation, though (and the |
* first constant multiplications gets optimized away if the delay is |
* a constant) |
*/ |
extern __inline__ void udelay(unsigned long usecs) |
{ |
usecs *= 0x000010c6; /* 2**32 / 1000000 */ |
__asm__("mull %0" |
:"=d" (usecs) |
#ifdef __SMP__ |
:"a" (usecs),"0" (cpu_data[smp_processor_id()].udelay_val) |
#else |
:"a" (usecs),"0" (loops_per_sec) |
#endif |
:"ax"); |
|
__delay(usecs); |
} |
|
extern __inline__ unsigned long muldiv(unsigned long a, unsigned long b, unsigned long c) |
{ |
__asm__("mull %1 ; divl %2" |
:"=a" (a) |
:"d" (b), |
"r" (c), |
"0" (a) |
:"dx"); |
return a; |
} |
|
#endif /* defined(_I386_DELAY_H) */ |
/vm86.h
0,0 → 1,175
#ifndef _LINUX_VM86_H |
#define _LINUX_VM86_H |
|
/* |
* I'm guessing at the VIF/VIP flag usage, but hope that this is how |
* the Pentium uses them. Linux will return from vm86 mode when both |
* VIF and VIP is set. |
* |
* On a Pentium, we could probably optimize the virtual flags directly |
* in the eflags register instead of doing it "by hand" in vflags... |
* |
* Linus |
*/ |
|
/* EFLAGS bits of interest to the vm86 code */
#define TF_MASK		0x00000100	/* trap (single-step) flag */
#define IF_MASK		0x00000200	/* interrupt enable flag */
#define IOPL_MASK	0x00003000	/* I/O privilege level (2 bits) */
#define NT_MASK		0x00004000	/* nested task flag */
#define VM_MASK		0x00020000	/* virtual-8086 mode flag */
#define AC_MASK		0x00040000	/* alignment check flag */
#define VIF_MASK	0x00080000	/* virtual interrupt flag */
#define VIP_MASK	0x00100000	/* virtual interrupt pending */
#define ID_MASK		0x00200000	/* CPUID-available detection flag */

#define BIOSSEG		0x0f000		/* real-mode segment of the BIOS */

/* values for vm86_struct.cpu_type: which CPU to emulate/report */
#define CPU_086		0
#define CPU_186		1
#define CPU_286		2
#define CPU_386		3
#define CPU_486		4
#define CPU_586		5
|
/* |
* Return values for the 'vm86()' system call |
*/ |
#define VM86_TYPE(retval) ((retval) & 0xff) |
#define VM86_ARG(retval) ((retval) >> 8) |
|
#define VM86_SIGNAL 0 /* return due to signal */ |
#define VM86_UNKNOWN 1 /* unhandled GP fault - IO-instruction or similar */ |
#define VM86_INTx 2 /* int3/int x instruction (ARG = x) */ |
#define VM86_STI 3 /* sti/popf/iret instruction enabled virtual interrupts */ |
|
/* |
* Additional return values when invoking new vm86() |
*/ |
#define VM86_PICRETURN 4 /* return due to pending PIC request */ |
#define VM86_TRAP 6 /* return due to DOS-debugger request */ |
|
/* |
* function codes when invoking new vm86() |
*/ |
#define VM86_PLUS_INSTALL_CHECK 0 |
#define VM86_ENTER 1 |
#define VM86_ENTER_NO_BYPASS 2 |
#define VM86_REQUEST_IRQ 3 |
#define VM86_FREE_IRQ 4 |
#define VM86_GET_IRQ_BITS 5 |
#define VM86_GET_AND_RESET_IRQ 6 |
|
/* |
* This is the stack-layout when we have done a "SAVE_ALL" from vm86 |
* mode - the main change is that the old segment descriptors aren't |
* useful any more and are forced to be zero by the kernel (and the |
* hardware when a trap occurs), and the real segment descriptors are |
* at the end of the structure. Look at ptrace.h to see the "normal" |
* setup. |
*/ |
|
/*
 * Register image saved on the kernel stack for a task running in vm86
 * mode (see the layout comment above): the protected-mode segment slots
 * are zeroed, and the real-mode segment registers follow at the end.
 * Layout is ABI -- do not reorder or resize fields.
 */
struct vm86_regs {
/*
 * normal regs, with special meaning for the segment descriptors..
 */
	long ebx;
	long ecx;
	long edx;
	long esi;
	long edi;
	long ebp;
	long eax;
	long __null_ds;			/* forced to zero (see comment above) */
	long __null_es;
	long __null_fs;
	long __null_gs;
	long orig_eax;
	unsigned short cs, __csh;	/* __*h: unused high half of the 32-bit slot */
	long eflags;
	long esp;
	unsigned short ss, __ssh;
/*
 * these are specific to v86 mode:
 */
	unsigned short es, __esh;
	unsigned short ds, __dsh;
	unsigned short fs, __fsh;
	unsigned short gs, __gsh;
};
|
/* One bit per interrupt vector (256 bits total); presumably marks
 * vectors the 32-bit monitor handles itself instead of reflecting
 * into vm86 mode -- confirm against the vm86 fault handler. */
struct revectored_struct {
	unsigned long __map[8];		/* 256 bits */
};

/* User-visible argument block of the original vm86() system call. */
struct vm86_struct {
	struct vm86_regs regs;
	unsigned long flags;		/* e.g. VM86_SCREEN_BITMAP below */
	unsigned long screen_bitmap;
	unsigned long cpu_type;		/* CPU_086 .. CPU_586 */
	struct revectored_struct int_revectored;
	struct revectored_struct int21_revectored;
};
|
/* |
* flags masks |
*/ |
#define VM86_SCREEN_BITMAP 0x0001 |
|
/* Extra control/state bits used by the "vm86plus" interface. */
struct vm86plus_info_struct {
	unsigned long force_return_for_pic:1;	/* return on pending PIC request (VM86_PICRETURN) */
	unsigned long vm86dbg_active:1;		/* for debugger */
	unsigned long vm86dbg_TFpendig:1;		/* for debugger */
	unsigned long unused:28;
	unsigned long is_vm86pus:1;		/* for vm86 internal use */
	unsigned char vm86dbg_intxxtab[32];	/* for debugger */
};

/* vm86_struct plus the vm86plus control area; argument block of the
 * newer vm86() entry points (VM86_ENTER etc. above). */
struct vm86plus_struct {
	struct vm86_regs regs;
	unsigned long flags;
	unsigned long screen_bitmap;
	unsigned long cpu_type;
	struct revectored_struct int_revectored;
	struct revectored_struct int21_revectored;
	struct vm86plus_info_struct vm86plus;
};
|
#ifdef __KERNEL__ |
|
/* Kernel-side image of the vm86 state; the leading members mirror
 * struct vm86plus_struct so user data can be copied in wholesale. */
struct kernel_vm86_struct {
	struct vm86_regs regs;
/*
 * the below part remains on the kernel stack while we are in VM86 mode.
 * 'tss.esp0' then contains the address of VM86_TSS_ESP0 below, and when we
 * get forced back from VM86, the CPU and "SAVE_ALL" will restore the above
 * 'struct kernel_vm86_regs' with the then actual values.
 * Therefore, pt_regs in fact points to a complete 'kernel_vm86_struct'
 * in kernelspace, hence we need not reget the data from userspace.
 */
#define VM86_TSS_ESP0 flags		/* first member below 'regs' */
	unsigned long flags;
	unsigned long screen_bitmap;
	unsigned long cpu_type;
	struct revectored_struct int_revectored;
	struct revectored_struct int21_revectored;
	struct vm86plus_info_struct vm86plus;
	struct pt_regs *regs32;		/* here we save the pointer to the old regs */
/*
 * The below is not part of the structure, but the stack layout continues
 * this way. In front of 'return-eip' may be some data, depending on
 * compilation, so we don't rely on this and save the pointer to 'oldregs'
 * in 'regs32' above.
 * However, with GCC-2.7.2 and the current CFLAGS you see exactly this:

	long return-eip;        from call to vm86()
	struct pt_regs oldregs;  user space registers as saved by syscall
 */
};
|
/* vm86 fault/trap handlers, implemented elsewhere in the kernel. */
void handle_vm86_fault(struct vm86_regs *, long);
int handle_vm86_trap(struct vm86_regs *, long, int);
|
#endif /* __KERNEL__ */ |
|
#endif |
/bugs.h
0,0 → 1,378
/* |
* include/asm-i386/bugs.h |
* |
* Copyright (C) 1994 Linus Torvalds |
*/ |
|
/* |
* This is included by init/main.c to check for architecture-dependent bugs. |
* |
* Needs: |
* void check_bugs(void); |
*/ |
|
#include <linux/config.h> |
|
#define CONFIG_BUGi386 |
|
/*
 * Mark the 'hlt' instruction as not working so check_hlt() (and the
 * idle code that consults hlt_works_ok) will avoid it.  The (s, ints)
 * signature matches the kernel's boot-option callback convention --
 * NOTE(review): registration happens elsewhere; confirm against caller.
 */
static void no_halt(char *s, int *ints)
{
	hlt_works_ok = 0;
}
|
/*
 * Pretend there is no FPU: clear hard_math and set CR0 bits MP|EM|TS
 * (0xE) so that floating-point instructions trap, letting the math
 * emulator take over.
 */
static void no_387(char *s, int *ints)
{
	hard_math = 0;
	__asm__("movl %%cr0,%%eax\n\t"
		"orl $0xE,%%eax\n\t"
		"movl %%eax,%%cr0\n\t" : : : "ax");
}
|
/* set when the coprocessor watchdog below fires; read by check_fpu() */
static char fpu_error = 0;

/*
 * Watchdog for check_fpu(): runs when no FP exception arrived in time.
 * Records the failure, re-arms itself 100 ticks later, signals the
 * current FPU owner, and writes ports 0xf1/0xf0 (the legacy AT 387
 * reset ports -- NOTE(review): confirm port semantics) to reset it.
 */
static void copro_timeout(void)
{
	fpu_error = 1;
	timer_table[COPRO_TIMER].expires = jiffies+100;	/* re-arm the watchdog */
	timer_active |= 1<<COPRO_TIMER;
	printk("387 failed: trying to reset\n");
	send_sig(SIGFPE, last_task_used_math, 1);
	outb_p(0,0xf1);
	outb_p(0,0xf0);
}
|
/*
 * FPU sanity checks: give up if there is neither hardware FP nor math
 * emulation; determine whether FP errors arrive via exception 16 or
 * the legacy IRQ13 path; finally test for the Pentium FDIV bug.
 */
static void check_fpu(void)
{
	/* canonical FDIV-bug operand pair: (x/y)*y - x is nonzero on buggy parts */
	static double x = 4195835.0;
	static double y = 3145727.0;
	unsigned short control_word;

	if (!hard_math) {
#ifndef CONFIG_MATH_EMULATION
		printk("No coprocessor found and no math emulation present.\n");
		printk("Giving up.\n");
		for (;;) ;
#endif
		return;
	}
	/*
	 * check if exception 16 works correctly.. This is truly evil
	 * code: it disables the high 8 interrupts to make sure that
	 * the irq13 doesn't happen. But as this will lead to a lockup
	 * if no exception16 arrives, it depends on the fact that the
	 * high 8 interrupts will be re-enabled by the next timer tick.
	 * So the irq13 will happen eventually, but the exception 16
	 * should get there first..
	 */
	printk("Checking 386/387 coupling... ");
	/* arm the watchdog: copro_timeout() fires if no FP exception comes */
	timer_table[COPRO_TIMER].expires = jiffies+50;
	timer_table[COPRO_TIMER].fn = copro_timeout;
	timer_active |= 1<<COPRO_TIMER;
	__asm__("clts ; fninit ; fnstcw %0 ; fwait":"=m" (*&control_word));
	control_word &= 0xffc0;		/* clear the low 6 bits: unmask all FP exceptions */
	__asm__("fldcw %0 ; fwait": :"m" (*&control_word));
	/* mask IRQ2 (the cascade) on the master PIC: blocks IRQs 8-15 */
	outb_p(inb_p(0x21) | (1 << 2), 0x21);
	/* divide with 0 and 1 on the stack, intended to raise a divide
	 * exception (NOTE(review): AT&T fdiv operand order is subtle) */
	__asm__("fldz ; fld1 ; fdiv %st,%st(1) ; fwait");
	timer_active &= ~(1<<COPRO_TIMER);	/* exception arrived: disarm watchdog */
	if (fpu_error)
		return;
	if (!ignore_irq13) {
		printk("Ok, fpu using old IRQ13 error reporting\n");
		return;
	}
	/* FDIV bug probe: fdiv_bug gets (x/y)*y - x rounded to an integer */
	__asm__("fninit\n\t"
		"fldl %1\n\t"
		"fdivl %2\n\t"
		"fmull %2\n\t"
		"fldl %1\n\t"
		"fsubp %%st,%%st(1)\n\t"
		"fistpl %0\n\t"
		"fwait\n\t"
		"fninit"
		: "=m" (*&fdiv_bug)
		: "m" (*&x), "m" (*&y));
	if (!fdiv_bug) {
		printk("Ok, fpu using exception 16 error reporting.\n");
		return;

	}
	printk("Hmm, FDIV bug i%c86 system\n", '0'+x86);
}
|
static void check_hlt(void) |
{ |
printk("Checking 'hlt' instruction... "); |
if (!hlt_works_ok) { |
printk("disabled\n"); |
return; |
} |
__asm__ __volatile__("hlt ; hlt ; hlt ; hlt"); |
printk("Ok.\n"); |
} |
|
/*
 * A kernel built for a 486 or better uses 'invlpg', which a 386 does
 * not implement -- refuse to run on a 386 in that case.
 */
static void check_tlb(void)
{
#ifndef CONFIG_M386
	/*
	 * The 386 chips don't support TLB finegrained invalidation.
	 * They will fault when they hit a invlpg instruction.
	 */
	if (x86 == 3) {
		printk("CPU is a 386 and this kernel was compiled for 486 or better.\n");
		printk("Giving up.\n");
		for (;;) ;	/* halt boot here forever */
	}
#endif
}
|
/* |
* All current models of Pentium and Pentium with MMX technology CPUs |
* have the F0 0F bug, which lets nonpriviledged users lock up the system: |
*/ |
extern int pentium_f00f_bug; |
extern void trap_init_f00f_bug(void); |
|
/* |
* Access to machine-specific registers (available on 586 and better only) |
* Note: the rd* operations modify the parameters directly (without using |
* pointer indirection), this allows gcc to optimize better |
* Code from Richard Gooch's 2.2 MTRR drivers. |
*/ |
|
/* read MSR 'msr' (selected via %ecx) into val1 (%eax, low 32 bits)
 * and val2 (%edx, high 32 bits) */
#define rdmsr(msr,val1,val2) \
       __asm__ __volatile__("rdmsr" \
			    : "=a" (val1), "=d" (val2) \
			    : "c" (msr))

/* write val2:val1 (%edx:%eax) into MSR 'msr' */
#define wrmsr(msr,val1,val2) \
     __asm__ __volatile__("wrmsr" \
			  : /* no outputs */ \
			  : "c" (msr), "a" (val1), "d" (val2))
|
|
/*
 * Enable the F0 0F lockup-erratum workaround on every Intel family-5
 * part (Pentium / Pentium MMX): set the global flag and let
 * trap_init_f00f_bug() (defined elsewhere) install the actual fix.
 */
static void check_pentium_f00f(void)
{
	/*
	 * Pentium and Pentium MMX
	 */
	pentium_f00f_bug = 0;
	if (x86==5 && !memcmp(x86_vendor_id, "GenuineIntel", 12)) {
		printk(KERN_INFO "Intel Pentium with F0 0F bug - workaround enabled.\n");
		pentium_f00f_bug = 1;
		trap_init_f00f_bug();
	}
}
|
/*
 * Disable the processor serial number on Intel CPUs that advertise it
 * (CPUID feature bit 18): set bit 21 of MSR 0x119 to turn it off.
 */
static void check_privacy(void)
{
	/*
	 * Pentium III or higher - processors with mtrrs/cpuid
	 */
	if(memcmp(x86_vendor_id, "GenuineIntel", 12))
		return;
	if(x86_capability & (1<<18))	/* serial-number feature present */
	{
		/*
		 * Thanks to Phil Karn for this bit.
		 */
		unsigned long lo,hi;
		rdmsr(0x119,lo,hi);
		lo |= 0x200000;		/* bit 21: serial number disable */
		wrmsr(0x119,lo,hi);
		printk(KERN_INFO "Pentium-III serial number disabled.\n");
	}
}
|
/* |
* B step AMD K6 before B 9730xxxx have hardware bugs that can cause |
* misexecution of code under Linux. Owners of such processors should |
* contact AMD for precise details and a (free) CPU exchange. |
* |
* See http://www.chorus.com/~poulot/k6bug.html |
* http://www.amd.com/K6/k6docs/revgd.html |
* |
* The following test is erm... interesting. AMD neglected to up |
* the chip stepping when fixing the bug but they also tweaked some |
* performance at the same time... |
*/ |
|
/* empty function used as an indirect-call timing target by check_k6_bug() */
extern void vide(void);
__asm__(".align 4\nvide: ret");
|
static void check_k6_bug(void) |
{ |
|
if ((strcmp(x86_vendor_id, "AuthenticAMD") == 0) && |
(x86_model == 6) && (x86_mask == 1)) |
{ |
int n; |
void (*f_vide)(void); |
unsigned long d, d2; |
|
printk(KERN_INFO "AMD K6 stepping B detected - "); |
|
#define K6_BUG_LOOP 1000000 |
|
/* |
* It looks like AMD fixed the 2.6.2 bug and improved indirect |
* calls at the same time. |
*/ |
|
n = K6_BUG_LOOP; |
f_vide = vide; |
__asm__ ("rdtsc" : "=a" (d)); |
while (n--) |
f_vide(); |
__asm__ ("rdtsc" : "=a" (d2)); |
d = d2-d; |
|
if (d > 20*K6_BUG_LOOP) { |
printk("system stability may be impaired when more than 32 MB are used.\n"); |
} |
else |
printk("probably OK (after B9730xxxx).\n"); |
} |
} |
|
/* Cyrix stuff from this point on */ |
|
/* Cyrix 5/2 test (return 0x200 if it's a Cyrix) */ |
/* Cyrix 5/2 test (return 0x200 if it's a Cyrix): Cyrix parts preserve
 * the flags across the 5/2 divide, so AH (reloaded from the flags by
 * lahf) still holds what sahf stored.
 *
 * Fix: the output constraint was "=eax", which GCC parses as the three
 * constraint letters 'e', 'a', 'x' and only worked by accident; the
 * correct single-register constraint for %eax is "=a". */
static inline int test_cyrix_52div(void)
{
	int test;

	__asm__ __volatile__("xor %%eax,%%eax\n\t"
	     "sahf\n\t"
	     "movb $5,%%al\n\t"
	     "movb $2,%%bl\n\t"
	     "div %%bl\n\t"
	     "lahf\n\t"
	     "andl $0xff00,%%eax": "=a" (test) : : "bx");

	return test;
}
|
/* test for CCR3 bit 7 r/w */ |
/* test for CCR3 bit 7 r/w: returns nonzero when the toggled bit reads
 * back changed (CCR3 is writable, i.e. the part has DIR0/DIR1), zero
 * otherwise; CCR3 is restored before returning */
static char test_cyrix_cr3rw(void)
{
	char temp, test;

	temp = getCx86(CX86_CCR3);		/* get current CCR3 value */
	setCx86(CX86_CCR3, temp ^ 0x80);	/* toggle test bit and write */
	getCx86(0xc0);				/* dummy to change bus */
	test = temp - getCx86(CX86_CCR3);	/* != 0 if ccr3 r/w */
	setCx86(CX86_CCR3, temp);		/* return CCR3 to original value */

	return test;
}
|
/* redo the cpuid test in head.S, so that those 6x86(L) now get |
detected properly (0 == no cpuid) */ |
/* redo the cpuid test in head.S, so that those 6x86(L) now get
   detected properly (0 == no cpuid): try to toggle the EFLAGS ID bit
   (0x200000) and return the XOR of the before/after values -- nonzero
   iff the bit is software-changeable, i.e. CPUID exists.
   Fix: the output constraint was "=eax" (parsed as letters e/a/x and
   only correct by accident); the proper %eax constraint is "=a". */
static inline int test_cpuid(void)
{
	int test;

	__asm__("pushfl\n\t"
	     "popl %%eax\n\t"
	     "movl %%eax,%%ecx\n\t"
	     "xorl $0x200000,%%eax\n\t"
	     "pushl %%eax\n\t"
	     "popfl\n\t"
	     "pushfl\n\t"
	     "popl %%eax\n\t"
	     "xorl %%ecx,%%eax\n\t"
	     "pushl %%ecx\n\t"
	     "popfl" : "=a" (test) : : "cx");

	return test;
}
|
/* All Cyrix 6x86 and 6x86L need the SLOP bit reset so that the udelay loop |
* calibration works well. |
* This routine must be called with MAPEN enabled, otherwise we don't |
* have access to CCR5. |
*/ |
|
static void check_6x86_slop(void)
{
	/* model 2 == 6x86/6x86L; clear CCR5 bit 1 (SLOP) -- see the comment
	 * above: requires MAPEN to be enabled by the caller */
	if (x86_model == 2) /* if 6x86 or 6x86L */
		setCx86(CX86_CCR5, getCx86(CX86_CCR5) & 0xfd); /* reset SLOP */
}
|
/* Cyrix CPUs without cpuid or with cpuid not yet enabled can be detected |
* by the fact that they preserve the flags across the division of 5/2. |
* PII and PPro exhibit this behavior too, but they have cpuid available. |
*/ |
|
/*
 * Probe for Cyrix parts that head.S misdetected (no CPUID or CPUID not
 * yet enabled); may rewrite x86, x86_model, x86_mask, x86_vendor_id
 * and x86_capability.  Runs with interrupts disabled around the
 * configuration-register accesses.
 */
static void check_cyrix_various(void)
{
	if ((x86 == 4) && (test_cyrix_52div()==0x200))
	{
		/* if it's a Cyrix */

		unsigned long flags;

		/* default to an "old" Cx486 */
		strcpy(x86_vendor_id, "CyrixInstead");
		x86_model = -1;
		x86_mask = 0;

		/* Disable interrupts */
		save_flags(flags);
		cli();

		/* First check for very old CX486 models */
		/* that did not have DIR0/DIR1. */
		if (test_cyrix_cr3rw())
		{	/* if has DIR0/DIR1 */

			char ccr3;
			char dir0;
			x86_model = 0;

			/* Enable MAPEN */
			ccr3 = getCx86(CX86_CCR3);
			setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);

			dir0 = getCx86(CX86_DIR0);
			if ((dir0 & 0xf0) == 0x30) /* Use DIR0 to determine if this is a 6x86 class processor */
			{
				/* try enabling cpuid */
				setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x80);
			}

			if (test_cpuid())
			{
				int eax, dummy;

				/* get processor info: family/model come from
				 * CPUID leaf 1 EAX */

				cpuid(1, &eax, &dummy, &dummy,
				      &x86_capability);

				have_cpuid = 1;
				x86_model = (eax >> 4) & 0xf;
				x86 = (eax >> 8) & 0xf;
				check_6x86_slop();
			}
			/* disable MAPEN */
			setCx86(CX86_CCR3, ccr3);
		} /* endif has DIR0/DIR1 */
		sti();
		restore_flags(flags);	/* restore interrupt state */
	}	/* endif it's a Cyrix */
}
|
/* Check various processor bugs */

/*
 * Entry point called from init/main.c.  Order matters: the Cyrix probe
 * runs first because it may rewrite x86, x86_model and x86_vendor_id,
 * which the later checks consult.
 */
static void check_bugs(void)
{
	check_cyrix_various();
	check_k6_bug();
	check_tlb();
	check_fpu();
	check_hlt();
	check_pentium_f00f();
	check_privacy();
	system_utsname.machine[1] = '0' + x86;	/* "i?86" -> i<family>86 */
}
/bitops.h
0,0 → 1,137
#ifndef _I386_BITOPS_H |
#define _I386_BITOPS_H |
|
/* |
* Copyright 1992, Linus Torvalds. |
*/ |
|
/* |
* These have to be done with inline assembly: that way the bit-setting |
* is guaranteed to be atomic. All bit operations return 0 if the bit |
* was cleared before the operation and != 0 if it was not. |
* |
* bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). |
*/ |
|
/* On SMP the read-modify-write must hold the bus lock; UP needs neither
 * the lock prefix nor volatile-qualified operands. */
#ifdef __SMP__
#define LOCK_PREFIX "lock ; "
#define SMPVOL volatile
#else
#define LOCK_PREFIX ""
#define SMPVOL
#endif

/*
 * Some hacks to defeat gcc over-optimizations..
 */
/* Casting addr to a large dummy struct makes the "m"/"=m" asm operand
 * cover the whole bit string, so gcc will not cache parts of it in
 * registers across the asm statement. */
struct __dummy { unsigned long a[100]; };
#define ADDR (*(struct __dummy *) addr)
#define CONST_ADDR (*(const struct __dummy *) addr)
|
/*
 * Atomically set bit 'nr' at 'addr'; returns the old bit value as
 * 0 or -1 (btsl copies the old bit into the carry flag, and
 * "sbbl %0,%0" materializes the carry as 0/-1 -- matching the header
 * comment: 0 if it was clear, != 0 otherwise).
 */
extern __inline__ int set_bit(int nr, SMPVOL void * addr)
{
	int oldbit;

	__asm__ __volatile__(LOCK_PREFIX
		"btsl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"ir" (nr));
	return oldbit;
}
|
/*
 * Atomically clear bit 'nr' at 'addr'; returns the old bit value as
 * 0 or -1 (carry from btrl, materialized by sbbl).
 */
extern __inline__ int clear_bit(int nr, SMPVOL void * addr)
{
	int oldbit;

	__asm__ __volatile__(LOCK_PREFIX
		"btrl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"ir" (nr));
	return oldbit;
}
|
/*
 * Atomically toggle bit 'nr' at 'addr'; returns the old bit value as
 * 0 or -1 (carry from btcl, materialized by sbbl).
 */
extern __inline__ int change_bit(int nr, SMPVOL void * addr)
{
	int oldbit;

	__asm__ __volatile__(LOCK_PREFIX
		"btcl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"ir" (nr));
	return oldbit;
}
|
/* |
* This routine doesn't need to be atomic. |
*/ |
extern __inline__ int test_bit(int nr, const SMPVOL void * addr) |
{ |
return ((1UL << (nr & 31)) & (((const unsigned int *) addr)[nr >> 5])) != 0; |
} |
|
/* |
* Find-bit routines.. |
*/ |
/*
 * Find the index of the first zero bit in the 'size'-bit area at addr.
 * Returns 0 for size == 0; returns a value >= size when every bit is
 * set (the scan runs off the end).  Scans whole 32-bit words.
 */
extern __inline__ int find_first_zero_bit(void * addr, unsigned size)
{
	int res;

	if (!size)
		return 0;
	/*
	 * "repe scasl" skips 32-bit words equal to %eax (-1, all ones);
	 * the first word containing a zero bit is then XORed against -1
	 * and bsfl locates that bit.  %ebx keeps the start address so
	 * the final subtract + shll $3 converts the byte offset of the
	 * word into a bit index, to which the in-word index is added.
	 */
	__asm__("cld\n\t"
		"movl $-1,%%eax\n\t"
		"xorl %%edx,%%edx\n\t"
		"repe; scasl\n\t"
		"je 1f\n\t"
		"xorl -4(%%edi),%%eax\n\t"
		"subl $4,%%edi\n\t"
		"bsfl %%eax,%%edx\n"
		"1:\tsubl %%ebx,%%edi\n\t"
		"shll $3,%%edi\n\t"
		"addl %%edi,%%edx"
		:"=d" (res)
		:"c" ((size + 31) >> 5), "D" (addr), "b" (addr)
		:"ax", "cx", "di");
	return res;
}
|
/*
 * Find the first zero bit at or after bit 'offset' in the 'size'-bit
 * area at addr: check the remainder of the word containing 'offset'
 * by hand, then fall through to find_first_zero_bit() for the rest.
 */
extern __inline__ int find_next_zero_bit (void * addr, int size, int offset)
{
	unsigned long * p = ((unsigned long *) addr) + (offset >> 5);
	int set = 0, bit = offset & 31, res;

	if (bit) {
		/*
		 * Look for a zero bit in the first (partial) 32-bit word:
		 * bsfl on the inverted, shifted word finds the lowest zero;
		 * bsfl leaves ZF set when its source is zero, so "jne 1f"
		 * skips the "no zero found" fixup (set = 32).
		 */
		__asm__("bsfl %1,%0\n\t"
			"jne 1f\n\t"
			"movl $32, %0\n"
			"1:"
			: "=r" (set)
			: "r" (~(*p >> bit)));
		if (set < (32 - bit))
			return set + offset;
		set = 32 - bit;
		p++;
	}
	/*
	 * No zero yet, search remaining full words for a zero
	 */
	res = find_first_zero_bit (p, size - 32 * (p - (unsigned long *) addr));
	return (offset + set + res);
}
|
/* |
* ffz = Find First Zero in word. Undefined if no zero exists, |
* so code should check against ~0UL first.. |
*/ |
/* bsfl on ~word yields the index of the lowest set bit of ~word, i.e.
 * the lowest zero bit of word; bsfl leaves the destination undefined
 * when its source is zero, hence the ~0UL caveat above */
extern __inline__ unsigned long ffz(unsigned long word)
{
	__asm__("bsfl %1,%0"
		:"=r" (word)
		:"r" (~word));
	return word;
}
|
#endif /* _I386_BITOPS_H */ |
/param.h
0,0 → 1,20
#ifndef _ASMi386_PARAM_H
#define _ASMi386_PARAM_H

/* timer interrupt rate: ticks (jiffies) per second */
#ifndef HZ
#define HZ 100
#endif

/* page granularity assumed by the exec/binary-format code */
#define EXEC_PAGESIZE	4096

/* maximum number of supplementary groups per process */
#ifndef NGROUPS
#define NGROUPS		32
#endif

/* sentinel marking an unused slot in the groups array */
#ifndef NOGROUP
#define NOGROUP		(-1)
#endif

#define MAXHOSTNAMELEN	64	/* max length of hostname */

#endif
/dma.h
0,0 → 1,271
/* $Id: dma.h,v 1.1 2005-12-20 11:35:33 jcastillo Exp $ |
* linux/include/asm/dma.h: Defines for using and allocating dma channels. |
* Written by Hennus Bergman, 1992. |
* High DMA channel support & info by Hannu Savolainen |
* and John Boyd, Nov. 1992. |
*/ |
|
#ifndef _ASM_DMA_H |
#define _ASM_DMA_H |
|
#include <asm/io.h> /* need byte IO */ |
|
|
/* All controller accesses below go through these; the _p variant adds
 * an I/O pause for controllers that can't keep up with back-to-back
 * port writes. */
#ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER
#define dma_outb	outb_p
#else
#define dma_outb	outb
#endif

#define dma_inb		inb
|
/* |
* NOTES about DMA transfers: |
* |
* controller 1: channels 0-3, byte operations, ports 00-1F |
* controller 2: channels 4-7, word operations, ports C0-DF |
* |
* - ALL registers are 8 bits only, regardless of transfer size |
* - channel 4 is not used - cascades 1 into 2. |
* - channels 0-3 are byte - addresses/counts are for physical bytes |
* - channels 5-7 are word - addresses/counts are for physical words |
* - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries |
* - transfer count loaded to registers is 1 less than actual count |
* - controller 2 offsets are all even (2x offsets for controller 1) |
* - page registers for 5-7 don't use data bit 0, represent 128K pages |
* - page registers for 0-3 use bit 0, represent 64K pages |
* |
* DMA transfers are limited to the lower 16MB of _physical_ memory. |
* Note that addresses loaded into registers must be _physical_ addresses, |
* not logical addresses (which may differ if paging is active). |
* |
* Address mapping for channels 0-3: |
* |
* A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses) |
* | ... | | ... | | ... | |
* | ... | | ... | | ... | |
* | ... | | ... | | ... | |
* P7 ... P0 A7 ... A0 A7 ... A0 |
* | Page | Addr MSB | Addr LSB | (DMA registers) |
* |
* Address mapping for channels 5-7: |
* |
* A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses) |
* | ... | \ \ ... \ \ \ ... \ \ |
* | ... | \ \ ... \ \ \ ... \ (not used) |
* | ... | \ \ ... \ \ \ ... \ |
* P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0 |
* | Page | Addr MSB | Addr LSB | (DMA registers) |
* |
* Again, channels 5-7 transfer _physical_ words (16 bits), so addresses |
* and counts _must_ be word-aligned (the lowest address bit is _ignored_ at |
* the hardware level, so odd-byte transfers aren't possible). |
* |
* Transfer count (_not # bytes_) is limited to 64K, represented as actual |
* count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more, |
* and up to 128K bytes may be transferred on channels 5-7 in one operation. |
* |
*/ |
|
#define MAX_DMA_CHANNELS 8 |
|
/* The maximum address that we can perform a DMA transfer to on this platform */ |
#define MAX_DMA_ADDRESS 0x1000000 |
|
/* 8237 DMA controllers */ |
#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */ |
#define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */ |
|
/* DMA controller registers */ |
#define DMA1_CMD_REG 0x08 /* command register (w) */ |
#define DMA1_STAT_REG 0x08 /* status register (r) */ |
#define DMA1_REQ_REG 0x09 /* request register (w) */ |
#define DMA1_MASK_REG 0x0A /* single-channel mask (w) */ |
#define DMA1_MODE_REG 0x0B /* mode register (w) */ |
#define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */ |
#define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */ |
#define DMA1_RESET_REG 0x0D /* Master Clear (w) */ |
#define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */ |
#define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */ |
|
#define DMA2_CMD_REG 0xD0 /* command register (w) */ |
#define DMA2_STAT_REG 0xD0 /* status register (r) */ |
#define DMA2_REQ_REG 0xD2 /* request register (w) */ |
#define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */ |
#define DMA2_MODE_REG 0xD6 /* mode register (w) */ |
#define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */ |
#define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */ |
#define DMA2_RESET_REG 0xDA /* Master Clear (w) */ |
#define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */ |
#define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */ |
|
#define DMA_ADDR_0 0x00 /* DMA address registers */ |
#define DMA_ADDR_1 0x02 |
#define DMA_ADDR_2 0x04 |
#define DMA_ADDR_3 0x06 |
#define DMA_ADDR_4 0xC0 |
#define DMA_ADDR_5 0xC4 |
#define DMA_ADDR_6 0xC8 |
#define DMA_ADDR_7 0xCC |
|
#define DMA_CNT_0 0x01 /* DMA count registers */ |
#define DMA_CNT_1 0x03 |
#define DMA_CNT_2 0x05 |
#define DMA_CNT_3 0x07 |
#define DMA_CNT_4 0xC2 |
#define DMA_CNT_5 0xC6 |
#define DMA_CNT_6 0xCA |
#define DMA_CNT_7 0xCE |
|
#define DMA_PAGE_0 0x87 /* DMA page registers */ |
#define DMA_PAGE_1 0x83 |
#define DMA_PAGE_2 0x81 |
#define DMA_PAGE_3 0x82 |
#define DMA_PAGE_5 0x8B |
#define DMA_PAGE_6 0x89 |
#define DMA_PAGE_7 0x8A |
|
#define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */ |
#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */ |
#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */ |
|
/* enable/disable a specific DMA channel */ |
static __inline__ void enable_dma(unsigned int dmanr) |
{ |
if (dmanr<=3) |
dma_outb(dmanr, DMA1_MASK_REG); |
else |
dma_outb(dmanr & 3, DMA2_MASK_REG); |
} |
|
static __inline__ void disable_dma(unsigned int dmanr) |
{ |
if (dmanr<=3) |
dma_outb(dmanr | 4, DMA1_MASK_REG); |
else |
dma_outb((dmanr & 3) | 4, DMA2_MASK_REG); |
} |
|
/* Clear the 'DMA Pointer Flip Flop'. |
* Write 0 for LSB/MSB, 1 for MSB/LSB access. |
* Use this once to initialize the FF to a known state. |
* After that, keep track of it. :-) |
* --- In order to do that, the DMA routines below should --- |
* --- only be used while interrupts are disabled! --- |
*/ |
static __inline__ void clear_dma_ff(unsigned int dmanr) |
{ |
if (dmanr<=3) |
dma_outb(0, DMA1_CLEAR_FF_REG); |
else |
dma_outb(0, DMA2_CLEAR_FF_REG); |
} |
|
/* set mode (above) for a specific DMA channel */ |
static __inline__ void set_dma_mode(unsigned int dmanr, char mode) |
{ |
if (dmanr<=3) |
dma_outb(mode | dmanr, DMA1_MODE_REG); |
else |
dma_outb(mode | (dmanr&3), DMA2_MODE_REG); |
} |
|
/* Set only the page register bits of the transfer address. |
* This is used for successive transfers when we know the contents of |
* the lower 16 bits of the DMA current address register, but a 64k boundary |
* may have been crossed. |
*/ |
static __inline__ void set_dma_page(unsigned int dmanr, char pagenr) |
{ |
switch(dmanr) { |
case 0: |
dma_outb(pagenr, DMA_PAGE_0); |
break; |
case 1: |
dma_outb(pagenr, DMA_PAGE_1); |
break; |
case 2: |
dma_outb(pagenr, DMA_PAGE_2); |
break; |
case 3: |
dma_outb(pagenr, DMA_PAGE_3); |
break; |
case 5: |
dma_outb(pagenr & 0xfe, DMA_PAGE_5); |
break; |
case 6: |
dma_outb(pagenr & 0xfe, DMA_PAGE_6); |
break; |
case 7: |
dma_outb(pagenr & 0xfe, DMA_PAGE_7); |
break; |
} |
} |
|
|
/* Set transfer address & page bits for specific DMA channel.
 * Assumes dma flipflop is clear.
 */
static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
{
	set_dma_page(dmanr, a>>16);
	if (dmanr <= 3) {
		/* 8-bit channels: 16-bit byte address, LSB then MSB */
		dma_outb( a & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
		dma_outb( (a>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
	}  else  {
		/* 16-bit channels: the controller takes a word address (a>>1) */
		dma_outb( (a>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
		dma_outb( (a>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
	}
}
|
|
/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for
 * a specific DMA channel.
 * You must ensure the parameters are valid.
 * NOTE: from a manual: "the number of transfers is one more
 * than the initial word count"! This is taken into account.
 * Assumes dma flip-flop is clear.
 * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
 */
static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
{
	count--;	/* the register holds (transfers - 1) */
	if (dmanr <= 3) {
		/* 8-bit channels: count is in bytes, LSB then MSB */
		dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
		dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
	} else {
		/* 16-bit channels: the controller counts words (count>>1) */
		dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
		dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
	}
}
|
|
/* Get DMA residue count. After a DMA transfer, this
 * should return zero. Reading this while a DMA transfer is
 * still in progress will return unpredictable results.
 * If called before the channel has been used, it may return 1.
 * Otherwise, it returns the number of _bytes_ left to transfer.
 *
 * Assumes DMA flip-flop is clear.
 */
static __inline__ int get_dma_residue(unsigned int dmanr)
{
	/* count-register port for the channel (see DMA_CNT_* above) */
	unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE
					 : ((dmanr&3)<<2) + 2 + IO_DMA2_BASE;

	/* using short to get 16-bit wrap around */
	unsigned short count;

	/* two reads give LSB then MSB; +1 because the register holds
	 * (transfers - 1) */
	count = 1 + dma_inb(io_port);
	count += dma_inb(io_port) << 8;

	/* 16-bit channels count words, so double to get bytes */
	return (dmanr<=3)? count : (count<<1);
}
|
|
/* These are in kernel/dma.c: */ |
extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */ |
extern void free_dma(unsigned int dmanr); /* release it again */ |
|
|
#endif /* _ASM_DMA_H */ |
/checksum.h
0,0 → 1,121
#ifndef _I386_CHECKSUM_H |
#define _I386_CHECKSUM_H |
|
/* |
* computes the checksum of a memory block at buff, length len, |
* and adds in "sum" (32-bit) |
* |
* returns a 32-bit number suitable for feeding into itself |
* or csum_tcpudp_magic |
* |
* this function must be called with even lengths, except |
* for the last fragment, which may be odd |
* |
* it's best to have buff aligned on a 32-bit boundary |
*/ |
unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum); |
|
/* |
* the same as csum_partial, but copies from src while it |
* checksums |
* |
* here even more important to align src and dst on a 32-bit (or even |
* better 64-bit) boundary |
*/ |
|
unsigned int csum_partial_copy( const char *src, char *dst, int len, int sum); |
|
|
/* |
* the same as csum_partial_copy, but copies from user space. |
* |
* here even more important to align src and dst on a 32-bit (or even |
* better 64-bit) boundary |
*/ |
|
unsigned int csum_partial_copy_fromuser(const char *src, char *dst, int len, int sum); |
|
/* |
* This is a version of ip_compute_csum() optimized for IP headers, |
* which always checksum on 4 octet boundaries. |
* |
* By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by |
* Arnt Gulbrandsen. |
*/ |
/*
 * Fix: the asm template was one multi-line string literal -- a GCC 2.x
 * extension removed in later compilers.  Each instruction is now its
 * own string ending in \n\t; the instruction sequence is unchanged.
 * Sums 'ihl' 32-bit words at iph with end-around carry, folds to 16
 * bits and complements; ihl < 4 (jbe path) returns an undefined value,
 * matching the original.
 */
static inline unsigned short ip_fast_csum(unsigned char * iph,
					  unsigned int ihl) {
	unsigned int sum;

	__asm__ __volatile__(
	    "movl (%1), %0\n\t"
	    "subl $4, %2\n\t"
	    "jbe 2f\n\t"
	    "addl 4(%1), %0\n\t"
	    "adcl 8(%1), %0\n\t"
	    "adcl 12(%1), %0\n"
	    "1:\tadcl 16(%1), %0\n\t"
	    "lea 4(%1), %1\n\t"
	    "decl %2\n\t"
	    "jne 1b\n\t"
	    "adcl $0, %0\n\t"
	    "movl %0, %2\n\t"
	    "shrl $16, %0\n\t"
	    "addw %w2, %w0\n\t"
	    "adcl $0, %0\n\t"
	    "notl %0\n"
	    "2:"
	/* Since the input registers which are loaded with iph and ipl
	   are modified, we must also specify them as outputs, or gcc
	   will assume they contain their original values. */
	: "=r" (sum), "=r" (iph), "=r" (ihl)
	: "1" (iph), "2" (ihl));
	return(sum);
}
|
/* |
* Fold a partial checksum |
*/ |
|
/* Add the two 16-bit halves of 'sum' with end-around carry and return
 * the complemented 16-bit result.
 * Fix: the multi-line asm string literal (a GCC 2.x extension, removed
 * in later compilers) is replaced by standard string concatenation;
 * the instructions are unchanged. */
static inline unsigned int csum_fold(unsigned int sum)
{
	__asm__("addl %1, %0\n\t"
		"adcl $0xffff, %0"
		: "=r" (sum)
		: "r" (sum << 16), "0" (sum & 0xffff0000)
	);
	return (~sum) >> 16;
}
|
/* |
* computes the checksum of the TCP/UDP pseudo-header |
* returns a 16-bit checksum, already complemented |
*/ |
|
/* Accumulate the pseudo-header fields (addresses, length, protocol)
 * into 'sum' with end-around carry, then fold and complement.
 * Fix: the multi-line asm string literal (GCC 2.x extension, removed
 * in later compilers) is replaced by per-instruction strings; the
 * instruction sequence is unchanged. */
static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
						   unsigned long daddr,
						   unsigned short len,
						   unsigned short proto,
						   unsigned int sum) {
	__asm__("addl %1, %0\n\t"
		"adcl %2, %0\n\t"
		"adcl %3, %0\n\t"
		"adcl $0, %0"
	: "=r" (sum)
	: "g" (daddr), "g"(saddr), "g"((ntohs(len)<<16)+proto*256), "0"(sum));
	return csum_fold(sum);
}
/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */

/* one's-complement checksum of an arbitrary buffer: accumulate with
 * csum_partial() (extern, implemented in assembly) and fold/complement
 * down to 16 bits */
static inline unsigned short ip_compute_csum(unsigned char * buff, int len) {
	return csum_fold (csum_partial(buff, len, 0));
}
|
#endif |
/mmu_context.h
0,0 → 1,9
#ifndef __I386_MMU_CONTEXT_H |
#define __I386_MMU_CONTEXT_H |
|
/*
 * get a new mmu context.. x86's don't know about contexts.
 */
/* no-op; do { } while (0) keeps it safe in unbraced if/else bodies */
#define get_mmu_context(x) do { } while (0)
|
#endif |
/statfs.h
0,0 → 1,25
#ifndef _I386_STATFS_H |
#define _I386_STATFS_H |
|
#ifndef __KERNEL_STRICT_NAMES |
|
#include <linux/types.h> |
|
typedef __kernel_fsid_t fsid_t; |
|
#endif |
|
/* Buffer filled in by the statfs()/fstatfs() system calls. */
struct statfs {
	long f_type;		/* filesystem type magic */
	long f_bsize;		/* optimal transfer block size */
	long f_blocks;		/* total data blocks */
	long f_bfree;		/* free blocks */
	long f_bavail;		/* free blocks available to non-root */
	long f_files;		/* total inodes */
	long f_ffree;		/* free inodes */
	__kernel_fsid_t f_fsid;	/* filesystem id */
	long f_namelen;		/* maximum filename length */
	long f_spare[6];	/* padding, reserved for future use */
};
|
#endif |
/unistd.h
0,0 → 1,328
#ifndef _ASM_I386_UNISTD_H_ |
#define _ASM_I386_UNISTD_H_ |
|
/* |
* This file contains the system call numbers. |
*/ |
|
#define __NR_setup 0 /* used only by init, to get system going */ |
#define __NR_exit 1 |
#define __NR_fork 2 |
#define __NR_read 3 |
#define __NR_write 4 |
#define __NR_open 5 |
#define __NR_close 6 |
#define __NR_waitpid 7 |
#define __NR_creat 8 |
#define __NR_link 9 |
#define __NR_unlink 10 |
#define __NR_execve 11 |
#define __NR_chdir 12 |
#define __NR_time 13 |
#define __NR_mknod 14 |
#define __NR_chmod 15 |
#define __NR_chown 16 |
#define __NR_break 17 |
#define __NR_oldstat 18 |
#define __NR_lseek 19 |
#define __NR_getpid 20 |
#define __NR_mount 21 |
#define __NR_umount 22 |
#define __NR_setuid 23 |
#define __NR_getuid 24 |
#define __NR_stime 25 |
#define __NR_ptrace 26 |
#define __NR_alarm 27 |
#define __NR_oldfstat 28 |
#define __NR_pause 29 |
#define __NR_utime 30 |
#define __NR_stty 31 |
#define __NR_gtty 32 |
#define __NR_access 33 |
#define __NR_nice 34 |
#define __NR_ftime 35 |
#define __NR_sync 36 |
#define __NR_kill 37 |
#define __NR_rename 38 |
#define __NR_mkdir 39 |
#define __NR_rmdir 40 |
#define __NR_dup 41 |
#define __NR_pipe 42 |
#define __NR_times 43 |
#define __NR_prof 44 |
#define __NR_brk 45 |
#define __NR_setgid 46 |
#define __NR_getgid 47 |
#define __NR_signal 48 |
#define __NR_geteuid 49 |
#define __NR_getegid 50 |
#define __NR_acct 51 |
#define __NR_phys 52 |
#define __NR_lock 53 |
#define __NR_ioctl 54 |
#define __NR_fcntl 55 |
#define __NR_mpx 56 |
#define __NR_setpgid 57 |
#define __NR_ulimit 58 |
#define __NR_oldolduname 59 |
#define __NR_umask 60 |
#define __NR_chroot 61 |
#define __NR_ustat 62 |
#define __NR_dup2 63 |
#define __NR_getppid 64 |
#define __NR_getpgrp 65 |
#define __NR_setsid 66 |
#define __NR_sigaction 67 |
#define __NR_sgetmask 68 |
#define __NR_ssetmask 69 |
#define __NR_setreuid 70 |
#define __NR_setregid 71 |
#define __NR_sigsuspend 72 |
#define __NR_sigpending 73 |
#define __NR_sethostname 74 |
#define __NR_setrlimit 75 |
#define __NR_getrlimit 76 |
#define __NR_getrusage 77 |
#define __NR_gettimeofday 78 |
#define __NR_settimeofday 79 |
#define __NR_getgroups 80 |
#define __NR_setgroups 81 |
#define __NR_select 82 |
#define __NR_symlink 83 |
#define __NR_oldlstat 84 |
#define __NR_readlink 85 |
#define __NR_uselib 86 |
#define __NR_swapon 87 |
#define __NR_reboot 88 |
#define __NR_readdir 89 |
#define __NR_mmap 90 |
#define __NR_munmap 91 |
#define __NR_truncate 92 |
#define __NR_ftruncate 93 |
#define __NR_fchmod 94 |
#define __NR_fchown 95 |
#define __NR_getpriority 96 |
#define __NR_setpriority 97 |
#define __NR_profil 98 |
#define __NR_statfs 99 |
#define __NR_fstatfs 100 |
#define __NR_ioperm 101 |
#define __NR_socketcall 102 |
#define __NR_syslog 103 |
#define __NR_setitimer 104 |
#define __NR_getitimer 105 |
#define __NR_stat 106 |
#define __NR_lstat 107 |
#define __NR_fstat 108 |
#define __NR_olduname 109 |
#define __NR_iopl 110 |
#define __NR_vhangup 111 |
#define __NR_idle 112 |
#define __NR_vm86 113 |
#define __NR_wait4 114 |
#define __NR_swapoff 115 |
#define __NR_sysinfo 116 |
#define __NR_ipc 117 |
#define __NR_fsync 118 |
#define __NR_sigreturn 119 |
#define __NR_clone 120 |
#define __NR_setdomainname 121 |
#define __NR_uname 122 |
#define __NR_modify_ldt 123 |
#define __NR_adjtimex 124 |
#define __NR_mprotect 125 |
#define __NR_sigprocmask 126 |
#define __NR_create_module 127 |
#define __NR_init_module 128 |
#define __NR_delete_module 129 |
#define __NR_get_kernel_syms 130 |
#define __NR_quotactl 131 |
#define __NR_getpgid 132 |
#define __NR_fchdir 133 |
#define __NR_bdflush 134 |
#define __NR_sysfs 135 |
#define __NR_personality 136 |
#define __NR_afs_syscall 137 /* Syscall for Andrew File System */ |
#define __NR_setfsuid 138 |
#define __NR_setfsgid 139 |
#define __NR__llseek 140 |
#define __NR_getdents 141 |
#define __NR__newselect 142 |
#define __NR_flock 143 |
#define __NR_msync 144 |
#define __NR_readv 145 |
#define __NR_writev 146 |
#define __NR_getsid 147 |
#define __NR_fdatasync 148 |
#define __NR__sysctl 149 |
#define __NR_mlock 150 |
#define __NR_munlock 151 |
#define __NR_mlockall 152 |
#define __NR_munlockall 153 |
#define __NR_sched_setparam 154 |
#define __NR_sched_getparam 155 |
#define __NR_sched_setscheduler 156 |
#define __NR_sched_getscheduler 157 |
#define __NR_sched_yield 158 |
#define __NR_sched_get_priority_max 159 |
#define __NR_sched_get_priority_min 160 |
#define __NR_sched_rr_get_interval 161 |
#define __NR_nanosleep 162 |
#define __NR_mremap 163 |
#define __NR_poll 168 |
#define __NR_getpmsg 188 |
#define __NR_putpmsg 189 |
|
/* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. */ |
/*
 * _syscallN(type,name,...) expands to a user-mode wrapper function
 * "name" that enters the kernel via "int $0x80".  The syscall number
 * is passed in %eax ("a"/"0") and arguments 1..5 in %ebx, %ecx, %edx,
 * %esi, %edi ("b","c","d","S","D").  The kernel returns the result in
 * %eax; a negative value is an error code, which the wrapper negates
 * into the global "errno" before returning -1, as libc callers expect.
 */

/* No-argument system call. */
#define _syscall0(type,name) \
type name(void) \
{ \
long __res; \
__asm__ volatile ("int $0x80" \
    : "=a" (__res) \
    : "0" (__NR_##name)); \
if (__res >= 0) \
    return (type) __res; \
errno = -__res; \
return -1; \
}

/* One argument, in %ebx. */
#define _syscall1(type,name,type1,arg1) \
type name(type1 arg1) \
{ \
long __res; \
__asm__ volatile ("int $0x80" \
    : "=a" (__res) \
    : "0" (__NR_##name),"b" ((long)(arg1))); \
if (__res >= 0) \
    return (type) __res; \
errno = -__res; \
return -1; \
}

/* Two arguments, in %ebx and %ecx. */
#define _syscall2(type,name,type1,arg1,type2,arg2) \
type name(type1 arg1,type2 arg2) \
{ \
long __res; \
__asm__ volatile ("int $0x80" \
    : "=a" (__res) \
    : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2))); \
if (__res >= 0) \
    return (type) __res; \
errno = -__res; \
return -1; \
}

/* Three arguments, in %ebx, %ecx, %edx. */
#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
type name(type1 arg1,type2 arg2,type3 arg3) \
{ \
long __res; \
__asm__ volatile ("int $0x80" \
    : "=a" (__res) \
    : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \
          "d" ((long)(arg3))); \
if (__res>=0) \
    return (type) __res; \
errno=-__res; \
return -1; \
}

/* Four arguments, in %ebx, %ecx, %edx, %esi. */
#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
long __res; \
__asm__ volatile ("int $0x80" \
    : "=a" (__res) \
    : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \
      "d" ((long)(arg3)),"S" ((long)(arg4))); \
if (__res>=0) \
    return (type) __res; \
errno=-__res; \
return -1; \
}

/* Five arguments, in %ebx, %ecx, %edx, %esi, %edi. */
#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
      type5,arg5) \
type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
long __res; \
__asm__ volatile ("int $0x80" \
    : "=a" (__res) \
    : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \
      "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5))); \
if (__res>=0) \
    return (type) __res; \
errno=-__res; \
return -1; \
}
|
#ifdef __KERNEL_SYSCALLS__ |
|
/* |
* we need this inline - forking from kernel space will result |
* in NO COPY ON WRITE (!!!), until an execve is executed. This |
* is no problem, but for the stack. This is handled by not letting |
* main() use the stack at all after fork(). Thus, no function |
* calls - which means inline code for fork too, as otherwise we |
* would use the stack upon exit from 'fork()'. |
* |
* Actually only pause and fork are needed inline, so that there |
* won't be any messing with the stack from main(), but we define |
* some others too. |
*/ |
/* _exit is the raw exit syscall under its traditional name. */
#define __NR__exit __NR_exit
/* Instantiate, inline, the few wrappers the kernel itself needs. */
static inline _syscall0(int,idle)
static inline _syscall0(int,fork)
static inline _syscall2(int,clone,unsigned long,flags,char *,esp)
static inline _syscall0(int,pause)
static inline _syscall0(int,setup)
static inline _syscall0(int,sync)
static inline _syscall0(pid_t,setsid)
static inline _syscall3(int,read,int,fd,char *,buf,off_t,count)
static inline _syscall3(off_t,lseek,int,fd,off_t,offset,int,count)
static inline _syscall3(int,write,int,fd,const char *,buf,off_t,count)
static inline _syscall1(int,dup,int,fd)
static inline _syscall3(int,execve,const char *,file,char **,argv,char **,envp)
static inline _syscall3(int,open,const char *,file,int,flag,int,mode)
static inline _syscall1(int,close,int,fd)
static inline _syscall1(int,_exit,int,exitcode)
static inline _syscall3(pid_t,waitpid,pid_t,pid,int *,wait_stat,int,options)

/* Wait for any child: waitpid(-1, ...) with no options. */
static inline pid_t wait(int * wait_stat)
{
    return waitpid(-1,wait_stat,0);
}
|
/* |
* This is the mechanism for creating a new kernel thread. |
* |
* NOTE! Only a kernel-only process(ie the swapper or direct descendants |
* who haven't done an "execve()") should use this: it will work within |
* a system call from a "real" process, but the process memory space will |
* not be free'd until both the parent and the child have exited. |
*/ |
/*
 * Spawn a kernel thread running fn(arg), by issuing the clone syscall
 * directly from asm (see the stack-usage caveats in the comment above).
 *
 * The parent's %esp is unchanged across the syscall and equals the
 * copy saved in %esi, so the cmpl/je pair jumps to 1: in the parent;
 * the child falls through, pushes "arg", calls fn, and then invokes
 * __NR_exit.  NOTE(review): this presumes the child resumes with a
 * different %esp than the parent — confirm against sys_clone.
 *
 * NOTE(review): at the final "int $0x80" the exit status is taken
 * from %ebx, which still holds "flags | CLONE_VM" rather than fn's
 * return value — the exit code looks meaningless; confirm.
 */
static inline pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
    long retval;

    __asm__ __volatile__(
        "movl %%esp,%%esi\n\t"
        "int $0x80\n\t"     /* Linux/i386 system call */
        "cmpl %%esp,%%esi\n\t"  /* child or parent? */
        "je 1f\n\t"     /* parent - jump */
        "pushl %3\n\t"      /* push argument */
        "call *%4\n\t"      /* call fn */
        "movl %2,%0\n\t"    /* exit */
        "int $0x80\n"
        "1:\t"
        :"=a" (retval)
        :"0" (__NR_clone), "i" (__NR_exit),
         "r" (arg), "r" (fn),
         "b" (flags | CLONE_VM)
        :"si");
    return retval;
}
|
#endif |
|
#endif /* _ASM_I386_UNISTD_H_ */ |
/i82489.h
0,0 → 1,88
#ifndef __ASM_I82489_H |
#define __ASM_I82489_H |
|
/* |
* Offsets for programming the 82489 and Pentium integrated APIC |
* |
* Alan Cox <Alan.Cox@linux.org>, 1995. |
*/ |
|
#define APIC_ID 0x20 |
#define GET_APIC_ID(x) (((x)>>24)&0x0F) |
#define APIC_VERSION 0x30 |
#define APIC_TASKPRI 0x80 |
#define APIC_TPRI_MASK 0xFF |
#define APIC_ARBPRI 0x90 |
#define APIC_PROCPRI 0xA0 |
#define APIC_EOI 0xB0 |
#define APIC_EIO_ACK 0x0 /* Write this to the EOI register */ |
#define APIC_RRR 0xC0 |
#define APIC_LDR 0xD0 |
#define GET_APIC_LOGICAL_ID(x) (((x)>>24)&0xFF) |
#define APIC_DFR 0xE0 |
#define GET_APIC_DFR(x) (((x)>>28)&0x0F) |
#define SET_APIC_DFR(x) ((x)<<28) |
#define APIC_SPIV 0xF0 |
#define APIC_ISR 0x100 |
#define APIC_TMR 0x180 |
#define APIC_IRR 0x200 |
#define APIC_ESR 0x280 |
#define APIC_ESR_SEND_CS 0x00001 |
#define APIC_ESR_RECV_CS 0x00002 |
#define APIC_ESR_SEND_ACC 0x00004 |
#define APIC_ESR_RECV_ACC 0x00008 |
#define APIC_ESR_SENDILL 0x00020 |
#define APIC_ESR_RECVILL 0x00040 |
#define APIC_ESR_ILLREGA 0x00080 |
#define APIC_ICR 0x300 |
#define APIC_DEST_FIELD 0x00000 |
#define APIC_DEST_SELF 0x40000 |
#define APIC_DEST_ALLINC 0x80000 |
#define APIC_DEST_ALLBUT 0xC0000 |
#define APIC_DEST_RR_MASK 0x30000 |
#define APIC_DEST_RR_INVALID 0x00000 |
#define APIC_DEST_RR_INPROG 0x10000 |
#define APIC_DEST_RR_VALID 0x20000 |
#define APIC_DEST_LEVELTRIG 0x08000 |
#define APIC_DEST_ASSERT 0x04000 |
#define APIC_DEST_BUSY 0x01000 |
#define APIC_DEST_LOGICAL 0x00800 |
#define APIC_DEST_DM_FIXED 0x00000 |
#define APIC_DEST_DM_LOWEST 0x00100 |
#define APIC_DEST_DM_SMI 0x00200 |
#define APIC_DEST_DM_REMRD 0x00300 |
#define APIC_DEST_DM_NMI 0x00400 |
#define APIC_DEST_DM_INIT 0x00500 |
#define APIC_DEST_DM_STARTUP 0x00600 |
#define APIC_DEST_VECTOR_MASK 0x000FF |
#define APIC_ICR2 0x310 |
#define GET_APIC_DEST_FIELD(x) (((x)>>24)&0xFF) |
#define SET_APIC_DEST_FIELD(x) ((x)<<24) |
#define APIC_LVTT 0x320 |
#define APIC_LVT0 0x350 |
#define APIC_LVT_TIMER_PERIODIC (1<<17) |
#define APIC_LVT_MASKED (1<<16) |
#define APIC_LVT_LEVEL_TRIGGER (1<<15) |
#define APIC_LVT_REMOTE_IRR (1<<14) |
#define APIC_INPUT_POLARITY (1<<13) |
#define APIC_SEND_PENDING (1<<12) |
#define GET_APIC_DELIVERY_MODE(x) (((x)>>8)&0x7) |
#define SET_APIC_DELIVERY_MODE(x,y) (((x)&~0x700)|((y)<<8)) |
#define APIC_MODE_FIXED 0x0 |
#define APIC_MODE_NMI 0x4 |
#define APIC_MODE_EXINT 0x7 |
#define APIC_LVT1 0x360 |
#define APIC_LVERR 0x370 |
#define APIC_TMICT 0x380 |
#define APIC_TMCCT 0x390 |
#define APIC_TDCR 0x3E0 |
#define APIC_TDR_DIV_1 0xB |
#define APIC_TDR_DIV_2 0x0 |
#define APIC_TDR_DIV_4 0x1 |
#define APIC_TDR_DIV_8 0x2 |
#define APIC_TDR_DIV_16 0x3 |
#define APIC_TDR_DIV_32 0x8 |
#define APIC_TDR_DIV_64 0x9 |
#define APIC_TDR_DIV_128 0xA |
|
#endif |
/smp_lock.h
0,0 → 1,69
#ifndef __I386_SMPLOCK_H |
#define __I386_SMPLOCK_H |
|
#ifdef __SMP__ |
|
/* |
* Locking the kernel |
*/ |
|
/*
 * Acquire the (recursive) big kernel lock.
 *
 * Spins on bit 0 of "kernel_flag" with interrupts disabled.  If the
 * lock is already held by THIS processor we just nest one level
 * deeper.  While waiting, any TLB-invalidate request posted for this
 * CPU in "smp_invalidate_needed" is serviced, so the lock holder's
 * global flush cannot deadlock against us.
 */
extern __inline void lock_kernel(void)
{
    unsigned long flags;
    int proc = smp_processor_id();

    save_flags(flags);
    cli();
    /* set_bit works atomic in SMP machines */
    while(set_bit(0, (void *)&kernel_flag))
    {
        /*
         * We just start another level if we have the lock
         */
        if (proc == active_kernel_processor)
            break;
        do
        {
#ifdef __SMP_PROF__
            smp_spins[smp_processor_id()]++;
#endif
            /*
             * Doing test_bit here doesn't lock the bus
             */
            if (test_bit(proc, (void *)&smp_invalidate_needed))
                if (clear_bit(proc, (void *)&smp_invalidate_needed))
                    local_flush_tlb();
        }
        while(test_bit(0, (void *)&kernel_flag));
    }
    /*
     * We got the lock, so tell the world we are here and increment
     * the level counter
     */
    active_kernel_processor = proc;
    kernel_counter++;
    restore_flags(flags);
}
|
/*
 * Release one nesting level of the big kernel lock.  Panics if the
 * nesting counter is already zero; when the outermost level is
 * dropped, ownership is reset to NO_PROC_ID and the lock bit cleared.
 */
extern __inline void unlock_kernel(void)
{
    unsigned long flags;
    save_flags(flags);
    cli();
    /*
     * If it's the last level we have in the kernel, then
     * free the lock
     */
    if (kernel_counter == 0)
        panic("Kernel counter wrong.\n"); /* FIXME: Why is kernel_counter sometimes 0 here? */

    if(! --kernel_counter)
    {
        active_kernel_processor = NO_PROC_ID;
        clear_bit(0, (void *)&kernel_flag);
    }
    restore_flags(flags);
}
|
#endif |
#endif |
/shmparam.h
0,0 → 1,44
#ifndef _ASMI386_SHMPARAM_H |
#define _ASMI386_SHMPARAM_H |
|
/* address range for shared memory attaches if no address passed to shmat() */ |
#define SHM_RANGE_START 0x50000000 |
#define SHM_RANGE_END 0x60000000 |
|
/* |
* Format of a swap-entry for shared memory pages currently out in |
* swap space (see also mm/swap.c). |
* |
* SWP_TYPE = SHM_SWP_TYPE |
* SWP_OFFSET is used as follows: |
* |
* bits 0..6 : id of shared memory segment page belongs to (SHM_ID) |
* bits 7..21: index of page within shared memory segment (SHM_IDX) |
* (actually fewer bits get used since SHMMAX is so low) |
*/ |
|
/* |
* Keep _SHM_ID_BITS as low as possible since SHMMNI depends on it and |
* there is a static array of size SHMMNI. |
*/ |
#define _SHM_ID_BITS 7 |
#define SHM_ID_MASK ((1<<_SHM_ID_BITS)-1) |
|
#define SHM_IDX_SHIFT (_SHM_ID_BITS) |
#define _SHM_IDX_BITS 15 |
#define SHM_IDX_MASK ((1<<_SHM_IDX_BITS)-1) |
|
/* |
* _SHM_ID_BITS + _SHM_IDX_BITS must be <= 24 on the i386 and |
* SHMMAX <= (PAGE_SIZE << _SHM_IDX_BITS). |
*/ |
|
#define SHMMAX 0x2000000 /* max shared seg size (bytes) */ |
#define SHMMIN 1 /* really PAGE_SIZE */ /* min shared seg size (bytes) */ |
#define SHMMNI (1<<_SHM_ID_BITS) /* max num of segs system wide */ |
#define SHMALL /* max shm system wide (pages) */ \ |
(1<<(_SHM_IDX_BITS+_SHM_ID_BITS)) |
#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */ |
#define SHMSEG SHMMNI /* max shared segs per process */ |
|
#endif /* _ASMI386_SHMPARAM_H */ |
/sockios.h
0,0 → 1,12
#ifndef __ARCH_I386_SOCKIOS__ |
#define __ARCH_I386_SOCKIOS__ |
|
/* Socket-level I/O control calls. */ |
#define FIOSETOWN 0x8901 |
#define SIOCSPGRP 0x8902 |
#define FIOGETOWN 0x8903 |
#define SIOCGPGRP 0x8904 |
#define SIOCATMARK 0x8905 |
#define SIOCGSTAMP 0x8906 /* Get stamp */ |
|
#endif |
/locks.h
0,0 → 1,133
/* |
* SMP locks primitives for building ix86 locks |
* (not yet used). |
* |
* Alan Cox, alan@cymru.net, 1995 |
*/ |
|
/* |
* This would be much easier but far less clear and easy |
* to borrow for other processors if it was just assembler. |
*/ |
|
extern __inline__ void prim_spin_lock(struct spinlock *sp) |
{ |
int processor=smp_processor_id(); |
|
/* |
* Grab the lock bit |
*/ |
|
while(lock_set_bit(0,&sp->lock)) |
{ |
/* |
* Failed, but that's cos we own it! |
*/ |
|
if(sp->cpu==processor) |
{ |
sp->users++; |
return 0; |
} |
/* |
* Spin in the cache S state if possible |
*/ |
while(sp->lock) |
{ |
/* |
* Wait for any invalidates to go off |
*/ |
|
if(smp_invalidate_needed&(1<<processor)) |
while(lock_clear_bit(processor,&smp_invalidate_needed)) |
local_flush_tlb(); |
sp->spins++; |
} |
/* |
* Someone wrote the line, we go 'I' and get |
* the cache entry. Now try to regrab |
*/ |
} |
sp->users++;sp->cpu=processor; |
return 1; |
} |
|
/* |
* Release a spin lock |
*/ |
|
/*
 * Drop one reference to "sp"; the lock bit is actually cleared only
 * when the last (possibly recursive) user lets go.  Returns 1 if the
 * lock was freed, 0 if references remain.
 */
extern __inline__ int prim_spin_unlock(struct spinlock *sp)
{
    /* This is safe. The decrement is still guarded by the lock. A multilock would
       not be safe this way */
    if(!--sp->users)
    {
        lock_clear_bit(0,&sp->lock);sp->cpu= NO_PROC_ID;
        return 1;
    }
    return 0;
}
|
|
/* |
* Non blocking lock grab |
*/ |
|
/*
 * Try to take "sp" without spinning.  Returns 1 on success, 0 if the
 * lock was already held.  Note: unlike prim_spin_lock(), there is no
 * recursive-ownership handling here.
 */
extern __inline__ int prim_spin_lock_nb(struct spinlock *sp)
{
    if(lock_set_bit(0,&sp->lock))
        return 0;       /* Locked already */
    sp->users++;
    return 1;       /* We got the lock */
}
|
|
/* |
* These wrap the locking primitives up for usage |
*/ |
|
/*
 * Take "sp" while maintaining the per-task lock-priority chain used
 * for deadlock debugging: taking a lock whose priority is lower than
 * the task's current lock_order is a fatal ordering violation.  On a
 * fresh (non-recursive) acquisition the task's previous priority is
 * stashed in the lock so spinunlock() can restore it.
 */
extern __inline__ void spinlock(struct spinlock *sp)
{
    if(sp->priority<current->lock_order)
        panic("lock order violation: %s (%d)\n", sp->name, current->lock_order);
    if(prim_spin_lock(sp))
    {
        /*
         * We got a new lock. Update the priority chain
         */
        sp->oldpri=current->lock_order;
        current->lock_order=sp->priority;
    }
}
|
/*
 * Release "sp", enforcing that locks are released in the reverse of
 * acquisition order, and restore the task's previous lock priority
 * when the last (non-recursive) reference is dropped.
 */
extern __inline__ void spinunlock(struct spinlock *sp)
{
    if(current->lock_order!=sp->priority)
        panic("lock release order violation %s (%d)\n", sp->name, current->lock_order);
    if(prim_spin_unlock(sp))
    {
        /*
         * Update the debugging lock priority chain. We dumped
         * our last right to the lock.
         */
        current->lock_order=sp->oldpri;
    }
}
|
/*
 * Opportunistically try to take "sp" without blocking and without
 * touching the lock-order bookkeeping.
 */
extern __inline__ void spintestlock(struct spinlock *sp)
{
    /*
     * We do no sanity checks, it's legal to optimistically
     * get a lower lock.
     */
    prim_spin_lock_nb(sp);
}
|
/*
 * Release a lock taken with spintestlock().  Kept separate from
 * spinunlock() because no lock-order state was recorded on acquire.
 */
extern __inline__ void spintestunlock(struct spinlock *sp)
{
    /*
     * A testlock doesn't update the lock chain so we
     * must not update it on free
     */
    prim_spin_unlock(sp);
}
/atomic.h
0,0 → 1,67
#ifndef __ARCH_I386_ATOMIC__ |
#define __ARCH_I386_ATOMIC__ |
|
/* |
* Atomic operations that C can't guarantee us. Useful for |
* resource counting etc.. |
*/ |
|
/* On SMP every operation needs a bus lock prefix; on UP it is free. */
#ifdef __SMP__
#define LOCK "lock ; "
#else
#define LOCK ""
#endif

/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
#define __atomic_fool_gcc(x) (*(struct { int a[100]; } *)x)

typedef int atomic_t;

/* Atomically add i to *v. */
static __inline__ void atomic_add(atomic_t i, atomic_t *v)
{
    __asm__ __volatile__(
        LOCK "addl %1,%0"
        :"+m" (__atomic_fool_gcc(v))
        :"ir" (i));
}

/* Atomically subtract i from *v. */
static __inline__ void atomic_sub(atomic_t i, atomic_t *v)
{
    __asm__ __volatile__(
        LOCK "subl %1,%0"
        :"+m" (__atomic_fool_gcc(v))
        :"ir" (i));
}

/* Atomically increment *v by one. */
static __inline__ void atomic_inc(atomic_t *v)
{
    __asm__ __volatile__(
        LOCK "incl %0"
        :"+m" (__atomic_fool_gcc(v)));
}

/* Atomically decrement *v by one. */
static __inline__ void atomic_dec(atomic_t *v)
{
    __asm__ __volatile__(
        LOCK "decl %0"
        :"+m" (__atomic_fool_gcc(v)));
}

/*
 * Atomically decrement *v and report whether it reached zero:
 * returns nonzero iff the new value is 0 (tested via sete on the
 * ZF left by decl).
 */
static __inline__ int atomic_dec_and_test(atomic_t *v)
{
    unsigned char zero;

    __asm__ __volatile__(
        LOCK "decl %0; sete %1"
        :"+m" (__atomic_fool_gcc(v)), "=qm" (zero));
    return zero != 0;
}
|
#endif |
/signal.h
0,0 → 1,97
#ifndef _ASMi386_SIGNAL_H |
#define _ASMi386_SIGNAL_H |
|
typedef unsigned long sigset_t; /* at least 32 bits */ |
|
#define _NSIG 32 |
#define NSIG _NSIG |
|
#define SIGHUP 1 |
#define SIGINT 2 |
#define SIGQUIT 3 |
#define SIGILL 4 |
#define SIGTRAP 5 |
#define SIGABRT 6 |
#define SIGIOT 6 |
#define SIGBUS 7 |
#define SIGFPE 8 |
#define SIGKILL 9 |
#define SIGUSR1 10 |
#define SIGSEGV 11 |
#define SIGUSR2 12 |
#define SIGPIPE 13 |
#define SIGALRM 14 |
#define SIGTERM 15 |
#define SIGSTKFLT 16 |
#define SIGCHLD 17 |
#define SIGCONT 18 |
#define SIGSTOP 19 |
#define SIGTSTP 20 |
#define SIGTTIN 21 |
#define SIGTTOU 22 |
#define SIGURG 23 |
#define SIGXCPU 24 |
#define SIGXFSZ 25 |
#define SIGVTALRM 26 |
#define SIGPROF 27 |
#define SIGWINCH 28 |
#define SIGIO 29 |
#define SIGPOLL SIGIO |
/* |
#define SIGLOST 29 |
*/ |
#define SIGPWR 30 |
#define SIGUNUSED 31 |
|
/* |
* sa_flags values: SA_STACK is not currently supported, but will allow the |
* usage of signal stacks by using the (now obsolete) sa_restorer field in |
* the sigaction structure as a stack pointer. This is now possible due to |
* the changes in signal handling. LBT 010493. |
* SA_INTERRUPT is a no-op, but left due to historical reasons. Use the |
* SA_RESTART flag to get restarting signals (which were the default long ago) |
* SA_SHIRQ flag is for shared interrupt support on PCI and EISA. |
*/ |
#define SA_NOCLDSTOP 1 |
#define SA_SHIRQ 0x04000000 |
#define SA_STACK 0x08000000 |
#define SA_RESTART 0x10000000 |
#define SA_INTERRUPT 0x20000000 |
#define SA_NOMASK 0x40000000 |
#define SA_ONESHOT 0x80000000 |
|
#ifdef __KERNEL__ |
/* |
* These values of sa_flags are used only by the kernel as part of the |
* irq handling routines. |
* |
* SA_INTERRUPT is also used by the irq handling routines. |
*/ |
#define SA_PROBE SA_ONESHOT |
#define SA_SAMPLE_RANDOM SA_RESTART |
#endif |
|
|
#define SIG_BLOCK 0 /* for blocking signals */ |
#define SIG_UNBLOCK 1 /* for unblocking signals */ |
#define SIG_SETMASK 2 /* for setting the signal mask */ |
|
/* Type of a signal handler. */ |
typedef void (*__sighandler_t)(int); |
|
#define SIG_DFL ((__sighandler_t)0) /* default signal handling */ |
#define SIG_IGN ((__sighandler_t)1) /* ignore signal */ |
#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */ |
|
/* Old-style (single-word mask) signal action descriptor. */
struct sigaction {
    __sighandler_t sa_handler;  /* SIG_DFL, SIG_IGN, or a handler function */
    sigset_t sa_mask;       /* presumably extra signals blocked during delivery — confirm against the signal code */
    unsigned long sa_flags;     /* SA_* values defined above */
    void (*sa_restorer)(void);  /* obsolete; see the SA_STACK note above */
};
|
#ifdef __KERNEL__ |
#include <asm/sigcontext.h> |
#endif |
|
#endif |
/ptrace.h
0,0 → 1,60
#ifndef _I386_PTRACE_H |
#define _I386_PTRACE_H |
|
#define EBX 0 |
#define ECX 1 |
#define EDX 2 |
#define ESI 3 |
#define EDI 4 |
#define EBP 5 |
#define EAX 6 |
#define DS 7 |
#define ES 8 |
#define FS 9 |
#define GS 10 |
#define ORIG_EAX 11 |
#define EIP 12 |
#define CS 13 |
#define EFL 14 |
#define UESP 15 |
#define SS 16 |
|
|
/* this struct defines the way the registers are stored on the |
stack during a system call. */ |
|
struct pt_regs {
    /* General registers as saved on the stack during a system call
       (see the comment above the struct). */
    long ebx;
    long ecx;
    long edx;
    long esi;
    long edi;
    long ebp;
    long eax;
    /*
     * Segment registers are 16 bits wide; the __*u members cover the
     * upper half of the 32-bit stack slot each one occupies —
     * presumably padding only; confirm against the kernel entry code.
     */
    unsigned short ds, __dsu;
    unsigned short es, __esu;
    unsigned short fs, __fsu;
    unsigned short gs, __gsu;
    long orig_eax;  /* NOTE(review): looks like %eax as it was on entry, before being overwritten by the return value — confirm */
    long eip;
    unsigned short cs, __csu;
    long eflags;
    long esp;
    unsigned short ss, __ssu;
};
|
/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */ |
#define PTRACE_GETREGS 12 |
#define PTRACE_SETREGS 13 |
#define PTRACE_GETFPREGS 14 |
#define PTRACE_SETFPREGS 15 |
|
#ifdef __KERNEL__ |
#define user_mode(regs) ((VM_MASK & (regs)->eflags) || (3 & (regs)->cs)) |
#define instruction_pointer(regs) ((regs)->eip) |
extern void show_regs(struct pt_regs *); |
struct task_struct; |
extern void get_pt_regs_for_task(struct pt_regs *, struct task_struct *p); |
#endif |
|
#endif |
/pgtable.h
0,0 → 1,532
#ifndef _I386_PGTABLE_H |
#define _I386_PGTABLE_H |
|
#include <linux/config.h> |
|
/* |
* Define USE_PENTIUM_MM if you want the 4MB page table optimizations. |
* This works only on a intel Pentium. |
*/ |
#define USE_PENTIUM_MM 1 |
|
/* |
* The Linux memory management assumes a three-level page table setup. On |
* the i386, we use that, but "fold" the mid level into the top-level page |
* table, so that we physically have the same two-level page table as the |
* i386 mmu expects. |
* |
* This file contains the functions and defines necessary to modify and use |
* the i386 page table tree. |
*/ |
|
#ifndef __ASSEMBLY__ |
|
/* Caches aren't brain-dead on the intel. */ |
#define flush_cache_all() do { } while (0) |
#define flush_cache_mm(mm) do { } while (0) |
#define flush_cache_range(mm, start, end) do { } while (0) |
#define flush_cache_page(vma, vmaddr) do { } while (0) |
#define flush_page_to_ram(page) do { } while (0) |
#define flush_pages_to_ram(page,n) do { } while (0) |
|
/* |
* TLB flushing: |
* |
* - flush_tlb() flushes the current mm struct TLBs |
* - flush_tlb_all() flushes all processes TLBs |
* - flush_tlb_mm(mm) flushes the specified mm context TLB's |
* - flush_tlb_page(vma, vmaddr) flushes one page |
* - flush_tlb_range(mm, start, end) flushes a range of pages |
* |
* ..but the i386 has somewhat limited tlb flushing capabilities, |
* and page-granular flushes are available only on i486 and up. |
*/ |
|
#define __flush_tlb() \ |
do { unsigned long tmpreg; __asm__ __volatile__("movl %%cr3,%0\n\tmovl %0,%%cr3":"=r" (tmpreg) : :"memory"); } while (0) |
|
/* |
* NOTE! The intel "invlpg" semantics are extremely strange. The |
* chip will add the segment base to the memory address, even though |
* no segment checking is done. We correct for this by using an |
* offset of -__PAGE_OFFSET that will wrap around the kernel segment base |
* of __PAGE_OFFSET to get the correct address (it will always be outside |
* the kernel segment, but we're only interested in the final linear |
* address. |
*/ |
#define __invlpg_mem(addr) \ |
(*((char *)(addr)-__PAGE_OFFSET)) |
#define __invlpg(addr) \ |
__asm__ __volatile__("invlpg %0": :"m" (__invlpg_mem(addr))) |
|
/* |
* The i386 doesn't have a page-granular invalidate. Invalidate |
* everything for it. |
*/ |
#ifdef CONFIG_M386 |
#define __flush_tlb_one(addr) __flush_tlb() |
#else |
#define __flush_tlb_one(addr) __invlpg(addr) |
#endif |
|
#ifndef __SMP__ |
|
#define flush_tlb() __flush_tlb() |
#define flush_tlb_all() __flush_tlb() |
#define local_flush_tlb() __flush_tlb() |
|
static inline void flush_tlb_mm(struct mm_struct *mm) |
{ |
if (mm == current->mm) |
__flush_tlb(); |
} |
|
static inline void flush_tlb_page(struct vm_area_struct *vma, |
unsigned long addr) |
{ |
if (vma->vm_mm == current->mm) |
__flush_tlb_one(addr); |
} |
|
static inline void flush_tlb_range(struct mm_struct *mm, |
unsigned long start, unsigned long end) |
{ |
if (mm == current->mm) |
__flush_tlb(); |
} |
|
#else |
|
/* |
* We aren't very clever about this yet - SMP could certainly |
* avoid some global flushes.. |
*/ |
|
#include <asm/smp.h> |
|
#define local_flush_tlb() \ |
__flush_tlb() |
|
|
#define CLEVER_SMP_INVALIDATE |
#ifdef CLEVER_SMP_INVALIDATE |
|
/* |
* Smarter SMP flushing macros. |
* c/o Linus Torvalds. |
* |
* These mean you can really definitely utterly forget about |
* writing to user space from interrupts. (Its not allowed anyway). |
*/ |
|
static inline void flush_tlb_current_task(void) |
{ |
if (current->mm->count == 1) /* just one copy of this mm */ |
local_flush_tlb(); /* and that's us, so.. */ |
else |
smp_flush_tlb(); |
} |
|
#define flush_tlb() flush_tlb_current_task() |
|
#define flush_tlb_all() smp_flush_tlb() |
|
static inline void flush_tlb_mm(struct mm_struct * mm) |
{ |
if (mm == current->mm && mm->count == 1) |
local_flush_tlb(); |
else |
smp_flush_tlb(); |
} |
|
static inline void flush_tlb_page(struct vm_area_struct * vma, |
unsigned long va) |
{ |
if (vma->vm_mm == current->mm && current->mm->count == 1) |
__flush_tlb_one(va); |
else |
smp_flush_tlb(); |
} |
|
/* No ranged invalidate on i386: a range flush is a full mm flush. */
static inline void flush_tlb_range(struct mm_struct * mm,
    unsigned long start, unsigned long end)
{
    flush_tlb_mm(mm);
}
|
|
#else |
|
/*
 * Dumb fallback (CLEVER_SMP_INVALIDATE disabled): every flavour of
 * TLB flush degrades to a global cross-CPU flush via smp_flush_tlb().
 */
#define flush_tlb() \
    smp_flush_tlb()

#define flush_tlb_all() flush_tlb()

static inline void flush_tlb_mm(struct mm_struct *mm)
{
    flush_tlb();
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
    unsigned long addr)
{
    flush_tlb();
}

static inline void flush_tlb_range(struct mm_struct *mm,
    unsigned long start, unsigned long end)
{
    flush_tlb();
}
#endif |
#endif |
#endif /* !__ASSEMBLY__ */ |
|
|
/* Certain architectures need to do special things when pte's |
* within a page table are directly modified. Thus, the following |
* hook is made available. |
*/ |
#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval)) |
|
/* PMD_SHIFT determines the size of the area a second-level page table can map */ |
#define PMD_SHIFT 22 |
#define PMD_SIZE (1UL << PMD_SHIFT) |
#define PMD_MASK (~(PMD_SIZE-1)) |
|
/* PGDIR_SHIFT determines what a third-level page table entry can map */ |
#define PGDIR_SHIFT 22 |
#define PGDIR_SIZE (1UL << PGDIR_SHIFT) |
#define PGDIR_MASK (~(PGDIR_SIZE-1)) |
|
/* |
* entries per page directory level: the i386 is two-level, so |
* we don't really have any PMD directory physically. |
*/ |
#define PTRS_PER_PTE 1024 |
#define PTRS_PER_PMD 1 |
#define PTRS_PER_PGD 1024 |
|
/* |
* pgd entries used up by user/kernel: |
*/ |
|
#if CONFIG_MAX_MEMSIZE & 3 |
#error Invalid max physical memory size requested |
#endif |
|
#define USER_PGD_PTRS ((unsigned long)__PAGE_OFFSET >> PGDIR_SHIFT) |
#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS) |
#define __USER_PGD_PTRS (__PAGE_OFFSET >> PGDIR_SHIFT) |
#define __KERNEL_PGD_PTRS (PTRS_PER_PGD-__USER_PGD_PTRS) |
|
#ifndef __ASSEMBLY__ |
|
/* Just any arbitrary offset to the start of the vmalloc VM area: the |
* current 8MB value just means that there will be a 8MB "hole" after the |
* physical memory until the kernel virtual memory starts. That means that |
* any out-of-bounds memory accesses will hopefully be caught. |
* The vmalloc() routines leaves a hole of 4kB between each vmalloced |
* area for the same reason. ;) |
*/ |
#define VMALLOC_OFFSET (8*1024*1024) |
#define VMALLOC_START ((high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)) |
#define VMALLOC_VMADDR(x) (TASK_SIZE + (unsigned long)(x)) |
|
/* |
* The 4MB page is guessing.. Detailed in the infamous "Chapter H" |
* of the Pentium details, but assuming intel did the straightforward |
* thing, this bit set in the page directory entry just means that |
* the page directory entry points directly to a 4MB-aligned block of |
* memory. |
*/ |
#define _PAGE_PRESENT 0x001 |
#define _PAGE_RW 0x002 |
#define _PAGE_USER 0x004 |
#define _PAGE_PCD 0x010 |
#define _PAGE_ACCESSED 0x020 |
#define _PAGE_DIRTY 0x040 |
#define _PAGE_4M 0x080 /* 4 MB page, Pentium+.. */ |
|
#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY) |
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) |
|
#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED) |
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED) |
#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) |
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) |
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED) |
|
/* |
* The i386 can't do page protection for execute, and considers that the same are read. |
* Also, write permissions imply read permissions. This is the closest we can get.. |
*/ |
#define __P000 PAGE_NONE |
#define __P001 PAGE_READONLY |
#define __P010 PAGE_COPY |
#define __P011 PAGE_COPY |
#define __P100 PAGE_READONLY |
#define __P101 PAGE_READONLY |
#define __P110 PAGE_COPY |
#define __P111 PAGE_COPY |
|
#define __S000 PAGE_NONE |
#define __S001 PAGE_READONLY |
#define __S010 PAGE_SHARED |
#define __S011 PAGE_SHARED |
#define __S100 PAGE_READONLY |
#define __S101 PAGE_READONLY |
#define __S110 PAGE_SHARED |
#define __S111 PAGE_SHARED |
|
/* |
* Define this if things work differently on a i386 and a i486: |
* it will (on a i486) warn about kernel memory accesses that are |
* done without a 'verify_area(VERIFY_WRITE,..)' |
*/ |
#undef TEST_VERIFY_AREA

/* page table for 0-4MB for everybody */
extern unsigned long pg0[1024];
/* zero page used for uninitialized stuff */
extern unsigned long empty_zero_page[1024];

/*
 * BAD_PAGETABLE is used when we need a bogus page-table, while
 * BAD_PAGE is used for a bogus page.
 *
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern pte_t __bad_page(void);
extern pte_t * __bad_pagetable(void);

#define BAD_PAGETABLE __bad_pagetable()
#define BAD_PAGE __bad_page()
#define ZERO_PAGE ((unsigned long) empty_zero_page)

/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR (8*sizeof(unsigned long))

/* to align the pointer to a pointer address */
#define PTR_MASK (~(sizeof(void*)-1))

/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
/* 64-bit machines, beware! SRB. */
#define SIZEOF_PTR_LOG2 2

/* to find an entry in a page-table: byte offset of the pte slot for
 * 'address' within its pte page (pointer-aligned, below PAGE_SIZE) */
#define PAGE_PTR(address) \
((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)

/* to set the page-dir: record the new page directory in the task's TSS
 * and, if it is the running task, reload %cr3 (flushes the TLB) */
#define SET_PAGE_DIR(tsk,pgdir) \
do { \
	(tsk)->tss.cr3 = (unsigned long) (pgdir); \
	if ((tsk) == current) \
		__asm__ __volatile__("movl %0,%%cr3": :"r" (pgdir)); \
} while (0)

/* pte entry predicates / clearing (operate on the raw entry value) */
#define pte_none(x) (!pte_val(x))
#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
#define pte_clear(xp) do { pte_val(*(xp)) = 0; } while (0)

/* pmd entry predicates: "bad" = low bits are not exactly _PAGE_TABLE */
#define pmd_none(x) (!pmd_val(x))
#define pmd_bad(x) ((pmd_val(x) & ~PAGE_MASK) != _PAGE_TABLE)
#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp) do { pmd_val(*(xp)) = 0; } while (0)
|
/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
extern inline int pgd_none(pgd_t pgd) { return 0; }
extern inline int pgd_bad(pgd_t pgd) { return 0; }
extern inline int pgd_present(pgd_t pgd) { return 1; }
extern inline void pgd_clear(pgd_t * pgdp) { }

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
/* Note: i386 has no separate read/exec bits, so pte_read/pte_exec both
 * test _PAGE_USER, and the rd/ex-protect pair both clear it. */
extern inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
extern inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
extern inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
extern inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }

/* Builders: each takes a pte by value, flips one bit, returns the copy. */
extern inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= ~_PAGE_RW; return pte; }
extern inline pte_t pte_rdprotect(pte_t pte) { pte_val(pte) &= ~_PAGE_USER; return pte; }
extern inline pte_t pte_exprotect(pte_t pte) { pte_val(pte) &= ~_PAGE_USER; return pte; }
extern inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
extern inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
extern inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_RW; return pte; }
extern inline pte_t pte_mkread(pte_t pte) { pte_val(pte) |= _PAGE_USER; return pte; }
extern inline pte_t pte_mkexec(pte_t pte) { pte_val(pte) |= _PAGE_USER; return pte; }
extern inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; }
extern inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
|
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
/* Combine a (page-aligned) physical page address with protection bits. */
extern inline pte_t mk_pte(unsigned long page, pgprot_t pgprot)
{ pte_t pte; pte_val(pte) = page | pgprot_val(pgprot); return pte; }

/* Replace the protection bits of a pte, keeping frame + A/D bits
 * (_PAGE_CHG_MASK). */
extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }

/* Strip the low flag bits, leaving the page address. */
extern inline unsigned long pte_page(pte_t pte)
{ return pte_val(pte) & PAGE_MASK; }

extern inline unsigned long pmd_page(pmd_t pmd)
{ return pmd_val(pmd) & PAGE_MASK; }

/* to find an entry in a page-table-directory */
extern inline pgd_t * pgd_offset(struct mm_struct * mm, unsigned long address)
{
	return mm->pgd + (address >> PGDIR_SHIFT);
}

/* Find an entry in the second-level page table.. */
/* (folded two-level setup: the pmd lives inside the pgd entry) */
extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
	return (pmd_t *) dir;
}

/* Find an entry in the third-level page table.. */
extern inline pte_t * pte_offset(pmd_t * dir, unsigned long address)
{
	return (pte_t *) pmd_page(*dir) + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}
|
/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any.
 */
extern inline void pte_free_kernel(pte_t * pte)
{
	free_page((unsigned long) pte);
}

/* printk format used when a corrupt pmd entry is found (defined elsewhere). */
extern const char bad_pmd_string[];

/*
 * Return the kernel pte slot for 'address' under 'pmd', allocating the
 * pte page if the pmd is empty.  Returns NULL on allocation failure or
 * a corrupt pmd (in both cases the pmd is pointed at BAD_PAGETABLE).
 */
extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
{
	/* reduce 'address' to its pte index within the page table */
	address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	if (pmd_none(*pmd)) {
		pte_t * page = (pte_t *) get_free_page(GFP_KERNEL);
		/* get_free_page may sleep: re-check that nobody installed a
		 * pte page for this pmd in the meantime */
		if (pmd_none(*pmd)) {
			if (page) {
				pmd_val(*pmd) = _PAGE_TABLE | (unsigned long) page;
				return page + address;
			}
			/* out of memory: park the pmd on the bogus table */
			pmd_val(*pmd) = _PAGE_TABLE | (unsigned long) BAD_PAGETABLE;
			return NULL;
		}
		/* lost the race - drop our page and use the installed one */
		free_page((unsigned long) page);
	}
	if (pmd_bad(*pmd)) {
		printk(bad_pmd_string, pmd_val(*pmd));
		pmd_val(*pmd) = _PAGE_TABLE | (unsigned long) BAD_PAGETABLE;
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + address;
}

/*
 * allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so has no extra memory associated with it.
 */
extern inline void pmd_free_kernel(pmd_t * pmd)
{
	pmd_val(*pmd) = 0;
}

extern inline pmd_t * pmd_alloc_kernel(pgd_t * pgd, unsigned long address)
{
	return (pmd_t *) pgd;
}

extern inline void pte_free(pte_t * pte)
{
	free_page((unsigned long) pte);
}
|
/*
 * User-space variant of pte_alloc_kernel(): return the pte slot for
 * 'address' under 'pmd', allocating (and zeroing) the pte page when the
 * pmd is empty.  Returns NULL on OOM or a corrupt pmd.
 */
extern inline pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
{
	/* byte offset of the pte slot inside the pte page
	 * (index * sizeof(pte), with sizeof(pte) == 4) */
	address = (address >> (PAGE_SHIFT-2)) & 4*(PTRS_PER_PTE - 1);

repeat:
	if (pmd_none(*pmd))
		goto getnew;
	if (pmd_bad(*pmd))
		goto fix;
	return (pte_t *) (pmd_page(*pmd) + address);

getnew:
{
	unsigned long page = __get_free_page(GFP_KERNEL);
	/* __get_free_page may sleep - someone may have filled the pmd */
	if (!pmd_none(*pmd))
		goto freenew;
	if (!page)
		goto oom;
	memset((void *) page, 0, PAGE_SIZE);
	pmd_val(*pmd) = _PAGE_TABLE | page;
	return (pte_t *) (page + address);
freenew:
	/* lost the race: release our page and re-inspect the pmd */
	free_page(page);
	goto repeat;
}

fix:
	printk(bad_pmd_string, pmd_val(*pmd));
	/* fall through: both paths park the pmd on BAD_PAGETABLE */
oom:
	pmd_val(*pmd) = _PAGE_TABLE | (unsigned long) BAD_PAGETABLE;
	return NULL;
}
|
/*
 * allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so has no extra memory associated with it.
 */
extern inline void pmd_free(pmd_t * pmd)
{
	pmd_val(*pmd) = 0;
}

extern inline pmd_t * pmd_alloc(pgd_t * pgd, unsigned long address)
{
	return (pmd_t *) pgd;
}

/* A page directory is exactly one page. */
extern inline void pgd_free(pgd_t * pgd)
{
	free_page((unsigned long) pgd);
}

extern inline pgd_t * pgd_alloc(void)
{
	return (pgd_t *) get_free_page(GFP_KERNEL);
}

/* The kernel's own page directory (defined in arch setup code). */
extern pgd_t swapper_pg_dir[1024];

/*
 * The i386 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
extern inline void update_mmu_cache(struct vm_area_struct * vma,
	unsigned long address, pte_t pte)
{
}

/* Swap entry layout: type in bits 1..7, offset in bits 8 and up.
 * Bit 0 is left clear (presumably so the entry is never "present" -
 * confirm against _PAGE_PRESENT). */
#define SWP_TYPE(entry) (((entry) >> 1) & 0x7f)
#define SWP_OFFSET(entry) ((entry) >> 8)
#define SWP_ENTRY(type,offset) (((type) << 1) | ((offset) << 8))
|
#endif /* !__ASSEMBLY__ */ |
|
#endif /* _I386_PAGE_H */ |
/termios.h
0,0 → 1,92
#ifndef _I386_TERMIOS_H |
#define _I386_TERMIOS_H |
|
#include <asm/termbits.h> |
#include <asm/ioctls.h> |
|
/* Terminal window size, as reported/set by TIOCGWINSZ/TIOCSWINSZ. */
struct winsize {
	unsigned short ws_row;
	unsigned short ws_col;
	unsigned short ws_xpixel;
	unsigned short ws_ypixel;
};

#define NCC 8
/* Legacy SysV "termio" - 16-bit flag fields and only NCC control
 * characters, unlike the native struct termios (see trans_* below). */
struct termio {
	unsigned short c_iflag;		/* input mode flags */
	unsigned short c_oflag;		/* output mode flags */
	unsigned short c_cflag;		/* control mode flags */
	unsigned short c_lflag;		/* local mode flags */
	unsigned char c_line;		/* line discipline */
	unsigned char c_cc[NCC];	/* control characters */
};
|
#ifdef __KERNEL__
/*	intr=^C		quit=^\		erase=del	kill=^U
	eof=^D		vtime=\0	vmin=\1		sxtc=\0
	start=^Q	stop=^S		susp=^Z		eol=\0
	reprint=^R	discard=^U	werase=^W	lnext=^V
	eol2=\0
*/
/* Default c_cc[] contents for a freshly-initialized terminal. */
#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
#endif

/* modem lines (TIOCMGET/TIOCMSET bit values) */
#define TIOCM_LE	0x001
#define TIOCM_DTR	0x002
#define TIOCM_RTS	0x004
#define TIOCM_ST	0x008
#define TIOCM_SR	0x010
#define TIOCM_CTS	0x020
#define TIOCM_CAR	0x040
#define TIOCM_RNG	0x080
#define TIOCM_DSR	0x100
#define TIOCM_CD	TIOCM_CAR
#define TIOCM_RI	TIOCM_RNG

/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */

/* line disciplines */
#define N_TTY		0
#define N_SLIP		1
#define N_MOUSE		2
#define N_PPP		3
#define N_STRIP		4
#define N_AX25		5
|
#ifdef __KERNEL__ |
|
#include <linux/string.h> |
|
/*
 * Translate a "termio" structure into a "termios". Ugh.
 */
extern inline void trans_from_termio(struct termio * termio,
	struct termios * termios)
{
/* Store the 16-bit termio flag word into the low half of the 32-bit
 * tcflag_t, leaving the upper half untouched.  The pointer-punning is
 * little-endian specific, which is fine on i386. */
#define SET_LOW_BITS(x,y)	(*(unsigned short *)(&x) = (y))
	SET_LOW_BITS(termios->c_iflag, termio->c_iflag);
	SET_LOW_BITS(termios->c_oflag, termio->c_oflag);
	SET_LOW_BITS(termios->c_cflag, termio->c_cflag);
	SET_LOW_BITS(termios->c_lflag, termio->c_lflag);
#undef SET_LOW_BITS
	/* termio only carries the first NCC control chars; the remaining
	 * c_cc entries (and c_line) keep their previous termios values. */
	memcpy(termios->c_cc, termio->c_cc, NCC);
}
|
/* |
* Translate a "termios" structure into a "termio". Ugh. |
*/ |
extern inline void trans_to_termio(struct termios * termios, |
struct termio * termio) |
{ |
termio->c_iflag = termios->c_iflag; |
termio->c_oflag = termios->c_oflag; |
termio->c_cflag = termios->c_cflag; |
termio->c_lflag = termios->c_lflag; |
termio->c_line = termios->c_line; |
memcpy(termio->c_cc, termios->c_cc, NCC); |
} |
|
#endif /* __KERNEL__ */ |
|
#endif /* _I386_TERMIOS_H */ |
/mtrr.h
0,0 → 1,65
/* Generic MTRR (Memory Type Range Register) ioctls. |
|
Copyright (C) 1997 Richard Gooch |
|
This library is free software; you can redistribute it and/or |
modify it under the terms of the GNU Library General Public |
License as published by the Free Software Foundation; either |
version 2 of the License, or (at your option) any later version. |
|
This library is distributed in the hope that it will be useful, |
but WITHOUT ANY WARRANTY; without even the implied warranty of |
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
Library General Public License for more details. |
|
You should have received a copy of the GNU Library General Public |
License along with this library; if not, write to the Free |
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
|
Richard Gooch may be reached by email at rgooch@atnf.csiro.au |
The postal address is: |
Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia. |
|
modified by Mathias Fr"ohlich, Jan, 1998 |
<frohlich@na.uni-tuebingen.de> |
*/ |
#ifndef _LINUX_MTRR_H |
#define _LINUX_MTRR_H |
|
/* These are the region types */
#define MTRR_TYPE_UNCACHABLE 0
#define MTRR_TYPE_WRCOMB     1
/*#define MTRR_TYPE_         2*/
/*#define MTRR_TYPE_         3*/
#define MTRR_TYPE_WRTHROUGH  4
#define MTRR_TYPE_WRPROT     5
#define MTRR_TYPE_WRBACK     6
#define MTRR_NUM_TYPES       7

/* Declared first so the "unused" attribute applies in every includer. */
static char *attrib_to_str (int x) __attribute__ ((unused));

/*
 * Map an MTRR region type to its human-readable name.
 * Reserved types (2, 3) and anything out of range map to "?".
 * Uses the MTRR_TYPE_* macros above rather than magic numbers so the
 * table and the switch cannot drift apart.
 */
static char *attrib_to_str (int x)
{
	switch (x) {
	case MTRR_TYPE_UNCACHABLE: return "uncachable";
	case MTRR_TYPE_WRCOMB:     return "write-combining";
	case MTRR_TYPE_WRTHROUGH:  return "write-through";
	case MTRR_TYPE_WRPROT:     return "write-protect";
	case MTRR_TYPE_WRBACK:     return "write-back";
	default:                   return "?";
	}
}   /*  End Function attrib_to_str  */
|
#ifdef __KERNEL__ |
|
#ifdef CONFIG_MTRR |
|
extern void check_mtrr_config(void); |
extern void init_mtrr_config(void); |
/* extern void set_mtrr_config(void); */ |
|
#endif /* CONFIG_MTRR */ |
|
#endif /* __KERNEL__ */ |
|
#endif /* _LINUX_MTRR_H */ |
/errno.h
0,0 → 1,132
#ifndef _I386_ERRNO_H |
#define _I386_ERRNO_H |
|
/* Classic Unix error numbers (1-34). */
#define EPERM		 1	/* Operation not permitted */
#define ENOENT		 2	/* No such file or directory */
#define ESRCH		 3	/* No such process */
#define EINTR		 4	/* Interrupted system call */
#define EIO		 5	/* I/O error */
#define ENXIO		 6	/* No such device or address */
#define E2BIG		 7	/* Arg list too long */
#define ENOEXEC		 8	/* Exec format error */
#define EBADF		 9	/* Bad file number */
#define ECHILD		10	/* No child processes */
#define EAGAIN		11	/* Try again */
#define ENOMEM		12	/* Out of memory */
#define EACCES		13	/* Permission denied */
#define EFAULT		14	/* Bad address */
#define ENOTBLK		15	/* Block device required */
#define EBUSY		16	/* Device or resource busy */
#define EEXIST		17	/* File exists */
#define EXDEV		18	/* Cross-device link */
#define ENODEV		19	/* No such device */
#define ENOTDIR		20	/* Not a directory */
#define EISDIR		21	/* Is a directory */
#define EINVAL		22	/* Invalid argument */
#define ENFILE		23	/* File table overflow */
#define EMFILE		24	/* Too many open files */
#define ENOTTY		25	/* Not a typewriter */
#define ETXTBSY		26	/* Text file busy */
#define EFBIG		27	/* File too large */
#define ENOSPC		28	/* No space left on device */
#define ESPIPE		29	/* Illegal seek */
#define EROFS		30	/* Read-only file system */
#define EMLINK		31	/* Too many links */
#define EPIPE		32	/* Broken pipe */
#define EDOM		33	/* Math argument out of domain of func */
#define ERANGE		34	/* Math result not representable */
/* Linux extensions (35 and up). */
#define EDEADLK		35	/* Resource deadlock would occur */
#define ENAMETOOLONG	36	/* File name too long */
#define ENOLCK		37	/* No record locks available */
#define ENOSYS		38	/* Function not implemented */
#define ENOTEMPTY	39	/* Directory not empty */
#define ELOOP		40	/* Too many symbolic links encountered */
#define EWOULDBLOCK	EAGAIN	/* Operation would block */
#define ENOMSG		42	/* No message of desired type */
#define EIDRM		43	/* Identifier removed */
#define ECHRNG		44	/* Channel number out of range */
#define EL2NSYNC	45	/* Level 2 not synchronized */
#define EL3HLT		46	/* Level 3 halted */
#define EL3RST		47	/* Level 3 reset */
#define ELNRNG		48	/* Link number out of range */
#define EUNATCH		49	/* Protocol driver not attached */
#define ENOCSI		50	/* No CSI structure available */
#define EL2HLT		51	/* Level 2 halted */
#define EBADE		52	/* Invalid exchange */
#define EBADR		53	/* Invalid request descriptor */
#define EXFULL		54	/* Exchange full */
#define ENOANO		55	/* No anode */
#define EBADRQC		56	/* Invalid request code */
#define EBADSLT		57	/* Invalid slot */

/* Alias, not a distinct value. */
#define EDEADLOCK	EDEADLK

#define EBFONT		59	/* Bad font file format */
#define ENOSTR		60	/* Device not a stream */
#define ENODATA		61	/* No data available */
#define ETIME		62	/* Timer expired */
#define ENOSR		63	/* Out of streams resources */
#define ENONET		64	/* Machine is not on the network */
#define ENOPKG		65	/* Package not installed */
#define EREMOTE		66	/* Object is remote */
#define ENOLINK		67	/* Link has been severed */
#define EADV		68	/* Advertise error */
#define ESRMNT		69	/* Srmount error */
#define ECOMM		70	/* Communication error on send */
#define EPROTO		71	/* Protocol error */
#define EMULTIHOP	72	/* Multihop attempted */
#define EDOTDOT		73	/* RFS specific error */
#define EBADMSG		74	/* Not a data message */
#define EOVERFLOW	75	/* Value too large for defined data type */
#define ENOTUNIQ	76	/* Name not unique on network */
#define EBADFD		77	/* File descriptor in bad state */
#define EREMCHG		78	/* Remote address changed */
#define ELIBACC		79	/* Can not access a needed shared library */
#define ELIBBAD		80	/* Accessing a corrupted shared library */
#define ELIBSCN		81	/* .lib section in a.out corrupted */
#define ELIBMAX		82	/* Attempting to link in too many shared libraries */
#define ELIBEXEC	83	/* Cannot exec a shared library directly */
#define EILSEQ		84	/* Illegal byte sequence */
#define ERESTART	85	/* Interrupted system call should be restarted */
#define ESTRPIPE	86	/* Streams pipe error */
#define EUSERS		87	/* Too many users */
/* Networking / sockets (88 and up). */
#define ENOTSOCK	88	/* Socket operation on non-socket */
#define EDESTADDRREQ	89	/* Destination address required */
#define EMSGSIZE	90	/* Message too long */
#define EPROTOTYPE	91	/* Protocol wrong type for socket */
#define ENOPROTOOPT	92	/* Protocol not available */
#define EPROTONOSUPPORT	93	/* Protocol not supported */
#define ESOCKTNOSUPPORT	94	/* Socket type not supported */
#define EOPNOTSUPP	95	/* Operation not supported on transport endpoint */
#define EPFNOSUPPORT	96	/* Protocol family not supported */
#define EAFNOSUPPORT	97	/* Address family not supported by protocol */
#define EADDRINUSE	98	/* Address already in use */
#define EADDRNOTAVAIL	99	/* Cannot assign requested address */
#define ENETDOWN	100	/* Network is down */
#define ENETUNREACH	101	/* Network is unreachable */
#define ENETRESET	102	/* Network dropped connection because of reset */
#define ECONNABORTED	103	/* Software caused connection abort */
#define ECONNRESET	104	/* Connection reset by peer */
#define ENOBUFS		105	/* No buffer space available */
#define EISCONN		106	/* Transport endpoint is already connected */
#define ENOTCONN	107	/* Transport endpoint is not connected */
#define ESHUTDOWN	108	/* Cannot send after transport endpoint shutdown */
#define ETOOMANYREFS	109	/* Too many references: cannot splice */
#define ETIMEDOUT	110	/* Connection timed out */
#define ECONNREFUSED	111	/* Connection refused */
#define EHOSTDOWN	112	/* Host is down */
#define EHOSTUNREACH	113	/* No route to host */
#define EALREADY	114	/* Operation already in progress */
#define EINPROGRESS	115	/* Operation now in progress */
#define ESTALE		116	/* Stale NFS file handle */
#define EUCLEAN		117	/* Structure needs cleaning */
#define ENOTNAM		118	/* Not a XENIX named type file */
#define ENAVAIL		119	/* No XENIX semaphores available */
#define EISNAM		120	/* Is a named type file */
#define EREMOTEIO	121	/* Remote I/O error */
#define EDQUOT		122	/* Quota exceeded */

#define ENOMEDIUM	123	/* No medium found */
#define EMEDIUMTYPE	124	/* Wrong medium type */
#endif |
/string-486.h
0,0 → 1,702
#ifndef _I386_STRING_I486_H_ |
#define _I386_STRING_I486_H_ |
|
/* |
* This string-include defines all string functions as inline |
* functions. Use gcc. It also assumes ds=es=data space, this should be |
* normal. Most of the string-functions are rather heavily hand-optimized, |
* see especially strtok,strstr,str[c]spn. They should work, but are not |
* very easy to understand. Everything is done entirely within the register |
* set, making the functions fast and clean. |
* |
* Copyright (C) 1991, 1992 Linus Torvalds |
* Revised and optimized for i486/pentium |
* 1994/03/15 by Alberto Vignani/Davide Parodi @crf.it |
* |
* Split into 2 CPU specific files by Alan Cox to keep #ifdef noise down. |
*/ |
|
#define __HAVE_ARCH_STRCPY
/*
 * Copy the NUL-terminated string 'src' to 'dest' (terminator included).
 * Simple byte-at-a-time loop kept entirely in registers.
 */
extern inline char * strcpy(char * dest,const char *src)
{
	register char *tmp= (char *)dest;
	register char dummy;
	__asm__ __volatile__(
	"\n1:\t"
	"movb (%0),%b2\n\t"	/* fetch next source byte */
	"incl %0\n\t"
	"movb %b2,(%1)\n\t"	/* store it (including the final NUL) */
	"incl %1\n\t"
	"testb %b2,%b2\n\t"
	"jne 1b"		/* loop until the NUL has been copied */
	:"=r" (src), "=r" (tmp), "=q" (dummy)
	:"0" (src), "1" (tmp)
	:"memory");
	return dest;
}

#define __HAVE_ARCH_STRNCPY
/*
 * Copy at most 'count' bytes of 'src' into 'dest'; if 'src' is shorter,
 * NUL-pad 'dest' up to 'count' bytes.  As with standard strncpy, 'dest'
 * is NOT NUL-terminated when strlen(src) >= count.
 */
extern inline char * strncpy(char * dest,const char *src,size_t count)
{
	register char *tmp= (char *)dest;
	register char dummy;
	if (count) {
	__asm__ __volatile__(
		"\n1:\t"
		"movb (%0),%b2\n\t"
		"incl %0\n\t"
		"movb %b2,(%1)\n\t"
		"incl %1\n\t"
		"decl %3\n\t"
		"je 3f\n\t"		/* count exhausted: stop, no NUL pad */
		"testb %b2,%b2\n\t"
		"jne 1b\n\t"		/* source NUL copied: fall into padding */
		"2:\tmovb %b2,(%1)\n\t"	/* %b2 is 0 here - pad with NULs */
		"incl %1\n\t"
		"decl %3\n\t"
		"jne 2b\n\t"
		"3:"
		:"=r" (src), "=r" (tmp), "=q" (dummy), "=r" (count)
		:"0" (src), "1" (tmp), "3" (count)
		:"memory");
	} /* if (count) */
	return dest;
}
|
#define __HAVE_ARCH_STRCAT
/*
 * Append 'src' (terminator included) to the end of 'dest'.
 * Loop 1 locates the terminating NUL of 'dest', loop 2 copies.
 */
extern inline char * strcat(char * dest,const char * src)
{
	register char *tmp = (char *)(dest-1);	/* pre-decremented: loop incl's first */
	register char dummy;
	__asm__ __volatile__(
	"\n1:\tincl %1\n\t"
	"cmpb $0,(%1)\n\t"
	"jne 1b\n"		/* %1 now points at dest's NUL */
	"2:\tmovb (%2),%b0\n\t"
	"incl %2\n\t"
	"movb %b0,(%1)\n\t"
	"incl %1\n\t"
	"testb %b0,%b0\n\t"
	"jne 2b\n"		/* copy bytes until the NUL is copied too */
	:"=q" (dummy), "=r" (tmp), "=r" (src)
	:"1" (tmp), "2" (src)
	:"memory");
	return dest;
}

#define __HAVE_ARCH_STRNCAT
/*
 * Append at most 'count' bytes of 'src' to 'dest'.  Unlike strncpy,
 * the result is always NUL-terminated (label 3 stores the NUL).
 */
extern inline char * strncat(char * dest,const char * src,size_t count)
{
	register char *tmp = (char *)(dest-1);
	register char dummy;
	__asm__ __volatile__(
	"\n1:\tincl %1\n\t"
	"cmpb $0,(%1)\n\t"
	"jne 1b\n"		/* find end of dest */
	"2:\tdecl %3\n\t"
	"js 3f\n\t"		/* count exhausted */
	"movb (%2),%b0\n\t"
	"incl %2\n\t"
	"movb %b0,(%1)\n\t"
	"incl %1\n\t"
	"testb %b0,%b0\n\t"
	"jne 2b\n"
	"3:\txorb %b0,%b0\n\t"
	"movb %b0,(%1)\n\t"	/* always terminate */
	:"=q" (dummy), "=r" (tmp), "=r" (src), "=r" (count)
	:"1" (tmp), "2" (src), "3" (count)
	:"memory");
	return dest;
}
|
#define __HAVE_ARCH_STRCMP
/*
 * Compare two strings.  Note: returns only the sign (-1, 0, or +1),
 * not the byte difference - that satisfies the strcmp contract.
 */
extern inline int strcmp(const char * cs,const char * ct)
{
	register int __res;
	__asm__ __volatile__(
	"\n1:\tmovb (%1),%b0\n\t"
	"incl %1\n\t"
	"cmpb %b0,(%2)\n\t"
	"jne 2f\n\t"		/* bytes differ */
	"incl %2\n\t"
	"testb %b0,%b0\n\t"
	"jne 1b\n\t"		/* equal and not NUL: keep going */
	"xorl %k0,%k0\n\t"	/* both hit NUL together: equal */
	"jmp 3f\n"
	"2:\tmovl $1,%k0\n\t"
	"jb 3f\n\t"		/* carry from cmpb decides the sign */
	"negl %k0\n"
	"3:"
	:"=q" (__res), "=r" (cs), "=r" (ct)
	:"1" (cs), "2" (ct)
	: "memory" );
	return __res;
}

#define __HAVE_ARCH_STRNCMP
/*
 * Compare at most 'count' bytes of two strings; same sign-only return
 * convention as strcmp above.
 */
extern inline int strncmp(const char * cs,const char * ct,size_t count)
{
	register int __res;
	__asm__ __volatile__(
	"\n1:\tdecl %3\n\t"
	"js 2f\n\t"		/* count exhausted: equal so far */
	"movb (%1),%b0\n\t"
	"incl %1\n\t"
	"cmpb %b0,(%2)\n\t"
	"jne 3f\n\t"
	"incl %2\n\t"
	"testb %b0,%b0\n\t"
	"jne 1b\n"
	"2:\txorl %k0,%k0\n\t"
	"jmp 4f\n"
	"3:\tmovl $1,%k0\n\t"
	"jb 4f\n\t"
	"negl %k0\n"
	"4:"
	:"=q" (__res), "=r" (cs), "=r" (ct), "=r" (count)
	:"1" (cs), "2" (ct), "3" (count));
	return __res;
}
|
#define __HAVE_ARCH_STRCHR
/*
 * Return a pointer to the first occurrence of 'c' in 's', or NULL.
 * If c == 0 this finds the terminating NUL (standard behaviour).
 */
extern inline char * strchr(const char * s, int c)
{
	register char * __res;
	__asm__ __volatile__(
	"movb %%al,%%ah\n"	/* ah = target char, al = current char */
	"1:\tmovb (%1),%%al\n\t"
	"cmpb %%ah,%%al\n\t"
	"je 2f\n\t"
	"incl %1\n\t"
	"testb %%al,%%al\n\t"
	"jne 1b\n\t"
	"xorl %1,%1\n"		/* end of string, no match: NULL */
	"2:\tmovl %1,%0\n\t"
	:"=a" (__res), "=r" (s)
	:"0" (c), "1" (s));
	return __res;
}

#define __HAVE_ARCH_STRRCHR
/*
 * Return a pointer to the LAST occurrence of 'c' in 's', or NULL.
 * Scans the whole string once, remembering the latest match in %0
 * (initialized to 0 = NULL via the "0" (0) input).
 */
extern inline char * strrchr(const char * s, int c)
{
	register char * __res;
	__asm__ __volatile__(
	"cld\n\t"
	"movb %%al,%%ah\n"
	"1:\tlodsb\n\t"
	"cmpb %%ah,%%al\n\t"
	"jne 2f\n\t"
	"leal -1(%%esi),%0\n"	/* record position of this match */
	"2:\ttestb %%al,%%al\n\t"
	"jne 1b"
	:"=d" (__res):"0" (0),"S" (s),"a" (c):"ax","si");
	return __res;
}
|
#define __HAVE_ARCH_STRSPN
/*
 * Length of the initial segment of 'cs' consisting only of chars in 'ct'.
 * The leading repne/scasb with al=0, ecx=-1 computes strlen(ct) into edx;
 * then each char of 'cs' is membership-tested against 'ct' with scasb.
 */
extern inline size_t strspn(const char * cs, const char * ct)
{
	register char * __res;
	__asm__ __volatile__(
	"cld\n\t"
	"movl %4,%%edi\n\t"
	"repne\n\t"
	"scasb\n\t"
	"notl %%ecx\n\t"
	"decl %%ecx\n\t"	/* ecx = strlen(ct) */
	"movl %%ecx,%%edx\n"
	"1:\tlodsb\n\t"
	"testb %%al,%%al\n\t"
	"je 2f\n\t"		/* end of cs */
	"movl %4,%%edi\n\t"
	"movl %%edx,%%ecx\n\t"
	"repne\n\t"
	"scasb\n\t"		/* is this char in ct? */
	"je 1b\n"		/* yes: keep scanning */
	"2:\tdecl %0"		/* esi overshot by one */
	:"=S" (__res):"a" (0),"c" (0xffffffff),"0" (cs),"g" (ct)
	:"ax","cx","dx","di");
	return __res-cs;
}

#define __HAVE_ARCH_STRCSPN
/*
 * Length of the initial segment of 'cs' containing NO chars from 'ct'.
 * Identical scheme to strspn above; only the loop condition (jne vs je)
 * is inverted.
 */
extern inline size_t strcspn(const char * cs, const char * ct)
{
	register char * __res;
	__asm__ __volatile__(
	"cld\n\t"
	"movl %4,%%edi\n\t"
	"repne\n\t"
	"scasb\n\t"
	"notl %%ecx\n\t"
	"decl %%ecx\n\t"	/* ecx = strlen(ct) */
	"movl %%ecx,%%edx\n"
	"1:\tlodsb\n\t"
	"testb %%al,%%al\n\t"
	"je 2f\n\t"
	"movl %4,%%edi\n\t"
	"movl %%edx,%%ecx\n\t"
	"repne\n\t"
	"scasb\n\t"
	"jne 1b\n"		/* not in ct: keep scanning */
	"2:\tdecl %0"
	:"=S" (__res):"a" (0),"c" (0xffffffff),"0" (cs),"g" (ct)
	:"ax","cx","dx","di");
	return __res-cs;
}

#define __HAVE_ARCH_STRPBRK
/*
 * Pointer to the first char of 'cs' that is also in 'ct', or NULL.
 * Same scan as strcspn, but returns the position instead of a length.
 */
extern inline char * strpbrk(const char * cs,const char * ct)
{
	register char * __res;
	__asm__ __volatile__(
	"cld\n\t"
	"movl %4,%%edi\n\t"
	"repne\n\t"
	"scasb\n\t"
	"notl %%ecx\n\t"
	"decl %%ecx\n\t"
	"movl %%ecx,%%edx\n"
	"1:\tlodsb\n\t"
	"testb %%al,%%al\n\t"
	"je 2f\n\t"		/* exhausted cs: no match */
	"movl %4,%%edi\n\t"
	"movl %%edx,%%ecx\n\t"
	"repne\n\t"
	"scasb\n\t"
	"jne 1b\n\t"
	"decl %0\n\t"		/* match: back up to the matching char */
	"jmp 3f\n"
	"2:\txorl %0,%0\n"	/* NULL on no match */
	"3:"
	:"=S" (__res):"a" (0),"c" (0xffffffff),"0" (cs),"g" (ct)
	:"ax","cx","dx","di");
	return __res;
}
|
#define __HAVE_ARCH_STRSTR
/*
 * Find the first occurrence of substring 'ct' in 'cs', or NULL.
 * Naive O(n*m) scan: strlen(ct) is computed once into edx, then a
 * repe/cmpsb compare is attempted at each successive position of cs.
 */
extern inline char * strstr(const char * cs,const char * ct)
{
	register char * __res;
	__asm__ __volatile__(
	"cld\n\t" \
	"movl %4,%%edi\n\t"
	"repne\n\t"
	"scasb\n\t"
	"notl %%ecx\n\t"
	"decl %%ecx\n\t"	/* NOTE! This also sets Z if searchstring='' */
	"movl %%ecx,%%edx\n"
	"1:\tmovl %4,%%edi\n\t"
	"movl %%esi,%%eax\n\t"	/* remember current cs position */
	"movl %%edx,%%ecx\n\t"
	"repe\n\t"
	"cmpsb\n\t"		/* compare ct against cs at this offset */
	"je 2f\n\t"		/* also works for empty string, see above */
	"xchgl %%eax,%%esi\n\t"
	"incl %%esi\n\t"	/* advance start position by one */
	"cmpb $0,-1(%%eax)\n\t"
	"jne 1b\n\t"		/* stop at end of cs */
	"xorl %%eax,%%eax\n\t"	/* no match: NULL */
	"2:"
	:"=a" (__res):"0" (0),"c" (0xffffffff),"S" (cs),"g" (ct)
	:"cx","dx","di","si");
	return __res;
}
|
#define __HAVE_ARCH_STRLEN
/* Length of 's', excluding the terminating NUL. */
extern inline size_t strlen(const char * s)
{
/*
 * slightly slower on a 486, but with better chances of
 * register allocation
 */
	register char dummy, *tmp= (char *)s;
	__asm__ __volatile__(
	"\n1:\t"
	"movb\t(%0),%1\n\t"
	"incl\t%0\n\t"
	"testb\t%1,%1\n\t"
	"jne\t1b"
	:"=r" (tmp),"=q" (dummy)
	:"0" (s)
	: "memory" );
	/* tmp ends one past the NUL, hence the -1 */
	return (tmp-s-1);
}

/* Added by Gertjan van Wingerde to make minix and sysv module work */
#define __HAVE_ARCH_STRNLEN
/*
 * Length of 's', but never more than 'count' (stops at the first NUL
 * or after 'count' bytes, whichever comes first).
 */
extern inline size_t strnlen(const char * s, size_t count)
{
	register int __res;
	__asm__ __volatile__(
	"movl %1,%0\n\t"	/* cursor = s */
	"jmp 2f\n"
	"1:\tcmpb $0,(%0)\n\t"
	"je 3f\n\t"
	"incl %0\n"
	"2:\tdecl %2\n\t"
	"cmpl $-1,%2\n\t"
	"jne 1b\n"		/* loop while count not exhausted */
	"3:\tsubl %1,%0"	/* length = cursor - s */
	:"=a" (__res)
	:"c" (s),"d" (count)
	:"dx");
	return __res;
}
/* end of additional stuff */
|
#define __HAVE_ARCH_STRTOK
/*
 * Classic (non-reentrant) strtok: split 's' into tokens separated by any
 * of the delimiter chars in 'ct'.  Mutates 's' (writes NULs over the
 * delimiters) and keeps its scan position in the global ___strtok
 * (declared elsewhere), so it is not thread-safe.
 */
extern inline char * strtok(char * s,const char * ct)
{
	register char * __res;
	__asm__ __volatile__(
	"testl %1,%1\n\t"
	"jne 1f\n\t"		/* saved state present? */
	"testl %0,%0\n\t"
	"je 8f\n\t"		/* s==NULL and no state: return NULL */
	"movl %0,%1\n"		/* start a new scan from s */
	"1:\txorl %0,%0\n\t"
	"movl $-1,%%ecx\n\t"
	"xorl %%eax,%%eax\n\t"
	"cld\n\t"
	"movl %4,%%edi\n\t"
	"repne\n\t"
	"scasb\n\t"
	"notl %%ecx\n\t"
	"decl %%ecx\n\t"	/* ecx = strlen(ct) */
	"je 7f\n\t"			/* empty delimiter-string */
	"movl %%ecx,%%edx\n"
	"2:\tlodsb\n\t"		/* skip leading delimiters */
	"testb %%al,%%al\n\t"
	"je 7f\n\t"
	"movl %4,%%edi\n\t"
	"movl %%edx,%%ecx\n\t"
	"repne\n\t"
	"scasb\n\t"
	"je 2b\n\t"
	"decl %1\n\t"		/* token starts here */
	"cmpb $0,(%1)\n\t"
	"je 7f\n\t"
	"movl %1,%0\n"
	"3:\tlodsb\n\t"		/* scan to end of token */
	"testb %%al,%%al\n\t"
	"je 5f\n\t"
	"movl %4,%%edi\n\t"
	"movl %%edx,%%ecx\n\t"
	"repne\n\t"
	"scasb\n\t"
	"jne 3b\n\t"
	"decl %1\n\t"
	"cmpb $0,(%1)\n\t"
	"je 5f\n\t"
	"movb $0,(%1)\n\t"	/* terminate the token in place */
	"incl %1\n\t"
	"jmp 6f\n"
	"5:\txorl %1,%1\n"	/* hit end of string: clear saved state */
	"6:\tcmpb $0,(%0)\n\t"
	"jne 7f\n\t"
	"xorl %0,%0\n"
	"7:\ttestl %0,%0\n\t"
	"jne 8f\n\t"
	"movl %0,%1\n"		/* no token: also clear saved state */
	"8:"
	:"=b" (__res),"=S" (___strtok)
	:"0" (___strtok),"1" (s),"g" (ct)
	:"ax","cx","dx","di","memory");
	return __res;
}
|
/* Constant-count dispatch: pick the widest copy loop the size allows.
 * Beware: 'count' is evaluated more than once (macro hazard - fine for
 * the constant counts this is used with). */
#define __memcpy_c(d,s,count) \
((count%4==0) ? \
 __memcpy_by4((d),(s),(count)) : \
 ((count%2==0) ? \
  __memcpy_by2((d),(s),(count)) : \
  __memcpy_g((d),(s),(count))))

#define __HAVE_ARCH_MEMCPY
/* memcpy() entry point: constant sizes get a specialized loop at
 * compile time, everything else falls back to the generic rep-movs. */
#define memcpy(d,s,count) \
(count == 0 \
 ? d \
 : __builtin_constant_p(count) \
 ? __memcpy_c((d),(s),(count)) \
 : __memcpy_g((d),(s),(count)))

/*
 * These ought to get tweaked to do some cache priming.
 */

/* Copy n bytes as n/4 dwords.  Callers (the macros above) guarantee
 * n != 0 and n % 4 == 0; n == 0 would wrap the decl/jnz loop. */
extern inline void * __memcpy_by4(void * to, const void * from, size_t n)
{
	register void *tmp = (void *)to;
	register int dummy1,dummy2;
	__asm__ __volatile__ (
	"\n1:\tmovl (%2),%0\n\t"
	"addl $4,%2\n\t"
	"movl %0,(%1)\n\t"
	"addl $4,%1\n\t"
	"decl %3\n\t"
	"jnz 1b"
	:"=r" (dummy1), "=r" (tmp), "=r" (from), "=r" (dummy2)
	:"1" (tmp), "2" (from), "3" (n/4)
	:"memory");
	return to;
}

/* Copy n bytes (n even): dwords first, then the trailing word.
 * Callers guarantee n != 0 and n % 2 == 0. */
extern inline void * __memcpy_by2(void * to, const void * from, size_t n)
{
	register void *tmp = (void *)to;
	register int dummy1,dummy2;
	__asm__ __volatile__ (
	"shrl $1,%3\n\t"
	"jz 2f\n"			/* only a word */
	"1:\tmovl (%2),%0\n\t"
	"addl $4,%2\n\t"
	"movl %0,(%1)\n\t"
	"addl $4,%1\n\t"
	"decl %3\n\t"
	"jnz 1b\n"
	"2:\tmovw (%2),%w0\n\t"
	"movw %w0,(%1)"
	:"=r" (dummy1), "=r" (tmp), "=r" (from), "=r" (dummy2)
	:"1" (tmp), "2" (from), "3" (n/2)
	:"memory");
	return to;
}

/* Generic copy: odd byte, then odd word, then rep movsl for the rest
 * (the two shrl's peel the low bits of n off into the carry flag). */
extern inline void * __memcpy_g(void * to, const void * from, size_t n)
{
	register void *tmp = (void *)to;
	__asm__ __volatile__ (
	"cld\n\t"
	"shrl $1,%%ecx\n\t"
	"jnc 1f\n\t"
	"movsb\n"
	"1:\tshrl $1,%%ecx\n\t"
	"jnc 2f\n\t"
	"movsw\n"
	"2:\trep\n\t"
	"movsl"
	: /* no output */
	:"c" (n),"D" ((long) tmp),"S" ((long) from)
	:"cx","di","si","memory");
	return to;
}
|
|
#define __HAVE_ARCH_MEMMOVE
/*
 * Overlap-safe copy: forward rep movsb when dest is below src,
 * otherwise backwards (std) starting from the last byte.
 * NOTE(review): the forward branch declares no "memory" clobber while
 * the backward one does - looks like an oversight; confirm before
 * relying on it around other memory accesses.
 */
extern inline void * memmove(void * dest,const void * src, size_t n)
{
	register void *tmp = (void *)dest;
	if (dest<src)
		__asm__ __volatile__ (
		"cld\n\t"
		"rep\n\t"
		"movsb"
		: /* no output */
		:"c" (n),"S" (src),"D" (tmp)
		:"cx","si","di");
	else
		__asm__ __volatile__ (
		"std\n\t"		/* copy backwards ... */
		"rep\n\t"
		"movsb\n\t"
		"cld\n\t"		/* ... and restore the direction flag */
		: /* no output */
		:"c" (n), "S" (n-1+(const char *)src), "D" (n-1+(char *)tmp)
		:"cx","si","di","memory");
	return dest;
}

/*
 * Compare 'count' bytes.  Like the strcmp above, only the sign of the
 * result is meaningful (-1, 0, +1 via the sbbl/orb trick).
 */
extern inline int memcmp(const void * cs,const void * ct,size_t count)
{
	register int __res;
	__asm__ __volatile__(
	"cld\n\t"
	"repe\n\t"
	"cmpsb\n\t"
	"je 1f\n\t"
	"sbbl %0,%0\n\t"	/* 0 or -1 from the borrow of the last cmpsb */
	"orb $1,%b0\n"		/* force -1 -> -1, 0 -> +1 */
	"1:"
	:"=abd" (__res):"0" (0),"S" (cs),"D" (ct),"c" (count)
	:"si","di","cx");
	return __res;
}
|
#define __HAVE_ARCH_MEMCHR
/*
 * Find byte 'c' within the first 'count' bytes of 'cs'.
 * Returns a pointer to the byte, or NULL if absent.
 */
extern inline void * memchr(const void * cs,int c,size_t count)
{
	register void * __res;
	if (!count)
		return NULL;
	__asm__ __volatile__(
	"cld\n\t"
	"repne\n\t"
	"scasb\n\t"
	"je 1f\n\t"
	"movl $1,%0\n"		/* miss: 1 - 1 = NULL below */
	"1:\tdecl %0"		/* hit: scasb overshot by one */
	:"=D" (__res):"a" (c),"D" (cs),"c" (count)
	:"cx");
	return __res;
}
|
/* Dispatch for constant fill byte AND constant count. */
#define __memset_cc(s,c,count) \
((count%4==0) ? \
 __memset_cc_by4((s),(c),(count)) : \
 ((count%2==0) ? \
  __memset_cc_by2((s),(c),(count)) : \
  __memset_cg((s),(c),(count))))

/* Dispatch for variable fill byte, constant count. */
#define __memset_gc(s,c,count) \
((count%4==0) ? \
 __memset_gc_by4((s),(c),(count)) : \
 ((count%2==0) ? \
  __memset_gc_by2((s),(c),(count)) : \
  __memset_gg((s),(c),(count))))

#define __HAVE_ARCH_MEMSET
/* memset() entry point: four specializations keyed on whether the fill
 * byte ('c') and the size ('count') are compile-time constants.
 * 'c' and 'count' may be evaluated more than once (macro hazard). */
#define memset(s,c,count) \
(count == 0 \
 ? s \
 : __builtin_constant_p(c) \
 ? __builtin_constant_p(count) \
 ? __memset_cc((s),(c),(count)) \
 : __memset_cg((s),(c),(count)) \
 : __builtin_constant_p(count) \
 ? __memset_gc((s),(c),(count)) \
 : __memset_gg((s),(c),(count)))

/* Constant byte, count a nonzero multiple of 4: the fill dword
 * 0x01010101 * c is computed at compile time. */
extern inline void * __memset_cc_by4(void * s, char c, size_t count)
{
/*
 * register char *tmp = s;
 */
	register char *tmp = (char *)s;
	register int dummy;
	__asm__ __volatile__ (
	"\n1:\tmovl %2,(%0)\n\t"
	"addl $4,%0\n\t"
	"decl %1\n\t"
	"jnz 1b"
	:"=r" (tmp), "=r" (dummy)
	:"r" (0x01010101UL * (unsigned char) c), "0" (tmp), "1" (count/4)
	:"memory");
	return s;
}

/* Constant byte, count a nonzero multiple of 2: dword stores plus a
 * trailing word when count/2 is odd. */
extern inline void * __memset_cc_by2(void * s, char c, size_t count)
{
	register void *tmp = (void *)s;
	register int dummy;
	__asm__ __volatile__ (
	"shrl $1,%1\n\t"	/* may be divisible also by 4 */
	"jz 2f\n"
	"\n1:\tmovl %2,(%0)\n\t"
	"addl $4,%0\n\t"
	"decl %1\n\t"
	"jnz 1b\n"
	"2:\tmovw %w2,(%0)"
	:"=r" (tmp), "=r" (dummy)
	:"r" (0x01010101UL * (unsigned char) c), "0" (tmp), "1" (count/2)
	:"memory");
	return s;
}

/* Variable byte, count a nonzero multiple of 4: replicate c into all
 * four byte lanes at run time (movb/pushw/shll/popw), then store dwords. */
extern inline void * __memset_gc_by4(void * s, char c, size_t count)
{
	register void *tmp = (void *)s;
	register int dummy;
	__asm__ __volatile__ (
	"movb %b0,%h0\n"	/* c -> low two bytes */
	"pushw %w0\n\t"
	"shll $16,%k0\n\t"
	"popw %w0\n"		/* c in all four bytes of %k0 */
	"1:\tmovl %k0,(%1)\n\t"
	"addl $4,%1\n\t"
	"decl %2\n\t"
	"jnz 1b\n"
	:"=q" (c), "=r" (tmp), "=r" (dummy)
	:"0" ((unsigned) c), "1" (tmp), "2" (count/4)
	:"memory");
	return s;
}

/* Variable byte, count a nonzero multiple of 2. */
extern inline void * __memset_gc_by2(void * s, char c, size_t count)
{
	register void *tmp = (void *)s;
	register int dummy1,dummy2;
	__asm__ __volatile__ (
	"movb %b0,%h0\n\t"
	"shrl $1,%2\n\t"	/* may be divisible also by 4 */
	"jz 2f\n\t"
	"pushw %w0\n\t"
	"shll $16,%k0\n\t"
	"popw %w0\n"
	"1:\tmovl %k0,(%1)\n\t"
	"addl $4,%1\n\t"
	"decl %2\n\t"
	"jnz 1b\n"
	"2:\tmovw %w0,(%1)"	/* trailing word */
	:"=q" (dummy1), "=r" (tmp), "=r" (dummy2)
	:"0" ((unsigned) c), "1" (tmp), "2" (count/2)
	:"memory");
	return s;
}

/* Constant byte, arbitrary count: rep stosw for the pairs, the carry
 * from the shrl flags an odd trailing byte. */
extern inline void * __memset_cg(void * s, char c, size_t count)
{
	register void *tmp = (void *)s;
	__asm__ __volatile__ (
	"shrl $1,%%ecx\n\t"
	"cld\n\t"
	"rep\n\t"
	"stosw\n\t"
	"jnc 1f\n\t"
	"movb %%al,(%%edi)\n"	/* odd final byte */
	"1:"
	: /* no output */
	:"c" (count),"D" (tmp), "a" (0x0101U * (unsigned char) c)
	:"cx","di","memory");
	return s;
}

/* Variable byte, arbitrary count: same as __memset_cg but the word
 * pattern is built at run time (movb %al,%ah). */
extern inline void * __memset_gg(void * s,char c,size_t count)
{
	register void *tmp = (void *)s;
	__asm__ __volatile__ (
	"movb %%al,%%ah\n\t"
	"shrl $1,%%ecx\n\t"
	"cld\n\t"
	"rep\n\t"
	"stosw\n\t"
	"jnc 1f\n\t"
	"movb %%al,(%%edi)\n"
	"1:"
	: /* no output */
	:"c" (count),"D" (tmp), "a" (c)
	:"cx","di","memory");
	return s;
}
|
|
/*
 * find the first occurrence of byte 'c', or 1 past the area if none
 */
#define __HAVE_ARCH_MEMSCAN
extern inline void * memscan(void * addr, int c, size_t size)
{
	if (!size)
		return addr;
	/* repnz/scasb leaves edi one past the matching byte, so back up
	 * by one on a hit; on a miss edi already points 1 past the area.
	 * (The original asm template was a single raw multi-line string
	 * literal - invalid C outside of ancient GCC - rewritten here as
	 * conventional "\n\t"-continued strings, instructions unchanged.) */
	__asm__("cld\n\t"
		"repnz; scasb\n\t"
		"jnz 1f\n\t"
		"dec %%edi\n"
		"1:"
		: "=D" (addr), "=c" (size)
		: "0" (addr), "1" (size), "a" (c));
	return addr;
}
|
#endif |
/posix_types.h
0,0 → 1,63
#ifndef __ARCH_I386_POSIX_TYPES_H |
#define __ARCH_I386_POSIX_TYPES_H |
|
/* |
* This file is generally used by user-level software, so you need to |
* be a little careful about namespace pollution etc. Also, we cannot |
* assume GCC is being used. |
*/ |
|
/* Fixed kernel-ABI scalar types for the i386 port. */
typedef unsigned short __kernel_dev_t;
typedef unsigned long __kernel_ino_t;
typedef unsigned short __kernel_mode_t;
typedef unsigned short __kernel_nlink_t;
typedef long __kernel_off_t;
typedef int __kernel_pid_t;
typedef unsigned short __kernel_uid_t;
typedef unsigned short __kernel_gid_t;
typedef unsigned int __kernel_size_t;
typedef int __kernel_ssize_t;
typedef int __kernel_ptrdiff_t;
typedef long __kernel_time_t;
typedef long __kernel_clock_t;
typedef int __kernel_daddr_t;
typedef char * __kernel_caddr_t;

/* 64-bit offsets need `long long`, which cannot be assumed without GCC. */
#ifdef __GNUC__
typedef long long __kernel_loff_t;
#endif

/*
 * Filesystem id.  The member is named `val` for the kernel (and for user
 * code opting in via __USE_ALL) but `__val` otherwise, keeping the plain
 * name out of the user namespace -- see the note at the top of this file.
 */
typedef struct {
#if defined(__KERNEL__) || defined(__USE_ALL)
int val[2];
#else /* !defined(__KERNEL__) && !defined(__USE_ALL) */
int __val[2];
#endif /* !defined(__KERNEL__) && !defined(__USE_ALL) */
} __kernel_fsid_t;
|
/* Set bit `fd` in *fdsetp with a single btsl instruction. */
#undef __FD_SET
#define __FD_SET(fd,fdsetp) \
__asm__ __volatile__("btsl %1,%0": \
"=m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd)))
|
/* Clear bit `fd` in *fdsetp with a single btrl instruction. */
#undef __FD_CLR
#define __FD_CLR(fd,fdsetp) \
__asm__ __volatile__("btrl %1,%0": \
"=m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd)))
|
/*
 * Test bit `fd` in *fdsetp: btl copies the bit into the carry flag and
 * setb materialises it as 0/1 in a byte register ("=q").
 * NOTE(review): __volatile__ on a pure test keeps the compiler from
 * folding repeated queries -- possibly intentional; confirm before
 * changing.
 */
#undef __FD_ISSET
#define __FD_ISSET(fd,fdsetp) (__extension__ ({ \
unsigned char __result; \
__asm__ __volatile__("btl %1,%2 ; setb %0" \
:"=q" (__result) :"r" ((int) (fd)), \
"m" (*(__kernel_fd_set *) (fdsetp))); \
__result; }))
|
/*
 * Zero the whole set with rep stosl.  __FDSET_LONGS (the set size in
 * 32-bit words) is not defined in this file; presumably it comes from
 * the including <linux/posix_types.h> -- verify.
 * NOTE(review): %ecx/%edi are inputs and are also in the clobber list,
 * an idiom accepted by gcc of this era but rejected by modern gcc.
 */
#undef __FD_ZERO
#define __FD_ZERO(fdsetp) \
__asm__ __volatile__("cld ; rep ; stosl" \
:"=m" (*(__kernel_fd_set *) (fdsetp)) \
:"a" (0), "c" (__FDSET_LONGS), \
"D" ((__kernel_fd_set *) (fdsetp)) :"cx","di")
|
#endif |
/sigcontext.h
0,0 → 1,54
#ifndef _ASMi386_SIGCONTEXT_H |
#define _ASMi386_SIGCONTEXT_H |
|
/* |
* As documented in the iBCS2 standard.. |
* |
* The first part of "struct _fpstate" is just the |
* normal i387 hardware setup, the extra "status" |
* word is used to save the coprocessor status word |
* before entering the handler. |
*/ |
/* One 80-bit x87 register image: four significand words plus the exponent word. */
struct _fpreg {
unsigned short significand[4];
unsigned short exponent;
};
|
/*
 * i387 state as saved in the signal frame.  Per the comment at the top
 * of this file, everything up to _st[] is the normal i387 hardware
 * setup; `status` additionally holds the coprocessor status word saved
 * before entering the handler.
 */
struct _fpstate {
unsigned long cw,	/* control word */
sw,	/* status word */
tag,	/* tag word */
ipoff,	/* instruction pointer offset */
cssel,	/* instruction pointer selector */
dataoff,	/* operand pointer offset */
datasel;	/* operand pointer selector */
struct _fpreg _st[8];	/* the eight st(i) registers */
unsigned long status;	/* status word captured before entering the handler */
};
|
/*
 * CPU state at the time the signal was raised, in the iBCS2-compatible
 * layout noted at the top of this file.  Each segment register occupies
 * 32 bits: the 16-bit value plus a 16-bit pad (__gsh etc.) keeping the
 * following fields 4-byte aligned.
 */
struct sigcontext_struct {
unsigned short gs, __gsh;
unsigned short fs, __fsh;
unsigned short es, __esh;
unsigned short ds, __dsh;
unsigned long edi;
unsigned long esi;
unsigned long ebp;
unsigned long esp;
unsigned long ebx;
unsigned long edx;
unsigned long ecx;
unsigned long eax;
unsigned long trapno;	/* presumably the trap number from the exception frame -- verify */
unsigned long err;	/* presumably the hardware error code -- verify */
unsigned long eip;
unsigned short cs, __csh;
unsigned long eflags;
unsigned long esp_at_signal;
unsigned short ss, __ssh;
struct _fpstate * fpstate;	/* NOTE(review): presumably NULL when the FPU was unused -- verify */
unsigned long oldmask;	/* presumably the signal mask to restore -- verify */
unsigned long cr2;	/* %cr2, the page-fault linear address register */
};
|
#endif |
/mman.h
0,0 → 1,31
#ifndef __I386_MMAN_H__ |
#define __I386_MMAN_H__ |
|
/* Page protection bits (mmap()/mprotect()). */
#define PROT_READ 0x1 /* page can be read */
#define PROT_WRITE 0x2 /* page can be written */
#define PROT_EXEC 0x4 /* page can be executed */
#define PROT_NONE 0x0 /* page can not be accessed */

/* mmap() flags: sharing type (masked by MAP_TYPE) and modifiers. */
#define MAP_SHARED 0x01 /* Share changes */
#define MAP_PRIVATE 0x02 /* Changes are private */
#define MAP_TYPE 0x0f /* Mask for type of mapping */
#define MAP_FIXED 0x10 /* Interpret addr exactly */
#define MAP_ANONYMOUS 0x20 /* don't use a file */

#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
#define MAP_EXECUTABLE 0x1000 /* mark it as a executable */
#define MAP_LOCKED 0x2000 /* pages are locked */

/* msync() flags. */
#define MS_ASYNC 1 /* sync memory asynchronously */
#define MS_INVALIDATE 2 /* invalidate the caches */
#define MS_SYNC 4 /* synchronous memory sync */

/* mlockall() flags. */
#define MCL_CURRENT 1 /* lock all current mappings */
#define MCL_FUTURE 2 /* lock all future mappings */

/* compatibility flags */
#define MAP_ANON MAP_ANONYMOUS
#define MAP_FILE 0
|
#endif /* __I386_MMAN_H__ */ |
/socket.h
0,0 → 1,27
#ifndef _ASM_SOCKET_H
#define _ASM_SOCKET_H

#include <asm/sockios.h>

/* For setsockoptions(2) */
#define SOL_SOCKET 1

/* Option names for getsockopt()/setsockopt() at the SOL_SOCKET level. */
#define SO_DEBUG 1 /* enable protocol-level debugging */
#define SO_REUSEADDR 2 /* allow reuse of local addresses */
#define SO_TYPE 3 /* get the socket type (read-only) */
#define SO_ERROR 4 /* get and clear pending error (read-only) */
#define SO_DONTROUTE 5 /* bypass routing, send directly to interface */
#define SO_BROADCAST 6 /* permit sending of broadcast datagrams */
#define SO_SNDBUF 7 /* send buffer size */
#define SO_RCVBUF 8 /* receive buffer size */
#define SO_KEEPALIVE 9 /* enable keep-alive probes */
#define SO_OOBINLINE 10 /* receive out-of-band data in the normal stream */
#define SO_NO_CHECK 11 /* disable checksums */
#define SO_PRIORITY 12 /* protocol-defined priority for outgoing packets */
#define SO_LINGER 13 /* linger on close if data present */
#define SO_BSDCOMPAT 14 /* BSD bug-to-bug compatibility */
/* To add :#define SO_REUSEPORT 15 */

#define SO_BINDTODEVICE 25 /* bind socket to a particular device */

#endif /* _ASM_SOCKET_H */