OpenCores
URL https://opencores.org/ocsvn/or1k_old/or1k_old/trunk

Subversion Repositories or1k_old

[/] [or1k_old/] [trunk/] [rc203soc/] [sw/] [uClinux/] [arch/] [i386/] [kernel/] [process.c] - Blame information for rev 1782

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 1623 jcastillo
/*
2
 *  linux/arch/i386/kernel/process.c
3
 *
4
 *  Copyright (C) 1995  Linus Torvalds
5
 */
6
 
7
/*
8
 * This file handles the architecture-dependent parts of process handling..
9
 */
10
 
11
#define __KERNEL_SYSCALLS__
12
#include <stdarg.h>
13
 
14
#include <linux/errno.h>
15
#include <linux/sched.h>
16
#include <linux/kernel.h>
17
#include <linux/mm.h>
18
#include <linux/stddef.h>
19
#include <linux/unistd.h>
20
#include <linux/ptrace.h>
21
#include <linux/malloc.h>
22
#include <linux/ldt.h>
23
#include <linux/user.h>
24
#include <linux/a.out.h>
25
#include <linux/interrupt.h>
26
#include <linux/config.h>
27
#include <linux/unistd.h>
28
#include <linux/delay.h>
29
 
30
#include <asm/segment.h>
31
#include <asm/pgtable.h>
32
#include <asm/system.h>
33
#include <asm/io.h>
34
#include <linux/smp.h>
35
 
36
/* Entry point in entry.S that newly-forked children return through. */
asmlinkage void ret_from_sys_call(void) __asm__("ret_from_sys_call");

#ifdef CONFIG_APM
/* Hooks into the APM BIOS driver used by the idle loop. */
extern int  apm_do_idle(void);
extern void apm_do_busy(void);
#endif

/* While non-zero, the idle loop must not execute "hlt" (see disable_hlt()). */
static int hlt_counter=0;

/* Jiffies of continuous idling before dropping into hard_idle(). */
#define HARD_IDLE_TIMEOUT (HZ / 3)
46
 
47
void disable_hlt(void)
48
{
49
        hlt_counter++;
50
}
51
 
52
void enable_hlt(void)
53
{
54
        hlt_counter--;
55
}
56
 
57
#ifndef __SMP__
58
 
59
/*
 * Deeper idle state entered after HARD_IDLE_TIMEOUT jiffies of idling:
 * halt the CPU (via the APM BIOS when configured) until a reschedule
 * is pending, then notify APM that we are busy again.
 */
static void hard_idle(void)
{
        while (!need_resched) {
                if (hlt_works_ok && !hlt_counter) {
#ifdef CONFIG_APM
                                /* If the APM BIOS is not enabled, or there
                                 is an error calling the idle routine, we
                                 should hlt if possible.  We need to check
                                 need_resched again because an interrupt
                                 may have occurred in apm_do_idle(). */
                        start_bh_atomic();
                        if (!apm_do_idle() && !need_resched)
                                __asm__("hlt");
                        end_bh_atomic();
#else
                        __asm__("hlt");
#endif
                }
                if (need_resched)
                        break;
                schedule();
        }
#ifdef CONFIG_APM
        apm_do_busy();  /* tell the APM BIOS we left the idle state */
#endif
}
85
 
86
/*
87
 * The idle loop on a uniprocessor i386..
88
 */
89
 
90
asmlinkage int sys_idle(void)
{
        unsigned long start_idle = 0;   /* jiffies when this idle period began */

        /* Only the idle task (pid 0) may enter the idle loop. */
        if (current->pid != 0)
                return -EPERM;
        /* endless idle loop with no priority at all */
        current->counter = -100;
        for (;;)
        {
                /*
                 *      We are locked at this point. So we can safely call
                 *      the APM bios knowing only one CPU at a time will do
                 *      so.
                 */
                if (!start_idle)
                        start_idle = jiffies;
                /* After HARD_IDLE_TIMEOUT jiffies of continuous idling,
                   switch to the deeper (APM-aware) hard_idle() state. */
                if (jiffies - start_idle > HARD_IDLE_TIMEOUT)
                {
                        hard_idle();
                }
                else
                {
                        /* Light idle: halt until the next interrupt. */
                        if (hlt_works_ok && !hlt_counter && !need_resched)
                                __asm__("hlt");
                }
                if (need_resched)
                        start_idle = 0;         /* work arrived: restart the idle timer */
                schedule();
        }
}
121
 
122
#else
123
 
124
/*
125
 *      In the SMP world we hlt outside of kernel syscall rather than within
126
 *      so as to get the right locking semantics.
127
 */
128
 
129
asmlinkage int sys_idle(void)
{
        /* Only the idle task (pid 0) may call this. */
        if(current->pid != 0)
                return -EPERM;
#ifdef __SMP_PROF__
        /* Fold the syscall-level spin count into the idle spin statistics. */
        smp_spins_sys_idle[smp_processor_id()]+=
          smp_spins_syscall_cur[smp_processor_id()];
#endif
        /* Lowest possible priority; the actual halting is done in
           cpu_idle(), outside the kernel lock. */
        current->counter= -100;
        schedule();
        return 0;
}
141
 
142
/*
143
 *      This is being executed in task 0 'user space'.
144
 */
145
 
146
int cpu_idle(void *unused)
{
        while(1)
        {
                /* Halt until an interrupt, if hlt works on this CPU and is
                   not disabled and no reschedule is pending. */
                if(cpu_data[smp_processor_id()].hlt_works_ok && !hlt_counter && !need_resched)
                        __asm("hlt");
                /* Low 31 bits apparently count available processes; nothing
                   to do if zero -- NOTE(review): confirm against scheduler. */
                if(0==(0x7fffffff & smp_process_available))
                        continue;
                /* Bit 31 acts as a lock flag: spin while another CPU holds it. */
                while(0x80000000 & smp_process_available);
                cli();
                /* Try to take the lock bit; while contended, service any
                   pending TLB-invalidate requests for this CPU. */
                while(set_bit(31,&smp_process_available))
                        while(test_bit(31,&smp_process_available))
                {
                        /*
                         *      Oops.. This is kind of important in some cases...
                         */
                        if(clear_bit(smp_processor_id(), &smp_invalidate_needed))
                                local_flush_tlb();
                }
                /* Re-check the count now that we hold the lock bit. */
                if (0==(0x7fffffff & smp_process_available)){
                        clear_bit(31,&smp_process_available);
                        sti();
                        continue;
                }
                /* Claim one available process, drop the lock and go run it. */
                smp_process_available--;
                clear_bit(31,&smp_process_available);
                sti();
                idle();
        }
}
176
 
177
#endif
178
 
179
/*
180
 * This routine reboots the machine by asking the keyboard
181
 * controller to pulse the reset-line low. We try that for a while,
182
 * and if it doesn't work, we do some other stupid things.
183
 */
184
/* Empty IDT descriptor: loading it and taking any interrupt triple-faults
   the CPU, which forces a reset. */
static long no_idt[2] = {0, 0};
/* 0x1234 => "warm" boot (BIOS skips the memory test), 0 => "cold" boot. */
static int reboot_mode = 0;
/* Non-zero => reboot by jumping through the BIOS instead of pulsing the
   keyboard-controller reset line. */
static int reboot_thru_bios = 0;

/*
 * Parse the "reboot=" kernel command line option: a comma-separated list
 * of single-character flags -- 'w'arm, 'c'old, 'b'ios, 'h'ard.  Later
 * flags override earlier ones; unknown characters are ignored.  The
 * ints argument is unused.
 */
void reboot_setup(char *str, int *ints)
{
        for (;;) {
                char flag = *str;

                if (flag == 'w')                /* "warm" reboot (no memory testing etc) */
                        reboot_mode = 0x1234;
                else if (flag == 'c')           /* "cold" reboot (with memory testing etc) */
                        reboot_mode = 0x0;
                else if (flag == 'b')           /* "bios" reboot by jumping through the BIOS */
                        reboot_thru_bios = 1;
                else if (flag == 'h')           /* "hard" reboot by toggling RESET and/or crashing the CPU */
                        reboot_thru_bios = 0;

                str = strchr(str, ',');
                if (str == NULL)
                        break;
                str++;
        }
}
211
 
212
 
213
/* The following code and data reboots the machine by switching to real
214
   mode and jumping to the BIOS reset entry point, as if the CPU has
215
   really been reset.  The previous version asked the keyboard
216
   controller to pulse the CPU reset line, which is more thorough, but
217
   doesn't work with at least one type of 486 motherboard.  It is easy
218
   to stop this code working; hence the copious comments. */
219
 
220
/* GDT used while switching to real mode: null descriptor, a 16-bit code
   segment and a 16-bit data segment (see the comment block above). */
unsigned long long
real_mode_gdt_entries [3] =
{
        0x0000000000000000ULL,  /* Null descriptor */
        0x00009a000000ffffULL,  /* 16-bit real-mode 64k code at 0x00000000 */
        0x000092000100ffffULL           /* 16-bit real-mode 64k data at 0x00000100 */
};
227
 
228
/* Pseudo-descriptors in the packed limit+base format expected by the
   lgdt/lidt instructions. */
struct
{
        unsigned short       size __attribute__ ((packed));
        unsigned long long * base __attribute__ ((packed));
}
real_mode_gdt = { sizeof (real_mode_gdt_entries) - 1, real_mode_gdt_entries },
real_mode_idt = { 0x3ff, 0 };   /* real-mode IVT: 1024 bytes at address 0 */
235
 
236
/* This is 16-bit protected mode code to disable paging and the cache,
237
   switch to real mode and jump to the BIOS reset code.
238
 
239
   The instruction that switches to real mode by writing to CR0 must be
240
   followed immediately by a far jump instruction, which set CS to a
241
   valid value for real mode, and flushes the prefetch queue to avoid
242
   running instructions that have already been decoded in protected
243
   mode.
244
 
245
   Clears all the flags except ET, especially PG (paging), PE
246
   (protected-mode enable) and TS (task switch for coprocessor state
247
   save).  Flushes the TLB after paging has been disabled.  Sets CD and
248
   NW, to disable the cache on a 486, and invalidates the cache.  This
249
   is more like the state of a 486 after reset.  I don't know if
250
   something else should be done for other chips.
251
 
252
   More could be done here to set up the registers as if a CPU reset had
253
   occurred; hopefully real BIOSes don't assume much. */
254
 
255
/* Hand-assembled machine code for the 16-bit stub described above: clears
   paging/protection bits in CR0, flushes the TLB via CR3, invalidates the
   cache on a 486, then far-jumps to the BIOS reset entry at ffff:0000.
   It is copied to low memory by hard_reset_now() before being executed. */
unsigned char real_mode_switch [] =
{
        0x66, 0x0f, 0x20, 0xc0,                 /*    movl  %cr0,%eax        */
        0x66, 0x83, 0xe0, 0x11,                 /*    andl  $0x00000011,%eax */
        0x66, 0x0d, 0x00, 0x00, 0x00, 0x60,             /*    orl   $0x60000000,%eax */
        0x66, 0x0f, 0x22, 0xc0,                 /*    movl  %eax,%cr0        */
        0x66, 0x0f, 0x22, 0xd8,                 /*    movl  %eax,%cr3        */
        0x66, 0x0f, 0x20, 0xc3,                 /*    movl  %cr0,%ebx        */
        0x66, 0x81, 0xe3, 0x00, 0x00, 0x00, 0x60,       /*    andl  $0x60000000,%ebx */
        0x74, 0x02,                                     /*    jz    f                */
        0x0f, 0x08,                                     /*    invd                   */
        0x24, 0x10,                                     /* f: andb  $0x10,al         */
        0x66, 0x0f, 0x22, 0xc0,                 /*    movl  %eax,%cr0        */
        0xea, 0x00, 0x00, 0xff, 0xff                    /*    ljmp  $0xffff,$0x0000  */
};
270
 
271
/*
 * Poll the keyboard controller status port (0x64) until its input
 * buffer is empty (bit 1 clear), giving up after 0x10000 reads so we
 * never hang on broken hardware.
 */
static inline void kb_wait(void)
{
        int tries = 0x10000;

        while (tries-- > 0) {
                if (!(inb_p(0x64) & 0x02))
                        break;
        }
}
278
 
279
/*
 * Reboot the machine.  Two strategies, chosen by reboot_thru_bios:
 * pulse the keyboard-controller reset line (falling back to loading an
 * empty IDT to force a triple fault), or switch back to real mode and
 * jump into the BIOS reset entry point.  Never returns.
 */
void hard_reset_now (void)
{

        if(!reboot_thru_bios) {
                sti();
                /* rebooting needs to touch the page at absolute addr 0 */
                pg0[0] = 7;
                *((unsigned short *)0x472) = reboot_mode;
                for (;;) {
                        int i;
                        for (i=0; i<100; i++) {
                                int j;
                                kb_wait();
                                for(j = 0; j < 100000 ; j++)
                                        /* nothing */;
                                outb(0xfe,0x64);         /* pulse reset low */
                                udelay(10);
                        }
                        /* Keyboard reset did not work: load an empty IDT so
                           the next interrupt triple-faults the CPU. */
                        __asm__ __volatile__("\tlidt %0": "=m" (no_idt));
                }
        }

        cli ();

        /* Write zero to CMOS register number 0x0f, which the BIOS POST
           routine will recognize as telling it to do a proper reboot.  (Well
           that's what this book in front of me says -- it may only apply to
           the Phoenix BIOS though, it's not clear).  At the same time,
           disable NMIs by setting the top bit in the CMOS address register,
           as we're about to do peculiar things to the CPU.  I'm not sure if
           `outb_p' is needed instead of just `outb'.  Use it to be on the
           safe side. */

        outb_p (0x8f, 0x70);
        outb_p (0x00, 0x71);

        /* Remap the kernel at virtual address zero, as well as offset zero
           from the kernel segment.  This assumes the kernel segment starts at
           virtual address PAGE_OFFSET. */

        memcpy (swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
                sizeof (swapper_pg_dir [0]) * KERNEL_PGD_PTRS);

        /* Make sure the first page is mapped to the start of physical memory.
           It is normally not mapped, to trap kernel NULL pointer dereferences. */

        pg0 [0] = 7;

        /* Use `swapper_pg_dir' as our page directory.  Don't bother with
           `SET_PAGE_DIR' because interrupts are disabled and we're rebooting.
           This instruction flushes the TLB. */

        __asm__ __volatile__ ("movl %0,%%cr3" : : "a" (swapper_pg_dir) : "memory");

        /* Write 0x1234 to absolute memory location 0x472.  The BIOS reads
           this on booting to tell it to "Bypass memory test (also warm
           boot)".  This seems like a fairly standard thing that gets set by
           REBOOT.COM programs, and the previous reset routine did this
           too. */

        *((unsigned short *)0x472) = reboot_mode;

        /* For the switch to real mode, copy some code to low memory.  It has
           to be in the first 64k because it is running in 16-bit mode, and it
           has to have the same physical and virtual address, because it turns
           off paging.  Copy it near the end of the first page, out of the way
           of BIOS variables. */

        memcpy ((void *) (0x1000 - sizeof (real_mode_switch)),
                real_mode_switch, sizeof (real_mode_switch));

        /* Set up the IDT for real mode. */

        __asm__ __volatile__ ("lidt %0" : : "m" (real_mode_idt));

        /* Set up a GDT from which we can load segment descriptors for real
           mode.  The GDT is not used in real mode; it is just needed here to
           prepare the descriptors. */

        __asm__ __volatile__ ("lgdt %0" : : "m" (real_mode_gdt));

        /* Load the data segment registers, and thus the descriptors ready for
           real mode.  The base address of each segment is 0x100, 16 times the
           selector value being loaded here.  This is so that the segment
           registers don't have to be reloaded after switching to real mode:
           the values are consistent for real mode operation already. */

        __asm__ __volatile__ ("movw $0x0010,%%ax\n"
                                "\tmovw %%ax,%%ds\n"
                                "\tmovw %%ax,%%es\n"
                                "\tmovw %%ax,%%fs\n"
                                "\tmovw %%ax,%%gs\n"
                                "\tmovw %%ax,%%ss" : : : "eax");

        /* Jump to the 16-bit code that we copied earlier.  It disables paging
           and the cache, switches to real mode, and jumps to the BIOS reset
           entry point. */

        __asm__ __volatile__ ("ljmp $0x0008,%0"
                                :
                                : "i" ((void *) (0x1000 - sizeof (real_mode_switch))));
}
381
 
382
/* Dump the register contents of a trap frame to the kernel log. */
void show_regs(struct pt_regs * regs)
{
        printk("\n");
        printk("EIP: %04x:[<%08lx>]",0xffff & regs->cs,regs->eip);
        /* ss/esp are only printed for frames with a non-zero CS RPL --
           NOTE(review): presumably because ring-0 frames lack them. */
        if (regs->cs & 3)
                printk(" ESP: %04x:%08lx",0xffff & regs->ss,regs->esp);
        printk(" EFLAGS: %08lx\n",regs->eflags);
        printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
                regs->eax,regs->ebx,regs->ecx,regs->edx);
        printk("ESI: %08lx EDI: %08lx EBP: %08lx",
                regs->esi, regs->edi, regs->ebp);
        printk(" DS: %04x ES: %04x FS: %04x GS: %04x\n",
                0xffff & regs->ds,0xffff & regs->es,
                0xffff & regs->fs,0xffff & regs->gs);
}
397
 
398
/*
399
 * Free current thread data structures etc..
400
 */
401
 
402
void exit_thread(void)
{
        /* forget lazy i387 state */
        if (last_task_used_math == current)
                last_task_used_math = NULL;
        /* forget local segments */
        __asm__ __volatile__("mov %w0,%%fs ; mov %w0,%%gs ; lldt %w0"
                : /* no outputs */
                : "r" (0));
        current->tss.ldt = 0;
        /* Free the per-task LDT, if one was ever allocated. */
        if (current->ldt) {
                void * ldt = current->ldt;
                current->ldt = NULL;    /* clear the pointer before freeing */
                vfree(ldt);
        }
}
418
 
419
/*
 * Reset per-thread state on exec: drop any private LDT (reverting every
 * task slot that maps to current back to the default LDT), clear the
 * debug registers and forget the FPU state.
 */
void flush_thread(void)
{
        int i;

        if (current->ldt) {
                void * ldt = current->ldt;
                current->ldt = NULL;
                vfree(ldt);
                /* Point every GDT slot belonging to this task back at the
                   default LDT and reload it. */
                for (i=1 ; i<NR_TASKS ; i++) {
                        if (task[i] == current)  {
                                set_ldt_desc(gdt+(i<<1)+
                                             FIRST_LDT_ENTRY,&default_ldt, 1);
                                load_ldt(i);
                        }
                }
        }

        /* Clear all eight debug registers (breakpoints do not survive exec). */
        for (i=0 ; i<8 ; i++)
                current->debugreg[i] = 0;

        /*
         * Forget coprocessor state..
         */
#ifdef __SMP__
        if (current->flags & PF_USEDFPU) {
                stts();
        }
#else
        if (last_task_used_math == current) {
                last_task_used_math = NULL;
                stts();
        }
#endif
        current->used_math = 0;
        current->flags &= ~PF_USEDFPU;
}
455
 
456
/*
 * Release architecture-specific resources of a dead task.  Nothing to
 * do on i386; per-task state is torn down in exit_thread()/flush_thread().
 */
void release_thread(struct task_struct *dead_task)
{
}
459
 
460
/*
 * Set up the TSS and kernel stack of a newly-forked task.  The child
 * gets a copy of the parent's register frame with eax forced to 0 (the
 * child's fork() return value) and esp set to the requested stack; it
 * will resume execution at ret_from_sys_call.
 */
void copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
        struct task_struct * p, struct pt_regs * regs)
{
        int i;
        struct pt_regs * childregs;

        p->tss.es = KERNEL_DS;
        p->tss.cs = KERNEL_CS;
        p->tss.ss = KERNEL_DS;
        p->tss.ds = KERNEL_DS;
        p->tss.fs = USER_DS;
        p->tss.gs = KERNEL_DS;
        p->tss.ss0 = KERNEL_DS;
        /* Kernel stack grows down from the top of the stack page. */
        p->tss.esp0 = p->kernel_stack_page + PAGE_SIZE;
        p->tss.tr = _TSS(nr);
        /* Place the child's register frame at the top of its kernel stack. */
        childregs = ((struct pt_regs *) (p->kernel_stack_page + PAGE_SIZE)) - 1;
        p->tss.esp = (unsigned long) childregs;
        p->tss.eip = (unsigned long) ret_from_sys_call;
        *childregs = *regs;
        childregs->eax = 0;     /* child sees 0 from fork() */
        childregs->esp = esp;
        p->tss.back_link = 0;
        p->tss.eflags = regs->eflags & 0xffffcfff;      /* iopl is always 0 for a new process */
        p->tss.ldt = _LDT(nr);
        /* p was copied from the parent, so a non-NULL p->ldt means the
           parent has a private LDT: give the child its own copy. */
        if (p->ldt) {
                p->ldt = (struct desc_struct*) vmalloc(LDT_ENTRIES*LDT_ENTRY_SIZE);
                if (p->ldt != NULL)
                        memcpy(p->ldt, current->ldt, LDT_ENTRIES*LDT_ENTRY_SIZE);
        }
        set_tss_desc(gdt+(nr<<1)+FIRST_TSS_ENTRY,&(p->tss));
        /* If the vmalloc above failed (or there was no LDT), fall back to
           the shared default LDT. */
        if (p->ldt)
                set_ldt_desc(gdt+(nr<<1)+FIRST_LDT_ENTRY,p->ldt, LDT_ENTRIES);
        else
                set_ldt_desc(gdt+(nr<<1)+FIRST_LDT_ENTRY,&default_ldt, 1);
        p->tss.bitmap = offsetof(struct thread_struct,io_bitmap);
        for (i = 0; i < IO_BITMAP_SIZE+1 ; i++) /* IO bitmap is actually SIZE+1 */
                p->tss.io_bitmap[i] = ~0;       /* all ports denied */
        /* If the parent's FPU state is still live in the chip, save it into
           the child's TSS (and restore it for the parent). */
        if (last_task_used_math == current)
                __asm__("clts ; fnsave %0 ; frstor %0":"=m" (p->tss.i387));
}
500
 
501
/*
502
 * fill in the fpu structure for a core dump..
503
 */
504
/*
 * Fill in the fpu structure for a core dump.  With real FPU hardware the
 * state is taken from the chip (if still live there) or from the saved
 * tss.i387 image; with math emulation the soft state is converted to the
 * standard 387 layout.  Returns non-zero if *fpu contains valid state.
 */
int dump_fpu (struct pt_regs * regs, struct user_i387_struct* fpu)
{
        int fpvalid;

        if (hard_math) {
                if ((fpvalid = current->used_math) != 0) {
#ifdef __SMP__
                        if (current->flags & PF_USEDFPU)
#else
                        if (last_task_used_math == current)
#endif
                                /* state still lives in the FPU: save it */
                                __asm__("clts ; fnsave %0": :"m" (*fpu));
                        else
                                memcpy(fpu,&current->tss.i387.hard,sizeof(*fpu));
                }
                } else {
                /* We dump the emulator state here.
                   We convert it into standard 387 format first.. */
#ifdef CONFIG_MATH_EMULATION
                int i;
                unsigned long top;
                char (*hardreg)[10];
                struct i387_soft_struct *soft_fpu = &current->tss.i387.soft;
                struct fpu_reg* softreg;
                long int control_word = soft_fpu->cwd;

                fpu->cwd = soft_fpu->cwd;
                fpu->swd = soft_fpu->swd;
                fpu->twd = soft_fpu->twd;
                fpu->fip = soft_fpu->fip;
                fpu->fcs = soft_fpu->fcs;
                fpu->foo = soft_fpu->foo;
                fpu->fos = soft_fpu->fos;
                hardreg = (char (*)[10]) &fpu->st_space[0];
                top = (unsigned long) soft_fpu->top % 8;
                softreg = &soft_fpu->regs[top];
                /* Copy the register stack starting at top, wrapping at 8. */
                for (i = top ; i < 8; i ++) {
                        softreg_to_hardreg(softreg, *hardreg, control_word);
                        hardreg++;
                        softreg++;
                }
                softreg = &soft_fpu->regs[0];
                for (i = 0; i < top; i++) {
                        softreg_to_hardreg(softreg, *hardreg, control_word);
                        hardreg++;
                        softreg++;
                }
                fpvalid = 1;
#else /* defined(CONFIG_MATH_EMULATION) */
                fpvalid = 0;
#endif /* !defined(CONFIG_MATH_EMULATION) */
        }

        return fpvalid;
}
559
 
560
/*
561
 * fill in the user structure for a core dump..
562
 */
563
void dump_thread(struct pt_regs * regs, struct user * dump)
564
{
565
        int i;
566
 
567
/* changed the size calculations - should hopefully work better. lbt */
568
        dump->magic = CMAGIC;
569
        dump->start_code = 0;
570
        dump->start_stack = regs->esp & ~(PAGE_SIZE - 1);
571
        dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
572
        dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
573
        dump->u_dsize -= dump->u_tsize;
574
        dump->u_ssize = 0;
575
        for (i = 0; i < 8; i++)
576
                dump->u_debugreg[i] = current->debugreg[i];
577
 
578
        if (dump->start_stack < TASK_SIZE)
579
                dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;
580
 
581
        dump->regs = *regs;
582
 
583
        dump->u_fpvalid = dump_fpu (regs, &dump->i387);
584
}
585
 
586
asmlinkage int sys_fork(struct pt_regs regs)
587
{
588
        return do_fork(SIGCHLD, regs.esp, &regs);
589
}
590
 
591
/*
 * i386 clone() system call.  Flags come in ebx, the new stack pointer
 * in ecx; a zero stack pointer means "reuse the parent's stack".
 */
asmlinkage int sys_clone(struct pt_regs regs)
{
        unsigned long flags = regs.ebx;
        unsigned long child_sp = regs.ecx ? regs.ecx : regs.esp;

        return do_fork(flags, child_sp, &regs);
}
602
 
603
/*
604
 * sys_execve() executes a new program.
605
 */
606
/*
 * sys_execve() executes a new program.  The path pointer arrives in
 * ebx, argv in ecx and envp in edx.  Returns 0 on success or a
 * negative errno from getname()/do_execve().
 */
asmlinkage int sys_execve(struct pt_regs regs)
{
        char * filename;
        int rc;

        /* Copy the pathname from user space into a kernel buffer. */
        rc = getname((char *) regs.ebx, &filename);
        if (rc != 0)
                return rc;
        rc = do_execve(filename, (char **) regs.ecx, (char **) regs.edx, &regs);
        putname(filename);      /* release the kernel pathname buffer */
        return rc;
}

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.