OpenCores
URL: https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion repository: or1k
File: or1k/trunk/linux/linux-2.4/fs/exec.c (rev 1765)
/*
 *  linux/fs/exec.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * #!-checking implemented by tytso.
 */
/*
 * Demand-loading implemented 01.12.91 - no need to read anything but
 * the header into memory. The inode of the executable is put into
 * "current->executable", and page faults do the actual loading. Clean.
 *
 * Once more I can proudly say that linux stood up to being changed: it
 * was less than 2 hours work to get demand-loading completely implemented.
 *
 * Demand loading changed July 1993 by Eric Youngdale.   Use mmap instead,
 * current->executable is only used by the procfs.  This allows a dispatch
 * table to check for several different types  of binary formats.  We keep
 * trying until we recognize the file or we run out of supported binary
 * formats.
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/a.out.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/personality.h>
#include <linux/swap.h>
#include <linux/utsname.h>
#define __NO_VERSION__
#include <linux/module.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>

#ifdef CONFIG_KMOD
#include <linux/kmod.h>
#endif

int core_uses_pid;
char core_pattern[65] = "core";
int core_setuid_ok = 0;
/* The maximal length of core_pattern is also specified in sysctl.c */

static struct linux_binfmt *formats;
static rwlock_t binfmt_lock = RW_LOCK_UNLOCKED;

int register_binfmt(struct linux_binfmt * fmt)
{
        struct linux_binfmt ** tmp = &formats;

        if (!fmt)
                return -EINVAL;
        if (fmt->next)
                return -EBUSY;
        write_lock(&binfmt_lock);
        while (*tmp) {
                if (fmt == *tmp) {
                        write_unlock(&binfmt_lock);
                        return -EBUSY;
                }
                tmp = &(*tmp)->next;
        }
        fmt->next = formats;
        formats = fmt;
        write_unlock(&binfmt_lock);
        return 0;
}

int unregister_binfmt(struct linux_binfmt * fmt)
{
        struct linux_binfmt ** tmp = &formats;

        write_lock(&binfmt_lock);
        while (*tmp) {
                if (fmt == *tmp) {
                        *tmp = fmt->next;
                        write_unlock(&binfmt_lock);
                        return 0;
                }
                tmp = &(*tmp)->next;
        }
        write_unlock(&binfmt_lock);
        return -EINVAL;
}

static inline void put_binfmt(struct linux_binfmt * fmt)
{
        if (fmt->module)
                __MOD_DEC_USE_COUNT(fmt->module);
}
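
/*
 * Illustration (not from this file): a minimal sketch of how a binary-format
 * module might use the register_binfmt()/unregister_binfmt() API above.
 * The format and loader names are hypothetical; a real handler would parse
 * bprm->buf and set up the new image instead of declining with -ENOEXEC.
 */
static int load_example_binary(struct linux_binprm *bprm, struct pt_regs *regs)
{
        return -ENOEXEC;        /* not our format: let the next handler try */
}

static struct linux_binfmt example_format = {
        NULL, THIS_MODULE, load_example_binary, NULL, NULL, 0
};

static int __init example_binfmt_init(void)
{
        return register_binfmt(&example_format);
}

static void __exit example_binfmt_exit(void)
{
        unregister_binfmt(&example_format);
}

module_init(example_binfmt_init);
module_exit(example_binfmt_exit);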

/*
 * Note that a shared library must be both readable and executable due to
 * security reasons.
 *
 * Also note that we take the address to load from the file itself.
 */
asmlinkage long sys_uselib(const char * library)
{
        struct file * file;
        struct nameidata nd;
        int error;

        error = user_path_walk(library, &nd);
        if (error)
                goto out;

        error = -EINVAL;
        if (!S_ISREG(nd.dentry->d_inode->i_mode))
                goto exit;

        error = permission(nd.dentry->d_inode, MAY_READ | MAY_EXEC);
        if (error)
                goto exit;

        file = dentry_open(nd.dentry, nd.mnt, O_RDONLY);
        error = PTR_ERR(file);
        if (IS_ERR(file))
                goto out;

        error = -ENOEXEC;
        if(file->f_op && file->f_op->read) {
                struct linux_binfmt * fmt;

                read_lock(&binfmt_lock);
                for (fmt = formats ; fmt ; fmt = fmt->next) {
                        if (!fmt->load_shlib)
                                continue;
                        if (!try_inc_mod_count(fmt->module))
                                continue;
                        read_unlock(&binfmt_lock);
                        error = fmt->load_shlib(file);
                        read_lock(&binfmt_lock);
                        put_binfmt(fmt);
                        if (error != -ENOEXEC)
                                break;
                }
                read_unlock(&binfmt_lock);
        }
        fput(file);
out:
        return error;
exit:
        path_release(&nd);
        goto out;
}
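
/*
 * Illustration (not from this file): the matching user-space call.
 * uselib(2) only makes sense for binary formats whose handler provides
 * load_shlib() (a.out-style libraries); modern programs use dlopen().
 * The wrapper below goes through syscall() and assumes SYS_uselib is
 * defined for the target architecture.
 */
#include <unistd.h>
#include <sys/syscall.h>

static int load_legacy_library(const char *path)
{
        /* returns 0 on success, -1 with errno set on failure */
        return syscall(SYS_uselib, path);
}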

/*
 * count() counts the number of arguments/envelopes
 */
static int count(char ** argv, int max)
{
        int i = 0;

        if (argv != NULL) {
                for (;;) {
                        char * p;

                        if (get_user(p, argv))
                                return -EFAULT;
                        if (!p)
                                break;
                        argv++;
                        if(++i > max)
                                return -E2BIG;
                }
        }
        return i;
}

/*
 * 'copy_strings()' copies argument/envelope strings from user
 * memory to free pages in kernel mem. These are in a format ready
 * to be put directly into the top of new user memory.
 */
int copy_strings(int argc,char ** argv, struct linux_binprm *bprm)
{
        struct page *kmapped_page = NULL;
        char *kaddr = NULL;
        int ret;

        while (argc-- > 0) {
                char *str;
                int len;
                unsigned long pos;

                if (get_user(str, argv+argc) ||
                                !(len = strnlen_user(str, bprm->p))) {
                        ret = -EFAULT;
                        goto out;
                }

                if (bprm->p < len)  {
                        ret = -E2BIG;
                        goto out;
                }

                bprm->p -= len;
                /* XXX: add architecture specific overflow check here. */
                pos = bprm->p;

                while (len > 0) {
                        int i, new, err;
                        int offset, bytes_to_copy;
                        struct page *page;

                        offset = pos % PAGE_SIZE;
                        i = pos/PAGE_SIZE;
                        page = bprm->page[i];
                        new = 0;
                        if (!page) {
                                page = alloc_page(GFP_HIGHUSER);
                                bprm->page[i] = page;
                                if (!page) {
                                        ret = -ENOMEM;
                                        goto out;
                                }
                                new = 1;
                        }

                        if (page != kmapped_page) {
                                if (kmapped_page)
                                        kunmap(kmapped_page);
                                kmapped_page = page;
                                kaddr = kmap(kmapped_page);
                        }
                        if (new && offset)
                                memset(kaddr, 0, offset);
                        bytes_to_copy = PAGE_SIZE - offset;
                        if (bytes_to_copy > len) {
                                bytes_to_copy = len;
                                if (new)
                                        memset(kaddr+offset+len, 0,
                                                PAGE_SIZE-offset-len);
                        }
                        err = copy_from_user(kaddr+offset, str, bytes_to_copy);
                        if (err) {
                                ret = -EFAULT;
                                goto out;
                        }

                        pos += bytes_to_copy;
                        str += bytes_to_copy;
                        len -= bytes_to_copy;
                }
        }
        ret = 0;
out:
        if (kmapped_page)
                kunmap(kmapped_page);
        return ret;
}

/*
 * Like copy_strings, but get argv and its values from kernel memory.
 */
int copy_strings_kernel(int argc,char ** argv, struct linux_binprm *bprm)
{
        int r;
        mm_segment_t oldfs = get_fs();
        set_fs(KERNEL_DS);
        r = copy_strings(argc, argv, bprm);
        set_fs(oldfs);
        return r;
}

/*
 * This routine is used to map in a page into an address space: needed by
 * execve() for the initial stack and environment pages.
 *
 * tsk->mmap_sem is held for writing.
 */
void put_dirty_page(struct task_struct * tsk, struct page *page, unsigned long address)
{
        pgd_t * pgd;
        pmd_t * pmd;
        pte_t * pte;
        struct vm_area_struct *vma;
        pgprot_t prot = PAGE_COPY;

        if (page_count(page) != 1)
                printk(KERN_ERR "mem_map disagrees with %p at %08lx\n", page, address);
        pgd = pgd_offset(tsk->mm, address);

        spin_lock(&tsk->mm->page_table_lock);
        pmd = pmd_alloc(tsk->mm, pgd, address);
        if (!pmd)
                goto out;
        pte = pte_alloc(tsk->mm, pmd, address);
        if (!pte)
                goto out;
        if (!pte_none(*pte))
                goto out;
        lru_cache_add(page);
        flush_dcache_page(page);
        flush_page_to_ram(page);
        /* lookup is cheap because there is only a single entry in the list */
        vma = find_vma(tsk->mm, address);
        if (vma)
                prot = vma->vm_page_prot;
        set_pte(pte, pte_mkdirty(pte_mkwrite(mk_pte(page, prot))));
        tsk->mm->rss++;
        spin_unlock(&tsk->mm->page_table_lock);

        /* no need for flush_tlb */
        return;
out:
        spin_unlock(&tsk->mm->page_table_lock);
        __free_page(page);
        force_sig(SIGKILL, tsk);
        return;
}

int setup_arg_pages(struct linux_binprm *bprm)
{
        unsigned long stack_base;
        struct vm_area_struct *mpnt;
        int i;

        stack_base = STACK_TOP - MAX_ARG_PAGES*PAGE_SIZE;

        bprm->p += stack_base;
        if (bprm->loader)
                bprm->loader += stack_base;
        bprm->exec += stack_base;

        mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
        if (!mpnt)
                return -ENOMEM;

        down_write(&current->mm->mmap_sem);
        {
                mpnt->vm_mm = current->mm;
                mpnt->vm_start = PAGE_MASK & (unsigned long) bprm->p;
                mpnt->vm_end = STACK_TOP;
                mpnt->vm_flags = VM_STACK_FLAGS;
                mpnt->vm_page_prot = protection_map[VM_STACK_FLAGS & 0x7];
                mpnt->vm_ops = NULL;
                mpnt->vm_pgoff = 0;
                mpnt->vm_file = NULL;
                mpnt->vm_private_data = (void *) 0;
                insert_vm_struct(current->mm, mpnt);
                current->mm->total_vm = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
        }

        for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
                struct page *page = bprm->page[i];
                if (page) {
                        bprm->page[i] = NULL;
                        put_dirty_page(current,page,stack_base);
                }
                stack_base += PAGE_SIZE;
        }
        up_write(&current->mm->mmap_sem);

        return 0;
}

struct file *open_exec(const char *name)
{
        struct nameidata nd;
        struct inode *inode;
        struct file *file;
        int err = 0;

        err = path_lookup(name, LOOKUP_FOLLOW|LOOKUP_POSITIVE, &nd);
        file = ERR_PTR(err);
        if (!err) {
                inode = nd.dentry->d_inode;
                file = ERR_PTR(-EACCES);
                if (!(nd.mnt->mnt_flags & MNT_NOEXEC) &&
                    S_ISREG(inode->i_mode)) {
                        int err = permission(inode, MAY_EXEC);
                        if (!err && !(inode->i_mode & 0111))
                                err = -EACCES;
                        file = ERR_PTR(err);
                        if (!err) {
                                file = dentry_open(nd.dentry, nd.mnt, O_RDONLY);
                                if (!IS_ERR(file)) {
                                        err = deny_write_access(file);
                                        if (err) {
                                                fput(file);
                                                file = ERR_PTR(err);
                                        }
                                }
out:
                                return file;
                        }
                }
                path_release(&nd);
        }
        goto out;
}

int kernel_read(struct file *file, unsigned long offset,
        char * addr, unsigned long count)
{
        mm_segment_t old_fs;
        loff_t pos = offset;
        int result = -ENOSYS;

        if (!file->f_op->read)
                goto fail;
        old_fs = get_fs();
        set_fs(get_ds());
        result = file->f_op->read(file, addr, count, &pos);
        set_fs(old_fs);
fail:
        return result;
}

static int exec_mmap(void)
{
        struct mm_struct * mm, * old_mm;

        old_mm = current->mm;

        if (old_mm && atomic_read(&old_mm->mm_users) == 1) {
                mm_release();
                down_write(&old_mm->mmap_sem);
                exit_mmap(old_mm);
                up_write(&old_mm->mmap_sem);
                return 0;
        }


        mm = mm_alloc();
        if (mm) {
                struct mm_struct *active_mm;

                if (init_new_context(current, mm)) {
                        mmdrop(mm);
                        return -ENOMEM;
                }

                /* Add it to the list of mm's */
                spin_lock(&mmlist_lock);
                list_add(&mm->mmlist, &init_mm.mmlist);
                mmlist_nr++;
                spin_unlock(&mmlist_lock);

                task_lock(current);
                active_mm = current->active_mm;
                current->mm = mm;
                current->active_mm = mm;
                task_unlock(current);
                activate_mm(active_mm, mm);
                mm_release();
                if (old_mm) {
                        if (active_mm != old_mm) BUG();
                        mmput(old_mm);
                        return 0;
                }
                mmdrop(active_mm);
                return 0;
        }
        return -ENOMEM;
}

/*
 * This function makes sure the current process has its own signal table,
 * so that flush_signal_handlers can later reset the handlers without
 * disturbing other processes.  (Other processes might share the signal
 * table via the CLONE_SIGNAL option to clone().)
 */

static inline int make_private_signals(void)
{
        struct signal_struct * newsig;

        if (atomic_read(&current->sig->count) <= 1)
                return 0;
        newsig = kmem_cache_alloc(sigact_cachep, GFP_KERNEL);
        if (newsig == NULL)
                return -ENOMEM;
        spin_lock_init(&newsig->siglock);
        atomic_set(&newsig->count, 1);
        memcpy(newsig->action, current->sig->action, sizeof(newsig->action));
        spin_lock_irq(&current->sigmask_lock);
        current->sig = newsig;
        spin_unlock_irq(&current->sigmask_lock);
        return 0;
}

/*
 * If make_private_signals() made a copy of the signal table, decrement the
 * refcount of the original table, and free it if necessary.
 * We don't do that in make_private_signals() so that we can back off
 * in flush_old_exec() if an error occurs after calling make_private_signals().
 */

static inline void release_old_signals(struct signal_struct * oldsig)
{
        if (current->sig == oldsig)
                return;
        if (atomic_dec_and_test(&oldsig->count))
                kmem_cache_free(sigact_cachep, oldsig);
}

/*
 * These functions flush out all traces of the currently running executable
 * so that a new one can be started
 */

static inline void flush_old_files(struct files_struct * files)
{
        long j = -1;

        write_lock(&files->file_lock);
        for (;;) {
                unsigned long set, i;

                j++;
                i = j * __NFDBITS;
                if (i >= files->max_fds || i >= files->max_fdset)
                        break;
                set = files->close_on_exec->fds_bits[j];
                if (!set)
                        continue;
                files->close_on_exec->fds_bits[j] = 0;
                write_unlock(&files->file_lock);
                for ( ; set ; i++,set >>= 1) {
                        if (set & 1) {
                                sys_close(i);
                        }
                }
                write_lock(&files->file_lock);

        }
        write_unlock(&files->file_lock);
}
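
/*
 * Illustration (not from this file): flush_old_files() is what ultimately
 * honours FD_CLOEXEC during execve(). From user space a descriptor is
 * marked for that treatment like so (sketch only):
 */
#include <fcntl.h>

static int mark_close_on_exec(int fd)
{
        int flags = fcntl(fd, F_GETFD);
        if (flags == -1)
                return -1;
        /* the fd will be closed by the kernel when this process exec()s */
        return fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
}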

/*
 * An execve() will automatically "de-thread" the process.
 * Note: we don't have to hold the tasklist_lock to test
 * whether we might need to do this. If we're not part of
 * a thread group, there is no way we can become one
 * dynamically. And if we are, we only need to protect the
 * unlink - even if we race with the last other thread exit,
 * at worst the list_del_init() might end up being a no-op.
 */
static inline void de_thread(struct task_struct *tsk)
{
        if (!list_empty(&tsk->thread_group)) {
                write_lock_irq(&tasklist_lock);
                list_del_init(&tsk->thread_group);
                write_unlock_irq(&tasklist_lock);
        }

        /* Minor oddity: this might stay the same. */
        tsk->tgid = tsk->pid;
}

int flush_old_exec(struct linux_binprm * bprm)
{
        char * name;
        int i, ch, retval;
        struct signal_struct * oldsig;
        struct files_struct * files;

        /*
         * Make sure we have a private signal table
         */
        oldsig = current->sig;
        retval = make_private_signals();
        if (retval) goto flush_failed;

        /*
         * Make sure we have private file handles. Ask the
         * fork helper to do the work for us and the exit
         * helper to do the cleanup of the old one.
         */

        files = current->files;         /* refcounted so safe to hold */
        retval = unshare_files();
        if(retval)
                goto flush_failed;

        /*
         * Release all of the old mmap stuff
         */
        retval = exec_mmap();
        if (retval) goto mmap_failed;

        /* This is the point of no return */
        steal_locks(files);
        put_files_struct(files);
        release_old_signals(oldsig);

        current->sas_ss_sp = current->sas_ss_size = 0;

        if (current->euid == current->uid && current->egid == current->gid) {
                current->mm->dumpable = 1;
                current->task_dumpable = 1;
        }
        name = bprm->filename;
        for (i=0; (ch = *(name++)) != '\0';) {
                if (ch == '/')
                        i = 0;
                else
                        if (i < 15)
                                current->comm[i++] = ch;
        }
        current->comm[i] = '\0';

        flush_thread();

        de_thread(current);

        if (bprm->e_uid != current->euid || bprm->e_gid != current->egid ||
            permission(bprm->file->f_dentry->d_inode,MAY_READ))
                current->mm->dumpable = 0;

        /* An exec changes our domain. We are no longer part of the thread
           group */

        current->self_exec_id++;

        flush_signal_handlers(current);
        flush_old_files(current->files);

        return 0;

mmap_failed:
        put_files_struct(current->files);
        current->files = files;
flush_failed:
        spin_lock_irq(&current->sigmask_lock);
        if (current->sig != oldsig) {
                kmem_cache_free(sigact_cachep, current->sig);
                current->sig = oldsig;
        }
        spin_unlock_irq(&current->sigmask_lock);
        return retval;
}

/*
 * We mustn't allow tracing of suid binaries, unless
 * the tracer has the capability to trace anything..
 */
static inline int must_not_trace_exec(struct task_struct * p)
{
        return (p->ptrace & PT_PTRACED) && !(p->ptrace & PT_PTRACE_CAP);
}

/*
 * Fill the binprm structure from the inode.
 * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
 */
int prepare_binprm(struct linux_binprm *bprm)
{
        int mode;
        struct inode * inode = bprm->file->f_dentry->d_inode;

        mode = inode->i_mode;
        /*
         * Check execute perms again - if the caller has CAP_DAC_OVERRIDE,
         * vfs_permission lets a non-executable through
         */
        if (!(mode & 0111))     /* with at least _one_ execute bit set */
                return -EACCES;
        if (bprm->file->f_op == NULL)
                return -EACCES;

        bprm->e_uid = current->euid;
        bprm->e_gid = current->egid;

        if(!(bprm->file->f_vfsmnt->mnt_flags & MNT_NOSUID)) {
                /* Set-uid? */
                if (mode & S_ISUID)
                        bprm->e_uid = inode->i_uid;

                /* Set-gid? */
                /*
                 * If setgid is set but no group execute bit then this
                 * is a candidate for mandatory locking, not a setgid
                 * executable.
                 */
                if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))
                        bprm->e_gid = inode->i_gid;
        }

        /* We don't have VFS support for capabilities yet */
        cap_clear(bprm->cap_inheritable);
        cap_clear(bprm->cap_permitted);
        cap_clear(bprm->cap_effective);

        /*  To support inheritance of root-permissions and suid-root
         *  executables under compatibility mode, we raise all three
         *  capability sets for the file.
         *
         *  If only the real uid is 0, we only raise the inheritable
         *  and permitted sets of the executable file.
         */

        if (!issecure(SECURE_NOROOT)) {
                if (bprm->e_uid == 0 || current->uid == 0) {
                        cap_set_full(bprm->cap_inheritable);
                        cap_set_full(bprm->cap_permitted);
                }
                if (bprm->e_uid == 0)
                        cap_set_full(bprm->cap_effective);
        }

        memset(bprm->buf,0,BINPRM_BUF_SIZE);
        return kernel_read(bprm->file,0,bprm->buf,BINPRM_BUF_SIZE);
}

/*
 * This function is used to produce the new IDs and capabilities
 * from the old ones and the file's capabilities.
 *
 * The formula used for evolving capabilities is:
 *
 *       pI' = pI
 * (***) pP' = (fP & X) | (fI & pI)
 *       pE' = pP' & fE          [NB. fE is 0 or ~0]
 *
 * I=Inheritable, P=Permitted, E=Effective // p=process, f=file
 * ' indicates post-exec(), and X is the global 'cap_bset'.
 *
 */
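/*
 * Worked example of the formula above, with illustrative bit values that
 * are not taken from this file: suppose fP = 0x09, fI = 0x04, pI = 0x06
 * and X (cap_bset) = ~0. Then
 *
 *      pP' = (fP & X) | (fI & pI) = 0x09 | (0x04 & 0x06) = 0x0d
 *      pE' = pP' & fE             = 0x0d when fE = ~0, else 0
 *
 * and pI' stays equal to pI (0x06).
 */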

void compute_creds(struct linux_binprm *bprm)
{
        kernel_cap_t new_permitted, working;
        int do_unlock = 0;

        new_permitted = cap_intersect(bprm->cap_permitted, cap_bset);
        working = cap_intersect(bprm->cap_inheritable,
                                current->cap_inheritable);
        new_permitted = cap_combine(new_permitted, working);

        if (bprm->e_uid != current->uid || bprm->e_gid != current->gid ||
            !cap_issubset(new_permitted, current->cap_permitted)) {
                current->mm->dumpable = 0;

                lock_kernel();
                if (must_not_trace_exec(current)
                    || atomic_read(&current->fs->count) > 1
                    || atomic_read(&current->files->count) > 1
                    || atomic_read(&current->sig->count) > 1) {
                        if(!capable(CAP_SETUID)) {
                                bprm->e_uid = current->uid;
                                bprm->e_gid = current->gid;
                        }
                        if(!capable(CAP_SETPCAP)) {
                                new_permitted = cap_intersect(new_permitted,
                                                        current->cap_permitted);
                        }
                }
                do_unlock = 1;
        }


        /* For init, we want to retain the capabilities set
         * in the init_task struct. Thus we skip the usual
         * capability rules */
        if (current->pid != 1) {
                current->cap_permitted = new_permitted;
                current->cap_effective =
                        cap_intersect(new_permitted, bprm->cap_effective);
        }

        /* AUD: Audit candidate if current->cap_effective is set */

        current->suid = current->euid = current->fsuid = bprm->e_uid;
        current->sgid = current->egid = current->fsgid = bprm->e_gid;

        if(do_unlock)
                unlock_kernel();
        current->keep_capabilities = 0;
}


void remove_arg_zero(struct linux_binprm *bprm)
{
        if (bprm->argc) {
                unsigned long offset;
                char * kaddr;
                struct page *page;

                offset = bprm->p % PAGE_SIZE;
                goto inside;

                while (bprm->p++, *(kaddr+offset++)) {
                        if (offset != PAGE_SIZE)
                                continue;
                        offset = 0;
                        kunmap(page);
inside:
                        page = bprm->page[bprm->p/PAGE_SIZE];
                        kaddr = kmap(page);
                }
                kunmap(page);
                bprm->argc--;
        }
}

/*
 * cycle through the list of binary format handlers, until one recognizes the image
 */
int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
{
        int try,retval=0;
        struct linux_binfmt *fmt;
#ifdef __alpha__
        /* handle /sbin/loader.. */
        {
            struct exec * eh = (struct exec *) bprm->buf;

            if (!bprm->loader && eh->fh.f_magic == 0x183 &&
                (eh->fh.f_flags & 0x3000) == 0x3000)
            {
                struct file * file;
                unsigned long loader;

                allow_write_access(bprm->file);
                fput(bprm->file);
                bprm->file = NULL;

                loader = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);

                file = open_exec("/sbin/loader");
                retval = PTR_ERR(file);
                if (IS_ERR(file))
                        return retval;

                /* Remember if the application is TASO.  */
                bprm->sh_bang = eh->ah.entry < 0x100000000;

                bprm->file = file;
                bprm->loader = loader;
                retval = prepare_binprm(bprm);
                if (retval<0)
                        return retval;
                /* should call search_binary_handler recursively here,
                   but it does not matter */
            }
        }
#endif
        /* kernel module loader fixup */
        /* so we don't try to run modprobe in kernel space. */
        set_fs(USER_DS);
        for (try=0; try<2; try++) {
                read_lock(&binfmt_lock);
                for (fmt = formats ; fmt ; fmt = fmt->next) {
                        int (*fn)(struct linux_binprm *, struct pt_regs *) = fmt->load_binary;
                        if (!fn)
                                continue;
                        if (!try_inc_mod_count(fmt->module))
                                continue;
                        read_unlock(&binfmt_lock);
                        retval = fn(bprm, regs);
                        if (retval >= 0) {
                                put_binfmt(fmt);
                                allow_write_access(bprm->file);
                                if (bprm->file)
                                        fput(bprm->file);
                                bprm->file = NULL;
                                current->did_exec = 1;
                                return retval;
                        }
                        read_lock(&binfmt_lock);
                        put_binfmt(fmt);
                        if (retval != -ENOEXEC)
                                break;
                        if (!bprm->file) {
                                read_unlock(&binfmt_lock);
                                return retval;
                        }
                }
                read_unlock(&binfmt_lock);
                if (retval != -ENOEXEC) {
                        break;
#ifdef CONFIG_KMOD
                }else{
#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
                        char modname[20];
                        if (printable(bprm->buf[0]) &&
                            printable(bprm->buf[1]) &&
                            printable(bprm->buf[2]) &&
                            printable(bprm->buf[3]))
                                break; /* -ENOEXEC */
                        sprintf(modname, "binfmt-%04x", *(unsigned short *)(&bprm->buf[2]));
                        request_module(modname);
#endif
                }
        }
        return retval;
}


/*
 * sys_execve() executes a new program.
 */
int do_execve(char * filename, char ** argv, char ** envp, struct pt_regs * regs)
{
        struct linux_binprm bprm;
        struct file *file;
        int retval;
        int i;

        file = open_exec(filename);

        retval = PTR_ERR(file);
        if (IS_ERR(file))
                return retval;

        bprm.p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);
        memset(bprm.page, 0, MAX_ARG_PAGES*sizeof(bprm.page[0]));

        bprm.file = file;
        bprm.filename = filename;
        bprm.sh_bang = 0;
        bprm.loader = 0;
        bprm.exec = 0;
        if ((bprm.argc = count(argv, bprm.p / sizeof(void *))) < 0) {
                allow_write_access(file);
                fput(file);
                return bprm.argc;
        }

        if ((bprm.envc = count(envp, bprm.p / sizeof(void *))) < 0) {
                allow_write_access(file);
                fput(file);
                return bprm.envc;
        }

        retval = prepare_binprm(&bprm);
        if (retval < 0)
                goto out;

        retval = copy_strings_kernel(1, &bprm.filename, &bprm);
        if (retval < 0)
                goto out;

        bprm.exec = bprm.p;
        retval = copy_strings(bprm.envc, envp, &bprm);
        if (retval < 0)
                goto out;

        retval = copy_strings(bprm.argc, argv, &bprm);
        if (retval < 0)
                goto out;

        retval = search_binary_handler(&bprm,regs);
        if (retval >= 0)
                /* execve success */
                return retval;

out:
        /* Something went wrong, return the inode and free the argument pages*/
        allow_write_access(bprm.file);
        if (bprm.file)
                fput(bprm.file);

        for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
                struct page * page = bprm.page[i];
                if (page)
                        __free_page(page);
        }

        return retval;
}
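
/*
 * Illustration (not from this file): the user-space view of the path through
 * do_execve(). argv[0] conventionally repeats the program name, and both
 * vectors must be NULL-terminated (count() above relies on that terminator).
 */
#include <unistd.h>
#include <stdio.h>

int run_ls(void)
{
        char *argv[] = { "ls", "-l", NULL };
        char *envp[] = { "PATH=/bin:/usr/bin", NULL };

        execve("/bin/ls", argv, envp);
        /* only reached if execve() failed */
        perror("execve");
        return -1;
}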

void set_binfmt(struct linux_binfmt *new)
{
        struct linux_binfmt *old = current->binfmt;
        if (new && new->module)
                __MOD_INC_USE_COUNT(new->module);
        current->binfmt = new;
        if (old && old->module)
                __MOD_DEC_USE_COUNT(old->module);
}

#define CORENAME_MAX_SIZE 64

/* format_corename will inspect the pattern parameter, and output a
 * name into corename, which must have space for at least
 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
 */
void format_corename(char *corename, const char *pattern, long signr)
{
        const char *pat_ptr = pattern;
        char *out_ptr = corename;
        char *const out_end = corename + CORENAME_MAX_SIZE;
        int rc;
        int pid_in_pattern = 0;

        /* Repeat as long as we have more pattern to process and more output
           space */
        while (*pat_ptr) {
                if (*pat_ptr != '%') {
                        if (out_ptr == out_end)
                                goto out;
                        *out_ptr++ = *pat_ptr++;
                } else {
                        switch (*++pat_ptr) {
                        case 0:
                                goto out;
                        /* Double percent, output one percent */
                        case '%':
                                if (out_ptr == out_end)
                                        goto out;
                                *out_ptr++ = '%';
                                break;
                        /* pid */
                        case 'p':
                                pid_in_pattern = 1;
                                rc = snprintf(out_ptr, out_end - out_ptr,
                                              "%d", current->pid);
                                if (rc > out_end - out_ptr)
                                        goto out;
                                out_ptr += rc;
                                break;
                        /* uid */
                        case 'u':
                                rc = snprintf(out_ptr, out_end - out_ptr,
                                              "%d", current->uid);
                                if (rc > out_end - out_ptr)
                                        goto out;
                                out_ptr += rc;
                                break;
                        /* gid */
                        case 'g':
                                rc = snprintf(out_ptr, out_end - out_ptr,
                                              "%d", current->gid);
                                if (rc > out_end - out_ptr)
                                        goto out;
                                out_ptr += rc;
                                break;
                        /* signal that caused the coredump */
                        case 's':
                                rc = snprintf(out_ptr, out_end - out_ptr,
                                              "%ld", signr);
                                if (rc > out_end - out_ptr)
                                        goto out;
                                out_ptr += rc;
                                break;
                        /* UNIX time of coredump */
                        case 't': {
                                struct timeval tv;
                                do_gettimeofday(&tv);
                                rc = snprintf(out_ptr, out_end - out_ptr,
                                              "%ld", tv.tv_sec);
                                if (rc > out_end - out_ptr)
                                        goto out;
                                out_ptr += rc;
                                break;
                        }
                        /* hostname */
                        case 'h':
                                down_read(&uts_sem);
                                rc = snprintf(out_ptr, out_end - out_ptr,
                                              "%s", system_utsname.nodename);
                                up_read(&uts_sem);
                                if (rc > out_end - out_ptr)
                                        goto out;
                                out_ptr += rc;
                                break;
                        /* executable */
                        case 'e':
                                rc = snprintf(out_ptr, out_end - out_ptr,
                                              "%s", current->comm);
                                if (rc > out_end - out_ptr)
                                        goto out;
                                out_ptr += rc;
                                break;
                        default:
                                break;
                        }
                        ++pat_ptr;
                }
        }
        /* Backward compatibility with core_uses_pid:
         *
         * If core_pattern does not include a %p (as is the default)
         * and core_uses_pid is set, then .%pid will be appended to
         * the filename */
        if (!pid_in_pattern
            && (core_uses_pid || atomic_read(&current->mm->mm_users) != 1)) {
                rc = snprintf(out_ptr, out_end - out_ptr,
                              ".%d", current->pid);
                if (rc > out_end - out_ptr)
                        goto out;
                out_ptr += rc;
        }
      out:
        *out_ptr = 0;
}
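
/*
 * Illustration (not from this file): core_pattern and core_uses_pid are
 * exposed through sysctl/procfs, so the specifiers handled above can be
 * installed from user space, e.g. "core.%e.%p" (executable name plus pid).
 * Sketch only; assumes /proc is mounted.
 */
#include <stdio.h>

static int set_core_pattern(const char *pattern)
{
        FILE *f = fopen("/proc/sys/kernel/core_pattern", "w");
        if (!f)
                return -1;
        fputs(pattern, f);
        return fclose(f);
}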

int do_coredump(long signr, struct pt_regs * regs)
{
        struct linux_binfmt * binfmt;
        char corename[CORENAME_MAX_SIZE + 1];
        struct file * file;
        struct inode * inode;
        int retval = 0;
        int fsuid = current->fsuid;

        lock_kernel();
        binfmt = current->binfmt;
        if (!binfmt || !binfmt->core_dump)
                goto fail;
        if (!is_dumpable(current))
        {
                if(!core_setuid_ok || !current->task_dumpable)
                        goto fail;
                current->fsuid = 0;
        }
        current->mm->dumpable = 0;
        if (current->rlim[RLIMIT_CORE].rlim_cur < binfmt->min_coredump)
                goto fail;

        format_corename(corename, core_pattern, signr);
        file = filp_open(corename, O_CREAT | 2 | O_NOFOLLOW, 0600);
        if (IS_ERR(file))
                goto fail;
        inode = file->f_dentry->d_inode;
        if (inode->i_nlink > 1)
                goto close_fail;        /* multiple links - don't dump */
        if (d_unhashed(file->f_dentry))
                goto close_fail;

        if (!S_ISREG(inode->i_mode))
                goto close_fail;
        if (!file->f_op)
                goto close_fail;
        if (!file->f_op->write)
                goto close_fail;
        if (do_truncate(file->f_dentry, 0) != 0)
                goto close_fail;

        retval = binfmt->core_dump(signr, regs, file);

close_fail:
        filp_close(file, NULL);
fail:
        if (fsuid != current->fsuid)
                current->fsuid = fsuid;
        unlock_kernel();
        return retval;
}
