OpenCores Subversion repository: or1k
URL https://opencores.org/ocsvn/or1k/or1k/trunk
File: trunk/linux/linux-2.4/kernel/signal.c (rev 1765)

/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/unistd.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/sched.h>

#include <asm/uaccess.h>

/*
 * SLAB caches for signal bits.
 */

#define DEBUG_SIG 0

#if DEBUG_SIG
#define SIG_SLAB_DEBUG  (SLAB_DEBUG_FREE | SLAB_RED_ZONE /* | SLAB_POISON */)
#else
#define SIG_SLAB_DEBUG  0
#endif

static kmem_cache_t *sigqueue_cachep;

atomic_t nr_queued_signals;
int max_queued_signals = 1024;

void __init signals_init(void)
{
        sigqueue_cachep =
                kmem_cache_create("sigqueue",
                                  sizeof(struct sigqueue),
                                  __alignof__(struct sigqueue),
                                  SIG_SLAB_DEBUG, NULL, NULL);
        if (!sigqueue_cachep)
                panic("signals_init(): cannot create sigqueue SLAB cache");
}


/* Given the mask, find the first available signal that should be serviced. */

static int
next_signal(struct task_struct *tsk, sigset_t *mask)
{
        unsigned long i, *s, *m, x;
        int sig = 0;

        s = tsk->pending.signal.sig;
        m = mask->sig;
        switch (_NSIG_WORDS) {
        default:
                for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
                        if ((x = *s &~ *m) != 0) {
                                sig = ffz(~x) + i*_NSIG_BPW + 1;
                                break;
                        }
                break;

        case 2: if ((x = s[0] &~ m[0]) != 0)
                        sig = 1;
                else if ((x = s[1] &~ m[1]) != 0)
                        sig = _NSIG_BPW + 1;
                else
                        break;
                sig += ffz(~x);
                break;

        case 1: if ((x = *s &~ *m) != 0)
                        sig = ffz(~x) + 1;
                break;
        }

        return sig;
}
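
/*
 * Illustrative sketch, not part of the original file: next_signal() maps
 * the lowest set bit of pending & ~blocked back to a 1-based signal
 * number via ffz(~x) (find-first-zero of the complement).  A minimal
 * userspace model of the same bit trick, assuming GCC's __builtin_ctzl;
 * kept compiled out so the listing still builds as-is.
 */
#if 0
#include <assert.h>

static int first_set_bit(unsigned long x)       /* behaves like ffz(~x) */
{
        return __builtin_ctzl(x);               /* x must be non-zero */
}

int main(void)
{
        /* Pending SIGINT (2) and SIGTERM (15) with SIGINT blocked:
           x = pending & ~blocked leaves only bit 14 (SIGTERM). */
        unsigned long pending = (1UL << (2 - 1)) | (1UL << (15 - 1));
        unsigned long blocked = (1UL << (2 - 1));
        unsigned long x = pending & ~blocked;

        assert(first_set_bit(x) + 1 == 15);     /* lowest deliverable signal */
        return 0;
}
#endif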

static void flush_sigqueue(struct sigpending *queue)
{
        struct sigqueue *q, *n;

        sigemptyset(&queue->signal);
        q = queue->head;
        queue->head = NULL;
        queue->tail = &queue->head;

        while (q) {
                n = q->next;
                kmem_cache_free(sigqueue_cachep, q);
                atomic_dec(&nr_queued_signals);
                q = n;
        }
}

/*
 * Flush all pending signals for a task.
 */

void
flush_signals(struct task_struct *t)
{
        t->sigpending = 0;
        flush_sigqueue(&t->pending);
}

void exit_sighand(struct task_struct *tsk)
{
        struct signal_struct * sig = tsk->sig;

        spin_lock_irq(&tsk->sigmask_lock);
        if (sig) {
                tsk->sig = NULL;
                if (atomic_dec_and_test(&sig->count))
                        kmem_cache_free(sigact_cachep, sig);
        }
        tsk->sigpending = 0;
        flush_sigqueue(&tsk->pending);
        spin_unlock_irq(&tsk->sigmask_lock);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t)
{
        int i;
        struct k_sigaction *ka = &t->sig->action[0];
        for (i = _NSIG ; i != 0 ; i--) {
                if (ka->sa.sa_handler != SIG_IGN)
                        ka->sa.sa_handler = SIG_DFL;
                ka->sa.sa_flags = 0;
                sigemptyset(&ka->sa.sa_mask);
                ka++;
        }
}

/*
 * sig_exit - cause the current task to exit due to a signal.
 */

void
sig_exit(int sig, int exit_code, struct siginfo *info)
{
        struct task_struct *t;

        sigaddset(&current->pending.signal, sig);
        recalc_sigpending(current);
        current->flags |= PF_SIGNALED;

        /* Propagate the signal to all the tasks in
         *  our thread group
         */
        if (info && (unsigned long)info != 1
            && info->si_code != SI_TKILL) {
                read_lock(&tasklist_lock);
                for_each_thread(t) {
                        force_sig_info(sig, info, t);
                }
                read_unlock(&tasklist_lock);
        }

        do_exit(exit_code);
        /* NOTREACHED */
}

/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.  */

void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
        unsigned long flags;

        spin_lock_irqsave(&current->sigmask_lock, flags);
        current->notifier_mask = mask;
        current->notifier_data = priv;
        current->notifier = notifier;
        spin_unlock_irqrestore(&current->sigmask_lock, flags);
}

/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
        unsigned long flags;

        spin_lock_irqsave(&current->sigmask_lock, flags);
        current->notifier = NULL;
        current->notifier_data = NULL;
        recalc_sigpending(current);
        spin_unlock_irqrestore(&current->sigmask_lock, flags);
}
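
/*
 * Hypothetical usage sketch of the notifier interface above (not part of
 * the original file; every "example_" name is invented).  A driver blocks
 * everything except SIGKILL around a critical hardware window and lets
 * its notifier veto delivery; compiled out with #if 0.
 */
#if 0
struct example_dev {
        int idle;
};

static int example_notifier(void *priv)
{
        struct example_dev *dev = priv;
        return dev->idle;       /* non-zero: deliver the signal after all */
}

static void example_enter_critical(struct example_dev *dev)
{
        static sigset_t mask;   /* notifier_mask stores a pointer, so the
                                   set must stay valid while blocked */

        sigfillset(&mask);
        sigdelset(&mask, SIGKILL);
        block_all_signals(example_notifier, dev, &mask);
}

static void example_leave_critical(void)
{
        unblock_all_signals();
}
#endif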

static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
        if (sigismember(&list->signal, sig)) {
                /* Collect the siginfo appropriate to this signal.  */
                struct sigqueue *q, **pp;
                pp = &list->head;
                while ((q = *pp) != NULL) {
                        if (q->info.si_signo == sig)
                                goto found_it;
                        pp = &q->next;
                }

                /* Ok, it wasn't in the queue.  We must have
                   been out of queue space.  So zero out the
                   info.  */
                sigdelset(&list->signal, sig);
                info->si_signo = sig;
                info->si_errno = 0;
                info->si_code = 0;
                info->si_pid = 0;
                info->si_uid = 0;
                return 1;

found_it:
                if ((*pp = q->next) == NULL)
                        list->tail = pp;

                /* Copy the sigqueue information and free the queue entry */
                copy_siginfo(info, &q->info);
                kmem_cache_free(sigqueue_cachep,q);
                atomic_dec(&nr_queued_signals);

                /* RT signals can be queued multiple times; only clear the
                   pending bit if no further instance is queued. */
                if (sig >= SIGRTMIN) {
                        while ((q = *pp) != NULL) {
                                if (q->info.si_signo == sig)
                                        goto found_another;
                                pp = &q->next;
                        }
                }

                sigdelset(&list->signal, sig);
found_another:
                return 1;
        }
        return 0;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers must be holding current->sigmask_lock.
 */

int
dequeue_signal(sigset_t *mask, siginfo_t *info)
{
        int sig = 0;

#if DEBUG_SIG
printk("SIG dequeue (%s:%d): %d ", current->comm, current->pid,
        signal_pending(current));
#endif

        sig = next_signal(current, mask);
        if (sig) {
                if (current->notifier) {
                        if (sigismember(current->notifier_mask, sig)) {
                                if (!(current->notifier)(current->notifier_data)) {
                                        current->sigpending = 0;
                                        return 0;
                                }
                        }
                }

                if (!collect_signal(sig, &current->pending, info))
                        sig = 0;

                /* XXX: Once POSIX.1b timers are in, if si_code == SI_TIMER,
                   we need to xchg out the timer overrun values.  */
        }
        recalc_sigpending(current);

#if DEBUG_SIG
printk(" %d -> %d\n", signal_pending(current), sig);
#endif

        return sig;
}
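
/*
 * Sketch of the locking contract stated above (illustrative, not part of
 * the original file): callers such as the per-architecture do_signal()
 * loops take current->sigmask_lock around dequeue_signal().
 */
#if 0
static void example_deliver_pending(void)
{
        siginfo_t info;
        int signr;

        spin_lock_irq(&current->sigmask_lock);
        signr = dequeue_signal(&current->blocked, &info);
        spin_unlock_irq(&current->sigmask_lock);

        /* ... act on signr, as the arch do_signal() loops do ... */
}
#endif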

static int rm_from_queue(int sig, struct sigpending *s)
{
        struct sigqueue *q, **pp;

        if (!sigismember(&s->signal, sig))
                return 0;

        sigdelset(&s->signal, sig);

        pp = &s->head;

        while ((q = *pp) != NULL) {
                if (q->info.si_signo == sig) {
                        if ((*pp = q->next) == NULL)
                                s->tail = pp;
                        kmem_cache_free(sigqueue_cachep,q);
                        atomic_dec(&nr_queued_signals);
                        continue;
                }
                pp = &q->next;
        }
        return 1;
}

/*
 * Remove signal sig from t->pending.
 * Returns 1 if sig was found.
 *
 * All callers must be holding t->sigmask_lock.
 */
static int rm_sig_from_queue(int sig, struct task_struct *t)
{
        return rm_from_queue(sig, &t->pending);
}

/*
 * Bad permissions for sending the signal
 */
int bad_signal(int sig, struct siginfo *info, struct task_struct *t)
{
        return (!info || ((unsigned long)info != 1 && SI_FROMUSER(info)))
            && ((sig != SIGCONT) || (current->session != t->session))
            && (current->euid ^ t->suid) && (current->euid ^ t->uid)
            && (current->uid ^ t->suid) && (current->uid ^ t->uid)
            && !capable(CAP_KILL);
}
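
/*
 * Note on the XOR tests above: "a ^ b" is non-zero exactly when a != b,
 * so the four terms require that neither the sender's uid nor euid
 * matches the target's uid or suid; only then is CAP_KILL consulted.
 */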

/*
 * Signal type:
 *    < 0 : global action (kill - spread to all non-blocked threads)
 *    = 0 : ignored
 *    > 0 : wake up.
 */
static int signal_type(int sig, struct signal_struct *signals)
{
        unsigned long handler;

        if (!signals)
                return 0;

        handler = (unsigned long) signals->action[sig-1].sa.sa_handler;
        if (handler > 1)
                return 1;

        /* "Ignore" handler.  Illogical, but SIG_IGN on SIGCHLD implies
           an implicit handler (automatic child reaping). */
        if (handler == 1)
                return sig == SIGCHLD;

        /* Default handler. Normally lethal, but.. */
        switch (sig) {

        /* Ignored */
        case SIGCONT: case SIGWINCH:
        case SIGCHLD: case SIGURG:
                return 0;

        /* Implicit behaviour */
        case SIGTSTP: case SIGTTIN: case SIGTTOU:
                return 1;

        /* Implicit actions (kill or do special stuff) */
        default:
                return -1;
        }
}


/*
 * Determine whether a signal should be posted or not.
 *
 * Signals with SIG_IGN can be ignored, except for the
 * special case of a SIGCHLD.
 *
 * Some signals with SIG_DFL default to a non-action.
 */
static int ignored_signal(int sig, struct task_struct *t)
{
        /* Don't ignore traced or blocked signals */
        if ((t->ptrace & PT_PTRACED) || sigismember(&t->blocked, sig))
                return 0;

        return signal_type(sig, t->sig) == 0;
}

/*
 * Handle TASK_STOPPED cases etc implicit behaviour
 * of certain magical signals.
 *
 * SIGKILL gets spread out to every thread.
 */
static void handle_stop_signal(int sig, struct task_struct *t)
{
        switch (sig) {
        case SIGKILL: case SIGCONT:
                /* Wake up the process if stopped.  */
                if (t->state == TASK_STOPPED)
                        wake_up_process(t);
                t->exit_code = 0;
                rm_sig_from_queue(SIGSTOP, t);
                rm_sig_from_queue(SIGTSTP, t);
                rm_sig_from_queue(SIGTTOU, t);
                rm_sig_from_queue(SIGTTIN, t);
                break;

        case SIGSTOP: case SIGTSTP:
        case SIGTTIN: case SIGTTOU:
                /* If we're stopping again, cancel SIGCONT */
                rm_sig_from_queue(SIGCONT, t);
                break;
        }
}

static int send_signal(int sig, struct siginfo *info, struct sigpending *signals)
{
        struct sigqueue * q = NULL;

        /* Real-time signals must be queued if sent by sigqueue, or
           some other real-time mechanism.  It is implementation
           defined whether kill() does so.  We attempt to do so, on
           the principle of least surprise, but since kill is not
           allowed to fail with EAGAIN when low on memory we just
           make sure at least one signal gets delivered and don't
           pass on the info struct.  */

        if (atomic_read(&nr_queued_signals) < max_queued_signals) {
                q = kmem_cache_alloc(sigqueue_cachep, GFP_ATOMIC);
        }

        if (q) {
                atomic_inc(&nr_queued_signals);
                q->next = NULL;
                *signals->tail = q;
                signals->tail = &q->next;
                switch ((unsigned long) info) {
                        case 0:
                                q->info.si_signo = sig;
                                q->info.si_errno = 0;
                                q->info.si_code = SI_USER;
                                q->info.si_pid = current->pid;
                                q->info.si_uid = current->uid;
                                break;
                        case 1:
                                q->info.si_signo = sig;
                                q->info.si_errno = 0;
                                q->info.si_code = SI_KERNEL;
                                q->info.si_pid = 0;
                                q->info.si_uid = 0;
                                break;
                        default:
                                copy_siginfo(&q->info, info);
                                break;
                }
        } else if (sig >= SIGRTMIN && info && (unsigned long)info != 1
                   && info->si_code != SI_USER) {
                /*
                 * Queue overflow, abort.  We may abort if the signal was rt
                 * and sent by user using something other than kill().
                 */
                return -EAGAIN;
        }

        sigaddset(&signals->signal, sig);
        return 0;
}
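
/*
 * Consequences of the queueing policy above (an editor's summary of the
 * code, not original text): when the queue is full, kill() of an RT
 * signal still sets the bit in signals->signal, so one instance is
 * delivered with siginfo synthesized by collect_signal(); sigqueue()
 * style senders (si_code != SI_USER) get -EAGAIN instead, as POSIX
 * permits.
 */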

/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "sigmask_lock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
static inline void signal_wake_up(struct task_struct *t)
{
        t->sigpending = 1;

#ifdef CONFIG_SMP
        /*
         * If the task is running on a different CPU
         * force a reschedule on the other CPU to make
         * it notice the new signal quickly.
         *
         * The code below is a tad loose and might occasionally
         * kick the wrong CPU if we catch the process in the
         * process of changing - but no harm is done by that
         * other than doing an extra (lightweight) IPI interrupt.
         */
        spin_lock(&runqueue_lock);
        if (task_has_cpu(t) && t->processor != smp_processor_id())
                smp_send_reschedule(t->processor);
        spin_unlock(&runqueue_lock);
#endif /* CONFIG_SMP */

        if (t->state & TASK_INTERRUPTIBLE) {
                wake_up_process(t);
                return;
        }
}

static int deliver_signal(int sig, struct siginfo *info, struct task_struct *t)
{
        int retval = send_signal(sig, info, &t->pending);

        if (!retval && !sigismember(&t->blocked, sig))
                signal_wake_up(t);

        return retval;
}

int
send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
        unsigned long flags;
        int ret;


#if DEBUG_SIG
printk("SIG queue (%s:%d): %d ", t->comm, t->pid, sig);
#endif

        ret = -EINVAL;
        if (sig < 0 || sig > _NSIG)
                goto out_nolock;
        /* The somewhat baroque permissions check... */
        ret = -EPERM;
        if (bad_signal(sig, info, t))
                goto out_nolock;

        /* The null signal is a permissions and process existence probe.
           No signal is actually delivered.  Same goes for zombies. */
        ret = 0;
        if (!sig || !t->sig)
                goto out_nolock;

        spin_lock_irqsave(&t->sigmask_lock, flags);
        handle_stop_signal(sig, t);

        /* Optimize away the signal, if it's a signal that can be
           handled immediately (ie non-blocked and untraced) and
           that is ignored (either explicitly or by default).  */

        if (ignored_signal(sig, t))
                goto out;

        /* Support queueing exactly one non-rt signal, so that we
           can get more detailed information about the cause of
           the signal. */
        if (sig < SIGRTMIN && sigismember(&t->pending.signal, sig))
                goto out;

        ret = deliver_signal(sig, info, t);
out:
        spin_unlock_irqrestore(&t->sigmask_lock, flags);
out_nolock:
#if DEBUG_SIG
printk(" %d -> %d\n", signal_pending(t), ret);
#endif

        return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 */

int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
        unsigned long int flags;

        spin_lock_irqsave(&t->sigmask_lock, flags);
        if (t->sig == NULL) {
                spin_unlock_irqrestore(&t->sigmask_lock, flags);
                return -ESRCH;
        }

        if (t->sig->action[sig-1].sa.sa_handler == SIG_IGN)
                t->sig->action[sig-1].sa.sa_handler = SIG_DFL;
        sigdelset(&t->blocked, sig);
        recalc_sigpending(t);
        spin_unlock_irqrestore(&t->sigmask_lock, flags);

        return send_sig_info(sig, info, t);
}

/*
 * kill_pg_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

int
kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
        int retval = -EINVAL;
        if (pgrp > 0) {
                struct task_struct *p;

                retval = -ESRCH;
                read_lock(&tasklist_lock);
                for_each_task(p) {
                        if (p->pgrp == pgrp && thread_group_leader(p)) {
                                int err = send_sig_info(sig, info, p);
                                if (retval)
                                        retval = err;
                        }
                }
                read_unlock(&tasklist_lock);
        }
        return retval;
}

/*
 * kill_sl_info() sends a signal to the session leader: this is used
 * to send SIGHUP to the controlling process of a terminal when
 * the connection is lost.
 */

int
kill_sl_info(int sig, struct siginfo *info, pid_t sess)
{
        int retval = -EINVAL;
        if (sess > 0) {
                struct task_struct *p;

                retval = -ESRCH;
                read_lock(&tasklist_lock);
                for_each_task(p) {
                        if (p->leader && p->session == sess) {
                                int err = send_sig_info(sig, info, p);
                                if (retval)
                                        retval = err;
                        }
                }
                read_unlock(&tasklist_lock);
        }
        return retval;
}

inline int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
        int error;
        struct task_struct *p;

        read_lock(&tasklist_lock);
        p = find_task_by_pid(pid);
        error = -ESRCH;
        if (p) {
                if (!thread_group_leader(p)) {
                       struct task_struct *tg;
                       tg = find_task_by_pid(p->tgid);
                       if (tg)
                               p = tg;
                }
                error = send_sig_info(sig, info, p);
        }
        read_unlock(&tasklist_lock);
        return error;
}


/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, int pid)
{
        if (!pid) {
                return kill_pg_info(sig, info, current->pgrp);
        } else if (pid == -1) {
                int retval = 0, count = 0;
                struct task_struct * p;

                read_lock(&tasklist_lock);
                for_each_task(p) {
                        if (p->pid > 1 && p != current && thread_group_leader(p)) {
                                int err = send_sig_info(sig, info, p);
                                ++count;
                                if (err != -EPERM)
                                        retval = err;
                        }
                }
                read_unlock(&tasklist_lock);
                return count ? retval : -ESRCH;
        } else if (pid < 0) {
                return kill_pg_info(sig, info, -pid);
        } else {
                return kill_proc_info(sig, info, pid);
        }
}
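
/*
 * Summary of the pid dispatch above (editor's note, mirroring kill(2)):
 *   pid >  0   exactly that process           -> kill_proc_info()
 *   pid == 0   the caller's process group     -> kill_pg_info(current->pgrp)
 *   pid == -1  every process except init and
 *              the caller itself              -> the for_each_task() loop
 *   pid < -1   the process group -pid         -> kill_pg_info(-pid)
 */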

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int
send_sig(int sig, struct task_struct *p, int priv)
{
        return send_sig_info(sig, (void*)(long)(priv != 0), p);
}

void
force_sig(int sig, struct task_struct *p)
{
        force_sig_info(sig, (void*)1L, p);
}

int
kill_pg(pid_t pgrp, int sig, int priv)
{
        return kill_pg_info(sig, (void *)(long)(priv != 0), pgrp);
}

int
kill_sl(pid_t sess, int sig, int priv)
{
        return kill_sl_info(sig, (void *)(long)(priv != 0), sess);
}

int
kill_proc(pid_t pid, int sig, int priv)
{
        return kill_proc_info(sig, (void *)(long)(priv != 0), pid);
}

/*
 * Joy. Or not. Pthread wants us to wake up every thread
 * in our parent group.
 */
static void wake_up_parent(struct task_struct *parent)
{
        struct task_struct *tsk = parent;

        do {
                wake_up_interruptible(&tsk->wait_chldexit);
                tsk = next_thread(tsk);
        } while (tsk != parent);
}

/*
 * Let a parent know about a status change of a child.
 */

void do_notify_parent(struct task_struct *tsk, int sig)
{
        struct siginfo info;
        int why, status;

        info.si_signo = sig;
        info.si_errno = 0;
        info.si_pid = tsk->pid;
        info.si_uid = tsk->uid;

        /* FIXME: find out whether or not this is supposed to be c*time. */
        info.si_utime = tsk->times.tms_utime;
        info.si_stime = tsk->times.tms_stime;

        status = tsk->exit_code & 0x7f;
        why = SI_KERNEL;        /* shouldn't happen */
        switch (tsk->state) {
        case TASK_STOPPED:
                /* FIXME -- can we deduce CLD_TRAPPED or CLD_CONTINUED? */
                if (tsk->ptrace & PT_PTRACED)
                        why = CLD_TRAPPED;
                else
                        why = CLD_STOPPED;
                break;

        default:
                if (tsk->exit_code & 0x80)
                        why = CLD_DUMPED;
                else if (tsk->exit_code & 0x7f)
                        why = CLD_KILLED;
                else {
                        why = CLD_EXITED;
                        status = tsk->exit_code >> 8;
                }
                break;
        }
        info.si_code = why;
        info.si_status = status;

        send_sig_info(sig, &info, tsk->p_pptr);
        wake_up_parent(tsk->p_pptr);
}


/*
 * We need the tasklist lock because it's the only
 * thing that protects our "parent" pointer.
 *
 * exit.c calls "do_notify_parent()" directly, because
 * it already has the tasklist lock.
 */
void
notify_parent(struct task_struct *tsk, int sig)
{
        read_lock(&tasklist_lock);
        do_notify_parent(tsk, sig);
        read_unlock(&tasklist_lock);
}

EXPORT_SYMBOL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(force_sig_info);
EXPORT_SYMBOL(kill_pg);
EXPORT_SYMBOL(kill_pg_info);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(kill_proc_info);
EXPORT_SYMBOL(kill_sl);
EXPORT_SYMBOL(kill_sl_info);
EXPORT_SYMBOL(notify_parent);
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);


/*
 * System call entry points.
 */

/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 */

asmlinkage long
sys_rt_sigprocmask(int how, sigset_t *set, sigset_t *oset, size_t sigsetsize)
{
        int error = -EINVAL;
        sigset_t old_set, new_set;

        /* XXX: Don't preclude handling different sized sigset_t's.  */
        if (sigsetsize != sizeof(sigset_t))
                goto out;

        if (set) {
                error = -EFAULT;
                if (copy_from_user(&new_set, set, sizeof(*set)))
                        goto out;
                sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

                spin_lock_irq(&current->sigmask_lock);
                old_set = current->blocked;

                error = 0;
                switch (how) {
                default:
                        error = -EINVAL;
                        break;
                case SIG_BLOCK:
                        sigorsets(&current->blocked, &old_set, &new_set);
                        break;
                case SIG_UNBLOCK:
                        signandsets(&current->blocked, &old_set, &new_set);
                        break;
                case SIG_SETMASK:
                        current->blocked = new_set;
                        break;
                }

                recalc_sigpending(current);
                spin_unlock_irq(&current->sigmask_lock);
                if (error)
                        goto out;
                if (oset)
                        goto set_old;
        } else if (oset) {
                spin_lock_irq(&current->sigmask_lock);
                old_set = current->blocked;
                spin_unlock_irq(&current->sigmask_lock);

        set_old:
                error = -EFAULT;
                if (copy_to_user(oset, &old_set, sizeof(*oset)))
                        goto out;
        }
        error = 0;
out:
        return error;
}
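
/*
 * Userspace view of the three "how" modes handled above (illustrative
 * sketch, not part of the original file; compiled out):
 */
#if 0
#include <signal.h>

int main(void)
{
        sigset_t set, old;

        sigemptyset(&set);
        sigaddset(&set, SIGINT);

        sigprocmask(SIG_BLOCK, &set, &old);     /* blocked |= set  */
        sigprocmask(SIG_UNBLOCK, &set, NULL);   /* blocked &= ~set */
        sigprocmask(SIG_SETMASK, &old, NULL);   /* blocked  = old  */

        /* Attempts to block SIGKILL/SIGSTOP are silently dropped, as
           sigdelsetmask() does in the kernel code above. */
        return 0;
}
#endif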

long do_sigpending(void *set, unsigned long sigsetsize)
{
        long error = -EINVAL;
        sigset_t pending;

        if (sigsetsize > sizeof(sigset_t))
                goto out;

        spin_lock_irq(&current->sigmask_lock);
        sigandsets(&pending, &current->blocked, &current->pending.signal);
        spin_unlock_irq(&current->sigmask_lock);

        error = -EFAULT;
        if (!copy_to_user(set, &pending, sigsetsize))
                error = 0;
out:
        return error;
}

asmlinkage long
sys_rt_sigpending(sigset_t *set, size_t sigsetsize)
{
        return do_sigpending(set, sigsetsize);
}

asmlinkage long
sys_rt_sigtimedwait(const sigset_t *uthese, siginfo_t *uinfo,
                    const struct timespec *uts, size_t sigsetsize)
{
        int ret, sig;
        sigset_t these;
        struct timespec ts;
        siginfo_t info;
        long timeout = 0;

        /* XXX: Don't preclude handling different sized sigset_t's.  */
        if (sigsetsize != sizeof(sigset_t))
                return -EINVAL;

        if (copy_from_user(&these, uthese, sizeof(these)))
                return -EFAULT;

        /*
         * Invert the set of allowed signals to get those we
         * want to block.
         */
        sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
        signotset(&these);

        if (uts) {
                if (copy_from_user(&ts, uts, sizeof(ts)))
                        return -EFAULT;
                if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
                    || ts.tv_sec < 0)
                        return -EINVAL;
        }

        spin_lock_irq(&current->sigmask_lock);
        sig = dequeue_signal(&these, &info);
        if (!sig) {
                timeout = MAX_SCHEDULE_TIMEOUT;
                if (uts)
                        timeout = (timespec_to_jiffies(&ts)
                                   + (ts.tv_sec || ts.tv_nsec));

                if (timeout) {
                        /* None ready -- temporarily unblock those we're
                         * interested in while we sleep, so that we'll
                         * be awakened when they arrive.  */
                        sigset_t oldblocked = current->blocked;
                        sigandsets(&current->blocked, &current->blocked, &these);
                        recalc_sigpending(current);
                        spin_unlock_irq(&current->sigmask_lock);

                        current->state = TASK_INTERRUPTIBLE;
                        timeout = schedule_timeout(timeout);

                        spin_lock_irq(&current->sigmask_lock);
                        sig = dequeue_signal(&these, &info);
                        current->blocked = oldblocked;
                        recalc_sigpending(current);
                }
        }
        spin_unlock_irq(&current->sigmask_lock);

        if (sig) {
                ret = sig;
                if (uinfo) {
                        if (copy_siginfo_to_user(uinfo, &info))
                                ret = -EFAULT;
                }
        } else {
                ret = -EAGAIN;
                if (timeout)
                        ret = -EINTR;
        }

        return ret;
}
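
/*
 * Userspace counterpart of the syscall above (illustrative sketch, not
 * part of the original file): block SIGUSR1, then wait for it with a
 * five second timeout, exercising the -EAGAIN/-EINTR paths.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
        sigset_t set;
        siginfo_t info;
        struct timespec ts = { 5, 0 };

        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        sigprocmask(SIG_BLOCK, &set, NULL);     /* must be blocked first */

        if (sigtimedwait(&set, &info, &ts) < 0)
                perror("sigtimedwait");         /* EAGAIN on timeout */
        else
                printf("got signal %d from pid %d\n",
                       info.si_signo, (int)info.si_pid);
        return 0;
}
#endif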

asmlinkage long
sys_kill(int pid, int sig)
{
        struct siginfo info;

        info.si_signo = sig;
        info.si_errno = 0;
        info.si_code = SI_USER;
        info.si_pid = current->pid;
        info.si_uid = current->uid;

        return kill_something_info(sig, &info, pid);
}

/*
 *  Kill only one task, even if it's a CLONE_THREAD task.
 */
asmlinkage long
sys_tkill(int pid, int sig)
{
       struct siginfo info;
       int error;
       struct task_struct *p;

       /* This is only valid for single tasks */
       if (pid <= 0)
           return -EINVAL;

       info.si_signo = sig;
       info.si_errno = 0;
       info.si_code = SI_TKILL;
       info.si_pid = current->pid;
       info.si_uid = current->uid;

       read_lock(&tasklist_lock);
       p = find_task_by_pid(pid);
       error = -ESRCH;
       if (p) {
               error = send_sig_info(sig, &info, p);
       }
       read_unlock(&tasklist_lock);
       return error;
}

asmlinkage long
sys_rt_sigqueueinfo(int pid, int sig, siginfo_t *uinfo)
{
        siginfo_t info;

        if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
                return -EFAULT;

        /* Not even root can pretend to send signals from the kernel.
           Nor can they impersonate a kill(), which adds source info.  */
        if (info.si_code >= 0)
                return -EPERM;
        info.si_signo = sig;

        /* POSIX.1b doesn't mention process groups.  */
        return kill_proc_info(sig, &info, pid);
}

int
do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
{
        struct k_sigaction *k;

        if (sig < 1 || sig > _NSIG ||
            (act && (sig == SIGKILL || sig == SIGSTOP)))
                return -EINVAL;

        k = &current->sig->action[sig-1];

        spin_lock(&current->sig->siglock);

        if (oact)
                *oact = *k;

        if (act) {
                *k = *act;
                sigdelsetmask(&k->sa.sa_mask, sigmask(SIGKILL) | sigmask(SIGSTOP));

                /*
                 * POSIX 3.3.1.3:
                 *  "Setting a signal action to SIG_IGN for a signal that is
                 *   pending shall cause the pending signal to be discarded,
                 *   whether or not it is blocked."
                 *
                 *  "Setting a signal action to SIG_DFL for a signal that is
                 *   pending and whose default action is to ignore the signal
                 *   (for example, SIGCHLD), shall cause the pending signal to
                 *   be discarded, whether or not it is blocked"
                 *
                 * Note the silly behaviour of SIGCHLD: SIG_IGN means that the
                 * signal isn't actually ignored, but does automatic child
                 * reaping, while SIG_DFL is explicitly said by POSIX to force
                 * the signal to be ignored.
                 */

                if (k->sa.sa_handler == SIG_IGN
                    || (k->sa.sa_handler == SIG_DFL
                        && (sig == SIGCONT ||
                            sig == SIGCHLD ||
                            sig == SIGURG ||
                            sig == SIGWINCH))) {
                        spin_lock_irq(&current->sigmask_lock);
                        if (rm_sig_from_queue(sig, current))
                                recalc_sigpending(current);
                        spin_unlock_irq(&current->sigmask_lock);
                }
        }

        spin_unlock(&current->sig->siglock);
        return 0;
}
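
/*
 * Userspace demonstration of the POSIX 3.3.1.3 rule implemented above
 * (illustrative sketch, not part of the original file): a pending,
 * blocked signal is discarded once its action becomes SIG_IGN.
 */
#if 0
#include <signal.h>
#include <stdio.h>

int main(void)
{
        sigset_t set, pending;

        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        sigprocmask(SIG_BLOCK, &set, NULL);

        raise(SIGUSR1);                         /* now pending and blocked */
        signal(SIGUSR1, SIG_IGN);               /* rm_sig_from_queue() path */

        sigpending(&pending);
        printf("still pending: %d\n", sigismember(&pending, SIGUSR1));
        return 0;                               /* expected to print 0 */
}
#endif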

int
do_sigaltstack (const stack_t *uss, stack_t *uoss, unsigned long sp)
{
        stack_t oss;
        int error;

        if (uoss) {
                oss.ss_sp = (void *) current->sas_ss_sp;
                oss.ss_size = current->sas_ss_size;
                oss.ss_flags = sas_ss_flags(sp);
        }

        if (uss) {
                void *ss_sp;
                size_t ss_size;
                int ss_flags;

                error = -EFAULT;
                if (verify_area(VERIFY_READ, uss, sizeof(*uss))
                    || __get_user(ss_sp, &uss->ss_sp)
                    || __get_user(ss_flags, &uss->ss_flags)
                    || __get_user(ss_size, &uss->ss_size))
                        goto out;

                error = -EPERM;
                if (on_sig_stack (sp))
                        goto out;

                error = -EINVAL;
                /*
                 * Note - this code used to test ss_flags incorrectly;
                 * old code may have been written using ss_flags==0
                 * to mean ss_flags==SS_ONSTACK (as this was the only
                 * way that worked), so this fix preserves that older
                 * mechanism.
                 */
                if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
                        goto out;

                if (ss_flags == SS_DISABLE) {
                        ss_size = 0;
                        ss_sp = NULL;
                } else {
                        error = -ENOMEM;
                        if (ss_size < MINSIGSTKSZ)
                                goto out;
                }

                current->sas_ss_sp = (unsigned long) ss_sp;
                current->sas_ss_size = ss_size;
        }

        if (uoss) {
                error = -EFAULT;
                if (copy_to_user(uoss, &oss, sizeof(oss)))
                        goto out;
        }

        error = 0;
out:
        return error;
}
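
/*
 * Userspace counterpart (illustrative sketch, not part of the original
 * file): install an alternate stack so a SIGSEGV handler can run even
 * after the normal stack is exhausted; SA_ONSTACK is what routes the
 * handler onto sas_ss_sp above.
 */
#if 0
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        stack_t ss;
        struct sigaction sa;

        ss.ss_sp = malloc(SIGSTKSZ);
        ss.ss_size = SIGSTKSZ;                  /* >= MINSIGSTKSZ, else ENOMEM */
        ss.ss_flags = 0;                        /* 0 or SS_DISABLE only */
        sigaltstack(&ss, NULL);

        memset(&sa, 0, sizeof(sa));
        sa.sa_handler = _exit;                  /* trivial handler */
        sa.sa_flags = SA_ONSTACK;               /* run it on the alternate stack */
        sigaction(SIGSEGV, &sa, NULL);
        return 0;
}
#endif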

asmlinkage long
sys_sigpending(old_sigset_t *set)
{
        return do_sigpending(set, sizeof(*set));
}

#if !defined(__alpha__)
/* Alpha has its own versions with special arguments.  */

asmlinkage long
sys_sigprocmask(int how, old_sigset_t *set, old_sigset_t *oset)
{
        int error;
        old_sigset_t old_set, new_set;

        if (set) {
                error = -EFAULT;
                if (copy_from_user(&new_set, set, sizeof(*set)))
                        goto out;
                new_set &= ~(sigmask(SIGKILL)|sigmask(SIGSTOP));

                spin_lock_irq(&current->sigmask_lock);
                old_set = current->blocked.sig[0];

                error = 0;
                switch (how) {
                default:
                        error = -EINVAL;
                        break;
                case SIG_BLOCK:
                        sigaddsetmask(&current->blocked, new_set);
                        break;
                case SIG_UNBLOCK:
                        sigdelsetmask(&current->blocked, new_set);
                        break;
                case SIG_SETMASK:
                        current->blocked.sig[0] = new_set;
                        break;
                }

                recalc_sigpending(current);
                spin_unlock_irq(&current->sigmask_lock);
                if (error)
                        goto out;
                if (oset)
                        goto set_old;
        } else if (oset) {
                old_set = current->blocked.sig[0];
        set_old:
                error = -EFAULT;
                if (copy_to_user(oset, &old_set, sizeof(*oset)))
                        goto out;
        }
        error = 0;
out:
        return error;
}

#ifndef __sparc__
asmlinkage long
sys_rt_sigaction(int sig, const struct sigaction *act, struct sigaction *oact,
                 size_t sigsetsize)
{
        struct k_sigaction new_sa, old_sa;
        int ret = -EINVAL;

        /* XXX: Don't preclude handling different sized sigset_t's.  */
        if (sigsetsize != sizeof(sigset_t))
                goto out;

        if (act) {
                if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
                        return -EFAULT;
        }

        ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

        if (!ret && oact) {
                if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
                        return -EFAULT;
        }
out:
        return ret;
}
#endif /* __sparc__ */
#endif

#if !defined(__alpha__) && !defined(__ia64__)
/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
asmlinkage long
sys_sgetmask(void)
{
        /* SMP safe */
        return current->blocked.sig[0];
}

asmlinkage long
sys_ssetmask(int newmask)
{
        int old;

        spin_lock_irq(&current->sigmask_lock);
        old = current->blocked.sig[0];

        siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
                                                  sigmask(SIGSTOP)));
        recalc_sigpending(current);
        spin_unlock_irq(&current->sigmask_lock);

        return old;
}
#endif /* !defined(__alpha__) && !defined(__ia64__) */

#if !defined(__alpha__) && !defined(__ia64__) && !defined(__mips__)
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
asmlinkage unsigned long
sys_signal(int sig, __sighandler_t handler)
{
        struct k_sigaction new_sa, old_sa;
        int ret;

        new_sa.sa.sa_handler = handler;
        new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;

        ret = do_sigaction(sig, &new_sa, &old_sa);

        return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* !__alpha__ && !__ia64__ && !__mips__ */
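
/*
 * Editor's note, not original text: SA_ONESHOT resets the action to
 * SIG_DFL on handler entry and SA_NOMASK leaves the signal unblocked
 * inside the handler -- the historical System V signal() semantics.
 * A handler installed this way must re-arm itself (userspace sketch;
 * note that glibc's signal() wrapper provides BSD semantics rather
 * than this raw syscall):
 */
#if 0
#include <signal.h>

static void handler(int sig)
{
        signal(sig, handler);   /* re-arm: one-shot reset it to SIG_DFL */
}

int main(void)
{
        signal(SIGUSR1, handler);
        raise(SIGUSR1);
        return 0;
}
#endif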
