/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *              Changes to use preallocated sigqueue structures
 *              to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include "audit.h"      /* audit_signal_info() */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;


static int sig_ignored(struct task_struct *t, int sig)
{
        void __user * handler;

        /*
         * Tracers always want to know about signals..
         */
        if (t->ptrace & PT_PTRACED)
                return 0;

        /*
         * Blocked signals are never ignored, since the
         * signal handler may change by the time it is
         * unblocked.
         */
        if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
                return 0;

        /* Is it explicitly or implicitly ignored? */
        handler = t->sighand->action[sig-1].sa.sa_handler;
        return   handler == SIG_IGN ||
                (handler == SIG_DFL && sig_kernel_ignore(sig));
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
        unsigned long ready;
        long i;

        switch (_NSIG_WORDS) {
        default:
                for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
                        ready |= signal->sig[i] &~ blocked->sig[i];
                break;

        case 4: ready  = signal->sig[3] &~ blocked->sig[3];
                ready |= signal->sig[2] &~ blocked->sig[2];
                ready |= signal->sig[1] &~ blocked->sig[1];
                ready |= signal->sig[0] &~ blocked->sig[0];
                break;

        case 2: ready  = signal->sig[1] &~ blocked->sig[1];
                ready |= signal->sig[0] &~ blocked->sig[0];
                break;

        case 1: ready  = signal->sig[0] &~ blocked->sig[0];
        }
        return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static int recalc_sigpending_tsk(struct task_struct *t)
{
        if (t->signal->group_stop_count > 0 ||
            PENDING(&t->pending, &t->blocked) ||
            PENDING(&t->signal->shared_pending, &t->blocked)) {
                set_tsk_thread_flag(t, TIF_SIGPENDING);
                return 1;
        }
        /*
         * We must never clear the flag in another thread, or in current
         * when it's possible the current syscall is returning -ERESTART*.
         * So we don't clear it here; it is cleared only by callers that know
         * they should do so.
         */
        return 0;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
        if (recalc_sigpending_tsk(t))
                signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
        if (!recalc_sigpending_tsk(current) && !freezing(current))
                clear_thread_flag(TIF_SIGPENDING);

}

/* Given the mask, find the first available signal that should be serviced. */

int next_signal(struct sigpending *pending, sigset_t *mask)
{
        unsigned long i, *s, *m, x;
        int sig = 0;

        s = pending->signal.sig;
        m = mask->sig;
        switch (_NSIG_WORDS) {
        default:
                for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
                        if ((x = *s &~ *m) != 0) {
                                sig = ffz(~x) + i*_NSIG_BPW + 1;
                                break;
                        }
                break;

        case 2: if ((x = s[0] &~ m[0]) != 0)
                        sig = 1;
                else if ((x = s[1] &~ m[1]) != 0)
                        sig = _NSIG_BPW + 1;
                else
                        break;
                sig += ffz(~x);
                break;

        case 1: if ((x = *s &~ *m) != 0)
                        sig = ffz(~x) + 1;
                break;
        }

        return sig;
}
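
/*
 * Illustrative sketch (not part of the original source): the scan in
 * next_signal() above boils down to "take pending & ~blocked, find its
 * lowest set bit, add one".  A minimal user-space version of the same
 * word-wise scan, using __builtin_ctzl() where the kernel uses ffz(~x):
 */
#if 0
#include <stdio.h>

#define WORDS 2
#define BPW   (8 * sizeof(unsigned long))

static int first_signal(const unsigned long *pending, const unsigned long *blocked)
{
        unsigned long x;
        unsigned int i;

        for (i = 0; i < WORDS; i++) {
                x = pending[i] & ~blocked[i];
                if (x)
                        return i * BPW + __builtin_ctzl(x) + 1; /* signals are 1-based */
        }
        return 0;                                               /* nothing deliverable */
}

int main(void)
{
        unsigned long pending[WORDS] = { 1UL << 16, 0 };        /* bit 16 => signal 17 */
        unsigned long blocked[WORDS] = { 0, 0 };

        printf("first deliverable signal: %d\n", first_signal(pending, blocked));
        return 0;
}
#endif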
166
 
167
static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
168
                                         int override_rlimit)
169
{
170
        struct sigqueue *q = NULL;
171
        struct user_struct *user;
172
 
173
        /*
174
         * In order to avoid problems with "switch_user()", we want to make
175
         * sure that the compiler doesn't re-load "t->user"
176
         */
177
        user = t->user;
178
        barrier();
179
        atomic_inc(&user->sigpending);
180
        if (override_rlimit ||
181
            atomic_read(&user->sigpending) <=
182
                        t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
183
                q = kmem_cache_alloc(sigqueue_cachep, flags);
184
        if (unlikely(q == NULL)) {
185
                atomic_dec(&user->sigpending);
186
        } else {
187
                INIT_LIST_HEAD(&q->list);
188
                q->flags = 0;
189
                q->user = get_uid(user);
190
        }
191
        return(q);
192
}
193
 
194
static void __sigqueue_free(struct sigqueue *q)
195
{
196
        if (q->flags & SIGQUEUE_PREALLOC)
197
                return;
198
        atomic_dec(&q->user->sigpending);
199
        free_uid(q->user);
200
        kmem_cache_free(sigqueue_cachep, q);
201
}
202
 
203
void flush_sigqueue(struct sigpending *queue)
204
{
205
        struct sigqueue *q;
206
 
207
        sigemptyset(&queue->signal);
208
        while (!list_empty(&queue->list)) {
209
                q = list_entry(queue->list.next, struct sigqueue , list);
210
                list_del_init(&q->list);
211
                __sigqueue_free(q);
212
        }
213
}
214
 
215
/*
216
 * Flush all pending signals for a task.
217
 */
218
void flush_signals(struct task_struct *t)
219
{
220
        unsigned long flags;
221
 
222
        spin_lock_irqsave(&t->sighand->siglock, flags);
223
        clear_tsk_thread_flag(t,TIF_SIGPENDING);
224
        flush_sigqueue(&t->pending);
225
        flush_sigqueue(&t->signal->shared_pending);
226
        spin_unlock_irqrestore(&t->sighand->siglock, flags);
227
}
228
 
229
void ignore_signals(struct task_struct *t)
230
{
231
        int i;
232
 
233
        for (i = 0; i < _NSIG; ++i)
234
                t->sighand->action[i].sa.sa_handler = SIG_IGN;
235
 
236
        flush_signals(t);
237
}
238
 
239
/*
240
 * Flush all handlers for a task.
241
 */
242
 
243
void
244
flush_signal_handlers(struct task_struct *t, int force_default)
245
{
246
        int i;
247
        struct k_sigaction *ka = &t->sighand->action[0];
248
        for (i = _NSIG ; i != 0 ; i--) {
249
                if (force_default || ka->sa.sa_handler != SIG_IGN)
250
                        ka->sa.sa_handler = SIG_DFL;
251
                ka->sa.sa_flags = 0;
252
                sigemptyset(&ka->sa.sa_mask);
253
                ka++;
254
        }
255
}
256
 
257
int unhandled_signal(struct task_struct *tsk, int sig)
258
{
259
        if (is_global_init(tsk))
260
                return 1;
261
        if (tsk->ptrace & PT_PTRACED)
262
                return 0;
263
        return (tsk->sighand->action[sig-1].sa.sa_handler == SIG_IGN) ||
264
                (tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL);
265
}
266
 
267
 
/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.  */
275
 
276
void
277
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
278
{
279
        unsigned long flags;
280
 
281
        spin_lock_irqsave(&current->sighand->siglock, flags);
282
        current->notifier_mask = mask;
283
        current->notifier_data = priv;
284
        current->notifier = notifier;
285
        spin_unlock_irqrestore(&current->sighand->siglock, flags);
286
}
287
 
288
/* Notify the system that blocking has ended. */
289
 
290
void
291
unblock_all_signals(void)
292
{
293
        unsigned long flags;
294
 
295
        spin_lock_irqsave(&current->sighand->siglock, flags);
296
        current->notifier = NULL;
297
        current->notifier_data = NULL;
298
        recalc_sigpending();
299
        spin_unlock_irqrestore(&current->sighand->siglock, flags);
300
}
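
/*
 * Illustrative sketch (not part of the original source): roughly how a
 * driver might use the notifier hook documented above.  "struct my_device"
 * and the callback are hypothetical; the only real interfaces used are
 * block_all_signals() and unblock_all_signals() from this file.
 */
#if 0
struct my_device {
        int allow_signals;
};

static sigset_t my_blocked_mask;

/* Return non-zero to let a signal be acted upon, 0 to keep it blocked. */
static int my_signal_notifier(void *priv)
{
        struct my_device *dev = priv;

        return dev->allow_signals;
}

static void my_enter_critical(struct my_device *dev)
{
        sigfillset(&my_blocked_mask);
        block_all_signals(my_signal_notifier, dev, &my_blocked_mask);
}

static void my_leave_critical(void)
{
        unblock_all_signals();
}
#endif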
301
 
302
static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
303
{
304
        struct sigqueue *q, *first = NULL;
305
        int still_pending = 0;
306
 
307
        if (unlikely(!sigismember(&list->signal, sig)))
308
                return 0;
309
 
310
        /*
311
         * Collect the siginfo appropriate to this signal.  Check if
312
         * there is another siginfo for the same signal.
313
        */
314
        list_for_each_entry(q, &list->list, list) {
315
                if (q->info.si_signo == sig) {
316
                        if (first) {
317
                                still_pending = 1;
318
                                break;
319
                        }
320
                        first = q;
321
                }
322
        }
323
        if (first) {
324
                list_del_init(&first->list);
325
                copy_siginfo(info, &first->info);
326
                __sigqueue_free(first);
327
                if (!still_pending)
328
                        sigdelset(&list->signal, sig);
329
        } else {
330
 
331
                /* Ok, it wasn't in the queue.  This must be
332
                   a fast-pathed signal or we must have been
333
                   out of queue space.  So zero out the info.
334
                 */
335
                sigdelset(&list->signal, sig);
336
                info->si_signo = sig;
337
                info->si_errno = 0;
338
                info->si_code = 0;
339
                info->si_pid = 0;
340
                info->si_uid = 0;
341
        }
342
        return 1;
343
}
344
 
345
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
346
                        siginfo_t *info)
347
{
348
        int sig = next_signal(pending, mask);
349
 
350
        if (sig) {
351
                if (current->notifier) {
352
                        if (sigismember(current->notifier_mask, sig)) {
353
                                if (!(current->notifier)(current->notifier_data)) {
354
                                        clear_thread_flag(TIF_SIGPENDING);
355
                                        return 0;
356
                                }
357
                        }
358
                }
359
 
360
                if (!collect_signal(sig, pending, info))
361
                        sig = 0;
362
        }
363
 
364
        return sig;
365
}
366
 
367
/*
368
 * Dequeue a signal and return the element to the caller, which is
369
 * expected to free it.
370
 *
371
 * All callers have to hold the siglock.
372
 */
373
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
374
{
375
        int signr = 0;
376
 
377
        /* We only dequeue private signals from ourselves, we don't let
378
         * signalfd steal them
379
         */
380
        signr = __dequeue_signal(&tsk->pending, mask, info);
381
        if (!signr) {
382
                signr = __dequeue_signal(&tsk->signal->shared_pending,
383
                                         mask, info);
384
                /*
385
                 * itimer signal ?
386
                 *
387
                 * itimers are process shared and we restart periodic
388
                 * itimers in the signal delivery path to prevent DoS
389
                 * attacks in the high resolution timer case. This is
390
                 * compliant with the old way of self restarting
391
                 * itimers, as the SIGALRM is a legacy signal and only
392
                 * queued once. Changing the restart behaviour to
393
                 * restart the timer in the signal dequeue path is
394
                 * reducing the timer noise on heavy loaded !highres
395
                 * systems too.
396
                 */
397
                if (unlikely(signr == SIGALRM)) {
398
                        struct hrtimer *tmr = &tsk->signal->real_timer;
399
 
400
                        if (!hrtimer_is_queued(tmr) &&
401
                            tsk->signal->it_real_incr.tv64 != 0) {
402
                                hrtimer_forward(tmr, tmr->base->get_time(),
403
                                                tsk->signal->it_real_incr);
404
                                hrtimer_restart(tmr);
405
                        }
406
                }
407
        }
408
        recalc_sigpending();
409
        if (signr && unlikely(sig_kernel_stop(signr))) {
410
                /*
411
                 * Set a marker that we have dequeued a stop signal.  Our
412
                 * caller might release the siglock and then the pending
413
                 * stop signal it is about to process is no longer in the
414
                 * pending bitmasks, but must still be cleared by a SIGCONT
415
                 * (and overruled by a SIGKILL).  So those cases clear this
416
                 * shared flag after we've set it.  Note that this flag may
417
                 * remain set after the signal we return is ignored or
418
                 * handled.  That doesn't matter because its only purpose
419
                 * is to alert stop-signal processing code when another
420
                 * processor has come along and cleared the flag.
421
                 */
422
                if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
423
                        tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
424
        }
425
        if (signr &&
426
             ((info->si_code & __SI_MASK) == __SI_TIMER) &&
427
             info->si_sys_private){
428
                /*
429
                 * Release the siglock to ensure proper locking order
430
                 * of timer locks outside of siglocks.  Note, we leave
431
                 * irqs disabled here, since the posix-timers code is
432
                 * about to disable them again anyway.
433
                 */
434
                spin_unlock(&tsk->sighand->siglock);
435
                do_schedule_next_timer(info);
436
                spin_lock(&tsk->sighand->siglock);
437
        }
438
        return signr;
439
}
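
/*
 * Illustrative sketch (not part of the original source): dequeue_signal()
 * expects the caller to hold ->siglock, as the comment above notes.  A
 * hedged sketch of that calling convention, with the actual delivery
 * logic left out:
 */
#if 0
static int pick_one_pending_signal(siginfo_t *info)
{
        int signr;

        spin_lock_irq(&current->sighand->siglock);
        signr = dequeue_signal(current, &current->blocked, info);
        spin_unlock_irq(&current->sighand->siglock);

        return signr;   /* 0 means nothing was pending and unblocked */
}
#endif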
440
 
441
/*
442
 * Tell a process that it has a new active signal..
443
 *
444
 * NOTE! we rely on the previous spin_lock to
445
 * lock interrupts for us! We can only be called with
446
 * "siglock" held, and the local interrupt must
447
 * have been disabled when that got acquired!
448
 *
449
 * No need to set need_resched since signal event passing
450
 * goes through ->blocked
451
 */
452
void signal_wake_up(struct task_struct *t, int resume)
453
{
454
        unsigned int mask;
455
 
456
        set_tsk_thread_flag(t, TIF_SIGPENDING);
457
 
458
        /*
459
         * For SIGKILL, we want to wake it up in the stopped/traced case.
460
         * We don't check t->state here because there is a race with it
         * executing on another processor and just now entering stopped state.
462
         * By using wake_up_state, we ensure the process will wake up and
463
         * handle its death signal.
464
         */
465
        mask = TASK_INTERRUPTIBLE;
466
        if (resume)
467
                mask |= TASK_STOPPED | TASK_TRACED;
468
        if (!wake_up_state(t, mask))
469
                kick_process(t);
470
}
471
 
472
/*
473
 * Remove signals in mask from the pending set and queue.
474
 * Returns 1 if any signals were found.
475
 *
476
 * All callers must be holding the siglock.
477
 *
478
 * This version takes a sigset mask and looks at all signals,
479
 * not just those in the first mask word.
480
 */
481
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
482
{
483
        struct sigqueue *q, *n;
484
        sigset_t m;
485
 
486
        sigandsets(&m, mask, &s->signal);
487
        if (sigisemptyset(&m))
488
                return 0;
489
 
490
        signandsets(&s->signal, &s->signal, mask);
491
        list_for_each_entry_safe(q, n, &s->list, list) {
492
                if (sigismember(mask, q->info.si_signo)) {
493
                        list_del_init(&q->list);
494
                        __sigqueue_free(q);
495
                }
496
        }
497
        return 1;
498
}
499
/*
500
 * Remove signals in mask from the pending set and queue.
501
 * Returns 1 if any signals were found.
502
 *
503
 * All callers must be holding the siglock.
504
 */
505
static int rm_from_queue(unsigned long mask, struct sigpending *s)
506
{
507
        struct sigqueue *q, *n;
508
 
509
        if (!sigtestsetmask(&s->signal, mask))
510
                return 0;
511
 
512
        sigdelsetmask(&s->signal, mask);
513
        list_for_each_entry_safe(q, n, &s->list, list) {
514
                if (q->info.si_signo < SIGRTMIN &&
515
                    (mask & sigmask(q->info.si_signo))) {
516
                        list_del_init(&q->list);
517
                        __sigqueue_free(q);
518
                }
519
        }
520
        return 1;
521
}
522
 
523
/*
524
 * Bad permissions for sending the signal
525
 */
526
static int check_kill_permission(int sig, struct siginfo *info,
527
                                 struct task_struct *t)
528
{
529
        int error = -EINVAL;
530
        if (!valid_signal(sig))
531
                return error;
532
 
533
        if (info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info))) {
534
                error = audit_signal_info(sig, t); /* Let audit system see the signal */
535
                if (error)
536
                        return error;
537
                error = -EPERM;
538
                if (((sig != SIGCONT) ||
539
                        (task_session_nr(current) != task_session_nr(t)))
540
                    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
541
                    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
542
                    && !capable(CAP_KILL))
543
                return error;
544
        }
545
 
546
        return security_task_kill(t, info, sig, 0);
547
}
548
 
549
/* forward decl */
550
static void do_notify_parent_cldstop(struct task_struct *tsk, int why);
551
 
552
/*
553
 * Handle magic process-wide effects of stop/continue signals.
554
 * Unlike the signal actions, these happen immediately at signal-generation
555
 * time regardless of blocking, ignoring, or handling.  This does the
556
 * actual continuing for SIGCONT, but not the actual stopping for stop
557
 * signals.  The process stop is done as a signal action for SIG_DFL.
558
 */
559
static void handle_stop_signal(int sig, struct task_struct *p)
560
{
561
        struct task_struct *t;
562
 
563
        if (p->signal->flags & SIGNAL_GROUP_EXIT)
564
                /*
565
                 * The process is in the middle of dying already.
566
                 */
567
                return;
568
 
569
        if (sig_kernel_stop(sig)) {
570
                /*
571
                 * This is a stop signal.  Remove SIGCONT from all queues.
572
                 */
573
                rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
574
                t = p;
575
                do {
576
                        rm_from_queue(sigmask(SIGCONT), &t->pending);
577
                        t = next_thread(t);
578
                } while (t != p);
579
        } else if (sig == SIGCONT) {
580
                /*
581
                 * Remove all stop signals from all queues,
582
                 * and wake all threads.
583
                 */
584
                if (unlikely(p->signal->group_stop_count > 0)) {
585
                        /*
586
                         * There was a group stop in progress.  We'll
587
                         * pretend it finished before we got here.  We are
588
                         * obliged to report it to the parent: if the
589
                         * SIGSTOP happened "after" this SIGCONT, then it
590
                         * would have cleared this pending SIGCONT.  If it
591
                         * happened "before" this SIGCONT, then the parent
592
                         * got the SIGCHLD about the stop finishing before
593
                         * the continue happened.  We do the notification
594
                         * now, and it's as if the stop had finished and
595
                         * the SIGCHLD was pending on entry to this kill.
596
                         */
597
                        p->signal->group_stop_count = 0;
598
                        p->signal->flags = SIGNAL_STOP_CONTINUED;
599
                        spin_unlock(&p->sighand->siglock);
600
                        do_notify_parent_cldstop(p, CLD_STOPPED);
601
                        spin_lock(&p->sighand->siglock);
602
                }
603
                rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
604
                t = p;
605
                do {
606
                        unsigned int state;
607
                        rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
608
 
609
                        /*
610
                         * If there is a handler for SIGCONT, we must make
611
                         * sure that no thread returns to user mode before
612
                         * we post the signal, in case it was the only
613
                         * thread eligible to run the signal handler--then
614
                         * it must not do anything between resuming and
615
                         * running the handler.  With the TIF_SIGPENDING
616
                         * flag set, the thread will pause and acquire the
617
                         * siglock that we hold now and until we've queued
618
                         * the pending signal.
619
                         *
620
                         * Wake up the stopped thread _after_ setting
621
                         * TIF_SIGPENDING
622
                         */
623
                        state = TASK_STOPPED;
624
                        if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
625
                                set_tsk_thread_flag(t, TIF_SIGPENDING);
626
                                state |= TASK_INTERRUPTIBLE;
627
                        }
628
                        wake_up_state(t, state);
629
 
630
                        t = next_thread(t);
631
                } while (t != p);
632
 
633
                if (p->signal->flags & SIGNAL_STOP_STOPPED) {
634
                        /*
635
                         * We were in fact stopped, and are now continued.
636
                         * Notify the parent with CLD_CONTINUED.
637
                         */
638
                        p->signal->flags = SIGNAL_STOP_CONTINUED;
639
                        p->signal->group_exit_code = 0;
640
                        spin_unlock(&p->sighand->siglock);
641
                        do_notify_parent_cldstop(p, CLD_CONTINUED);
642
                        spin_lock(&p->sighand->siglock);
643
                } else {
644
                        /*
645
                         * We are not stopped, but there could be a stop
646
                         * signal in the middle of being processed after
647
                         * being removed from the queue.  Clear that too.
648
                         */
649
                        p->signal->flags = 0;
650
                }
651
        } else if (sig == SIGKILL) {
652
                /*
653
                 * Make sure that any pending stop signal already dequeued
654
                 * is undone by the wakeup for SIGKILL.
655
                 */
656
                p->signal->flags = 0;
657
        }
658
}
659
 
660
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
661
                        struct sigpending *signals)
662
{
663
        struct sigqueue * q = NULL;
664
        int ret = 0;
665
 
666
        /*
667
         * Deliver the signal to listening signalfds. This must be called
668
         * with the sighand lock held.
669
         */
670
        signalfd_notify(t, sig);
671
 
672
        /*
673
         * fast-pathed signals for kernel-internal things like SIGSTOP
674
         * or SIGKILL.
675
         */
676
        if (info == SEND_SIG_FORCED)
677
                goto out_set;
678
 
679
        /* Real-time signals must be queued if sent by sigqueue, or
680
           some other real-time mechanism.  It is implementation
681
           defined whether kill() does so.  We attempt to do so, on
682
           the principle of least surprise, but since kill is not
683
           allowed to fail with EAGAIN when low on memory we just
684
           make sure at least one signal gets delivered and don't
685
           pass on the info struct.  */
686
 
687
        q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
688
                                             (is_si_special(info) ||
689
                                              info->si_code >= 0)));
690
        if (q) {
691
                list_add_tail(&q->list, &signals->list);
692
                switch ((unsigned long) info) {
693
                case (unsigned long) SEND_SIG_NOINFO:
694
                        q->info.si_signo = sig;
695
                        q->info.si_errno = 0;
696
                        q->info.si_code = SI_USER;
697
                        q->info.si_pid = task_pid_vnr(current);
698
                        q->info.si_uid = current->uid;
699
                        break;
700
                case (unsigned long) SEND_SIG_PRIV:
701
                        q->info.si_signo = sig;
702
                        q->info.si_errno = 0;
703
                        q->info.si_code = SI_KERNEL;
704
                        q->info.si_pid = 0;
705
                        q->info.si_uid = 0;
706
                        break;
707
                default:
708
                        copy_siginfo(&q->info, info);
709
                        break;
710
                }
711
        } else if (!is_si_special(info)) {
712
                if (sig >= SIGRTMIN && info->si_code != SI_USER)
713
                /*
714
                 * Queue overflow, abort.  We may abort if the signal was rt
715
                 * and sent by user using something other than kill().
716
                 */
717
                        return -EAGAIN;
718
        }
719
 
720
out_set:
721
        sigaddset(&signals->signal, sig);
722
        return ret;
723
}
724
 
725
#define LEGACY_QUEUE(sigptr, sig) \
726
        (((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
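
/*
 * Illustrative sketch (not part of the original source): LEGACY_QUEUE() is
 * what makes classic (< SIGRTMIN) signals coalesce -- a second instance is
 * dropped while one is already pending.  The effect is visible from user
 * space; a hedged demonstration:
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t hits;

static void count(int sig)
{
        hits++;
}

int main(void)
{
        struct sigaction sa;
        sigset_t block;

        sa.sa_handler = count;
        sa.sa_flags = 0;
        sigemptyset(&sa.sa_mask);
        sigaction(SIGUSR1, &sa, NULL);

        sigemptyset(&block);
        sigaddset(&block, SIGUSR1);
        sigprocmask(SIG_BLOCK, &block, NULL);

        kill(getpid(), SIGUSR1);
        kill(getpid(), SIGUSR1);        /* coalesced with the first one */

        sigprocmask(SIG_UNBLOCK, &block, NULL);
        printf("SIGUSR1 delivered %d time(s)\n", (int)hits);   /* expect 1 */
        return 0;
}
#endif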
727
 
728
int print_fatal_signals;
729
 
730
static void print_fatal_signal(struct pt_regs *regs, int signr)
731
{
732
        printk("%s/%d: potentially unexpected fatal signal %d.\n",
733
                current->comm, task_pid_nr(current), signr);
734
 
735
#if defined(__i386__) && !defined(__arch_um__)
736
        printk("code at %08lx: ", regs->eip);
737
        {
738
                int i;
739
                for (i = 0; i < 16; i++) {
740
                        unsigned char insn;
741
 
742
                        __get_user(insn, (unsigned char *)(regs->eip + i));
743
                        printk("%02x ", insn);
744
                }
745
        }
746
#endif
747
        printk("\n");
748
        show_regs(regs);
749
}
750
 
751
static int __init setup_print_fatal_signals(char *str)
752
{
753
        get_option (&str, &print_fatal_signals);
754
 
755
        return 1;
756
}
757
 
758
__setup("print-fatal-signals=", setup_print_fatal_signals);
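
/*
 * Illustrative note (not part of the original source): the __setup() line
 * above registers a boot parameter, so the dump in print_fatal_signal()
 * can be switched on from the kernel command line, e.g.:
 *
 *      ... print-fatal-signals=1
 *
 * The option name is taken from the string passed to __setup() above.
 */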
759
 
760
static int
761
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
762
{
763
        int ret = 0;
764
 
765
        BUG_ON(!irqs_disabled());
766
        assert_spin_locked(&t->sighand->siglock);
767
 
768
        /* Short-circuit ignored signals.  */
769
        if (sig_ignored(t, sig))
770
                goto out;
771
 
772
        /* Support queueing exactly one non-rt signal, so that we
773
           can get more detailed information about the cause of
774
           the signal. */
775
        if (LEGACY_QUEUE(&t->pending, sig))
776
                goto out;
777
 
778
        ret = send_signal(sig, info, t, &t->pending);
779
        if (!ret && !sigismember(&t->blocked, sig))
780
                signal_wake_up(t, sig == SIGKILL);
781
out:
782
        return ret;
783
}
784
 
785
/*
786
 * Force a signal that the process can't ignore: if necessary
787
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
788
 *
789
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
790
 * since we do not want to have a signal handler that was blocked
791
 * be invoked when user space had explicitly blocked it.
792
 *
793
 * We don't want to have recursive SIGSEGV's etc, for example.
794
 */
795
int
796
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
797
{
798
        unsigned long int flags;
799
        int ret, blocked, ignored;
800
        struct k_sigaction *action;
801
 
802
        spin_lock_irqsave(&t->sighand->siglock, flags);
803
        action = &t->sighand->action[sig-1];
804
        ignored = action->sa.sa_handler == SIG_IGN;
805
        blocked = sigismember(&t->blocked, sig);
806
        if (blocked || ignored) {
807
                action->sa.sa_handler = SIG_DFL;
808
                if (blocked) {
809
                        sigdelset(&t->blocked, sig);
810
                        recalc_sigpending_and_wake(t);
811
                }
812
        }
813
        ret = specific_send_sig_info(sig, info, t);
814
        spin_unlock_irqrestore(&t->sighand->siglock, flags);
815
 
816
        return ret;
817
}
818
 
819
void
820
force_sig_specific(int sig, struct task_struct *t)
821
{
822
        force_sig_info(sig, SEND_SIG_FORCED, t);
823
}
824
 
825
/*
826
 * Test if P wants to take SIG.  After we've checked all threads with this,
827
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
828
 * blocking SIG were ruled out because they are not running and already
829
 * have pending signals.  Such threads will dequeue from the shared queue
830
 * as soon as they're available, so putting the signal on the shared queue
831
 * will be equivalent to sending it to one such thread.
832
 */
833
static inline int wants_signal(int sig, struct task_struct *p)
834
{
835
        if (sigismember(&p->blocked, sig))
836
                return 0;
837
        if (p->flags & PF_EXITING)
838
                return 0;
839
        if (sig == SIGKILL)
840
                return 1;
841
        if (p->state & (TASK_STOPPED | TASK_TRACED))
842
                return 0;
843
        return task_curr(p) || !signal_pending(p);
844
}
845
 
846
static void
847
__group_complete_signal(int sig, struct task_struct *p)
848
{
849
        struct task_struct *t;
850
 
851
        /*
852
         * Now find a thread we can wake up to take the signal off the queue.
853
         *
854
         * If the main thread wants the signal, it gets first crack.
855
         * Probably the least surprising to the average bear.
856
         */
857
        if (wants_signal(sig, p))
858
                t = p;
859
        else if (thread_group_empty(p))
860
                /*
861
                 * There is just one thread and it does not need to be woken.
862
                 * It will dequeue unblocked signals before it runs again.
863
                 */
864
                return;
865
        else {
866
                /*
867
                 * Otherwise try to find a suitable thread.
868
                 */
869
                t = p->signal->curr_target;
870
                if (t == NULL)
871
                        /* restart balancing at this thread */
872
                        t = p->signal->curr_target = p;
873
 
874
                while (!wants_signal(sig, t)) {
875
                        t = next_thread(t);
876
                        if (t == p->signal->curr_target)
877
                                /*
878
                                 * No thread needs to be woken.
879
                                 * Any eligible threads will see
880
                                 * the signal in the queue soon.
881
                                 */
882
                                return;
883
                }
884
                p->signal->curr_target = t;
885
        }
886
 
887
        /*
888
         * Found a killable thread.  If the signal will be fatal,
889
         * then start taking the whole group down immediately.
890
         */
891
        if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
892
            !sigismember(&t->real_blocked, sig) &&
893
            (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
894
                /*
895
                 * This signal will be fatal to the whole group.
896
                 */
897
                if (!sig_kernel_coredump(sig)) {
898
                        /*
899
                         * Start a group exit and wake everybody up.
900
                         * This way we don't have other threads
901
                         * running and doing things after a slower
902
                         * thread has the fatal signal pending.
903
                         */
904
                        p->signal->flags = SIGNAL_GROUP_EXIT;
905
                        p->signal->group_exit_code = sig;
906
                        p->signal->group_stop_count = 0;
907
                        t = p;
908
                        do {
909
                                sigaddset(&t->pending.signal, SIGKILL);
910
                                signal_wake_up(t, 1);
911
                        } while_each_thread(p, t);
912
                        return;
913
                }
914
 
915
                /*
916
                 * There will be a core dump.  We make all threads other
917
                 * than the chosen one go into a group stop so that nothing
918
                 * happens until it gets scheduled, takes the signal off
919
                 * the shared queue, and does the core dump.  This is a
920
                 * little more complicated than strictly necessary, but it
921
                 * keeps the signal state that winds up in the core dump
922
                 * unchanged from the death state, e.g. which thread had
923
                 * the core-dump signal unblocked.
924
                 */
925
                rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
926
                rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
927
                p->signal->group_stop_count = 0;
928
                p->signal->group_exit_task = t;
929
                p = t;
930
                do {
931
                        p->signal->group_stop_count++;
932
                        signal_wake_up(t, t == p);
933
                } while_each_thread(p, t);
934
                return;
935
        }
936
 
937
        /*
938
         * The signal is already in the shared-pending queue.
939
         * Tell the chosen thread to wake up and dequeue it.
940
         */
941
        signal_wake_up(t, sig == SIGKILL);
942
        return;
943
}
944
 
945
int
946
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
947
{
948
        int ret = 0;
949
 
950
        assert_spin_locked(&p->sighand->siglock);
951
        handle_stop_signal(sig, p);
952
 
953
        /* Short-circuit ignored signals.  */
954
        if (sig_ignored(p, sig))
955
                return ret;
956
 
957
        if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
958
                /* This is a non-RT signal and we already have one queued.  */
959
                return ret;
960
 
961
        /*
962
         * Put this signal on the shared-pending queue, or fail with EAGAIN.
963
         * We always use the shared queue for process-wide signals,
964
         * to avoid several races.
965
         */
966
        ret = send_signal(sig, info, p, &p->signal->shared_pending);
967
        if (unlikely(ret))
968
                return ret;
969
 
970
        __group_complete_signal(sig, p);
971
        return 0;
972
}
973
 
974
/*
975
 * Nuke all other threads in the group.
976
 */
977
void zap_other_threads(struct task_struct *p)
978
{
979
        struct task_struct *t;
980
 
981
        p->signal->flags = SIGNAL_GROUP_EXIT;
982
        p->signal->group_stop_count = 0;
983
 
984
        for (t = next_thread(p); t != p; t = next_thread(t)) {
985
                /*
986
                 * Don't bother with already dead threads
987
                 */
988
                if (t->exit_state)
989
                        continue;
990
 
991
                /* SIGKILL will be handled before any pending SIGSTOP */
992
                sigaddset(&t->pending.signal, SIGKILL);
993
                signal_wake_up(t, 1);
994
        }
995
}
996
 
997
/*
998
 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
999
 */
1000
struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
1001
{
1002
        struct sighand_struct *sighand;
1003
 
1004
        for (;;) {
1005
                sighand = rcu_dereference(tsk->sighand);
1006
                if (unlikely(sighand == NULL))
1007
                        break;
1008
 
1009
                spin_lock_irqsave(&sighand->siglock, *flags);
1010
                if (likely(sighand == tsk->sighand))
1011
                        break;
1012
                spin_unlock_irqrestore(&sighand->siglock, *flags);
1013
        }
1014
 
1015
        return sighand;
1016
}
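
/*
 * Illustrative sketch (not part of the original source): the calling
 * pattern the comment above asks for -- the caller supplies RCU (or
 * tasklist_lock) protection and must handle a NULL return when the task
 * has no sighand left.  The helper name is hypothetical:
 */
#if 0
static int poke_task(struct task_struct *tsk)
{
        unsigned long flags;

        rcu_read_lock();
        if (!lock_task_sighand(tsk, &flags)) {
                rcu_read_unlock();
                return -ESRCH;          /* task is already exiting */
        }
        /* ... examine or modify tsk's pending signals here ... */
        unlock_task_sighand(tsk, &flags);
        rcu_read_unlock();
        return 0;
}
#endif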
1017
 
1018
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1019
{
1020
        unsigned long flags;
1021
        int ret;
1022
 
1023
        ret = check_kill_permission(sig, info, p);
1024
 
1025
        if (!ret && sig) {
1026
                ret = -ESRCH;
1027
                if (lock_task_sighand(p, &flags)) {
1028
                        ret = __group_send_sig_info(sig, info, p);
1029
                        unlock_task_sighand(p, &flags);
1030
                }
1031
        }
1032
 
1033
        return ret;
1034
}
1035
 
1036
/*
1037
 * kill_pgrp_info() sends a signal to a process group: this is what the tty
1038
 * control characters do (^C, ^Z etc)
1039
 */
1040
 
1041
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1042
{
1043
        struct task_struct *p = NULL;
1044
        int retval, success;
1045
 
1046
        success = 0;
1047
        retval = -ESRCH;
1048
        do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1049
                int err = group_send_sig_info(sig, info, p);
1050
                success |= !err;
1051
                retval = err;
1052
        } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1053
        return success ? 0 : retval;
1054
}
1055
 
1056
int kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1057
{
1058
        int retval;
1059
 
1060
        read_lock(&tasklist_lock);
1061
        retval = __kill_pgrp_info(sig, info, pgrp);
1062
        read_unlock(&tasklist_lock);
1063
 
1064
        return retval;
1065
}
1066
 
1067
int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1068
{
1069
        int error;
1070
        struct task_struct *p;
1071
 
1072
        rcu_read_lock();
1073
        if (unlikely(sig_needs_tasklist(sig)))
1074
                read_lock(&tasklist_lock);
1075
 
1076
        p = pid_task(pid, PIDTYPE_PID);
1077
        error = -ESRCH;
1078
        if (p)
1079
                error = group_send_sig_info(sig, info, p);
1080
 
1081
        if (unlikely(sig_needs_tasklist(sig)))
1082
                read_unlock(&tasklist_lock);
1083
        rcu_read_unlock();
1084
        return error;
1085
}
1086
 
1087
int
1088
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1089
{
1090
        int error;
1091
        rcu_read_lock();
1092
        error = kill_pid_info(sig, info, find_vpid(pid));
1093
        rcu_read_unlock();
1094
        return error;
1095
}
1096
 
1097
/* like kill_pid_info(), but doesn't use uid/euid of "current" */
1098
int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
1099
                      uid_t uid, uid_t euid, u32 secid)
1100
{
1101
        int ret = -EINVAL;
1102
        struct task_struct *p;
1103
 
1104
        if (!valid_signal(sig))
1105
                return ret;
1106
 
1107
        read_lock(&tasklist_lock);
1108
        p = pid_task(pid, PIDTYPE_PID);
1109
        if (!p) {
1110
                ret = -ESRCH;
1111
                goto out_unlock;
1112
        }
1113
        if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
1114
            && (euid != p->suid) && (euid != p->uid)
1115
            && (uid != p->suid) && (uid != p->uid)) {
1116
                ret = -EPERM;
1117
                goto out_unlock;
1118
        }
1119
        ret = security_task_kill(p, info, sig, secid);
1120
        if (ret)
1121
                goto out_unlock;
1122
        if (sig && p->sighand) {
1123
                unsigned long flags;
1124
                spin_lock_irqsave(&p->sighand->siglock, flags);
1125
                ret = __group_send_sig_info(sig, info, p);
1126
                spin_unlock_irqrestore(&p->sighand->siglock, flags);
1127
        }
1128
out_unlock:
1129
        read_unlock(&tasklist_lock);
1130
        return ret;
1131
}
1132
EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
1133
 
1134
/*
1135
 * kill_something_info() interprets pid in interesting ways just like kill(2).
1136
 *
1137
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1138
 * is probably wrong.  Should make it like BSD or SYSV.
1139
 */
1140
 
1141
static int kill_something_info(int sig, struct siginfo *info, int pid)
1142
{
1143
        int ret;
1144
        rcu_read_lock();
1145
        if (!pid) {
1146
                ret = kill_pgrp_info(sig, info, task_pgrp(current));
1147
        } else if (pid == -1) {
1148
                int retval = 0, count = 0;
1149
                struct task_struct * p;
1150
 
1151
                read_lock(&tasklist_lock);
1152
                for_each_process(p) {
1153
                        if (p->pid > 1 && !same_thread_group(p, current)) {
1154
                                int err = group_send_sig_info(sig, info, p);
1155
                                ++count;
1156
                                if (err != -EPERM)
1157
                                        retval = err;
1158
                        }
1159
                }
1160
                read_unlock(&tasklist_lock);
1161
                ret = count ? retval : -ESRCH;
1162
        } else if (pid < 0) {
1163
                ret = kill_pgrp_info(sig, info, find_vpid(-pid));
1164
        } else {
1165
                ret = kill_pid_info(sig, info, find_vpid(pid));
1166
        }
1167
        rcu_read_unlock();
1168
        return ret;
1169
}
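
/*
 * Illustrative sketch (not part of the original source): the pid cases
 * handled above are exactly the kill(2) conventions as seen from user
 * space.  A hedged recap (not meant to be run as-is):
 */
#if 0
#include <signal.h>
#include <sys/types.h>

static void kill_conventions(pid_t pid, pid_t pgid)
{
        kill(pid,   SIGTERM);   /* pid > 0: exactly that process            */
        kill(0,     SIGTERM);   /* pid == 0: the caller's own process group */
        kill(-1,    SIGTERM);   /* pid == -1: everything we may signal      */
        kill(-pgid, SIGTERM);   /* pid < -1: the process group "pgid"       */
}
#endif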
1170
 
1171
/*
1172
 * These are for backward compatibility with the rest of the kernel source.
1173
 */
1174
 
1175
/*
1176
 * These two are the most common entry points.  They send a signal
1177
 * just to the specific thread.
1178
 */
1179
int
1180
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1181
{
1182
        int ret;
1183
        unsigned long flags;
1184
 
1185
        /*
1186
         * Make sure legacy kernel users don't send in bad values
1187
         * (normal paths check this in check_kill_permission).
1188
         */
1189
        if (!valid_signal(sig))
1190
                return -EINVAL;
1191
 
1192
        /*
1193
         * We need the tasklist lock even for the specific
1194
         * thread case (when we don't need to follow the group
1195
         * lists) in order to avoid races with "p->sighand"
1196
         * going away or changing from under us.
1197
         */
1198
        read_lock(&tasklist_lock);
1199
        spin_lock_irqsave(&p->sighand->siglock, flags);
1200
        ret = specific_send_sig_info(sig, info, p);
1201
        spin_unlock_irqrestore(&p->sighand->siglock, flags);
1202
        read_unlock(&tasklist_lock);
1203
        return ret;
1204
}
1205
 
1206
#define __si_special(priv) \
1207
        ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1208
 
1209
int
1210
send_sig(int sig, struct task_struct *p, int priv)
1211
{
1212
        return send_sig_info(sig, __si_special(priv), p);
1213
}
1214
 
1215
/*
1216
 * This is the entry point for "process-wide" signals.
1217
 * They will go to an appropriate thread in the thread group.
1218
 */
1219
int
1220
send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1221
{
1222
        int ret;
1223
        read_lock(&tasklist_lock);
1224
        ret = group_send_sig_info(sig, info, p);
1225
        read_unlock(&tasklist_lock);
1226
        return ret;
1227
}
1228
 
1229
void
1230
force_sig(int sig, struct task_struct *p)
1231
{
1232
        force_sig_info(sig, SEND_SIG_PRIV, p);
1233
}
1234
 
1235
/*
1236
 * When things go south during signal handling, we
1237
 * will force a SIGSEGV. And if the signal that caused
1238
 * the problem was already a SIGSEGV, we'll want to
1239
 * make sure we don't even try to deliver the signal..
1240
 */
1241
int
1242
force_sigsegv(int sig, struct task_struct *p)
1243
{
1244
        if (sig == SIGSEGV) {
1245
                unsigned long flags;
1246
                spin_lock_irqsave(&p->sighand->siglock, flags);
1247
                p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1248
                spin_unlock_irqrestore(&p->sighand->siglock, flags);
1249
        }
1250
        force_sig(SIGSEGV, p);
1251
        return 0;
1252
}
1253
 
1254
int kill_pgrp(struct pid *pid, int sig, int priv)
1255
{
1256
        return kill_pgrp_info(sig, __si_special(priv), pid);
1257
}
1258
EXPORT_SYMBOL(kill_pgrp);
1259
 
1260
int kill_pid(struct pid *pid, int sig, int priv)
1261
{
1262
        return kill_pid_info(sig, __si_special(priv), pid);
1263
}
1264
EXPORT_SYMBOL(kill_pid);
1265
 
1266
int
1267
kill_proc(pid_t pid, int sig, int priv)
1268
{
1269
        int ret;
1270
 
1271
        rcu_read_lock();
1272
        ret = kill_pid_info(sig, __si_special(priv), find_pid(pid));
1273
        rcu_read_unlock();
1274
        return ret;
1275
}
1276
 
1277
/*
1278
 * These functions support sending signals using preallocated sigqueue
1279
 * structures.  This is needed "because realtime applications cannot
1280
 * afford to lose notifications of asynchronous events, like timer
1281
 * expirations or I/O completions".  In the case of Posix Timers
1282
 * we allocate the sigqueue structure from the timer_create.  If this
1283
 * allocation fails we are able to report the failure to the application
1284
 * with an EAGAIN error.
1285
 */
1286
 
1287
struct sigqueue *sigqueue_alloc(void)
1288
{
1289
        struct sigqueue *q;
1290
 
1291
        if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
1292
                q->flags |= SIGQUEUE_PREALLOC;
1293
        return(q);
1294
}
1295
 
1296
void sigqueue_free(struct sigqueue *q)
1297
{
1298
        unsigned long flags;
1299
        spinlock_t *lock = &current->sighand->siglock;
1300
 
1301
        BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1302
        /*
1303
         * If the signal is still pending remove it from the
1304
         * pending queue. We must hold ->siglock while testing
1305
         * q->list to serialize with collect_signal().
1306
         */
1307
        spin_lock_irqsave(lock, flags);
1308
        if (!list_empty(&q->list))
1309
                list_del_init(&q->list);
1310
        spin_unlock_irqrestore(lock, flags);
1311
 
1312
        q->flags &= ~SIGQUEUE_PREALLOC;
1313
        __sigqueue_free(q);
1314
}
1315
 
1316
int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1317
{
1318
        unsigned long flags;
1319
        int ret = 0;
1320
 
1321
        BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1322
 
1323
        /*
1324
         * The rcu based delayed sighand destroy makes it possible to
1325
         * run this without tasklist lock held. The task struct itself
1326
         * cannot go away as create_timer did get_task_struct().
1327
         *
         * We return -1 when the task is marked exiting, so
1329
         * posix_timer_event can redirect it to the group leader
1330
         */
1331
        rcu_read_lock();
1332
 
1333
        if (!likely(lock_task_sighand(p, &flags))) {
1334
                ret = -1;
1335
                goto out_err;
1336
        }
1337
 
1338
        if (unlikely(!list_empty(&q->list))) {
1339
                /*
                 * If an SI_TIMER entry is already queued just increment
1341
                 * the overrun count.
1342
                 */
1343
                BUG_ON(q->info.si_code != SI_TIMER);
1344
                q->info.si_overrun++;
1345
                goto out;
1346
        }
1347
        /* Short-circuit ignored signals.  */
1348
        if (sig_ignored(p, sig)) {
1349
                ret = 1;
1350
                goto out;
1351
        }
1352
        /*
1353
         * Deliver the signal to listening signalfds. This must be called
1354
         * with the sighand lock held.
1355
         */
1356
        signalfd_notify(p, sig);
1357
 
1358
        list_add_tail(&q->list, &p->pending.list);
1359
        sigaddset(&p->pending.signal, sig);
1360
        if (!sigismember(&p->blocked, sig))
1361
                signal_wake_up(p, sig == SIGKILL);
1362
 
1363
out:
1364
        unlock_task_sighand(p, &flags);
1365
out_err:
1366
        rcu_read_unlock();
1367
 
1368
        return ret;
1369
}
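
/*
 * Illustrative sketch (not part of the original source): the preallocated
 * sigqueue lifecycle described before sigqueue_alloc() -- allocate once at
 * timer-creation time, reuse the entry on every expiry, free it when the
 * timer goes away.  "struct my_timer" and the helpers are hypothetical;
 * the real consumer is the POSIX timer code.
 */
#if 0
struct my_timer {
        struct sigqueue *sigq;
        struct task_struct *target;
        int sig;
};

static int my_timer_create(struct my_timer *t)
{
        t->sigq = sigqueue_alloc();
        if (!t->sigq)
                return -EAGAIN;         /* report the failure at create time */
        return 0;
}

static void my_timer_expired(struct my_timer *t)
{
        /* Re-sending while still queued only bumps si_overrun. */
        send_sigqueue(t->sig, t->sigq, t->target);
}

static void my_timer_delete(struct my_timer *t)
{
        sigqueue_free(t->sigq);
}
#endif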
1370
 
1371
int
1372
send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1373
{
1374
        unsigned long flags;
1375
        int ret = 0;
1376
 
1377
        BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1378
 
1379
        read_lock(&tasklist_lock);
1380
        /* Since it_lock is held, p->sighand cannot be NULL. */
1381
        spin_lock_irqsave(&p->sighand->siglock, flags);
1382
        handle_stop_signal(sig, p);
1383
 
1384
        /* Short-circuit ignored signals.  */
1385
        if (sig_ignored(p, sig)) {
1386
                ret = 1;
1387
                goto out;
1388
        }
1389
 
1390
        if (unlikely(!list_empty(&q->list))) {
1391
                /*
                 * If an SI_TIMER entry is already queued just increment
1393
                 * the overrun count.  Other uses should not try to
1394
                 * send the signal multiple times.
1395
                 */
1396
                BUG_ON(q->info.si_code != SI_TIMER);
1397
                q->info.si_overrun++;
1398
                goto out;
1399
        }
1400
        /*
1401
         * Deliver the signal to listening signalfds. This must be called
1402
         * with the sighand lock held.
1403
         */
1404
        signalfd_notify(p, sig);
1405
 
1406
        /*
1407
         * Put this signal on the shared-pending queue.
1408
         * We always use the shared queue for process-wide signals,
1409
         * to avoid several races.
1410
         */
1411
        list_add_tail(&q->list, &p->signal->shared_pending.list);
1412
        sigaddset(&p->signal->shared_pending.signal, sig);
1413
 
1414
        __group_complete_signal(sig, p);
1415
out:
1416
        spin_unlock_irqrestore(&p->sighand->siglock, flags);
1417
        read_unlock(&tasklist_lock);
1418
        return ret;
1419
}
1420
 
1421
/*
1422
 * Wake up any threads in the parent blocked in wait* syscalls.
1423
 */
1424
static inline void __wake_up_parent(struct task_struct *p,
1425
                                    struct task_struct *parent)
1426
{
1427
        wake_up_interruptible_sync(&parent->signal->wait_chldexit);
1428
}
1429
 
1430
/*
1431
 * Let a parent know about the death of a child.
1432
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1433
 */
1434
 
1435
void do_notify_parent(struct task_struct *tsk, int sig)
1436
{
1437
        struct siginfo info;
1438
        unsigned long flags;
1439
        struct sighand_struct *psig;
1440
 
1441
        BUG_ON(sig == -1);
1442
 
1443
        /* do_notify_parent_cldstop should have been called instead.  */
1444
        BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));
1445
 
1446
        BUG_ON(!tsk->ptrace &&
1447
               (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1448
 
1449
        info.si_signo = sig;
1450
        info.si_errno = 0;
1451
        /*
1452
         * we are under tasklist_lock here so our parent is tied to
1453
         * us and cannot exit and release its namespace.
1454
         *
         * the only thing it can do is to switch its nsproxy with sys_unshare,
         * but unsharing pid namespaces is not allowed, so we'll always
         * see the relevant namespace
         *
         * write_lock() currently calls preempt_disable() which is the
         * same as rcu_read_lock(), but according to Oleg, it is not
         * correct to rely on this
1462
         */
1463
        rcu_read_lock();
1464
        info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
1465
        rcu_read_unlock();
1466
 
1467
        info.si_uid = tsk->uid;
1468
 
1469
        /* FIXME: find out whether or not this is supposed to be c*time. */
1470
        info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
1471
                                                       tsk->signal->utime));
1472
        info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
1473
                                                       tsk->signal->stime));
1474
 
1475
        info.si_status = tsk->exit_code & 0x7f;
1476
        if (tsk->exit_code & 0x80)
1477
                info.si_code = CLD_DUMPED;
1478
        else if (tsk->exit_code & 0x7f)
1479
                info.si_code = CLD_KILLED;
1480
        else {
1481
                info.si_code = CLD_EXITED;
1482
                info.si_status = tsk->exit_code >> 8;
1483
        }
1484
 
1485
        psig = tsk->parent->sighand;
1486
        spin_lock_irqsave(&psig->siglock, flags);
1487
        if (!tsk->ptrace && sig == SIGCHLD &&
1488
            (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1489
             (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1490
                /*
1491
                 * We are exiting and our parent doesn't care.  POSIX.1
1492
                 * defines special semantics for setting SIGCHLD to SIG_IGN
1493
                 * or setting the SA_NOCLDWAIT flag: we should be reaped
1494
                 * automatically and not left for our parent's wait4 call.
1495
                 * Rather than having the parent do it as a magic kind of
1496
                 * signal handler, we just set this to tell do_exit that we
1497
                 * can be cleaned up without becoming a zombie.  Note that
1498
                 * we still call __wake_up_parent in this case, because a
1499
                 * blocked sys_wait4 might now return -ECHILD.
1500
                 *
1501
                 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1502
                 * is implementation-defined: we do (if you don't want
1503
                 * it, just use SIG_IGN instead).
1504
                 */
1505
                tsk->exit_signal = -1;
1506
                if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1507
                        sig = 0;
1508
        }
1509
        if (valid_signal(sig) && sig > 0)
1510
                __group_send_sig_info(sig, &info, tsk->parent);
1511
        __wake_up_parent(tsk, tsk->parent);
1512
        spin_unlock_irqrestore(&psig->siglock, flags);
1513
}
1514
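/*
 * Illustrative sketch (user space, not part of this kernel file): a minimal
 * program showing the SIGCHLD/SIG_IGN auto-reap semantics described in the
 * comment above. This assumes an ordinary POSIX/glibc environment; it is a
 * usage sketch, not kernel code.
 */
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        /* Ask the kernel to reap children automatically (exit_signal = -1 above). */
        signal(SIGCHLD, SIG_IGN);

        if (fork() == 0)
                _exit(0);               /* child exits immediately */

        sleep(1);                       /* give the child time to exit and be reaped */

        /* No zombie is left behind, so wait() reports ECHILD. */
        if (wait(NULL) == -1)
                perror("wait");         /* expected: "No child processes" */
        return 0;
}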
 
1515
static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
1516
{
1517
        struct siginfo info;
1518
        unsigned long flags;
1519
        struct task_struct *parent;
1520
        struct sighand_struct *sighand;
1521
 
1522
        if (tsk->ptrace & PT_PTRACED)
1523
                parent = tsk->parent;
1524
        else {
1525
                tsk = tsk->group_leader;
1526
                parent = tsk->real_parent;
1527
        }
1528
 
1529
        info.si_signo = SIGCHLD;
1530
        info.si_errno = 0;
1531
        /*
1532
         * see comment in do_notify_parent() about the following 3 lines
1533
         */
1534
        rcu_read_lock();
1535
        info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
1536
        rcu_read_unlock();
1537
 
1538
        info.si_uid = tsk->uid;
1539
 
1540
        /* FIXME: find out whether or not this is supposed to be c*time. */
1541
        info.si_utime = cputime_to_jiffies(tsk->utime);
1542
        info.si_stime = cputime_to_jiffies(tsk->stime);
1543
 
1544
        info.si_code = why;
1545
        switch (why) {
1546
        case CLD_CONTINUED:
1547
                info.si_status = SIGCONT;
1548
                break;
1549
        case CLD_STOPPED:
1550
                info.si_status = tsk->signal->group_exit_code & 0x7f;
1551
                break;
1552
        case CLD_TRAPPED:
1553
                info.si_status = tsk->exit_code & 0x7f;
1554
                break;
1555
        default:
1556
                BUG();
1557
        }
1558
 
1559
        sighand = parent->sighand;
1560
        spin_lock_irqsave(&sighand->siglock, flags);
1561
        if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1562
            !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1563
                __group_send_sig_info(SIGCHLD, &info, parent);
1564
        /*
1565
         * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1566
         */
1567
        __wake_up_parent(tsk, parent);
1568
        spin_unlock_irqrestore(&sighand->siglock, flags);
1569
}
1570
 
1571
static inline int may_ptrace_stop(void)
1572
{
1573
        if (!likely(current->ptrace & PT_PTRACED))
1574
                return 0;
1575
 
1576
        if (unlikely(current->parent == current->real_parent &&
1577
                    (current->ptrace & PT_ATTACHED)))
1578
                return 0;
1579
 
1580
        /*
1581
         * Are we in the middle of do_coredump?
1582
         * If so, and our tracer is also part of the coredump, stopping
1583
         * is a deadlock situation and pointless because our tracer
1584
         * is dead, so don't allow us to stop.
1585
         * If SIGKILL was already sent before the caller unlocked
1586
         * ->siglock we must see ->core_waiters != 0. Otherwise it
1587
         * is safe to enter schedule().
1588
         */
1589
        if (unlikely(current->mm->core_waiters) &&
1590
            unlikely(current->mm == current->parent->mm))
1591
                return 0;
1592
 
1593
        return 1;
1594
}
1595
 
1596
/*
1597
 * This must be called with current->sighand->siglock held.
1598
 *
1599
 * This should be the path for all ptrace stops.
1600
 * We always set current->last_siginfo while stopped here.
1601
 * That makes it a way to test a stopped process for
1602
 * being ptrace-stopped vs being job-control-stopped.
1603
 *
1604
 * If we actually decide not to stop at all because the tracer is gone,
1605
 * we leave nostop_code in current->exit_code.
1606
 */
1607
static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
1608
{
1609
        /*
1610
         * If there is a group stop in progress,
1611
         * we must participate in the bookkeeping.
1612
         */
1613
        if (current->signal->group_stop_count > 0)
1614
                --current->signal->group_stop_count;
1615
 
1616
        current->last_siginfo = info;
1617
        current->exit_code = exit_code;
1618
 
1619
        /* Let the debugger run.  */
1620
        set_current_state(TASK_TRACED);
1621
        spin_unlock_irq(&current->sighand->siglock);
1622
        try_to_freeze();
1623
        read_lock(&tasklist_lock);
1624
        if (may_ptrace_stop()) {
1625
                do_notify_parent_cldstop(current, CLD_TRAPPED);
1626
                read_unlock(&tasklist_lock);
1627
                schedule();
1628
        } else {
1629
                /*
1630
                 * By the time we got the lock, our tracer went away.
1631
                 * Don't stop here.
1632
                 */
1633
                read_unlock(&tasklist_lock);
1634
                set_current_state(TASK_RUNNING);
1635
                current->exit_code = nostop_code;
1636
        }
1637
 
1638
        /*
1639
         * We are back.  Now reacquire the siglock before touching
1640
         * last_siginfo, so that we are sure to have synchronized with
1641
         * any signal-sending on another CPU that wants to examine it.
1642
         */
1643
        spin_lock_irq(&current->sighand->siglock);
1644
        current->last_siginfo = NULL;
1645
 
1646
        /*
1647
         * Queued signals ignored us while we were stopped for tracing.
1648
         * So check for any that we should take before resuming user mode.
1649
         * This sets TIF_SIGPENDING, but never clears it.
1650
         */
1651
        recalc_sigpending_tsk(current);
1652
}
1653
 
1654
void ptrace_notify(int exit_code)
1655
{
1656
        siginfo_t info;
1657
 
1658
        BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1659
 
1660
        memset(&info, 0, sizeof info);
1661
        info.si_signo = SIGTRAP;
1662
        info.si_code = exit_code;
1663
        info.si_pid = task_pid_vnr(current);
1664
        info.si_uid = current->uid;
1665
 
1666
        /* Let the debugger run.  */
1667
        spin_lock_irq(&current->sighand->siglock);
1668
        ptrace_stop(exit_code, 0, &info);
1669
        spin_unlock_irq(&current->sighand->siglock);
1670
}
1671
 
1672
static void
1673
finish_stop(int stop_count)
1674
{
1675
        /*
1676
         * If there are no other threads in the group, or if there is
1677
         * a group stop in progress and we are the last to stop,
1678
         * report to the parent.  When ptraced, every thread reports itself.
1679
         */
1680
        if (stop_count == 0 || (current->ptrace & PT_PTRACED)) {
1681
                read_lock(&tasklist_lock);
1682
                do_notify_parent_cldstop(current, CLD_STOPPED);
1683
                read_unlock(&tasklist_lock);
1684
        }
1685
 
1686
        do {
1687
                schedule();
1688
        } while (try_to_freeze());
1689
        /*
1690
         * Now we don't run again until continued.
1691
         */
1692
        current->exit_code = 0;
1693
}
1694
 
1695
/*
1696
 * This performs the stopping for SIGSTOP and other stop signals.
1697
 * We have to stop all threads in the thread group.
1698
 * Returns nonzero if we've actually stopped and released the siglock.
1699
 * Returns zero if we didn't stop and still hold the siglock.
1700
 */
1701
static int do_signal_stop(int signr)
1702
{
1703
        struct signal_struct *sig = current->signal;
1704
        int stop_count;
1705
 
1706
        if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
1707
                return 0;
1708
 
1709
        if (sig->group_stop_count > 0) {
1710
                /*
1711
                 * There is a group stop in progress.  We don't need to
1712
                 * start another one.
1713
                 */
1714
                stop_count = --sig->group_stop_count;
1715
        } else {
1716
                /*
1717
                 * There is no group stop already in progress.
1718
                 * We must initiate one now.
1719
                 */
1720
                struct task_struct *t;
1721
 
1722
                sig->group_exit_code = signr;
1723
 
1724
                stop_count = 0;
1725
                for (t = next_thread(current); t != current; t = next_thread(t))
1726
                        /*
1727
                         * Setting state to TASK_STOPPED for a group
1728
                         * stop is always done with the siglock held,
1729
                         * so this check has no races.
1730
                         */
1731
                        if (!t->exit_state &&
1732
                            !(t->state & (TASK_STOPPED|TASK_TRACED))) {
1733
                                stop_count++;
1734
                                signal_wake_up(t, 0);
1735
                        }
1736
                sig->group_stop_count = stop_count;
1737
        }
1738
 
1739
        if (stop_count == 0)
1740
                sig->flags = SIGNAL_STOP_STOPPED;
1741
        current->exit_code = sig->group_exit_code;
1742
        __set_current_state(TASK_STOPPED);
1743
 
1744
        spin_unlock_irq(&current->sighand->siglock);
1745
        finish_stop(stop_count);
1746
        return 1;
1747
}
1748
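/*
 * Illustrative sketch (user space, not part of this kernel file): the
 * parent-side view of the group stop performed by do_signal_stop() and
 * finish_stop() above. waitpid() with WUNTRACED reports the CLD_STOPPED
 * notification sent by do_notify_parent_cldstop(); WCONTINUED reports
 * CLD_CONTINUED after SIGCONT. Assumes a POSIX/glibc environment.
 */
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        pid_t pid = fork();
        int status;

        if (pid == 0) {
                pause();                        /* child just waits for signals */
                _exit(0);
        }

        kill(pid, SIGSTOP);
        waitpid(pid, &status, WUNTRACED);
        if (WIFSTOPPED(status))
                printf("child stopped by signal %d\n", WSTOPSIG(status));

        kill(pid, SIGCONT);
        waitpid(pid, &status, WCONTINUED);
        if (WIFCONTINUED(status))
                printf("child continued\n");

        kill(pid, SIGKILL);                     /* clean up */
        waitpid(pid, &status, 0);
        return 0;
}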
 
1749
/*
1750
 * Do appropriate magic when group_stop_count > 0.
1751
 * We return nonzero if we stopped, after releasing the siglock.
1752
 * We return zero if we still hold the siglock and should look
1753
 * for another signal without checking group_stop_count again.
1754
 */
1755
static int handle_group_stop(void)
1756
{
1757
        int stop_count;
1758
 
1759
        if (current->signal->group_exit_task == current) {
1760
                /*
1761
                 * Group stop is so we can do a core dump,
1762
                 * We are the initiating thread, so get on with it.
1763
                 */
1764
                current->signal->group_exit_task = NULL;
1765
                return 0;
1766
        }
1767
 
1768
        if (current->signal->flags & SIGNAL_GROUP_EXIT)
1769
                /*
1770
                 * Group stop is so another thread can do a core dump,
1771
                 * or else we are racing against a death signal.
1772
                 * Just punt the stop so we can get the next signal.
1773
                 */
1774
                return 0;
1775
 
1776
        /*
1777
         * There is a group stop in progress.  We stop
1778
         * without any associated signal being in our queue.
1779
         */
1780
        stop_count = --current->signal->group_stop_count;
1781
        if (stop_count == 0)
1782
                current->signal->flags = SIGNAL_STOP_STOPPED;
1783
        current->exit_code = current->signal->group_exit_code;
1784
        set_current_state(TASK_STOPPED);
1785
        spin_unlock_irq(&current->sighand->siglock);
1786
        finish_stop(stop_count);
1787
        return 1;
1788
}
1789
 
1790
int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1791
                          struct pt_regs *regs, void *cookie)
1792
{
1793
        sigset_t *mask = &current->blocked;
1794
        int signr = 0;
1795
 
1796
        try_to_freeze();
1797
 
1798
relock:
1799
        spin_lock_irq(&current->sighand->siglock);
1800
        for (;;) {
1801
                struct k_sigaction *ka;
1802
 
1803
                if (unlikely(current->signal->group_stop_count > 0) &&
1804
                    handle_group_stop())
1805
                        goto relock;
1806
 
1807
                signr = dequeue_signal(current, mask, info);
1808
 
1809
                if (!signr)
1810
                        break; /* will return 0 */
1811
 
1812
                if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
1813
                        ptrace_signal_deliver(regs, cookie);
1814
 
1815
                        /* Let the debugger run.  */
1816
                        ptrace_stop(signr, signr, info);
1817
 
1818
                        /* We're back.  Did the debugger cancel the sig?  */
1819
                        signr = current->exit_code;
1820
                        if (signr == 0)
1821
                                continue;
1822
 
1823
                        current->exit_code = 0;
1824
 
1825
                        /* Update the siginfo structure if the signal has
1826
                           changed.  If the debugger wanted something
1827
                           specific in the siginfo structure then it should
1828
                           have updated *info via PTRACE_SETSIGINFO.  */
1829
                        if (signr != info->si_signo) {
1830
                                info->si_signo = signr;
1831
                                info->si_errno = 0;
1832
                                info->si_code = SI_USER;
1833
                                info->si_pid = task_pid_vnr(current->parent);
1834
                                info->si_uid = current->parent->uid;
1835
                        }
1836
 
1837
                        /* If the (new) signal is now blocked, requeue it.  */
1838
                        if (sigismember(&current->blocked, signr)) {
1839
                                specific_send_sig_info(signr, info, current);
1840
                                continue;
1841
                        }
1842
                }
1843
 
1844
                ka = &current->sighand->action[signr-1];
1845
                if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
1846
                        continue;
1847
                if (ka->sa.sa_handler != SIG_DFL) {
1848
                        /* Run the handler.  */
1849
                        *return_ka = *ka;
1850
 
1851
                        if (ka->sa.sa_flags & SA_ONESHOT)
1852
                                ka->sa.sa_handler = SIG_DFL;
1853
 
1854
                        break; /* will return non-zero "signr" value */
1855
                }
1856
 
1857
                /*
1858
                 * Now we are doing the default action for this signal.
1859
                 */
1860
                if (sig_kernel_ignore(signr)) /* Default is nothing. */
1861
                        continue;
1862
 
1863
                /*
1864
                 * Global init gets no signals it doesn't want.
1865
                 */
1866
                if (is_global_init(current))
1867
                        continue;
1868
 
1869
                if (sig_kernel_stop(signr)) {
1870
                        /*
1871
                         * The default action is to stop all threads in
1872
                         * the thread group.  The job control signals
1873
                         * do nothing in an orphaned pgrp, but SIGSTOP
1874
                         * always works.  Note that siglock needs to be
1875
                         * dropped during the call to is_orphaned_pgrp()
1876
                         * because of lock ordering with tasklist_lock.
1877
                         * This allows an intervening SIGCONT to be posted.
1878
                         * We need to check for that and bail out if necessary.
1879
                         */
1880
                        if (signr != SIGSTOP) {
1881
                                spin_unlock_irq(&current->sighand->siglock);
1882
 
1883
                                /* signals can be posted during this window */
1884
 
1885
                                if (is_current_pgrp_orphaned())
1886
                                        goto relock;
1887
 
1888
                                spin_lock_irq(&current->sighand->siglock);
1889
                        }
1890
 
1891
                        if (likely(do_signal_stop(signr))) {
1892
                                /* It released the siglock.  */
1893
                                goto relock;
1894
                        }
1895
 
1896
                        /*
1897
                         * We didn't actually stop, due to a race
1898
                         * with SIGCONT or something like that.
1899
                         */
1900
                        continue;
1901
                }
1902
 
1903
                spin_unlock_irq(&current->sighand->siglock);
1904
 
1905
                /*
1906
                 * Anything else is fatal, maybe with a core dump.
1907
                 */
1908
                current->flags |= PF_SIGNALED;
1909
                if ((signr != SIGKILL) && print_fatal_signals)
1910
                        print_fatal_signal(regs, signr);
1911
                if (sig_kernel_coredump(signr)) {
1912
                        /*
1913
                         * If it was able to dump core, this kills all
1914
                         * other threads in the group and synchronizes with
1915
                         * their demise.  If we lost the race with another
1916
                         * thread getting here, it set group_exit_code
1917
                         * first and our do_group_exit call below will use
1918
                         * that value and ignore the one we pass it.
1919
                         */
1920
                        do_coredump((long)signr, signr, regs);
1921
                }
1922
 
1923
                /*
1924
                 * Death signals, no core dump.
1925
                 */
1926
                do_group_exit(signr);
1927
                /* NOTREACHED */
1928
        }
1929
        spin_unlock_irq(&current->sighand->siglock);
1930
        return signr;
1931
}
1932
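/*
 * Illustrative sketch (user space, not part of this kernel file): the
 * signal-delivery stop taken in ptrace_stop()/get_signal_to_deliver()
 * above, seen from a tracer. The tracee raises SIGUSR1, the tracer observes
 * the stop through waitpid() and resumes it with signal 0, which cancels
 * delivery (the "Did the debugger cancel the sig?" path). Assumes a Linux
 * user space with <sys/ptrace.h>.
 */
#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        pid_t pid = fork();
        int status;

        if (pid == 0) {
                ptrace(PTRACE_TRACEME, 0, NULL, NULL);
                raise(SIGUSR1);         /* signal-delivery stop, reported to the tracer */
                _exit(0);               /* reached because the tracer cancels the signal */
        }

        waitpid(pid, &status, 0);
        if (WIFSTOPPED(status))
                printf("tracee stopped by signal %d\n", WSTOPSIG(status));

        /* Resume with signal 0: the dequeued SIGUSR1 is discarded. */
        ptrace(PTRACE_CONT, pid, NULL, 0);
        waitpid(pid, &status, 0);
        return 0;
}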
 
1933
EXPORT_SYMBOL(recalc_sigpending);
1934
EXPORT_SYMBOL_GPL(dequeue_signal);
1935
EXPORT_SYMBOL(flush_signals);
1936
EXPORT_SYMBOL(force_sig);
1937
EXPORT_SYMBOL(kill_proc);
1938
EXPORT_SYMBOL(ptrace_notify);
1939
EXPORT_SYMBOL(send_sig);
1940
EXPORT_SYMBOL(send_sig_info);
1941
EXPORT_SYMBOL(sigprocmask);
1942
EXPORT_SYMBOL(block_all_signals);
1943
EXPORT_SYMBOL(unblock_all_signals);
1944
 
1945
 
1946
/*
1947
 * System call entry points.
1948
 */
1949
 
1950
asmlinkage long sys_restart_syscall(void)
1951
{
1952
        struct restart_block *restart = &current_thread_info()->restart_block;
1953
        return restart->fn(restart);
1954
}
1955
 
1956
long do_no_restart_syscall(struct restart_block *param)
1957
{
1958
        return -EINTR;
1959
}
1960
 
1961
/*
1962
 * We don't need to get the kernel lock - this is all local to this
1963
 * particular thread.. (and that's good, because this is _heavily_
1964
 * used by various programs)
1965
 */
1966
 
1967
/*
1968
 * This is also useful for kernel threads that want to temporarily
1969
 * (or permanently) block certain signals.
1970
 *
1971
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
1972
 * interface happily blocks "unblockable" signals like SIGKILL
1973
 * and friends.
1974
 */
1975
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
1976
{
1977
        int error;
1978
 
1979
        spin_lock_irq(&current->sighand->siglock);
1980
        if (oldset)
1981
                *oldset = current->blocked;
1982
 
1983
        error = 0;
1984
        switch (how) {
1985
        case SIG_BLOCK:
1986
                sigorsets(&current->blocked, &current->blocked, set);
1987
                break;
1988
        case SIG_UNBLOCK:
1989
                signandsets(&current->blocked, &current->blocked, set);
1990
                break;
1991
        case SIG_SETMASK:
1992
                current->blocked = *set;
1993
                break;
1994
        default:
1995
                error = -EINVAL;
1996
        }
1997
        recalc_sigpending();
1998
        spin_unlock_irq(&current->sighand->siglock);
1999
 
2000
        return error;
2001
}
2002
 
2003
asmlinkage long
2004
sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
2005
{
2006
        int error = -EINVAL;
2007
        sigset_t old_set, new_set;
2008
 
2009
        /* XXX: Don't preclude handling different sized sigset_t's.  */
2010
        if (sigsetsize != sizeof(sigset_t))
2011
                goto out;
2012
 
2013
        if (set) {
2014
                error = -EFAULT;
2015
                if (copy_from_user(&new_set, set, sizeof(*set)))
2016
                        goto out;
2017
                sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2018
 
2019
                error = sigprocmask(how, &new_set, &old_set);
2020
                if (error)
2021
                        goto out;
2022
                if (oset)
2023
                        goto set_old;
2024
        } else if (oset) {
2025
                spin_lock_irq(&current->sighand->siglock);
2026
                old_set = current->blocked;
2027
                spin_unlock_irq(&current->sighand->siglock);
2028
 
2029
        set_old:
2030
                error = -EFAULT;
2031
                if (copy_to_user(oset, &old_set, sizeof(*oset)))
2032
                        goto out;
2033
        }
2034
        error = 0;
2035
out:
2036
        return error;
2037
}
2038
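/*
 * Illustrative sketch (user space, not part of this kernel file): the
 * SIG_BLOCK/SIG_SETMASK cases above as seen through the glibc sigprocmask()
 * wrapper for rt_sigprocmask. Note that SIGKILL and SIGSTOP are silently
 * removed from the requested set, as done by sigdelsetmask() above.
 */
#include <signal.h>

int main(void)
{
        sigset_t block, old;

        sigemptyset(&block);
        sigaddset(&block, SIGINT);

        /* SIG_BLOCK: OR the new set into the current mask. */
        sigprocmask(SIG_BLOCK, &block, &old);

        /* ... critical section: SIGINT stays pending instead of interrupting ... */

        /* SIG_SETMASK: restore the previous mask exactly. */
        sigprocmask(SIG_SETMASK, &old, NULL);
        return 0;
}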
 
2039
long do_sigpending(void __user *set, unsigned long sigsetsize)
2040
{
2041
        long error = -EINVAL;
2042
        sigset_t pending;
2043
 
2044
        if (sigsetsize > sizeof(sigset_t))
2045
                goto out;
2046
 
2047
        spin_lock_irq(&current->sighand->siglock);
2048
        sigorsets(&pending, &current->pending.signal,
2049
                  &current->signal->shared_pending.signal);
2050
        spin_unlock_irq(&current->sighand->siglock);
2051
 
2052
        /* Outside the lock because only this thread touches it.  */
2053
        sigandsets(&pending, &current->blocked, &pending);
2054
 
2055
        error = -EFAULT;
2056
        if (!copy_to_user(set, &pending, sigsetsize))
2057
                error = 0;
2058
 
2059
out:
2060
        return error;
2061
}
2062
 
2063
asmlinkage long
2064
sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
2065
{
2066
        return do_sigpending(set, sigsetsize);
2067
}
2068
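/*
 * Illustrative sketch (user space, not part of this kernel file):
 * sigpending() reports the union of per-thread and shared pending signals
 * computed by do_sigpending() above, restricted to those currently blocked.
 */
#include <signal.h>
#include <stdio.h>

int main(void)
{
        sigset_t block, pending;

        sigemptyset(&block);
        sigaddset(&block, SIGUSR1);
        sigprocmask(SIG_BLOCK, &block, NULL);

        raise(SIGUSR1);                 /* queued, but blocked so not delivered */

        sigpending(&pending);
        printf("SIGUSR1 pending: %d\n", sigismember(&pending, SIGUSR1));
        return 0;
}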
 
2069
#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2070
 
2071
int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2072
{
2073
        int err;
2074
 
2075
        if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2076
                return -EFAULT;
2077
        if (from->si_code < 0)
2078
                return __copy_to_user(to, from, sizeof(siginfo_t))
2079
                        ? -EFAULT : 0;
2080
        /*
2081
         * If you change siginfo_t structure, please be sure
2082
         * this code is fixed accordingly.
2083
         * Please remember to update the signalfd_copyinfo() function
2084
         * inside fs/signalfd.c too, in case siginfo_t changes.
2085
         * It should never copy any pad contained in the structure
2086
         * to avoid security leaks, but must copy the generic
2087
         * 3 ints plus the relevant union member.
2088
         */
2089
        err = __put_user(from->si_signo, &to->si_signo);
2090
        err |= __put_user(from->si_errno, &to->si_errno);
2091
        err |= __put_user((short)from->si_code, &to->si_code);
2092
        switch (from->si_code & __SI_MASK) {
2093
        case __SI_KILL:
2094
                err |= __put_user(from->si_pid, &to->si_pid);
2095
                err |= __put_user(from->si_uid, &to->si_uid);
2096
                break;
2097
        case __SI_TIMER:
2098
                 err |= __put_user(from->si_tid, &to->si_tid);
2099
                 err |= __put_user(from->si_overrun, &to->si_overrun);
2100
                 err |= __put_user(from->si_ptr, &to->si_ptr);
2101
                break;
2102
        case __SI_POLL:
2103
                err |= __put_user(from->si_band, &to->si_band);
2104
                err |= __put_user(from->si_fd, &to->si_fd);
2105
                break;
2106
        case __SI_FAULT:
2107
                err |= __put_user(from->si_addr, &to->si_addr);
2108
#ifdef __ARCH_SI_TRAPNO
2109
                err |= __put_user(from->si_trapno, &to->si_trapno);
2110
#endif
2111
                break;
2112
        case __SI_CHLD:
2113
                err |= __put_user(from->si_pid, &to->si_pid);
2114
                err |= __put_user(from->si_uid, &to->si_uid);
2115
                err |= __put_user(from->si_status, &to->si_status);
2116
                err |= __put_user(from->si_utime, &to->si_utime);
2117
                err |= __put_user(from->si_stime, &to->si_stime);
2118
                break;
2119
        case __SI_RT: /* This is not generated by the kernel as of now. */
2120
        case __SI_MESGQ: /* But this is */
2121
                err |= __put_user(from->si_pid, &to->si_pid);
2122
                err |= __put_user(from->si_uid, &to->si_uid);
2123
                err |= __put_user(from->si_ptr, &to->si_ptr);
2124
                break;
2125
        default: /* this is just in case for now ... */
2126
                err |= __put_user(from->si_pid, &to->si_pid);
2127
                err |= __put_user(from->si_uid, &to->si_uid);
2128
                break;
2129
        }
2130
        return err;
2131
}
2132
 
2133
#endif
2134
 
2135
asmlinkage long
2136
sys_rt_sigtimedwait(const sigset_t __user *uthese,
2137
                    siginfo_t __user *uinfo,
2138
                    const struct timespec __user *uts,
2139
                    size_t sigsetsize)
2140
{
2141
        int ret, sig;
2142
        sigset_t these;
2143
        struct timespec ts;
2144
        siginfo_t info;
2145
        long timeout = 0;
2146
 
2147
        /* XXX: Don't preclude handling different sized sigset_t's.  */
2148
        if (sigsetsize != sizeof(sigset_t))
2149
                return -EINVAL;
2150
 
2151
        if (copy_from_user(&these, uthese, sizeof(these)))
2152
                return -EFAULT;
2153
 
2154
        /*
2155
         * Invert the set of allowed signals to get those we
2156
         * want to block.
2157
         */
2158
        sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2159
        signotset(&these);
2160
 
2161
        if (uts) {
2162
                if (copy_from_user(&ts, uts, sizeof(ts)))
2163
                        return -EFAULT;
2164
                if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2165
                    || ts.tv_sec < 0)
2166
                        return -EINVAL;
2167
        }
2168
 
2169
        spin_lock_irq(&current->sighand->siglock);
2170
        sig = dequeue_signal(current, &these, &info);
2171
        if (!sig) {
2172
                timeout = MAX_SCHEDULE_TIMEOUT;
2173
                if (uts)
2174
                        timeout = (timespec_to_jiffies(&ts)
2175
                                   + (ts.tv_sec || ts.tv_nsec));
2176
 
2177
                if (timeout) {
2178
                        /* None ready -- temporarily unblock those we're
2179
                         * interested in while we are sleeping, so that we'll
2180
                         * be awakened when they arrive.  */
2181
                        current->real_blocked = current->blocked;
2182
                        sigandsets(&current->blocked, &current->blocked, &these);
2183
                        recalc_sigpending();
2184
                        spin_unlock_irq(&current->sighand->siglock);
2185
 
2186
                        timeout = schedule_timeout_interruptible(timeout);
2187
 
2188
                        spin_lock_irq(&current->sighand->siglock);
2189
                        sig = dequeue_signal(current, &these, &info);
2190
                        current->blocked = current->real_blocked;
2191
                        siginitset(&current->real_blocked, 0);
2192
                        recalc_sigpending();
2193
                }
2194
        }
2195
        spin_unlock_irq(&current->sighand->siglock);
2196
 
2197
        if (sig) {
2198
                ret = sig;
2199
                if (uinfo) {
2200
                        if (copy_siginfo_to_user(uinfo, &info))
2201
                                ret = -EFAULT;
2202
                }
2203
        } else {
2204
                ret = -EAGAIN;
2205
                if (timeout)
2206
                        ret = -EINTR;
2207
        }
2208
 
2209
        return ret;
2210
}
2211
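/*
 * Illustrative sketch (user space, not part of this kernel file):
 * sigtimedwait() as implemented by sys_rt_sigtimedwait() above. The signal
 * must be blocked first, otherwise it is delivered the ordinary way instead
 * of being dequeued here; the timeout maps to the
 * schedule_timeout_interruptible() call above. Assumes a POSIX/glibc
 * environment.
 */
#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
        sigset_t set;
        siginfo_t info;
        struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
        int sig;

        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        sigprocmask(SIG_BLOCK, &set, NULL);

        raise(SIGUSR1);                 /* make one instance pending */

        sig = sigtimedwait(&set, &info, &ts);
        if (sig > 0)
                printf("got signal %d from pid %d\n", sig, (int)info.si_pid);
        else
                perror("sigtimedwait"); /* EAGAIN on timeout, EINTR if interrupted */
        return 0;
}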
 
2212
asmlinkage long
2213
sys_kill(int pid, int sig)
2214
{
2215
        struct siginfo info;
2216
 
2217
        info.si_signo = sig;
2218
        info.si_errno = 0;
2219
        info.si_code = SI_USER;
2220
        info.si_pid = task_tgid_vnr(current);
2221
        info.si_uid = current->uid;
2222
 
2223
        return kill_something_info(sig, &info, pid);
2224
}
2225
 
2226
static int do_tkill(int tgid, int pid, int sig)
2227
{
2228
        int error;
2229
        struct siginfo info;
2230
        struct task_struct *p;
2231
 
2232
        error = -ESRCH;
2233
        info.si_signo = sig;
2234
        info.si_errno = 0;
2235
        info.si_code = SI_TKILL;
2236
        info.si_pid = task_tgid_vnr(current);
2237
        info.si_uid = current->uid;
2238
 
2239
        read_lock(&tasklist_lock);
2240
        p = find_task_by_vpid(pid);
2241
        if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
2242
                error = check_kill_permission(sig, &info, p);
2243
                /*
2244
                 * The null signal is a permissions and process existence
2245
                 * probe.  No signal is actually delivered.
2246
                 */
2247
                if (!error && sig && p->sighand) {
2248
                        spin_lock_irq(&p->sighand->siglock);
2249
                        handle_stop_signal(sig, p);
2250
                        error = specific_send_sig_info(sig, &info, p);
2251
                        spin_unlock_irq(&p->sighand->siglock);
2252
                }
2253
        }
2254
        read_unlock(&tasklist_lock);
2255
 
2256
        return error;
2257
}
2258
 
2259
/**
2260
 *  sys_tgkill - send signal to one specific thread
2261
 *  @tgid: the thread group ID of the thread
2262
 *  @pid: the PID of the thread
2263
 *  @sig: signal to be sent
2264
 *
2265
 *  This syscall also checks the @tgid and returns -ESRCH even if the PID
2266
 *  exists but it's not belonging to the target process anymore. This
2267
 *  method solves the problem of threads exiting and PIDs getting reused.
2268
 */
2269
asmlinkage long sys_tgkill(int tgid, int pid, int sig)
2270
{
2271
        /* This is only valid for single tasks */
2272
        if (pid <= 0 || tgid <= 0)
2273
                return -EINVAL;
2274
 
2275
        return do_tkill(tgid, pid, sig);
2276
}
2277
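/*
 * Illustrative sketch (user space, not part of this kernel file): calling
 * sys_tgkill() through the raw syscall interface (glibc of this era has no
 * dedicated wrapper). Sending signal 0 performs only the existence and
 * permission probe described in do_tkill() above.
 */
#include <signal.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        pid_t tgid = getpid();
        pid_t tid  = syscall(SYS_gettid);

        /* Probe our own thread: nothing is delivered, only checks are done. */
        if (syscall(SYS_tgkill, tgid, tid, 0) == 0)
                printf("thread %d in group %d exists\n", (int)tid, (int)tgid);
        else
                perror("tgkill");
        return 0;
}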
 
2278
/*
2279
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
2280
 */
2281
asmlinkage long
2282
sys_tkill(int pid, int sig)
2283
{
2284
        /* This is only valid for single tasks */
2285
        if (pid <= 0)
2286
                return -EINVAL;
2287
 
2288
        return do_tkill(0, pid, sig);
2289
}
2290
 
2291
asmlinkage long
2292
sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
2293
{
2294
        siginfo_t info;
2295
 
2296
        if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2297
                return -EFAULT;
2298
 
2299
        /* Not even root can pretend to send signals from the kernel.
2300
           Nor can they impersonate a kill(), which adds source info.  */
2301
        if (info.si_code >= 0)
2302
                return -EPERM;
2303
        info.si_signo = sig;
2304
 
2305
        /* POSIX.1b doesn't mention process groups.  */
2306
        return kill_proc_info(sig, &info, pid);
2307
}
2308
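/*
 * Illustrative sketch (user space, not part of this kernel file): the usual
 * path into sys_rt_sigqueueinfo() is sigqueue(), which fills in a siginfo
 * with si_code = SI_QUEUE (negative, so the si_code >= 0 check above
 * passes) and carries one word of payload in si_value. printf() in a
 * handler is not async-signal-safe; it is used here only for brevity.
 */
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void handler(int sig, siginfo_t *info, void *ctx)
{
        printf("sig %d, payload %d\n", sig, info->si_value.sival_int);
}

int main(void)
{
        struct sigaction sa;
        union sigval val = { .sival_int = 42 };

        sigemptyset(&sa.sa_mask);
        sa.sa_sigaction = handler;
        sa.sa_flags = SA_SIGINFO;
        sigaction(SIGUSR1, &sa, NULL);

        /* The handler runs on return to user space from the syscall. */
        sigqueue(getpid(), SIGUSR1, val);
        return 0;
}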
 
2309
int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2310
{
2311
        struct k_sigaction *k;
2312
        sigset_t mask;
2313
 
2314
        if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2315
                return -EINVAL;
2316
 
2317
        k = &current->sighand->action[sig-1];
2318
 
2319
        spin_lock_irq(&current->sighand->siglock);
2320
        if (oact)
2321
                *oact = *k;
2322
 
2323
        if (act) {
2324
                sigdelsetmask(&act->sa.sa_mask,
2325
                              sigmask(SIGKILL) | sigmask(SIGSTOP));
2326
                *k = *act;
2327
                /*
2328
                 * POSIX 3.3.1.3:
2329
                 *  "Setting a signal action to SIG_IGN for a signal that is
2330
                 *   pending shall cause the pending signal to be discarded,
2331
                 *   whether or not it is blocked."
2332
                 *
2333
                 *  "Setting a signal action to SIG_DFL for a signal that is
2334
                 *   pending and whose default action is to ignore the signal
2335
                 *   (for example, SIGCHLD), shall cause the pending signal to
2336
                 *   be discarded, whether or not it is blocked"
2337
                 */
2338
                if (act->sa.sa_handler == SIG_IGN ||
2339
                   (act->sa.sa_handler == SIG_DFL && sig_kernel_ignore(sig))) {
2340
                        struct task_struct *t = current;
2341
                        sigemptyset(&mask);
2342
                        sigaddset(&mask, sig);
2343
                        rm_from_queue_full(&mask, &t->signal->shared_pending);
2344
                        do {
2345
                                rm_from_queue_full(&mask, &t->pending);
2346
                                t = next_thread(t);
2347
                        } while (t != current);
2348
                }
2349
        }
2350
 
2351
        spin_unlock_irq(&current->sighand->siglock);
2352
        return 0;
2353
}
2354
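/*
 * Illustrative sketch (user space, not part of this kernel file): the POSIX
 * 3.3.1.3 rule quoted above, observed from user space. A blocked, pending
 * signal is discarded the moment its action becomes SIG_IGN
 * (rm_from_queue_full() above).
 */
#include <signal.h>
#include <stdio.h>

int main(void)
{
        sigset_t block, pending;

        sigemptyset(&block);
        sigaddset(&block, SIGUSR1);
        sigprocmask(SIG_BLOCK, &block, NULL);

        raise(SIGUSR1);                 /* now pending and blocked */

        signal(SIGUSR1, SIG_IGN);       /* pending instance is dropped */

        sigpending(&pending);
        printf("still pending: %d\n", sigismember(&pending, SIGUSR1)); /* prints 0 */
        return 0;
}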
 
2355
int
2356
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2357
{
2358
        stack_t oss;
2359
        int error;
2360
 
2361
        if (uoss) {
2362
                oss.ss_sp = (void __user *) current->sas_ss_sp;
2363
                oss.ss_size = current->sas_ss_size;
2364
                oss.ss_flags = sas_ss_flags(sp);
2365
        }
2366
 
2367
        if (uss) {
2368
                void __user *ss_sp;
2369
                size_t ss_size;
2370
                int ss_flags;
2371
 
2372
                error = -EFAULT;
2373
                if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
2374
                    || __get_user(ss_sp, &uss->ss_sp)
2375
                    || __get_user(ss_flags, &uss->ss_flags)
2376
                    || __get_user(ss_size, &uss->ss_size))
2377
                        goto out;
2378
 
2379
                error = -EPERM;
2380
                if (on_sig_stack(sp))
2381
                        goto out;
2382
 
2383
                error = -EINVAL;
2384
                /*
2385
                 *
2386
                 * Note - this code used to test ss_flags incorrectly
2387
                 *        old code may have been written using ss_flags==0
2388
                 *        to mean ss_flags==SS_ONSTACK (as this was the only
2389
                 *        way that worked) - this fix preserves that older
2390
                 *        mechanism
2391
                 */
2392
                if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2393
                        goto out;
2394
 
2395
                if (ss_flags == SS_DISABLE) {
2396
                        ss_size = 0;
2397
                        ss_sp = NULL;
2398
                } else {
2399
                        error = -ENOMEM;
2400
                        if (ss_size < MINSIGSTKSZ)
2401
                                goto out;
2402
                }
2403
 
2404
                current->sas_ss_sp = (unsigned long) ss_sp;
2405
                current->sas_ss_size = ss_size;
2406
        }
2407
 
2408
        if (uoss) {
2409
                error = -EFAULT;
2410
                if (copy_to_user(uoss, &oss, sizeof(oss)))
2411
                        goto out;
2412
        }
2413
 
2414
        error = 0;
2415
out:
2416
        return error;
2417
}
2418
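/*
 * Illustrative sketch (user space, not part of this kernel file): setting
 * up an alternate signal stack that satisfies the do_sigaltstack() checks
 * above (at least MINSIGSTKSZ bytes; ss_flags of 0, SS_ONSTACK or
 * SS_DISABLE), then delivering a handled signal on it via SA_ONSTACK.
 */
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static void handler(int sig)
{
        /* Runs on the alternate stack because of SA_ONSTACK below. */
        write(STDOUT_FILENO, "on alt stack\n", 13);
}

int main(void)
{
        stack_t ss = { .ss_sp = malloc(SIGSTKSZ),
                       .ss_size = SIGSTKSZ,
                       .ss_flags = 0 };         /* 0 is accepted, see comment above */
        struct sigaction sa;

        if (sigaltstack(&ss, NULL) < 0)
                perror("sigaltstack");

        sigemptyset(&sa.sa_mask);
        sa.sa_handler = handler;
        sa.sa_flags = SA_ONSTACK;
        sigaction(SIGUSR1, &sa, NULL);

        raise(SIGUSR1);
        return 0;
}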
 
2419
#ifdef __ARCH_WANT_SYS_SIGPENDING
2420
 
2421
asmlinkage long
2422
sys_sigpending(old_sigset_t __user *set)
2423
{
2424
        return do_sigpending(set, sizeof(*set));
2425
}
2426
 
2427
#endif
2428
 
2429
#ifdef __ARCH_WANT_SYS_SIGPROCMASK
2430
/* Some platforms have their own version with special arguments; others
2431
   support only sys_rt_sigprocmask.  */
2432
 
2433
asmlinkage long
2434
sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
2435
{
2436
        int error;
2437
        old_sigset_t old_set, new_set;
2438
 
2439
        if (set) {
2440
                error = -EFAULT;
2441
                if (copy_from_user(&new_set, set, sizeof(*set)))
2442
                        goto out;
2443
                new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2444
 
2445
                spin_lock_irq(&current->sighand->siglock);
2446
                old_set = current->blocked.sig[0];
2447
 
2448
                error = 0;
2449
                switch (how) {
2450
                default:
2451
                        error = -EINVAL;
2452
                        break;
2453
                case SIG_BLOCK:
2454
                        sigaddsetmask(&current->blocked, new_set);
2455
                        break;
2456
                case SIG_UNBLOCK:
2457
                        sigdelsetmask(&current->blocked, new_set);
2458
                        break;
2459
                case SIG_SETMASK:
2460
                        current->blocked.sig[0] = new_set;
2461
                        break;
2462
                }
2463
 
2464
                recalc_sigpending();
2465
                spin_unlock_irq(&current->sighand->siglock);
2466
                if (error)
2467
                        goto out;
2468
                if (oset)
2469
                        goto set_old;
2470
        } else if (oset) {
2471
                old_set = current->blocked.sig[0];
2472
        set_old:
2473
                error = -EFAULT;
2474
                if (copy_to_user(oset, &old_set, sizeof(*oset)))
2475
                        goto out;
2476
        }
2477
        error = 0;
2478
out:
2479
        return error;
2480
}
2481
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2482
 
2483
#ifdef __ARCH_WANT_SYS_RT_SIGACTION
2484
asmlinkage long
2485
sys_rt_sigaction(int sig,
2486
                 const struct sigaction __user *act,
2487
                 struct sigaction __user *oact,
2488
                 size_t sigsetsize)
2489
{
2490
        struct k_sigaction new_sa, old_sa;
2491
        int ret = -EINVAL;
2492
 
2493
        /* XXX: Don't preclude handling different sized sigset_t's.  */
2494
        if (sigsetsize != sizeof(sigset_t))
2495
                goto out;
2496
 
2497
        if (act) {
2498
                if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2499
                        return -EFAULT;
2500
        }
2501
 
2502
        ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2503
 
2504
        if (!ret && oact) {
2505
                if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2506
                        return -EFAULT;
2507
        }
2508
out:
2509
        return ret;
2510
}
2511
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2512
 
2513
#ifdef __ARCH_WANT_SYS_SGETMASK
2514
 
2515
/*
2516
 * For backwards compatibility.  Functionality superseded by sigprocmask.
2517
 */
2518
asmlinkage long
2519
sys_sgetmask(void)
2520
{
2521
        /* SMP safe */
2522
        return current->blocked.sig[0];
2523
}
2524
 
2525
asmlinkage long
2526
sys_ssetmask(int newmask)
2527
{
2528
        int old;
2529
 
2530
        spin_lock_irq(&current->sighand->siglock);
2531
        old = current->blocked.sig[0];
2532
 
2533
        siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2534
                                                  sigmask(SIGSTOP)));
2535
        recalc_sigpending();
2536
        spin_unlock_irq(&current->sighand->siglock);
2537
 
2538
        return old;
2539
}
2540
#endif /* __ARCH_WANT_SYS_SGETMASK */
2541
 
2542
#ifdef __ARCH_WANT_SYS_SIGNAL
2543
/*
2544
 * For backwards compatibility.  Functionality superseded by sigaction.
2545
 */
2546
asmlinkage unsigned long
2547
sys_signal(int sig, __sighandler_t handler)
2548
{
2549
        struct k_sigaction new_sa, old_sa;
2550
        int ret;
2551
 
2552
        new_sa.sa.sa_handler = handler;
2553
        new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
2554
        sigemptyset(&new_sa.sa.sa_mask);
2555
 
2556
        ret = do_sigaction(sig, &new_sa, &old_sa);
2557
 
2558
        return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2559
}
2560
#endif /* __ARCH_WANT_SYS_SIGNAL */
2561
 
2562
#ifdef __ARCH_WANT_SYS_PAUSE
2563
 
2564
asmlinkage long
2565
sys_pause(void)
2566
{
2567
        current->state = TASK_INTERRUPTIBLE;
2568
        schedule();
2569
        return -ERESTARTNOHAND;
2570
}
2571
 
2572
#endif
2573
 
2574
#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
2575
asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
2576
{
2577
        sigset_t newset;
2578
 
2579
        /* XXX: Don't preclude handling different sized sigset_t's.  */
2580
        if (sigsetsize != sizeof(sigset_t))
2581
                return -EINVAL;
2582
 
2583
        if (copy_from_user(&newset, unewset, sizeof(newset)))
2584
                return -EFAULT;
2585
        sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2586
 
2587
        spin_lock_irq(&current->sighand->siglock);
2588
        current->saved_sigmask = current->blocked;
2589
        current->blocked = newset;
2590
        recalc_sigpending();
2591
        spin_unlock_irq(&current->sighand->siglock);
2592
 
2593
        current->state = TASK_INTERRUPTIBLE;
2594
        schedule();
2595
        set_thread_flag(TIF_RESTORE_SIGMASK);
2596
        return -ERESTARTNOHAND;
2597
}
2598
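/*
 * Illustrative sketch (user space, not part of this kernel file): the
 * classic sigsuspend() pattern served by sys_rt_sigsuspend() above. The
 * mask is replaced atomically for the duration of the sleep and restored
 * afterwards via saved_sigmask/TIF_RESTORE_SIGMASK.
 */
#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t got_sigusr1;

static void handler(int sig)
{
        got_sigusr1 = 1;
}

int main(void)
{
        sigset_t block, wait_mask;
        struct sigaction sa;

        sigemptyset(&sa.sa_mask);
        sa.sa_handler = handler;
        sa.sa_flags = 0;
        sigaction(SIGUSR1, &sa, NULL);

        sigemptyset(&block);
        sigaddset(&block, SIGUSR1);
        sigprocmask(SIG_BLOCK, &block, &wait_mask);     /* wait_mask leaves SIGUSR1 unblocked */

        raise(SIGUSR1);                 /* pending, still blocked */

        /* Atomically switch to wait_mask and sleep; returns -1/EINTR after the handler. */
        while (!got_sigusr1)
                sigsuspend(&wait_mask);

        printf("handler ran\n");
        return 0;
}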
#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
2599
 
2600
__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
2601
{
2602
        return NULL;
2603
}
2604
 
2605
void __init signals_init(void)
2606
{
2607
        sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
2608
}
