OpenCores
URL: https://opencores.org/ocsvn/or1k_soc_on_altera_embedded_dev_kit/or1k_soc_on_altera_embedded_dev_kit/trunk

Subversion repository: or1k_soc_on_altera_embedded_dev_kit
File: trunk/linux-2.6/linux-2.6.24/kernel/softirq.c (blame information for rev 19; all lines from rev 3, author xianfeng)

/*
 *      linux/kernel/softirq.c
 *
 *      Copyright (C) 1992 Linus Torvalds
 *
 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/smp.h>
#include <linux/tick.h>

#include <asm/irq.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear, will it result in better locality
     or will not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */

#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[32] __cacheline_aligned_in_smp;

static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

/*
 * we cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so lets the scheduler to balance
 * the softirq load for us.
 */
static inline void wakeup_softirqd(void)
{
        /* Interrupts are disabled: no need to stop preemption */
        struct task_struct *tsk = __get_cpu_var(ksoftirqd);

        if (tsk && tsk->state != TASK_RUNNING)
                wake_up_process(tsk);
}

/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
static void __local_bh_disable(unsigned long ip)
{
        unsigned long flags;

        WARN_ON_ONCE(in_irq());

        raw_local_irq_save(flags);
        add_preempt_count(SOFTIRQ_OFFSET);
        /*
         * Were softirqs turned off above:
         */
        if (softirq_count() == SOFTIRQ_OFFSET)
                trace_softirqs_off(ip);
        raw_local_irq_restore(flags);
}
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip)
{
        add_preempt_count(SOFTIRQ_OFFSET);
        barrier();
}
#endif /* CONFIG_TRACE_IRQFLAGS */

void local_bh_disable(void)
{
        __local_bh_disable((unsigned long)__builtin_return_address(0));
}

EXPORT_SYMBOL(local_bh_disable);

void __local_bh_enable(void)
{
        WARN_ON_ONCE(in_irq());

        /*
         * softirqs should never be enabled by __local_bh_enable(),
         * it always nests inside local_bh_enable() sections:
         */
        WARN_ON_ONCE(softirq_count() == SOFTIRQ_OFFSET);

        sub_preempt_count(SOFTIRQ_OFFSET);
}
EXPORT_SYMBOL_GPL(__local_bh_enable);

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
        WARN_ON_ONCE(in_irq());
        WARN_ON_ONCE(!irqs_disabled());

        if (softirq_count() == SOFTIRQ_OFFSET)
                trace_softirqs_on((unsigned long)__builtin_return_address(0));
        sub_preempt_count(SOFTIRQ_OFFSET);
}

EXPORT_SYMBOL(_local_bh_enable);

void local_bh_enable(void)
{
#ifdef CONFIG_TRACE_IRQFLAGS
        unsigned long flags;

        WARN_ON_ONCE(in_irq());
#endif
        WARN_ON_ONCE(irqs_disabled());

#ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_save(flags);
#endif
        /*
         * Are softirqs going to be turned on now:
         */
        if (softirq_count() == SOFTIRQ_OFFSET)
                trace_softirqs_on((unsigned long)__builtin_return_address(0));
        /*
         * Keep preemption disabled until we are done with
         * softirq processing:
         */
        sub_preempt_count(SOFTIRQ_OFFSET - 1);

        if (unlikely(!in_interrupt() && local_softirq_pending()))
                do_softirq();

        dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_restore(flags);
#endif
        preempt_check_resched();
}
EXPORT_SYMBOL(local_bh_enable);
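
/*
 * [Editor's note -- not part of the original file] Hedged usage sketch:
 * callers typically pair local_bh_disable()/local_bh_enable() to protect
 * data that is also touched from softirq context on the same CPU. The
 * "my_stats" structure below is hypothetical.
 *
 *      local_bh_disable();             // softirqs held off on this CPU
 *      my_stats.rx_packets++;          // safe against e.g. the NET_RX handler
 *      local_bh_enable();              // may run any softirqs raised meanwhile
 */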

void local_bh_enable_ip(unsigned long ip)
{
#ifdef CONFIG_TRACE_IRQFLAGS
        unsigned long flags;

        WARN_ON_ONCE(in_irq());

        local_irq_save(flags);
#endif
        /*
         * Are softirqs going to be turned on now:
         */
        if (softirq_count() == SOFTIRQ_OFFSET)
                trace_softirqs_on(ip);
        /*
         * Keep preemption disabled until we are done with
         * softirq processing:
         */
        sub_preempt_count(SOFTIRQ_OFFSET - 1);

        if (unlikely(!in_interrupt() && local_softirq_pending()))
                do_softirq();

        dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_restore(flags);
#endif
        preempt_check_resched();
}
EXPORT_SYMBOL(local_bh_enable_ip);

/*
 * We restart softirq processing MAX_SOFTIRQ_RESTART times,
 * and we fall back to softirqd after that.
 *
 * This number has been established via experimentation.
 * The two things to balance is latency against fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_RESTART 10

asmlinkage void __do_softirq(void)
{
        struct softirq_action *h;
        __u32 pending;
        int max_restart = MAX_SOFTIRQ_RESTART;
        int cpu;

        pending = local_softirq_pending();
        account_system_vtime(current);

        __local_bh_disable((unsigned long)__builtin_return_address(0));
        trace_softirq_enter();

        cpu = smp_processor_id();
restart:
        /* Reset the pending bitmask before enabling irqs */
        set_softirq_pending(0);

        local_irq_enable();

        h = softirq_vec;

        do {
                if (pending & 1) {
                        h->action(h);
                        rcu_bh_qsctr_inc(cpu);
                }
                h++;
                pending >>= 1;
        } while (pending);

        local_irq_disable();

        pending = local_softirq_pending();
        if (pending && --max_restart)
                goto restart;

        if (pending)
                wakeup_softirqd();

        trace_softirq_exit();

        account_system_vtime(current);
        _local_bh_enable();
}

#ifndef __ARCH_HAS_DO_SOFTIRQ

asmlinkage void do_softirq(void)
{
        __u32 pending;
        unsigned long flags;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        pending = local_softirq_pending();

        if (pending)
                __do_softirq();

        local_irq_restore(flags);
}

#endif

/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
        __irq_enter();
#ifdef CONFIG_NO_HZ
        if (idle_cpu(smp_processor_id()))
                tick_nohz_update_jiffies();
#endif
}

#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
# define invoke_softirq()       __do_softirq()
#else
# define invoke_softirq()       do_softirq()
#endif

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
        account_system_vtime(current);
        trace_hardirq_exit();
        sub_preempt_count(IRQ_EXIT_OFFSET);
        if (!in_interrupt() && local_softirq_pending())
                invoke_softirq();

#ifdef CONFIG_NO_HZ
        /* Make sure that timer wheel updates are propagated */
        if (!in_interrupt() && idle_cpu(smp_processor_id()) && !need_resched())
                tick_nohz_stop_sched_tick();
#endif
        preempt_enable_no_resched();
}
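
/*
 * [Editor's note -- not part of the original file] Illustrative sketch of
 * how an architecture's interrupt entry path is expected to bracket handler
 * dispatch with irq_enter()/irq_exit(); the do_IRQ() shape below is a
 * simplified, hypothetical example, not any particular port's code.
 *
 *      void do_IRQ(unsigned int irq)
 *      {
 *              irq_enter();                    // mark hardirq context
 *              generic_handle_irq(irq);        // run the registered handlers
 *              irq_exit();                     // may invoke pending softirqs
 *      }
 */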

/*
 * This function must run with irqs disabled!
 */
inline fastcall void raise_softirq_irqoff(unsigned int nr)
{
        __raise_softirq_irqoff(nr);

        /*
         * If we're in an interrupt or softirq, we're done
         * (this also catches softirq-disabled code). We will
         * actually run the softirq once we return from
         * the irq or softirq.
         *
         * Otherwise we wake up ksoftirqd to make sure we
         * schedule the softirq soon.
         */
        if (!in_interrupt())
                wakeup_softirqd();
}

void fastcall raise_softirq(unsigned int nr)
{
        unsigned long flags;

        local_irq_save(flags);
        raise_softirq_irqoff(nr);
        local_irq_restore(flags);
}

void open_softirq(int nr, void (*action)(struct softirq_action*), void *data)
{
        softirq_vec[nr].data = data;
        softirq_vec[nr].action = action;
}
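
/*
 * [Editor's note -- not part of the original file] Hedged usage sketch:
 * a subsystem registers its handler once at init time and then raises the
 * softirq whenever it has work, roughly as the 2.6.24 networking code does
 * in net/core/dev.c:
 *
 *      open_softirq(NET_RX_SOFTIRQ, net_rx_action, NULL);      // at boot
 *      ...
 *      raise_softirq(NET_RX_SOFTIRQ);  // whenever packets are queued
 */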

/* Tasklets */
struct tasklet_head
{
        struct tasklet_struct *list;
};

/* Some compilers disobey section attribute on statics when not
   initialized -- RR */
static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec) = { NULL };
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec) = { NULL };

void fastcall __tasklet_schedule(struct tasklet_struct *t)
{
        unsigned long flags;

        local_irq_save(flags);
        t->next = __get_cpu_var(tasklet_vec).list;
        __get_cpu_var(tasklet_vec).list = t;
        raise_softirq_irqoff(TASKLET_SOFTIRQ);
        local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_schedule);

void fastcall __tasklet_hi_schedule(struct tasklet_struct *t)
{
        unsigned long flags;

        local_irq_save(flags);
        t->next = __get_cpu_var(tasklet_hi_vec).list;
        __get_cpu_var(tasklet_hi_vec).list = t;
        raise_softirq_irqoff(HI_SOFTIRQ);
        local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_hi_schedule);

static void tasklet_action(struct softirq_action *a)
{
        struct tasklet_struct *list;

        local_irq_disable();
        list = __get_cpu_var(tasklet_vec).list;
        __get_cpu_var(tasklet_vec).list = NULL;
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
                                        BUG();
                                t->func(t->data);
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                local_irq_disable();
                t->next = __get_cpu_var(tasklet_vec).list;
                __get_cpu_var(tasklet_vec).list = t;
                __raise_softirq_irqoff(TASKLET_SOFTIRQ);
                local_irq_enable();
        }
}

static void tasklet_hi_action(struct softirq_action *a)
{
        struct tasklet_struct *list;

        local_irq_disable();
        list = __get_cpu_var(tasklet_hi_vec).list;
        __get_cpu_var(tasklet_hi_vec).list = NULL;
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
                                        BUG();
                                t->func(t->data);
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                local_irq_disable();
                t->next = __get_cpu_var(tasklet_hi_vec).list;
                __get_cpu_var(tasklet_hi_vec).list = t;
                __raise_softirq_irqoff(HI_SOFTIRQ);
                local_irq_enable();
        }
}


void tasklet_init(struct tasklet_struct *t,
                  void (*func)(unsigned long), unsigned long data)
{
        t->next = NULL;
        t->state = 0;
        atomic_set(&t->count, 0);
        t->func = func;
        t->data = data;
}

EXPORT_SYMBOL(tasklet_init);

void tasklet_kill(struct tasklet_struct *t)
{
        if (in_interrupt())
                printk("Attempt to kill tasklet from interrupt\n");

        while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
                do
                        yield();
                while (test_bit(TASKLET_STATE_SCHED, &t->state));
        }
        tasklet_unlock_wait(t);
        clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);
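
/*
 * [Editor's note -- not part of the original file] Hedged lifecycle sketch;
 * my_work/my_tasklet are hypothetical driver names, while the calls
 * themselves match this file and <linux/interrupt.h>:
 *
 *      static void my_work(unsigned long data);
 *      static struct tasklet_struct my_tasklet;
 *
 *      tasklet_init(&my_tasklet, my_work, 0);  // once, at probe time
 *      tasklet_schedule(&my_tasklet);          // from the hard-irq handler
 *      tasklet_kill(&my_tasklet);              // at teardown, process context
 */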

void __init softirq_init(void)
{
        open_softirq(TASKLET_SOFTIRQ, tasklet_action, NULL);
        open_softirq(HI_SOFTIRQ, tasklet_hi_action, NULL);
}

static int ksoftirqd(void * __bind_cpu)
{
        set_current_state(TASK_INTERRUPTIBLE);

        while (!kthread_should_stop()) {
                preempt_disable();
                if (!local_softirq_pending()) {
                        preempt_enable_no_resched();
                        schedule();
                        preempt_disable();
                }

                __set_current_state(TASK_RUNNING);

                while (local_softirq_pending()) {
                        /* Preempt disable stops cpu going offline.
                           If already offline, we'll be on wrong CPU:
                           don't process */
                        if (cpu_is_offline((long)__bind_cpu))
                                goto wait_to_die;
                        do_softirq();
                        preempt_enable_no_resched();
                        cond_resched();
                        preempt_disable();
                }
                preempt_enable();
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;

wait_to_die:
        preempt_enable();
        /* Wait for kthread_stop */
        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                schedule();
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
        struct tasklet_struct **i;

        BUG_ON(cpu_online(cpu));
        BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

        if (!test_bit(TASKLET_STATE_SCHED, &t->state))
                return;

        /* CPU is dead, so no lock needed. */
        for (i = &per_cpu(tasklet_vec, cpu).list; *i; i = &(*i)->next) {
                if (*i == t) {
                        *i = t->next;
                        return;
                }
        }
        BUG();
}

static void takeover_tasklets(unsigned int cpu)
{
        struct tasklet_struct **i;

        /* CPU is dead, so no lock needed. */
        local_irq_disable();

        /* Find end, append list for that CPU. */
        for (i = &__get_cpu_var(tasklet_vec).list; *i; i = &(*i)->next);
        *i = per_cpu(tasklet_vec, cpu).list;
        per_cpu(tasklet_vec, cpu).list = NULL;
        raise_softirq_irqoff(TASKLET_SOFTIRQ);

        for (i = &__get_cpu_var(tasklet_hi_vec).list; *i; i = &(*i)->next);
        *i = per_cpu(tasklet_hi_vec, cpu).list;
        per_cpu(tasklet_hi_vec, cpu).list = NULL;
        raise_softirq_irqoff(HI_SOFTIRQ);

        local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit cpu_callback(struct notifier_block *nfb,
                                  unsigned long action,
                                  void *hcpu)
{
        int hotcpu = (unsigned long)hcpu;
        struct task_struct *p;

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
                if (IS_ERR(p)) {
                        printk("ksoftirqd for %i failed\n", hotcpu);
                        return NOTIFY_BAD;
                }
                kthread_bind(p, hotcpu);
                per_cpu(ksoftirqd, hotcpu) = p;
                break;
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                wake_up_process(per_cpu(ksoftirqd, hotcpu));
                break;
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
                if (!per_cpu(ksoftirqd, hotcpu))
                        break;
                /* Unbind so it can run.  Fall thru. */
                kthread_bind(per_cpu(ksoftirqd, hotcpu),
                             any_online_cpu(cpu_online_map));
        case CPU_DEAD:
        case CPU_DEAD_FROZEN: {
                struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

                p = per_cpu(ksoftirqd, hotcpu);
                per_cpu(ksoftirqd, hotcpu) = NULL;
                sched_setscheduler(p, SCHED_FIFO, &param);
                kthread_stop(p);
                takeover_tasklets(hotcpu);
                break;
        }
#endif /* CONFIG_HOTPLUG_CPU */
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cpu_nfb = {
        .notifier_call = cpu_callback
};

__init int spawn_ksoftirqd(void)
{
        void *cpu = (void *)(long)smp_processor_id();
        int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);

        BUG_ON(err == NOTIFY_BAD);
        cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
        register_cpu_notifier(&cpu_nfb);
        return 0;
}

#ifdef CONFIG_SMP
/*
 * Call a function on all processors
 */
int on_each_cpu(void (*func) (void *info), void *info, int retry, int wait)
{
        int ret = 0;

        preempt_disable();
        ret = smp_call_function(func, info, retry, wait);
        local_irq_disable();
        func(info);
        local_irq_enable();
        preempt_enable();
        return ret;
}
EXPORT_SYMBOL(on_each_cpu);
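
/*
 * [Editor's note -- not part of the original file] Hedged usage sketch for
 * the 2.6.24-era four-argument signature; drain_local_cache is a
 * hypothetical callback:
 *
 *      static void drain_local_cache(void *unused);
 *
 *      on_each_cpu(drain_local_cache, NULL, 0, 1);     // retry=0, wait=1
 */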
#endif
