OpenCores
URL https://opencores.org/ocsvn/or1k_soc_on_altera_embedded_dev_kit/or1k_soc_on_altera_embedded_dev_kit/trunk

Subversion Repositories or1k_soc_on_altera_embedded_dev_kit

/or1k_soc_on_altera_embedded_dev_kit/trunk/linux-2.6/linux-2.6.24/arch/powerpc/kernel/irq.c (blame information for rev 3, author xianfeng)

/*
 *  Derived from arch/i386/kernel/irq.c
 *    Copyright (C) 1992 Linus Torvalds
 *  Adapted from arch/i386 by Gary Thomas
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Updated and modified by Cort Dougan <cort@fsmlabs.com>
 *    Copyright (C) 1996-2001 Cort Dougan
 *  Adapted for Power Macintosh by Paul Mackerras
 *    Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU.  If a bit is set, the
 * interrupt is _enabled_.  As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask.  I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx
 * to reduce code space and undefined function references.
 */

#undef DEBUG

#include <linux/module.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/debugfs.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
#endif

int __irq_offset_value;
static int ppc_spurious_interrupts;

#ifdef CONFIG_PPC32
EXPORT_SYMBOL(__irq_offset_value);
atomic_t ppc_n_lost_interrupts;

#ifndef CONFIG_PPC_MERGE
#define NR_MASK_WORDS   ((NR_IRQS + 31) / 32)
unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
#endif

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
extern int tau_interrupts(int);
#endif
#endif /* CONFIG_PPC32 */

#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_MERGE)
extern atomic_t ipi_recv;
extern atomic_t ipi_sent;
#endif

#ifdef CONFIG_PPC64
EXPORT_SYMBOL(irq_desc);

int distribute_irqs = 1;

static inline unsigned long get_hard_enabled(void)
{
        unsigned long enabled;

        __asm__ __volatile__("lbz %0,%1(13)"
        : "=r" (enabled) : "i" (offsetof(struct paca_struct, hard_enabled)));

        return enabled;
}

static inline void set_soft_enabled(unsigned long enable)
{
        __asm__ __volatile__("stb %0,%1(13)"
        : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
}

void local_irq_restore(unsigned long en)
{
        /*
         * get_paca()->soft_enabled = en;
         * Is it ever valid to use local_irq_restore(0) when soft_enabled is 1?
         * That was allowed before, and in such a case we do need to take care
         * that gcc will set soft_enabled directly via r13, not choose to use
         * an intermediate register, lest we're preempted to a different cpu.
         */
        set_soft_enabled(en);
        if (!en)
                return;

        if (firmware_has_feature(FW_FEATURE_ISERIES)) {
                /*
                 * Do we need to disable preemption here?  Not really: in the
                 * unlikely event that we're preempted to a different cpu in
                 * between getting r13, loading its lppaca_ptr, and loading
                 * its any_int, we might call iseries_handle_interrupts without
                 * an interrupt pending on the new cpu, but that's no disaster,
                 * is it?  And the business of preempting us off the old cpu
                 * would itself involve a local_irq_restore which handles the
                 * interrupt to that cpu.
                 *
                 * But use "local_paca->lppaca_ptr" instead of "get_lppaca()"
                 * to avoid any preemption checking added into get_paca().
                 */
                if (local_paca->lppaca_ptr->int_dword.any_int)
                        iseries_handle_interrupts();
                return;
        }

        /*
         * if (get_paca()->hard_enabled) return;
         * But again we need to take care that gcc gets hard_enabled directly
         * via r13, not choose to use an intermediate register, lest we're
         * preempted to a different cpu in between the two instructions.
         */
        if (get_hard_enabled())
                return;

        /*
         * Need to hard-enable interrupts here.  Since currently disabled,
         * no need to take further asm precautions against preemption; but
         * use local_paca instead of get_paca() to avoid preemption checking.
         */
        local_paca->hard_enabled = en;
        if ((int)mfspr(SPRN_DEC) < 0)
                mtspr(SPRN_DEC, 1);

        /*
         * Force the delivery of pending soft-disabled interrupts on PS3.
         * Any HV call will have this side effect.
         */
        if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
                u64 tmp;
                lv1_get_version_info(&tmp);
        }

        __hard_irq_enable();
}
#endif /* CONFIG_PPC64 */

int show_interrupts(struct seq_file *p, void *v)
{
        int i = *(loff_t *)v, j;
        struct irqaction *action;
        irq_desc_t *desc;
        unsigned long flags;

        if (i == 0) {
                seq_puts(p, "           ");
                for_each_online_cpu(j)
                        seq_printf(p, "CPU%d       ", j);
                seq_putc(p, '\n');
        }

        if (i < NR_IRQS) {
                desc = get_irq_desc(i);
                spin_lock_irqsave(&desc->lock, flags);
                action = desc->action;
                if (!action || !action->handler)
                        goto skip;
                seq_printf(p, "%3d: ", i);
#ifdef CONFIG_SMP
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#else
                seq_printf(p, "%10u ", kstat_irqs(i));
#endif /* CONFIG_SMP */
                if (desc->chip)
                        seq_printf(p, " %s ", desc->chip->typename);
                else
                        seq_puts(p, "  None      ");
                seq_printf(p, "%s", (desc->status & IRQ_LEVEL) ? "Level " : "Edge  ");
                seq_printf(p, "    %s", action->name);
                for (action = action->next; action; action = action->next)
                        seq_printf(p, ", %s", action->name);
                seq_putc(p, '\n');
skip:
                spin_unlock_irqrestore(&desc->lock, flags);
        } else if (i == NR_IRQS) {
#ifdef CONFIG_PPC32
#ifdef CONFIG_TAU_INT
                if (tau_initialized) {
                        seq_puts(p, "TAU: ");
                        for_each_online_cpu(j)
                                seq_printf(p, "%10u ", tau_interrupts(j));
                        seq_puts(p, "  PowerPC             Thermal Assist (cpu temp)\n");
                }
#endif
#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_MERGE)
                /* should this be per processor send/receive? */
                seq_printf(p, "IPI (recv/sent): %10u/%u\n",
                                atomic_read(&ipi_recv), atomic_read(&ipi_sent));
#endif
#endif /* CONFIG_PPC32 */
                seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts);
        }
        return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(cpumask_t map)
{
        unsigned int irq;
        static int warned;

        for_each_irq(irq) {
                cpumask_t mask;

                if (irq_desc[irq].status & IRQ_PER_CPU)
                        continue;

                cpus_and(mask, irq_desc[irq].affinity, map);
                if (any_online_cpu(mask) == NR_CPUS) {
                        printk("Breaking affinity for irq %i\n", irq);
                        mask = map;
                }
                if (irq_desc[irq].chip->set_affinity)
                        irq_desc[irq].chip->set_affinity(irq, mask);
                else if (irq_desc[irq].action && !(warned++))
                        printk("Cannot set affinity for irq %i\n", irq);
        }

        local_irq_enable();
        mdelay(1);
        local_irq_disable();
}
#endif

void do_IRQ(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        unsigned int irq;
#ifdef CONFIG_IRQSTACKS
        struct thread_info *curtp, *irqtp;
#endif

        irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
        /* Debugging check for stack overflow: is there less than 2KB free? */
        {
                long sp;

                sp = __get_SP() & (THREAD_SIZE-1);

                if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
                        printk("do_IRQ: stack overflow: %ld\n",
                                sp - sizeof(struct thread_info));
                        dump_stack();
                }
        }
#endif

        /*
         * Every platform is required to implement ppc_md.get_irq.
         * This function will either return an irq number or NO_IRQ to
         * indicate that there are no more interrupts pending.
         * The value NO_IRQ_IGNORE is for buggy hardware and means that this
         * IRQ has already been handled. -- Tom
         */
        irq = ppc_md.get_irq();

        if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) {
#ifdef CONFIG_IRQSTACKS
                /* Switch to the irq stack to handle this */
                curtp = current_thread_info();
                irqtp = hardirq_ctx[smp_processor_id()];
                if (curtp != irqtp) {
                        struct irq_desc *desc = irq_desc + irq;
                        void *handler = desc->handle_irq;
                        if (handler == NULL)
                                handler = &__do_IRQ;
                        irqtp->task = curtp->task;
                        irqtp->flags = 0;
                        call_handle_irq(irq, desc, irqtp, handler);
                        irqtp->task = NULL;
                        if (irqtp->flags)
                                set_bits(irqtp->flags, &curtp->flags);
                } else
#endif
                        generic_handle_irq(irq);
        } else if (irq != NO_IRQ_IGNORE)
                /* That's not SMP safe ... but who cares? */
                ppc_spurious_interrupts++;

        irq_exit();
        set_irq_regs(old_regs);

#ifdef CONFIG_PPC_ISERIES
        if (firmware_has_feature(FW_FEATURE_ISERIES) &&
                        get_lppaca()->int_dword.fields.decr_int) {
                get_lppaca()->int_dword.fields.decr_int = 0;
                /* Signal a fake decrementer interrupt */
                timer_interrupt(regs);
        }
#endif
}
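
/*
 * A minimal sketch of the ppc_md.get_irq hook that the comment above
 * says every platform must provide.  my_pic_read_pending() and
 * my_pic_host are hypothetical stand-ins for a real PIC driver's
 * pending-register read and its irq_host; they are not part of this
 * file.
 */
extern int my_pic_read_pending(void);           /* hypothetical */
extern struct irq_host *my_pic_host;            /* hypothetical */

static unsigned int my_pic_get_irq(void)
{
        int hw = my_pic_read_pending();         /* read the PIC */

        if (hw < 0)                             /* nothing pending */
                return NO_IRQ;

        /* translate the hardware number into a virtual irq */
        return irq_linear_revmap(my_pic_host, hw);
}
/* A platform would then set ppc_md.get_irq = my_pic_get_irq in its
 * machine description. */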

void __init init_IRQ(void)
{
        if (ppc_md.init_IRQ)
                ppc_md.init_IRQ();
#ifdef CONFIG_PPC64
        irq_ctx_init();
#endif
}


#ifdef CONFIG_IRQSTACKS
struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;

void irq_ctx_init(void)
{
        struct thread_info *tp;
        int i;

        for_each_possible_cpu(i) {
                memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
                tp = softirq_ctx[i];
                tp->cpu = i;
                tp->preempt_count = SOFTIRQ_OFFSET;

                memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
                tp = hardirq_ctx[i];
                tp->cpu = i;
                tp->preempt_count = HARDIRQ_OFFSET;
        }
}

static inline void do_softirq_onstack(void)
{
        struct thread_info *curtp, *irqtp;

        curtp = current_thread_info();
        irqtp = softirq_ctx[smp_processor_id()];
        irqtp->task = curtp->task;
        call_do_softirq(irqtp);
        irqtp->task = NULL;
}

#else
#define do_softirq_onstack()    __do_softirq()
#endif /* CONFIG_IRQSTACKS */

void do_softirq(void)
{
        unsigned long flags;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        if (local_softirq_pending())
                do_softirq_onstack();

        local_irq_restore(flags);
}


/*
 * IRQ controller and virtual interrupts
 */

#ifdef CONFIG_PPC_MERGE

static LIST_HEAD(irq_hosts);
static DEFINE_SPINLOCK(irq_big_lock);
static DEFINE_PER_CPU(unsigned int, irq_radix_reader);
static unsigned int irq_radix_writer;
struct irq_map_entry irq_map[NR_IRQS];
static unsigned int irq_virq_count = NR_IRQS;
static struct irq_host *irq_default_host;

irq_hw_number_t virq_to_hw(unsigned int virq)
{
        return irq_map[virq].hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);

static int default_irq_host_match(struct irq_host *h, struct device_node *np)
{
        return h->of_node != NULL && h->of_node == np;
}

struct irq_host *irq_alloc_host(struct device_node *of_node,
                                unsigned int revmap_type,
                                unsigned int revmap_arg,
                                struct irq_host_ops *ops,
                                irq_hw_number_t inval_irq)
{
        struct irq_host *host;
        unsigned int size = sizeof(struct irq_host);
        unsigned int i;
        unsigned int *rmap;
        unsigned long flags;

        /* Allocate structure and revmap table if using linear mapping */
        if (revmap_type == IRQ_HOST_MAP_LINEAR)
                size += revmap_arg * sizeof(unsigned int);
        host = zalloc_maybe_bootmem(size, GFP_KERNEL);
        if (host == NULL)
                return NULL;

        /* Fill structure */
        host->revmap_type = revmap_type;
        host->inval_irq = inval_irq;
        host->ops = ops;
        host->of_node = of_node;

        if (host->ops->match == NULL)
                host->ops->match = default_irq_host_match;

        spin_lock_irqsave(&irq_big_lock, flags);

        /* If it's a legacy controller, check for duplicates and
         * mark it as allocated (we use the irq 0 host pointer for that).
         */
        if (revmap_type == IRQ_HOST_MAP_LEGACY) {
                if (irq_map[0].host != NULL) {
                        spin_unlock_irqrestore(&irq_big_lock, flags);
                        /* If we are early boot, we can't free the structure,
                         * too bad...
                         * This will be fixed once slab is made available
                         * early, instead of the current cruft.
                         */
                        if (mem_init_done)
                                kfree(host);
                        return NULL;
                }
                irq_map[0].host = host;
        }

        list_add(&host->link, &irq_hosts);
        spin_unlock_irqrestore(&irq_big_lock, flags);

        /* Additional setup per revmap type */
        switch(revmap_type) {
        case IRQ_HOST_MAP_LEGACY:
                /* 0 is always the invalid number for legacy */
                host->inval_irq = 0;
                /* set ourselves up as the host for all legacy interrupts */
                for (i = 1; i < NUM_ISA_INTERRUPTS; i++) {
                        irq_map[i].hwirq = i;
                        smp_wmb();
                        irq_map[i].host = host;
                        smp_wmb();

                        /* Clear norequest flags */
                        get_irq_desc(i)->status &= ~IRQ_NOREQUEST;

                        /* Legacy flags are left to default at this point,
                         * one can then use irq_create_mapping() to
                         * explicitly change them.
                         */
                        ops->map(host, i, i);
                }
                break;
        case IRQ_HOST_MAP_LINEAR:
                rmap = (unsigned int *)(host + 1);
                for (i = 0; i < revmap_arg; i++)
                        rmap[i] = NO_IRQ;
                host->revmap_data.linear.size = revmap_arg;
                smp_wmb();
                host->revmap_data.linear.revmap = rmap;
                break;
        default:
                break;
        }

        pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host);

        return host;
}

struct irq_host *irq_find_host(struct device_node *node)
{
        struct irq_host *h, *found = NULL;
        unsigned long flags;

        /* We might want to match the legacy controller last since
         * it might potentially be set to match all interrupts in
         * the absence of a device node. This isn't a problem so far
         * though...
         */
        spin_lock_irqsave(&irq_big_lock, flags);
        list_for_each_entry(h, &irq_hosts, link)
                if (h->ops->match(h, node)) {
                        found = h;
                        break;
                }
        spin_unlock_irqrestore(&irq_big_lock, flags);
        return found;
}
EXPORT_SYMBOL_GPL(irq_find_host);

void irq_set_default_host(struct irq_host *host)
{
        pr_debug("irq: Default host set to @0x%p\n", host);

        irq_default_host = host;
}

void irq_set_virq_count(unsigned int count)
{
        pr_debug("irq: Trying to set virq count to %d\n", count);

        BUG_ON(count < NUM_ISA_INTERRUPTS);
        if (count < NR_IRQS)
                irq_virq_count = count;
}

/* The radix tree is not lockless-safe!  We use a brlock-type mechanism
 * for now, until we can use a lockless radix tree.
 */
static void irq_radix_wrlock(unsigned long *flags)
{
        unsigned int cpu, ok;

        spin_lock_irqsave(&irq_big_lock, *flags);
        irq_radix_writer = 1;
        smp_mb();
        do {
                barrier();
                ok = 1;
                for_each_possible_cpu(cpu) {
                        if (per_cpu(irq_radix_reader, cpu)) {
                                ok = 0;
                                break;
                        }
                }
                if (!ok)
                        cpu_relax();
        } while (!ok);
}

static void irq_radix_wrunlock(unsigned long flags)
{
        smp_wmb();
        irq_radix_writer = 0;
        spin_unlock_irqrestore(&irq_big_lock, flags);
}

static void irq_radix_rdlock(unsigned long *flags)
{
        local_irq_save(*flags);
        __get_cpu_var(irq_radix_reader) = 1;
        smp_mb();
        if (likely(irq_radix_writer == 0))
                return;
        __get_cpu_var(irq_radix_reader) = 0;
        smp_wmb();
        spin_lock(&irq_big_lock);
        __get_cpu_var(irq_radix_reader) = 1;
        spin_unlock(&irq_big_lock);
}

static void irq_radix_rdunlock(unsigned long flags)
{
        __get_cpu_var(irq_radix_reader) = 0;
        local_irq_restore(flags);
}

static int irq_setup_virq(struct irq_host *host, unsigned int virq,
                            irq_hw_number_t hwirq)
{
        /* Clear IRQ_NOREQUEST flag */
        get_irq_desc(virq)->status &= ~IRQ_NOREQUEST;

        /* map it */
        smp_wmb();
        irq_map[virq].hwirq = hwirq;
        smp_mb();

        if (host->ops->map(host, virq, hwirq)) {
                pr_debug("irq: -> mapping failed, freeing\n");
                irq_free_virt(virq, 1);
                return -1;
        }

        return 0;
}

unsigned int irq_create_direct_mapping(struct irq_host *host)
{
        unsigned int virq;

        if (host == NULL)
                host = irq_default_host;

        BUG_ON(host == NULL);
        WARN_ON(host->revmap_type != IRQ_HOST_MAP_NOMAP);

        virq = irq_alloc_virt(host, 1, 0);
        if (virq == NO_IRQ) {
                pr_debug("irq: create_direct virq allocation failed\n");
                return NO_IRQ;
        }

        pr_debug("irq: create_direct obtained virq %d\n", virq);

        if (irq_setup_virq(host, virq, virq))
                return NO_IRQ;

        return virq;
}

unsigned int irq_create_mapping(struct irq_host *host,
                                irq_hw_number_t hwirq)
{
        unsigned int virq, hint;

        pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", host, hwirq);

        /* Look for the default host if necessary */
        if (host == NULL)
                host = irq_default_host;
        if (host == NULL) {
                printk(KERN_WARNING "irq_create_mapping called for"
                       " NULL host, hwirq=%lx\n", hwirq);
                WARN_ON(1);
                return NO_IRQ;
        }
        pr_debug("irq: -> using host @%p\n", host);

        /* Check if a mapping already exists; if it does, call
         * host->ops->map() to update the flags.
         */
        virq = irq_find_mapping(host, hwirq);
        if (virq != NO_IRQ) {
                if (host->ops->remap)
                        host->ops->remap(host, virq, hwirq);
                pr_debug("irq: -> existing mapping on virq %d\n", virq);
                return virq;
        }

        /* Get a virtual interrupt number */
        if (host->revmap_type == IRQ_HOST_MAP_LEGACY) {
                /* Handle legacy */
                virq = (unsigned int)hwirq;
                if (virq == 0 || virq >= NUM_ISA_INTERRUPTS)
                        return NO_IRQ;
                return virq;
        } else {
                /* Allocate a virtual interrupt number */
                hint = hwirq % irq_virq_count;
                virq = irq_alloc_virt(host, 1, hint);
                if (virq == NO_IRQ) {
                        pr_debug("irq: -> virq allocation failed\n");
                        return NO_IRQ;
                }
        }
        pr_debug("irq: -> obtained virq %d\n", virq);

        if (irq_setup_virq(host, virq, hwirq))
                return NO_IRQ;

        return virq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping);
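
/*
 * A minimal sketch of the other side of this API: a PIC driver of this
 * era registering an irq_host so that irq_create_mapping() can call
 * back into it via ops->map().  my_pic_chip, my_pic_host, my_pic_init
 * and the 64-source size are hypothetical, not part of this file.
 */
static struct irq_chip my_pic_chip;             /* hypothetical; mask/unmask
                                                 * callbacks omitted */
static struct irq_host *my_pic_host;            /* hypothetical */

static int my_pic_host_map(struct irq_host *h, unsigned int virq,
                           irq_hw_number_t hw)
{
        /* called back from irq_setup_virq(): attach chip and flow handler */
        set_irq_chip_and_handler(virq, &my_pic_chip, handle_level_irq);
        return 0;
}

static struct irq_host_ops my_pic_host_ops = {
        .map = my_pic_host_map,
};

static void __init my_pic_init(struct device_node *np)
{
        /* linear reverse map with 64 slots; hwirq 0 marks an empty slot */
        my_pic_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, 64,
                                     &my_pic_host_ops, 0);
}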

unsigned int irq_create_of_mapping(struct device_node *controller,
                                   u32 *intspec, unsigned int intsize)
{
        struct irq_host *host;
        irq_hw_number_t hwirq;
        unsigned int type = IRQ_TYPE_NONE;
        unsigned int virq;

        if (controller == NULL)
                host = irq_default_host;
        else
                host = irq_find_host(controller);
        if (host == NULL) {
                printk(KERN_WARNING "irq: no irq host found for %s !\n",
                       controller->full_name);
                return NO_IRQ;
        }

        /* If the host has no translation, we assume a plain interrupt line */
        if (host->ops->xlate == NULL)
                hwirq = intspec[0];
        else {
                if (host->ops->xlate(host, controller, intspec, intsize,
                                     &hwirq, &type))
                        return NO_IRQ;
        }

        /* Create mapping */
        virq = irq_create_mapping(host, hwirq);
        if (virq == NO_IRQ)
                return virq;

        /* Set the type if specified and different from the current one */
        if (type != IRQ_TYPE_NONE &&
            type != (get_irq_desc(virq)->status & IRQF_TRIGGER_MASK))
                set_irq_type(virq, type);
        return virq;
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);

unsigned int irq_of_parse_and_map(struct device_node *dev, int index)
{
        struct of_irq oirq;

        if (of_irq_map_one(dev, index, &oirq))
                return NO_IRQ;

        return irq_create_of_mapping(oirq.controller, oirq.specifier,
                                     oirq.size);
}
EXPORT_SYMBOL_GPL(irq_of_parse_and_map);
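
/*
 * A minimal sketch of the usual consumer of irq_of_parse_and_map(): a
 * driver turning a device-tree "interrupts" entry into a virq and
 * requesting it.  my_dev_interrupt, my_dev_setup and the "my_dev" name
 * are hypothetical, not part of this file.
 */
static irqreturn_t my_dev_interrupt(int irq, void *dev_id)
{
        /* ... acknowledge the device here ... */
        return IRQ_HANDLED;
}

static int my_dev_setup(struct device_node *np, void *priv)
{
        unsigned int virq = irq_of_parse_and_map(np, 0);

        if (virq == NO_IRQ)
                return -EINVAL;
        /* undone with free_irq() + irq_dispose_mapping() on teardown */
        return request_irq(virq, my_dev_interrupt, 0, "my_dev", priv);
}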

void irq_dispose_mapping(unsigned int virq)
{
        struct irq_host *host;
        irq_hw_number_t hwirq;
        unsigned long flags;

        if (virq == NO_IRQ)
                return;

        host = irq_map[virq].host;
        WARN_ON(host == NULL);
        if (host == NULL)
                return;

        /* Never unmap legacy interrupts */
        if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
                return;

        /* remove chip and handler */
        set_irq_chip_and_handler(virq, NULL, NULL);

        /* Make sure it's completed */
        synchronize_irq(virq);

        /* Tell the PIC about it */
        if (host->ops->unmap)
                host->ops->unmap(host, virq);
        smp_mb();

        /* Clear reverse map */
        hwirq = irq_map[virq].hwirq;
        switch(host->revmap_type) {
        case IRQ_HOST_MAP_LINEAR:
                if (hwirq < host->revmap_data.linear.size)
                        host->revmap_data.linear.revmap[hwirq] = NO_IRQ;
                break;
        case IRQ_HOST_MAP_TREE:
                /* Check if the radix tree is allocated yet */
                if (host->revmap_data.tree.gfp_mask == 0)
                        break;
                irq_radix_wrlock(&flags);
                radix_tree_delete(&host->revmap_data.tree, hwirq);
                irq_radix_wrunlock(flags);
                break;
        }

        /* Destroy map */
        smp_mb();
        irq_map[virq].hwirq = host->inval_irq;

        /* Set some flags */
        get_irq_desc(virq)->status |= IRQ_NOREQUEST;

        /* Free it */
        irq_free_virt(virq, 1);
}
EXPORT_SYMBOL_GPL(irq_dispose_mapping);

unsigned int irq_find_mapping(struct irq_host *host,
                              irq_hw_number_t hwirq)
{
        unsigned int i;
        unsigned int hint = hwirq % irq_virq_count;

        /* Look for the default host if necessary */
        if (host == NULL)
                host = irq_default_host;
        if (host == NULL)
                return NO_IRQ;

        /* legacy -> bail early */
        if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
                return hwirq;

        /* Slow path does a linear search of the map */
        if (hint < NUM_ISA_INTERRUPTS)
                hint = NUM_ISA_INTERRUPTS;
        i = hint;
        do {
                if (irq_map[i].host == host &&
                    irq_map[i].hwirq == hwirq)
                        return i;
                i++;
                if (i >= irq_virq_count)
                        i = NUM_ISA_INTERRUPTS;
        } while (i != hint);
        return NO_IRQ;
}
EXPORT_SYMBOL_GPL(irq_find_mapping);


unsigned int irq_radix_revmap(struct irq_host *host,
                              irq_hw_number_t hwirq)
{
        struct radix_tree_root *tree;
        struct irq_map_entry *ptr;
        unsigned int virq;
        unsigned long flags;

        WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);

        /* Check if the radix tree exists yet.  We test the value of
         * the gfp_mask for that.  Sneaky but saves another int in the
         * structure.  If not, we fall back to slow mode.
         */
        tree = &host->revmap_data.tree;
        if (tree->gfp_mask == 0)
                return irq_find_mapping(host, hwirq);

        /* Now try to resolve */
        irq_radix_rdlock(&flags);
        ptr = radix_tree_lookup(tree, hwirq);
        irq_radix_rdunlock(flags);

        /* Found it, return */
        if (ptr) {
                virq = ptr - irq_map;
                return virq;
        }

        /* If not there, try to insert it */
        virq = irq_find_mapping(host, hwirq);
        if (virq != NO_IRQ) {
                irq_radix_wrlock(&flags);
                radix_tree_insert(tree, hwirq, &irq_map[virq]);
                irq_radix_wrunlock(flags);
        }
        return virq;
}

unsigned int irq_linear_revmap(struct irq_host *host,
                               irq_hw_number_t hwirq)
{
        unsigned int *revmap;

        WARN_ON(host->revmap_type != IRQ_HOST_MAP_LINEAR);

        /* Check revmap bounds */
        if (unlikely(hwirq >= host->revmap_data.linear.size))
                return irq_find_mapping(host, hwirq);

        /* Check if revmap was allocated */
        revmap = host->revmap_data.linear.revmap;
        if (unlikely(revmap == NULL))
                return irq_find_mapping(host, hwirq);

        /* Fill up revmap with slow path if no mapping found */
        if (unlikely(revmap[hwirq] == NO_IRQ))
                revmap[hwirq] = irq_find_mapping(host, hwirq);

        return revmap[hwirq];
}
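
/*
 * A minimal sketch of how a cascaded PIC of this era might use the
 * reverse map above from its flow handler.  my_child_host and
 * my_child_read_pending() are hypothetical, not part of this file.
 */
extern struct irq_host *my_child_host;          /* hypothetical */
extern int my_child_read_pending(void);         /* hypothetical */

static void my_cascade_handler(unsigned int irq, struct irq_desc *desc)
{
        int hw = my_child_read_pending();
        unsigned int cascade = (hw < 0) ? NO_IRQ :
                        irq_linear_revmap(my_child_host, hw);

        if (cascade != NO_IRQ)
                generic_handle_irq(cascade);
        /* an ack/eoi on the parent chip would go here */
}
/* Installed on the parent's virq with set_irq_chained_handler(). */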

unsigned int irq_alloc_virt(struct irq_host *host,
                            unsigned int count,
                            unsigned int hint)
{
        unsigned long flags;
        unsigned int i, j, found = NO_IRQ;

        if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS))
                return NO_IRQ;

        spin_lock_irqsave(&irq_big_lock, flags);

        /* Use the hint for 1 interrupt if any */
        if (count == 1 && hint >= NUM_ISA_INTERRUPTS &&
            hint < irq_virq_count && irq_map[hint].host == NULL) {
                found = hint;
                goto hint_found;
        }

        /* Look for count consecutive numbers in the allocatable
         * (non-legacy) space.
         */
        for (i = NUM_ISA_INTERRUPTS, j = 0; i < irq_virq_count; i++) {
                if (irq_map[i].host != NULL)
                        j = 0;
                else
                        j++;

                if (j == count) {
                        found = i - count + 1;
                        break;
                }
        }
        if (found == NO_IRQ) {
                spin_unlock_irqrestore(&irq_big_lock, flags);
                return NO_IRQ;
        }
 hint_found:
        for (i = found; i < (found + count); i++) {
                irq_map[i].hwirq = host->inval_irq;
                smp_wmb();
                irq_map[i].host = host;
        }
        spin_unlock_irqrestore(&irq_big_lock, flags);
        return found;
}

void irq_free_virt(unsigned int virq, unsigned int count)
{
        unsigned long flags;
        unsigned int i;

        WARN_ON(virq < NUM_ISA_INTERRUPTS);
        WARN_ON(count == 0 || (virq + count) > irq_virq_count);

        spin_lock_irqsave(&irq_big_lock, flags);
        for (i = virq; i < (virq + count); i++) {
                struct irq_host *host;

                if (i < NUM_ISA_INTERRUPTS ||
                    (virq + count) > irq_virq_count)
                        continue;

                host = irq_map[i].host;
                irq_map[i].hwirq = host->inval_irq;
                smp_wmb();
                irq_map[i].host = NULL;
        }
        spin_unlock_irqrestore(&irq_big_lock, flags);
}

void irq_early_init(void)
{
        unsigned int i;

        for (i = 0; i < NR_IRQS; i++)
                get_irq_desc(i)->status |= IRQ_NOREQUEST;
}

/* We need to create the radix trees late */
static int irq_late_init(void)
{
        struct irq_host *h;
        unsigned long flags;

        irq_radix_wrlock(&flags);
        list_for_each_entry(h, &irq_hosts, link) {
                if (h->revmap_type == IRQ_HOST_MAP_TREE)
                        INIT_RADIX_TREE(&h->revmap_data.tree, GFP_ATOMIC);
        }
        irq_radix_wrunlock(flags);

        return 0;
}
arch_initcall(irq_late_init);

#ifdef CONFIG_VIRQ_DEBUG
static int virq_debug_show(struct seq_file *m, void *private)
{
        unsigned long flags;
        irq_desc_t *desc;
        const char *p;
        char none[] = "none";
        int i;

        seq_printf(m, "%-5s  %-7s  %-15s  %s\n", "virq", "hwirq",
                      "chip name", "host name");

        for (i = 1; i < NR_IRQS; i++) {
                desc = get_irq_desc(i);
                spin_lock_irqsave(&desc->lock, flags);

                if (desc->action && desc->action->handler) {
                        seq_printf(m, "%5d  ", i);
                        seq_printf(m, "0x%05lx  ", virq_to_hw(i));

                        if (desc->chip && desc->chip->typename)
                                p = desc->chip->typename;
                        else
                                p = none;
                        seq_printf(m, "%-15s  ", p);

                        if (irq_map[i].host && irq_map[i].host->of_node)
                                p = irq_map[i].host->of_node->full_name;
                        else
                                p = none;
                        seq_printf(m, "%s\n", p);
                }

                spin_unlock_irqrestore(&desc->lock, flags);
        }

        return 0;
}

static int virq_debug_open(struct inode *inode, struct file *file)
{
        return single_open(file, virq_debug_show, inode->i_private);
}

static const struct file_operations virq_debug_fops = {
        .open = virq_debug_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int __init irq_debugfs_init(void)
{
        if (debugfs_create_file("virq_mapping", S_IRUGO, powerpc_debugfs_root,
                                 NULL, &virq_debug_fops) == NULL)
                return -ENOMEM;

        return 0;
}
__initcall(irq_debugfs_init);
#endif /* CONFIG_VIRQ_DEBUG */

#endif /* CONFIG_PPC_MERGE */

#ifdef CONFIG_PPC64
static int __init setup_noirqdistrib(char *str)
{
        distribute_irqs = 0;
        return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */
