OpenCores Subversion repository: or1k
URL: https://opencores.org/ocsvn/or1k/or1k/trunk
File: or1k/trunk/linux/linux-2.4/arch/alpha/kernel/irq.c (rev 1765)
/*
 *      linux/arch/alpha/kernel/irq.c
 *
 *      Copyright (C) 1995 Linus Torvalds
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/proc_fs.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/bitops.h>
#include <asm/uaccess.h>

/*
 * Controller mappings for all interrupt sources:
 */
irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
        [0 ... NR_IRQS-1] = { 0, &no_irq_type, NULL, 0, SPIN_LOCK_UNLOCKED }
};

static void register_irq_proc(unsigned int irq);

volatile unsigned long irq_err_count;

/*
 * Special irq handlers.
 */

void no_action(int cpl, void *dev_id, struct pt_regs *regs) { }

/*
 * Generic "no controller" code.
 */

static void no_irq_enable_disable(unsigned int irq) { }
static unsigned int no_irq_startup(unsigned int irq) { return 0; }

static void
no_irq_ack(unsigned int irq)
{
        irq_err_count++;
        printk(KERN_CRIT "Unexpected IRQ trap at vector %u\n", irq);
}

struct hw_interrupt_type no_irq_type = {
        typename:       "none",
        startup:        no_irq_startup,
        shutdown:       no_irq_enable_disable,
        enable:         no_irq_enable_disable,
        disable:        no_irq_enable_disable,
        ack:            no_irq_ack,
        end:            no_irq_enable_disable,
};

int
handle_IRQ_event(unsigned int irq, struct pt_regs *regs,
                 struct irqaction *action)
{
        int status;
        int cpu = smp_processor_id();

        kstat.irqs[cpu][irq]++;
        irq_enter(cpu, irq);

        status = 1;     /* Force the "do bottom halves" bit */

        do {
                if (!(action->flags & SA_INTERRUPT))
                        __sti();
                else
                        __cli();

                status |= action->flags;
                action->handler(irq, action->dev_id, regs);
                action = action->next;
        } while (action);
        if (status & SA_SAMPLE_RANDOM)
                add_interrupt_randomness(irq);
        __cli();

        irq_exit(cpu, irq);

        return status;
}
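
/*
 * Illustrative sketch (not part of the original source): for a shared
 * line, desc->action heads a singly linked list of irqaction structs,
 * which the do/while loop in handle_IRQ_event() walks in registration
 * order.  With two hypothetical drivers sharing IRQ 10:
 *
 *   irq_desc[10].action
 *     -> { handler: eth_handler,  dev_id: &eth_dev,  flags: SA_SHIRQ }
 *     -> { handler: scsi_handler, dev_id: &scsi_dev, flags: SA_SHIRQ }
 *     -> NULL
 *
 * Every handler on the chain runs on every interrupt; each one must
 * use its dev_id to check whether its own device raised the line and
 * return quickly if not.
 */
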
/*
 * Generic enable/disable code: this just calls
 * down into the PIC-specific version for the actual
 * hardware disable after having gotten the irq
 * controller lock.
 */
void inline
disable_irq_nosync(unsigned int irq)
{
        irq_desc_t *desc = irq_desc + irq;
        unsigned long flags;

        spin_lock_irqsave(&desc->lock, flags);
        if (!desc->depth++) {
                desc->status |= IRQ_DISABLED;
                desc->handler->disable(irq);
        }
        spin_unlock_irqrestore(&desc->lock, flags);
}

/*
 * Synchronous version of the above, making sure the IRQ is
 * no longer running on any other CPU.
 */
void
disable_irq(unsigned int irq)
{
        disable_irq_nosync(irq);

        if (!local_irq_count(smp_processor_id())) {
                do {
                        barrier();
                } while (irq_desc[irq].status & IRQ_INPROGRESS);
        }
}

void
enable_irq(unsigned int irq)
{
        irq_desc_t *desc = irq_desc + irq;
        unsigned long flags;

        spin_lock_irqsave(&desc->lock, flags);
        switch (desc->depth) {
        case 1: {
                unsigned int status = desc->status & ~IRQ_DISABLED;
                desc->status = status;
                if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
                        desc->status = status | IRQ_REPLAY;
                        hw_resend_irq(desc->handler, irq);
                }
                desc->handler->enable(irq);
                /* fall-through */
        }
        default:
                desc->depth--;
                break;
        case 0:
                printk(KERN_ERR "enable_irq() unbalanced from %p\n",
                       __builtin_return_address(0));
        }
        spin_unlock_irqrestore(&desc->lock, flags);
}
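
/*
 * Illustrative sketch, not part of the original source.  disable_irq()
 * and enable_irq() nest through desc->depth, so a line stays masked
 * until every disable has been matched by an enable.  The function
 * below is hypothetical and compiled out.
 */
#if 0
static void example_irq_nesting(unsigned int irq)
{
        disable_irq(irq);       /* depth 0 -> 1, line masked */
        disable_irq(irq);       /* depth 1 -> 2, still masked */
        enable_irq(irq);        /* depth 2 -> 1, still masked */
        enable_irq(irq);        /* depth 1 -> 0, line unmasked */
        enable_irq(irq);        /* unbalanced: prints enable_irq() error */
}
#endif
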
int
setup_irq(unsigned int irq, struct irqaction *new)
{
        int shared = 0;
        struct irqaction *old, **p;
        unsigned long flags;
        irq_desc_t *desc = irq_desc + irq;

        /*
         * Some drivers like serial.c use request_irq() heavily,
         * so we have to be careful not to interfere with a
         * running system.
         */
        if (new->flags & SA_SAMPLE_RANDOM) {
                /*
                 * This function might sleep, so we want to call it
                 * first, outside of the atomic block.
                 * Yes, this might clear the entropy pool if the wrong
                 * driver is attempted to be loaded without actually
                 * installing a new handler, but is that really a
                 * problem?  Only the sysadmin is able to do this.
                 */
                rand_initialize_irq(irq);
        }

        /*
         * The following block of code has to be executed atomically.
         */
        spin_lock_irqsave(&desc->lock, flags);
        p = &desc->action;
        if ((old = *p) != NULL) {
                /* Can't share interrupts unless both agree to. */
                if (!(old->flags & new->flags & SA_SHIRQ)) {
                        spin_unlock_irqrestore(&desc->lock, flags);
                        return -EBUSY;
                }

                /* Add the new interrupt at the end of the irq queue. */
                do {
                        p = &old->next;
                        old = *p;
                } while (old);
                shared = 1;
        }

        *p = new;

        if (!shared) {
                desc->depth = 0;
                desc->status &= ~(IRQ_DISABLED | IRQ_INPROGRESS);
                desc->handler->startup(irq);
        }
        spin_unlock_irqrestore(&desc->lock, flags);

        return 0;
}

static struct proc_dir_entry *root_irq_dir;
static struct proc_dir_entry *irq_dir[NR_IRQS];

#ifdef CONFIG_SMP
static struct proc_dir_entry *smp_affinity_entry[NR_IRQS];
static char irq_user_affinity[NR_IRQS];
static unsigned long irq_affinity[NR_IRQS] = { [0 ... NR_IRQS-1] = ~0UL };

static void
select_smp_affinity(int irq)
{
        static int last_cpu;
        int cpu = last_cpu + 1;

        if (!irq_desc[irq].handler->set_affinity || irq_user_affinity[irq])
                return;

        while (((cpu_present_mask >> cpu) & 1) == 0)
                cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
        last_cpu = cpu;

        irq_affinity[irq] = 1UL << cpu;
        irq_desc[irq].handler->set_affinity(irq, 1UL << cpu);
}
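
/*
 * Worked example (not part of the original source): assuming
 * cpu_present_mask == 0xf (CPUs 0-3) and last_cpu initially 0,
 * successive auto-assigned IRQs are rotated onto CPU 1, 2, 3, 0,
 * 1, ...  Once a user writes /proc/irq/N/smp_affinity, the
 * irq_user_affinity[N] flag makes this function leave IRQ N alone.
 */
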
#define HEX_DIGITS 16

static int
irq_affinity_read_proc (char *page, char **start, off_t off,
                        int count, int *eof, void *data)
{
        if (count < HEX_DIGITS+1)
                return -EINVAL;
        return sprintf (page, "%016lx\n", irq_affinity[(long)data]);
}

static int
parse_hex_value (const char *buffer,
                 unsigned long count, unsigned long *ret)
{
        unsigned char hexnum [HEX_DIGITS];
        unsigned long value;
        int i;

        if (!count)
                return -EINVAL;
        if (count > HEX_DIGITS)
                count = HEX_DIGITS;
        if (copy_from_user(hexnum, buffer, count))
                return -EFAULT;

        /*
         * Parse the first 16 characters as a hex string; any non-hex
         * char is end-of-string.  '00e1', 'e1', '00E1' and 'E1' all
         * mean the same value.
         */
        value = 0;

        for (i = 0; i < count; i++) {
                unsigned int c = hexnum[i];

                switch (c) {
                case '0' ... '9': c -= '0'; break;
                case 'a' ... 'f': c -= 'a'-10; break;
                case 'A' ... 'F': c -= 'A'-10; break;
                default:
                        goto out;
                }
                value = (value << 4) | c;
        }
out:
        *ret = value;
        return 0;
}
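
/*
 * Usage sketch, not part of the original source; the IRQ number and
 * mask values are hypothetical.  Writes to /proc/irq/N/smp_affinity
 * are parsed by parse_hex_value() above:
 *
 *   # echo 0000000000000003 > /proc/irq/40/smp_affinity
 *       parses to 0x3, pinning IRQ 40 to CPUs 0 and 1
 *   # cat /proc/irq/40/smp_affinity
 *   0000000000000003
 *   # echo 0 > /proc/irq/40/smp_affinity
 *       the special value 0 hands the choice back to
 *       select_smp_affinity()
 */
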
static int
irq_affinity_write_proc(struct file *file, const char *buffer,
                        unsigned long count, void *data)
{
        int irq = (long) data, full_count = count, err;
        unsigned long new_value;

        if (!irq_desc[irq].handler->set_affinity)
                return -EIO;

        err = parse_hex_value(buffer, count, &new_value);
        if (err)
                return err;

        /* The special value 0 means release control of the
           affinity to the kernel.  */
        if (new_value == 0) {
                irq_user_affinity[irq] = 0;
                select_smp_affinity(irq);
        }
        /* Do not allow disabling IRQs completely - it's too easy a
           way to make the system unusable accidentally :-) At least
           one online CPU still has to be targeted.  */
        else if (!(new_value & cpu_present_mask))
                return -EINVAL;
        else {
                irq_affinity[irq] = new_value;
                irq_user_affinity[irq] = 1;
                irq_desc[irq].handler->set_affinity(irq, new_value);
        }

        return full_count;
}

static int
prof_cpu_mask_read_proc(char *page, char **start, off_t off,
                        int count, int *eof, void *data)
{
        unsigned long *mask = (unsigned long *) data;
        if (count < HEX_DIGITS+1)
                return -EINVAL;
        return sprintf (page, "%016lx\n", *mask);
}

static int
prof_cpu_mask_write_proc(struct file *file, const char *buffer,
                         unsigned long count, void *data)
{
        unsigned long *mask = (unsigned long *) data, full_count = count;
        unsigned long new_value;
        int err;

        err = parse_hex_value(buffer, count, &new_value);
        if (err)
                return err;

        *mask = new_value;
        return full_count;
}
#endif /* CONFIG_SMP */

#define MAX_NAMELEN 10

static void
register_irq_proc (unsigned int irq)
{
#ifdef CONFIG_SMP
        struct proc_dir_entry *entry;
#endif
        char name [MAX_NAMELEN];

        if (!root_irq_dir || (irq_desc[irq].handler == &no_irq_type))
                return;

        memset(name, 0, MAX_NAMELEN);
        sprintf(name, "%d", irq);

        /* create /proc/irq/1234 */
        irq_dir[irq] = proc_mkdir(name, root_irq_dir);

#ifdef CONFIG_SMP
        if (irq_desc[irq].handler->set_affinity) {
                /* create /proc/irq/1234/smp_affinity */
                entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);

                entry->nlink = 1;
                entry->data = (void *)(long)irq;
                entry->read_proc = irq_affinity_read_proc;
                entry->write_proc = irq_affinity_write_proc;

                smp_affinity_entry[irq] = entry;
        }
#endif
}
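
/*
 * Illustrative layout (not part of the original source): after
 * register_irq_proc(40) on an SMP kernel whose controller supports
 * set_affinity, the proc tree looks like (IRQ number hypothetical):
 *
 *   /proc/irq/
 *   |-- prof_cpu_mask        (created by init_irq_proc() below)
 *   `-- 40/
 *       `-- smp_affinity     (mode 0600, hex CPU mask, read/write)
 */
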
unsigned long prof_cpu_mask = ~0UL;

void
init_irq_proc (void)
{
#ifdef CONFIG_SMP
        struct proc_dir_entry *entry;
#endif
        int i;

        /* create /proc/irq */
        root_irq_dir = proc_mkdir("irq", 0);

#ifdef CONFIG_SMP
        /* create /proc/irq/prof_cpu_mask */
        entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);

        entry->nlink = 1;
        entry->data = (void *)&prof_cpu_mask;
        entry->read_proc = prof_cpu_mask_read_proc;
        entry->write_proc = prof_cpu_mask_write_proc;
#endif

        /*
         * Create entries for all existing IRQs.  If the number of IRQs
         * is greater than 1/4 the total dynamic inode space for /proc,
         * don't pollute the inode space.
         */
        if (ACTUAL_NR_IRQS < (PROC_NDYNAMIC / 4)) {
                for (i = 0; i < ACTUAL_NR_IRQS; i++) {
                        if (irq_desc[i].handler == &no_irq_type)
                                continue;
                        register_irq_proc(i);
                }
        }
}

int
request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *),
            unsigned long irqflags, const char *devname, void *dev_id)
{
        int retval;
        struct irqaction *action;

        if (irq >= ACTUAL_NR_IRQS)
                return -EINVAL;
        if (!handler)
                return -EINVAL;

#if 1
        /*
         * Sanity-check: shared interrupts should REALLY pass in
         * a real dev-ID, otherwise we'll have trouble later trying
         * to figure out which interrupt is which (messes up the
         * interrupt freeing logic etc).
         */
        if ((irqflags & SA_SHIRQ) && !dev_id) {
                printk(KERN_ERR
                       "Bad boy: %s (at %p) called us without a dev_id!\n",
                       devname, __builtin_return_address(0));
        }
#endif

        action = (struct irqaction *)
                        kmalloc(sizeof(struct irqaction), GFP_KERNEL);
        if (!action)
                return -ENOMEM;

        action->handler = handler;
        action->flags = irqflags;
        action->mask = 0;
        action->name = devname;
        action->next = NULL;
        action->dev_id = dev_id;

#ifdef CONFIG_SMP
        select_smp_affinity(irq);
#endif

        retval = setup_irq(irq, action);
        if (retval)
                kfree(action);
        return retval;
}
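
/*
 * Usage sketch, not part of the original source.  A minimal driver
 * registering and releasing a handler on a shared line; the device
 * structure, handler and IRQ number are hypothetical, and the block
 * is deliberately compiled out.
 */
#if 0
static struct my_device { int raised_irq; } my_dev;

static void
my_handler(int irq, void *dev_id, struct pt_regs *regs)
{
        struct my_device *dev = dev_id;

        if (!dev->raised_irq)   /* shared line: not our device, bail */
                return;
        /* ... service the hardware ... */
}

static int __init
my_driver_init(void)
{
        /* SA_SHIRQ requires a non-NULL dev_id (see the check above). */
        if (request_irq(10, my_handler, SA_SHIRQ, "mydev", &my_dev))
                return -EBUSY;
        return 0;
}

static void __exit
my_driver_exit(void)
{
        free_irq(10, &my_dev);  /* dev_id selects our action on the chain */
}
#endif
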
void
free_irq(unsigned int irq, void *dev_id)
{
        irq_desc_t *desc;
        struct irqaction **p;
        unsigned long flags;

        if (irq >= ACTUAL_NR_IRQS) {
                printk(KERN_CRIT "Trying to free IRQ%d\n", irq);
                return;
        }

        desc = irq_desc + irq;
        spin_lock_irqsave(&desc->lock, flags);
        p = &desc->action;
        for (;;) {
                struct irqaction *action = *p;
                if (action) {
                        struct irqaction **pp = p;
                        p = &action->next;
                        if (action->dev_id != dev_id)
                                continue;

                        /* Found it - now remove it from the list of entries.  */
                        *pp = action->next;
                        if (!desc->action) {
                                desc->status |= IRQ_DISABLED;
                                desc->handler->shutdown(irq);
                        }
                        spin_unlock_irqrestore(&desc->lock, flags);

#ifdef CONFIG_SMP
                        /* Wait to make sure it's not being used on
                           another CPU.  */
                        while (desc->status & IRQ_INPROGRESS)
                                barrier();
#endif
                        kfree(action);
                        return;
                }
                printk(KERN_ERR "Trying to free already-free IRQ%d\n", irq);
                spin_unlock_irqrestore(&desc->lock, flags);
                return;
        }
}

int
get_irq_list(char *buf)
{
#ifdef CONFIG_SMP
        int j;
#endif
        int i;
        struct irqaction *action;
        char *p = buf;

#ifdef CONFIG_SMP
        p += sprintf(p, "           ");
        for (i = 0; i < smp_num_cpus; i++)
                p += sprintf(p, "CPU%d       ", i);
#ifdef DO_BROADCAST_INTS
        for (i = 0; i < smp_num_cpus; i++)
                p += sprintf(p, "TRY%d       ", i);
#endif
        *p++ = '\n';
#endif

        for (i = 0; i < ACTUAL_NR_IRQS; i++) {
                action = irq_desc[i].action;
                if (!action)
                        continue;
                p += sprintf(p, "%3d: ", i);
#ifndef CONFIG_SMP
                p += sprintf(p, "%10u ", kstat_irqs(i));
#else
                for (j = 0; j < smp_num_cpus; j++)
                        p += sprintf(p, "%10u ",
                                     kstat.irqs[cpu_logical_map(j)][i]);
#ifdef DO_BROADCAST_INTS
                for (j = 0; j < smp_num_cpus; j++)
                        p += sprintf(p, "%10lu ",
                                     irq_attempt(cpu_logical_map(j), i));
#endif
#endif
                p += sprintf(p, " %14s", irq_desc[i].handler->typename);
                p += sprintf(p, "  %c%s",
                             (action->flags & SA_INTERRUPT) ? '+' : ' ',
                             action->name);

                for (action = action->next; action; action = action->next) {
                        p += sprintf(p, ", %c%s",
                                     (action->flags & SA_INTERRUPT) ? '+' : ' ',
                                     action->name);
                }
                *p++ = '\n';
        }
#ifdef CONFIG_SMP
        p += sprintf(p, "IPI: ");
        for (j = 0; j < smp_num_cpus; j++)
                p += sprintf(p, "%10lu ",
                             cpu_data[cpu_logical_map(j)].ipi_count);
        p += sprintf(p, "\n");
#endif
        p += sprintf(p, "ERR: %10lu\n", irq_err_count);
        return p - buf;
}
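
/*
 * Illustrative output (hypothetical numbers, not from the original
 * source): on a two-CPU machine, get_irq_list() backs /proc/interrupts
 * and produces lines shaped like
 *
 *              CPU0       CPU1
 *     1:       2309          0           i8259  keyboard
 *    40:     153472      91034         TSUNAMI  +eth0, +scsi
 *   IPI:       4512       3877
 *   ERR:          0
 *
 * where '+' marks handlers registered with SA_INTERRUPT, i.e. run
 * with interrupts disabled.
 */
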
/*
 * handle_irq handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */

#define MAX_ILLEGAL_IRQS 16

void
handle_irq(int irq, struct pt_regs *regs)
{
        /*
         * We ack quickly, we don't want the irq controller
         * thinking we're snobs just because some other CPU has
         * disabled global interrupts (we have already done the
         * INT_ACK cycles, it's too late to try to pretend to the
         * controller that we aren't taking the interrupt).
         *
         * A 0 return value means that this irq is already being
         * handled by some other CPU (or is disabled).
         */
        int cpu = smp_processor_id();
        irq_desc_t *desc = irq_desc + irq;
        struct irqaction *action;
        unsigned int status;
        static unsigned int illegal_count = 0;

        if ((unsigned) irq >= ACTUAL_NR_IRQS) {
                irq_err_count++;
                if (illegal_count < MAX_ILLEGAL_IRQS) {
                        illegal_count++;
                        printk(KERN_CRIT
                               "device_interrupt: illegal interrupt %d\n",
                               irq);
                }
                return;
        }

        irq_attempt(cpu, irq)++;
        spin_lock_irq(&desc->lock); /* mask also the higher prio events */
        desc->handler->ack(irq);
        /*
         * REPLAY is when Linux resends an IRQ that was dropped earlier.
         * WAITING is used by probe to mark irqs that are being tested.
         */
        status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
        status |= IRQ_PENDING; /* we _want_ to handle it */

        /*
         * If the IRQ is disabled for whatever reason, we cannot
         * use the action we have.
         */
        action = NULL;
        if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
                action = desc->action;
                status &= ~IRQ_PENDING; /* we commit to handling */
                status |= IRQ_INPROGRESS; /* we are handling it */
        }
        desc->status = status;

        /*
         * If there is no IRQ handler or it was disabled, exit early.
         * Since we set PENDING, if another processor is handling
         * a different instance of this same irq, the other processor
         * will take care of it.
         */
        if (!action)
                goto out;

        /*
         * Edge triggered interrupts need to remember pending events.
         * This applies to any hw interrupts that allow a second
         * instance of the same irq to arrive while we are in handle_irq
         * or in the handler.  But the code here only handles the _second_
         * instance of the irq, not the third or fourth.  So it is mostly
         * useful for irq hardware that does not mask cleanly in an
         * SMP environment.
         */
        for (;;) {
                spin_unlock(&desc->lock);
                handle_IRQ_event(irq, regs, action);
                spin_lock(&desc->lock);

                if (!(desc->status & IRQ_PENDING)
                    || (desc->status & IRQ_LEVEL))
                        break;
                desc->status &= ~IRQ_PENDING;
        }
        desc->status &= ~IRQ_INPROGRESS;
out:
        /*
         * The ->end() handler has to deal with interrupts which got
         * disabled while the handler was running.
         */
        desc->handler->end(irq);
        spin_unlock(&desc->lock);

        if (softirq_pending(cpu))
                do_softirq();
}
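
/*
 * Illustrative timeline (not part of the original source) of the
 * PENDING/INPROGRESS handshake above, for an edge-triggered line that
 * fires on two CPUs at almost the same time:
 *
 *   CPU0: acks, sets INPROGRESS, drops the lock, runs the handlers
 *   CPU1: acks, sees INPROGRESS set, leaves PENDING set, exits early
 *   CPU0: retakes the lock, sees PENDING, clears it, loops and runs
 *         the handlers once more on CPU1's behalf
 *
 * Only the second concurrent instance is remembered this way; a third
 * edge arriving during the replay folds into the same PENDING bit.
 */
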
/*
 * IRQ autodetection code.
 *
 * This depends on the fact that any interrupt that
 * comes in on an unassigned handler will get stuck
 * with "IRQ_WAITING" cleared and the interrupt
 * disabled.
 */
unsigned long
probe_irq_on(void)
{
        int i;
        irq_desc_t *desc;
        unsigned long delay;
        unsigned long val;

        /* Something may have generated an irq long ago and we want to
           flush such a longstanding irq before considering it as spurious. */
        for (i = NR_IRQS-1; i >= 0; i--) {
                desc = irq_desc + i;

                spin_lock_irq(&desc->lock);
                if (!irq_desc[i].action)
                        irq_desc[i].handler->startup(i);
                spin_unlock_irq(&desc->lock);
        }

        /* Wait for longstanding interrupts to trigger. */
        for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
                /* about 20ms delay */ synchronize_irq();

        /* Enable any unassigned irqs.  We must start them up again here
           because if a longstanding irq happened in the previous stage,
           it may have masked itself. */
        for (i = NR_IRQS-1; i >= 0; i--) {
                desc = irq_desc + i;

                spin_lock_irq(&desc->lock);
                if (!desc->action) {
                        desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
                        if (desc->handler->startup(i))
                                desc->status |= IRQ_PENDING;
                }
                spin_unlock_irq(&desc->lock);
        }

        /*
         * Wait for spurious interrupts to trigger.
         */
        for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
                /* about 100ms delay */ synchronize_irq();

        /*
         * Now filter out any obviously spurious interrupts.
         */
        val = 0;
        for (i = 0; i < NR_IRQS; i++) {
                irq_desc_t *desc = irq_desc + i;
                unsigned int status;

                spin_lock_irq(&desc->lock);
                status = desc->status;

                if (status & IRQ_AUTODETECT) {
                        /* It triggered already - consider it spurious. */
                        if (!(status & IRQ_WAITING)) {
                                desc->status = status & ~IRQ_AUTODETECT;
                                desc->handler->shutdown(i);
                        } else
                                if (i < 32)
                                        val |= 1 << i;
                }
                spin_unlock_irq(&desc->lock);
        }

        return val;
}

/*
 * Return a mask of triggered interrupts (this
 * can handle only legacy ISA interrupts).
 */
unsigned int
probe_irq_mask(unsigned long val)
{
        int i;
        unsigned int mask;

        mask = 0;
        for (i = 0; i < NR_IRQS; i++) {
                irq_desc_t *desc = irq_desc + i;
                unsigned int status;

                spin_lock_irq(&desc->lock);
                status = desc->status;

                if (status & IRQ_AUTODETECT) {
                        /* We only react to ISA interrupts. */
                        if (!(status & IRQ_WAITING)) {
                                if (i < 16)
                                        mask |= 1 << i;
                        }

                        desc->status = status & ~IRQ_AUTODETECT;
                        desc->handler->shutdown(i);
                }
                spin_unlock_irq(&desc->lock);
        }

        return mask & val;
}

/*
 * Get the result of the IRQ probe.  A negative result means that
 * we have several candidates; the negated lowest-numbered one is
 * returned.
 */
int
probe_irq_off(unsigned long val)
{
        int i, irq_found, nr_irqs;

        nr_irqs = 0;
        irq_found = 0;
        for (i = 0; i < NR_IRQS; i++) {
                irq_desc_t *desc = irq_desc + i;
                unsigned int status;

                spin_lock_irq(&desc->lock);
                status = desc->status;

                if (status & IRQ_AUTODETECT) {
                        if (!(status & IRQ_WAITING)) {
                                if (!nr_irqs)
                                        irq_found = i;
                                nr_irqs++;
                        }
                        desc->status = status & ~IRQ_AUTODETECT;
                        desc->handler->shutdown(i);
                }
                spin_unlock_irq(&desc->lock);
        }

        if (nr_irqs > 1)
                irq_found = -irq_found;
        return irq_found;
}
