/*  $Id: irq.c,v 1.1 2005-12-20 09:50:43 jcastillo Exp $
 *  arch/sparc/kernel/irq.c:  Interrupt request handling routines. On the
 *                            Sparc the IRQ's are basically 'cast in stone'
 *                            and you are supposed to probe the prom's device
 *                            node trees to find out who's got which IRQ.
 *
 *  Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
 *  Copyright (C) 1995 Pete A. Zaitcev (zaitcev@ipmce.su)
 *  Copyright (C) 1996 Dave Redman (djhr@tadpole.co.uk)
 */

#include <linux/config.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/malloc.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/psr.h>
#include <asm/smp.h>
#include <asm/vaddrs.h>
#include <asm/timer.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/traps.h>
#include <asm/irq.h>
#include <asm/io.h>

/*
 * Dave Redman (djhr@tadpole.co.uk)
 *
 * IRQ numbers.. These are no longer restricted to 15..
 *
 * this is done to enable SBUS cards and onboard IO to be masked
 * correctly. using the interrupt level isn't good enough.
 *
 * For example:
 *   A device interrupting at sbus level6 and the Floppy both come in
 *   at IRQ11, but enabling and disabling them requires writing to
 *   different bits in the SLAVIO/SEC.
 *
 * As a result of these changes sun4m machines could now support
 * directed CPU interrupts using the existing enable/disable irq code
 * with tweaks.
 *
 */
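
/* Throughout this file the CPU interrupt level is derived from the IRQ
 * number with "cpu_irq = irq & NR_IRQS"; the level indexes the per-level
 * handler chains and the trap table, while the full IRQ number identifies
 * the individual source handed to enable_irq()/disable_irq().
 */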
 
static void irq_panic(void)
{
    extern char *cputypval;
    prom_printf("machine: %s doesn't have irq handlers defined!\n",cputypval);
    prom_halt();
}

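/* Per-machine IRQ hooks.  They default to irq_panic() so an unsupported
 * machine dies loudly; the machine-specific setup reached from init_IRQ()
 * below is expected to install the real routines.
 */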
void (*enable_irq)(unsigned int) = (void (*)(unsigned int)) irq_panic;
void (*disable_irq)(unsigned int) = (void (*)(unsigned int)) irq_panic;
void (*clear_clock_irq)( void ) = irq_panic;
void (*clear_profile_irq)( void ) = irq_panic;
void (*load_profile_irq)( unsigned int ) =  (void (*)(unsigned int)) irq_panic;
void (*init_timers)( void (*)(int, void *,struct pt_regs *)) =
    (void (*)( void (*)(int, void *,struct pt_regs *))) irq_panic;

#ifdef __SMP__
void (*set_cpu_int)(int, int);
void (*clear_cpu_int)(int, int);
void (*set_irq_udt)(int);
#endif

/*
 * Dave Redman (djhr@tadpole.co.uk)
 *
 * There used to be extern calls and hard coded values here.. very sucky!
 * instead, because some of the devices attach very early, I do something
 * equally sucky but at least we'll never try to free statically allocated
 * space or call kmalloc before kmalloc_init :(.
 *
 * In fact it's the timer10 that attaches first.. then timer14
 * then kmalloc_init is called.. then the tty interrupts attach.
 * hmmm....
 *
 */
#define MAX_STATIC_ALLOC        4
static struct irqaction static_irqaction[MAX_STATIC_ALLOC];
static int static_irq_count = 0;

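/* One chain of irqaction structures per CPU interrupt level, indexed by
 * cpu_irq; shared handlers are linked through ->next.
 */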
static struct irqaction *irq_action[NR_IRQS+1] = {
          NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL,
          NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL
};

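/* Format the registered handlers into buf, one line per interrupt level
 * with shared handlers appended; '+' marks SA_INTERRUPT (fast) handlers.
 * This is what normally backs the /proc interrupts listing.
 */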
int get_irq_list(char *buf)
{
        int i, len = 0;
        struct irqaction * action;

        for (i = 0 ; i < (NR_IRQS+1) ; i++) {
                action = *(i + irq_action);
                if (!action)
                        continue;
                len += sprintf(buf+len, "%2d: %8d %c %s",
                        i, kstat.interrupts[i],
                        (action->flags & SA_INTERRUPT) ? '+' : ' ',
                        action->name);
                for (action=action->next; action; action = action->next) {
                        len += sprintf(buf+len, ",%s %s",
                                (action->flags & SA_INTERRUPT) ? " +" : "",
                                action->name);
                }
                len += sprintf(buf+len, "\n");
        }
        return len;
}

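/* Refuse bogus or statically allocated IRQs; otherwise unlink the handler
 * matching dev_id (or the sole handler when dev_id is NULL), free it, and
 * disable the interrupt once the chain becomes empty.
 */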
void free_irq(unsigned int irq, void *dev_id)
{
        struct irqaction * action;
        struct irqaction * tmp = NULL;
        unsigned long flags;
        unsigned int cpu_irq;

        cpu_irq = irq & NR_IRQS;
        action = *(cpu_irq + irq_action);
        if (cpu_irq > 14) {  /* 14 irq levels on the sparc */
                printk("Trying to free bogus IRQ %d\n", irq);
                return;
        }
        if (!action->handler) {
                printk("Trying to free free IRQ%d\n",irq);
                return;
        }
        if (dev_id) {
                for (; action; action = action->next) {
                        if (action->dev_id == dev_id) break;
                        tmp = action;
                }
                if (!action) {
                        printk("Trying to free free shared IRQ%d\n",irq);
                        return;
                }
        } else if (action->flags & SA_SHIRQ) {
                printk("Trying to free shared IRQ%d with NULL device ID\n", irq);
                return;
        }
        if (action->flags & SA_STATIC_ALLOC)
        {
            /* This interrupt is marked as specially allocated
             * so it is a bad idea to free it.
             */
            printk("Attempt to free statically allocated IRQ%d (%s)\n",
                   irq, action->name);
            return;
        }

        save_flags(flags); cli();
        if (action && tmp)
                tmp->next = action->next;
        else
                *(cpu_irq + irq_action) = action->next;

        kfree_s(action, sizeof(struct irqaction));

        if (!(*(cpu_irq + irq_action)))
                disable_irq(irq);

        restore_flags(flags);
}

void unexpected_irq(int irq, void *dev_id, struct pt_regs * regs)
{
        int i;
        struct irqaction * action;
        unsigned int cpu_irq;

        cpu_irq = irq & NR_IRQS;
        action = *(cpu_irq + irq_action);

        printk("IO device interrupt, irq = %d\n", irq);
        printk("PC = %08lx NPC = %08lx FP=%08lx\n", regs->pc,
                    regs->npc, regs->u_regs[14]);
        printk("Expecting: ");
        for (i = 0; i < 16; i++)
                if (action->handler)
                        prom_printf("[%s:%d:0x%x] ", action->name, (int) i,
                                    (unsigned int) action->handler);
        printk("AIEEE\n");
        panic("bogus interrupt received");
}

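/* Main dispatch for normal interrupts: count the interrupt for this level
 * and call every handler on the chain, diverting to unexpected_irq() for
 * any entry without a handler.  Intended to be invoked from the low-level
 * interrupt entry path.
 */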
void handler_irq(int irq, struct pt_regs * regs)
{
        struct irqaction * action;
        unsigned int cpu_irq;

        cpu_irq = irq & NR_IRQS;
        action = *(cpu_irq + irq_action);
        kstat.interrupts[cpu_irq]++;
#if 0
        printk("I<%d,%d,%d>", smp_processor_id(), irq, smp_proc_in_lock[smp_processor_id()]);
#endif
        while (action) {
                if (!action->handler)
                        unexpected_irq(irq, action->dev_id, regs);
                else
                        action->handler(irq, action->dev_id, regs);
                action = action->next;
        }
}

/*
 * do_IRQ handles IRQ's that have been installed without the
 * SA_INTERRUPT flag: it uses the full signal-handling return
 * and runs with other interrupts enabled. All relatively slow
 * IRQ's should use this format: notably the keyboard/timer
 * routines.
 */
asmlinkage void do_IRQ(int irq, struct pt_regs * regs)
{
        struct irqaction * action;
        unsigned int cpu_irq;

        cpu_irq = irq & NR_IRQS;
        action = *(cpu_irq + irq_action);
        kstat.interrupts[cpu_irq]++;
        while (action) {
                action->handler(irq, action->dev_id, regs);
                action = action->next;
        }
}

/*
 * do_fast_IRQ handles IRQ's that don't need the fancy interrupt return
 * stuff - the handler is also running with interrupts disabled unless
 * it explicitly enables them later.
 */
asmlinkage void do_fast_IRQ(int irq)
{
        kstat.interrupts[irq&NR_IRQS]++;
        printk("Got FAST_IRQ number %04lx\n", (long unsigned int) irq);
        return;
}

/* Fast IRQ's on the Sparc can only have one routine attached to them,
 * thus no sharing possible.
 */
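
/* Note that request_fast_irq() patches the trap table entry for the level
 * to branch straight to the handler, so fast handlers bypass the normal
 * handler_irq()/do_IRQ() dispatch above.
 *
 * Hypothetical usage sketch (the device names below are illustrative and
 * not part of this file):
 *
 *      extern void mydev_fast_intr(int, void *, struct pt_regs *);
 *
 *      if (request_fast_irq(MYDEV_IRQ, mydev_fast_intr,
 *                           SA_INTERRUPT, "mydev"))
 *              printk("mydev: could not grab fast IRQ\n");
 */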
int request_fast_irq(unsigned int irq,
                     void (*handler)(int, void *, struct pt_regs *),
                     unsigned long irqflags, const char *devname)
{
        struct irqaction *action;
        unsigned long flags;
        unsigned int cpu_irq;

        cpu_irq = irq & NR_IRQS;
        if(cpu_irq > 14)
                return -EINVAL;
        if(!handler)
                return -EINVAL;
        action = *(cpu_irq + irq_action);
        if(action) {
                if(action->flags & SA_SHIRQ)
                        panic("Trying to register fast irq when already shared.\n");
                if(irqflags & SA_SHIRQ)
                        panic("Trying to register fast irq as shared.\n");

                /* Anyway, someone already owns it so cannot be made fast. */
                return -EBUSY;
        }

        save_flags(flags); cli();

        /* If this is flagged as statically allocated then we use our
         * private struct which is never freed.
         */
        if (irqflags & SA_STATIC_ALLOC)
            if (static_irq_count < MAX_STATIC_ALLOC)
                action = &static_irqaction[static_irq_count++];
            else
                printk("Fast IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n",
                       irq, devname);

        if (action == NULL)
            action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
                                                 GFP_KERNEL);

        if (!action) {
                restore_flags(flags);
                return -ENOMEM;
        }

        /* Dork with trap table if we get this far. */
        sparc_ttable[SP_TRAP_IRQ1+(cpu_irq-1)].inst_one =
                SPARC_BRANCH((unsigned long) handler,
                             (unsigned long) &sparc_ttable[SP_TRAP_IRQ1+(irq-1)].inst_one);
        sparc_ttable[SP_TRAP_IRQ1+(cpu_irq-1)].inst_two = SPARC_RD_PSR_L0;
        sparc_ttable[SP_TRAP_IRQ1+(cpu_irq-1)].inst_three = SPARC_NOP;
        sparc_ttable[SP_TRAP_IRQ1+(cpu_irq-1)].inst_four = SPARC_NOP;

        action->handler = handler;
        action->flags = irqflags;
        action->mask = 0;
        action->name = devname;
        action->dev_id = NULL;

        *(cpu_irq + irq_action) = action;

        enable_irq(irq);
        restore_flags(flags);
        return 0;
}

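/* Register a normal (slow) handler.  Sharing is allowed only when both the
 * existing and the new handler pass SA_SHIRQ, and fast (SA_INTERRUPT) and
 * slow handlers cannot be mixed on one IRQ.
 *
 * Hypothetical usage sketch (the device names below are illustrative and
 * not part of this file):
 *
 *      static void mydev_intr(int irq, void *dev_id, struct pt_regs *regs)
 *      {
 *              ... acknowledge and service the device ...
 *      }
 *
 *      if (request_irq(MYDEV_IRQ, mydev_intr, SA_SHIRQ, "mydev", &mydev_unit))
 *              printk("mydev: could not grab IRQ\n");
 */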
int request_irq(unsigned int irq,
                void (*handler)(int, void *, struct pt_regs *),
                unsigned long irqflags, const char * devname, void *dev_id)
{
        struct irqaction * action, *tmp = NULL;
        unsigned long flags;
        unsigned int cpu_irq;

        cpu_irq = irq & NR_IRQS;
        if(cpu_irq > 14)
                return -EINVAL;

        if (!handler)
            return -EINVAL;
        action = *(cpu_irq + irq_action);
        if (action) {
                if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ)) {
                        for (tmp = action; tmp->next; tmp = tmp->next);
                } else {
                        return -EBUSY;
                }
                if ((action->flags & SA_INTERRUPT) ^ (irqflags & SA_INTERRUPT)) {
                        printk("Attempt to mix fast and slow interrupts on IRQ%d denied\n", irq);
                        return -EBUSY;
                }
        }

        save_flags(flags); cli();

        /* If this is flagged as statically allocated then we use our
         * private struct which is never freed.
         */
        if (irqflags & SA_STATIC_ALLOC)
            if (static_irq_count < MAX_STATIC_ALLOC)
                action = &static_irqaction[static_irq_count++];
            else
                printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n",irq, devname);

        if (action == NULL)
            action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
                                                 GFP_KERNEL);

        if (!action) {
                restore_flags(flags);
                return -ENOMEM;
        }

        action->handler = handler;
        action->flags = irqflags;
        action->mask = 0;
        action->name = devname;
        action->next = NULL;
        action->dev_id = dev_id;

        if (tmp)
                tmp->next = action;
        else
                *(cpu_irq + irq_action) = action;

        enable_irq(irq);
        restore_flags(flags);
        return 0;
}

/* We really don't need these at all on the Sparc.  We only have
 * stubs here because they are exported to modules.
 */
unsigned long probe_irq_on(void)
{
  return 0;
}

int probe_irq_off(unsigned long mask)
{
  return 0;
}

/* djhr
 * This could probably be made indirect too and assigned in the CPU
 * bits of the code. That would be much nicer I think and would also
 * fit in with the idea of being able to tune your kernel for your machine
 * by removing unrequired machine and device support.
 *
 */

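/* Select the machine-specific IRQ initialisation according to
 * sparc_cpu_model (sun4c or sun4m), or the AP1000 variant when that
 * support is configured in.
 */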
void init_IRQ(void)
{
        extern void sun4c_init_IRQ( void );
        extern void sun4m_init_IRQ( void );
#if CONFIG_AP1000
        extern void ap_init_IRQ(void);
        ap_init_IRQ();
        return;
#endif

        switch(sparc_cpu_model) {
        case sun4c:
                sun4c_init_IRQ();
                break;

        case sun4m:
                sun4m_init_IRQ();
                break;

        default:
                prom_printf("Cannot initialize IRQ's on this Sun machine...");
                break;
        }
}
