linux/linux-2.4/arch/ia64/kernel/smp.c (or1k trunk, rev 1765)
URL: https://opencores.org/ocsvn/or1k/or1k/trunk
/*
 * SMP Support
 *
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 1999, 2001 David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * Lots of stuff stolen from arch/alpha/kernel/smp.c
 *
 * 01/05/16 Rohit Seth <rohit.seth@intel.com>  IA64-SMP functions. Reorganized
 *	the existing code (on the lines of x86 port).
 * 00/09/11 David Mosberger <davidm@hpl.hp.com> Do loops_per_jiffy
 *	calibration on each CPU.
 * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> fixed logical processor id
 * 00/03/31 Rohit Seth <rohit.seth@intel.com>	Fixes for Bootstrap Processor
 *	& cpu_online_map now gets done here (instead of setup.c)
 * 99/10/05 davidm	Update to bring it in sync with new command-line processing
 *	scheme.
 * 10/13/00 Goutham Rao <goutham.rao@intel.com> Updated smp_call_function and
 *	smp_call_function_single to resend IPI on timeouts
 */
#define __KERNEL_SYSCALLS__

#include <linux/config.h>

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/cache.h>
#include <linux/efi.h>

#include <asm/atomic.h>
#include <asm/bitops.h>
#include <asm/current.h>
#include <asm/delay.h>
#include <asm/machvec.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/sal.h>
#include <asm/system.h>
#include <asm/unistd.h>
#include <asm/mca.h>

/* The 'big kernel lock' */
spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;

/*
 * Structure and data for smp_call_function().  This is designed to minimise
 * static memory requirements.  It also looks cleaner.
 */
static spinlock_t call_lock = SPIN_LOCK_UNLOCKED;
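
/*
 * Only one cross-CPU call is in flight at a time: call_data points at the
 * active descriptor while its IPI is pending, and call_lock serializes the
 * senders.  <started> counts CPUs that have picked the call up; <finished>
 * counts CPUs that have completed it when <wait> is set.
 */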
 
struct call_data_struct {
	void (*func) (void *info);
	void *info;
	long wait;
	atomic_t started;
	atomic_t finished;
};

static volatile struct call_data_struct *call_data;
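
/*
 * Bit numbers within a CPU's ipi.operation word; posted by
 * send_IPI_single() and decoded in handle_IPI().
 */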
 
#define IPI_CALL_FUNC		0
#define IPI_CPU_STOP		1
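
/*
 * Remove this CPU from cpu_online_map, raise its external task priority
 * (max_xtp) to discourage further interrupt delivery, disable local
 * interrupts, and halt.
 */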
 
static void
stop_this_cpu (void)
{
	extern void cpu_halt (void);
	/*
	 * Remove this CPU:
	 */
	clear_bit(smp_processor_id(), &cpu_online_map);
	max_xtp();
	__cli();
	cpu_halt();
}
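
/*
 * IPI dispatcher: atomically fetch and clear this CPU's pending-operation
 * mask, then handle every bit that was set.  New operations may be posted
 * while we run, so keep going until the mask reads back empty.
 */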
 
void
handle_IPI (int irq, void *dev_id, struct pt_regs *regs)
{
	int this_cpu = smp_processor_id();
	unsigned long *pending_ipis = &local_cpu_data->ipi.operation;
	unsigned long ops;

	/* Count this now; we may make a call that never returns. */
	local_cpu_data->ipi_count++;

	mb();	/* Order interrupt and bit testing. */
	while ((ops = xchg(pending_ipis, 0)) != 0) {
		mb();	/* Order bit clearing and data access. */
		do {
			unsigned long which;

			which = ffz(~ops);
			ops &= ~(1 << which);

			switch (which) {
			case IPI_CALL_FUNC:
			{
				struct call_data_struct *data;
				void (*func)(void *info);
				void *info;
				int wait;

				/* release the 'pointer lock' */
				data = (struct call_data_struct *) call_data;
				func = data->func;
				info = data->info;
				wait = data->wait;

				mb();
				atomic_inc(&data->started);

				/* At this point the structure may be gone unless wait is true. */
				(*func)(info);

				/* Notify the sending CPU that the task is done. */
				mb();
				if (wait)
					atomic_inc(&data->finished);
			}
			break;

			case IPI_CPU_STOP:
				stop_this_cpu();
				break;

			default:
				printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n", this_cpu, which);
				break;
			} /* Switch */
		} while (ops);

		mb();	/* Order data access and bit testing. */
	}
}
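
/*
 * Post <op> in the destination CPU's pending-operation mask (set_bit()
 * is atomic, so concurrent senders are safe), then kick that CPU with
 * the generic IPI vector.
 */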
 
static inline void
send_IPI_single (int dest_cpu, int op)
{
	set_bit(op, &cpu_data(dest_cpu)->ipi.operation);
	platform_send_ipi(dest_cpu, IA64_IPI_VECTOR, IA64_IPI_DM_INT, 0);
}

static inline void
send_IPI_allbutself (int op)
{
	int i;

	for (i = 0; i < smp_num_cpus; i++) {
		if (i != smp_processor_id())
			send_IPI_single(i, op);
	}
}

static inline void
send_IPI_all (int op)
{
	int i;

	for (i = 0; i < smp_num_cpus; i++)
		send_IPI_single(i, op);
}

static inline void
send_IPI_self (int op)
{
	send_IPI_single(smp_processor_id(), op);
}
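
/*
 * Reschedule interrupts travel on their own vector rather than through
 * ipi.operation: the interrupt itself is the whole message, so there is
 * nothing for the receiver to decode.
 */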
 
void
smp_send_reschedule (int cpu)
{
	platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
}
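
/*
 * Flush the TLB on all CPUs: remotely via smp_call_function(), then on
 * this CPU.
 */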
 
void
smp_flush_tlb_all (void)
{
	smp_call_function((void (*)(void *))local_flush_tlb_all, 0, 1, 1);
	local_flush_tlb_all();
}
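
/*
 * Flush <mm>'s mappings on this CPU and, unless the mm is private to the
 * current single-threaded task, on all other CPUs as well.
 */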
 
void
smp_flush_tlb_mm (struct mm_struct *mm)
{
	local_flush_tlb_mm(mm);

	/* this happens for the common case of a single-threaded fork():  */
	if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1))
		return;

	smp_call_function((void (*)(void *))local_flush_tlb_mm, mm, 1, 1);
}

/*
 * Run a function on another CPU.
 *  <func>	The function to run. This must be fast and non-blocking.
 *  <info>	An arbitrary pointer to pass to the function.
 *  <nonatomic>	Currently unused.
 *  <wait>	If true, wait until the function has completed on the other CPU.
 *  [RETURNS]	0 on success, else a negative status code.
 *
 * Does not return until the remote CPU is about to execute <func>, is
 * executing it, or has finished executing it.
 */

int
smp_call_function_single (int cpuid, void (*func) (void *info), void *info, int nonatomic,
			  int wait)
{
	struct call_data_struct data;
	int cpus = 1;

	if (cpuid == smp_processor_id()) {
		printk("%s: trying to call self\n", __FUNCTION__);
		return -EBUSY;
	}

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock_bh(&call_lock);

	call_data = &data;
	mb();	/* ensure store to call_data precedes setting of IPI_CALL_FUNC */
	send_IPI_single(cpuid, IPI_CALL_FUNC);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		barrier();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			barrier();
	call_data = NULL;

	spin_unlock_bh(&call_lock);
	return 0;
}
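
/*
 * Usage sketch (illustrative, not from the original source): a caller
 * passes a fast, non-blocking handler such as the TLB-flush helpers used
 * elsewhere in this file, e.g. asking CPU 1 to flush its TLB and waiting
 * for completion:
 *
 *	smp_call_function_single(1, (void (*)(void *)) local_flush_tlb_all,
 *				 0, 1, 1);
 */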
 
/*
 * This function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 */

/*
 *  [SUMMARY]	Run a function on all other CPUs.
 *  <func>	The function to run. This must be fast and non-blocking.
 *  <info>	An arbitrary pointer to pass to the function.
 *  <nonatomic>	Currently unused.
 *  <wait>	If true, wait (atomically) until the function has completed on other CPUs.
 *  [RETURNS]	0 on success, else a negative status code.
 *
 * Does not return until the remote CPUs are about to execute <func>, are
 * executing it, or have finished executing it.
 *
 * You must not call this function with interrupts disabled or from a
 * hardware interrupt handler; you may call it from a bottom half handler.
 */
int
smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wait)
{
	struct call_data_struct data;
	int cpus = smp_num_cpus - 1;

	if (!cpus)
		return 0;

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock_bh(&call_lock);

	call_data = &data;
	mb();	/* ensure store to call_data precedes setting of IPI_CALL_FUNC */
	send_IPI_allbutself(IPI_CALL_FUNC);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		barrier();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			barrier();
	call_data = NULL;

	spin_unlock_bh(&call_lock);
	return 0;
}
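
/*
 * Per-CPU timer tick: invoke update_process_times() once every
 * prof_multiplier ticks, using prof_counter as the countdown.
 */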
 
void
smp_do_timer (struct pt_regs *regs)
{
	int user = user_mode(regs);

	if (--local_cpu_data->prof_counter <= 0) {
		local_cpu_data->prof_counter = local_cpu_data->prof_multiplier;
		update_process_times(user);
	}
}

/*
 * This function calls the 'stop' function on all other CPUs in the system.
 */
void
smp_send_stop (void)
{
	send_IPI_allbutself(IPI_CPU_STOP);
	smp_num_cpus = 1;
}
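
/*
 * The profiling timer multiplier is fixed on this port, so any request
 * to change it is rejected.
 */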
 
int __init
setup_profiling_timer (unsigned int multiplier)
{
	return -EINVAL;
}
