OpenCores Subversion repository: or1k
URL: https://opencores.org/ocsvn/or1k/or1k/trunk

File: or1k/trunk/linux/linux-2.4/arch/mips/kernel/smp.c (rev 1275, author phoenix)

/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001 Broadcom Corporation
 */
#include <linux/config.h>
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched.h>

#include <asm/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/hardirq.h>
#include <asm/softirq.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>

/* The 'big kernel lock' */
spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
int smp_threads_ready;  /* Not used */
atomic_t smp_commenced = ATOMIC_INIT(0);

atomic_t cpus_booted = ATOMIC_INIT(0);

int smp_num_cpus = 1;                   /* Number that came online.  */
cpumask_t cpu_online_map;               /* Bitmask of currently online CPUs */
int __cpu_number_map[NR_CPUS];
int __cpu_logical_map[NR_CPUS];
cycles_t cacheflush_time;

void __init smp_callin(void)
{
#if 0
        calibrate_delay();
        smp_store_cpu_info(cpuid);
#endif
}

void __init smp_commence(void)
{
        wmb();
        atomic_set(&smp_commenced, 1);
}

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
        core_send_ipi(cpu, SMP_RESCHEDULE_YOURSELF);
}

spinlock_t smp_call_lock = SPIN_LOCK_UNLOCKED;

struct call_data_struct *call_data;
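
/*
 * Illustrative sketch added for this listing, not part of the original
 * file: struct call_data_struct is declared in <asm/smp.h>.  Judging
 * from the fields used by smp_call_function() and
 * smp_call_function_interrupt() below, it presumably looks roughly
 * like this:
 *
 *      struct call_data_struct {
 *              void (*func)(void *);
 *              void *info;
 *              atomic_t started;
 *              atomic_t finished;
 *              int wait;
 *      };
 */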

/*
 * Run a function on all other CPUs.
 *  <func>      The function to run. This must be fast and non-blocking.
 *  <info>      An arbitrary pointer to pass to the function.
 *  <retry>     If true, keep retrying until ready.
 *  <wait>      If true, wait until function has completed on other CPUs.
 *  [RETURNS]   0 on success, else a negative status code.
 *
 * Does not return until remote CPUs are nearly ready to execute <func>,
 * are executing it, or have already executed it.
 */
int smp_call_function (void (*func) (void *info), void *info, int retry,
                                                                int wait)
{
        struct call_data_struct data;
        int i, cpus = smp_num_cpus - 1;
        int cpu = smp_processor_id();

        if (!cpus)
                return 0;

        data.func = func;
        data.info = info;
        atomic_set(&data.started, 0);
        data.wait = wait;
        if (wait)
                atomic_set(&data.finished, 0);

        spin_lock(&smp_call_lock);
        call_data = &data;

        /* Send a message to all other CPUs and wait for them to respond */
        for (i = 0; i < smp_num_cpus; i++)
                if (i != cpu)
                        core_send_ipi(i, SMP_CALL_FUNCTION);

        /* Wait for response */
        /* FIXME: lock-up detection, backtrace on lock-up */
        while (atomic_read(&data.started) != cpus)
                barrier();

        if (wait)
                while (atomic_read(&data.finished) != cpus)
                        barrier();
        spin_unlock(&smp_call_lock);

        return 0;
}
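
/*
 * Illustrative usage sketch added for this listing, not part of the
 * original file; dump_cpu_ipi() and dump_all_cpus() are hypothetical
 * names.  It shows the usual calling pattern: run the handler on all
 * *other* CPUs with wait=1, so that any stack-resident <info> data
 * stays in scope until every CPU has finished, then handle the local
 * CPU by hand -- the same pattern the flush_tlb_*() routines below use.
 */
#if 0
static void dump_cpu_ipi(void *info)
{
        /* Runs in IPI context on each remote CPU; must not block. */
        printk("CPU%d alive\n", smp_processor_id());
}

void dump_all_cpus(void)
{
        smp_call_function(dump_cpu_ipi, NULL, 1, 1);
        dump_cpu_ipi(NULL);     /* smp_call_function() skips this CPU */
}
#endif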

void smp_call_function_interrupt(void)
{
        void (*func) (void *info) = call_data->func;
        void *info = call_data->info;
        int wait = call_data->wait;
        int cpu = smp_processor_id();

        irq_enter(cpu, 0);       /* XXX choose an irq number? */
        /*
         * Notify initiating CPU that I've grabbed the data and am
         * about to execute the function.
         */
        mb();
        atomic_inc(&call_data->started);

        /*
         * At this point the info structure may be out of scope unless wait==1.
         */
        (*func)(info);
        if (wait) {
                mb();
                atomic_inc(&call_data->finished);
        }
        irq_exit(cpu, 0);        /* XXX choose an irq number? */
}

static void stop_this_cpu(void *dummy)
{
        /*
         * Remove this CPU:
         */
        clear_bit(smp_processor_id(), &cpu_online_map);
        /* May need to service _machine_restart IPI */
        local_irq_enable();
        /* XXXKW wait if available? */
        for (;;);
}

void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, NULL, 1, 0);
        /*
         * Fix me: this prevents future IPIs, for example ones that
         * would cause a restart to happen on CPU0.
         */
        smp_num_cpus = 1;
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}

static void flush_tlb_all_ipi(void *info)
{
        local_flush_tlb_all();
}

void flush_tlb_all(void)
{
        smp_call_function(flush_tlb_all_ipi, NULL, 1, 1);
        local_flush_tlb_all();
}

static void flush_tlb_mm_ipi(void *mm)
{
        local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * The following TLB flush calls are invoked when old translations are
 * being torn down, or PTE attributes are changing. For single-threaded
 * address spaces, a new context is obtained on the current CPU, and the
 * TLB contexts on other CPUs are invalidated to force a new context
 * allocation at switch_mm time, should the mm ever be used on other
 * CPUs. For multithreaded address spaces, inter-CPU interrupts have to
 * be sent. Another case where inter-CPU interrupts are required is when
 * the target mm might be active on another CPU (e.g. debuggers doing
 * the flushes on behalf of debuggees, kswapd stealing pages from
 * another process etc).
 * Kanoj 07/00.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1, 1);
        } else {
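                /*
                 * Single-threaded mm running on this CPU: rather than an
                 * IPI, zero the remote TLB contexts so those CPUs allocate
                 * a fresh context at switch_mm time should they ever run
                 * this mm (see the block comment above flush_tlb_mm).
                 */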
                int i;
                for (i = 0; i < smp_num_cpus; i++)
                        if (smp_processor_id() != i)
                                cpu_context(i, mm) = 0;
        }
        local_flush_tlb_mm(mm);
}

struct flush_tlb_data {
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        unsigned long addr1;
        unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        local_flush_tlb_range(fd->mm, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                struct flush_tlb_data fd;

                fd.mm = mm;
                fd.addr1 = start;
                fd.addr2 = end;
                smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1, 1);
        } else {
                int i;
                for (i = 0; i < smp_num_cpus; i++)
                        if (smp_processor_id() != i)
                                cpu_context(i, mm) = 0;
        }
        local_flush_tlb_range(mm, start, end);
}

static void flush_tlb_page_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
                struct flush_tlb_data fd;

                fd.vma = vma;
                fd.addr1 = page;
                smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1, 1);
        } else {
                int i;
                for (i = 0; i < smp_num_cpus; i++)
                        if (smp_processor_id() != i)
                                cpu_context(i, vma->vm_mm) = 0;
        }
        local_flush_tlb_page(vma, page);
}

EXPORT_SYMBOL(smp_num_cpus);
EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(cpu_data);
EXPORT_SYMBOL(synchronize_irq);
EXPORT_SYMBOL(kernel_flag);
EXPORT_SYMBOL(__global_sti);
EXPORT_SYMBOL(__global_cli);
EXPORT_SYMBOL(__global_save_flags);
EXPORT_SYMBOL(__global_restore_flags);
