or1k_soc_on_altera_embedded_dev_kit (trunk, rev 3): linux-2.6.24/arch/x86/kernel/tsc_32.c
URL: https://opencores.org/ocsvn/or1k_soc_on_altera_embedded_dev_kit/or1k_soc_on_altera_embedded_dev_kit/trunk

#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/workqueue.h>
#include <linux/cpufreq.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/dmi.h>

#include <asm/delay.h>
#include <asm/tsc.h>
#include <asm/io.h>
#include <asm/timer.h>

#include "mach_timer.h"

static int tsc_enabled;

/*
 * On some systems the TSC frequency does not
 * change with the cpu frequency. So we need
 * an extra value to store the TSC freq
 */
unsigned int tsc_khz;
EXPORT_SYMBOL_GPL(tsc_khz);

int tsc_disable;

#ifdef CONFIG_X86_TSC
static int __init tsc_setup(char *str)
{
        printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
                                "cannot disable TSC.\n");
        return 1;
}
#else
/*
 * disable flag for tsc. Takes effect by clearing the TSC cpu flag
 * in cpu/common.c
 */
static int __init tsc_setup(char *str)
{
        tsc_disable = 1;

        return 1;
}
#endif

__setup("notsc", tsc_setup);
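
/*
 * Usage note: "notsc" is a kernel command-line parameter wired up by
 * the __setup() hook above; e.g. booting with "... notsc" calls
 * tsc_setup() early in boot. With CONFIG_X86_TSC the option is
 * refused with a warning, since the kernel was built to assume a TSC.
 */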

/*
 * code to mark and check if the TSC is unstable
 * due to cpufreq or due to unsynced TSCs
 */
static int tsc_unstable;

int check_tsc_unstable(void)
{
        return tsc_unstable;
}
EXPORT_SYMBOL_GPL(check_tsc_unstable);

/* Accelerators for sched_clock()
 * convert from cycles(64bits) => nanoseconds (64bits)
 *  basic equation:
 *              ns = cycles / (freq / ns_per_sec)
 *              ns = cycles * (ns_per_sec / freq)
 *              ns = cycles * (10^9 / (cpu_khz * 10^3))
 *              ns = cycles * (10^6 / cpu_khz)
 *
 *      Then we use scaling math (suggested by george@mvista.com) to get:
 *              ns = cycles * (10^6 * SC / cpu_khz) / SC
 *              ns = cycles * cyc2ns_scale / SC
 *
 *      And since SC is a constant power of two, we can convert the div
 *  into a shift.
 *
 *  We can use khz divisor instead of mhz to keep a better precision, since
 *  cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
 *  (mathieu.desnoyers@polymtl.ca)
 *
 *                      -johnstul@us.ibm.com "math is hard, lets go shopping!"
 */
unsigned long cyc2ns_scale __read_mostly;

#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */

static inline void set_cyc2ns_scale(unsigned long cpu_khz)
{
        cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR)/cpu_khz;
}
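
/*
 * Worked example: for cpu_khz = 2,000,000 (a 2 GHz CPU),
 * cyc2ns_scale = (10^6 << 10) / 2,000,000 = 512, so a cycle count
 * converts as ns = (cycles * 512) >> 10 = cycles / 2, i.e. 0.5 ns
 * per cycle. The consumer, cycles_2_ns(), is not defined in this
 * file; a sketch assuming the stock 2.6.24 <asm/timer.h> helper:
 *
 *      static inline unsigned long long cycles_2_ns(unsigned long long cyc)
 *      {
 *              return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
 *      }
 */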

/*
 * Scheduler clock - returns current time in nanosec units.
 */
unsigned long long native_sched_clock(void)
{
        unsigned long long this_offset;

        /*
         * Fall back to jiffies if there's no TSC available:
         * ( But note that we still use it if the TSC is marked
         *   unstable. We do this because unlike Time Of Day,
         *   the scheduler clock tolerates small errors and it's
         *   very important for it to be as fast as the platform
         *   can achieve it. )
         */
        if (unlikely(!tsc_enabled && !tsc_unstable))
                /* No locking but a rare wrong value is not a big deal: */
                return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);

        /* read the Time Stamp Counter: */
        rdtscll(this_offset);

        /* return the value in ns */
        return cycles_2_ns(this_offset);
}
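
/*
 * Granularity note: the jiffies fallback above only advances once per
 * timer tick, so with HZ = 250, for example, sched_clock() moves in
 * 1,000,000,000 / 250 = 4,000,000 ns steps, while the TSC path has
 * per-cycle resolution.
 */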

/* We need to define a real function for sched_clock, to override the
   weak default version */
#ifdef CONFIG_PARAVIRT
unsigned long long sched_clock(void)
{
        return paravirt_sched_clock();
}
#else
unsigned long long sched_clock(void)
        __attribute__((alias("native_sched_clock")));
#endif

unsigned long native_calculate_cpu_khz(void)
{
        unsigned long long start, end;
        unsigned long count;
        u64 delta64 = (u64)ULLONG_MAX;
        int i;
        unsigned long flags;

        local_irq_save(flags);

        /* run 3 times to ensure the cache is warm and to get an accurate reading */
        for (i = 0; i < 3; i++) {
                mach_prepare_counter();
                rdtscll(start);
                mach_countup(&count);
                rdtscll(end);

                /*
                 * Error: ECTCNEVERSET
                 * The CTC wasn't reliable: we got a hit on the very first read,
                 * or the CPU was so fast/slow that the quotient wouldn't fit in
                 * 32 bits..
                 */
                if (count <= 1)
                        continue;

                /* cpu freq too slow: */
                if ((end - start) <= CALIBRATE_TIME_MSEC)
                        continue;

                /*
                 * We want the minimum time of all runs in case one of them
                 * is inaccurate due to SMI or other delay
                 */
                delta64 = min(delta64, (end - start));
        }

        /* cpu freq too fast (or every run was bad): */
        if (delta64 > (1ULL<<32))
                goto err;

        delta64 += CALIBRATE_TIME_MSEC/2; /* round for do_div */
        do_div(delta64,CALIBRATE_TIME_MSEC);

        local_irq_restore(flags);
        return (unsigned long)delta64;
err:
        local_irq_restore(flags);
        return 0;
}
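
/*
 * Worked example (assuming the default mach_timer.h, where
 * CALIBRATE_TIME_MSEC is 30): on a 2 GHz CPU the TSC advances about
 * 60,000,000 cycles while mach_countup() waits out the PIT interval,
 * so delta64 / 30 = 2,000,000, i.e. cpu_khz reports 2 GHz.
 */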

int recalibrate_cpu_khz(void)
{
#ifndef CONFIG_SMP
        unsigned long cpu_khz_old = cpu_khz;

        if (cpu_has_tsc) {
                cpu_khz = calculate_cpu_khz();
                tsc_khz = cpu_khz;
                cpu_data(0).loops_per_jiffy =
                        cpufreq_scale(cpu_data(0).loops_per_jiffy,
                                        cpu_khz_old, cpu_khz);
                return 0;
        } else
                return -ENODEV;
#else
        return -ENODEV;
#endif
}

EXPORT_SYMBOL(recalibrate_cpu_khz);
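
/*
 * cpufreq_scale(old, div, mult) returns old * mult / div, so the call
 * above rescales loops_per_jiffy in proportion to the frequency
 * change: e.g. if cpu_khz drops from 2,000,000 to 1,000,000,
 * loops_per_jiffy is halved and TSC-based udelay() stays calibrated.
 */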

#ifdef CONFIG_CPU_FREQ

/*
 * if the CPU frequency is scaled, TSC-based delays will need a different
 * loops_per_jiffy value to function properly.
 */
static unsigned int ref_freq = 0;
static unsigned long loops_per_jiffy_ref = 0;
static unsigned long cpu_khz_ref = 0;

static int
time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
{
        struct cpufreq_freqs *freq = data;

        if (!ref_freq) {
                if (!freq->old){
                        ref_freq = freq->new;
                        return 0;
                }
                ref_freq = freq->old;
                loops_per_jiffy_ref = cpu_data(freq->cpu).loops_per_jiffy;
                cpu_khz_ref = cpu_khz;
        }

        if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
            (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
            (val == CPUFREQ_RESUMECHANGE)) {
                if (!(freq->flags & CPUFREQ_CONST_LOOPS))
                        cpu_data(freq->cpu).loops_per_jiffy =
                                cpufreq_scale(loops_per_jiffy_ref,
                                                ref_freq, freq->new);

                if (cpu_khz) {

                        if (num_online_cpus() == 1)
                                cpu_khz = cpufreq_scale(cpu_khz_ref,
                                                ref_freq, freq->new);
                        if (!(freq->flags & CPUFREQ_CONST_LOOPS)) {
                                tsc_khz = cpu_khz;
                                set_cyc2ns_scale(cpu_khz);
                                /*
                                 * TSC based sched_clock turns
                                 * to junk w/ cpufreq
                                 */
                                mark_tsc_unstable("cpufreq changes");
                        }
                }
        }

        return 0;
}
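
/*
 * Note on the PRECHANGE/POSTCHANGE split above: scaling happens before
 * a frequency increase and after a decrease, so during the transition
 * loops_per_jiffy always corresponds to the higher of the two rates
 * and delay loops never spin for too short a time.
 */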

static struct notifier_block time_cpufreq_notifier_block = {
        .notifier_call  = time_cpufreq_notifier
};

static int __init cpufreq_tsc(void)
{
        return cpufreq_register_notifier(&time_cpufreq_notifier_block,
                                         CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(cpufreq_tsc);

#endif

/* clock source code */

static unsigned long current_tsc_khz = 0;

static cycle_t read_tsc(void)
{
        cycle_t ret;

        rdtscll(ret);

        return ret;
}

static struct clocksource clocksource_tsc = {
        .name                   = "tsc",
        .rating                 = 300,
        .read                   = read_tsc,
        .mask                   = CLOCKSOURCE_MASK(64),
        .mult                   = 0, /* to be set */
        .shift                  = 22,
        .flags                  = CLOCK_SOURCE_IS_CONTINUOUS |
                                  CLOCK_SOURCE_MUST_VERIFY,
};
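
/*
 * mult/shift example: tsc_init() below sets
 * mult = clocksource_khz2mult(tsc_khz, 22), i.e. (10^6 << 22) / tsc_khz
 * (rounded). For tsc_khz = 2,000,000 that is 2,097,152, and the
 * timekeeping core computes ns = (cycles * mult) >> shift = cycles / 2,
 * consistent with cyc2ns_scale above.
 */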

void mark_tsc_unstable(char *reason)
{
        if (!tsc_unstable) {
                tsc_unstable = 1;
                tsc_enabled = 0;
                printk("Marking TSC unstable due to: %s.\n", reason);
                /* Can be called before registration */
                if (clocksource_tsc.mult)
                        clocksource_change_rating(&clocksource_tsc, 0);
                else
                        clocksource_tsc.rating = 0;
        }
}
EXPORT_SYMBOL_GPL(mark_tsc_unstable);

static int __init dmi_mark_tsc_unstable(const struct dmi_system_id *d)
{
        printk(KERN_NOTICE "%s detected: marking TSC unstable.\n",
                       d->ident);
        tsc_unstable = 1;
        return 0;
}

/* List of systems that have known TSC problems */
static struct dmi_system_id __initdata bad_tsc_dmi_table[] = {
        {
         .callback = dmi_mark_tsc_unstable,
         .ident = "IBM Thinkpad 380XD",
         .matches = {
                     DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
                     DMI_MATCH(DMI_BOARD_NAME, "2635FA0"),
                     },
         },
         {}
};

/*
 * Make an educated guess if the TSC is trustworthy and synchronized
 * over all CPUs.
 */
__cpuinit int unsynchronized_tsc(void)
{
        if (!cpu_has_tsc || tsc_unstable)
                return 1;
        /*
         * Intel systems are normally all synchronized.
         * Exceptions must mark TSC as unstable:
         */
        if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
                /* assume multi socket systems are not synchronized: */
                if (num_possible_cpus() > 1)
                        tsc_unstable = 1;
        }
        return tsc_unstable;
}

/*
 * Geode_LX - the OLPC CPU has a possibly very reliable TSC
 */
#ifdef CONFIG_MGEODE_LX
/* RTSC counts during suspend */
#define RTSC_SUSP 0x100

static void __init check_geode_tsc_reliable(void)
{
        unsigned long res_low, res_high;

        rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
        if (res_low & RTSC_SUSP)
                clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
}
#else
static inline void check_geode_tsc_reliable(void) { }
#endif


void __init tsc_init(void)
{
        if (!cpu_has_tsc || tsc_disable)
                goto out_no_tsc;

        cpu_khz = calculate_cpu_khz();
        tsc_khz = cpu_khz;

        if (!cpu_khz)
                goto out_no_tsc;

        printk("Detected %lu.%03lu MHz processor.\n",
                                (unsigned long)cpu_khz / 1000,
                                (unsigned long)cpu_khz % 1000);

        set_cyc2ns_scale(cpu_khz);
        use_tsc_delay();

        /* Check and install the TSC clocksource */
        dmi_check_system(bad_tsc_dmi_table);

        unsynchronized_tsc();
        check_geode_tsc_reliable();
        current_tsc_khz = tsc_khz;
        clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz,
                                                        clocksource_tsc.shift);
        /* lower the rating if we already know it's unstable: */
        if (check_tsc_unstable()) {
                clocksource_tsc.rating = 0;
                clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
        } else
                tsc_enabled = 1;

        clocksource_register(&clocksource_tsc);

        return;

out_no_tsc:
        /*
         * Set the tsc_disable flag if there's no TSC support, this
         * makes it a fast flag for the kernel to see whether it
         * should be using the TSC.
         */
        tsc_disable = 1;
}
