OpenCores
URL https://opencores.org/ocsvn/test_project/test_project/trunk

Subversion Repositories test_project

File: test_project/trunk/linux_sd_driver/drivers/acpi/processor_idle.c
/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *                      - Added processor hotplug support
 *  Copyright (C) 2005  Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *                      - Added support for C3 on SMP
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>        /* need_resched() */
#include <linux/latency.h>
#include <linux/clockchips.h>
#include <linux/cpuidle.h>

/*
 * Include the apic definitions for x86 to have the APIC timer related defines
 * available also for UP (on SMP it gets magically included via linux/smp.h).
 * asm/acpi.h is not an option, as it would require more include magic. Also
 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
 */
#ifdef CONFIG_X86
#include <asm/apic.h>
#endif

#include <asm/io.h>
#include <asm/uaccess.h>

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>

#define ACPI_PROCESSOR_COMPONENT        0x01000000
#define ACPI_PROCESSOR_CLASS            "processor"
#define _COMPONENT              ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_idle");
#define ACPI_PROCESSOR_FILE_POWER       "power"
#define US_TO_PM_TIMER_TICKS(t)         ((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
#define PM_TIMER_TICK_NS                (1000000000ULL/PM_TIMER_FREQUENCY)
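
/*
 * Note: the ACPI PM timer runs at PM_TIMER_FREQUENCY (3.579545 MHz), so the
 * two macros above convert between microseconds and PM timer ticks, and
 * PM_TIMER_TICK_NS is the length of one tick in nanoseconds.
 */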
#ifndef CONFIG_CPU_IDLE
#define C2_OVERHEAD                     4       /* 1us (3.579 ticks per us) */
#define C3_OVERHEAD                     4       /* 1us (3.579 ticks per us) */
static void (*pm_idle_save) (void) __read_mostly;
#else
#define C2_OVERHEAD                     1       /* 1us */
#define C3_OVERHEAD                     1       /* 1us */
#endif
#define PM_TIMER_TICKS_TO_US(p)         (((p) * 1000)/(PM_TIMER_FREQUENCY/1000))

static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
#ifdef CONFIG_CPU_IDLE
module_param(max_cstate, uint, 0000);
#else
module_param(max_cstate, uint, 0644);
#endif
static unsigned int nocst __read_mostly;
module_param(nocst, uint, 0000);
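
/*
 * Both parameters can also be given on the kernel command line when the
 * driver is built in, e.g. "processor.max_cstate=1" to limit C-states or
 * "processor.nocst=1" to ignore _CST and fall back to FADT/P_BLK data.
 */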

#ifndef CONFIG_CPU_IDLE
/*
 * bm_history -- bit-mask with a bit per jiffy of bus-master activity
 * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms
 * 800 HZ: 0xFFFFFFFF: 32 jiffies = 40ms
 * 100 HZ: 0x0000000F: 4 jiffies = 40ms
 * reduce history for more aggressive entry into C3
 */
static unsigned int bm_history __read_mostly =
    (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1));
module_param(bm_history, uint, 0644);

static int acpi_processor_set_power_policy(struct acpi_processor *pr);

#endif

/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int set_max_cstate(const struct dmi_system_id *id)
{
        if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
                return 0;

        printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate."
               " Override with \"processor.max_cstate=%d\"\n", id->ident,
               (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);

        max_cstate = (long)id->driver_data;

        return 0;
}

/* Actually this shouldn't be __cpuinitdata, would be better to fix the
   callers to only run once -AK */
static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
          DMI_MATCH(DMI_BIOS_VERSION,"1SET70WW")}, (void *)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
          DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW")}, (void *)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
          DMI_MATCH(DMI_BIOS_VERSION,"1SET43WW") }, (void*)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
          DMI_MATCH(DMI_BIOS_VERSION,"1SET45WW") }, (void*)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
          DMI_MATCH(DMI_BIOS_VERSION,"1SET47WW") }, (void*)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
          DMI_MATCH(DMI_BIOS_VERSION,"1SET50WW") }, (void*)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
          DMI_MATCH(DMI_BIOS_VERSION,"1SET52WW") }, (void*)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
          DMI_MATCH(DMI_BIOS_VERSION,"1SET55WW") }, (void*)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
          DMI_MATCH(DMI_BIOS_VERSION,"1SET56WW") }, (void*)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
          DMI_MATCH(DMI_BIOS_VERSION,"1SET59WW") }, (void*)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
          DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW") }, (void*)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
          DMI_MATCH(DMI_BIOS_VERSION,"1SET61WW") }, (void*)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
          DMI_MATCH(DMI_BIOS_VERSION,"1SET62WW") }, (void*)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
          DMI_MATCH(DMI_BIOS_VERSION,"1SET64WW") }, (void*)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
          DMI_MATCH(DMI_BIOS_VERSION,"1SET65WW") }, (void*)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
          DMI_MATCH(DMI_BIOS_VERSION,"1SET68WW") }, (void*)1},
        { set_max_cstate, "Medion 41700", {
          DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
          DMI_MATCH(DMI_BIOS_VERSION,"R01-A1J")}, (void *)1},
        { set_max_cstate, "Clevo 5600D", {
          DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
          DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
         (void *)2},
        {},
};
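
/*
 * The two helpers below compute elapsed ACPI PM timer ticks and handle the
 * counter wrapping: the PM timer is either 24-bit or 32-bit wide, as
 * advertised by the ACPI_FADT_32BIT_TIMER flag in the FADT.
 */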

static inline u32 ticks_elapsed(u32 t1, u32 t2)
{
        if (t2 >= t1)
                return (t2 - t1);
        else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
                return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
        else
                return ((0xFFFFFFFF - t1) + t2);
}

static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2)
{
        if (t2 >= t1)
                return PM_TIMER_TICKS_TO_US(t2 - t1);
        else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
                return PM_TIMER_TICKS_TO_US(((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
        else
                return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2);
}

static void acpi_safe_halt(void)
{
        current_thread_info()->status &= ~TS_POLLING;
        /*
         * TS_POLLING-cleared state must be visible before we
         * test NEED_RESCHED:
         */
        smp_mb();
        if (!need_resched())
                safe_halt();
        current_thread_info()->status |= TS_POLLING;
}

#ifndef CONFIG_CPU_IDLE
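
/*
 * Bookkeeping for switching the legacy idle loop from one C-state to
 * another: reset the promotion/demotion counters and toggle bus master
 * reload (BM_RLD) when moving into or out of C3.
 */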

static void
acpi_processor_power_activate(struct acpi_processor *pr,
                              struct acpi_processor_cx *new)
{
        struct acpi_processor_cx *old;

        if (!pr || !new)
                return;

        old = pr->power.state;

        if (old)
                old->promotion.count = 0;
        new->demotion.count = 0;

        /* Cleanup from old state. */
        if (old) {
                switch (old->type) {
                case ACPI_STATE_C3:
                        /* Disable bus master reload */
                        if (new->type != ACPI_STATE_C3 && pr->flags.bm_check)
                                acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
                        break;
                }
        }

        /* Prepare to use new state. */
        switch (new->type) {
        case ACPI_STATE_C3:
                /* Enable bus master reload */
                if (old->type != ACPI_STATE_C3 && pr->flags.bm_check)
                        acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
                break;
        }

        pr->power.state = new;

        return;
}

static atomic_t c3_cpu_count;

/* Common C-state entry for C2, C3, .. */
static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
{
        if (cstate->space_id == ACPI_CSTATE_FFH) {
                /* Call into architectural FFH based C-state */
                acpi_processor_ffh_cstate_enter(cstate);
        } else {
                int unused;
                /* IO port based C-state */
                inb(cstate->address);
                /* Dummy wait op - must do something useless after P_LVL2 read
                   because chipsets cannot guarantee that STPCLK# signal
                   gets asserted in time to freeze execution properly. */
                unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
        }
}
#endif /* !CONFIG_CPU_IDLE */

#ifdef ARCH_APICTIMER_STOPS_ON_C3

/*
 * Some BIOS implementations switch to C3 in the published C2 state.
 * This seems to be a common problem on AMD boxen, but other vendors
 * are affected too. We pick the most conservative approach: we assume
 * that the local APIC stops in both C2 and C3.
 */
static void acpi_timer_check_state(int state, struct acpi_processor *pr,
                                   struct acpi_processor_cx *cx)
{
        struct acpi_processor_power *pwr = &pr->power;
        u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;

        /*
         * Check, if one of the previous states already marked the lapic
         * unstable
         */
        if (pwr->timer_broadcast_on_state < state)
                return;

        if (cx->type >= type)
                pr->power.timer_broadcast_on_state = state;
}
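
/*
 * If any usable state stops the local APIC timer, the CPU must be switched
 * to the broadcast clockevent device before entering it; the helpers below
 * turn broadcast mode on/off and signal entry/exit around each idle period.
 */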

static void acpi_propagate_timer_broadcast(struct acpi_processor *pr)
{
        unsigned long reason;

        reason = pr->power.timer_broadcast_on_state < INT_MAX ?
                CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;

        clockevents_notify(reason, &pr->id);
}

/* Power(C) State timer broadcast control */
static void acpi_state_timer_broadcast(struct acpi_processor *pr,
                                       struct acpi_processor_cx *cx,
                                       int broadcast)
{
        int state = cx - pr->power.states;

        if (state >= pr->power.timer_broadcast_on_state) {
                unsigned long reason;

                reason = broadcast ?  CLOCK_EVT_NOTIFY_BROADCAST_ENTER :
                        CLOCK_EVT_NOTIFY_BROADCAST_EXIT;
                clockevents_notify(reason, &pr->id);
        }
}

#else

static void acpi_timer_check_state(int state, struct acpi_processor *pr,
                                   struct acpi_processor_cx *cstate) { }
static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) { }
static void acpi_state_timer_broadcast(struct acpi_processor *pr,
                                       struct acpi_processor_cx *cx,
                                       int broadcast)
{
}

#endif

/*
 * Suspend / resume control
 */
static int acpi_idle_suspend;

int acpi_processor_suspend(struct acpi_device * device, pm_message_t state)
{
        acpi_idle_suspend = 1;
        return 0;
}

int acpi_processor_resume(struct acpi_device * device)
{
        acpi_idle_suspend = 0;
        return 0;
}

#ifndef CONFIG_CPU_IDLE
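
/*
 * Legacy idle handler (used when cpuidle is not configured): installed as
 * pm_idle. Each invocation checks bus master activity, enters the current
 * C-state, and then evaluates the promotion/demotion rules to pick the
 * state used on the next idle entry.
 */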
static void acpi_processor_idle(void)
{
        struct acpi_processor *pr = NULL;
        struct acpi_processor_cx *cx = NULL;
        struct acpi_processor_cx *next_state = NULL;
        int sleep_ticks = 0;
        u32 t1, t2 = 0;

        /*
         * Interrupts must be disabled during bus mastering calculations and
         * for C2/C3 transitions.
         */
        local_irq_disable();

        pr = processors[smp_processor_id()];
        if (!pr) {
                local_irq_enable();
                return;
        }

        /*
         * Check whether we truly need to go idle, or should
         * reschedule:
         */
        if (unlikely(need_resched())) {
                local_irq_enable();
                return;
        }

        cx = pr->power.state;
        if (!cx || acpi_idle_suspend) {
                if (pm_idle_save)
                        pm_idle_save();
                else
                        acpi_safe_halt();
                return;
        }

        /*
         * Check BM Activity
         * -----------------
         * Check for bus mastering activity (if required), record, and check
         * for demotion.
         */
        if (pr->flags.bm_check) {
                u32 bm_status = 0;
                unsigned long diff = jiffies - pr->power.bm_check_timestamp;

                if (diff > 31)
                        diff = 31;

                pr->power.bm_activity <<= diff;

                acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
                if (bm_status) {
                        pr->power.bm_activity |= 0x1;
                        acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
                }
                /*
                 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
                 * the true state of bus mastering activity; forcing us to
                 * manually check the BMIDEA bit of each IDE channel.
                 */
                else if (errata.piix4.bmisx) {
                        if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
                            || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
                                pr->power.bm_activity |= 0x1;
                }

                pr->power.bm_check_timestamp = jiffies;

                /*
                 * If bus mastering is or was active this jiffy, demote
                 * to avoid a faulty transition.  Note that the processor
                 * won't enter a low-power state during this call (to this
                 * function) but should upon the next.
                 *
                 * TBD: A better policy might be to fallback to the demotion
                 *      state (use it for this quantum only) instead of
                 *      demoting -- and rely on duration as our sole demotion
                 *      qualification.  This may, however, introduce DMA
                 *      issues (e.g. floppy DMA transfer overrun/underrun).
                 */
                if ((pr->power.bm_activity & 0x1) &&
                    cx->demotion.threshold.bm) {
                        local_irq_enable();
                        next_state = cx->demotion.state;
                        goto end;
                }
        }

#ifdef CONFIG_HOTPLUG_CPU
        /*
         * Check for P_LVL2_UP flag before entering C2 and above on
         * an SMP system. We do it here instead of doing it at _CST/P_LVL
         * detection phase, to work cleanly with logical CPU hotplug.
         */
        if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
            !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
                cx = &pr->power.states[ACPI_STATE_C1];
#endif

        /*
         * Sleep:
         * ------
         * Invoke the current Cx state to put the processor to sleep.
         */
        if (cx->type == ACPI_STATE_C2 || cx->type == ACPI_STATE_C3) {
                current_thread_info()->status &= ~TS_POLLING;
                /*
                 * TS_POLLING-cleared state must be visible before we
                 * test NEED_RESCHED:
                 */
                smp_mb();
                if (need_resched()) {
                        current_thread_info()->status |= TS_POLLING;
                        local_irq_enable();
                        return;
                }
        }

        switch (cx->type) {

        case ACPI_STATE_C1:
                /*
                 * Invoke C1.
                 * Use the appropriate idle routine, the one that would
                 * be used without acpi C-states.
                 */
                if (pm_idle_save)
                        pm_idle_save();
                else
                        acpi_safe_halt();

                /*
                 * TBD: Can't get time duration while in C1, as resumes
                 *      go to an ISR rather than here.  Need to instrument
                 *      base interrupt handler.
                 *
                 * Note: the TSC better not stop in C1, sched_clock() will
                 *       skew otherwise.
                 */
                sleep_ticks = 0xFFFFFFFF;
                break;

        case ACPI_STATE_C2:
                /* Get start time (ticks) */
                t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
                /* Tell the scheduler that we are going deep-idle: */
                sched_clock_idle_sleep_event();
                /* Invoke C2 */
                acpi_state_timer_broadcast(pr, cx, 1);
                acpi_cstate_enter(cx);
                /* Get end time (ticks) */
                t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
                /* TSC halts in C2, so notify users */
                mark_tsc_unstable("possible TSC halt in C2");
#endif
                /* Compute time (ticks) that we were actually asleep */
                sleep_ticks = ticks_elapsed(t1, t2);

                /* Tell the scheduler how much we idled: */
                sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

                /* Re-enable interrupts */
                local_irq_enable();
                /* Do not account our idle-switching overhead: */
                sleep_ticks -= cx->latency_ticks + C2_OVERHEAD;

                current_thread_info()->status |= TS_POLLING;
                acpi_state_timer_broadcast(pr, cx, 0);
                break;

        case ACPI_STATE_C3:
                /*
                 * Must be done before busmaster disable as we might
                 * need to access HPET !
                 */
                acpi_state_timer_broadcast(pr, cx, 1);
                /*
                 * disable bus master
                 * bm_check implies we need ARB_DIS
                 * !bm_check implies we need cache flush
                 * bm_control implies whether we can do ARB_DIS
                 *
                 * That leaves a case where bm_check is set and bm_control is
                 * not set. In that case we cannot do much, we enter C3
                 * without doing anything.
                 */
                if (pr->flags.bm_check && pr->flags.bm_control) {
                        if (atomic_inc_return(&c3_cpu_count) ==
                            num_online_cpus()) {
                                /*
                                 * All CPUs are trying to go to C3
                                 * Disable bus master arbitration
                                 */
                                acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
                        }
                } else if (!pr->flags.bm_check) {
                        /* SMP with no shared cache... Invalidate cache  */
                        ACPI_FLUSH_CPU_CACHE();
                }

                /* Get start time (ticks) */
                t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
                /* Invoke C3 */
                /* Tell the scheduler that we are going deep-idle: */
                sched_clock_idle_sleep_event();
                acpi_cstate_enter(cx);
                /* Get end time (ticks) */
                t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
                if (pr->flags.bm_check && pr->flags.bm_control) {
                        /* Enable bus master arbitration */
                        atomic_dec(&c3_cpu_count);
                        acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
                }

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
                /* TSC halts in C3, so notify users */
                mark_tsc_unstable("TSC halts in C3");
#endif
                /* Compute time (ticks) that we were actually asleep */
                sleep_ticks = ticks_elapsed(t1, t2);
                /* Tell the scheduler how much we idled: */
                sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

                /* Re-enable interrupts */
                local_irq_enable();
                /* Do not account our idle-switching overhead: */
                sleep_ticks -= cx->latency_ticks + C3_OVERHEAD;

                current_thread_info()->status |= TS_POLLING;
                acpi_state_timer_broadcast(pr, cx, 0);
                break;

        default:
                local_irq_enable();
                return;
        }
        cx->usage++;
        if ((cx->type != ACPI_STATE_C1) && (sleep_ticks > 0))
                cx->time += sleep_ticks;

        next_state = pr->power.state;

#ifdef CONFIG_HOTPLUG_CPU
        /* Don't do promotion/demotion */
        if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) &&
            !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) {
                next_state = cx;
                goto end;
        }
#endif

        /*
         * Promotion?
         * ----------
         * Track the number of longs (time asleep is greater than threshold)
         * and promote when the count threshold is reached.  Note that bus
         * mastering activity may prevent promotions.
         * Do not promote above max_cstate.
         */
        if (cx->promotion.state &&
            ((cx->promotion.state - pr->power.states) <= max_cstate)) {
                if (sleep_ticks > cx->promotion.threshold.ticks &&
                  cx->promotion.state->latency <= system_latency_constraint()) {
                        cx->promotion.count++;
                        cx->demotion.count = 0;
                        if (cx->promotion.count >=
                            cx->promotion.threshold.count) {
                                if (pr->flags.bm_check) {
                                        if (!
                                            (pr->power.bm_activity & cx->
                                             promotion.threshold.bm)) {
                                                next_state =
                                                    cx->promotion.state;
                                                goto end;
                                        }
                                } else {
                                        next_state = cx->promotion.state;
                                        goto end;
                                }
                        }
                }
        }

        /*
         * Demotion?
         * ---------
         * Track the number of shorts (time asleep is less than time threshold)
         * and demote when the usage threshold is reached.
         */
        if (cx->demotion.state) {
                if (sleep_ticks < cx->demotion.threshold.ticks) {
                        cx->demotion.count++;
                        cx->promotion.count = 0;
                        if (cx->demotion.count >= cx->demotion.threshold.count) {
                                next_state = cx->demotion.state;
                                goto end;
                        }
                }
        }

      end:
        /*
         * Demote if current state exceeds max_cstate
         * or if the latency of the current state is unacceptable
         */
        if ((pr->power.state - pr->power.states) > max_cstate ||
                pr->power.state->latency > system_latency_constraint()) {
                if (cx->demotion.state)
                        next_state = cx->demotion.state;
        }

        /*
         * New Cx State?
         * -------------
         * If we're going to start using a new Cx state we must clean up
         * from the previous and prepare to use the new.
         */
        if (next_state != pr->power.state)
                acpi_processor_power_activate(pr, next_state);
}
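
/*
 * Build the default promotion/demotion policy: each valid C-state is linked
 * to its shallower neighbour (demotion) and deeper neighbour (promotion),
 * with per-state tick/count thresholds and, for C3, a bus-master mask.
 */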

static int acpi_processor_set_power_policy(struct acpi_processor *pr)
{
        unsigned int i;
        unsigned int state_is_set = 0;
        struct acpi_processor_cx *lower = NULL;
        struct acpi_processor_cx *higher = NULL;
        struct acpi_processor_cx *cx;


        if (!pr)
                return -EINVAL;

        /*
         * This function sets the default Cx state policy (OS idle handler).
         * Our scheme is to promote quickly to C2 but more conservatively
         * to C3.  We're favoring C2  for its characteristics of low latency
         * (quick response), good power savings, and ability to allow bus
         * mastering activity.  Note that the Cx state policy is completely
         * customizable and can be altered dynamically.
         */

        /* startup state */
        for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
                cx = &pr->power.states[i];
                if (!cx->valid)
                        continue;

                if (!state_is_set)
                        pr->power.state = cx;
                state_is_set++;
                break;
        }

        if (!state_is_set)
                return -ENODEV;

        /* demotion */
        for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
                cx = &pr->power.states[i];
                if (!cx->valid)
                        continue;

                if (lower) {
                        cx->demotion.state = lower;
                        cx->demotion.threshold.ticks = cx->latency_ticks;
                        cx->demotion.threshold.count = 1;
                        if (cx->type == ACPI_STATE_C3)
                                cx->demotion.threshold.bm = bm_history;
                }

                lower = cx;
        }

        /* promotion */
        for (i = (ACPI_PROCESSOR_MAX_POWER - 1); i > 0; i--) {
                cx = &pr->power.states[i];
                if (!cx->valid)
                        continue;

                if (higher) {
                        cx->promotion.state = higher;
                        cx->promotion.threshold.ticks = cx->latency_ticks;
                        if (cx->type >= ACPI_STATE_C2)
                                cx->promotion.threshold.count = 4;
                        else
                                cx->promotion.threshold.count = 10;
                        if (higher->type == ACPI_STATE_C3)
                                cx->promotion.threshold.bm = bm_history;
                }

                higher = cx;
        }

        return 0;
}
#endif /* !CONFIG_CPU_IDLE */
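
/*
 * FADT/P_BLK based C-state discovery: the P_LVL2 and P_LVL3 entry registers
 * live at P_BLK + 4 and P_BLK + 5 respectively, and the worst-case C2/C3
 * latencies are taken from the FADT.
 */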

static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{

        if (!pr)
                return -EINVAL;

        if (!pr->pblk)
                return -ENODEV;

        /* if info is obtained from pblk/fadt, type equals state */
        pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
        pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

#ifndef CONFIG_HOTPLUG_CPU
        /*
         * Check for P_LVL2_UP flag before entering C2 and above on
         * an SMP system.
         */
        if ((num_online_cpus() > 1) &&
            !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
                return -ENODEV;
#endif

        /* determine C2 and C3 address from pblk */
        pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
        pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

        /* determine latencies from FADT */
        pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency;
        pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency;

        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                          "lvl2[0x%08x] lvl3[0x%08x]\n",
                          pr->power.states[ACPI_STATE_C2].address,
                          pr->power.states[ACPI_STATE_C3].address));

        return 0;
}

static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
{
        if (!pr->power.states[ACPI_STATE_C1].valid) {
                /* set the first C-State to C1 */
                /* all processors need to support C1 */
                pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
                pr->power.states[ACPI_STATE_C1].valid = 1;
        }
        /* the C0 state only exists as a filler in our array */
        pr->power.states[ACPI_STATE_C0].valid = 1;
        return 0;
}
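
/*
 * _CST based discovery: _CST returns a package of the form
 *   { Count, Package() { Register, Type, Latency, Power }, ... }
 * Each well-formed entry is copied into pr->power.states[], up to
 * ACPI_PROCESSOR_MAX_POWER - 1 states.
 */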

static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
        acpi_status status = 0;
        acpi_integer count;
        int current_count;
        int i;
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *cst;


        if (nocst)
                return -ENODEV;

        current_count = 0;

        status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
                return -ENODEV;
        }

        cst = buffer.pointer;

        /* There must be at least 2 elements */
        if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
                printk(KERN_ERR PREFIX "not enough elements in _CST\n");
                status = -EFAULT;
                goto end;
        }

        count = cst->package.elements[0].integer.value;

        /* Validate number of power states. */
        if (count < 1 || count != cst->package.count - 1) {
                printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
                status = -EFAULT;
                goto end;
        }

        /* Tell driver that at least _CST is supported. */
        pr->flags.has_cst = 1;

        for (i = 1; i <= count; i++) {
                union acpi_object *element;
                union acpi_object *obj;
                struct acpi_power_register *reg;
                struct acpi_processor_cx cx;

                memset(&cx, 0, sizeof(cx));

                element = &(cst->package.elements[i]);
                if (element->type != ACPI_TYPE_PACKAGE)
                        continue;

                if (element->package.count != 4)
                        continue;

                obj = &(element->package.elements[0]);

                if (obj->type != ACPI_TYPE_BUFFER)
                        continue;

                reg = (struct acpi_power_register *)obj->buffer.pointer;

                if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
                    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
                        continue;

                /* There should be an easy way to extract an integer... */
                obj = &(element->package.elements[1]);
                if (obj->type != ACPI_TYPE_INTEGER)
                        continue;

                cx.type = obj->integer.value;
                /*
                 * Some buggy BIOSes won't list C1 in _CST -
                 * Let acpi_processor_get_power_info_default() handle them later
                 */
                if (i == 1 && cx.type != ACPI_STATE_C1)
                        current_count++;

                cx.address = reg->address;
                cx.index = current_count + 1;

                cx.space_id = ACPI_CSTATE_SYSTEMIO;
                if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
                        if (acpi_processor_ffh_cstate_probe
                                        (pr->id, &cx, reg) == 0) {
                                cx.space_id = ACPI_CSTATE_FFH;
                        } else if (cx.type != ACPI_STATE_C1) {
                                /*
                                 * C1 is a special case where FIXED_HARDWARE
                                 * can be handled in non-MWAIT way as well.
                                 * In that case, save this _CST entry info.
                                 * That is, we retain space_id of SYSTEM_IO for
                                 * halt based C1.
                                 * Otherwise, ignore this info and continue.
                                 */
                                continue;
                        }
                }

                obj = &(element->package.elements[2]);
                if (obj->type != ACPI_TYPE_INTEGER)
                        continue;

                cx.latency = obj->integer.value;

                obj = &(element->package.elements[3]);
                if (obj->type != ACPI_TYPE_INTEGER)
                        continue;

                cx.power = obj->integer.value;

                current_count++;
                memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));

                /*
                 * We support total ACPI_PROCESSOR_MAX_POWER - 1
                 * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
                 */
                if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
                        printk(KERN_WARNING
                               "Limiting number of power states to max (%d)\n",
                               ACPI_PROCESSOR_MAX_POWER);
                        printk(KERN_WARNING
                               "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
                        break;
                }
        }

        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
                          current_count));

        /* Validate number of power states discovered */
        if (current_count < 2)
                status = -EFAULT;

      end:
        kfree(buffer.pointer);

        return status;
}

static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
{

        if (!cx->address)
                return;

        /*
         * C2 latency must be less than or equal to 100
         * microseconds.
         */
        else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "latency too large [%d]\n", cx->latency));
                return;
        }

        /*
         * Otherwise we've met all of our C2 requirements.
         * Normalize the C2 latency to expedite policy
         */
        cx->valid = 1;

#ifndef CONFIG_CPU_IDLE
        cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
#else
        cx->latency_ticks = cx->latency;
#endif

        return;
}

static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
                                           struct acpi_processor_cx *cx)
{
        static int bm_check_flag;


        if (!cx->address)
                return;

        /*
         * C3 latency must be less than or equal to 1000
         * microseconds.
         */
        else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "latency too large [%d]\n", cx->latency));
                return;
        }

        /*
         * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
         * DMA transfers are used by any ISA device to avoid livelock.
         * Note that we could disable Type-F DMA (as recommended by
         * the erratum), but this is known to disrupt certain ISA
         * devices thus we take the conservative approach.
         */
        else if (errata.piix4.fdma) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "C3 not supported on PIIX4 with Type-F DMA\n"));
                return;
        }

        /* All the logic here assumes flags.bm_check is same across all CPUs */
        if (!bm_check_flag) {
                /* Determine whether bm_check is needed based on CPU  */
                acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
                bm_check_flag = pr->flags.bm_check;
        } else {
                pr->flags.bm_check = bm_check_flag;
        }

        if (pr->flags.bm_check) {
                if (!pr->flags.bm_control) {
                        if (pr->flags.has_cst != 1) {
                                /* bus mastering control is necessary */
                                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                        "C3 support requires BM control\n"));
                                return;
                        } else {
                                /* Here we enter C3 without bus mastering */
                                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                        "C3 support without BM control\n"));
                        }
                }
        } else {
                /*
                 * WBINVD should be set in the FADT for the C3 state to be
                 * supported when bm_check is not required.
                 */
                if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
                        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                          "Cache invalidation should work properly"
                                          " for C3 to be enabled on SMP systems\n"));
                        return;
                }
                acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
        }

        /*
         * Otherwise we've met all of our C3 requirements.
         * Normalize the C3 latency to expedite policy.  Enable
         * checking of bus mastering status (bm_check) so we can
         * use this in our C3 policy
         */
        cx->valid = 1;

#ifndef CONFIG_CPU_IDLE
        cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
#else
        cx->latency_ticks = cx->latency;
#endif

        return;
}
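
/*
 * Walk states 1..ACPI_PROCESSOR_MAX_POWER-1, validate C2/C3 entries, count
 * the usable ones, and record the shallowest state that needs the LAPIC
 * timer broadcast workaround.
 */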

static int acpi_processor_power_verify(struct acpi_processor *pr)
{
        unsigned int i;
        unsigned int working = 0;

        pr->power.timer_broadcast_on_state = INT_MAX;

        for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
                struct acpi_processor_cx *cx = &pr->power.states[i];

                switch (cx->type) {
                case ACPI_STATE_C1:
                        cx->valid = 1;
                        break;

                case ACPI_STATE_C2:
                        acpi_processor_power_verify_c2(cx);
                        if (cx->valid)
                                acpi_timer_check_state(i, pr, cx);
                        break;

                case ACPI_STATE_C3:
                        acpi_processor_power_verify_c3(pr, cx);
                        if (cx->valid)
                                acpi_timer_check_state(i, pr, cx);
                        break;
                }

                if (cx->valid)
                        working++;
        }

        acpi_propagate_timer_broadcast(pr);

        return (working);
}

static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
        unsigned int i;
        int result;


        /* NOTE: the idle thread may not be running while calling
         * this function */

        /* Zero initialize all the C-states info. */
        memset(pr->power.states, 0, sizeof(pr->power.states));

        result = acpi_processor_get_power_info_cst(pr);
        if (result == -ENODEV)
                result = acpi_processor_get_power_info_fadt(pr);

        if (result)
                return result;

        acpi_processor_get_power_info_default(pr);

        pr->power.count = acpi_processor_power_verify(pr);

#ifndef CONFIG_CPU_IDLE
        /*
         * Set Default Policy
         * ------------------
         * Now that we know which states are supported, set the default
         * policy.  Note that this policy can be changed dynamically
         * (e.g. encourage deeper sleeps to conserve battery life when
         * not on AC).
         */
        result = acpi_processor_set_power_policy(pr);
        if (result)
                return result;
#endif

        /*
         * if one state of type C2 or C3 is available, mark this
         * CPU as being "idle manageable"
         */
        for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
                if (pr->power.states[i].valid) {
                        pr->power.count = i;
                        if (pr->power.states[i].type >= ACPI_STATE_C2)
                                pr->flags.power = 1;
                }
        }

        return 0;
}

static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
{
        struct acpi_processor *pr = seq->private;
        unsigned int i;


        if (!pr)
                goto end;

        seq_printf(seq, "active state:            C%zd\n"
                   "max_cstate:              C%d\n"
                   "bus master activity:     %08x\n"
                   "maximum allowed latency: %d usec\n",
                   pr->power.state ? pr->power.state - pr->power.states : 0,
                   max_cstate, (unsigned)pr->power.bm_activity,
                   system_latency_constraint());

        seq_puts(seq, "states:\n");

        for (i = 1; i <= pr->power.count; i++) {
                seq_printf(seq, "   %cC%d:                  ",
                           (&pr->power.states[i] ==
                            pr->power.state ? '*' : ' '), i);

                if (!pr->power.states[i].valid) {
                        seq_puts(seq, "<not supported>\n");
                        continue;
                }

                switch (pr->power.states[i].type) {
                case ACPI_STATE_C1:
                        seq_printf(seq, "type[C1] ");
                        break;
                case ACPI_STATE_C2:
                        seq_printf(seq, "type[C2] ");
                        break;
                case ACPI_STATE_C3:
                        seq_printf(seq, "type[C3] ");
                        break;
                default:
                        seq_printf(seq, "type[--] ");
                        break;
                }

                if (pr->power.states[i].promotion.state)
                        seq_printf(seq, "promotion[C%zd] ",
                                   (pr->power.states[i].promotion.state -
                                    pr->power.states));
                else
                        seq_puts(seq, "promotion[--] ");

                if (pr->power.states[i].demotion.state)
                        seq_printf(seq, "demotion[C%zd] ",
                                   (pr->power.states[i].demotion.state -
                                    pr->power.states));
                else
                        seq_puts(seq, "demotion[--] ");

                seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n",
                           pr->power.states[i].latency,
                           pr->power.states[i].usage,
                           (unsigned long long)pr->power.states[i].time);
        }

      end:
        return 0;
}

static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
{
        return single_open(file, acpi_processor_power_seq_show,
                           PDE(inode)->data);
}

static const struct file_operations acpi_processor_power_fops = {
        .open = acpi_processor_power_open_fs,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

#ifndef CONFIG_CPU_IDLE

int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
        int result = 0;


        if (!pr)
                return -EINVAL;

        if (nocst) {
                return -ENODEV;
        }

        if (!pr->flags.power_setup_done)
                return -ENODEV;

        /* Fall back to the default idle loop */
        pm_idle = pm_idle_save;
        synchronize_sched();    /* Relies on interrupts forcing exit from idle. */

        pr->flags.power = 0;
        result = acpi_processor_get_power_info(pr);
        if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
                pm_idle = acpi_processor_idle;

        return result;
}

#ifdef CONFIG_SMP
static void smp_callback(void *v)
{
        /* we already woke the CPU up, nothing more to do */
}

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement.  This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int acpi_processor_latency_notify(struct notifier_block *b,
                unsigned long l, void *v)
{
        smp_call_function(smp_callback, NULL, 0, 1);
        return NOTIFY_OK;
}

static struct notifier_block acpi_processor_latency_notifier = {
        .notifier_call = acpi_processor_latency_notify,
};

#endif

#else /* CONFIG_CPU_IDLE */

/**
 * acpi_idle_bm_check - checks if bus master activity was detected
 */
static int acpi_idle_bm_check(void)
{
        u32 bm_status = 0;

        acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
        if (bm_status)
                acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
        /*
         * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
         * the true state of bus mastering activity; forcing us to
         * manually check the BMIDEA bit of each IDE channel.
         */
        else if (errata.piix4.bmisx) {
                if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
                    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
                        bm_status = 1;
        }
        return bm_status;
}

/**
 * acpi_idle_update_bm_rld - updates the BM_RLD bit depending on target state
 * @pr: the processor
 * @target: the new target state
 */
static inline void acpi_idle_update_bm_rld(struct acpi_processor *pr,
                                           struct acpi_processor_cx *target)
{
        if (pr->flags.bm_rld_set && target->type != ACPI_STATE_C3) {
                acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
                pr->flags.bm_rld_set = 0;
        }

        if (!pr->flags.bm_rld_set && target->type == ACPI_STATE_C3) {
                acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
                pr->flags.bm_rld_set = 1;
        }
}

/**
 * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
 * @cx: cstate data
 */
static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
        if (cx->space_id == ACPI_CSTATE_FFH) {
                /* Call into architectural FFH based C-state */
                acpi_processor_ffh_cstate_enter(cx);
        } else {
                int unused;
                /* IO port based C-state */
                inb(cx->address);
                /* Dummy wait op - must do something useless after P_LVL2 read
                   because chipsets cannot guarantee that STPCLK# signal
                   gets asserted in time to freeze execution properly. */
                unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
        }
}

/**
 * acpi_idle_enter_c1 - enters an ACPI C1 state-type
 * @dev: the target CPU
 * @state: the state data
 *
 * This is equivalent to the HALT instruction.
 */
static int acpi_idle_enter_c1(struct cpuidle_device *dev,
                              struct cpuidle_state *state)
{
        struct acpi_processor *pr;
        struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
        pr = processors[smp_processor_id()];

        if (unlikely(!pr))
                return 0;

        if (pr->flags.bm_check)
                acpi_idle_update_bm_rld(pr, cx);

        acpi_safe_halt();

        cx->usage++;

        return 0;
}

/**
 * acpi_idle_enter_simple - enters an ACPI state without BM handling
 * @dev: the target CPU
 * @state: the state data
 */
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
                                  struct cpuidle_state *state)
{
        struct acpi_processor *pr;
        struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
        u32 t1, t2;
        int sleep_ticks = 0;

        pr = processors[smp_processor_id()];

        if (unlikely(!pr))
                return 0;

        if (acpi_idle_suspend)
                return(acpi_idle_enter_c1(dev, state));

        local_irq_disable();
        current_thread_info()->status &= ~TS_POLLING;
        /*
         * TS_POLLING-cleared state must be visible before we test
         * NEED_RESCHED:
         */
        smp_mb();

        if (unlikely(need_resched())) {
                current_thread_info()->status |= TS_POLLING;
                local_irq_enable();
                return 0;
        }

        /*
         * Must be done before busmaster disable as we might need to
         * access HPET !
         */
        acpi_state_timer_broadcast(pr, cx, 1);

        if (pr->flags.bm_check)
                acpi_idle_update_bm_rld(pr, cx);

        if (cx->type == ACPI_STATE_C3)
                ACPI_FLUSH_CPU_CACHE();

        t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
        /* Tell the scheduler that we are going deep-idle: */
        sched_clock_idle_sleep_event();
        acpi_idle_do_entry(cx);
        t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
        /* TSC could halt in idle, so notify users */
1446
        mark_tsc_unstable("TSC halts in idle");;
1447
#endif
1448
        sleep_ticks = ticks_elapsed(t1, t2);
1449
 
1450
        /* Tell the scheduler how much we idled: */
1451
        sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
1452
 
1453
        local_irq_enable();
1454
        current_thread_info()->status |= TS_POLLING;
1455
 
1456
        cx->usage++;
1457
 
1458
        acpi_state_timer_broadcast(pr, cx, 0);
1459
        cx->time += sleep_ticks;
1460
        return ticks_elapsed_in_us(t1, t2);
1461
}
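/*
 * Illustrative sketch, not part of the driver: the t1/t2 readings above come
 * from the ACPI PM timer, which ticks at PM_TIMER_FREQUENCY (3.579545 MHz),
 * i.e. roughly 279 ns per tick.  The value handed to
 * sched_clock_idle_wakeup_event() is simply ticks * PM_TIMER_TICK_NS; for
 * example 3580 elapsed ticks correspond to about 1 ms of idle time.  The
 * helper name below is hypothetical and only restates that arithmetic.
 */
static inline u64 example_pm_ticks_to_ns(u32 ticks)
{
        return (u64)ticks * PM_TIMER_TICK_NS;
}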
1462
 
1463
static int c3_cpu_count;
1464
static DEFINE_SPINLOCK(c3_lock);

/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @dev: the target CPU
 * @state: the state data
 *
 * If BM is detected, the deepest non-C3 idle state is entered instead.
 */
static int acpi_idle_enter_bm(struct cpuidle_device *dev,
                              struct cpuidle_state *state)
{
        struct acpi_processor *pr;
        struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
        u32 t1, t2;
        int sleep_ticks = 0;

        pr = processors[smp_processor_id()];

        if (unlikely(!pr))
                return 0;

        if (acpi_idle_suspend)
                return acpi_idle_enter_c1(dev, state);

        if (acpi_idle_bm_check()) {
                if (dev->safe_state) {
                        return dev->safe_state->enter(dev, dev->safe_state);
                } else {
                        acpi_safe_halt();
                        return 0;
                }
        }

        local_irq_disable();
        current_thread_info()->status &= ~TS_POLLING;
        /*
         * TS_POLLING-cleared state must be visible before we test
         * NEED_RESCHED:
         */
        smp_mb();

        if (unlikely(need_resched())) {
                current_thread_info()->status |= TS_POLLING;
                local_irq_enable();
                return 0;
        }

        /* Tell the scheduler that we are going deep-idle: */
        sched_clock_idle_sleep_event();
        /*
         * Must be done before busmaster disable as we might need to
         * access HPET!
         */
        acpi_state_timer_broadcast(pr, cx, 1);

        acpi_idle_update_bm_rld(pr, cx);

        /*
         * Disable bus master arbitration:
         *  - bm_check set means we need ARB_DIS
         *  - bm_check clear means we need a cache flush instead
         *  - bm_control tells us whether ARB_DIS is actually available
         *
         * That leaves the case where bm_check is set but bm_control is
         * not: there is nothing useful we can do, so we enter C3 without
         * doing anything.
         */
        if (pr->flags.bm_check && pr->flags.bm_control) {
                spin_lock(&c3_lock);
                c3_cpu_count++;
                /* Disable bus master arbitration when all CPUs are in C3 */
                if (c3_cpu_count == num_online_cpus())
                        acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
                spin_unlock(&c3_lock);
        } else if (!pr->flags.bm_check) {
                ACPI_FLUSH_CPU_CACHE();
        }

        t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
        acpi_idle_do_entry(cx);
        t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

        /* Re-enable bus master arbitration */
        if (pr->flags.bm_check && pr->flags.bm_control) {
                spin_lock(&c3_lock);
                acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
                c3_cpu_count--;
                spin_unlock(&c3_lock);
        }

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
        /* TSC could halt in idle, so notify users */
        mark_tsc_unstable("TSC halts in idle");
#endif
        sleep_ticks = ticks_elapsed(t1, t2);
        /* Tell the scheduler how much we idled: */
        sched_clock_idle_wakeup_event(sleep_ticks * PM_TIMER_TICK_NS);

        local_irq_enable();
        current_thread_info()->status |= TS_POLLING;

        cx->usage++;

        acpi_state_timer_broadcast(pr, cx, 0);
        cx->time += sleep_ticks;
        return ticks_elapsed_in_us(t1, t2);
}
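/*
 * Illustrative sketch, not part of the driver: bus master arbitration may
 * only stay disabled (ARB_DIS set) while every online CPU sits in C3,
 * otherwise DMA traffic issued on behalf of a still-running CPU would stall.
 * c3_cpu_count, protected by c3_lock, tracks how many CPUs are currently in
 * the C3 path above; the condition it guards is essentially the following
 * (the helper name is hypothetical):
 */
static inline int example_all_cpus_in_c3(void)
{
        return c3_cpu_count == num_online_cpus();
}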

struct cpuidle_driver acpi_idle_driver = {
        .name =         "acpi_idle",
        .owner =        THIS_MODULE,
};
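/*
 * Illustrative sketch, not part of this file: the driver object above is
 * handed to the cpuidle core once, before any per-CPU cpuidle device is
 * registered.  The function below is hypothetical; in the kernel this
 * registration happens from the ACPI processor driver's init path.
 */
static int __init example_register_acpi_idle(void)
{
        return cpuidle_register_driver(&acpi_idle_driver);
}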

/**
 * acpi_processor_setup_cpuidle - prepares and configures CPUIDLE
 * @pr: the ACPI processor
 */
static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
{
        int i, count = 0;
        struct acpi_processor_cx *cx;
        struct cpuidle_state *state;
        struct cpuidle_device *dev = &pr->power.dev;

        if (!pr->flags.power_setup_done)
                return -EINVAL;

        if (pr->flags.power == 0)
                return -EINVAL;

        for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
                cx = &pr->power.states[i];
                state = &dev->states[count];

                if (!cx->valid)
                        continue;

#ifdef CONFIG_HOTPLUG_CPU
                if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
                    !pr->flags.has_cst &&
                    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
                        continue;
#endif
                cpuidle_set_statedata(state, cx);

                snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
                state->exit_latency = cx->latency;
                state->target_residency = cx->latency * 6;
                state->power_usage = cx->power;

                state->flags = 0;
                switch (cx->type) {
                case ACPI_STATE_C1:
                        state->flags |= CPUIDLE_FLAG_SHALLOW;
                        state->enter = acpi_idle_enter_c1;
                        dev->safe_state = state;
                        break;

                case ACPI_STATE_C2:
                        state->flags |= CPUIDLE_FLAG_BALANCED;
                        state->flags |= CPUIDLE_FLAG_TIME_VALID;
                        state->enter = acpi_idle_enter_simple;
                        dev->safe_state = state;
                        break;

                case ACPI_STATE_C3:
                        state->flags |= CPUIDLE_FLAG_DEEP;
                        state->flags |= CPUIDLE_FLAG_TIME_VALID;
                        state->flags |= CPUIDLE_FLAG_CHECK_BM;
                        state->enter = pr->flags.bm_check ?
                                        acpi_idle_enter_bm :
                                        acpi_idle_enter_simple;
                        break;
                }

                count++;
        }

        dev->state_count = count;

        if (!count)
                return -EINVAL;

        return 0;
}
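/*
 * Illustrative numbers for the setup above (assumed latencies, not from any
 * particular machine): with cx->latency of 1 us for C1, 20 us for C2 and
 * 100 us for C3, the target_residency heuristic (latency * 6) yields 6 us,
 * 120 us and 600 us respectively, so the cpuidle governor only picks C3 when
 * it expects the CPU to stay idle for at least roughly 600 us.
 */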

int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
        int ret;

        if (!pr)
                return -EINVAL;

        if (nocst)
                return -ENODEV;

        if (!pr->flags.power_setup_done)
                return -ENODEV;

        cpuidle_pause_and_lock();
        cpuidle_disable_device(&pr->power.dev);
        acpi_processor_get_power_info(pr);
        acpi_processor_setup_cpuidle(pr);
        ret = cpuidle_enable_device(&pr->power.dev);
        cpuidle_resume_and_unlock();

        return ret;
}
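/*
 * Illustrative sketch, not part of this file: firmware signals a _CST change
 * with a Notify on the processor object; the processor driver's notify
 * handler (elsewhere in the ACPI processor driver) is then expected to call
 * the helper above.  The function name and message below are hypothetical:
 */
static void example_on_cst_notify(struct acpi_processor *pr)
{
        if (acpi_processor_cst_has_changed(pr))
                printk(KERN_WARNING "ACPI: refreshing C-states failed\n");
}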

#endif /* CONFIG_CPU_IDLE */

int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
                              struct acpi_device *device)
{
        acpi_status status = 0;
        static int first_run;
        struct proc_dir_entry *entry = NULL;
        unsigned int i;

        if (!first_run) {
                dmi_check_system(processor_power_dmi_table);
                max_cstate = acpi_processor_cstate_check(max_cstate);
                if (max_cstate < ACPI_C_STATES_MAX)
                        printk(KERN_NOTICE
                               "ACPI: processor limited to max C-state %d\n",
                               max_cstate);
                first_run++;
#if !defined (CONFIG_CPU_IDLE) && defined (CONFIG_SMP)
                register_latency_notifier(&acpi_processor_latency_notifier);
#endif
        }

        if (!pr)
                return -EINVAL;

        if (acpi_gbl_FADT.cst_control && !nocst) {
                status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
                                            acpi_gbl_FADT.cst_control, 8);
                if (ACPI_FAILURE(status)) {
                        ACPI_EXCEPTION((AE_INFO, status,
                                        "Notifying BIOS of _CST ability failed"));
                }
        }

        acpi_processor_get_power_info(pr);
        pr->flags.power_setup_done = 1;

        /*
         * Install the idle handler if processor power management is supported.
         * Note that the previously set idle handler will be used on
         * platforms that only support C1.
         */
        if ((pr->flags.power) && (!boot_option_idle_override)) {
#ifdef CONFIG_CPU_IDLE
                acpi_processor_setup_cpuidle(pr);
                pr->power.dev.cpu = pr->id;
                if (cpuidle_register_device(&pr->power.dev))
                        return -EIO;
#endif

                printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id);
                for (i = 1; i <= pr->power.count; i++)
                        if (pr->power.states[i].valid)
                                printk(" C%d[C%d]", i,
                                       pr->power.states[i].type);
                printk(")\n");

#ifndef CONFIG_CPU_IDLE
                if (pr->id == 0) {
                        pm_idle_save = pm_idle;
                        pm_idle = acpi_processor_idle;
                }
#endif
        }

        /* 'power' [R] */
        entry = create_proc_entry(ACPI_PROCESSOR_FILE_POWER,
                                  S_IRUGO, acpi_device_dir(device));
        if (!entry)
                return -EIO;

        entry->proc_fops = &acpi_processor_power_fops;
        entry->data = acpi_driver_data(device);
        entry->owner = THIS_MODULE;

        return 0;
}
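/*
 * Usage note (sketch): after a successful init the per-processor C-state
 * statistics exported by the 'power' entry created above appear under
 * /proc/acpi/processor/<object>/power, where <object> is the ACPI processor
 * object name (often CPU0, CPU1, ...); reading the file shows the usage and
 * time counters updated by the idle handlers in this file.
 */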

int acpi_processor_power_exit(struct acpi_processor *pr,
                              struct acpi_device *device)
{
#ifdef CONFIG_CPU_IDLE
        if ((pr->flags.power) && (!boot_option_idle_override))
                cpuidle_unregister_device(&pr->power.dev);
#endif
        pr->flags.power_setup_done = 0;

        if (acpi_device_dir(device))
                remove_proc_entry(ACPI_PROCESSOR_FILE_POWER,
                                  acpi_device_dir(device));

#ifndef CONFIG_CPU_IDLE

        /* Unregister the idle handler when processor #0 is removed. */
        if (pr->id == 0) {
                pm_idle = pm_idle_save;

                /*
                 * We are about to unload the current idle thread pm callback
                 * (pm_idle).  Wait for all processors to update cached/local
                 * copies of pm_idle before proceeding.
                 */
                cpu_idle_wait();
#ifdef CONFIG_SMP
                unregister_latency_notifier(&acpi_processor_latency_notifier);
#endif
        }
#endif

        return 0;
}
