OpenCores
URL https://opencores.org/ocsvn/test_project/test_project/trunk

Subversion Repositories test_project

[/] [test_project/] [trunk/] [linux_sd_driver/] [drivers/] [acpi/] [processor_throttling.c] - Blame information for rev 67

Go to most recent revision | Details | Compare with Previous | View Log

Line No. Rev Author Line
1 62 marcus.erl
/*
2
 * processor_throttling.c - Throttling submodule of the ACPI processor driver
3
 *
4
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
5
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
6
 *  Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
7
 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
8
 *                      - Added processor hotplug support
9
 *
10
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
11
 *
12
 *  This program is free software; you can redistribute it and/or modify
13
 *  it under the terms of the GNU General Public License as published by
14
 *  the Free Software Foundation; either version 2 of the License, or (at
15
 *  your option) any later version.
16
 *
17
 *  This program is distributed in the hope that it will be useful, but
18
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
19
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
20
 *  General Public License for more details.
21
 *
22
 *  You should have received a copy of the GNU General Public License along
23
 *  with this program; if not, write to the Free Software Foundation, Inc.,
24
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
25
 *
26
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
27
 */
28
 
29
#include <linux/kernel.h>
30
#include <linux/module.h>
31
#include <linux/init.h>
32
#include <linux/sched.h>
33
#include <linux/cpufreq.h>
34
#include <linux/proc_fs.h>
35
#include <linux/seq_file.h>
36
 
37
#include <asm/io.h>
38
#include <asm/uaccess.h>
39
 
40
#include <acpi/acpi_bus.h>
41
#include <acpi/processor.h>
42
 
43
#define ACPI_PROCESSOR_COMPONENT        0x01000000
44
#define ACPI_PROCESSOR_CLASS            "processor"
45
#define _COMPONENT              ACPI_PROCESSOR_COMPONENT
46
ACPI_MODULE_NAME("processor_throttling");
47
 
48
static int acpi_processor_get_throttling(struct acpi_processor *pr);
49
int acpi_processor_set_throttling(struct acpi_processor *pr, int state);
50
 
51
/*
52
 * _TPC - Throttling Present Capabilities
53
 */
54
static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
55
{
56
        acpi_status status = 0;
57
        unsigned long tpc = 0;
58
 
59
        if (!pr)
60
                return -EINVAL;
61
        status = acpi_evaluate_integer(pr->handle, "_TPC", NULL, &tpc);
62
        if (ACPI_FAILURE(status)) {
63
                if (status != AE_NOT_FOUND) {
64
                        ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TPC"));
65
                }
66
                return -ENODEV;
67
        }
68
        pr->throttling_platform_limit = (int)tpc;
69
        return 0;
70
}
71
 
72
/*
 * Re-evaluate _TPC after a notification and, when the platform limit has
 * moved, choose a new target T-state that honours the _TPC limit as well as
 * any thermal/user limits, then program it.
 */
int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
{
	int result = 0;
	int throttling_limit;
	int current_state;
	struct acpi_processor_limit *limit;
	int target_state;

	result = acpi_processor_get_platform_limit(pr);
	if (result) {
		/* Throttling Limit is unsupported */
		return result;
	}

	throttling_limit = pr->throttling_platform_limit;
	if (throttling_limit >= pr->throttling.state_count) {
		/* Incorrect Throttling Limit */
		return -EINVAL;
	}

	current_state = pr->throttling.state;
	if (current_state > throttling_limit) {
		/*
		 * The current state can meet the requirement of
		 * _TPC limit. But it is reasonable that OSPM changes
		 * t-states from high to low for better performance.
		 * Of course the limit condition of thermal
		 * and user should be considered.
		 */
		limit = &pr->limit;
		target_state = throttling_limit;
		if (limit->thermal.tx > target_state)
			target_state = limit->thermal.tx;
		if (limit->user.tx > target_state)
			target_state = limit->user.tx;
	} else if (current_state == throttling_limit) {
		/*
		 * Unnecessary to change the throttling state
		 */
		return 0;
	} else {
		/*
		 * If the current state is lower than the limit of _TPC, it
		 * will be forced to switch to the throttling state defined
		 * by throttling_platform_limit.
		 * Because the previous state meets with the limit condition
		 * of thermal and user, it is unnecessary to check it again.
		 */
		target_state = throttling_limit;
	}
	return acpi_processor_set_throttling(pr, target_state);
}
124
 
125
/*
 * _PTC - Processor Throttling Control (and status) register location
 */
static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = 0;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *ptc = NULL;
	union acpi_object obj = { 0 };
	struct acpi_processor_throttling *throttling;

	status = acpi_evaluate_object(pr->handle, "_PTC", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		/* _PTC is optional; only log unexpected failures. */
		if (status != AE_NOT_FOUND) {
			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PTC"));
		}
		return -ENODEV;
	}

	/* _PTC must be a package of exactly two register buffers. */
	ptc = (union acpi_object *)buffer.pointer;
	if (!ptc || (ptc->type != ACPI_TYPE_PACKAGE)
	    || (ptc->package.count != 2)) {
		printk(KERN_ERR PREFIX "Invalid _PTC data\n");
		result = -EFAULT;
		goto end;
	}

	/*
	 * control_register (package element 0)
	 */

	obj = ptc->package.elements[0];

	if ((obj.type != ACPI_TYPE_BUFFER)
	    || (obj.buffer.length < sizeof(struct acpi_ptc_register))
	    || (obj.buffer.pointer == NULL)) {
		printk(KERN_ERR PREFIX
		       "Invalid _PTC data (control_register)\n");
		result = -EFAULT;
		goto end;
	}
	memcpy(&pr->throttling.control_register, obj.buffer.pointer,
	       sizeof(struct acpi_ptc_register));

	/*
	 * status_register (package element 1)
	 */

	obj = ptc->package.elements[1];

	if ((obj.type != ACPI_TYPE_BUFFER)
	    || (obj.buffer.length < sizeof(struct acpi_ptc_register))
	    || (obj.buffer.pointer == NULL)) {
		printk(KERN_ERR PREFIX "Invalid _PTC data (status_register)\n");
		result = -EFAULT;
		goto end;
	}

	memcpy(&pr->throttling.status_register, obj.buffer.pointer,
	       sizeof(struct acpi_ptc_register));

	throttling = &pr->throttling;

	/*
	 * The SYSTEM_IO accessors elsewhere in this file move at most 32
	 * bits, so reject any register field extending past bit 31.
	 */
	if ((throttling->control_register.bit_width +
		throttling->control_register.bit_offset) > 32) {
		printk(KERN_ERR PREFIX "Invalid _PTC control register\n");
		result = -EFAULT;
		goto end;
	}

	if ((throttling->status_register.bit_width +
		throttling->status_register.bit_offset) > 32) {
		printk(KERN_ERR PREFIX "Invalid _PTC status register\n");
		result = -EFAULT;
		goto end;
	}

      end:
	kfree(buffer.pointer);

	return result;
}
208
 
209
/*
210
 * _TSS - Throttling Supported States
211
 */
212
static int acpi_processor_get_throttling_states(struct acpi_processor *pr)
213
{
214
        int result = 0;
215
        acpi_status status = AE_OK;
216
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
217
        struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
218
        struct acpi_buffer state = { 0, NULL };
219
        union acpi_object *tss = NULL;
220
        int i;
221
 
222
        status = acpi_evaluate_object(pr->handle, "_TSS", NULL, &buffer);
223
        if (ACPI_FAILURE(status)) {
224
                if (status != AE_NOT_FOUND) {
225
                        ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSS"));
226
                }
227
                return -ENODEV;
228
        }
229
 
230
        tss = buffer.pointer;
231
        if (!tss || (tss->type != ACPI_TYPE_PACKAGE)) {
232
                printk(KERN_ERR PREFIX "Invalid _TSS data\n");
233
                result = -EFAULT;
234
                goto end;
235
        }
236
 
237
        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
238
                          tss->package.count));
239
 
240
        pr->throttling.state_count = tss->package.count;
241
        pr->throttling.states_tss =
242
            kmalloc(sizeof(struct acpi_processor_tx_tss) * tss->package.count,
243
                    GFP_KERNEL);
244
        if (!pr->throttling.states_tss) {
245
                result = -ENOMEM;
246
                goto end;
247
        }
248
 
249
        for (i = 0; i < pr->throttling.state_count; i++) {
250
 
251
                struct acpi_processor_tx_tss *tx =
252
                    (struct acpi_processor_tx_tss *)&(pr->throttling.
253
                                                      states_tss[i]);
254
 
255
                state.length = sizeof(struct acpi_processor_tx_tss);
256
                state.pointer = tx;
257
 
258
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i));
259
 
260
                status = acpi_extract_package(&(tss->package.elements[i]),
261
                                              &format, &state);
262
                if (ACPI_FAILURE(status)) {
263
                        ACPI_EXCEPTION((AE_INFO, status, "Invalid _TSS data"));
264
                        result = -EFAULT;
265
                        kfree(pr->throttling.states_tss);
266
                        goto end;
267
                }
268
 
269
                if (!tx->freqpercentage) {
270
                        printk(KERN_ERR PREFIX
271
                               "Invalid _TSS data: freq is zero\n");
272
                        result = -EFAULT;
273
                        kfree(pr->throttling.states_tss);
274
                        goto end;
275
                }
276
        }
277
 
278
      end:
279
        kfree(buffer.pointer);
280
 
281
        return result;
282
}
283
 
284
/*
 * _TSD - T-State Dependencies
 *
 * Evaluate _TSD and unpack the single dependency package into
 * pr->throttling.domain_info.
 */
static int acpi_processor_get_tsd(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
	struct acpi_buffer state = { 0, NULL };
	union acpi_object *tsd = NULL;
	struct acpi_tsd_package *pdomain;

	status = acpi_evaluate_object(pr->handle, "_TSD", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		/* _TSD is optional; only log unexpected failures. */
		if (status != AE_NOT_FOUND) {
			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSD"));
		}
		return -ENODEV;
	}

	tsd = buffer.pointer;
	if (!tsd || (tsd->type != ACPI_TYPE_PACKAGE)) {
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _TSD data\n"));
		result = -EFAULT;
		goto end;
	}

	/* Exactly one dependency package is expected. */
	if (tsd->package.count != 1) {
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _TSD data\n"));
		result = -EFAULT;
		goto end;
	}

	pdomain = &(pr->throttling.domain_info);

	state.length = sizeof(struct acpi_tsd_package);
	state.pointer = pdomain;

	/* Unpack directly into pr->throttling.domain_info. */
	status = acpi_extract_package(&(tsd->package.elements[0]),
				      &format, &state);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _TSD data\n"));
		result = -EFAULT;
		goto end;
	}

	/* Only revision 0 with its fixed entry count is understood. */
	if (pdomain->num_entries != ACPI_TSD_REV0_ENTRIES) {
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _TSD:num_entries\n"));
		result = -EFAULT;
		goto end;
	}

	if (pdomain->revision != ACPI_TSD_REV0_REVISION) {
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _TSD:revision\n"));
		result = -EFAULT;
		goto end;
	}

      end:
	kfree(buffer.pointer);
	return result;
}
347
 
348
/* --------------------------------------------------------------------------
                              Throttling Control
   -------------------------------------------------------------------------- */
/*
 * Read the current T-state from the legacy FADT P_BLK duty-cycle register
 * and cache it in pr->throttling.state.
 */
static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr)
{
	int state = 0;
	u32 value = 0;
	u32 duty_mask = 0;
	u32 duty_value = 0;

	if (!pr)
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	pr->throttling.state = 0;

	/* Mask covering the duty_value field (state_count is a power of 2). */
	duty_mask = pr->throttling.state_count - 1;

	duty_mask <<= pr->throttling.duty_offset;

	local_irq_disable();

	value = inl(pr->throttling.address);

	/*
	 * Compute the current throttling state when throttling is enabled
	 * (bit 4 is on).
	 */
	if (value & 0x10) {
		duty_value = value & duty_mask;
		duty_value >>= pr->throttling.duty_offset;

		/* duty_value == 0 encodes "not throttled" (state 0). */
		if (duty_value)
			state = pr->throttling.state_count - duty_value;
	}

	pr->throttling.state = state;

	local_irq_enable();

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "Throttling state is T%d (%d%% throttling applied)\n",
			  state, pr->throttling.states[state].performance));

	return 0;
}
396
 
397
#ifdef CONFIG_X86
398
/*
 * Read the current throttling value from MSR_IA32_THERM_CONTROL
 * (FIXED_HARDWARE address space).  Returns 0 on success, -1 when the CPU
 * does not support MSR-based throttling.
 */
static int acpi_throttling_rdmsr(struct acpi_processor *pr,
					acpi_integer * value)
{
	struct cpuinfo_x86 *c;
	u64 msr_high, msr_low;
	unsigned int cpu;
	u64 msr = 0;
	int ret = -1;

	cpu = pr->id;
	c = &cpu_data(cpu);

	/* Only Intel CPUs advertising the ACPI feature flag are handled. */
	if ((c->x86_vendor != X86_VENDOR_INTEL) ||
		!cpu_has(c, X86_FEATURE_ACPI)) {
		printk(KERN_ERR PREFIX
			"HARDWARE addr space,NOT supported yet\n");
	} else {
		msr_low = 0;
		msr_high = 0;
		/* NOTE(review): rdmsr_safe() writes 32 bits through each
		 * pointer; the u64s are pre-zeroed so their upper halves stay
		 * zero (relies on little-endian layout) — confirm. */
		rdmsr_safe(MSR_IA32_THERM_CONTROL,
			(u32 *)&msr_low , (u32 *) &msr_high);
		msr = (msr_high << 32) | msr_low;
		*value = (acpi_integer) msr;
		ret = 0;
	}
	return ret;
}
425
 
426
/*
 * Write a throttling value to MSR_IA32_THERM_CONTROL (FIXED_HARDWARE
 * address space).  Returns 0 on success, -1 when unsupported.
 */
static int acpi_throttling_wrmsr(struct acpi_processor *pr, acpi_integer value)
{
	struct cpuinfo_x86 *c;
	unsigned int cpu;
	int ret = -1;
	u64 msr;

	cpu = pr->id;
	c = &cpu_data(cpu);

	/* Only Intel CPUs advertising the ACPI feature flag are handled. */
	if ((c->x86_vendor != X86_VENDOR_INTEL) ||
		!cpu_has(c, X86_FEATURE_ACPI)) {
		printk(KERN_ERR PREFIX
			"HARDWARE addr space,NOT supported yet\n");
	} else {
		msr = value;
		/* Split the 64-bit value into the lo/hi halves wrmsr takes. */
		wrmsr_safe(MSR_IA32_THERM_CONTROL,
			msr & 0xffffffff, msr >> 32);
		ret = 0;
	}
	return ret;
}
448
#else
449
/* Non-x86 stub: FIXED_HARDWARE (MSR) throttling is x86-only. */
static int acpi_throttling_rdmsr(struct acpi_processor *pr,
				acpi_integer * value)
{
	printk(KERN_ERR PREFIX
		"HARDWARE addr space,NOT supported yet\n");
	return -1;
}
456
 
457
/* Non-x86 stub: FIXED_HARDWARE (MSR) throttling is x86-only. */
static int acpi_throttling_wrmsr(struct acpi_processor *pr, acpi_integer value)
{
	printk(KERN_ERR PREFIX
		"HARDWARE addr space,NOT supported yet\n");
	return -1;
}
463
#endif
464
 
465
static int acpi_read_throttling_status(struct acpi_processor *pr,
466
                                        acpi_integer *value)
467
{
468
        u32 bit_width, bit_offset;
469
        u64 ptc_value;
470
        u64 ptc_mask;
471
        struct acpi_processor_throttling *throttling;
472
        int ret = -1;
473
 
474
        throttling = &pr->throttling;
475
        switch (throttling->status_register.space_id) {
476
        case ACPI_ADR_SPACE_SYSTEM_IO:
477
                ptc_value = 0;
478
                bit_width = throttling->status_register.bit_width;
479
                bit_offset = throttling->status_register.bit_offset;
480
 
481
                acpi_os_read_port((acpi_io_address) throttling->status_register.
482
                                  address, (u32 *) &ptc_value,
483
                                  (u32) (bit_width + bit_offset));
484
                ptc_mask = (1 << bit_width) - 1;
485
                *value = (acpi_integer) ((ptc_value >> bit_offset) & ptc_mask);
486
                ret = 0;
487
                break;
488
        case ACPI_ADR_SPACE_FIXED_HARDWARE:
489
                ret = acpi_throttling_rdmsr(pr, value);
490
                break;
491
        default:
492
                printk(KERN_ERR PREFIX "Unknown addr space %d\n",
493
                       (u32) (throttling->status_register.space_id));
494
        }
495
        return ret;
496
}
497
 
498
static int acpi_write_throttling_state(struct acpi_processor *pr,
499
                                acpi_integer value)
500
{
501
        u32 bit_width, bit_offset;
502
        u64 ptc_value;
503
        u64 ptc_mask;
504
        struct acpi_processor_throttling *throttling;
505
        int ret = -1;
506
 
507
        throttling = &pr->throttling;
508
        switch (throttling->control_register.space_id) {
509
        case ACPI_ADR_SPACE_SYSTEM_IO:
510
                bit_width = throttling->control_register.bit_width;
511
                bit_offset = throttling->control_register.bit_offset;
512
                ptc_mask = (1 << bit_width) - 1;
513
                ptc_value = value & ptc_mask;
514
 
515
                acpi_os_write_port((acpi_io_address) throttling->
516
                                        control_register.address,
517
                                        (u32) (ptc_value << bit_offset),
518
                                        (u32) (bit_width + bit_offset));
519
                ret = 0;
520
                break;
521
        case ACPI_ADR_SPACE_FIXED_HARDWARE:
522
                ret = acpi_throttling_wrmsr(pr, value);
523
                break;
524
        default:
525
                printk(KERN_ERR PREFIX "Unknown addr space %d\n",
526
                       (u32) (throttling->control_register.space_id));
527
        }
528
        return ret;
529
}
530
 
531
static int acpi_get_throttling_state(struct acpi_processor *pr,
532
                                acpi_integer value)
533
{
534
        int i;
535
 
536
        for (i = 0; i < pr->throttling.state_count; i++) {
537
                struct acpi_processor_tx_tss *tx =
538
                    (struct acpi_processor_tx_tss *)&(pr->throttling.
539
                                                      states_tss[i]);
540
                if (tx->control == value)
541
                        break;
542
        }
543
        if (i > pr->throttling.state_count)
544
                i = -1;
545
        return i;
546
}
547
 
548
/*
 * Look up the _TSS "control" value that must be written to enter @state.
 * Returns 0 and stores the value through @value on success, -1 when @state
 * is out of range.
 */
static int acpi_get_throttling_value(struct acpi_processor *pr,
			int state, acpi_integer *value)
{
	int ret = -1;

	/*
	 * Valid indices are 0 .. state_count-1; the original tested
	 * "state <= state_count", which permitted reading one entry past the
	 * end of the states_tss array.
	 */
	if (state >= 0 && state < pr->throttling.state_count) {
		struct acpi_processor_tx_tss *tx =
		    (struct acpi_processor_tx_tss *)&(pr->throttling.
						      states_tss[state]);
		*value = tx->control;
		ret = 0;
	}
	return ret;
}
562
 
563
/*
 * Refresh pr->throttling.state via the _PTC status register and the cached
 * _TSS table.
 */
static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
{
	int state = 0;
	int ret;
	acpi_integer value;

	if (!pr)
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	pr->throttling.state = 0;

	value = 0;
	ret = acpi_read_throttling_status(pr, &value);
	if (ret >= 0) {
		/* NOTE(review): acpi_get_throttling_state() returns -1 when
		 * the raw value matches no _TSS entry, and that sentinel is
		 * stored into pr->throttling.state unchecked — confirm that
		 * consumers tolerate a negative state. */
		state = acpi_get_throttling_state(pr, value);
		pr->throttling.state = state;
	}

	return 0;
}
586
 
587
/*
 * Dispatch to the FADT- or PTC-specific "get" routine, pinned to the target
 * CPU because the underlying port/MSR reads are per-CPU.
 */
static int acpi_processor_get_throttling(struct acpi_processor *pr)
{
	cpumask_t saved_mask;
	int ret;

	/*
	 * Migrate task to the cpu pointed by pr.
	 */
	saved_mask = current->cpus_allowed;
	set_cpus_allowed(current, cpumask_of_cpu(pr->id));
	ret = pr->throttling.acpi_processor_get_throttling(pr);
	/* restore the previous state */
	set_cpus_allowed(current, saved_mask);

	return ret;
}
603
 
604
static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
605
{
606
        int i, step;
607
 
608
        if (!pr->throttling.address) {
609
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling register\n"));
610
                return -EINVAL;
611
        } else if (!pr->throttling.duty_width) {
612
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling states\n"));
613
                return -EINVAL;
614
        }
615
        /* TBD: Support duty_cycle values that span bit 4. */
616
        else if ((pr->throttling.duty_offset + pr->throttling.duty_width) > 4) {
617
                printk(KERN_WARNING PREFIX "duty_cycle spans bit 4\n");
618
                return -EINVAL;
619
        }
620
 
621
        pr->throttling.state_count = 1 << acpi_gbl_FADT.duty_width;
622
 
623
        /*
624
         * Compute state values. Note that throttling displays a linear power
625
         * performance relationship (at 50% performance the CPU will consume
626
         * 50% power).  Values are in 1/10th of a percent to preserve accuracy.
627
         */
628
 
629
        step = (1000 / pr->throttling.state_count);
630
 
631
        for (i = 0; i < pr->throttling.state_count; i++) {
632
                pr->throttling.states[i].performance = 1000 - step * i;
633
                pr->throttling.states[i].power = 1000 - step * i;
634
        }
635
        return 0;
636
}
637
 
638
/*
 * Program a new T-state via the legacy FADT P_BLK duty-cycle register.
 * Throttling is disabled (bit 4 cleared) before the duty value changes, then
 * re-enabled; a target state of 0 leaves it disabled.
 */
static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
					      int state)
{
	u32 value = 0;
	u32 duty_mask = 0;
	u32 duty_value = 0;

	if (!pr)
		return -EINVAL;

	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	/* Already in the requested state: nothing to do. */
	if (state == pr->throttling.state)
		return 0;

	/* Refuse to go below the platform's _TPC limit. */
	if (state < pr->throttling_platform_limit)
		return -EPERM;
	/*
	 * Calculate the duty_value and duty_mask.
	 */
	if (state) {
		duty_value = pr->throttling.state_count - state;

		duty_value <<= pr->throttling.duty_offset;

		/* Used to clear all duty_value bits */
		duty_mask = pr->throttling.state_count - 1;

		/* NOTE(review): duty_value is shifted by
		 * pr->throttling.duty_offset while duty_mask is shifted by
		 * acpi_gbl_FADT.duty_offset — presumably identical values;
		 * confirm they cannot diverge. */
		duty_mask <<= acpi_gbl_FADT.duty_offset;
		duty_mask = ~duty_mask;
	}

	local_irq_disable();

	/*
	 * Disable throttling by writing a 0 to bit 4.  Note that we must
	 * turn it off before you can change the duty_value.
	 */
	value = inl(pr->throttling.address);
	if (value & 0x10) {
		value &= 0xFFFFFFEF;
		outl(value, pr->throttling.address);
	}

	/*
	 * Write the new duty_value and then enable throttling.  Note
	 * that a state value of 0 leaves throttling disabled.
	 */
	if (state) {
		value &= duty_mask;
		value |= duty_value;
		outl(value, pr->throttling.address);

		value |= 0x00000010;
		outl(value, pr->throttling.address);
	}

	pr->throttling.state = state;

	local_irq_enable();

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "Throttling state set to T%d (%d%%)\n", state,
			  (pr->throttling.states[state].performance ? pr->
			   throttling.states[state].performance / 10 : 0)));

	return 0;
}
710
 
711
/*
 * Program a new T-state by writing the _TSS "control" value for @state
 * through the _PTC control register.
 */
static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
					     int state)
{
	int ret;
	acpi_integer value;

	if (!pr)
		return -EINVAL;

	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	/* Already in the requested state: nothing to do. */
	if (state == pr->throttling.state)
		return 0;

	/* Refuse to go below the platform's _TPC limit. */
	if (state < pr->throttling_platform_limit)
		return -EPERM;

	value = 0;
	ret = acpi_get_throttling_value(pr, state, &value);
	if (ret >= 0) {
		/* NOTE(review): acpi_write_throttling_state()'s return value
		 * is ignored; a failed write still updates the cached state —
		 * confirm this is intentional. */
		acpi_write_throttling_state(pr, value);
		pr->throttling.state = state;
	}

	return 0;
}
741
 
742
/*
 * Dispatch to the FADT- or PTC-specific "set" routine, pinned to the target
 * CPU because the underlying port/MSR writes are per-CPU.
 */
int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
{
	cpumask_t saved_mask;
	int ret;
	/*
	 * Migrate task to the cpu pointed by pr.
	 */
	saved_mask = current->cpus_allowed;
	set_cpus_allowed(current, cpumask_of_cpu(pr->id));
	ret = pr->throttling.acpi_processor_set_throttling(pr, state);
	/* restore the previous state */
	set_cpus_allowed(current, saved_mask);
	return ret;
}
756
 
757
int acpi_processor_get_throttling_info(struct acpi_processor *pr)
758
{
759
        int result = 0;
760
 
761
        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
762
                          "pblk_address[0x%08x] duty_offset[%d] duty_width[%d]\n",
763
                          pr->throttling.address,
764
                          pr->throttling.duty_offset,
765
                          pr->throttling.duty_width));
766
 
767
        if (!pr)
768
                return -EINVAL;
769
 
770
        /*
771
         * Evaluate _PTC, _TSS and _TPC
772
         * They must all be present or none of them can be used.
773
         */
774
        if (acpi_processor_get_throttling_control(pr) ||
775
                acpi_processor_get_throttling_states(pr) ||
776
                acpi_processor_get_platform_limit(pr))
777
        {
778
                pr->throttling.acpi_processor_get_throttling =
779
                    &acpi_processor_get_throttling_fadt;
780
                pr->throttling.acpi_processor_set_throttling =
781
                    &acpi_processor_set_throttling_fadt;
782
                if (acpi_processor_get_fadt_info(pr))
783
                        return 0;
784
        } else {
785
                pr->throttling.acpi_processor_get_throttling =
786
                    &acpi_processor_get_throttling_ptc;
787
                pr->throttling.acpi_processor_set_throttling =
788
                    &acpi_processor_set_throttling_ptc;
789
        }
790
 
791
        acpi_processor_get_tsd(pr);
792
 
793
        /*
794
         * PIIX4 Errata: We don't support throttling on the original PIIX4.
795
         * This shouldn't be an issue as few (if any) mobile systems ever
796
         * used this part.
797
         */
798
        if (errata.piix4.throttle) {
799
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
800
                                  "Throttling not supported on PIIX4 A- or B-step\n"));
801
                return 0;
802
        }
803
 
804
        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
805
                          pr->throttling.state_count));
806
 
807
        pr->flags.throttling = 1;
808
 
809
        /*
810
         * Disable throttling (if enabled).  We'll let subsequent policy (e.g.
811
         * thermal) decide to lower performance if it so chooses, but for now
812
         * we'll crank up the speed.
813
         */
814
 
815
        result = acpi_processor_get_throttling(pr);
816
        if (result)
817
                goto end;
818
 
819
        if (pr->throttling.state) {
820
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
821
                                  "Disabling throttling (was T%d)\n",
822
                                  pr->throttling.state));
823
                result = acpi_processor_set_throttling(pr, 0);
824
                if (result)
825
                        goto end;
826
        }
827
 
828
      end:
829
        if (result)
830
                pr->flags.throttling = 0;
831
 
832
        return result;
833
}
834
 
835
/* proc interface */
836
 
837
/*
 * /proc show routine: print the state count, the active T-state, and the
 * per-state percentage table.
 */
static int acpi_processor_throttling_seq_show(struct seq_file *seq,
					      void *offset)
{
	struct acpi_processor *pr = seq->private;
	int i = 0;
	int result = 0;

	if (!pr)
		goto end;

	if (!(pr->throttling.state_count > 0)) {
		seq_puts(seq, "<not supported>\n");
		goto end;
	}

	/* Refresh the cached state before reporting it. */
	result = acpi_processor_get_throttling(pr);

	if (result) {
		seq_puts(seq,
			 "Could not determine current throttling state.\n");
		goto end;
	}

	seq_printf(seq, "state count:             %d\n"
		   "active state:            T%d\n"
		   "state available: T%d to T%d\n",
		   pr->throttling.state_count, pr->throttling.state,
		   pr->throttling_platform_limit,
		   pr->throttling.state_count - 1);

	seq_puts(seq, "states:\n");
	/* FADT-style entries store performance in 1/10th percent; _TSS
	 * entries store a frequency percentage directly. */
	if (pr->throttling.acpi_processor_get_throttling ==
			acpi_processor_get_throttling_fadt) {
		for (i = 0; i < pr->throttling.state_count; i++)
			seq_printf(seq, "   %cT%d:                  %02d%%\n",
				   (i == pr->throttling.state ? '*' : ' '), i,
				   (pr->throttling.states[i].performance ? pr->
				    throttling.states[i].performance / 10 : 0));
	} else {
		for (i = 0; i < pr->throttling.state_count; i++)
			seq_printf(seq, "   %cT%d:                  %02d%%\n",
				   (i == pr->throttling.state ? '*' : ' '), i,
				   (int)pr->throttling.states_tss[i].
				   freqpercentage);
	}

      end:
	return 0;
}
886
 
887
static int acpi_processor_throttling_open_fs(struct inode *inode,
888
                                             struct file *file)
889
{
890
        return single_open(file, acpi_processor_throttling_seq_show,
891
                           PDE(inode)->data);
892
}
893
 
894
/*
 * Write handler for the /proc throttling file: parse the user-supplied
 * T-state number and program it.  Returns @count on success.
 */
static ssize_t acpi_processor_write_throttling(struct file *file,
					       const char __user * buffer,
					       size_t count, loff_t * data)
{
	int result = 0;
	struct seq_file *m = file->private_data;
	struct acpi_processor *pr = m->private;
	char state_string[12] = { '\0' };

	/* Reserve one byte for the NUL terminator. */
	if (!pr || (count > sizeof(state_string) - 1))
		return -EINVAL;

	if (copy_from_user(state_string, buffer, count))
		return -EFAULT;

	state_string[count] = '\0';

	result = acpi_processor_set_throttling(pr,
					       simple_strtoul(state_string,
							      NULL, 0));
	if (result)
		return result;

	return count;
}
919
 
920
/* /proc file operations: reads show the T-state table, writes select a new
 * T-state.  Exported (non-static): registered by the processor core. */
struct file_operations acpi_processor_throttling_fops = {
	.open = acpi_processor_throttling_open_fs,
	.read = seq_read,
	.write = acpi_processor_write_throttling,
	.llseek = seq_lseek,
	.release = single_release,
};

powered by: WebSVN 2.1.0

© copyright 1999-2025 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.