/*
 *  drivers/cpufreq/cpufreq_ondemand.c
 *
 *  Copyright (C)  2001 Russell King
 *            (C)  2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                      Jun Nakajima <jun.nakajima@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>

/*
 * dbs is used in this file as shorthand for demand-based switching.
 * It helps to keep variable names shorter and simpler.
 */

#define DEF_FREQUENCY_UP_THRESHOLD              (80)
#define MIN_FREQUENCY_UP_THRESHOLD              (11)
#define MAX_FREQUENCY_UP_THRESHOLD              (100)

/*
 * The polling frequency of this governor depends on the capability of
 * the processor. The default polling interval is 1000 times the transition
 * latency of the processor. The governor will work on any processor with
 * transition latency <= 10 ms, using an appropriate sampling rate.
 * For CPUs with transition latency > 10 ms (mostly drivers with
 * CPUFREQ_ETERNAL), this governor will not work.
 * All times here are in us (microseconds).
 */
static unsigned int def_sampling_rate;
#define MIN_SAMPLING_RATE_RATIO                 (2)
/* for correct statistics, we need at least 10 ticks between each measure */
#define MIN_STAT_SAMPLING_RATE                  \
                        (MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
#define MIN_SAMPLING_RATE                       \
                        (def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
#define MAX_SAMPLING_RATE                       (500 * def_sampling_rate)
#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER    (1000)
#define TRANSITION_LATENCY_LIMIT                (10 * 1000 * 1000)

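/*
 * Worked example (illustrative numbers, not from the source): a CPU with
 * transition_latency = 10,000 ns gives latency = 10 us in GOV_START below,
 * so def_sampling_rate = 10 * 1000 = 10,000 us. Assuming HZ = 250 (4 ms
 * per tick), MIN_STAT_SAMPLING_RATE = 2 * jiffies_to_usecs(10) =
 * 2 * 40,000 = 80,000 us, so the 10,000 us default would be raised to
 * 80,000 us (80 ms) to keep at least 10 ticks between measurements.
 * Userspace may then tune sampling_rate anywhere between
 * MIN_SAMPLING_RATE = def_sampling_rate / 2 and
 * MAX_SAMPLING_RATE = 500 * def_sampling_rate.
 */
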
static void do_dbs_timer(struct work_struct *work);

/* Sampling types */
enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};

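/*
 * Note (added for clarity): DBS_NORMAL_SAMPLE runs the full load
 * evaluation in dbs_check_cpu(). DBS_SUB_SAMPLE is the second phase of a
 * powersave_bias window: after freq_hi_jiffies at the higher frequency,
 * the timer fires once more only to drop the CPU to freq_lo for the
 * remainder of the sampling window.
 */
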
struct cpu_dbs_info_s {
        cputime64_t prev_cpu_idle;
        cputime64_t prev_cpu_wall;
        struct cpufreq_policy *cur_policy;
        struct delayed_work work;
        struct cpufreq_frequency_table *freq_table;
        unsigned int freq_lo;
        unsigned int freq_lo_jiffies;
        unsigned int freq_hi_jiffies;
        int cpu;
        unsigned int enable:1,
                     sample_type:1;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);

static unsigned int dbs_enable; /* number of CPUs using this policy */

/*
 * DEADLOCK ALERT! There is an ordering requirement between the cpu_hotplug
 * lock and dbs_mutex: the cpu_hotplug lock should always be held before
 * dbs_mutex. If any function that can potentially take the cpu_hotplug lock
 * (like __cpufreq_driver_target()) is called with dbs_mutex taken, then
 * the cpu_hotplug lock should be taken before that. Note that the
 * cpu_hotplug lock is recursive for the same process. -Venki
 */
static DEFINE_MUTEX(dbs_mutex);

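/*
 * A minimal sketch of the ordering described above (illustrative only;
 * the hotplug helper names vary across kernel versions):
 *
 *     lock_cpu_hotplug();          // take the hotplug lock first
 *     mutex_lock(&dbs_mutex);      // ... then dbs_mutex
 *     ...                          // may call __cpufreq_driver_target()
 *     mutex_unlock(&dbs_mutex);
 *     unlock_cpu_hotplug();
 *
 * Acquiring dbs_mutex first and then entering a path that takes the
 * cpu_hotplug lock can deadlock against a hotplug operation doing the
 * reverse.
 */
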
static struct workqueue_struct  *kondemand_wq;

static struct dbs_tuners {
        unsigned int sampling_rate;
        unsigned int up_threshold;
        unsigned int ignore_nice;
        unsigned int powersave_bias;
} dbs_tuners_ins = {
        .up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
        .ignore_nice = 0,
        .powersave_bias = 0,
};

static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
{
        cputime64_t idle_time;
        cputime64_t cur_jiffies;
        cputime64_t busy_time;

        cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
        busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
                        kstat_cpu(cpu).cpustat.system);

        busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
        busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
        busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);

        if (!dbs_tuners_ins.ignore_nice) {
                busy_time = cputime64_add(busy_time,
                                kstat_cpu(cpu).cpustat.nice);
        }

        idle_time = cputime64_sub(cur_jiffies, busy_time);
        return idle_time;
}

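/*
 * In effect (formula implied by the code above, illustrative numbers):
 *
 *     idle = now - (user + system + irq + softirq + steal [+ nice])
 *
 * with all terms in jiffies held as cputime64. With ignore_nice = 0
 * (the default), nice time counts as busy: if 1000 jiffies have elapsed
 * and the CPU spent 300 in user, 100 in system and 50 in nice,
 * idle = 1000 - 450 = 550. With ignore_nice = 1, nice time is treated
 * as idle instead: 1000 - 400 = 600.
 */
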
/*
 * Find the right freq to be set now with powersave_bias on.
 * Returns the freq_hi to be used right now and will set freq_hi_jiffies,
 * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs.
 */
static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
                                          unsigned int freq_next,
                                          unsigned int relation)
{
        unsigned int freq_req, freq_reduc, freq_avg;
        unsigned int freq_hi, freq_lo;
        unsigned int index = 0;
        unsigned int jiffies_total, jiffies_hi, jiffies_lo;
        struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, policy->cpu);

        if (!dbs_info->freq_table) {
                dbs_info->freq_lo = 0;
                dbs_info->freq_lo_jiffies = 0;
                return freq_next;
        }

        cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
                        relation, &index);
        freq_req = dbs_info->freq_table[index].frequency;
        freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000;
        freq_avg = freq_req - freq_reduc;

        /* Find freq bounds for freq_avg in freq_table */
        index = 0;
        cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
                        CPUFREQ_RELATION_H, &index);
        freq_lo = dbs_info->freq_table[index].frequency;
        index = 0;
        cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
                        CPUFREQ_RELATION_L, &index);
        freq_hi = dbs_info->freq_table[index].frequency;

        /* Find out how long we have to be in hi and lo freqs */
        if (freq_hi == freq_lo) {
                dbs_info->freq_lo = 0;
                dbs_info->freq_lo_jiffies = 0;
                return freq_lo;
        }
        jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
        jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
        jiffies_hi += ((freq_hi - freq_lo) / 2);
        jiffies_hi /= (freq_hi - freq_lo);
        jiffies_lo = jiffies_total - jiffies_hi;
        dbs_info->freq_lo = freq_lo;
        dbs_info->freq_lo_jiffies = jiffies_lo;
        dbs_info->freq_hi_jiffies = jiffies_hi;
        return freq_hi;
}

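/*
 * Worked example (illustrative numbers, not from the source): with
 * powersave_bias = 100 (i.e. 10%), a freq_next resolving to
 * freq_req = 2,000,000 kHz gives freq_reduc = 200,000 and
 * freq_avg = 1,800,000 kHz. If the table only contains 1,600,000 and
 * 2,000,000, then freq_lo = 1,600,000, freq_hi = 2,000,000, and with
 * jiffies_total = 10:
 *
 *     jiffies_hi = (200,000 * 10 + 200,000) / 400,000 = 5
 *     jiffies_lo = 10 - 5 = 5
 *
 * so the CPU spends half the sampling window at 2.0 GHz and half at
 * 1.6 GHz, averaging the requested 1.8 GHz.
 */
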
static void ondemand_powersave_bias_init(void)
{
        int i;
        for_each_online_cpu(i) {
                struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, i);
                dbs_info->freq_table = cpufreq_frequency_get_table(i);
                dbs_info->freq_lo = 0;
        }
}

/************************** sysfs interface ************************/
static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
{
        return sprintf(buf, "%u\n", MAX_SAMPLING_RATE);
}

static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
{
        return sprintf(buf, "%u\n", MIN_SAMPLING_RATE);
}

#define define_one_ro(_name)            \
static struct freq_attr _name =         \
__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(sampling_rate_max);
define_one_ro(sampling_rate_min);

/* cpufreq_ondemand Governor Tunables */
#define show_one(file_name, object)                                     \
static ssize_t show_##file_name                                         \
(struct cpufreq_policy *unused, char *buf)                              \
{                                                                       \
        return sprintf(buf, "%u\n", dbs_tuners_ins.object);             \
}
show_one(sampling_rate, sampling_rate);
show_one(up_threshold, up_threshold);
show_one(ignore_nice_load, ignore_nice);
show_one(powersave_bias, powersave_bias);

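/*
 * For reference, show_one(sampling_rate, sampling_rate) above expands to:
 *
 *     static ssize_t show_sampling_rate
 *     (struct cpufreq_policy *unused, char *buf)
 *     {
 *             return sprintf(buf, "%u\n", dbs_tuners_ins.sampling_rate);
 *     }
 *
 * i.e. one read-only formatter per tunable, all backed by dbs_tuners_ins.
 */
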
static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
                const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        mutex_lock(&dbs_mutex);
        if (ret != 1 || input > MAX_SAMPLING_RATE
                     || input < MIN_SAMPLING_RATE) {
                mutex_unlock(&dbs_mutex);
                return -EINVAL;
        }

        dbs_tuners_ins.sampling_rate = input;
        mutex_unlock(&dbs_mutex);

        return count;
}

static ssize_t store_up_threshold(struct cpufreq_policy *unused,
                const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        mutex_lock(&dbs_mutex);
        if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
                        input < MIN_FREQUENCY_UP_THRESHOLD) {
                mutex_unlock(&dbs_mutex);
                return -EINVAL;
        }

        dbs_tuners_ins.up_threshold = input;
        mutex_unlock(&dbs_mutex);

        return count;
}

static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
                const char *buf, size_t count)
{
        unsigned int input;
        int ret;

        unsigned int j;

        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        if (input > 1)
                input = 1;

        mutex_lock(&dbs_mutex);
        if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
                mutex_unlock(&dbs_mutex);
                return count;
        }
        dbs_tuners_ins.ignore_nice = input;

        /* we need to re-evaluate prev_cpu_idle */
        for_each_online_cpu(j) {
                struct cpu_dbs_info_s *dbs_info;
                dbs_info = &per_cpu(cpu_dbs_info, j);
                dbs_info->prev_cpu_idle = get_cpu_idle_time(j);
                dbs_info->prev_cpu_wall = get_jiffies_64();
        }
        mutex_unlock(&dbs_mutex);

        return count;
}

static ssize_t store_powersave_bias(struct cpufreq_policy *unused,
                const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        if (ret != 1)
                return -EINVAL;

        if (input > 1000)
                input = 1000;

        mutex_lock(&dbs_mutex);
        dbs_tuners_ins.powersave_bias = input;
        ondemand_powersave_bias_init();
        mutex_unlock(&dbs_mutex);

        return count;
}

#define define_one_rw(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)

define_one_rw(sampling_rate);
define_one_rw(up_threshold);
define_one_rw(ignore_nice_load);
define_one_rw(powersave_bias);

static struct attribute *dbs_attributes[] = {
        &sampling_rate_max.attr,
        &sampling_rate_min.attr,
        &sampling_rate.attr,
        &up_threshold.attr,
        &ignore_nice_load.attr,
        &powersave_bias.attr,
        NULL
};

static struct attribute_group dbs_attr_group = {
        .attrs = dbs_attributes,
        .name = "ondemand",
};

/************************** sysfs end ************************/

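/*
 * These attributes appear under each policy's cpufreq directory, e.g.
 * (typical path, assuming the standard sysfs layout):
 *
 *     $ cat /sys/devices/system/cpu/cpu0/cpufreq/ondemand/up_threshold
 *     80
 *     $ echo 95 > /sys/devices/system/cpu/cpu0/cpufreq/ondemand/up_threshold
 *
 * Writes outside [MIN_FREQUENCY_UP_THRESHOLD, MAX_FREQUENCY_UP_THRESHOLD]
 * are rejected with -EINVAL by store_up_threshold() above.
 */
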
static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
{
        unsigned int idle_ticks, total_ticks;
        unsigned int load = 0;
        cputime64_t cur_jiffies;

        struct cpufreq_policy *policy;
        unsigned int j;

        if (!this_dbs_info->enable)
                return;

        this_dbs_info->freq_lo = 0;
        policy = this_dbs_info->cur_policy;
        cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
        total_ticks = (unsigned int) cputime64_sub(cur_jiffies,
                        this_dbs_info->prev_cpu_wall);
        this_dbs_info->prev_cpu_wall = get_jiffies_64();

        if (!total_ticks)
                return;
        /*
         * Every sampling_rate, we check whether the current idle time is
         * less than 20% (default); if it is, we try to increase the
         * frequency. Every sampling_rate, we also look for the lowest
         * frequency which can sustain the load while keeping idle time
         * over 30%. If such a frequency exists, we try to decrease to it.
         *
         * Any frequency increase takes it to the maximum frequency.
         * Frequency reduction happens at minimum steps of
         * 5% (default) of current frequency
         */

        /* Get Idle Time */
        idle_ticks = UINT_MAX;
        for_each_cpu_mask(j, policy->cpus) {
                cputime64_t total_idle_ticks;
                unsigned int tmp_idle_ticks;
                struct cpu_dbs_info_s *j_dbs_info;

                j_dbs_info = &per_cpu(cpu_dbs_info, j);
                total_idle_ticks = get_cpu_idle_time(j);
                tmp_idle_ticks = (unsigned int) cputime64_sub(total_idle_ticks,
                                j_dbs_info->prev_cpu_idle);
                j_dbs_info->prev_cpu_idle = total_idle_ticks;

                if (tmp_idle_ticks < idle_ticks)
                        idle_ticks = tmp_idle_ticks;
        }
        if (likely(total_ticks > idle_ticks))
                load = (100 * (total_ticks - idle_ticks)) / total_ticks;

        /* Check for frequency increase */
        if (load > dbs_tuners_ins.up_threshold) {
                /* if we are already at full speed then break out early */
                if (!dbs_tuners_ins.powersave_bias) {
                        if (policy->cur == policy->max)
                                return;

                        __cpufreq_driver_target(policy, policy->max,
                                CPUFREQ_RELATION_H);
                } else {
                        int freq = powersave_bias_target(policy, policy->max,
                                        CPUFREQ_RELATION_H);
                        __cpufreq_driver_target(policy, freq,
                                CPUFREQ_RELATION_L);
                }
                return;
        }

        /* Check for frequency decrease */
        /* if we cannot reduce the frequency anymore, break out early */
        if (policy->cur == policy->min)
                return;

        /*
         * The optimal frequency is the lowest frequency that can support
         * the current CPU usage without triggering the up policy. To be
         * safe, we stay 10 points under the threshold.
         */
        if (load < (dbs_tuners_ins.up_threshold - 10)) {
                unsigned int freq_next, freq_cur;

                freq_cur = __cpufreq_driver_getavg(policy);
                if (!freq_cur)
                        freq_cur = policy->cur;

                freq_next = (freq_cur * load) /
                        (dbs_tuners_ins.up_threshold - 10);

                if (!dbs_tuners_ins.powersave_bias) {
                        __cpufreq_driver_target(policy, freq_next,
                                        CPUFREQ_RELATION_L);
                } else {
                        int freq = powersave_bias_target(policy, freq_next,
                                        CPUFREQ_RELATION_L);
                        __cpufreq_driver_target(policy, freq,
                                CPUFREQ_RELATION_L);
                }
        }
}

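/*
 * Worked example (illustrative numbers, not from the source): with the
 * default up_threshold = 80 and a 10-tick window where the least-idle CPU
 * in the policy was idle for 1 tick, load = 100 * (10 - 1) / 10 = 90 > 80,
 * so the governor jumps straight to policy->max. If instead idle = 6,
 * load = 40 < 80 - 10 = 70, and with freq_cur = 2,000,000 kHz the
 * scale-down target is freq_next = 2,000,000 * 40 / 70 ~= 1,142,857 kHz,
 * which CPUFREQ_RELATION_L rounds up to the next available table frequency.
 */
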
static void do_dbs_timer(struct work_struct *work)
{
        struct cpu_dbs_info_s *dbs_info =
                container_of(work, struct cpu_dbs_info_s, work.work);
        unsigned int cpu = dbs_info->cpu;
        int sample_type = dbs_info->sample_type;

        /* We want all CPUs to do sampling nearly on the same jiffy */
        int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);

        delay -= jiffies % delay;

        if (lock_policy_rwsem_write(cpu) < 0)
                return;

        if (!dbs_info->enable) {
                unlock_policy_rwsem_write(cpu);
                return;
        }

        /* Common NORMAL_SAMPLE setup */
        dbs_info->sample_type = DBS_NORMAL_SAMPLE;
        if (!dbs_tuners_ins.powersave_bias ||
            sample_type == DBS_NORMAL_SAMPLE) {
                dbs_check_cpu(dbs_info);
                if (dbs_info->freq_lo) {
                        /* Setup timer for SUB_SAMPLE */
                        dbs_info->sample_type = DBS_SUB_SAMPLE;
                        delay = dbs_info->freq_hi_jiffies;
                }
        } else {
                __cpufreq_driver_target(dbs_info->cur_policy,
                                        dbs_info->freq_lo,
                                        CPUFREQ_RELATION_H);
        }
        queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
        unlock_policy_rwsem_write(cpu);
}

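/*
 * Alignment example (illustrative, assuming HZ = 250): a 40,000 us
 * sampling_rate gives delay = usecs_to_jiffies(40,000) = 10 jiffies.
 * If the timer runs when jiffies % 10 == 3, delay becomes 10 - 3 = 7,
 * so the next expiry lands on a multiple of 10 jiffies. Since every CPU
 * applies the same correction, their samples cluster on the same jiffy
 * and the system takes fewer scattered wakeups.
 */
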
static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
{
        /* We want all CPUs to do sampling nearly on the same jiffy */
        int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
        delay -= jiffies % delay;

        dbs_info->enable = 1;
        ondemand_powersave_bias_init();
        dbs_info->sample_type = DBS_NORMAL_SAMPLE;
        INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
        queue_delayed_work_on(dbs_info->cpu, kondemand_wq, &dbs_info->work,
                              delay);
}

static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
{
        dbs_info->enable = 0;
        cancel_delayed_work(&dbs_info->work);
}

static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                                   unsigned int event)
{
        unsigned int cpu = policy->cpu;
        struct cpu_dbs_info_s *this_dbs_info;
        unsigned int j;
        int rc;

        this_dbs_info = &per_cpu(cpu_dbs_info, cpu);

        switch (event) {
        case CPUFREQ_GOV_START:
                if ((!cpu_online(cpu)) || (!policy->cur))
                        return -EINVAL;

                if (this_dbs_info->enable) /* Already enabled */
                        break;

                mutex_lock(&dbs_mutex);
                dbs_enable++;

                rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
                if (rc) {
                        dbs_enable--;
                        mutex_unlock(&dbs_mutex);
                        return rc;
                }

                for_each_cpu_mask(j, policy->cpus) {
                        struct cpu_dbs_info_s *j_dbs_info;
                        j_dbs_info = &per_cpu(cpu_dbs_info, j);
                        j_dbs_info->cur_policy = policy;

                        j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j);
                        j_dbs_info->prev_cpu_wall = get_jiffies_64();
                }
                this_dbs_info->cpu = cpu;
                /*
                 * Start the timer and schedule the work when this governor
                 * is used for the first time
                 */
                if (dbs_enable == 1) {
                        unsigned int latency;
                        /* policy latency is in ns. Convert it to us first */
                        latency = policy->cpuinfo.transition_latency / 1000;
                        if (latency == 0)
                                latency = 1;

                        def_sampling_rate = latency *
                                        DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;

                        if (def_sampling_rate < MIN_STAT_SAMPLING_RATE)
                                def_sampling_rate = MIN_STAT_SAMPLING_RATE;

                        dbs_tuners_ins.sampling_rate = def_sampling_rate;
                }
                dbs_timer_init(this_dbs_info);

                mutex_unlock(&dbs_mutex);
                break;

        case CPUFREQ_GOV_STOP:
                mutex_lock(&dbs_mutex);
                dbs_timer_exit(this_dbs_info);
                sysfs_remove_group(&policy->kobj, &dbs_attr_group);
                dbs_enable--;
                mutex_unlock(&dbs_mutex);

                break;

        case CPUFREQ_GOV_LIMITS:
                mutex_lock(&dbs_mutex);
                if (policy->max < this_dbs_info->cur_policy->cur)
                        __cpufreq_driver_target(this_dbs_info->cur_policy,
                                                policy->max,
                                                CPUFREQ_RELATION_H);
                else if (policy->min > this_dbs_info->cur_policy->cur)
                        __cpufreq_driver_target(this_dbs_info->cur_policy,
                                                policy->min,
                                                CPUFREQ_RELATION_L);
                mutex_unlock(&dbs_mutex);
                break;
        }
        return 0;
}

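/*
 * Example of the GOV_LIMITS clamping above (illustrative numbers): if
 * userspace lowers policy->max to 1,600,000 kHz while the CPU is running
 * at 2,000,000 kHz, the governor immediately retargets to 1,600,000 with
 * CPUFREQ_RELATION_H; conversely, raising policy->min above the current
 * frequency retargets upward with CPUFREQ_RELATION_L. Normal sampling
 * then resumes within the new [min, max] bounds.
 */
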
struct cpufreq_governor cpufreq_gov_ondemand = {
        .name                   = "ondemand",
        .governor               = cpufreq_governor_dbs,
        .max_transition_latency = TRANSITION_LATENCY_LIMIT,
        .owner                  = THIS_MODULE,
};
EXPORT_SYMBOL(cpufreq_gov_ondemand);

static int __init cpufreq_gov_dbs_init(void)
{
        kondemand_wq = create_workqueue("kondemand");
        if (!kondemand_wq) {
                printk(KERN_ERR "Creation of kondemand failed\n");
                return -EFAULT;
        }
        return cpufreq_register_governor(&cpufreq_gov_ondemand);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
        cpufreq_unregister_governor(&cpufreq_gov_ondemand);
        destroy_workqueue(kondemand_wq);
}

MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
                   "Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");

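/*
 * Note (added for clarity): when ondemand is built in as the default
 * governor, it must be registered early in boot, before the cpufreq core
 * looks up the default governor for new policies; fs_initcall() runs at
 * an earlier initcall level than the module_init() (device_initcall)
 * level used in the modular case.
 */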
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);
