/*
 * drivers/cpufreq/cpufreq_conservative.c
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                    Jun Nakajima <jun.nakajima@intel.com>
 *           (C) 2004 Alexander Clouter <alex-kernel@digriz.org.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/cpufreq.h>
#include <linux/sysctl.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/cpu.h>
#include <linux/kmod.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
/*
 * dbs is used in this file as a shorthand for demand-based switching.
 * It helps to keep variable names smaller and simpler.
 */

#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define DEF_FREQUENCY_DOWN_THRESHOLD		(20)

/*
 * The polling frequency of this governor depends on the capability of
 * the processor. Default polling frequency is 1000 times the transition
 * latency of the processor. The governor will work on any processor with
 * transition latency <= 10mS, using appropriate sampling rate.
 * For CPUs with transition latency > 10mS (mostly drivers with
 * CPUFREQ_ETERNAL), this governor will not work.
 * All times here are in uS.
 */
static unsigned int def_sampling_rate;
#define MIN_SAMPLING_RATE_RATIO			(2)
/* for correct statistics, we need at least 10 ticks between each measure */
#define MIN_STAT_SAMPLING_RATE \
		(MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
#define MIN_SAMPLING_RATE \
		(def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
#define MAX_SAMPLING_RATE			(500 * def_sampling_rate)
#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER	(1000)
#define DEF_SAMPLING_DOWN_FACTOR		(1)
#define MAX_SAMPLING_DOWN_FACTOR		(10)
#define TRANSITION_LATENCY_LIMIT		(10 * 1000 * 1000)

static void do_dbs_timer(struct work_struct *work);

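/*
 * Per-CPU governor state: the policy being managed, idle-time snapshots
 * used by the frequency up/down checks, an enable flag, the number of
 * samples skipped since the last down evaluation, and the frequency most
 * recently requested for this CPU.
 */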
struct cpu_dbs_info_s {
        struct cpufreq_policy *cur_policy;
        unsigned int prev_cpu_idle_up;
        unsigned int prev_cpu_idle_down;
        unsigned int enable;
        unsigned int down_skip;
        unsigned int requested_freq;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);

static unsigned int dbs_enable;	/* number of CPUs using this policy */

/*
 * DEADLOCK ALERT! There is an ordering requirement between cpu_hotplug
 * lock and dbs_mutex. cpu_hotplug lock should always be held before
 * dbs_mutex. If any function that can potentially take cpu_hotplug lock
 * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
 * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
 * is recursive for the same process. -Venki
 */
static DEFINE_MUTEX (dbs_mutex);
static DECLARE_DELAYED_WORK(dbs_work, do_dbs_timer);

struct dbs_tuners {
        unsigned int sampling_rate;
        unsigned int sampling_down_factor;
        unsigned int up_threshold;
        unsigned int down_threshold;
        unsigned int ignore_nice;
        unsigned int freq_step;
};

static struct dbs_tuners dbs_tuners_ins = {
        .up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
        .down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
        .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
        .ignore_nice = 0,
        .freq_step = 5,
};

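/*
 * Idle time for a CPU: the sum of its idle and iowait ticks from kstat_cpu,
 * plus its nice ticks when ignore_nice is set so that niced load counts as
 * idle time rather than as busy time.
 */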
static inline unsigned int get_cpu_idle_time(unsigned int cpu)
{
        unsigned int add_nice = 0, ret;

        if (dbs_tuners_ins.ignore_nice)
                add_nice = kstat_cpu(cpu).cpustat.nice;

        ret = kstat_cpu(cpu).cpustat.idle +
                kstat_cpu(cpu).cpustat.iowait +
                add_nice;

        return ret;
}

/* keep track of frequency transitions */
static int
dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
                     void *data)
{
        struct cpufreq_freqs *freq = data;
        struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info,
                                                        freq->cpu);

        if (!this_dbs_info->enable)
                return 0;

        this_dbs_info->requested_freq = freq->new;

        return 0;
}

static struct notifier_block dbs_cpufreq_notifier_block = {
        .notifier_call = dbs_cpufreq_notifier
};

/************************** sysfs interface ************************/
static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
{
        return sprintf (buf, "%u\n", MAX_SAMPLING_RATE);
}

static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
{
        return sprintf (buf, "%u\n", MIN_SAMPLING_RATE);
}

#define define_one_ro(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(sampling_rate_max);
define_one_ro(sampling_rate_min);

/* cpufreq_conservative Governor Tunables */
#define show_one(file_name, object) \
static ssize_t show_##file_name \
(struct cpufreq_policy *unused, char *buf) \
{ \
        return sprintf(buf, "%u\n", dbs_tuners_ins.object); \
}
show_one(sampling_rate, sampling_rate);
show_one(sampling_down_factor, sampling_down_factor);
show_one(up_threshold, up_threshold);
show_one(down_threshold, down_threshold);
show_one(ignore_nice_load, ignore_nice);
show_one(freq_step, freq_step);

static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
                const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf (buf, "%u", &input);
        if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
                return -EINVAL;

        mutex_lock(&dbs_mutex);
        dbs_tuners_ins.sampling_down_factor = input;
        mutex_unlock(&dbs_mutex);

        return count;
}

static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
                const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf (buf, "%u", &input);

        mutex_lock(&dbs_mutex);
        if (ret != 1 || input > MAX_SAMPLING_RATE || input < MIN_SAMPLING_RATE) {
                mutex_unlock(&dbs_mutex);
                return -EINVAL;
        }

        dbs_tuners_ins.sampling_rate = input;
        mutex_unlock(&dbs_mutex);

        return count;
}

static ssize_t store_up_threshold(struct cpufreq_policy *unused,
                const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf (buf, "%u", &input);

        mutex_lock(&dbs_mutex);
        if (ret != 1 || input > 100 || input <= dbs_tuners_ins.down_threshold) {
                mutex_unlock(&dbs_mutex);
                return -EINVAL;
        }

        dbs_tuners_ins.up_threshold = input;
        mutex_unlock(&dbs_mutex);

        return count;
}

static ssize_t store_down_threshold(struct cpufreq_policy *unused,
                const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf (buf, "%u", &input);

        mutex_lock(&dbs_mutex);
        if (ret != 1 || input > 100 || input >= dbs_tuners_ins.up_threshold) {
                mutex_unlock(&dbs_mutex);
                return -EINVAL;
        }

        dbs_tuners_ins.down_threshold = input;
        mutex_unlock(&dbs_mutex);

        return count;
}

static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
                const char *buf, size_t count)
{
        unsigned int input;
        int ret;

        unsigned int j;

        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        if (input > 1)
                input = 1;

        mutex_lock(&dbs_mutex);
        if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
                mutex_unlock(&dbs_mutex);
                return count;
        }
        dbs_tuners_ins.ignore_nice = input;

        /* we need to re-evaluate prev_cpu_idle_up and prev_cpu_idle_down */
        for_each_online_cpu(j) {
                struct cpu_dbs_info_s *j_dbs_info;
                j_dbs_info = &per_cpu(cpu_dbs_info, j);
                j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
                j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up;
        }
        mutex_unlock(&dbs_mutex);

        return count;
}

static ssize_t store_freq_step(struct cpufreq_policy *policy,
                const char *buf, size_t count)
{
        unsigned int input;
        int ret;

        ret = sscanf(buf, "%u", &input);

        if (ret != 1)
                return -EINVAL;

        if (input > 100)
                input = 100;

        /* no need to test here if freq_step is zero as the user might actually
         * want this, they would be crazy though :) */
        mutex_lock(&dbs_mutex);
        dbs_tuners_ins.freq_step = input;
        mutex_unlock(&dbs_mutex);

        return count;
}

#define define_one_rw(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)

define_one_rw(sampling_rate);
define_one_rw(sampling_down_factor);
define_one_rw(up_threshold);
define_one_rw(down_threshold);
define_one_rw(ignore_nice_load);
define_one_rw(freq_step);

static struct attribute * dbs_attributes[] = {
        &sampling_rate_max.attr,
        &sampling_rate_min.attr,
        &sampling_rate.attr,
        &sampling_down_factor.attr,
        &up_threshold.attr,
        &down_threshold.attr,
        &ignore_nice_load.attr,
        &freq_step.attr,
        NULL
};

static struct attribute_group dbs_attr_group = {
        .attrs = dbs_attributes,
        .name = "conservative",
};

/************************** sysfs end ************************/

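/*
 * Core sampling routine, run for each online CPU from do_dbs_timer():
 * compares the idle time accumulated since the previous sample against
 * the up/down thresholds and asks the cpufreq driver for a new target
 * frequency when a threshold is crossed.
 */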
static void dbs_check_cpu(int cpu)
{
        unsigned int idle_ticks, up_idle_ticks, down_idle_ticks;
        unsigned int tmp_idle_ticks, total_idle_ticks;
        unsigned int freq_step;
        unsigned int freq_down_sampling_rate;
        struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
        struct cpufreq_policy *policy;

        if (!this_dbs_info->enable)
                return;

        policy = this_dbs_info->cur_policy;

        /*
         * The default safe range is 20% to 80%
         * Every sampling_rate, we check
         *      - If current idle time is less than 20%, then we try to
         *        increase frequency
         * Every sampling_rate*sampling_down_factor, we check
         *      - If current idle time is more than 80%, then we try to
         *        decrease frequency
         *
         * Any frequency increase takes it to the maximum frequency.
         * Frequency reduction happens at minimum steps of
         * 5% (default) of max_frequency
         */

        /* Check for frequency increase */
        idle_ticks = UINT_MAX;

        /* Check for frequency increase */
        total_idle_ticks = get_cpu_idle_time(cpu);
        tmp_idle_ticks = total_idle_ticks -
                this_dbs_info->prev_cpu_idle_up;
        this_dbs_info->prev_cpu_idle_up = total_idle_ticks;

        if (tmp_idle_ticks < idle_ticks)
                idle_ticks = tmp_idle_ticks;

        /* Scale idle ticks by 100 and compare with up and down ticks */
        idle_ticks *= 100;
        up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) *
                usecs_to_jiffies(dbs_tuners_ins.sampling_rate);

        if (idle_ticks < up_idle_ticks) {
                this_dbs_info->down_skip = 0;
                this_dbs_info->prev_cpu_idle_down =
                        this_dbs_info->prev_cpu_idle_up;

                /* if we are already at full speed then break out early */
                if (this_dbs_info->requested_freq == policy->max)
                        return;

                freq_step = (dbs_tuners_ins.freq_step * policy->max) / 100;

                /* max freq cannot be less than 100. But who knows.... */
                if (unlikely(freq_step == 0))
                        freq_step = 5;

                this_dbs_info->requested_freq += freq_step;
                if (this_dbs_info->requested_freq > policy->max)
                        this_dbs_info->requested_freq = policy->max;

                __cpufreq_driver_target(policy, this_dbs_info->requested_freq,
                        CPUFREQ_RELATION_H);
                return;
        }

        /* Check for frequency decrease */
        this_dbs_info->down_skip++;
        if (this_dbs_info->down_skip < dbs_tuners_ins.sampling_down_factor)
                return;

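        /*
         * The idle delta below is measured against prev_cpu_idle_down, last
         * refreshed by an up-threshold hit or by the previous down
         * evaluation, so it covers roughly the sampling_rate *
         * sampling_down_factor window that down_idle_ticks is scaled to.
         */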
        /* Check for frequency decrease */
        total_idle_ticks = this_dbs_info->prev_cpu_idle_up;
        tmp_idle_ticks = total_idle_ticks -
                this_dbs_info->prev_cpu_idle_down;
        this_dbs_info->prev_cpu_idle_down = total_idle_ticks;

        if (tmp_idle_ticks < idle_ticks)
                idle_ticks = tmp_idle_ticks;

        /* Scale idle ticks by 100 and compare with up and down ticks */
        idle_ticks *= 100;
        this_dbs_info->down_skip = 0;

        freq_down_sampling_rate = dbs_tuners_ins.sampling_rate *
                dbs_tuners_ins.sampling_down_factor;
        down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) *
                usecs_to_jiffies(freq_down_sampling_rate);

        if (idle_ticks > down_idle_ticks) {
                /*
                 * if we are already at the lowest speed then break out early
                 * or if we 'cannot' reduce the speed as the user might want
                 * freq_step to be zero
                 */
                if (this_dbs_info->requested_freq == policy->min
                                || dbs_tuners_ins.freq_step == 0)
                        return;

                freq_step = (dbs_tuners_ins.freq_step * policy->max) / 100;

                /* max freq cannot be less than 100. But who knows.... */
                if (unlikely(freq_step == 0))
                        freq_step = 5;

                this_dbs_info->requested_freq -= freq_step;
                if (this_dbs_info->requested_freq < policy->min)
                        this_dbs_info->requested_freq = policy->min;

                __cpufreq_driver_target(policy, this_dbs_info->requested_freq,
                        CPUFREQ_RELATION_H);
                return;
        }
}

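/*
 * Work handler: with dbs_mutex held, evaluate every online CPU and then
 * re-arm the delayed work so the next sample runs sampling_rate
 * microseconds later.
 */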
static void do_dbs_timer(struct work_struct *work)
{
        int i;
        mutex_lock(&dbs_mutex);
        for_each_online_cpu(i)
                dbs_check_cpu(i);
        schedule_delayed_work(&dbs_work,
                        usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
        mutex_unlock(&dbs_mutex);
}

static inline void dbs_timer_init(void)
{
        schedule_delayed_work(&dbs_work,
                        usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
        return;
}

static inline void dbs_timer_exit(void)
{
        cancel_delayed_work(&dbs_work);
        return;
}

static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                unsigned int event)
{
        unsigned int cpu = policy->cpu;
        struct cpu_dbs_info_s *this_dbs_info;
        unsigned int j;
        int rc;

        this_dbs_info = &per_cpu(cpu_dbs_info, cpu);

        switch (event) {
        case CPUFREQ_GOV_START:
                if ((!cpu_online(cpu)) || (!policy->cur))
                        return -EINVAL;

                if (this_dbs_info->enable) /* Already enabled */
                        break;

                mutex_lock(&dbs_mutex);

                rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
                if (rc) {
                        mutex_unlock(&dbs_mutex);
                        return rc;
                }

                for_each_cpu_mask(j, policy->cpus) {
                        struct cpu_dbs_info_s *j_dbs_info;
                        j_dbs_info = &per_cpu(cpu_dbs_info, j);
                        j_dbs_info->cur_policy = policy;

                        j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(cpu);
                        j_dbs_info->prev_cpu_idle_down
                                = j_dbs_info->prev_cpu_idle_up;
                }
                this_dbs_info->enable = 1;
                this_dbs_info->down_skip = 0;
                this_dbs_info->requested_freq = policy->cur;

                dbs_enable++;
                /*
                 * Start the timer/schedule the work when this governor
                 * is used for the first time
                 */
                if (dbs_enable == 1) {
                        unsigned int latency;
                        /* policy latency is in nS. Convert it to uS first */
                        latency = policy->cpuinfo.transition_latency / 1000;
                        if (latency == 0)
                                latency = 1;

                        def_sampling_rate = 10 * latency *
                                        DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;

                        if (def_sampling_rate < MIN_STAT_SAMPLING_RATE)
                                def_sampling_rate = MIN_STAT_SAMPLING_RATE;

                        dbs_tuners_ins.sampling_rate = def_sampling_rate;

                        dbs_timer_init();
                        cpufreq_register_notifier(
                                        &dbs_cpufreq_notifier_block,
                                        CPUFREQ_TRANSITION_NOTIFIER);
                }

                mutex_unlock(&dbs_mutex);
                break;

        case CPUFREQ_GOV_STOP:
                mutex_lock(&dbs_mutex);
                this_dbs_info->enable = 0;
                sysfs_remove_group(&policy->kobj, &dbs_attr_group);
                dbs_enable--;
                /*
                 * Stop the timer/scheduled work when the last CPU using
                 * this governor goes away
                 */
                if (dbs_enable == 0) {
                        dbs_timer_exit();
                        cpufreq_unregister_notifier(
                                        &dbs_cpufreq_notifier_block,
                                        CPUFREQ_TRANSITION_NOTIFIER);
                }

                mutex_unlock(&dbs_mutex);

                break;

        case CPUFREQ_GOV_LIMITS:
                mutex_lock(&dbs_mutex);
                if (policy->max < this_dbs_info->cur_policy->cur)
                        __cpufreq_driver_target(
                                        this_dbs_info->cur_policy,
                                        policy->max, CPUFREQ_RELATION_H);
                else if (policy->min > this_dbs_info->cur_policy->cur)
                        __cpufreq_driver_target(
                                        this_dbs_info->cur_policy,
                                        policy->min, CPUFREQ_RELATION_L);
                mutex_unlock(&dbs_mutex);
                break;
        }
        return 0;
}

struct cpufreq_governor cpufreq_gov_conservative = {
        .name = "conservative",
        .governor = cpufreq_governor_dbs,
        .max_transition_latency = TRANSITION_LATENCY_LIMIT,
        .owner = THIS_MODULE,
};
EXPORT_SYMBOL(cpufreq_gov_conservative);

static int __init cpufreq_gov_dbs_init(void)
{
        return cpufreq_register_governor(&cpufreq_gov_conservative);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
        /* Make sure that the scheduled work is indeed not running */
        flush_scheduled_work();

        cpufreq_unregister_governor(&cpufreq_gov_conservative);
}

MODULE_AUTHOR ("Alexander Clouter <alex-kernel@digriz.org.uk>");
MODULE_DESCRIPTION ("'cpufreq_conservative' - A dynamic cpufreq governor for "
                "Low Latency Frequency Transition capable processors "
                "optimised for use in a battery environment");
MODULE_LICENSE ("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);