/*
 * drivers/cpufreq/cpufreq_ondemand.c
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2003 Venkatesh Pallipadi <[email protected]>.
 *                    Jun Nakajima <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/ktime.h>
#include <linux/sched.h>

/*
 * dbs is used in this file as a shorthand for demand-based switching.
 * It helps to keep variable names short and simple.
 */

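/*
 * Load thresholds, in percent: above up_threshold of busy time the governor
 * jumps straight to the maximum frequency; down_differential is the safety
 * margin subtracted from up_threshold when picking a lower frequency.
 * The MICRO_* values are used when idle micro-accounting is available.
 */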
#define DEF_FREQUENCY_DOWN_DIFFERENTIAL		(10)
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL	(3)
#define MICRO_FREQUENCY_UP_THRESHOLD		(95)
#define MICRO_FREQUENCY_MIN_SAMPLE_RATE		(10000)
#define MIN_FREQUENCY_UP_THRESHOLD		(11)
#define MAX_FREQUENCY_UP_THRESHOLD		(100)

/*
 * The polling frequency of this governor depends on the capability of
 * the processor. The default polling frequency is 1000 times the transition
 * latency of the processor. The governor will work on any processor with
 * a transition latency <= 10 ms, using an appropriate sampling rate.
 * For CPUs with transition latency > 10 ms (mostly drivers with
 * CPUFREQ_ETERNAL) this governor will not work.
 * All times here are in us (microseconds).
 */
#define MIN_SAMPLING_RATE_RATIO			(2)

static unsigned int min_sampling_rate;

#define LATENCY_MULTIPLIER			(1000)
#define MIN_LATENCY_MULTIPLIER			(100)
#define TRANSITION_LATENCY_LIMIT		(10 * 1000 * 1000)

static void do_dbs_timer(struct work_struct *work);

/* Sampling types */
enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};

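/*
 * Per-CPU governor state.  freq_lo, freq_lo_jiffies and freq_hi_jiffies
 * describe the two-frequency schedule computed by powersave_bias_target();
 * enable and sample_type are single-bit flags used by the sampling timer.
 */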
struct cpu_dbs_info_s {
        cputime64_t prev_cpu_idle;
        cputime64_t prev_cpu_wall;
        cputime64_t prev_cpu_nice;
        struct cpufreq_policy *cur_policy;
        struct delayed_work work;
        struct cpufreq_frequency_table *freq_table;
        unsigned int freq_lo;
        unsigned int freq_lo_jiffies;
        unsigned int freq_hi_jiffies;
        int cpu;
        unsigned int enable:1,
                     sample_type:1;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);

static unsigned int dbs_enable;	/* number of CPUs using this policy */

/*
 * DEADLOCK ALERT! There is an ordering requirement between the cpu_hotplug
 * lock and dbs_mutex. The cpu_hotplug lock should always be held before
 * dbs_mutex. If any function that can potentially take the cpu_hotplug lock
 * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
 * the cpu_hotplug lock should be taken before that. Note that the cpu_hotplug
 * lock is recursive for the same process. -Venki
 * DEADLOCK ALERT! (2): do_dbs_timer() must not take dbs_mutex, because it
 * would deadlock with cancel_delayed_work_sync(), which is needed for proper
 * raceless workqueue teardown.
 */
static DEFINE_MUTEX(dbs_mutex);

static struct workqueue_struct *kondemand_wq;

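/* Governor tunables, exported through the sysfs files defined below. */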
static struct dbs_tuners {
        unsigned int sampling_rate;
        unsigned int up_threshold;
        unsigned int down_differential;
        unsigned int ignore_nice;
        unsigned int powersave_bias;
} dbs_tuners_ins = {
        .up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
        .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
        .ignore_nice = 0,
        .powersave_bias = 0,
};

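/*
 * Jiffy-resolution fallback: idle time is the wall time minus everything
 * accounted as busy (user, system, irq, softirq, steal and nice).
 */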
static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
                                                  cputime64_t *wall)
{
        cputime64_t idle_time;
        cputime64_t cur_wall_time;
        cputime64_t busy_time;

        cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
        busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
                                  kstat_cpu(cpu).cpustat.system);

        busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
        busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
        busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
        busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);

        idle_time = cputime64_sub(cur_wall_time, busy_time);
        if (wall)
                *wall = cur_wall_time;

        return idle_time;
}

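/*
 * Prefer the tickless idle time from get_cpu_idle_time_us(); fall back to
 * the jiffy-based estimate when micro-accounting is not available (-1ULL).
 */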
static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
{
        u64 idle_time = get_cpu_idle_time_us(cpu, wall);

        if (idle_time == -1ULL)
                return get_cpu_idle_time_jiffy(cpu, wall);

        return idle_time;
}

/*
 * Find the right frequency to set now with powersave_bias on.
 * Returns the freq_hi to be used right now and sets freq_hi_jiffies,
 * freq_lo, and freq_lo_jiffies in the per-CPU area for averaging freqs.
 */
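/*
 * Illustrative numbers (not from any real frequency table): with
 * powersave_bias = 200 (20%) and a 2000 MHz request, freq_avg is 1600 MHz.
 * If the table only contains 1500 and 1800 MHz around that value, the CPU
 * runs at freq_hi = 1800 MHz for (1600 - 1500) / (1800 - 1500) = 1/3 of the
 * sampling window and at freq_lo = 1500 MHz for the rest, so the
 * time-weighted average is roughly the requested 1600 MHz.
 */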
static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
                                          unsigned int freq_next,
                                          unsigned int relation)
{
        unsigned int freq_req, freq_reduc, freq_avg;
        unsigned int freq_hi, freq_lo;
        unsigned int index = 0;
        unsigned int jiffies_total, jiffies_hi, jiffies_lo;
        struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, policy->cpu);

        if (!dbs_info->freq_table) {
                dbs_info->freq_lo = 0;
                dbs_info->freq_lo_jiffies = 0;
                return freq_next;
        }

        cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
                                       relation, &index);
        freq_req = dbs_info->freq_table[index].frequency;
        freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000;
        freq_avg = freq_req - freq_reduc;

        /* Find freq bounds for freq_avg in freq_table */
        index = 0;
        cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
                                       CPUFREQ_RELATION_H, &index);
        freq_lo = dbs_info->freq_table[index].frequency;
        index = 0;
        cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
                                       CPUFREQ_RELATION_L, &index);
        freq_hi = dbs_info->freq_table[index].frequency;

        /* Find out how long we have to be in hi and lo freqs */
        if (freq_hi == freq_lo) {
                dbs_info->freq_lo = 0;
                dbs_info->freq_lo_jiffies = 0;
                return freq_lo;
        }
        jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
        jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
        jiffies_hi += ((freq_hi - freq_lo) / 2);
        jiffies_hi /= (freq_hi - freq_lo);
        jiffies_lo = jiffies_total - jiffies_hi;
        dbs_info->freq_lo = freq_lo;
        dbs_info->freq_lo_jiffies = jiffies_lo;
        dbs_info->freq_hi_jiffies = jiffies_hi;
        return freq_hi;
}

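/*
 * Reset the powersave_bias state for all online CPUs: cache the frequency
 * table and clear any pending low-frequency phase.
 */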
static void ondemand_powersave_bias_init(void)
{
        int i;
        for_each_online_cpu(i) {
                struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, i);
                dbs_info->freq_table = cpufreq_frequency_get_table(i);
                dbs_info->freq_lo = 0;
        }
}

/************************** sysfs interface ************************/
static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
{
        printk_once(KERN_INFO "CPUFREQ: ondemand sampling_rate_max "
                    "sysfs file is deprecated - used by: %s\n", current->comm);
        return sprintf(buf, "%u\n", -1U);
}

static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
{
        return sprintf(buf, "%u\n", min_sampling_rate);
}

#define define_one_ro(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(sampling_rate_max);
define_one_ro(sampling_rate_min);

/* cpufreq_ondemand Governor Tunables */
#define show_one(file_name, object)					\
static ssize_t show_##file_name						\
(struct cpufreq_policy *unused, char *buf)				\
{									\
        return sprintf(buf, "%u\n", dbs_tuners_ins.object);		\
}
show_one(sampling_rate, sampling_rate);
show_one(up_threshold, up_threshold);
show_one(ignore_nice_load, ignore_nice);
show_one(powersave_bias, powersave_bias);

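/*
 * sysfs store handlers: parse the value written by the user, validate it
 * and update dbs_tuners_ins under dbs_mutex.
 */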
static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
                                   const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        mutex_lock(&dbs_mutex);
        if (ret != 1) {
                mutex_unlock(&dbs_mutex);
                return -EINVAL;
        }
        dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
        mutex_unlock(&dbs_mutex);

        return count;
}

static ssize_t store_up_threshold(struct cpufreq_policy *unused,
                                  const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        mutex_lock(&dbs_mutex);
        if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
            input < MIN_FREQUENCY_UP_THRESHOLD) {
                mutex_unlock(&dbs_mutex);
                return -EINVAL;
        }

        dbs_tuners_ins.up_threshold = input;
        mutex_unlock(&dbs_mutex);

        return count;
}

static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
                                      const char *buf, size_t count)
{
        unsigned int input;
        int ret;

        unsigned int j;

        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        if (input > 1)
                input = 1;

        mutex_lock(&dbs_mutex);
        if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
                mutex_unlock(&dbs_mutex);
                return count;
        }
        dbs_tuners_ins.ignore_nice = input;

        /* we need to re-evaluate prev_cpu_idle */
        for_each_online_cpu(j) {
                struct cpu_dbs_info_s *dbs_info;
                dbs_info = &per_cpu(cpu_dbs_info, j);
                dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
                                                &dbs_info->prev_cpu_wall);
                if (dbs_tuners_ins.ignore_nice)
                        dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;

        }
        mutex_unlock(&dbs_mutex);

        return count;
}

static ssize_t store_powersave_bias(struct cpufreq_policy *unused,
                                    const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        if (ret != 1)
                return -EINVAL;

        if (input > 1000)
                input = 1000;

        mutex_lock(&dbs_mutex);
        dbs_tuners_ins.powersave_bias = input;
        ondemand_powersave_bias_init();
        mutex_unlock(&dbs_mutex);

        return count;
}

#define define_one_rw(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)

define_one_rw(sampling_rate);
define_one_rw(up_threshold);
define_one_rw(ignore_nice_load);
define_one_rw(powersave_bias);

static struct attribute *dbs_attributes[] = {
        &sampling_rate_max.attr,
        &sampling_rate_min.attr,
        &sampling_rate.attr,
        &up_threshold.attr,
        &ignore_nice_load.attr,
        &powersave_bias.attr,
        NULL
};

static struct attribute_group dbs_attr_group = {
        .attrs = dbs_attributes,
        .name = "ondemand",
};

/************************** sysfs end ************************/

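/*
 * Core of the governor: estimate the load of the busiest CPU in the policy
 * over the last sampling window and pick the next target frequency.
 */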
static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
{
        unsigned int max_load_freq;

        struct cpufreq_policy *policy;
        unsigned int j;

        if (!this_dbs_info->enable)
                return;

        this_dbs_info->freq_lo = 0;
        policy = this_dbs_info->cur_policy;

        /*
         * Every sampling_rate we check whether the current idle time is less
         * than 20% (default); if it is, we try to increase the frequency.
         * Every sampling_rate we also look for the lowest frequency which can
         * sustain the load while keeping idle time over 30%. If such a
         * frequency exists, we try to decrease to this frequency.
         *
         * Any frequency increase takes it to the maximum frequency.
         * Frequency reduction happens at minimum steps of
         * 5% (default) of the current frequency.
         */

        /* Get absolute load - in terms of frequency */
        max_load_freq = 0;

        for_each_cpu(j, policy->cpus) {
                struct cpu_dbs_info_s *j_dbs_info;
                cputime64_t cur_wall_time, cur_idle_time;
                unsigned int idle_time, wall_time;
                unsigned int load, load_freq;
                int freq_avg;

                j_dbs_info = &per_cpu(cpu_dbs_info, j);

                cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);

                wall_time = (unsigned int) cputime64_sub(cur_wall_time,
                                j_dbs_info->prev_cpu_wall);
                j_dbs_info->prev_cpu_wall = cur_wall_time;

                idle_time = (unsigned int) cputime64_sub(cur_idle_time,
                                j_dbs_info->prev_cpu_idle);
                j_dbs_info->prev_cpu_idle = cur_idle_time;

                if (dbs_tuners_ins.ignore_nice) {
                        cputime64_t cur_nice;
                        unsigned long cur_nice_jiffies;

                        cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
                                        j_dbs_info->prev_cpu_nice);
                        /*
                         * Assumption: nice time between sampling periods will
                         * be less than 2^32 jiffies on 32-bit systems
                         */
                        cur_nice_jiffies = (unsigned long)
                                        cputime64_to_jiffies64(cur_nice);

                        j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
                        idle_time += jiffies_to_usecs(cur_nice_jiffies);
                }

                if (unlikely(!wall_time || wall_time < idle_time))
                        continue;

                load = 100 * (wall_time - idle_time) / wall_time;

                freq_avg = __cpufreq_driver_getavg(policy, j);
                if (freq_avg <= 0)
                        freq_avg = policy->cur;

                load_freq = load * freq_avg;
                if (load_freq > max_load_freq)
                        max_load_freq = load_freq;
        }

        /* Check for frequency increase */
        if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
                /* if we are already at full speed then break out early */
                if (!dbs_tuners_ins.powersave_bias) {
                        if (policy->cur == policy->max)
                                return;

                        __cpufreq_driver_target(policy, policy->max,
                                CPUFREQ_RELATION_H);
                } else {
                        int freq = powersave_bias_target(policy, policy->max,
                                        CPUFREQ_RELATION_H);
                        __cpufreq_driver_target(policy, freq,
                                CPUFREQ_RELATION_L);
                }
                return;
        }

        /* Check for frequency decrease */
        /* if we cannot reduce the frequency anymore, break out early */
        if (policy->cur == policy->min)
                return;

        /*
         * The optimal frequency is the lowest frequency that can support the
         * current CPU usage without triggering the up policy. To be safe, we
         * focus 10 points under the threshold.
         */
        if (max_load_freq <
            (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) *
             policy->cur) {
                unsigned int freq_next;
                freq_next = max_load_freq /
                                (dbs_tuners_ins.up_threshold -
                                 dbs_tuners_ins.down_differential);

                if (!dbs_tuners_ins.powersave_bias) {
                        __cpufreq_driver_target(policy, freq_next,
                                        CPUFREQ_RELATION_L);
                } else {
                        int freq = powersave_bias_target(policy, freq_next,
                                        CPUFREQ_RELATION_L);
                        __cpufreq_driver_target(policy, freq,
                                        CPUFREQ_RELATION_L);
                }
        }
}

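/*
 * Sampling work.  A NORMAL_SAMPLE runs dbs_check_cpu(); when powersave_bias
 * has scheduled a two-frequency phase, the follow-up SUB_SAMPLE invocation
 * only drops the CPU to freq_lo for the rest of the window.
 */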
static void do_dbs_timer(struct work_struct *work)
{
        struct cpu_dbs_info_s *dbs_info =
                container_of(work, struct cpu_dbs_info_s, work.work);
        unsigned int cpu = dbs_info->cpu;
        int sample_type = dbs_info->sample_type;

        /* We want all CPUs to do sampling nearly on the same jiffy */
        int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);

        delay -= jiffies % delay;

        if (lock_policy_rwsem_write(cpu) < 0)
                return;

        if (!dbs_info->enable) {
                unlock_policy_rwsem_write(cpu);
                return;
        }

        /* Common NORMAL_SAMPLE setup */
        dbs_info->sample_type = DBS_NORMAL_SAMPLE;
        if (!dbs_tuners_ins.powersave_bias ||
            sample_type == DBS_NORMAL_SAMPLE) {
                dbs_check_cpu(dbs_info);
                if (dbs_info->freq_lo) {
                        /* Setup timer for SUB_SAMPLE */
                        dbs_info->sample_type = DBS_SUB_SAMPLE;
                        delay = dbs_info->freq_hi_jiffies;
                }
        } else {
                __cpufreq_driver_target(dbs_info->cur_policy,
                                        dbs_info->freq_lo, CPUFREQ_RELATION_H);
        }
        queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
        unlock_policy_rwsem_write(cpu);
}

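/*
 * Start/stop the deferrable sampling work for one CPU.  The initial delay
 * is aligned so that all CPUs sample on roughly the same jiffy.
 */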
static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
{
        /* We want all CPUs to do sampling nearly on the same jiffy */
        int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
        delay -= jiffies % delay;

        dbs_info->enable = 1;
        ondemand_powersave_bias_init();
        dbs_info->sample_type = DBS_NORMAL_SAMPLE;
        INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
        queue_delayed_work_on(dbs_info->cpu, kondemand_wq, &dbs_info->work,
                              delay);
}

static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
{
        dbs_info->enable = 0;
        cancel_delayed_work_sync(&dbs_info->work);
}

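/*
 * Governor callback: handle GOV_START (attach to a policy, set up sysfs and
 * the sampling timer), GOV_STOP (tear both down) and GOV_LIMITS (clamp the
 * current frequency into the new policy range).
 */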
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                                unsigned int event)
{
        unsigned int cpu = policy->cpu;
        struct cpu_dbs_info_s *this_dbs_info;
        unsigned int j;
        int rc;

        this_dbs_info = &per_cpu(cpu_dbs_info, cpu);

        switch (event) {
        case CPUFREQ_GOV_START:
                if ((!cpu_online(cpu)) || (!policy->cur))
                        return -EINVAL;

                if (this_dbs_info->enable) /* Already enabled */
                        break;

                mutex_lock(&dbs_mutex);
                dbs_enable++;

                rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
                if (rc) {
                        dbs_enable--;
                        mutex_unlock(&dbs_mutex);
                        return rc;
                }

                for_each_cpu(j, policy->cpus) {
                        struct cpu_dbs_info_s *j_dbs_info;
                        j_dbs_info = &per_cpu(cpu_dbs_info, j);
                        j_dbs_info->cur_policy = policy;

                        j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
                                                &j_dbs_info->prev_cpu_wall);
                        if (dbs_tuners_ins.ignore_nice) {
                                j_dbs_info->prev_cpu_nice =
                                                kstat_cpu(j).cpustat.nice;
                        }
                }
                this_dbs_info->cpu = cpu;
                /*
                 * Pick the sampling rate when this governor is used for the
                 * first time.
                 */
                if (dbs_enable == 1) {
                        unsigned int latency;
                        /* policy latency is in ns. Convert it to us first */
                        latency = policy->cpuinfo.transition_latency / 1000;
                        if (latency == 0)
                                latency = 1;
                        /* Bring kernel and HW constraints together */
                        min_sampling_rate = max(min_sampling_rate,
                                        MIN_LATENCY_MULTIPLIER * latency);
                        dbs_tuners_ins.sampling_rate =
                                max(min_sampling_rate,
                                    latency * LATENCY_MULTIPLIER);
                }
                dbs_timer_init(this_dbs_info);

                mutex_unlock(&dbs_mutex);
                break;

        case CPUFREQ_GOV_STOP:
                mutex_lock(&dbs_mutex);
                dbs_timer_exit(this_dbs_info);
                sysfs_remove_group(&policy->kobj, &dbs_attr_group);
                dbs_enable--;
                mutex_unlock(&dbs_mutex);

                break;

        case CPUFREQ_GOV_LIMITS:
                mutex_lock(&dbs_mutex);
                if (policy->max < this_dbs_info->cur_policy->cur)
                        __cpufreq_driver_target(this_dbs_info->cur_policy,
                                policy->max, CPUFREQ_RELATION_H);
                else if (policy->min > this_dbs_info->cur_policy->cur)
                        __cpufreq_driver_target(this_dbs_info->cur_policy,
                                policy->min, CPUFREQ_RELATION_L);
                mutex_unlock(&dbs_mutex);
                break;
        }
        return 0;
}

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
static
#endif
struct cpufreq_governor cpufreq_gov_ondemand = {
        .name			= "ondemand",
        .governor		= cpufreq_governor_dbs,
        .max_transition_latency	= TRANSITION_LATENCY_LIMIT,
        .owner			= THIS_MODULE,
};

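/*
 * Module init: pick the default thresholds depending on whether idle
 * micro-accounting is available, create the kondemand workqueue and
 * register the governor.
 */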
static int __init cpufreq_gov_dbs_init(void)
{
        int err;
        cputime64_t wall;
        u64 idle_time;
        int cpu = get_cpu();

        idle_time = get_cpu_idle_time_us(cpu, &wall);
        put_cpu();
        if (idle_time != -1ULL) {
                /* Idle micro accounting is supported. Use finer thresholds */
                dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
                dbs_tuners_ins.down_differential =
                                        MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
                /*
                 * In the nohz/micro-accounting case we set the minimum
                 * sampling rate to a fixed, very low value that does not
                 * depend on HZ. The deferrable timer may skip some samples
                 * while the CPU is idle/sleeping, as intended.
                 */
                min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
        } else {
                /* For correct statistics, we need 10 ticks for each measure */
                min_sampling_rate =
                        MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10);
        }

        kondemand_wq = create_workqueue("kondemand");
        if (!kondemand_wq) {
                printk(KERN_ERR "Creation of kondemand failed\n");
                return -EFAULT;
        }
        err = cpufreq_register_governor(&cpufreq_gov_ondemand);
        if (err)
                destroy_workqueue(kondemand_wq);

        return err;
}

static void __exit cpufreq_gov_dbs_exit(void)
{
        cpufreq_unregister_governor(&cpufreq_gov_ondemand);
        destroy_workqueue(kondemand_wq);
}


MODULE_AUTHOR("Venkatesh Pallipadi <[email protected]>");
MODULE_AUTHOR("Alexey Starikovskiy <[email protected]>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
        "Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);