/*
 * drivers/cpufreq/cpufreq_ondemand.c
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2003 Venkatesh Pallipadi <[email protected]>.
 *                    Jun Nakajima <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/tick.h>
#include <linux/sched/cpufreq.h>

#include "cpufreq_ondemand.h"

/* On-demand governor macros */
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define DEF_SAMPLING_DOWN_FACTOR		(1)
#define MAX_SAMPLING_DOWN_FACTOR		(100000)
#define MICRO_FREQUENCY_UP_THRESHOLD		(95)
#define MICRO_FREQUENCY_MIN_SAMPLE_RATE		(10000)
#define MIN_FREQUENCY_UP_THRESHOLD		(1)
#define MAX_FREQUENCY_UP_THRESHOLD		(100)

static struct od_ops od_ops;

static unsigned int default_powersave_bias;

/*
 * Not all CPUs want IO time to be accounted as busy; this depends on how
 * efficient idling at a higher frequency/voltage is.
 * Pavel Machek says this is not so for various generations of AMD and old
 * Intel systems.
 * Mike Chan (android.com) claims this is also not true for ARM.
 * Because of this, whitelist specific known series of CPUs by default, and
 * leave all others up to the user.
 */
static int should_io_be_busy(void)
{
#if defined(CONFIG_X86)
	/*
	 * For Intel, Core 2 (model 15) and later have an efficient idle.
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
			boot_cpu_data.x86 == 6 &&
			boot_cpu_data.x86_model >= 15)
		return 1;
#endif
	return 0;
}

/*
 * Find the right freq to be set now with powersave_bias on.
 * Returns the freq_hi to be used right now and will set freq_hi_delay_us,
 * freq_lo, and freq_lo_delay_us in the percpu area for averaging freqs.
 */
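/*
 * Worked example (illustrative numbers only, not taken from this file):
 * with powersave_bias = 200 (i.e. 20%), a requested frequency of
 * 2000000 kHz is reduced to freq_avg = 1600000 kHz.  If the nearest table
 * entries are freq_lo = 1000000 kHz and freq_hi = 2000000 kHz and
 * sampling_rate is 10000 us, the split below yields
 * freq_hi_delay_us ~= 6000 and freq_lo_delay_us ~= 4000, so the
 * time-weighted average frequency over one sampling period comes out
 * close to freq_avg.
 */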
static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
		unsigned int freq_next, unsigned int relation)
{
	unsigned int freq_req, freq_reduc, freq_avg;
	unsigned int freq_hi, freq_lo;
	unsigned int index;
	unsigned int delay_hi_us;
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct od_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	struct cpufreq_frequency_table *freq_table = policy->freq_table;

	if (!freq_table) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_delay_us = 0;
		return freq_next;
	}

	index = cpufreq_frequency_table_target(policy, freq_next, relation);
	freq_req = freq_table[index].frequency;
	freq_reduc = freq_req * od_tuners->powersave_bias / 1000;
	freq_avg = freq_req - freq_reduc;

	/* Find freq bounds for freq_avg in freq_table */
	index = cpufreq_table_find_index_h(policy, freq_avg);
	freq_lo = freq_table[index].frequency;
	index = cpufreq_table_find_index_l(policy, freq_avg);
	freq_hi = freq_table[index].frequency;

	/* Find out how long we have to be in hi and lo freqs */
	if (freq_hi == freq_lo) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_delay_us = 0;
		return freq_lo;
	}
	delay_hi_us = (freq_avg - freq_lo) * dbs_data->sampling_rate;
	delay_hi_us += (freq_hi - freq_lo) / 2;
	delay_hi_us /= freq_hi - freq_lo;
	dbs_info->freq_hi_delay_us = delay_hi_us;
	dbs_info->freq_lo = freq_lo;
	dbs_info->freq_lo_delay_us = dbs_data->sampling_rate - delay_hi_us;
	return freq_hi;
}

static void ondemand_powersave_bias_init(struct cpufreq_policy *policy)
{
	struct od_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);

	dbs_info->freq_lo = 0;
}

static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int freq)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;

	if (od_tuners->powersave_bias)
		freq = od_ops.powersave_bias_target(policy, freq,
				CPUFREQ_RELATION_H);
	else if (policy->cur == policy->max)
		return;

	__cpufreq_driver_target(policy, freq, od_tuners->powersave_bias ?
			CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
}

/*
 * Every sampling_rate we check whether the current idle time is less than
 * 20% (default); if it is, we try to increase the frequency.  Otherwise, we
 * adjust the frequency proportionally to the load.
 */
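/*
 * Example of the proportional path below (illustrative numbers only):
 * with cpuinfo.min_freq = 800000 kHz, cpuinfo.max_freq = 2400000 kHz and a
 * measured load of 50, freq_next = 800000 + 50 * 1600000 / 100
 * = 1600000 kHz; CPUFREQ_RELATION_C then picks the closest frequency in
 * the driver's table.
 */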
static void od_update(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct od_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int load = dbs_update(policy);

	dbs_info->freq_lo = 0;

	/* Check for frequency increase */
	if (load > dbs_data->up_threshold) {
		/* If switching to max speed, apply sampling_down_factor */
		if (policy->cur < policy->max)
			policy_dbs->rate_mult = dbs_data->sampling_down_factor;
		dbs_freq_increase(policy, policy->max);
	} else {
		/* Calculate the next frequency proportional to load */
		unsigned int freq_next, min_f, max_f;

		min_f = policy->cpuinfo.min_freq;
		max_f = policy->cpuinfo.max_freq;
		freq_next = min_f + load * (max_f - min_f) / 100;

		/* No longer fully busy, reset rate_mult */
		policy_dbs->rate_mult = 1;

		if (od_tuners->powersave_bias)
			freq_next = od_ops.powersave_bias_target(policy,
								 freq_next,
								 CPUFREQ_RELATION_L);

		__cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_C);
	}
}

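/*
 * Note on the two-phase sampling used when powersave_bias splits a request
 * into a hi/lo frequency pair: an OD_NORMAL_SAMPLE runs od_update() (which
 * may switch to freq_hi) and returns freq_hi_delay_us; the following
 * OD_SUB_SAMPLE then drops to freq_lo and returns freq_lo_delay_us before
 * the next normal sample.  With powersave_bias == 0, freq_lo stays 0 and
 * every sample is a normal one at the plain sampling rate.
 */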
static unsigned int od_dbs_update(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct od_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
	int sample_type = dbs_info->sample_type;

	/* Common NORMAL_SAMPLE setup */
	dbs_info->sample_type = OD_NORMAL_SAMPLE;
	/*
	 * OD_SUB_SAMPLE doesn't make sense if sample_delay_ns is 0, so ignore
	 * it then.
	 */
	if (sample_type == OD_SUB_SAMPLE && policy_dbs->sample_delay_ns > 0) {
		__cpufreq_driver_target(policy, dbs_info->freq_lo,
					CPUFREQ_RELATION_H);
		return dbs_info->freq_lo_delay_us;
	}

	od_update(policy);

	if (dbs_info->freq_lo) {
		/* Setup SUB_SAMPLE */
		dbs_info->sample_type = OD_SUB_SAMPLE;
		return dbs_info->freq_hi_delay_us;
	}

	return dbs_data->sampling_rate * policy_dbs->rate_mult;
}

/************************** sysfs interface ************************/
static struct dbs_governor od_dbs_gov;

static ssize_t store_io_is_busy(struct gov_attr_set *attr_set, const char *buf,
				size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	dbs_data->io_is_busy = !!input;

	/* we need to re-evaluate prev_cpu_idle */
	gov_update_cpu_data(dbs_data);

	return count;
}

static ssize_t store_up_threshold(struct gov_attr_set *attr_set,
				  const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
			input < MIN_FREQUENCY_UP_THRESHOLD) {
		return -EINVAL;
	}

	dbs_data->up_threshold = input;
	return count;
}

static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set,
					  const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	struct policy_dbs_info *policy_dbs;
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
		return -EINVAL;

	dbs_data->sampling_down_factor = input;

	/* Reset down sampling multiplier in case it was active */
	list_for_each_entry(policy_dbs, &attr_set->policy_list, list) {
		/*
		 * Doing this without locking might lead to using different
		 * rate_mult values in od_update() and od_dbs_update().
		 */
		mutex_lock(&policy_dbs->update_mutex);
		policy_dbs->rate_mult = 1;
		mutex_unlock(&policy_dbs->update_mutex);
	}

	return count;
}

static ssize_t store_ignore_nice_load(struct gov_attr_set *attr_set,
				      const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	if (input == dbs_data->ignore_nice_load) { /* nothing to do */
		return count;
	}
	dbs_data->ignore_nice_load = input;

	/* we need to re-evaluate prev_cpu_idle */
	gov_update_cpu_data(dbs_data);

	return count;
}

static ssize_t store_powersave_bias(struct gov_attr_set *attr_set,
				    const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	struct policy_dbs_info *policy_dbs;
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1)
		return -EINVAL;

	if (input > 1000)
		input = 1000;

	od_tuners->powersave_bias = input;

	list_for_each_entry(policy_dbs, &attr_set->policy_list, list)
		ondemand_powersave_bias_init(policy_dbs->policy);

	return count;
}

gov_show_one_common(sampling_rate);
gov_show_one_common(up_threshold);
gov_show_one_common(sampling_down_factor);
gov_show_one_common(ignore_nice_load);
gov_show_one_common(min_sampling_rate);
gov_show_one_common(io_is_busy);
gov_show_one(od, powersave_bias);

gov_attr_rw(sampling_rate);
gov_attr_rw(io_is_busy);
gov_attr_rw(up_threshold);
gov_attr_rw(sampling_down_factor);
gov_attr_rw(ignore_nice_load);
gov_attr_rw(powersave_bias);
gov_attr_ro(min_sampling_rate);

static struct attribute *od_attributes[] = {
	&min_sampling_rate.attr,
	&sampling_rate.attr,
	&up_threshold.attr,
	&sampling_down_factor.attr,
	&ignore_nice_load.attr,
	&powersave_bias.attr,
	&io_is_busy.attr,
	NULL
};

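/*
 * The attributes above are exposed as the governor's sysfs tunables and go
 * through the store_*() handlers in this section.  As a rough usage sketch
 * (the exact path depends on whether the driver uses per-policy tunables),
 * something like:
 *
 *	echo 95 > /sys/devices/system/cpu/cpufreq/ondemand/up_threshold
 *	echo 10 > /sys/devices/system/cpu/cpufreq/ondemand/sampling_down_factor
 *
 * adjusts the governor at run time.
 */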
/************************** sysfs end ************************/

static struct policy_dbs_info *od_alloc(void)
{
	struct od_policy_dbs_info *dbs_info;

	dbs_info = kzalloc(sizeof(*dbs_info), GFP_KERNEL);
	return dbs_info ? &dbs_info->policy_dbs : NULL;
}

static void od_free(struct policy_dbs_info *policy_dbs)
{
	kfree(to_dbs_info(policy_dbs));
}

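/*
 * od_init() below picks up_threshold and min_sampling_rate depending on
 * whether idle micro accounting is available.  As a rough illustration
 * (assuming MIN_SAMPLING_RATE_RATIO is 2, per cpufreq_governor.h, and
 * HZ = 250): without micro accounting, min_sampling_rate becomes
 * 2 * jiffies_to_usecs(10) = 80000 us.
 */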
static int od_init(struct dbs_data *dbs_data)
{
	struct od_dbs_tuners *tuners;
	u64 idle_time;
	int cpu;

	tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
	if (!tuners)
		return -ENOMEM;

	cpu = get_cpu();
	idle_time = get_cpu_idle_time_us(cpu, NULL);
	put_cpu();
	if (idle_time != -1ULL) {
		/* Idle micro accounting is supported. Use finer thresholds */
		dbs_data->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
		/*
		 * In the nohz/micro accounting case we set the minimum
		 * sampling rate to a fixed (very low) value, not depending
		 * on HZ.
		 */
		dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
	} else {
		dbs_data->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;

		/* For correct statistics, we need 10 ticks for each measure */
		dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
			jiffies_to_usecs(10);
	}

	dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
	dbs_data->ignore_nice_load = 0;
	tuners->powersave_bias = default_powersave_bias;
	dbs_data->io_is_busy = should_io_be_busy();

	dbs_data->tuners = tuners;
	return 0;
}

static void od_exit(struct dbs_data *dbs_data)
{
	kfree(dbs_data->tuners);
}

static void od_start(struct cpufreq_policy *policy)
{
	struct od_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);

	dbs_info->sample_type = OD_NORMAL_SAMPLE;
	ondemand_powersave_bias_init(policy);
}

static struct od_ops od_ops = {
	.powersave_bias_target = generic_powersave_bias_target,
};

static struct dbs_governor od_dbs_gov = {
	.gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("ondemand"),
	.kobj_type = { .default_attrs = od_attributes },
	.gov_dbs_update = od_dbs_update,
	.alloc = od_alloc,
	.free = od_free,
	.init = od_init,
	.exit = od_exit,
	.start = od_start,
};

#define CPU_FREQ_GOV_ONDEMAND	(&od_dbs_gov.gov)

static void od_set_powersave_bias(unsigned int powersave_bias)
{
	unsigned int cpu;
	cpumask_t done;

	default_powersave_bias = powersave_bias;
	cpumask_clear(&done);

	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct cpufreq_policy *policy;
		struct policy_dbs_info *policy_dbs;
		struct dbs_data *dbs_data;
		struct od_dbs_tuners *od_tuners;

		if (cpumask_test_cpu(cpu, &done))
			continue;

		policy = cpufreq_cpu_get_raw(cpu);
		if (!policy || policy->governor != CPU_FREQ_GOV_ONDEMAND)
			continue;

		policy_dbs = policy->governor_data;
		if (!policy_dbs)
			continue;

		cpumask_or(&done, &done, policy->cpus);

		dbs_data = policy_dbs->dbs_data;
		od_tuners = dbs_data->tuners;
		od_tuners->powersave_bias = default_powersave_bias;
	}
	put_online_cpus();
}

void od_register_powersave_bias_handler(unsigned int (*f)
		(struct cpufreq_policy *, unsigned int, unsigned int),
		unsigned int powersave_bias)
{
	od_ops.powersave_bias_target = f;
	od_set_powersave_bias(powersave_bias);
}
EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);

void od_unregister_powersave_bias_handler(void)
{
	od_ops.powersave_bias_target = generic_powersave_bias_target;
	od_set_powersave_bias(0);
}
EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);

static int __init cpufreq_gov_dbs_init(void)
{
	return cpufreq_register_governor(CPU_FREQ_GOV_ONDEMAND);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(CPU_FREQ_GOV_ONDEMAND);
}

MODULE_AUTHOR("Venkatesh Pallipadi <[email protected]>");
MODULE_AUTHOR("Alexey Starikovskiy <[email protected]>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
	"Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return CPU_FREQ_GOV_ONDEMAND;
}

fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);