// SPDX-License-Identifier: GPL-2.0-only
/*
 * drivers/cpufreq/cpufreq_conservative.c
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2003 Venkatesh Pallipadi <[email protected]>.
 *               Jun Nakajima <[email protected]>
 *           (C) 2009 Alexander Clouter <[email protected]>
 */

#include <linux/slab.h>
#include "cpufreq_governor.h"

struct cs_policy_dbs_info {
	struct policy_dbs_info policy_dbs;
	unsigned int down_skip;
	unsigned int requested_freq;
};

static inline struct cs_policy_dbs_info *to_dbs_info(struct policy_dbs_info *policy_dbs)
{
	return container_of(policy_dbs, struct cs_policy_dbs_info, policy_dbs);
}

struct cs_dbs_tuners {
	unsigned int down_threshold;
	unsigned int freq_step;
};

/* Conservative governor macros */
#define DEF_FREQUENCY_UP_THRESHOLD	(80)
#define DEF_FREQUENCY_DOWN_THRESHOLD	(20)
#define DEF_FREQUENCY_STEP		(5)
#define DEF_SAMPLING_DOWN_FACTOR	(1)
#define MAX_SAMPLING_DOWN_FACTOR	(10)

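/*
 * With the defaults above, the governor raises the frequency when the
 * measured load exceeds 80%, lowers it when the load drops below 20%,
 * changes the frequency in steps of 5% of the maximum frequency, and
 * evaluates a decrease once per sampling period (sampling_down_factor == 1,
 * configurable up to 10).
 */
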
static inline unsigned int get_freq_step(struct cs_dbs_tuners *cs_tuners,
					 struct cpufreq_policy *policy)
{
	unsigned int freq_step = (cs_tuners->freq_step * policy->max) / 100;

	/* max freq cannot be less than 100. But who knows... */
	if (unlikely(freq_step == 0))
		freq_step = DEF_FREQUENCY_STEP;

	return freq_step;
}

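/*
 * Illustration (hypothetical numbers): with policy->max = 2000000 kHz and
 * the default freq_step of 5, get_freq_step() returns
 * (5 * 2000000) / 100 = 100000 kHz, so each adjustment moves the requested
 * frequency by 100 MHz.
 */
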
/*
 * Every sampling_rate, we check if the current idle time is less than 20%
 * (default); if it is, we try to increase the frequency.  Every
 * sampling_rate * sampling_down_factor, we check if the current idle time
 * is more than 80% (default); if it is, we try to decrease the frequency.
 *
 * Frequency updates happen at minimum steps of 5% (default) of the maximum
 * frequency.
 */
static unsigned int cs_dbs_update(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
	unsigned int requested_freq = dbs_info->requested_freq;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int load = dbs_update(policy);
	unsigned int freq_step;

	/*
	 * break out if we 'cannot' reduce the speed as the user might
	 * want freq_step to be zero
	 */
	if (cs_tuners->freq_step == 0)
		goto out;

	/*
	 * If requested_freq is out of range, it is likely that the limits
	 * changed in the meantime, so fall back to current frequency in that
	 * case.
	 */
	if (requested_freq > policy->max || requested_freq < policy->min) {
		requested_freq = policy->cur;
		dbs_info->requested_freq = requested_freq;
	}

	freq_step = get_freq_step(cs_tuners, policy);

	/*
	 * Decrease requested_freq one freq_step for each idle period that
	 * we didn't update the frequency.
	 */
	if (policy_dbs->idle_periods < UINT_MAX) {
		unsigned int freq_steps = policy_dbs->idle_periods * freq_step;

		if (requested_freq > policy->min + freq_steps)
			requested_freq -= freq_steps;
		else
			requested_freq = policy->min;

		policy_dbs->idle_periods = UINT_MAX;
	}

	/* Check for frequency increase */
	if (load > dbs_data->up_threshold) {
		dbs_info->down_skip = 0;

		/* if we are already at full speed then break out early */
		if (requested_freq == policy->max)
			goto out;

		requested_freq += freq_step;
		if (requested_freq > policy->max)
			requested_freq = policy->max;

		__cpufreq_driver_target(policy, requested_freq,
					CPUFREQ_RELATION_HE);
		dbs_info->requested_freq = requested_freq;
		goto out;
	}

	/* if sampling_down_factor is active break out early */
	if (++dbs_info->down_skip < dbs_data->sampling_down_factor)
		goto out;
	dbs_info->down_skip = 0;

	/* Check for frequency decrease */
	if (load < cs_tuners->down_threshold) {
		/*
		 * if we cannot reduce the frequency anymore, break out early
		 */
		if (requested_freq == policy->min)
			goto out;

		if (requested_freq > freq_step)
			requested_freq -= freq_step;
		else
			requested_freq = policy->min;

		__cpufreq_driver_target(policy, requested_freq,
					CPUFREQ_RELATION_LE);
		dbs_info->requested_freq = requested_freq;
	}

out:
	return dbs_data->sampling_rate;
}

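/*
 * cs_dbs_update() returns dbs_data->sampling_rate (in microseconds); the
 * common governor code (cpufreq_governor.c) uses this as the delay before
 * it evaluates the policy again.
 */
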
/************************** sysfs interface ************************/

static ssize_t sampling_down_factor_store(struct gov_attr_set *attr_set,
					  const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
		return -EINVAL;

	dbs_data->sampling_down_factor = input;
	return count;
}

static ssize_t up_threshold_store(struct gov_attr_set *attr_set,
				  const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > 100 || input <= cs_tuners->down_threshold)
		return -EINVAL;

	dbs_data->up_threshold = input;
	return count;
}

static ssize_t down_threshold_store(struct gov_attr_set *attr_set,
				    const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	/* cannot be lower than 1 otherwise freq will not fall */
	if (ret != 1 || input < 1 || input >= dbs_data->up_threshold)
		return -EINVAL;

	cs_tuners->down_threshold = input;
	return count;
}

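/*
 * Taken together, the checks in up_threshold_store() and
 * down_threshold_store() preserve the invariant
 * down_threshold < up_threshold, so the increase and decrease conditions
 * in cs_dbs_update() can never both hold for the same load value.
 */
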
static ssize_t ignore_nice_load_store(struct gov_attr_set *attr_set,
				      const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	if (input == dbs_data->ignore_nice_load) /* nothing to do */
		return count;

	dbs_data->ignore_nice_load = input;

	/* we need to re-evaluate prev_cpu_idle */
	gov_update_cpu_data(dbs_data);

	return count;
}

static ssize_t freq_step_store(struct gov_attr_set *attr_set, const char *buf,
			       size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1)
		return -EINVAL;

	if (input > 100)
		input = 100;

	/*
	 * no need to test here if freq_step is zero as the user might actually
	 * want this, they would be crazy though :)
	 */
	cs_tuners->freq_step = input;
	return count;
}

gov_show_one_common(sampling_rate);
gov_show_one_common(sampling_down_factor);
gov_show_one_common(up_threshold);
gov_show_one_common(ignore_nice_load);
gov_show_one(cs, down_threshold);
gov_show_one(cs, freq_step);

gov_attr_rw(sampling_rate);
gov_attr_rw(sampling_down_factor);
gov_attr_rw(up_threshold);
gov_attr_rw(ignore_nice_load);
gov_attr_rw(down_threshold);
gov_attr_rw(freq_step);

static struct attribute *cs_attrs[] = {
	&sampling_rate.attr,
	&sampling_down_factor.attr,
	&up_threshold.attr,
	&down_threshold.attr,
	&ignore_nice_load.attr,
	&freq_step.attr,
	NULL
};
ATTRIBUTE_GROUPS(cs);

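/*
 * The attributes defined above are exposed through sysfs.  With global
 * governor tunables they typically appear under
 * /sys/devices/system/cpu/cpufreq/conservative/; drivers that use
 * per-policy tunables expose the same files under each policy directory
 * instead.  An illustrative (hypothetical) shell session:
 *
 *	# echo 30 > /sys/devices/system/cpu/cpufreq/conservative/down_threshold
 *	# echo 10 > /sys/devices/system/cpu/cpufreq/conservative/freq_step
 */
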
/************************** sysfs end ************************/

static struct policy_dbs_info *cs_alloc(void)
{
	struct cs_policy_dbs_info *dbs_info;

	dbs_info = kzalloc(sizeof(*dbs_info), GFP_KERNEL);
	return dbs_info ? &dbs_info->policy_dbs : NULL;
}

static void cs_free(struct policy_dbs_info *policy_dbs)
{
	kfree(to_dbs_info(policy_dbs));
}

static int cs_init(struct dbs_data *dbs_data)
{
	struct cs_dbs_tuners *tuners;

	tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
	if (!tuners)
		return -ENOMEM;

	tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
	tuners->freq_step = DEF_FREQUENCY_STEP;
	dbs_data->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
	dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
	dbs_data->ignore_nice_load = 0;
	dbs_data->tuners = tuners;

	return 0;
}

static void cs_exit(struct dbs_data *dbs_data)
{
	kfree(dbs_data->tuners);
}

static void cs_start(struct cpufreq_policy *policy)
{
	struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);

	dbs_info->down_skip = 0;
	dbs_info->requested_freq = policy->cur;
}

static struct dbs_governor cs_governor = {
	.gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("conservative"),
	.kobj_type = { .default_groups = cs_groups },
	.gov_dbs_update = cs_dbs_update,
	.alloc = cs_alloc,
	.free = cs_free,
	.init = cs_init,
	.exit = cs_exit,
	.start = cs_start,
};

#define CPU_FREQ_GOV_CONSERVATIVE	(cs_governor.gov)

MODULE_AUTHOR("Alexander Clouter <[email protected]>");
MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for "
		"Low Latency Frequency Transition capable processors "
		"optimised for use in a battery environment");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return &CPU_FREQ_GOV_CONSERVATIVE;
}
#endif

cpufreq_governor_init(CPU_FREQ_GOV_CONSERVATIVE);
cpufreq_governor_exit(CPU_FREQ_GOV_CONSERVATIVE);