linux.git: drivers/acpi/cppc_acpi.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * CPPC (Collaborative Processor Performance Control) methods used by CPUfreq drivers.
4  *
5  * (C) Copyright 2014, 2015 Linaro Ltd.
6  * Author: Ashwin Chaugule <[email protected]>
7  *
8  * CPPC describes a few methods for controlling CPU performance using
9  * information from a per CPU table called CPC. This table is described in
10  * the ACPI v5.0+ specification. The table consists of a list of
11  * registers which may be memory mapped or hardware registers and also may
12  * include some static integer values.
13  *
14  * CPU performance is expressed on an abstract, continuous scale, as opposed
15  * to a discretized P-state scale that is tied to CPU frequency only. In
16  * brief, the basic operation involves:
17  *
18  * - OS makes a CPU performance request. (Can provide min and max bounds)
19  *
20  * - Platform (such as BMC) is free to optimize request within requested bounds
21  *   depending on power/thermal budgets etc.
22  *
23  * - Platform conveys its decision back to OS
24  *
25  * The communication between the OS and the platform occurs through another
26  * medium called the Platform Communication Channel (PCC). This is a generic
27  * mailbox-like mechanism with doorbell semantics to indicate register updates.
28  * See drivers/mailbox/pcc.c for details on PCC.
29  *
30  * Finer details about the PCC and CPPC spec are available in the ACPI v5.1 and
31  * above specifications.
32  */
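/*
 * Minimal consumer sketch (illustrative only; error handling omitted and the
 * surrounding CPUfreq-driver context is assumed, not shown). A caller would
 * typically query the capabilities once and then request a desired
 * performance level via the helpers declared in <acpi/cppc_acpi.h>:
 *
 *	struct cppc_perf_caps caps;
 *	struct cppc_perf_ctrls ctrls = {};
 *
 *	if (!cppc_get_perf_caps(cpu, &caps)) {
 *		ctrls.desired_perf = caps.nominal_perf;
 *		cppc_set_perf(cpu, &ctrls);
 *	}
 */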
33
34 #define pr_fmt(fmt)     "ACPI CPPC: " fmt
35
36 #include <linux/delay.h>
37 #include <linux/iopoll.h>
38 #include <linux/ktime.h>
39 #include <linux/rwsem.h>
40 #include <linux/wait.h>
41 #include <linux/topology.h>
42 #include <linux/dmi.h>
43 #include <linux/units.h>
44 #include <linux/unaligned.h>
45
46 #include <acpi/cppc_acpi.h>
47
48 struct cppc_pcc_data {
49         struct pcc_mbox_chan *pcc_channel;
50         void __iomem *pcc_comm_addr;
51         bool pcc_channel_acquired;
52         unsigned int deadline_us;
53         unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;
54
55         bool pending_pcc_write_cmd;     /* Any pending/batched PCC write cmds? */
56         bool platform_owns_pcc;         /* Ownership of PCC subspace */
57         unsigned int pcc_write_cnt;     /* Running count of PCC write commands */
58
59         /*
60          * Lock providing controlled access to the PCC channel.
61          *
62          * For performance-critical use cases (currently cppc_set_perf):
63          *      Take the read lock and check that the channel belongs to the
64          * OSPM before reading from or writing to the PCC subspace.
65          *      Take the write lock before transferring channel ownership to
66          * the platform via the doorbell. This allows a number of CPPC requests
67          * to be batched if they happen to originate at about the same time.
68          * (An illustrative sketch of this protocol follows the struct.)
69          *
70          * For non-performance-critical use cases (init):
71          *      Take the write lock for all purposes, which gives exclusive access.
72          */
73         struct rw_semaphore pcc_lock;
74
75         /* Wait queue for CPUs whose requests were batched */
76         wait_queue_head_t pcc_write_wait_q;
77         ktime_t last_cmd_cmpl_time;
78         ktime_t last_mpar_reset;
79         int mpar_count;
80         int refcount;
81 };
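/*
 * Illustrative sketch of the locking protocol described in the struct above
 * (an assumed caller-side pattern, mirroring cppc_get_perf() below, not a
 * function of its own):
 *
 *	down_write(&pcc_ss_data->pcc_lock);
 *	if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0)
 *		cpc_read(cpu, reg, &val);
 *	up_write(&pcc_ss_data->pcc_lock);
 *
 * Performance-critical writers instead take the read lock to stage their
 * register updates, and a single holder of the write lock rings the doorbell
 * for the whole batch.
 */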
82
83 /* Array to represent the PCC channel per subspace ID */
84 static struct cppc_pcc_data *pcc_data[MAX_PCC_SUBSPACES];
85 /* The cpu_pcc_subspace_idx contains per CPU subspace ID */
86 static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx);
87
88 /*
89  * The cpc_desc structure contains the ACPI register details
90  * as described in the per CPU _CPC tables. The details
91  * include the type of register (e.g. PCC, System IO, FFH etc.)
92  * and destination addresses which lets us READ/WRITE CPU performance
93  * information using the appropriate I/O methods.
94  */
95 static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
96
97 /* pcc mapped address + header size + offset within PCC subspace */
98 #define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_comm_addr + \
99                                                 0x8 + (offs))
100
101 /* Check if a CPC register is in PCC */
102 #define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&             \
103                                 (cpc)->cpc_entry.reg.space_id ==        \
104                                 ACPI_ADR_SPACE_PLATFORM_COMM)
105
106 /* Check if a CPC register is in FFH */
107 #define CPC_IN_FFH(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&             \
108                                 (cpc)->cpc_entry.reg.space_id ==        \
109                                 ACPI_ADR_SPACE_FIXED_HARDWARE)
110
111 /* Check if a CPC register is in SystemMemory */
112 #define CPC_IN_SYSTEM_MEMORY(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&   \
113                                 (cpc)->cpc_entry.reg.space_id ==        \
114                                 ACPI_ADR_SPACE_SYSTEM_MEMORY)
115
116 /* Check if a CPC register is in SystemIo */
117 #define CPC_IN_SYSTEM_IO(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&       \
118                                 (cpc)->cpc_entry.reg.space_id ==        \
119                                 ACPI_ADR_SPACE_SYSTEM_IO)
120
121 /* Evaluates to True if reg is a NULL register descriptor */
122 #define IS_NULL_REG(reg) ((reg)->space_id ==  ACPI_ADR_SPACE_SYSTEM_MEMORY && \
123                                 (reg)->address == 0 &&                  \
124                                 (reg)->bit_width == 0 &&                \
125                                 (reg)->bit_offset == 0 &&               \
126                                 (reg)->access_width == 0)
127
128 /* Evaluates to True if an optional cpc field is supported */
129 #define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ?          \
130                                 !!(cpc)->cpc_entry.int_value :          \
131                                 !IS_NULL_REG(&(cpc)->cpc_entry.reg))
132 /*
133  * Arbitrary number of retries in case the remote processor is slow to
134  * respond to PCC commands. Keep it high enough to cover emulators where
135  * the processors run painfully slowly.
136  */
137 #define NUM_RETRIES 500ULL
138
139 #define OVER_16BTS_MASK ~0xFFFFULL
140
141 #define define_one_cppc_ro(_name)               \
142 static struct kobj_attribute _name =            \
143 __ATTR(_name, 0444, show_##_name, NULL)
144
145 #define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj)
146
147 #define show_cppc_data(access_fn, struct_name, member_name)             \
148         static ssize_t show_##member_name(struct kobject *kobj,         \
149                                 struct kobj_attribute *attr, char *buf) \
150         {                                                               \
151                 struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);           \
152                 struct struct_name st_name = {0};                       \
153                 int ret;                                                \
154                                                                         \
155                 ret = access_fn(cpc_ptr->cpu_id, &st_name);             \
156                 if (ret)                                                \
157                         return ret;                                     \
158                                                                         \
159                 return sysfs_emit(buf, "%llu\n",                \
160                                 (u64)st_name.member_name);              \
161         }                                                               \
162         define_one_cppc_ro(member_name)
163
164 show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf);
165 show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_perf);
166 show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_perf);
167 show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_nonlinear_perf);
168 show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, guaranteed_perf);
169 show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_freq);
170 show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_freq);
171
172 show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
173 show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
174
175 /* Check for a valid access_width; otherwise, fall back to using bit_width */
176 #define GET_BIT_WIDTH(reg) ((reg)->access_width ? (8 << ((reg)->access_width - 1)) : (reg)->bit_width)
177
178 /* Shift and apply the mask for CPC reads/writes */
179 #define MASK_VAL_READ(reg, val) (((val) >> (reg)->bit_offset) &                         \
180                                         GENMASK(((reg)->bit_width) - 1, 0))
181 #define MASK_VAL_WRITE(reg, prev_val, val)                                              \
182         ((((val) & GENMASK(((reg)->bit_width) - 1, 0)) << (reg)->bit_offset) |          \
183         ((prev_val) & ~(GENMASK(((reg)->bit_width) - 1, 0) << (reg)->bit_offset)))      \
184
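/*
 * Worked example (illustrative): for a SystemMemory register described with
 * bit_width = 8, bit_offset = 8 and access_width = 2, GET_BIT_WIDTH() yields
 * a 16-bit access (8 << (2 - 1)), and MASK_VAL_READ() extracts bits [15:8]
 * of the raw value: (raw >> 8) & GENMASK(7, 0).
 */
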
185 static ssize_t show_feedback_ctrs(struct kobject *kobj,
186                 struct kobj_attribute *attr, char *buf)
187 {
188         struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
189         struct cppc_perf_fb_ctrs fb_ctrs = {0};
190         int ret;
191
192         ret = cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);
193         if (ret)
194                 return ret;
195
196         return sysfs_emit(buf, "ref:%llu del:%llu\n",
197                         fb_ctrs.reference, fb_ctrs.delivered);
198 }
199 define_one_cppc_ro(feedback_ctrs);
200
201 static struct attribute *cppc_attrs[] = {
202         &feedback_ctrs.attr,
203         &reference_perf.attr,
204         &wraparound_time.attr,
205         &highest_perf.attr,
206         &lowest_perf.attr,
207         &lowest_nonlinear_perf.attr,
208         &guaranteed_perf.attr,
209         &nominal_perf.attr,
210         &nominal_freq.attr,
211         &lowest_freq.attr,
212         NULL
213 };
214 ATTRIBUTE_GROUPS(cppc);
215
216 static const struct kobj_type cppc_ktype = {
217         .sysfs_ops = &kobj_sysfs_ops,
218         .default_groups = cppc_groups,
219 };
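/*
 * These attributes are exposed under each CPU's sysfs directory once
 * acpi_cppc_processor_probe() adds the "acpi_cppc" kobject, e.g. (path
 * assumed from the standard CPU device layout):
 *
 *	$ cat /sys/devices/system/cpu/cpu0/acpi_cppc/highest_perf
 */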
220
221 static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit)
222 {
223         int ret, status;
224         struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
225         struct acpi_pcct_shared_memory __iomem *generic_comm_base =
226                 pcc_ss_data->pcc_comm_addr;
227
228         if (!pcc_ss_data->platform_owns_pcc)
229                 return 0;
230
231         /*
232          * Poll the PCC status register every 3 us (delay_us) for a maximum of
233          * deadline_us (timeout_us) until the PCC command complete bit is set (cond).
234          */
235         ret = readw_relaxed_poll_timeout(&generic_comm_base->status, status,
236                                         status & PCC_CMD_COMPLETE_MASK, 3,
237                                         pcc_ss_data->deadline_us);
238
239         if (likely(!ret)) {
240                 pcc_ss_data->platform_owns_pcc = false;
241                 if (chk_err_bit && (status & PCC_ERROR_MASK))
242                         ret = -EIO;
243         }
244
245         if (unlikely(ret))
246                 pr_err("PCC check channel failed for ss: %d. ret=%d\n",
247                        pcc_ss_id, ret);
248
249         return ret;
250 }
251
252 /*
253  * This function transfers ownership of the PCC channel to the platform,
254  * so it must be called while holding pcc_lock for writing.
255  */
256 static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
257 {
258         int ret = -EIO, i;
259         struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
260         struct acpi_pcct_shared_memory __iomem *generic_comm_base =
261                 pcc_ss_data->pcc_comm_addr;
262         unsigned int time_delta;
263
264         /*
265          * For CMD_WRITE we know for a fact that the caller has already
266          * checked the channel before writing to the PCC space.
267          */
268         if (cmd == CMD_READ) {
269                 /*
270                  * If there are pending cpc_writes, then we stole the channel
271                  * before write completion, so first send a WRITE command to
272                  * the platform.
273                  */
274                 if (pcc_ss_data->pending_pcc_write_cmd)
275                         send_pcc_cmd(pcc_ss_id, CMD_WRITE);
276
277                 ret = check_pcc_chan(pcc_ss_id, false);
278                 if (ret)
279                         goto end;
280         } else /* CMD_WRITE */
281                 pcc_ss_data->pending_pcc_write_cmd = FALSE;
282
283         /*
284          * Handle the Minimum Request Turnaround Time (MRTT):
285          * "The minimum amount of time that OSPM must wait after the completion
286          * of a command before issuing the next command, in microseconds"
287          */
288         if (pcc_ss_data->pcc_mrtt) {
289                 time_delta = ktime_us_delta(ktime_get(),
290                                             pcc_ss_data->last_cmd_cmpl_time);
291                 if (pcc_ss_data->pcc_mrtt > time_delta)
292                         udelay(pcc_ss_data->pcc_mrtt - time_delta);
293         }
294
295         /*
296          * Handle a non-zero Maximum Periodic Access Rate (MPAR):
297          * "The maximum number of periodic requests that the subspace channel can
298          * support, reported in commands per minute. 0 indicates no limitation."
299          *
300          * Ideally this parameter is zero or large enough to handle the maximum
301          * number of requests that all the cores in the system can collectively
302          * generate. If it is not, we follow the spec and simply do not send the
303          * request to the platform after hitting the MPAR limit in any 60 s
304          * window.
305          */
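        /*
         * Worked example (illustrative): with pcc_mpar == 60, at most 60
         * doorbell commands are issued in the 60 s window starting at
         * last_mpar_reset; once mpar_count hits zero inside that window,
         * further requests fail with -EIO until the window expires.
         */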
306         if (pcc_ss_data->pcc_mpar) {
307                 if (pcc_ss_data->mpar_count == 0) {
308                         time_delta = ktime_ms_delta(ktime_get(),
309                                                     pcc_ss_data->last_mpar_reset);
310                         if ((time_delta < 60 * MSEC_PER_SEC) && pcc_ss_data->last_mpar_reset) {
311                                 pr_debug("PCC cmd for subspace %d not sent due to MPAR limit",
312                                          pcc_ss_id);
313                                 ret = -EIO;
314                                 goto end;
315                         }
316                         pcc_ss_data->last_mpar_reset = ktime_get();
317                         pcc_ss_data->mpar_count = pcc_ss_data->pcc_mpar;
318                 }
319                 pcc_ss_data->mpar_count--;
320         }
321
322         /* Write to the shared comm region. */
323         writew_relaxed(cmd, &generic_comm_base->command);
324
325         /* Flip CMD COMPLETE bit */
326         writew_relaxed(0, &generic_comm_base->status);
327
328         pcc_ss_data->platform_owns_pcc = true;
329
330         /* Ring doorbell */
331         ret = mbox_send_message(pcc_ss_data->pcc_channel->mchan, &cmd);
332         if (ret < 0) {
333                 pr_err("Err sending PCC mbox message. ss: %d cmd:%d, ret:%d\n",
334                        pcc_ss_id, cmd, ret);
335                 goto end;
336         }
337
338         /* wait for completion and check for PCC error bit */
339         ret = check_pcc_chan(pcc_ss_id, true);
340
341         if (pcc_ss_data->pcc_mrtt)
342                 pcc_ss_data->last_cmd_cmpl_time = ktime_get();
343
344         if (pcc_ss_data->pcc_channel->mchan->mbox->txdone_irq)
345                 mbox_chan_txdone(pcc_ss_data->pcc_channel->mchan, ret);
346         else
347                 mbox_client_txdone(pcc_ss_data->pcc_channel->mchan, ret);
348
349 end:
350         if (cmd == CMD_WRITE) {
351                 if (unlikely(ret)) {
352                         for_each_possible_cpu(i) {
353                                 struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i);
354
355                                 if (!desc)
356                                         continue;
357
358                                 if (desc->write_cmd_id == pcc_ss_data->pcc_write_cnt)
359                                         desc->write_cmd_status = ret;
360                         }
361                 }
362                 pcc_ss_data->pcc_write_cnt++;
363                 wake_up_all(&pcc_ss_data->pcc_write_wait_q);
364         }
365
366         return ret;
367 }
368
369 static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
370 {
371         if (ret < 0)
372                 pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
373                                 *(u16 *)msg, ret);
374         else
375                 pr_debug("TX completed. CMD sent:%x, ret:%d\n",
376                                 *(u16 *)msg, ret);
377 }
378
379 static struct mbox_client cppc_mbox_cl = {
380         .tx_done = cppc_chan_tx_done,
381         .knows_txdone = true,
382 };
383
384 static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
385 {
386         int result = -EFAULT;
387         acpi_status status = AE_OK;
388         struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
389         struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
390         struct acpi_buffer state = {0, NULL};
391         union acpi_object  *psd = NULL;
392         struct acpi_psd_package *pdomain;
393
394         status = acpi_evaluate_object_typed(handle, "_PSD", NULL,
395                                             &buffer, ACPI_TYPE_PACKAGE);
396         if (status == AE_NOT_FOUND)     /* _PSD is optional */
397                 return 0;
398         if (ACPI_FAILURE(status))
399                 return -ENODEV;
400
401         psd = buffer.pointer;
402         if (!psd || psd->package.count != 1) {
403                 pr_debug("Invalid _PSD data\n");
404                 goto end;
405         }
406
407         pdomain = &(cpc_ptr->domain_info);
408
409         state.length = sizeof(struct acpi_psd_package);
410         state.pointer = pdomain;
411
412         status = acpi_extract_package(&(psd->package.elements[0]),
413                 &format, &state);
414         if (ACPI_FAILURE(status)) {
415                 pr_debug("Invalid _PSD data for CPU:%d\n", cpc_ptr->cpu_id);
416                 goto end;
417         }
418
419         if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
420                 pr_debug("Unknown _PSD:num_entries for CPU:%d\n", cpc_ptr->cpu_id);
421                 goto end;
422         }
423
424         if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
425                 pr_debug("Unknown _PSD:revision for CPU: %d\n", cpc_ptr->cpu_id);
426                 goto end;
427         }
428
429         if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
430             pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
431             pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
432                 pr_debug("Invalid _PSD:coord_type for CPU:%d\n", cpc_ptr->cpu_id);
433                 goto end;
434         }
435
436         result = 0;
437 end:
438         kfree(buffer.pointer);
439         return result;
440 }
441
442 bool acpi_cpc_valid(void)
443 {
444         struct cpc_desc *cpc_ptr;
445         int cpu;
446
447         if (acpi_disabled)
448                 return false;
449
450         for_each_present_cpu(cpu) {
451                 cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
452                 if (!cpc_ptr)
453                         return false;
454         }
455
456         return true;
457 }
458 EXPORT_SYMBOL_GPL(acpi_cpc_valid);
459
460 bool cppc_allow_fast_switch(void)
461 {
462         struct cpc_register_resource *desired_reg;
463         struct cpc_desc *cpc_ptr;
464         int cpu;
465
466         for_each_possible_cpu(cpu) {
467                 cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
468                 desired_reg = &cpc_ptr->cpc_regs[DESIRED_PERF];
469                 if (!CPC_IN_SYSTEM_MEMORY(desired_reg) &&
470                                 !CPC_IN_SYSTEM_IO(desired_reg))
471                         return false;
472         }
473
474         return true;
475 }
476 EXPORT_SYMBOL_GPL(cppc_allow_fast_switch);
477
478 /**
479  * acpi_get_psd_map - Map the CPUs in the freq domain of a given cpu
480  * @cpu: Find all CPUs that share a domain with cpu.
481  * @cpu_data: Pointer to CPU specific CPPC data including PSD info.
482  *
483  *      Return: 0 for success or negative value for err.
484  */
485 int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data)
486 {
487         struct cpc_desc *cpc_ptr, *match_cpc_ptr;
488         struct acpi_psd_package *match_pdomain;
489         struct acpi_psd_package *pdomain;
490         int count_target, i;
491
492         /*
493          * Now that we have _PSD data from all CPUs, let's setup P-state
494          * domain info.
495          */
496         cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
497         if (!cpc_ptr)
498                 return -EFAULT;
499
500         pdomain = &(cpc_ptr->domain_info);
501         cpumask_set_cpu(cpu, cpu_data->shared_cpu_map);
502         if (pdomain->num_processors <= 1)
503                 return 0;
504
505         /* Validate the Domain info */
506         count_target = pdomain->num_processors;
507         if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
508                 cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ALL;
509         else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
510                 cpu_data->shared_type = CPUFREQ_SHARED_TYPE_HW;
511         else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
512                 cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ANY;
513
514         for_each_possible_cpu(i) {
515                 if (i == cpu)
516                         continue;
517
518                 match_cpc_ptr = per_cpu(cpc_desc_ptr, i);
519                 if (!match_cpc_ptr)
520                         goto err_fault;
521
522                 match_pdomain = &(match_cpc_ptr->domain_info);
523                 if (match_pdomain->domain != pdomain->domain)
524                         continue;
525
526                 /* Here i and cpu are in the same domain */
527                 if (match_pdomain->num_processors != count_target)
528                         goto err_fault;
529
530                 if (pdomain->coord_type != match_pdomain->coord_type)
531                         goto err_fault;
532
533                 cpumask_set_cpu(i, cpu_data->shared_cpu_map);
534         }
535
536         return 0;
537
538 err_fault:
539         /* Assume no coordination on any error parsing domain info */
540         cpumask_clear(cpu_data->shared_cpu_map);
541         cpumask_set_cpu(cpu, cpu_data->shared_cpu_map);
542         cpu_data->shared_type = CPUFREQ_SHARED_TYPE_NONE;
543
544         return -EFAULT;
545 }
546 EXPORT_SYMBOL_GPL(acpi_get_psd_map);
547
548 static int register_pcc_channel(int pcc_ss_idx)
549 {
550         struct pcc_mbox_chan *pcc_chan;
551         u64 usecs_lat;
552
553         if (pcc_ss_idx >= 0) {
554                 pcc_chan = pcc_mbox_request_channel(&cppc_mbox_cl, pcc_ss_idx);
555
556                 if (IS_ERR(pcc_chan)) {
557                         pr_err("Failed to find PCC channel for subspace %d\n",
558                                pcc_ss_idx);
559                         return -ENODEV;
560                 }
561
562                 pcc_data[pcc_ss_idx]->pcc_channel = pcc_chan;
563                 /*
564                  * pcc_chan->latency is just a nominal value. In reality
565                  * the remote processor could be much slower to reply, so
566                  * add an arbitrary amount of wait on top of the nominal value.
567                  */
568                 usecs_lat = NUM_RETRIES * pcc_chan->latency;
569                 pcc_data[pcc_ss_idx]->deadline_us = usecs_lat;
570                 pcc_data[pcc_ss_idx]->pcc_mrtt = pcc_chan->min_turnaround_time;
571                 pcc_data[pcc_ss_idx]->pcc_mpar = pcc_chan->max_access_rate;
572                 pcc_data[pcc_ss_idx]->pcc_nominal = pcc_chan->latency;
573
574                 pcc_data[pcc_ss_idx]->pcc_comm_addr =
575                         acpi_os_ioremap(pcc_chan->shmem_base_addr,
576                                         pcc_chan->shmem_size);
577                 if (!pcc_data[pcc_ss_idx]->pcc_comm_addr) {
578                         pr_err("Failed to ioremap PCC comm region mem for %d\n",
579                                pcc_ss_idx);
580                         return -ENOMEM;
581                 }
582
583                 /* Set flag so that we don't come here for each CPU. */
584                 pcc_data[pcc_ss_idx]->pcc_channel_acquired = true;
585         }
586
587         return 0;
588 }
589
590 /**
591  * cpc_ffh_supported() - check if FFH reading supported
592  *
593  * Check if the architecture has support for functional fixed hardware
594  * read/write capability.
595  *
596  * Return: true for supported, false for not supported
597  */
598 bool __weak cpc_ffh_supported(void)
599 {
600         return false;
601 }
602
603 /**
604  * cpc_supported_by_cpu() - check if CPPC is supported by CPU
605  *
606  * Check if the architectural support for CPPC is present even
607  * if the _OSC hasn't prescribed it
608  *
609  * Return: true for supported, false for not supported
610  */
611 bool __weak cpc_supported_by_cpu(void)
612 {
613         return false;
614 }
615
616 /**
617  * pcc_data_alloc() - Allocate the pcc_data memory for pcc subspace
618  * @pcc_ss_id: PCC Subspace index as in the PCC client ACPI package.
619  *
620  * Check and allocate the cppc_pcc_data memory.
621  * In some processor configurations it is possible that the same subspace
622  * is shared between multiple CPUs. This is seen especially in CPUs
623  * with hardware multi-threading support.
624  *
625  * Return: 0 for success, errno for failure
626  */
627 static int pcc_data_alloc(int pcc_ss_id)
628 {
629         if (pcc_ss_id < 0 || pcc_ss_id >= MAX_PCC_SUBSPACES)
630                 return -EINVAL;
631
632         if (pcc_data[pcc_ss_id]) {
633                 pcc_data[pcc_ss_id]->refcount++;
634         } else {
635                 pcc_data[pcc_ss_id] = kzalloc(sizeof(struct cppc_pcc_data),
636                                               GFP_KERNEL);
637                 if (!pcc_data[pcc_ss_id])
638                         return -ENOMEM;
639                 pcc_data[pcc_ss_id]->refcount++;
640         }
641
642         return 0;
643 }
644
645 /*
646  * An example CPC table looks like the following.
647  *
648  *  Name (_CPC, Package() {
649  *      17,                                                     // NumEntries
650  *      1,                                                      // Revision
651  *      ResourceTemplate() {Register(PCC, 32, 0, 0x120, 2)},    // Highest Performance
652  *      ResourceTemplate() {Register(PCC, 32, 0, 0x124, 2)},    // Nominal Performance
653  *      ResourceTemplate() {Register(PCC, 32, 0, 0x128, 2)},    // Lowest Nonlinear Performance
654  *      ResourceTemplate() {Register(PCC, 32, 0, 0x12C, 2)},    // Lowest Performance
655  *      ResourceTemplate() {Register(PCC, 32, 0, 0x130, 2)},    // Guaranteed Performance Register
656  *      ResourceTemplate() {Register(PCC, 32, 0, 0x110, 2)},    // Desired Performance Register
657  *      ResourceTemplate() {Register(SystemMemory, 0, 0, 0, 0)},
658  *      ...
659  *      ...
660  *      ...
661  *  }
662  * Each Register() encodes how to access that specific register.
663  * e.g. a sample PCC entry has the following encoding:
664  *
665  *  Register (
666  *      PCC,    // AddressSpaceKeyword
667  *      8,      // RegisterBitWidth
668  *      8,      // RegisterBitOffset
669  *      0x30,   // RegisterAddress
670  *      9,      // AccessSize (subspace ID)
671  *  )
672  */
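/*
 * In the sample PCC entry above, the AccessSize field (9) is reused as the
 * PCC subspace ID that ends up in pcc_subspace_id below, and RegisterAddress
 * (0x30) is the offset within the shared memory region that GET_PCC_VADDR()
 * adds to the mapped base address.
 */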
673
674 /**
675  * acpi_cppc_processor_probe - Search for per CPU _CPC objects.
676  * @pr: Ptr to acpi_processor containing this CPU's logical ID.
677  *
678  *      Return: 0 for success or negative value for err.
679  */
680 int acpi_cppc_processor_probe(struct acpi_processor *pr)
681 {
682         struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
683         union acpi_object *out_obj, *cpc_obj;
684         struct cpc_desc *cpc_ptr;
685         struct cpc_reg *gas_t;
686         struct device *cpu_dev;
687         acpi_handle handle = pr->handle;
688         unsigned int num_ent, i, cpc_rev;
689         int pcc_subspace_id = -1;
690         acpi_status status;
691         int ret = -ENODATA;
692
693         if (!osc_sb_cppc2_support_acked) {
694                 pr_debug("CPPC v2 _OSC not acked\n");
695                 if (!cpc_supported_by_cpu()) {
696                         pr_debug("CPPC is not supported by the CPU\n");
697                         return -ENODEV;
698                 }
699         }
700
701         /* Parse the ACPI _CPC table for this CPU. */
702         status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
703                         ACPI_TYPE_PACKAGE);
704         if (ACPI_FAILURE(status)) {
705                 ret = -ENODEV;
706                 goto out_buf_free;
707         }
708
709         out_obj = (union acpi_object *) output.pointer;
710
711         cpc_ptr = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL);
712         if (!cpc_ptr) {
713                 ret = -ENOMEM;
714                 goto out_buf_free;
715         }
716
717         /* First entry is NumEntries. */
718         cpc_obj = &out_obj->package.elements[0];
719         if (cpc_obj->type == ACPI_TYPE_INTEGER) {
720                 num_ent = cpc_obj->integer.value;
721                 if (num_ent <= 1) {
722                         pr_debug("Unexpected _CPC NumEntries value (%d) for CPU:%d\n",
723                                  num_ent, pr->id);
724                         goto out_free;
725                 }
726         } else {
727                 pr_debug("Unexpected _CPC NumEntries entry type (%d) for CPU:%d\n",
728                          cpc_obj->type, pr->id);
729                 goto out_free;
730         }
731
732         /* Second entry should be revision. */
733         cpc_obj = &out_obj->package.elements[1];
734         if (cpc_obj->type == ACPI_TYPE_INTEGER) {
735                 cpc_rev = cpc_obj->integer.value;
736         } else {
737                 pr_debug("Unexpected _CPC Revision entry type (%d) for CPU:%d\n",
738                          cpc_obj->type, pr->id);
739                 goto out_free;
740         }
741
742         if (cpc_rev < CPPC_V2_REV) {
743                 pr_debug("Unsupported _CPC Revision (%d) for CPU:%d\n", cpc_rev,
744                          pr->id);
745                 goto out_free;
746         }
747
748         /*
749          * Disregard _CPC if the number of entries in the return package is not
750          * as expected, but support future revisions being proper supersets of
751          * v3 and only causing more entries to be returned by _CPC.
752          */
753         if ((cpc_rev == CPPC_V2_REV && num_ent != CPPC_V2_NUM_ENT) ||
754             (cpc_rev == CPPC_V3_REV && num_ent != CPPC_V3_NUM_ENT) ||
755             (cpc_rev > CPPC_V3_REV && num_ent <= CPPC_V3_NUM_ENT)) {
756                 pr_debug("Unexpected number of _CPC return package entries (%d) for CPU:%d\n",
757                          num_ent, pr->id);
758                 goto out_free;
759         }
760         if (cpc_rev > CPPC_V3_REV) {
761                 num_ent = CPPC_V3_NUM_ENT;
762                 cpc_rev = CPPC_V3_REV;
763         }
764
765         cpc_ptr->num_entries = num_ent;
766         cpc_ptr->version = cpc_rev;
767
768         /* Iterate through remaining entries in _CPC */
769         for (i = 2; i < num_ent; i++) {
770                 cpc_obj = &out_obj->package.elements[i];
771
772                 if (cpc_obj->type == ACPI_TYPE_INTEGER) {
773                         cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER;
774                         cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value;
775                 } else if (cpc_obj->type == ACPI_TYPE_BUFFER) {
776                         gas_t = (struct cpc_reg *)
777                                 cpc_obj->buffer.pointer;
778
779                         /*
780                          * The PCC Subspace index is encoded inside
781                          * the CPC table entries. The same PCC index
782                          * will be used for all the PCC entries,
783                          * so extract it only once.
784                          */
785                         if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
786                                 if (pcc_subspace_id < 0) {
787                                         pcc_subspace_id = gas_t->access_width;
788                                         if (pcc_data_alloc(pcc_subspace_id))
789                                                 goto out_free;
790                                 } else if (pcc_subspace_id != gas_t->access_width) {
791                                         pr_debug("Mismatched PCC ids in _CPC for CPU:%d\n",
792                                                  pr->id);
793                                         goto out_free;
794                                 }
795                         } else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
796                                 if (gas_t->address) {
797                                         void __iomem *addr;
798                                         size_t access_width;
799
800                                         if (!osc_cpc_flexible_adr_space_confirmed) {
801                                                 pr_debug("Flexible address space capability not supported\n");
802                                                 if (!cpc_supported_by_cpu())
803                                                         goto out_free;
804                                         }
805
806                                         access_width = GET_BIT_WIDTH(gas_t) / 8;
807                                         addr = ioremap(gas_t->address, access_width);
808                                         if (!addr)
809                                                 goto out_free;
810                                         cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
811                                 }
812                         } else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
813                                 if (gas_t->access_width < 1 || gas_t->access_width > 3) {
814                                         /*
815                                          * 1 = 8-bit, 2 = 16-bit, and 3 = 32-bit.
816                                          * SystemIO doesn't implement 64-bit
817                                          * registers.
818                                          */
819                                         pr_debug("Invalid access width %d for SystemIO register in _CPC\n",
820                                                  gas_t->access_width);
821                                         goto out_free;
822                                 }
823                                 if (gas_t->address & OVER_16BTS_MASK) {
824                                         /* SystemIO registers use 16-bit integer addresses */
825                                         pr_debug("Invalid IO port %llu for SystemIO register in _CPC\n",
826                                                  gas_t->address);
827                                         goto out_free;
828                                 }
829                                 if (!osc_cpc_flexible_adr_space_confirmed) {
830                                         pr_debug("Flexible address space capability not supported\n");
831                                         if (!cpc_supported_by_cpu())
832                                                 goto out_free;
833                                 }
834                         } else {
835                                 if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
836                                         /* Support only PCC, SystemMemory, SystemIO, and FFH type regs. */
837                                         pr_debug("Unsupported register type (%d) in _CPC\n",
838                                                  gas_t->space_id);
839                                         goto out_free;
840                                 }
841                         }
842
843                         cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
844                         memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
845                 } else {
846                         pr_debug("Invalid entry type (%d) in _CPC for CPU:%d\n",
847                                  i, pr->id);
848                         goto out_free;
849                 }
850         }
851         per_cpu(cpu_pcc_subspace_idx, pr->id) = pcc_subspace_id;
852
853         /*
854          * Initialize the remaining cpc_regs as unsupported.
855          * Example: if the firmware exposes CPPC v2, the loop below will
856          * initialize the LOWEST_FREQ and NOMINAL_FREQ regs as unsupported.
857          */
858         for (i = num_ent - 2; i < MAX_CPC_REG_ENT; i++) {
859                 cpc_ptr->cpc_regs[i].type = ACPI_TYPE_INTEGER;
860                 cpc_ptr->cpc_regs[i].cpc_entry.int_value = 0;
861         }
862
863
864         /* Store CPU Logical ID */
865         cpc_ptr->cpu_id = pr->id;
866         raw_spin_lock_init(&cpc_ptr->rmw_lock);
867
868         /* Parse PSD data for this CPU */
869         ret = acpi_get_psd(cpc_ptr, handle);
870         if (ret)
871                 goto out_free;
872
873         /* Register the PCC channel once per PCC subspace ID. */
874         if (pcc_subspace_id >= 0 && !pcc_data[pcc_subspace_id]->pcc_channel_acquired) {
875                 ret = register_pcc_channel(pcc_subspace_id);
876                 if (ret)
877                         goto out_free;
878
879                 init_rwsem(&pcc_data[pcc_subspace_id]->pcc_lock);
880                 init_waitqueue_head(&pcc_data[pcc_subspace_id]->pcc_write_wait_q);
881         }
882
883         /* Everything looks okay */
884         pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);
885
886         /* Add per logical CPU nodes for reading its feedback counters. */
887         cpu_dev = get_cpu_device(pr->id);
888         if (!cpu_dev) {
889                 ret = -EINVAL;
890                 goto out_free;
891         }
892
893         /* Plug PSD data into this CPU's CPC descriptor. */
894         per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;
895
896         ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
897                         "acpi_cppc");
898         if (ret) {
899                 per_cpu(cpc_desc_ptr, pr->id) = NULL;
900                 kobject_put(&cpc_ptr->kobj);
901                 goto out_free;
902         }
903
904         kfree(output.pointer);
905         return 0;
906
907 out_free:
908         /* Free all the mapped sys mem areas for this CPU */
909         for (i = 2; i < cpc_ptr->num_entries; i++) {
910                 void __iomem *addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
911
912                 if (addr)
913                         iounmap(addr);
914         }
915         kfree(cpc_ptr);
916
917 out_buf_free:
918         kfree(output.pointer);
919         return ret;
920 }
921 EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);
922
923 /**
924  * acpi_cppc_processor_exit - Cleanup CPC structs.
925  * @pr: Ptr to acpi_processor containing this CPU's logical ID.
926  *
927  * Return: Void
928  */
929 void acpi_cppc_processor_exit(struct acpi_processor *pr)
930 {
931         struct cpc_desc *cpc_ptr;
932         unsigned int i;
933         void __iomem *addr;
934         int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, pr->id);
935
936         if (pcc_ss_id >= 0 && pcc_data[pcc_ss_id]) {
937                 if (pcc_data[pcc_ss_id]->pcc_channel_acquired) {
938                         pcc_data[pcc_ss_id]->refcount--;
939                         if (!pcc_data[pcc_ss_id]->refcount) {
940                                 pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel);
941                                 kfree(pcc_data[pcc_ss_id]);
942                                 pcc_data[pcc_ss_id] = NULL;
943                         }
944                 }
945         }
946
947         cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
948         if (!cpc_ptr)
949                 return;
950
951         /* Free all the mapped sys mem areas for this CPU */
952         for (i = 2; i < cpc_ptr->num_entries; i++) {
953                 addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
954                 if (addr)
955                         iounmap(addr);
956         }
957
958         kobject_put(&cpc_ptr->kobj);
959         kfree(cpc_ptr);
960 }
961 EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);
962
963 /**
964  * cpc_read_ffh() - Read FFH register
965  * @cpunum:     CPU number to read
966  * @reg:        cppc register information
967  * @val:        placeholder for return value
968  *
969  * Read bit_width bits from a specified address and bit_offset.
970  *
971  * Return: 0 for success, or an error code otherwise
972  */
973 int __weak cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
974 {
975         return -ENOTSUPP;
976 }
977
978 /**
979  * cpc_write_ffh() - Write FFH register
980  * @cpunum:     CPU number to write
981  * @reg:        cppc register information
982  * @val:        value to write
983  *
984  * Write value of bit_width bits to a specified address and bit_offset
985  * Write a value of bit_width bits to a specified address and bit_offset.
986  *
987  * Return: 0 for success, or an error code otherwise
988 int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
989 {
990         return -ENOTSUPP;
991 }
992
993 /*
994  * Since cpc_read and cpc_write are called while holding pcc_lock, they should
995  * be as fast as possible. The PCC subspace has already been mapped during
996  * init, so it can be accessed directly.
997  */
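/*
 * Dispatch summary for the helpers below (descriptive only): SystemIO
 * registers go through acpi_os_read_port()/acpi_os_write_port(), PCC and
 * SystemMemory registers use pre-mapped virtual addresses with the
 * read/write*_relaxed() accessors, FFH registers are delegated to
 * cpc_read_ffh()/cpc_write_ffh(), and anything else falls back to
 * acpi_os_read_memory()/acpi_os_write_memory().
 */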
998
999 static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
1000 {
1001         void __iomem *vaddr = NULL;
1002         int size;
1003         int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
1004         struct cpc_reg *reg = &reg_res->cpc_entry.reg;
1005
1006         if (reg_res->type == ACPI_TYPE_INTEGER) {
1007                 *val = reg_res->cpc_entry.int_value;
1008                 return 0;
1009         }
1010
1011         *val = 0;
1012         size = GET_BIT_WIDTH(reg);
1013
1014         if (IS_ENABLED(CONFIG_HAS_IOPORT) &&
1015             reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
1016                 u32 val_u32;
1017                 acpi_status status;
1018
1019                 status = acpi_os_read_port((acpi_io_address)reg->address,
1020                                            &val_u32, size);
1021                 if (ACPI_FAILURE(status)) {
1022                         pr_debug("Error: Failed to read SystemIO port %llx\n",
1023                                  reg->address);
1024                         return -EFAULT;
1025                 }
1026
1027                 *val = val_u32;
1028                 return 0;
1029         } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0) {
1030                 /*
1031                  * For registers in PCC space, the register size is determined
1032                  * by the bit width field; the access size is used to indicate
1033                  * the PCC subspace id.
1034                  */
1035                 size = reg->bit_width;
1036                 vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
1037         } else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
1039                 vaddr = reg_res->sys_mem_vaddr;
1040         else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
1041                 return cpc_read_ffh(cpu, reg, val);
1042         else
1043                 return acpi_os_read_memory((acpi_physical_address)reg->address,
1044                                 val, size);
1045
1046         switch (size) {
1047         case 8:
1048                 *val = readb_relaxed(vaddr);
1049                 break;
1050         case 16:
1051                 *val = readw_relaxed(vaddr);
1052                 break;
1053         case 32:
1054                 *val = readl_relaxed(vaddr);
1055                 break;
1056         case 64:
1057                 *val = readq_relaxed(vaddr);
1058                 break;
1059         default:
1060                 if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
1061                         pr_debug("Error: Cannot read %u bit width from system memory: 0x%llx\n",
1062                                 size, reg->address);
1063                 } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
1064                         pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
1065                                 size, pcc_ss_id);
1066                 }
1067                 return -EFAULT;
1068         }
1069
1070         if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
1071                 *val = MASK_VAL_READ(reg, *val);
1072
1073         return 0;
1074 }
1075
1076 static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
1077 {
1078         int ret_val = 0;
1079         int size;
1080         u64 prev_val;
1081         void __iomem *vaddr = NULL;
1082         int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
1083         struct cpc_reg *reg = &reg_res->cpc_entry.reg;
1084         struct cpc_desc *cpc_desc;
1085         unsigned long flags;
1086
1087         size = GET_BIT_WIDTH(reg);
1088
1089         if (IS_ENABLED(CONFIG_HAS_IOPORT) &&
1090             reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
1091                 acpi_status status;
1092
1093                 status = acpi_os_write_port((acpi_io_address)reg->address,
1094                                             (u32)val, size);
1095                 if (ACPI_FAILURE(status)) {
1096                         pr_debug("Error: Failed to write SystemIO port %llx\n",
1097                                  reg->address);
1098                         return -EFAULT;
1099                 }
1100
1101                 return 0;
1102         } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0) {
1103                 /*
1104                  * For registers in PCC space, the register size is determined
1105                  * by the bit width field; the access size is used to indicate
1106                  * the PCC subspace id.
1107                  */
1108                 size = reg->bit_width;
1109                 vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
1110         } else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
1112                 vaddr = reg_res->sys_mem_vaddr;
1113         else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
1114                 return cpc_write_ffh(cpu, reg, val);
1115         else
1116                 return acpi_os_write_memory((acpi_physical_address)reg->address,
1117                                 val, size);
1118
1119         if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
1120                 cpc_desc = per_cpu(cpc_desc_ptr, cpu);
1121                 if (!cpc_desc) {
1122                         pr_debug("No CPC descriptor for CPU:%d\n", cpu);
1123                         return -ENODEV;
1124                 }
1125
1126                 raw_spin_lock_irqsave(&cpc_desc->rmw_lock, flags);
1127                 switch (size) {
1128                 case 8:
1129                         prev_val = readb_relaxed(vaddr);
1130                         break;
1131                 case 16:
1132                         prev_val = readw_relaxed(vaddr);
1133                         break;
1134                 case 32:
1135                         prev_val = readl_relaxed(vaddr);
1136                         break;
1137                 case 64:
1138                         prev_val = readq_relaxed(vaddr);
1139                         break;
1140                 default:
1141                         raw_spin_unlock_irqrestore(&cpc_desc->rmw_lock, flags);
1142                         return -EFAULT;
1143                 }
1144                 val = MASK_VAL_WRITE(reg, prev_val, val);
1145         }
1146
1147         switch (size) {
1148         case 8:
1149                 writeb_relaxed(val, vaddr);
1150                 break;
1151         case 16:
1152                 writew_relaxed(val, vaddr);
1153                 break;
1154         case 32:
1155                 writel_relaxed(val, vaddr);
1156                 break;
1157         case 64:
1158                 writeq_relaxed(val, vaddr);
1159                 break;
1160         default:
1161                 if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
1162                         pr_debug("Error: Cannot write %u bit width to system memory: 0x%llx\n",
1163                                 size, reg->address);
1164                 } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
1165                         pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
1166                                 size, pcc_ss_id);
1167                 }
1168                 ret_val = -EFAULT;
1169                 break;
1170         }
1171
1172         if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
1173                 raw_spin_unlock_irqrestore(&cpc_desc->rmw_lock, flags);
1174
1175         return ret_val;
1176 }
1177
1178 static int cppc_get_perf(int cpunum, enum cppc_regs reg_idx, u64 *perf)
1179 {
1180         struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
1181         struct cpc_register_resource *reg;
1182
1183         if (!cpc_desc) {
1184                 pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
1185                 return -ENODEV;
1186         }
1187
1188         reg = &cpc_desc->cpc_regs[reg_idx];
1189
1190         if (CPC_IN_PCC(reg)) {
1191                 int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
1192                 struct cppc_pcc_data *pcc_ss_data = NULL;
1193                 int ret = 0;
1194
1195                 if (pcc_ss_id < 0)
1196                         return -EIO;
1197
1198                 pcc_ss_data = pcc_data[pcc_ss_id];
1199
1200                 down_write(&pcc_ss_data->pcc_lock);
1201
1202                 if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0)
1203                         cpc_read(cpunum, reg, perf);
1204                 else
1205                         ret = -EIO;
1206
1207                 up_write(&pcc_ss_data->pcc_lock);
1208
1209                 return ret;
1210         }
1211
1212         cpc_read(cpunum, reg, perf);
1213
1214         return 0;
1215 }
1216
1217 /**
1218  * cppc_get_desired_perf - Get the desired performance register value.
1219  * @cpunum: CPU from which to get desired performance.
1220  * @desired_perf: Return address.
1221  *
1222  * Return: 0 for success, -EIO otherwise.
1223  */
1224 int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
1225 {
1226         return cppc_get_perf(cpunum, DESIRED_PERF, desired_perf);
1227 }
1228 EXPORT_SYMBOL_GPL(cppc_get_desired_perf);
1229
1230 /**
1231  * cppc_get_nominal_perf - Get the nominal performance register value.
1232  * @cpunum: CPU from which to get nominal performance.
1233  * @nominal_perf: Return address.
1234  *
1235  * Return: 0 for success, -EIO otherwise.
1236  */
1237 int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf)
1238 {
1239         return cppc_get_perf(cpunum, NOMINAL_PERF, nominal_perf);
1240 }
1241
1242 /**
1243  * cppc_get_highest_perf - Get the highest performance register value.
1244  * @cpunum: CPU from which to get highest performance.
1245  * @highest_perf: Return address.
1246  *
1247  * Return: 0 for success, -EIO otherwise.
1248  */
1249 int cppc_get_highest_perf(int cpunum, u64 *highest_perf)
1250 {
1251         return cppc_get_perf(cpunum, HIGHEST_PERF, highest_perf);
1252 }
1253 EXPORT_SYMBOL_GPL(cppc_get_highest_perf);
1254
1255 /**
1256  * cppc_get_epp_perf - Get the epp register value.
1257  * @cpunum: CPU from which to get epp preference value.
1258  * @epp_perf: Return address.
1259  *
1260  * Return: 0 for success, -EIO otherwise.
1261  */
1262 int cppc_get_epp_perf(int cpunum, u64 *epp_perf)
1263 {
1264         return cppc_get_perf(cpunum, ENERGY_PERF, epp_perf);
1265 }
1266 EXPORT_SYMBOL_GPL(cppc_get_epp_perf);
1267
1268 /**
1269  * cppc_get_perf_caps - Get a CPU's performance capabilities.
1270  * @cpunum: CPU from which to get capabilities info.
1271  * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
1272  *
1273  * Return: 0 for success with perf_caps populated else -ERRNO.
1274  */
1275 int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
1276 {
1277         struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
1278         struct cpc_register_resource *highest_reg, *lowest_reg,
1279                 *lowest_non_linear_reg, *nominal_reg, *guaranteed_reg,
1280                 *low_freq_reg = NULL, *nom_freq_reg = NULL;
1281         u64 high, low, guaranteed, nom, min_nonlinear, low_f = 0, nom_f = 0;
1282         int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
1283         struct cppc_pcc_data *pcc_ss_data = NULL;
1284         int ret = 0, regs_in_pcc = 0;
1285
1286         if (!cpc_desc) {
1287                 pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
1288                 return -ENODEV;
1289         }
1290
1291         highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
1292         lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
1293         lowest_non_linear_reg = &cpc_desc->cpc_regs[LOW_NON_LINEAR_PERF];
1294         nominal_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
1295         low_freq_reg = &cpc_desc->cpc_regs[LOWEST_FREQ];
1296         nom_freq_reg = &cpc_desc->cpc_regs[NOMINAL_FREQ];
1297         guaranteed_reg = &cpc_desc->cpc_regs[GUARANTEED_PERF];
1298
1299         /* Are any of the regs in PCC? */
1300         if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
1301                 CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg) ||
1302                 CPC_IN_PCC(low_freq_reg) || CPC_IN_PCC(nom_freq_reg)) {
1303                 if (pcc_ss_id < 0) {
1304                         pr_debug("Invalid pcc_ss_id\n");
1305                         return -ENODEV;
1306                 }
1307                 pcc_ss_data = pcc_data[pcc_ss_id];
1308                 regs_in_pcc = 1;
1309                 down_write(&pcc_ss_data->pcc_lock);
1310                 /* Ring doorbell once to update PCC subspace */
1311                 if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
1312                         ret = -EIO;
1313                         goto out_err;
1314                 }
1315         }
1316
1317         cpc_read(cpunum, highest_reg, &high);
1318         perf_caps->highest_perf = high;
1319
1320         cpc_read(cpunum, lowest_reg, &low);
1321         perf_caps->lowest_perf = low;
1322
1323         cpc_read(cpunum, nominal_reg, &nom);
1324         perf_caps->nominal_perf = nom;
1325
1326         if (guaranteed_reg->type != ACPI_TYPE_BUFFER  ||
1327             IS_NULL_REG(&guaranteed_reg->cpc_entry.reg)) {
1328                 perf_caps->guaranteed_perf = 0;
1329         } else {
1330                 cpc_read(cpunum, guaranteed_reg, &guaranteed);
1331                 perf_caps->guaranteed_perf = guaranteed;
1332         }
1333
1334         cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
1335         perf_caps->lowest_nonlinear_perf = min_nonlinear;
1336
1337         if (!high || !low || !nom || !min_nonlinear)
1338                 ret = -EFAULT;
1339
1340         /* Read optional lowest and nominal frequencies if present */
1341         if (CPC_SUPPORTED(low_freq_reg))
1342                 cpc_read(cpunum, low_freq_reg, &low_f);
1343
1344         if (CPC_SUPPORTED(nom_freq_reg))
1345                 cpc_read(cpunum, nom_freq_reg, &nom_f);
1346
1347         perf_caps->lowest_freq = low_f;
1348         perf_caps->nominal_freq = nom_f;
1349
1350
1351 out_err:
1352         if (regs_in_pcc)
1353                 up_write(&pcc_ss_data->pcc_lock);
1354         return ret;
1355 }
1356 EXPORT_SYMBOL_GPL(cppc_get_perf_caps);
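/*
 * Illustrative sketch (not part of this file): a cpufreq-style driver would
 * typically read the capabilities once at policy initialization and derive
 * its frequency bounds from them. The function name my_policy_init() below
 * is hypothetical.
 *
 *	static int my_policy_init(unsigned int cpu)
 *	{
 *		struct cppc_perf_caps caps;
 *		int ret = cppc_get_perf_caps(cpu, &caps);
 *
 *		if (ret)
 *			return ret;
 *
 *		pr_info("CPU%u: lowest=%u nominal=%u highest=%u\n", cpu,
 *			caps.lowest_perf, caps.nominal_perf, caps.highest_perf);
 *		return 0;
 *	}
 */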
1357
1358 /**
1359  * cppc_perf_ctrs_in_pcc - Check if any perf counters are in a PCC region.
1360  *
1361  * CPPC has flexibility about how CPU performance counters are accessed.
1362  * One of the choices is PCC regions, which can have a high access latency. This
1363  * routine allows callers of cppc_get_perf_ctrs() to know this ahead of time.
1364  *
1365  * Return: true if any of the counters are in PCC regions, false otherwise
1366  */
1367 bool cppc_perf_ctrs_in_pcc(void)
1368 {
1369         int cpu;
1370
1371         for_each_present_cpu(cpu) {
1372                 struct cpc_register_resource *ref_perf_reg;
1373                 struct cpc_desc *cpc_desc;
1374
1375                 cpc_desc = per_cpu(cpc_desc_ptr, cpu);
1376
1377                 if (CPC_IN_PCC(&cpc_desc->cpc_regs[DELIVERED_CTR]) ||
1378                     CPC_IN_PCC(&cpc_desc->cpc_regs[REFERENCE_CTR]) ||
1379                     CPC_IN_PCC(&cpc_desc->cpc_regs[CTR_WRAP_TIME]))
1380                         return true;
1381
1382
1383                 ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
1384
1385                 /*
1386                  * If reference perf register is not supported then we should
1387                  * use the nominal perf value
1388                  */
1389                 if (!CPC_SUPPORTED(ref_perf_reg))
1390                         ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
1391
1392                 if (CPC_IN_PCC(ref_perf_reg))
1393                         return true;
1394         }
1395
1396         return false;
1397 }
1398 EXPORT_SYMBOL_GPL(cppc_perf_ctrs_in_pcc);
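/*
 * Illustrative sketch: callers that sample feedback counters from hot paths
 * can check this once up front and avoid the slow PCC accesses entirely; the
 * frequency-invariance setup in cppc_cpufreq, for example, skips
 * counter-based scaling when this returns true.
 *
 *	if (cppc_perf_ctrs_in_pcc())
 *		pr_debug("perf counters are behind PCC, avoiding fast-path reads\n");
 */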
1399
1400 /**
1401  * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
1402  * @cpunum: CPU from which to read counters.
1403  * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
1404  *
1405  * Return: 0 for success with perf_fb_ctrs populated else -ERRNO.
1406  */
1407 int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
1408 {
1409         struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
1410         struct cpc_register_resource *delivered_reg, *reference_reg,
1411                 *ref_perf_reg, *ctr_wrap_reg;
1412         int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
1413         struct cppc_pcc_data *pcc_ss_data = NULL;
1414         u64 delivered, reference, ref_perf, ctr_wrap_time;
1415         int ret = 0, regs_in_pcc = 0;
1416
1417         if (!cpc_desc) {
1418                 pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
1419                 return -ENODEV;
1420         }
1421
1422         delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
1423         reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
1424         ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
1425         ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME];
1426
1427         /*
1428          * If reference perf register is not supported then we should
1429          * use the nominal perf value
1430          */
1431         if (!CPC_SUPPORTED(ref_perf_reg))
1432                 ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
1433
1434         /* Are any of the regs in PCC? */
1435         if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) ||
1436                 CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) {
1437                 if (pcc_ss_id < 0) {
1438                         pr_debug("Invalid pcc_ss_id\n");
1439                         return -ENODEV;
1440                 }
1441                 pcc_ss_data = pcc_data[pcc_ss_id];
1442                 down_write(&pcc_ss_data->pcc_lock);
1443                 regs_in_pcc = 1;
1444                 /* Ring doorbell once to update PCC subspace */
1445                 if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
1446                         ret = -EIO;
1447                         goto out_err;
1448                 }
1449         }
1450
1451         cpc_read(cpunum, delivered_reg, &delivered);
1452         cpc_read(cpunum, reference_reg, &reference);
1453         cpc_read(cpunum, ref_perf_reg, &ref_perf);
1454
1455         /*
1456          * Per spec, if the optional ctr_wrap_time register is unsupported, then
1457          * the performance counters are assumed to never wrap during the lifetime
1458          * of the platform.
1459          */
1460         ctr_wrap_time = (u64)(~((u64)0));
1461         if (CPC_SUPPORTED(ctr_wrap_reg))
1462                 cpc_read(cpunum, ctr_wrap_reg, &ctr_wrap_time);
1463
1464         if (!delivered || !reference || !ref_perf) {
1465                 ret = -EFAULT;
1466                 goto out_err;
1467         }
1468
1469         perf_fb_ctrs->delivered = delivered;
1470         perf_fb_ctrs->reference = reference;
1471         perf_fb_ctrs->reference_perf = ref_perf;
1472         perf_fb_ctrs->wraparound_time = ctr_wrap_time;
1473 out_err:
1474         if (regs_in_pcc)
1475                 up_write(&pcc_ss_data->pcc_lock);
1476         return ret;
1477 }
1478 EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
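/*
 * Illustrative sketch: following the CPPC feedback-counter scheme, the
 * average delivered performance over an interval is derived from two counter
 * snapshots as reference_perf * delta(delivered) / delta(reference). The
 * helper below is hypothetical and ignores counter wraparound and overflow.
 *
 *	static u64 my_delivered_perf(struct cppc_perf_fb_ctrs *t0,
 *				     struct cppc_perf_fb_ctrs *t1)
 *	{
 *		u64 d_delivered = t1->delivered - t0->delivered;
 *		u64 d_reference = t1->reference - t0->reference;
 *
 *		if (!d_reference)
 *			return t1->reference_perf;
 *
 *		return div64_u64(t1->reference_perf * d_delivered, d_reference);
 *	}
 */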
1479
1480 /*
1481  * Set Energy Performance Preference Register value through
1482  * Performance Controls Interface
1483  */
1484 int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable)
1485 {
1486         int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
1487         struct cpc_register_resource *epp_set_reg;
1488         struct cpc_register_resource *auto_sel_reg;
1489         struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
1490         struct cppc_pcc_data *pcc_ss_data = NULL;
1491         int ret;
1492
1493         if (!cpc_desc) {
1494                 pr_debug("No CPC descriptor for CPU:%d\n", cpu);
1495                 return -ENODEV;
1496         }
1497
1498         auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];
1499         epp_set_reg = &cpc_desc->cpc_regs[ENERGY_PERF];
1500
1501         if (CPC_IN_PCC(epp_set_reg) || CPC_IN_PCC(auto_sel_reg)) {
1502                 if (pcc_ss_id < 0) {
1503                         pr_debug("Invalid pcc_ss_id for CPU:%d\n", cpu);
1504                         return -ENODEV;
1505                 }
1506
1507                 if (CPC_SUPPORTED(auto_sel_reg)) {
1508                         ret = cpc_write(cpu, auto_sel_reg, enable);
1509                         if (ret)
1510                                 return ret;
1511                 }
1512
1513                 if (CPC_SUPPORTED(epp_set_reg)) {
1514                         ret = cpc_write(cpu, epp_set_reg, perf_ctrls->energy_perf);
1515                         if (ret)
1516                                 return ret;
1517                 }
1518
1519                 pcc_ss_data = pcc_data[pcc_ss_id];
1520
1521                 down_write(&pcc_ss_data->pcc_lock);
1522                 /* after writing CPC, transfer the ownership of PCC to platform */
1523                 ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
1524                 up_write(&pcc_ss_data->pcc_lock);
1525         } else if (osc_cpc_flexible_adr_space_confirmed &&
1526                    CPC_SUPPORTED(epp_set_reg) && CPC_IN_FFH(epp_set_reg)) {
1527                 ret = cpc_write(cpu, epp_set_reg, perf_ctrls->energy_perf);
1528         } else {
1529                 ret = -ENOTSUPP;
1530                 pr_debug("_CPC in PCC and _CPC in FFH are not supported\n");
1531         }
1532
1533         return ret;
1534 }
1535 EXPORT_SYMBOL_GPL(cppc_set_epp_perf);
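/*
 * Illustrative sketch: a caller only needs to fill the energy_perf field of
 * cppc_perf_ctrls before calling this helper. The value 0xff (maximum
 * energy-efficiency bias per the ACPI EPP encoding) is just an example.
 *
 *	struct cppc_perf_ctrls ctrls = { .energy_perf = 0xff };
 *	int ret = cppc_set_epp_perf(cpu, &ctrls, true);
 */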
1536
1537 /**
1538  * cppc_get_auto_sel_caps - Read autonomous selection register.
1539  * @cpunum : CPU from which to read the register.
1540  * @perf_caps : struct where the autonomous selection register value is stored.
1541  */
1542 int cppc_get_auto_sel_caps(int cpunum, struct cppc_perf_caps *perf_caps)
1543 {
1544         struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
1545         struct cpc_register_resource *auto_sel_reg;
1546         u64  auto_sel;
1547
1548         if (!cpc_desc) {
1549                 pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
1550                 return -ENODEV;
1551         }
1552
1553         auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];
1554
1555         if (!CPC_SUPPORTED(auto_sel_reg))
1556                 pr_warn_once("Autonomous mode is not supported!\n");
1557
1558         if (CPC_IN_PCC(auto_sel_reg)) {
1559                 int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
1560                 struct cppc_pcc_data *pcc_ss_data = NULL;
1561                 int ret = 0;
1562
1563                 if (pcc_ss_id < 0)
1564                         return -ENODEV;
1565
1566                 pcc_ss_data = pcc_data[pcc_ss_id];
1567
1568                 down_write(&pcc_ss_data->pcc_lock);
1569
1570                 if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0) {
1571                         cpc_read(cpunum, auto_sel_reg, &auto_sel);
1572                         perf_caps->auto_sel = (bool)auto_sel;
1573                 } else {
1574                         ret = -EIO;
1575                 }
1576
1577                 up_write(&pcc_ss_data->pcc_lock);
1578
1579                 return ret;
1580         }
1581
1582         return 0;
1583 }
1584 EXPORT_SYMBOL_GPL(cppc_get_auto_sel_caps);
1585
1586 /**
1587  * cppc_set_auto_sel - Write autonomous selection register.
1588  * @cpu    : CPU to which to write the register.
1589  * @enable : the desired value of the autonomous selection register to be written.
1590  */
1591 int cppc_set_auto_sel(int cpu, bool enable)
1592 {
1593         int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
1594         struct cpc_register_resource *auto_sel_reg;
1595         struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
1596         struct cppc_pcc_data *pcc_ss_data = NULL;
1597         int ret = -EINVAL;
1598
1599         if (!cpc_desc) {
1600                 pr_debug("No CPC descriptor for CPU:%d\n", cpu);
1601                 return -ENODEV;
1602         }
1603
1604         auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];
1605
1606         if (CPC_IN_PCC(auto_sel_reg)) {
1607                 if (pcc_ss_id < 0) {
1608                         pr_debug("Invalid pcc_ss_id\n");
1609                         return -ENODEV;
1610                 }
1611
1612                 if (CPC_SUPPORTED(auto_sel_reg)) {
1613                         ret = cpc_write(cpu, auto_sel_reg, enable);
1614                         if (ret)
1615                                 return ret;
1616                 }
1617
1618                 pcc_ss_data = pcc_data[pcc_ss_id];
1619
1620                 down_write(&pcc_ss_data->pcc_lock);
1621                 /* after writing CPC, transfer the ownership of PCC to platform */
1622                 ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
1623                 up_write(&pcc_ss_data->pcc_lock);
1624         } else {
1625                 ret = -ENOTSUPP;
1626                 pr_debug("_CPC in PCC is not supported\n");
1627         }
1628
1629         return ret;
1630 }
1631 EXPORT_SYMBOL_GPL(cppc_set_auto_sel);
1632
1633 /**
1634  * cppc_set_enable - Enable or disable CPPC on the processor by writing the
1635  * Continuous Performance Control package EnableRegister field.
1636  * @cpu: CPU for which to enable CPPC register.
1637  * @enable: 0 - disable, 1 - enable CPPC feature on the processor.
1638  *
1639  * Return: 0 for success, -ERRNO or -EIO otherwise.
1640  */
1641 int cppc_set_enable(int cpu, bool enable)
1642 {
1643         int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
1644         struct cpc_register_resource *enable_reg;
1645         struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
1646         struct cppc_pcc_data *pcc_ss_data = NULL;
1647         int ret = -EINVAL;
1648
1649         if (!cpc_desc) {
1650                 pr_debug("No CPC descriptor for CPU:%d\n", cpu);
1651                 return -EINVAL;
1652         }
1653
1654         enable_reg = &cpc_desc->cpc_regs[ENABLE];
1655
1656         if (CPC_IN_PCC(enable_reg)) {
1657
1658                 if (pcc_ss_id < 0)
1659                         return -EIO;
1660
1661                 ret = cpc_write(cpu, enable_reg, enable);
1662                 if (ret)
1663                         return ret;
1664
1665                 pcc_ss_data = pcc_data[pcc_ss_id];
1666
1667                 down_write(&pcc_ss_data->pcc_lock);
1668                 /* after writing CPC, transfer the ownership of PCC to platform */
1669                 ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
1670                 up_write(&pcc_ss_data->pcc_lock);
1671                 return ret;
1672         }
1673
1674         return cpc_write(cpu, enable_reg, enable);
1675 }
1676 EXPORT_SYMBOL_GPL(cppc_set_enable);
1677
1678 /**
1679  * cppc_set_perf - Set a CPU's performance controls.
1680  * @cpu: CPU for which to set performance controls.
1681  * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
1682  *
1683  * Return: 0 for success, -ERRNO otherwise.
1684  */
1685 int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
1686 {
1687         struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
1688         struct cpc_register_resource *desired_reg, *min_perf_reg, *max_perf_reg;
1689         int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
1690         struct cppc_pcc_data *pcc_ss_data = NULL;
1691         int ret = 0;
1692
1693         if (!cpc_desc) {
1694                 pr_debug("No CPC descriptor for CPU:%d\n", cpu);
1695                 return -ENODEV;
1696         }
1697
1698         desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
1699         min_perf_reg = &cpc_desc->cpc_regs[MIN_PERF];
1700         max_perf_reg = &cpc_desc->cpc_regs[MAX_PERF];
1701
1702         /*
1703          * This is Phase-I where we want to write to CPC registers
1704          * -> We want all CPUs to be able to execute this phase in parallel
1705          *
1706          * Since read_lock can be acquired by multiple CPUs simultaneously we
1707          * achieve that goal here
1708          */
1709         if (CPC_IN_PCC(desired_reg) || CPC_IN_PCC(min_perf_reg) || CPC_IN_PCC(max_perf_reg)) {
1710                 if (pcc_ss_id < 0) {
1711                         pr_debug("Invalid pcc_ss_id\n");
1712                         return -ENODEV;
1713                 }
1714                 pcc_ss_data = pcc_data[pcc_ss_id];
1715                 down_read(&pcc_ss_data->pcc_lock); /* BEGIN Phase-I */
1716                 if (pcc_ss_data->platform_owns_pcc) {
1717                         ret = check_pcc_chan(pcc_ss_id, false);
1718                         if (ret) {
1719                                 up_read(&pcc_ss_data->pcc_lock);
1720                                 return ret;
1721                         }
1722                 }
1723                 /*
1724                  * Update the pending_write to make sure a PCC CMD_READ will not
1725                  * arrive and steal the channel during the switch to write lock
1726                  */
1727                 pcc_ss_data->pending_pcc_write_cmd = true;
1728                 cpc_desc->write_cmd_id = pcc_ss_data->pcc_write_cnt;
1729                 cpc_desc->write_cmd_status = 0;
1730         }
1731
1732         cpc_write(cpu, desired_reg, perf_ctrls->desired_perf);
1733
1734         /*
1735          * Only write if min_perf and max_perf not zero. Some drivers pass zero
1736          * value to min and max perf, but they don't mean to set the zero value,
1737          * they just don't want to write to those registers.
1738          */
1739         if (perf_ctrls->min_perf)
1740                 cpc_write(cpu, min_perf_reg, perf_ctrls->min_perf);
1741         if (perf_ctrls->max_perf)
1742                 cpc_write(cpu, max_perf_reg, perf_ctrls->max_perf);
1743
1744         if (CPC_IN_PCC(desired_reg) || CPC_IN_PCC(min_perf_reg) || CPC_IN_PCC(max_perf_reg))
1745                 up_read(&pcc_ss_data->pcc_lock);        /* END Phase-I */
1746         /*
1747          * This is Phase-II where we transfer the ownership of PCC to Platform
1748          *
1749          * Short Summary: If we think of a group of cppc_set_perf requests that
1750          * happen in a short overlapping interval, the last CPU to come out of
1751          * Phase-I will enter Phase-II and ring the doorbell.
1752          *
1753          * We have the following requirements for Phase-II:
1754          *     1. We want to execute Phase-II only when there are no CPUs
1755          * currently executing in Phase-I
1756          *     2. Once we start Phase-II we want to avoid all other CPUs from
1757          * entering Phase-I.
1758          *     3. We want only one CPU among all those who went through Phase-I
1759          * to run Phase-II
1760          *
1761          * If write_trylock fails to get the lock and doesn't transfer the
1762          * PCC ownership to the platform, then one of the following will be TRUE
1763          *     1. There is at least one CPU in Phase-I which will later execute
1764          * write_trylock, so the CPUs in Phase-I will be responsible for
1765          * executing Phase-II.
1766          *     2. Some other CPU has beaten this CPU to successfully execute the
1767          * write_trylock and has already acquired the write_lock. We know for a
1768          * fact it (other CPU acquiring the write_lock) couldn't have happened
1769          * before this CPU's Phase-I as we held the read_lock.
1770          *     3. Some other CPU executing pcc CMD_READ has stolen the
1771          * down_write, in which case, send_pcc_cmd will check for pending
1772          * CMD_WRITE commands by checking the pending_pcc_write_cmd, so this
1773          * CPU can be certain that its request will be delivered.
1774          *    In all cases, this CPU knows that its request will be delivered
1775          * by another CPU and can safely return.
1776          *
1777          * After getting the down_write we still need to check for
1778          * pending_pcc_write_cmd to take care of the following scenario
1779          *    The thread running this code could be scheduled out between
1780          * Phase-I and Phase-II. Before it is scheduled back on, another CPU
1781          * could have delivered the request to Platform by triggering the
1782          * doorbell and transferred the ownership of PCC to platform. So this
1783          * avoids triggering an unnecessary doorbell and more importantly before
1784          * triggering the doorbell it makes sure that the PCC channel ownership
1785          * is still with OSPM.
1786          *   pending_pcc_write_cmd can also be cleared by a different CPU, if
1787          * there was a pcc CMD_READ waiting on down_write and it steals the lock
1788          * before the pcc CMD_WRITE is completed. send_pcc_cmd checks for this
1789          * case during a CMD_READ and if there are pending writes it delivers
1790          * the write command before servicing the read command
1791          */
1792         if (CPC_IN_PCC(desired_reg) || CPC_IN_PCC(min_perf_reg) || CPC_IN_PCC(max_perf_reg)) {
1793                 if (down_write_trylock(&pcc_ss_data->pcc_lock)) {/* BEGIN Phase-II */
1794                         /* Update only if there are pending write commands */
1795                         if (pcc_ss_data->pending_pcc_write_cmd)
1796                                 send_pcc_cmd(pcc_ss_id, CMD_WRITE);
1797                         up_write(&pcc_ss_data->pcc_lock);       /* END Phase-II */
1798                 } else
1799                         /* Wait until pcc_write_cnt is updated by send_pcc_cmd */
1800                         wait_event(pcc_ss_data->pcc_write_wait_q,
1801                                    cpc_desc->write_cmd_id != pcc_ss_data->pcc_write_cnt);
1802
1803                 /* send_pcc_cmd updates the status in case of failure */
1804                 ret = cpc_desc->write_cmd_status;
1805         }
1806         return ret;
1807 }
1808 EXPORT_SYMBOL_GPL(cppc_set_perf);
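/*
 * Illustrative sketch: a target()-style callback would usually set only the
 * desired performance level and leave min_perf/max_perf at zero so that
 * those registers are not written (see the comment above the min/max writes
 * in cppc_set_perf()). The function name my_target() is hypothetical.
 *
 *	static int my_target(unsigned int cpu, u32 desired)
 *	{
 *		struct cppc_perf_ctrls ctrls = { .desired_perf = desired };
 *
 *		return cppc_set_perf(cpu, &ctrls);
 *	}
 */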
1809
1810 /**
1811  * cppc_get_transition_latency - returns frequency transition latency in ns
1812  * @cpu_num: CPU number for per_cpu().
1813  *
1814  * ACPI CPPC does not explicitly specify how a platform can specify the
1815  * transition latency for performance change requests. The closest we have
1816  * is the timing information from the PCCT tables which provides the info
1817  * on the number and frequency of PCC commands the platform can handle.
1818  *
1819  * If desired_reg is in the SystemMemory or SystemIo ACPI address space,
1820  * then assume there is no latency.
1821  */
1822 unsigned int cppc_get_transition_latency(int cpu_num)
1823 {
1824         /*
1825          * Expected transition latency is based on the PCCT timing values
1826          * Below are the definitions from the ACPI spec:
1827          * pcc_nominal- Expected latency to process a command, in microseconds
1828          * pcc_mpar   - The maximum number of periodic requests that the subspace
1829          *              channel can support, reported in commands per minute. 0
1830          *              indicates no limitation.
1831          * pcc_mrtt   - The minimum amount of time that OSPM must wait after the
1832          *              completion of a command before issuing the next command,
1833          *              in microseconds.
1834          */
1835         unsigned int latency_ns = 0;
1836         struct cpc_desc *cpc_desc;
1837         struct cpc_register_resource *desired_reg;
1838         int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num);
1839         struct cppc_pcc_data *pcc_ss_data;
1840
1841         cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
1842         if (!cpc_desc)
1843                 return CPUFREQ_ETERNAL;
1844
1845         desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
1846         if (CPC_IN_SYSTEM_MEMORY(desired_reg) || CPC_IN_SYSTEM_IO(desired_reg))
1847                 return 0;
1848         else if (!CPC_IN_PCC(desired_reg))
1849                 return CPUFREQ_ETERNAL;
1850
1851         if (pcc_ss_id < 0)
1852                 return CPUFREQ_ETERNAL;
1853
1854         pcc_ss_data = pcc_data[pcc_ss_id];
1855         if (pcc_ss_data->pcc_mpar)
1856                 latency_ns = 60 * (1000 * 1000 * 1000 / pcc_ss_data->pcc_mpar);
1857
1858         latency_ns = max(latency_ns, pcc_ss_data->pcc_nominal * 1000);
1859         latency_ns = max(latency_ns, pcc_ss_data->pcc_mrtt * 1000);
1860
1861         return latency_ns;
1862 }
1863 EXPORT_SYMBOL_GPL(cppc_get_transition_latency);
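/*
 * Worked example with hypothetical PCCT values: pcc_mpar = 4000 commands per
 * minute gives 60 * (1000000000 / 4000) = 15,000,000 ns; pcc_nominal = 2000 us
 * and pcc_mrtt = 1000 us give 2,000,000 ns and 1,000,000 ns respectively, so
 * the reported transition latency would be the maximum of the three, 15 ms.
 */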
1864
1865 /* Minimum struct length needed for the DMI processor entry we want */
1866 #define DMI_ENTRY_PROCESSOR_MIN_LENGTH  48
1867
1868 /* Offset in the DMI processor structure for the max frequency */
1869 #define DMI_PROCESSOR_MAX_SPEED         0x14
1870
1871 /* Callback function used to retrieve the max frequency from DMI */
1872 static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
1873 {
1874         const u8 *dmi_data = (const u8 *)dm;
1875         u16 *mhz = (u16 *)private;
1876
1877         if (dm->type == DMI_ENTRY_PROCESSOR &&
1878             dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) {
1879                 u16 val = (u16)get_unaligned((const u16 *)
1880                                 (dmi_data + DMI_PROCESSOR_MAX_SPEED));
1881                 *mhz = umax(val, *mhz);
1882         }
1883 }
1884
1885 /* Look up the max frequency in DMI */
1886 static u64 cppc_get_dmi_max_khz(void)
1887 {
1888         u16 mhz = 0;
1889
1890         dmi_walk(cppc_find_dmi_mhz, &mhz);
1891
1892         /*
1893          * Deliberately tiny fallback value (1 MHz), just in case DMI does not
1894          * report an actual max speed.
1895          */
1896         mhz = mhz ? mhz : 1;
1897
1898         return KHZ_PER_MHZ * mhz;
1899 }
1900
1901 /*
1902  * If CPPC lowest_freq and nominal_freq registers are exposed then we can
1903  * use them to convert perf to freq and vice versa. The conversion is
1904  * extrapolated as an affine function passing through the 2 points:
1905  *  - (Low perf, Low freq)
1906  *  - (Nominal perf, Nominal freq)
1907  */
1908 unsigned int cppc_perf_to_khz(struct cppc_perf_caps *caps, unsigned int perf)
1909 {
1910         s64 retval, offset = 0;
1911         static u64 max_khz;
1912         u64 mul, div;
1913
1914         if (caps->lowest_freq && caps->nominal_freq) {
1915                 /* Avoid special case when nominal_freq is equal to lowest_freq */
1916                 if (caps->lowest_freq == caps->nominal_freq) {
1917                         mul = caps->nominal_freq;
1918                         div = caps->nominal_perf;
1919                 } else {
1920                         mul = caps->nominal_freq - caps->lowest_freq;
1921                         div = caps->nominal_perf - caps->lowest_perf;
1922                 }
1923                 mul *= KHZ_PER_MHZ;
1924                 offset = caps->nominal_freq * KHZ_PER_MHZ -
1925                          div64_u64(caps->nominal_perf * mul, div);
1926         } else {
1927                 if (!max_khz)
1928                         max_khz = cppc_get_dmi_max_khz();
1929                 mul = max_khz;
1930                 div = caps->highest_perf;
1931         }
1932
1933         retval = offset + div64_u64(perf * mul, div);
1934         if (retval >= 0)
1935                 return retval;
1936         return 0;
1937 }
1938 EXPORT_SYMBOL_GPL(cppc_perf_to_khz);
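/*
 * Worked example with hypothetical capability values: lowest_freq = 800 MHz,
 * nominal_freq = 2000 MHz, lowest_perf = 40, nominal_perf = 100. Then
 * mul = (2000 - 800) * 1000 = 1,200,000, div = 100 - 40 = 60 and
 * offset = 2000 * 1000 - 100 * 1,200,000 / 60 = 0, so perf = 70 maps to
 * 0 + 70 * 1,200,000 / 60 = 1,400,000 kHz (1.4 GHz), halfway between the two
 * anchor points as expected.
 */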
1939
1940 unsigned int cppc_khz_to_perf(struct cppc_perf_caps *caps, unsigned int freq)
1941 {
1942         s64 retval, offset = 0;
1943         static u64 max_khz;
1944         u64 mul, div;
1945
1946         if (caps->lowest_freq && caps->nominal_freq) {
1947                 /* Avoid special case when nominal_freq is equal to lowest_freq */
1948                 if (caps->lowest_freq == caps->nominal_freq) {
1949                         mul = caps->nominal_perf;
1950                         div = caps->nominal_freq;
1951                 } else {
1952                         mul = caps->nominal_perf - caps->lowest_perf;
1953                         div = caps->nominal_freq - caps->lowest_freq;
1954                 }
1955                 /*
1956                  * We don't need to convert to kHz for computing offset and can
1957                  * directly use nominal_freq and lowest_freq as the div64_u64
1958                  * will remove the frequency unit.
1959                  */
1960                 offset = caps->nominal_perf -
1961                          div64_u64(caps->nominal_freq * mul, div);
1962                 /* But we need it for computing the perf level. */
1963                 div *= KHZ_PER_MHZ;
1964         } else {
1965                 if (!max_khz)
1966                         max_khz = cppc_get_dmi_max_khz();
1967                 mul = caps->highest_perf;
1968                 div = max_khz;
1969         }
1970
1971         retval = offset + div64_u64(freq * mul, div);
1972         if (retval >= 0)
1973                 return retval;
1974         return 0;
1975 }
1976 EXPORT_SYMBOL_GPL(cppc_khz_to_perf);
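/*
 * Worked example with the same hypothetical values as above: mul = 100 - 40
 * = 60 and div = 2000 - 800 = 1200, so offset = 100 - 2000 * 60 / 1200 = 0;
 * after scaling div to 1,200,000 kHz units, freq = 1,400,000 kHz yields
 * 0 + 1,400,000 * 60 / 1,200,000 = 70, recovering the perf level used in the
 * cppc_perf_to_khz() example.
 */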