// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPPC (Collaborative Processor Performance Control) methods used by CPUfreq drivers.
 *
 * (C) Copyright 2014, 2015 Linaro Ltd.
 * Author: Ashwin Chaugule <[email protected]>
 *
 * CPPC describes a few methods for controlling CPU performance using
 * information from a per CPU table called CPC. This table is described in
 * the ACPI v5.0+ specification. The table consists of a list of
 * registers which may be memory mapped or hardware registers and also may
 * include some static integer values.
 *
 * CPU performance is on an abstract continuous scale, as opposed to a
 * discretized P-state scale which is tied to CPU frequency only. In brief,
 * the basic operation involves:
 *
 * - OS makes a CPU performance request. (Can provide min and max bounds)
 *
 * - Platform (such as BMC) is free to optimize the request within the
 *   requested bounds depending on power/thermal budgets etc.
 *
 * - Platform conveys its decision back to OS
 *
 * The communication between OS and platform occurs through a medium called
 * the Platform Communication Channel (PCC). This is a generic mailbox-like
 * mechanism which includes doorbell semantics to indicate register updates.
 * See drivers/mailbox/pcc.c for details on PCC.
 *
 * Finer details about the PCC and CPPC spec are available in the ACPI v5.1 and
 * above specifications.
 */

#define pr_fmt(fmt)     "ACPI CPPC: " fmt

#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/ktime.h>
#include <linux/rwsem.h>
#include <linux/wait.h>
#include <linux/topology.h>

#include <acpi/cppc_acpi.h>

struct cppc_pcc_data {
        struct pcc_mbox_chan *pcc_channel;
        void __iomem *pcc_comm_addr;
        bool pcc_channel_acquired;
        unsigned int deadline_us;
        unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;

        bool pending_pcc_write_cmd;     /* Any pending/batched PCC write cmds? */
        bool platform_owns_pcc;         /* Ownership of PCC subspace */
        unsigned int pcc_write_cnt;     /* Running count of PCC write commands */

        /*
         * Lock to provide controlled access to the PCC channel.
         *
         * For performance-critical use cases (currently cppc_set_perf):
         *      We need to take the read lock and check if the channel belongs
         * to the OSPM before reading from or writing to the PCC subspace.
         *      We need to take the write lock before transferring channel
         * ownership to the platform via a doorbell.
         *      This allows us to batch a number of CPPC requests if they
         * happen to originate at about the same time.
         *
         * For non-performance-critical use cases (init):
         *      Take the write lock for all purposes, which gives exclusive
         * access.
         */
        struct rw_semaphore pcc_lock;

        /* Wait queue for CPUs whose requests were batched */
        wait_queue_head_t pcc_write_wait_q;
        ktime_t last_cmd_cmpl_time;
        ktime_t last_mpar_reset;
        int mpar_count;
        int refcount;
};

/* Array to represent the PCC channel per subspace ID */
static struct cppc_pcc_data *pcc_data[MAX_PCC_SUBSPACES];
/* The cpu_pcc_subspace_idx contains the per-CPU subspace ID */
static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx);

/*
 * The cpc_desc structure contains the ACPI register details
 * as described in the per CPU _CPC tables. The details
 * include the type of register (e.g. PCC, System IO, FFH etc.)
 * and destination addresses which let us READ/WRITE CPU performance
 * information using the appropriate I/O methods.
 */
static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);

/* pcc mapped address + header size + offset within PCC subspace */
#define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_comm_addr + \
                                                0x8 + (offs))

/* Check if a CPC register is in PCC */
#define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&             \
                                (cpc)->cpc_entry.reg.space_id ==        \
                                ACPI_ADR_SPACE_PLATFORM_COMM)

/* Evaluates to True if reg is a NULL register descriptor */
#define IS_NULL_REG(reg) ((reg)->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY && \
                                (reg)->address == 0 &&                  \
                                (reg)->bit_width == 0 &&                \
                                (reg)->bit_offset == 0 &&               \
                                (reg)->access_width == 0)

/* Evaluates to True if an optional cpc field is supported */
#define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ?          \
                                !!(cpc)->cpc_entry.int_value :          \
                                !IS_NULL_REG(&(cpc)->cpc_entry.reg))
/*
 * Arbitrary number of retries in case the remote processor is slow to
 * respond to PCC commands. Keeping it high enough to cover emulators where
 * the processors run painfully slow.
 */
#define NUM_RETRIES 500ULL

#define define_one_cppc_ro(_name)               \
static struct kobj_attribute _name =            \
__ATTR(_name, 0444, show_##_name, NULL)

#define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj)

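/*
 * show_cppc_data() generates a sysfs show() handler that calls the given
 * accessor for this CPU's cpc_desc, prints one member of the result, and
 * defines the matching read-only kobj_attribute via define_one_cppc_ro().
 */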
#define show_cppc_data(access_fn, struct_name, member_name)             \
        static ssize_t show_##member_name(struct kobject *kobj,        \
                                struct kobj_attribute *attr, char *buf) \
        {                                                               \
                struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);           \
                struct struct_name st_name = {0};                       \
                int ret;                                                \
                                                                        \
                ret = access_fn(cpc_ptr->cpu_id, &st_name);             \
                if (ret)                                                \
                        return ret;                                     \
                                                                        \
                return scnprintf(buf, PAGE_SIZE, "%llu\n",              \
                                (u64)st_name.member_name);              \
        }                                                               \
        define_one_cppc_ro(member_name)

show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_nonlinear_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_freq);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_freq);

show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);

151 show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
152 show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
153
154 static ssize_t show_feedback_ctrs(struct kobject *kobj,
155                 struct kobj_attribute *attr, char *buf)
156 {
157         struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
158         struct cppc_perf_fb_ctrs fb_ctrs = {0};
159         int ret;
160
161         ret = cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);
162         if (ret)
163                 return ret;
164
165         return scnprintf(buf, PAGE_SIZE, "ref:%llu del:%llu\n",
166                         fb_ctrs.reference, fb_ctrs.delivered);
167 }
168 define_one_cppc_ro(feedback_ctrs);
169
170 static struct attribute *cppc_attrs[] = {
171         &feedback_ctrs.attr,
172         &reference_perf.attr,
173         &wraparound_time.attr,
174         &highest_perf.attr,
175         &lowest_perf.attr,
176         &lowest_nonlinear_perf.attr,
177         &nominal_perf.attr,
178         &nominal_freq.attr,
179         &lowest_freq.attr,
180         NULL
181 };
182 ATTRIBUTE_GROUPS(cppc);
183
184 static struct kobj_type cppc_ktype = {
185         .sysfs_ops = &kobj_sysfs_ops,
186         .default_groups = cppc_groups,
187 };
188
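/*
 * Poll the PCC status register until the platform sets the command-complete
 * bit, i.e. until channel ownership returns to the OSPM. Optionally report
 * -EIO if the platform signalled an error for the last command.
 */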
static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit)
{
        int ret, status;
        struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
        struct acpi_pcct_shared_memory __iomem *generic_comm_base =
                pcc_ss_data->pcc_comm_addr;

        if (!pcc_ss_data->platform_owns_pcc)
                return 0;

        /*
         * Poll the PCC status register every 3 us (delay_us) for a maximum of
         * deadline_us (timeout_us) until the PCC command-complete bit is
         * set (cond).
         */
        ret = readw_relaxed_poll_timeout(&generic_comm_base->status, status,
                                        status & PCC_CMD_COMPLETE_MASK, 3,
                                        pcc_ss_data->deadline_us);

        if (likely(!ret)) {
                pcc_ss_data->platform_owns_pcc = false;
                if (chk_err_bit && (status & PCC_ERROR_MASK))
                        ret = -EIO;
        }

        if (unlikely(ret))
                pr_err("PCC check channel failed for ss: %d. ret=%d\n",
                       pcc_ss_id, ret);

        return ret;
}

/*
 * This function transfers the ownership of the PCC channel to the platform,
 * so it must be called while holding the write lock (pcc_lock).
 */
static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
{
        int ret = -EIO, i;
        struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
        struct acpi_pcct_shared_memory __iomem *generic_comm_base =
                pcc_ss_data->pcc_comm_addr;
        unsigned int time_delta;

        /*
         * For CMD_WRITE we know for a fact the caller should have checked
         * the channel before writing to PCC space.
         */
        if (cmd == CMD_READ) {
                /*
                 * If there are pending cpc_writes, then we stole the channel
                 * before write completion, so first send a WRITE command to
                 * the platform.
                 */
                if (pcc_ss_data->pending_pcc_write_cmd)
                        send_pcc_cmd(pcc_ss_id, CMD_WRITE);

                ret = check_pcc_chan(pcc_ss_id, false);
                if (ret)
                        goto end;
        } else /* CMD_WRITE */
                pcc_ss_data->pending_pcc_write_cmd = false;

        /*
         * Handle the Minimum Request Turnaround Time (MRTT):
         * "The minimum amount of time that OSPM must wait after the completion
         * of a command before issuing the next command, in microseconds."
         */
        if (pcc_ss_data->pcc_mrtt) {
                time_delta = ktime_us_delta(ktime_get(),
                                            pcc_ss_data->last_cmd_cmpl_time);
                if (pcc_ss_data->pcc_mrtt > time_delta)
                        udelay(pcc_ss_data->pcc_mrtt - time_delta);
        }

        /*
         * Handle a non-zero Maximum Periodic Access Rate (MPAR):
         * "The maximum number of periodic requests that the subspace channel can
         * support, reported in commands per minute. 0 indicates no limitation."
         *
         * This parameter should ideally be zero or large enough so that it can
         * handle the maximum number of requests that all the cores in the
         * system can collectively generate. If it is not, we will follow the
         * spec and just not send the request to the platform after hitting the
         * MPAR limit in any 60s window.
         */
        if (pcc_ss_data->pcc_mpar) {
                if (pcc_ss_data->mpar_count == 0) {
                        time_delta = ktime_ms_delta(ktime_get(),
                                                    pcc_ss_data->last_mpar_reset);
                        if ((time_delta < 60 * MSEC_PER_SEC) && pcc_ss_data->last_mpar_reset) {
                                pr_debug("PCC cmd for subspace %d not sent due to MPAR limit",
                                         pcc_ss_id);
                                ret = -EIO;
                                goto end;
                        }
                        pcc_ss_data->last_mpar_reset = ktime_get();
                        pcc_ss_data->mpar_count = pcc_ss_data->pcc_mpar;
                }
                pcc_ss_data->mpar_count--;
        }

        /* Write to the shared comm region. */
        writew_relaxed(cmd, &generic_comm_base->command);

        /* Flip CMD COMPLETE bit */
        writew_relaxed(0, &generic_comm_base->status);

        pcc_ss_data->platform_owns_pcc = true;

        /* Ring doorbell */
        ret = mbox_send_message(pcc_ss_data->pcc_channel->mchan, &cmd);
        if (ret < 0) {
                pr_err("Err sending PCC mbox message. ss: %d cmd:%d, ret:%d\n",
                       pcc_ss_id, cmd, ret);
                goto end;
        }

        /* Wait for completion and check for the PCC error bit */
        ret = check_pcc_chan(pcc_ss_id, true);

        if (pcc_ss_data->pcc_mrtt)
                pcc_ss_data->last_cmd_cmpl_time = ktime_get();

        if (pcc_ss_data->pcc_channel->mchan->mbox->txdone_irq)
                mbox_chan_txdone(pcc_ss_data->pcc_channel->mchan, ret);
        else
                mbox_client_txdone(pcc_ss_data->pcc_channel->mchan, ret);

end:
        if (cmd == CMD_WRITE) {
                if (unlikely(ret)) {
                        for_each_possible_cpu(i) {
                                struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i);

                                if (!desc)
                                        continue;

                                if (desc->write_cmd_id == pcc_ss_data->pcc_write_cnt)
                                        desc->write_cmd_status = ret;
                        }
                }
                pcc_ss_data->pcc_write_cnt++;
                wake_up_all(&pcc_ss_data->pcc_write_wait_q);
        }

        return ret;
}

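/* Mailbox framework callback, invoked when transmission of a PCC command completes. */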
static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
{
        if (ret < 0)
                pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
                                *(u16 *)msg, ret);
        else
                pr_debug("TX completed. CMD sent:%x, ret:%d\n",
                                *(u16 *)msg, ret);
}

static struct mbox_client cppc_mbox_cl = {
        .tx_done = cppc_chan_tx_done,
        .knows_txdone = true,
};

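/*
 * Evaluate the optional _PSD object for this CPU and extract the P-state
 * dependency domain info into cpc_ptr->domain_info. A missing _PSD is not an
 * error; malformed _PSD data is.
 */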
static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
{
        int result = -EFAULT;
        acpi_status status = AE_OK;
        struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
        struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
        struct acpi_buffer state = {0, NULL};
        union acpi_object *psd = NULL;
        struct acpi_psd_package *pdomain;

        status = acpi_evaluate_object_typed(handle, "_PSD", NULL,
                                            &buffer, ACPI_TYPE_PACKAGE);
        if (status == AE_NOT_FOUND)     /* _PSD is optional */
                return 0;
        if (ACPI_FAILURE(status))
                return -ENODEV;

        psd = buffer.pointer;
        if (!psd || psd->package.count != 1) {
                pr_debug("Invalid _PSD data\n");
                goto end;
        }

        pdomain = &(cpc_ptr->domain_info);

        state.length = sizeof(struct acpi_psd_package);
        state.pointer = pdomain;

        status = acpi_extract_package(&(psd->package.elements[0]),
                &format, &state);
        if (ACPI_FAILURE(status)) {
                pr_debug("Invalid _PSD data for CPU:%d\n", cpc_ptr->cpu_id);
                goto end;
        }

        if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
                pr_debug("Unknown _PSD:num_entries for CPU:%d\n", cpc_ptr->cpu_id);
                goto end;
        }

        if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
                pr_debug("Unknown _PSD:revision for CPU: %d\n", cpc_ptr->cpu_id);
                goto end;
        }

        if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
            pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
            pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
                pr_debug("Invalid _PSD:coord_type for CPU:%d\n", cpc_ptr->cpu_id);
                goto end;
        }

        result = 0;
end:
        kfree(buffer.pointer);
        return result;
}

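/*
 * acpi_cpc_valid() - Check that a CPC descriptor was parsed for every
 * possible CPU, i.e. that CPPC is usable system-wide.
 */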
bool acpi_cpc_valid(void)
{
        struct cpc_desc *cpc_ptr;
        int cpu;

        for_each_possible_cpu(cpu) {
                cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
                if (!cpc_ptr)
                        return false;
        }

        return true;
}
EXPORT_SYMBOL_GPL(acpi_cpc_valid);

/**
 * acpi_get_psd_map - Map the CPUs in the freq domain of a given CPU.
 * @cpu: Find all CPUs that share a domain with cpu.
 * @cpu_data: Pointer to CPU specific CPPC data including PSD info.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data)
{
        struct cpc_desc *cpc_ptr, *match_cpc_ptr;
        struct acpi_psd_package *match_pdomain;
        struct acpi_psd_package *pdomain;
        int count_target, i;

        /*
         * Now that we have _PSD data from all CPUs, let's setup P-state
         * domain info.
         */
        cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
        if (!cpc_ptr)
                return -EFAULT;

        pdomain = &(cpc_ptr->domain_info);
        cpumask_set_cpu(cpu, cpu_data->shared_cpu_map);
        if (pdomain->num_processors <= 1)
                return 0;

        /* Validate the domain info */
        count_target = pdomain->num_processors;
        if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
                cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ALL;
        else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
                cpu_data->shared_type = CPUFREQ_SHARED_TYPE_HW;
        else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
                cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ANY;

        for_each_possible_cpu(i) {
                if (i == cpu)
                        continue;

                match_cpc_ptr = per_cpu(cpc_desc_ptr, i);
                if (!match_cpc_ptr)
                        goto err_fault;

                match_pdomain = &(match_cpc_ptr->domain_info);
                if (match_pdomain->domain != pdomain->domain)
                        continue;

                /* Here i and cpu are in the same domain */
                if (match_pdomain->num_processors != count_target)
                        goto err_fault;

                if (pdomain->coord_type != match_pdomain->coord_type)
                        goto err_fault;

                cpumask_set_cpu(i, cpu_data->shared_cpu_map);
        }

        return 0;

err_fault:
        /* Assume no coordination on any error parsing domain info */
        cpumask_clear(cpu_data->shared_cpu_map);
        cpumask_set_cpu(cpu, cpu_data->shared_cpu_map);
        cpu_data->shared_type = CPUFREQ_SHARED_TYPE_NONE;

        return -EFAULT;
}
EXPORT_SYMBOL_GPL(acpi_get_psd_map);

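/*
 * Request the PCC mailbox channel for the given subspace index and map its
 * shared memory region, recording the channel's timing parameters (latency,
 * MRTT, MPAR) for later use by send_pcc_cmd().
 */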
static int register_pcc_channel(int pcc_ss_idx)
{
        struct pcc_mbox_chan *pcc_chan;
        u64 usecs_lat;

        if (pcc_ss_idx >= 0) {
                pcc_chan = pcc_mbox_request_channel(&cppc_mbox_cl, pcc_ss_idx);

                if (IS_ERR(pcc_chan)) {
                        pr_err("Failed to find PCC channel for subspace %d\n",
                               pcc_ss_idx);
                        return -ENODEV;
                }

                pcc_data[pcc_ss_idx]->pcc_channel = pcc_chan;
                /*
                 * pcc_chan->latency is just a nominal value. In reality
                 * the remote processor could be much slower to reply.
                 * So add an arbitrary amount of wait on top of nominal.
                 */
                usecs_lat = NUM_RETRIES * pcc_chan->latency;
                pcc_data[pcc_ss_idx]->deadline_us = usecs_lat;
                pcc_data[pcc_ss_idx]->pcc_mrtt = pcc_chan->min_turnaround_time;
                pcc_data[pcc_ss_idx]->pcc_mpar = pcc_chan->max_access_rate;
                pcc_data[pcc_ss_idx]->pcc_nominal = pcc_chan->latency;

                pcc_data[pcc_ss_idx]->pcc_comm_addr =
                        acpi_os_ioremap(pcc_chan->shmem_base_addr,
                                        pcc_chan->shmem_size);
                if (!pcc_data[pcc_ss_idx]->pcc_comm_addr) {
                        pr_err("Failed to ioremap PCC comm region mem for %d\n",
                               pcc_ss_idx);
                        return -ENOMEM;
                }

                /* Set flag so that we don't come here for each CPU. */
                pcc_data[pcc_ss_idx]->pcc_channel_acquired = true;
        }

        return 0;
}

/**
 * cpc_ffh_supported() - check if FFH reading supported
 *
 * Check if the architecture has support for functional fixed hardware
 * read/write capability.
 *
 * Return: true for supported, false for not supported
 */
bool __weak cpc_ffh_supported(void)
{
        return false;
}

/**
 * pcc_data_alloc() - Allocate the pcc_data memory for pcc subspace
 *
 * Check and allocate the cppc_pcc_data memory.
 * In some processor configurations it is possible that the same subspace
 * is shared between multiple CPUs. This is seen especially in CPUs
 * with hardware multi-threading support.
 *
 * Return: 0 for success, errno for failure
 */
static int pcc_data_alloc(int pcc_ss_id)
{
        if (pcc_ss_id < 0 || pcc_ss_id >= MAX_PCC_SUBSPACES)
                return -EINVAL;

        if (pcc_data[pcc_ss_id]) {
                pcc_data[pcc_ss_id]->refcount++;
        } else {
                pcc_data[pcc_ss_id] = kzalloc(sizeof(struct cppc_pcc_data),
                                              GFP_KERNEL);
                if (!pcc_data[pcc_ss_id])
                        return -ENOMEM;
                pcc_data[pcc_ss_id]->refcount++;
        }

        return 0;
}

/* Check if the CPPC revision + num_ent combination is supported */
static bool is_cppc_supported(int revision, int num_ent)
{
        int expected_num_ent;

        switch (revision) {
        case CPPC_V2_REV:
                expected_num_ent = CPPC_V2_NUM_ENT;
                break;
        case CPPC_V3_REV:
                expected_num_ent = CPPC_V3_NUM_ENT;
                break;
        default:
                pr_debug("Firmware exports unsupported CPPC revision: %d\n",
                        revision);
                return false;
        }

        if (expected_num_ent != num_ent) {
                pr_debug("Firmware exports %d entries. Expected: %d for CPPC rev:%d\n",
                        num_ent, expected_num_ent, revision);
                return false;
        }

        return true;
}

/*
 * An example CPC table looks like the following.
 *
 *  Name (_CPC, Package() {
 *      17,                                                     // NumEntries
 *      1,                                                      // Revision
 *      ResourceTemplate() {Register(PCC, 32, 0, 0x120, 2)},    // Highest Performance
 *      ResourceTemplate() {Register(PCC, 32, 0, 0x124, 2)},    // Nominal Performance
 *      ResourceTemplate() {Register(PCC, 32, 0, 0x128, 2)},    // Lowest Nonlinear Performance
 *      ResourceTemplate() {Register(PCC, 32, 0, 0x12C, 2)},    // Lowest Performance
 *      ResourceTemplate() {Register(PCC, 32, 0, 0x130, 2)},    // Guaranteed Performance Register
 *      ResourceTemplate() {Register(PCC, 32, 0, 0x110, 2)},    // Desired Performance Register
 *      ResourceTemplate() {Register(SystemMemory, 0, 0, 0, 0)},
 *      ...
 *      ...
 *      ...
 *  })
 *
 * Each Register() encodes how to access that specific register.
 * e.g. a sample PCC entry has the following encoding:
 *
 *  Register (
 *      PCC,    // AddressSpaceKeyword
 *      8,      // RegisterBitWidth
 *      8,      // RegisterBitOffset
 *      0x30,   // RegisterAddress
 *      9,      // AccessSize (subspace ID)
 *  )
 */

#ifndef init_freq_invariance_cppc
static inline void init_freq_invariance_cppc(void) { }
#endif

/**
 * acpi_cppc_processor_probe - Search for per CPU _CPC objects.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_cppc_processor_probe(struct acpi_processor *pr)
{
        struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
        union acpi_object *out_obj, *cpc_obj;
        struct cpc_desc *cpc_ptr;
        struct cpc_reg *gas_t;
        struct device *cpu_dev;
        acpi_handle handle = pr->handle;
        unsigned int num_ent, i, cpc_rev;
        int pcc_subspace_id = -1;
        acpi_status status;
        int ret = -EFAULT;

        /* Parse the ACPI _CPC table for this CPU. */
        status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
                        ACPI_TYPE_PACKAGE);
        if (ACPI_FAILURE(status)) {
                ret = -ENODEV;
                goto out_buf_free;
        }

        out_obj = (union acpi_object *) output.pointer;

        cpc_ptr = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL);
        if (!cpc_ptr) {
                ret = -ENOMEM;
                goto out_buf_free;
        }

        /* First entry is NumEntries. */
        cpc_obj = &out_obj->package.elements[0];
        if (cpc_obj->type == ACPI_TYPE_INTEGER) {
                num_ent = cpc_obj->integer.value;
        } else {
                pr_debug("Unexpected entry type(%d) for NumEntries\n",
                                cpc_obj->type);
                goto out_free;
        }
        cpc_ptr->num_entries = num_ent;

        /* Second entry should be revision. */
        cpc_obj = &out_obj->package.elements[1];
        if (cpc_obj->type == ACPI_TYPE_INTEGER) {
                cpc_rev = cpc_obj->integer.value;
        } else {
                pr_debug("Unexpected entry type(%d) for Revision\n",
                                cpc_obj->type);
                goto out_free;
        }
        cpc_ptr->version = cpc_rev;

        if (!is_cppc_supported(cpc_rev, num_ent))
                goto out_free;

        /* Iterate through the remaining entries in _CPC */
        for (i = 2; i < num_ent; i++) {
                cpc_obj = &out_obj->package.elements[i];

                if (cpc_obj->type == ACPI_TYPE_INTEGER) {
                        cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER;
                        cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value;
                } else if (cpc_obj->type == ACPI_TYPE_BUFFER) {
                        gas_t = (struct cpc_reg *)
                                cpc_obj->buffer.pointer;

                        /*
                         * The PCC Subspace index is encoded inside
                         * the CPC table entries. The same PCC index
                         * will be used for all the PCC entries,
                         * so extract it only once.
                         */
                        if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
                                if (pcc_subspace_id < 0) {
                                        pcc_subspace_id = gas_t->access_width;
                                        if (pcc_data_alloc(pcc_subspace_id))
                                                goto out_free;
                                } else if (pcc_subspace_id != gas_t->access_width) {
                                        pr_debug("Mismatched PCC ids.\n");
                                        goto out_free;
                                }
                        } else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
                                if (gas_t->address) {
                                        void __iomem *addr;

                                        addr = ioremap(gas_t->address, gas_t->bit_width/8);
                                        if (!addr)
                                                goto out_free;
                                        cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
                                }
                        } else {
                                if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
                                        /* Support only PCC, SystemMemory and FFH type regs */
                                        pr_debug("Unsupported register type: %d\n", gas_t->space_id);
                                        goto out_free;
                                }
                        }

                        cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
                        memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
                } else {
                        pr_debug("Err in entry:%d in CPC table of CPU:%d\n", i, pr->id);
                        goto out_free;
                }
        }
        per_cpu(cpu_pcc_subspace_idx, pr->id) = pcc_subspace_id;

        /*
         * Initialize the remaining cpc_regs as unsupported.
         * Example: In case FW exposes CPPC v2, the below loop will initialize
         * LOWEST_FREQ and NOMINAL_FREQ regs as unsupported.
         */
        for (i = num_ent - 2; i < MAX_CPC_REG_ENT; i++) {
                cpc_ptr->cpc_regs[i].type = ACPI_TYPE_INTEGER;
                cpc_ptr->cpc_regs[i].cpc_entry.int_value = 0;
        }

        /* Store CPU Logical ID */
        cpc_ptr->cpu_id = pr->id;

        /* Parse PSD data for this CPU */
        ret = acpi_get_psd(cpc_ptr, handle);
        if (ret)
                goto out_free;

        /* Register the PCC channel once for each PCC subspace ID. */
        if (pcc_subspace_id >= 0 && !pcc_data[pcc_subspace_id]->pcc_channel_acquired) {
                ret = register_pcc_channel(pcc_subspace_id);
                if (ret)
                        goto out_free;

                init_rwsem(&pcc_data[pcc_subspace_id]->pcc_lock);
                init_waitqueue_head(&pcc_data[pcc_subspace_id]->pcc_write_wait_q);
        }

        /* Everything looks okay */
        pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);

        /* Add per logical CPU nodes for reading its feedback counters. */
        cpu_dev = get_cpu_device(pr->id);
        if (!cpu_dev) {
                ret = -EINVAL;
                goto out_free;
        }

        /* Plug this CPU's CPC descriptor into the per-CPU pointer. */
        per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;

        ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
                        "acpi_cppc");
        if (ret) {
                per_cpu(cpc_desc_ptr, pr->id) = NULL;
                kobject_put(&cpc_ptr->kobj);
                goto out_free;
        }

        init_freq_invariance_cppc();

        kfree(output.pointer);
        return 0;

out_free:
        /* Free all the mapped sys mem areas for this CPU */
        for (i = 2; i < cpc_ptr->num_entries; i++) {
                void __iomem *addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;

                if (addr)
                        iounmap(addr);
        }
        kfree(cpc_ptr);

out_buf_free:
        kfree(output.pointer);
        return ret;
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);

/**
 * acpi_cppc_processor_exit - Cleanup CPC structs.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: Void
 */
void acpi_cppc_processor_exit(struct acpi_processor *pr)
{
        struct cpc_desc *cpc_ptr;
        unsigned int i;
        void __iomem *addr;
        int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, pr->id);

        if (pcc_ss_id >= 0 && pcc_data[pcc_ss_id]) {
                if (pcc_data[pcc_ss_id]->pcc_channel_acquired) {
                        pcc_data[pcc_ss_id]->refcount--;
                        if (!pcc_data[pcc_ss_id]->refcount) {
                                pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel);
                                kfree(pcc_data[pcc_ss_id]);
                                pcc_data[pcc_ss_id] = NULL;
                        }
                }
        }

        cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
        if (!cpc_ptr)
                return;

        /* Free all the mapped sys mem areas for this CPU */
        for (i = 2; i < cpc_ptr->num_entries; i++) {
                addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
                if (addr)
                        iounmap(addr);
        }

        kobject_put(&cpc_ptr->kobj);
        kfree(cpc_ptr);
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);

/**
 * cpc_read_ffh() - Read FFH register
 * @cpunum:     CPU number to read
 * @reg:        cppc register information
 * @val:        placeholder for return value
 *
 * Read bit_width bits from a specified address and bit_offset
 *
 * Return: 0 for success, or an error code otherwise
 */
int __weak cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
{
        return -ENOTSUPP;
}

/**
 * cpc_write_ffh() - Write FFH register
 * @cpunum:     CPU number to write
 * @reg:        cppc register information
 * @val:        value to write
 *
 * Write value of bit_width bits to a specified address and bit_offset
 *
 * Return: 0 for success, or an error code otherwise
 */
int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
{
        return -ENOTSUPP;
}

/*
 * Since cpc_read() and cpc_write() are called while holding pcc_lock, they
 * should be as fast as possible. We have already mapped the PCC subspace
 * during init, so we can directly write to it.
 */

static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
{
        int ret_val = 0;
        void __iomem *vaddr = NULL;
        int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
        struct cpc_reg *reg = &reg_res->cpc_entry.reg;

        if (reg_res->type == ACPI_TYPE_INTEGER) {
                *val = reg_res->cpc_entry.int_value;
                return ret_val;
        }

        *val = 0;
        if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
                vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
        else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
                vaddr = reg_res->sys_mem_vaddr;
        else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
                return cpc_read_ffh(cpu, reg, val);
        else
                return acpi_os_read_memory((acpi_physical_address)reg->address,
                                val, reg->bit_width);

        switch (reg->bit_width) {
        case 8:
                *val = readb_relaxed(vaddr);
                break;
        case 16:
                *val = readw_relaxed(vaddr);
                break;
        case 32:
                *val = readl_relaxed(vaddr);
                break;
        case 64:
                *val = readq_relaxed(vaddr);
                break;
        default:
                pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
                         reg->bit_width, pcc_ss_id);
                ret_val = -EFAULT;
        }

        return ret_val;
}

static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
{
        int ret_val = 0;
        void __iomem *vaddr = NULL;
        int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
        struct cpc_reg *reg = &reg_res->cpc_entry.reg;

        if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
                vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
        else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
                vaddr = reg_res->sys_mem_vaddr;
        else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
                return cpc_write_ffh(cpu, reg, val);
        else
                return acpi_os_write_memory((acpi_physical_address)reg->address,
                                val, reg->bit_width);

        switch (reg->bit_width) {
        case 8:
                writeb_relaxed(val, vaddr);
                break;
        case 16:
                writew_relaxed(val, vaddr);
                break;
        case 32:
                writel_relaxed(val, vaddr);
                break;
        case 64:
                writeq_relaxed(val, vaddr);
                break;
        default:
                pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
                         reg->bit_width, pcc_ss_id);
                ret_val = -EFAULT;
                break;
        }

        return ret_val;
}

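/*
 * Read a single CPC performance register for @cpunum. If the register lives
 * in PCC space, take the channel write lock and ring the doorbell with a
 * CMD_READ first so that the platform refreshes the shared memory region.
 */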
static int cppc_get_perf(int cpunum, enum cppc_regs reg_idx, u64 *perf)
{
        struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
        struct cpc_register_resource *reg;

        if (!cpc_desc) {
                pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
                return -ENODEV;
        }

        reg = &cpc_desc->cpc_regs[reg_idx];

        if (CPC_IN_PCC(reg)) {
                int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
                struct cppc_pcc_data *pcc_ss_data = NULL;
                int ret = 0;

                if (pcc_ss_id < 0)
                        return -EIO;

                pcc_ss_data = pcc_data[pcc_ss_id];

                down_write(&pcc_ss_data->pcc_lock);

                if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0)
                        cpc_read(cpunum, reg, perf);
                else
                        ret = -EIO;

                up_write(&pcc_ss_data->pcc_lock);

                return ret;
        }

        cpc_read(cpunum, reg, perf);

        return 0;
}

/**
 * cppc_get_desired_perf - Get the desired performance register value.
 * @cpunum: CPU from which to get desired performance.
 * @desired_perf: Return address.
 *
 * Return: 0 for success, -EIO otherwise.
 */
int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
{
        return cppc_get_perf(cpunum, DESIRED_PERF, desired_perf);
}
EXPORT_SYMBOL_GPL(cppc_get_desired_perf);

/**
 * cppc_get_nominal_perf - Get the nominal performance register value.
 * @cpunum: CPU from which to get nominal performance.
 * @nominal_perf: Return address.
 *
 * Return: 0 for success, -EIO otherwise.
 */
int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf)
{
        return cppc_get_perf(cpunum, NOMINAL_PERF, nominal_perf);
}

/**
 * cppc_get_perf_caps - Get a CPU's performance capabilities.
 * @cpunum: CPU from which to get capabilities info.
 * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
 *
 * Return: 0 for success with perf_caps populated else -ERRNO.
 */
int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
{
        struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
        struct cpc_register_resource *highest_reg, *lowest_reg,
                *lowest_non_linear_reg, *nominal_reg, *guaranteed_reg,
                *low_freq_reg = NULL, *nom_freq_reg = NULL;
        u64 high, low, guaranteed, nom, min_nonlinear, low_f = 0, nom_f = 0;
        int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
        struct cppc_pcc_data *pcc_ss_data = NULL;
        int ret = 0, regs_in_pcc = 0;

        if (!cpc_desc) {
                pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
                return -ENODEV;
        }

        highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
        lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
        lowest_non_linear_reg = &cpc_desc->cpc_regs[LOW_NON_LINEAR_PERF];
        nominal_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
        low_freq_reg = &cpc_desc->cpc_regs[LOWEST_FREQ];
        nom_freq_reg = &cpc_desc->cpc_regs[NOMINAL_FREQ];
        guaranteed_reg = &cpc_desc->cpc_regs[GUARANTEED_PERF];

        /* Are any of the regs PCC? */
        if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
                CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg) ||
                CPC_IN_PCC(low_freq_reg) || CPC_IN_PCC(nom_freq_reg)) {
                if (pcc_ss_id < 0) {
                        pr_debug("Invalid pcc_ss_id\n");
                        return -ENODEV;
                }
                pcc_ss_data = pcc_data[pcc_ss_id];
                regs_in_pcc = 1;
                down_write(&pcc_ss_data->pcc_lock);
                /* Ring doorbell once to update PCC subspace */
                if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
                        ret = -EIO;
                        goto out_err;
                }
        }

        cpc_read(cpunum, highest_reg, &high);
        perf_caps->highest_perf = high;

        cpc_read(cpunum, lowest_reg, &low);
        perf_caps->lowest_perf = low;

        cpc_read(cpunum, nominal_reg, &nom);
        perf_caps->nominal_perf = nom;

        if (guaranteed_reg->type != ACPI_TYPE_BUFFER ||
            IS_NULL_REG(&guaranteed_reg->cpc_entry.reg)) {
                perf_caps->guaranteed_perf = 0;
        } else {
                cpc_read(cpunum, guaranteed_reg, &guaranteed);
                perf_caps->guaranteed_perf = guaranteed;
        }

        cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
        perf_caps->lowest_nonlinear_perf = min_nonlinear;

        if (!high || !low || !nom || !min_nonlinear)
                ret = -EFAULT;

        /* Read optional lowest and nominal frequencies if present */
        if (CPC_SUPPORTED(low_freq_reg))
                cpc_read(cpunum, low_freq_reg, &low_f);

        if (CPC_SUPPORTED(nom_freq_reg))
                cpc_read(cpunum, nom_freq_reg, &nom_f);

        perf_caps->lowest_freq = low_f;
        perf_caps->nominal_freq = nom_f;

out_err:
        if (regs_in_pcc)
                up_write(&pcc_ss_data->pcc_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_caps);

/**
 * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
 * @cpunum: CPU from which to read counters.
 * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
 *
 * Return: 0 for success with perf_fb_ctrs populated else -ERRNO.
 */
int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
{
        struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
        struct cpc_register_resource *delivered_reg, *reference_reg,
                *ref_perf_reg, *ctr_wrap_reg;
        int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
        struct cppc_pcc_data *pcc_ss_data = NULL;
        u64 delivered, reference, ref_perf, ctr_wrap_time;
        int ret = 0, regs_in_pcc = 0;

        if (!cpc_desc) {
                pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
                return -ENODEV;
        }

        delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
        reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
        ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
        ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME];

        /*
         * If the reference perf register is not supported then we should
         * use the nominal perf value.
         */
        if (!CPC_SUPPORTED(ref_perf_reg))
                ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];

        /* Are any of the regs PCC? */
        if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) ||
                CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) {
                if (pcc_ss_id < 0) {
                        pr_debug("Invalid pcc_ss_id\n");
                        return -ENODEV;
                }
                pcc_ss_data = pcc_data[pcc_ss_id];
                down_write(&pcc_ss_data->pcc_lock);
                regs_in_pcc = 1;
                /* Ring doorbell once to update PCC subspace */
                if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
                        ret = -EIO;
                        goto out_err;
                }
        }

        cpc_read(cpunum, delivered_reg, &delivered);
        cpc_read(cpunum, reference_reg, &reference);
        cpc_read(cpunum, ref_perf_reg, &ref_perf);

        /*
         * Per spec, if the ctr_wrap_time optional register is unsupported,
         * then the performance counters are assumed to never wrap during the
         * lifetime of the platform.
         */
        ctr_wrap_time = (u64)(~((u64)0));
        if (CPC_SUPPORTED(ctr_wrap_reg))
                cpc_read(cpunum, ctr_wrap_reg, &ctr_wrap_time);

        if (!delivered || !reference || !ref_perf) {
                ret = -EFAULT;
                goto out_err;
        }

        perf_fb_ctrs->delivered = delivered;
        perf_fb_ctrs->reference = reference;
        perf_fb_ctrs->reference_perf = ref_perf;
        perf_fb_ctrs->wraparound_time = ctr_wrap_time;
out_err:
        if (regs_in_pcc)
                up_write(&pcc_ss_data->pcc_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);

/**
 * cppc_set_perf - Set a CPU's performance controls.
 * @cpu: CPU for which to set performance controls.
 * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
 *
 * Return: 0 for success, -ERRNO otherwise.
 */
int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
{
        struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
        struct cpc_register_resource *desired_reg;
        int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
        struct cppc_pcc_data *pcc_ss_data = NULL;
        int ret = 0;

        if (!cpc_desc) {
                pr_debug("No CPC descriptor for CPU:%d\n", cpu);
                return -ENODEV;
        }

        desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];

        /*
         * This is Phase-I, where we want to write to the CPC registers:
         * -> We want all CPUs to be able to execute this phase in parallel.
         *
         * Since the read lock can be acquired by multiple CPUs simultaneously,
         * we achieve that goal here.
         */
        if (CPC_IN_PCC(desired_reg)) {
                if (pcc_ss_id < 0) {
                        pr_debug("Invalid pcc_ss_id\n");
                        return -ENODEV;
                }
                pcc_ss_data = pcc_data[pcc_ss_id];
                down_read(&pcc_ss_data->pcc_lock); /* BEGIN Phase-I */
                if (pcc_ss_data->platform_owns_pcc) {
                        ret = check_pcc_chan(pcc_ss_id, false);
                        if (ret) {
                                up_read(&pcc_ss_data->pcc_lock);
                                return ret;
                        }
                }
                /*
                 * Update pending_pcc_write_cmd to make sure a PCC CMD_READ
                 * will not arrive and steal the channel during the switch to
                 * the write lock.
                 */
                pcc_ss_data->pending_pcc_write_cmd = true;
                cpc_desc->write_cmd_id = pcc_ss_data->pcc_write_cnt;
                cpc_desc->write_cmd_status = 0;
        }

        /*
         * Skip writing MIN/MAX until Linux knows how to come up with
         * useful values.
         */
        cpc_write(cpu, desired_reg, perf_ctrls->desired_perf);

        if (CPC_IN_PCC(desired_reg))
                up_read(&pcc_ss_data->pcc_lock);        /* END Phase-I */
        /*
         * This is Phase-II, where we transfer the ownership of PCC to the
         * platform.
         *
         * Short summary: think of a group of cppc_set_perf requests that
         * happen in a short overlapping interval. The last CPU to come out
         * of Phase-I will enter Phase-II and ring the doorbell.
         *
         * We have the following requirements for Phase-II:
         *     1. We want to execute Phase-II only when there are no CPUs
         * currently executing in Phase-I.
         *     2. Once we start Phase-II we want to prevent all other CPUs
         * from entering Phase-I.
         *     3. We want only one CPU among all those who went through
         * Phase-I to run Phase-II.
         *
         * If write_trylock fails to get the lock and doesn't transfer the
         * PCC ownership to the platform, then one of the following will be
         * TRUE:
         *     1. There is at least one CPU in Phase-I which will later
         * execute write_trylock, so the CPUs in Phase-I will be responsible
         * for executing the Phase-II.
         *     2. Some other CPU has beaten this CPU to successfully execute
         * the write_trylock and has already acquired the write lock. We know
         * for a fact that the other CPU couldn't have acquired the write lock
         * before this CPU's Phase-I, as we held the read lock.
         *     3. Some other CPU executing a pcc CMD_READ has stolen the
         * down_write, in which case send_pcc_cmd will check for pending
         * CMD_WRITE commands by checking pending_pcc_write_cmd.
         *    So in all cases, this CPU knows that its request will be
         * delivered by another CPU and can return.
         *
         * After getting the down_write we still need to check for
         * pending_pcc_write_cmd to take care of the following scenario:
         *    The thread running this code could be scheduled out between
         * Phase-I and Phase-II. Before it is scheduled back on, another CPU
         * could have delivered the request to the platform by triggering the
         * doorbell and transferred the ownership of PCC to the platform. So
         * this avoids triggering an unnecessary doorbell and, more
         * importantly, before triggering the doorbell it makes sure that the
         * PCC channel ownership is still with the OSPM.
         *    pending_pcc_write_cmd can also be cleared by a different CPU if
         * there was a pcc CMD_READ waiting on down_write and it steals the
         * lock before the pcc CMD_WRITE is completed. send_pcc_cmd checks for
         * this case during a CMD_READ, and if there are pending writes it
         * delivers the write command before servicing the read command.
         */
        if (CPC_IN_PCC(desired_reg)) {
                if (down_write_trylock(&pcc_ss_data->pcc_lock)) {/* BEGIN Phase-II */
                        /* Update only if there are pending write commands */
                        if (pcc_ss_data->pending_pcc_write_cmd)
                                send_pcc_cmd(pcc_ss_id, CMD_WRITE);
                        up_write(&pcc_ss_data->pcc_lock);       /* END Phase-II */
                } else
                        /* Wait until pcc_write_cnt is updated by send_pcc_cmd */
                        wait_event(pcc_ss_data->pcc_write_wait_q,
                                   cpc_desc->write_cmd_id != pcc_ss_data->pcc_write_cnt);

                /* send_pcc_cmd updates the status in case of failure */
                ret = cpc_desc->write_cmd_status;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(cppc_set_perf);

/**
 * cppc_get_transition_latency - returns frequency transition latency in ns
 * @cpu_num: CPU number for which the latency is requested.
 *
 * ACPI CPPC does not explicitly specify how a platform can specify the
 * transition latency for performance change requests. The closest we have
 * is the timing information from the PCCT tables which provides the info
 * on the number and frequency of PCC commands the platform can handle.
 */
unsigned int cppc_get_transition_latency(int cpu_num)
{
        /*
         * The expected transition latency is based on the PCCT timing values.
         * Below are the definitions from the ACPI spec:
         * pcc_nominal- Expected latency to process a command, in microseconds
         * pcc_mpar   - The maximum number of periodic requests that the subspace
         *              channel can support, reported in commands per minute. 0
         *              indicates no limitation.
         * pcc_mrtt   - The minimum amount of time that OSPM must wait after the
         *              completion of a command before issuing the next command,
         *              in microseconds.
         */
        unsigned int latency_ns = 0;
        struct cpc_desc *cpc_desc;
        struct cpc_register_resource *desired_reg;
        int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num);
        struct cppc_pcc_data *pcc_ss_data;

        cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
        if (!cpc_desc)
                return CPUFREQ_ETERNAL;

        desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
        if (!CPC_IN_PCC(desired_reg))
                return CPUFREQ_ETERNAL;

        if (pcc_ss_id < 0)
                return CPUFREQ_ETERNAL;

        pcc_ss_data = pcc_data[pcc_ss_id];
        if (pcc_ss_data->pcc_mpar)
                latency_ns = 60 * (1000 * 1000 * 1000 / pcc_ss_data->pcc_mpar);

        latency_ns = max(latency_ns, pcc_ss_data->pcc_nominal * 1000);
        latency_ns = max(latency_ns, pcc_ss_data->pcc_mrtt * 1000);

        return latency_ns;
}
EXPORT_SYMBOL_GPL(cppc_get_transition_latency);