// SPDX-License-Identifier: GPL-2.0-only
/*
 * Resource Director Technology (RDT)
 * - Cache Allocation code.
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * More information about RDT can be found in the Intel(R) x86 Architecture
 * Software Developer's Manual, June 2016, volume 3, section 17.17.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/kernfs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/tick.h>
/*
 * Check whether the MBA bandwidth percentage value is valid. The value is
 * checked against the minimum and maximum bandwidth values specified by the
 * hardware. The allocated bandwidth percentage is rounded up to the next
 * control step available on the hardware.
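 *
 * For example (values are hypothetical, not taken from any particular SKU):
 * with membw.min_bw = 10, default_ctrl = 100 and membw.bw_gran = 10, a
 * request of "17" passes the range check and is rounded up to 20, the next
 * available control step.
 */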
static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r)
	/*
	 * Only linear delay values are supported for current Intel SKUs.
	 */
	if (!r->membw.delay_linear && r->membw.arch_needs_linear) {
		rdt_last_cmd_puts("No support for non-linear MB domains\n");

	ret = kstrtoul(buf, 10, &bw);
		rdt_last_cmd_printf("Non-decimal digit in MB value %s\n", buf);

	if ((bw < r->membw.min_bw || bw > r->default_ctrl) &&
		rdt_last_cmd_printf("MB value %ld out of range [%d,%d]\n", bw,
				    r->membw.min_bw, r->default_ctrl);

	*data = roundup(bw, (unsigned long)r->membw.bw_gran);
int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s,
	struct resctrl_staged_config *cfg;
	u32 closid = data->rdtgrp->closid;
	struct rdt_resource *r = s->res;

	cfg = &d->staged_config[s->conf_type];
	if (cfg->have_new_ctrl) {
		rdt_last_cmd_printf("Duplicate domain %d\n", d->id);

	if (!bw_validate(data->buf, &bw_val, r))

		d->mbps_val[closid] = bw_val;

	cfg->new_ctrl = bw_val;
	cfg->have_new_ctrl = true;
/*
 * Check whether a cache bit mask is valid.
 * On Intel CPUs, non-contiguous 1s value support is indicated by CPUID:
 *   - CPUID.0x10.1:ECX[3]: L3 non-contiguous 1s value supported if 1
 *   - CPUID.0x10.2:ECX[3]: L2 non-contiguous 1s value supported if 1
 * Haswell does not support a non-contiguous 1s value and additionally
 * requires at least two bits set.
 * AMD allows non-contiguous bitmasks.
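 *
 * Illustrative examples, assuming a hypothetical cbm_len of 11 with
 * arch_has_sparse_bitmasks disabled: "7f0" is accepted (one contiguous run
 * of 1-bits), "5f0" is rejected (hole between the 1-bits), and on Haswell a
 * single-bit mask such as "400" is rejected because min_cbm_bits is 2.
 */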
static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
	unsigned long first_bit, zero_bit, val;
	unsigned int cbm_len = r->cache.cbm_len;

	ret = kstrtoul(buf, 16, &val);
		rdt_last_cmd_printf("Non-hex character in the mask %s\n", buf);

	if ((r->cache.min_cbm_bits > 0 && val == 0) || val > r->default_ctrl) {
		rdt_last_cmd_puts("Mask out of range\n");

	first_bit = find_first_bit(&val, cbm_len);
	zero_bit = find_next_zero_bit(&val, cbm_len, first_bit);

	/* Are non-contiguous bitmasks allowed? */
	if (!r->cache.arch_has_sparse_bitmasks &&
	    (find_next_bit(&val, cbm_len, zero_bit) < cbm_len)) {
		rdt_last_cmd_printf("The mask %lx has non-consecutive 1-bits\n", val);

	if ((zero_bit - first_bit) < r->cache.min_cbm_bits) {
		rdt_last_cmd_printf("Need at least %d bits in the mask\n",
				    r->cache.min_cbm_bits);
/*
 * Read one cache bit mask (hex). Check that it is valid for the current
 * resource type.
 */
int parse_cbm(struct rdt_parse_data *data, struct resctrl_schema *s,
	      struct rdt_domain *d)
	struct rdtgroup *rdtgrp = data->rdtgrp;
	struct resctrl_staged_config *cfg;
	struct rdt_resource *r = s->res;

	cfg = &d->staged_config[s->conf_type];
	if (cfg->have_new_ctrl) {
		rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
	/*
	 * Cannot set up more than one pseudo-locked region in a cache
	 * hierarchy.
	 */
	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP &&
	    rdtgroup_pseudo_locked_in_hierarchy(d)) {
		rdt_last_cmd_puts("Pseudo-locked region in hierarchy\n");

	if (!cbm_validate(data->buf, &cbm_val, r))

	if ((rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
	     rdtgrp->mode == RDT_MODE_SHAREABLE) &&
	    rdtgroup_cbm_overlaps_pseudo_locked(d, cbm_val)) {
		rdt_last_cmd_puts("CBM overlaps with pseudo-locked region\n");

	/*
	 * The CBM may not overlap with the CBM of another closid if
	 * either is exclusive.
	 */
	if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, true)) {
		rdt_last_cmd_puts("Overlaps with exclusive group\n");

	if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, false)) {
		if (rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
		    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
			rdt_last_cmd_puts("Overlaps with other group\n");

	cfg->new_ctrl = cbm_val;
	cfg->have_new_ctrl = true;
/*
 * For each domain in this resource we expect to find a series of:
 *	id=mask
 * separated by ";". The "id" is in decimal, and must match one of
 * the "id"s for this resource.
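 *
 * For example, on a hypothetical system with two L3 cache domains a written
 * line might look like "L3:0=ff;1=3f", i.e. domain 0 gets CBM 0xff and
 * domain 1 gets CBM 0x3f. The MB resource takes bandwidth percentages
 * instead, e.g. "MB:0=50;1=100".
 */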
static int parse_line(char *line, struct resctrl_schema *s,
		      struct rdtgroup *rdtgrp)
	enum resctrl_conf_type t = s->conf_type;
	struct resctrl_staged_config *cfg;
	struct rdt_resource *r = s->res;
	struct rdt_parse_data data;
	char *dom = NULL, *id;
	struct rdt_domain *d;
	unsigned long dom_id;

	/* Walking r->domains, ensure it can't race with cpuhp */
	lockdep_assert_cpus_held();

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP &&
	    (r->rid == RDT_RESOURCE_MBA || r->rid == RDT_RESOURCE_SMBA)) {
		rdt_last_cmd_puts("Cannot pseudo-lock MBA resource\n");

	if (!line || line[0] == '\0')

	dom = strsep(&line, ";");
	id = strsep(&dom, "=");
	if (!dom || kstrtoul(id, 10, &dom_id)) {
		rdt_last_cmd_puts("Missing '=' or non-numeric domain\n");

	list_for_each_entry(d, &r->domains, list) {
		if (d->id == dom_id) {
			data.rdtgrp = rdtgrp;
			if (r->parse_ctrlval(&data, s, d))
			if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
				cfg = &d->staged_config[t];
				/*
				 * We are in pseudo-locking setup mode and have
				 * just parsed a valid CBM that should be
				 * pseudo-locked. Only one pseudo-locked region
				 * is allowed per resource group and domain, so
				 * just do the required initialization for a
				 * single region and return.
				 */
				rdtgrp->plr->cbm = cfg->new_ctrl;
				d->plr = rdtgrp->plr;
static u32 get_config_index(u32 closid, enum resctrl_conf_type type)
		return closid * 2 + 1;
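/*
 * Note on indexing: with CDP disabled a CLOSID maps 1:1 to a control
 * register index. With CDP enabled each CLOSID owns a pair of indices,
 * 2 * closid for one config type and 2 * closid + 1 for the other, which is
 * what the multiplication above reflects.
 */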
static bool apply_config(struct rdt_hw_domain *hw_dom,
			 struct resctrl_staged_config *cfg, u32 idx,
			 cpumask_var_t cpu_mask)
	struct rdt_domain *dom = &hw_dom->d_resctrl;

	if (cfg->new_ctrl != hw_dom->ctrl_val[idx]) {
		cpumask_set_cpu(cpumask_any(&dom->cpu_mask), cpu_mask);
		hw_dom->ctrl_val[idx] = cfg->new_ctrl;
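/*
 * apply_config() reports a change only when the staged value differs from
 * the cached control value, so domains whose configuration is unchanged add
 * no CPU to cpu_mask and trigger no MSR update.
 */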
int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_domain *d,
			    u32 closid, enum resctrl_conf_type t, u32 cfg_val)
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
	u32 idx = get_config_index(closid, t);
	struct msr_param msr_param;

	if (!cpumask_test_cpu(smp_processor_id(), &d->cpu_mask))

	hw_dom->ctrl_val[idx] = cfg_val;

	msr_param.high = idx + 1;
	hw_res->msr_update(d, &msr_param, r);
int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid)
	struct resctrl_staged_config *cfg;
	struct rdt_hw_domain *hw_dom;
	struct msr_param msr_param;
	enum resctrl_conf_type t;
	cpumask_var_t cpu_mask;
	struct rdt_domain *d;

	/* Walking r->domains, ensure it can't race with cpuhp */
	lockdep_assert_cpus_held();

	if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))

	msr_param.res = NULL;
	list_for_each_entry(d, &r->domains, list) {
		hw_dom = resctrl_to_arch_dom(d);
		for (t = 0; t < CDP_NUM_TYPES; t++) {
			cfg = &hw_dom->d_resctrl.staged_config[t];
			if (!cfg->have_new_ctrl)

			idx = get_config_index(closid, t);
			if (!apply_config(hw_dom, cfg, idx, cpu_mask))

			if (!msr_param.res) {
				msr_param.high = msr_param.low + 1;
			msr_param.low = min(msr_param.low, idx);
			msr_param.high = max(msr_param.high, idx + 1);

	if (cpumask_empty(cpu_mask))
	/* Update the resource control MSRs on all CPUs in cpu_mask. */
	on_each_cpu_mask(cpu_mask, rdt_ctrl_update, &msr_param, 1);

	free_cpumask_var(cpu_mask);
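/*
 * Illustrative walk-through with hypothetical values: with CDP enabled and
 * closid 3, staged CODE and DATA configs map to indices 7 and 6, so
 * msr_param ends up describing the half-open index range [6, 8) and a
 * single call per selected CPU writes both control registers.
 */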
static int rdtgroup_parse_resource(char *resname, char *tok,
				   struct rdtgroup *rdtgrp)
	struct resctrl_schema *s;

	list_for_each_entry(s, &resctrl_schema_all, list) {
		if (!strcmp(resname, s->name) && rdtgrp->closid < s->num_closid)
			return parse_line(tok, s, rdtgrp);

	rdt_last_cmd_printf("Unknown or unsupported resource name '%s'\n", resname);
ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
	struct resctrl_schema *s;
	struct rdtgroup *rdtgrp;
	struct rdt_resource *r;

	/* Valid input requires a trailing newline */
	if (nbytes == 0 || buf[nbytes - 1] != '\n')
	buf[nbytes - 1] = '\0';

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
		rdtgroup_kn_unlock(of->kn);

	rdt_last_cmd_clear();
	/*
	 * No changes to a pseudo-locked region are allowed. It has to be
	 * removed and re-created instead.
	 */
	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
		rdt_last_cmd_puts("Resource group is pseudo-locked\n");

	rdt_staged_configs_clear();

	while ((tok = strsep(&buf, "\n")) != NULL) {
		resname = strim(strsep(&tok, ":"));
			rdt_last_cmd_puts("Missing ':'\n");

		if (tok[0] == '\0') {
			rdt_last_cmd_printf("Missing '%s' value\n", resname);

		ret = rdtgroup_parse_resource(resname, tok, rdtgrp);

	list_for_each_entry(s, &resctrl_schema_all, list) {
		/*
		 * Writes to mba_sc resources update the software controller,
		 * not the control MSR.
		 */
		ret = resctrl_arch_update_domains(r, rdtgrp->closid);

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
		/*
		 * If pseudo-locking fails we keep the resource group in
		 * mode RDT_MODE_PSEUDO_LOCKSETUP with its class of service
		 * active and updated for just the domain the pseudo-locked
		 * region was requested for.
		 */
		ret = rdtgroup_pseudo_lock_create(rdtgrp);

	rdt_staged_configs_clear();
	rdtgroup_kn_unlock(of->kn);
	return ret ?: nbytes;
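/*
 * Example (path is illustrative): writing "L3:0=ff\n" to
 * /sys/fs/resctrl/<group>/schemata stages the new CBM for L3 domain 0,
 * pushes it to hardware via resctrl_arch_update_domains() and, on success,
 * the write returns nbytes.
 */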
u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d,
			    u32 closid, enum resctrl_conf_type type)
	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
	u32 idx = get_config_index(closid, type);

	return hw_dom->ctrl_val[idx];
static void show_doms(struct seq_file *s, struct resctrl_schema *schema, int closid)
	struct rdt_resource *r = schema->res;
	struct rdt_domain *dom;

	/* Walking r->domains, ensure it can't race with cpuhp */
	lockdep_assert_cpus_held();

	seq_printf(s, "%*s:", max_name_width, schema->name);
	list_for_each_entry(dom, &r->domains, list) {
			ctrl_val = dom->mbps_val[closid];
			ctrl_val = resctrl_arch_get_config(r, dom, closid,
		seq_printf(s, r->format_str, dom->id, max_data_width,
int rdtgroup_schemata_show(struct kernfs_open_file *of,
			   struct seq_file *s, void *v)
	struct resctrl_schema *schema;
	struct rdtgroup *rdtgrp;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
		list_for_each_entry(schema, &resctrl_schema_all, list) {
			seq_printf(s, "%s:uninitialized\n", schema->name);
	} else if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
		if (!rdtgrp->plr->d) {
			rdt_last_cmd_clear();
			rdt_last_cmd_puts("Cache domain offline\n");
			seq_printf(s, "%s:%d=%x\n",
				   rdtgrp->plr->s->res->name,

		closid = rdtgrp->closid;
		list_for_each_entry(schema, &resctrl_schema_all, list) {
			if (closid < schema->num_closid)
				show_doms(s, schema, closid);

	rdtgroup_kn_unlock(of->kn);
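/* Thin wrapper so mon_event_count() can be invoked via smp_call_on_cpu(). */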
static int smp_mon_event_count(void *arg)
	mon_event_count(arg);
void mon_event_read(struct rmid_read *rr, struct rdt_resource *r,
		    struct rdt_domain *d, struct rdtgroup *rdtgrp,
		    int evtid, int first)
	/* When picking a CPU from cpu_mask, ensure it can't race with cpuhp */
	lockdep_assert_cpus_held();

	/*
	 * Set up the parameters to pass to mon_event_count() to read the data.
	 */

	rr->arch_mon_ctx = resctrl_arch_mon_ctx_alloc(r, evtid);
	if (IS_ERR(rr->arch_mon_ctx)) {

	cpu = cpumask_any_housekeeping(&d->cpu_mask, RESCTRL_PICK_ANY_CPU);

	/*
	 * cpumask_any_housekeeping() prefers housekeeping CPUs, but if all
	 * the CPUs are nohz_full, one of them has to be targeted with an IPI.
	 * MPAM's resctrl_arch_rmid_read() is unable to read the counters on
	 * some platforms if it is called in IRQ context.
	 */
	if (tick_nohz_full_cpu(cpu))
		smp_call_function_any(&d->cpu_mask, mon_event_count, rr, 1);
		smp_call_on_cpu(cpu, smp_mon_event_count, rr, false);

	resctrl_arch_mon_ctx_free(r, evtid, rr->arch_mon_ctx);
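/*
 * Design note: smp_call_function_any() runs the handler from IPI (interrupt)
 * context, while smp_call_on_cpu() runs it from a kworker in process
 * context; the tick_nohz_full_cpu() check above therefore takes the IPI
 * path only when no housekeeping CPU is available for the domain.
 */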
int rdtgroup_mondata_show(struct seq_file *m, void *arg)
	struct kernfs_open_file *of = m->private;
	u32 resid, evtid, domid;
	struct rdtgroup *rdtgrp;
	struct rdt_resource *r;
	union mon_data_bits md;
	struct rdt_domain *d;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);

	md.priv = of->kn->priv;

	r = &rdt_resources_all[resid].r_resctrl;
	d = rdt_find_domain(r, domid, NULL);
	if (IS_ERR_OR_NULL(d)) {

	mon_event_read(&rr, r, d, rdtgrp, evtid, false);

		seq_puts(m, "Error\n");
	else if (rr.err == -EINVAL)
		seq_puts(m, "Unavailable\n");
		seq_printf(m, "%llu\n", rr.val);

	rdtgroup_kn_unlock(of->kn);
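/*
 * Example (path is illustrative): reading
 * /sys/fs/resctrl/<group>/mon_data/mon_L3_00/llc_occupancy prints the
 * current occupancy in bytes as a single decimal value, or "Unavailable" /
 * "Error" when the counter could not be read.
 */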