// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/delay.h>

#include "cxlmem.h"
#include "core.h"

/**
 * DOC: cxl core hdm
 *
 * Compute Express Link Host Managed Device Memory, starting with the
 * CXL 2.0 specification, is managed by an array of HDM Decoder register
 * instances per CXL port and per CXL endpoint. Define common helpers
 * for enumerating these registers and capabilities.
 */

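/*
 * Serializes device-physical-address (DPA) allocation state: every DPA
 * reservation and release below nests under this lock.
 */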
DECLARE_RWSEM(cxl_dpa_rwsem);

static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
                           int *target_map)
{
        int rc;

        rc = cxl_decoder_add_locked(cxld, target_map);
        if (rc) {
                put_device(&cxld->dev);
                dev_err(&port->dev, "Failed to add decoder\n");
                return rc;
        }

        rc = cxl_decoder_autoremove(&port->dev, cxld);
        if (rc)
                return rc;

        dev_dbg(&cxld->dev, "Added to port %s\n", dev_name(&port->dev));

        return 0;
}

/*
 * Per the CXL specification (8.2.5.12 CXL HDM Decoder Capability Structure)
 * single-ported host bridges need not publish a decoder capability when a
 * passthrough decode can be assumed, i.e. all transactions that the uport sees
 * are claimed and passed to the single dport. Disable the range until the
 * first CXL region is enumerated / activated.
 */
int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
{
        struct cxl_switch_decoder *cxlsd;
        struct cxl_dport *dport = NULL;
        int single_port_map[1];
        unsigned long index;
        struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);

        /*
         * Capability checks are moot for passthrough decoders; support
         * any and all possibilities.
         */
        cxlhdm->interleave_mask = ~0U;
        cxlhdm->iw_cap_mask = ~0UL;

        cxlsd = cxl_switch_decoder_alloc(port, 1);
        if (IS_ERR(cxlsd))
                return PTR_ERR(cxlsd);

        device_lock_assert(&port->dev);

        xa_for_each(&port->dports, index, dport)
                break;
        single_port_map[0] = dport->port_id;

        return add_hdm_decoder(port, &cxlsd->cxld, single_port_map);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_passthrough_decoder, "CXL");

static void parse_hdm_decoder_caps(struct cxl_hdm *cxlhdm)
{
        u32 hdm_cap;

        hdm_cap = readl(cxlhdm->regs.hdm_decoder + CXL_HDM_DECODER_CAP_OFFSET);
        cxlhdm->decoder_count = cxl_hdm_decoder_count(hdm_cap);
        cxlhdm->target_count =
                FIELD_GET(CXL_HDM_DECODER_TARGET_COUNT_MASK, hdm_cap);
        if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_11_8, hdm_cap))
                cxlhdm->interleave_mask |= GENMASK(11, 8);
        if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_14_12, hdm_cap))
                cxlhdm->interleave_mask |= GENMASK(14, 12);
        cxlhdm->iw_cap_mask = BIT(1) | BIT(2) | BIT(4) | BIT(8);
        if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_3_6_12_WAY, hdm_cap))
                cxlhdm->iw_cap_mask |= BIT(3) | BIT(6) | BIT(12);
        if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_16_WAY, hdm_cap))
                cxlhdm->iw_cap_mask |= BIT(16);
}
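
/*
 * Note on the masks above: interleave_mask records which HPA bits the
 * decoders may consider for interleave selection, and iw_cap_mask is a
 * bitmap of supported interleave ways (bit N set means N-way interleave
 * is supported). For example, a decoder reporting the 3/6/12-way
 * capability ends up with iw_cap_mask = BIT(1) | BIT(2) | BIT(3) |
 * BIT(4) | BIT(6) | BIT(8) | BIT(12).
 */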

static bool should_emulate_decoders(struct cxl_endpoint_dvsec_info *info)
{
        struct cxl_hdm *cxlhdm;
        void __iomem *hdm;
        u32 ctrl;
        int i;

        if (!info)
                return false;

        cxlhdm = dev_get_drvdata(&info->port->dev);
        hdm = cxlhdm->regs.hdm_decoder;

        if (!hdm)
                return true;

        /*
         * If HDM decoders are present and the driver is in control of
         * Mem_Enable, skip DVSEC-based emulation.
         */
        if (!info->mem_enabled)
                return false;

        /*
         * If any decoders are committed already, there should not be any
         * emulated DVSEC decoders.
         */
        for (i = 0; i < cxlhdm->decoder_count; i++) {
                ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
                dev_dbg(&info->port->dev,
                        "decoder%d.%d: committed: %ld base: %#x_%.8x size: %#x_%.8x\n",
                        info->port->id, i,
                        FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl),
                        readl(hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(i)),
                        readl(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(i)),
                        readl(hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(i)),
                        readl(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(i)));
                if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
                        return false;
        }

        return true;
}
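
/*
 * Decision summary: emulate DVSEC range registers only when DVSEC info
 * exists and either no HDM decoder registers are mapped, or Mem_Enable
 * was set by platform firmware and no decoder has been committed yet.
 */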

/**
 * devm_cxl_setup_hdm - map HDM decoder component registers
 * @port: cxl_port to map
 * @info: cached DVSEC range register info
 */
struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
                                   struct cxl_endpoint_dvsec_info *info)
{
        struct cxl_register_map *reg_map = &port->reg_map;
        struct device *dev = &port->dev;
        struct cxl_hdm *cxlhdm;
        int rc;

        cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL);
        if (!cxlhdm)
                return ERR_PTR(-ENOMEM);
        cxlhdm->port = port;
        dev_set_drvdata(dev, cxlhdm);

        /* Memory devices can configure device HDM using DVSEC range regs. */
        if (reg_map->resource == CXL_RESOURCE_NONE) {
                if (!info || !info->mem_enabled) {
                        dev_err(dev, "No component registers mapped\n");
                        return ERR_PTR(-ENXIO);
                }

                cxlhdm->decoder_count = info->ranges;
                return cxlhdm;
        }

        if (!reg_map->component_map.hdm_decoder.valid) {
                dev_dbg(&port->dev, "HDM decoder registers not implemented\n");
                /* unique error code to indicate no HDM decoder capability */
                return ERR_PTR(-ENODEV);
        }

        rc = cxl_map_component_regs(reg_map, &cxlhdm->regs,
                                    BIT(CXL_CM_CAP_CAP_ID_HDM));
        if (rc) {
                dev_err(dev, "Failed to map HDM capability.\n");
                return ERR_PTR(rc);
        }

        parse_hdm_decoder_caps(cxlhdm);
        if (cxlhdm->decoder_count == 0) {
                dev_err(dev, "Spec violation. Caps invalid\n");
                return ERR_PTR(-ENXIO);
        }

        /*
         * Now that the hdm capability is parsed, decide if range
         * register emulation is needed and fixup cxlhdm accordingly.
         */
        if (should_emulate_decoders(info)) {
                dev_dbg(dev, "Fallback map %d range register%s\n", info->ranges,
                        info->ranges > 1 ? "s" : "");
                cxlhdm->decoder_count = info->ranges;
        }

        return cxlhdm;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_hdm, "CXL");
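
/*
 * A typical caller pairs the two setup entry points, e.g. (sketch, port
 * driver error handling abbreviated):
 *
 *	cxlhdm = devm_cxl_setup_hdm(port, info);
 *	if (IS_ERR(cxlhdm))
 *		return PTR_ERR(cxlhdm);
 *	return devm_cxl_enumerate_decoders(cxlhdm, info);
 *
 * with the -ENODEV case above optionally routed to
 * devm_cxl_add_passthrough_decoder() for single-dport ports.
 */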

static void __cxl_dpa_debug(struct seq_file *file, struct resource *r, int depth)
{
        unsigned long long start = r->start, end = r->end;

        seq_printf(file, "%*s%08llx-%08llx : %s\n", depth * 2, "", start, end,
                   r->name);
}

void cxl_dpa_debug(struct seq_file *file, struct cxl_dev_state *cxlds)
{
        struct resource *p1, *p2;

        down_read(&cxl_dpa_rwsem);
        for (p1 = cxlds->dpa_res.child; p1; p1 = p1->sibling) {
                __cxl_dpa_debug(file, p1, 0);
                for (p2 = p1->child; p2; p2 = p2->sibling)
                        __cxl_dpa_debug(file, p2, 1);
        }
        up_read(&cxl_dpa_rwsem);
}
EXPORT_SYMBOL_NS_GPL(cxl_dpa_debug, "CXL");
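
/*
 * Hypothetical output: partitions are the top-level children of
 * dpa_res, with per-decoder reservations nested one level below, e.g.:
 *
 *	00000000-7fffffff : ram
 *	  00000000-0fffffff : decoder3.0
 *	80000000-ffffffff : pmem
 *	  80000000-8fffffff : decoder3.1
 */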

/*
 * Must be called in a context that synchronizes against this decoder's
 * port ->remove() callback (like an endpoint decoder sysfs attribute)
 */
static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
{
        struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
        struct cxl_port *port = cxled_to_port(cxled);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct resource *res = cxled->dpa_res;
        resource_size_t skip_start;

        lockdep_assert_held_write(&cxl_dpa_rwsem);

        /* save @skip_start, before @res is released */
        skip_start = res->start - cxled->skip;
        __release_region(&cxlds->dpa_res, res->start, resource_size(res));
        if (cxled->skip)
                __release_region(&cxlds->dpa_res, skip_start, cxled->skip);
        cxled->skip = 0;
        cxled->dpa_res = NULL;
        put_device(&cxled->cxld.dev);
        port->hdm_end--;
}

static void cxl_dpa_release(void *cxled)
{
        down_write(&cxl_dpa_rwsem);
        __cxl_dpa_release(cxled);
        up_write(&cxl_dpa_rwsem);
}

/*
 * Must be called from context that will not race port device
 * unregistration, like decoder sysfs attribute methods
 */
static void devm_cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
{
        struct cxl_port *port = cxled_to_port(cxled);

        lockdep_assert_held_write(&cxl_dpa_rwsem);
        devm_remove_action(&port->dev, cxl_dpa_release, cxled);
        __cxl_dpa_release(cxled);
}
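
/*
 * Two release paths exist on purpose: cxl_dpa_release() runs as the
 * devm action at port teardown and takes cxl_dpa_rwsem itself, while
 * devm_cxl_dpa_release() serves callers that already hold the lock and
 * need to unwind an allocation early, so it first removes the pending
 * devm action.
 */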

static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
                             resource_size_t base, resource_size_t len,
                             resource_size_t skipped)
{
        struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
        struct cxl_port *port = cxled_to_port(cxled);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct device *dev = &port->dev;
        struct resource *res;

        lockdep_assert_held_write(&cxl_dpa_rwsem);

        if (!len) {
                dev_warn(dev, "decoder%d.%d: empty reservation attempted\n",
                         port->id, cxled->cxld.id);
                return -EINVAL;
        }

        if (cxled->dpa_res) {
                dev_dbg(dev, "decoder%d.%d: existing allocation %pr assigned\n",
                        port->id, cxled->cxld.id, cxled->dpa_res);
                return -EBUSY;
        }

        if (port->hdm_end + 1 != cxled->cxld.id) {
                /*
                 * Assumes alloc and commit order is always in hardware
                 * instance order, per expectations from 8.2.5.12.20
                 * Committing Decoder Programming, which requires that
                 * decoder[m] be committed before decoder[m+1] commit
                 * starts.
                 */
                dev_dbg(dev, "decoder%d.%d: expected decoder%d.%d\n", port->id,
                        cxled->cxld.id, port->id, port->hdm_end + 1);
                return -EBUSY;
        }

        if (skipped) {
                res = __request_region(&cxlds->dpa_res, base - skipped, skipped,
                                       dev_name(&cxled->cxld.dev), 0);
                if (!res) {
                        dev_dbg(dev,
                                "decoder%d.%d: failed to reserve skipped space\n",
                                port->id, cxled->cxld.id);
                        return -EBUSY;
                }
        }
        res = __request_region(&cxlds->dpa_res, base, len,
                               dev_name(&cxled->cxld.dev), 0);
        if (!res) {
                dev_dbg(dev, "decoder%d.%d: failed to reserve allocation\n",
                        port->id, cxled->cxld.id);
                if (skipped)
                        __release_region(&cxlds->dpa_res, base - skipped,
                                         skipped);
                return -EBUSY;
        }
        cxled->dpa_res = res;
        cxled->skip = skipped;

        if (resource_contains(&cxlds->pmem_res, res))
                cxled->mode = CXL_DECODER_PMEM;
        else if (resource_contains(&cxlds->ram_res, res))
                cxled->mode = CXL_DECODER_RAM;
        else {
                dev_warn(dev, "decoder%d.%d: %pr mixed mode not supported\n",
                         port->id, cxled->cxld.id, cxled->dpa_res);
                cxled->mode = CXL_DECODER_MIXED;
        }

        port->hdm_end++;
        get_device(&cxled->cxld.dev);
        return 0;
}

int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
                                resource_size_t base, resource_size_t len,
                                resource_size_t skipped)
{
        struct cxl_port *port = cxled_to_port(cxled);
        int rc;

        down_write(&cxl_dpa_rwsem);
        rc = __cxl_dpa_reserve(cxled, base, len, skipped);
        up_write(&cxl_dpa_rwsem);

        if (rc)
                return rc;

        return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_dpa_reserve, "CXL");

resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled)
{
        resource_size_t size = 0;

        down_read(&cxl_dpa_rwsem);
        if (cxled->dpa_res)
                size = resource_size(cxled->dpa_res);
        up_read(&cxl_dpa_rwsem);

        return size;
}

resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled)
{
        resource_size_t base = -1;

        lockdep_assert_held(&cxl_dpa_rwsem);
        if (cxled->dpa_res)
                base = cxled->dpa_res->start;

        return base;
}

int cxl_dpa_free(struct cxl_endpoint_decoder *cxled)
{
        struct cxl_port *port = cxled_to_port(cxled);
        struct device *dev = &cxled->cxld.dev;
        int rc;

        down_write(&cxl_dpa_rwsem);
        if (!cxled->dpa_res) {
                rc = 0;
                goto out;
        }
        if (cxled->cxld.region) {
                dev_dbg(dev, "decoder assigned to: %s\n",
                        dev_name(&cxled->cxld.region->dev));
                rc = -EBUSY;
                goto out;
        }
        if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
                dev_dbg(dev, "decoder enabled\n");
                rc = -EBUSY;
                goto out;
        }
        if (cxled->cxld.id != port->hdm_end) {
                dev_dbg(dev, "expected decoder%d.%d\n", port->id,
                        port->hdm_end);
                rc = -EBUSY;
                goto out;
        }
        devm_cxl_dpa_release(cxled);
        rc = 0;
out:
        up_write(&cxl_dpa_rwsem);
        return rc;
}

int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled,
                     enum cxl_decoder_mode mode)
{
        struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct device *dev = &cxled->cxld.dev;

        switch (mode) {
        case CXL_DECODER_RAM:
        case CXL_DECODER_PMEM:
                break;
        default:
                dev_dbg(dev, "unsupported mode: %d\n", mode);
                return -EINVAL;
        }

        guard(rwsem_write)(&cxl_dpa_rwsem);
        if (cxled->cxld.flags & CXL_DECODER_F_ENABLE)
                return -EBUSY;

        /*
         * Only allow modes that are supported by the current partition
         * configuration
         */
        if (mode == CXL_DECODER_PMEM && !resource_size(&cxlds->pmem_res)) {
                dev_dbg(dev, "no available pmem capacity\n");
                return -ENXIO;
        }
        if (mode == CXL_DECODER_RAM && !resource_size(&cxlds->ram_res)) {
                dev_dbg(dev, "no available ram capacity\n");
                return -ENXIO;
        }

        cxled->mode = mode;
        return 0;
}

int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
{
        struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
        resource_size_t free_ram_start, free_pmem_start;
        struct cxl_port *port = cxled_to_port(cxled);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct device *dev = &cxled->cxld.dev;
        resource_size_t start, avail, skip;
        struct resource *p, *last;
        int rc;

        down_write(&cxl_dpa_rwsem);
        if (cxled->cxld.region) {
                dev_dbg(dev, "decoder attached to %s\n",
                        dev_name(&cxled->cxld.region->dev));
                rc = -EBUSY;
                goto out;
        }

        if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
                dev_dbg(dev, "decoder enabled\n");
                rc = -EBUSY;
                goto out;
        }

        for (p = cxlds->ram_res.child, last = NULL; p; p = p->sibling)
                last = p;
        if (last)
                free_ram_start = last->end + 1;
        else
                free_ram_start = cxlds->ram_res.start;

        for (p = cxlds->pmem_res.child, last = NULL; p; p = p->sibling)
                last = p;
        if (last)
                free_pmem_start = last->end + 1;
        else
                free_pmem_start = cxlds->pmem_res.start;

        if (cxled->mode == CXL_DECODER_RAM) {
                start = free_ram_start;
                avail = cxlds->ram_res.end - start + 1;
                skip = 0;
        } else if (cxled->mode == CXL_DECODER_PMEM) {
                resource_size_t skip_start, skip_end;

                start = free_pmem_start;
                avail = cxlds->pmem_res.end - start + 1;
                skip_start = free_ram_start;

                /*
                 * If some pmem is already allocated, then that allocation
                 * already handled the skip.
                 */
                if (cxlds->pmem_res.child &&
                    skip_start == cxlds->pmem_res.child->start)
                        skip_end = skip_start - 1;
                else
                        skip_end = start - 1;
                skip = skip_end - skip_start + 1;
        } else {
                dev_dbg(dev, "mode not set\n");
                rc = -EINVAL;
                goto out;
        }

        if (size > avail) {
                dev_dbg(dev, "%pa exceeds available %s capacity: %pa\n", &size,
                        cxl_decoder_mode_name(cxled->mode), &avail);
                rc = -ENOSPC;
                goto out;
        }

        rc = __cxl_dpa_reserve(cxled, start, size, skip);
out:
        up_write(&cxl_dpa_rwsem);

        if (rc)
                return rc;

        return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
}
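
/*
 * Worked example for the skip math above, with hypothetical partitions
 * ram_res = 0x0-0xffffffff and pmem_res = 0x100000000-0x1ffffffff: the
 * first PMEM-mode allocation while RAM is entirely unallocated gets
 * skip_start = 0x0, skip_end = 0xffffffff, so skip = 0x100000000. The
 * skipped span is reserved alongside the allocation, which keeps DPA
 * reservations monotonic in decoder instance order as required by
 * __cxl_dpa_reserve().
 */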

static void cxld_set_interleave(struct cxl_decoder *cxld, u32 *ctrl)
{
        u16 eig;
        u8 eiw;

        /*
         * Input validation ensures these warns never fire, but otherwise
         * suppress uninitialized variable usage warnings.
         */
        if (WARN_ONCE(ways_to_eiw(cxld->interleave_ways, &eiw),
                      "invalid interleave_ways: %d\n", cxld->interleave_ways))
                return;
        if (WARN_ONCE(granularity_to_eig(cxld->interleave_granularity, &eig),
                      "invalid interleave_granularity: %d\n",
                      cxld->interleave_granularity))
                return;

        u32p_replace_bits(ctrl, eig, CXL_HDM_DECODER0_CTRL_IG_MASK);
        u32p_replace_bits(ctrl, eiw, CXL_HDM_DECODER0_CTRL_IW_MASK);
        *ctrl |= CXL_HDM_DECODER0_CTRL_COMMIT;
}

static void cxld_set_type(struct cxl_decoder *cxld, u32 *ctrl)
{
        u32p_replace_bits(ctrl,
                          !!(cxld->target_type == CXL_DECODER_HOSTONLYMEM),
                          CXL_HDM_DECODER0_CTRL_HOSTONLY);
}

static void cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt)
{
        struct cxl_dport **t = &cxlsd->target[0];
        int ways = cxlsd->cxld.interleave_ways;

        *tgt = FIELD_PREP(GENMASK(7, 0), t[0]->port_id);
        if (ways > 1)
                *tgt |= FIELD_PREP(GENMASK(15, 8), t[1]->port_id);
        if (ways > 2)
                *tgt |= FIELD_PREP(GENMASK(23, 16), t[2]->port_id);
        if (ways > 3)
                *tgt |= FIELD_PREP(GENMASK(31, 24), t[3]->port_id);
        if (ways > 4)
                *tgt |= FIELD_PREP(GENMASK_ULL(39, 32), t[4]->port_id);
        if (ways > 5)
                *tgt |= FIELD_PREP(GENMASK_ULL(47, 40), t[5]->port_id);
        if (ways > 6)
                *tgt |= FIELD_PREP(GENMASK_ULL(55, 48), t[6]->port_id);
        if (ways > 7)
                *tgt |= FIELD_PREP(GENMASK_ULL(63, 56), t[7]->port_id);
}
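
/*
 * The 64-bit target list packs one dport port_id per byte in interleave
 * position order: position 0 in bits[7:0], position 1 in bits[15:8],
 * and so on up to position 7 in bits[63:56].
 */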

/*
 * Per CXL 2.0 8.2.5.12.20 Committing Decoder Programming, hardware must set
 * committed or error within 10ms, but just be generous with 20ms to account for
 * clock skew and other marginal behavior
 */
#define COMMIT_TIMEOUT_MS 20
static int cxld_await_commit(void __iomem *hdm, int id)
{
        u32 ctrl;
        int i;

        for (i = 0; i < COMMIT_TIMEOUT_MS; i++) {
                ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
                if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMIT_ERROR, ctrl)) {
                        ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
                        writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
                        return -EIO;
                }
                if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
                        return 0;
                fsleep(1000);
        }

        return -ETIMEDOUT;
}
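
/*
 * The loop above polls at 1ms granularity for up to COMMIT_TIMEOUT_MS;
 * on a commit error it clears the COMMIT bit so a subsequent attempt
 * can retry the decoder.
 */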

static int cxl_decoder_commit(struct cxl_decoder *cxld)
{
        struct cxl_port *port = to_cxl_port(cxld->dev.parent);
        struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
        void __iomem *hdm = cxlhdm->regs.hdm_decoder;
        int id = cxld->id, rc;
        u64 base, size;
        u32 ctrl;

        if (cxld->flags & CXL_DECODER_F_ENABLE)
                return 0;

        if (cxl_num_decoders_committed(port) != id) {
                dev_dbg(&port->dev,
                        "%s: out of order commit, expected decoder%d.%d\n",
                        dev_name(&cxld->dev), port->id,
                        cxl_num_decoders_committed(port));
                return -EBUSY;
        }

        /*
         * For endpoint decoders hosted on CXL memory devices that
         * support the sanitize operation, make sure sanitize is not in-flight.
         */
        if (is_endpoint_decoder(&cxld->dev)) {
                struct cxl_endpoint_decoder *cxled =
                        to_cxl_endpoint_decoder(&cxld->dev);
                struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
                struct cxl_memdev_state *mds =
                        to_cxl_memdev_state(cxlmd->cxlds);

                if (mds && mds->security.sanitize_active) {
                        dev_dbg(&cxlmd->dev,
                                "attempted to commit %s during sanitize\n",
                                dev_name(&cxld->dev));
                        return -EBUSY;
                }
        }

        down_read(&cxl_dpa_rwsem);
        /* common decoder settings */
        ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id));
        cxld_set_interleave(cxld, &ctrl);
        cxld_set_type(cxld, &ctrl);
        base = cxld->hpa_range.start;
        size = range_len(&cxld->hpa_range);

        writel(upper_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
        writel(lower_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
        writel(upper_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
        writel(lower_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));

        if (is_switch_decoder(&cxld->dev)) {
                struct cxl_switch_decoder *cxlsd =
                        to_cxl_switch_decoder(&cxld->dev);
                void __iomem *tl_hi = hdm + CXL_HDM_DECODER0_TL_HIGH(id);
                void __iomem *tl_lo = hdm + CXL_HDM_DECODER0_TL_LOW(id);
                u64 targets;

                cxlsd_set_targets(cxlsd, &targets);
                writel(upper_32_bits(targets), tl_hi);
                writel(lower_32_bits(targets), tl_lo);
        } else {
                struct cxl_endpoint_decoder *cxled =
                        to_cxl_endpoint_decoder(&cxld->dev);
                void __iomem *sk_hi = hdm + CXL_HDM_DECODER0_SKIP_HIGH(id);
                void __iomem *sk_lo = hdm + CXL_HDM_DECODER0_SKIP_LOW(id);

                writel(upper_32_bits(cxled->skip), sk_hi);
                writel(lower_32_bits(cxled->skip), sk_lo);
        }

        writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
        up_read(&cxl_dpa_rwsem);

        port->commit_end++;
        rc = cxld_await_commit(hdm, cxld->id);
        if (rc) {
                dev_dbg(&port->dev, "%s: error %d committing decoder\n",
                        dev_name(&cxld->dev), rc);
                cxld->reset(cxld);
                return rc;
        }
        cxld->flags |= CXL_DECODER_F_ENABLE;

        return 0;
}

static int commit_reap(struct device *dev, void *data)
{
        struct cxl_port *port = to_cxl_port(dev->parent);
        struct cxl_decoder *cxld;

        if (!is_switch_decoder(dev) && !is_endpoint_decoder(dev))
                return 0;

        cxld = to_cxl_decoder(dev);
        if (port->commit_end == cxld->id &&
            ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) {
                port->commit_end--;
                dev_dbg(&port->dev, "reap: %s commit_end: %d\n",
                        dev_name(&cxld->dev), port->commit_end);
        }

        return 0;
}

void cxl_port_commit_reap(struct cxl_decoder *cxld)
{
        struct cxl_port *port = to_cxl_port(cxld->dev.parent);

        lockdep_assert_held_write(&cxl_region_rwsem);

        /*
         * Once the highest committed decoder is disabled, free up any
         * other decoders that were left pinned (still allocated) by an
         * out-of-order release.
         */
        port->commit_end--;
        dev_dbg(&port->dev, "reap: %s commit_end: %d\n", dev_name(&cxld->dev),
                port->commit_end);
        device_for_each_child_reverse_from(&port->dev, &cxld->dev, NULL,
                                           commit_reap);
}
EXPORT_SYMBOL_NS_GPL(cxl_port_commit_reap, "CXL");

static void cxl_decoder_reset(struct cxl_decoder *cxld)
{
        struct cxl_port *port = to_cxl_port(cxld->dev.parent);
        struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
        void __iomem *hdm = cxlhdm->regs.hdm_decoder;
        int id = cxld->id;
        u32 ctrl;

        if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
                return;

        if (port->commit_end == id)
                cxl_port_commit_reap(cxld);
        else
                dev_dbg(&port->dev,
                        "%s: out of order reset, expected decoder%d.%d\n",
                        dev_name(&cxld->dev), port->id, port->commit_end);

        down_read(&cxl_dpa_rwsem);
        ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
        ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
        writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));

        writel(0, hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
        writel(0, hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));
        writel(0, hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
        writel(0, hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
        up_read(&cxl_dpa_rwsem);

        cxld->flags &= ~CXL_DECODER_F_ENABLE;

        /* Userspace is now responsible for reconfiguring this decoder */
        if (is_endpoint_decoder(&cxld->dev)) {
                struct cxl_endpoint_decoder *cxled;

                cxled = to_cxl_endpoint_decoder(&cxld->dev);
                cxled->state = CXL_DECODER_STATE_MANUAL;
        }
}

static int cxl_setup_hdm_decoder_from_dvsec(
        struct cxl_port *port, struct cxl_decoder *cxld, u64 *dpa_base,
        int which, struct cxl_endpoint_dvsec_info *info)
{
        struct cxl_endpoint_decoder *cxled;
        u64 len;
        int rc;

        if (!is_cxl_endpoint(port))
                return -EOPNOTSUPP;

        cxled = to_cxl_endpoint_decoder(&cxld->dev);
        len = range_len(&info->dvsec_range[which]);
        if (!len)
                return -ENOENT;

        cxld->target_type = CXL_DECODER_HOSTONLYMEM;
        cxld->commit = NULL;
        cxld->reset = NULL;
        cxld->hpa_range = info->dvsec_range[which];

        /*
         * Set the emulated decoder as locked pending additional support to
         * change the range registers at run time.
         */
        cxld->flags |= CXL_DECODER_F_ENABLE | CXL_DECODER_F_LOCK;
        port->commit_end = cxld->id;

        rc = devm_cxl_dpa_reserve(cxled, *dpa_base, len, 0);
        if (rc) {
                dev_err(&port->dev,
                        "decoder%d.%d: Failed to reserve DPA range %#llx - %#llx (%d)\n",
                        port->id, cxld->id, *dpa_base, *dpa_base + len - 1, rc);
                return rc;
        }
        *dpa_base += len;
        cxled->state = CXL_DECODER_STATE_AUTO;

        return 0;
}

static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
                            int *target_map, void __iomem *hdm, int which,
                            u64 *dpa_base, struct cxl_endpoint_dvsec_info *info)
{
        struct cxl_endpoint_decoder *cxled = NULL;
        u64 size, base, skip, dpa_size, lo, hi;
        bool committed;
        u32 remainder;
        int i, rc;
        u32 ctrl;
        union {
                u64 value;
                unsigned char target_id[8];
        } target_list;

        if (should_emulate_decoders(info))
                return cxl_setup_hdm_decoder_from_dvsec(port, cxld, dpa_base,
                                                        which, info);

        ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
        lo = readl(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(which));
        hi = readl(hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(which));
        base = (hi << 32) + lo;
        lo = readl(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(which));
        hi = readl(hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(which));
        size = (hi << 32) + lo;
        committed = !!(ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED);
        cxld->commit = cxl_decoder_commit;
        cxld->reset = cxl_decoder_reset;

        if (!committed)
                size = 0;
        if (base == U64_MAX || size == U64_MAX) {
                dev_warn(&port->dev, "decoder%d.%d: Invalid resource range\n",
                         port->id, cxld->id);
                return -ENXIO;
        }

        if (info)
                cxled = to_cxl_endpoint_decoder(&cxld->dev);
        cxld->hpa_range = (struct range) {
                .start = base,
                .end = base + size - 1,
        };

        /* decoders are enabled if committed */
        if (committed) {
                cxld->flags |= CXL_DECODER_F_ENABLE;
                if (ctrl & CXL_HDM_DECODER0_CTRL_LOCK)
                        cxld->flags |= CXL_DECODER_F_LOCK;
                if (FIELD_GET(CXL_HDM_DECODER0_CTRL_HOSTONLY, ctrl))
                        cxld->target_type = CXL_DECODER_HOSTONLYMEM;
                else
                        cxld->target_type = CXL_DECODER_DEVMEM;

                guard(rwsem_write)(&cxl_region_rwsem);
                if (cxld->id != cxl_num_decoders_committed(port)) {
                        dev_warn(&port->dev,
                                 "decoder%d.%d: Committed out of order\n",
                                 port->id, cxld->id);
                        return -ENXIO;
                }

                if (size == 0) {
                        dev_warn(&port->dev,
                                 "decoder%d.%d: Committed with zero size\n",
                                 port->id, cxld->id);
                        return -ENXIO;
                }
                port->commit_end = cxld->id;
        } else {
                if (cxled) {
                        struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
                        struct cxl_dev_state *cxlds = cxlmd->cxlds;

                        /*
                         * Default by devtype until a device arrives that needs
                         * more precision.
                         */
                        if (cxlds->type == CXL_DEVTYPE_CLASSMEM)
                                cxld->target_type = CXL_DECODER_HOSTONLYMEM;
                        else
                                cxld->target_type = CXL_DECODER_DEVMEM;
                } else {
                        /* To be overridden by region type at commit time */
                        cxld->target_type = CXL_DECODER_HOSTONLYMEM;
                }

                if (!FIELD_GET(CXL_HDM_DECODER0_CTRL_HOSTONLY, ctrl) &&
                    cxld->target_type == CXL_DECODER_HOSTONLYMEM) {
                        ctrl |= CXL_HDM_DECODER0_CTRL_HOSTONLY;
                        writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
                }
        }
        rc = eiw_to_ways(FIELD_GET(CXL_HDM_DECODER0_CTRL_IW_MASK, ctrl),
                          &cxld->interleave_ways);
        if (rc) {
                dev_warn(&port->dev,
                         "decoder%d.%d: Invalid interleave ways (ctrl: %#x)\n",
                         port->id, cxld->id, ctrl);
                return rc;
        }
        rc = eig_to_granularity(FIELD_GET(CXL_HDM_DECODER0_CTRL_IG_MASK, ctrl),
                                 &cxld->interleave_granularity);
        if (rc) {
                dev_warn(&port->dev,
                         "decoder%d.%d: Invalid interleave granularity (ctrl: %#x)\n",
                         port->id, cxld->id, ctrl);
                return rc;
        }

        dev_dbg(&port->dev, "decoder%d.%d: range: %#llx-%#llx iw: %d ig: %d\n",
                port->id, cxld->id, cxld->hpa_range.start, cxld->hpa_range.end,
                cxld->interleave_ways, cxld->interleave_granularity);

        if (!cxled) {
                lo = readl(hdm + CXL_HDM_DECODER0_TL_LOW(which));
                hi = readl(hdm + CXL_HDM_DECODER0_TL_HIGH(which));
                target_list.value = (hi << 32) + lo;
                for (i = 0; i < cxld->interleave_ways; i++)
                        target_map[i] = target_list.target_id[i];

                return 0;
        }

        if (!committed)
                return 0;

        dpa_size = div_u64_rem(size, cxld->interleave_ways, &remainder);
        if (remainder) {
                dev_err(&port->dev,
                        "decoder%d.%d: invalid committed configuration size: %#llx ways: %d\n",
                        port->id, cxld->id, size, cxld->interleave_ways);
                return -ENXIO;
        }
        lo = readl(hdm + CXL_HDM_DECODER0_SKIP_LOW(which));
        hi = readl(hdm + CXL_HDM_DECODER0_SKIP_HIGH(which));
        skip = (hi << 32) + lo;
        rc = devm_cxl_dpa_reserve(cxled, *dpa_base + skip, dpa_size, skip);
        if (rc) {
                dev_err(&port->dev,
                        "decoder%d.%d: Failed to reserve DPA range %#llx - %#llx (%d)\n",
                        port->id, cxld->id, *dpa_base,
                        *dpa_base + dpa_size + skip - 1, rc);
                return rc;
        }
        *dpa_base += dpa_size + skip;

        cxled->state = CXL_DECODER_STATE_AUTO;

        return 0;
}
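
/*
 * Note the reconstruction above: a committed endpoint decoder's HPA
 * range and interleave geometry come straight from the registers, while
 * its DPA footprint is derived as size / interleave_ways plus any skip,
 * then reserved against the device's dpa_res so software state matches
 * what firmware committed.
 */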

static void cxl_settle_decoders(struct cxl_hdm *cxlhdm)
{
        void __iomem *hdm = cxlhdm->regs.hdm_decoder;
        int committed, i;
        u32 ctrl;

        if (!hdm)
                return;

        /*
         * Since the register resource was recently claimed via request_region()
         * be careful about trusting the "not-committed" status until the commit
         * timeout has elapsed.  The commit timeout is 10ms (CXL 2.0
         * 8.2.5.12.20), but double it to be tolerant of any clock skew between
         * host and target.
         */
        for (i = 0, committed = 0; i < cxlhdm->decoder_count; i++) {
                ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
                if (ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED)
                        committed++;
        }

        /* ensure that future checks of committed can be trusted */
        if (committed != cxlhdm->decoder_count)
                msleep(20);
}

/**
 * devm_cxl_enumerate_decoders - add decoder objects per HDM register set
 * @cxlhdm: HDM capability context established by devm_cxl_setup_hdm()
 * @info: cached DVSEC range register info
 */
int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
                                struct cxl_endpoint_dvsec_info *info)
{
        void __iomem *hdm = cxlhdm->regs.hdm_decoder;
        struct cxl_port *port = cxlhdm->port;
        int i;
        u64 dpa_base = 0;

        cxl_settle_decoders(cxlhdm);

        for (i = 0; i < cxlhdm->decoder_count; i++) {
                int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 };
                int rc, target_count = cxlhdm->target_count;
                struct cxl_decoder *cxld;

                if (is_cxl_endpoint(port)) {
                        struct cxl_endpoint_decoder *cxled;

                        cxled = cxl_endpoint_decoder_alloc(port);
                        if (IS_ERR(cxled)) {
                                dev_warn(&port->dev,
                                         "Failed to allocate decoder%d.%d\n",
                                         port->id, i);
                                return PTR_ERR(cxled);
                        }
                        cxld = &cxled->cxld;
                } else {
                        struct cxl_switch_decoder *cxlsd;

                        cxlsd = cxl_switch_decoder_alloc(port, target_count);
                        if (IS_ERR(cxlsd)) {
                                dev_warn(&port->dev,
                                         "Failed to allocate decoder%d.%d\n",
                                         port->id, i);
                                return PTR_ERR(cxlsd);
                        }
                        cxld = &cxlsd->cxld;
                }

                rc = init_hdm_decoder(port, cxld, target_map, hdm, i,
                                      &dpa_base, info);
                if (rc) {
                        dev_warn(&port->dev,
                                 "Failed to initialize decoder%d.%d\n",
                                 port->id, i);
                        put_device(&cxld->dev);
                        return rc;
                }
                rc = add_hdm_decoder(port, cxld, target_map);
                if (rc) {
                        dev_warn(&port->dev,
                                 "Failed to add decoder%d.%d\n", port->id, i);
                        return rc;
                }
        }

        return 0;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_decoders, "CXL");