// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/delay.h>

#include "cxlmem.h"
#include "core.h"

/**
 * DOC: cxl core hdm
 *
 * Compute Express Link Host Managed Device Memory, starting with the
 * CXL 2.0 specification, is managed by an array of HDM Decoder register
 * instances per CXL port and per CXL endpoint. Define common helpers
 * for enumerating these registers and capabilities.
 */

DECLARE_RWSEM(cxl_dpa_rwsem);

static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
			   int *target_map)
{
	int rc;

	rc = cxl_decoder_add_locked(cxld, target_map);
	if (rc) {
		put_device(&cxld->dev);
		dev_err(&port->dev, "Failed to add decoder\n");
		return rc;
	}

	rc = cxl_decoder_autoremove(&port->dev, cxld);
	if (rc)
		return rc;

	dev_dbg(&cxld->dev, "Added to port %s\n", dev_name(&port->dev));

	return 0;
}

/*
 * Per the CXL specification (8.2.5.12 CXL HDM Decoder Capability Structure)
 * single ported host-bridges need not publish a decoder capability when a
 * passthrough decode can be assumed, i.e. all transactions that the uport sees
 * are claimed and passed to the single dport. Disable the range until the first
 * CXL region is enumerated / activated.
 */
int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
{
	struct cxl_switch_decoder *cxlsd;
	struct cxl_dport *dport = NULL;
	int single_port_map[1];
	unsigned long index;
	struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);

	/*
	 * Capability checks are moot for passthrough decoders, support
	 * any and all possibilities.
	 */
	cxlhdm->interleave_mask = ~0U;
	cxlhdm->iw_cap_mask = ~0UL;

	cxlsd = cxl_switch_decoder_alloc(port, 1);
	if (IS_ERR(cxlsd))
		return PTR_ERR(cxlsd);

	device_lock_assert(&port->dev);

	xa_for_each(&port->dports, index, dport)
		break;
	single_port_map[0] = dport->port_id;

	return add_hdm_decoder(port, &cxlsd->cxld, single_port_map);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_passthrough_decoder, CXL);
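
/*
 * A sketch of the expected caller pattern, hedged: the canonical flow
 * lives in the cxl_port driver (drivers/cxl/port.c) and may differ in
 * detail. When devm_cxl_setup_hdm() reports -ENODEV for a switch port
 * with a single dport, fall back to a passthrough decoder:
 *
 *	cxlhdm = devm_cxl_setup_hdm(port, NULL);
 *	if (!IS_ERR(cxlhdm))
 *		return devm_cxl_enumerate_decoders(cxlhdm, NULL);
 *	if (PTR_ERR(cxlhdm) == -ENODEV && nr_dports == 1)
 *		return devm_cxl_add_passthrough_decoder(port);
 *	return PTR_ERR(cxlhdm);
 *
 * "nr_dports" stands in for however the caller tracks its dport count;
 * it is illustrative, not an actual field name.
 */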

static void parse_hdm_decoder_caps(struct cxl_hdm *cxlhdm)
{
	u32 hdm_cap;

	hdm_cap = readl(cxlhdm->regs.hdm_decoder + CXL_HDM_DECODER_CAP_OFFSET);
	cxlhdm->decoder_count = cxl_hdm_decoder_count(hdm_cap);
	cxlhdm->target_count =
		FIELD_GET(CXL_HDM_DECODER_TARGET_COUNT_MASK, hdm_cap);
	if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_11_8, hdm_cap))
		cxlhdm->interleave_mask |= GENMASK(11, 8);
	if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_14_12, hdm_cap))
		cxlhdm->interleave_mask |= GENMASK(14, 12);
	cxlhdm->iw_cap_mask = BIT(1) | BIT(2) | BIT(4) | BIT(8);
	if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_3_6_12_WAY, hdm_cap))
		cxlhdm->iw_cap_mask |= BIT(3) | BIT(6) | BIT(12);
	if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_16_WAY, hdm_cap))
		cxlhdm->iw_cap_mask |= BIT(16);
}
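
/*
 * Reading of the above, restating rather than extending the capability
 * semantics: bit N of ->iw_cap_mask set means N-way host interleave is
 * supported. 1-, 2-, 4- and 8-way support is assumed as the baseline,
 * while 3/6/12-way and 16-way are optional and only set when the
 * capability register advertises them.
 */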

static bool should_emulate_decoders(struct cxl_endpoint_dvsec_info *info)
{
	struct cxl_hdm *cxlhdm;
	void __iomem *hdm;
	u32 ctrl;
	int i;

	if (!info)
		return false;

	cxlhdm = dev_get_drvdata(&info->port->dev);
	hdm = cxlhdm->regs.hdm_decoder;

	if (!hdm)
		return true;

	/*
	 * If HDM decoders are present and the driver is in control of
	 * Mem_Enable, skip DVSEC based emulation.
	 */
	if (!info->mem_enabled)
		return false;

	/*
	 * If any decoders are committed already, there should not be any
	 * emulated DVSEC decoders.
	 */
	for (i = 0; i < cxlhdm->decoder_count; i++) {
		ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
		dev_dbg(&info->port->dev,
			"decoder%d.%d: committed: %ld base: %#x_%.8x size: %#x_%.8x\n",
			info->port->id, i,
			FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl),
			readl(hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(i)),
			readl(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(i)),
			readl(hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(i)),
			readl(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(i)));
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
			return false;
	}

	return true;
}
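
/*
 * Decision summary for should_emulate_decoders(), restating the checks
 * above: no DVSEC info -> no emulation; no mapped HDM decoder registers
 * -> emulate via DVSEC ranges; HDM registers present but Mem_Enable
 * clear -> the driver owns HDM programming, no emulation; any decoder
 * already committed -> trust the HDM decoders, no emulation.
 */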

/**
 * devm_cxl_setup_hdm - map HDM decoder component registers
 * @port: cxl_port to map
 * @info: cached DVSEC range register info
 */
struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
				   struct cxl_endpoint_dvsec_info *info)
{
	struct cxl_register_map *reg_map = &port->reg_map;
	struct device *dev = &port->dev;
	struct cxl_hdm *cxlhdm;
	int rc;

	cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL);
	if (!cxlhdm)
		return ERR_PTR(-ENOMEM);
	cxlhdm->port = port;
	dev_set_drvdata(dev, cxlhdm);

	/* Memory devices can configure device HDM using DVSEC range regs. */
	if (reg_map->resource == CXL_RESOURCE_NONE) {
		if (!info || !info->mem_enabled) {
			dev_err(dev, "No component registers mapped\n");
			return ERR_PTR(-ENXIO);
		}

		cxlhdm->decoder_count = info->ranges;
		return cxlhdm;
	}

	if (!reg_map->component_map.hdm_decoder.valid) {
		dev_dbg(&port->dev, "HDM decoder registers not implemented\n");
		/* unique error code to indicate no HDM decoder capability */
		return ERR_PTR(-ENODEV);
	}

	rc = cxl_map_component_regs(reg_map, &cxlhdm->regs,
				    BIT(CXL_CM_CAP_CAP_ID_HDM));
	if (rc) {
		dev_err(dev, "Failed to map HDM capability.\n");
		return ERR_PTR(rc);
	}

	parse_hdm_decoder_caps(cxlhdm);
	if (cxlhdm->decoder_count == 0) {
		dev_err(dev, "Spec violation. Caps invalid\n");
		return ERR_PTR(-ENXIO);
	}

	/*
	 * Now that the hdm capability is parsed, decide if range
	 * register emulation is needed and fixup cxlhdm accordingly.
	 */
	if (should_emulate_decoders(info)) {
		dev_dbg(dev, "Fallback map %d range register%s\n", info->ranges,
			info->ranges > 1 ? "s" : "");
		cxlhdm->decoder_count = info->ranges;
	}

	return cxlhdm;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_hdm, CXL);

static void __cxl_dpa_debug(struct seq_file *file, struct resource *r, int depth)
{
	unsigned long long start = r->start, end = r->end;

	seq_printf(file, "%*s%08llx-%08llx : %s\n", depth * 2, "", start, end,
		   r->name);
}

void cxl_dpa_debug(struct seq_file *file, struct cxl_dev_state *cxlds)
{
	struct resource *p1, *p2;

	down_read(&cxl_dpa_rwsem);
	for (p1 = cxlds->dpa_res.child; p1; p1 = p1->sibling) {
		__cxl_dpa_debug(file, p1, 0);
		for (p2 = p1->child; p2; p2 = p2->sibling)
			__cxl_dpa_debug(file, p2, 1);
	}
	up_read(&cxl_dpa_rwsem);
}
EXPORT_SYMBOL_NS_GPL(cxl_dpa_debug, CXL);

/*
 * Must be called in a context that synchronizes against this decoder's
 * port ->remove() callback (like an endpoint decoder sysfs attribute)
 */
static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_port *port = cxled_to_port(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct resource *res = cxled->dpa_res;
	resource_size_t skip_start;

	lockdep_assert_held_write(&cxl_dpa_rwsem);

	/* save @skip_start, before @res is released */
	skip_start = res->start - cxled->skip;
	__release_region(&cxlds->dpa_res, res->start, resource_size(res));
	if (cxled->skip)
		__release_region(&cxlds->dpa_res, skip_start, cxled->skip);
	cxled->skip = 0;
	cxled->dpa_res = NULL;
	put_device(&cxled->cxld.dev);
	port->hdm_end--;
}

static void cxl_dpa_release(void *cxled)
{
	down_write(&cxl_dpa_rwsem);
	__cxl_dpa_release(cxled);
	up_write(&cxl_dpa_rwsem);
}

/*
 * Must be called from context that will not race port device
 * unregistration, like decoder sysfs attribute methods
 */
static void devm_cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
{
	struct cxl_port *port = cxled_to_port(cxled);

	lockdep_assert_held_write(&cxl_dpa_rwsem);
	devm_remove_action(&port->dev, cxl_dpa_release, cxled);
	__cxl_dpa_release(cxled);
}

static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
			     resource_size_t base, resource_size_t len,
			     resource_size_t skipped)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_port *port = cxled_to_port(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct device *dev = &port->dev;
	struct resource *res;

	lockdep_assert_held_write(&cxl_dpa_rwsem);

	if (!len) {
		dev_warn(dev, "decoder%d.%d: empty reservation attempted\n",
			 port->id, cxled->cxld.id);
		return -EINVAL;
	}

	if (cxled->dpa_res) {
		dev_dbg(dev, "decoder%d.%d: existing allocation %pr assigned\n",
			port->id, cxled->cxld.id, cxled->dpa_res);
		return -EBUSY;
	}

	if (port->hdm_end + 1 != cxled->cxld.id) {
		/*
		 * Assumes alloc and commit order is always in hardware instance
		 * order per expectations from 8.2.5.12.20 Committing Decoder
		 * Programming that enforce decoder[m] committed before
		 * decoder[m+1] commit start.
		 */
		dev_dbg(dev, "decoder%d.%d: expected decoder%d.%d\n", port->id,
			cxled->cxld.id, port->id, port->hdm_end + 1);
		return -EBUSY;
	}

	if (skipped) {
		res = __request_region(&cxlds->dpa_res, base - skipped, skipped,
				       dev_name(&cxled->cxld.dev), 0);
		if (!res) {
			dev_dbg(dev,
				"decoder%d.%d: failed to reserve skipped space\n",
				port->id, cxled->cxld.id);
			return -EBUSY;
		}
	}
	res = __request_region(&cxlds->dpa_res, base, len,
			       dev_name(&cxled->cxld.dev), 0);
	if (!res) {
		dev_dbg(dev, "decoder%d.%d: failed to reserve allocation\n",
			port->id, cxled->cxld.id);
		if (skipped)
			__release_region(&cxlds->dpa_res, base - skipped,
					 skipped);
		return -EBUSY;
	}
	cxled->dpa_res = res;
	cxled->skip = skipped;

	if (resource_contains(&cxlds->pmem_res, res))
		cxled->mode = CXL_DECODER_PMEM;
	else if (resource_contains(&cxlds->ram_res, res))
		cxled->mode = CXL_DECODER_RAM;
	else {
		dev_warn(dev, "decoder%d.%d: %pr mixed mode not supported\n",
			 port->id, cxled->cxld.id, cxled->dpa_res);
		cxled->mode = CXL_DECODER_MIXED;
	}

	port->hdm_end++;
	get_device(&cxled->cxld.dev);
	return 0;
}

int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
				resource_size_t base, resource_size_t len,
				resource_size_t skipped)
{
	struct cxl_port *port = cxled_to_port(cxled);
	int rc;

	down_write(&cxl_dpa_rwsem);
	rc = __cxl_dpa_reserve(cxled, base, len, skipped);
	up_write(&cxl_dpa_rwsem);

	if (rc)
		return rc;

	return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_dpa_reserve, CXL);
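
/*
 * Sketch of the DPA layout that a reservation with a non-zero @skipped
 * produces, derived from __cxl_dpa_reserve() above:
 *
 *	cxlds->dpa_res:	|---- skip region ----|------ allocation ------|
 *			base - skipped        base        base + len - 1
 *
 * Both spans are held as child resources of cxlds->dpa_res, and both
 * are dropped together in __cxl_dpa_release().
 */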

resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled)
{
	resource_size_t size = 0;

	down_read(&cxl_dpa_rwsem);
	if (cxled->dpa_res)
		size = resource_size(cxled->dpa_res);
	up_read(&cxl_dpa_rwsem);

	return size;
}

resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled)
{
	resource_size_t base = -1;

	lockdep_assert_held(&cxl_dpa_rwsem);
	if (cxled->dpa_res)
		base = cxled->dpa_res->start;

	return base;
}

int cxl_dpa_free(struct cxl_endpoint_decoder *cxled)
{
	struct cxl_port *port = cxled_to_port(cxled);
	struct device *dev = &cxled->cxld.dev;
	int rc;

	down_write(&cxl_dpa_rwsem);
	if (!cxled->dpa_res) {
		rc = 0;
		goto out;
	}
	if (cxled->cxld.region) {
		dev_dbg(dev, "decoder assigned to: %s\n",
			dev_name(&cxled->cxld.region->dev));
		rc = -EBUSY;
		goto out;
	}
	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
		dev_dbg(dev, "decoder enabled\n");
		rc = -EBUSY;
		goto out;
	}
	if (cxled->cxld.id != port->hdm_end) {
		dev_dbg(dev, "expected decoder%d.%d\n", port->id,
			port->hdm_end);
		rc = -EBUSY;
		goto out;
	}
	devm_cxl_dpa_release(cxled);
	rc = 0;
out:
	up_write(&cxl_dpa_rwsem);
	return rc;
}

int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled,
		     enum cxl_decoder_mode mode)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct device *dev = &cxled->cxld.dev;
	int rc;

	switch (mode) {
	case CXL_DECODER_RAM:
	case CXL_DECODER_PMEM:
		break;
	default:
		dev_dbg(dev, "unsupported mode: %d\n", mode);
		return -EINVAL;
	}

	down_write(&cxl_dpa_rwsem);
	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
		rc = -EBUSY;
		goto out;
	}

	/*
	 * Only allow modes that are supported by the current partition
	 * configuration
	 */
	if (mode == CXL_DECODER_PMEM && !resource_size(&cxlds->pmem_res)) {
		dev_dbg(dev, "no available pmem capacity\n");
		rc = -ENXIO;
		goto out;
	}
	if (mode == CXL_DECODER_RAM && !resource_size(&cxlds->ram_res)) {
		dev_dbg(dev, "no available ram capacity\n");
		rc = -ENXIO;
		goto out;
	}

	cxled->mode = mode;
	rc = 0;
out:
	up_write(&cxl_dpa_rwsem);

	return rc;
}

int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	resource_size_t free_ram_start, free_pmem_start;
	struct cxl_port *port = cxled_to_port(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct device *dev = &cxled->cxld.dev;
	resource_size_t start, avail, skip;
	struct resource *p, *last;
	int rc;

	down_write(&cxl_dpa_rwsem);
	if (cxled->cxld.region) {
		dev_dbg(dev, "decoder attached to %s\n",
			dev_name(&cxled->cxld.region->dev));
		rc = -EBUSY;
		goto out;
	}

	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
		dev_dbg(dev, "decoder enabled\n");
		rc = -EBUSY;
		goto out;
	}

	for (p = cxlds->ram_res.child, last = NULL; p; p = p->sibling)
		last = p;
	if (last)
		free_ram_start = last->end + 1;
	else
		free_ram_start = cxlds->ram_res.start;

	for (p = cxlds->pmem_res.child, last = NULL; p; p = p->sibling)
		last = p;
	if (last)
		free_pmem_start = last->end + 1;
	else
		free_pmem_start = cxlds->pmem_res.start;

	if (cxled->mode == CXL_DECODER_RAM) {
		start = free_ram_start;
		avail = cxlds->ram_res.end - start + 1;
		skip = 0;
	} else if (cxled->mode == CXL_DECODER_PMEM) {
		resource_size_t skip_start, skip_end;

		start = free_pmem_start;
		avail = cxlds->pmem_res.end - start + 1;
		skip_start = free_ram_start;

		/*
		 * If some pmem is already allocated, then that allocation
		 * already handled the skip.
		 */
		if (cxlds->pmem_res.child &&
		    skip_start == cxlds->pmem_res.child->start)
			skip_end = skip_start - 1;
		else
			skip_end = start - 1;
		skip = skip_end - skip_start + 1;
	} else {
		dev_dbg(dev, "mode not set\n");
		rc = -EINVAL;
		goto out;
	}

	if (size > avail) {
		dev_dbg(dev, "%pa exceeds available %s capacity: %pa\n", &size,
			cxl_decoder_mode_name(cxled->mode), &avail);
		rc = -ENOSPC;
		goto out;
	}

	rc = __cxl_dpa_reserve(cxled, start, size, skip);
out:
	up_write(&cxl_dpa_rwsem);

	if (rc)
		return rc;

	return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
}
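
/*
 * Worked example for the pmem skip math above, with made-up numbers:
 * assume ram_res spans [0, 1G) and pmem_res spans [1G, 2G) with nothing
 * allocated. The first pmem allocation sees free_ram_start == 0 and
 * start == 1G, so it reserves a 1G skip over the free ram to keep
 * decoder commit order aligned with DPA order. That skip reservation
 * consumes the free ram span, so a second pmem allocation finds
 * free_ram_start == 1G matching pmem_res.child->start and computes
 * skip == 0.
 */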

static void cxld_set_interleave(struct cxl_decoder *cxld, u32 *ctrl)
{
	u16 eig;
	u8 eiw;

	/*
	 * Input validation ensures these warns never fire, but otherwise
	 * suppress uninitialized variable usage warnings.
	 */
	if (WARN_ONCE(ways_to_eiw(cxld->interleave_ways, &eiw),
		      "invalid interleave_ways: %d\n", cxld->interleave_ways))
		return;
	if (WARN_ONCE(granularity_to_eig(cxld->interleave_granularity, &eig),
		      "invalid interleave_granularity: %d\n",
		      cxld->interleave_granularity))
		return;

	u32p_replace_bits(ctrl, eig, CXL_HDM_DECODER0_CTRL_IG_MASK);
	u32p_replace_bits(ctrl, eiw, CXL_HDM_DECODER0_CTRL_IW_MASK);
	*ctrl |= CXL_HDM_DECODER0_CTRL_COMMIT;
}

static void cxld_set_type(struct cxl_decoder *cxld, u32 *ctrl)
{
	u32p_replace_bits(ctrl,
			  !!(cxld->target_type == CXL_DECODER_HOSTONLYMEM),
			  CXL_HDM_DECODER0_CTRL_HOSTONLY);
}

static void cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt)
{
	struct cxl_dport **t = &cxlsd->target[0];
	int ways = cxlsd->cxld.interleave_ways;

	*tgt = FIELD_PREP(GENMASK(7, 0), t[0]->port_id);
	if (ways > 1)
		*tgt |= FIELD_PREP(GENMASK(15, 8), t[1]->port_id);
	if (ways > 2)
		*tgt |= FIELD_PREP(GENMASK(23, 16), t[2]->port_id);
	if (ways > 3)
		*tgt |= FIELD_PREP(GENMASK(31, 24), t[3]->port_id);
	if (ways > 4)
		*tgt |= FIELD_PREP(GENMASK_ULL(39, 32), t[4]->port_id);
	if (ways > 5)
		*tgt |= FIELD_PREP(GENMASK_ULL(47, 40), t[5]->port_id);
	if (ways > 6)
		*tgt |= FIELD_PREP(GENMASK_ULL(55, 48), t[6]->port_id);
	if (ways > 7)
		*tgt |= FIELD_PREP(GENMASK_ULL(63, 56), t[7]->port_id);
}
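
/*
 * Layout restated: the 64-bit target list packs up to eight one-byte
 * dport ids, with interleave position i in bits [8i + 7:8i]. So
 * target_id[0] lands in bits [7:0] of the TL_LOW register and
 * target_id[7] in bits [63:56], i.e. bits [31:24] of TL_HIGH.
 */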

/*
 * Per CXL 2.0 8.2.5.12.20 Committing Decoder Programming, hardware must set
 * committed or error within 10ms, but just be generous with 20ms to account for
 * clock skew and other marginal behavior
 */
#define COMMIT_TIMEOUT_MS 20
static int cxld_await_commit(void __iomem *hdm, int id)
{
	u32 ctrl;
	int i;

	for (i = 0; i < COMMIT_TIMEOUT_MS; i++) {
		ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMIT_ERROR, ctrl)) {
			ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
			writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
			return -EIO;
		}
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
			return 0;
		fsleep(1000);
	}

	return -ETIMEDOUT;
}

static int cxl_decoder_commit(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
	struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	int id = cxld->id, rc;
	u64 base, size;
	u32 ctrl;

	if (cxld->flags & CXL_DECODER_F_ENABLE)
		return 0;

	if (cxl_num_decoders_committed(port) != id) {
		dev_dbg(&port->dev,
			"%s: out of order commit, expected decoder%d.%d\n",
			dev_name(&cxld->dev), port->id,
			cxl_num_decoders_committed(port));
		return -EBUSY;
	}

	/*
	 * For endpoint decoders hosted on CXL memory devices that
	 * support the sanitize operation, make sure sanitize is not in-flight.
	 */
	if (is_endpoint_decoder(&cxld->dev)) {
		struct cxl_endpoint_decoder *cxled =
			to_cxl_endpoint_decoder(&cxld->dev);
		struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
		struct cxl_memdev_state *mds =
			to_cxl_memdev_state(cxlmd->cxlds);

		if (mds && mds->security.sanitize_active) {
			dev_dbg(&cxlmd->dev,
				"attempted to commit %s during sanitize\n",
				dev_name(&cxld->dev));
			return -EBUSY;
		}
	}

	down_read(&cxl_dpa_rwsem);
	/* common decoder settings */
	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id));
	cxld_set_interleave(cxld, &ctrl);
	cxld_set_type(cxld, &ctrl);
	base = cxld->hpa_range.start;
	size = range_len(&cxld->hpa_range);

	writel(upper_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
	writel(lower_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
	writel(upper_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
	writel(lower_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));

	if (is_switch_decoder(&cxld->dev)) {
		struct cxl_switch_decoder *cxlsd =
			to_cxl_switch_decoder(&cxld->dev);
		void __iomem *tl_hi = hdm + CXL_HDM_DECODER0_TL_HIGH(id);
		void __iomem *tl_lo = hdm + CXL_HDM_DECODER0_TL_LOW(id);
		u64 targets;

		cxlsd_set_targets(cxlsd, &targets);
		writel(upper_32_bits(targets), tl_hi);
		writel(lower_32_bits(targets), tl_lo);
	} else {
		struct cxl_endpoint_decoder *cxled =
			to_cxl_endpoint_decoder(&cxld->dev);
		void __iomem *sk_hi = hdm + CXL_HDM_DECODER0_SKIP_HIGH(id);
		void __iomem *sk_lo = hdm + CXL_HDM_DECODER0_SKIP_LOW(id);

		writel(upper_32_bits(cxled->skip), sk_hi);
		writel(lower_32_bits(cxled->skip), sk_lo);
	}

	writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
	up_read(&cxl_dpa_rwsem);

	port->commit_end++;
	rc = cxld_await_commit(hdm, cxld->id);
	if (rc) {
		dev_dbg(&port->dev, "%s: error %d committing decoder\n",
			dev_name(&cxld->dev), rc);
		cxld->reset(cxld);
		return rc;
	}
	cxld->flags |= CXL_DECODER_F_ENABLE;

	return 0;
}

static int cxl_decoder_reset(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
	struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	int id = cxld->id;
	u32 ctrl;

	if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
		return 0;

	if (port->commit_end != id) {
		dev_dbg(&port->dev,
			"%s: out of order reset, expected decoder%d.%d\n",
			dev_name(&cxld->dev), port->id, port->commit_end);
		return -EBUSY;
	}

	down_read(&cxl_dpa_rwsem);
	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
	ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
	writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));

	writel(0, hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
	writel(0, hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));
	writel(0, hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
	writel(0, hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
	up_read(&cxl_dpa_rwsem);

	port->commit_end--;
	cxld->flags &= ~CXL_DECODER_F_ENABLE;

	/* Userspace is now responsible for reconfiguring this decoder */
	if (is_endpoint_decoder(&cxld->dev)) {
		struct cxl_endpoint_decoder *cxled;

		cxled = to_cxl_endpoint_decoder(&cxld->dev);
		cxled->state = CXL_DECODER_STATE_MANUAL;
	}

	return 0;
}

static int cxl_setup_hdm_decoder_from_dvsec(
	struct cxl_port *port, struct cxl_decoder *cxld, u64 *dpa_base,
	int which, struct cxl_endpoint_dvsec_info *info)
{
	struct cxl_endpoint_decoder *cxled;
	u64 len;
	int rc;

	if (!is_cxl_endpoint(port))
		return -EOPNOTSUPP;

	cxled = to_cxl_endpoint_decoder(&cxld->dev);
	len = range_len(&info->dvsec_range[which]);
	if (!len)
		return -ENOENT;

	cxld->target_type = CXL_DECODER_HOSTONLYMEM;
	cxld->commit = NULL;
	cxld->reset = NULL;
	cxld->hpa_range = info->dvsec_range[which];

	/*
	 * Set the emulated decoder as locked pending additional support to
	 * change the range registers at run time.
	 */
	cxld->flags |= CXL_DECODER_F_ENABLE | CXL_DECODER_F_LOCK;
	port->commit_end = cxld->id;

	rc = devm_cxl_dpa_reserve(cxled, *dpa_base, len, 0);
	if (rc) {
788                 dev_err(&port->dev,
789                         "decoder%d.%d: Failed to reserve DPA range %#llx - %#llx\n (%d)",
790                         port->id, cxld->id, *dpa_base, *dpa_base + len - 1, rc);
		return rc;
	}
	*dpa_base += len;
	cxled->state = CXL_DECODER_STATE_AUTO;

	return 0;
}

static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
			    int *target_map, void __iomem *hdm, int which,
			    u64 *dpa_base, struct cxl_endpoint_dvsec_info *info)
{
	struct cxl_endpoint_decoder *cxled = NULL;
	u64 size, base, skip, dpa_size, lo, hi;
	bool committed;
	u32 remainder;
	int i, rc;
	u32 ctrl;
	union {
		u64 value;
		unsigned char target_id[8];
	} target_list;

	if (should_emulate_decoders(info))
		return cxl_setup_hdm_decoder_from_dvsec(port, cxld, dpa_base,
							which, info);

	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
	lo = readl(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(which));
	hi = readl(hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(which));
	base = (hi << 32) + lo;
	lo = readl(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(which));
	hi = readl(hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(which));
	size = (hi << 32) + lo;
	committed = !!(ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED);
	cxld->commit = cxl_decoder_commit;
	cxld->reset = cxl_decoder_reset;

	if (!committed)
		size = 0;
	if (base == U64_MAX || size == U64_MAX) {
		dev_warn(&port->dev, "decoder%d.%d: Invalid resource range\n",
			 port->id, cxld->id);
		return -ENXIO;
	}

	if (info)
		cxled = to_cxl_endpoint_decoder(&cxld->dev);
	cxld->hpa_range = (struct range) {
		.start = base,
		.end = base + size - 1,
	};

	/* decoders are enabled if committed */
	if (committed) {
		cxld->flags |= CXL_DECODER_F_ENABLE;
		if (ctrl & CXL_HDM_DECODER0_CTRL_LOCK)
			cxld->flags |= CXL_DECODER_F_LOCK;
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_HOSTONLY, ctrl))
			cxld->target_type = CXL_DECODER_HOSTONLYMEM;
		else
			cxld->target_type = CXL_DECODER_DEVMEM;

		guard(rwsem_write)(&cxl_region_rwsem);
		if (cxld->id != cxl_num_decoders_committed(port)) {
			dev_warn(&port->dev,
				 "decoder%d.%d: Committed out of order\n",
				 port->id, cxld->id);
			return -ENXIO;
		}

		if (size == 0) {
			dev_warn(&port->dev,
				 "decoder%d.%d: Committed with zero size\n",
				 port->id, cxld->id);
			return -ENXIO;
		}
		port->commit_end = cxld->id;
	} else {
		if (cxled) {
			struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
			struct cxl_dev_state *cxlds = cxlmd->cxlds;

			/*
			 * Default by devtype until a device arrives that needs
			 * more precision.
			 */
			if (cxlds->type == CXL_DEVTYPE_CLASSMEM)
				cxld->target_type = CXL_DECODER_HOSTONLYMEM;
			else
				cxld->target_type = CXL_DECODER_DEVMEM;
		} else {
			/* To be overridden by region type at commit time */
			cxld->target_type = CXL_DECODER_HOSTONLYMEM;
		}

		if (!FIELD_GET(CXL_HDM_DECODER0_CTRL_HOSTONLY, ctrl) &&
		    cxld->target_type == CXL_DECODER_HOSTONLYMEM) {
			ctrl |= CXL_HDM_DECODER0_CTRL_HOSTONLY;
			writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
		}
	}
	rc = eiw_to_ways(FIELD_GET(CXL_HDM_DECODER0_CTRL_IW_MASK, ctrl),
			 &cxld->interleave_ways);
	if (rc) {
		dev_warn(&port->dev,
			 "decoder%d.%d: Invalid interleave ways (ctrl: %#x)\n",
			 port->id, cxld->id, ctrl);
		return rc;
	}
	rc = eig_to_granularity(FIELD_GET(CXL_HDM_DECODER0_CTRL_IG_MASK, ctrl),
				&cxld->interleave_granularity);
	if (rc) {
		dev_warn(&port->dev,
			 "decoder%d.%d: Invalid interleave granularity (ctrl: %#x)\n",
			 port->id, cxld->id, ctrl);
		return rc;
	}

	dev_dbg(&port->dev, "decoder%d.%d: range: %#llx-%#llx iw: %d ig: %d\n",
		port->id, cxld->id, cxld->hpa_range.start, cxld->hpa_range.end,
		cxld->interleave_ways, cxld->interleave_granularity);

	if (!cxled) {
		lo = readl(hdm + CXL_HDM_DECODER0_TL_LOW(which));
		hi = readl(hdm + CXL_HDM_DECODER0_TL_HIGH(which));
		target_list.value = (hi << 32) + lo;
		for (i = 0; i < cxld->interleave_ways; i++)
			target_map[i] = target_list.target_id[i];

		return 0;
	}

	if (!committed)
		return 0;

	dpa_size = div_u64_rem(size, cxld->interleave_ways, &remainder);
	if (remainder) {
		dev_err(&port->dev,
			"decoder%d.%d: invalid committed configuration size: %#llx ways: %d\n",
			port->id, cxld->id, size, cxld->interleave_ways);
		return -ENXIO;
	}
	lo = readl(hdm + CXL_HDM_DECODER0_SKIP_LOW(which));
	hi = readl(hdm + CXL_HDM_DECODER0_SKIP_HIGH(which));
	skip = (hi << 32) + lo;
	rc = devm_cxl_dpa_reserve(cxled, *dpa_base + skip, dpa_size, skip);
	if (rc) {
		dev_err(&port->dev,
			"decoder%d.%d: Failed to reserve DPA range %#llx - %#llx (%d)\n",
			port->id, cxld->id, *dpa_base,
			*dpa_base + dpa_size + skip - 1, rc);
		return rc;
	}
	*dpa_base += dpa_size + skip;

	cxled->state = CXL_DECODER_STATE_AUTO;

	return 0;
}

static void cxl_settle_decoders(struct cxl_hdm *cxlhdm)
{
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	int committed, i;
	u32 ctrl;

	if (!hdm)
		return;

	/*
	 * Since the register resource was recently claimed via request_region()
	 * be careful about trusting the "not-committed" status until the commit
	 * timeout has elapsed.  The commit timeout is 10ms (CXL 2.0
	 * 8.2.5.12.20), but double it to be tolerant of any clock skew between
	 * host and target.
	 */
	for (i = 0, committed = 0; i < cxlhdm->decoder_count; i++) {
		ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
		if (ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED)
			committed++;
	}

	/* ensure that future checks of committed can be trusted */
	if (committed != cxlhdm->decoder_count)
		msleep(20);
}

/**
 * devm_cxl_enumerate_decoders - add decoder objects per HDM register set
 * @cxlhdm: Structure to populate with HDM capabilities
 * @info: cached DVSEC range register info
 */
int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
				struct cxl_endpoint_dvsec_info *info)
{
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	struct cxl_port *port = cxlhdm->port;
	int i;
	u64 dpa_base = 0;

	cxl_settle_decoders(cxlhdm);

	for (i = 0; i < cxlhdm->decoder_count; i++) {
		int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 };
		int rc, target_count = cxlhdm->target_count;
		struct cxl_decoder *cxld;

		if (is_cxl_endpoint(port)) {
			struct cxl_endpoint_decoder *cxled;

			cxled = cxl_endpoint_decoder_alloc(port);
			if (IS_ERR(cxled)) {
				dev_warn(&port->dev,
					 "Failed to allocate decoder%d.%d\n",
					 port->id, i);
				return PTR_ERR(cxled);
			}
			cxld = &cxled->cxld;
		} else {
			struct cxl_switch_decoder *cxlsd;

			cxlsd = cxl_switch_decoder_alloc(port, target_count);
			if (IS_ERR(cxlsd)) {
				dev_warn(&port->dev,
					 "Failed to allocate decoder%d.%d\n",
					 port->id, i);
				return PTR_ERR(cxlsd);
			}
			cxld = &cxlsd->cxld;
		}

		rc = init_hdm_decoder(port, cxld, target_map, hdm, i,
				      &dpa_base, info);
		if (rc) {
			dev_warn(&port->dev,
				 "Failed to initialize decoder%d.%d\n",
				 port->id, i);
			put_device(&cxld->dev);
			return rc;
		}
		rc = add_hdm_decoder(port, cxld, target_map);
		if (rc) {
			dev_warn(&port->dev,
				 "Failed to add decoder%d.%d\n", port->id, i);
			return rc;
		}
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_decoders, CXL);
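
/*
 * A sketch of the end-to-end usage, hedged: the authoritative call
 * sequences are in the cxl_port and cxl_mem drivers under drivers/cxl/.
 *
 *	cxlhdm = devm_cxl_setup_hdm(port, info);
 *	if (IS_ERR(cxlhdm))
 *		return PTR_ERR(cxlhdm);
 *	return devm_cxl_enumerate_decoders(cxlhdm, info);
 *
 * i.e. map the HDM decoder capability first, then instantiate one
 * decoder object per register set (or per DVSEC range when emulating).
 */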