]> Git Repo - linux.git/blob - drivers/cxl/pmem.c
net/sched: act_api: move TCA_EXT_WARN_MSG to the correct hierarchy
[linux.git] / drivers / cxl / pmem.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright(c) 2021 Intel Corporation. All rights reserved. */
3 #include <linux/libnvdimm.h>
4 #include <asm/unaligned.h>
5 #include <linux/device.h>
6 #include <linux/module.h>
7 #include <linux/ndctl.h>
8 #include <linux/async.h>
9 #include <linux/slab.h>
10 #include <linux/nd.h>
11 #include "cxlmem.h"
12 #include "cxl.h"
13
/* Security (passphrase/erase) ops implemented elsewhere in the CXL driver */
extern const struct nvdimm_security_ops *cxl_security_ops;

/*
 * Mailbox commands marked exclusive to the kernel while a cxl_nvdimm is
 * bound (populated in cxl_pmem_init(), applied in cxl_nvdimm_probe()).
 */
static __read_mostly DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);
17
18 static void clear_exclusive(void *cxlds)
19 {
20         clear_exclusive_cxl_commands(cxlds, exclusive_cmds);
21 }
22
/* devm action: delete the nvdimm created in cxl_nvdimm_probe() */
static void unregister_nvdimm(void *nvdimm)
{
	struct nvdimm *victim = nvdimm;

	nvdimm_delete(victim);
}
27
28 static ssize_t provider_show(struct device *dev, struct device_attribute *attr, char *buf)
29 {
30         struct nvdimm *nvdimm = to_nvdimm(dev);
31         struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
32
33         return sysfs_emit(buf, "%s\n", dev_name(&cxl_nvd->dev));
34 }
35 static DEVICE_ATTR_RO(provider);
36
37 static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *buf)
38 {
39         struct nvdimm *nvdimm = to_nvdimm(dev);
40         struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
41         struct cxl_dev_state *cxlds = cxl_nvd->cxlmd->cxlds;
42
43         return sysfs_emit(buf, "%lld\n", cxlds->serial);
44 }
45 static DEVICE_ATTR_RO(id);
46
/* Attributes published under the nvdimm's "cxl" sysfs group */
static struct attribute *cxl_dimm_attributes[] = {
	&dev_attr_id.attr,
	&dev_attr_provider.attr,
	NULL
};

static const struct attribute_group cxl_dimm_attribute_group = {
	.name = "cxl",
	.attrs = cxl_dimm_attributes,
};

/* NULL-terminated group list handed to __nvdimm_create() */
static const struct attribute_group *cxl_dimm_attribute_groups[] = {
	&cxl_dimm_attribute_group,
	NULL
};
62
63 static int cxl_nvdimm_probe(struct device *dev)
64 {
65         struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);
66         struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
67         struct cxl_nvdimm_bridge *cxl_nvb = cxlmd->cxl_nvb;
68         unsigned long flags = 0, cmd_mask = 0;
69         struct cxl_dev_state *cxlds = cxlmd->cxlds;
70         struct nvdimm *nvdimm;
71         int rc;
72
73         set_exclusive_cxl_commands(cxlds, exclusive_cmds);
74         rc = devm_add_action_or_reset(dev, clear_exclusive, cxlds);
75         if (rc)
76                 return rc;
77
78         set_bit(NDD_LABELING, &flags);
79         set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
80         set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
81         set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);
82         nvdimm = __nvdimm_create(cxl_nvb->nvdimm_bus, cxl_nvd,
83                                  cxl_dimm_attribute_groups, flags,
84                                  cmd_mask, 0, NULL, cxl_nvd->dev_id,
85                                  cxl_security_ops, NULL);
86         if (!nvdimm)
87                 return -ENOMEM;
88
89         dev_set_drvdata(dev, nvdimm);
90         return devm_add_action_or_reset(dev, unregister_nvdimm, nvdimm);
91 }
92
/* Driver for CXL_DEVICE_NVDIMM devices enumerated on the CXL bus */
static struct cxl_driver cxl_nvdimm_driver = {
	.name = "cxl_nvdimm",
	.probe = cxl_nvdimm_probe,
	.id = CXL_DEVICE_NVDIMM,
	.drv = {
		/* no manual bind/unbind via sysfs */
		.suppress_bind_attrs = true,
	},
};
101
102 static int cxl_pmem_get_config_size(struct cxl_dev_state *cxlds,
103                                     struct nd_cmd_get_config_size *cmd,
104                                     unsigned int buf_len)
105 {
106         if (sizeof(*cmd) > buf_len)
107                 return -EINVAL;
108
109         *cmd = (struct nd_cmd_get_config_size) {
110                  .config_size = cxlds->lsa_size,
111                  .max_xfer = cxlds->payload_size - sizeof(struct cxl_mbox_set_lsa),
112         };
113
114         return 0;
115 }
116
117 static int cxl_pmem_get_config_data(struct cxl_dev_state *cxlds,
118                                     struct nd_cmd_get_config_data_hdr *cmd,
119                                     unsigned int buf_len)
120 {
121         struct cxl_mbox_get_lsa get_lsa;
122         struct cxl_mbox_cmd mbox_cmd;
123         int rc;
124
125         if (sizeof(*cmd) > buf_len)
126                 return -EINVAL;
127         if (struct_size(cmd, out_buf, cmd->in_length) > buf_len)
128                 return -EINVAL;
129
130         get_lsa = (struct cxl_mbox_get_lsa) {
131                 .offset = cpu_to_le32(cmd->in_offset),
132                 .length = cpu_to_le32(cmd->in_length),
133         };
134         mbox_cmd = (struct cxl_mbox_cmd) {
135                 .opcode = CXL_MBOX_OP_GET_LSA,
136                 .payload_in = &get_lsa,
137                 .size_in = sizeof(get_lsa),
138                 .size_out = cmd->in_length,
139                 .payload_out = cmd->out_buf,
140         };
141
142         rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
143         cmd->status = 0;
144
145         return rc;
146 }
147
148 static int cxl_pmem_set_config_data(struct cxl_dev_state *cxlds,
149                                     struct nd_cmd_set_config_hdr *cmd,
150                                     unsigned int buf_len)
151 {
152         struct cxl_mbox_set_lsa *set_lsa;
153         struct cxl_mbox_cmd mbox_cmd;
154         int rc;
155
156         if (sizeof(*cmd) > buf_len)
157                 return -EINVAL;
158
159         /* 4-byte status follows the input data in the payload */
160         if (size_add(struct_size(cmd, in_buf, cmd->in_length), 4) > buf_len)
161                 return -EINVAL;
162
163         set_lsa =
164                 kvzalloc(struct_size(set_lsa, data, cmd->in_length), GFP_KERNEL);
165         if (!set_lsa)
166                 return -ENOMEM;
167
168         *set_lsa = (struct cxl_mbox_set_lsa) {
169                 .offset = cpu_to_le32(cmd->in_offset),
170         };
171         memcpy(set_lsa->data, cmd->in_buf, cmd->in_length);
172         mbox_cmd = (struct cxl_mbox_cmd) {
173                 .opcode = CXL_MBOX_OP_SET_LSA,
174                 .payload_in = set_lsa,
175                 .size_in = struct_size(set_lsa, data, cmd->in_length),
176         };
177
178         rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
179
180         /*
181          * Set "firmware" status (4-packed bytes at the end of the input
182          * payload.
183          */
184         put_unaligned(0, (u32 *) &cmd->in_buf[cmd->in_length]);
185         kvfree(set_lsa);
186
187         return rc;
188 }
189
190 static int cxl_pmem_nvdimm_ctl(struct nvdimm *nvdimm, unsigned int cmd,
191                                void *buf, unsigned int buf_len)
192 {
193         struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
194         unsigned long cmd_mask = nvdimm_cmd_mask(nvdimm);
195         struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
196         struct cxl_dev_state *cxlds = cxlmd->cxlds;
197
198         if (!test_bit(cmd, &cmd_mask))
199                 return -ENOTTY;
200
201         switch (cmd) {
202         case ND_CMD_GET_CONFIG_SIZE:
203                 return cxl_pmem_get_config_size(cxlds, buf, buf_len);
204         case ND_CMD_GET_CONFIG_DATA:
205                 return cxl_pmem_get_config_data(cxlds, buf, buf_len);
206         case ND_CMD_SET_CONFIG_DATA:
207                 return cxl_pmem_set_config_data(cxlds, buf, buf_len);
208         default:
209                 return -ENOTTY;
210         }
211 }
212
213 static int cxl_pmem_ctl(struct nvdimm_bus_descriptor *nd_desc,
214                         struct nvdimm *nvdimm, unsigned int cmd, void *buf,
215                         unsigned int buf_len, int *cmd_rc)
216 {
217         /*
218          * No firmware response to translate, let the transport error
219          * code take precedence.
220          */
221         *cmd_rc = 0;
222
223         if (!nvdimm)
224                 return -ENOTTY;
225         return cxl_pmem_nvdimm_ctl(nvdimm, cmd, buf, buf_len);
226 }
227
228 static int detach_nvdimm(struct device *dev, void *data)
229 {
230         struct cxl_nvdimm *cxl_nvd;
231         bool release = false;
232
233         if (!is_cxl_nvdimm(dev))
234                 return 0;
235
236         device_lock(dev);
237         if (!dev->driver)
238                 goto out;
239
240         cxl_nvd = to_cxl_nvdimm(dev);
241         if (cxl_nvd->cxlmd && cxl_nvd->cxlmd->cxl_nvb == data)
242                 release = true;
243 out:
244         device_unlock(dev);
245         if (release)
246                 device_release_driver(dev);
247         return 0;
248 }
249
250 static void unregister_nvdimm_bus(void *_cxl_nvb)
251 {
252         struct cxl_nvdimm_bridge *cxl_nvb = _cxl_nvb;
253         struct nvdimm_bus *nvdimm_bus = cxl_nvb->nvdimm_bus;
254
255         bus_for_each_dev(&cxl_bus_type, NULL, cxl_nvb, detach_nvdimm);
256
257         cxl_nvb->nvdimm_bus = NULL;
258         nvdimm_bus_unregister(nvdimm_bus);
259 }
260
261 static int cxl_nvdimm_bridge_probe(struct device *dev)
262 {
263         struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);
264
265         cxl_nvb->nd_desc = (struct nvdimm_bus_descriptor) {
266                 .provider_name = "CXL",
267                 .module = THIS_MODULE,
268                 .ndctl = cxl_pmem_ctl,
269         };
270
271         cxl_nvb->nvdimm_bus =
272                 nvdimm_bus_register(&cxl_nvb->dev, &cxl_nvb->nd_desc);
273
274         if (!cxl_nvb->nvdimm_bus)
275                 return -ENOMEM;
276
277         return devm_add_action_or_reset(dev, unregister_nvdimm_bus, cxl_nvb);
278 }
279
/* Driver for CXL_DEVICE_NVDIMM_BRIDGE devices on the CXL bus */
static struct cxl_driver cxl_nvdimm_bridge_driver = {
	.name = "cxl_nvdimm_bridge",
	.probe = cxl_nvdimm_bridge_probe,
	.id = CXL_DEVICE_NVDIMM_BRIDGE,
	.drv = {
		/* no manual bind/unbind via sysfs */
		.suppress_bind_attrs = true,
	},
};
288
/* devm action: delete the nd_region created at region-probe time */
static void unregister_nvdimm_region(void *nd_region)
{
	struct nd_region *region = nd_region;

	nvdimm_region_delete(region);
}
293
/* devm action: pull the region's resource back out of iomem_resource */
static void cxlr_pmem_remove_resource(void *res)
{
	struct resource *r = res;

	remove_resource(r);
}
298
/*
 * Per-mapping input to the interleave-set cookie checksum: the
 * mapping's start offset and the backing memdev's serial number
 * (folded together via nd_fletcher64() in cxl_pmem_region_probe()).
 */
struct cxl_pmem_region_info {
	u64 offset;
	u64 serial;
};
303
/*
 * Bind a cxl_pmem_region: claim its HPA range in iomem_resource,
 * assemble an nd_mapping_desc per backing nvdimm, derive an
 * interleave-set cookie from the mapping offsets and serials, and
 * register the resulting pmem nd_region on the bridge's nvdimm bus.
 * All teardown (resource removal, region deletion) runs via devm.
 */
static int cxl_pmem_region_probe(struct device *dev)
{
	struct nd_mapping_desc mappings[CXL_DECODER_MAX_INTERLEAVE];
	struct cxl_pmem_region *cxlr_pmem = to_cxl_pmem_region(dev);
	struct cxl_region *cxlr = cxlr_pmem->cxlr;
	struct cxl_nvdimm_bridge *cxl_nvb = cxlr->cxl_nvb;
	struct cxl_pmem_region_info *info = NULL;
	struct nd_interleave_set *nd_set;
	struct nd_region_desc ndr_desc;
	struct cxl_nvdimm *cxl_nvd;
	struct nvdimm *nvdimm;
	struct resource *res;
	int rc, i = 0;

	memset(&mappings, 0, sizeof(mappings));
	memset(&ndr_desc, 0, sizeof(ndr_desc));

	/* describe the region's host physical address range */
	res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	res->name = "Persistent Memory";
	res->start = cxlr_pmem->hpa_range.start;
	res->end = cxlr_pmem->hpa_range.end;
	res->flags = IORESOURCE_MEM;
	res->desc = IORES_DESC_PERSISTENT_MEMORY;

	rc = insert_resource(&iomem_resource, res);
	if (rc)
		return rc;

	rc = devm_add_action_or_reset(dev, cxlr_pmem_remove_resource, res);
	if (rc)
		return rc;

	ndr_desc.res = res;
	ndr_desc.provider_data = cxlr_pmem;

	ndr_desc.numa_node = memory_add_physaddr_to_nid(res->start);
	ndr_desc.target_node = phys_to_target_node(res->start);
	/* fall back to the online node when no target node is known */
	if (ndr_desc.target_node == NUMA_NO_NODE) {
		ndr_desc.target_node = ndr_desc.numa_node;
		dev_dbg(&cxlr->dev, "changing target node from %d to %d",
			NUMA_NO_NODE, ndr_desc.target_node);
	}

	nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
	if (!nd_set)
		return -ENOMEM;

	ndr_desc.memregion = cxlr->id;
	set_bit(ND_REGION_CXL, &ndr_desc.flags);
	set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc.flags);

	/* scratch space for the cookie inputs; freed at out_nvd */
	info = kmalloc_array(cxlr_pmem->nr_mappings, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	for (i = 0; i < cxlr_pmem->nr_mappings; i++) {
		struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i];
		struct cxl_memdev *cxlmd = m->cxlmd;
		struct cxl_dev_state *cxlds = cxlmd->cxlds;

		/* the nvdimm is registered by cxl_nvdimm_probe() */
		cxl_nvd = cxlmd->cxl_nvd;
		nvdimm = dev_get_drvdata(&cxl_nvd->dev);
		if (!nvdimm) {
			dev_dbg(dev, "[%d]: %s: no nvdimm found\n", i,
				dev_name(&cxlmd->dev));
			rc = -ENODEV;
			goto out_nvd;
		}

		m->cxl_nvd = cxl_nvd;
		mappings[i] = (struct nd_mapping_desc) {
			.nvdimm = nvdimm,
			.start = m->start,
			.size = m->size,
			.position = i,
		};
		info[i].offset = m->start;
		info[i].serial = cxlds->serial;
	}
	ndr_desc.num_mappings = cxlr_pmem->nr_mappings;
	ndr_desc.mapping = mappings;

	/*
	 * TODO enable CXL labels which skip the need for 'interleave-set cookie'
	 */
	nd_set->cookie1 =
		nd_fletcher64(info, sizeof(*info) * cxlr_pmem->nr_mappings, 0);
	nd_set->cookie2 = nd_set->cookie1;
	ndr_desc.nd_set = nd_set;

	cxlr_pmem->nd_region =
		nvdimm_pmem_region_create(cxl_nvb->nvdimm_bus, &ndr_desc);
	if (!cxlr_pmem->nd_region) {
		rc = -ENOMEM;
		goto out_nvd;
	}

	rc = devm_add_action_or_reset(dev, unregister_nvdimm_region,
				      cxlr_pmem->nd_region);
out_nvd:
	/* info is only needed to compute the cookie */
	kfree(info);

	return rc;
}
411
/* Driver for CXL_DEVICE_PMEM_REGION devices on the CXL bus */
static struct cxl_driver cxl_pmem_region_driver = {
	.name = "cxl_pmem_region",
	.probe = cxl_pmem_region_probe,
	.id = CXL_DEVICE_PMEM_REGION,
	.drv = {
		/* no manual bind/unbind via sysfs */
		.suppress_bind_attrs = true,
	},
};
420
421 static __init int cxl_pmem_init(void)
422 {
423         int rc;
424
425         set_bit(CXL_MEM_COMMAND_ID_SET_SHUTDOWN_STATE, exclusive_cmds);
426         set_bit(CXL_MEM_COMMAND_ID_SET_LSA, exclusive_cmds);
427
428         rc = cxl_driver_register(&cxl_nvdimm_bridge_driver);
429         if (rc)
430                 return rc;
431
432         rc = cxl_driver_register(&cxl_nvdimm_driver);
433         if (rc)
434                 goto err_nvdimm;
435
436         rc = cxl_driver_register(&cxl_pmem_region_driver);
437         if (rc)
438                 goto err_region;
439
440         return 0;
441
442 err_region:
443         cxl_driver_unregister(&cxl_nvdimm_driver);
444 err_nvdimm:
445         cxl_driver_unregister(&cxl_nvdimm_bridge_driver);
446         return rc;
447 }
448
449 static __exit void cxl_pmem_exit(void)
450 {
451         cxl_driver_unregister(&cxl_pmem_region_driver);
452         cxl_driver_unregister(&cxl_nvdimm_driver);
453         cxl_driver_unregister(&cxl_nvdimm_bridge_driver);
454 }
455
MODULE_LICENSE("GPL v2");
module_init(cxl_pmem_init);
module_exit(cxl_pmem_exit);
/* symbols consumed from the CXL core live in the CXL namespace */
MODULE_IMPORT_NS(CXL);
/* autoload on any of the three CXL device ids handled above */
MODULE_ALIAS_CXL(CXL_DEVICE_NVDIMM_BRIDGE);
MODULE_ALIAS_CXL(CXL_DEVICE_NVDIMM);
MODULE_ALIAS_CXL(CXL_DEVICE_PMEM_REGION);
This page took 0.056281 seconds and 4 git commands to generate.