/* drivers/cxl/pmem.c */
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright(c) 2021 Intel Corporation. All rights reserved. */
3 #include <linux/libnvdimm.h>
4 #include <asm/unaligned.h>
5 #include <linux/device.h>
6 #include <linux/module.h>
7 #include <linux/ndctl.h>
8 #include <linux/async.h>
9 #include <linux/slab.h>
10 #include <linux/nd.h>
11 #include "cxlmem.h"
12 #include "cxl.h"
13
/* Security operations for CXL nvdimms, provided elsewhere in this driver */
extern const struct nvdimm_security_ops *cxl_security_ops;

/*
 * Mailbox commands claimed for exclusive kernel use while a CXL nvdimm is
 * bound (see cxl_nvdimm_probe() / cxl_pmem_init() for the bits that are set).
 */
static __read_mostly DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);
17
18 static void clear_exclusive(void *cxlds)
19 {
20         clear_exclusive_cxl_commands(cxlds, exclusive_cmds);
21 }
22
/* devm callback: delete the nvdimm created in cxl_nvdimm_probe() */
static void unregister_nvdimm(void *data)
{
        struct nvdimm *nvdimm = data;

        nvdimm_delete(nvdimm);
}
27
28 static ssize_t provider_show(struct device *dev, struct device_attribute *attr, char *buf)
29 {
30         struct nvdimm *nvdimm = to_nvdimm(dev);
31         struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
32
33         return sysfs_emit(buf, "%s\n", dev_name(&cxl_nvd->dev));
34 }
35 static DEVICE_ATTR_RO(provider);
36
37 static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *buf)
38 {
39         struct nvdimm *nvdimm = to_nvdimm(dev);
40         struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
41         struct cxl_dev_state *cxlds = cxl_nvd->cxlmd->cxlds;
42
43         return sysfs_emit(buf, "%lld\n", cxlds->serial);
44 }
45 static DEVICE_ATTR_RO(id);
46
/* Attributes published for each CXL nvdimm under a sysfs "cxl" group */
static struct attribute *cxl_dimm_attributes[] = {
        &dev_attr_id.attr,
        &dev_attr_provider.attr,
        NULL
};

static const struct attribute_group cxl_dimm_attribute_group = {
        .name = "cxl",
        .attrs = cxl_dimm_attributes,
};

/* NULL-terminated group list handed to __nvdimm_create() */
static const struct attribute_group *cxl_dimm_attribute_groups[] = {
        &cxl_dimm_attribute_group,
        NULL
};
62
63 static int cxl_nvdimm_probe(struct device *dev)
64 {
65         struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);
66         struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
67         struct cxl_nvdimm_bridge *cxl_nvb = cxlmd->cxl_nvb;
68         unsigned long flags = 0, cmd_mask = 0;
69         struct cxl_dev_state *cxlds = cxlmd->cxlds;
70         struct nvdimm *nvdimm;
71         int rc;
72
73         set_exclusive_cxl_commands(cxlds, exclusive_cmds);
74         rc = devm_add_action_or_reset(dev, clear_exclusive, cxlds);
75         if (rc)
76                 return rc;
77
78         set_bit(NDD_LABELING, &flags);
79         set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
80         set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
81         set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);
82         nvdimm = __nvdimm_create(cxl_nvb->nvdimm_bus, cxl_nvd,
83                                  cxl_dimm_attribute_groups, flags,
84                                  cmd_mask, 0, NULL, cxl_nvd->dev_id,
85                                  cxl_security_ops, NULL);
86         if (!nvdimm)
87                 return -ENOMEM;
88
89         dev_set_drvdata(dev, nvdimm);
90         return devm_add_action_or_reset(dev, unregister_nvdimm, nvdimm);
91 }
92
/* Driver for CXL_DEVICE_NVDIMM devices; unbind via sysfs is suppressed */
static struct cxl_driver cxl_nvdimm_driver = {
        .name = "cxl_nvdimm",
        .probe = cxl_nvdimm_probe,
        .id = CXL_DEVICE_NVDIMM,
        .drv = {
                .suppress_bind_attrs = true,
        },
};
101
102 static int cxl_pmem_get_config_size(struct cxl_dev_state *cxlds,
103                                     struct nd_cmd_get_config_size *cmd,
104                                     unsigned int buf_len)
105 {
106         if (sizeof(*cmd) > buf_len)
107                 return -EINVAL;
108
109         *cmd = (struct nd_cmd_get_config_size) {
110                  .config_size = cxlds->lsa_size,
111                  .max_xfer = cxlds->payload_size - sizeof(struct cxl_mbox_set_lsa),
112         };
113
114         return 0;
115 }
116
117 static int cxl_pmem_get_config_data(struct cxl_dev_state *cxlds,
118                                     struct nd_cmd_get_config_data_hdr *cmd,
119                                     unsigned int buf_len)
120 {
121         struct cxl_mbox_get_lsa get_lsa;
122         struct cxl_mbox_cmd mbox_cmd;
123         int rc;
124
125         if (sizeof(*cmd) > buf_len)
126                 return -EINVAL;
127         if (struct_size(cmd, out_buf, cmd->in_length) > buf_len)
128                 return -EINVAL;
129
130         get_lsa = (struct cxl_mbox_get_lsa) {
131                 .offset = cpu_to_le32(cmd->in_offset),
132                 .length = cpu_to_le32(cmd->in_length),
133         };
134         mbox_cmd = (struct cxl_mbox_cmd) {
135                 .opcode = CXL_MBOX_OP_GET_LSA,
136                 .payload_in = &get_lsa,
137                 .size_in = sizeof(get_lsa),
138                 .size_out = cmd->in_length,
139                 .payload_out = cmd->out_buf,
140         };
141
142         rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
143         cmd->status = 0;
144
145         return rc;
146 }
147
148 static int cxl_pmem_set_config_data(struct cxl_dev_state *cxlds,
149                                     struct nd_cmd_set_config_hdr *cmd,
150                                     unsigned int buf_len)
151 {
152         struct cxl_mbox_set_lsa *set_lsa;
153         struct cxl_mbox_cmd mbox_cmd;
154         int rc;
155
156         if (sizeof(*cmd) > buf_len)
157                 return -EINVAL;
158
159         /* 4-byte status follows the input data in the payload */
160         if (size_add(struct_size(cmd, in_buf, cmd->in_length), 4) > buf_len)
161                 return -EINVAL;
162
163         set_lsa =
164                 kvzalloc(struct_size(set_lsa, data, cmd->in_length), GFP_KERNEL);
165         if (!set_lsa)
166                 return -ENOMEM;
167
168         *set_lsa = (struct cxl_mbox_set_lsa) {
169                 .offset = cpu_to_le32(cmd->in_offset),
170         };
171         memcpy(set_lsa->data, cmd->in_buf, cmd->in_length);
172         mbox_cmd = (struct cxl_mbox_cmd) {
173                 .opcode = CXL_MBOX_OP_SET_LSA,
174                 .payload_in = set_lsa,
175                 .size_in = struct_size(set_lsa, data, cmd->in_length),
176         };
177
178         rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
179
180         /*
181          * Set "firmware" status (4-packed bytes at the end of the input
182          * payload.
183          */
184         put_unaligned(0, (u32 *) &cmd->in_buf[cmd->in_length]);
185         kvfree(set_lsa);
186
187         return rc;
188 }
189
190 static int cxl_pmem_nvdimm_ctl(struct nvdimm *nvdimm, unsigned int cmd,
191                                void *buf, unsigned int buf_len)
192 {
193         struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
194         unsigned long cmd_mask = nvdimm_cmd_mask(nvdimm);
195         struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
196         struct cxl_dev_state *cxlds = cxlmd->cxlds;
197
198         if (!test_bit(cmd, &cmd_mask))
199                 return -ENOTTY;
200
201         switch (cmd) {
202         case ND_CMD_GET_CONFIG_SIZE:
203                 return cxl_pmem_get_config_size(cxlds, buf, buf_len);
204         case ND_CMD_GET_CONFIG_DATA:
205                 return cxl_pmem_get_config_data(cxlds, buf, buf_len);
206         case ND_CMD_SET_CONFIG_DATA:
207                 return cxl_pmem_set_config_data(cxlds, buf, buf_len);
208         default:
209                 return -ENOTTY;
210         }
211 }
212
/* nvdimm bus .ndctl entry point: route dimm-scoped commands onward */
static int cxl_pmem_ctl(struct nvdimm_bus_descriptor *nd_desc,
                        struct nvdimm *nvdimm, unsigned int cmd, void *buf,
                        unsigned int buf_len, int *cmd_rc)
{
        /*
         * No firmware response to translate, let the transport error
         * code take precedence.
         */
        *cmd_rc = 0;

        /* Bus-scoped commands (no target dimm) are not supported */
        if (!nvdimm)
                return -ENOTTY;
        return cxl_pmem_nvdimm_ctl(nvdimm, cmd, buf, buf_len);
}
227
228 static void unregister_nvdimm_bus(void *_cxl_nvb)
229 {
230         struct cxl_nvdimm_bridge *cxl_nvb = _cxl_nvb;
231         struct nvdimm_bus *nvdimm_bus = cxl_nvb->nvdimm_bus;
232
233         cxl_nvb->nvdimm_bus = NULL;
234         nvdimm_bus_unregister(nvdimm_bus);
235 }
236
237 static int cxl_nvdimm_bridge_probe(struct device *dev)
238 {
239         struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);
240
241         cxl_nvb->nd_desc = (struct nvdimm_bus_descriptor) {
242                 .provider_name = "CXL",
243                 .module = THIS_MODULE,
244                 .ndctl = cxl_pmem_ctl,
245         };
246
247         cxl_nvb->nvdimm_bus =
248                 nvdimm_bus_register(&cxl_nvb->dev, &cxl_nvb->nd_desc);
249
250         if (!cxl_nvb->nvdimm_bus)
251                 return -ENOMEM;
252
253         return devm_add_action_or_reset(dev, unregister_nvdimm_bus, cxl_nvb);
254 }
255
/* Driver for CXL_DEVICE_NVDIMM_BRIDGE devices; sysfs unbind suppressed */
static struct cxl_driver cxl_nvdimm_bridge_driver = {
        .name = "cxl_nvdimm_bridge",
        .probe = cxl_nvdimm_bridge_probe,
        .id = CXL_DEVICE_NVDIMM_BRIDGE,
        .drv = {
                .suppress_bind_attrs = true,
        },
};
264
/* devm callback: delete the nd_region created in cxl_pmem_region_probe() */
static void unregister_nvdimm_region(void *data)
{
        struct nd_region *nd_region = data;

        nvdimm_region_delete(nd_region);
}
269
/* devm callback: drop the region's iomem_resource entry */
static void cxlr_pmem_remove_resource(void *data)
{
        struct resource *res = data;

        remove_resource(res);
}
274
/*
 * Per-mapping input to the fletcher64 interleave-set cookie: the dimm's
 * start offset within the region and its device serial number.
 */
struct cxl_pmem_region_info {
        u64 offset;
        u64 serial;
};
279
/*
 * Assemble an nvdimm region on top of a CXL pmem region: claim the HPA
 * range in iomem, build per-memdev mapping descriptors, compute an
 * interleave-set cookie, and register the region on the bridge's nvdimm
 * bus.  Returns 0 on success or a negative errno; all resources are
 * devm-managed except the transient @info scratch buffer.
 */
static int cxl_pmem_region_probe(struct device *dev)
{
        struct nd_mapping_desc mappings[CXL_DECODER_MAX_INTERLEAVE];
        struct cxl_pmem_region *cxlr_pmem = to_cxl_pmem_region(dev);
        struct cxl_region *cxlr = cxlr_pmem->cxlr;
        struct cxl_nvdimm_bridge *cxl_nvb = cxlr->cxl_nvb;
        struct cxl_pmem_region_info *info = NULL;
        struct nd_interleave_set *nd_set;
        struct nd_region_desc ndr_desc;
        struct cxl_nvdimm *cxl_nvd;
        struct nvdimm *nvdimm;
        struct resource *res;
        int rc, i = 0;

        memset(&mappings, 0, sizeof(mappings));
        memset(&ndr_desc, 0, sizeof(ndr_desc));

        /* Publish the region's HPA range as persistent memory in iomem */
        res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
        if (!res)
                return -ENOMEM;

        res->name = "Persistent Memory";
        res->start = cxlr_pmem->hpa_range.start;
        res->end = cxlr_pmem->hpa_range.end;
        res->flags = IORESOURCE_MEM;
        res->desc = IORES_DESC_PERSISTENT_MEMORY;

        rc = insert_resource(&iomem_resource, res);
        if (rc)
                return rc;

        rc = devm_add_action_or_reset(dev, cxlr_pmem_remove_resource, res);
        if (rc)
                return rc;

        ndr_desc.res = res;
        ndr_desc.provider_data = cxlr_pmem;

        /* Fall back to the onlining node when no target node is known */
        ndr_desc.numa_node = memory_add_physaddr_to_nid(res->start);
        ndr_desc.target_node = phys_to_target_node(res->start);
        if (ndr_desc.target_node == NUMA_NO_NODE) {
                ndr_desc.target_node = ndr_desc.numa_node;
                dev_dbg(&cxlr->dev, "changing target node from %d to %d",
                        NUMA_NO_NODE, ndr_desc.target_node);
        }

        nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
        if (!nd_set)
                return -ENOMEM;

        ndr_desc.memregion = cxlr->id;
        set_bit(ND_REGION_CXL, &ndr_desc.flags);
        set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc.flags);

        /* Scratch array feeding the interleave-set cookie hash below */
        info = kmalloc_array(cxlr_pmem->nr_mappings, sizeof(*info), GFP_KERNEL);
        if (!info)
                return -ENOMEM;

        /* Every mapping's memdev must already have a registered nvdimm */
        for (i = 0; i < cxlr_pmem->nr_mappings; i++) {
                struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i];
                struct cxl_memdev *cxlmd = m->cxlmd;
                struct cxl_dev_state *cxlds = cxlmd->cxlds;

                cxl_nvd = cxlmd->cxl_nvd;
                nvdimm = dev_get_drvdata(&cxl_nvd->dev);
                if (!nvdimm) {
                        dev_dbg(dev, "[%d]: %s: no nvdimm found\n", i,
                                dev_name(&cxlmd->dev));
                        rc = -ENODEV;
                        goto out_nvd;
                }

                m->cxl_nvd = cxl_nvd;
                mappings[i] = (struct nd_mapping_desc) {
                        .nvdimm = nvdimm,
                        .start = m->start,
                        .size = m->size,
                        .position = i,
                };
                info[i].offset = m->start;
                info[i].serial = cxlds->serial;
        }
        ndr_desc.num_mappings = cxlr_pmem->nr_mappings;
        ndr_desc.mapping = mappings;

        /*
         * TODO enable CXL labels which skip the need for 'interleave-set cookie'
         */
        nd_set->cookie1 =
                nd_fletcher64(info, sizeof(*info) * cxlr_pmem->nr_mappings, 0);
        nd_set->cookie2 = nd_set->cookie1;
        ndr_desc.nd_set = nd_set;

        cxlr_pmem->nd_region =
                nvdimm_pmem_region_create(cxl_nvb->nvdimm_bus, &ndr_desc);
        if (!cxlr_pmem->nd_region) {
                rc = -ENOMEM;
                goto out_nvd;
        }

        rc = devm_add_action_or_reset(dev, unregister_nvdimm_region,
                                      cxlr_pmem->nd_region);
out_nvd:
        kfree(info);

        return rc;
}
387
/* Driver for CXL_DEVICE_PMEM_REGION devices; sysfs unbind suppressed */
static struct cxl_driver cxl_pmem_region_driver = {
        .name = "cxl_pmem_region",
        .probe = cxl_pmem_region_probe,
        .id = CXL_DEVICE_PMEM_REGION,
        .drv = {
                .suppress_bind_attrs = true,
        },
};
396
397 static __init int cxl_pmem_init(void)
398 {
399         int rc;
400
401         set_bit(CXL_MEM_COMMAND_ID_SET_SHUTDOWN_STATE, exclusive_cmds);
402         set_bit(CXL_MEM_COMMAND_ID_SET_LSA, exclusive_cmds);
403
404         rc = cxl_driver_register(&cxl_nvdimm_bridge_driver);
405         if (rc)
406                 return rc;
407
408         rc = cxl_driver_register(&cxl_nvdimm_driver);
409         if (rc)
410                 goto err_nvdimm;
411
412         rc = cxl_driver_register(&cxl_pmem_region_driver);
413         if (rc)
414                 goto err_region;
415
416         return 0;
417
418 err_region:
419         cxl_driver_unregister(&cxl_nvdimm_driver);
420 err_nvdimm:
421         cxl_driver_unregister(&cxl_nvdimm_bridge_driver);
422         return rc;
423 }
424
/* Module exit: unregister the three drivers in reverse of init order */
static __exit void cxl_pmem_exit(void)
{
        cxl_driver_unregister(&cxl_pmem_region_driver);
        cxl_driver_unregister(&cxl_nvdimm_driver);
        cxl_driver_unregister(&cxl_nvdimm_bridge_driver);
}
431
MODULE_LICENSE("GPL v2");
module_init(cxl_pmem_init);
module_exit(cxl_pmem_exit);
/* Core CXL symbols (cxl_driver_register, mailbox helpers) live in this ns */
MODULE_IMPORT_NS(CXL);
/* Auto-load for any of the three CXL device ids this module drives */
MODULE_ALIAS_CXL(CXL_DEVICE_NVDIMM_BRIDGE);
MODULE_ALIAS_CXL(CXL_DEVICE_NVDIMM);
MODULE_ALIAS_CXL(CXL_DEVICE_PMEM_REGION);