1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright(c) 2020 Intel Corporation. */
3 #include <linux/device.h>
4 #include <linux/slab.h>
5 #include <linux/idr.h>
6 #include <cxlmem.h>
7 #include <cxl.h>
8 #include "core.h"
9
10 /**
11  * DOC: cxl pmem
12  *
13  * The core CXL PMEM infrastructure supports persistent memory
14  * provisioning and serves as a bridge to the LIBNVDIMM subsystem. A CXL
15  * 'bridge' device is added at the root of a CXL device topology if
16  * platform firmware advertises at least one persistent memory capable
17  * CXL window. That root-level bridge corresponds to a LIBNVDIMM 'bus'
18  * device. Then for each cxl_memdev in the CXL device topology a bridge
19  * device is added to host a LIBNVDIMM dimm object. When these bridges
20  * are registered native LIBNVDIMM uapis are translated to CXL
21  * operations, for example, namespace label access commands.
22  */
23
/* Allocator of unique ids for the "nvdimm-bridge%d" device names */
static DEFINE_IDA(cxl_nvdimm_bridge_ida);
25
26 static void cxl_nvdimm_bridge_release(struct device *dev)
27 {
28         struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);
29
30         ida_free(&cxl_nvdimm_bridge_ida, cxl_nvb->id);
31         kfree(cxl_nvb);
32 }
33
/* Sysfs attribute groups for the bridge; shares the common CXL base group */
static const struct attribute_group *cxl_nvdimm_bridge_attribute_groups[] = {
	&cxl_base_attribute_group,
	NULL,
};
38
/* device_type that identifies nvdimm-bridge devices on the CXL bus */
const struct device_type cxl_nvdimm_bridge_type = {
	.name = "cxl_nvdimm_bridge",
	.release = cxl_nvdimm_bridge_release,
	.groups = cxl_nvdimm_bridge_attribute_groups,
};
44
45 struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev)
46 {
47         if (dev_WARN_ONCE(dev, dev->type != &cxl_nvdimm_bridge_type,
48                           "not a cxl_nvdimm_bridge device\n"))
49                 return NULL;
50         return container_of(dev, struct cxl_nvdimm_bridge, dev);
51 }
52 EXPORT_SYMBOL_NS_GPL(to_cxl_nvdimm_bridge, CXL);
53
/* Type check: is @dev an nvdimm-bridge device on the CXL bus? */
bool is_cxl_nvdimm_bridge(struct device *dev)
{
	return dev->type == &cxl_nvdimm_bridge_type;
}
EXPORT_SYMBOL_NS_GPL(is_cxl_nvdimm_bridge, CXL);
59
/* device_find_child() match callback: select the nvdimm-bridge child */
static int match_nvdimm_bridge(struct device *dev, void *data)
{
	return is_cxl_nvdimm_bridge(dev);
}
64
65 /**
66  * cxl_find_nvdimm_bridge() - find a bridge device relative to a port
67  * @port: any descendant port of an nvdimm-bridge associated
68  *        root-cxl-port
69  */
70 struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_port *port)
71 {
72         struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(port);
73         struct device *dev;
74
75         if (!cxl_root)
76                 return NULL;
77
78         dev = device_find_child(&cxl_root->port.dev, NULL, match_nvdimm_bridge);
79
80         if (!dev)
81                 return NULL;
82
83         return to_cxl_nvdimm_bridge(dev);
84 }
85 EXPORT_SYMBOL_NS_GPL(cxl_find_nvdimm_bridge, CXL);
86
/* lockdep class so the bridge's device->mutex gets a distinct lock class */
static struct lock_class_key cxl_nvdimm_bridge_key;
88
89 static struct cxl_nvdimm_bridge *cxl_nvdimm_bridge_alloc(struct cxl_port *port)
90 {
91         struct cxl_nvdimm_bridge *cxl_nvb;
92         struct device *dev;
93         int rc;
94
95         cxl_nvb = kzalloc(sizeof(*cxl_nvb), GFP_KERNEL);
96         if (!cxl_nvb)
97                 return ERR_PTR(-ENOMEM);
98
99         rc = ida_alloc(&cxl_nvdimm_bridge_ida, GFP_KERNEL);
100         if (rc < 0)
101                 goto err;
102         cxl_nvb->id = rc;
103
104         dev = &cxl_nvb->dev;
105         cxl_nvb->port = port;
106         device_initialize(dev);
107         lockdep_set_class(&dev->mutex, &cxl_nvdimm_bridge_key);
108         device_set_pm_not_required(dev);
109         dev->parent = &port->dev;
110         dev->bus = &cxl_bus_type;
111         dev->type = &cxl_nvdimm_bridge_type;
112
113         return cxl_nvb;
114
115 err:
116         kfree(cxl_nvb);
117         return ERR_PTR(rc);
118 }
119
/* devm action: tear down the bridge device when the host driver unbinds */
static void unregister_nvb(void *_cxl_nvb)
{
	struct cxl_nvdimm_bridge *cxl_nvb = _cxl_nvb;

	device_unregister(&cxl_nvb->dev);
}
126
127 /**
128  * devm_cxl_add_nvdimm_bridge() - add the root of a LIBNVDIMM topology
129  * @host: platform firmware root device
130  * @port: CXL port at the root of a CXL topology
131  *
132  * Return: bridge device that can host cxl_nvdimm objects
133  */
134 struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
135                                                      struct cxl_port *port)
136 {
137         struct cxl_nvdimm_bridge *cxl_nvb;
138         struct device *dev;
139         int rc;
140
141         if (!IS_ENABLED(CONFIG_CXL_PMEM))
142                 return ERR_PTR(-ENXIO);
143
144         cxl_nvb = cxl_nvdimm_bridge_alloc(port);
145         if (IS_ERR(cxl_nvb))
146                 return cxl_nvb;
147
148         dev = &cxl_nvb->dev;
149         rc = dev_set_name(dev, "nvdimm-bridge%d", cxl_nvb->id);
150         if (rc)
151                 goto err;
152
153         rc = device_add(dev);
154         if (rc)
155                 goto err;
156
157         rc = devm_add_action_or_reset(host, unregister_nvb, cxl_nvb);
158         if (rc)
159                 return ERR_PTR(rc);
160
161         return cxl_nvb;
162
163 err:
164         put_device(dev);
165         return ERR_PTR(rc);
166 }
167 EXPORT_SYMBOL_NS_GPL(devm_cxl_add_nvdimm_bridge, CXL);
168
/* Device release callback: free the cxl_nvdimm once the last reference drops */
static void cxl_nvdimm_release(struct device *dev)
{
	kfree(to_cxl_nvdimm(dev));
}
175
/* Sysfs attribute groups for cxl_nvdimm; shares the common CXL base group */
static const struct attribute_group *cxl_nvdimm_attribute_groups[] = {
	&cxl_base_attribute_group,
	NULL,
};
180
/* device_type that identifies cxl_nvdimm devices on the CXL bus */
const struct device_type cxl_nvdimm_type = {
	.name = "cxl_nvdimm",
	.release = cxl_nvdimm_release,
	.groups = cxl_nvdimm_attribute_groups,
};
186
/* Type check: is @dev a cxl_nvdimm device on the CXL bus? */
bool is_cxl_nvdimm(struct device *dev)
{
	return dev->type == &cxl_nvdimm_type;
}
EXPORT_SYMBOL_NS_GPL(is_cxl_nvdimm, CXL);
192
193 struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev)
194 {
195         if (dev_WARN_ONCE(dev, !is_cxl_nvdimm(dev),
196                           "not a cxl_nvdimm device\n"))
197                 return NULL;
198         return container_of(dev, struct cxl_nvdimm, dev);
199 }
200 EXPORT_SYMBOL_NS_GPL(to_cxl_nvdimm, CXL);
201
/* lockdep class so the cxl_nvdimm's device->mutex gets a distinct lock class */
static struct lock_class_key cxl_nvdimm_key;
203
204 static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_nvdimm_bridge *cxl_nvb,
205                                            struct cxl_memdev *cxlmd)
206 {
207         struct cxl_nvdimm *cxl_nvd;
208         struct device *dev;
209
210         cxl_nvd = kzalloc(sizeof(*cxl_nvd), GFP_KERNEL);
211         if (!cxl_nvd)
212                 return ERR_PTR(-ENOMEM);
213
214         dev = &cxl_nvd->dev;
215         cxl_nvd->cxlmd = cxlmd;
216         cxlmd->cxl_nvd = cxl_nvd;
217         device_initialize(dev);
218         lockdep_set_class(&dev->mutex, &cxl_nvdimm_key);
219         device_set_pm_not_required(dev);
220         dev->parent = &cxlmd->dev;
221         dev->bus = &cxl_bus_type;
222         dev->type = &cxl_nvdimm_type;
223         /*
224          * A "%llx" string is 17-bytes vs dimm_id that is max
225          * NVDIMM_KEY_DESC_LEN
226          */
227         BUILD_BUG_ON(sizeof(cxl_nvd->dev_id) < 17 ||
228                      sizeof(cxl_nvd->dev_id) > NVDIMM_KEY_DESC_LEN);
229         sprintf(cxl_nvd->dev_id, "%llx", cxlmd->cxlds->serial);
230
231         return cxl_nvd;
232 }
233
/*
 * devm action bound to the cxl_memdev: sever the memdev <-> nvdimm
 * association, unregister the nvdimm device, and drop the bridge
 * reference taken when devm_cxl_add_nvdimm() found the bridge.
 */
static void cxlmd_release_nvdimm(void *_cxlmd)
{
	struct cxl_memdev *cxlmd = _cxlmd;
	struct cxl_nvdimm *cxl_nvd = cxlmd->cxl_nvd;
	struct cxl_nvdimm_bridge *cxl_nvb = cxlmd->cxl_nvb;

	/* clear the back-pointers before tearing the device down */
	cxl_nvd->cxlmd = NULL;
	cxlmd->cxl_nvd = NULL;
	cxlmd->cxl_nvb = NULL;
	device_unregister(&cxl_nvd->dev);
	put_device(&cxl_nvb->dev);
}
246
/**
 * devm_cxl_add_nvdimm() - add a bridge between a cxl_memdev and an nvdimm
 * @parent_port: parent port for the (to be added) @cxlmd endpoint port
 * @cxlmd: cxl_memdev instance that will perform LIBNVDIMM operations
 *
 * Return: 0 on success negative error code on failure.
 */
int devm_cxl_add_nvdimm(struct cxl_port *parent_port,
			struct cxl_memdev *cxlmd)
{
	struct cxl_nvdimm_bridge *cxl_nvb;
	struct cxl_nvdimm *cxl_nvd;
	struct device *dev;
	int rc;

	/*
	 * On success this lookup returns the bridge with a reference
	 * held; it is dropped either in the error paths below or in
	 * cxlmd_release_nvdimm().
	 */
	cxl_nvb = cxl_find_nvdimm_bridge(parent_port);
	if (!cxl_nvb)
		return -ENODEV;

	cxl_nvd = cxl_nvdimm_alloc(cxl_nvb, cxlmd);
	if (IS_ERR(cxl_nvd)) {
		rc = PTR_ERR(cxl_nvd);
		goto err_alloc;
	}
	cxlmd->cxl_nvb = cxl_nvb;

	dev = &cxl_nvd->dev;
	rc = dev_set_name(dev, "pmem%d", cxlmd->id);
	if (rc)
		goto err;

	rc = device_add(dev);
	if (rc)
		goto err;

	dev_dbg(&cxlmd->dev, "register %s\n", dev_name(dev));

	/* @cxlmd carries a reference on @cxl_nvb until cxlmd_release_nvdimm */
	return devm_add_action_or_reset(&cxlmd->dev, cxlmd_release_nvdimm, cxlmd);

err:
	/* drop the sole nvdimm reference; cxl_nvdimm_release() frees it */
	put_device(dev);
err_alloc:
	/* unwind the back-pointers established by cxl_nvdimm_alloc() */
	cxlmd->cxl_nvb = NULL;
	cxlmd->cxl_nvd = NULL;
	put_device(&cxl_nvb->dev);

	return rc;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_nvdimm, CXL);