drivers/cxl/core/memdev.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. */

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/pci.h>
#include <cxlmem.h>
#include "core.h"

static DECLARE_RWSEM(cxl_memdev_rwsem);

/*
 * An entire PCI topology full of devices should be enough for any
 * config
 */
#define CXL_MEM_MAX_DEVS 65536

static int cxl_mem_major;
static DEFINE_IDA(cxl_memdev_ida);

static void cxl_memdev_release(struct device *dev)
{
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);

        ida_free(&cxl_memdev_ida, cxlmd->id);
        kfree(cxlmd);
}

/* Place memdev nodes in a "cxl/" subdirectory, i.e. /dev/cxl/memN */
static char *cxl_memdev_devnode(struct device *dev, umode_t *mode, kuid_t *uid,
                                kgid_t *gid)
{
        return kasprintf(GFP_KERNEL, "cxl/%s", dev_name(dev));
}

static ssize_t firmware_version_show(struct device *dev,
                                     struct device_attribute *attr, char *buf)
{
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;

        return sysfs_emit(buf, "%.16s\n", cxlds->firmware_version);
}
static DEVICE_ATTR_RO(firmware_version);

static ssize_t payload_max_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;

        return sysfs_emit(buf, "%zu\n", cxlds->payload_size);
}
static DEVICE_ATTR_RO(payload_max);

static ssize_t label_storage_size_show(struct device *dev,
                                       struct device_attribute *attr, char *buf)
{
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;

        return sysfs_emit(buf, "%zu\n", cxlds->lsa_size);
}
static DEVICE_ATTR_RO(label_storage_size);

static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        unsigned long long len = resource_size(&cxlds->ram_res);

        return sysfs_emit(buf, "%#llx\n", len);
}

static struct device_attribute dev_attr_ram_size =
        __ATTR(size, 0444, ram_size_show, NULL);

static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr,
                              char *buf)
{
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        unsigned long long len = resource_size(&cxlds->pmem_res);

        return sysfs_emit(buf, "%#llx\n", len);
}

static struct device_attribute dev_attr_pmem_size =
        __ATTR(size, 0444, pmem_size_show, NULL);

static ssize_t serial_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;

        return sysfs_emit(buf, "%#llx\n", cxlds->serial);
}
static DEVICE_ATTR_RO(serial);

static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr,
                              char *buf)
{
        return sysfs_emit(buf, "%d\n", dev_to_node(dev));
}
static DEVICE_ATTR_RO(numa_node);

static struct attribute *cxl_memdev_attributes[] = {
        &dev_attr_serial.attr,
        &dev_attr_firmware_version.attr,
        &dev_attr_payload_max.attr,
        &dev_attr_label_storage_size.attr,
        &dev_attr_numa_node.attr,
        NULL,
};

static struct attribute *cxl_memdev_pmem_attributes[] = {
        &dev_attr_pmem_size.attr,
        NULL,
};

static struct attribute *cxl_memdev_ram_attributes[] = {
        &dev_attr_ram_size.attr,
        NULL,
};

static umode_t cxl_memdev_visible(struct kobject *kobj, struct attribute *a,
                                  int n)
{
        if (!IS_ENABLED(CONFIG_NUMA) && a == &dev_attr_numa_node.attr)
                return 0;
        return a->mode;
}

static struct attribute_group cxl_memdev_attribute_group = {
        .attrs = cxl_memdev_attributes,
        .is_visible = cxl_memdev_visible,
};

static struct attribute_group cxl_memdev_ram_attribute_group = {
        .name = "ram",
        .attrs = cxl_memdev_ram_attributes,
};

static struct attribute_group cxl_memdev_pmem_attribute_group = {
        .name = "pmem",
        .attrs = cxl_memdev_pmem_attributes,
};

static const struct attribute_group *cxl_memdev_attribute_groups[] = {
        &cxl_memdev_attribute_group,
        &cxl_memdev_ram_attribute_group,
        &cxl_memdev_pmem_attribute_group,
        NULL,
};
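
/*
 * Illustrative sysfs layout (not code from this file): with the groups
 * above registered on the memdev device type, a device "mem0" is
 * expected to show up under the cxl bus roughly as:
 *
 *   /sys/bus/cxl/devices/mem0/serial
 *   /sys/bus/cxl/devices/mem0/firmware_version
 *   /sys/bus/cxl/devices/mem0/payload_max
 *   /sys/bus/cxl/devices/mem0/label_storage_size
 *   /sys/bus/cxl/devices/mem0/numa_node      (hidden when !CONFIG_NUMA)
 *   /sys/bus/cxl/devices/mem0/ram/size
 *   /sys/bus/cxl/devices/mem0/pmem/size
 */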

static const struct device_type cxl_memdev_type = {
        .name = "cxl_memdev",
        .release = cxl_memdev_release,
        .devnode = cxl_memdev_devnode,
        .groups = cxl_memdev_attribute_groups,
};

bool is_cxl_memdev(struct device *dev)
{
        return dev->type == &cxl_memdev_type;
}
EXPORT_SYMBOL_NS_GPL(is_cxl_memdev, CXL);

/**
 * set_exclusive_cxl_commands() - atomically disable user cxl commands
 * @cxlds: The device state to operate on
 * @cmds: bitmap of commands to mark exclusive
 *
 * Grab the cxl_memdev_rwsem in write mode to flush in-flight
 * invocations of the ioctl path and then disable future execution of
 * commands with the command ids set in @cmds.
 */
void set_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds)
{
        down_write(&cxl_memdev_rwsem);
        bitmap_or(cxlds->exclusive_cmds, cxlds->exclusive_cmds, cmds,
                  CXL_MEM_COMMAND_ID_MAX);
        up_write(&cxl_memdev_rwsem);
}
EXPORT_SYMBOL_NS_GPL(set_exclusive_cxl_commands, CXL);

/**
 * clear_exclusive_cxl_commands() - atomically enable user cxl commands
 * @cxlds: The device state to modify
 * @cmds: bitmap of commands to mark available for userspace
 */
void clear_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds)
{
        down_write(&cxl_memdev_rwsem);
        bitmap_andnot(cxlds->exclusive_cmds, cxlds->exclusive_cmds, cmds,
                      CXL_MEM_COMMAND_ID_MAX);
        up_write(&cxl_memdev_rwsem);
}
EXPORT_SYMBOL_NS_GPL(clear_exclusive_cxl_commands, CXL);
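
/*
 * Usage sketch (illustrative, not part of this file): a kernel consumer
 * such as the CXL pmem/nvdimm code can use the pair above to fence the
 * userspace ioctl path away from commands it needs to own. The probe /
 * remove pairing below is an assumption modeled on typical callers:
 *
 *	static DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);
 *
 *	// at probe: kernel takes ownership of label-area writes
 *	set_bit(CXL_MEM_COMMAND_ID_SET_LSA, exclusive_cmds);
 *	set_exclusive_cxl_commands(cxlds, exclusive_cmds);
 *
 *	// at remove: hand the commands back to userspace
 *	clear_exclusive_cxl_commands(cxlds, exclusive_cmds);
 */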

/*
 * Sever the link to the driver-private state so that in-flight and
 * future ioctl invocations observe a NULL ->cxlds and fail with -ENXIO.
 */
static void cxl_memdev_shutdown(struct device *dev)
{
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);

        down_write(&cxl_memdev_rwsem);
        cxlmd->cxlds = NULL;
        up_write(&cxl_memdev_rwsem);
}

static void cxl_memdev_unregister(void *_cxlmd)
{
        struct cxl_memdev *cxlmd = _cxlmd;
        struct device *dev = &cxlmd->dev;

        cxl_memdev_shutdown(dev);
        cdev_device_del(&cxlmd->cdev, dev);
        put_device(dev);
}

static void detach_memdev(struct work_struct *work)
{
        struct cxl_memdev *cxlmd;

        cxlmd = container_of(work, typeof(*cxlmd), detach_work);
        device_release_driver(&cxlmd->dev);
        put_device(&cxlmd->dev);
}

static struct lock_class_key cxl_memdev_key;

static struct cxl_memdev *cxl_memdev_alloc(struct cxl_dev_state *cxlds,
                                           const struct file_operations *fops)
{
        struct cxl_memdev *cxlmd;
        struct device *dev;
        struct cdev *cdev;
        int rc;

        cxlmd = kzalloc(sizeof(*cxlmd), GFP_KERNEL);
        if (!cxlmd)
                return ERR_PTR(-ENOMEM);

        rc = ida_alloc_max(&cxl_memdev_ida, CXL_MEM_MAX_DEVS - 1, GFP_KERNEL);
        if (rc < 0)
                goto err;
        cxlmd->id = rc;
        cxlmd->depth = -1;

        dev = &cxlmd->dev;
        device_initialize(dev);
        lockdep_set_class(&dev->mutex, &cxl_memdev_key);
        dev->parent = cxlds->dev;
        dev->bus = &cxl_bus_type;
        dev->devt = MKDEV(cxl_mem_major, cxlmd->id);
        dev->type = &cxl_memdev_type;
        device_set_pm_not_required(dev);
        INIT_WORK(&cxlmd->detach_work, detach_memdev);

        cdev = &cxlmd->cdev;
        cdev_init(cdev, fops);
        return cxlmd;

err:
        kfree(cxlmd);
        return ERR_PTR(rc);
}

static long __cxl_memdev_ioctl(struct cxl_memdev *cxlmd, unsigned int cmd,
                               unsigned long arg)
{
        switch (cmd) {
        case CXL_MEM_QUERY_COMMANDS:
                return cxl_query_cmd(cxlmd, (void __user *)arg);
        case CXL_MEM_SEND_COMMAND:
                return cxl_send_cmd(cxlmd, (void __user *)arg);
        default:
                return -ENOTTY;
        }
}

static long cxl_memdev_ioctl(struct file *file, unsigned int cmd,
                             unsigned long arg)
{
        struct cxl_memdev *cxlmd = file->private_data;
        int rc = -ENXIO;

        down_read(&cxl_memdev_rwsem);
        if (cxlmd->cxlds)
                rc = __cxl_memdev_ioctl(cxlmd, cmd, arg);
        up_read(&cxl_memdev_rwsem);

        return rc;
}

static int cxl_memdev_open(struct inode *inode, struct file *file)
{
        struct cxl_memdev *cxlmd =
                container_of(inode->i_cdev, typeof(*cxlmd), cdev);

        get_device(&cxlmd->dev);
        file->private_data = cxlmd;

        return 0;
}

static int cxl_memdev_release_file(struct inode *inode, struct file *file)
{
        struct cxl_memdev *cxlmd =
                container_of(inode->i_cdev, typeof(*cxlmd), cdev);

        put_device(&cxlmd->dev);

        return 0;
}

static const struct file_operations cxl_memdev_fops = {
        .owner = THIS_MODULE,
        .unlocked_ioctl = cxl_memdev_ioctl,
        .open = cxl_memdev_open,
        .release = cxl_memdev_release_file,
        .compat_ioctl = compat_ptr_ioctl,
        .llseek = noop_llseek,
};
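
/*
 * Userspace usage sketch (illustrative assumption, not compiled here):
 * the cdev registered with these fops is reached through the /dev/cxl/memN
 * node and driven with the CXL_MEM_QUERY_COMMANDS / CXL_MEM_SEND_COMMAND
 * ioctls declared in the <linux/cxl_mem.h> uapi header, e.g.:
 *
 *	int fd = open("/dev/cxl/mem0", O_RDWR);
 *	struct cxl_mem_query_commands q = { .n_commands = 0 };
 *
 *	// with n_commands == 0 the kernel reports the number of commands
 *	if (fd >= 0 && ioctl(fd, CXL_MEM_QUERY_COMMANDS, &q) == 0)
 *		printf("%u commands supported\n", q.n_commands);
 */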

struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds)
{
        struct cxl_memdev *cxlmd;
        struct device *dev;
        struct cdev *cdev;
        int rc;

        cxlmd = cxl_memdev_alloc(cxlds, &cxl_memdev_fops);
        if (IS_ERR(cxlmd))
                return cxlmd;

        dev = &cxlmd->dev;
        rc = dev_set_name(dev, "mem%d", cxlmd->id);
        if (rc)
                goto err;

        /*
         * Activate ioctl operations, no cxl_memdev_rwsem manipulation
         * needed as this is ordered with cdev_add() publishing the device.
         */
        cxlmd->cxlds = cxlds;
        cxlds->cxlmd = cxlmd;

        cdev = &cxlmd->cdev;
        rc = cdev_device_add(cdev, dev);
        if (rc)
                goto err;

        rc = devm_add_action_or_reset(cxlds->dev, cxl_memdev_unregister, cxlmd);
        if (rc)
                return ERR_PTR(rc);
        return cxlmd;

err:
        /*
         * The cdev was briefly live, shutdown any ioctl operations that
         * saw that state.
         */
        cxl_memdev_shutdown(dev);
        put_device(dev);
        return ERR_PTR(rc);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_memdev, CXL);
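
/*
 * Caller sketch (illustrative): an endpoint driver such as cxl_pci is
 * expected to register the memdev once its cxl_dev_state is populated;
 * the surrounding probe code is assumed for illustration:
 *
 *	struct cxl_memdev *cxlmd = devm_cxl_add_memdev(cxlds);
 *
 *	if (IS_ERR(cxlmd))
 *		return PTR_ERR(cxlmd);
 */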

__init int cxl_memdev_init(void)
{
        dev_t devt;
        int rc;

        rc = alloc_chrdev_region(&devt, 0, CXL_MEM_MAX_DEVS, "cxl");
        if (rc)
                return rc;

        cxl_mem_major = MAJOR(devt);

        return 0;
}

void cxl_memdev_exit(void)
{
        unregister_chrdev_region(MKDEV(cxl_mem_major, 0), CXL_MEM_MAX_DEVS);
}