// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. */

#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/firmware.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/pci.h>
#include <cxlmem.h>
#include "trace.h"
#include "core.h"

static DECLARE_RWSEM(cxl_memdev_rwsem);

/*
 * An entire PCI topology full of devices should be enough for any
 * config
 */
#define CXL_MEM_MAX_DEVS 65536

static int cxl_mem_major;
static DEFINE_IDA(cxl_memdev_ida);

static void cxl_memdev_release(struct device *dev)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);

	ida_free(&cxl_memdev_ida, cxlmd->id);
	kfree(cxlmd);
}

static char *cxl_memdev_devnode(const struct device *dev, umode_t *mode,
				kuid_t *uid, kgid_t *gid)
{
	return kasprintf(GFP_KERNEL, "cxl/%s", dev_name(dev));
}

static ssize_t firmware_version_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);

	if (!mds)
		return sysfs_emit(buf, "\n");
	return sysfs_emit(buf, "%.16s\n", mds->firmware_version);
}
static DEVICE_ATTR_RO(firmware_version);

static ssize_t payload_max_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);

	if (!mds)
		return sysfs_emit(buf, "\n");
	return sysfs_emit(buf, "%zu\n", mds->payload_size);
}
static DEVICE_ATTR_RO(payload_max);

static ssize_t label_storage_size_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);

	if (!mds)
		return sysfs_emit(buf, "\n");
	return sysfs_emit(buf, "%zu\n", mds->lsa_size);
}
static DEVICE_ATTR_RO(label_storage_size);

static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	unsigned long long len = resource_size(&cxlds->ram_res);

	return sysfs_emit(buf, "%#llx\n", len);
}

static struct device_attribute dev_attr_ram_size =
	__ATTR(size, 0444, ram_size_show, NULL);

static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	unsigned long long len = resource_size(&cxlds->pmem_res);

	return sysfs_emit(buf, "%#llx\n", len);
}

static struct device_attribute dev_attr_pmem_size =
	__ATTR(size, 0444, pmem_size_show, NULL);

static ssize_t serial_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;

	return sysfs_emit(buf, "%#llx\n", cxlds->serial);
}
static DEVICE_ATTR_RO(serial);

static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	return sysfs_emit(buf, "%d\n", dev_to_node(dev));
}
static DEVICE_ATTR_RO(numa_node);

static ssize_t security_state_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
	u64 reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
	u32 pct = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_PCT_MASK, reg);
	u16 cmd = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK, reg);
	unsigned long state = mds->security.state;

	if (cmd == CXL_MBOX_OP_SANITIZE && pct != 100)
		return sysfs_emit(buf, "sanitize\n");

	if (!(state & CXL_PMEM_SEC_STATE_USER_PASS_SET))
		return sysfs_emit(buf, "disabled\n");
	if (state & CXL_PMEM_SEC_STATE_FROZEN ||
	    state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT ||
	    state & CXL_PMEM_SEC_STATE_USER_PLIMIT)
		return sysfs_emit(buf, "frozen\n");
	if (state & CXL_PMEM_SEC_STATE_LOCKED)
		return sysfs_emit(buf, "locked\n");

	return sysfs_emit(buf, "unlocked\n");
}
static struct device_attribute dev_attr_security_state =
	__ATTR(state, 0444, security_state_show, NULL);

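/*
 * Sanitize is write-only and destructive. A hedged usage sketch, assuming a
 * memdev named "mem0" with no regions mapped:
 *
 *   echo 1 > /sys/bus/cxl/devices/mem0/security/sanitize
 *
 * While the background command runs, the sibling security/state attribute
 * reports "sanitize" until the device signals 100% completion.
 */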
static ssize_t security_sanitize_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t len)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
	struct cxl_port *port = cxlmd->endpoint;
	bool sanitize;
	ssize_t rc;

	if (kstrtobool(buf, &sanitize) || !sanitize)
		return -EINVAL;

	if (!port || !is_cxl_endpoint(port))
		return -EINVAL;

	/* ensure no regions are mapped to this memdev */
	if (port->commit_end != -1)
		return -EBUSY;

	rc = cxl_mem_sanitize(mds, CXL_MBOX_OP_SANITIZE);

	return rc ? rc : len;
}
static struct device_attribute dev_attr_security_sanitize =
	__ATTR(sanitize, 0200, NULL, security_sanitize_store);

static ssize_t security_erase_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t len)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
	struct cxl_port *port = cxlmd->endpoint;
	ssize_t rc;
	bool erase;

	if (kstrtobool(buf, &erase) || !erase)
		return -EINVAL;

	if (!port || !is_cxl_endpoint(port))
		return -EINVAL;

	/* ensure no regions are mapped to this memdev */
	if (port->commit_end != -1)
		return -EBUSY;

	rc = cxl_mem_sanitize(mds, CXL_MBOX_OP_SECURE_ERASE);

	return rc ? rc : len;
}
static struct device_attribute dev_attr_security_erase =
	__ATTR(erase, 0200, NULL, security_erase_store);

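/*
 * Poison list retrieval: with no regions mapped, the device is queried
 * directly by its pmem and ram partition resources; once regions are
 * committed, poison is instead collected per-endpoint so records can be
 * attributed to regions (see cxl_trigger_poison_list() below).
 */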
static int cxl_get_poison_by_memdev(struct cxl_memdev *cxlmd)
{
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	u64 offset, length;
	int rc = 0;

	/* CXL 3.0 Spec 8.2.9.8.4.1 Separate pmem and ram poison requests */
	if (resource_size(&cxlds->pmem_res)) {
		offset = cxlds->pmem_res.start;
		length = resource_size(&cxlds->pmem_res);
		rc = cxl_mem_get_poison(cxlmd, offset, length, NULL);
		if (rc)
			return rc;
	}
	if (resource_size(&cxlds->ram_res)) {
		offset = cxlds->ram_res.start;
		length = resource_size(&cxlds->ram_res);
		rc = cxl_mem_get_poison(cxlmd, offset, length, NULL);
		/*
		 * Invalid Physical Address is not an error for
		 * volatile addresses. Device support is optional.
		 */
		if (rc == -EFAULT)
			rc = 0;
	}
	return rc;
}

int cxl_trigger_poison_list(struct cxl_memdev *cxlmd)
{
	struct cxl_port *port;
	int rc;

	port = cxlmd->endpoint;
	if (!port || !is_cxl_endpoint(port))
		return -EINVAL;

	rc = down_read_interruptible(&cxl_dpa_rwsem);
	if (rc)
		return rc;

	if (port->commit_end == -1) {
		/* No regions mapped to this memdev */
		rc = cxl_get_poison_by_memdev(cxlmd);
	} else {
		/* Regions mapped, collect poison by endpoint */
		rc = cxl_get_poison_by_endpoint(port);
	}
	up_read(&cxl_dpa_rwsem);

	return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_trigger_poison_list, CXL);

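/*
 * DPA-to-region lookup: walk the endpoint port's child decoders and match
 * the device physical address against each decoder's allocated dpa_res.
 */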
struct cxl_dpa_to_region_context {
	struct cxl_region *cxlr;
	u64 dpa;
};

static int __cxl_dpa_to_region(struct device *dev, void *arg)
{
	struct cxl_dpa_to_region_context *ctx = arg;
	struct cxl_endpoint_decoder *cxled;
	u64 dpa = ctx->dpa;

	if (!is_endpoint_decoder(dev))
		return 0;

	cxled = to_cxl_endpoint_decoder(dev);
	if (!cxled->dpa_res || !resource_size(cxled->dpa_res))
		return 0;

	if (dpa > cxled->dpa_res->end || dpa < cxled->dpa_res->start)
		return 0;

	dev_dbg(dev, "dpa:0x%llx mapped in region:%s\n", dpa,
		dev_name(&cxled->cxld.region->dev));

	ctx->cxlr = cxled->cxld.region;

	return 1;
}

static struct cxl_region *cxl_dpa_to_region(struct cxl_memdev *cxlmd, u64 dpa)
{
	struct cxl_dpa_to_region_context ctx;
	struct cxl_port *port;

	ctx = (struct cxl_dpa_to_region_context) {
		.dpa = dpa,
	};
	port = cxlmd->endpoint;
	if (port && is_cxl_endpoint(port) && port->commit_end != -1)
		device_for_each_child(&port->dev, &ctx, __cxl_dpa_to_region);

	return ctx.cxlr;
}

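/*
 * Inject/clear below are debugfs-only test interfaces. The 64-byte alignment
 * check matches the granularity of CXL poison records, whose low address
 * bits are repurposed for metadata such as the poison source.
 */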
static int cxl_validate_poison_dpa(struct cxl_memdev *cxlmd, u64 dpa)
{
	struct cxl_dev_state *cxlds = cxlmd->cxlds;

	if (!IS_ENABLED(CONFIG_DEBUG_FS))
		return 0;

	if (!resource_size(&cxlds->dpa_res)) {
		dev_dbg(cxlds->dev, "device has no dpa resource\n");
		return -EINVAL;
	}
	if (dpa < cxlds->dpa_res.start || dpa > cxlds->dpa_res.end) {
		dev_dbg(cxlds->dev, "dpa:0x%llx not in resource:%pR\n",
			dpa, &cxlds->dpa_res);
		return -EINVAL;
	}
	if (!IS_ALIGNED(dpa, 64)) {
		dev_dbg(cxlds->dev, "dpa:0x%llx is not 64-byte aligned\n", dpa);
		return -EINVAL;
	}

	return 0;
}

int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
{
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
	struct cxl_mbox_inject_poison inject;
	struct cxl_poison_record record;
	struct cxl_mbox_cmd mbox_cmd;
	struct cxl_region *cxlr;
	int rc;

	if (!IS_ENABLED(CONFIG_DEBUG_FS))
		return 0;

	rc = down_read_interruptible(&cxl_dpa_rwsem);
	if (rc)
		return rc;

	rc = cxl_validate_poison_dpa(cxlmd, dpa);
	if (rc)
		goto out;

	inject.address = cpu_to_le64(dpa);
	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_INJECT_POISON,
		.size_in = sizeof(inject),
		.payload_in = &inject,
	};
	rc = cxl_internal_send_cmd(mds, &mbox_cmd);
	if (rc)
		goto out;

	cxlr = cxl_dpa_to_region(cxlmd, dpa);
	if (cxlr)
		dev_warn_once(mds->cxlds.dev,
			      "poison inject dpa:%#llx region: %s\n", dpa,
			      dev_name(&cxlr->dev));

	record = (struct cxl_poison_record) {
		.address = cpu_to_le64(dpa),
		.length = cpu_to_le32(1),
	};
	trace_cxl_poison(cxlmd, cxlr, &record, 0, 0, CXL_POISON_TRACE_INJECT);
out:
	up_read(&cxl_dpa_rwsem);

	return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_inject_poison, CXL);

int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
{
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
	struct cxl_mbox_clear_poison clear;
	struct cxl_poison_record record;
	struct cxl_mbox_cmd mbox_cmd;
	struct cxl_region *cxlr;
	int rc;

	if (!IS_ENABLED(CONFIG_DEBUG_FS))
		return 0;

	rc = down_read_interruptible(&cxl_dpa_rwsem);
	if (rc)
		return rc;

	rc = cxl_validate_poison_dpa(cxlmd, dpa);
	if (rc)
		goto out;

	/*
	 * In CXL 3.0 Spec 8.2.9.8.4.3, the Clear Poison mailbox command
	 * is defined to accept 64 bytes of write-data, along with the
	 * address to clear. This driver uses zeroes as write-data.
	 */
	clear = (struct cxl_mbox_clear_poison) {
		.address = cpu_to_le64(dpa)
	};

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_CLEAR_POISON,
		.size_in = sizeof(clear),
		.payload_in = &clear,
	};

	rc = cxl_internal_send_cmd(mds, &mbox_cmd);
	if (rc)
		goto out;

	cxlr = cxl_dpa_to_region(cxlmd, dpa);
	if (cxlr)
		dev_warn_once(mds->cxlds.dev,
			      "poison clear dpa:%#llx region: %s\n", dpa,
			      dev_name(&cxlr->dev));

	record = (struct cxl_poison_record) {
		.address = cpu_to_le64(dpa),
		.length = cpu_to_le32(1),
	};
	trace_cxl_poison(cxlmd, cxlr, &record, 0, 0, CXL_POISON_TRACE_CLEAR);
out:
	up_read(&cxl_dpa_rwsem);

	return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_clear_poison, CXL);

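/*
 * sysfs layout: the attributes below land in the memdev's root directory,
 * while the ram/pmem size attributes and the security controls are placed
 * in "ram/", "pmem/" and "security/" subdirectories via named groups.
 */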
static struct attribute *cxl_memdev_attributes[] = {
	&dev_attr_serial.attr,
	&dev_attr_firmware_version.attr,
	&dev_attr_payload_max.attr,
	&dev_attr_label_storage_size.attr,
	&dev_attr_numa_node.attr,
	NULL,
};

static struct attribute *cxl_memdev_pmem_attributes[] = {
	&dev_attr_pmem_size.attr,
	NULL,
};

static struct attribute *cxl_memdev_ram_attributes[] = {
	&dev_attr_ram_size.attr,
	NULL,
};

static struct attribute *cxl_memdev_security_attributes[] = {
	&dev_attr_security_state.attr,
	&dev_attr_security_sanitize.attr,
	&dev_attr_security_erase.attr,
	NULL,
};

static umode_t cxl_memdev_visible(struct kobject *kobj, struct attribute *a,
				  int n)
{
	if (!IS_ENABLED(CONFIG_NUMA) && a == &dev_attr_numa_node.attr)
		return 0;
	return a->mode;
}

static struct attribute_group cxl_memdev_attribute_group = {
	.attrs = cxl_memdev_attributes,
	.is_visible = cxl_memdev_visible,
};

static struct attribute_group cxl_memdev_ram_attribute_group = {
	.name = "ram",
	.attrs = cxl_memdev_ram_attributes,
};

static struct attribute_group cxl_memdev_pmem_attribute_group = {
	.name = "pmem",
	.attrs = cxl_memdev_pmem_attributes,
};

static umode_t cxl_memdev_security_visible(struct kobject *kobj,
					   struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);

	if (a == &dev_attr_security_sanitize.attr &&
	    !test_bit(CXL_SEC_ENABLED_SANITIZE, mds->security.enabled_cmds))
		return 0;

	if (a == &dev_attr_security_erase.attr &&
	    !test_bit(CXL_SEC_ENABLED_SECURE_ERASE, mds->security.enabled_cmds))
		return 0;

	return a->mode;
}

static struct attribute_group cxl_memdev_security_attribute_group = {
	.name = "security",
	.attrs = cxl_memdev_security_attributes,
	.is_visible = cxl_memdev_security_visible,
};

static const struct attribute_group *cxl_memdev_attribute_groups[] = {
	&cxl_memdev_attribute_group,
	&cxl_memdev_ram_attribute_group,
	&cxl_memdev_pmem_attribute_group,
	&cxl_memdev_security_attribute_group,
	NULL,
};

static const struct device_type cxl_memdev_type = {
	.name = "cxl_memdev",
	.release = cxl_memdev_release,
	.devnode = cxl_memdev_devnode,
	.groups = cxl_memdev_attribute_groups,
};

bool is_cxl_memdev(const struct device *dev)
{
	return dev->type == &cxl_memdev_type;
}
EXPORT_SYMBOL_NS_GPL(is_cxl_memdev, CXL);

/**
 * set_exclusive_cxl_commands() - atomically disable user cxl commands
 * @mds: The device state to operate on
 * @cmds: bitmap of commands to mark exclusive
 *
 * Grab the cxl_memdev_rwsem in write mode to flush in-flight
 * invocations of the ioctl path and then disable future execution of
 * commands with the command ids set in @cmds.
 */
void set_exclusive_cxl_commands(struct cxl_memdev_state *mds,
				unsigned long *cmds)
{
	down_write(&cxl_memdev_rwsem);
	bitmap_or(mds->exclusive_cmds, mds->exclusive_cmds, cmds,
		  CXL_MEM_COMMAND_ID_MAX);
	up_write(&cxl_memdev_rwsem);
}
EXPORT_SYMBOL_NS_GPL(set_exclusive_cxl_commands, CXL);

/**
 * clear_exclusive_cxl_commands() - atomically enable user cxl commands
 * @mds: The device state to modify
 * @cmds: bitmap of commands to mark available for userspace
 */
void clear_exclusive_cxl_commands(struct cxl_memdev_state *mds,
				  unsigned long *cmds)
{
	down_write(&cxl_memdev_rwsem);
	bitmap_andnot(mds->exclusive_cmds, mds->exclusive_cmds, cmds,
		      CXL_MEM_COMMAND_ID_MAX);
	up_write(&cxl_memdev_rwsem);
}
EXPORT_SYMBOL_NS_GPL(clear_exclusive_cxl_commands, CXL);

static void cxl_memdev_security_shutdown(struct device *dev)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);

	if (mds->security.poll)
		cancel_delayed_work_sync(&mds->security.poll_dwork);
}

static void cxl_memdev_shutdown(struct device *dev)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);

	down_write(&cxl_memdev_rwsem);
	cxl_memdev_security_shutdown(dev);
	cxlmd->cxlds = NULL;
	up_write(&cxl_memdev_rwsem);
}

static void cxl_memdev_unregister(void *_cxlmd)
{
	struct cxl_memdev *cxlmd = _cxlmd;
	struct device *dev = &cxlmd->dev;

	cxl_memdev_shutdown(dev);
	cdev_device_del(&cxlmd->cdev, dev);
	put_device(dev);
}

static void detach_memdev(struct work_struct *work)
{
	struct cxl_memdev *cxlmd;

	cxlmd = container_of(work, typeof(*cxlmd), detach_work);
	device_release_driver(&cxlmd->dev);
	put_device(&cxlmd->dev);
}

static struct lock_class_key cxl_memdev_key;

static struct cxl_memdev *cxl_memdev_alloc(struct cxl_dev_state *cxlds,
					   const struct file_operations *fops)
{
	struct cxl_memdev *cxlmd;
	struct device *dev;
	struct cdev *cdev;
	int rc;

	cxlmd = kzalloc(sizeof(*cxlmd), GFP_KERNEL);
	if (!cxlmd)
		return ERR_PTR(-ENOMEM);

	rc = ida_alloc_max(&cxl_memdev_ida, CXL_MEM_MAX_DEVS - 1, GFP_KERNEL);
	if (rc < 0)
		goto err;
	cxlmd->id = rc;
	cxlmd->depth = -1;

	dev = &cxlmd->dev;
	device_initialize(dev);
	lockdep_set_class(&dev->mutex, &cxl_memdev_key);
	dev->parent = cxlds->dev;
	dev->bus = &cxl_bus_type;
	dev->devt = MKDEV(cxl_mem_major, cxlmd->id);
	dev->type = &cxl_memdev_type;
	device_set_pm_not_required(dev);
	INIT_WORK(&cxlmd->detach_work, detach_memdev);

	cdev = &cxlmd->cdev;
	cdev_init(cdev, fops);
	return cxlmd;

err:
	kfree(cxlmd);
	return ERR_PTR(rc);
}

static long __cxl_memdev_ioctl(struct cxl_memdev *cxlmd, unsigned int cmd,
			       unsigned long arg)
{
	switch (cmd) {
	case CXL_MEM_QUERY_COMMANDS:
		return cxl_query_cmd(cxlmd, (void __user *)arg);
	case CXL_MEM_SEND_COMMAND:
		return cxl_send_cmd(cxlmd, (void __user *)arg);
	default:
		return -ENOTTY;
	}
}

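/*
 * The ioctl path takes cxl_memdev_rwsem as a reader, so it observes either a
 * live cxlmd->cxlds or the NULL stored by cxl_memdev_shutdown(), which holds
 * the same lock as a writer while unpublishing the device.
 */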
static long cxl_memdev_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct cxl_memdev *cxlmd = file->private_data;
	struct cxl_dev_state *cxlds;
	int rc = -ENXIO;

	down_read(&cxl_memdev_rwsem);
	cxlds = cxlmd->cxlds;
	if (cxlds && cxlds->type == CXL_DEVTYPE_CLASSMEM)
		rc = __cxl_memdev_ioctl(cxlmd, cmd, arg);
	up_read(&cxl_memdev_rwsem);

	return rc;
}

static int cxl_memdev_open(struct inode *inode, struct file *file)
{
	struct cxl_memdev *cxlmd =
		container_of(inode->i_cdev, typeof(*cxlmd), cdev);

	get_device(&cxlmd->dev);
	file->private_data = cxlmd;

	return 0;
}

static int cxl_memdev_release_file(struct inode *inode, struct file *file)
{
	struct cxl_memdev *cxlmd =
		container_of(inode->i_cdev, typeof(*cxlmd), cdev);

	put_device(&cxlmd->dev);

	return 0;
}

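/*
 * Firmware update support: the helpers below implement the kernel's
 * fw_upload interface in terms of the CXL Get FW Info, Transfer FW and
 * Activate FW mailbox commands.
 */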
/**
 * cxl_mem_get_fw_info - Get Firmware info
 * @mds: The device data for the operation
 *
 * Retrieve firmware info for the device specified.
 *
 * Return: 0 if no error; or the result of the mailbox command.
 *
 * See CXL-3.0 8.2.9.3.1 Get FW Info
 */
static int cxl_mem_get_fw_info(struct cxl_memdev_state *mds)
{
	struct cxl_mbox_get_fw_info info;
	struct cxl_mbox_cmd mbox_cmd;
	int rc;

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_GET_FW_INFO,
		.size_out = sizeof(info),
		.payload_out = &info,
	};

	rc = cxl_internal_send_cmd(mds, &mbox_cmd);
	if (rc < 0)
		return rc;

	mds->fw.num_slots = info.num_slots;
	mds->fw.cur_slot = FIELD_GET(CXL_FW_INFO_SLOT_INFO_CUR_MASK,
				     info.slot_info);

	return 0;
}

/**
 * cxl_mem_activate_fw - Activate Firmware
 * @mds: The device data for the operation
 * @slot: slot number to activate
 *
 * Activate firmware in a given slot for the device specified.
 *
 * Return: 0 if no error; or the result of the mailbox command.
 *
 * See CXL-3.0 8.2.9.3.3 Activate FW
 */
static int cxl_mem_activate_fw(struct cxl_memdev_state *mds, int slot)
{
	struct cxl_mbox_activate_fw activate;
	struct cxl_mbox_cmd mbox_cmd;

	if (slot == 0 || slot > mds->fw.num_slots)
		return -EINVAL;

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_ACTIVATE_FW,
		.size_in = sizeof(activate),
		.payload_in = &activate,
	};

	/* Only offline activation supported for now */
	activate.action = CXL_FW_ACTIVATE_OFFLINE;
	activate.slot = slot;

	return cxl_internal_send_cmd(mds, &mbox_cmd);
}

/**
 * cxl_mem_abort_fw_xfer - Abort an in-progress FW transfer
 * @mds: The device data for the operation
 *
 * Abort an in-progress firmware transfer for the device specified.
 *
 * Return: 0 if no error; or the result of the mailbox command.
 *
 * See CXL-3.0 8.2.9.3.2 Transfer FW
 */
static int cxl_mem_abort_fw_xfer(struct cxl_memdev_state *mds)
{
	struct cxl_mbox_transfer_fw *transfer;
	struct cxl_mbox_cmd mbox_cmd;
	int rc;

	transfer = kzalloc(struct_size(transfer, data, 0), GFP_KERNEL);
	if (!transfer)
		return -ENOMEM;

	/* Set a 1s poll interval and a total wait time of 30s */
	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_TRANSFER_FW,
		.size_in = sizeof(*transfer),
		.payload_in = transfer,
		.poll_interval_ms = 1000,
		.poll_count = 30,
	};

	transfer->action = CXL_FW_TRANSFER_ACTION_ABORT;

	rc = cxl_internal_send_cmd(mds, &mbox_cmd);
	kfree(transfer);
	return rc;
}

static void cxl_fw_cleanup(struct fw_upload *fwl)
{
	struct cxl_memdev_state *mds = fwl->dd_handle;

	mds->fw.next_slot = 0;
}

static int cxl_fw_do_cancel(struct fw_upload *fwl)
{
	struct cxl_memdev_state *mds = fwl->dd_handle;
	struct cxl_dev_state *cxlds = &mds->cxlds;
	struct cxl_memdev *cxlmd = cxlds->cxlmd;
	int rc;

	rc = cxl_mem_abort_fw_xfer(mds);
	if (rc < 0)
		dev_err(&cxlmd->dev, "Error aborting FW transfer: %d\n", rc);

	return FW_UPLOAD_ERR_CANCELED;
}

static enum fw_upload_err cxl_fw_prepare(struct fw_upload *fwl, const u8 *data,
					 u32 size)
{
	struct cxl_memdev_state *mds = fwl->dd_handle;
	struct cxl_mbox_transfer_fw *transfer;

	if (!size)
		return FW_UPLOAD_ERR_INVALID_SIZE;

	mds->fw.oneshot = struct_size(transfer, data, size) <
			  mds->payload_size;

	if (cxl_mem_get_fw_info(mds))
		return FW_UPLOAD_ERR_HW_ERROR;

	/*
	 * So far no state has been changed, hence no other cleanup is
	 * necessary. Simply return the cancelled status.
	 */
	if (test_and_clear_bit(CXL_FW_CANCEL, mds->fw.state))
		return FW_UPLOAD_ERR_CANCELED;

	return FW_UPLOAD_ERR_NONE;
}

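/*
 * Each ->write() call moves at most one mailbox payload worth of data, i.e.
 * payload_size minus the 128-byte transfer header. As a hedged sketch,
 * assuming a 1 MB payload_size and a 4 MB image: the first slice is sent
 * with ACTION_INITIATE, intermediate slices with ACTION_CONTINUE, and the
 * final slice with ACTION_END, which also selects the slot to activate. An
 * image that fits in a single payload goes out as one ACTION_FULL transfer.
 */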
static enum fw_upload_err cxl_fw_write(struct fw_upload *fwl, const u8 *data,
				       u32 offset, u32 size, u32 *written)
{
	struct cxl_memdev_state *mds = fwl->dd_handle;
	struct cxl_dev_state *cxlds = &mds->cxlds;
	struct cxl_memdev *cxlmd = cxlds->cxlmd;
	struct cxl_mbox_transfer_fw *transfer;
	struct cxl_mbox_cmd mbox_cmd;
	u32 cur_size, remaining;
	size_t size_in;
	int rc;

	*written = 0;

	/* Offset has to be aligned to 128B (CXL-3.0 8.2.9.3.2 Table 8-57) */
	if (!IS_ALIGNED(offset, CXL_FW_TRANSFER_ALIGNMENT)) {
		dev_err(&cxlmd->dev,
			"misaligned offset for FW transfer slice (%u)\n",
			offset);
		return FW_UPLOAD_ERR_RW_ERROR;
	}

	/*
	 * Pick transfer size based on mds->payload_size. @size must be
	 * 128-byte aligned, ->payload_size is a power of 2 starting at
	 * 256 bytes, and sizeof(*transfer) is 128. These constraints
	 * imply that @cur_size will always be 128-byte aligned.
	 */
	cur_size = min_t(size_t, size, mds->payload_size - sizeof(*transfer));

	remaining = size - cur_size;
	size_in = struct_size(transfer, data, cur_size);

	if (test_and_clear_bit(CXL_FW_CANCEL, mds->fw.state))
		return cxl_fw_do_cancel(fwl);

	/*
	 * Slot numbers are 1-indexed
	 * cur_slot is the 0-indexed next_slot (i.e. 'cur_slot - 1 + 1')
	 * Check for rollover using modulo, and 1-index it by adding 1
	 */
	mds->fw.next_slot = (mds->fw.cur_slot % mds->fw.num_slots) + 1;

	/* Do the transfer via mailbox cmd */
	transfer = kzalloc(size_in, GFP_KERNEL);
	if (!transfer)
		return FW_UPLOAD_ERR_RW_ERROR;

	transfer->offset = cpu_to_le32(offset / CXL_FW_TRANSFER_ALIGNMENT);
	memcpy(transfer->data, data + offset, cur_size);
	if (mds->fw.oneshot) {
		transfer->action = CXL_FW_TRANSFER_ACTION_FULL;
		transfer->slot = mds->fw.next_slot;
	} else {
		if (offset == 0) {
			transfer->action = CXL_FW_TRANSFER_ACTION_INITIATE;
		} else if (remaining == 0) {
			transfer->action = CXL_FW_TRANSFER_ACTION_END;
			transfer->slot = mds->fw.next_slot;
		} else {
			transfer->action = CXL_FW_TRANSFER_ACTION_CONTINUE;
		}
	}

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_TRANSFER_FW,
		.size_in = size_in,
		.payload_in = transfer,
		.poll_interval_ms = 1000,
		.poll_count = 30,
	};

	rc = cxl_internal_send_cmd(mds, &mbox_cmd);
	if (rc < 0) {
		rc = FW_UPLOAD_ERR_RW_ERROR;
		goto out_free;
	}

	*written = cur_size;

	/* Activate FW if oneshot or if the last slice was written */
	if (mds->fw.oneshot || remaining == 0) {
		dev_dbg(&cxlmd->dev, "Activating firmware slot: %d\n",
			mds->fw.next_slot);
		rc = cxl_mem_activate_fw(mds, mds->fw.next_slot);
		if (rc) {
			dev_err(&cxlmd->dev, "Error activating firmware: %d\n",
				rc);
			rc = FW_UPLOAD_ERR_HW_ERROR;
			goto out_free;
		}
	}

	rc = FW_UPLOAD_ERR_NONE;

out_free:
	kfree(transfer);
	return rc;
}

static enum fw_upload_err cxl_fw_poll_complete(struct fw_upload *fwl)
{
	struct cxl_memdev_state *mds = fwl->dd_handle;

	/*
	 * cxl_internal_send_cmd() handles background operations synchronously.
	 * No need to wait for completions here - any errors would've been
	 * reported and handled during the ->write() call(s).
	 * Just check if a cancel request was received, and return success.
	 */
	if (test_and_clear_bit(CXL_FW_CANCEL, mds->fw.state))
		return cxl_fw_do_cancel(fwl);

	return FW_UPLOAD_ERR_NONE;
}

static void cxl_fw_cancel(struct fw_upload *fwl)
{
	struct cxl_memdev_state *mds = fwl->dd_handle;

	set_bit(CXL_FW_CANCEL, mds->fw.state);
}

static const struct fw_upload_ops cxl_memdev_fw_ops = {
	.prepare = cxl_fw_prepare,
	.write = cxl_fw_write,
	.poll_complete = cxl_fw_poll_complete,
	.cancel = cxl_fw_cancel,
	.cleanup = cxl_fw_cleanup,
};

static void devm_cxl_remove_fw_upload(void *fwl)
{
	firmware_upload_unregister(fwl);
}

int cxl_memdev_setup_fw_upload(struct cxl_memdev_state *mds)
{
	struct cxl_dev_state *cxlds = &mds->cxlds;
	struct device *dev = &cxlds->cxlmd->dev;
	struct fw_upload *fwl;
	int rc;

	if (!test_bit(CXL_MEM_COMMAND_ID_GET_FW_INFO, mds->enabled_cmds))
		return 0;

	fwl = firmware_upload_register(THIS_MODULE, dev, dev_name(dev),
				       &cxl_memdev_fw_ops, mds);
	if (IS_ERR(fwl))
		return dev_err_probe(dev, PTR_ERR(fwl),
				     "Failed to register firmware loader\n");

	rc = devm_add_action_or_reset(cxlds->dev, devm_cxl_remove_fw_upload,
				      fwl);
	if (rc)
		dev_err(dev,
			"Failed to add firmware loader remove action: %d\n",
			rc);

	return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_memdev_setup_fw_upload, CXL);

static const struct file_operations cxl_memdev_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = cxl_memdev_ioctl,
	.open = cxl_memdev_open,
	.release = cxl_memdev_release_file,
	.compat_ioctl = compat_ptr_ioctl,
	.llseek = noop_llseek,
};

static void put_sanitize(void *data)
{
	struct cxl_memdev_state *mds = data;

	sysfs_put(mds->security.sanitize_node);
}

static int cxl_memdev_security_init(struct cxl_memdev *cxlmd)
{
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
	struct device *dev = &cxlmd->dev;
	struct kernfs_node *sec;

	sec = sysfs_get_dirent(dev->kobj.sd, "security");
	if (!sec) {
		dev_err(dev, "sysfs_get_dirent 'security' failed\n");
		return -ENODEV;
	}
	mds->security.sanitize_node = sysfs_get_dirent(sec, "state");
	sysfs_put(sec);
	if (!mds->security.sanitize_node) {
		dev_err(dev, "sysfs_get_dirent 'state' failed\n");
		return -ENODEV;
	}

	return devm_add_action_or_reset(cxlds->dev, put_sanitize, mds);
}

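/*
 * Publish a memdev on the CXL bus: allocate and name the device "mem%d",
 * make the ioctl path live by linking cxlmd and cxlds, then add the cdev.
 * Teardown is devm-managed; cxl_memdev_unregister() shuts down in-flight
 * ioctls before deleting the cdev.
 */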
struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds)
{
	struct cxl_memdev *cxlmd;
	struct device *dev;
	struct cdev *cdev;
	int rc;

	cxlmd = cxl_memdev_alloc(cxlds, &cxl_memdev_fops);
	if (IS_ERR(cxlmd))
		return cxlmd;

	dev = &cxlmd->dev;
	rc = dev_set_name(dev, "mem%d", cxlmd->id);
	if (rc)
		goto err;

	/*
	 * Activate ioctl operations, no cxl_memdev_rwsem manipulation
	 * needed as this is ordered with cdev_add() publishing the device.
	 */
	cxlmd->cxlds = cxlds;
	cxlds->cxlmd = cxlmd;

	cdev = &cxlmd->cdev;
	rc = cdev_device_add(cdev, dev);
	if (rc)
		goto err;

	rc = cxl_memdev_security_init(cxlmd);
	if (rc)
		goto err;

	rc = devm_add_action_or_reset(cxlds->dev, cxl_memdev_unregister, cxlmd);
	if (rc)
		return ERR_PTR(rc);
	return cxlmd;

err:
	/*
	 * The cdev was briefly live, shutdown any ioctl operations that
	 * saw that state.
	 */
	cxl_memdev_shutdown(dev);
	put_device(dev);
	return ERR_PTR(rc);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_memdev, CXL);

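/*
 * Reserve a dynamic char major with CXL_MEM_MAX_DEVS minors; individual
 * memdev nodes are carved out of this region via MKDEV(cxl_mem_major, id)
 * in cxl_memdev_alloc().
 */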
__init int cxl_memdev_init(void)
{
	dev_t devt;
	int rc;

	rc = alloc_chrdev_region(&devt, 0, CXL_MEM_MAX_DEVS, "cxl");
	if (rc)
		return rc;

	cxl_mem_major = MAJOR(devt);

	return 0;
}

void cxl_memdev_exit(void)
{
	unregister_chrdev_region(MKDEV(cxl_mem_major, 0), CXL_MEM_MAX_DEVS);
}