1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * CXL Flash Device Driver
8 * Copyright (C) 2018 IBM Corporation
11 #include <linux/file.h>
12 #include <linux/idr.h>
13 #include <linux/module.h>
14 #include <linux/mount.h>
15 #include <linux/pseudo_fs.h>
16 #include <linux/poll.h>
17 #include <linux/sched/signal.h>
18 #include <linux/interrupt.h>
20 #include <misc/ocxl.h>
22 #include <uapi/misc/cxl.h>
28 * Pseudo-filesystem to allocate inodes.
31 #define OCXLFLASH_FS_MAGIC 0x1697698f
33 static int ocxlflash_fs_cnt;
34 static struct vfsmount *ocxlflash_vfs_mount;
static int ocxlflash_fs_init_fs_context(struct fs_context *fc)
	/* init_pseudo() returns NULL on allocation failure */
	return init_pseudo(fc, OCXLFLASH_FS_MAGIC) ? 0 : -ENOMEM;
/* Pseudo filesystem used only to allocate anonymous inodes for context fds */
static struct file_system_type ocxlflash_fs_type = {
	.init_fs_context = ocxlflash_fs_init_fs_context,
	.kill_sb = kill_anon_super,	/* no backing store to tear down */
49 * ocxlflash_release_mapping() - release the memory mapping
50 * @ctx: Context whose mapping is to be released.
static void ocxlflash_release_mapping(struct ocxlflash_context *ctx)
	/* Drop one pin on the pseudo-fs taken via simple_pin_fs() */
	simple_release_fs(&ocxlflash_vfs_mount, &ocxlflash_fs_cnt);
60 * ocxlflash_getfile() - allocate pseudo filesystem, inode, and the file
61 * @dev: Generic device of the host.
62 * @name: Name of the pseudo filesystem.
63 * @fops: File operations.
64 * @priv: Private data.
65 * @flags: Flags for the file.
67 * Return: pointer to the file on success, ERR_PTR on failure
static struct file *ocxlflash_getfile(struct device *dev, const char *name,
				      const struct file_operations *fops,
				      void *priv, int flags)
	/* Hold a reference on the fops owner for the lifetime of the file */
	if (fops->owner && !try_module_get(fops->owner)) {
		dev_err(dev, "%s: Owner does not exist\n", __func__);
	/* Pin (mount on first use) the ocxlflash pseudo filesystem */
	rc = simple_pin_fs(&ocxlflash_fs_type, &ocxlflash_vfs_mount,
	if (unlikely(rc < 0)) {
		dev_err(dev, "%s: Cannot mount ocxlflash pseudofs rc=%d\n",
	inode = alloc_anon_inode(ocxlflash_vfs_mount->mnt_sb);
		dev_err(dev, "%s: alloc_anon_inode failed rc=%d\n",
	/* Only access mode and non-blocking bits of flags are honored */
	file = alloc_file_pseudo(inode, ocxlflash_vfs_mount, name,
				 flags & (O_ACCMODE | O_NONBLOCK), fops);
		dev_err(dev, "%s: alloc_file failed rc=%d\n",
	file->private_data = priv;
	/* Error unwind: release the fs pin and the module reference */
	simple_release_fs(&ocxlflash_vfs_mount, &ocxlflash_fs_cnt);
	module_put(fops->owner);
123 * ocxlflash_psa_map() - map the process specific MMIO space
124 * @ctx_cookie: Adapter context for which the mapping needs to be done.
126 * Return: MMIO pointer of the mapped region
static void __iomem *ocxlflash_psa_map(void *ctx_cookie)
	struct ocxlflash_context *ctx = ctx_cookie;
	struct device *dev = ctx->hw_afu->dev;
	/* psn_phys/psn_size are only assigned once the context is STARTED */
	mutex_lock(&ctx->state_mutex);
	if (ctx->state != STARTED) {
		dev_err(dev, "%s: Context not started, state=%d\n", __func__,
		mutex_unlock(&ctx->state_mutex);
	mutex_unlock(&ctx->state_mutex);
	return ioremap(ctx->psn_phys, ctx->psn_size);
146 * ocxlflash_psa_unmap() - unmap the process specific MMIO space
147 * @addr: MMIO pointer to unmap.
/* Inverse of ocxlflash_psa_map() */
static void ocxlflash_psa_unmap(void __iomem *addr)
155 * ocxlflash_process_element() - get process element of the adapter context
156 * @ctx_cookie: Adapter context associated with the process element.
158 * Return: process element of the adapter context
static int ocxlflash_process_element(void *ctx_cookie)
	/* ctx_cookie is always a struct ocxlflash_context * in this backend */
	struct ocxlflash_context *ctx = ctx_cookie;
168 * afu_map_irq() - map the interrupt of the adapter context
170 * @ctx: Adapter context.
171 * @num: Per-context AFU interrupt number.
172 * @handler: Interrupt handler to register.
173 * @cookie: Interrupt handler private data.
174 * @name: Name of the interrupt.
176 * Return: 0 on success, -errno on failure
static int afu_map_irq(u64 flags, struct ocxlflash_context *ctx, int num,
		       irq_handler_t handler, void *cookie, char *name)
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct device *dev = afu->dev;
	struct ocxlflash_irqs *irq;
	struct xive_irq_data *xd;
	/* Validate the per-context interrupt index */
	if (num < 0 || num >= ctx->num_irqs) {
		dev_err(dev, "%s: Interrupt %d not allocated\n", __func__, num);
	irq = &ctx->irqs[num];
	/* Map the hardware irq into the Linux virq space */
	virq = irq_create_mapping(NULL, irq->hwirq);
	if (unlikely(!virq)) {
		dev_err(dev, "%s: irq_create_mapping failed\n", __func__);
	rc = request_irq(virq, handler, 0, name, cookie);
		dev_err(dev, "%s: request_irq failed rc=%d\n", __func__, rc);
	/* NOTE(review): assumes XIVE handler data is present — confirm on non-XIVE */
	xd = irq_get_handler_data(virq);
		dev_err(dev, "%s: Can't get interrupt data\n", __func__);
	/* Cache the trigger page MMIO address for ocxlflash_get_irq_objhndl() */
	irq->vtrig = xd->trig_mmio;
	/* Error unwind: free the irq and dispose the virq mapping */
	free_irq(virq, cookie);
	irq_dispose_mapping(virq);
227 * ocxlflash_map_afu_irq() - map the interrupt of the adapter context
228 * @ctx_cookie: Adapter context.
229 * @num: Per-context AFU interrupt number.
230 * @handler: Interrupt handler to register.
231 * @cookie: Interrupt handler private data.
232 * @name: Name of the interrupt.
234 * Return: 0 on success, -errno on failure
static int ocxlflash_map_afu_irq(void *ctx_cookie, int num,
				 irq_handler_t handler, void *cookie,
	/* Thin backend-ops wrapper; flags are unused (0) */
	return afu_map_irq(0, ctx_cookie, num, handler, cookie, name);
244 * afu_unmap_irq() - unmap the interrupt
246 * @ctx: Adapter context.
247 * @num: Per-context AFU interrupt number.
248 * @cookie: Interrupt handler private data.
static void afu_unmap_irq(u64 flags, struct ocxlflash_context *ctx, int num,
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct device *dev = afu->dev;
	struct ocxlflash_irqs *irq;
	/* Reject out-of-range per-context interrupt indices */
	if (num < 0 || num >= ctx->num_irqs) {
		dev_err(dev, "%s: Interrupt %d not allocated\n", __func__, num);
	irq = &ctx->irqs[num];
	/* Only tear down if the virq mapping still exists */
	if (irq_find_mapping(NULL, irq->hwirq)) {
		free_irq(irq->virq, cookie);
		irq_dispose_mapping(irq->virq);
	/* Scrub the slot so a stale virq/vtrig cannot be reused */
	memset(irq, 0, sizeof(*irq));
273 * ocxlflash_unmap_afu_irq() - unmap the interrupt
274 * @ctx_cookie: Adapter context.
275 * @num: Per-context AFU interrupt number.
276 * @cookie: Interrupt handler private data.
static void ocxlflash_unmap_afu_irq(void *ctx_cookie, int num, void *cookie)
	/* Thin backend-ops wrapper; flags are unused (0) */
	return afu_unmap_irq(0, ctx_cookie, num, cookie);
284 * ocxlflash_get_irq_objhndl() - get the object handle for an interrupt
285 * @ctx_cookie: Context associated with the interrupt.
286 * @irq: Interrupt number.
288 * Return: effective address of the mapped region
static u64 ocxlflash_get_irq_objhndl(void *ctx_cookie, int irq)
	struct ocxlflash_context *ctx = ctx_cookie;
	if (irq < 0 || irq >= ctx->num_irqs)
	/* vtrig is the trigger page MMIO pointer cached by afu_map_irq() */
	return (__force u64)ctx->irqs[irq].vtrig;
301 * ocxlflash_xsl_fault() - callback when translation error is triggered
302 * @data: Private data provided at callback registration, the context.
303 * @addr: Address that triggered the error.
304 * @dsisr: Value of dsisr register.
static void ocxlflash_xsl_fault(void *data, u64 addr, u64 dsisr)
	struct ocxlflash_context *ctx = data;
	/* Record fault details under slock; afu_read() consumes them */
	spin_lock(&ctx->slock);
	ctx->fault_addr = addr;
	ctx->fault_dsisr = dsisr;
	ctx->pending_fault = true;
	spin_unlock(&ctx->slock);
	/* Wake poll/read waiters so the fault event is delivered */
	wake_up_all(&ctx->wq);
320 * start_context() - local routine to start a context
321 * @ctx: Adapter context to be started.
323 * Assign the context specific MMIO space, add and enable the PE.
325 * Return: 0 on success, -errno on failure
static int start_context(struct ocxlflash_context *ctx)
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct ocxl_afu_config *acfg = &afu->acfg;
	void *link_token = afu->link_token;
	struct device *dev = afu->dev;
	bool master = ctx->master;
	struct mm_struct *mm;
	/* Only an OPENED context may be started */
	mutex_lock(&ctx->state_mutex);
	if (ctx->state != OPENED) {
		dev_err(dev, "%s: Context state invalid, state=%d\n",
			__func__, ctx->state);
	/* Master context: use the AFU global MMIO space */
	ctx->psn_size = acfg->global_mmio_size;
	ctx->psn_phys = afu->gmmio_phys;
	/* User context: per-process MMIO stride indexed by PE */
	ctx->psn_size = acfg->pp_mmio_stride;
	ctx->psn_phys = afu->ppmmio_phys + (ctx->pe * ctx->psn_size);
	/* pid and mm not set for master contexts */
	pid = current->mm->context.id;
	/* Add the PE to the SPA; faults are reported via ocxlflash_xsl_fault */
	rc = ocxl_link_add_pe(link_token, ctx->pe, pid, 0, 0, mm,
			      ocxlflash_xsl_fault, ctx);
		dev_err(dev, "%s: ocxl_link_add_pe failed rc=%d\n",
	ctx->state = STARTED;
	mutex_unlock(&ctx->state_mutex);
378 * ocxlflash_start_context() - start a kernel context
379 * @ctx_cookie: Adapter context to be started.
381 * Return: 0 on success, -errno on failure
static int ocxlflash_start_context(void *ctx_cookie)
	struct ocxlflash_context *ctx = ctx_cookie;
	/* Delegate to the common start path */
	return start_context(ctx);
391 * ocxlflash_stop_context() - stop a context
392 * @ctx_cookie: Adapter context to be stopped.
394 * Return: 0 on success, -errno on failure
static int ocxlflash_stop_context(void *ctx_cookie)
	struct ocxlflash_context *ctx = ctx_cookie;
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct ocxl_afu_config *acfg = &afu->acfg;
	struct pci_dev *pdev = afu->pdev;
	struct device *dev = afu->dev;
	enum ocxlflash_ctx_state state;
	/* Snapshot the state under the mutex before tearing anything down */
	mutex_lock(&ctx->state_mutex);
	mutex_unlock(&ctx->state_mutex);
	if (state != STARTED)
	/* Ask the AFU to quiesce the PASID before removing the PE */
	rc = ocxl_config_terminate_pasid(pdev, acfg->dvsec_afu_control_pos,
		dev_err(dev, "%s: ocxl_config_terminate_pasid failed rc=%d\n",
	/* If EBUSY, PE could be referenced in future by the AFU */
	rc = ocxl_link_remove_pe(afu->link_token, ctx->pe);
		dev_err(dev, "%s: ocxl_link_remove_pe failed rc=%d\n",
434 * ocxlflash_afu_reset() - reset the AFU
435 * @ctx_cookie: Adapter context.
static int ocxlflash_afu_reset(void *ctx_cookie)
	struct ocxlflash_context *ctx = ctx_cookie;
	struct device *dev = ctx->hw_afu->dev;
	/* Pending implementation from OCXL transport services */
	dev_err_once(dev, "%s: afu_reset() fop not supported\n", __func__);
	/* Silently return success until it is implemented */
450 * ocxlflash_set_master() - sets the context as master
451 * @ctx_cookie: Adapter context to set as master.
static void ocxlflash_set_master(void *ctx_cookie)
	/* Flags the context as master; start_context() maps global MMIO for it */
	struct ocxlflash_context *ctx = ctx_cookie;
461 * ocxlflash_get_context() - obtains the context associated with the host
462 * @pdev: PCI device associated with the host.
463 * @afu_cookie: Hardware AFU associated with the host.
465 * Return: returns the pointer to host adapter context
static void *ocxlflash_get_context(struct pci_dev *pdev, void *afu_cookie)
	struct ocxl_hw_afu *afu = afu_cookie;
	/* Return the host (kernel) context cached at AFU creation */
	return afu->ocxl_ctx;
475 * ocxlflash_dev_context_init() - allocate and initialize an adapter context
476 * @pdev: PCI device associated with the host.
477 * @afu_cookie: Hardware AFU associated with the host.
479 * Return: returns the adapter context on success, ERR_PTR on failure
static void *ocxlflash_dev_context_init(struct pci_dev *pdev, void *afu_cookie)
	struct ocxl_hw_afu *afu = afu_cookie;
	struct device *dev = afu->dev;
	struct ocxlflash_context *ctx;
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (unlikely(!ctx)) {
		dev_err(dev, "%s: Context allocation failed\n", __func__);
	/* Allocate a PE id in [0, max_pasid); preload allows GFP_NOWAIT inside */
	idr_preload(GFP_KERNEL);
	rc = idr_alloc(&afu->idr, ctx, 0, afu->max_pasid, GFP_NOWAIT);
	if (unlikely(rc < 0)) {
		dev_err(dev, "%s: idr_alloc failed rc=%d\n", __func__, rc);
	spin_lock_init(&ctx->slock);
	init_waitqueue_head(&ctx->wq);
	mutex_init(&ctx->state_mutex);
	/* No events outstanding on a fresh context */
	ctx->pending_irq = false;
	ctx->pending_fault = false;
525 * ocxlflash_release_context() - releases an adapter context
526 * @ctx_cookie: Adapter context to be released.
528 * Return: 0 on success, -errno on failure
static int ocxlflash_release_context(void *ctx_cookie)
	struct ocxlflash_context *ctx = ctx_cookie;
	dev = ctx->hw_afu->dev;
	/* A STARTED (or later) context must be stopped before release */
	mutex_lock(&ctx->state_mutex);
	if (ctx->state >= STARTED) {
		dev_err(dev, "%s: Context in use, state=%d\n", __func__,
		mutex_unlock(&ctx->state_mutex);
	mutex_unlock(&ctx->state_mutex);
	/* Return the PE id and drop the pseudo-fs pin */
	idr_remove(&ctx->hw_afu->idr, ctx->pe);
	ocxlflash_release_mapping(ctx);
558 * ocxlflash_perst_reloads_same_image() - sets the image reload policy
559 * @afu_cookie: Hardware AFU associated with the host.
560 * @image: Whether to load the same image on PERST.
static void ocxlflash_perst_reloads_same_image(void *afu_cookie, bool image)
	struct ocxl_hw_afu *afu = afu_cookie;
	/* Policy flag only; consumed elsewhere in the driver */
	afu->perst_same_image = image;
570 * ocxlflash_read_adapter_vpd() - reads the adapter VPD
571 * @pdev: PCI device associated with the host.
572 * @buf: Buffer to get the VPD data.
573 * @count: Size of buffer (maximum bytes that can be read).
575 * Return: size of VPD on success, -errno on failure
static ssize_t ocxlflash_read_adapter_vpd(struct pci_dev *pdev, void *buf,
	/* Read VPD from offset 0; pci_read_vpd() returns bytes read or -errno */
	return pci_read_vpd(pdev, 0, count, buf);
584 * free_afu_irqs() - internal service to free interrupts
585 * @ctx: Adapter context.
587 static void free_afu_irqs(struct ocxlflash_context *ctx)
589 struct ocxl_hw_afu *afu = ctx->hw_afu;
590 struct device *dev = afu->dev;
594 dev_err(dev, "%s: Interrupts not allocated\n", __func__);
598 for (i = ctx->num_irqs; i >= 0; i--)
599 ocxl_link_free_irq(afu->link_token, ctx->irqs[i].hwirq);
606 * alloc_afu_irqs() - internal service to allocate interrupts
607 * @ctx: Context associated with the request.
608 * @num: Number of interrupts requested.
610 * Return: 0 on success, -errno on failure
static int alloc_afu_irqs(struct ocxlflash_context *ctx, int num)
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct device *dev = afu->dev;
	struct ocxlflash_irqs *irqs;
	/* A context may only carry one irq table at a time */
		dev_err(dev, "%s: Interrupts already allocated\n", __func__);
	if (num > OCXL_MAX_IRQS) {
		dev_err(dev, "%s: Too many interrupts num=%d\n", __func__, num);
	irqs = kcalloc(num, sizeof(*irqs), GFP_KERNEL);
	if (unlikely(!irqs)) {
		dev_err(dev, "%s: Context irqs allocation failed\n", __func__);
	/* Reserve one hardware irq per requested interrupt */
	for (i = 0; i < num; i++) {
		rc = ocxl_link_irq_alloc(afu->link_token, &hwirq);
			dev_err(dev, "%s: ocxl_link_irq_alloc failed rc=%d\n",
		irqs[i].hwirq = hwirq;
	/* Error unwind: free only the irqs allocated so far */
	for (i = i-1; i >= 0; i--)
		ocxl_link_free_irq(afu->link_token, irqs[i].hwirq);
663 * ocxlflash_allocate_afu_irqs() - allocates the requested number of interrupts
664 * @ctx_cookie: Context associated with the request.
665 * @num: Number of interrupts requested.
667 * Return: 0 on success, -errno on failure
static int ocxlflash_allocate_afu_irqs(void *ctx_cookie, int num)
	/* Backend-ops wrapper over the internal allocator */
	return alloc_afu_irqs(ctx_cookie, num);
675 * ocxlflash_free_afu_irqs() - frees the interrupts of an adapter context
676 * @ctx_cookie: Adapter context.
static void ocxlflash_free_afu_irqs(void *ctx_cookie)
	/* Backend-ops wrapper over the internal free routine */
	free_afu_irqs(ctx_cookie);
684 * ocxlflash_unconfig_afu() - unconfigure the AFU
685 * @afu: AFU associated with the host.
static void ocxlflash_unconfig_afu(struct ocxl_hw_afu *afu)
	/* Unmap the global MMIO space if it was mapped; NULL marks it unmapped */
	if (afu->gmmio_virt) {
		iounmap(afu->gmmio_virt);
		afu->gmmio_virt = NULL;
696 * ocxlflash_destroy_afu() - destroy the AFU structure
697 * @afu_cookie: AFU to be freed.
static void ocxlflash_destroy_afu(void *afu_cookie)
	struct ocxl_hw_afu *afu = afu_cookie;
	/* Release the kernel context first, then the id allocator */
	ocxlflash_release_context(afu->ocxl_ctx);
	idr_destroy(&afu->idr);
	/* Disable the AFU */
	pos = afu->acfg.dvsec_afu_control_pos;
	ocxl_config_set_afu_state(afu->pdev, pos, 0);
	ocxlflash_unconfig_afu(afu);
719 * ocxlflash_config_fn() - configure the host function
720 * @pdev: PCI device associated with the host.
721 * @afu: AFU associated with the host.
723 * Return: 0 on success, -errno on failure
static int ocxlflash_config_fn(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
	struct ocxl_fn_config *fcfg = &afu->fcfg;
	struct device *dev = &pdev->dev;
	u16 base, enabled, supported;
	/* Read DVSEC config of the function */
	rc = ocxl_config_read_function(pdev, fcfg);
		dev_err(dev, "%s: ocxl_config_read_function failed rc=%d\n",
	/* Check if function has AFUs defined, only 1 per function supported */
	if (fcfg->max_afu_index >= 0) {
		afu->is_present = true;
		if (fcfg->max_afu_index != 0)
			dev_warn(dev, "%s: Unexpected AFU index value %d\n",
				 __func__, fcfg->max_afu_index);
	rc = ocxl_config_get_actag_info(pdev, &base, &enabled, &supported);
		dev_err(dev, "%s: ocxl_config_get_actag_info failed rc=%d\n",
	/* Cache the function-level acTag window for the AFU config step */
	afu->fn_actag_base = base;
	afu->fn_actag_enabled = enabled;
	ocxl_config_set_actag(pdev, fcfg->dvsec_function_pos, base, enabled);
	dev_dbg(dev, "%s: Function acTag range base=%u enabled=%u\n",
		__func__, base, enabled);
	rc = ocxl_link_setup(pdev, 0, &afu->link_token);
		dev_err(dev, "%s: ocxl_link_setup failed rc=%d\n",
	rc = ocxl_config_set_TL(pdev, fcfg->dvsec_tl_pos);
		dev_err(dev, "%s: ocxl_config_set_TL failed rc=%d\n",
	/* Error unwind: tear down the link established above */
	ocxl_link_release(pdev, afu->link_token);
783 * ocxlflash_unconfig_fn() - unconfigure the host function
784 * @pdev: PCI device associated with the host.
785 * @afu: AFU associated with the host.
static void ocxlflash_unconfig_fn(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
	/* Inverse of the link setup done in ocxlflash_config_fn() */
	ocxl_link_release(pdev, afu->link_token);
793 * ocxlflash_map_mmio() - map the AFU MMIO space
794 * @afu: AFU associated with the host.
796 * Return: 0 on success, -errno on failure
static int ocxlflash_map_mmio(struct ocxl_hw_afu *afu)
	struct ocxl_afu_config *acfg = &afu->acfg;
	struct pci_dev *pdev = afu->pdev;
	struct device *dev = afu->dev;
	phys_addr_t gmmio, ppmmio;
	/* Claim the BAR holding the global MMIO space */
	rc = pci_request_region(pdev, acfg->global_mmio_bar, "ocxlflash");
		dev_err(dev, "%s: pci_request_region for global failed rc=%d\n",
	gmmio = pci_resource_start(pdev, acfg->global_mmio_bar);
	gmmio += acfg->global_mmio_offset;
	/* Claim the BAR holding the per-process MMIO space */
	rc = pci_request_region(pdev, acfg->pp_mmio_bar, "ocxlflash");
		dev_err(dev, "%s: pci_request_region for pp bar failed rc=%d\n",
	ppmmio = pci_resource_start(pdev, acfg->pp_mmio_bar);
	ppmmio += acfg->pp_mmio_offset;
	/* Only the global space is mapped here; pp space is mapped per context */
	afu->gmmio_virt = ioremap(gmmio, acfg->global_mmio_size);
	if (unlikely(!afu->gmmio_virt)) {
		dev_err(dev, "%s: MMIO mapping failed\n", __func__);
	afu->gmmio_phys = gmmio;
	afu->ppmmio_phys = ppmmio;
	/* Error unwind: release regions in reverse order of acquisition */
	pci_release_region(pdev, acfg->pp_mmio_bar);
	pci_release_region(pdev, acfg->global_mmio_bar);
843 * ocxlflash_config_afu() - configure the host AFU
844 * @pdev: PCI device associated with the host.
845 * @afu: AFU associated with the host.
847 * Must be called _after_ host function configuration.
849 * Return: 0 on success, -errno on failure
static int ocxlflash_config_afu(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
	struct ocxl_afu_config *acfg = &afu->acfg;
	struct ocxl_fn_config *fcfg = &afu->fcfg;
	struct device *dev = &pdev->dev;
	/* This HW AFU function does not have any AFUs defined */
	if (!afu->is_present)
	/* Read AFU config at index 0 */
	rc = ocxl_config_read_afu(pdev, fcfg, acfg, 0);
		dev_err(dev, "%s: ocxl_config_read_afu failed rc=%d\n",
	/* Only one AFU per function is supported, so actag_base is same */
	base = afu->fn_actag_base;
	count = min_t(int, acfg->actag_supported, afu->fn_actag_enabled);
	pos = acfg->dvsec_afu_control_pos;
	ocxl_config_set_afu_actag(pdev, pos, base, count);
	dev_dbg(dev, "%s: acTag base=%d enabled=%d\n", __func__, base, count);
	afu->afu_actag_base = base;
	afu->afu_actag_enabled = count;
	/* PASID space bounds the number of contexts (idr range) */
	afu->max_pasid = 1 << acfg->pasid_supported_log;
	ocxl_config_set_afu_pasid(pdev, pos, 0, acfg->pasid_supported_log);
	rc = ocxlflash_map_mmio(afu);
		dev_err(dev, "%s: ocxlflash_map_mmio failed rc=%d\n",
	/* Enable the AFU */
	ocxl_config_set_afu_state(pdev, acfg->dvsec_afu_control_pos, 1);
900 * ocxlflash_create_afu() - create the AFU for OCXL
901 * @pdev: PCI device associated with the host.
903 * Return: AFU on success, NULL on failure
static void *ocxlflash_create_afu(struct pci_dev *pdev)
	struct device *dev = &pdev->dev;
	struct ocxlflash_context *ctx;
	struct ocxl_hw_afu *afu;
	afu = kzalloc(sizeof(*afu), GFP_KERNEL);
	if (unlikely(!afu)) {
		dev_err(dev, "%s: HW AFU allocation failed\n", __func__);
	/* Configure the function first, then the AFU it exposes */
	rc = ocxlflash_config_fn(pdev, afu);
		dev_err(dev, "%s: Function configuration failed rc=%d\n",
	rc = ocxlflash_config_afu(pdev, afu);
		dev_err(dev, "%s: AFU configuration failed rc=%d\n",
	/* Create the kernel (host) context cached on the AFU */
	ctx = ocxlflash_dev_context_init(pdev, afu);
		dev_err(dev, "%s: ocxlflash_dev_context_init failed rc=%d\n",
	/* Error unwind: reverse order of the setup above */
	ocxlflash_unconfig_afu(afu);
	ocxlflash_unconfig_fn(pdev, afu);
	idr_destroy(&afu->idr);
959 * ctx_event_pending() - check for any event pending on the context
960 * @ctx: Context to be checked.
962 * Return: true if there is an event pending, false if none pending
static inline bool ctx_event_pending(struct ocxlflash_context *ctx)
	/* Callers (afu_poll/afu_read) hold ctx->slock around this check */
	if (ctx->pending_irq || ctx->pending_fault)
973 * afu_poll() - poll the AFU for events on the context
974 * @file: File associated with the adapter context.
975 * @poll: Poll structure from the user.
static unsigned int afu_poll(struct file *file, struct poll_table_struct *poll)
	struct ocxlflash_context *ctx = file->private_data;
	struct device *dev = ctx->hw_afu->dev;
	/* Register on the context wait queue before sampling state */
	poll_wait(file, &ctx->wq, poll);
	spin_lock_irqsave(&ctx->slock, lock_flags);
	if (ctx_event_pending(ctx))
		mask |= POLLIN | POLLRDNORM;
	else if (ctx->state == CLOSED)
	spin_unlock_irqrestore(&ctx->slock, lock_flags);
	dev_dbg(dev, "%s: Poll wait completed for pe %i mask %i\n",
		__func__, ctx->pe, mask);
1002 * afu_read() - perform a read on the context for any event
1003 * @file: File associated with the adapter context.
1004 * @buf: Buffer to receive the data.
1005 * @count: Size of buffer (maximum bytes that can be read).
1008 * Return: size of the data read on success, -errno on failure
static ssize_t afu_read(struct file *file, char __user *buf, size_t count,
	struct ocxlflash_context *ctx = file->private_data;
	struct device *dev = ctx->hw_afu->dev;
	struct cxl_event event;
	DEFINE_WAIT(event_wait);
	/* Only reads from offset 0 are supported */
		dev_err(dev, "%s: Non-zero offset not supported, off=%lld\n",
	spin_lock_irqsave(&ctx->slock, lock_flags);
	/* Sleep until an event is pending or the context is CLOSED */
	prepare_to_wait(&ctx->wq, &event_wait, TASK_INTERRUPTIBLE);
	if (ctx_event_pending(ctx) || (ctx->state == CLOSED))
	/* Non-blocking readers bail out instead of sleeping */
	if (file->f_flags & O_NONBLOCK) {
		dev_err(dev, "%s: File cannot be blocked on I/O\n",
	if (signal_pending(current)) {
		dev_err(dev, "%s: Signal pending on the process\n",
	/* Drop the lock across the schedule, reacquire before re-checking */
	spin_unlock_irqrestore(&ctx->slock, lock_flags);
	spin_lock_irqsave(&ctx->slock, lock_flags);
	finish_wait(&ctx->wq, &event_wait);
	/* Build the cxl_event to copy out; size grows with the payload */
	memset(&event, 0, sizeof(event));
	event.header.process_element = ctx->pe;
	event.header.size = sizeof(struct cxl_event_header);
	if (ctx->pending_irq) {
		esize = sizeof(struct cxl_event_afu_interrupt);
		event.header.size += esize;
		event.header.type = CXL_EVENT_AFU_INTERRUPT;
		/* Report one IRQ per read; irq numbers are 1-based to userspace */
		bit = find_first_bit(&ctx->irq_bitmap, ctx->num_irqs);
		clear_bit(bit, &ctx->irq_bitmap);
		event.irq.irq = bit + 1;
		if (bitmap_empty(&ctx->irq_bitmap, ctx->num_irqs))
			ctx->pending_irq = false;
	} else if (ctx->pending_fault) {
		event.header.size += sizeof(struct cxl_event_data_storage);
		event.header.type = CXL_EVENT_DATA_STORAGE;
		event.fault.addr = ctx->fault_addr;
		event.fault.dsisr = ctx->fault_dsisr;
		ctx->pending_fault = false;
	spin_unlock_irqrestore(&ctx->slock, lock_flags);
	if (copy_to_user(buf, &event, event.header.size)) {
		dev_err(dev, "%s: copy_to_user failed\n", __func__);
	rc = event.header.size;
	/* Error unwind: leave the wait queue with the lock still held */
	finish_wait(&ctx->wq, &event_wait);
	spin_unlock_irqrestore(&ctx->slock, lock_flags);
1097 * afu_release() - release and free the context
1098 * @inode: File inode pointer.
1099 * @file: File associated with the context.
1101 * Return: 0 on success, -errno on failure
1103 static int afu_release(struct inode *inode, struct file *file)
1105 struct ocxlflash_context *ctx = file->private_data;
1108 /* Unmap and free the interrupts associated with the context */
1109 for (i = ctx->num_irqs; i >= 0; i--)
1110 afu_unmap_irq(0, ctx, i, ctx);
1113 return ocxlflash_release_context(ctx);
1117 * ocxlflash_mmap_fault() - mmap fault handler
1118 * @vmf: VM fault associated with current fault.
1120 * Return: 0 on success, -errno on failure
static vm_fault_t ocxlflash_mmap_fault(struct vm_fault *vmf)
	struct vm_area_struct *vma = vmf->vma;
	struct ocxlflash_context *ctx = vma->vm_file->private_data;
	struct device *dev = ctx->hw_afu->dev;
	u64 mmio_area, offset;
	/* Faulting page must fall within the context's MMIO window */
	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= ctx->psn_size)
		return VM_FAULT_SIGBUS;
	/* MMIO is only valid while the context is STARTED */
	mutex_lock(&ctx->state_mutex);
	if (ctx->state != STARTED) {
		dev_err(dev, "%s: Context not started, state=%d\n",
			__func__, ctx->state);
		mutex_unlock(&ctx->state_mutex);
		return VM_FAULT_SIGBUS;
	mutex_unlock(&ctx->state_mutex);
	/* Insert the physical MMIO pfn directly (VM_PFNMAP mapping) */
	mmio_area = ctx->psn_phys;
	mmio_area += offset;
	return vmf_insert_pfn(vma, vmf->address, mmio_area >> PAGE_SHIFT);
/* VM operations for context MMIO mappings; pages are faulted in on demand */
static const struct vm_operations_struct ocxlflash_vmops = {
	.fault = ocxlflash_mmap_fault,
1153 * afu_mmap() - map the fault handler operations
1154 * @file: File associated with the context.
1155 * @vma: VM area associated with mapping.
1157 * Return: 0 on success, -errno on failure
static int afu_mmap(struct file *file, struct vm_area_struct *vma)
	struct ocxlflash_context *ctx = file->private_data;
	/* Reject mappings that extend beyond the context MMIO window */
	if ((vma_pages(vma) + vma->vm_pgoff) >
	    (ctx->psn_size >> PAGE_SHIFT))
	/* Raw pfn-mapped, uncached MMIO; faults handled by ocxlflash_vmops */
	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_ops = &ocxlflash_vmops;
/* Default file operations for adapter context fds */
static const struct file_operations ocxl_afu_fops = {
	.owner = THIS_MODULE,
	.release = afu_release,
/* Fill in any fop the caller left NULL with the matching default */
#define PATCH_FOPS(NAME)						\
	do { if (!fops->NAME) fops->NAME = ocxl_afu_fops.NAME; } while (0)
1185 * ocxlflash_get_fd() - get file descriptor for an adapter context
1186 * @ctx_cookie: Adapter context.
1187 * @fops: File operations to be associated.
1188 * @fd: File descriptor to be returned back.
1190 * Return: pointer to the file on success, ERR_PTR on failure
static struct file *ocxlflash_get_fd(void *ctx_cookie,
				     struct file_operations *fops, int *fd)
	struct ocxlflash_context *ctx = ctx_cookie;
	struct device *dev = ctx->hw_afu->dev;
	/* Only allow one fd per context */
		dev_err(dev, "%s: Context is already mapped to an fd\n",
	flags = O_RDWR | O_CLOEXEC;
	/* This code is similar to anon_inode_getfd() */
	rc = get_unused_fd_flags(flags);
	if (unlikely(rc < 0)) {
		dev_err(dev, "%s: get_unused_fd_flags failed rc=%d\n",
	/* Patch the file ops that are not defined */
	PATCH_FOPS(release);
	} else /* Use default ops */
		fops = (struct file_operations *)&ocxl_afu_fops;
	name = kasprintf(GFP_KERNEL, "ocxlflash:%d", ctx->pe);
	file = ocxlflash_getfile(dev, name, fops, ctx, flags);
		dev_err(dev, "%s: ocxlflash_getfile failed rc=%d\n",
	/* Remember the mapping so release can invalidate it */
	ctx->mapping = file->f_mapping;
	/* Error unwind: return the reserved (not yet installed) fd */
	put_unused_fd(fdtmp);
1252 * ocxlflash_fops_get_context() - get the context associated with the file
1253 * @file: File associated with the adapter context.
1255 * Return: pointer to the context
static void *ocxlflash_fops_get_context(struct file *file)
	/* private_data was set to the context in ocxlflash_getfile() */
	return file->private_data;
1263 * ocxlflash_afu_irq() - interrupt handler for user contexts
1264 * @irq: Interrupt number.
1265 * @data: Private data provided at interrupt registration, the context.
1267 * Return: Always return IRQ_HANDLED.
1269 static irqreturn_t ocxlflash_afu_irq(int irq, void *data)
1271 struct ocxlflash_context *ctx = data;
1272 struct device *dev = ctx->hw_afu->dev;
1275 dev_dbg(dev, "%s: Interrupt raised for pe %i virq %i\n",
1276 __func__, ctx->pe, irq);
1278 for (i = 0; i < ctx->num_irqs; i++) {
1279 if (ctx->irqs[i].virq == irq)
1282 if (unlikely(i >= ctx->num_irqs)) {
1283 dev_err(dev, "%s: Received AFU IRQ out of range\n", __func__);
1287 spin_lock(&ctx->slock);
1288 set_bit(i - 1, &ctx->irq_bitmap);
1289 ctx->pending_irq = true;
1290 spin_unlock(&ctx->slock);
1292 wake_up_all(&ctx->wq);
1298 * ocxlflash_start_work() - start a user context
1299 * @ctx_cookie: Context to be started.
1300 * @num_irqs: Number of interrupts requested.
1302 * Return: 0 on success, -errno on failure
static int ocxlflash_start_work(void *ctx_cookie, u64 num_irqs)
	struct ocxlflash_context *ctx = ctx_cookie;
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct device *dev = afu->dev;
	/* Allocate then map each requested AFU interrupt */
	rc = alloc_afu_irqs(ctx, num_irqs);
	if (unlikely(rc < 0)) {
		dev_err(dev, "%s: alloc_afu_irqs failed rc=%d\n", __func__, rc);
	for (i = 0; i < num_irqs; i++) {
		name = kasprintf(GFP_KERNEL, "ocxlflash-%s-pe%i-%i",
				 dev_name(dev), ctx->pe, i);
		rc = afu_map_irq(0, ctx, i, ocxlflash_afu_irq, ctx, name);
		if (unlikely(rc < 0)) {
			dev_err(dev, "%s: afu_map_irq failed rc=%d\n",
	rc = start_context(ctx);
		dev_err(dev, "%s: start_context failed rc=%d\n", __func__, rc);
	/* Error unwind: unmap only the interrupts mapped so far */
	for (i = i-1; i >= 0; i--)
		afu_unmap_irq(0, ctx, i, ctx);
1346 * ocxlflash_fd_mmap() - mmap handler for adapter file descriptor
1347 * @file: File installed with adapter file descriptor.
1348 * @vma: VM area associated with mapping.
1350 * Return: 0 on success, -errno on failure
static int ocxlflash_fd_mmap(struct file *file, struct vm_area_struct *vma)
	/* Backend-ops wrapper over the common mmap path */
	return afu_mmap(file, vma);
1358 * ocxlflash_fd_release() - release the context associated with the file
1359 * @inode: File inode pointer.
1360 * @file: File associated with the adapter context.
1362 * Return: 0 on success, -errno on failure
static int ocxlflash_fd_release(struct inode *inode, struct file *file)
	/* Backend-ops wrapper over the common release path */
	return afu_release(inode, file);
1369 /* Backend ops to ocxlflash services */
/* Backend ops to ocxlflash services */
const struct cxlflash_backend_ops cxlflash_ocxl_ops = {
	.module			= THIS_MODULE,
	.psa_map		= ocxlflash_psa_map,
	.psa_unmap		= ocxlflash_psa_unmap,
	.process_element	= ocxlflash_process_element,
	.map_afu_irq		= ocxlflash_map_afu_irq,
	.unmap_afu_irq		= ocxlflash_unmap_afu_irq,
	.get_irq_objhndl	= ocxlflash_get_irq_objhndl,
	.start_context		= ocxlflash_start_context,
	.stop_context		= ocxlflash_stop_context,
	.afu_reset		= ocxlflash_afu_reset,
	.set_master		= ocxlflash_set_master,
	.get_context		= ocxlflash_get_context,
	.dev_context_init	= ocxlflash_dev_context_init,
	.release_context	= ocxlflash_release_context,
	.perst_reloads_same_image = ocxlflash_perst_reloads_same_image,
	.read_adapter_vpd	= ocxlflash_read_adapter_vpd,
	.allocate_afu_irqs	= ocxlflash_allocate_afu_irqs,
	.free_afu_irqs		= ocxlflash_free_afu_irqs,
	.create_afu		= ocxlflash_create_afu,
	.destroy_afu		= ocxlflash_destroy_afu,
	.get_fd			= ocxlflash_get_fd,
	.fops_get_context	= ocxlflash_fops_get_context,
	.start_work		= ocxlflash_start_work,
	.fd_mmap		= ocxlflash_fd_mmap,
	.fd_release		= ocxlflash_fd_release,