[linux.git] drivers/irqchip/irq-riscv-imsic-platform.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 * Copyright (C) 2022 Ventana Micro Systems Inc.
 */

#define pr_fmt(fmt) "riscv-imsic: " fmt
#include <linux/acpi.h>
#include <linux/bitmap.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/smp.h>

#include "irq-riscv-imsic-state.h"

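/*
 * Compute the physical address of the MSI target page of the given CPU
 * for the given guest interrupt file index. Fails if the guest index is
 * beyond what the platform's guest-index-bits allow.
 */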
static bool imsic_cpu_page_phys(unsigned int cpu, unsigned int guest_index,
                                phys_addr_t *out_msi_pa)
{
        struct imsic_global_config *global;
        struct imsic_local_config *local;

        global = &imsic->global;
        local = per_cpu_ptr(global->local, cpu);

        if (BIT(global->guest_index_bits) <= guest_index)
                return false;

        if (out_msi_pa)
                *out_msi_pa = local->msi_pa + (guest_index * IMSIC_MMIO_PAGE_SZ);

        return true;
}

static void imsic_irq_mask(struct irq_data *d)
{
        imsic_vector_mask(irq_data_get_irq_chip_data(d));
}

static void imsic_irq_unmask(struct irq_data *d)
{
        imsic_vector_unmask(irq_data_get_irq_chip_data(d));
}

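/*
 * Re-trigger an interrupt by writing its per-CPU interrupt ID directly
 * to the MSI page of the CPU that currently owns the vector.
 */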
static int imsic_irq_retrigger(struct irq_data *d)
{
        struct imsic_vector *vec = irq_data_get_irq_chip_data(d);
        struct imsic_local_config *local;

        if (WARN_ON(!vec))
                return -ENOENT;

        local = per_cpu_ptr(imsic->global.local, vec->cpu);
        writel_relaxed(vec->local_id, local->msi_va);
        return 0;
}

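/*
 * Compose the MSI message for a vector: the address is the MSI page of
 * the vector's target CPU (guest index 0) and the data is the vector's
 * per-CPU interrupt ID.
 */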
static void imsic_irq_compose_vector_msg(struct imsic_vector *vec, struct msi_msg *msg)
{
        phys_addr_t msi_addr;

        if (WARN_ON(!vec))
                return;

        if (WARN_ON(!imsic_cpu_page_phys(vec->cpu, 0, &msi_addr)))
                return;

        msg->address_hi = upper_32_bits(msi_addr);
        msg->address_lo = lower_32_bits(msi_addr);
        msg->data = vec->local_id;
}

static void imsic_irq_compose_msg(struct irq_data *d, struct msi_msg *msg)
{
        imsic_irq_compose_vector_msg(irq_data_get_irq_chip_data(d), msg);
}

#ifdef CONFIG_SMP
static void imsic_msi_update_msg(struct irq_data *d, struct imsic_vector *vec)
{
        struct msi_msg msg = { };

        imsic_irq_compose_vector_msg(vec, &msg);
        irq_data_get_irq_chip(d)->irq_write_msi_msg(d, &msg);
}

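/*
 * Change affinity by allocating a new vector on the requested CPUs,
 * re-programming the device with the new MSI message, and then handing
 * over the state of the old vector to the new one.
 */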
static int imsic_irq_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
                                  bool force)
{
        struct imsic_vector *old_vec, *new_vec;
        struct irq_data *pd = d->parent_data;

        old_vec = irq_data_get_irq_chip_data(pd);
        if (WARN_ON(!old_vec))
                return -ENOENT;

        /* If old vector cpu belongs to the target cpumask then do nothing */
        if (cpumask_test_cpu(old_vec->cpu, mask_val))
                return IRQ_SET_MASK_OK_DONE;

        /* If move is already in-flight then return failure */
        if (imsic_vector_get_move(old_vec))
                return -EBUSY;

        /* Get a new vector on the desired set of CPUs */
        new_vec = imsic_vector_alloc(old_vec->hwirq, mask_val);
        if (!new_vec)
                return -ENOSPC;

        /* Point device to the new vector */
        imsic_msi_update_msg(d, new_vec);

        /* Update irq descriptors with the new vector */
        pd->chip_data = new_vec;

        /* Update effective affinity of parent irq data */
        irq_data_update_effective_affinity(pd, cpumask_of(new_vec->cpu));

        /* Move state of the old vector to the new vector */
        imsic_vector_move(old_vec, new_vec);

        return IRQ_SET_MASK_OK_DONE;
}
#endif

static struct irq_chip imsic_irq_base_chip = {
        .name                   = "IMSIC",
        .irq_mask               = imsic_irq_mask,
        .irq_unmask             = imsic_irq_unmask,
        .irq_retrigger          = imsic_irq_retrigger,
        .irq_compose_msi_msg    = imsic_irq_compose_msg,
        .flags                  = IRQCHIP_SKIP_SET_WAKE |
                                  IRQCHIP_MASK_ON_SUSPEND,
};

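/*
 * Allocate a single IMSIC vector targeting any online CPU and bind it
 * to the Linux virq using the simple flow handler.
 */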
static int imsic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
                                  unsigned int nr_irqs, void *args)
{
        struct imsic_vector *vec;

        /* Multi-MSI is not supported yet. */
        if (nr_irqs > 1)
                return -EOPNOTSUPP;

        vec = imsic_vector_alloc(virq, cpu_online_mask);
        if (!vec)
                return -ENOSPC;

        irq_domain_set_info(domain, virq, virq, &imsic_irq_base_chip, vec,
                            handle_simple_irq, NULL, NULL);
        irq_set_noprobe(virq);
        irq_set_affinity(virq, cpu_online_mask);
        irq_data_update_effective_affinity(irq_get_irq_data(virq), cpumask_of(vec->cpu));

        return 0;
}

static void imsic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
                                  unsigned int nr_irqs)
{
        struct irq_data *d = irq_domain_get_irq_data(domain, virq);

        imsic_vector_free(irq_data_get_irq_chip_data(d));
        irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}

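/*
 * Match pure domain-token lookups (no firmware parameters) against this
 * MSI parent domain, either by its primary bus token or by the bus mask
 * advertised through its msi_parent_ops.
 */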
static int imsic_irq_domain_select(struct irq_domain *domain, struct irq_fwspec *fwspec,
                                   enum irq_domain_bus_token bus_token)
{
        const struct msi_parent_ops *ops = domain->msi_parent_ops;
        u32 busmask = BIT(bus_token);

        if (fwspec->fwnode != domain->fwnode || fwspec->param_count != 0)
                return 0;

        /* Handle pure domain searches */
        if (bus_token == ops->bus_select_token)
                return 1;

        return !!(ops->bus_select_mask & busmask);
}

#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
static void imsic_irq_debug_show(struct seq_file *m, struct irq_domain *d,
                                 struct irq_data *irqd, int ind)
{
        if (!irqd) {
                imsic_vector_debug_show_summary(m, ind);
                return;
        }

        imsic_vector_debug_show(m, irq_data_get_irq_chip_data(irqd), ind);
}
#endif

static const struct irq_domain_ops imsic_base_domain_ops = {
        .alloc          = imsic_irq_domain_alloc,
        .free           = imsic_irq_domain_free,
        .select         = imsic_irq_domain_select,
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
        .debug_show     = imsic_irq_debug_show,
#endif
};

#ifdef CONFIG_RISCV_IMSIC_PCI

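/*
 * For PCI MSI/MSI-X, mask at the PCI level first and then in the parent
 * IMSIC domain; unmasking is done in the reverse order.
 */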
static void imsic_pci_mask_irq(struct irq_data *d)
{
        pci_msi_mask_irq(d);
        irq_chip_mask_parent(d);
}

static void imsic_pci_unmask_irq(struct irq_data *d)
{
        irq_chip_unmask_parent(d);
        pci_msi_unmask_irq(d);
}

#define MATCH_PCI_MSI           BIT(DOMAIN_BUS_PCI_MSI)

#else

#define MATCH_PCI_MSI           0

#endif

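/*
 * Called by the MSI core when a per-device MSI domain is created on top
 * of this parent domain: install the IMSIC-specific chip callbacks and
 * filter the requested MSI feature flags against what the parent supports.
 */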
static bool imsic_init_dev_msi_info(struct device *dev,
                                    struct irq_domain *domain,
                                    struct irq_domain *real_parent,
                                    struct msi_domain_info *info)
{
        const struct msi_parent_ops *pops = real_parent->msi_parent_ops;

        /* MSI parent domain specific settings */
        switch (real_parent->bus_token) {
        case DOMAIN_BUS_NEXUS:
                if (WARN_ON_ONCE(domain != real_parent))
                        return false;
#ifdef CONFIG_SMP
                info->chip->irq_set_affinity = imsic_irq_set_affinity;
#endif
                break;
        default:
                WARN_ON_ONCE(1);
                return false;
        }

        /* Is the target supported? */
        switch (info->bus_token) {
#ifdef CONFIG_RISCV_IMSIC_PCI
        case DOMAIN_BUS_PCI_DEVICE_MSI:
        case DOMAIN_BUS_PCI_DEVICE_MSIX:
                info->chip->irq_mask = imsic_pci_mask_irq;
                info->chip->irq_unmask = imsic_pci_unmask_irq;
                break;
#endif
        case DOMAIN_BUS_DEVICE_MSI:
                /*
                 * Per-device MSI should never have any MSI feature bits
                 * set. Its sole purpose is to create a dumb interrupt
                 * chip which has a device specific irq_write_msi_msg()
                 * callback.
                 */
                if (WARN_ON_ONCE(info->flags))
                        return false;

                /* Core managed MSI descriptors */
                info->flags |= MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS |
                               MSI_FLAG_FREE_MSI_DESCS;
                break;
        case DOMAIN_BUS_WIRED_TO_MSI:
                break;
        default:
                WARN_ON_ONCE(1);
                return false;
        }

        /* Use hierarchical chip operations for re-trigger */
        info->chip->irq_retrigger = irq_chip_retrigger_hierarchy;

        /*
         * Mask out the domain specific MSI feature flags which are not
         * supported by the real parent.
         */
        info->flags &= pops->supported_flags;

        /* Enforce the required flags */
        info->flags |= pops->required_flags;

        return true;
}

#define MATCH_PLATFORM_MSI              BIT(DOMAIN_BUS_PLATFORM_MSI)

static const struct msi_parent_ops imsic_msi_parent_ops = {
        .supported_flags        = MSI_GENERIC_FLAGS_MASK |
                                  MSI_FLAG_PCI_MSIX,
        .required_flags         = MSI_FLAG_USE_DEF_DOM_OPS |
                                  MSI_FLAG_USE_DEF_CHIP_OPS,
        .bus_select_token       = DOMAIN_BUS_NEXUS,
        .bus_select_mask        = MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
        .init_dev_msi_info      = imsic_init_dev_msi_info,
};

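/*
 * Create the IMSIC base (nexus) IRQ domain and register it as the MSI
 * parent for the per-device PCI and platform MSI domains.
 */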
int imsic_irqdomain_init(void)
{
        struct imsic_global_config *global;

        if (!imsic || !imsic->fwnode) {
                pr_err("early driver not probed\n");
                return -ENODEV;
        }

        if (imsic->base_domain) {
                pr_err("%pfwP: irq domain already created\n", imsic->fwnode);
                return -ENODEV;
        }

        /* Create Base IRQ domain */
        imsic->base_domain = irq_domain_create_tree(imsic->fwnode,
                                                    &imsic_base_domain_ops, imsic);
        if (!imsic->base_domain) {
                pr_err("%pfwP: failed to create IMSIC base domain\n", imsic->fwnode);
                return -ENOMEM;
        }
        imsic->base_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
        imsic->base_domain->msi_parent_ops = &imsic_msi_parent_ops;

        irq_domain_update_bus_token(imsic->base_domain, DOMAIN_BUS_NEXUS);

        global = &imsic->global;
        pr_info("%pfwP:  hart-index-bits: %d,  guest-index-bits: %d\n",
                imsic->fwnode, global->hart_index_bits, global->guest_index_bits);
        pr_info("%pfwP: group-index-bits: %d, group-index-shift: %d\n",
                imsic->fwnode, global->group_index_bits, global->group_index_shift);
        pr_info("%pfwP: per-CPU IDs %d at base PPN %pa\n",
                imsic->fwnode, global->nr_ids, &global->base_addr);
        pr_info("%pfwP: total %d interrupts available\n",
                imsic->fwnode, num_possible_cpus() * (global->nr_ids - 1));

        return 0;
}

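/*
 * Common probe path shared by the DT platform driver and the early ACPI
 * setup below.
 */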
static int imsic_platform_probe_common(struct fwnode_handle *fwnode)
{
        if (imsic && imsic->fwnode != fwnode) {
                pr_err("%pfwP: fwnode mismatch\n", fwnode);
                return -ENODEV;
        }

        return imsic_irqdomain_init();
}

static int imsic_platform_dt_probe(struct platform_device *pdev)
{
        return imsic_platform_probe_common(pdev->dev.fwnode);
}

#ifdef CONFIG_ACPI

/*
 * On ACPI based systems, PCI enumeration happens early during boot in
 * acpi_scan_init(). PCI enumeration expects MSI domain setup before
 * it calls pci_set_msi_domain(). Hence, unlike in DT where the
 * imsic-platform driver probe happens late during boot, ACPI based
 * systems need to set up the MSI domain early.
 */
int imsic_platform_acpi_probe(struct fwnode_handle *fwnode)
{
        return imsic_platform_probe_common(fwnode);
}

#endif

static const struct of_device_id imsic_platform_match[] = {
        { .compatible = "riscv,imsics" },
        {}
};

static struct platform_driver imsic_platform_driver = {
        .driver = {
                .name           = "riscv-imsic",
                .of_match_table = imsic_platform_match,
        },
        .probe = imsic_platform_dt_probe,
};
builtin_platform_driver(imsic_platform_driver);