// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 SiFive
 * Copyright (C) 2018 Christoph Hellwig
 */
#define pr_fmt(fmt) "plic: " fmt
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <asm/smp.h>

/*
 * This driver implements a version of the RISC-V PLIC with the actual layout
 * specified in chapter 8 of the SiFive U5 Coreplex Series Manual:
 *
 *     https://static.dev.sifive.com/U54-MC-RVCoreIP.pdf
 *
 * The largest number of interrupt sources ("devices") supported by hardware
 * marked as 'sifive,plic-1.0.0' is 1024, of which device 0 is defined as
 * non-existent by the RISC-V Privileged Spec.
 */

#define MAX_DEVICES             1024
#define MAX_CONTEXTS            15872

/*
 * Each interrupt source has a priority register associated with it.
 * We always hardwire it to one in Linux.
 */
#define PRIORITY_BASE           0
#define PRIORITY_PER_ID         4

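/*
 * The priority register for interrupt source "hwirq" therefore lives at
 * plic_regs + PRIORITY_BASE + hwirq * PRIORITY_PER_ID; plic_irq_toggle()
 * below writes 1 there to enable a source and 0 to disable it.
 */
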
/*
 * Each hart context has a vector of interrupt enable bits associated with it.
 * There's one bit for each interrupt source.
 */
#define ENABLE_BASE             0x2000
#define ENABLE_PER_HART         0x80

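/*
 * The enable bit for source "hwirq" in context "ctxid" is bit (hwirq % 32)
 * of the 32-bit word at plic_enable_base(ctxid) + hwirq / 32; see
 * plic_toggle() below.
 */
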
/*
 * Each hart context has a set of control registers associated with it.  Right
 * now there are only two: a source priority threshold over which the hart
 * will take an interrupt, and a register to claim interrupts.
 */
#define CONTEXT_BASE            0x200000
#define CONTEXT_PER_HART        0x1000
#define CONTEXT_THRESHOLD       0x00
#define CONTEXT_CLAIM           0x04

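/*
 * For example, the claim/complete register of context 1 sits at offset
 * 0x200000 + 1 * 0x1000 + 0x04 = 0x201004 from the PLIC base.
 */
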
static void __iomem *plic_regs;

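/*
 * Per-CPU bookkeeping: which PLIC context, if any, delivers external
 * interrupts to this CPU.  plic_init() fills this in while walking the
 * interrupt contexts described in the device tree.
 */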
struct plic_handler {
        bool            present;
        int             ctxid;
};
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);

static inline void __iomem *plic_hart_offset(int ctxid)
{
        return plic_regs + CONTEXT_BASE + ctxid * CONTEXT_PER_HART;
}

static inline u32 __iomem *plic_enable_base(int ctxid)
{
        return plic_regs + ENABLE_BASE + ctxid * ENABLE_PER_HART;
}

/*
 * Protect mask operations on the registers given that we can't assume that
 * atomic memory operations work on them.
 */
static DEFINE_RAW_SPINLOCK(plic_toggle_lock);

static inline void plic_toggle(int ctxid, int hwirq, int enable)
{
        u32 __iomem *reg = plic_enable_base(ctxid) + (hwirq / 32);
        u32 hwirq_mask = 1 << (hwirq % 32);

        raw_spin_lock(&plic_toggle_lock);
        if (enable)
                writel(readl(reg) | hwirq_mask, reg);
        else
                writel(readl(reg) & ~hwirq_mask, reg);
        raw_spin_unlock(&plic_toggle_lock);
}

static inline void plic_irq_toggle(struct irq_data *d, int enable)
{
        int cpu;

        writel(enable, plic_regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
        for_each_cpu(cpu, irq_data_get_affinity_mask(d)) {
                struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

                if (handler->present)
                        plic_toggle(handler->ctxid, d->hwirq, enable);
        }
}

static void plic_irq_enable(struct irq_data *d)
{
        plic_irq_toggle(d, 1);
}

static void plic_irq_disable(struct irq_data *d)
{
        plic_irq_toggle(d, 0);
}

static struct irq_chip plic_chip = {
        .name           = "SiFive PLIC",
        /*
         * There is no need to mask/unmask PLIC interrupts.  They are "masked"
         * by reading claim and "unmasked" when writing it back.
         */
        .irq_enable     = plic_irq_enable,
        .irq_disable    = plic_irq_disable,
};

static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
                              irq_hw_number_t hwirq)
{
        irq_set_chip_and_handler(irq, &plic_chip, handle_simple_irq);
        irq_set_chip_data(irq, NULL);
        irq_set_noprobe(irq);
        return 0;
}

static const struct irq_domain_ops plic_irqdomain_ops = {
        .map            = plic_irqdomain_map,
        .xlate          = irq_domain_xlate_onecell,
};

static struct irq_domain *plic_irqdomain;

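/*
 * Interrupt source 0 never exists (see above), so plic_init() below sizes
 * this linear domain with nr_irqs + 1 entries, letting hardware source N
 * map at index N.
 */
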
/*
 * Handling an interrupt is a two-step process: first you claim the interrupt
 * by reading the claim register, then you complete the interrupt by writing
 * that source ID back to the same claim register.  This automatically enables
 * and disables the interrupt, so there's nothing else to do.
 */
static void plic_handle_irq(struct pt_regs *regs)
{
        struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
        void __iomem *claim = plic_hart_offset(handler->ctxid) + CONTEXT_CLAIM;
        irq_hw_number_t hwirq;

        WARN_ON_ONCE(!handler->present);

        /* Mask supervisor external interrupts while we dispatch. */
        csr_clear(sie, SIE_SEIE);
        /* A claim of zero means no interrupt is pending for this context. */
        while ((hwirq = readl(claim))) {
                int irq = irq_find_mapping(plic_irqdomain, hwirq);

                if (unlikely(irq <= 0))
                        pr_warn_ratelimited("can't find mapping for hwirq %lu\n",
                                        hwirq);
                else
                        generic_handle_irq(irq);
                /* Complete the interrupt by writing its ID back. */
                writel(hwirq, claim);
        }
        csr_set(sie, SIE_SEIE);
}

/*
 * Walk up the DT tree until we find an active RISC-V core (HART) node and
 * extract the hart ID from it.
 */
static int plic_find_hart_id(struct device_node *node)
{
        for (; node; node = node->parent) {
                if (of_device_is_compatible(node, "riscv"))
                        return riscv_of_processor_hartid(node);
        }

        return -1;
}

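/*
 * A hypothetical device tree fragment, for illustration only, might describe
 * a PLIC like this; each entry in interrupts-extended corresponds to one
 * context, i.e. one (hart, privilege mode) pair, where 11 and 9 are the
 * RISC-V machine- and supervisor-mode external interrupt numbers:
 *
 *      plic: interrupt-controller@c000000 {
 *              compatible = "sifive,plic-1.0.0";
 *              interrupt-controller;
 *              #interrupt-cells = <1>;
 *              riscv,ndev = <53>;
 *              interrupts-extended = <&cpu0_intc 11
 *                                     &cpu1_intc 11 &cpu1_intc 9>;
 *      };
 *
 * Contexts that Linux must not use are encoded as an interrupt specifier
 * of -1 and skipped as "context holes" below.
 */
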
static int __init plic_init(struct device_node *node,
                struct device_node *parent)
{
        int error = 0, nr_handlers, nr_mapped = 0, i;
        u32 nr_irqs = 0;

        if (plic_regs) {
                pr_warn("PLIC already present.\n");
                return -ENXIO;
        }

        plic_regs = of_iomap(node, 0);
        if (WARN_ON(!plic_regs))
                return -EIO;

        error = -EINVAL;
        /* nr_irqs stays 0 if the "riscv,ndev" property is missing */
        of_property_read_u32(node, "riscv,ndev", &nr_irqs);
        if (WARN_ON(!nr_irqs))
                goto out_iounmap;

        nr_handlers = of_irq_count(node);
        if (WARN_ON(!nr_handlers))
                goto out_iounmap;
        if (WARN_ON(nr_handlers < num_possible_cpus()))
                goto out_iounmap;

        error = -ENOMEM;
        plic_irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
                        &plic_irqdomain_ops, NULL);
        if (WARN_ON(!plic_irqdomain))
                goto out_iounmap;

        for (i = 0; i < nr_handlers; i++) {
                struct of_phandle_args parent;
                struct plic_handler *handler;
                irq_hw_number_t hwirq;
                int cpu, hartid;

                if (of_irq_parse_one(node, i, &parent)) {
                        pr_err("failed to parse parent for context %d.\n", i);
                        continue;
                }

                /* skip context holes */
                if (parent.args[0] == -1)
                        continue;

                hartid = plic_find_hart_id(parent.np);
                if (hartid < 0) {
                        pr_warn("failed to parse hart ID for context %d.\n", i);
                        continue;
                }

                /* a hart ID with no corresponding Linux CPU is not usable */
                cpu = riscv_hartid_to_cpuid(hartid);
                if (cpu < 0) {
                        pr_warn("invalid cpuid for context %d.\n", i);
                        continue;
                }

                handler = per_cpu_ptr(&plic_handlers, cpu);
                handler->present = true;
                handler->ctxid = i;

                /* priority must be > threshold to trigger an interrupt */
                writel(0, plic_hart_offset(i) + CONTEXT_THRESHOLD);
                for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
                        plic_toggle(i, hwirq, 0);
                nr_mapped++;
        }

        pr_info("mapped %d interrupts to %d (out of %d) handlers.\n",
                nr_irqs, nr_mapped, nr_handlers);
        set_handle_irq(plic_handle_irq);
        return 0;

out_iounmap:
        iounmap(plic_regs);
        return error;
}

IRQCHIP_DECLARE(sifive_plic, "sifive,plic-1.0.0", plic_init);
IRQCHIP_DECLARE(riscv_plic0, "riscv,plic0", plic_init); /* for legacy systems */