]>
Commit | Line | Data |
---|---|---|
34e65944 IY |
1 | /* |
2 | * pcie_aer.c | |
3 | * | |
4 | * Copyright (c) 2010 Isaku Yamahata <yamahata at valinux co jp> | |
5 | * VA Linux Systems Japan K.K. | |
6 | * | |
7 | * This program is free software; you can redistribute it and/or modify | |
8 | * it under the terms of the GNU General Public License as published by | |
9 | * the Free Software Foundation; either version 2 of the License, or | |
10 | * (at your option) any later version. | |
11 | * | |
12 | * This program is distributed in the hope that it will be useful, | |
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
15 | * GNU General Public License for more details. | |
16 | * | |
17 | * You should have received a copy of the GNU General Public License along | |
18 | * with this program; if not, see <http://www.gnu.org/licenses/>. | |
19 | */ | |
20 | ||
21 | #include "sysemu.h" | |
22 | #include "pci_bridge.h" | |
23 | #include "pcie.h" | |
24 | #include "msix.h" | |
25 | #include "msi.h" | |
26 | #include "pci_internals.h" | |
27 | #include "pcie_regs.h" | |
28 | ||
/* Define DEBUG_PCIE to get verbose per-call tracing on stderr. */
//#define DEBUG_PCIE
#ifdef DEBUG_PCIE
# define PCIE_DPRINTF(fmt, ...)                                         \
    fprintf(stderr, "%s:%d " fmt, __func__, __LINE__, ## __VA_ARGS__)
#else
# define PCIE_DPRINTF(fmt, ...) do {} while (0)
#endif
/* Like PCIE_DPRINTF, but prefixes the device's name and devfn. */
#define PCIE_DEV_PRINTF(dev, fmt, ...)                                  \
    PCIE_DPRINTF("%s:%x "fmt, (dev)->name, (dev)->devfn, ## __VA_ARGS__)
38 | ||
39 | /* From 6.2.7 Error Listing and Rules. Table 6-2, 6-3 and 6-4 */ | |
40 | static uint32_t pcie_aer_uncor_default_severity(uint32_t status) | |
41 | { | |
42 | switch (status) { | |
43 | case PCI_ERR_UNC_INTN: | |
44 | case PCI_ERR_UNC_DLP: | |
45 | case PCI_ERR_UNC_SDN: | |
46 | case PCI_ERR_UNC_RX_OVER: | |
47 | case PCI_ERR_UNC_FCP: | |
48 | case PCI_ERR_UNC_MALF_TLP: | |
49 | return PCI_ERR_ROOT_CMD_FATAL_EN; | |
50 | case PCI_ERR_UNC_POISON_TLP: | |
51 | case PCI_ERR_UNC_ECRC: | |
52 | case PCI_ERR_UNC_UNSUP: | |
53 | case PCI_ERR_UNC_COMP_TIME: | |
54 | case PCI_ERR_UNC_COMP_ABORT: | |
55 | case PCI_ERR_UNC_UNX_COMP: | |
56 | case PCI_ERR_UNC_ACSV: | |
57 | case PCI_ERR_UNC_MCBTLP: | |
58 | case PCI_ERR_UNC_ATOP_EBLOCKED: | |
59 | case PCI_ERR_UNC_TLP_PRF_BLOCKED: | |
60 | return PCI_ERR_ROOT_CMD_NONFATAL_EN; | |
61 | default: | |
62 | abort(); | |
63 | break; | |
64 | } | |
65 | return PCI_ERR_ROOT_CMD_FATAL_EN; | |
66 | } | |
67 | ||
68 | static int aer_log_add_err(PCIEAERLog *aer_log, const PCIEAERErr *err) | |
69 | { | |
70 | if (aer_log->log_num == aer_log->log_max) { | |
71 | return -1; | |
72 | } | |
73 | memcpy(&aer_log->log[aer_log->log_num], err, sizeof *err); | |
74 | aer_log->log_num++; | |
75 | return 0; | |
76 | } | |
77 | ||
78 | static void aer_log_del_err(PCIEAERLog *aer_log, PCIEAERErr *err) | |
79 | { | |
80 | assert(aer_log->log_num); | |
81 | *err = aer_log->log[0]; | |
82 | aer_log->log_num--; | |
83 | memmove(&aer_log->log[0], &aer_log->log[1], | |
84 | aer_log->log_num * sizeof *err); | |
85 | } | |
86 | ||
/* Drop every queued error; the backing storage itself is kept. */
static void aer_log_clear_all_err(PCIEAERLog *aer_log)
{
    aer_log->log_num = 0;
}
91 | ||
/*
 * Add the AER extended capability at config-space offset 'offset' and
 * initialize its registers: status write-1-to-clear masks, severity and
 * mask defaults, and the capability/control word.
 *
 * Returns 0 on success, -EINVAL when the aer_log.log_max property
 * exceeds PCIE_AER_LOG_MAX_LIMIT.
 */
int pcie_aer_init(PCIDevice *dev, uint16_t offset)
{
    PCIExpressDevice *exp;

    pcie_add_capability(dev, PCI_EXT_CAP_ID_ERR, PCI_ERR_VER,
                        offset, PCI_ERR_SIZEOF);
    exp = &dev->exp;
    exp->aer_cap = offset;

    /* log_max is property */
    if (dev->exp.aer_log.log_max == PCIE_AER_LOG_MAX_UNSET) {
        dev->exp.aer_log.log_max = PCIE_AER_LOG_MAX_DEFAULT;
    }
    /* clip down the value to avoid unreasonable memory usage */
    if (dev->exp.aer_log.log_max > PCIE_AER_LOG_MAX_LIMIT) {
        return -EINVAL;
    }
    dev->exp.aer_log.log = qemu_mallocz(sizeof dev->exp.aer_log.log[0] *
                                        dev->exp.aer_log.log_max);

    /* uncorrectable error status: supported bits are write-1-to-clear */
    pci_set_long(dev->w1cmask + offset + PCI_ERR_UNCOR_STATUS,
                 PCI_ERR_UNC_SUPPORTED);

    /* uncorrectable error severity: spec default, guest-writable */
    pci_set_long(dev->config + offset + PCI_ERR_UNCOR_SEVER,
                 PCI_ERR_UNC_SEVERITY_DEFAULT);
    pci_set_long(dev->wmask + offset + PCI_ERR_UNCOR_SEVER,
                 PCI_ERR_UNC_SUPPORTED);

    /* correctable error status: write-1-to-clear */
    pci_long_test_and_set_mask(dev->w1cmask + offset + PCI_ERR_COR_STATUS,
                               PCI_ERR_COR_STATUS);

    /* correctable error mask: spec default, guest-writable */
    pci_set_long(dev->config + offset + PCI_ERR_COR_MASK,
                 PCI_ERR_COR_MASK_DEFAULT);
    pci_set_long(dev->wmask + offset + PCI_ERR_COR_MASK,
                 PCI_ERR_COR_SUPPORTED);

    /* capabilities and control. multiple header logging is supported */
    if (dev->exp.aer_log.log_max > 0) {
        pci_set_long(dev->config + offset + PCI_ERR_CAP,
                     PCI_ERR_CAP_ECRC_GENC | PCI_ERR_CAP_ECRC_CHKC |
                     PCI_ERR_CAP_MHRC);
        pci_set_long(dev->wmask + offset + PCI_ERR_CAP,
                     PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE |
                     PCI_ERR_CAP_MHRE);
    } else {
        pci_set_long(dev->config + offset + PCI_ERR_CAP,
                     PCI_ERR_CAP_ECRC_GENC | PCI_ERR_CAP_ECRC_CHKC);
        pci_set_long(dev->wmask + offset + PCI_ERR_CAP,
                     PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE);
    }

    switch (pcie_cap_get_type(dev)) {
    case PCI_EXP_TYPE_ROOT_PORT:
        /* this case will be set by pcie_aer_root_init() */
        /* fallthrough */
    case PCI_EXP_TYPE_DOWNSTREAM:
    case PCI_EXP_TYPE_UPSTREAM:
        pci_word_test_and_set_mask(dev->wmask + PCI_BRIDGE_CONTROL,
                                   PCI_BRIDGE_CTL_SERR);
        /* NOTE(review): this sets a bit in the w1cmask at PCI_STATUS,
         * while the mask name refers to the secondary status register
         * (PCI_SEC_STATUS) -- confirm the intended register. */
        pci_long_test_and_set_mask(dev->w1cmask + PCI_STATUS,
                                   PCI_SEC_STATUS_RCV_SYSTEM_ERROR);
        break;
    default:
        /* nothing */
        break;
    }
    return 0;
}
160 | ||
/* Release the software error log allocated by pcie_aer_init(). */
void pcie_aer_exit(PCIDevice *dev)
{
    qemu_free(dev->exp.aer_log.log);
}
165 | ||
166 | static void pcie_aer_update_uncor_status(PCIDevice *dev) | |
167 | { | |
168 | uint8_t *aer_cap = dev->config + dev->exp.aer_cap; | |
169 | PCIEAERLog *aer_log = &dev->exp.aer_log; | |
170 | ||
171 | uint16_t i; | |
172 | for (i = 0; i < aer_log->log_num; i++) { | |
173 | pci_long_test_and_set_mask(aer_cap + PCI_ERR_UNCOR_STATUS, | |
174 | dev->exp.aer_log.log[i].status); | |
175 | } | |
176 | } | |
177 | ||
/*
 * return value:
 *      true: error message needs to be sent up
 *      false: error message is masked
 *
 * 6.2.6 Error Message Control
 * Figure 6-3
 * all pci express devices part
 */
static bool
pcie_aer_msg_alldev(PCIDevice *dev, const PCIEAERMsg *msg)
{
    /* Only uncorrectable errors propagate here, and only while SERR#
     * reporting is enabled in the command register. */
    if (!(pcie_aer_msg_is_uncor(msg) &&
        (pci_get_word(dev->config + PCI_COMMAND) & PCI_COMMAND_SERR))) {
        return false;
    }

    /* Signaled System Error
     *
     * 7.5.1.1 Command register
     * Bit 8 SERR# Enable
     *
     * When Set, this bit enables reporting of Non-fatal and Fatal
     * errors detected by the Function to the Root Complex. Note that
     * errors are reported if enabled either through this bit or through
     * the PCI Express specific bits in the Device Control register (see
     * Section 7.8.4).
     */
    pci_word_test_and_set_mask(dev->config + PCI_STATUS,
                               PCI_STATUS_SIG_SYSTEM_ERROR);

    /* Device Control gates which severities are forwarded;
     * msg->severity holds a PCI_ERR_ROOT_CMD_* bit which is tested
     * directly against the DEVCTL enable bits -- presumably the two
     * encodings share bit positions (confirm against pcie_regs.h). */
    if (!(msg->severity &
          pci_get_word(dev->config + dev->exp.exp_cap + PCI_EXP_DEVCTL))) {
        return false;
    }

    /* send up error message */
    return true;
}
217 | ||
34e65944 IY |
218 | /* |
219 | * return value: | |
220 | * true: error message is sent up | |
221 | * false: error message is masked | |
222 | * | |
223 | * 6.2.6 Error Message Control | |
224 | * Figure 6-3 | |
225 | * virtual pci bridge part | |
226 | */ | |
227 | static bool pcie_aer_msg_vbridge(PCIDevice *dev, const PCIEAERMsg *msg) | |
228 | { | |
229 | uint16_t bridge_control = pci_get_word(dev->config + PCI_BRIDGE_CONTROL); | |
230 | ||
231 | if (pcie_aer_msg_is_uncor(msg)) { | |
232 | /* Received System Error */ | |
233 | pci_word_test_and_set_mask(dev->config + PCI_SEC_STATUS, | |
234 | PCI_SEC_STATUS_RCV_SYSTEM_ERROR); | |
235 | } | |
236 | ||
237 | if (!(bridge_control & PCI_BRIDGE_CTL_SERR)) { | |
238 | return false; | |
239 | } | |
240 | return true; | |
241 | } | |
242 | ||
243 | void pcie_aer_root_set_vector(PCIDevice *dev, unsigned int vector) | |
244 | { | |
245 | uint8_t *aer_cap = dev->config + dev->exp.aer_cap; | |
246 | assert(vector < PCI_ERR_ROOT_IRQ_MAX); | |
247 | pci_long_test_and_clear_mask(aer_cap + PCI_ERR_ROOT_STATUS, | |
248 | PCI_ERR_ROOT_IRQ); | |
249 | pci_long_test_and_set_mask(aer_cap + PCI_ERR_ROOT_STATUS, | |
250 | vector << PCI_ERR_ROOT_IRQ_SHIFT); | |
251 | } | |
252 | ||
253 | static unsigned int pcie_aer_root_get_vector(PCIDevice *dev) | |
254 | { | |
255 | uint8_t *aer_cap = dev->config + dev->exp.aer_cap; | |
256 | uint32_t root_status = pci_get_long(aer_cap + PCI_ERR_ROOT_STATUS); | |
257 | return (root_status & PCI_ERR_ROOT_IRQ) >> PCI_ERR_ROOT_IRQ_SHIFT; | |
258 | } | |
259 | ||
c3f33667 MT |
260 | /* Given a status register, get corresponding bits in the command register */ |
261 | static uint32_t pcie_aer_status_to_cmd(uint32_t status) | |
262 | { | |
263 | uint32_t cmd = 0; | |
264 | if (status & PCI_ERR_ROOT_COR_RCV) { | |
265 | cmd |= PCI_ERR_ROOT_CMD_COR_EN; | |
266 | } | |
267 | if (status & PCI_ERR_ROOT_NONFATAL_RCV) { | |
268 | cmd |= PCI_ERR_ROOT_CMD_NONFATAL_EN; | |
269 | } | |
270 | if (status & PCI_ERR_ROOT_FATAL_RCV) { | |
271 | cmd |= PCI_ERR_ROOT_CMD_FATAL_EN; | |
272 | } | |
273 | return cmd; | |
274 | } | |
275 | ||
34e65944 IY |
276 | /* |
277 | * return value: | |
278 | * true: error message is sent up | |
279 | * false: error message is masked | |
280 | * | |
281 | * 6.2.6 Error Message Control | |
282 | * Figure 6-3 | |
283 | * root port part | |
284 | */ | |
285 | static bool pcie_aer_msg_root_port(PCIDevice *dev, const PCIEAERMsg *msg) | |
286 | { | |
287 | bool msg_sent; | |
288 | uint16_t cmd; | |
289 | uint8_t *aer_cap; | |
290 | uint32_t root_cmd; | |
c3f33667 | 291 | uint32_t root_status, prev_status; |
34e65944 IY |
292 | bool msi_trigger; |
293 | ||
294 | msg_sent = false; | |
295 | cmd = pci_get_word(dev->config + PCI_COMMAND); | |
296 | aer_cap = dev->config + dev->exp.aer_cap; | |
297 | root_cmd = pci_get_long(aer_cap + PCI_ERR_ROOT_COMMAND); | |
c3f33667 | 298 | prev_status = root_status = pci_get_long(aer_cap + PCI_ERR_ROOT_STATUS); |
34e65944 IY |
299 | msi_trigger = false; |
300 | ||
301 | if (cmd & PCI_COMMAND_SERR) { | |
302 | /* System Error. | |
303 | * | |
304 | * The way to report System Error is platform specific and | |
305 | * it isn't implemented in qemu right now. | |
306 | * So just discard the error for now. | |
307 | * OS which cares of aer would receive errors via | |
308 | * native aer mechanims, so this wouldn't matter. | |
309 | */ | |
310 | } | |
311 | ||
312 | /* Errro Message Received: Root Error Status register */ | |
313 | switch (msg->severity) { | |
314 | case PCI_ERR_ROOT_CMD_COR_EN: | |
315 | if (root_status & PCI_ERR_ROOT_COR_RCV) { | |
316 | root_status |= PCI_ERR_ROOT_MULTI_COR_RCV; | |
317 | } else { | |
318 | if (root_cmd & PCI_ERR_ROOT_CMD_COR_EN) { | |
319 | msi_trigger = true; | |
320 | } | |
321 | pci_set_word(aer_cap + PCI_ERR_ROOT_COR_SRC, msg->source_id); | |
322 | } | |
323 | root_status |= PCI_ERR_ROOT_COR_RCV; | |
324 | break; | |
325 | case PCI_ERR_ROOT_CMD_NONFATAL_EN: | |
326 | if (!(root_status & PCI_ERR_ROOT_NONFATAL_RCV) && | |
327 | root_cmd & PCI_ERR_ROOT_CMD_NONFATAL_EN) { | |
328 | msi_trigger = true; | |
329 | } | |
330 | root_status |= PCI_ERR_ROOT_NONFATAL_RCV; | |
331 | break; | |
332 | case PCI_ERR_ROOT_CMD_FATAL_EN: | |
333 | if (!(root_status & PCI_ERR_ROOT_FATAL_RCV) && | |
334 | root_cmd & PCI_ERR_ROOT_CMD_FATAL_EN) { | |
335 | msi_trigger = true; | |
336 | } | |
337 | if (!(root_status & PCI_ERR_ROOT_UNCOR_RCV)) { | |
338 | root_status |= PCI_ERR_ROOT_FIRST_FATAL; | |
339 | } | |
340 | root_status |= PCI_ERR_ROOT_FATAL_RCV; | |
341 | break; | |
342 | default: | |
343 | abort(); | |
344 | break; | |
345 | } | |
346 | if (pcie_aer_msg_is_uncor(msg)) { | |
347 | if (root_status & PCI_ERR_ROOT_UNCOR_RCV) { | |
348 | root_status |= PCI_ERR_ROOT_MULTI_UNCOR_RCV; | |
349 | } else { | |
350 | pci_set_word(aer_cap + PCI_ERR_ROOT_SRC, msg->source_id); | |
351 | } | |
352 | root_status |= PCI_ERR_ROOT_UNCOR_RCV; | |
353 | } | |
354 | pci_set_long(aer_cap + PCI_ERR_ROOT_STATUS, root_status); | |
355 | ||
c3f33667 MT |
356 | /* 6.2.4.1.2 Interrupt Generation */ |
357 | /* All the above did was set some bits in the status register. | |
358 | * Specifically these that match message severity. | |
359 | * The below code relies on this fact. */ | |
360 | if (!(root_cmd & msg->severity) || | |
361 | (pcie_aer_status_to_cmd(prev_status) & root_cmd)) { | |
362 | /* Condition is not being set or was already true so nothing to do. */ | |
363 | return msg_sent; | |
364 | } | |
365 | ||
366 | msg_sent = true; | |
367 | if (msix_enabled(dev)) { | |
368 | msix_notify(dev, pcie_aer_root_get_vector(dev)); | |
369 | } else if (msi_enabled(dev)) { | |
370 | msi_notify(dev, pcie_aer_root_get_vector(dev)); | |
371 | } else { | |
372 | qemu_set_irq(dev->irq[dev->exp.aer_intx], 1); | |
34e65944 IY |
373 | } |
374 | return msg_sent; | |
375 | } | |
376 | ||
/*
 * 6.2.6 Error Message Control Figure 6-3
 *
 * Walk up the bus tree from the device, propagate the error message.
 */
static void pcie_aer_msg(PCIDevice *dev, const PCIEAERMsg *msg)
{
    uint8_t type;

    while (dev) {
        if (!pci_is_express(dev)) {
            /* just ignore it */
            /* TODO: Shouldn't we set PCI_STATUS_SIG_SYSTEM_ERROR?
             * Consider e.g. a PCI bridge above a PCI Express device. */
            return;
        }

        type = pcie_cap_get_type(dev);
        /* virtual bridges (root/upstream/downstream ports) may mask the
         * message before the per-device check runs */
        if ((type == PCI_EXP_TYPE_ROOT_PORT ||
            type == PCI_EXP_TYPE_UPSTREAM ||
            type == PCI_EXP_TYPE_DOWNSTREAM) &&
            !pcie_aer_msg_vbridge(dev, msg)) {
            return;
        }
        if (!pcie_aer_msg_alldev(dev, msg)) {
            return;
        }
        if (type == PCI_EXP_TYPE_ROOT_PORT) {
            pcie_aer_msg_root_port(dev, msg);
            /* Root port can notify system itself,
               or send the error message to root complex event collector. */
            /*
             * if root port is associated with an event collector,
             * return the root complex event collector here.
             * For now root complex event collector isn't supported.
             */
            return;
        }
        /* continue with the bridge device that owns the parent bus */
        dev = pci_bridge_get_device(dev->bus);
    }
}
418 | ||
419 | static void pcie_aer_update_log(PCIDevice *dev, const PCIEAERErr *err) | |
420 | { | |
421 | uint8_t *aer_cap = dev->config + dev->exp.aer_cap; | |
e6e055c9 | 422 | uint8_t first_bit = ffs(err->status) - 1; |
34e65944 IY |
423 | uint32_t errcap = pci_get_long(aer_cap + PCI_ERR_CAP); |
424 | int i; | |
425 | ||
426 | assert(err->status); | |
427 | assert(err->status & (err->status - 1)); | |
428 | ||
429 | errcap &= ~(PCI_ERR_CAP_FEP_MASK | PCI_ERR_CAP_TLP); | |
430 | errcap |= PCI_ERR_CAP_FEP(first_bit); | |
431 | ||
432 | if (err->flags & PCIE_AER_ERR_HEADER_VALID) { | |
433 | for (i = 0; i < ARRAY_SIZE(err->header); ++i) { | |
434 | /* 7.10.8 Header Log Register */ | |
435 | uint8_t *header_log = | |
436 | aer_cap + PCI_ERR_HEADER_LOG + i * sizeof err->header[0]; | |
437 | cpu_to_be32wu((uint32_t*)header_log, err->header[i]); | |
438 | } | |
439 | } else { | |
440 | assert(!(err->flags & PCIE_AER_ERR_TLP_PREFIX_PRESENT)); | |
441 | memset(aer_cap + PCI_ERR_HEADER_LOG, 0, PCI_ERR_HEADER_LOG_SIZE); | |
442 | } | |
443 | ||
444 | if ((err->flags & PCIE_AER_ERR_TLP_PREFIX_PRESENT) && | |
445 | (pci_get_long(dev->config + dev->exp.exp_cap + PCI_EXP_DEVCTL2) & | |
446 | PCI_EXP_DEVCAP2_EETLPP)) { | |
447 | for (i = 0; i < ARRAY_SIZE(err->prefix); ++i) { | |
448 | /* 7.10.12 tlp prefix log register */ | |
449 | uint8_t *prefix_log = | |
450 | aer_cap + PCI_ERR_TLP_PREFIX_LOG + i * sizeof err->prefix[0]; | |
451 | cpu_to_be32wu((uint32_t*)prefix_log, err->prefix[i]); | |
452 | } | |
453 | errcap |= PCI_ERR_CAP_TLP; | |
454 | } else { | |
455 | memset(aer_cap + PCI_ERR_TLP_PREFIX_LOG, 0, | |
456 | PCI_ERR_TLP_PREFIX_LOG_SIZE); | |
457 | } | |
458 | pci_set_long(aer_cap + PCI_ERR_CAP, errcap); | |
459 | } | |
460 | ||
461 | static void pcie_aer_clear_log(PCIDevice *dev) | |
462 | { | |
463 | uint8_t *aer_cap = dev->config + dev->exp.aer_cap; | |
464 | ||
465 | pci_long_test_and_clear_mask(aer_cap + PCI_ERR_CAP, | |
466 | PCI_ERR_CAP_FEP_MASK | PCI_ERR_CAP_TLP); | |
467 | ||
468 | memset(aer_cap + PCI_ERR_HEADER_LOG, 0, PCI_ERR_HEADER_LOG_SIZE); | |
469 | memset(aer_cap + PCI_ERR_TLP_PREFIX_LOG, 0, PCI_ERR_TLP_PREFIX_LOG_SIZE); | |
470 | } | |
471 | ||
/*
 * React to the guest clearing the first uncorrectable error: either
 * wipe the header log, or -- with Multiple Header Recording enabled and
 * more errors queued -- promote the next queued error into the
 * registers.
 */
static void pcie_aer_clear_error(PCIDevice *dev)
{
    uint8_t *aer_cap = dev->config + dev->exp.aer_cap;
    uint32_t errcap = pci_get_long(aer_cap + PCI_ERR_CAP);
    PCIEAERLog *aer_log = &dev->exp.aer_log;
    PCIEAERErr err;

    if (!(errcap & PCI_ERR_CAP_MHRE) || !aer_log->log_num) {
        pcie_aer_clear_log(dev);
        return;
    }

    /*
     * If more errors are queued, set corresponding bits in uncorrectable
     * error status.
     * We emulate uncorrectable error status register as W1CS.
     * So set bit in uncorrectable error status here again for multiple
     * error recording support.
     *
     * 6.2.4.2 Multiple Error Handling(Advanced Error Reporting Capability)
     */
    pcie_aer_update_uncor_status(dev);

    /* pop the oldest queued error and make it the new first error */
    aer_log_del_err(aer_log, &err);
    pcie_aer_update_log(dev, &err);
}
498 | ||
499 | static int pcie_aer_record_error(PCIDevice *dev, | |
500 | const PCIEAERErr *err) | |
501 | { | |
502 | uint8_t *aer_cap = dev->config + dev->exp.aer_cap; | |
503 | uint32_t errcap = pci_get_long(aer_cap + PCI_ERR_CAP); | |
504 | int fep = PCI_ERR_CAP_FEP(errcap); | |
505 | ||
506 | assert(err->status); | |
507 | assert(err->status & (err->status - 1)); | |
508 | ||
509 | if (errcap & PCI_ERR_CAP_MHRE && | |
510 | (pci_get_long(aer_cap + PCI_ERR_UNCOR_STATUS) & (1U << fep))) { | |
511 | /* Not first error. queue error */ | |
512 | if (aer_log_add_err(&dev->exp.aer_log, err) < 0) { | |
513 | /* overflow */ | |
514 | return -1; | |
515 | } | |
516 | return 0; | |
517 | } | |
518 | ||
519 | pcie_aer_update_log(dev, err); | |
520 | return 0; | |
521 | } | |
522 | ||
/* Working state threaded through one pcie_aer_inject_error() call. */
typedef struct PCIEAERInject {
    PCIDevice *dev;            /* device the error is injected into */
    uint8_t *aer_cap;          /* AER capability in config space, or NULL */
    const PCIEAERErr *err;     /* the error being injected */
    uint16_t devctl;           /* cached PCI_EXP_DEVCTL value */
    uint16_t devsta;           /* PCI_EXP_DEVSTA value being accumulated */
    uint32_t error_status;     /* masked status actually recorded */
    bool unsupported_request;  /* error is an Unsupported Request */
    bool log_overflow;         /* software log overflowed while recording */
    PCIEAERMsg msg;            /* error message to send up, if any */
} PCIEAERInject;
534 | ||
/*
 * Inject a correctable error (optionally an advisory non-fatal one,
 * in which case uncor_status carries the underlying uncorrectable bit).
 * Updates device status and AER registers.
 *
 * return value:
 *      true: an error message must be sent up
 *      false: the error is masked or reporting is disabled
 */
static bool pcie_aer_inject_cor_error(PCIEAERInject *inj,
                                      uint32_t uncor_status,
                                      bool is_advisory_nonfatal)
{
    PCIDevice *dev = inj->dev;

    inj->devsta |= PCI_EXP_DEVSTA_CED;
    if (inj->unsupported_request) {
        inj->devsta |= PCI_EXP_DEVSTA_URD;
    }
    pci_set_word(dev->config + dev->exp.exp_cap + PCI_EXP_DEVSTA, inj->devsta);

    if (inj->aer_cap) {
        uint32_t mask;
        /* the status bit is set even when the error is masked */
        pci_long_test_and_set_mask(inj->aer_cap + PCI_ERR_COR_STATUS,
                                   inj->error_status);
        mask = pci_get_long(inj->aer_cap + PCI_ERR_COR_MASK);
        if (mask & inj->error_status) {
            return false;
        }
        if (is_advisory_nonfatal) {
            uint32_t uncor_mask =
                pci_get_long(inj->aer_cap + PCI_ERR_UNCOR_MASK);
            /* log the underlying uncorrectable error only if unmasked */
            if (!(uncor_mask & uncor_status)) {
                inj->log_overflow = !!pcie_aer_record_error(dev, inj->err);
            }
            pci_long_test_and_set_mask(inj->aer_cap + PCI_ERR_UNCOR_STATUS,
                                       uncor_status);
        }
    }

    if (inj->unsupported_request && !(inj->devctl & PCI_EXP_DEVCTL_URRE)) {
        /* unsupported request reporting is disabled */
        return false;
    }
    if (!(inj->devctl & PCI_EXP_DEVCTL_CERE)) {
        /* correctable error reporting is disabled */
        return false;
    }

    inj->msg.severity = PCI_ERR_ROOT_CMD_COR_EN;
    return true;
}
576 | ||
577 | static bool pcie_aer_inject_uncor_error(PCIEAERInject *inj, bool is_fatal) | |
578 | { | |
579 | PCIDevice *dev = inj->dev; | |
580 | uint16_t cmd; | |
581 | ||
582 | if (is_fatal) { | |
583 | inj->devsta |= PCI_EXP_DEVSTA_FED; | |
584 | } else { | |
585 | inj->devsta |= PCI_EXP_DEVSTA_NFED; | |
586 | } | |
587 | if (inj->unsupported_request) { | |
588 | inj->devsta |= PCI_EXP_DEVSTA_URD; | |
589 | } | |
590 | pci_set_long(dev->config + dev->exp.exp_cap + PCI_EXP_DEVSTA, inj->devsta); | |
591 | ||
592 | if (inj->aer_cap) { | |
593 | uint32_t mask = pci_get_long(inj->aer_cap + PCI_ERR_UNCOR_MASK); | |
594 | if (mask & inj->error_status) { | |
595 | pci_long_test_and_set_mask(inj->aer_cap + PCI_ERR_UNCOR_STATUS, | |
596 | inj->error_status); | |
597 | return false; | |
598 | } | |
599 | ||
600 | inj->log_overflow = !!pcie_aer_record_error(dev, inj->err); | |
601 | pci_long_test_and_set_mask(inj->aer_cap + PCI_ERR_UNCOR_STATUS, | |
602 | inj->error_status); | |
603 | } | |
604 | ||
605 | cmd = pci_get_word(dev->config + PCI_COMMAND); | |
606 | if (inj->unsupported_request && | |
607 | !(inj->devctl & PCI_EXP_DEVCTL_URRE) && !(cmd & PCI_COMMAND_SERR)) { | |
608 | return false; | |
609 | } | |
610 | if (is_fatal) { | |
611 | if (!((cmd & PCI_COMMAND_SERR) || | |
612 | (inj->devctl & PCI_EXP_DEVCTL_FERE))) { | |
613 | return false; | |
614 | } | |
615 | inj->msg.severity = PCI_ERR_ROOT_CMD_FATAL_EN; | |
616 | } else { | |
617 | if (!((cmd & PCI_COMMAND_SERR) || | |
618 | (inj->devctl & PCI_EXP_DEVCTL_NFERE))) { | |
619 | return false; | |
620 | } | |
621 | inj->msg.severity = PCI_ERR_ROOT_CMD_NONFATAL_EN; | |
622 | } | |
623 | return true; | |
624 | } | |
625 | ||
626 | /* | |
627 | * non-Function specific error must be recorded in all functions. | |
628 | * It is the responsibility of the caller of this function. | |
629 | * It is also caller's responsiblity to determine which function should | |
630 | * report the rerror. | |
631 | * | |
632 | * 6.2.4 Error Logging | |
633 | * 6.2.5 Sqeunce of Device Error Signaling and Logging Operations | |
634 | * table 6-2: Flowchard Showing Sequence of Device Error Signaling and Logging | |
635 | * Operations | |
636 | */ | |
637 | int pcie_aer_inject_error(PCIDevice *dev, const PCIEAERErr *err) | |
638 | { | |
639 | uint8_t *aer_cap = NULL; | |
640 | uint16_t devctl = 0; | |
641 | uint16_t devsta = 0; | |
642 | uint32_t error_status = err->status; | |
643 | PCIEAERInject inj; | |
644 | ||
645 | if (!pci_is_express(dev)) { | |
646 | return -ENOSYS; | |
647 | } | |
648 | ||
649 | if (err->flags & PCIE_AER_ERR_IS_CORRECTABLE) { | |
650 | error_status &= PCI_ERR_COR_SUPPORTED; | |
651 | } else { | |
652 | error_status &= PCI_ERR_UNC_SUPPORTED; | |
653 | } | |
654 | ||
655 | /* invalid status bit. one and only one bit must be set */ | |
656 | if (!error_status || (error_status & (error_status - 1))) { | |
657 | return -EINVAL; | |
658 | } | |
659 | ||
660 | if (dev->exp.aer_cap) { | |
661 | uint8_t *exp_cap = dev->config + dev->exp.exp_cap; | |
662 | aer_cap = dev->config + dev->exp.aer_cap; | |
663 | devctl = pci_get_long(exp_cap + PCI_EXP_DEVCTL); | |
664 | devsta = pci_get_long(exp_cap + PCI_EXP_DEVSTA); | |
665 | } | |
666 | ||
667 | inj.dev = dev; | |
668 | inj.aer_cap = aer_cap; | |
669 | inj.err = err; | |
670 | inj.devctl = devctl; | |
671 | inj.devsta = devsta; | |
672 | inj.error_status = error_status; | |
673 | inj.unsupported_request = !(err->flags & PCIE_AER_ERR_IS_CORRECTABLE) && | |
674 | err->status == PCI_ERR_UNC_UNSUP; | |
675 | inj.log_overflow = false; | |
676 | ||
677 | if (err->flags & PCIE_AER_ERR_IS_CORRECTABLE) { | |
678 | if (!pcie_aer_inject_cor_error(&inj, 0, false)) { | |
679 | return 0; | |
680 | } | |
681 | } else { | |
682 | bool is_fatal = | |
683 | pcie_aer_uncor_default_severity(error_status) == | |
684 | PCI_ERR_ROOT_CMD_FATAL_EN; | |
685 | if (aer_cap) { | |
686 | is_fatal = | |
687 | error_status & pci_get_long(aer_cap + PCI_ERR_UNCOR_SEVER); | |
688 | } | |
689 | if (!is_fatal && (err->flags & PCIE_AER_ERR_MAYBE_ADVISORY)) { | |
690 | inj.error_status = PCI_ERR_COR_ADV_NONFATAL; | |
691 | if (!pcie_aer_inject_cor_error(&inj, error_status, true)) { | |
692 | return 0; | |
693 | } | |
694 | } else { | |
695 | if (!pcie_aer_inject_uncor_error(&inj, is_fatal)) { | |
696 | return 0; | |
697 | } | |
698 | } | |
699 | } | |
700 | ||
701 | /* send up error message */ | |
702 | inj.msg.source_id = err->source_id; | |
703 | pcie_aer_msg(dev, &inj.msg); | |
704 | ||
705 | if (inj.log_overflow) { | |
706 | PCIEAERErr header_log_overflow = { | |
707 | .status = PCI_ERR_COR_HL_OVERFLOW, | |
708 | .flags = PCIE_AER_ERR_IS_CORRECTABLE, | |
709 | }; | |
710 | int ret = pcie_aer_inject_error(dev, &header_log_overflow); | |
711 | assert(!ret); | |
712 | } | |
713 | return 0; | |
714 | } | |
715 | ||
/*
 * Config-write hook for the AER capability (non root-port part).
 * Called after the write has already been applied to dev->config;
 * reacts to the guest clearing bits of the uncorrectable error status
 * register.
 */
void pcie_aer_write_config(PCIDevice *dev,
                           uint32_t addr, uint32_t val, int len)
{
    uint8_t *aer_cap = dev->config + dev->exp.aer_cap;
    uint32_t errcap = pci_get_long(aer_cap + PCI_ERR_CAP);
    uint32_t first_error = 1U << PCI_ERR_CAP_FEP(errcap);
    uint32_t uncorsta = pci_get_long(aer_cap + PCI_ERR_UNCOR_STATUS);

    /* uncorrectable error */
    if (!(uncorsta & first_error)) {
        /* the bit that corresponds to the first error is cleared */
        pcie_aer_clear_error(dev);
    } else if (errcap & PCI_ERR_CAP_MHRE) {
        /* When PCI_ERR_CAP_MHRE is enabled and the first error isn't cleared
         * nothing should happen. So we have to revert the modification to
         * the register.
         */
        pcie_aer_update_uncor_status(dev);
    } else {
        /* capability & control
         * PCI_ERR_CAP_MHRE might be cleared, so clear of header log.
         */
        aer_log_clear_all_err(&dev->exp.aer_log);
    }
}
741 | ||
742 | void pcie_aer_root_init(PCIDevice *dev) | |
743 | { | |
744 | uint16_t pos = dev->exp.aer_cap; | |
745 | ||
746 | pci_set_long(dev->wmask + pos + PCI_ERR_ROOT_COMMAND, | |
747 | PCI_ERR_ROOT_CMD_EN_MASK); | |
748 | pci_set_long(dev->w1cmask + pos + PCI_ERR_ROOT_STATUS, | |
749 | PCI_ERR_ROOT_STATUS_REPORT_MASK); | |
750 | } | |
751 | ||
/* Reset the root-port part of AER: disable all reporting by clearing
 * the Root Error Command register. */
void pcie_aer_root_reset(PCIDevice *dev)
{
    uint8_t* aer_cap = dev->config + dev->exp.aer_cap;

    pci_set_long(aer_cap + PCI_ERR_ROOT_COMMAND, 0);

    /*
     * Advanced Error Interrupt Message Number in Root Error Status Register
     * must be updated by chip dependent code because it's chip dependent
     * which number is used.
     */
}
764 | ||
/*
 * Config-write hook for the root-port part of the AER capability.
 * root_cmd_prev is the Root Error Command value before the write.
 * Implements 6.2.4.1.2 Interrupt Generation: the INTx level tracks the
 * (status & command) condition, while MSI/MSI-X fires on the
 * condition's false-to-true transition.
 */
void pcie_aer_root_write_config(PCIDevice *dev,
                                uint32_t addr, uint32_t val, int len,
                                uint32_t root_cmd_prev)
{
    uint8_t *aer_cap = dev->config + dev->exp.aer_cap;
    uint32_t root_status = pci_get_long(aer_cap + PCI_ERR_ROOT_STATUS);
    uint32_t enabled_cmd = pcie_aer_status_to_cmd(root_status);
    uint32_t root_cmd = pci_get_long(aer_cap + PCI_ERR_ROOT_COMMAND);
    /* 6.2.4.1.2 Interrupt Generation */
    if (!msix_enabled(dev) && !msi_enabled(dev)) {
        /* INTx is level triggered: reflect the current condition */
        qemu_set_irq(dev->irq[dev->exp.aer_intx], !!(root_cmd & enabled_cmd));
        return;
    }

    if ((root_cmd_prev & enabled_cmd) || !(root_cmd & enabled_cmd)) {
        /* Send MSI on transition from false to true. */
        return;
    }

    if (msix_enabled(dev)) {
        msix_notify(dev, pcie_aer_root_get_vector(dev));
    } else if (msi_enabled(dev)) {
        msi_notify(dev, pcie_aer_root_get_vector(dev));
    } else {
        /* unreachable: the INTx-only case returned above */
        abort();
    }
}
792 | ||
/* Migration layout of one PCIEAERErr software-log entry. */
static const VMStateDescription vmstate_pcie_aer_err = {
    .name = "PCIE_AER_ERROR",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(status, PCIEAERErr),
        VMSTATE_UINT16(source_id, PCIEAERErr),
        VMSTATE_UINT16(flags, PCIEAERErr),
        VMSTATE_UINT32_ARRAY(header, PCIEAERErr, 4),
        VMSTATE_UINT32_ARRAY(prefix, PCIEAERErr, 4),
        VMSTATE_END_OF_LIST()
    }
};
807 | ||
/*
 * Migrate a heap-allocated, variable-length array of AER errors:
 * _field is the pointer member, _field_num (a uint16_t member) holds
 * the number of valid elements.
 */
#define VMSTATE_PCIE_AER_ERRS(_field, _state, _field_num, _vmsd, _type) { \
    .name = (stringify(_field)),                                          \
    .version_id = 0,                                                      \
    .num_offset = vmstate_offset_value(_state, _field_num, uint16_t),     \
    .size = sizeof(_type),                                                \
    .vmsd = &(_vmsd),                                                     \
    .flags = VMS_POINTER | VMS_VARRAY_UINT16 | VMS_STRUCT,                \
    .offset = vmstate_offset_pointer(_state, _field, _type),              \
}
817 | ||
/* Migration layout of the software AER error log (PCIEAERLog). */
const VMStateDescription vmstate_pcie_aer_log = {
    .name = "PCIE_AER_ERROR_LOG",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(log_num, PCIEAERLog),
        VMSTATE_UINT16(log_max, PCIEAERLog),
        /* log_num entries out of the dynamically allocated log array */
        VMSTATE_PCIE_AER_ERRS(log, PCIEAERLog, log_num,
                              vmstate_pcie_aer_err, PCIEAERErr),
        VMSTATE_END_OF_LIST()
    }
};