/*
 * s390 PCI instructions
 *
 * Copyright 2014 IBM Corp.
 * Author(s): Frank Blaschka <[email protected]>
 *            Hong Bo Li <[email protected]>
 *            Yi Min Zhao <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at
 * your option) any later version. See the COPYING file in the top-level
 * directory.
 */

#include "s390-pci-inst.h"
#include "s390-pci-bus.h"
#include <exec/memory-internal.h>
#include <qemu/error-report.h>

/* #define DEBUG_S390PCI_INST */
#ifdef DEBUG_S390PCI_INST
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, "s390pci-inst: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

static void s390_set_status_code(CPUS390XState *env,
                                 uint8_t r, uint64_t status_code)
{
    env->regs[r] &= ~0xff000000ULL;
    env->regs[r] |= (status_code & 0xff) << 24;
}

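/*
 * CLP List PCI: validate the request and response headers, then walk the
 * emulated zPCI functions starting at the resume token and fill one
 * ClpFhListEntry per device until the response buffer is exhausted.
 */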
static int list_pci(ClpReqRspListPci *rrb, uint8_t *cc)
{
    S390PCIBusDevice *pbdev;
    uint32_t res_code, initial_l2, g_l2, finish;
    int rc, idx;
    uint64_t resume_token;

    rc = 0;
    if (lduw_p(&rrb->request.hdr.len) != 32) {
        res_code = CLP_RC_LEN;
        rc = -EINVAL;
        goto out;
    }

    if ((ldl_p(&rrb->request.fmt) & CLP_MASK_FMT) != 0) {
        res_code = CLP_RC_FMT;
        rc = -EINVAL;
        goto out;
    }

    if ((ldl_p(&rrb->request.fmt) & ~CLP_MASK_FMT) != 0 ||
        ldq_p(&rrb->request.reserved1) != 0 ||
        ldq_p(&rrb->request.reserved2) != 0) {
        res_code = CLP_RC_RESNOT0;
        rc = -EINVAL;
        goto out;
    }

    resume_token = ldq_p(&rrb->request.resume_token);

    if (resume_token) {
        pbdev = s390_pci_find_dev_by_idx(resume_token);
        if (!pbdev) {
            res_code = CLP_RC_LISTPCI_BADRT;
            rc = -EINVAL;
            goto out;
        }
    }

    if (lduw_p(&rrb->response.hdr.len) < 48) {
        res_code = CLP_RC_8K;
        rc = -EINVAL;
        goto out;
    }

    initial_l2 = lduw_p(&rrb->response.hdr.len);
    if ((initial_l2 - LIST_PCI_HDR_LEN) % sizeof(ClpFhListEntry)
        != 0) {
        res_code = CLP_RC_LEN;
        rc = -EINVAL;
        *cc = 3;
        goto out;
    }

    stl_p(&rrb->response.fmt, 0);
    stq_p(&rrb->response.reserved1, 0);
    stq_p(&rrb->response.reserved2, 0);
    stl_p(&rrb->response.mdd, FH_VIRT);
    stw_p(&rrb->response.max_fn, PCI_MAX_FUNCTIONS);
    rrb->response.entry_size = sizeof(ClpFhListEntry);
    finish = 0;
    idx = resume_token;
    g_l2 = LIST_PCI_HDR_LEN;
    do {
        pbdev = s390_pci_find_dev_by_idx(idx);
        if (!pbdev) {
            finish = 1;
            break;
        }
        stw_p(&rrb->response.fh_list[idx - resume_token].device_id,
            pci_get_word(pbdev->pdev->config + PCI_DEVICE_ID));
        stw_p(&rrb->response.fh_list[idx - resume_token].vendor_id,
            pci_get_word(pbdev->pdev->config + PCI_VENDOR_ID));
        stl_p(&rrb->response.fh_list[idx - resume_token].config, 0x80000000);
        stl_p(&rrb->response.fh_list[idx - resume_token].fid, pbdev->fid);
        stl_p(&rrb->response.fh_list[idx - resume_token].fh, pbdev->fh);

        g_l2 += sizeof(ClpFhListEntry);
        /* the fields were stored with st*_p above; read them back with
         * ld*_p so the debug output shows the correct values */
        DPRINTF("g_l2 %d vendor id 0x%x device id 0x%x fid 0x%x fh 0x%x\n",
            g_l2,
            lduw_p(&rrb->response.fh_list[idx - resume_token].vendor_id),
            lduw_p(&rrb->response.fh_list[idx - resume_token].device_id),
            ldl_p(&rrb->response.fh_list[idx - resume_token].fid),
            ldl_p(&rrb->response.fh_list[idx - resume_token].fh));
        idx++;
    } while (g_l2 < initial_l2);

    if (finish == 1) {
        resume_token = 0;
    } else {
        resume_token = idx;
    }
    stq_p(&rrb->response.resume_token, resume_token);
    stw_p(&rrb->response.hdr.len, g_l2);
    stw_p(&rrb->response.hdr.rsp, CLP_RC_OK);
out:
    if (rc) {
        DPRINTF("list pci failed rc 0x%x\n", rc);
        stw_p(&rrb->response.hdr.rsp, res_code);
    }
    return rc;
}

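/*
 * CLP service call: copy the request and response headers from guest
 * memory at regs[r2], sanity-check both lengths, dispatch on the CLP
 * command code and write the completed buffer back to the guest.
 */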
int clp_service_call(S390CPU *cpu, uint8_t r2)
{
    ClpReqHdr *reqh;
    ClpRspHdr *resh;
    S390PCIBusDevice *pbdev;
    uint32_t req_len;
    uint32_t res_len;
    uint8_t buffer[4096 * 2];
    uint8_t cc = 0;
    CPUS390XState *env = &cpu->env;
    int i;

    cpu_synchronize_state(CPU(cpu));

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 4);
        return 0;
    }

    cpu_physical_memory_read(env->regs[r2], buffer, sizeof(*reqh));
    reqh = (ClpReqHdr *)buffer;
    req_len = lduw_p(&reqh->len);
    if (req_len < 16 || req_len > 8184 || (req_len % 8 != 0)) {
        program_interrupt(env, PGM_OPERAND, 4);
        return 0;
    }

    cpu_physical_memory_read(env->regs[r2], buffer, req_len + sizeof(*resh));
    resh = (ClpRspHdr *)(buffer + req_len);
    res_len = lduw_p(&resh->len);
    if (res_len < 8 || res_len > 8176 || (res_len % 8 != 0)) {
        program_interrupt(env, PGM_OPERAND, 4);
        return 0;
    }
    if ((req_len + res_len) > 8192) {
        program_interrupt(env, PGM_OPERAND, 4);
        return 0;
    }

    cpu_physical_memory_read(env->regs[r2], buffer, req_len + res_len);

    if (req_len != 32) {
        stw_p(&resh->rsp, CLP_RC_LEN);
        goto out;
    }

    switch (lduw_p(&reqh->cmd)) {
    case CLP_LIST_PCI: {
        ClpReqRspListPci *rrb = (ClpReqRspListPci *)buffer;
        list_pci(rrb, &cc);
        break;
    }
    case CLP_SET_PCI_FN: {
        ClpReqSetPci *reqsetpci = (ClpReqSetPci *)reqh;
        ClpRspSetPci *ressetpci = (ClpRspSetPci *)resh;

        pbdev = s390_pci_find_dev_by_fh(ldl_p(&reqsetpci->fh));
        if (!pbdev) {
            stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FH);
            goto out;
        }

        switch (reqsetpci->oc) {
        case CLP_SET_ENABLE_PCI_FN:
            pbdev->fh = pbdev->fh | 1 << ENABLE_BIT_OFFSET;
            stl_p(&ressetpci->fh, pbdev->fh);
            stw_p(&ressetpci->hdr.rsp, CLP_RC_OK);
            break;
        case CLP_SET_DISABLE_PCI_FN:
            pbdev->fh = pbdev->fh & ~(1 << ENABLE_BIT_OFFSET);
            pbdev->error_state = false;
            pbdev->lgstg_blocked = false;
            stl_p(&ressetpci->fh, pbdev->fh);
            stw_p(&ressetpci->hdr.rsp, CLP_RC_OK);
            break;
        default:
            DPRINTF("unknown set pci command\n");
            stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP);
            break;
        }
        break;
    }
    case CLP_QUERY_PCI_FN: {
        ClpReqQueryPci *reqquery = (ClpReqQueryPci *)reqh;
        ClpRspQueryPci *resquery = (ClpRspQueryPci *)resh;

        pbdev = s390_pci_find_dev_by_fh(ldl_p(&reqquery->fh));
        if (!pbdev) {
            DPRINTF("query pci no pci dev\n");
            stw_p(&resquery->hdr.rsp, CLP_RC_SETPCIFN_FH);
            goto out;
        }

        for (i = 0; i < PCI_BAR_COUNT; i++) {
            uint32_t data = pci_get_long(pbdev->pdev->config +
                PCI_BASE_ADDRESS_0 + (i * 4));

            stl_p(&resquery->bar[i], data);
            resquery->bar_size[i] = pbdev->pdev->io_regions[i].size ?
                                    ctz64(pbdev->pdev->io_regions[i].size) : 0;
            DPRINTF("bar %d addr 0x%x size 0x%" PRIx64 " barsize 0x%x\n", i,
                    ldl_p(&resquery->bar[i]),
                    pbdev->pdev->io_regions[i].size,
                    resquery->bar_size[i]);
        }

        stq_p(&resquery->sdma, ZPCI_SDMA_ADDR);
        stq_p(&resquery->edma, ZPCI_EDMA_ADDR);
        stw_p(&resquery->pchid, 0);
        stw_p(&resquery->ug, 1);
        stl_p(&resquery->uid, pbdev->fid);
        stw_p(&resquery->hdr.rsp, CLP_RC_OK);
        break;
    }
    case CLP_QUERY_PCI_FNGRP: {
        ClpRspQueryPciGrp *resgrp = (ClpRspQueryPciGrp *)resh;
        resgrp->fr = 1;
        stq_p(&resgrp->dasm, 0);
        stq_p(&resgrp->msia, ZPCI_MSI_ADDR);
        stw_p(&resgrp->mui, 0);
        stw_p(&resgrp->i, 128);
        resgrp->version = 0;

        stw_p(&resgrp->hdr.rsp, CLP_RC_OK);
        break;
    }
    default:
        DPRINTF("unknown clp command\n");
        stw_p(&resh->rsp, CLP_RC_CMD);
        break;
    }

out:
    cpu_physical_memory_write(env->regs[r2], buffer, req_len + res_len);
    setcc(cpu, cc);
    return 0;
}

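/*
 * PCI Load (PCILG): regs[r2] carries the function handle, address space
 * and length, regs[r2 + 1] the offset.  Reads go through the selected
 * BAR's memory region (pcias 0-5) or PCI config space (pcias 15); the
 * result is returned in regs[r1].
 */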
int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
{
    CPUS390XState *env = &cpu->env;
    S390PCIBusDevice *pbdev;
    uint64_t offset;
    uint64_t data;
    uint8_t len;
    uint32_t fh;
    uint8_t pcias;

    cpu_synchronize_state(CPU(cpu));

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 4);
        return 0;
    }

    if (r2 & 0x1) {
        program_interrupt(env, PGM_SPECIFICATION, 4);
        return 0;
    }

    fh = env->regs[r2] >> 32;
    pcias = (env->regs[r2] >> 16) & 0xf;
    len = env->regs[r2] & 0xf;
    offset = env->regs[r2 + 1];

    pbdev = s390_pci_find_dev_by_fh(fh);
    if (!pbdev) {
        DPRINTF("pcilg no pci dev\n");
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    if (pbdev->lgstg_blocked) {
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED);
        return 0;
    }

    if (pcias < 6) {
        if ((8 - (offset & 0x7)) < len) {
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }
        MemoryRegion *mr = pbdev->pdev->io_regions[pcias].memory;
        io_mem_read(mr, offset, &data, len);
    } else if (pcias == 15) {
        if ((4 - (offset & 0x3)) < len) {
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }
        data = pci_host_config_read_common(
                   pbdev->pdev, offset, pci_config_size(pbdev->pdev), len);

        switch (len) {
        case 1:
            break;
        case 2:
            data = bswap16(data);
            break;
        case 4:
            data = bswap32(data);
            break;
        case 8:
            data = bswap64(data);
            break;
        default:
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }
    } else {
        DPRINTF("invalid space\n");
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_INVAL_AS);
        return 0;
    }

    env->regs[r1] = data;
    setcc(cpu, ZPCI_PCI_LS_OK);
    return 0;
}

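/*
 * For a 4-byte write to the message data word of an MSI-X table entry,
 * OR the function's fid (shifted above the vector bits) into the value
 * being written, so the emulated interrupt path can identify the
 * originating zPCI function.
 */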
static void update_msix_table_msg_data(S390PCIBusDevice *pbdev, uint64_t offset,
                                       uint64_t *data, uint8_t len)
{
    uint32_t val;
    uint8_t *msg_data;

    if (offset % PCI_MSIX_ENTRY_SIZE != 8) {
        return;
    }

    if (len != 4) {
        DPRINTF("access msix table msg data but len is %d\n", len);
        return;
    }

    msg_data = (uint8_t *)data - offset % PCI_MSIX_ENTRY_SIZE +
               PCI_MSIX_ENTRY_VECTOR_CTRL;
    val = pci_get_long(msg_data) | (pbdev->fid << ZPCI_MSI_VEC_BITS);
    pci_set_long(msg_data, val);
    DPRINTF("update msix msg_data to 0x%" PRIx64 "\n", *data);
}

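/*
 * Return 1 if the access targets this function's MSI-X table, i.e. the
 * BAR matches the table BAR and the offset falls within the table.
 */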
static int trap_msix(S390PCIBusDevice *pbdev, uint64_t offset, uint8_t pcias)
{
    if (pbdev->msix.available && pbdev->msix.table_bar == pcias &&
        offset >= pbdev->msix.table_offset &&
        offset <= pbdev->msix.table_offset +
        (pbdev->msix.entries - 1) * PCI_MSIX_ENTRY_SIZE) {
        return 1;
    } else {
        return 0;
    }
}

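/*
 * PCI Store (PCISTG): the write counterpart of pcilg_service_call.
 * Stores that hit the MSI-X table are redirected to the msix_table_mmio
 * region after the message data has been patched with the function id.
 */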
int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
{
    CPUS390XState *env = &cpu->env;
    uint64_t offset, data;
    S390PCIBusDevice *pbdev;
    uint8_t len;
    uint32_t fh;
    uint8_t pcias;

    cpu_synchronize_state(CPU(cpu));

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 4);
        return 0;
    }

    if (r2 & 0x1) {
        program_interrupt(env, PGM_SPECIFICATION, 4);
        return 0;
    }

    fh = env->regs[r2] >> 32;
    pcias = (env->regs[r2] >> 16) & 0xf;
    len = env->regs[r2] & 0xf;
    offset = env->regs[r2 + 1];

    pbdev = s390_pci_find_dev_by_fh(fh);
    if (!pbdev) {
        DPRINTF("pcistg no pci dev\n");
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    if (pbdev->lgstg_blocked) {
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED);
        return 0;
    }

    data = env->regs[r1];
    if (pcias < 6) {
        if ((8 - (offset & 0x7)) < len) {
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }
        MemoryRegion *mr;
        if (trap_msix(pbdev, offset, pcias)) {
            offset = offset - pbdev->msix.table_offset;
            mr = &pbdev->pdev->msix_table_mmio;
            update_msix_table_msg_data(pbdev, offset, &data, len);
        } else {
            mr = pbdev->pdev->io_regions[pcias].memory;
        }

        io_mem_write(mr, offset, data, len);
    } else if (pcias == 15) {
        if ((4 - (offset & 0x3)) < len) {
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }
        switch (len) {
        case 1:
            break;
        case 2:
            data = bswap16(data);
            break;
        case 4:
            data = bswap32(data);
            break;
        case 8:
            data = bswap64(data);
            break;
        default:
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }

        pci_host_config_write_common(pbdev->pdev, offset,
                                     pci_config_size(pbdev->pdev),
                                     data, len);
    } else {
        DPRINTF("pcistg invalid space\n");
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_INVAL_AS);
        return 0;
    }

    setcc(cpu, ZPCI_PCI_LS_OK);
    return 0;
}

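/*
 * Refresh PCI Translations (RPCIT): translate the guest DMA address in
 * regs[r2] through the device's IOMMU region and notify the IOMMU
 * notifiers about the mapping for the range given in regs[r2 + 1].
 */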
int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
{
    CPUS390XState *env = &cpu->env;
    uint32_t fh;
    S390PCIBusDevice *pbdev;
    ram_addr_t size;
    IOMMUTLBEntry entry;
    MemoryRegion *mr;

    cpu_synchronize_state(CPU(cpu));

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 4);
        goto out;
    }

    if (r2 & 0x1) {
        program_interrupt(env, PGM_SPECIFICATION, 4);
        goto out;
    }

    fh = env->regs[r1] >> 32;
    size = env->regs[r2 + 1];

    pbdev = s390_pci_find_dev_by_fh(fh);

    if (!pbdev) {
        DPRINTF("rpcit no pci dev\n");
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        goto out;
    }

    mr = pci_device_iommu_address_space(pbdev->pdev)->root;
    entry = mr->iommu_ops->translate(mr, env->regs[r2], 0);

    if (!entry.translated_addr) {
        setcc(cpu, ZPCI_PCI_LS_ERR);
        goto out;
    }

    entry.addr_mask = size - 1;
    memory_region_notify_iommu(mr, entry);
    setcc(cpu, ZPCI_PCI_LS_OK);
out:
    return 0;
}

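/*
 * PCI Store Block (PCISTB): copy a 16/32/64/128 byte block from guest
 * memory at gaddr into the BAR selected by the address space field,
 * 8 bytes at a time, starting at the BAR offset in regs[r3].
 */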
int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr)
{
    CPUS390XState *env = &cpu->env;
    S390PCIBusDevice *pbdev;
    MemoryRegion *mr;
    int i;
    uint64_t val;
    uint32_t fh;
    uint8_t pcias;
    uint8_t len;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 6);
        return 0;
    }

    fh = env->regs[r1] >> 32;
    pcias = (env->regs[r1] >> 16) & 0xf;
    len = env->regs[r1] & 0xff;

    if (pcias > 5) {
        DPRINTF("pcistb invalid space\n");
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_PCI_ST_INVAL_AS);
        return 0;
    }

    switch (len) {
    case 16:
    case 32:
    case 64:
    case 128:
        break;
    default:
        program_interrupt(env, PGM_SPECIFICATION, 6);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(fh);
    if (!pbdev) {
        DPRINTF("pcistb no pci dev fh 0x%x\n", fh);
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    if (pbdev->lgstg_blocked) {
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_PCI_ST_BLOCKED);
        return 0;
    }

    mr = pbdev->pdev->io_regions[pcias].memory;
    if (!memory_region_access_valid(mr, env->regs[r3], len, true)) {
        program_interrupt(env, PGM_ADDRESSING, 6);
        return 0;
    }

    for (i = 0; i < len / 8; i++) {
        val = ldq_phys(&address_space_memory, gaddr + i * 8);
        io_mem_write(mr, env->regs[r3] + i * 8, val, 8);
    }

    setcc(cpu, ZPCI_PCI_LS_OK);
    return 0;
}

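/*
 * Register adapter interrupts for this function: create an io adapter
 * for the ISC taken from the FIB, map the summary (aisb) and vector
 * (aibv) indicator areas, and remember the routing data in the device.
 */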
static int reg_irqs(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib)
{
    int ret;
    S390FLICState *fs = s390_get_flic();
    S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);

    ret = css_register_io_adapter(S390_PCIPT_ADAPTER,
                                  FIB_DATA_ISC(ldl_p(&fib.data)), true, false,
                                  &pbdev->routes.adapter.adapter_id);
    assert(ret == 0);

    fsc->io_adapter_map(fs, pbdev->routes.adapter.adapter_id,
                        ldq_p(&fib.aisb), true);
    fsc->io_adapter_map(fs, pbdev->routes.adapter.adapter_id,
                        ldq_p(&fib.aibv), true);

    pbdev->routes.adapter.summary_addr = ldq_p(&fib.aisb);
    pbdev->routes.adapter.summary_offset = FIB_DATA_AISBO(ldl_p(&fib.data));
    pbdev->routes.adapter.ind_addr = ldq_p(&fib.aibv);
    pbdev->routes.adapter.ind_offset = FIB_DATA_AIBVO(ldl_p(&fib.data));
    pbdev->isc = FIB_DATA_ISC(ldl_p(&fib.data));
    pbdev->noi = FIB_DATA_NOI(ldl_p(&fib.data));
    pbdev->sum = FIB_DATA_SUM(ldl_p(&fib.data));

    DPRINTF("reg_irqs adapter id %d\n", pbdev->routes.adapter.adapter_id);
    return 0;
}

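/* Unmap the indicator area and clear the stored adapter interrupt routing. */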
static int dereg_irqs(S390PCIBusDevice *pbdev)
{
    S390FLICState *fs = s390_get_flic();
    S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);

    fsc->io_adapter_map(fs, pbdev->routes.adapter.adapter_id,
                        pbdev->routes.adapter.ind_addr, false);

    pbdev->routes.adapter.summary_addr = 0;
    pbdev->routes.adapter.summary_offset = 0;
    pbdev->routes.adapter.ind_addr = 0;
    pbdev->routes.adapter.ind_offset = 0;
    pbdev->isc = 0;
    pbdev->noi = 0;
    pbdev->sum = 0;

    DPRINTF("dereg_irqs adapter id %d\n", pbdev->routes.adapter.adapter_id);
    return 0;
}

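/*
 * Register the I/O address translation parameters (PCI base/limit and
 * guest IOTA) from the FIB.  Only designation type ZPCI_IOTA_RTTO with
 * translation enabled is accepted; everything else is rejected.
 */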
static int reg_ioat(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib)
{
    uint64_t pba = ldq_p(&fib.pba);
    uint64_t pal = ldq_p(&fib.pal);
    uint64_t g_iota = ldq_p(&fib.iota);
    uint8_t dt = (g_iota >> 2) & 0x7;
    uint8_t t = (g_iota >> 11) & 0x1;

    if (pba > pal || pba < ZPCI_SDMA_ADDR || pal > ZPCI_EDMA_ADDR) {
        program_interrupt(env, PGM_OPERAND, 6);
        return -EINVAL;
    }

    /* currently we only support designation type 1 with translation */
    if (!(dt == ZPCI_IOTA_RTTO && t)) {
        error_report("unsupported ioat dt %d t %d", dt, t);
        program_interrupt(env, PGM_OPERAND, 6);
        return -EINVAL;
    }

    pbdev->pba = pba;
    pbdev->pal = pal;
    pbdev->g_iota = g_iota;
    return 0;
}

static void dereg_ioat(S390PCIBusDevice *pbdev)
{
    pbdev->pba = 0;
    pbdev->pal = 0;
    pbdev->g_iota = 0;
}

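/*
 * Modify PCI Function Controls (MPCIFC): read the FIB from fiba and
 * apply the operation code from regs[r1] (register/deregister adapter
 * interrupts or IOAT parameters, reset error/blocked state, or set the
 * measurement block address).
 */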
int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba)
{
    CPUS390XState *env = &cpu->env;
    uint8_t oc;
    uint32_t fh;
    ZpciFib fib;
    S390PCIBusDevice *pbdev;
    uint64_t cc = ZPCI_PCI_LS_OK;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 6);
        return 0;
    }

    oc = env->regs[r1] & 0xff;
    fh = env->regs[r1] >> 32;

    if (fiba & 0x7) {
        program_interrupt(env, PGM_SPECIFICATION, 6);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(fh);
    if (!pbdev) {
        DPRINTF("mpcifc no pci dev fh 0x%x\n", fh);
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    cpu_physical_memory_read(fiba, (uint8_t *)&fib, sizeof(fib));

    switch (oc) {
    case ZPCI_MOD_FC_REG_INT:
        if (reg_irqs(env, pbdev, fib)) {
            cc = ZPCI_PCI_LS_ERR;
        }
        break;
    case ZPCI_MOD_FC_DEREG_INT:
        dereg_irqs(pbdev);
        break;
    case ZPCI_MOD_FC_REG_IOAT:
        if (reg_ioat(env, pbdev, fib)) {
            cc = ZPCI_PCI_LS_ERR;
        }
        break;
    case ZPCI_MOD_FC_DEREG_IOAT:
        dereg_ioat(pbdev);
        break;
    case ZPCI_MOD_FC_REREG_IOAT:
        dereg_ioat(pbdev);
        if (reg_ioat(env, pbdev, fib)) {
            cc = ZPCI_PCI_LS_ERR;
        }
        break;
    case ZPCI_MOD_FC_RESET_ERROR:
        pbdev->error_state = false;
        pbdev->lgstg_blocked = false;
        break;
    case ZPCI_MOD_FC_RESET_BLOCK:
        pbdev->lgstg_blocked = false;
        break;
    case ZPCI_MOD_FC_SET_MEASURE:
        pbdev->fmb_addr = ldq_p(&fib.fmb_addr);
        break;
    default:
        program_interrupt(&cpu->env, PGM_OPERAND, 6);
        cc = ZPCI_PCI_LS_ERR;
    }

    setcc(cpu, cc);
    return 0;
}

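/*
 * Store PCI Function Controls (STPCIFC): fill a FIB with the function's
 * current IOAT, interrupt and status information and store it at fiba.
 */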
int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba)
{
    CPUS390XState *env = &cpu->env;
    uint32_t fh;
    ZpciFib fib;
    S390PCIBusDevice *pbdev;
    uint32_t data;
    uint64_t cc = ZPCI_PCI_LS_OK;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 6);
        return 0;
    }

    fh = env->regs[r1] >> 32;

    if (fiba & 0x7) {
        program_interrupt(env, PGM_SPECIFICATION, 6);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(fh);
    if (!pbdev) {
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    memset(&fib, 0, sizeof(fib));
    stq_p(&fib.pba, pbdev->pba);
    stq_p(&fib.pal, pbdev->pal);
    stq_p(&fib.iota, pbdev->g_iota);
    stq_p(&fib.aibv, pbdev->routes.adapter.ind_addr);
    stq_p(&fib.aisb, pbdev->routes.adapter.summary_addr);
    stq_p(&fib.fmb_addr, pbdev->fmb_addr);

    data = ((uint32_t)pbdev->isc << 28) | ((uint32_t)pbdev->noi << 16) |
           ((uint32_t)pbdev->routes.adapter.ind_offset << 8) |
           ((uint32_t)pbdev->sum << 7) | pbdev->routes.adapter.summary_offset;
    stl_p(&fib.data, data);

    if (pbdev->fh >> ENABLE_BIT_OFFSET) {
        fib.fc |= 0x80;
    }

    if (pbdev->error_state) {
        fib.fc |= 0x40;
    }

    if (pbdev->lgstg_blocked) {
        fib.fc |= 0x20;
    }

    if (pbdev->g_iota) {
        fib.fc |= 0x10;
    }

    cpu_physical_memory_write(fiba, (uint8_t *)&fib, sizeof(fib));
    setcc(cpu, cc);
    return 0;
}