/*
 * Virtio MMIO bindings
 *
 * Copyright (c) 2011 Linaro Limited
 *
 * Author:
 *  Peter Maydell <peter.maydell@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "hw/sysbus.h"
#include "hw/virtio/virtio.h"
#include "qemu/host-utils.h"
#include "sysemu/kvm.h"
#include "hw/virtio/virtio-bus.h"
#include "qemu/error-report.h"

/* #define DEBUG_VIRTIO_MMIO */

#ifdef DEBUG_VIRTIO_MMIO

#define DPRINTF(fmt, ...) \
do { printf("virtio_mmio: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) do {} while (0)
#endif

/* QOM macros */
/* virtio-mmio-bus */
#define TYPE_VIRTIO_MMIO_BUS "virtio-mmio-bus"
#define VIRTIO_MMIO_BUS(obj) \
        OBJECT_CHECK(VirtioBusState, (obj), TYPE_VIRTIO_MMIO_BUS)
#define VIRTIO_MMIO_BUS_GET_CLASS(obj) \
        OBJECT_GET_CLASS(VirtioBusClass, (obj), TYPE_VIRTIO_MMIO_BUS)
#define VIRTIO_MMIO_BUS_CLASS(klass) \
        OBJECT_CLASS_CHECK(VirtioBusClass, (klass), TYPE_VIRTIO_MMIO_BUS)

/* virtio-mmio */
#define TYPE_VIRTIO_MMIO "virtio-mmio"
#define VIRTIO_MMIO(obj) \
        OBJECT_CHECK(VirtIOMMIOProxy, (obj), TYPE_VIRTIO_MMIO)

/* Memory mapped register offsets */
#define VIRTIO_MMIO_MAGIC 0x0
#define VIRTIO_MMIO_VERSION 0x4
#define VIRTIO_MMIO_DEVICEID 0x8
#define VIRTIO_MMIO_VENDORID 0xc
#define VIRTIO_MMIO_HOSTFEATURES 0x10
#define VIRTIO_MMIO_HOSTFEATURESSEL 0x14
#define VIRTIO_MMIO_GUESTFEATURES 0x20
#define VIRTIO_MMIO_GUESTFEATURESSEL 0x24
#define VIRTIO_MMIO_GUESTPAGESIZE 0x28
#define VIRTIO_MMIO_QUEUESEL 0x30
#define VIRTIO_MMIO_QUEUENUMMAX 0x34
#define VIRTIO_MMIO_QUEUENUM 0x38
#define VIRTIO_MMIO_QUEUEALIGN 0x3c
#define VIRTIO_MMIO_QUEUEPFN 0x40
#define VIRTIO_MMIO_QUEUENOTIFY 0x50
#define VIRTIO_MMIO_INTERRUPTSTATUS 0x60
#define VIRTIO_MMIO_INTERRUPTACK 0x64
#define VIRTIO_MMIO_STATUS 0x70
/* Device specific config space starts here */
#define VIRTIO_MMIO_CONFIG 0x100

#define VIRT_MAGIC 0x74726976 /* 'virt' */
#define VIRT_VERSION 1
#define VIRT_VENDOR 0x554D4551 /* 'QEMU' */

typedef struct {
    /* Generic */
    SysBusDevice parent_obj;
    MemoryRegion iomem;
    qemu_irq irq;
    /* Guest accessible state needing migration and reset */
    uint32_t host_features_sel;
    uint32_t guest_features_sel;
    uint32_t guest_page_shift;
    /* virtio-bus */
    VirtioBusState bus;
    bool ioeventfd_disabled;
    bool ioeventfd_started;
} VirtIOMMIOProxy;

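/*
 * Wire up (assign) or tear down the per-virtqueue ioeventfd: when assigned,
 * a 32-bit guest write of queue index 'n' to the QUEUENOTIFY register
 * signals the queue's host notifier directly, so the kick does not have to
 * go through virtio_mmio_write().
 */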
static int virtio_mmio_set_host_notifier_internal(VirtIOMMIOProxy *proxy,
                                                  int n, bool assign,
                                                  bool set_handler)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
    int r = 0;

    if (assign) {
        r = event_notifier_init(notifier, 1);
        if (r < 0) {
            error_report("%s: unable to init event notifier: %d",
                         __func__, r);
            return r;
        }
        virtio_queue_set_host_notifier_fd_handler(vq, true, set_handler);
        memory_region_add_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUENOTIFY, 4,
                                  true, n, notifier);
    } else {
        memory_region_del_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUENOTIFY, 4,
                                  true, n, notifier);
        virtio_queue_set_host_notifier_fd_handler(vq, false, false);
        event_notifier_cleanup(notifier);
    }
    return r;
}

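/*
 * Arm ioeventfds for every configured virtqueue. This runs when the guest
 * sets DRIVER_OK; if any queue fails to get a notifier, the ones already
 * assigned are undone and we fall back to handling kicks in userspace.
 */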
static void virtio_mmio_start_ioeventfd(VirtIOMMIOProxy *proxy)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    int n, r;

    if (!kvm_eventfds_enabled() ||
        proxy->ioeventfd_disabled ||
        proxy->ioeventfd_started) {
        return;
    }

    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }

        r = virtio_mmio_set_host_notifier_internal(proxy, n, true, true);
        if (r < 0) {
            goto assign_error;
        }
    }
    proxy->ioeventfd_started = true;
    return;

assign_error:
    while (--n >= 0) {
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }

        r = virtio_mmio_set_host_notifier_internal(proxy, n, false, false);
        assert(r >= 0);
    }
    proxy->ioeventfd_started = false;
    error_report("%s: failed. Falling back to userspace (slower).", __func__);
}

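/*
 * Disarm all ioeventfds, e.g. on reset or when the guest clears DRIVER_OK,
 * so queue kicks go back through the MMIO write handler.
 */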
static void virtio_mmio_stop_ioeventfd(VirtIOMMIOProxy *proxy)
{
    int r;
    int n;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (!proxy->ioeventfd_started) {
        return;
    }

    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }

        r = virtio_mmio_set_host_notifier_internal(proxy, n, false, false);
        assert(r >= 0);
    }
    proxy->ioeventfd_started = false;
}

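/*
 * Guest read of a transport register or, beyond VIRTIO_MMIO_CONFIG, of the
 * device-specific config space. Register accesses must be 32 bits wide;
 * config space accesses may be 1, 2 or 4 bytes.
 */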
static uint64_t virtio_mmio_read(void *opaque, hwaddr offset, unsigned size)
{
    VirtIOMMIOProxy *proxy = (VirtIOMMIOProxy *)opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    DPRINTF("virtio_mmio_read offset 0x%x\n", (int)offset);

    if (!vdev) {
        /* If no backend is present, we treat most registers as
         * read-as-zero, except for the magic number, version and
         * vendor ID. This is not strictly sanctioned by the virtio
         * spec, but it allows us to provide transports with no backend
         * plugged in which don't confuse Linux's virtio code: the
         * probe won't complain about the bad magic number, but the
         * device ID of zero means no backend will claim it.
         */
        switch (offset) {
        case VIRTIO_MMIO_MAGIC:
            return VIRT_MAGIC;
        case VIRTIO_MMIO_VERSION:
            return VIRT_VERSION;
        case VIRTIO_MMIO_VENDORID:
            return VIRT_VENDOR;
        default:
            return 0;
        }
    }

    if (offset >= VIRTIO_MMIO_CONFIG) {
        offset -= VIRTIO_MMIO_CONFIG;
        switch (size) {
        case 1:
            return virtio_config_readb(vdev, offset);
        case 2:
            return virtio_config_readw(vdev, offset);
        case 4:
            return virtio_config_readl(vdev, offset);
        default:
            abort();
        }
    }
    if (size != 4) {
        DPRINTF("wrong size access to register!\n");
        return 0;
    }
    switch (offset) {
    case VIRTIO_MMIO_MAGIC:
        return VIRT_MAGIC;
    case VIRTIO_MMIO_VERSION:
        return VIRT_VERSION;
    case VIRTIO_MMIO_DEVICEID:
        return vdev->device_id;
    case VIRTIO_MMIO_VENDORID:
        return VIRT_VENDOR;
    case VIRTIO_MMIO_HOSTFEATURES:
        if (proxy->host_features_sel) {
            return 0;
        }
        return vdev->host_features;
    case VIRTIO_MMIO_QUEUENUMMAX:
        if (!virtio_queue_get_num(vdev, vdev->queue_sel)) {
            return 0;
        }
        return VIRTQUEUE_MAX_SIZE;
    case VIRTIO_MMIO_QUEUEPFN:
        return virtio_queue_get_addr(vdev, vdev->queue_sel)
            >> proxy->guest_page_shift;
    case VIRTIO_MMIO_INTERRUPTSTATUS:
        return vdev->isr;
    case VIRTIO_MMIO_STATUS:
        return vdev->status;
    case VIRTIO_MMIO_HOSTFEATURESSEL:
    case VIRTIO_MMIO_GUESTFEATURES:
    case VIRTIO_MMIO_GUESTFEATURESSEL:
    case VIRTIO_MMIO_GUESTPAGESIZE:
    case VIRTIO_MMIO_QUEUESEL:
    case VIRTIO_MMIO_QUEUENUM:
    case VIRTIO_MMIO_QUEUEALIGN:
    case VIRTIO_MMIO_QUEUENOTIFY:
    case VIRTIO_MMIO_INTERRUPTACK:
        DPRINTF("read of write-only register\n");
        return 0;
    default:
        DPRINTF("bad register offset\n");
        return 0;
    }
    return 0;
}

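/*
 * Guest write to a transport register or, beyond VIRTIO_MMIO_CONFIG, to the
 * device-specific config space. With no backend plugged in, all writes are
 * silently ignored.
 */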
static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value,
                              unsigned size)
{
    VirtIOMMIOProxy *proxy = (VirtIOMMIOProxy *)opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    DPRINTF("virtio_mmio_write offset 0x%x value 0x%" PRIx64 "\n",
            (int)offset, value);

    if (!vdev) {
        /* If no backend is present, we just make all registers
         * write-ignored. This allows us to provide transports with
         * no backend plugged in.
         */
        return;
    }

    if (offset >= VIRTIO_MMIO_CONFIG) {
        offset -= VIRTIO_MMIO_CONFIG;
        switch (size) {
        case 1:
            virtio_config_writeb(vdev, offset, value);
            break;
        case 2:
            virtio_config_writew(vdev, offset, value);
            break;
        case 4:
            virtio_config_writel(vdev, offset, value);
            break;
        default:
            abort();
        }
        return;
    }
    if (size != 4) {
        DPRINTF("wrong size access to register!\n");
        return;
    }
    switch (offset) {
    case VIRTIO_MMIO_HOSTFEATURESSEL:
        proxy->host_features_sel = value;
        break;
    case VIRTIO_MMIO_GUESTFEATURES:
        if (!proxy->guest_features_sel) {
            virtio_set_features(vdev, value);
        }
        break;
    case VIRTIO_MMIO_GUESTFEATURESSEL:
        proxy->guest_features_sel = value;
        break;
    case VIRTIO_MMIO_GUESTPAGESIZE:
        proxy->guest_page_shift = ctz32(value);
        if (proxy->guest_page_shift > 31) {
            proxy->guest_page_shift = 0;
        }
        DPRINTF("guest page size %" PRIx64 " shift %d\n", value,
                proxy->guest_page_shift);
        break;
    case VIRTIO_MMIO_QUEUESEL:
        if (value < VIRTIO_QUEUE_MAX) {
            vdev->queue_sel = value;
        }
        break;
    case VIRTIO_MMIO_QUEUENUM:
        DPRINTF("mmio_queue write %d max %d\n", (int)value, VIRTQUEUE_MAX_SIZE);
        virtio_queue_set_num(vdev, vdev->queue_sel, value);
        /* Note: only call this function for legacy devices */
        virtio_queue_update_rings(vdev, vdev->queue_sel);
        break;
    case VIRTIO_MMIO_QUEUEALIGN:
        /* Note: this is only valid for legacy devices */
        virtio_queue_set_align(vdev, vdev->queue_sel, value);
        break;
    case VIRTIO_MMIO_QUEUEPFN:
        if (value == 0) {
            virtio_reset(vdev);
        } else {
            virtio_queue_set_addr(vdev, vdev->queue_sel,
                                  value << proxy->guest_page_shift);
        }
        break;
    case VIRTIO_MMIO_QUEUENOTIFY:
        if (value < VIRTIO_QUEUE_MAX) {
            virtio_queue_notify(vdev, value);
        }
        break;
    case VIRTIO_MMIO_INTERRUPTACK:
        vdev->isr &= ~value;
        virtio_update_irq(vdev);
        break;
    case VIRTIO_MMIO_STATUS:
        if (!(value & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_mmio_stop_ioeventfd(proxy);
        }

        virtio_set_status(vdev, value & 0xff);

        if (value & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_mmio_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_reset(vdev);
        }
        break;
    case VIRTIO_MMIO_MAGIC:
    case VIRTIO_MMIO_VERSION:
    case VIRTIO_MMIO_DEVICEID:
    case VIRTIO_MMIO_VENDORID:
    case VIRTIO_MMIO_HOSTFEATURES:
    case VIRTIO_MMIO_QUEUENUMMAX:
    case VIRTIO_MMIO_INTERRUPTSTATUS:
        DPRINTF("write to readonly register\n");
        break;

    default:
        DPRINTF("bad register offset\n");
    }
}

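/*
 * MemoryRegionOps backing the 0x200-byte legacy virtio-mmio register
 * window created in virtio_mmio_realizefn() below.
 */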
static const MemoryRegionOps virtio_mem_ops = {
    .read = virtio_mmio_read,
    .write = virtio_mmio_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

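/*
 * VirtioBusClass 'notify' hook: mirror the device's ISR onto the
 * transport's single sysbus IRQ line (level semantics: asserted while
 * any ISR bit is set).
 */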
static void virtio_mmio_update_irq(DeviceState *opaque, uint16_t vector)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    int level;

    if (!vdev) {
        return;
    }
    level = (vdev->isr != 0);
    DPRINTF("virtio_mmio setting IRQ %d\n", level);
    qemu_set_irq(proxy->irq, level);
}

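/*
 * Migration of the transport-specific state: the two feature-select
 * registers and the guest page shift are saved and restored alongside the
 * generic virtio device state.
 */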
static int virtio_mmio_load_config(DeviceState *opaque, QEMUFile *f)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    proxy->host_features_sel = qemu_get_be32(f);
    proxy->guest_features_sel = qemu_get_be32(f);
    proxy->guest_page_shift = qemu_get_be32(f);
    return 0;
}

static void virtio_mmio_save_config(DeviceState *opaque, QEMUFile *f)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    qemu_put_be32(f, proxy->host_features_sel);
    qemu_put_be32(f, proxy->guest_features_sel);
    qemu_put_be32(f, proxy->guest_page_shift);
}

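/*
 * Device reset: quiesce the ioeventfds first, then reset the virtio device
 * on the bus and the proxy's own register state.
 */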
static void virtio_mmio_reset(DeviceState *d)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);

    virtio_mmio_stop_ioeventfd(proxy);
    virtio_bus_reset(&proxy->bus);
    proxy->host_features_sel = 0;
    proxy->guest_features_sel = 0;
    proxy->guest_page_shift = 0;
}

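/*
 * Guest notifier plumbing, used by backends such as vhost: each virtqueue
 * gets an EventNotifier whose read side raises that queue's interrupt.
 * irqfd delivery is not used here (see the TODO below).
 */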
static int virtio_mmio_set_guest_notifier(DeviceState *d, int n, bool assign,
                                          bool with_irqfd)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

    if (assign) {
        int r = event_notifier_init(notifier, 0);
        if (r < 0) {
            return r;
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
    } else {
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
        event_notifier_cleanup(notifier);
    }

    if (vdc->guest_notifier_mask) {
        vdc->guest_notifier_mask(vdev, n, !assign);
    }

    return 0;
}

static int virtio_mmio_set_guest_notifiers(DeviceState *d, int nvqs,
                                           bool assign)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    /* TODO: need to check if kvm-arm supports irqfd */
    bool with_irqfd = false;
    int r, n;

    nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX);

    for (n = 0; n < nvqs; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }

        r = virtio_mmio_set_guest_notifier(d, n, assign, with_irqfd);
        if (r < 0) {
            goto assign_error;
        }
    }

    return 0;

assign_error:
    /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
    assert(assign);
    while (--n >= 0) {
        virtio_mmio_set_guest_notifier(d, n, !assign, false);
    }
    return r;
}

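/*
 * VirtioBusClass 'set_host_notifier' hook: a backend taking over queue
 * kicks (e.g. vhost) disables the transport's own ioeventfd use first.
 */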
static int virtio_mmio_set_host_notifier(DeviceState *opaque, int n,
                                         bool assign)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    /* Stop using ioeventfd for virtqueue kick if the device starts using host
     * notifiers. This makes it easy to avoid stepping on each other's toes.
     */
    proxy->ioeventfd_disabled = assign;
    if (assign) {
        virtio_mmio_stop_ioeventfd(proxy);
    }
    /* We don't need to start the ioeventfd here: the backend currently only
     * stops on a status change away from ok, on reset, on vmstop and such.
     * If we do add code to start it here, we need to check the vmstate,
     * device state, etc. */
    return virtio_mmio_set_host_notifier_internal(proxy, n, assign, false);
}

/* virtio-mmio device */

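/*
 * Realize: create the proxy's virtio bus, its IRQ line and the MMIO
 * register window, so the board code can map and connect them.
 */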
static void virtio_mmio_realizefn(DeviceState *d, Error **errp)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    SysBusDevice *sbd = SYS_BUS_DEVICE(d);

    qbus_create_inplace(&proxy->bus, sizeof(proxy->bus), TYPE_VIRTIO_MMIO_BUS,
                        d, NULL);
    sysbus_init_irq(sbd, &proxy->irq);
    memory_region_init_io(&proxy->iomem, OBJECT(d), &virtio_mem_ops, proxy,
                          TYPE_VIRTIO_MMIO, 0x200);
    sysbus_init_mmio(sbd, &proxy->iomem);
}

static void virtio_mmio_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = virtio_mmio_realizefn;
    dc->reset = virtio_mmio_reset;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
}

static const TypeInfo virtio_mmio_info = {
    .name = TYPE_VIRTIO_MMIO,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(VirtIOMMIOProxy),
    .class_init = virtio_mmio_class_init,
};

/* virtio-mmio-bus. */

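/*
 * The virtio-mmio bus carries exactly one virtio backend and routes the
 * transport hooks (notify, config save/load, notifier setup) defined above.
 */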
static void virtio_mmio_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *bus_class = BUS_CLASS(klass);
    VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);

    k->notify = virtio_mmio_update_irq;
    k->save_config = virtio_mmio_save_config;
    k->load_config = virtio_mmio_load_config;
    k->set_host_notifier = virtio_mmio_set_host_notifier;
    k->set_guest_notifiers = virtio_mmio_set_guest_notifiers;
    k->has_variable_vring_alignment = true;
    bus_class->max_dev = 1;
}

static const TypeInfo virtio_mmio_bus_info = {
    .name = TYPE_VIRTIO_MMIO_BUS,
    .parent = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioBusState),
    .class_init = virtio_mmio_bus_class_init,
};

static void virtio_mmio_register_types(void)
{
    type_register_static(&virtio_mmio_bus_info);
    type_register_static(&virtio_mmio_info);
}

type_init(virtio_mmio_register_types)