Commit | Line | Data |
---|---|---|
967f97fa AL |
1 | /* |
2 | * Virtio Support | |
3 | * | |
4 | * Copyright IBM, Corp. 2007 | |
5 | * | |
6 | * Authors: | |
7 | * Anthony Liguori <[email protected]> | |
8 | * | |
9 | * This work is licensed under the terms of the GNU GPL, version 2. See | |
10 | * the COPYING file in the top-level directory. | |
11 | * | |
12 | */ | |
13 | ||
14 | #include <inttypes.h> | |
967f97fa | 15 | |
64979a4d | 16 | #include "trace.h" |
1de7afc9 | 17 | #include "qemu/error-report.h" |
0d09e41a | 18 | #include "hw/virtio/virtio.h" |
1de7afc9 | 19 | #include "qemu/atomic.h" |
0d09e41a | 20 | #include "hw/virtio/virtio-bus.h" |
967f97fa | 21 | |
6ce69d1c PM |
22 | /* |
23 | * The alignment to use between consumer and producer parts of vring. | |
24 | * x86 pagesize again. This is the default, used by transports like PCI | |
25 | * which don't provide a means for the guest to tell the host the alignment. | |
26 | */ | |
f46f15bc AL |
27 | #define VIRTIO_PCI_VRING_ALIGN 4096 |
28 | ||
967f97fa AL |
29 | typedef struct VRingDesc |
30 | { | |
31 | uint64_t addr; | |
32 | uint32_t len; | |
33 | uint16_t flags; | |
34 | uint16_t next; | |
35 | } VRingDesc; | |
36 | ||
37 | typedef struct VRingAvail | |
38 | { | |
39 | uint16_t flags; | |
40 | uint16_t idx; | |
41 | uint16_t ring[0]; | |
42 | } VRingAvail; | |
43 | ||
44 | typedef struct VRingUsedElem | |
45 | { | |
46 | uint32_t id; | |
47 | uint32_t len; | |
48 | } VRingUsedElem; | |
49 | ||
50 | typedef struct VRingUsed | |
51 | { | |
52 | uint16_t flags; | |
53 | uint16_t idx; | |
54 | VRingUsedElem ring[0]; | |
55 | } VRingUsed; | |
56 | ||
57 | typedef struct VRing | |
58 | { | |
59 | unsigned int num; | |
6ce69d1c | 60 | unsigned int align; |
a8170e5e AK |
61 | hwaddr desc; |
62 | hwaddr avail; | |
63 | hwaddr used; | |
967f97fa AL |
64 | } VRing; |
65 | ||
66 | struct VirtQueue | |
67 | { | |
68 | VRing vring; | |
a8170e5e | 69 | hwaddr pa; |
967f97fa | 70 | uint16_t last_avail_idx; |
bcbabae8 MT |
71 | /* Last used index value we have signalled on */ |
72 | uint16_t signalled_used; | |
73 | ||
74 | /* Whether the signalled_used value above is valid */ | |
75 | bool signalled_used_valid; | |
76 | ||
77 | /* Notification enabled? */ | |
78 | bool notification; | |
79 | ||
e78a2b42 JW |
80 | uint16_t queue_index; |
81 | ||
967f97fa | 82 | int inuse; |
bcbabae8 | 83 | |
7055e687 | 84 | uint16_t vector; |
967f97fa | 85 | void (*handle_output)(VirtIODevice *vdev, VirtQueue *vq); |
1cbdabe2 MT |
86 | VirtIODevice *vdev; |
87 | EventNotifier guest_notifier; | |
88 | EventNotifier host_notifier; | |
967f97fa AL |
89 | }; |
90 | ||
967f97fa | 91 | /* virt queue functions */ |
53c25cea | 92 | static void virtqueue_init(VirtQueue *vq) |
967f97fa | 93 | { |
a8170e5e | 94 | hwaddr pa = vq->pa; |
53c25cea | 95 | |
967f97fa AL |
96 | vq->vring.desc = pa; |
97 | vq->vring.avail = pa + vq->vring.num * sizeof(VRingDesc); | |
f46f15bc AL |
98 | vq->vring.used = vring_align(vq->vring.avail + |
99 | offsetof(VRingAvail, ring[vq->vring.num]), | |
6ce69d1c | 100 | vq->vring.align); |
967f97fa AL |
101 | } |
102 | ||
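/*
 * Layout example: with vring.num = 256, the default 4096-byte alignment
 * and a ring starting at a page-aligned guest address pa, the split
 * computed in virtqueue_init() above works out to:
 *
 *   desc  = pa                                  (256 * 16 = 4096 bytes)
 *   avail = pa + 4096                           (4 + 2 * 256 = 516 bytes)
 *   used  = vring_align(pa + 4612, 4096) = pa + 8192
 */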
a8170e5e | 103 | static inline uint64_t vring_desc_addr(hwaddr desc_pa, int i) |
967f97fa | 104 | { |
a8170e5e | 105 | hwaddr pa; |
5774cf98 | 106 | pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, addr); |
967f97fa AL |
107 | return ldq_phys(pa); |
108 | } | |
109 | ||
a8170e5e | 110 | static inline uint32_t vring_desc_len(hwaddr desc_pa, int i) |
967f97fa | 111 | { |
a8170e5e | 112 | hwaddr pa; |
5774cf98 | 113 | pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, len); |
967f97fa AL |
114 | return ldl_phys(pa); |
115 | } | |
116 | ||
a8170e5e | 117 | static inline uint16_t vring_desc_flags(hwaddr desc_pa, int i) |
967f97fa | 118 | { |
a8170e5e | 119 | hwaddr pa; |
5774cf98 | 120 | pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, flags); |
967f97fa AL |
121 | return lduw_phys(pa); |
122 | } | |
123 | ||
a8170e5e | 124 | static inline uint16_t vring_desc_next(hwaddr desc_pa, int i) |
967f97fa | 125 | { |
a8170e5e | 126 | hwaddr pa; |
5774cf98 | 127 | pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, next); |
967f97fa AL |
128 | return lduw_phys(pa); |
129 | } | |
130 | ||
131 | static inline uint16_t vring_avail_flags(VirtQueue *vq) | |
132 | { | |
a8170e5e | 133 | hwaddr pa; |
967f97fa AL |
134 | pa = vq->vring.avail + offsetof(VRingAvail, flags); |
135 | return lduw_phys(pa); | |
136 | } | |
137 | ||
138 | static inline uint16_t vring_avail_idx(VirtQueue *vq) | |
139 | { | |
a8170e5e | 140 | hwaddr pa; |
967f97fa AL |
141 | pa = vq->vring.avail + offsetof(VRingAvail, idx); |
142 | return lduw_phys(pa); | |
143 | } | |
144 | ||
145 | static inline uint16_t vring_avail_ring(VirtQueue *vq, int i) | |
146 | { | |
a8170e5e | 147 | hwaddr pa; |
967f97fa AL |
148 | pa = vq->vring.avail + offsetof(VRingAvail, ring[i]); |
149 | return lduw_phys(pa); | |
150 | } | |
151 | ||
bcbabae8 MT |
152 | static inline uint16_t vring_used_event(VirtQueue *vq) |
153 | { | |
154 | return vring_avail_ring(vq, vq->vring.num); | |
155 | } | |
156 | ||
967f97fa AL |
157 | static inline void vring_used_ring_id(VirtQueue *vq, int i, uint32_t val) |
158 | { | |
a8170e5e | 159 | hwaddr pa; |
967f97fa AL |
160 | pa = vq->vring.used + offsetof(VRingUsed, ring[i].id); |
161 | stl_phys(pa, val); | |
162 | } | |
163 | ||
164 | static inline void vring_used_ring_len(VirtQueue *vq, int i, uint32_t val) | |
165 | { | |
a8170e5e | 166 | hwaddr pa; |
967f97fa AL |
167 | pa = vq->vring.used + offsetof(VRingUsed, ring[i].len); |
168 | stl_phys(pa, val); | |
169 | } | |
170 | ||
171 | static uint16_t vring_used_idx(VirtQueue *vq) | |
172 | { | |
a8170e5e | 173 | hwaddr pa; |
967f97fa AL |
174 | pa = vq->vring.used + offsetof(VRingUsed, idx); |
175 | return lduw_phys(pa); | |
176 | } | |
177 | ||
bcbabae8 | 178 | static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val) |
967f97fa | 179 | { |
a8170e5e | 180 | hwaddr pa; |
967f97fa | 181 | pa = vq->vring.used + offsetof(VRingUsed, idx); |
bcbabae8 | 182 | stw_phys(pa, val); |
967f97fa AL |
183 | } |
184 | ||
185 | static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask) | |
186 | { | |
a8170e5e | 187 | hwaddr pa; |
967f97fa AL |
188 | pa = vq->vring.used + offsetof(VRingUsed, flags); |
189 | stw_phys(pa, lduw_phys(pa) | mask); | |
190 | } | |
191 | ||
192 | static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask) | |
193 | { | |
a8170e5e | 194 | hwaddr pa; |
967f97fa AL |
195 | pa = vq->vring.used + offsetof(VRingUsed, flags); |
196 | stw_phys(pa, lduw_phys(pa) & ~mask); | |
197 | } | |
198 | ||
bcbabae8 MT |
199 | static inline void vring_avail_event(VirtQueue *vq, uint16_t val) |
200 | { | |
a8170e5e | 201 | hwaddr pa; |
bcbabae8 MT |
202 | if (!vq->notification) { |
203 | return; | |
204 | } | |
205 | pa = vq->vring.used + offsetof(VRingUsed, ring[vq->vring.num]); | |
206 | stw_phys(pa, val); | |
207 | } | |
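/*
 * Note on the event-index layout (VIRTIO_RING_F_EVENT_IDX): the guest's
 * used_event counter is the extra uint16_t that follows avail->ring[num],
 * which is why vring_used_event() above reads vring_avail_ring(vq, num);
 * the host's avail_event counter written here follows used->ring[num].
 * Neither field appears in the VRingAvail/VRingUsed struct declarations.
 */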
208 | ||
967f97fa AL |
209 | void virtio_queue_set_notification(VirtQueue *vq, int enable) |
210 | { | |
bcbabae8 MT |
211 | vq->notification = enable; |
212 | if (vq->vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX)) { | |
213 | vring_avail_event(vq, vring_avail_idx(vq)); | |
214 | } else if (enable) { | |
967f97fa | 215 | vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY); |
bcbabae8 | 216 | } else { |
967f97fa | 217 | vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY); |
bcbabae8 | 218 | } |
92045d80 MT |
219 | if (enable) { |
220 | /* Expose avail event/used flags before caller checks the avail idx. */ | |
221 | smp_mb(); | |
222 | } | |
967f97fa AL |
223 | } |
224 | ||
225 | int virtio_queue_ready(VirtQueue *vq) | |
226 | { | |
227 | return vq->vring.avail != 0; | |
228 | } | |
229 | ||
230 | int virtio_queue_empty(VirtQueue *vq) | |
231 | { | |
232 | return vring_avail_idx(vq) == vq->last_avail_idx; | |
233 | } | |
234 | ||
235 | void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem, | |
236 | unsigned int len, unsigned int idx) | |
237 | { | |
238 | unsigned int offset; | |
239 | int i; | |
240 | ||
64979a4d SH |
241 | trace_virtqueue_fill(vq, elem, len, idx); |
242 | ||
967f97fa AL |
243 | offset = 0; |
244 | for (i = 0; i < elem->in_num; i++) { | |
245 | size_t size = MIN(len - offset, elem->in_sg[i].iov_len); | |
246 | ||
26b258e1 AL |
247 | cpu_physical_memory_unmap(elem->in_sg[i].iov_base, |
248 | elem->in_sg[i].iov_len, | |
249 | 1, size); | |
967f97fa | 250 | |
0cea71a2 | 251 | offset += size; |
967f97fa AL |
252 | } |
253 | ||
26b258e1 AL |
254 | for (i = 0; i < elem->out_num; i++) |
255 | cpu_physical_memory_unmap(elem->out_sg[i].iov_base, | |
256 | elem->out_sg[i].iov_len, | |
257 | 0, elem->out_sg[i].iov_len); | |
258 | ||
967f97fa AL |
259 | idx = (idx + vring_used_idx(vq)) % vq->vring.num; |
260 | ||
261 | /* Get a pointer to the next entry in the used ring. */ | |
262 | vring_used_ring_id(vq, idx, elem->index); | |
263 | vring_used_ring_len(vq, idx, len); | |
264 | } | |
265 | ||
266 | void virtqueue_flush(VirtQueue *vq, unsigned int count) | |
267 | { | |
bcbabae8 | 268 | uint16_t old, new; |
967f97fa | 269 | /* Make sure buffer is written before we update index. */ |
b90d2f35 | 270 | smp_wmb(); |
64979a4d | 271 | trace_virtqueue_flush(vq, count); |
bcbabae8 MT |
272 | old = vring_used_idx(vq); |
273 | new = old + count; | |
274 | vring_used_idx_set(vq, new); | |
967f97fa | 275 | vq->inuse -= count; |
bcbabae8 MT |
276 | if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old))) |
277 | vq->signalled_used_valid = false; | |
967f97fa AL |
278 | } |
279 | ||
280 | void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem, | |
281 | unsigned int len) | |
282 | { | |
283 | virtqueue_fill(vq, elem, len, 0); | |
284 | virtqueue_flush(vq, 1); | |
285 | } | |
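/*
 * Typical device-side flow (sketch; process_request() is an illustrative
 * placeholder, not part of this file): a queue's handle_output callback
 * pops requests, completes them, and notifies the guest once:
 *
 *   VirtQueueElement elem;
 *
 *   while (virtqueue_pop(vq, &elem)) {
 *       size_t written = process_request(&elem);
 *       virtqueue_push(vq, &elem, written);
 *   }
 *   virtio_notify(vdev, vq);
 */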
286 | ||
287 | static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx) | |
288 | { | |
289 | uint16_t num_heads = vring_avail_idx(vq) - idx; | |
290 | ||
291 | /* Check it isn't doing very strange things with descriptor numbers. */ | |
bb6834cf | 292 | if (num_heads > vq->vring.num) { |
ce67ed65 SH |
293 | error_report("Guest moved used index from %u to %u", |
294 | idx, vring_avail_idx(vq)); | |
bb6834cf AL |
295 | exit(1); |
296 | } | |
a821ce59 MT |
297 | /* On success, callers read a descriptor at vq->last_avail_idx. |
298 | * Make sure descriptor read does not bypass avail index read. */ | |
299 | if (num_heads) { | |
300 | smp_rmb(); | |
301 | } | |
967f97fa AL |
302 | |
303 | return num_heads; | |
304 | } | |
305 | ||
306 | static unsigned int virtqueue_get_head(VirtQueue *vq, unsigned int idx) | |
307 | { | |
308 | unsigned int head; | |
309 | ||
310 | /* Grab the next descriptor number they're advertising, and increment | |
311 | * the index we've seen. */ | |
312 | head = vring_avail_ring(vq, idx % vq->vring.num); | |
313 | ||
314 | /* If their number is silly, that's a fatal mistake. */ | |
bb6834cf | 315 | if (head >= vq->vring.num) { |
ce67ed65 | 316 | error_report("Guest says index %u is available", head); |
bb6834cf AL |
317 | exit(1); |
318 | } | |
967f97fa AL |
319 | |
320 | return head; | |
321 | } | |
322 | ||
a8170e5e | 323 | static unsigned virtqueue_next_desc(hwaddr desc_pa, |
5774cf98 | 324 | unsigned int i, unsigned int max) |
967f97fa AL |
325 | { |
326 | unsigned int next; | |
327 | ||
328 | /* If this descriptor says it doesn't chain, we're done. */ | |
5774cf98 MM |
329 | if (!(vring_desc_flags(desc_pa, i) & VRING_DESC_F_NEXT)) |
330 | return max; | |
967f97fa AL |
331 | |
332 | /* Check they're not leading us off end of descriptors. */ | |
5774cf98 | 333 | next = vring_desc_next(desc_pa, i); |
967f97fa | 334 | /* Make sure compiler knows to grab that: we don't want it changing! */ |
b90d2f35 | 335 | smp_wmb(); |
967f97fa | 336 | |
5774cf98 | 337 | if (next >= max) { |
ce67ed65 | 338 | error_report("Desc next is %u", next); |
bb6834cf AL |
339 | exit(1); |
340 | } | |
967f97fa AL |
341 | |
342 | return next; | |
343 | } | |
344 | ||
0d8d7690 | 345 | void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes, |
e1f7b481 MT |
346 | unsigned int *out_bytes, |
347 | unsigned max_in_bytes, unsigned max_out_bytes) | |
967f97fa | 348 | { |
efeea6d0 | 349 | unsigned int idx; |
385ce95d | 350 | unsigned int total_bufs, in_total, out_total; |
967f97fa AL |
351 | |
352 | idx = vq->last_avail_idx; | |
353 | ||
efeea6d0 | 354 | total_bufs = in_total = out_total = 0; |
967f97fa | 355 | while (virtqueue_num_heads(vq, idx)) { |
efeea6d0 | 356 | unsigned int max, num_bufs, indirect = 0; |
a8170e5e | 357 | hwaddr desc_pa; |
967f97fa AL |
358 | int i; |
359 | ||
efeea6d0 MM |
360 | max = vq->vring.num; |
361 | num_bufs = total_bufs; | |
967f97fa | 362 | i = virtqueue_get_head(vq, idx++); |
efeea6d0 MM |
363 | desc_pa = vq->vring.desc; |
364 | ||
365 | if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) { | |
366 | if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) { | |
ce67ed65 | 367 | error_report("Invalid size for indirect buffer table"); |
efeea6d0 MM |
368 | exit(1); |
369 | } | |
370 | ||
371 | /* If we've got too many, that implies a descriptor loop. */ | |
372 | if (num_bufs >= max) { | |
ce67ed65 | 373 | error_report("Looped descriptor"); |
efeea6d0 MM |
374 | exit(1); |
375 | } | |
376 | ||
377 | /* loop over the indirect descriptor table */ | |
378 | indirect = 1; | |
379 | max = vring_desc_len(desc_pa, i) / sizeof(VRingDesc); | |
380 | num_bufs = i = 0; | |
381 | desc_pa = vring_desc_addr(desc_pa, i); | |
382 | } | |
383 | ||
967f97fa AL |
384 | do { |
385 | /* If we've got too many, that implies a descriptor loop. */ | |
5774cf98 | 386 | if (++num_bufs > max) { |
ce67ed65 | 387 | error_report("Looped descriptor"); |
bb6834cf AL |
388 | exit(1); |
389 | } | |
967f97fa | 390 | |
5774cf98 | 391 | if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_WRITE) { |
0d8d7690 | 392 | in_total += vring_desc_len(desc_pa, i); |
967f97fa | 393 | } else { |
0d8d7690 | 394 | out_total += vring_desc_len(desc_pa, i); |
967f97fa | 395 | } |
e1f7b481 MT |
396 | if (in_total >= max_in_bytes && out_total >= max_out_bytes) { |
397 | goto done; | |
398 | } | |
5774cf98 | 399 | } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max); |
efeea6d0 MM |
400 | |
401 | if (!indirect) | |
402 | total_bufs = num_bufs; | |
403 | else | |
404 | total_bufs++; | |
967f97fa | 405 | } |
e1f7b481 | 406 | done: |
0d8d7690 AS |
407 | if (in_bytes) { |
408 | *in_bytes = in_total; | |
409 | } | |
410 | if (out_bytes) { | |
411 | *out_bytes = out_total; | |
412 | } | |
413 | } | |
967f97fa | 414 | |
0d8d7690 AS |
415 | int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes, |
416 | unsigned int out_bytes) | |
417 | { | |
418 | unsigned int in_total, out_total; | |
419 | ||
e1f7b481 MT |
420 | virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes); |
421 | return in_bytes <= in_total && out_bytes <= out_total; | |
967f97fa AL |
422 | } |
423 | ||
a8170e5e | 424 | void virtqueue_map_sg(struct iovec *sg, hwaddr *addr, |
42fb2e07 KW |
425 | size_t num_sg, int is_write) |
426 | { | |
427 | unsigned int i; | |
a8170e5e | 428 | hwaddr len; |
42fb2e07 KW |
429 | |
430 | for (i = 0; i < num_sg; i++) { | |
431 | len = sg[i].iov_len; | |
432 | sg[i].iov_base = cpu_physical_memory_map(addr[i], &len, is_write); | |
433 | if (sg[i].iov_base == NULL || len != sg[i].iov_len) { | |
ce67ed65 | 434 | error_report("virtio: trying to map MMIO memory"); |
42fb2e07 KW |
435 | exit(1); |
436 | } | |
437 | } | |
438 | } | |
439 | ||
967f97fa AL |
440 | int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem) |
441 | { | |
5774cf98 | 442 | unsigned int i, head, max; |
a8170e5e | 443 | hwaddr desc_pa = vq->vring.desc; |
967f97fa AL |
444 | |
445 | if (!virtqueue_num_heads(vq, vq->last_avail_idx)) | |
446 | return 0; | |
447 | ||
448 | /* When we start there are no input or output buffers. */ | |
449 | elem->out_num = elem->in_num = 0; | |
450 | ||
5774cf98 MM |
451 | max = vq->vring.num; |
452 | ||
967f97fa | 453 | i = head = virtqueue_get_head(vq, vq->last_avail_idx++); |
bcbabae8 MT |
454 | if (vq->vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX)) { |
455 | vring_avail_event(vq, vring_avail_idx(vq)); | |
456 | } | |
efeea6d0 MM |
457 | |
458 | if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) { | |
459 | if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) { | |
ce67ed65 | 460 | error_report("Invalid size for indirect buffer table"); |
efeea6d0 MM |
461 | exit(1); |
462 | } | |
463 | ||
464 | /* loop over the indirect descriptor table */ | |
465 | max = vring_desc_len(desc_pa, i) / sizeof(VRingDesc); | |
466 | desc_pa = vring_desc_addr(desc_pa, i); | |
467 | i = 0; | |
468 | } | |
469 | ||
42fb2e07 | 470 | /* Collect all the descriptors */ |
967f97fa AL |
471 | do { |
472 | struct iovec *sg; | |
473 | ||
5774cf98 | 474 | if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_WRITE) { |
c8eac1cf MT |
475 | if (elem->in_num >= ARRAY_SIZE(elem->in_sg)) { |
476 | error_report("Too many write descriptors in indirect table"); | |
477 | exit(1); | |
478 | } | |
5774cf98 | 479 | elem->in_addr[elem->in_num] = vring_desc_addr(desc_pa, i); |
967f97fa | 480 | sg = &elem->in_sg[elem->in_num++]; |
42fb2e07 | 481 | } else { |
c8eac1cf MT |
482 | if (elem->out_num >= ARRAY_SIZE(elem->out_sg)) { |
483 | error_report("Too many read descriptors in indirect table"); | |
484 | exit(1); | |
485 | } | |
42fb2e07 | 486 | elem->out_addr[elem->out_num] = vring_desc_addr(desc_pa, i); |
967f97fa | 487 | sg = &elem->out_sg[elem->out_num++]; |
42fb2e07 | 488 | } |
967f97fa | 489 | |
5774cf98 | 490 | sg->iov_len = vring_desc_len(desc_pa, i); |
967f97fa AL |
491 | |
492 | /* If we've got too many, that implies a descriptor loop. */ | |
5774cf98 | 493 | if ((elem->in_num + elem->out_num) > max) { |
ce67ed65 | 494 | error_report("Looped descriptor"); |
bb6834cf AL |
495 | exit(1); |
496 | } | |
5774cf98 | 497 | } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max); |
967f97fa | 498 | |
42fb2e07 KW |
499 | /* Now map what we have collected */ |
500 | virtqueue_map_sg(elem->in_sg, elem->in_addr, elem->in_num, 1); | |
501 | virtqueue_map_sg(elem->out_sg, elem->out_addr, elem->out_num, 0); | |
502 | ||
967f97fa AL |
503 | elem->index = head; |
504 | ||
505 | vq->inuse++; | |
506 | ||
64979a4d | 507 | trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num); |
967f97fa AL |
508 | return elem->in_num + elem->out_num; |
509 | } | |
510 | ||
511 | /* virtio device */ | |
7055e687 MT |
512 | static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector) |
513 | { | |
1c819449 FK |
514 | BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); |
515 | VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); | |
516 | ||
517 | if (k->notify) { | |
518 | k->notify(qbus->parent, vector); | |
7055e687 MT |
519 | } |
520 | } | |
967f97fa | 521 | |
53c25cea | 522 | void virtio_update_irq(VirtIODevice *vdev) |
967f97fa | 523 | { |
7055e687 | 524 | virtio_notify_vector(vdev, VIRTIO_NO_VECTOR); |
967f97fa AL |
525 | } |
526 | ||
4e1837f8 SH |
527 | void virtio_set_status(VirtIODevice *vdev, uint8_t val) |
528 | { | |
181103cd | 529 | VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); |
4e1837f8 SH |
530 | trace_virtio_set_status(vdev, val); |
531 | ||
181103cd FK |
532 | if (k->set_status) { |
533 | k->set_status(vdev, val); | |
4e1837f8 SH |
534 | } |
535 | vdev->status = val; | |
536 | } | |
537 | ||
53c25cea | 538 | void virtio_reset(void *opaque) |
967f97fa AL |
539 | { |
540 | VirtIODevice *vdev = opaque; | |
181103cd | 541 | VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); |
967f97fa AL |
542 | int i; |
543 | ||
e0c472d8 MT |
544 | virtio_set_status(vdev, 0); |
545 | ||
181103cd FK |
546 | if (k->reset) { |
547 | k->reset(vdev); | |
548 | } | |
967f97fa | 549 | |
704a76fc | 550 | vdev->guest_features = 0; |
967f97fa AL |
551 | vdev->queue_sel = 0; |
552 | vdev->status = 0; | |
553 | vdev->isr = 0; | |
7055e687 MT |
554 | vdev->config_vector = VIRTIO_NO_VECTOR; |
555 | virtio_notify_vector(vdev, vdev->config_vector); | |
967f97fa AL |
556 | |
557 | for(i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) { | |
558 | vdev->vq[i].vring.desc = 0; | |
559 | vdev->vq[i].vring.avail = 0; | |
560 | vdev->vq[i].vring.used = 0; | |
561 | vdev->vq[i].last_avail_idx = 0; | |
53c25cea | 562 | vdev->vq[i].pa = 0; |
7055e687 | 563 | vdev->vq[i].vector = VIRTIO_NO_VECTOR; |
bcbabae8 MT |
564 | vdev->vq[i].signalled_used = 0; |
565 | vdev->vq[i].signalled_used_valid = false; | |
566 | vdev->vq[i].notification = true; | |
967f97fa AL |
567 | } |
568 | } | |
569 | ||
53c25cea | 570 | uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr) |
967f97fa | 571 | { |
181103cd | 572 | VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); |
967f97fa AL |
573 | uint8_t val; |
574 | ||
5f5a1318 | 575 | if (addr + sizeof(val) > vdev->config_len) { |
967f97fa | 576 | return (uint32_t)-1; |
5f5a1318 JW |
577 | } |
578 | ||
579 | k->get_config(vdev, vdev->config); | |
967f97fa | 580 | |
06dbfc6f | 581 | val = ldub_p(vdev->config + addr); |
967f97fa AL |
582 | return val; |
583 | } | |
584 | ||
53c25cea | 585 | uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr) |
967f97fa | 586 | { |
181103cd | 587 | VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); |
967f97fa AL |
588 | uint16_t val; |
589 | ||
5f5a1318 | 590 | if (addr + sizeof(val) > vdev->config_len) { |
967f97fa | 591 | return (uint32_t)-1; |
5f5a1318 JW |
592 | } |
593 | ||
594 | k->get_config(vdev, vdev->config); | |
967f97fa | 595 | |
06dbfc6f | 596 | val = lduw_p(vdev->config + addr); |
967f97fa AL |
597 | return val; |
598 | } | |
599 | ||
53c25cea | 600 | uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr) |
967f97fa | 601 | { |
181103cd | 602 | VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); |
967f97fa AL |
603 | uint32_t val; |
604 | ||
5f5a1318 | 605 | if (addr + sizeof(val) > vdev->config_len) { |
967f97fa | 606 | return (uint32_t)-1; |
5f5a1318 JW |
607 | } |
608 | ||
609 | k->get_config(vdev, vdev->config); | |
967f97fa | 610 | |
06dbfc6f | 611 | val = ldl_p(vdev->config + addr); |
967f97fa AL |
612 | return val; |
613 | } | |
614 | ||
53c25cea | 615 | void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data) |
967f97fa | 616 | { |
181103cd | 617 | VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); |
967f97fa AL |
618 | uint8_t val = data; |
619 | ||
5f5a1318 | 620 | if (addr + sizeof(val) > vdev->config_len) { |
967f97fa | 621 | return; |
5f5a1318 | 622 | } |
967f97fa | 623 | |
06dbfc6f | 624 | stb_p(vdev->config + addr, val); |
967f97fa | 625 | |
181103cd FK |
626 | if (k->set_config) { |
627 | k->set_config(vdev, vdev->config); | |
628 | } | |
967f97fa AL |
629 | } |
630 | ||
53c25cea | 631 | void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data) |
967f97fa | 632 | { |
181103cd | 633 | VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); |
967f97fa AL |
634 | uint16_t val = data; |
635 | ||
5f5a1318 | 636 | if (addr + sizeof(val) > vdev->config_len) { |
967f97fa | 637 | return; |
5f5a1318 | 638 | } |
967f97fa | 639 | |
06dbfc6f | 640 | stw_p(vdev->config + addr, val); |
967f97fa | 641 | |
181103cd FK |
642 | if (k->set_config) { |
643 | k->set_config(vdev, vdev->config); | |
644 | } | |
967f97fa AL |
645 | } |
646 | ||
53c25cea | 647 | void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data) |
967f97fa | 648 | { |
181103cd | 649 | VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); |
967f97fa AL |
650 | uint32_t val = data; |
651 | ||
5f5a1318 | 652 | if (addr + sizeof(val) > vdev->config_len) { |
967f97fa | 653 | return; |
5f5a1318 | 654 | } |
967f97fa | 655 | |
06dbfc6f | 656 | stl_p(vdev->config + addr, val); |
967f97fa | 657 | |
181103cd FK |
658 | if (k->set_config) { |
659 | k->set_config(vdev, vdev->config); | |
660 | } | |
967f97fa AL |
661 | } |
662 | ||
a8170e5e | 663 | void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr) |
967f97fa | 664 | { |
7055e687 MT |
665 | vdev->vq[n].pa = addr; |
666 | virtqueue_init(&vdev->vq[n]); | |
53c25cea PB |
667 | } |
668 | ||
a8170e5e | 669 | hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n) |
53c25cea PB |
670 | { |
671 | return vdev->vq[n].pa; | |
672 | } | |
673 | ||
e63c0ba1 PM |
674 | void virtio_queue_set_num(VirtIODevice *vdev, int n, int num) |
675 | { | |
676 | if (num <= VIRTQUEUE_MAX_SIZE) { | |
677 | vdev->vq[n].vring.num = num; | |
678 | virtqueue_init(&vdev->vq[n]); | |
679 | } | |
680 | } | |
681 | ||
53c25cea PB |
682 | int virtio_queue_get_num(VirtIODevice *vdev, int n) |
683 | { | |
684 | return vdev->vq[n].vring.num; | |
685 | } | |
967f97fa | 686 | |
c80decdb PB |
687 | int virtio_queue_get_id(VirtQueue *vq) |
688 | { | |
689 | VirtIODevice *vdev = vq->vdev; | |
690 | assert(vq >= &vdev->vq[0] && vq < &vdev->vq[VIRTIO_PCI_QUEUE_MAX]); | |
691 | return vq - &vdev->vq[0]; | |
692 | } | |
693 | ||
6ce69d1c PM |
694 | void virtio_queue_set_align(VirtIODevice *vdev, int n, int align) |
695 | { | |
696 | BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); | |
697 | VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); | |
698 | ||
699 | /* Check that the transport told us it was going to do this | |
700 | * (so a buggy transport will immediately assert rather than | |
701 | * silently failing to migrate this state) | |
702 | */ | |
703 | assert(k->has_variable_vring_alignment); | |
704 | ||
705 | vdev->vq[n].vring.align = align; | |
706 | virtqueue_init(&vdev->vq[n]); | |
707 | } | |
708 | ||
25db9ebe SH |
709 | void virtio_queue_notify_vq(VirtQueue *vq) |
710 | { | |
711 | if (vq->vring.desc) { | |
712 | VirtIODevice *vdev = vq->vdev; | |
713 | trace_virtio_queue_notify(vdev, vq - vdev->vq, vq); | |
714 | vq->handle_output(vdev, vq); | |
715 | } | |
716 | } | |
717 | ||
53c25cea PB |
718 | void virtio_queue_notify(VirtIODevice *vdev, int n) |
719 | { | |
7157e2e2 | 720 | virtio_queue_notify_vq(&vdev->vq[n]); |
967f97fa AL |
721 | } |
722 | ||
7055e687 MT |
723 | uint16_t virtio_queue_vector(VirtIODevice *vdev, int n) |
724 | { | |
725 | return n < VIRTIO_PCI_QUEUE_MAX ? vdev->vq[n].vector : | |
726 | VIRTIO_NO_VECTOR; | |
727 | } | |
728 | ||
729 | void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector) | |
730 | { | |
731 | if (n < VIRTIO_PCI_QUEUE_MAX) | |
732 | vdev->vq[n].vector = vector; | |
733 | } | |
734 | ||
967f97fa AL |
735 | VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size, |
736 | void (*handle_output)(VirtIODevice *, VirtQueue *)) | |
737 | { | |
738 | int i; | |
739 | ||
740 | for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) { | |
741 | if (vdev->vq[i].vring.num == 0) | |
742 | break; | |
743 | } | |
744 | ||
745 | if (i == VIRTIO_PCI_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE) | |
746 | abort(); | |
747 | ||
748 | vdev->vq[i].vring.num = queue_size; | |
6ce69d1c | 749 | vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN; |
967f97fa AL |
750 | vdev->vq[i].handle_output = handle_output; |
751 | ||
752 | return &vdev->vq[i]; | |
753 | } | |
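/*
 * Typical use from a device's init/realize code (sketch; the queue size
 * and handler name are illustrative):
 *
 *   s->vq = virtio_add_queue(vdev, 128, virtio_foo_handle_output);
 *
 * The size must not exceed VIRTQUEUE_MAX_SIZE, and the handler runs when
 * the guest kicks the queue (see virtio_queue_notify_vq() below).
 */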
754 | ||
f23fd811 JW |
755 | void virtio_del_queue(VirtIODevice *vdev, int n) |
756 | { | |
757 | if (n < 0 || n >= VIRTIO_PCI_QUEUE_MAX) { | |
758 | abort(); | |
759 | } | |
760 | ||
761 | vdev->vq[n].vring.num = 0; | |
762 | } | |
763 | ||
1cbdabe2 MT |
764 | void virtio_irq(VirtQueue *vq) |
765 | { | |
64979a4d | 766 | trace_virtio_irq(vq); |
1cbdabe2 MT |
767 | vq->vdev->isr |= 0x01; |
768 | virtio_notify_vector(vq->vdev, vq->vector); | |
769 | } | |
770 | ||
bcbabae8 MT |
771 | /* Assuming a given event_idx value from the other side, if |
772 | * we have just incremented index from old to new, |
773 | * should we trigger an event? */ | |
774 | static inline int vring_need_event(uint16_t event, uint16_t new, uint16_t old) | |
967f97fa | 775 | { |
bcbabae8 MT |
776 | /* Note: Xen has similar logic for notification hold-off |
777 | * in include/xen/interface/io/ring.h with req_event and req_prod | |
778 | * corresponding to event_idx + 1 and new respectively. | |
779 | * Note also that req_event and req_prod in Xen start at 1, | |
780 | * event indexes in virtio start at 0. */ | |
781 | return (uint16_t)(new - event - 1) < (uint16_t)(new - old); | |
782 | } | |
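/*
 * Worked example: if old = 10 and new = 12, this update published used
 * entries 10 and 11, so a notification is due exactly when the other
 * side's event index is 10 or 11:
 *
 *   event = 10: (uint16_t)(12 - 10 - 1) = 1      < 2  -> notify
 *   event = 12: (uint16_t)(12 - 12 - 1) = 0xffff >= 2 -> skip
 *
 * The unsigned arithmetic keeps the window test correct across 16-bit
 * index wraparound.
 */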
783 | ||
784 | static bool vring_notify(VirtIODevice *vdev, VirtQueue *vq) | |
785 | { | |
786 | uint16_t old, new; | |
787 | bool v; | |
a281ebc1 MT |
788 | /* We need to expose used array entries before checking used event. */ |
789 | smp_mb(); | |
97b83deb | 790 | /* Always notify when queue is empty (if the feature was acknowledged) */ |
bcbabae8 MT |
791 | if (((vdev->guest_features & (1 << VIRTIO_F_NOTIFY_ON_EMPTY)) && |
792 | !vq->inuse && vring_avail_idx(vq) == vq->last_avail_idx)) { | |
793 | return true; | |
794 | } | |
795 | ||
796 | if (!(vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX))) { | |
797 | return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT); | |
798 | } | |
799 | ||
800 | v = vq->signalled_used_valid; | |
801 | vq->signalled_used_valid = true; | |
802 | old = vq->signalled_used; | |
803 | new = vq->signalled_used = vring_used_idx(vq); | |
804 | return !v || vring_need_event(vring_used_event(vq), new, old); | |
805 | } | |
806 | ||
807 | void virtio_notify(VirtIODevice *vdev, VirtQueue *vq) | |
808 | { | |
809 | if (!vring_notify(vdev, vq)) { | |
967f97fa | 810 | return; |
bcbabae8 | 811 | } |
967f97fa | 812 | |
64979a4d | 813 | trace_virtio_notify(vdev, vq); |
967f97fa | 814 | vdev->isr |= 0x01; |
7055e687 | 815 | virtio_notify_vector(vdev, vq->vector); |
967f97fa AL |
816 | } |
817 | ||
818 | void virtio_notify_config(VirtIODevice *vdev) | |
819 | { | |
7625162c AL |
820 | if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) |
821 | return; | |
822 | ||
967f97fa | 823 | vdev->isr |= 0x03; |
7055e687 | 824 | virtio_notify_vector(vdev, vdev->config_vector); |
967f97fa AL |
825 | } |
826 | ||
827 | void virtio_save(VirtIODevice *vdev, QEMUFile *f) | |
828 | { | |
1c819449 FK |
829 | BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); |
830 | VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); | |
967f97fa AL |
831 | int i; |
832 | ||
1c819449 FK |
833 | if (k->save_config) { |
834 | k->save_config(qbus->parent, f); | |
835 | } | |
967f97fa | 836 | |
967f97fa AL |
837 | qemu_put_8s(f, &vdev->status); |
838 | qemu_put_8s(f, &vdev->isr); | |
839 | qemu_put_be16s(f, &vdev->queue_sel); | |
704a76fc | 840 | qemu_put_be32s(f, &vdev->guest_features); |
967f97fa AL |
841 | qemu_put_be32(f, vdev->config_len); |
842 | qemu_put_buffer(f, vdev->config, vdev->config_len); | |
843 | ||
844 | for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) { | |
845 | if (vdev->vq[i].vring.num == 0) | |
846 | break; | |
847 | } | |
848 | ||
849 | qemu_put_be32(f, i); | |
850 | ||
851 | for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) { | |
852 | if (vdev->vq[i].vring.num == 0) | |
853 | break; | |
854 | ||
855 | qemu_put_be32(f, vdev->vq[i].vring.num); | |
6ce69d1c PM |
856 | if (k->has_variable_vring_alignment) { |
857 | qemu_put_be32(f, vdev->vq[i].vring.align); | |
858 | } | |
53c25cea | 859 | qemu_put_be64(f, vdev->vq[i].pa); |
967f97fa | 860 | qemu_put_be16s(f, &vdev->vq[i].last_avail_idx); |
1c819449 FK |
861 | if (k->save_queue) { |
862 | k->save_queue(qbus->parent, i, f); | |
863 | } | |
967f97fa AL |
864 | } |
865 | } | |
866 | ||
ad0c9332 PB |
867 | int virtio_set_features(VirtIODevice *vdev, uint32_t val) |
868 | { | |
1c819449 FK |
869 | BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); |
870 | VirtioBusClass *vbusk = VIRTIO_BUS_GET_CLASS(qbus); | |
181103cd | 871 | VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); |
1c819449 | 872 | uint32_t supported_features = vbusk->get_features(qbus->parent); |
ad0c9332 PB |
873 | bool bad = (val & ~supported_features) != 0; |
874 | ||
875 | val &= supported_features; | |
181103cd FK |
876 | if (k->set_features) { |
877 | k->set_features(vdev, val); | |
ad0c9332 PB |
878 | } |
879 | vdev->guest_features = val; | |
880 | return bad ? -1 : 0; | |
881 | } | |
882 | ||
ff24bd58 | 883 | int virtio_load(VirtIODevice *vdev, QEMUFile *f) |
967f97fa | 884 | { |
ff24bd58 | 885 | int num, i, ret; |
6d74ca5a | 886 | uint32_t features; |
ad0c9332 | 887 | uint32_t supported_features; |
1c819449 FK |
888 | BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); |
889 | VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); | |
967f97fa | 890 | |
1c819449 FK |
891 | if (k->load_config) { |
892 | ret = k->load_config(qbus->parent, f); | |
ff24bd58 MT |
893 | if (ret) |
894 | return ret; | |
895 | } | |
967f97fa | 896 | |
967f97fa AL |
897 | qemu_get_8s(f, &vdev->status); |
898 | qemu_get_8s(f, &vdev->isr); | |
899 | qemu_get_be16s(f, &vdev->queue_sel); | |
6d74ca5a | 900 | qemu_get_be32s(f, &features); |
ad0c9332 PB |
901 | |
902 | if (virtio_set_features(vdev, features) < 0) { | |
1c819449 | 903 | supported_features = k->get_features(qbus->parent); |
ce67ed65 SH |
904 | error_report("Features 0x%x unsupported. Allowed features: 0x%x", |
905 | features, supported_features); | |
6d74ca5a MT |
906 | return -1; |
907 | } | |
967f97fa AL |
908 | vdev->config_len = qemu_get_be32(f); |
909 | qemu_get_buffer(f, vdev->config, vdev->config_len); | |
910 | ||
911 | num = qemu_get_be32(f); | |
912 | ||
913 | for (i = 0; i < num; i++) { | |
914 | vdev->vq[i].vring.num = qemu_get_be32(f); | |
6ce69d1c PM |
915 | if (k->has_variable_vring_alignment) { |
916 | vdev->vq[i].vring.align = qemu_get_be32(f); | |
917 | } | |
53c25cea | 918 | vdev->vq[i].pa = qemu_get_be64(f); |
967f97fa | 919 | qemu_get_be16s(f, &vdev->vq[i].last_avail_idx); |
bcbabae8 MT |
920 | vdev->vq[i].signalled_used_valid = false; |
921 | vdev->vq[i].notification = true; | |
967f97fa | 922 | |
53c25cea | 923 | if (vdev->vq[i].pa) { |
1abeb5a6 | 924 | uint16_t nheads; |
53c25cea | 925 | virtqueue_init(&vdev->vq[i]); |
1abeb5a6 MT |
926 | nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx; |
927 | /* Check it isn't doing very strange things with descriptor numbers. */ | |
928 | if (nheads > vdev->vq[i].vring.num) { | |
929 | error_report("VQ %d size 0x%x Guest index 0x%x " | |
6daf194d | 930 | "inconsistent with Host index 0x%x: delta 0x%x", |
1abeb5a6 MT |
931 | i, vdev->vq[i].vring.num, |
932 | vring_avail_idx(&vdev->vq[i]), | |
933 | vdev->vq[i].last_avail_idx, nheads); | |
934 | return -1; | |
935 | } | |
936 | } else if (vdev->vq[i].last_avail_idx) { | |
937 | error_report("VQ %d address 0x0 " | |
6daf194d | 938 | "inconsistent with Host index 0x%x", |
1abeb5a6 MT |
939 | i, vdev->vq[i].last_avail_idx); |
940 | return -1; | |
258dc7c9 | 941 | } |
1c819449 FK |
942 | if (k->load_queue) { |
943 | ret = k->load_queue(qbus->parent, i, f); | |
ff24bd58 MT |
944 | if (ret) |
945 | return ret; | |
7055e687 | 946 | } |
967f97fa AL |
947 | } |
948 | ||
7055e687 | 949 | virtio_notify_vector(vdev, VIRTIO_NO_VECTOR); |
ff24bd58 | 950 | return 0; |
967f97fa AL |
951 | } |
952 | ||
6a1a8cc7 | 953 | void virtio_cleanup(VirtIODevice *vdev) |
b946a153 | 954 | { |
85cf2a8d | 955 | qemu_del_vm_change_state_handler(vdev->vmstate); |
6f79e06b | 956 | g_free(vdev->config); |
7267c094 | 957 | g_free(vdev->vq); |
8e05db92 FK |
958 | } |
959 | ||
1dfb4dd9 | 960 | static void virtio_vmstate_change(void *opaque, int running, RunState state) |
85cf2a8d MT |
961 | { |
962 | VirtIODevice *vdev = opaque; | |
1c819449 FK |
963 | BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); |
964 | VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); | |
85cf2a8d MT |
965 | bool backend_run = running && (vdev->status & VIRTIO_CONFIG_S_DRIVER_OK); |
966 | vdev->vm_running = running; | |
967 | ||
968 | if (backend_run) { | |
969 | virtio_set_status(vdev, vdev->status); | |
970 | } | |
971 | ||
1c819449 FK |
972 | if (k->vmstate_change) { |
973 | k->vmstate_change(qbus->parent, backend_run); | |
85cf2a8d MT |
974 | } |
975 | ||
976 | if (!backend_run) { | |
977 | virtio_set_status(vdev, vdev->status); | |
978 | } | |
979 | } | |
980 | ||
8e05db92 FK |
981 | void virtio_init(VirtIODevice *vdev, const char *name, |
982 | uint16_t device_id, size_t config_size) | |
967f97fa | 983 | { |
b8193adb | 984 | int i; |
53c25cea | 985 | vdev->device_id = device_id; |
967f97fa AL |
986 | vdev->status = 0; |
987 | vdev->isr = 0; | |
988 | vdev->queue_sel = 0; | |
7055e687 | 989 | vdev->config_vector = VIRTIO_NO_VECTOR; |
7267c094 | 990 | vdev->vq = g_malloc0(sizeof(VirtQueue) * VIRTIO_PCI_QUEUE_MAX); |
1354869c | 991 | vdev->vm_running = runstate_is_running(); |
8e05db92 | 992 | for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) { |
b8193adb | 993 | vdev->vq[i].vector = VIRTIO_NO_VECTOR; |
1cbdabe2 | 994 | vdev->vq[i].vdev = vdev; |
e78a2b42 | 995 | vdev->vq[i].queue_index = i; |
1cbdabe2 | 996 | } |
967f97fa | 997 | |
967f97fa AL |
998 | vdev->name = name; |
999 | vdev->config_len = config_size; | |
8e05db92 | 1000 | if (vdev->config_len) { |
7267c094 | 1001 | vdev->config = g_malloc0(config_size); |
8e05db92 | 1002 | } else { |
967f97fa | 1003 | vdev->config = NULL; |
8e05db92 FK |
1004 | } |
1005 | vdev->vmstate = qemu_add_vm_change_state_handler(virtio_vmstate_change, | |
1006 | vdev); | |
1007 | } | |
967f97fa | 1008 | |
a8170e5e | 1009 | hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n) |
1cbdabe2 MT |
1010 | { |
1011 | return vdev->vq[n].vring.desc; | |
1012 | } | |
1013 | ||
a8170e5e | 1014 | hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n) |
1cbdabe2 MT |
1015 | { |
1016 | return vdev->vq[n].vring.avail; | |
1017 | } | |
1018 | ||
a8170e5e | 1019 | hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n) |
1cbdabe2 MT |
1020 | { |
1021 | return vdev->vq[n].vring.used; | |
1022 | } | |
1023 | ||
a8170e5e | 1024 | hwaddr virtio_queue_get_ring_addr(VirtIODevice *vdev, int n) |
1cbdabe2 MT |
1025 | { |
1026 | return vdev->vq[n].vring.desc; | |
1027 | } | |
1028 | ||
a8170e5e | 1029 | hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n) |
1cbdabe2 MT |
1030 | { |
1031 | return sizeof(VRingDesc) * vdev->vq[n].vring.num; | |
1032 | } | |
1033 | ||
a8170e5e | 1034 | hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n) |
1cbdabe2 MT |
1035 | { |
1036 | return offsetof(VRingAvail, ring) + | |
2b3af999 | 1037 | sizeof(uint16_t) * vdev->vq[n].vring.num; |
1cbdabe2 MT |
1038 | } |
1039 | ||
a8170e5e | 1040 | hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n) |
1cbdabe2 MT |
1041 | { |
1042 | return offsetof(VRingUsed, ring) + | |
1043 | sizeof(VRingUsedElem) * vdev->vq[n].vring.num; | |
1044 | } | |
1045 | ||
a8170e5e | 1046 | hwaddr virtio_queue_get_ring_size(VirtIODevice *vdev, int n) |
1cbdabe2 MT |
1047 | { |
1048 | return vdev->vq[n].vring.used - vdev->vq[n].vring.desc + | |
1049 | virtio_queue_get_used_size(vdev, n); | |
1050 | } | |
1051 | ||
1052 | uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n) | |
1053 | { | |
1054 | return vdev->vq[n].last_avail_idx; | |
1055 | } | |
1056 | ||
1057 | void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx) | |
1058 | { | |
1059 | vdev->vq[n].last_avail_idx = idx; | |
1060 | } | |
1061 | ||
1062 | VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n) | |
1063 | { | |
1064 | return vdev->vq + n; | |
1065 | } | |
1066 | ||
e78a2b42 JW |
1067 | uint16_t virtio_get_queue_index(VirtQueue *vq) |
1068 | { | |
1069 | return vq->queue_index; | |
1070 | } | |
1071 | ||
15b2bd18 PB |
1072 | static void virtio_queue_guest_notifier_read(EventNotifier *n) |
1073 | { | |
1074 | VirtQueue *vq = container_of(n, VirtQueue, guest_notifier); | |
1075 | if (event_notifier_test_and_clear(n)) { | |
1076 | virtio_irq(vq); | |
1077 | } | |
1078 | } | |
1079 | ||
1080 | void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign, | |
1081 | bool with_irqfd) | |
1082 | { | |
1083 | if (assign && !with_irqfd) { | |
1084 | event_notifier_set_handler(&vq->guest_notifier, | |
1085 | virtio_queue_guest_notifier_read); | |
1086 | } else { | |
1087 | event_notifier_set_handler(&vq->guest_notifier, NULL); | |
1088 | } | |
1089 | if (!assign) { | |
1090 | /* Test and clear notifier before closing it, | |
1091 | * in case poll callback didn't have time to run. */ | |
1092 | virtio_queue_guest_notifier_read(&vq->guest_notifier); | |
1093 | } | |
1094 | } | |
1095 | ||
1cbdabe2 MT |
1096 | EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq) |
1097 | { | |
1098 | return &vq->guest_notifier; | |
1099 | } | |
b1f416aa PB |
1100 | |
1101 | static void virtio_queue_host_notifier_read(EventNotifier *n) | |
1102 | { | |
1103 | VirtQueue *vq = container_of(n, VirtQueue, host_notifier); | |
1104 | if (event_notifier_test_and_clear(n)) { | |
1105 | virtio_queue_notify_vq(vq); | |
1106 | } | |
1107 | } | |
1108 | ||
26b9b5fe PB |
1109 | void virtio_queue_set_host_notifier_fd_handler(VirtQueue *vq, bool assign, |
1110 | bool set_handler) | |
b1f416aa | 1111 | { |
26b9b5fe | 1112 | if (assign && set_handler) { |
b1f416aa PB |
1113 | event_notifier_set_handler(&vq->host_notifier, |
1114 | virtio_queue_host_notifier_read); | |
1115 | } else { | |
1116 | event_notifier_set_handler(&vq->host_notifier, NULL); | |
26b9b5fe PB |
1117 | } |
1118 | if (!assign) { | |
b1f416aa PB |
1119 | /* Test and clear notifier after disabling event, |
1120 | * in case poll callback didn't have time to run. */ | |
1121 | virtio_queue_host_notifier_read(&vq->host_notifier); | |
1122 | } | |
1123 | } | |
1124 | ||
1cbdabe2 MT |
1125 | EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq) |
1126 | { | |
1127 | return &vq->host_notifier; | |
1128 | } | |
8e05db92 | 1129 | |
1034e9cf FK |
1130 | void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name) |
1131 | { | |
1132 | if (vdev->bus_name) { | |
1133 | g_free(vdev->bus_name); | |
1134 | vdev->bus_name = NULL; | |
1135 | } | |
1136 | ||
1137 | if (bus_name) { | |
1138 | vdev->bus_name = g_strdup(bus_name); | |
1139 | } | |
1140 | } | |
1141 | ||
8e05db92 FK |
1142 | static int virtio_device_init(DeviceState *qdev) |
1143 | { | |
1144 | VirtIODevice *vdev = VIRTIO_DEVICE(qdev); | |
1145 | VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(qdev); | |
1146 | assert(k->init != NULL); | |
1147 | if (k->init(vdev) < 0) { | |
1148 | return -1; | |
1149 | } | |
1150 | virtio_bus_plug_device(vdev); | |
1151 | return 0; | |
1152 | } | |
1153 | ||
1034e9cf FK |
1154 | static int virtio_device_exit(DeviceState *qdev) |
1155 | { | |
1156 | VirtIODevice *vdev = VIRTIO_DEVICE(qdev); | |
1157 | ||
1158 | if (vdev->bus_name) { | |
1159 | g_free(vdev->bus_name); | |
1160 | vdev->bus_name = NULL; | |
1161 | } | |
1162 | return 0; | |
1163 | } | |
1164 | ||
8e05db92 FK |
1165 | static void virtio_device_class_init(ObjectClass *klass, void *data) |
1166 | { | |
1167 | /* Set the default value here. */ | |
1168 | DeviceClass *dc = DEVICE_CLASS(klass); | |
1169 | dc->init = virtio_device_init; | |
1034e9cf | 1170 | dc->exit = virtio_device_exit; |
8e05db92 FK |
1171 | dc->bus_type = TYPE_VIRTIO_BUS; |
1172 | } | |
1173 | ||
1174 | static const TypeInfo virtio_device_info = { | |
1175 | .name = TYPE_VIRTIO_DEVICE, | |
1176 | .parent = TYPE_DEVICE, | |
1177 | .instance_size = sizeof(VirtIODevice), | |
1178 | .class_init = virtio_device_class_init, | |
1179 | .abstract = true, | |
1180 | .class_size = sizeof(VirtioDeviceClass), | |
1181 | }; | |
1182 | ||
1183 | static void virtio_register_types(void) | |
1184 | { | |
1185 | type_register_static(&virtio_device_info); | |
1186 | } | |
1187 | ||
1188 | type_init(virtio_register_types) |