#ifndef _LINUX_VIRTIO_RING_H
#define _LINUX_VIRTIO_RING_H

#include <asm/barrier.h>
#include <linux/irqreturn.h>
#include <uapi/linux/virtio_ring.h>

/*
 * Barriers in virtio are tricky.  Non-SMP virtio guests can't assume
 * they're not on an SMP host system, so they need to assume real
 * barriers.  Non-SMP virtio hosts could skip the barriers, but does
 * anyone care?
 *
 * For virtio_pci on SMP, we don't need to order with respect to MMIO
 * accesses through relaxed memory I/O windows, so virt_mb() et al are
 * sufficient.
 *
 * For using virtio to talk to real devices (e.g. other heterogeneous
 * CPUs) we do need real barriers.  In theory, we could be using both
 * kinds of virtio, so it's a runtime decision, and the branch is
 * actually quite cheap.
 */

static inline void virtio_mb(bool weak_barriers)
{
	if (weak_barriers)
		virt_mb();
	else
		mb();
}

static inline void virtio_rmb(bool weak_barriers)
{
	if (weak_barriers)
		virt_rmb();
	else
		rmb();
}

static inline void virtio_wmb(bool weak_barriers)
{
	if (weak_barriers)
		virt_wmb();
	else
		wmb();
}
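
/*
 * Illustrative sketch: virtio_wmb() is the barrier a ring writer pairs
 * with the store that publishes new descriptors, so the device cannot
 * observe the index update before the descriptor contents.  The vq
 * field names below are assumptions, modelled on a split-ring layout:
 *
 *	vq->vring.desc[head].addr = addr;	 // fill the descriptor
 *	vq->vring.avail->ring[avail_idx] = head; // queue its head index
 *	virtio_wmb(vq->weak_barriers);		 // order: entries before idx
 *	vq->vring.avail->idx++;			 // publish to the device
 */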

static inline void virtio_store_mb(bool weak_barriers,
				   __virtio16 *p, __virtio16 v)
{
	if (weak_barriers) {
		virt_store_mb(*p, v);
	} else {
		WRITE_ONCE(*p, v);
		mb();
	}
}
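
/*
 * Illustrative sketch: virtio_store_mb() suits the "store an event
 * index, then re-check device state" pattern.  vring_used_event() and
 * cpu_to_virtio16() are existing helpers; the vq/last_used names are
 * assumptions:
 *
 *	virtio_store_mb(vq->weak_barriers,
 *			&vring_used_event(&vq->vring),
 *			cpu_to_virtio16(vq->vdev, last_used));
 */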

struct virtio_device;
struct virtqueue;

/*
 * Creates a virtqueue and allocates the descriptor ring.  If
 * may_reduce_num is set, then this may allocate a smaller ring than
 * expected.  The caller should query virtqueue_get_vring_size to learn
 * the actual size of the ring.
 */
struct virtqueue *vring_create_virtqueue(unsigned int index,
					 unsigned int num,
					 unsigned int vring_align,
					 struct virtio_device *vdev,
					 bool weak_barriers,
					 bool may_reduce_num,
					 bool (*notify)(struct virtqueue *vq),
					 void (*callback)(struct virtqueue *vq),
					 const char *name);
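
/*
 * Illustrative call, assuming a transport driver's my_notify/my_callback
 * handlers and a 256-entry queue; virtqueue_get_vring_size() is the
 * existing accessor for the final ring size:
 *
 *	struct virtqueue *vq;
 *	unsigned int num;
 *
 *	vq = vring_create_virtqueue(0, 256, PAGE_SIZE, vdev,
 *				    true, true, // weak barriers, may shrink
 *				    my_notify, my_callback, "requests");
 *	if (!vq)
 *		return -ENOMEM;
 *	num = virtqueue_get_vring_size(vq);	// may be < 256
 */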

/* Creates a virtqueue with a custom layout. */
struct virtqueue *__vring_new_virtqueue(unsigned int index,
					struct vring vring,
					struct virtio_device *vdev,
					bool weak_barriers,
					bool (*notify)(struct virtqueue *),
					void (*callback)(struct virtqueue *),
					const char *name);

/*
 * Creates a virtqueue with a standard layout but a caller-allocated
 * ring.
 */
struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      void *pages,
				      bool (*notify)(struct virtqueue *vq),
				      void (*callback)(struct virtqueue *vq),
				      const char *name);
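
/*
 * Illustrative sketch: here the caller owns the ring memory.
 * vring_size() comes from uapi/linux/virtio_ring.h; the allocation
 * scheme is an assumption:
 *
 *	void *pages = alloc_pages_exact(vring_size(num, PAGE_SIZE),
 *					GFP_KERNEL | __GFP_ZERO);
 *	if (!pages)
 *		return -ENOMEM;
 *	vq = vring_new_virtqueue(0, num, PAGE_SIZE, vdev, true, pages,
 *				 my_notify, my_callback, "requests");
 */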

/*
 * Destroys a virtqueue.  If created with vring_create_virtqueue, this
 * also frees the ring.
 */
void vring_del_virtqueue(struct virtqueue *vq);

/* Filter out transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev);

irqreturn_t vring_interrupt(int irq, void *_vq);
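
/*
 * Illustrative sketch: a transport routes its IRQ to vring_interrupt()
 * with the virtqueue as the cookie, as virtio_pci does; the irq number
 * and name here are assumptions:
 *
 *	err = request_irq(irq, vring_interrupt, IRQF_SHARED,
 *			  "my-virtio-vq", vq);
 */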

#endif /* _LINUX_VIRTIO_RING_H */