/* Copyright (C) 2009 Red Hat, Inc.
 * Author: Michael S. Tsirkin <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * virtio-net server in host kernel.
 */

#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>

#include <linux/net.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/if_tun.h>
#include <linux/if_macvlan.h>
#include <linux/if_tap.h>
#include <linux/if_vlan.h>

#include <net/sock.h>

#include "vhost.h"

static int experimental_zcopytx = 1;
module_param(experimental_zcopytx, int, 0444);
MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
		 " 1 - Enable; 0 - Disable");

/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_NET_WEIGHT 0x80000

/* MAX number of TX used buffers for outstanding zerocopy */
#define VHOST_MAX_PEND 128
#define VHOST_GOODCOPY_LEN 256

/*
 * For transmit, used buffer len is unused; we override it to track buffer
 * status internally; used for zerocopy tx only.
 */
/* Lower device DMA failed */
#define VHOST_DMA_FAILED_LEN	((__force __virtio32)3)
/* Lower device DMA done */
#define VHOST_DMA_DONE_LEN	((__force __virtio32)2)
/* Lower device DMA in progress */
#define VHOST_DMA_IN_PROGRESS	((__force __virtio32)1)
/* Buffer unused */
#define VHOST_DMA_CLEAR_LEN	((__force __virtio32)0)

#define VHOST_DMA_IS_DONE(len) ((__force u32)(len) >= (__force u32)VHOST_DMA_DONE_LEN)

enum {
	VHOST_NET_FEATURES = VHOST_FEATURES |
			 (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
			 (1ULL << VIRTIO_NET_F_MRG_RXBUF) |
			 (1ULL << VIRTIO_F_IOMMU_PLATFORM)
};

enum {
	VHOST_NET_VQ_RX = 0,
	VHOST_NET_VQ_TX = 1,
	VHOST_NET_VQ_MAX = 2,
};

struct vhost_net_ubuf_ref {
	/* refcount follows semantics similar to kref:
	 *  0: object is released
	 *  1: no outstanding ubufs
	 * >1: outstanding ubufs
	 */
	atomic_t refcount;
	wait_queue_head_t wait;
	struct vhost_virtqueue *vq;
};

struct vhost_net_virtqueue {
	struct vhost_virtqueue vq;
	size_t vhost_hlen;
	size_t sock_hlen;
	/* vhost zerocopy support fields below: */
	/* last used idx for outstanding DMA zerocopy buffers */
	int upend_idx;
	/* first used idx for DMA done zerocopy buffers */
	int done_idx;
	/* an array of userspace buffers info */
	struct ubuf_info *ubuf_info;
	/* Reference counting for outstanding ubufs.
	 * Protected by vq mutex. Writers must also take device mutex. */
	struct vhost_net_ubuf_ref *ubufs;
};

struct vhost_net {
	struct vhost_dev dev;
	struct vhost_net_virtqueue vqs[VHOST_NET_VQ_MAX];
	struct vhost_poll poll[VHOST_NET_VQ_MAX];
	/* Number of TX recently submitted.
	 * Protected by tx vq lock. */
	unsigned tx_packets;
	/* Number of times zerocopy TX recently failed.
	 * Protected by tx vq lock. */
	unsigned tx_zcopy_err;
	/* Flush in progress. Protected by tx vq lock. */
	bool tx_flush;
};

static unsigned vhost_net_zcopy_mask __read_mostly;

static void vhost_net_enable_zcopy(int vq)
{
	vhost_net_zcopy_mask |= 0x1 << vq;
}

static struct vhost_net_ubuf_ref *
vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
{
	struct vhost_net_ubuf_ref *ubufs;
	/* No zero copy backend? Nothing to count. */
	if (!zcopy)
		return NULL;
	ubufs = kmalloc(sizeof(*ubufs), GFP_KERNEL);
	if (!ubufs)
		return ERR_PTR(-ENOMEM);
	atomic_set(&ubufs->refcount, 1);
	init_waitqueue_head(&ubufs->wait);
	ubufs->vq = vq;
	return ubufs;
}

static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
{
	int r = atomic_sub_return(1, &ubufs->refcount);
	if (unlikely(!r))
		wake_up(&ubufs->wait);
	return r;
}

static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
{
	vhost_net_ubuf_put(ubufs);
	wait_event(ubufs->wait, !atomic_read(&ubufs->refcount));
}

static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
{
	vhost_net_ubuf_put_and_wait(ubufs);
	kfree(ubufs);
}

static void vhost_net_clear_ubuf_info(struct vhost_net *n)
{
	int i;

	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		kfree(n->vqs[i].ubuf_info);
		n->vqs[i].ubuf_info = NULL;
	}
}

static int vhost_net_set_ubuf_info(struct vhost_net *n)
{
	bool zcopy;
	int i;

	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		zcopy = vhost_net_zcopy_mask & (0x1 << i);
		if (!zcopy)
			continue;
		n->vqs[i].ubuf_info = kmalloc(sizeof(*n->vqs[i].ubuf_info) *
					      UIO_MAXIOV, GFP_KERNEL);
		if (!n->vqs[i].ubuf_info)
			goto err;
	}
	return 0;

err:
	vhost_net_clear_ubuf_info(n);
	return -ENOMEM;
}

static void vhost_net_vq_reset(struct vhost_net *n)
{
	int i;

	vhost_net_clear_ubuf_info(n);

	for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
		n->vqs[i].done_idx = 0;
		n->vqs[i].upend_idx = 0;
		n->vqs[i].ubufs = NULL;
		n->vqs[i].vhost_hlen = 0;
		n->vqs[i].sock_hlen = 0;
	}

}

static void vhost_net_tx_packet(struct vhost_net *net)
{
	++net->tx_packets;
	if (net->tx_packets < 1024)
		return;
	net->tx_packets = 0;
	net->tx_zcopy_err = 0;
}

static void vhost_net_tx_err(struct vhost_net *net)
{
	++net->tx_zcopy_err;
}

static bool vhost_net_tx_select_zcopy(struct vhost_net *net)
{
	/* TX flush waits for outstanding DMAs to be done.
	 * Don't start new DMAs.
	 */
	return !net->tx_flush &&
		net->tx_packets / 64 >= net->tx_zcopy_err;
}

static bool vhost_sock_zcopy(struct socket *sock)
{
	return unlikely(experimental_zcopytx) &&
		sock_flag(sock->sk, SOCK_ZEROCOPY);
}

/* The lower device driver may complete DMAs out of order. upend_idx tracks
 * the end of the used idx range, done_idx tracks its head. Once the lower
 * device has completed DMAs contiguously, we signal the used idx to the
 * guest.
 */
static void vhost_zerocopy_signal_used(struct vhost_net *net,
				       struct vhost_virtqueue *vq)
{
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);
	int i, add;
	int j = 0;

	for (i = nvq->done_idx; i != nvq->upend_idx; i = (i + 1) % UIO_MAXIOV) {
		if (vq->heads[i].len == VHOST_DMA_FAILED_LEN)
			vhost_net_tx_err(net);
		if (VHOST_DMA_IS_DONE(vq->heads[i].len)) {
			vq->heads[i].len = VHOST_DMA_CLEAR_LEN;
			++j;
		} else
			break;
	}
	while (j) {
		add = min(UIO_MAXIOV - nvq->done_idx, j);
		vhost_add_used_and_signal_n(vq->dev, vq,
					    &vq->heads[nvq->done_idx], add);
		nvq->done_idx = (nvq->done_idx + add) % UIO_MAXIOV;
		j -= add;
	}
}

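/* Completion callback attached to zerocopy skbs via ubuf_info: the lower
 * device invokes it once the transmit DMA finishes (or fails). It records
 * the outcome in the corresponding used-ring slot and drops the ubuf
 * reference taken in handle_tx().
 */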
static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
{
	struct vhost_net_ubuf_ref *ubufs = ubuf->ctx;
	struct vhost_virtqueue *vq = ubufs->vq;
	int cnt;

	rcu_read_lock_bh();

	/* set len to mark this desc buffers done DMA */
	vq->heads[ubuf->desc].len = success ?
		VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
	cnt = vhost_net_ubuf_put(ubufs);

	/*
	 * Trigger polling thread if guest stopped submitting new buffers:
	 * in this case, the refcount after decrement will eventually reach 1.
	 * We also trigger polling periodically after each 16 packets
	 * (the value 16 here is more or less arbitrary, it's tuned to trigger
	 * less than 10% of times).
	 */
	if (cnt <= 1 || !(cnt % 16))
		vhost_poll_queue(&vq->poll);

	rcu_read_unlock_bh();
}

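/* Busy-polling helpers: busy_clock() gives a coarse timestamp (local_clock()
 * nanoseconds shifted down by 10, i.e. roughly microseconds), and
 * vhost_can_busy_poll() bounds the spin by the configured timeout, pending
 * signals, need_resched() and queued vhost work.
 */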
static inline unsigned long busy_clock(void)
{
	return local_clock() >> 10;
}

static bool vhost_can_busy_poll(struct vhost_dev *dev,
				unsigned long endtime)
{
	return likely(!need_resched()) &&
	       likely(!time_after(busy_clock(), endtime)) &&
	       likely(!signal_pending(current)) &&
	       !vhost_has_work(dev);
}

static void vhost_net_disable_vq(struct vhost_net *n,
				 struct vhost_virtqueue *vq)
{
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);
	struct vhost_poll *poll = n->poll + (nvq - n->vqs);
	if (!vq->private_data)
		return;
	vhost_poll_stop(poll);
}

static int vhost_net_enable_vq(struct vhost_net *n,
			       struct vhost_virtqueue *vq)
{
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);
	struct vhost_poll *poll = n->poll + (nvq - n->vqs);
	struct socket *sock;

	sock = vq->private_data;
	if (!sock)
		return 0;

	return vhost_poll_start(poll, sock->file);
}

static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
				    struct vhost_virtqueue *vq,
				    struct iovec iov[], unsigned int iov_size,
				    unsigned int *out_num, unsigned int *in_num)
{
	unsigned long uninitialized_var(endtime);
	int r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
				  out_num, in_num, NULL, NULL);

	if (r == vq->num && vq->busyloop_timeout) {
		preempt_disable();
		endtime = busy_clock() + vq->busyloop_timeout;
		while (vhost_can_busy_poll(vq->dev, endtime) &&
		       vhost_vq_avail_empty(vq->dev, vq))
			cpu_relax();
		preempt_enable();
		r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
				      out_num, in_num, NULL, NULL);
	}

	return r;
}

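/* Returns true once the ring of outstanding zerocopy TX buffers is
 * considered full (tracked via upend_idx/done_idx modulo UIO_MAXIOV);
 * handle_tx() then stops submitting until completions catch up.
 */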
static bool vhost_exceeds_maxpend(struct vhost_net *net)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;

	return (nvq->upend_idx + vq->num - VHOST_MAX_PEND) % UIO_MAXIOV
		== nvq->done_idx;
}

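/* TX path: pull available buffers off the TX virtqueue, strip the vnet
 * header and hand the payload to the backend socket with sendmsg(), using
 * zerocopy (ubuf_info passed via msg_control) for large packets when the
 * backend supports it.
 */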
/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_tx(struct vhost_net *net)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;
	unsigned out, in;
	int head;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL,
		.msg_controllen = 0,
		.msg_flags = MSG_DONTWAIT,
	};
	size_t len, total_len = 0;
	int err;
	size_t hdr_size;
	struct socket *sock;
	struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
	bool zcopy, zcopy_used;

	mutex_lock(&vq->mutex);
	sock = vq->private_data;
	if (!sock)
		goto out;

	if (!vq_iotlb_prefetch(vq))
		goto out;

	vhost_disable_notify(&net->dev, vq);

	hdr_size = nvq->vhost_hlen;
	zcopy = nvq->ubufs;

	for (;;) {
		/* Release DMAs done buffers first */
		if (zcopy)
			vhost_zerocopy_signal_used(net, vq);

		/* If more outstanding DMAs, queue the work.
		 * Handle upend_idx wrap around
		 */
		if (unlikely(vhost_exceeds_maxpend(net)))
			break;

		head = vhost_net_tx_get_vq_desc(net, vq, vq->iov,
						ARRAY_SIZE(vq->iov),
						&out, &in);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new? Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&net->dev, vq))) {
				vhost_disable_notify(&net->dev, vq);
				continue;
			}
			break;
		}
		if (in) {
			vq_err(vq, "Unexpected descriptor format for TX: "
			       "out %d, in %d\n", out, in);
			break;
		}
		/* Skip header. TODO: support TSO. */
		len = iov_length(vq->iov, out);
		iov_iter_init(&msg.msg_iter, WRITE, vq->iov, out, len);
		iov_iter_advance(&msg.msg_iter, hdr_size);
		/* Sanity check */
		if (!msg_data_left(&msg)) {
			vq_err(vq, "Unexpected header len for TX: "
			       "%zd expected %zd\n",
			       len, hdr_size);
			break;
		}
		len = msg_data_left(&msg);

		zcopy_used = zcopy && len >= VHOST_GOODCOPY_LEN
				   && (nvq->upend_idx + 1) % UIO_MAXIOV !=
				      nvq->done_idx
				   && vhost_net_tx_select_zcopy(net);

		/* use msg_control to pass vhost zerocopy ubuf info to skb */
		if (zcopy_used) {
			struct ubuf_info *ubuf;
			ubuf = nvq->ubuf_info + nvq->upend_idx;

			vq->heads[nvq->upend_idx].id = cpu_to_vhost32(vq, head);
			vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS;
			ubuf->callback = vhost_zerocopy_callback;
			ubuf->ctx = nvq->ubufs;
			ubuf->desc = nvq->upend_idx;
			msg.msg_control = ubuf;
			msg.msg_controllen = sizeof(ubuf);
			ubufs = nvq->ubufs;
			atomic_inc(&ubufs->refcount);
			nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
		} else {
			msg.msg_control = NULL;
			ubufs = NULL;
		}

		total_len += len;
		if (total_len < VHOST_NET_WEIGHT &&
		    !vhost_vq_avail_empty(&net->dev, vq) &&
		    likely(!vhost_exceeds_maxpend(net))) {
			msg.msg_flags |= MSG_MORE;
		} else {
			msg.msg_flags &= ~MSG_MORE;
		}

		/* TODO: Check specific error and bomb out unless ENOBUFS? */
		err = sock->ops->sendmsg(sock, &msg, len);
		if (unlikely(err < 0)) {
			if (zcopy_used) {
				vhost_net_ubuf_put(ubufs);
				nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
					% UIO_MAXIOV;
			}
			vhost_discard_vq_desc(vq, 1);
			break;
		}
		if (err != len)
			pr_debug("Truncated TX packet: "
				 " len %d != %zd\n", err, len);
		if (!zcopy_used)
			vhost_add_used_and_signal(&net->dev, vq, head, 0);
		else
			vhost_zerocopy_signal_used(net, vq);
		vhost_net_tx_packet(net);
		if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
			vhost_poll_queue(&vq->poll);
			break;
		}
	}
out:
	mutex_unlock(&vq->mutex);
}

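/* Length of the next pending packet on the backend socket, 0 if none.
 * Prefers the socket's own peek_len op (tun/tap); otherwise peeks the
 * receive queue directly, accounting for a VLAN tag if present.
 */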
static int peek_head_len(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;
	struct sk_buff *head;
	int len = 0;
	unsigned long flags;

	if (sock->ops->peek_len)
		return sock->ops->peek_len(sock);

	spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
	head = skb_peek(&sk->sk_receive_queue);
	if (likely(head)) {
		len = head->len;
		if (skb_vlan_tag_present(head))
			len += VLAN_HLEN;
	}

	spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);
	return len;
}

static int sk_has_rx_data(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;

	if (sock->ops->peek_len)
		return sock->ops->peek_len(sock);

	return skb_queue_empty(&sk->sk_receive_queue);
}

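/* As peek_head_len(), but if the socket is empty and busy polling is
 * enabled, spin for up to busyloop_timeout while also watching the TX
 * virtqueue, then peek again.
 */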
static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;
	unsigned long uninitialized_var(endtime);
	int len = peek_head_len(sk);

	if (!len && vq->busyloop_timeout) {
		/* Both tx vq and rx socket were polled here */
		mutex_lock(&vq->mutex);
		vhost_disable_notify(&net->dev, vq);

		preempt_disable();
		endtime = busy_clock() + vq->busyloop_timeout;

		while (vhost_can_busy_poll(&net->dev, endtime) &&
		       !sk_has_rx_data(sk) &&
		       vhost_vq_avail_empty(&net->dev, vq))
			cpu_relax();

		preempt_enable();

		if (vhost_enable_notify(&net->dev, vq))
			vhost_poll_queue(&vq->poll);
		mutex_unlock(&vq->mutex);

		len = peek_head_len(sk);
	}

	return len;
}

/* This is a multi-buffer version of vhost_get_desc, that works if
 *	vq has read descriptors only.
 * @vq		- the relevant virtqueue
 * @datalen	- data length we'll be reading
 * @iovcount	- returned count of io vectors we fill
 * @log		- vhost log
 * @log_num	- log offset
 * @quota	- headcount quota, 1 for big buffer
 *	returns number of buffer heads allocated, negative on error
 */
static int get_rx_bufs(struct vhost_virtqueue *vq,
		       struct vring_used_elem *heads,
		       int datalen,
		       unsigned *iovcount,
		       struct vhost_log *log,
		       unsigned *log_num,
		       unsigned int quota)
{
	unsigned int out, in;
	int seg = 0;
	int headcount = 0;
	unsigned d;
	int r, nlogs = 0;
	/* len is always initialized before use since we are always called with
	 * datalen > 0.
	 */
	u32 uninitialized_var(len);

	while (datalen > 0 && headcount < quota) {
		if (unlikely(seg >= UIO_MAXIOV)) {
			r = -ENOBUFS;
			goto err;
		}
		r = vhost_get_vq_desc(vq, vq->iov + seg,
				      ARRAY_SIZE(vq->iov) - seg, &out,
				      &in, log, log_num);
		if (unlikely(r < 0))
			goto err;

		d = r;
		if (d == vq->num) {
			r = 0;
			goto err;
		}
		if (unlikely(out || in <= 0)) {
			vq_err(vq, "unexpected descriptor format for RX: "
				"out %d, in %d\n", out, in);
			r = -EINVAL;
			goto err;
		}
		if (unlikely(log)) {
			nlogs += *log_num;
			log += *log_num;
		}
		heads[headcount].id = cpu_to_vhost32(vq, d);
		len = iov_length(vq->iov + seg, in);
		heads[headcount].len = cpu_to_vhost32(vq, len);
		datalen -= len;
		++headcount;
		seg += in;
	}
	heads[headcount - 1].len = cpu_to_vhost32(vq, len + datalen);
	*iovcount = seg;
	if (unlikely(log))
		*log_num = nlogs;

	/* Detect overrun */
	if (unlikely(datalen > 0)) {
		r = UIO_MAXIOV + 1;
		goto err;
	}
	return headcount;
err:
	vhost_discard_vq_desc(vq, headcount);
	return r;
}

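/* RX path: for each pending packet on the backend socket, gather enough
 * guest buffers with get_rx_bufs(), recvmsg() the data into them, fix up
 * the virtio-net header and num_buffers field as needed, then signal the
 * used ring.
 */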
/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_rx(struct vhost_net *net)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_RX];
	struct vhost_virtqueue *vq = &nvq->vq;
	unsigned uninitialized_var(in), log;
	struct vhost_log *vq_log;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL, /* FIXME: get and handle RX aux data. */
		.msg_controllen = 0,
		.msg_flags = MSG_DONTWAIT,
	};
	struct virtio_net_hdr hdr = {
		.flags = 0,
		.gso_type = VIRTIO_NET_HDR_GSO_NONE
	};
	size_t total_len = 0;
	int err, mergeable;
	s16 headcount;
	size_t vhost_hlen, sock_hlen;
	size_t vhost_len, sock_len;
	struct socket *sock;
	struct iov_iter fixup;
	__virtio16 num_buffers;

	mutex_lock(&vq->mutex);
	sock = vq->private_data;
	if (!sock)
		goto out;

	if (!vq_iotlb_prefetch(vq))
		goto out;

	vhost_disable_notify(&net->dev, vq);
	vhost_net_disable_vq(net, vq);

	vhost_hlen = nvq->vhost_hlen;
	sock_hlen = nvq->sock_hlen;

	vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ?
		vq->log : NULL;
	mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF);

	while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk))) {
		sock_len += sock_hlen;
		vhost_len = sock_len + vhost_hlen;
		headcount = get_rx_bufs(vq, vq->heads, vhost_len,
					&in, vq_log, &log,
					likely(mergeable) ? UIO_MAXIOV : 1);
		/* On error, stop handling until the next kick. */
		if (unlikely(headcount < 0))
			goto out;
		/* On overrun, truncate and discard */
		if (unlikely(headcount > UIO_MAXIOV)) {
			iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1);
			err = sock->ops->recvmsg(sock, &msg,
						 1, MSG_DONTWAIT | MSG_TRUNC);
			pr_debug("Discarded rx packet: len %zd\n", sock_len);
			continue;
		}
		/* OK, now we need to know about added descriptors. */
		if (!headcount) {
			if (unlikely(vhost_enable_notify(&net->dev, vq))) {
				/* They have slipped one in as we were
				 * doing that: check again. */
				vhost_disable_notify(&net->dev, vq);
				continue;
			}
			/* Nothing new? Wait for eventfd to tell us
			 * they refilled. */
			goto out;
		}
		/* We don't need to be notified again. */
		iov_iter_init(&msg.msg_iter, READ, vq->iov, in, vhost_len);
		fixup = msg.msg_iter;
		if (unlikely((vhost_hlen))) {
			/* We will supply the header ourselves
			 * TODO: support TSO.
			 */
			iov_iter_advance(&msg.msg_iter, vhost_hlen);
		}
		err = sock->ops->recvmsg(sock, &msg,
					 sock_len, MSG_DONTWAIT | MSG_TRUNC);
		/* Userspace might have consumed the packet meanwhile:
		 * it's not supposed to do this usually, but might be hard
		 * to prevent. Discard data we got (if any) and keep going. */
		if (unlikely(err != sock_len)) {
			pr_debug("Discarded rx packet: "
				 " len %d, expected %zd\n", err, sock_len);
			vhost_discard_vq_desc(vq, headcount);
			continue;
		}
		/* Supply virtio_net_hdr if VHOST_NET_F_VIRTIO_NET_HDR */
		if (unlikely(vhost_hlen)) {
			if (copy_to_iter(&hdr, sizeof(hdr),
					 &fixup) != sizeof(hdr)) {
				vq_err(vq, "Unable to write vnet_hdr "
				       "at addr %p\n", vq->iov->iov_base);
				goto out;
			}
		} else {
			/* Header came from socket; we'll need to patch
			 * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF
			 */
			iov_iter_advance(&fixup, sizeof(hdr));
		}
		/* TODO: Should check and handle checksum. */

		num_buffers = cpu_to_vhost16(vq, headcount);
		if (likely(mergeable) &&
		    copy_to_iter(&num_buffers, sizeof num_buffers,
				 &fixup) != sizeof num_buffers) {
			vq_err(vq, "Failed num_buffers write");
			vhost_discard_vq_desc(vq, headcount);
			goto out;
		}
		vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
					    headcount);
		if (unlikely(vq_log))
			vhost_log_write(vq, vq_log, log, vhost_len);
		total_len += vhost_len;
		if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
			vhost_poll_queue(&vq->poll);
			goto out;
		}
	}
	vhost_net_enable_vq(net, vq);
out:
	mutex_unlock(&vq->mutex);
}

static void handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

	handle_tx(net);
}

static void handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

	handle_rx(net);
}

static void handle_tx_net(struct vhost_work *work)
{
	struct vhost_net *net = container_of(work, struct vhost_net,
					     poll[VHOST_NET_VQ_TX].work);
	handle_tx(net);
}

static void handle_rx_net(struct vhost_work *work)
{
	struct vhost_net *net = container_of(work, struct vhost_net,
					     poll[VHOST_NET_VQ_RX].work);
	handle_rx(net);
}

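/* open() on /dev/vhost-net: allocate the vhost_net device, wire up the
 * TX/RX virtqueue kick handlers and backend socket pollers, and stash it
 * in the file's private_data.
 */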
static int vhost_net_open(struct inode *inode, struct file *f)
{
	struct vhost_net *n;
	struct vhost_dev *dev;
	struct vhost_virtqueue **vqs;
	int i;

	n = kmalloc(sizeof *n, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
	if (!n) {
		n = vmalloc(sizeof *n);
		if (!n)
			return -ENOMEM;
	}
	vqs = kmalloc(VHOST_NET_VQ_MAX * sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		kvfree(n);
		return -ENOMEM;
	}

	dev = &n->dev;
	vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq;
	vqs[VHOST_NET_VQ_RX] = &n->vqs[VHOST_NET_VQ_RX].vq;
	n->vqs[VHOST_NET_VQ_TX].vq.handle_kick = handle_tx_kick;
	n->vqs[VHOST_NET_VQ_RX].vq.handle_kick = handle_rx_kick;
	for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
		n->vqs[i].ubufs = NULL;
		n->vqs[i].ubuf_info = NULL;
		n->vqs[i].upend_idx = 0;
		n->vqs[i].done_idx = 0;
		n->vqs[i].vhost_hlen = 0;
		n->vqs[i].sock_hlen = 0;
	}
	vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX);

	vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT, dev);
	vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN, dev);

	f->private_data = n;

	return 0;
}

static struct socket *vhost_net_stop_vq(struct vhost_net *n,
					struct vhost_virtqueue *vq)
{
	struct socket *sock;

	mutex_lock(&vq->mutex);
	sock = vq->private_data;
	vhost_net_disable_vq(n, vq);
	vq->private_data = NULL;
	mutex_unlock(&vq->mutex);
	return sock;
}

static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
			   struct socket **rx_sock)
{
	*tx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_TX].vq);
	*rx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_RX].vq);
}

static void vhost_net_flush_vq(struct vhost_net *n, int index)
{
	vhost_poll_flush(n->poll + index);
	vhost_poll_flush(&n->vqs[index].vq.poll);
}

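/* Flush queued TX/RX work; if zerocopy is active, also set tx_flush so no
 * new DMAs are started and wait for all outstanding lower-device DMAs to
 * finish before restoring the ubuf refcount.
 */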
static void vhost_net_flush(struct vhost_net *n)
{
	vhost_net_flush_vq(n, VHOST_NET_VQ_TX);
	vhost_net_flush_vq(n, VHOST_NET_VQ_RX);
	if (n->vqs[VHOST_NET_VQ_TX].ubufs) {
		mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
		n->tx_flush = true;
		mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
		/* Wait for all lower device DMAs done. */
		vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs);
		mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
		n->tx_flush = false;
		atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1);
		mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
	}
}

static int vhost_net_release(struct inode *inode, struct file *f)
{
	struct vhost_net *n = f->private_data;
	struct socket *tx_sock;
	struct socket *rx_sock;

	vhost_net_stop(n, &tx_sock, &rx_sock);
	vhost_net_flush(n);
	vhost_dev_stop(&n->dev);
	vhost_dev_cleanup(&n->dev, false);
	vhost_net_vq_reset(n);
	if (tx_sock)
		sockfd_put(tx_sock);
	if (rx_sock)
		sockfd_put(rx_sock);
	/* Make sure no callbacks are outstanding */
	synchronize_rcu_bh();
	/* We do an extra flush before freeing memory,
	 * since jobs can re-queue themselves. */
	vhost_net_flush(n);
	kfree(n->dev.vqs);
	kvfree(n);
	return 0;
}

static struct socket *get_raw_socket(int fd)
{
	struct {
		struct sockaddr_ll sa;
		char buf[MAX_ADDR_LEN];
	} uaddr;
	int uaddr_len = sizeof uaddr, r;
	struct socket *sock = sockfd_lookup(fd, &r);

	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	/* Parameter checking */
	if (sock->sk->sk_type != SOCK_RAW) {
		r = -ESOCKTNOSUPPORT;
		goto err;
	}

	r = sock->ops->getname(sock, (struct sockaddr *)&uaddr.sa,
			       &uaddr_len, 0);
	if (r)
		goto err;

	if (uaddr.sa.sll_family != AF_PACKET) {
		r = -EPFNOSUPPORT;
		goto err;
	}
	return sock;
err:
	sockfd_put(sock);
	return ERR_PTR(r);
}

static struct socket *get_tap_socket(int fd)
{
	struct file *file = fget(fd);
	struct socket *sock;

	if (!file)
		return ERR_PTR(-EBADF);
	sock = tun_get_socket(file);
	if (!IS_ERR(sock))
		return sock;
	sock = tap_get_socket(file);
	if (IS_ERR(sock))
		fput(file);
	return sock;
}

static struct socket *get_socket(int fd)
{
	struct socket *sock;

	/* special case to disable backend */
	if (fd == -1)
		return NULL;
	sock = get_raw_socket(fd);
	if (!IS_ERR(sock))
		return sock;
	sock = get_tap_socket(fd);
	if (!IS_ERR(sock))
		return sock;
	return ERR_PTR(-ENOTSOCK);
}

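/* VHOST_NET_SET_BACKEND: attach the given fd (a tun/tap-backed socket or an
 * AF_PACKET raw socket, or -1 to detach) as the backend of virtqueue
 * 'index', allocating zerocopy tracking state when the socket supports it.
 */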
static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
{
	struct socket *sock, *oldsock;
	struct vhost_virtqueue *vq;
	struct vhost_net_virtqueue *nvq;
	struct vhost_net_ubuf_ref *ubufs, *oldubufs = NULL;
	int r;

	mutex_lock(&n->dev.mutex);
	r = vhost_dev_check_owner(&n->dev);
	if (r)
		goto err;

	if (index >= VHOST_NET_VQ_MAX) {
		r = -ENOBUFS;
		goto err;
	}
	vq = &n->vqs[index].vq;
	nvq = &n->vqs[index];
	mutex_lock(&vq->mutex);

	/* Verify that ring has been setup correctly. */
	if (!vhost_vq_access_ok(vq)) {
		r = -EFAULT;
		goto err_vq;
	}
	sock = get_socket(fd);
	if (IS_ERR(sock)) {
		r = PTR_ERR(sock);
		goto err_vq;
	}

	/* start polling new socket */
	oldsock = vq->private_data;
	if (sock != oldsock) {
		ubufs = vhost_net_ubuf_alloc(vq,
					     sock && vhost_sock_zcopy(sock));
		if (IS_ERR(ubufs)) {
			r = PTR_ERR(ubufs);
			goto err_ubufs;
		}

		vhost_net_disable_vq(n, vq);
		vq->private_data = sock;
		r = vhost_vq_init_access(vq);
		if (r)
			goto err_used;
		r = vhost_net_enable_vq(n, vq);
		if (r)
			goto err_used;

		oldubufs = nvq->ubufs;
		nvq->ubufs = ubufs;

		n->tx_packets = 0;
		n->tx_zcopy_err = 0;
		n->tx_flush = false;
	}

	mutex_unlock(&vq->mutex);

	if (oldubufs) {
		vhost_net_ubuf_put_wait_and_free(oldubufs);
		mutex_lock(&vq->mutex);
		vhost_zerocopy_signal_used(n, vq);
		mutex_unlock(&vq->mutex);
	}

	if (oldsock) {
		vhost_net_flush_vq(n, index);
		sockfd_put(oldsock);
	}

	mutex_unlock(&n->dev.mutex);
	return 0;

err_used:
	vq->private_data = oldsock;
	vhost_net_enable_vq(n, vq);
	if (ubufs)
		vhost_net_ubuf_put_wait_and_free(ubufs);
err_ubufs:
	sockfd_put(sock);
err_vq:
	mutex_unlock(&vq->mutex);
err:
	mutex_unlock(&n->dev.mutex);
	return r;
}

static long vhost_net_reset_owner(struct vhost_net *n)
{
	struct socket *tx_sock = NULL;
	struct socket *rx_sock = NULL;
	long err;
	struct vhost_umem *umem;

	mutex_lock(&n->dev.mutex);
	err = vhost_dev_check_owner(&n->dev);
	if (err)
		goto done;
	umem = vhost_dev_reset_owner_prepare();
	if (!umem) {
		err = -ENOMEM;
		goto done;
	}
	vhost_net_stop(n, &tx_sock, &rx_sock);
	vhost_net_flush(n);
	vhost_dev_reset_owner(&n->dev, umem);
	vhost_net_vq_reset(n);
done:
	mutex_unlock(&n->dev.mutex);
	if (tx_sock)
		sockfd_put(tx_sock);
	if (rx_sock)
		sockfd_put(rx_sock);
	return err;
}

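/* VHOST_SET_FEATURES: derive the vnet header layout from the negotiated
 * feature bits (mergeable rx buffers or VIRTIO 1.0 imply the larger header)
 * and decide whether vhost or the backend socket supplies it, then apply
 * the result to both virtqueues.
 */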
static int vhost_net_set_features(struct vhost_net *n, u64 features)
{
	size_t vhost_hlen, sock_hlen, hdr_len;
	int i;

	hdr_len = (features & ((1ULL << VIRTIO_NET_F_MRG_RXBUF) |
			       (1ULL << VIRTIO_F_VERSION_1))) ?
			sizeof(struct virtio_net_hdr_mrg_rxbuf) :
			sizeof(struct virtio_net_hdr);
	if (features & (1 << VHOST_NET_F_VIRTIO_NET_HDR)) {
		/* vhost provides vnet_hdr */
		vhost_hlen = hdr_len;
		sock_hlen = 0;
	} else {
		/* socket provides vnet_hdr */
		vhost_hlen = 0;
		sock_hlen = hdr_len;
	}
	mutex_lock(&n->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&n->dev))
		goto out_unlock;

	if ((features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))) {
		if (vhost_init_device_iotlb(&n->dev, true))
			goto out_unlock;
	}

	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		mutex_lock(&n->vqs[i].vq.mutex);
		n->vqs[i].vq.acked_features = features;
		n->vqs[i].vhost_hlen = vhost_hlen;
		n->vqs[i].sock_hlen = sock_hlen;
		mutex_unlock(&n->vqs[i].vq.mutex);
	}
	mutex_unlock(&n->dev.mutex);
	return 0;

out_unlock:
	mutex_unlock(&n->dev.mutex);
	return -EFAULT;
}

static long vhost_net_set_owner(struct vhost_net *n)
{
	int r;

	mutex_lock(&n->dev.mutex);
	if (vhost_dev_has_owner(&n->dev)) {
		r = -EBUSY;
		goto out;
	}
	r = vhost_net_set_ubuf_info(n);
	if (r)
		goto out;
	r = vhost_dev_set_owner(&n->dev);
	if (r)
		vhost_net_clear_ubuf_info(n);
	vhost_net_flush(n);
out:
	mutex_unlock(&n->dev.mutex);
	return r;
}

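/* Top-level ioctl dispatch. Net-specific requests are handled here; anything
 * else falls through to the generic vhost device/vring ioctls. A typical
 * userspace sequence (sketch only) is:
 *
 *	fd = open("/dev/vhost-net", O_RDWR);
 *	ioctl(fd, VHOST_SET_OWNER, 0);
 *	ioctl(fd, VHOST_GET_FEATURES, &features);
 *	ioctl(fd, VHOST_SET_FEATURES, &features);
 *	... VHOST_SET_MEM_TABLE and per-ring VHOST_SET_VRING_* setup ...
 *	ioctl(fd, VHOST_NET_SET_BACKEND, &backend);	(once per virtqueue)
 */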
static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
			    unsigned long arg)
{
	struct vhost_net *n = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	struct vhost_vring_file backend;
	u64 features;
	int r;

	switch (ioctl) {
	case VHOST_NET_SET_BACKEND:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		return vhost_net_set_backend(n, backend.index, backend.fd);
	case VHOST_GET_FEATURES:
		features = VHOST_NET_FEATURES;
		if (copy_to_user(featurep, &features, sizeof features))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, featurep, sizeof features))
			return -EFAULT;
		if (features & ~VHOST_NET_FEATURES)
			return -EOPNOTSUPP;
		return vhost_net_set_features(n, features);
	case VHOST_RESET_OWNER:
		return vhost_net_reset_owner(n);
	case VHOST_SET_OWNER:
		return vhost_net_set_owner(n);
	default:
		mutex_lock(&n->dev.mutex);
		r = vhost_dev_ioctl(&n->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&n->dev, ioctl, argp);
		else
			vhost_net_flush(n);
		mutex_unlock(&n->dev.mutex);
		return r;
	}
}

#ifdef CONFIG_COMPAT
static long vhost_net_compat_ioctl(struct file *f, unsigned int ioctl,
				   unsigned long arg)
{
	return vhost_net_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

static ssize_t vhost_net_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct vhost_net *n = file->private_data;
	struct vhost_dev *dev = &n->dev;
	int noblock = file->f_flags & O_NONBLOCK;

	return vhost_chr_read_iter(dev, to, noblock);
}

static ssize_t vhost_net_chr_write_iter(struct kiocb *iocb,
					struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct vhost_net *n = file->private_data;
	struct vhost_dev *dev = &n->dev;

	return vhost_chr_write_iter(dev, from);
}

static unsigned int vhost_net_chr_poll(struct file *file, poll_table *wait)
{
	struct vhost_net *n = file->private_data;
	struct vhost_dev *dev = &n->dev;

	return vhost_chr_poll(file, dev, wait);
}

static const struct file_operations vhost_net_fops = {
	.owner          = THIS_MODULE,
	.release        = vhost_net_release,
	.read_iter      = vhost_net_chr_read_iter,
	.write_iter     = vhost_net_chr_write_iter,
	.poll           = vhost_net_chr_poll,
	.unlocked_ioctl = vhost_net_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = vhost_net_compat_ioctl,
#endif
	.open           = vhost_net_open,
	.llseek		= noop_llseek,
};

static struct miscdevice vhost_net_misc = {
	.minor = VHOST_NET_MINOR,
	.name = "vhost-net",
	.fops = &vhost_net_fops,
};

static int vhost_net_init(void)
{
	if (experimental_zcopytx)
		vhost_net_enable_zcopy(VHOST_NET_VQ_TX);
	return misc_register(&vhost_net_misc);
}
module_init(vhost_net_init);

static void vhost_net_exit(void)
{
	misc_deregister(&vhost_net_misc);
}
module_exit(vhost_net_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel accelerator for virtio net");
MODULE_ALIAS_MISCDEV(VHOST_NET_MINOR);
MODULE_ALIAS("devname:vhost-net");