/*
 * vhost transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <[email protected]>
 *         Stefan Hajnoczi <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */
#include <linux/miscdevice.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <net/sock.h>
#include <linux/virtio_vsock.h>
#include <linux/vhost.h>

#include <net/af_vsock.h>
#include "vhost.h"

#define VHOST_VSOCK_DEFAULT_HOST_CID	2

enum {
	VHOST_VSOCK_FEATURES = VHOST_FEATURES,
};

/* Used to track all the vhost_vsock instances on the system. */
static DEFINE_SPINLOCK(vhost_vsock_lock);
static LIST_HEAD(vhost_vsock_list);

struct vhost_vsock {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[2];

	/* Link to global vhost_vsock_list, protected by vhost_vsock_lock */
	struct list_head list;

	struct vhost_work send_pkt_work;
	spinlock_t send_pkt_list_lock;
	struct list_head send_pkt_list;	/* host->guest pending packets */

	atomic_t queued_replies;

	u32 guest_cid;
};

static u32 vhost_transport_get_local_cid(void)
{
	return VHOST_VSOCK_DEFAULT_HOST_CID;
}

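/* Callers must hold vhost_vsock_lock while calling this and while using the
 * returned pointer, since it walks vhost_vsock_list without locking.
 */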
static struct vhost_vsock *__vhost_vsock_get(u32 guest_cid)
{
	struct vhost_vsock *vsock;

	list_for_each_entry(vsock, &vhost_vsock_list, list) {
		u32 other_cid = vsock->guest_cid;

		/* Skip instances that have no CID yet */
		if (other_cid == 0)
			continue;

		if (other_cid == guest_cid)
			return vsock;
	}

	return NULL;
}

static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
{
	struct vhost_vsock *vsock;

	spin_lock_bh(&vhost_vsock_lock);
	vsock = __vhost_vsock_get(guest_cid);
	spin_unlock_bh(&vhost_vsock_lock);

	return vsock;
}

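/* Fill the guest's RX virtqueue with packets queued on send_pkt_list
 * (host->guest direction).  Runs in vhost worker context and takes the
 * virtqueue mutex itself.
 */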
static void
vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
			    struct vhost_virtqueue *vq)
{
	struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
	bool added = false;
	bool restart_tx = false;

	mutex_lock(&vq->mutex);

	if (!vq->private_data)
		goto out;

	/* Avoid further vmexits, we're already processing the virtqueue */
	vhost_disable_notify(&vsock->dev, vq);

	for (;;) {
		struct virtio_vsock_pkt *pkt;
		struct iov_iter iov_iter;
		unsigned out, in;
		size_t nbytes;
		size_t len;
		int head;

		spin_lock_bh(&vsock->send_pkt_list_lock);
		if (list_empty(&vsock->send_pkt_list)) {
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			vhost_enable_notify(&vsock->dev, vq);
			break;
		}

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		spin_unlock_bh(&vsock->send_pkt_list_lock);

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			break;
		}

		if (head == vq->num) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);

			/* We cannot finish yet if more buffers snuck in while
			 * re-enabling notify.
			 */
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		if (out) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Expected 0 output buffers, got %u\n", out);
			break;
		}

		len = iov_length(&vq->iov[out], in);
		iov_iter_init(&iov_iter, READ, &vq->iov[out], in, len);

		nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
		if (nbytes != sizeof(pkt->hdr)) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Faulted on copying pkt hdr\n");
			break;
		}

		nbytes = copy_to_iter(pkt->buf, pkt->len, &iov_iter);
		if (nbytes != pkt->len) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Faulted on copying pkt buf\n");
			break;
		}

		vhost_add_used(vq, head, sizeof(pkt->hdr) + pkt->len);
		added = true;

		if (pkt->reply) {
			int val;

			val = atomic_dec_return(&vsock->queued_replies);

			/* Do we have resources to resume tx processing? */
			if (val + 1 == tx_vq->num)
				restart_tx = true;
		}

		/* Deliver to monitoring devices all correctly transmitted
		 * packets.
		 */
		virtio_transport_deliver_tap_pkt(pkt);

		virtio_transport_free_pkt(pkt);
	}
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);

	if (restart_tx)
		vhost_poll_queue(&tx_vq->poll);
}

static void vhost_transport_send_pkt_work(struct vhost_work *work)
{
	struct vhost_virtqueue *vq;
	struct vhost_vsock *vsock;

	vsock = container_of(work, struct vhost_vsock, send_pkt_work);
	vq = &vsock->vqs[VSOCK_VQ_RX];

	vhost_transport_do_send_pkt(vsock, vq);
}

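/* Queue a packet for delivery to the guest and kick the worker.  Returns the
 * packet length on success, or -ENODEV if no instance owns the destination
 * CID.
 */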
static int
vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
{
	struct vhost_vsock *vsock;
	int len = pkt->len;

	/* Find the vhost_vsock according to guest context id */
	vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
	if (!vsock) {
		virtio_transport_free_pkt(pkt);
		return -ENODEV;
	}

	if (pkt->reply)
		atomic_inc(&vsock->queued_replies);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_add_tail(&pkt->list, &vsock->send_pkt_list);
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);
	return len;
}

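/* Drop all packets queued for @vsk that have not yet been handed to the
 * guest.  If freeing replies brings the counter back below the tx virtqueue
 * size, tx processing is restarted.
 */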
static int
vhost_transport_cancel_pkt(struct vsock_sock *vsk)
{
	struct vhost_vsock *vsock;
	struct virtio_vsock_pkt *pkt, *n;
	int cnt = 0;
	LIST_HEAD(freeme);

	/* Find the vhost_vsock according to guest context id */
	vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
	if (!vsock)
		return -ENODEV;

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
		if (pkt->vsk != vsk)
			continue;
		list_move(&pkt->list, &freeme);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	list_for_each_entry_safe(pkt, n, &freeme, list) {
		if (pkt->reply)
			cnt++;
		list_del(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}

	if (cnt) {
		struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
		int new_cnt;

		new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
		if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
			vhost_poll_queue(&tx_vq->poll);
	}

	return 0;
}

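/* Build a virtio_vsock_pkt from the descriptor the guest placed on the tx
 * virtqueue: copy the header first, then the payload (if any).
 */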
static struct virtio_vsock_pkt *
vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
		      unsigned int out, unsigned int in)
{
	struct virtio_vsock_pkt *pkt;
	struct iov_iter iov_iter;
	size_t nbytes;
	size_t len;

	if (in != 0) {
		vq_err(vq, "Expected 0 input buffers, got %u\n", in);
		return NULL;
	}

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return NULL;

	len = iov_length(vq->iov, out);
	iov_iter_init(&iov_iter, WRITE, vq->iov, out, len);

	nbytes = copy_from_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
	if (nbytes != sizeof(pkt->hdr)) {
		vq_err(vq, "Expected %zu bytes for pkt->hdr, got %zu bytes\n",
		       sizeof(pkt->hdr), nbytes);
		kfree(pkt);
		return NULL;
	}

	if (le16_to_cpu(pkt->hdr.type) == VIRTIO_VSOCK_TYPE_STREAM)
		pkt->len = le32_to_cpu(pkt->hdr.len);

	/* No payload */
	if (!pkt->len)
		return pkt;

	/* The pkt is too big */
	if (pkt->len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE) {
		kfree(pkt);
		return NULL;
	}

	pkt->buf = kmalloc(pkt->len, GFP_KERNEL);
	if (!pkt->buf) {
		kfree(pkt);
		return NULL;
	}

	nbytes = copy_from_iter(pkt->buf, pkt->len, &iov_iter);
	if (nbytes != pkt->len) {
		vq_err(vq, "Expected %u byte payload, got %zu bytes\n",
		       pkt->len, nbytes);
		virtio_transport_free_pkt(pkt);
		return NULL;
	}

	return pkt;
}

/* Is there space left for replies to rx packets? */
static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX];
	int val;

	smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
	val = atomic_read(&vsock->queued_replies);

	return val < vq->num;
}

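/* Drain guest->host packets from the tx virtqueue and hand them to the
 * virtio-vsock core.  Throttles itself when the reply budget is exhausted.
 */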
static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);
	struct virtio_vsock_pkt *pkt;
	int head;
	unsigned int out, in;
	bool added = false;

	mutex_lock(&vq->mutex);

	if (!vq->private_data)
		goto out;

	vhost_disable_notify(&vsock->dev, vq);
	for (;;) {
		u32 len;

		if (!vhost_vsock_more_replies(vsock)) {
			/* Stop tx until the device processes already
			 * pending replies.  Leave tx virtqueue
			 * callbacks disabled.
			 */
			goto no_more_replies;
		}

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0)
			break;

		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		pkt = vhost_vsock_alloc_pkt(vq, out, in);
		if (!pkt) {
			vq_err(vq, "Faulted on pkt\n");
			continue;
		}

		len = pkt->len;

		/* Deliver to monitoring devices all received packets */
		virtio_transport_deliver_tap_pkt(pkt);

		/* Only accept correctly addressed packets */
		if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid)
			virtio_transport_recv_pkt(pkt);
		else
			virtio_transport_free_pkt(pkt);

		vhost_add_used(vq, head, sizeof(pkt->hdr) + len);
		added = true;
	}

no_more_replies:
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);
}

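/* The guest posted new RX buffers; resume draining send_pkt_list. */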
static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);

	vhost_transport_do_send_pkt(vsock, vq);
}

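/* VHOST_VSOCK_SET_RUNNING(1): validate vring access and publish the device
 * in each virtqueue's private_data so the kick handlers start processing.
 */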
static int vhost_vsock_start(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq;
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);

		if (!vhost_vq_access_ok(vq)) {
			ret = -EFAULT;
			goto err_vq;
		}

		if (!vq->private_data) {
			vq->private_data = vsock;
			ret = vhost_vq_init_access(vq);
			if (ret)
				goto err_vq;
		}

		mutex_unlock(&vq->mutex);
	}

	mutex_unlock(&vsock->dev.mutex);
	return 0;

err_vq:
	vq->private_data = NULL;
	mutex_unlock(&vq->mutex);

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vq->private_data = NULL;
		mutex_unlock(&vq->mutex);
	}
err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

static int vhost_vsock_stop(struct vhost_vsock *vsock)
{
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		struct vhost_virtqueue *vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vq->private_data = NULL;
		mutex_unlock(&vq->mutex);
	}

err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

static void vhost_vsock_free(struct vhost_vsock *vsock)
{
	kvfree(vsock);
}

static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
{
	struct vhost_virtqueue **vqs;
	struct vhost_vsock *vsock;
	int ret;

	/* This struct is large and allocation could fail, fall back to vmalloc
	 * if there is no other way.
	 */
	vsock = kvmalloc(sizeof(*vsock), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!vsock)
		return -ENOMEM;

	vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		ret = -ENOMEM;
		goto out;
	}

	atomic_set(&vsock->queued_replies, 0);

	vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
	vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX];
	vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
	vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;

	vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs));

	file->private_data = vsock;
	spin_lock_init(&vsock->send_pkt_list_lock);
	INIT_LIST_HEAD(&vsock->send_pkt_list);
	vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);

	spin_lock_bh(&vhost_vsock_lock);
	list_add_tail(&vsock->list, &vhost_vsock_list);
	spin_unlock_bh(&vhost_vsock_lock);
	return 0;

out:
	vhost_vsock_free(vsock);
	return ret;
}

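/* Wait until all work queued so far (kick handlers and send_pkt_work) has
 * finished running.
 */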
static void vhost_vsock_flush(struct vhost_vsock *vsock)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++)
		if (vsock->vqs[i].handle_kick)
			vhost_poll_flush(&vsock->vqs[i].poll);
	vhost_work_flush(&vsock->dev, &vsock->send_pkt_work);
}

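/* Reset a connected socket whose peer CID no longer has a vhost_vsock
 * instance, so blocked callers see ECONNRESET instead of hanging.
 */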
static void vhost_vsock_reset_orphans(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	/* vmci_transport.c doesn't take sk_lock here either.  At least we're
	 * under vsock_table_lock so the sock cannot disappear while we're
	 * executing.
	 */

	if (!vhost_vsock_get(vsk->remote_addr.svm_cid)) {
		sock_set_flag(sk, SOCK_DONE);
		vsk->peer_shutdown = SHUTDOWN_MASK;
		sk->sk_state = SS_UNCONNECTED;
		sk->sk_err = ECONNRESET;
		sk->sk_error_report(sk);
	}
}

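/* Teardown: unlink the instance, reset orphaned sockets, stop the
 * virtqueues, then free any packets still queued for the guest.
 */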
static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
{
	struct vhost_vsock *vsock = file->private_data;

	spin_lock_bh(&vhost_vsock_lock);
	list_del(&vsock->list);
	spin_unlock_bh(&vhost_vsock_lock);

	/* Iterating over all connections for all CIDs to find orphans is
	 * inefficient.  Room for improvement here.
	 */
	vsock_for_each_connected_socket(vhost_vsock_reset_orphans);

	vhost_vsock_stop(vsock);
	vhost_vsock_flush(vsock);
	vhost_dev_stop(&vsock->dev);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	while (!list_empty(&vsock->send_pkt_list)) {
		struct virtio_vsock_pkt *pkt;

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	vhost_dev_cleanup(&vsock->dev, false);
	kfree(vsock->dev.vqs);
	vhost_vsock_free(vsock);
	return 0;
}

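/* VHOST_VSOCK_SET_GUEST_CID: assign the guest's context ID, rejecting
 * reserved values and CIDs already claimed by another instance.
 */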
static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
{
	struct vhost_vsock *other;

	/* Refuse reserved CIDs */
	if (guest_cid <= VMADDR_CID_HOST ||
	    guest_cid == U32_MAX)
		return -EINVAL;

	/* 64-bit CIDs are not yet supported */
	if (guest_cid > U32_MAX)
		return -EINVAL;

	/* Refuse if CID is already in use */
	spin_lock_bh(&vhost_vsock_lock);
	other = __vhost_vsock_get(guest_cid);
	if (other && other != vsock) {
		spin_unlock_bh(&vhost_vsock_lock);
		return -EADDRINUSE;
	}
	vsock->guest_cid = guest_cid;
	spin_unlock_bh(&vhost_vsock_lock);

	return 0;
}

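/* Apply the feature bits negotiated via VHOST_SET_FEATURES to every
 * virtqueue.
 */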
static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
{
	struct vhost_virtqueue *vq;
	int i;

	if (features & ~VHOST_VSOCK_FEATURES)
		return -EOPNOTSUPP;

	mutex_lock(&vsock->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&vsock->dev)) {
		mutex_unlock(&vsock->dev.mutex);
		return -EFAULT;
	}

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];
		mutex_lock(&vq->mutex);
		vq->acked_features = features;
		mutex_unlock(&vq->mutex);
	}
	mutex_unlock(&vsock->dev.mutex);
	return 0;
}

static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
				  unsigned long arg)
{
	struct vhost_vsock *vsock = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 guest_cid;
	u64 features;
	int start;
	int r;

	switch (ioctl) {
	case VHOST_VSOCK_SET_GUEST_CID:
		if (copy_from_user(&guest_cid, argp, sizeof(guest_cid)))
			return -EFAULT;
		return vhost_vsock_set_cid(vsock, guest_cid);
	case VHOST_VSOCK_SET_RUNNING:
		if (copy_from_user(&start, argp, sizeof(start)))
			return -EFAULT;
		if (start)
			return vhost_vsock_start(vsock);
		else
			return vhost_vsock_stop(vsock);
	case VHOST_GET_FEATURES:
		features = VHOST_VSOCK_FEATURES;
		if (copy_to_user(argp, &features, sizeof(features)))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, argp, sizeof(features)))
			return -EFAULT;
		return vhost_vsock_set_features(vsock, features);
	default:
		mutex_lock(&vsock->dev.mutex);
		r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&vsock->dev, ioctl, argp);
		else
			vhost_vsock_flush(vsock);
		mutex_unlock(&vsock->dev.mutex);
		return r;
	}
}

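/* Illustrative userspace setup sketch (not part of this file; error handling
 * and the vring/memory-table details are omitted):
 *
 *	int fd = open("/dev/vhost-vsock", O_RDWR);
 *	uint64_t cid = 3;	// example guest CID, must be > VMADDR_CID_HOST
 *	int start = 1;
 *
 *	ioctl(fd, VHOST_SET_OWNER);
 *	ioctl(fd, VHOST_VSOCK_SET_GUEST_CID, &cid);
 *	// ... VHOST_SET_MEM_TABLE and VHOST_SET_VRING_* calls here ...
 *	ioctl(fd, VHOST_VSOCK_SET_RUNNING, &start);
 */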
static const struct file_operations vhost_vsock_fops = {
	.owner          = THIS_MODULE,
	.open           = vhost_vsock_dev_open,
	.release        = vhost_vsock_dev_release,
	.llseek		= noop_llseek,
	.unlocked_ioctl = vhost_vsock_dev_ioctl,
};

static struct miscdevice vhost_vsock_misc = {
	.minor = VHOST_VSOCK_MINOR,
	.name = "vhost-vsock",
	.fops = &vhost_vsock_fops,
};

static struct virtio_transport vhost_transport = {
	.transport = {
		.get_local_cid            = vhost_transport_get_local_cid,

		.init                     = virtio_transport_do_socket_init,
		.destruct                 = virtio_transport_destruct,
		.release                  = virtio_transport_release,
		.connect                  = virtio_transport_connect,
		.shutdown                 = virtio_transport_shutdown,
		.cancel_pkt               = vhost_transport_cancel_pkt,

		.dgram_enqueue            = virtio_transport_dgram_enqueue,
		.dgram_dequeue            = virtio_transport_dgram_dequeue,
		.dgram_bind               = virtio_transport_dgram_bind,
		.dgram_allow              = virtio_transport_dgram_allow,

		.stream_enqueue           = virtio_transport_stream_enqueue,
		.stream_dequeue           = virtio_transport_stream_dequeue,
		.stream_has_data          = virtio_transport_stream_has_data,
		.stream_has_space         = virtio_transport_stream_has_space,
		.stream_rcvhiwat          = virtio_transport_stream_rcvhiwat,
		.stream_is_active         = virtio_transport_stream_is_active,
		.stream_allow             = virtio_transport_stream_allow,

		.notify_poll_in           = virtio_transport_notify_poll_in,
		.notify_poll_out          = virtio_transport_notify_poll_out,
		.notify_recv_init         = virtio_transport_notify_recv_init,
		.notify_recv_pre_block    = virtio_transport_notify_recv_pre_block,
		.notify_recv_pre_dequeue  = virtio_transport_notify_recv_pre_dequeue,
		.notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
		.notify_send_init         = virtio_transport_notify_send_init,
		.notify_send_pre_block    = virtio_transport_notify_send_pre_block,
		.notify_send_pre_enqueue  = virtio_transport_notify_send_pre_enqueue,
		.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,

		.set_buffer_size          = virtio_transport_set_buffer_size,
		.set_min_buffer_size      = virtio_transport_set_min_buffer_size,
		.set_max_buffer_size      = virtio_transport_set_max_buffer_size,
		.get_buffer_size          = virtio_transport_get_buffer_size,
		.get_min_buffer_size      = virtio_transport_get_min_buffer_size,
		.get_max_buffer_size      = virtio_transport_get_max_buffer_size,
	},

	.send_pkt = vhost_transport_send_pkt,
};

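/* Register the transport with the AF_VSOCK core before exposing
 * /dev/vhost-vsock to userspace.
 */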
static int __init vhost_vsock_init(void)
{
	int ret;

	ret = vsock_core_init(&vhost_transport.transport);
	if (ret < 0)
		return ret;
	return misc_register(&vhost_vsock_misc);
}

static void __exit vhost_vsock_exit(void)
{
	misc_deregister(&vhost_vsock_misc);
	vsock_core_exit();
}

module_init(vhost_vsock_init);
module_exit(vhost_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("vhost transport for vsock");
MODULE_ALIAS_MISCDEV(VHOST_VSOCK_MINOR);
MODULE_ALIAS("devname:vhost-vsock");