/*
 * vhost transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */
#include <linux/miscdevice.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <net/sock.h>
#include <linux/virtio_vsock.h>
#include <linux/vhost.h>

#include <net/af_vsock.h>
#include "vhost.h"
#define VHOST_VSOCK_DEFAULT_HOST_CID	2

enum {
	VHOST_VSOCK_FEATURES = VHOST_FEATURES,
};
/* Used to track all the vhost_vsock instances on the system. */
static DEFINE_SPINLOCK(vhost_vsock_lock);
static LIST_HEAD(vhost_vsock_list);
struct vhost_vsock {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[2];

	/* Link to global vhost_vsock_list, protected by vhost_vsock_lock */
	struct list_head list;

	struct vhost_work send_pkt_work;
	spinlock_t send_pkt_list_lock;
	struct list_head send_pkt_list;	/* host->guest pending packets */

	atomic_t queued_replies;

	u32 guest_cid;
};
static u32 vhost_transport_get_local_cid(void)
{
	return VHOST_VSOCK_DEFAULT_HOST_CID;
}
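
/* Look up the vhost_vsock instance that owns @guest_cid.  Returns NULL if
 * no instance has been assigned that CID.  Takes and releases
 * vhost_vsock_lock internally, so callers must not hold it.
 */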
static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
{
	struct vhost_vsock *vsock;

	spin_lock_bh(&vhost_vsock_lock);
	list_for_each_entry(vsock, &vhost_vsock_list, list) {
		u32 other_cid = vsock->guest_cid;

		/* Skip instances that have no CID yet */
		if (other_cid == 0)
			continue;

		if (other_cid == guest_cid) {
			spin_unlock_bh(&vhost_vsock_lock);
			return vsock;
		}
	}
	spin_unlock_bh(&vhost_vsock_lock);

	return NULL;
}
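
/* Copy queued host->guest packets from send_pkt_list into the buffers the
 * guest has posted on the RX virtqueue.  If no buffer is available, the
 * packet is requeued and processing stops until the guest kicks again.
 */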
static void
vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
			    struct vhost_virtqueue *vq)
{
	struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
	bool added = false;
	bool restart_tx = false;

	mutex_lock(&vq->mutex);

	if (!vq->private_data)
		goto out;

	/* Avoid further vmexits, we're already processing the virtqueue */
	vhost_disable_notify(&vsock->dev, vq);

	for (;;) {
		struct virtio_vsock_pkt *pkt;
		struct iov_iter iov_iter;
		unsigned out, in;
		size_t nbytes;
		size_t len;
		int head;
		spin_lock_bh(&vsock->send_pkt_list_lock);
		if (list_empty(&vsock->send_pkt_list)) {
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			vhost_enable_notify(&vsock->dev, vq);
			break;
		}

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		spin_unlock_bh(&vsock->send_pkt_list_lock);
		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			break;
		}

		if (head == vq->num) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);

			/* We cannot finish yet if more buffers snuck in while
			 * re-enabling notify.
			 */
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}
		if (out) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Expected 0 output buffers, got %u\n", out);
			break;
		}

		len = iov_length(&vq->iov[out], in);
		iov_iter_init(&iov_iter, READ, &vq->iov[out], in, len);

		nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
		if (nbytes != sizeof(pkt->hdr)) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Faulted on copying pkt hdr\n");
			break;
		}
		nbytes = copy_to_iter(pkt->buf, pkt->len, &iov_iter);
		if (nbytes != pkt->len) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Faulted on copying pkt buf\n");
			break;
		}

		vhost_add_used(vq, head, sizeof(pkt->hdr) + pkt->len);
		added = true;

		if (pkt->reply) {
			int val;

			val = atomic_dec_return(&vsock->queued_replies);

			/* Do we have resources to resume tx processing? */
			if (val + 1 == tx_vq->num)
				restart_tx = true;
		}

		virtio_transport_free_pkt(pkt);
	}
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);

	if (restart_tx)
		vhost_poll_queue(&tx_vq->poll);
}
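
/* Worker function: services the RX virtqueue whenever new host->guest
 * packets have been queued by vhost_transport_send_pkt().
 */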
static void vhost_transport_send_pkt_work(struct vhost_work *work)
{
	struct vhost_virtqueue *vq;
	struct vhost_vsock *vsock;

	vsock = container_of(work, struct vhost_vsock, send_pkt_work);
	vq = &vsock->vqs[VSOCK_VQ_RX];

	vhost_transport_do_send_pkt(vsock, vq);
}
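
/* Queue a packet for delivery to the guest and kick the worker.  Returns
 * the packet length on success, or -ENODEV if no vhost_vsock instance
 * owns the destination CID.
 */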
static int
vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
{
	struct vhost_vsock *vsock;
	int len = pkt->len;

	/* Find the vhost_vsock according to guest context id */
	vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
	if (!vsock) {
		virtio_transport_free_pkt(pkt);
		return -ENODEV;
	}

	if (pkt->reply)
		atomic_inc(&vsock->queued_replies);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_add_tail(&pkt->list, &vsock->send_pkt_list);
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);
	return len;
}
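
/* Build a virtio_vsock_pkt from the guest buffers described by a TX
 * virtqueue descriptor chain.  Returns NULL on malformed descriptors,
 * allocation failure, or a short copy.
 */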
static struct virtio_vsock_pkt *
vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
		      unsigned int out, unsigned int in)
{
	struct virtio_vsock_pkt *pkt;
	struct iov_iter iov_iter;
	size_t nbytes;
	size_t len;

	if (in != 0) {
		vq_err(vq, "Expected 0 input buffers, got %u\n", in);
		return NULL;
	}

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return NULL;

	len = iov_length(vq->iov, out);
	iov_iter_init(&iov_iter, WRITE, vq->iov, out, len);

	nbytes = copy_from_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
	if (nbytes != sizeof(pkt->hdr)) {
		vq_err(vq, "Expected %zu bytes for pkt->hdr, got %zu bytes\n",
		       sizeof(pkt->hdr), nbytes);
		kfree(pkt);
		return NULL;
	}
	if (le16_to_cpu(pkt->hdr.type) == VIRTIO_VSOCK_TYPE_STREAM)
		pkt->len = le32_to_cpu(pkt->hdr.len);

	/* No payload */
	if (!pkt->len)
		return pkt;

	/* The pkt is too big */
	if (pkt->len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE) {
		kfree(pkt);
		return NULL;
	}

	pkt->buf = kmalloc(pkt->len, GFP_KERNEL);
	if (!pkt->buf) {
		kfree(pkt);
		return NULL;
	}

	nbytes = copy_from_iter(pkt->buf, pkt->len, &iov_iter);
	if (nbytes != pkt->len) {
		vq_err(vq, "Expected %u byte payload, got %zu bytes\n",
		       pkt->len, nbytes);
		virtio_transport_free_pkt(pkt);
		return NULL;
	}

	return pkt;
}
/* Is there space left for replies to rx packets? */
static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX];
	int val;

	smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
	val = atomic_read(&vsock->queued_replies);

	return val < vq->num;
}
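
/* TX kick handler: receive guest->host packets.  Throttles itself when
 * too many replies are pending, so a guest cannot pin unbounded host
 * memory with requests that each require a queued reply.
 */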
static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);
	struct virtio_vsock_pkt *pkt;
	int head;
	unsigned int out, in;
	bool added = false;
	mutex_lock(&vq->mutex);

	if (!vq->private_data)
		goto out;

	vhost_disable_notify(&vsock->dev, vq);
	for (;;) {
		u32 len;

		if (!vhost_vsock_more_replies(vsock)) {
			/* Stop tx until the device processes already
			 * pending replies.  Leave tx virtqueue
			 * callbacks disabled.
			 */
			goto no_more_replies;
		}
		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0)
			break;

		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}
		pkt = vhost_vsock_alloc_pkt(vq, out, in);
		if (!pkt) {
			vq_err(vq, "Faulted on pkt\n");
			continue;
		}

		len = pkt->len;

		/* Only accept correctly addressed packets */
		if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid)
			virtio_transport_recv_pkt(pkt);
		else
			virtio_transport_free_pkt(pkt);

		vhost_add_used(vq, head, sizeof(pkt->hdr) + len);
		added = true;
	}

no_more_replies:
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);
}
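
/* RX kick handler: the guest added new receive buffers, so retry
 * delivering any pending host->guest packets.
 */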
static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);

	vhost_transport_do_send_pkt(vsock, vq);
}
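
/* Begin servicing both virtqueues.  vq->private_data doubles as the
 * "enabled" flag that the kick handlers check before doing any work.
 */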
static int vhost_vsock_start(struct vhost_vsock *vsock)
{
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		struct vhost_virtqueue *vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);

		if (!vhost_vq_access_ok(vq)) {
			ret = -EFAULT;
			mutex_unlock(&vq->mutex);
			goto err_vq;
		}

		if (!vq->private_data) {
			vq->private_data = vsock;
			vhost_vq_init_access(vq);
		}

		mutex_unlock(&vq->mutex);
	}

	mutex_unlock(&vsock->dev.mutex);
	return 0;

err_vq:
	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		struct vhost_virtqueue *vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vq->private_data = NULL;
		mutex_unlock(&vq->mutex);
	}
err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}
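
/* Stop servicing both virtqueues by clearing vq->private_data. */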
static int vhost_vsock_stop(struct vhost_vsock *vsock)
{
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		struct vhost_virtqueue *vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vq->private_data = NULL;
		mutex_unlock(&vq->mutex);
	}

err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}
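
/* vsock may have come from kzalloc() or vmalloc() (see
 * vhost_vsock_dev_open()); kvfree() frees either kind of allocation.
 */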
static void vhost_vsock_free(struct vhost_vsock *vsock)
{
	kvfree(vsock);
}
static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
{
	struct vhost_virtqueue **vqs;
	struct vhost_vsock *vsock;
	int ret;

	/* This struct is large and allocation could fail, fall back to vmalloc
	 * if there is no other way.
	 */
	vsock = kzalloc(sizeof(*vsock), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
	if (!vsock) {
		vsock = vmalloc(sizeof(*vsock));
		if (!vsock)
			return -ENOMEM;
	}

	vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		ret = -ENOMEM;
		goto out;
	}
	atomic_set(&vsock->queued_replies, 0);

	vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
	vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX];
	vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
	vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;

	vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs));

	file->private_data = vsock;
	spin_lock_init(&vsock->send_pkt_list_lock);
	INIT_LIST_HEAD(&vsock->send_pkt_list);
	vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
	spin_lock_bh(&vhost_vsock_lock);
	list_add_tail(&vsock->list, &vhost_vsock_list);
	spin_unlock_bh(&vhost_vsock_lock);
	return 0;

out:
	vhost_vsock_free(vsock);
	return ret;
}
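
/* Wait until all virtqueue kick handlers and the send_pkt worker that are
 * currently queued or running have completed.
 */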
static void vhost_vsock_flush(struct vhost_vsock *vsock)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++)
		if (vsock->vqs[i].handle_kick)
			vhost_poll_flush(&vsock->vqs[i].poll);
	vhost_work_flush(&vsock->dev, &vsock->send_pkt_work);
}
static void vhost_vsock_reset_orphans(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	/* vmci_transport.c doesn't take sk_lock here either.  At least we're
	 * under vsock_table_lock so the sock cannot disappear while we're
	 * executing.
	 */

	if (!vhost_vsock_get(vsk->remote_addr.svm_cid)) {
		sock_set_flag(sk, SOCK_DONE);
		vsk->peer_shutdown = SHUTDOWN_MASK;
		sk->sk_state = SS_UNCONNECTED;
		sk->sk_err = ECONNRESET;
		sk->sk_error_report(sk);
	}
}
static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
{
	struct vhost_vsock *vsock = file->private_data;

	spin_lock_bh(&vhost_vsock_lock);
	list_del(&vsock->list);
	spin_unlock_bh(&vhost_vsock_lock);
	/* Iterating over all connections for all CIDs to find orphans is
	 * inefficient.  Room for improvement here. */
	vsock_for_each_connected_socket(vhost_vsock_reset_orphans);

	vhost_vsock_stop(vsock);
	vhost_vsock_flush(vsock);
	vhost_dev_stop(&vsock->dev);
	spin_lock_bh(&vsock->send_pkt_list_lock);
	while (!list_empty(&vsock->send_pkt_list)) {
		struct virtio_vsock_pkt *pkt;

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);
	vhost_dev_cleanup(&vsock->dev, false);
	kfree(vsock->dev.vqs);
	vhost_vsock_free(vsock);
	return 0;
}
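
/* VHOST_VSOCK_SET_GUEST_CID ioctl handler: assign a guest CID to this
 * instance, rejecting reserved, oversized, and already-used CIDs.
 */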
static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
{
	struct vhost_vsock *other;

	/* Refuse reserved CIDs */
	if (guest_cid <= VMADDR_CID_HOST ||
	    guest_cid == U32_MAX)
		return -EINVAL;

	/* 64-bit CIDs are not yet supported */
	if (guest_cid > U32_MAX)
		return -EINVAL;

	/* Refuse if CID is already in use */
	other = vhost_vsock_get(guest_cid);
	if (other && other != vsock)
		return -EADDRINUSE;

	spin_lock_bh(&vhost_vsock_lock);
	vsock->guest_cid = guest_cid;
	spin_unlock_bh(&vhost_vsock_lock);

	return 0;
}
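
/* VHOST_SET_FEATURES ioctl handler: validate the requested feature bits
 * and ack them on every virtqueue.
 */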
static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
{
	struct vhost_virtqueue *vq;
	int i;

	if (features & ~VHOST_VSOCK_FEATURES)
		return -EOPNOTSUPP;

	mutex_lock(&vsock->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&vsock->dev)) {
		mutex_unlock(&vsock->dev.mutex);
		return -EFAULT;
	}

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];
		mutex_lock(&vq->mutex);
		vq->acked_features = features;
		mutex_unlock(&vq->mutex);
	}
	mutex_unlock(&vsock->dev.mutex);
	return 0;
}
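
/* Device ioctl entry point: handle the vsock-specific ioctls here and
 * fall back to the generic vhost device/vring ioctls for everything else.
 */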
static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
				  unsigned long arg)
{
	struct vhost_vsock *vsock = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 guest_cid;
	u64 features;
	int start;
	int r;

	switch (ioctl) {
	case VHOST_VSOCK_SET_GUEST_CID:
		if (copy_from_user(&guest_cid, argp, sizeof(guest_cid)))
			return -EFAULT;
		return vhost_vsock_set_cid(vsock, guest_cid);
	case VHOST_VSOCK_SET_RUNNING:
		if (copy_from_user(&start, argp, sizeof(start)))
			return -EFAULT;
		if (start)
			return vhost_vsock_start(vsock);
		else
			return vhost_vsock_stop(vsock);
	case VHOST_GET_FEATURES:
		features = VHOST_VSOCK_FEATURES;
		if (copy_to_user(argp, &features, sizeof(features)))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, argp, sizeof(features)))
			return -EFAULT;
		return vhost_vsock_set_features(vsock, features);
	default:
		mutex_lock(&vsock->dev.mutex);
		r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&vsock->dev, ioctl, argp);
		else
			vhost_vsock_flush(vsock);
		mutex_unlock(&vsock->dev.mutex);
		return r;
	}
}
static const struct file_operations vhost_vsock_fops = {
	.owner          = THIS_MODULE,
	.open           = vhost_vsock_dev_open,
	.release        = vhost_vsock_dev_release,
	.llseek		= noop_llseek,
	.unlocked_ioctl = vhost_vsock_dev_ioctl,
};
static struct miscdevice vhost_vsock_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "vhost-vsock",
	.fops = &vhost_vsock_fops,
};
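
/* The vsock core transport ops.  Most operations are provided by the
 * common virtio transport code; only CID lookup and packet transmission
 * are vhost-specific.
 */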
static struct virtio_transport vhost_transport = {
	.transport = {
		.get_local_cid            = vhost_transport_get_local_cid,

		.init                     = virtio_transport_do_socket_init,
		.destruct                 = virtio_transport_destruct,
		.release                  = virtio_transport_release,
		.connect                  = virtio_transport_connect,
		.shutdown                 = virtio_transport_shutdown,

		.dgram_enqueue            = virtio_transport_dgram_enqueue,
		.dgram_dequeue            = virtio_transport_dgram_dequeue,
		.dgram_bind               = virtio_transport_dgram_bind,
		.dgram_allow              = virtio_transport_dgram_allow,

		.stream_enqueue           = virtio_transport_stream_enqueue,
		.stream_dequeue           = virtio_transport_stream_dequeue,
		.stream_has_data          = virtio_transport_stream_has_data,
		.stream_has_space         = virtio_transport_stream_has_space,
		.stream_rcvhiwat          = virtio_transport_stream_rcvhiwat,
		.stream_is_active         = virtio_transport_stream_is_active,
		.stream_allow             = virtio_transport_stream_allow,

		.notify_poll_in           = virtio_transport_notify_poll_in,
		.notify_poll_out          = virtio_transport_notify_poll_out,
		.notify_recv_init         = virtio_transport_notify_recv_init,
		.notify_recv_pre_block    = virtio_transport_notify_recv_pre_block,
		.notify_recv_pre_dequeue  = virtio_transport_notify_recv_pre_dequeue,
		.notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
		.notify_send_init         = virtio_transport_notify_send_init,
		.notify_send_pre_block    = virtio_transport_notify_send_pre_block,
		.notify_send_pre_enqueue  = virtio_transport_notify_send_pre_enqueue,
		.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,

		.set_buffer_size          = virtio_transport_set_buffer_size,
		.set_min_buffer_size      = virtio_transport_set_min_buffer_size,
		.set_max_buffer_size      = virtio_transport_set_max_buffer_size,
		.get_buffer_size          = virtio_transport_get_buffer_size,
		.get_min_buffer_size      = virtio_transport_get_min_buffer_size,
		.get_max_buffer_size      = virtio_transport_get_max_buffer_size,
	},

	.send_pkt = vhost_transport_send_pkt,
};
static int __init vhost_vsock_init(void)
{
	int ret;

	ret = vsock_core_init(&vhost_transport.transport);
	if (ret < 0)
		return ret;
	return misc_register(&vhost_vsock_misc);
}

static void __exit vhost_vsock_exit(void)
{
	misc_deregister(&vhost_vsock_misc);
	vsock_core_exit();
}
module_init(vhost_vsock_init);
module_exit(vhost_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("vhost transport for vsock");