// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio vhost-user driver
 *
 * Copyright(c) 2019 Intel Corporation
 *
 * This driver allows virtio devices to be used over a vhost-user socket.
 *
 * Guest devices can be instantiated by kernel module or command line
 * parameters. One device will be created for each parameter. Syntax:
 *
 *		virtio_uml.device=<socket>:<virtio_id>[:<platform_id>]
 * where:
 *		<socket>	:= vhost-user socket path to connect
 *		<virtio_id>	:= virtio device id (as in virtio_ids.h)
 *		<platform_id>	:= (optional) platform device id
 *
 * example:
 *		virtio_uml.device=/var/uml.socket:1
 *
 * Based on Virtio MMIO driver by Pawel Moll, copyright 2011-2014, ARM Ltd.
 */
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/time-internal.h>
#include <linux/virtio-uml.h>
#include <shared/as-layout.h>
#include <irq_kern.h>
#include <init.h>
#include <os.h>
#include "vhost_user.h"

#define MAX_SUPPORTED_QUEUE_SIZE	256

#define to_virtio_uml_device(_vdev) \
	container_of(_vdev, struct virtio_uml_device, vdev)

struct virtio_uml_platform_data {
	u32 virtio_device_id;
	const char *socket_path;
	struct work_struct conn_broken_wk;
	struct platform_device *pdev;
};

struct virtio_uml_device {
	struct virtio_device vdev;
	struct platform_device *pdev;
	struct virtio_uml_platform_data *pdata;

	spinlock_t sock_lock;
	int sock, req_fd, irq;
	u64 features;
	u64 protocol_features;
	u8 status;
	u8 registered:1;
	u8 suspended:1;
	u8 no_vq_suspend:1;

	u8 config_changed_irq:1;
	uint64_t vq_irq_vq_map;
	int recv_rc;
};

struct virtio_uml_vq_info {
	int kick_fd, call_fd;
	char name[32];
	bool suspended;
};

extern unsigned long long physmem_size, highmem;

#define vu_err(vu_dev, ...)	dev_err(&(vu_dev)->pdev->dev, ##__VA_ARGS__)

/* Vhost-user protocol */

static int full_sendmsg_fds(int fd, const void *buf, unsigned int len,
			    const int *fds, unsigned int fds_num)
{
	int rc;

	do {
		rc = os_sendmsg_fds(fd, buf, len, fds, fds_num);
		if (rc > 0) {
			buf += rc;
			len -= rc;
			fds = NULL;
			fds_num = 0;
		}
	} while (len && (rc >= 0 || rc == -EINTR));

	if (rc < 0)
		return rc;
	return 0;
}

static int full_read(int fd, void *buf, int len, bool abortable)
{
	int rc;

	if (!len)
		return 0;

	do {
		rc = os_read_file(fd, buf, len);
		if (rc > 0) {
			buf += rc;
			len -= rc;
		}
	} while (len && (rc > 0 || rc == -EINTR || (!abortable && rc == -EAGAIN)));

	if (rc < 0)
		return rc;
	if (rc == 0)
		return -ECONNRESET;
	return 0;
}

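/*
 * Note on the two helpers above: both loop until the whole buffer has been
 * transferred, retrying on -EINTR (and, for reads that must not abort, on
 * -EAGAIN). They return 0 on success and a negative error code otherwise;
 * full_read() turns EOF into -ECONNRESET so callers can treat a closed
 * vhost-user connection as a reset.
 */
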
static int vhost_user_recv_header(int fd, struct vhost_user_msg *msg)
{
	return full_read(fd, msg, sizeof(msg->header), true);
}

static int vhost_user_recv(struct virtio_uml_device *vu_dev,
			   int fd, struct vhost_user_msg *msg,
			   size_t max_payload_size, bool wait)
{
	size_t size;
	int rc;

	/*
	 * In virtio time-travel mode, we're handling all the vhost-user
	 * FDs by polling them whenever appropriate. However, we may get
	 * into a situation where we're sending out an interrupt message
	 * to a device (e.g. a net device) and need to handle a simulation
	 * time message while doing so, e.g. one that tells us to update
	 * our idea of how long we can run without scheduling.
	 *
	 * Thus, we need to not just read() from the given fd, but need
	 * to also handle messages for the simulation time - this function
	 * does that for us while waiting for the given fd to be readable.
	 */
	if (wait)
		time_travel_wait_readable(fd);

	rc = vhost_user_recv_header(fd, msg);

	if (rc)
		return rc;
	size = msg->header.size;
	if (size > max_payload_size)
		return -EPROTO;
	return full_read(fd, &msg->payload, size, false);
}

static void vhost_user_check_reset(struct virtio_uml_device *vu_dev,
				   int rc)
{
	struct virtio_uml_platform_data *pdata = vu_dev->pdata;

	if (rc != -ECONNRESET)
		return;

	if (!vu_dev->registered)
		return;

	virtio_break_device(&vu_dev->vdev);
	schedule_work(&pdata->conn_broken_wk);
}

static int vhost_user_recv_resp(struct virtio_uml_device *vu_dev,
				struct vhost_user_msg *msg,
				size_t max_payload_size)
{
	int rc = vhost_user_recv(vu_dev, vu_dev->sock, msg,
				 max_payload_size, true);

	if (rc) {
		vhost_user_check_reset(vu_dev, rc);
		return rc;
	}

	if (msg->header.flags != (VHOST_USER_FLAG_REPLY | VHOST_USER_VERSION))
		return -EPROTO;

	return 0;
}

static int vhost_user_recv_u64(struct virtio_uml_device *vu_dev,
			       u64 *value)
{
	struct vhost_user_msg msg;
	int rc = vhost_user_recv_resp(vu_dev, &msg,
				      sizeof(msg.payload.integer));

	if (rc)
		return rc;
	if (msg.header.size != sizeof(msg.payload.integer))
		return -EPROTO;
	*value = msg.payload.integer;
	return 0;
}

static int vhost_user_recv_req(struct virtio_uml_device *vu_dev,
			       struct vhost_user_msg *msg,
			       size_t max_payload_size)
{
	int rc = vhost_user_recv(vu_dev, vu_dev->req_fd, msg,
				 max_payload_size, false);

	if (rc)
		return rc;

	if ((msg->header.flags & ~VHOST_USER_FLAG_NEED_REPLY) !=
			VHOST_USER_VERSION)
		return -EPROTO;

	return 0;
}

static int vhost_user_send(struct virtio_uml_device *vu_dev,
			   bool need_response, struct vhost_user_msg *msg,
			   int *fds, size_t num_fds)
{
	size_t size = sizeof(msg->header) + msg->header.size;
	unsigned long flags;
	bool request_ack;
	int rc;

	msg->header.flags |= VHOST_USER_VERSION;

	/*
	 * The need_response flag indicates that we already need a response,
	 * e.g. to read the features. In these cases, don't request an ACK as
	 * it is meaningless. Also request an ACK only if supported.
	 */
	request_ack = !need_response;
	if (!(vu_dev->protocol_features &
			BIT_ULL(VHOST_USER_PROTOCOL_F_REPLY_ACK)))
		request_ack = false;

	if (request_ack)
		msg->header.flags |= VHOST_USER_FLAG_NEED_REPLY;

	spin_lock_irqsave(&vu_dev->sock_lock, flags);
	rc = full_sendmsg_fds(vu_dev->sock, msg, size, fds, num_fds);
	if (rc < 0)
		goto out;

	if (request_ack) {
		uint64_t status;

		rc = vhost_user_recv_u64(vu_dev, &status);
		if (rc)
			goto out;

		if (status) {
			vu_err(vu_dev, "slave reports error: %llu\n", status);
			rc = -EPROTO;
			goto out;
		}
	}

out:
	spin_unlock_irqrestore(&vu_dev->sock_lock, flags);
	return rc;
}

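/*
 * Sketch of the wire format used by vhost_user_send()/vhost_user_recv(),
 * assuming the message layout declared in vhost_user.h: each message is a
 * fixed header (request, flags, payload size) followed by msg->header.size
 * bytes of payload, with any file descriptors passed as ancillary data on
 * the socket by os_sendmsg_fds(). When VHOST_USER_PROTOCOL_F_REPLY_ACK was
 * negotiated and no real response is expected, vhost_user_send() sets
 * VHOST_USER_FLAG_NEED_REPLY and reads back a u64 status, treating a
 * non-zero value as an error reported by the backend.
 */
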
static int vhost_user_send_no_payload(struct virtio_uml_device *vu_dev,
				      bool need_response, u32 request)
{
	struct vhost_user_msg msg = {
		.header.request = request,
	};

	return vhost_user_send(vu_dev, need_response, &msg, NULL, 0);
}

static int vhost_user_send_no_payload_fd(struct virtio_uml_device *vu_dev,
					 u32 request, int fd)
{
	struct vhost_user_msg msg = {
		.header.request = request,
	};

	return vhost_user_send(vu_dev, false, &msg, &fd, 1);
}

static int vhost_user_send_u64(struct virtio_uml_device *vu_dev,
			       u32 request, u64 value)
{
	struct vhost_user_msg msg = {
		.header.request = request,
		.header.size = sizeof(msg.payload.integer),
		.payload.integer = value,
	};

	return vhost_user_send(vu_dev, false, &msg, NULL, 0);
}

static int vhost_user_set_owner(struct virtio_uml_device *vu_dev)
{
	return vhost_user_send_no_payload(vu_dev, false, VHOST_USER_SET_OWNER);
}

static int vhost_user_get_features(struct virtio_uml_device *vu_dev,
				   u64 *features)
{
	int rc = vhost_user_send_no_payload(vu_dev, true,
					    VHOST_USER_GET_FEATURES);

	if (rc)
		return rc;
	return vhost_user_recv_u64(vu_dev, features);
}

static int vhost_user_set_features(struct virtio_uml_device *vu_dev,
				   u64 features)
{
	return vhost_user_send_u64(vu_dev, VHOST_USER_SET_FEATURES, features);
}

static int vhost_user_get_protocol_features(struct virtio_uml_device *vu_dev,
					    u64 *protocol_features)
{
	int rc = vhost_user_send_no_payload(vu_dev, true,
					    VHOST_USER_GET_PROTOCOL_FEATURES);

	if (rc)
		return rc;
	return vhost_user_recv_u64(vu_dev, protocol_features);
}

static int vhost_user_set_protocol_features(struct virtio_uml_device *vu_dev,
					    u64 protocol_features)
{
	return vhost_user_send_u64(vu_dev, VHOST_USER_SET_PROTOCOL_FEATURES,
				   protocol_features);
}

static void vhost_user_reply(struct virtio_uml_device *vu_dev,
			     struct vhost_user_msg *msg, int response)
{
	struct vhost_user_msg reply = {
		.payload.integer = response,
	};
	size_t size = sizeof(reply.header) + sizeof(reply.payload.integer);
	int rc;

	reply.header = msg->header;
	reply.header.flags &= ~VHOST_USER_FLAG_NEED_REPLY;
	reply.header.flags |= VHOST_USER_FLAG_REPLY;
	reply.header.size = sizeof(reply.payload.integer);

	rc = full_sendmsg_fds(vu_dev->req_fd, &reply, size, NULL, 0);

	if (rc)
		vu_err(vu_dev,
		       "sending reply to slave request failed: %d (size %zu)\n",
		       rc, size);
}

static irqreturn_t vu_req_read_message(struct virtio_uml_device *vu_dev,
				       struct time_travel_event *ev)
{
	struct virtqueue *vq;
	int response = 1;
	struct {
		struct vhost_user_msg msg;
		u8 extra_payload[512];
	} msg;
	int rc;
	irqreturn_t irq_rc = IRQ_NONE;

	while (1) {
		rc = vhost_user_recv_req(vu_dev, &msg.msg,
					 sizeof(msg.msg.payload) +
					 sizeof(msg.extra_payload));
		if (rc)
			break;

		switch (msg.msg.header.request) {
		case VHOST_USER_SLAVE_CONFIG_CHANGE_MSG:
			vu_dev->config_changed_irq = true;
			response = 0;
			break;
		case VHOST_USER_SLAVE_VRING_CALL:
			virtio_device_for_each_vq((&vu_dev->vdev), vq) {
				if (vq->index == msg.msg.payload.vring_state.index) {
					response = 0;
					vu_dev->vq_irq_vq_map |= BIT_ULL(vq->index);
					break;
				}
			}
			break;
		case VHOST_USER_SLAVE_IOTLB_MSG:
			/* not supported - VIRTIO_F_ACCESS_PLATFORM */
		case VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG:
			/* not supported - VHOST_USER_PROTOCOL_F_HOST_NOTIFIER */
		default:
			vu_err(vu_dev, "unexpected slave request %d\n",
			       msg.msg.header.request);
		}

		if (ev && !vu_dev->suspended)
			time_travel_add_irq_event(ev);

		if (msg.msg.header.flags & VHOST_USER_FLAG_NEED_REPLY)
			vhost_user_reply(vu_dev, &msg.msg, response);
		irq_rc = IRQ_HANDLED;
	}
	/* mask EAGAIN as we try non-blocking read until socket is empty */
	vu_dev->recv_rc = (rc == -EAGAIN) ? 0 : rc;
	return irq_rc;
}

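/*
 * vu_req_read_message() above drains the slave-request channel in a loop:
 * each message may mark a config-change or a vring-call interrupt as
 * pending (delivered later from vu_req_interrupt()) and is acknowledged
 * via vhost_user_reply() when the backend asked for a reply. -EAGAIN just
 * means the non-blocking fd is empty and is not treated as an error.
 */
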
static irqreturn_t vu_req_interrupt(int irq, void *data)
{
	struct virtio_uml_device *vu_dev = data;
	irqreturn_t ret = IRQ_HANDLED;

	if (!um_irq_timetravel_handler_used())
		ret = vu_req_read_message(vu_dev, NULL);

	if (vu_dev->recv_rc) {
		vhost_user_check_reset(vu_dev, vu_dev->recv_rc);
	} else if (vu_dev->vq_irq_vq_map) {
		struct virtqueue *vq;

		virtio_device_for_each_vq((&vu_dev->vdev), vq) {
			if (vu_dev->vq_irq_vq_map & BIT_ULL(vq->index))
				vring_interrupt(0 /* ignored */, vq);
		}
		vu_dev->vq_irq_vq_map = 0;
	} else if (vu_dev->config_changed_irq) {
		virtio_config_changed(&vu_dev->vdev);
		vu_dev->config_changed_irq = false;
	}

	return ret;
}

static void vu_req_interrupt_comm_handler(int irq, int fd, void *data,
					  struct time_travel_event *ev)
{
	vu_req_read_message(data, ev);
}

static int vhost_user_init_slave_req(struct virtio_uml_device *vu_dev)
{
	int req_fds[2], rc;

	/* Use a pipe for slave req fd, SIGIO is not supported for eventfd */
	rc = os_pipe(req_fds, true, true);
	if (rc < 0)
		return rc;
	vu_dev->req_fd = req_fds[0];

	rc = um_request_irq_tt(UM_IRQ_ALLOC, vu_dev->req_fd, IRQ_READ,
			       vu_req_interrupt, IRQF_SHARED,
			       vu_dev->pdev->name, vu_dev,
			       vu_req_interrupt_comm_handler);
	if (rc < 0)
		goto err_close;

	vu_dev->irq = rc;

	rc = vhost_user_send_no_payload_fd(vu_dev, VHOST_USER_SET_SLAVE_REQ_FD,
					   req_fds[1]);
	if (rc)
		goto err_free_irq;

	goto out;

err_free_irq:
	um_free_irq(vu_dev->irq, vu_dev);
err_close:
	os_close_file(req_fds[0]);
out:
	/* Close unused write end of request fds */
	os_close_file(req_fds[1]);

	return rc;
}

static int vhost_user_init(struct virtio_uml_device *vu_dev)
{
	int rc = vhost_user_set_owner(vu_dev);

	if (rc)
		return rc;
	rc = vhost_user_get_features(vu_dev, &vu_dev->features);
	if (rc)
		return rc;

	if (vu_dev->features & BIT_ULL(VHOST_USER_F_PROTOCOL_FEATURES)) {
		rc = vhost_user_get_protocol_features(vu_dev,
				&vu_dev->protocol_features);
		if (rc)
			return rc;
		vu_dev->protocol_features &= VHOST_USER_SUPPORTED_PROTOCOL_F;
		rc = vhost_user_set_protocol_features(vu_dev,
				vu_dev->protocol_features);
		if (rc)
			return rc;
	}

	if (vu_dev->protocol_features &
			BIT_ULL(VHOST_USER_PROTOCOL_F_SLAVE_REQ)) {
		rc = vhost_user_init_slave_req(vu_dev);
		if (rc)
			return rc;
	}

	return 0;
}

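/*
 * Summary of the init handshake performed above, in order:
 * VHOST_USER_SET_OWNER, VHOST_USER_GET_FEATURES, then (if
 * VHOST_USER_F_PROTOCOL_FEATURES is offered) GET/SET_PROTOCOL_FEATURES
 * masked by VHOST_USER_SUPPORTED_PROTOCOL_F, and finally the slave
 * request fd setup when VHOST_USER_PROTOCOL_F_SLAVE_REQ was negotiated.
 */
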
static void vhost_user_get_config(struct virtio_uml_device *vu_dev,
				  u32 offset, void *buf, u32 len)
{
	u32 cfg_size = offset + len;
	struct vhost_user_msg *msg;
	size_t payload_size = sizeof(msg->payload.config) + cfg_size;
	size_t msg_size = sizeof(msg->header) + payload_size;
	int rc;

	if (!(vu_dev->protocol_features &
	      BIT_ULL(VHOST_USER_PROTOCOL_F_CONFIG)))
		return;

	msg = kzalloc(msg_size, GFP_KERNEL);
	if (!msg)
		return;
	msg->header.request = VHOST_USER_GET_CONFIG;
	msg->header.size = payload_size;
	msg->payload.config.offset = 0;
	msg->payload.config.size = cfg_size;

	rc = vhost_user_send(vu_dev, true, msg, NULL, 0);
	if (rc) {
		vu_err(vu_dev, "sending VHOST_USER_GET_CONFIG failed: %d\n",
		       rc);
		goto free;
	}

	rc = vhost_user_recv_resp(vu_dev, msg, msg_size);
	if (rc) {
		vu_err(vu_dev,
		       "receiving VHOST_USER_GET_CONFIG response failed: %d\n",
		       rc);
		goto free;
	}

	if (msg->header.size != payload_size ||
	    msg->payload.config.size != cfg_size) {
		rc = -EPROTO;
		vu_err(vu_dev,
		       "Invalid VHOST_USER_GET_CONFIG sizes (payload %d expected %zu, config %u expected %u)\n",
		       msg->header.size, payload_size,
		       msg->payload.config.size, cfg_size);
		goto free;
	}

	memcpy(buf, msg->payload.config.payload + offset, len);

free:
	kfree(msg);
}

static void vhost_user_set_config(struct virtio_uml_device *vu_dev,
				  u32 offset, const void *buf, u32 len)
{
	struct vhost_user_msg *msg;
	size_t payload_size = sizeof(msg->payload.config) + len;
	size_t msg_size = sizeof(msg->header) + payload_size;
	int rc;

	if (!(vu_dev->protocol_features &
	      BIT_ULL(VHOST_USER_PROTOCOL_F_CONFIG)))
		return;

	msg = kzalloc(msg_size, GFP_KERNEL);
	if (!msg)
		return;
	msg->header.request = VHOST_USER_SET_CONFIG;
	msg->header.size = payload_size;
	msg->payload.config.offset = offset;
	msg->payload.config.size = len;
	memcpy(msg->payload.config.payload, buf, len);

	rc = vhost_user_send(vu_dev, false, msg, NULL, 0);
	if (rc)
		vu_err(vu_dev, "sending VHOST_USER_SET_CONFIG failed: %d\n",
		       rc);

	kfree(msg);
}

static int vhost_user_init_mem_region(u64 addr, u64 size, int *fd_out,
				      struct vhost_user_mem_region *region_out)
{
	unsigned long long mem_offset;
	int rc = phys_mapping(addr, &mem_offset);

	if (WARN(rc < 0, "phys_mapping of 0x%llx returned %d\n", addr, rc))
		return -EFAULT;
	*fd_out = rc;
	region_out->guest_addr = addr;
	region_out->user_addr = addr;
	region_out->size = size;
	region_out->mmap_offset = mem_offset;

	/* Ensure mapping is valid for the entire region */
	rc = phys_mapping(addr + size - 1, &mem_offset);
	if (WARN(rc != *fd_out, "phys_mapping of 0x%llx failed: %d != %d\n",
		 addr + size - 1, rc, *fd_out))
		return -EFAULT;

	return 0;
}

static int vhost_user_set_mem_table(struct virtio_uml_device *vu_dev)
{
	struct vhost_user_msg msg = {
		.header.request = VHOST_USER_SET_MEM_TABLE,
		.header.size = sizeof(msg.payload.mem_regions),
		.payload.mem_regions.num = 1,
	};
	unsigned long reserved = uml_reserved - uml_physmem;
	int fds[2];
	int rc;

	/*
	 * This is a bit tricky, see also the comment with setup_physmem().
	 *
	 * Essentially, setup_physmem() uses a file to mmap() our physmem,
	 * but the code and data we *already* have is omitted. To us, this
	 * is no difference, since they both become part of our address
	 * space and memory consumption. To somebody looking in from the
	 * outside, however, it is different because the part of our memory
	 * consumption that's already part of the binary (code/data) is not
	 * mapped from the file, so it's not visible to another mmap from
	 * the file descriptor.
	 *
	 * Thus, don't advertise this space to the vhost-user slave. This
	 * means that the slave will likely abort or similar when we give
	 * it an address from the hidden range, since it's not marked as
	 * a valid address, but at least that way we detect the issue and
	 * don't just have the slave read an all-zeroes buffer from the
	 * shared memory file, or write something there that we can never
	 * see (depending on the direction of the virtqueue traffic.)
	 *
	 * Since we usually don't want to use .text for virtio buffers,
	 * this effectively means that you cannot use
	 *  1) global variables, which are in the .bss and not in the shm
	 *     file-backed memory
	 *  2) the stack in some processes, depending on where they have
	 *     their stack (or maybe only no interrupt stack?)
	 *
	 * The stack is already not typically valid for DMA, so this isn't
	 * much of a restriction, but global variables might be encountered.
	 *
	 * It might be possible to fix it by copying around the data that's
	 * between bss_start and where we map the file now, but it's not
	 * something that you typically encounter with virtio drivers, so
	 * it didn't seem worthwhile.
	 */
	rc = vhost_user_init_mem_region(reserved, physmem_size - reserved,
					&fds[0],
					&msg.payload.mem_regions.regions[0]);
	if (rc < 0)
		return rc;
	if (highmem) {
		msg.payload.mem_regions.num++;
		rc = vhost_user_init_mem_region(__pa(end_iomem), highmem,
			&fds[1], &msg.payload.mem_regions.regions[1]);
		if (rc < 0)
			return rc;
	}

	return vhost_user_send(vu_dev, false, &msg, fds,
			       msg.payload.mem_regions.num);
}

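/*
 * The fds passed along with VHOST_USER_SET_MEM_TABLE above are the
 * physical-memory backing file descriptors returned by phys_mapping();
 * the vhost-user backend is expected to mmap() them at the advertised
 * mmap_offset to obtain a shared view of guest memory for the
 * virtqueues and the buffers they point to.
 */
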
static int vhost_user_set_vring_state(struct virtio_uml_device *vu_dev,
				      u32 request, u32 index, u32 num)
{
	struct vhost_user_msg msg = {
		.header.request = request,
		.header.size = sizeof(msg.payload.vring_state),
		.payload.vring_state.index = index,
		.payload.vring_state.num = num,
	};

	return vhost_user_send(vu_dev, false, &msg, NULL, 0);
}

static int vhost_user_set_vring_num(struct virtio_uml_device *vu_dev,
				    u32 index, u32 num)
{
	return vhost_user_set_vring_state(vu_dev, VHOST_USER_SET_VRING_NUM,
					  index, num);
}

static int vhost_user_set_vring_base(struct virtio_uml_device *vu_dev,
				     u32 index, u32 offset)
{
	return vhost_user_set_vring_state(vu_dev, VHOST_USER_SET_VRING_BASE,
					  index, offset);
}

static int vhost_user_set_vring_addr(struct virtio_uml_device *vu_dev,
				     u32 index, u64 desc, u64 used, u64 avail,
				     u64 log)
{
	struct vhost_user_msg msg = {
		.header.request = VHOST_USER_SET_VRING_ADDR,
		.header.size = sizeof(msg.payload.vring_addr),
		.payload.vring_addr.index = index,
		.payload.vring_addr.desc = desc,
		.payload.vring_addr.used = used,
		.payload.vring_addr.avail = avail,
		.payload.vring_addr.log = log,
	};

	return vhost_user_send(vu_dev, false, &msg, NULL, 0);
}

static int vhost_user_set_vring_fd(struct virtio_uml_device *vu_dev,
				   u32 request, int index, int fd)
{
	struct vhost_user_msg msg = {
		.header.request = request,
		.header.size = sizeof(msg.payload.integer),
		.payload.integer = index,
	};

	if (index & ~VHOST_USER_VRING_INDEX_MASK)
		return -EINVAL;
	if (fd < 0) {
		msg.payload.integer |= VHOST_USER_VRING_POLL_MASK;
		return vhost_user_send(vu_dev, false, &msg, NULL, 0);
	}
	return vhost_user_send(vu_dev, false, &msg, &fd, 1);
}

static int vhost_user_set_vring_call(struct virtio_uml_device *vu_dev,
				     int index, int fd)
{
	return vhost_user_set_vring_fd(vu_dev, VHOST_USER_SET_VRING_CALL,
				       index, fd);
}

static int vhost_user_set_vring_kick(struct virtio_uml_device *vu_dev,
				     int index, int fd)
{
	return vhost_user_set_vring_fd(vu_dev, VHOST_USER_SET_VRING_KICK,
				       index, fd);
}

static int vhost_user_set_vring_enable(struct virtio_uml_device *vu_dev,
				       u32 index, bool enable)
{
	if (!(vu_dev->features & BIT_ULL(VHOST_USER_F_PROTOCOL_FEATURES)))
		return 0;

	return vhost_user_set_vring_state(vu_dev, VHOST_USER_SET_VRING_ENABLE,
					  index, enable);
}

/* Virtio interface */

static bool vu_notify(struct virtqueue *vq)
{
	struct virtio_uml_vq_info *info = vq->priv;
	const uint64_t n = 1;
	int rc;

	if (info->suspended)
		return true;

	time_travel_propagate_time();

	if (info->kick_fd < 0) {
		struct virtio_uml_device *vu_dev;

		vu_dev = to_virtio_uml_device(vq->vdev);

		return vhost_user_set_vring_state(vu_dev, VHOST_USER_VRING_KICK,
						  vq->index, 0) == 0;
	}

	do {
		rc = os_write_file(info->kick_fd, &n, sizeof(n));
	} while (rc == -EINTR);
	return !WARN(rc != sizeof(n), "write returned %d\n", rc);
}

static irqreturn_t vu_interrupt(int irq, void *opaque)
{
	struct virtqueue *vq = opaque;
	struct virtio_uml_vq_info *info = vq->priv;
	uint64_t n;
	int rc;
	irqreturn_t ret = IRQ_NONE;

	do {
		rc = os_read_file(info->call_fd, &n, sizeof(n));
		if (rc == sizeof(n))
			ret |= vring_interrupt(irq, vq);
	} while (rc == sizeof(n) || rc == -EINTR);
	WARN(rc != -EAGAIN, "read returned %d\n", rc);
	return ret;
}

static void vu_get(struct virtio_device *vdev, unsigned offset,
		   void *buf, unsigned len)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	vhost_user_get_config(vu_dev, offset, buf, len);
}

static void vu_set(struct virtio_device *vdev, unsigned offset,
		   const void *buf, unsigned len)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	vhost_user_set_config(vu_dev, offset, buf, len);
}

static u8 vu_get_status(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	return vu_dev->status;
}

static void vu_set_status(struct virtio_device *vdev, u8 status)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	vu_dev->status = status;
}

static void vu_reset(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	vu_dev->status = 0;
}

static void vu_del_vq(struct virtqueue *vq)
{
	struct virtio_uml_vq_info *info = vq->priv;

	if (info->call_fd >= 0) {
		struct virtio_uml_device *vu_dev;

		vu_dev = to_virtio_uml_device(vq->vdev);

		um_free_irq(vu_dev->irq, vq);
		os_close_file(info->call_fd);
	}

	if (info->kick_fd >= 0)
		os_close_file(info->kick_fd);

	vring_del_virtqueue(vq);
	kfree(info);
}

static void vu_del_vqs(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
	struct virtqueue *vq, *n;
	u64 features;

	/* Note: reverse order as a workaround to a decoding bug in snabb */
	list_for_each_entry_reverse(vq, &vdev->vqs, list)
		WARN_ON(vhost_user_set_vring_enable(vu_dev, vq->index, false));

	/* Ensure previous messages have been processed */
	WARN_ON(vhost_user_get_features(vu_dev, &features));

	list_for_each_entry_safe(vq, n, &vdev->vqs, list)
		vu_del_vq(vq);
}

static int vu_setup_vq_call_fd(struct virtio_uml_device *vu_dev,
			       struct virtqueue *vq)
{
	struct virtio_uml_vq_info *info = vq->priv;
	int call_fds[2];
	int rc;

	/* no call FD needed/desired in this case */
	if (vu_dev->protocol_features &
	    BIT_ULL(VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS) &&
	    vu_dev->protocol_features &
	    BIT_ULL(VHOST_USER_PROTOCOL_F_SLAVE_REQ)) {
		info->call_fd = -1;
		return 0;
	}

	/* Use a pipe for call fd, since SIGIO is not supported for eventfd */
	rc = os_pipe(call_fds, true, true);
	if (rc < 0)
		return rc;

	info->call_fd = call_fds[0];
	rc = um_request_irq(vu_dev->irq, info->call_fd, IRQ_READ,
			    vu_interrupt, IRQF_SHARED, info->name, vq);
	if (rc < 0)
		goto close_both;

	rc = vhost_user_set_vring_call(vu_dev, vq->index, call_fds[1]);
	if (rc)
		goto release_irq;

	goto out;

release_irq:
	um_free_irq(vu_dev->irq, vq);
close_both:
	os_close_file(call_fds[0]);
out:
	/* Close (unused) write end of call fds */
	os_close_file(call_fds[1]);

	return rc;
}

static struct virtqueue *vu_setup_vq(struct virtio_device *vdev,
				     unsigned index, vq_callback_t *callback,
				     const char *name, bool ctx)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
	struct platform_device *pdev = vu_dev->pdev;
	struct virtio_uml_vq_info *info;
	struct virtqueue *vq;
	int num = MAX_SUPPORTED_QUEUE_SIZE;
	int rc;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		rc = -ENOMEM;
		goto error_kzalloc;
	}
	snprintf(info->name, sizeof(info->name), "%s.%d-%s", pdev->name,
		 pdev->id, name);

	vq = vring_create_virtqueue(index, num, PAGE_SIZE, vdev, true, true,
				    ctx, vu_notify, callback, info->name);
	if (!vq) {
		rc = -ENOMEM;
		goto error_create;
	}
	vq->priv = info;
	num = virtqueue_get_vring_size(vq);

	if (vu_dev->protocol_features &
	    BIT_ULL(VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS)) {
		info->kick_fd = -1;
	} else {
		rc = os_eventfd(0, 0);
		if (rc < 0)
			goto error_kick;
		info->kick_fd = rc;
	}

	rc = vu_setup_vq_call_fd(vu_dev, vq);
	if (rc)
		goto error_call;

	rc = vhost_user_set_vring_num(vu_dev, index, num);
	if (rc)
		goto error_setup;

	rc = vhost_user_set_vring_base(vu_dev, index, 0);
	if (rc)
		goto error_setup;

	rc = vhost_user_set_vring_addr(vu_dev, index,
				       virtqueue_get_desc_addr(vq),
				       virtqueue_get_used_addr(vq),
				       virtqueue_get_avail_addr(vq),
				       (u64) -1);
	if (rc)
		goto error_setup;

	return vq;

error_setup:
	if (info->call_fd >= 0) {
		um_free_irq(vu_dev->irq, vq);
		os_close_file(info->call_fd);
	}
error_call:
	if (info->kick_fd >= 0)
		os_close_file(info->kick_fd);
error_kick:
	vring_del_virtqueue(vq);
error_create:
	kfree(info);
error_kzalloc:
	return ERR_PTR(rc);
}

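/*
 * Per-queue bring-up order used above and in vu_find_vqs(): create the
 * vring, pick kick/call fds (or in-band notifications when negotiated),
 * then VHOST_USER_SET_VRING_NUM, _BASE and _ADDR; the kick fd and
 * VHOST_USER_SET_VRING_ENABLE are only sent from vu_find_vqs() once all
 * queues of the device have been set up.
 */
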
static int vu_find_vqs(struct virtio_device *vdev, unsigned nvqs,
		       struct virtqueue *vqs[], vq_callback_t *callbacks[],
		       const char * const names[], const bool *ctx,
		       struct irq_affinity *desc)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
	int i, queue_idx = 0, rc;
	struct virtqueue *vq;

	/* not supported for now */
	if (WARN_ON(nvqs > 64))
		return -EINVAL;

	rc = vhost_user_set_mem_table(vu_dev);
	if (rc)
		return rc;

	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}

		vqs[i] = vu_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
				     ctx ? ctx[i] : false);
		if (IS_ERR(vqs[i])) {
			rc = PTR_ERR(vqs[i]);
			goto error_setup;
		}
	}

	list_for_each_entry(vq, &vdev->vqs, list) {
		struct virtio_uml_vq_info *info = vq->priv;

		if (info->kick_fd >= 0) {
			rc = vhost_user_set_vring_kick(vu_dev, vq->index,
						       info->kick_fd);
			if (rc)
				goto error_setup;
		}

		rc = vhost_user_set_vring_enable(vu_dev, vq->index, true);
		if (rc)
			goto error_setup;
	}

	return 0;

error_setup:
	vu_del_vqs(vdev);
	return rc;
}

static u64 vu_get_features(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	return vu_dev->features;
}

static int vu_finalize_features(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
	u64 supported = vdev->features & VHOST_USER_SUPPORTED_F;

	vring_transport_features(vdev);
	vu_dev->features = vdev->features | supported;

	return vhost_user_set_features(vu_dev, vu_dev->features);
}

static const char *vu_bus_name(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	return vu_dev->pdev->name;
}

static const struct virtio_config_ops virtio_uml_config_ops = {
	.get = vu_get,
	.set = vu_set,
	.get_status = vu_get_status,
	.set_status = vu_set_status,
	.reset = vu_reset,
	.find_vqs = vu_find_vqs,
	.del_vqs = vu_del_vqs,
	.get_features = vu_get_features,
	.finalize_features = vu_finalize_features,
	.bus_name = vu_bus_name,
};

static void virtio_uml_release_dev(struct device *d)
{
	struct virtio_device *vdev =
			container_of(d, struct virtio_device, dev);
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	time_travel_propagate_time();

	/* might not have been opened due to not negotiating the feature */
	if (vu_dev->req_fd >= 0) {
		um_free_irq(vu_dev->irq, vu_dev);
		os_close_file(vu_dev->req_fd);
	}

	os_close_file(vu_dev->sock);
	kfree(vu_dev);
}

void virtio_uml_set_no_vq_suspend(struct virtio_device *vdev,
				  bool no_vq_suspend)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	if (WARN_ON(vdev->config != &virtio_uml_config_ops))
		return;

	vu_dev->no_vq_suspend = no_vq_suspend;
	dev_info(&vdev->dev, "%sabled VQ suspend\n",
		 no_vq_suspend ? "dis" : "en");
}

static void vu_of_conn_broken(struct work_struct *wk)
{
	/*
	 * We can't remove the device from the devicetree so the only thing we
	 * can do is warn.
	 */
	WARN_ON(1);
}

/* Platform device */

static struct virtio_uml_platform_data *
virtio_uml_create_pdata(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct virtio_uml_platform_data *pdata;
	int ret;

	if (!np)
		return ERR_PTR(-EINVAL);

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	INIT_WORK(&pdata->conn_broken_wk, vu_of_conn_broken);
	pdata->pdev = pdev;

	ret = of_property_read_string(np, "socket-path", &pdata->socket_path);
	if (ret)
		return ERR_PTR(ret);

	ret = of_property_read_u32(np, "virtio-device-id",
				   &pdata->virtio_device_id);
	if (ret)
		return ERR_PTR(ret);

	return pdata;
}

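/*
 * Hypothetical devicetree node matching the properties read above and the
 * "virtio,uml" compatible from virtio_uml_match (node name and values are
 * only an illustration):
 *
 *	virtio {
 *		compatible = "virtio,uml";
 *		socket-path = "/var/uml.socket";
 *		virtio-device-id = <1>;
 *	};
 */
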
static int virtio_uml_probe(struct platform_device *pdev)
{
	struct virtio_uml_platform_data *pdata = pdev->dev.platform_data;
	struct virtio_uml_device *vu_dev;
	int rc;

	if (!pdata) {
		pdata = virtio_uml_create_pdata(pdev);
		if (IS_ERR(pdata))
			return PTR_ERR(pdata);
	}

	vu_dev = kzalloc(sizeof(*vu_dev), GFP_KERNEL);
	if (!vu_dev)
		return -ENOMEM;

	vu_dev->pdata = pdata;
	vu_dev->vdev.dev.parent = &pdev->dev;
	vu_dev->vdev.dev.release = virtio_uml_release_dev;
	vu_dev->vdev.config = &virtio_uml_config_ops;
	vu_dev->vdev.id.device = pdata->virtio_device_id;
	vu_dev->vdev.id.vendor = VIRTIO_DEV_ANY_ID;
	vu_dev->pdev = pdev;
	vu_dev->req_fd = -1;

	time_travel_propagate_time();

	do {
		rc = os_connect_socket(pdata->socket_path);
	} while (rc == -EINTR);
	if (rc < 0)
		goto error_free;
	vu_dev->sock = rc;

	spin_lock_init(&vu_dev->sock_lock);

	rc = vhost_user_init(vu_dev);
	if (rc)
		goto error_init;

	platform_set_drvdata(pdev, vu_dev);

	device_set_wakeup_capable(&vu_dev->vdev.dev, true);

	rc = register_virtio_device(&vu_dev->vdev);
	if (rc)
		put_device(&vu_dev->vdev.dev);
	vu_dev->registered = 1;
	return rc;

error_init:
	os_close_file(vu_dev->sock);

error_free:
	kfree(vu_dev);
	return rc;
}

static int virtio_uml_remove(struct platform_device *pdev)
{
	struct virtio_uml_device *vu_dev = platform_get_drvdata(pdev);

	unregister_virtio_device(&vu_dev->vdev);

	return 0;
}

/* Command line device list */

static void vu_cmdline_release_dev(struct device *d)
{
}

static struct device vu_cmdline_parent = {
	.init_name = "virtio-uml-cmdline",
	.release = vu_cmdline_release_dev,
};

static bool vu_cmdline_parent_registered;
static int vu_cmdline_id;

static int vu_unregister_cmdline_device(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct virtio_uml_platform_data *pdata = pdev->dev.platform_data;

	kfree(pdata->socket_path);
	platform_device_unregister(pdev);
	return 0;
}

static void vu_conn_broken(struct work_struct *wk)
{
	struct virtio_uml_platform_data *pdata;

	pdata = container_of(wk, struct virtio_uml_platform_data, conn_broken_wk);
	vu_unregister_cmdline_device(&pdata->pdev->dev, NULL);
}

static int vu_cmdline_set(const char *device, const struct kernel_param *kp)
{
	const char *ids = strchr(device, ':');
	unsigned int virtio_device_id;
	int processed, consumed, err;
	char *socket_path;
	struct virtio_uml_platform_data pdata, *ppdata;
	struct platform_device *pdev;

	if (!ids || ids == device)
		return -EINVAL;

	processed = sscanf(ids, ":%u%n:%d%n",
			   &virtio_device_id, &consumed,
			   &vu_cmdline_id, &consumed);

	if (processed < 1 || ids[consumed])
		return -EINVAL;

	if (!vu_cmdline_parent_registered) {
		err = device_register(&vu_cmdline_parent);
		if (err) {
			pr_err("Failed to register parent device!\n");
			put_device(&vu_cmdline_parent);
			return err;
		}
		vu_cmdline_parent_registered = true;
	}

	socket_path = kmemdup_nul(device, ids - device, GFP_KERNEL);
	if (!socket_path)
		return -ENOMEM;

	pdata.virtio_device_id = (u32) virtio_device_id;
	pdata.socket_path = socket_path;

	pr_info("Registering device virtio-uml.%d id=%d at %s\n",
		vu_cmdline_id, virtio_device_id, socket_path);

	pdev = platform_device_register_data(&vu_cmdline_parent, "virtio-uml",
					     vu_cmdline_id++, &pdata,
					     sizeof(pdata));
	err = PTR_ERR_OR_ZERO(pdev);
	if (err)
		goto free;

	ppdata = pdev->dev.platform_data;
	ppdata->pdev = pdev;
	INIT_WORK(&ppdata->conn_broken_wk, vu_conn_broken);

	return 0;

free:
	kfree(socket_path);
	return err;
}

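/*
 * Example of what the parser above accepts (values are illustrative):
 * "virtio_uml.device=/var/uml.socket:1:2" yields socket_path
 * "/var/uml.socket", virtio_device_id 1 and platform device id 2; the
 * trailing ":2" may be omitted, in which case the id is taken from the
 * auto-incrementing vu_cmdline_id counter.
 */
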
static int vu_cmdline_get_device(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct virtio_uml_platform_data *pdata = pdev->dev.platform_data;
	char *buffer = data;
	unsigned int len = strlen(buffer);

	snprintf(buffer + len, PAGE_SIZE - len, "%s:%d:%d\n",
		 pdata->socket_path, pdata->virtio_device_id, pdev->id);
	return 0;
}

static int vu_cmdline_get(char *buffer, const struct kernel_param *kp)
{
	buffer[0] = '\0';
	if (vu_cmdline_parent_registered)
		device_for_each_child(&vu_cmdline_parent, buffer,
				      vu_cmdline_get_device);
	return strlen(buffer) + 1;
}

static const struct kernel_param_ops vu_cmdline_param_ops = {
	.set = vu_cmdline_set,
	.get = vu_cmdline_get,
};

device_param_cb(device, &vu_cmdline_param_ops, NULL, S_IRUSR);
__uml_help(vu_cmdline_param_ops,
"virtio_uml.device=<socket>:<virtio_id>[:<platform_id>]\n"
"    Configure a virtio device over a vhost-user socket.\n"
"    See virtio_ids.h for a list of possible virtio device id values.\n"
"    Optionally use a specific platform_device id.\n\n"
);

static void vu_unregister_cmdline_devices(void)
{
	if (vu_cmdline_parent_registered) {
		device_for_each_child(&vu_cmdline_parent, NULL,
				      vu_unregister_cmdline_device);
		device_unregister(&vu_cmdline_parent);
		vu_cmdline_parent_registered = false;
	}
}

/* Platform driver */

static const struct of_device_id virtio_uml_match[] = {
	{ .compatible = "virtio,uml", },
	{ }
};
MODULE_DEVICE_TABLE(of, virtio_uml_match);

static int virtio_uml_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct virtio_uml_device *vu_dev = platform_get_drvdata(pdev);

	if (!vu_dev->no_vq_suspend) {
		struct virtqueue *vq;

		virtio_device_for_each_vq((&vu_dev->vdev), vq) {
			struct virtio_uml_vq_info *info = vq->priv;

			info->suspended = true;
			vhost_user_set_vring_enable(vu_dev, vq->index, false);
		}
	}

	if (!device_may_wakeup(&vu_dev->vdev.dev)) {
		vu_dev->suspended = true;
		return 0;
	}

	return irq_set_irq_wake(vu_dev->irq, 1);
}

static int virtio_uml_resume(struct platform_device *pdev)
{
	struct virtio_uml_device *vu_dev = platform_get_drvdata(pdev);

	if (!vu_dev->no_vq_suspend) {
		struct virtqueue *vq;

		virtio_device_for_each_vq((&vu_dev->vdev), vq) {
			struct virtio_uml_vq_info *info = vq->priv;

			info->suspended = false;
			vhost_user_set_vring_enable(vu_dev, vq->index, true);
		}
	}

	vu_dev->suspended = false;

	if (!device_may_wakeup(&vu_dev->vdev.dev))
		return 0;

	return irq_set_irq_wake(vu_dev->irq, 0);
}

static struct platform_driver virtio_uml_driver = {
	.probe = virtio_uml_probe,
	.remove = virtio_uml_remove,
	.driver = {
		.name = "virtio-uml",
		.of_match_table = virtio_uml_match,
	},
	.suspend = virtio_uml_suspend,
	.resume = virtio_uml_resume,
};

static int __init virtio_uml_init(void)
{
	return platform_driver_register(&virtio_uml_driver);
}

static void __exit virtio_uml_exit(void)
{
	platform_driver_unregister(&virtio_uml_driver);
	vu_unregister_cmdline_devices();
}

module_init(virtio_uml_init);
module_exit(virtio_uml_exit);
__uml_exitcall(virtio_uml_exit);

MODULE_DESCRIPTION("UML driver for vhost-user virtio devices");
MODULE_LICENSE("GPL");