// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio vhost-user driver
 *
 * Copyright(c) 2019 Intel Corporation
 *
 * This driver allows virtio devices to be used over a vhost-user socket.
 *
 * Guest devices can be instantiated by kernel module or command line
 * parameters. One device will be created for each parameter. Syntax:
 *
 *		virtio_uml.device=<socket>:<virtio_id>[:<platform_id>]
 * where:
 *		<socket>	:= vhost-user socket path to connect
 *		<virtio_id>	:= virtio device id (as in virtio_ids.h)
 *		<platform_id>	:= (optional) platform device id
 *
 * example:
 *		virtio_uml.device=/var/uml.socket:1
 *
 * Based on Virtio MMIO driver by Pawel Moll, copyright 2011-2014, ARM Ltd.
 */
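
/*
 * For illustration only (socket paths are made up, ids are from
 * virtio_ids.h): two parameters create two devices, here a net
 * device (id 1) and a console (id 3), each served by its own
 * vhost-user backend:
 *
 *		virtio_uml.device=/tmp/net.sock:1 virtio_uml.device=/tmp/con.sock:3
 */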
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/time-internal.h>
#include <shared/as-layout.h>
#include <irq_kern.h>
#include <init.h>
#include <os.h>
#include "vhost_user.h"
#define MAX_SUPPORTED_QUEUE_SIZE	256

#define to_virtio_uml_device(_vdev) \
	container_of(_vdev, struct virtio_uml_device, vdev)

struct virtio_uml_platform_data {
	u32 virtio_device_id;
	const char *socket_path;
	struct work_struct conn_broken_wk;
	struct platform_device *pdev;
};

struct virtio_uml_device {
	struct virtio_device vdev;
	struct platform_device *pdev;

	spinlock_t sock_lock;
	int sock, req_fd, irq;
	u64 features;
	u64 protocol_features;
	u8 status;
	u8 registered:1;
	u8 suspended:1;
	u8 config_changed_irq:1;
	uint64_t vq_irq_vq_map;
};

struct virtio_uml_vq_info {
	int kick_fd, call_fd;
	char name[32];
	bool suspended;
};

extern unsigned long long physmem_size, highmem;

#define vu_err(vu_dev, ...)	dev_err(&(vu_dev)->pdev->dev, ##__VA_ARGS__)

/* Vhost-user protocol */
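
/*
 * For reference, a sketch of the wire framing assumed below (the real
 * definitions live in vhost_user.h and the vhost-user protocol spec):
 * every message starts with a fixed header,
 *
 *	u32 request;	message type, e.g. VHOST_USER_GET_FEATURES
 *	u32 flags;	protocol version in the low bits, plus the
 *			REPLY / NEED_REPLY bits used below
 *	u32 size;	number of payload bytes that follow
 *
 * followed by header.size bytes of payload. File descriptors (eventfds,
 * the physmem fd) travel as SCM_RIGHTS ancillary data on the socket,
 * not inside the payload.
 */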
static int full_sendmsg_fds(int fd, const void *buf, unsigned int len,
			    const int *fds, unsigned int fds_num)
{
	int rc;

	do {
		rc = os_sendmsg_fds(fd, buf, len, fds, fds_num);
		if (rc > 0) {
			buf += rc;
			len -= rc;
			/* subsequent chunks must not resend the FDs */
			fds = NULL;
			fds_num = 0;
		}
	} while (len && (rc >= 0 || rc == -EINTR));

	if (rc < 0)
		return rc;
	return 0;
}

static int full_read(int fd, void *buf, int len, bool abortable)
{
	int rc;

	do {
		rc = os_read_file(fd, buf, len);
		if (rc > 0) {
			buf += rc;
			len -= rc;
		}
	} while (len && (rc > 0 || rc == -EINTR || (!abortable && rc == -EAGAIN)));

	if (rc < 0)
		return rc;
	if (rc == 0)
		return -ECONNRESET;
	return 0;
}

static int vhost_user_recv_header(int fd, struct vhost_user_msg *msg)
{
	return full_read(fd, msg, sizeof(msg->header), true);
}
static int vhost_user_recv(struct virtio_uml_device *vu_dev,
			   int fd, struct vhost_user_msg *msg,
			   size_t max_payload_size, bool wait)
{
	size_t size;
	int rc;

	/*
	 * In virtio time-travel mode, we're handling all the vhost-user
	 * FDs by polling them whenever appropriate. However, we may get
	 * into a situation where we're sending out an interrupt message
	 * to a device (e.g. a net device) and need to handle a simulation
	 * time message while doing so, e.g. one that tells us to update
	 * our idea of how long we can run without scheduling.
	 *
	 * Thus, we need to not just read() from the given fd, but need
	 * to also handle messages for the simulation time - this function
	 * does that for us while waiting for the given fd to be readable.
	 */
	if (wait)
		time_travel_wait_readable(fd);

	rc = vhost_user_recv_header(fd, msg);

	if (rc == -ECONNRESET && vu_dev->registered) {
		struct virtio_uml_platform_data *pdata;

		pdata = vu_dev->pdev->dev.platform_data;

		virtio_break_device(&vu_dev->vdev);
		schedule_work(&pdata->conn_broken_wk);
	}
	if (rc)
		return rc;
	size = msg->header.size;
	if (size > max_payload_size)
		return -EPROTO;
	return full_read(fd, &msg->payload, size, false);
}
static int vhost_user_recv_resp(struct virtio_uml_device *vu_dev,
				struct vhost_user_msg *msg,
				size_t max_payload_size)
{
	int rc = vhost_user_recv(vu_dev, vu_dev->sock, msg,
				 max_payload_size, true);

	if (rc)
		return rc;

	if (msg->header.flags != (VHOST_USER_FLAG_REPLY | VHOST_USER_VERSION))
		return -EPROTO;

	return 0;
}

static int vhost_user_recv_u64(struct virtio_uml_device *vu_dev,
			       u64 *value)
{
	struct vhost_user_msg msg;
	int rc = vhost_user_recv_resp(vu_dev, &msg,
				      sizeof(msg.payload.integer));

	if (rc)
		return rc;

	if (msg.header.size != sizeof(msg.payload.integer))
		return -EPROTO;

	*value = msg.payload.integer;

	return 0;
}
static int vhost_user_recv_req(struct virtio_uml_device *vu_dev,
			       struct vhost_user_msg *msg,
			       size_t max_payload_size)
{
	int rc = vhost_user_recv(vu_dev, vu_dev->req_fd, msg,
				 max_payload_size, false);

	if (rc)
		return rc;

	if ((msg->header.flags & ~VHOST_USER_FLAG_NEED_REPLY) !=
			VHOST_USER_VERSION)
		return -EPROTO;

	return 0;
}
static int vhost_user_send(struct virtio_uml_device *vu_dev,
			   bool need_response, struct vhost_user_msg *msg,
			   int *fds, size_t num_fds)
{
	size_t size = sizeof(msg->header) + msg->header.size;
	unsigned long flags;
	bool request_ack;
	int rc;

	msg->header.flags |= VHOST_USER_VERSION;

	/*
	 * The need_response flag indicates that we already need a response,
	 * e.g. to read the features. In these cases, don't request an ACK as
	 * it is meaningless. Also request an ACK only if supported.
	 */
	request_ack = !need_response;
	if (!(vu_dev->protocol_features &
	      BIT_ULL(VHOST_USER_PROTOCOL_F_REPLY_ACK)))
		request_ack = false;

	if (request_ack)
		msg->header.flags |= VHOST_USER_FLAG_NEED_REPLY;

	spin_lock_irqsave(&vu_dev->sock_lock, flags);
	rc = full_sendmsg_fds(vu_dev->sock, msg, size, fds, num_fds);
	if (rc < 0)
		goto out;

	if (request_ack) {
		uint64_t status;

		rc = vhost_user_recv_u64(vu_dev, &status);
		if (rc)
			goto out;

		if (status) {
			vu_err(vu_dev, "slave reports error: %llu\n", status);
			rc = -EIO;
			goto out;
		}
	}

out:
	spin_unlock_irqrestore(&vu_dev->sock_lock, flags);
	return rc;
}
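
/*
 * A typical exchange, assuming VHOST_USER_PROTOCOL_F_REPLY_ACK was
 * negotiated: we send a message with NEED_REPLY set and the slave
 * answers with a u64 status payload, 0 meaning success and non-zero
 * an error (the "slave reports error" case above). GET-style requests
 * instead pass need_response=true and read the actual reply payload
 * via vhost_user_recv_u64() themselves.
 */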
static int vhost_user_send_no_payload(struct virtio_uml_device *vu_dev,
				      bool need_response, u32 request)
{
	struct vhost_user_msg msg = {
		.header.request = request,
	};

	return vhost_user_send(vu_dev, need_response, &msg, NULL, 0);
}

static int vhost_user_send_no_payload_fd(struct virtio_uml_device *vu_dev,
					 u32 request, int fd)
{
	struct vhost_user_msg msg = {
		.header.request = request,
	};

	return vhost_user_send(vu_dev, false, &msg, &fd, 1);
}

static int vhost_user_send_u64(struct virtio_uml_device *vu_dev,
			       u32 request, u64 value)
{
	struct vhost_user_msg msg = {
		.header.request = request,
		.header.size = sizeof(msg.payload.integer),
		.payload.integer = value,
	};

	return vhost_user_send(vu_dev, false, &msg, NULL, 0);
}

static int vhost_user_set_owner(struct virtio_uml_device *vu_dev)
{
	return vhost_user_send_no_payload(vu_dev, false, VHOST_USER_SET_OWNER);
}
static int vhost_user_get_features(struct virtio_uml_device *vu_dev,
				   u64 *features)
{
	int rc = vhost_user_send_no_payload(vu_dev, true,
					    VHOST_USER_GET_FEATURES);

	if (rc)
		return rc;

	return vhost_user_recv_u64(vu_dev, features);
}

static int vhost_user_set_features(struct virtio_uml_device *vu_dev,
				   u64 features)
{
	return vhost_user_send_u64(vu_dev, VHOST_USER_SET_FEATURES, features);
}

static int vhost_user_get_protocol_features(struct virtio_uml_device *vu_dev,
					    u64 *protocol_features)
{
	int rc = vhost_user_send_no_payload(vu_dev, true,
					    VHOST_USER_GET_PROTOCOL_FEATURES);

	if (rc)
		return rc;

	return vhost_user_recv_u64(vu_dev, protocol_features);
}

static int vhost_user_set_protocol_features(struct virtio_uml_device *vu_dev,
					    u64 protocol_features)
{
	return vhost_user_send_u64(vu_dev, VHOST_USER_SET_PROTOCOL_FEATURES,
				   protocol_features);
}
static void vhost_user_reply(struct virtio_uml_device *vu_dev,
			     struct vhost_user_msg *msg, int response)
{
	struct vhost_user_msg reply = {
		.payload.integer = response,
	};
	size_t size = sizeof(reply.header) + sizeof(reply.payload.integer);
	int rc;

	reply.header = msg->header;
	reply.header.flags &= ~VHOST_USER_FLAG_NEED_REPLY;
	reply.header.flags |= VHOST_USER_FLAG_REPLY;
	reply.header.size = sizeof(reply.payload.integer);

	rc = full_sendmsg_fds(vu_dev->req_fd, &reply, size, NULL, 0);
	if (rc < 0)
		vu_err(vu_dev,
		       "sending reply to slave request failed: %d (size %zu)\n",
		       rc, size);
}
static irqreturn_t vu_req_read_message(struct virtio_uml_device *vu_dev,
				       struct time_travel_event *ev)
{
	struct virtqueue *vq;
	int response = 1;
	struct {
		struct vhost_user_msg msg;
		u8 extra_payload[512];
	} msg;
	int rc;

	rc = vhost_user_recv_req(vu_dev, &msg.msg,
				 sizeof(msg.msg.payload) +
				 sizeof(msg.extra_payload));
	if (rc)
		return IRQ_NONE;

	switch (msg.msg.header.request) {
	case VHOST_USER_SLAVE_CONFIG_CHANGE_MSG:
		vu_dev->config_changed_irq = true;
		response = 0;
		break;
	case VHOST_USER_SLAVE_VRING_CALL:
		virtio_device_for_each_vq((&vu_dev->vdev), vq) {
			if (vq->index == msg.msg.payload.vring_state.index) {
				response = 0;
				vu_dev->vq_irq_vq_map |= BIT_ULL(vq->index);
				break;
			}
		}
		break;
	case VHOST_USER_SLAVE_IOTLB_MSG:
		/* not supported - VIRTIO_F_ACCESS_PLATFORM */
	case VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG:
		/* not supported - VHOST_USER_PROTOCOL_F_HOST_NOTIFIER */
	default:
		vu_err(vu_dev, "unexpected slave request %d\n",
		       msg.msg.header.request);
	}

	if (ev && !vu_dev->suspended)
		time_travel_add_irq_event(ev);

	if (msg.msg.header.flags & VHOST_USER_FLAG_NEED_REPLY)
		vhost_user_reply(vu_dev, &msg.msg, response);

	return IRQ_HANDLED;
}
static irqreturn_t vu_req_interrupt(int irq, void *data)
{
	struct virtio_uml_device *vu_dev = data;
	irqreturn_t ret = IRQ_HANDLED;

	if (!um_irq_timetravel_handler_used())
		ret = vu_req_read_message(vu_dev, NULL);

	if (vu_dev->vq_irq_vq_map) {
		struct virtqueue *vq;

		virtio_device_for_each_vq((&vu_dev->vdev), vq) {
			if (vu_dev->vq_irq_vq_map & BIT_ULL(vq->index))
				vring_interrupt(0 /* ignored */, vq);
		}
		vu_dev->vq_irq_vq_map = 0;
	} else if (vu_dev->config_changed_irq) {
		virtio_config_changed(&vu_dev->vdev);
		vu_dev->config_changed_irq = false;
	}

	return ret;
}
static void vu_req_interrupt_comm_handler(int irq, int fd, void *data,
					  struct time_travel_event *ev)
{
	vu_req_read_message(data, ev);
}

static int vhost_user_init_slave_req(struct virtio_uml_device *vu_dev)
{
	int rc, req_fds[2];

	/* Use a pipe for slave req fd, SIGIO is not supported for eventfd */
	rc = os_pipe(req_fds, true, true);
	if (rc < 0)
		return rc;
	vu_dev->req_fd = req_fds[0];

	rc = um_request_irq_tt(UM_IRQ_ALLOC, vu_dev->req_fd, IRQ_READ,
			       vu_req_interrupt, IRQF_SHARED,
			       vu_dev->pdev->name, vu_dev,
			       vu_req_interrupt_comm_handler);
	if (rc < 0)
		goto err_close;

	vu_dev->irq = rc;

	rc = vhost_user_send_no_payload_fd(vu_dev, VHOST_USER_SET_SLAVE_REQ_FD,
					   req_fds[1]);
	if (rc)
		goto err_free_irq;

	goto out;

err_free_irq:
	um_free_irq(vu_dev->irq, vu_dev);
err_close:
	os_close_file(req_fds[0]);
out:
	/* Close unused write end of request fds */
	os_close_file(req_fds[1]);
	return rc;
}
static int vhost_user_init(struct virtio_uml_device *vu_dev)
{
	int rc = vhost_user_set_owner(vu_dev);

	if (rc)
		return rc;
	rc = vhost_user_get_features(vu_dev, &vu_dev->features);
	if (rc)
		return rc;

	if (vu_dev->features & BIT_ULL(VHOST_USER_F_PROTOCOL_FEATURES)) {
		rc = vhost_user_get_protocol_features(vu_dev,
						      &vu_dev->protocol_features);
		if (rc)
			return rc;
		vu_dev->protocol_features &= VHOST_USER_SUPPORTED_PROTOCOL_F;
		rc = vhost_user_set_protocol_features(vu_dev,
						      vu_dev->protocol_features);
		if (rc)
			return rc;
	}

	if (vu_dev->protocol_features &
	    BIT_ULL(VHOST_USER_PROTOCOL_F_SLAVE_REQ)) {
		rc = vhost_user_init_slave_req(vu_dev);
		if (rc)
			return rc;
	}

	return 0;
}
static void vhost_user_get_config(struct virtio_uml_device *vu_dev,
				  u32 offset, void *buf, u32 len)
{
	u32 cfg_size = offset + len;
	struct vhost_user_msg *msg;
	size_t payload_size = sizeof(msg->payload.config) + cfg_size;
	size_t msg_size = sizeof(msg->header) + payload_size;
	int rc;

	if (!(vu_dev->protocol_features &
	      BIT_ULL(VHOST_USER_PROTOCOL_F_CONFIG)))
		return;

	msg = kzalloc(msg_size, GFP_KERNEL);
	if (!msg)
		return;
	msg->header.request = VHOST_USER_GET_CONFIG;
	msg->header.size = payload_size;
	msg->payload.config.offset = 0;
	msg->payload.config.size = cfg_size;
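
	/*
	 * Note: the request deliberately asks for the whole config space
	 * starting at offset 0; the caller's (offset, len) window is only
	 * applied when copying out of the reply below.
	 */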
	rc = vhost_user_send(vu_dev, true, msg, NULL, 0);
	if (rc) {
		vu_err(vu_dev, "sending VHOST_USER_GET_CONFIG failed: %d\n",
		       rc);
		goto free;
	}

	rc = vhost_user_recv_resp(vu_dev, msg, msg_size);
	if (rc) {
		vu_err(vu_dev,
		       "receiving VHOST_USER_GET_CONFIG response failed: %d\n",
		       rc);
		goto free;
	}

	if (msg->header.size != payload_size ||
	    msg->payload.config.size != cfg_size) {
		rc = -EPROTO;
		vu_err(vu_dev,
		       "Invalid VHOST_USER_GET_CONFIG sizes (payload %d expected %zu, config %u expected %u)\n",
		       msg->header.size, payload_size,
		       msg->payload.config.size, cfg_size);
		goto free;
	}

	memcpy(buf, msg->payload.config.payload + offset, len);

free:
	kfree(msg);
}
static void vhost_user_set_config(struct virtio_uml_device *vu_dev,
				  u32 offset, const void *buf, u32 len)
{
	struct vhost_user_msg *msg;
	size_t payload_size = sizeof(msg->payload.config) + len;
	size_t msg_size = sizeof(msg->header) + payload_size;
	int rc;

	if (!(vu_dev->protocol_features &
	      BIT_ULL(VHOST_USER_PROTOCOL_F_CONFIG)))
		return;

	msg = kzalloc(msg_size, GFP_KERNEL);
	if (!msg)
		return;
	msg->header.request = VHOST_USER_SET_CONFIG;
	msg->header.size = payload_size;
	msg->payload.config.offset = offset;
	msg->payload.config.size = len;
	memcpy(msg->payload.config.payload, buf, len);

	rc = vhost_user_send(vu_dev, false, msg, NULL, 0);
	if (rc)
		vu_err(vu_dev, "sending VHOST_USER_SET_CONFIG failed: %d\n",
		       rc);

	kfree(msg);
}
static int vhost_user_init_mem_region(u64 addr, u64 size, int *fd_out,
				      struct vhost_user_mem_region *region_out)
{
	unsigned long long mem_offset;
	int rc = phys_mapping(addr, &mem_offset);

	if (WARN(rc < 0, "phys_mapping of 0x%llx returned %d\n", addr, rc))
		return -EFAULT;
	*fd_out = rc;
	region_out->guest_addr = addr;
	region_out->user_addr = addr;
	region_out->size = size;
	region_out->mmap_offset = mem_offset;

	/* Ensure mapping is valid for the entire region */
	rc = phys_mapping(addr + size - 1, &mem_offset);
	if (WARN(rc != *fd_out, "phys_mapping of 0x%llx failed: %d != %d\n",
		 addr + size - 1, rc, *fd_out))
		return -EFAULT;

	return 0;
}
static int vhost_user_set_mem_table(struct virtio_uml_device *vu_dev)
{
	struct vhost_user_msg msg = {
		.header.request = VHOST_USER_SET_MEM_TABLE,
		.header.size = sizeof(msg.payload.mem_regions),
		.payload.mem_regions.num = 1,
	};
	unsigned long reserved = uml_reserved - uml_physmem;
	int fds[2];
	int rc;

	/*
	 * This is a bit tricky, see also the comment with setup_physmem().
	 *
	 * Essentially, setup_physmem() uses a file to mmap() our physmem,
	 * but the code and data we *already* have is omitted. To us, this
	 * is no difference, since they both become part of our address
	 * space and memory consumption. To somebody looking in from the
	 * outside, however, it is different because the part of our memory
	 * consumption that's already part of the binary (code/data) is not
	 * mapped from the file, so it's not visible to another mmap from
	 * the file descriptor.
	 *
	 * Thus, don't advertise this space to the vhost-user slave. This
	 * means that the slave will likely abort or similar when we give
	 * it an address from the hidden range, since it's not marked as
	 * a valid address, but at least that way we detect the issue and
	 * don't just have the slave read an all-zeroes buffer from the
	 * shared memory file, or write something there that we can never
	 * see (depending on the direction of the virtqueue traffic.)
	 *
	 * Since we usually don't want to use .text for virtio buffers,
	 * this effectively means that you cannot use
	 *  1) global variables, which are in the .bss and not in the shm
	 *     file mapping
	 *  2) the stack in some processes, depending on where they have
	 *     their stack (or maybe only no interrupt stack?)
	 *
	 * The stack is already not typically valid for DMA, so this isn't
	 * much of a restriction, but global variables might be encountered.
	 *
	 * It might be possible to fix it by copying around the data that's
	 * between bss_start and where we map the file now, but it's not
	 * something that you typically encounter with virtio drivers, so
	 * it didn't seem worthwhile.
	 */
	rc = vhost_user_init_mem_region(reserved, physmem_size - reserved,
					&fds[0],
					&msg.payload.mem_regions.regions[0]);
	if (rc < 0)
		return rc;
	if (highmem) {
		msg.payload.mem_regions.num++;
		rc = vhost_user_init_mem_region(__pa(end_iomem), highmem,
						&fds[1], &msg.payload.mem_regions.regions[1]);
		if (rc < 0)
			return rc;
	}

	return vhost_user_send(vu_dev, false, &msg, fds,
			       msg.payload.mem_regions.num);
}
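
/*
 * A worked example of the layout described above, with made-up numbers:
 * given 64 MiB of physmem of which the first 4 MiB back the binary's
 * code/data (i.e. uml_reserved - uml_physmem == 4 MiB), region 0 covers
 * physical [4 MiB, 64 MiB), with mmap_offset pointing at the matching
 * offset inside the physmem file. Only buffers inside that window are
 * visible to the vhost-user slave.
 */
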
static int vhost_user_set_vring_state(struct virtio_uml_device *vu_dev,
				      u32 request, u32 index, u32 num)
{
	struct vhost_user_msg msg = {
		.header.request = request,
		.header.size = sizeof(msg.payload.vring_state),
		.payload.vring_state.index = index,
		.payload.vring_state.num = num,
	};

	return vhost_user_send(vu_dev, false, &msg, NULL, 0);
}

static int vhost_user_set_vring_num(struct virtio_uml_device *vu_dev,
				    u32 index, u32 num)
{
	return vhost_user_set_vring_state(vu_dev, VHOST_USER_SET_VRING_NUM,
					  index, num);
}

static int vhost_user_set_vring_base(struct virtio_uml_device *vu_dev,
				     u32 index, u32 offset)
{
	return vhost_user_set_vring_state(vu_dev, VHOST_USER_SET_VRING_BASE,
					  index, offset);
}

static int vhost_user_set_vring_addr(struct virtio_uml_device *vu_dev,
				     u32 index, u64 desc, u64 used, u64 avail,
				     u64 log)
{
	struct vhost_user_msg msg = {
		.header.request = VHOST_USER_SET_VRING_ADDR,
		.header.size = sizeof(msg.payload.vring_addr),
		.payload.vring_addr.index = index,
		.payload.vring_addr.desc = desc,
		.payload.vring_addr.used = used,
		.payload.vring_addr.avail = avail,
		.payload.vring_addr.log = log,
	};

	return vhost_user_send(vu_dev, false, &msg, NULL, 0);
}
static int vhost_user_set_vring_fd(struct virtio_uml_device *vu_dev,
				   u32 request, int index, int fd)
{
	struct vhost_user_msg msg = {
		.header.request = request,
		.header.size = sizeof(msg.payload.integer),
		.payload.integer = index,
	};

	if (index & ~VHOST_USER_VRING_INDEX_MASK)
		return -EINVAL;
	if (fd < 0) {
		msg.payload.integer |= VHOST_USER_VRING_POLL_MASK;
		return vhost_user_send(vu_dev, false, &msg, NULL, 0);
	}
	return vhost_user_send(vu_dev, false, &msg, &fd, 1);
}

static int vhost_user_set_vring_call(struct virtio_uml_device *vu_dev,
				     int index, int fd)
{
	return vhost_user_set_vring_fd(vu_dev, VHOST_USER_SET_VRING_CALL,
				       index, fd);
}

static int vhost_user_set_vring_kick(struct virtio_uml_device *vu_dev,
				     int index, int fd)
{
	return vhost_user_set_vring_fd(vu_dev, VHOST_USER_SET_VRING_KICK,
				       index, fd);
}

static int vhost_user_set_vring_enable(struct virtio_uml_device *vu_dev,
				       u32 index, bool enable)
{
	if (!(vu_dev->features & BIT_ULL(VHOST_USER_F_PROTOCOL_FEATURES)))
		return 0;

	return vhost_user_set_vring_state(vu_dev, VHOST_USER_SET_VRING_ENABLE,
					  index, enable);
}
/* Virtio interface */

static bool vu_notify(struct virtqueue *vq)
{
	struct virtio_uml_vq_info *info = vq->priv;
	const uint64_t n = 1;
	int rc;

	if (info->suspended)
		return true;

	time_travel_propagate_time();

	if (info->kick_fd < 0) {
		struct virtio_uml_device *vu_dev;

		vu_dev = to_virtio_uml_device(vq->vdev);

		return vhost_user_set_vring_state(vu_dev, VHOST_USER_VRING_KICK,
						  vq->index, 0) == 0;
	}

	do {
		rc = os_write_file(info->kick_fd, &n, sizeof(n));
	} while (rc == -EINTR);
	return !WARN(rc != sizeof(n), "write returned %d\n", rc);
}
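
/*
 * Two kick paths exist above: normally the guest writes a counter value
 * to the kick eventfd/pipe, but when kick_fd is -1 (in-band notifications
 * were negotiated) the kick is sent as an explicit VHOST_USER_VRING_KICK
 * message on the control socket instead.
 */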
static irqreturn_t vu_interrupt(int irq, void *opaque)
{
	struct virtqueue *vq = opaque;
	struct virtio_uml_vq_info *info = vq->priv;
	uint64_t n;
	int rc;
	irqreturn_t ret = IRQ_NONE;

	do {
		rc = os_read_file(info->call_fd, &n, sizeof(n));
		if (rc == sizeof(n))
			ret |= vring_interrupt(irq, vq);
	} while (rc == sizeof(n) || rc == -EINTR);
	WARN(rc != -EAGAIN, "read returned %d\n", rc);
	return ret;
}
static void vu_get(struct virtio_device *vdev, unsigned offset,
		   void *buf, unsigned len)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	vhost_user_get_config(vu_dev, offset, buf, len);
}

static void vu_set(struct virtio_device *vdev, unsigned offset,
		   const void *buf, unsigned len)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	vhost_user_set_config(vu_dev, offset, buf, len);
}

static u8 vu_get_status(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	return vu_dev->status;
}

static void vu_set_status(struct virtio_device *vdev, u8 status)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	vu_dev->status = status;
}

static void vu_reset(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	vu_dev->status = 0;
}
static void vu_del_vq(struct virtqueue *vq)
{
	struct virtio_uml_vq_info *info = vq->priv;

	if (info->call_fd >= 0) {
		struct virtio_uml_device *vu_dev;

		vu_dev = to_virtio_uml_device(vq->vdev);

		um_free_irq(vu_dev->irq, vq);
		os_close_file(info->call_fd);
	}

	if (info->kick_fd >= 0)
		os_close_file(info->kick_fd);

	vring_del_virtqueue(vq);
	kfree(info);
}

static void vu_del_vqs(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
	struct virtqueue *vq, *n;
	u64 features;

	/* Note: reverse order as a workaround to a decoding bug in snabb */
	list_for_each_entry_reverse(vq, &vdev->vqs, list)
		WARN_ON(vhost_user_set_vring_enable(vu_dev, vq->index, false));

	/* Ensure previous messages have been processed */
	WARN_ON(vhost_user_get_features(vu_dev, &features));

	list_for_each_entry_safe(vq, n, &vdev->vqs, list)
		vu_del_vq(vq);
}
static int vu_setup_vq_call_fd(struct virtio_uml_device *vu_dev,
			       struct virtqueue *vq)
{
	struct virtio_uml_vq_info *info = vq->priv;
	int call_fds[2];
	int rc;

	/* no call FD needed/desired in this case */
	if (vu_dev->protocol_features &
	    BIT_ULL(VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS) &&
	    vu_dev->protocol_features &
	    BIT_ULL(VHOST_USER_PROTOCOL_F_SLAVE_REQ)) {
		info->call_fd = -1;
		return 0;
	}

	/* Use a pipe for call fd, since SIGIO is not supported for eventfd */
	rc = os_pipe(call_fds, true, true);
	if (rc < 0)
		return rc;

	info->call_fd = call_fds[0];
	rc = um_request_irq(vu_dev->irq, info->call_fd, IRQ_READ,
			    vu_interrupt, IRQF_SHARED, info->name, vq);
	if (rc < 0)
		goto close_both;

	rc = vhost_user_set_vring_call(vu_dev, vq->index, call_fds[1]);
	if (rc)
		goto release_irq;

	goto out;

release_irq:
	um_free_irq(vu_dev->irq, vq);
close_both:
	os_close_file(call_fds[0]);
out:
	/* Close (unused) write end of call fds */
	os_close_file(call_fds[1]);

	return rc;
}
static struct virtqueue *vu_setup_vq(struct virtio_device *vdev,
				     unsigned index, vq_callback_t *callback,
				     const char *name, bool ctx)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
	struct platform_device *pdev = vu_dev->pdev;
	struct virtio_uml_vq_info *info;
	struct virtqueue *vq;
	int num = MAX_SUPPORTED_QUEUE_SIZE;
	int rc;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		rc = -ENOMEM;
		goto error_kzalloc;
	}
	snprintf(info->name, sizeof(info->name), "%s.%d-%s", pdev->name,
		 pdev->id, name);

	vq = vring_create_virtqueue(index, num, PAGE_SIZE, vdev, true, true,
				    ctx, vu_notify, callback, info->name);
	if (!vq) {
		rc = -ENOMEM;
		goto error_create;
	}
	vq->priv = info;
	num = virtqueue_get_vring_size(vq);

	if (vu_dev->protocol_features &
	    BIT_ULL(VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS)) {
		info->kick_fd = -1;
	} else {
		rc = os_eventfd(0, 0);
		if (rc < 0)
			goto error_kick;
		info->kick_fd = rc;
	}

	rc = vu_setup_vq_call_fd(vu_dev, vq);
	if (rc)
		goto error_call;

	rc = vhost_user_set_vring_num(vu_dev, index, num);
	if (rc)
		goto error_setup;

	rc = vhost_user_set_vring_base(vu_dev, index, 0);
	if (rc)
		goto error_setup;

	rc = vhost_user_set_vring_addr(vu_dev, index,
				       virtqueue_get_desc_addr(vq),
				       virtqueue_get_used_addr(vq),
				       virtqueue_get_avail_addr(vq),
				       (u64) -1);
	if (rc)
		goto error_setup;

	return vq;

error_setup:
	if (info->call_fd >= 0) {
		um_free_irq(vu_dev->irq, vq);
		os_close_file(info->call_fd);
	}
error_call:
	if (info->kick_fd >= 0)
		os_close_file(info->kick_fd);
error_kick:
	vring_del_virtqueue(vq);
error_create:
	kfree(info);
error_kzalloc:
	return ERR_PTR(rc);
}
static int vu_find_vqs(struct virtio_device *vdev, unsigned nvqs,
		       struct virtqueue *vqs[], vq_callback_t *callbacks[],
		       const char * const names[], const bool *ctx,
		       struct irq_affinity *desc)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
	int i, queue_idx = 0, rc;
	struct virtqueue *vq;

	/* not supported for now */
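	/* (the 64-queue limit mirrors vq_irq_vq_map, a u64 bitmap with one bit per vq index) */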
	if (WARN_ON(nvqs > 64))
		return -EINVAL;

	rc = vhost_user_set_mem_table(vu_dev);
	if (rc)
		return rc;

	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}

		vqs[i] = vu_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
				     ctx ? ctx[i] : false);
		if (IS_ERR(vqs[i])) {
			rc = PTR_ERR(vqs[i]);
			goto error_setup;
		}
	}

	list_for_each_entry(vq, &vdev->vqs, list) {
		struct virtio_uml_vq_info *info = vq->priv;

		if (info->kick_fd >= 0) {
			rc = vhost_user_set_vring_kick(vu_dev, vq->index,
						       info->kick_fd);
			if (rc)
				goto error_setup;
		}

		rc = vhost_user_set_vring_enable(vu_dev, vq->index, true);
		if (rc)
			goto error_setup;
	}

	return 0;

error_setup:
	vu_del_vqs(vdev);
	return rc;
}
static u64 vu_get_features(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	return vu_dev->features;
}

static int vu_finalize_features(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
	u64 supported = vdev->features & VHOST_USER_SUPPORTED_F;

	vring_transport_features(vdev);
	vu_dev->features = vdev->features | supported;

	return vhost_user_set_features(vu_dev, vu_dev->features);
}

static const char *vu_bus_name(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	return vu_dev->pdev->name;
}

static const struct virtio_config_ops virtio_uml_config_ops = {
	.get = vu_get,
	.set = vu_set,
	.get_status = vu_get_status,
	.set_status = vu_set_status,
	.reset = vu_reset,
	.find_vqs = vu_find_vqs,
	.del_vqs = vu_del_vqs,
	.get_features = vu_get_features,
	.finalize_features = vu_finalize_features,
	.bus_name = vu_bus_name,
};
static void virtio_uml_release_dev(struct device *d)
{
	struct virtio_device *vdev =
			container_of(d, struct virtio_device, dev);
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	/* might not have been opened due to not negotiating the feature */
	if (vu_dev->req_fd >= 0) {
		um_free_irq(vu_dev->irq, vu_dev);
		os_close_file(vu_dev->req_fd);
	}

	os_close_file(vu_dev->sock);
	kfree(vu_dev);
}
/* Platform device */

static int virtio_uml_probe(struct platform_device *pdev)
{
	struct virtio_uml_platform_data *pdata = pdev->dev.platform_data;
	struct virtio_uml_device *vu_dev;
	int rc;

	if (!pdata)
		return -EINVAL;

	vu_dev = kzalloc(sizeof(*vu_dev), GFP_KERNEL);
	if (!vu_dev)
		return -ENOMEM;

	vu_dev->vdev.dev.parent = &pdev->dev;
	vu_dev->vdev.dev.release = virtio_uml_release_dev;
	vu_dev->vdev.config = &virtio_uml_config_ops;
	vu_dev->vdev.id.device = pdata->virtio_device_id;
	vu_dev->vdev.id.vendor = VIRTIO_DEV_ANY_ID;
	vu_dev->pdev = pdev;
	vu_dev->req_fd = -1;

	do {
		rc = os_connect_socket(pdata->socket_path);
	} while (rc == -EINTR);
	if (rc < 0)
		goto error_free;
	vu_dev->sock = rc;

	spin_lock_init(&vu_dev->sock_lock);

	rc = vhost_user_init(vu_dev);
	if (rc)
		goto error_init;

	platform_set_drvdata(pdev, vu_dev);

	device_set_wakeup_capable(&vu_dev->vdev.dev, true);

	rc = register_virtio_device(&vu_dev->vdev);
	if (rc)
		put_device(&vu_dev->vdev.dev);
	vu_dev->registered = 1;
	return rc;

error_init:
	os_close_file(vu_dev->sock);
error_free:
	kfree(vu_dev);
	return rc;
}
static int virtio_uml_remove(struct platform_device *pdev)
{
	struct virtio_uml_device *vu_dev = platform_get_drvdata(pdev);

	unregister_virtio_device(&vu_dev->vdev);
	return 0;
}
/* Command line device list */

static void vu_cmdline_release_dev(struct device *d)
{
}

static struct device vu_cmdline_parent = {
	.init_name = "virtio-uml-cmdline",
	.release = vu_cmdline_release_dev,
};

static bool vu_cmdline_parent_registered;
static int vu_cmdline_id;

static int vu_unregister_cmdline_device(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct virtio_uml_platform_data *pdata = pdev->dev.platform_data;

	kfree(pdata->socket_path);
	platform_device_unregister(pdev);
	return 0;
}
static void vu_conn_broken(struct work_struct *wk)
{
	struct virtio_uml_platform_data *pdata;

	pdata = container_of(wk, struct virtio_uml_platform_data, conn_broken_wk);
	vu_unregister_cmdline_device(&pdata->pdev->dev, NULL);
}
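
/*
 * For example, a parameter of "/var/uml.socket:1:2" parses as
 * socket_path "/var/uml.socket", virtio_device_id 1 and (optional)
 * platform device id 2 via the sscanf() below.
 */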
static int vu_cmdline_set(const char *device, const struct kernel_param *kp)
{
	const char *ids = strchr(device, ':');
	unsigned int virtio_device_id;
	int processed, consumed, err;
	char *socket_path;
	struct virtio_uml_platform_data pdata, *ppdata;
	struct platform_device *pdev;

	if (!ids || ids == device)
		return -EINVAL;

	processed = sscanf(ids, ":%u%n:%d%n",
			   &virtio_device_id, &consumed,
			   &vu_cmdline_id, &consumed);

	if (processed < 1 || ids[consumed])
		return -EINVAL;

	if (!vu_cmdline_parent_registered) {
		err = device_register(&vu_cmdline_parent);
		if (err) {
			pr_err("Failed to register parent device!\n");
			put_device(&vu_cmdline_parent);
			return err;
		}
		vu_cmdline_parent_registered = true;
	}

	socket_path = kmemdup_nul(device, ids - device, GFP_KERNEL);
	if (!socket_path)
		return -ENOMEM;

	pdata.virtio_device_id = (u32) virtio_device_id;
	pdata.socket_path = socket_path;

	pr_info("Registering device virtio-uml.%d id=%d at %s\n",
		vu_cmdline_id, virtio_device_id, socket_path);

	pdev = platform_device_register_data(&vu_cmdline_parent, "virtio-uml",
					     vu_cmdline_id++, &pdata,
					     sizeof(pdata));
	err = PTR_ERR_OR_ZERO(pdev);
	if (err)
		goto free;

	ppdata = pdev->dev.platform_data;
	ppdata->pdev = pdev;
	INIT_WORK(&ppdata->conn_broken_wk, vu_conn_broken);

	return 0;

free:
	kfree(socket_path);
	return err;
}
static int vu_cmdline_get_device(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct virtio_uml_platform_data *pdata = pdev->dev.platform_data;
	char *buffer = data;
	unsigned int len = strlen(buffer);

	snprintf(buffer + len, PAGE_SIZE - len, "%s:%d:%d\n",
		 pdata->socket_path, pdata->virtio_device_id, pdev->id);
	return 0;
}

static int vu_cmdline_get(char *buffer, const struct kernel_param *kp)
{
	buffer[0] = '\0';
	if (vu_cmdline_parent_registered)
		device_for_each_child(&vu_cmdline_parent, buffer,
				      vu_cmdline_get_device);
	return strlen(buffer) + 1;
}
static const struct kernel_param_ops vu_cmdline_param_ops = {
	.set = vu_cmdline_set,
	.get = vu_cmdline_get,
};

device_param_cb(device, &vu_cmdline_param_ops, NULL, S_IRUSR);
__uml_help(vu_cmdline_param_ops,
"virtio_uml.device=<socket>:<virtio_id>[:<platform_id>]\n"
"    Configure a virtio device over a vhost-user socket.\n"
"    See virtio_ids.h for a list of possible virtio device id values.\n"
"    Optionally use a specific platform_device id.\n\n"
);
static void vu_unregister_cmdline_devices(void)
{
	if (vu_cmdline_parent_registered) {
		device_for_each_child(&vu_cmdline_parent, NULL,
				      vu_unregister_cmdline_device);
		device_unregister(&vu_cmdline_parent);
		vu_cmdline_parent_registered = false;
	}
}
/* Platform driver */

static const struct of_device_id virtio_uml_match[] = {
	{ .compatible = "virtio,uml", },
	{ }
};
MODULE_DEVICE_TABLE(of, virtio_uml_match);
static int virtio_uml_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct virtio_uml_device *vu_dev = platform_get_drvdata(pdev);
	struct virtqueue *vq;

	virtio_device_for_each_vq((&vu_dev->vdev), vq) {
		struct virtio_uml_vq_info *info = vq->priv;

		info->suspended = true;
		vhost_user_set_vring_enable(vu_dev, vq->index, false);
	}

	if (!device_may_wakeup(&vu_dev->vdev.dev)) {
		vu_dev->suspended = true;
		return 0;
	}

	return irq_set_irq_wake(vu_dev->irq, 1);
}
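
/*
 * With wakeup enabled, the slave-request IRQ above stays armed as a
 * wake source, so an incoming slave message (e.g.
 * VHOST_USER_SLAVE_VRING_CALL) can wake the suspended instance.
 */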
static int virtio_uml_resume(struct platform_device *pdev)
{
	struct virtio_uml_device *vu_dev = platform_get_drvdata(pdev);
	struct virtqueue *vq;

	virtio_device_for_each_vq((&vu_dev->vdev), vq) {
		struct virtio_uml_vq_info *info = vq->priv;

		info->suspended = false;
		vhost_user_set_vring_enable(vu_dev, vq->index, true);
	}

	vu_dev->suspended = false;

	if (!device_may_wakeup(&vu_dev->vdev.dev))
		return 0;

	return irq_set_irq_wake(vu_dev->irq, 0);
}
static struct platform_driver virtio_uml_driver = {
	.probe = virtio_uml_probe,
	.remove = virtio_uml_remove,
	.driver = {
		.name = "virtio-uml",
		.of_match_table = virtio_uml_match,
	},
	.suspend = virtio_uml_suspend,
	.resume = virtio_uml_resume,
};
static int __init virtio_uml_init(void)
{
	return platform_driver_register(&virtio_uml_driver);
}

static void __exit virtio_uml_exit(void)
{
	platform_driver_unregister(&virtio_uml_driver);
	vu_unregister_cmdline_devices();
}

module_init(virtio_uml_init);
module_exit(virtio_uml_exit);
__uml_exitcall(virtio_uml_exit);

MODULE_DESCRIPTION("UML driver for vhost-user virtio devices");
MODULE_LICENSE("GPL");