/*
 * Xen 9p backend
 *
 * Copyright Aporeto 2017
 */
#include "qemu/osdep.h"

#include "hw/9pfs/9p.h"
#include "hw/xen/xen_backend.h"
#include "hw/9pfs/xen-9pfs.h"
#include "qemu/config-file.h"
#include "fsdev/qemu-fsdev.h"
#define VERSIONS "1"
#define MAX_RINGS 8
#define MAX_RING_ORDER 8
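/*
 * Each data ring is a power-of-two number of shared pages negotiated
 * with the frontend; MAX_RING_ORDER caps it at 2^8 pages. The mapped
 * area is split between the "in" and "out" halves, each of
 * XEN_FLEX_RING_SIZE(order) bytes, as set up in xen_9pfs_connect()
 * below.
 */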
typedef struct Xen9pfsRing {
    struct Xen9pfsDev *priv;

    int ref;
    xenevtchn_handle *evtchndev;
    int evtchn;
    int local_port;
    int ring_order;
    struct xen_9pfs_data_intf *intf;
    unsigned char *data;
    struct xen_9pfs_data ring;

    struct iovec *sg;
    QEMUBH *bh;

    /* local copies, so that we can read/write PDU data directly from
     * the ring */
    RING_IDX out_cons, out_size, in_cons;
    bool inprogress;
} Xen9pfsRing;
typedef struct Xen9pfsDev {
    struct XenDevice xendev;  /* must be first */
    V9fsState state;
    char *path;
    char *security_model;
    char *tag;
    char *id;

    int num_rings;
    Xen9pfsRing *rings;
} Xen9pfsDev;
static void xen_9pfs_disconnect(struct XenDevice *xendev);
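/*
 * Describe the free space of the "in" (backend-to-frontend) ring as an
 * iovec array: one segment when the free area is contiguous, two when
 * it wraps around the end of the ring buffer.
 */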
static void xen_9pfs_in_sg(Xen9pfsRing *ring,
                           struct iovec *in_sg,
                           int *num,
                           uint32_t idx,
                           uint32_t size)
{
    RING_IDX cons, prod, masked_prod, masked_cons;

    cons = ring->intf->in_cons;
    prod = ring->intf->in_prod;
    xen_rmb();
    masked_prod = xen_9pfs_mask(prod, XEN_FLEX_RING_SIZE(ring->ring_order));
    masked_cons = xen_9pfs_mask(cons, XEN_FLEX_RING_SIZE(ring->ring_order));

    if (masked_prod < masked_cons) {
        in_sg[0].iov_base = ring->ring.in + masked_prod;
        in_sg[0].iov_len = masked_cons - masked_prod;
        *num = 1;
    } else {
        in_sg[0].iov_base = ring->ring.in + masked_prod;
        in_sg[0].iov_len = XEN_FLEX_RING_SIZE(ring->ring_order) - masked_prod;
        in_sg[1].iov_base = ring->ring.in;
        in_sg[1].iov_len = masked_cons;
        *num = 2;
    }
}
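/*
 * Describe the pending request bytes of the "out" (frontend-to-backend)
 * ring as an iovec array, again using a second segment when the data
 * wraps around the end of the ring buffer.
 */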
static void xen_9pfs_out_sg(Xen9pfsRing *ring,
                            struct iovec *out_sg,
                            int *num,
                            uint32_t idx)
{
    RING_IDX cons, prod, masked_prod, masked_cons;

    cons = ring->intf->out_cons;
    prod = ring->intf->out_prod;
    xen_rmb();
    masked_prod = xen_9pfs_mask(prod, XEN_FLEX_RING_SIZE(ring->ring_order));
    masked_cons = xen_9pfs_mask(cons, XEN_FLEX_RING_SIZE(ring->ring_order));

    if (masked_cons < masked_prod) {
        out_sg[0].iov_base = ring->ring.out + masked_cons;
        out_sg[0].iov_len = ring->out_size;
        *num = 1;
    } else {
        if (ring->out_size >
            (XEN_FLEX_RING_SIZE(ring->ring_order) - masked_cons)) {
            out_sg[0].iov_base = ring->ring.out + masked_cons;
            out_sg[0].iov_len = XEN_FLEX_RING_SIZE(ring->ring_order) -
                                masked_cons;
            out_sg[1].iov_base = ring->ring.out;
            out_sg[1].iov_len = ring->out_size -
                                (XEN_FLEX_RING_SIZE(ring->ring_order) -
                                 masked_cons);
            *num = 2;
        } else {
            out_sg[0].iov_base = ring->ring.out + masked_cons;
            out_sg[0].iov_len = ring->out_size;
            *num = 1;
        }
    }
}
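/*
 * Marshal reply data into the "in" ring. The size passed to
 * xen_9pfs_in_sg() rounds offset + 128 up to a multiple of 512, leaving
 * headroom beyond the bytes already encoded; a failed encode is fatal
 * and moves the device to Closing.
 */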
static ssize_t xen_9pfs_pdu_vmarshal(V9fsPDU *pdu,
                                     size_t offset,
                                     const char *fmt,
                                     va_list ap)
{
    Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state);
    struct iovec in_sg[2];
    int num;
    ssize_t ret;

    xen_9pfs_in_sg(&xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings],
                   in_sg, &num, pdu->idx, ROUND_UP(offset + 128, 512));

    ret = v9fs_iov_vmarshal(in_sg, num, offset, 0, fmt, ap);
    if (ret < 0) {
        xen_pv_printf(&xen_9pfs->xendev, 0,
                      "Failed to encode VirtFS request type %d\n", pdu->id + 1);
        xen_be_set_state(&xen_9pfs->xendev, XenbusStateClosing);
        xen_9pfs_disconnect(&xen_9pfs->xendev);
    }
    return ret;
}
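/*
 * Unmarshal request data from the "out" ring; a decoding failure closes
 * the connection, as above.
 */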
static ssize_t xen_9pfs_pdu_vunmarshal(V9fsPDU *pdu,
                                       size_t offset,
                                       const char *fmt,
                                       va_list ap)
{
    Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state);
    struct iovec out_sg[2];
    int num;
    ssize_t ret;

    xen_9pfs_out_sg(&xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings],
                    out_sg, &num, pdu->idx);

    ret = v9fs_iov_vunmarshal(out_sg, num, offset, 0, fmt, ap);
    if (ret < 0) {
        xen_pv_printf(&xen_9pfs->xendev, 0,
                      "Failed to decode VirtFS request type %d\n", pdu->id);
        xen_be_set_state(&xen_9pfs->xendev, XenbusStateClosing);
        xen_9pfs_disconnect(&xen_9pfs->xendev);
    }
    return ret;
}
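/*
 * Hand the 9p core an iovec view of the current request's bytes on the
 * "out" ring. ring->sg is cached so xen_9pfs_push_and_notify() can free
 * it once the request completes.
 */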
static void xen_9pfs_init_out_iov_from_pdu(V9fsPDU *pdu,
                                           struct iovec **piov,
                                           unsigned int *pniov,
                                           size_t size)
{
    Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state);
    Xen9pfsRing *ring = &xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings];
    int num;

    g_free(ring->sg);

    ring->sg = g_malloc0(sizeof(*ring->sg) * 2);
    xen_9pfs_out_sg(ring, ring->sg, &num, pdu->idx);
    *piov = ring->sg;
    *pniov = num;
}
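/*
 * Same for reply space on the "in" ring; if the frontend did not leave
 * enough room for the reply, the connection is closed.
 */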
static void xen_9pfs_init_in_iov_from_pdu(V9fsPDU *pdu,
                                          struct iovec **piov,
                                          unsigned int *pniov,
                                          size_t size)
{
    Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state);
    Xen9pfsRing *ring = &xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings];
    int num;
    size_t buf_size;

    g_free(ring->sg);

    ring->sg = g_malloc0(sizeof(*ring->sg) * 2);
    xen_9pfs_in_sg(ring, ring->sg, &num, pdu->idx, size);

    buf_size = iov_size(ring->sg, num);
    if (buf_size < size) {
        xen_pv_printf(&xen_9pfs->xendev, 0, "Xen 9pfs request type %d "
                      "needs %zu bytes, buffer has %zu\n", pdu->id, size,
                      buf_size);
        xen_be_set_state(&xen_9pfs->xendev, XenbusStateClosing);
        xen_9pfs_disconnect(&xen_9pfs->xendev);
    }

    *piov = ring->sg;
    *pniov = num;
}
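/*
 * Complete a request: publish the new ring indexes to the frontend and
 * kick it via the event channel, then reschedule the bottom half in
 * case another request is already queued.
 */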
static void xen_9pfs_push_and_notify(V9fsPDU *pdu)
{
    RING_IDX prod;
    Xen9pfsDev *priv = container_of(pdu->s, Xen9pfsDev, state);
    Xen9pfsRing *ring = &priv->rings[pdu->tag % priv->num_rings];

    g_free(ring->sg);
    ring->sg = NULL;

    /* order the index updates against the PDU data writes */
    ring->intf->out_cons = ring->out_cons;
    xen_wmb();

    prod = ring->intf->in_prod;
    xen_rmb();
    ring->intf->in_prod = prod + pdu->size;
    xen_wmb();

    ring->inprogress = false;
    xenevtchn_notify(ring->evtchndev, ring->local_port);

    qemu_bh_schedule(ring->bh);
}
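/*
 * Transport callbacks through which the generic 9p server code drives
 * this backend.
 */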
static const struct V9fsTransport xen_9p_transport = {
    .pdu_vmarshal = xen_9pfs_pdu_vmarshal,
    .pdu_vunmarshal = xen_9pfs_pdu_vunmarshal,
    .init_in_iov_from_pdu = xen_9pfs_init_in_iov_from_pdu,
    .init_out_iov_from_pdu = xen_9pfs_init_out_iov_from_pdu,
    .push_and_notify = xen_9pfs_push_and_notify,
};
static int xen_9pfs_init(struct XenDevice *xendev)
{
    return 0;
}
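/*
 * Pull the next request off the "out" ring, if one is fully queued, and
 * submit it to the 9p core. Only one request per ring is in flight at a
 * time, tracked by ring->inprogress.
 */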
static int xen_9pfs_receive(Xen9pfsRing *ring)
{
    P9MsgHeader h;
    RING_IDX cons, prod, masked_prod, masked_cons, queued;
    V9fsPDU *pdu;

    if (ring->inprogress) {
        return 0;
    }

    cons = ring->intf->out_cons;
    prod = ring->intf->out_prod;
    xen_rmb();

    queued = xen_9pfs_queued(prod, cons, XEN_FLEX_RING_SIZE(ring->ring_order));
    if (queued < sizeof(h)) {
        return 0;
    }
    ring->inprogress = true;

    masked_prod = xen_9pfs_mask(prod, XEN_FLEX_RING_SIZE(ring->ring_order));
    masked_cons = xen_9pfs_mask(cons, XEN_FLEX_RING_SIZE(ring->ring_order));

    xen_9pfs_read_packet((uint8_t *) &h, ring->ring.out, sizeof(h),
                         masked_prod, &masked_cons,
                         XEN_FLEX_RING_SIZE(ring->ring_order));
    if (queued < le32_to_cpu(h.size_le)) {
        return 0;
    }

    /* cannot fail, because we only handle one request per ring at a time */
    pdu = pdu_alloc(&ring->priv->state);
    ring->out_size = le32_to_cpu(h.size_le);
    ring->out_cons = cons + le32_to_cpu(h.size_le);

    pdu_submit(pdu, &h);

    return 0;
}
static void xen_9pfs_bh(void *opaque)
{
    Xen9pfsRing *ring = opaque;
    xen_9pfs_receive(ring);
}
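/* Event-channel interrupt: acknowledge it and defer work to the BH. */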
static void xen_9pfs_evtchn_event(void *opaque)
{
    Xen9pfsRing *ring = opaque;
    evtchn_port_t port;

    port = xenevtchn_pending(ring->evtchndev);
    xenevtchn_unmask(ring->evtchndev, port);

    qemu_bh_schedule(ring->bh);
}
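/*
 * Tear down the event channels; the ring mappings themselves are
 * released later by xen_9pfs_free().
 */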
static void xen_9pfs_disconnect(struct XenDevice *xendev)
{
    Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev);
    int i;

    for (i = 0; i < xen_9pdev->num_rings; i++) {
        if (xen_9pdev->rings[i].evtchndev != NULL) {
            qemu_set_fd_handler(xenevtchn_fd(xen_9pdev->rings[i].evtchndev),
                                NULL, NULL, NULL);
            xenevtchn_unbind(xen_9pdev->rings[i].evtchndev,
                             xen_9pdev->rings[i].local_port);
            xen_9pdev->rings[i].evtchndev = NULL;
        }
    }
}
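/* Release ring mappings, bottom halves and per-device allocations. */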
static int xen_9pfs_free(struct XenDevice *xendev)
{
    Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev);
    int i;

    if (xen_9pdev->rings[0].evtchndev != NULL) {
        xen_9pfs_disconnect(xendev);
    }

    for (i = 0; i < xen_9pdev->num_rings; i++) {
        if (xen_9pdev->rings[i].data != NULL) {
            xengnttab_unmap(xen_9pdev->xendev.gnttabdev,
                            xen_9pdev->rings[i].data,
                            (1 << xen_9pdev->rings[i].ring_order));
        }
        if (xen_9pdev->rings[i].intf != NULL) {
            xengnttab_unmap(xen_9pdev->xendev.gnttabdev,
                            xen_9pdev->rings[i].intf,
                            1);
        }
        if (xen_9pdev->rings[i].bh != NULL) {
            qemu_bh_delete(xen_9pdev->rings[i].bh);
        }
    }

    g_free(xen_9pdev->id);
    g_free(xen_9pdev->tag);
    g_free(xen_9pdev->path);
    g_free(xen_9pdev->security_model);
    g_free(xen_9pdev->rings);
    return 0;
}
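/*
 * Frontend is ready: read the ring references and event channels from
 * xenstore, map the rings, and instantiate the fsdev/9p device that
 * serves them.
 */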
static int xen_9pfs_connect(struct XenDevice *xendev)
{
    int i;
    Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev);
    V9fsState *s = &xen_9pdev->state;
    QemuOpts *fsdev;

    if (xenstore_read_fe_int(&xen_9pdev->xendev, "num-rings",
                             &xen_9pdev->num_rings) == -1 ||
        xen_9pdev->num_rings > MAX_RINGS || xen_9pdev->num_rings < 1) {
        return -1;
    }

    xen_9pdev->rings = g_malloc0(xen_9pdev->num_rings * sizeof(Xen9pfsRing));
    for (i = 0; i < xen_9pdev->num_rings; i++) {
        char *str;
        int ring_order;

        xen_9pdev->rings[i].priv = xen_9pdev;
        xen_9pdev->rings[i].evtchn = -1;
        xen_9pdev->rings[i].local_port = -1;

        str = g_strdup_printf("ring-ref%u", i);
        if (xenstore_read_fe_int(&xen_9pdev->xendev, str,
                                 &xen_9pdev->rings[i].ref) == -1) {
            g_free(str);
            goto out;
        }
        g_free(str);
        str = g_strdup_printf("event-channel-%u", i);
        if (xenstore_read_fe_int(&xen_9pdev->xendev, str,
                                 &xen_9pdev->rings[i].evtchn) == -1) {
            g_free(str);
            goto out;
        }
        g_free(str);

        xen_9pdev->rings[i].intf = xengnttab_map_grant_ref(
                xen_9pdev->xendev.gnttabdev,
                xen_9pdev->xendev.dom,
                xen_9pdev->rings[i].ref,
                PROT_READ | PROT_WRITE);
        if (!xen_9pdev->rings[i].intf) {
            goto out;
        }
        ring_order = xen_9pdev->rings[i].intf->ring_order;
        if (ring_order > MAX_RING_ORDER) {
            goto out;
        }
        xen_9pdev->rings[i].ring_order = ring_order;
        xen_9pdev->rings[i].data = xengnttab_map_domain_grant_refs(
                xen_9pdev->xendev.gnttabdev,
                (1 << ring_order),
                xen_9pdev->xendev.dom,
                xen_9pdev->rings[i].intf->ref,
                PROT_READ | PROT_WRITE);
        if (!xen_9pdev->rings[i].data) {
            goto out;
        }
        xen_9pdev->rings[i].ring.in = xen_9pdev->rings[i].data;
        xen_9pdev->rings[i].ring.out = xen_9pdev->rings[i].data +
                                       XEN_FLEX_RING_SIZE(ring_order);

        xen_9pdev->rings[i].bh = qemu_bh_new(xen_9pfs_bh, &xen_9pdev->rings[i]);
        xen_9pdev->rings[i].out_cons = 0;
        xen_9pdev->rings[i].out_size = 0;
        xen_9pdev->rings[i].inprogress = false;

        xen_9pdev->rings[i].evtchndev = xenevtchn_open(NULL, 0);
        if (xen_9pdev->rings[i].evtchndev == NULL) {
            goto out;
        }
        qemu_set_cloexec(xenevtchn_fd(xen_9pdev->rings[i].evtchndev));
        xen_9pdev->rings[i].local_port = xenevtchn_bind_interdomain
                                            (xen_9pdev->rings[i].evtchndev,
                                             xendev->dom,
                                             xen_9pdev->rings[i].evtchn);
        if (xen_9pdev->rings[i].local_port == -1) {
            xen_pv_printf(xendev, 0,
                          "xenevtchn_bind_interdomain failed port=%d\n",
                          xen_9pdev->rings[i].evtchn);
            goto out;
        }
        xen_pv_printf(xendev, 2, "bind evtchn port %d\n", xendev->local_port);
        qemu_set_fd_handler(xenevtchn_fd(xen_9pdev->rings[i].evtchndev),
                            xen_9pfs_evtchn_event, NULL, &xen_9pdev->rings[i]);
    }

    xen_9pdev->security_model = xenstore_read_be_str(xendev, "security_model");
    xen_9pdev->path = xenstore_read_be_str(xendev, "path");
    xen_9pdev->id = s->fsconf.fsdev_id =
        g_strdup_printf("xen9p%d", xendev->dev);
    xen_9pdev->tag = s->fsconf.tag = xenstore_read_fe_str(xendev, "tag");
    v9fs_register_transport(s, &xen_9p_transport);
    fsdev = qemu_opts_create(qemu_find_opts("fsdev"),
                             s->fsconf.tag,
                             1, NULL);
    qemu_opt_set(fsdev, "fsdriver", "local", NULL);
    qemu_opt_set(fsdev, "path", xen_9pdev->path, NULL);
    qemu_opt_set(fsdev, "security_model", xen_9pdev->security_model, NULL);
    qemu_opts_set_id(fsdev, s->fsconf.fsdev_id);
    qemu_fsdev_add(fsdev);
    v9fs_device_realize_common(s, NULL);

    return 0;

out:
    xen_9pfs_free(xendev);
    return -1;
}
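/*
 * Advertise backend capabilities in xenstore before the frontend
 * connects.
 */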
static void xen_9pfs_alloc(struct XenDevice *xendev)
{
    xenstore_write_be_str(xendev, "versions", VERSIONS);
    xenstore_write_be_int(xendev, "max-rings", MAX_RINGS);
    xenstore_write_be_int(xendev, "max-ring-page-order", MAX_RING_ORDER);
}
struct XenDevOps xen_9pfs_ops = {
    .size = sizeof(Xen9pfsDev),
    .flags = DEVOPS_FLAG_NEED_GNTDEV,
    .alloc = xen_9pfs_alloc,
    .init = xen_9pfs_init,
    .initialise = xen_9pfs_connect,
    .disconnect = xen_9pfs_disconnect,
    .free = xen_9pfs_free,
};