/*
 * virtio transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <[email protected]>
 *         Stefan Hajnoczi <[email protected]>
 *
 * Some of the code is taken from Gerd Hoffmann <[email protected]>'s
 * early virtio-vsock proof-of-concept bits.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/atomic.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
#include <linux/virtio_vsock.h>
#include <net/sock.h>
#include <linux/mutex.h>
#include <net/af_vsock.h>

static struct workqueue_struct *virtio_vsock_workqueue;
static struct virtio_vsock *the_virtio_vsock;
static DEFINE_MUTEX(the_virtio_vsock_mutex); /* protects the_virtio_vsock */

struct virtio_vsock {
	struct virtio_device *vdev;
	struct virtqueue *vqs[VSOCK_VQ_MAX];

	/* Virtqueue processing is deferred to a workqueue */
	struct work_struct tx_work;
	struct work_struct rx_work;
	struct work_struct event_work;

	/* The following fields are protected by tx_lock.  vqs[VSOCK_VQ_TX]
	 * must be accessed with tx_lock held.
	 */
	struct mutex tx_lock;

	struct work_struct send_pkt_work;
	spinlock_t send_pkt_list_lock;
	struct list_head send_pkt_list;

	struct work_struct loopback_work;
	spinlock_t loopback_list_lock; /* protects loopback_list */
	struct list_head loopback_list;

	atomic_t queued_replies;

	/* The following fields are protected by rx_lock.  vqs[VSOCK_VQ_RX]
	 * must be accessed with rx_lock held.
	 */
	struct mutex rx_lock;
	int rx_buf_nr;
	int rx_buf_max_nr;

	/* The following fields are protected by event_lock.
	 * vqs[VSOCK_VQ_EVENT] must be accessed with event_lock held.
	 */
	struct mutex event_lock;
	struct virtio_vsock_event event_list[8];

	u32 guest_cid;
};

static struct virtio_vsock *virtio_vsock_get(void)
{
	return the_virtio_vsock;
}

static u32 virtio_transport_get_local_cid(void)
{
	struct virtio_vsock *vsock = virtio_vsock_get();

	return vsock->guest_cid;
}

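/* Packets whose destination CID equals our own guest CID are looped back
 * locally instead of being handed to the host: they are queued on
 * loopback_list and delivered to the vsock core from this worker.
 */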
static void virtio_transport_loopback_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, loopback_work);
	LIST_HEAD(pkts);

	spin_lock_bh(&vsock->loopback_list_lock);
	list_splice_init(&vsock->loopback_list, &pkts);
	spin_unlock_bh(&vsock->loopback_list_lock);

	mutex_lock(&vsock->rx_lock);
	while (!list_empty(&pkts)) {
		struct virtio_vsock_pkt *pkt;

		pkt = list_first_entry(&pkts, struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);

		virtio_transport_recv_pkt(pkt);
	}
	mutex_unlock(&vsock->rx_lock);
}

static int virtio_transport_send_pkt_loopback(struct virtio_vsock *vsock,
					      struct virtio_vsock_pkt *pkt)
{
	int len = pkt->len;

	spin_lock_bh(&vsock->loopback_list_lock);
	list_add_tail(&pkt->list, &vsock->loopback_list);
	spin_unlock_bh(&vsock->loopback_list_lock);

	queue_work(virtio_vsock_workqueue, &vsock->loopback_work);

	return len;
}

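/* Drain send_pkt_list into the TX virtqueue.  If the virtqueue runs out of
 * space, the packet is requeued and the worker retries after the device
 * completes buffers (see virtio_transport_tx_work).  RX processing is
 * restarted once enough queued replies have been flushed to the device.
 */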
static void
virtio_transport_send_pkt_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, send_pkt_work);
	struct virtqueue *vq;
	bool added = false;
	bool restart_rx = false;

	mutex_lock(&vsock->tx_lock);

	vq = vsock->vqs[VSOCK_VQ_TX];

	for (;;) {
		struct virtio_vsock_pkt *pkt;
		struct scatterlist hdr, buf, *sgs[2];
		int ret, in_sg = 0, out_sg = 0;
		bool reply;

		spin_lock_bh(&vsock->send_pkt_list_lock);
		if (list_empty(&vsock->send_pkt_list)) {
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			break;
		}

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		spin_unlock_bh(&vsock->send_pkt_list_lock);

		virtio_transport_deliver_tap_pkt(pkt);

		reply = pkt->reply;

		sg_init_one(&hdr, &pkt->hdr, sizeof(pkt->hdr));
		sgs[out_sg++] = &hdr;
		if (pkt->buf) {
			sg_init_one(&buf, pkt->buf, pkt->len);
			sgs[out_sg++] = &buf;
		}

		ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, pkt, GFP_KERNEL);
		/* Usually this means that there is no more space available in
		 * the vq.
		 */
		if (ret < 0) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			break;
		}

		if (reply) {
			struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
			int val;

			val = atomic_dec_return(&vsock->queued_replies);

			/* Do we now have resources to resume rx processing? */
			if (val + 1 == virtqueue_get_vring_size(rx_vq))
				restart_rx = true;
		}

		added = true;
	}

	if (added)
		virtqueue_kick(vq);

	mutex_unlock(&vsock->tx_lock);

	if (restart_rx)
		queue_work(virtio_vsock_workqueue, &vsock->rx_work);
}

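/* Transmit entry point used by the vsock core.  Loopback packets never
 * reach the host; all other packets are queued for the send worker.
 * Returns the packet length that was queued for transmission.
 */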
static int
virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt)
{
	struct virtio_vsock *vsock;
	int len = pkt->len;

	vsock = virtio_vsock_get();
	if (!vsock) {
		virtio_transport_free_pkt(pkt);
		return -ENODEV;
	}

	if (le64_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid)
		return virtio_transport_send_pkt_loopback(vsock, pkt);

	if (pkt->reply)
		atomic_inc(&vsock->queued_replies);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_add_tail(&pkt->list, &vsock->send_pkt_list);
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
	return len;
}

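/* Drop all packets still queued on behalf of a socket that is going away,
 * and restart RX processing if freeing queued replies made room for it.
 */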
static int
virtio_transport_cancel_pkt(struct vsock_sock *vsk)
{
	struct virtio_vsock *vsock;
	struct virtio_vsock_pkt *pkt, *n;
	int cnt = 0;
	LIST_HEAD(freeme);

	vsock = virtio_vsock_get();
	if (!vsock)
		return -ENODEV;

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
		if (pkt->vsk != vsk)
			continue;
		list_move(&pkt->list, &freeme);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	list_for_each_entry_safe(pkt, n, &freeme, list) {
		if (pkt->reply)
			cnt++;
		list_del(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}

	if (cnt) {
		struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
		int new_cnt;

		new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
		if (new_cnt + cnt >= virtqueue_get_vring_size(rx_vq) &&
		    new_cnt < virtqueue_get_vring_size(rx_vq))
			queue_work(virtio_vsock_workqueue, &vsock->rx_work);
	}

	return 0;
}

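/* Keep the RX virtqueue stocked with receive buffers, each consisting of a
 * packet header plus a default-sized payload area.  Called with rx_lock
 * held.
 */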
static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
{
	int buf_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE;
	struct virtio_vsock_pkt *pkt;
	struct scatterlist hdr, buf, *sgs[2];
	struct virtqueue *vq;
	int ret;

	vq = vsock->vqs[VSOCK_VQ_RX];

	do {
		pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
		if (!pkt)
			break;

		pkt->buf = kmalloc(buf_len, GFP_KERNEL);
		if (!pkt->buf) {
			virtio_transport_free_pkt(pkt);
			break;
		}

		pkt->len = buf_len;

		sg_init_one(&hdr, &pkt->hdr, sizeof(pkt->hdr));
		sgs[0] = &hdr;

		sg_init_one(&buf, pkt->buf, buf_len);
		sgs[1] = &buf;
		ret = virtqueue_add_sgs(vq, sgs, 0, 2, pkt, GFP_KERNEL);
		if (ret) {
			virtio_transport_free_pkt(pkt);
			break;
		}
		vsock->rx_buf_nr++;
	} while (vq->num_free);
	if (vsock->rx_buf_nr > vsock->rx_buf_max_nr)
		vsock->rx_buf_max_nr = vsock->rx_buf_nr;
	virtqueue_kick(vq);
}

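/* Reclaim buffers the device has finished transmitting, then kick the send
 * worker in case packets were waiting for virtqueue space.
 */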
static void virtio_transport_tx_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, tx_work);
	struct virtqueue *vq;
	bool added = false;

	vq = vsock->vqs[VSOCK_VQ_TX];
	mutex_lock(&vsock->tx_lock);
	do {
		struct virtio_vsock_pkt *pkt;
		unsigned int len;

		virtqueue_disable_cb(vq);
		while ((pkt = virtqueue_get_buf(vq, &len)) != NULL) {
			virtio_transport_free_pkt(pkt);
			added = true;
		}
	} while (!virtqueue_enable_cb(vq));
	mutex_unlock(&vsock->tx_lock);

	if (added)
		queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
}

/* Is there space left for replies to rx packets? */
static bool virtio_transport_more_replies(struct virtio_vsock *vsock)
{
	struct virtqueue *vq = vsock->vqs[VSOCK_VQ_RX];
	int val;

	smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
	val = atomic_read(&vsock->queued_replies);

	return val < virtqueue_get_vring_size(vq);
}

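/* Receive worker.  RX is throttled while too many replies are queued: the
 * virtqueue callback stays disabled and processing resumes only after the
 * send worker has flushed enough replies.  Malformed packets are dropped.
 */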
static void virtio_transport_rx_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, rx_work);
	struct virtqueue *vq;

	vq = vsock->vqs[VSOCK_VQ_RX];

	mutex_lock(&vsock->rx_lock);

	do {
		virtqueue_disable_cb(vq);
		for (;;) {
			struct virtio_vsock_pkt *pkt;
			unsigned int len;

			if (!virtio_transport_more_replies(vsock)) {
				/* Stop rx until the device processes already
				 * pending replies.  Leave rx virtqueue
				 * callbacks disabled.
				 */
				goto out;
			}

			pkt = virtqueue_get_buf(vq, &len);
			if (!pkt)
				break;

			vsock->rx_buf_nr--;

			/* Drop short/long packets */
			if (unlikely(len < sizeof(pkt->hdr) ||
				     len > sizeof(pkt->hdr) + pkt->len)) {
				virtio_transport_free_pkt(pkt);
				continue;
			}

			pkt->len = len - sizeof(pkt->hdr);
			virtio_transport_deliver_tap_pkt(pkt);
			virtio_transport_recv_pkt(pkt);
		}
	} while (!virtqueue_enable_cb(vq));

out:
	if (vsock->rx_buf_nr < vsock->rx_buf_max_nr / 2)
		virtio_vsock_rx_fill(vsock);
	mutex_unlock(&vsock->rx_lock);
}

/* event_lock must be held */
static int virtio_vsock_event_fill_one(struct virtio_vsock *vsock,
				       struct virtio_vsock_event *event)
{
	struct scatterlist sg;
	struct virtqueue *vq;

	vq = vsock->vqs[VSOCK_VQ_EVENT];

	sg_init_one(&sg, event, sizeof(*event));

	return virtqueue_add_inbuf(vq, &sg, 1, event, GFP_KERNEL);
}

/* event_lock must be held */
static void virtio_vsock_event_fill(struct virtio_vsock *vsock)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(vsock->event_list); i++) {
		struct virtio_vsock_event *event = &vsock->event_list[i];

		virtio_vsock_event_fill_one(vsock, event);
	}

	virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
}

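/* A transport reset invalidates all existing connections.  Mark the socket
 * disconnected with ECONNRESET so that blocked callers wake up and observe
 * the error.
 */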
static void virtio_vsock_reset_sock(struct sock *sk)
{
	lock_sock(sk);
	sk->sk_state = SS_UNCONNECTED;
	sk->sk_err = ECONNRESET;
	sk->sk_error_report(sk);
	release_sock(sk);
}

static void virtio_vsock_update_guest_cid(struct virtio_vsock *vsock)
{
	struct virtio_device *vdev = vsock->vdev;
	__le64 guest_cid;

	vdev->config->get(vdev, offsetof(struct virtio_vsock_config, guest_cid),
			  &guest_cid, sizeof(guest_cid));
	vsock->guest_cid = le64_to_cpu(guest_cid);
}

/* event_lock must be held */
static void virtio_vsock_event_handle(struct virtio_vsock *vsock,
				      struct virtio_vsock_event *event)
{
	switch (le32_to_cpu(event->id)) {
	case VIRTIO_VSOCK_EVENT_TRANSPORT_RESET:
		virtio_vsock_update_guest_cid(vsock);
		vsock_for_each_connected_socket(virtio_vsock_reset_sock);
		break;
	}
}

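/* Process device events (e.g. a transport reset after migration) and
 * return each event buffer to the virtqueue once it has been handled.
 */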
static void virtio_transport_event_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, event_work);
	struct virtqueue *vq;

	vq = vsock->vqs[VSOCK_VQ_EVENT];

	mutex_lock(&vsock->event_lock);

	do {
		struct virtio_vsock_event *event;
		unsigned int len;

		virtqueue_disable_cb(vq);
		while ((event = virtqueue_get_buf(vq, &len)) != NULL) {
			if (len == sizeof(*event))
				virtio_vsock_event_handle(vsock, event);

			virtio_vsock_event_fill_one(vsock, event);
		}
	} while (!virtqueue_enable_cb(vq));

	virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);

	mutex_unlock(&vsock->event_lock);
}

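/* Virtqueue callbacks run in atomic context, so they only schedule the
 * corresponding work item; the real processing happens on the workqueue,
 * where sleeping locks may be taken.
 */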
static void virtio_vsock_event_done(struct virtqueue *vq)
{
	struct virtio_vsock *vsock = vq->vdev->priv;

	if (!vsock)
		return;
	queue_work(virtio_vsock_workqueue, &vsock->event_work);
}

static void virtio_vsock_tx_done(struct virtqueue *vq)
{
	struct virtio_vsock *vsock = vq->vdev->priv;

	if (!vsock)
		return;
	queue_work(virtio_vsock_workqueue, &vsock->tx_work);
}

static void virtio_vsock_rx_done(struct virtqueue *vq)
{
	struct virtio_vsock *vsock = vq->vdev->priv;

	if (!vsock)
		return;
	queue_work(virtio_vsock_workqueue, &vsock->rx_work);
}

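/* Transport operations handed to the vsock core.  Most callbacks are the
 * generic virtio_transport_* implementations shared with other virtio
 * based transports (net/vmw_vsock/virtio_transport_common.c); only packet
 * delivery is device-specific.
 */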
static struct virtio_transport virtio_transport = {
	.transport = {
		.get_local_cid            = virtio_transport_get_local_cid,

		.init                     = virtio_transport_do_socket_init,
		.destruct                 = virtio_transport_destruct,
		.release                  = virtio_transport_release,
		.connect                  = virtio_transport_connect,
		.shutdown                 = virtio_transport_shutdown,
		.cancel_pkt               = virtio_transport_cancel_pkt,

		.dgram_bind               = virtio_transport_dgram_bind,
		.dgram_dequeue            = virtio_transport_dgram_dequeue,
		.dgram_enqueue            = virtio_transport_dgram_enqueue,
		.dgram_allow              = virtio_transport_dgram_allow,

		.stream_dequeue           = virtio_transport_stream_dequeue,
		.stream_enqueue           = virtio_transport_stream_enqueue,
		.stream_has_data          = virtio_transport_stream_has_data,
		.stream_has_space         = virtio_transport_stream_has_space,
		.stream_rcvhiwat          = virtio_transport_stream_rcvhiwat,
		.stream_is_active         = virtio_transport_stream_is_active,
		.stream_allow             = virtio_transport_stream_allow,

		.notify_poll_in           = virtio_transport_notify_poll_in,
		.notify_poll_out          = virtio_transport_notify_poll_out,
		.notify_recv_init         = virtio_transport_notify_recv_init,
		.notify_recv_pre_block    = virtio_transport_notify_recv_pre_block,
		.notify_recv_pre_dequeue  = virtio_transport_notify_recv_pre_dequeue,
		.notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
		.notify_send_init         = virtio_transport_notify_send_init,
		.notify_send_pre_block    = virtio_transport_notify_send_pre_block,
		.notify_send_pre_enqueue  = virtio_transport_notify_send_pre_enqueue,
		.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,

		.set_buffer_size          = virtio_transport_set_buffer_size,
		.set_min_buffer_size      = virtio_transport_set_min_buffer_size,
		.set_max_buffer_size      = virtio_transport_set_max_buffer_size,
		.get_buffer_size          = virtio_transport_get_buffer_size,
		.get_min_buffer_size      = virtio_transport_get_min_buffer_size,
		.get_max_buffer_size      = virtio_transport_get_max_buffer_size,
	},

	.send_pkt = virtio_transport_send_pkt,
};

static int virtio_vsock_probe(struct virtio_device *vdev)
{
	vq_callback_t *callbacks[] = {
		virtio_vsock_rx_done,
		virtio_vsock_tx_done,
		virtio_vsock_event_done,
	};
	static const char * const names[] = {
		"rx",
		"tx",
		"event",
	};
	struct virtio_vsock *vsock = NULL;
	int ret;

	ret = mutex_lock_interruptible(&the_virtio_vsock_mutex);
	if (ret)
		return ret;

	/* Only one virtio-vsock device per guest is supported */
	if (the_virtio_vsock) {
		ret = -EBUSY;
		goto out;
	}

	vsock = kzalloc(sizeof(*vsock), GFP_KERNEL);
	if (!vsock) {
		ret = -ENOMEM;
		goto out;
	}

	vsock->vdev = vdev;

	ret = virtio_find_vqs(vsock->vdev, VSOCK_VQ_MAX,
			      vsock->vqs, callbacks, names,
			      NULL);
	if (ret < 0)
		goto out;

	virtio_vsock_update_guest_cid(vsock);

	ret = vsock_core_init(&virtio_transport.transport);
	if (ret < 0)
		goto out_vqs;

	vsock->rx_buf_nr = 0;
	vsock->rx_buf_max_nr = 0;
	atomic_set(&vsock->queued_replies, 0);

	vdev->priv = vsock;
	the_virtio_vsock = vsock;
	mutex_init(&vsock->tx_lock);
	mutex_init(&vsock->rx_lock);
	mutex_init(&vsock->event_lock);
	spin_lock_init(&vsock->send_pkt_list_lock);
	INIT_LIST_HEAD(&vsock->send_pkt_list);
	spin_lock_init(&vsock->loopback_list_lock);
	INIT_LIST_HEAD(&vsock->loopback_list);
	INIT_WORK(&vsock->rx_work, virtio_transport_rx_work);
	INIT_WORK(&vsock->tx_work, virtio_transport_tx_work);
	INIT_WORK(&vsock->event_work, virtio_transport_event_work);
	INIT_WORK(&vsock->send_pkt_work, virtio_transport_send_pkt_work);
	INIT_WORK(&vsock->loopback_work, virtio_transport_loopback_work);

	mutex_lock(&vsock->rx_lock);
	virtio_vsock_rx_fill(vsock);
	mutex_unlock(&vsock->rx_lock);

	mutex_lock(&vsock->event_lock);
	virtio_vsock_event_fill(vsock);
	mutex_unlock(&vsock->event_lock);

	mutex_unlock(&the_virtio_vsock_mutex);
	return 0;

out_vqs:
	vsock->vdev->config->del_vqs(vsock->vdev);
out:
	kfree(vsock);
	mutex_unlock(&the_virtio_vsock_mutex);
	return ret;
}

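/* Flush all pending work, reset the device, and free every packet still
 * sitting on a virtqueue or on an internal list before tearing down the
 * transport.
 */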
static void virtio_vsock_remove(struct virtio_device *vdev)
{
	struct virtio_vsock *vsock = vdev->priv;
	struct virtio_vsock_pkt *pkt;

	flush_work(&vsock->loopback_work);
	flush_work(&vsock->rx_work);
	flush_work(&vsock->tx_work);
	flush_work(&vsock->event_work);
	flush_work(&vsock->send_pkt_work);

	vdev->config->reset(vdev);

	mutex_lock(&vsock->rx_lock);
	while ((pkt = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_RX])))
		virtio_transport_free_pkt(pkt);
	mutex_unlock(&vsock->rx_lock);

	mutex_lock(&vsock->tx_lock);
	while ((pkt = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_TX])))
		virtio_transport_free_pkt(pkt);
	mutex_unlock(&vsock->tx_lock);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	while (!list_empty(&vsock->send_pkt_list)) {
		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	spin_lock_bh(&vsock->loopback_list_lock);
	while (!list_empty(&vsock->loopback_list)) {
		pkt = list_first_entry(&vsock->loopback_list,
				       struct virtio_vsock_pkt, list);
		list_del(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}
	spin_unlock_bh(&vsock->loopback_list_lock);

	mutex_lock(&the_virtio_vsock_mutex);
	the_virtio_vsock = NULL;
	vsock_core_exit();
	mutex_unlock(&the_virtio_vsock_mutex);

	vdev->config->del_vqs(vdev);

	kfree(vsock);
}

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_VSOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
};

static struct virtio_driver virtio_vsock_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = virtio_vsock_probe,
	.remove = virtio_vsock_remove,
};

static int __init virtio_vsock_init(void)
{
	int ret;

	virtio_vsock_workqueue = alloc_workqueue("virtio_vsock", 0, 0);
	if (!virtio_vsock_workqueue)
		return -ENOMEM;
	ret = register_virtio_driver(&virtio_vsock_driver);
	if (ret)
		destroy_workqueue(virtio_vsock_workqueue);
	return ret;
}

static void __exit virtio_vsock_exit(void)
{
	unregister_virtio_driver(&virtio_vsock_driver);
	destroy_workqueue(virtio_vsock_workqueue);
}

module_init(virtio_vsock_init);
module_exit(virtio_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("virtio transport for vsock");
MODULE_DEVICE_TABLE(virtio, id_table);