/*
 * QEMU Block driver for NBD
 *
 * Copyright (C) 2016 Red Hat, Inc.
 * Copyright (C) 2008 Bull S.A.S.
 *     Author: Laurent Vivier <[email protected]>
 *
 * Some parts:
 *    Copyright (C) 2007 Anthony Liguori <[email protected]>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "nbd-client.h"

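/* Request handles on the wire are the request slot index XORed with the
 * client session pointer: handles stay unique across devices, and a stale
 * or corrupted handle from the server almost always decodes to an
 * out-of-range index, which nbd_read_reply_entry treats as fatal.  E.g.
 * with an (illustrative) session at 0x7f120000, slot 2 travels as
 * 2 ^ 0x7f120000, and the same XOR maps it back to 2. */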
#define HANDLE_TO_INDEX(bs, handle) ((handle) ^ (uint64_t)(intptr_t)(bs))
#define INDEX_TO_HANDLE(bs, index)  ((index)  ^ (uint64_t)(intptr_t)(bs))
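/* Wake every request coroutine that is parked in
 * nbd_co_do_receive_one_chunk() waiting for a reply, so each of them can
 * notice s->quit and fail its request. */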
static void nbd_recv_coroutines_wake_all(NBDClientSession *s)
{
    int i;

    for (i = 0; i < MAX_NBD_REQUESTS; i++) {
        NBDClientRequest *req = &s->requests[i];

        if (req->coroutine && req->receiving) {
            aio_co_wake(req->coroutine);
        }
    }
}
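/* Shut the channel down, wait for nbd_read_reply_entry() to finish and
 * drop the references to the I/O channels.  Safe to call when the
 * connection is already closed (client->ioc == NULL). */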
static void nbd_teardown_connection(BlockDriverState *bs)
{
    NBDClientSession *client = nbd_get_client_session(bs);

    if (!client->ioc) { /* Already closed */
        return;
    }

    /* finish any pending coroutines */
    qio_channel_shutdown(client->ioc,
                         QIO_CHANNEL_SHUTDOWN_BOTH,
                         NULL);
    BDRV_POLL_WHILE(bs, client->read_reply_co);

    nbd_client_detach_aio_context(bs);
    object_unref(OBJECT(client->sioc));
    client->sioc = NULL;
    object_unref(OBJECT(client->ioc));
    client->ioc = NULL;
}
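/* Reply dispatcher, running in a dedicated coroutine for the lifetime of
 * the connection: read one reply header at a time, decode the handle back
 * into a request slot, wake the coroutine owning that slot, then yield
 * until it has consumed s->reply.  Any protocol violation terminates the
 * loop and marks the whole session as quitting. */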
static coroutine_fn void nbd_read_reply_entry(void *opaque)
{
    NBDClientSession *s = opaque;
    uint64_t i;
    int ret = 0;
    Error *local_err = NULL;

    while (!s->quit) {
        assert(s->reply.handle == 0);
        ret = nbd_receive_reply(s->ioc, &s->reply, &local_err);
        if (local_err) {
            error_report_err(local_err);
        }
        if (ret <= 0) {
            break;
        }

        /* There's no need for a mutex on the receive side, because the
         * handler acts as a synchronization point and ensures that only
         * one coroutine is called until the reply finishes.
         */
        i = HANDLE_TO_INDEX(s, s->reply.handle);
        if (i >= MAX_NBD_REQUESTS ||
            !s->requests[i].coroutine ||
            !s->requests[i].receiving ||
            (nbd_reply_is_structured(&s->reply) && !s->info.structured_reply))
        {
            break;
        }

        /* We're woken up again by the request itself.  Note that there
         * is no race between yielding and reentering read_reply_co.  This
         * is because:
         *
         * - if the request runs on the same AioContext, it is only
         *   entered after we yield
         *
         * - if the request runs on a different AioContext, reentering
         *   read_reply_co happens through a bottom half, which can only
         *   run after we yield.
         */
        aio_co_wake(s->requests[i].coroutine);
        qemu_coroutine_yield();
    }

    s->quit = true;
    nbd_recv_coroutines_wake_all(s);
    s->read_reply_co = NULL;
}
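/* Allocate a free request slot and transmit the request header (plus the
 * write payload from @qiov, if any) under send_mutex.  Waits on free_sema
 * while all MAX_NBD_REQUESTS slots are in flight; corking the channel
 * around header and payload lets both go out together where the transport
 * supports it. */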
static int nbd_co_send_request(BlockDriverState *bs,
                               NBDRequest *request,
                               QEMUIOVector *qiov)
{
    NBDClientSession *s = nbd_get_client_session(bs);
    int rc, i;

    qemu_co_mutex_lock(&s->send_mutex);
    while (s->in_flight == MAX_NBD_REQUESTS) {
        qemu_co_queue_wait(&s->free_sema, &s->send_mutex);
    }
    s->in_flight++;

    for (i = 0; i < MAX_NBD_REQUESTS; i++) {
        if (s->requests[i].coroutine == NULL) {
            break;
        }
    }

    g_assert(qemu_in_coroutine());
    assert(i < MAX_NBD_REQUESTS);

    s->requests[i].coroutine = qemu_coroutine_self();
    s->requests[i].offset = request->from;
    s->requests[i].receiving = false;

    request->handle = INDEX_TO_HANDLE(s, i);

    if (s->quit) {
        rc = -EIO;
        goto err;
    }
    if (!s->ioc) {
        rc = -EPIPE;
        goto err;
    }

    if (qiov) {
        qio_channel_set_cork(s->ioc, true);
        rc = nbd_send_request(s->ioc, request);
        if (rc >= 0 && !s->quit) {
            if (qio_channel_writev_all(s->ioc, qiov->iov, qiov->niov,
                                       NULL) < 0) {
                rc = -EIO;
            }
        } else if (rc >= 0) {
            rc = -EIO;
        }
        qio_channel_set_cork(s->ioc, false);
    } else {
        rc = nbd_send_request(s->ioc, request);
    }

err:
    if (rc < 0) {
        s->quit = true;
        s->requests[i].coroutine = NULL;
        s->in_flight--;
        qemu_co_queue_next(&s->free_sema);
    }
    qemu_co_mutex_unlock(&s->send_mutex);
    return rc;
}
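/* Cursor-style readers for structured reply payloads: each helper returns
 * the next field (NBD is a big-endian protocol) and advances *payload
 * past it. */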
static inline uint16_t payload_advance16(uint8_t **payload)
{
    *payload += 2;
    return lduw_be_p(*payload - 2);
}

static inline uint32_t payload_advance32(uint8_t **payload)
{
    *payload += 4;
    return ldl_be_p(*payload - 4);
}

static inline uint64_t payload_advance64(uint8_t **payload)
{
    *payload += 8;
    return ldq_be_p(*payload - 8);
}
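/* Handle NBD_REPLY_TYPE_OFFSET_HOLE: check that the hole lies entirely
 * within the originally requested region, then zero the matching slice
 * of @qiov. */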
static int nbd_parse_offset_hole_payload(NBDStructuredReplyChunk *chunk,
                                         uint8_t *payload, uint64_t orig_offset,
                                         QEMUIOVector *qiov, Error **errp)
{
    uint64_t offset;
    uint32_t hole_size;

    if (chunk->length != sizeof(offset) + sizeof(hole_size)) {
        error_setg(errp, "Protocol error: invalid payload for "
                         "NBD_REPLY_TYPE_OFFSET_HOLE");
        return -EINVAL;
    }

    offset = payload_advance64(&payload);
    hole_size = payload_advance32(&payload);

    if (!hole_size || offset < orig_offset || hole_size > qiov->size ||
        offset > orig_offset + qiov->size - hole_size) {
        error_setg(errp, "Protocol error: server sent chunk exceeding requested"
                         " region");
        return -EINVAL;
    }

    qemu_iovec_memset(qiov, offset - orig_offset, 0, hole_size);

    return 0;
}

/* nbd_parse_error_payload
 * On success, @request_ret is set to the negative errno matching the
 * server's error reply; @errp is only set when the error chunk itself
 * is malformed.
 */
static int nbd_parse_error_payload(NBDStructuredReplyChunk *chunk,
                                   uint8_t *payload, int *request_ret,
                                   Error **errp)
{
    uint32_t error;
    uint16_t message_size;

    assert(chunk->type & (1 << 15));

    if (chunk->length < sizeof(error) + sizeof(message_size)) {
        error_setg(errp,
                   "Protocol error: invalid payload for structured error");
        return -EINVAL;
    }

    error = nbd_errno_to_system_errno(payload_advance32(&payload));
    if (error == 0) {
        error_setg(errp, "Protocol error: server sent structured error chunk "
                         "with error = 0");
        return -EINVAL;
    }

    *request_ret = -error;
    message_size = payload_advance16(&payload);

    if (message_size > chunk->length - sizeof(error) - sizeof(message_size)) {
        error_setg(errp, "Protocol error: server sent structured error chunk "
                         "with incorrect message size");
        return -EINVAL;
    }

    /* TODO: Add a trace point to mention the server complaint */

    /* TODO handle ERROR_OFFSET */

    return 0;
}
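/* Handle NBD_REPLY_TYPE_OFFSET_DATA: read the 8-byte offset from the
 * channel, bounds-check it against the original request, and read the
 * chunk's data directly into the matching slice of @qiov so no bounce
 * buffer is needed. */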
static int nbd_co_receive_offset_data_payload(NBDClientSession *s,
                                              uint64_t orig_offset,
                                              QEMUIOVector *qiov, Error **errp)
{
    QEMUIOVector sub_qiov;
    uint64_t offset;
    size_t data_size;
    int ret;
    NBDStructuredReplyChunk *chunk = &s->reply.structured;

    assert(nbd_reply_is_structured(&s->reply));

    /* The NBD spec requires at least one byte of payload */
    if (chunk->length <= sizeof(offset)) {
        error_setg(errp, "Protocol error: invalid payload for "
                         "NBD_REPLY_TYPE_OFFSET_DATA");
        return -EINVAL;
    }

    if (nbd_read(s->ioc, &offset, sizeof(offset), errp) < 0) {
        return -EIO;
    }
    be64_to_cpus(&offset);

    data_size = chunk->length - sizeof(offset);
    assert(data_size);
    if (offset < orig_offset || data_size > qiov->size ||
        offset > orig_offset + qiov->size - data_size) {
        error_setg(errp, "Protocol error: server sent chunk exceeding requested"
                         " region");
        return -EINVAL;
    }

    qemu_iovec_init(&sub_qiov, qiov->niov);
    qemu_iovec_concat(&sub_qiov, qiov, offset - orig_offset, data_size);
    ret = qio_channel_readv_all(s->ioc, sub_qiov.iov, sub_qiov.niov, errp);
    qemu_iovec_destroy(&sub_qiov);

    return ret < 0 ? -EIO : 0;
}
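/* Cap on any allocation whose size comes from a server-supplied chunk
 * length; larger payloads are rejected as protocol errors instead of
 * being trusted.  Genuine error and hole payloads are far smaller. */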
#define NBD_MAX_MALLOC_PAYLOAD 1000
/* nbd_co_receive_structured_payload
 * Read the current structured reply chunk's payload into a freshly
 * allocated buffer stored in *@payload; the caller must g_free() it.
 */
static coroutine_fn int nbd_co_receive_structured_payload(
        NBDClientSession *s, void **payload, Error **errp)
{
    int ret;
    uint32_t len;

    assert(nbd_reply_is_structured(&s->reply));

    len = s->reply.structured.length;

    if (len == 0) {
        return 0;
    }

    if (payload == NULL) {
        error_setg(errp, "Unexpected structured payload");
        return -EINVAL;
    }

    if (len > NBD_MAX_MALLOC_PAYLOAD) {
        error_setg(errp, "Payload too large");
        return -EINVAL;
    }

    *payload = g_new(char, len);
    ret = nbd_read(s->ioc, *payload, len, errp);
    if (ret < 0) {
        g_free(*payload);
        *payload = NULL;
        return ret;
    }

    return 0;
}

/* nbd_co_do_receive_one_chunk
 * for simple reply:
 *   set request_ret to received reply error
 *   if qiov is not NULL: read payload to @qiov
 * for structured reply chunk:
 *   if error chunk: read payload, set @request_ret, do not set @payload
 *   else if offset_data chunk: read payload data to @qiov, do not set @payload
 *   else: read payload to @payload
 *
 * If the function fails, @errp contains the corresponding error message, and
 * the connection with the server is suspect.  If it returns 0, then the
 * transaction succeeded (although @request_ret may be a negative errno
 * corresponding to the server's error reply), and errp is unchanged.
 */
static coroutine_fn int nbd_co_do_receive_one_chunk(
        NBDClientSession *s, uint64_t handle, bool only_structured,
        int *request_ret, QEMUIOVector *qiov, void **payload, Error **errp)
{
    int ret;
    int i = HANDLE_TO_INDEX(s, handle);
    void *local_payload = NULL;
    NBDStructuredReplyChunk *chunk;

    if (payload) {
        *payload = NULL;
    }
    *request_ret = 0;

    /* Wait until we're woken up by nbd_read_reply_entry.  */
    s->requests[i].receiving = true;
    qemu_coroutine_yield();
    s->requests[i].receiving = false;
    if (!s->ioc || s->quit) {
        error_setg(errp, "Connection closed");
        return -EIO;
    }

    assert(s->reply.handle == handle);

    if (nbd_reply_is_simple(&s->reply)) {
        if (only_structured) {
            error_setg(errp, "Protocol error: simple reply when structured "
                             "reply chunk was expected");
            return -EINVAL;
        }

        *request_ret = -nbd_errno_to_system_errno(s->reply.simple.error);
        if (*request_ret < 0 || !qiov) {
            return 0;
        }

        return qio_channel_readv_all(s->ioc, qiov->iov, qiov->niov,
                                     errp) < 0 ? -EIO : 0;
    }

    /* handle structured reply chunk */
    assert(s->info.structured_reply);
    chunk = &s->reply.structured;

    if (chunk->type == NBD_REPLY_TYPE_NONE) {
        if (!(chunk->flags & NBD_REPLY_FLAG_DONE)) {
            error_setg(errp, "Protocol error: NBD_REPLY_TYPE_NONE chunk without"
                             " NBD_REPLY_FLAG_DONE flag set");
            return -EINVAL;
        }
        if (chunk->length) {
            error_setg(errp, "Protocol error: NBD_REPLY_TYPE_NONE chunk with"
                             " nonzero length");
            return -EINVAL;
        }
        return 0;
    }

    if (chunk->type == NBD_REPLY_TYPE_OFFSET_DATA) {
        if (!qiov) {
            error_setg(errp, "Unexpected NBD_REPLY_TYPE_OFFSET_DATA chunk");
            return -EINVAL;
        }

        return nbd_co_receive_offset_data_payload(s, s->requests[i].offset,
                                                  qiov, errp);
    }

    if (nbd_reply_type_is_error(chunk->type)) {
        payload = &local_payload;
    }

    ret = nbd_co_receive_structured_payload(s, payload, errp);
    if (ret < 0) {
        return ret;
    }

    if (nbd_reply_type_is_error(chunk->type)) {
        ret = nbd_parse_error_payload(chunk, local_payload, request_ret, errp);
        g_free(local_payload);
        return ret;
    }

    return 0;
}

/* nbd_co_receive_one_chunk
 * Read one reply chunk, wake up read_reply_co and set s->quit if needed.
 * The return value is a fatal error code, or otherwise the nbd reply
 * error code for the request.
 */
static coroutine_fn int nbd_co_receive_one_chunk(
        NBDClientSession *s, uint64_t handle, bool only_structured,
        QEMUIOVector *qiov, NBDReply *reply, void **payload, Error **errp)
{
    int request_ret;
    int ret = nbd_co_do_receive_one_chunk(s, handle, only_structured,
                                          &request_ret, qiov, payload, errp);

    if (ret < 0) {
        s->quit = true;
    } else {
        /* For assert at loop start in nbd_read_reply_entry */
        if (reply) {
            *reply = s->reply;
        }
        s->reply.handle = 0;
        ret = request_ret;
    }

    if (s->read_reply_co) {
        aio_co_wake(s->read_reply_co);
    }

    return ret;
}
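/* Accumulated result of iterating over one request's reply chunks: the
 * first error wins, except that a later fatal error replaces a non-fatal
 * one, and @done records that the final chunk has been seen. */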
typedef struct NBDReplyChunkIter {
    int ret;
    Error *err;
    bool done, only_structured;
} NBDReplyChunkIter;

static void nbd_iter_error(NBDReplyChunkIter *iter, bool fatal,
                           int ret, Error **local_err)
{
    assert(ret < 0);

    if (fatal || iter->ret == 0) {
        if (iter->ret != 0) {
            error_free(iter->err);
            iter->err = NULL;
        }
        iter->ret = ret;
        error_propagate(&iter->err, *local_err);
    } else {
        error_free(*local_err);
    }

    *local_err = NULL;
}

/* NBD_FOREACH_REPLY_CHUNK
 * Iterate over all reply chunks belonging to one request handle.
 */
#define NBD_FOREACH_REPLY_CHUNK(s, iter, handle, structured, \
                                qiov, reply, payload) \
    for (iter = (NBDReplyChunkIter) { .only_structured = structured }; \
         nbd_reply_chunk_iter_receive(s, &iter, handle, qiov, reply, payload);)

/* nbd_reply_chunk_iter_receive
 * Receive the next chunk for @handle.  Returns true if the loop body
 * should run for this chunk, false once iteration is complete.
 */
static bool nbd_reply_chunk_iter_receive(NBDClientSession *s,
                                         NBDReplyChunkIter *iter,
                                         uint64_t handle,
                                         QEMUIOVector *qiov, NBDReply *reply,
                                         void **payload)
{
    int ret;
    NBDReply local_reply;
    NBDStructuredReplyChunk *chunk;
    Error *local_err = NULL;
    if (s->quit) {
        error_setg(&local_err, "Connection closed");
        nbd_iter_error(iter, true, -EIO, &local_err);
        goto break_loop;
    }

    if (iter->done) {
        /* Previous iteration was last. */
        goto break_loop;
    }

    if (reply == NULL) {
        reply = &local_reply;
    }

    ret = nbd_co_receive_one_chunk(s, handle, iter->only_structured,
                                   qiov, reply, payload, &local_err);
    if (ret < 0) {
        /* If it is a fatal error, s->quit is set by nbd_co_receive_one_chunk */
        nbd_iter_error(iter, s->quit, ret, &local_err);
    }

    /* Do not execute the body of NBD_FOREACH_REPLY_CHUNK for simple reply. */
    if (nbd_reply_is_simple(&s->reply) || s->quit) {
        goto break_loop;
    }

    chunk = &reply->structured;
    iter->only_structured = true;

    if (chunk->type == NBD_REPLY_TYPE_NONE) {
        /* NBD_REPLY_FLAG_DONE is already checked in nbd_co_receive_one_chunk */
        assert(chunk->flags & NBD_REPLY_FLAG_DONE);
        goto break_loop;
    }

    if (chunk->flags & NBD_REPLY_FLAG_DONE) {
        /* This iteration is last. */
        iter->done = true;
    }

    /* Execute the loop body */
    return true;

break_loop:
    s->requests[HANDLE_TO_INDEX(s, handle)].coroutine = NULL;

    qemu_co_mutex_lock(&s->send_mutex);
    s->in_flight--;
    qemu_co_queue_next(&s->free_sema);
    qemu_co_mutex_unlock(&s->send_mutex);

    return false;
}
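/* Drain all reply chunks of a command that carries no payload (write,
 * flush, trim, ...) and fold them into a single return code. */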
static int nbd_co_receive_return_code(NBDClientSession *s, uint64_t handle,
                                      Error **errp)
{
    NBDReplyChunkIter iter;

    NBD_FOREACH_REPLY_CHUNK(s, iter, handle, false, NULL, NULL, NULL) {
        /* nbd_reply_chunk_iter_receive does all the work */
    }

    error_propagate(errp, iter.err);
    return iter.ret;
}
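/* Receive the complete reply to NBD_CMD_READ: data and hole chunks fill
 * the corresponding slices of @qiov, error chunks fold into the return
 * code, and any other chunk type is treated as a protocol error. */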
static int nbd_co_receive_cmdread_reply(NBDClientSession *s, uint64_t handle,
                                        uint64_t offset, QEMUIOVector *qiov,
                                        Error **errp)
{
    NBDReplyChunkIter iter;
    NBDReply reply;
    void *payload = NULL;
    Error *local_err = NULL;

    NBD_FOREACH_REPLY_CHUNK(s, iter, handle, s->info.structured_reply,
                            qiov, &reply, &payload)
    {
        int ret;
        NBDStructuredReplyChunk *chunk = &reply.structured;

        assert(nbd_reply_is_structured(&reply));

        switch (chunk->type) {
        case NBD_REPLY_TYPE_OFFSET_DATA:
            /* special cased in nbd_co_receive_one_chunk, data is already
             * in qiov */
            break;
        case NBD_REPLY_TYPE_OFFSET_HOLE:
            ret = nbd_parse_offset_hole_payload(&reply.structured, payload,
                                                offset, qiov, &local_err);
            if (ret < 0) {
                s->quit = true;
                nbd_iter_error(&iter, true, ret, &local_err);
            }
            break;
        default:
            if (!nbd_reply_type_is_error(chunk->type)) {
                /* not allowed reply type */
                s->quit = true;
                error_setg(&local_err,
                           "Unexpected reply type: %d (%s) for CMD_READ",
                           chunk->type, nbd_reply_type_lookup(chunk->type));
                nbd_iter_error(&iter, true, -EINVAL, &local_err);
            }
        }

        g_free(payload);
        payload = NULL;
    }

    error_propagate(errp, iter.err);
    return iter.ret;
}
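/* Send a request and wait for the whole reply, for every command except
 * NBD_CMD_READ, whose per-chunk handling is done by
 * nbd_co_receive_cmdread_reply() above. */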
static int nbd_co_request(BlockDriverState *bs, NBDRequest *request,
                          QEMUIOVector *write_qiov)
{
    int ret;
    Error *local_err = NULL;
    NBDClientSession *client = nbd_get_client_session(bs);

    assert(request->type != NBD_CMD_READ);
    if (write_qiov) {
        assert(request->type == NBD_CMD_WRITE);
        assert(request->len == iov_size(write_qiov->iov, write_qiov->niov));
    } else {
        assert(request->type != NBD_CMD_WRITE);
    }
    ret = nbd_co_send_request(bs, request, write_qiov);
    if (ret < 0) {
        return ret;
    }

    ret = nbd_co_receive_return_code(client, request->handle, &local_err);
    if (local_err) {
        error_report_err(local_err);
    }
    return ret;
}
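/* Read entry point of the block driver.  The request handle generated by
 * nbd_co_send_request() ties the reply chunks dispatched by
 * nbd_read_reply_entry() back to this coroutine. */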
int nbd_client_co_preadv(BlockDriverState *bs, uint64_t offset,
                         uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    int ret;
    Error *local_err = NULL;
    NBDClientSession *client = nbd_get_client_session(bs);
    NBDRequest request = {
        .type = NBD_CMD_READ,
        .from = offset,
        .len = bytes,
    };

    assert(bytes <= NBD_MAX_BUFFER_SIZE);
    assert(!flags);

    if (!bytes) {
        return 0;
    }
    ret = nbd_co_send_request(bs, &request, NULL);
    if (ret < 0) {
        return ret;
    }

    ret = nbd_co_receive_cmdread_reply(client, request.handle, offset, qiov,
                                       &local_err);
    if (local_err) {
        error_report_err(local_err);
    }
    return ret;
}
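/* Write entry point of the block driver.  BDRV_REQ_FUA maps to
 * NBD_CMD_FLAG_FUA; the block layer only passes it down when
 * nbd_client_init() advertised FUA support via supported_write_flags. */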
int nbd_client_co_pwritev(BlockDriverState *bs, uint64_t offset,
                          uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    NBDClientSession *client = nbd_get_client_session(bs);
    NBDRequest request = {
        .type = NBD_CMD_WRITE,
        .from = offset,
        .len = bytes,
    };

    assert(!(client->info.flags & NBD_FLAG_READ_ONLY));
    if (flags & BDRV_REQ_FUA) {
        assert(client->info.flags & NBD_FLAG_SEND_FUA);
        request.flags |= NBD_CMD_FLAG_FUA;
    }

    assert(bytes <= NBD_MAX_BUFFER_SIZE);

    if (!bytes) {
        return 0;
    }
    return nbd_co_request(bs, &request, qiov);
}

int nbd_client_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
                                int bytes, BdrvRequestFlags flags)
{
    NBDClientSession *client = nbd_get_client_session(bs);
    NBDRequest request = {
        .type = NBD_CMD_WRITE_ZEROES,
        .from = offset,
        .len = bytes,
    };

    assert(!(client->info.flags & NBD_FLAG_READ_ONLY));
    if (!(client->info.flags & NBD_FLAG_SEND_WRITE_ZEROES)) {
        return -ENOTSUP;
    }

    if (flags & BDRV_REQ_FUA) {
        assert(client->info.flags & NBD_FLAG_SEND_FUA);
        request.flags |= NBD_CMD_FLAG_FUA;
    }
    if (!(flags & BDRV_REQ_MAY_UNMAP)) {
        request.flags |= NBD_CMD_FLAG_NO_HOLE;
    }

    if (!bytes) {
        return 0;
    }
    return nbd_co_request(bs, &request, NULL);
}

int nbd_client_co_flush(BlockDriverState *bs)
{
    NBDClientSession *client = nbd_get_client_session(bs);
    NBDRequest request = { .type = NBD_CMD_FLUSH };

    if (!(client->info.flags & NBD_FLAG_SEND_FLUSH)) {
        return 0;
    }

    request.from = 0;
    request.len = 0;

    return nbd_co_request(bs, &request, NULL);
}

int nbd_client_co_pdiscard(BlockDriverState *bs, int64_t offset, int bytes)
{
    NBDClientSession *client = nbd_get_client_session(bs);
    NBDRequest request = {
        .type = NBD_CMD_TRIM,
        .from = offset,
        .len = bytes,
    };

    assert(!(client->info.flags & NBD_FLAG_READ_ONLY));
    if (!(client->info.flags & NBD_FLAG_SEND_TRIM) || !bytes) {
        return 0;
    }

    return nbd_co_request(bs, &request, NULL);
}

void nbd_client_detach_aio_context(BlockDriverState *bs)
{
    NBDClientSession *client = nbd_get_client_session(bs);
    qio_channel_detach_aio_context(QIO_CHANNEL(client->ioc));
}

void nbd_client_attach_aio_context(BlockDriverState *bs,
                                   AioContext *new_context)
{
    NBDClientSession *client = nbd_get_client_session(bs);
    qio_channel_attach_aio_context(QIO_CHANNEL(client->ioc), new_context);
    aio_co_schedule(new_context, client->read_reply_co);
}

void nbd_client_close(BlockDriverState *bs)
{
    NBDClientSession *client = nbd_get_client_session(bs);
    NBDRequest request = { .type = NBD_CMD_DISC };

    if (client->ioc == NULL) {
        return;
    }

    nbd_send_request(client->ioc, &request);

    nbd_teardown_connection(bs);
}
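/* Perform the NBD handshake on an already connected socket (possibly
 * upgrading it to TLS), translate the server's advertised flags into
 * block-layer capabilities, and start the reply-reading coroutine. */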
int nbd_client_init(BlockDriverState *bs,
                    QIOChannelSocket *sioc,
                    const char *export,
                    QCryptoTLSCreds *tlscreds,
                    const char *hostname,
                    Error **errp)
{
    NBDClientSession *client = nbd_get_client_session(bs);
    int ret;

    /* NBD handshake */
    logout("session init %s\n", export);
    qio_channel_set_blocking(QIO_CHANNEL(sioc), true, NULL);

    client->info.request_sizes = true;
    client->info.structured_reply = true;
    ret = nbd_receive_negotiate(QIO_CHANNEL(sioc), export,
                                tlscreds, hostname,
                                &client->ioc, &client->info, errp);
    if (ret < 0) {
        logout("Failed to negotiate with the NBD server\n");
        return ret;
    }
    if (client->info.flags & NBD_FLAG_READ_ONLY &&
        !bdrv_is_read_only(bs)) {
        error_setg(errp,
                   "request for write access conflicts with read-only export");
        return -EACCES;
    }
    if (client->info.flags & NBD_FLAG_SEND_FUA) {
        bs->supported_write_flags = BDRV_REQ_FUA;
        bs->supported_zero_flags |= BDRV_REQ_FUA;
    }
    if (client->info.flags & NBD_FLAG_SEND_WRITE_ZEROES) {
        bs->supported_zero_flags |= BDRV_REQ_MAY_UNMAP;
    }
    if (client->info.min_block > bs->bl.request_alignment) {
        bs->bl.request_alignment = client->info.min_block;
    }

    qemu_co_mutex_init(&client->send_mutex);
    qemu_co_queue_init(&client->free_sema);
    client->sioc = sioc;
    object_ref(OBJECT(client->sioc));

    if (!client->ioc) {
        client->ioc = QIO_CHANNEL(sioc);
        object_ref(OBJECT(client->ioc));
    }

    /* Now that we're connected, set the socket to be non-blocking and
     * kick the reply mechanism.  */
    qio_channel_set_blocking(QIO_CHANNEL(sioc), false, NULL);
    client->read_reply_co = qemu_coroutine_create(nbd_read_reply_entry, client);
    nbd_client_attach_aio_context(bs, bdrv_get_aio_context(bs));

    logout("Established connection with NBD server\n");
    return 0;
}