/*
 * QEMU I/O channels
 *
 * Copyright (c) 2015 Red Hat, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 *
 */

#ifndef QIO_CHANNEL_H
#define QIO_CHANNEL_H

#include "qemu-common.h"
#include "qom/object.h"
#include "qemu/coroutine.h"
#include "block/aio.h"

#define TYPE_QIO_CHANNEL "qio-channel"
#define QIO_CHANNEL(obj) \
    OBJECT_CHECK(QIOChannel, (obj), TYPE_QIO_CHANNEL)
#define QIO_CHANNEL_CLASS(klass) \
    OBJECT_CLASS_CHECK(QIOChannelClass, klass, TYPE_QIO_CHANNEL)
#define QIO_CHANNEL_GET_CLASS(obj) \
    OBJECT_GET_CLASS(QIOChannelClass, obj, TYPE_QIO_CHANNEL)

typedef struct QIOChannel QIOChannel;
typedef struct QIOChannelClass QIOChannelClass;

#define QIO_CHANNEL_ERR_BLOCK -2

typedef enum QIOChannelFeature QIOChannelFeature;

enum QIOChannelFeature {
    QIO_CHANNEL_FEATURE_FD_PASS,
    QIO_CHANNEL_FEATURE_SHUTDOWN,
    QIO_CHANNEL_FEATURE_LISTEN,
};


typedef enum QIOChannelShutdown QIOChannelShutdown;

enum QIOChannelShutdown {
    QIO_CHANNEL_SHUTDOWN_READ = 1,
    QIO_CHANNEL_SHUTDOWN_WRITE = 2,
    QIO_CHANNEL_SHUTDOWN_BOTH = 3,
};

typedef gboolean (*QIOChannelFunc)(QIOChannel *ioc,
                                   GIOCondition condition,
                                   gpointer data);

/**
 * QIOChannel:
 *
 * The QIOChannel defines the core API for a generic I/O channel
 * class hierarchy. It is inspired by GIOChannel, but has the
 * following differences:
 *
 * - Use of QOM to properly support arbitrary subclassing
 * - Support for iovecs for efficient I/O with multiple blocks
 * - No character set translation; data is handled as binary exclusively
 * - Direct support for QEMU Error object reporting
 * - File descriptor passing
 *
 * This base class is abstract, so it cannot be instantiated. There
 * will be subclasses for dealing with sockets, files, and higher
 * level protocols such as TLS, WebSocket, etc.
 */

struct QIOChannel {
    Object parent;
    unsigned int features; /* bitmask of QIOChannelFeatures */
    char *name;
    AioContext *ctx;
    Coroutine *read_coroutine;
    Coroutine *write_coroutine;
#ifdef _WIN32
    HANDLE event; /* For use with GSource on Win32 */
#endif
};

/**
 * QIOChannelClass:
 *
 * This class defines the contract that all subclasses
 * must follow to provide specific channel implementations.
 * The first five callbacks are mandatory to support; the
 * others provide additional optional features.
 *
 * Consult the corresponding public API docs for a description
 * of the semantics of each callback.
 */
struct QIOChannelClass {
    ObjectClass parent;

    /* Mandatory callbacks */
    ssize_t (*io_writev)(QIOChannel *ioc,
                         const struct iovec *iov,
                         size_t niov,
                         int *fds,
                         size_t nfds,
                         Error **errp);
    ssize_t (*io_readv)(QIOChannel *ioc,
                        const struct iovec *iov,
                        size_t niov,
                        int **fds,
                        size_t *nfds,
                        Error **errp);
    int (*io_close)(QIOChannel *ioc,
                    Error **errp);
    GSource * (*io_create_watch)(QIOChannel *ioc,
                                 GIOCondition condition);
    int (*io_set_blocking)(QIOChannel *ioc,
                           bool enabled,
                           Error **errp);

    /* Optional callbacks */
    int (*io_shutdown)(QIOChannel *ioc,
                       QIOChannelShutdown how,
                       Error **errp);
    void (*io_set_cork)(QIOChannel *ioc,
                        bool enabled);
    void (*io_set_delay)(QIOChannel *ioc,
                         bool enabled);
    off_t (*io_seek)(QIOChannel *ioc,
                     off_t offset,
                     int whence,
                     Error **errp);
    void (*io_set_aio_fd_handler)(QIOChannel *ioc,
                                  AioContext *ctx,
                                  IOHandler *io_read,
                                  IOHandler *io_write,
                                  void *opaque);
};
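
/*
 * Illustrative sketch (not part of the original header): a minimal outline
 * of how a channel subclass might register itself with QOM and wire up the
 * mandatory callbacks. The type name, struct name and callback functions
 * shown here are hypothetical assumptions, not an existing implementation.
 *
 *   static void qio_channel_example_class_init(ObjectClass *klass,
 *                                              void *class_data)
 *   {
 *       QIOChannelClass *ioc_klass = QIO_CHANNEL_CLASS(klass);
 *
 *       ioc_klass->io_writev = qio_channel_example_writev;
 *       ioc_klass->io_readv = qio_channel_example_readv;
 *       ioc_klass->io_close = qio_channel_example_close;
 *       ioc_klass->io_create_watch = qio_channel_example_create_watch;
 *       ioc_klass->io_set_blocking = qio_channel_example_set_blocking;
 *   }
 *
 *   static const TypeInfo qio_channel_example_info = {
 *       .parent = TYPE_QIO_CHANNEL,
 *       .name = "qio-channel-example",
 *       .instance_size = sizeof(QIOChannelExample),
 *       .class_init = qio_channel_example_class_init,
 *   };
 */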

/* General I/O handling functions */

/**
 * qio_channel_has_feature:
 * @ioc: the channel object
 * @feature: the feature to check support of
 *
 * Determine whether the channel implementation supports
 * the optional feature named in @feature.
 *
 * Returns: true if supported, false otherwise.
 */
bool qio_channel_has_feature(QIOChannel *ioc,
                             QIOChannelFeature feature);

/**
 * qio_channel_set_feature:
 * @ioc: the channel object
 * @feature: the feature to set support for
 *
 * Add channel support for the feature named in @feature.
 */
void qio_channel_set_feature(QIOChannel *ioc,
                             QIOChannelFeature feature);

/**
 * qio_channel_set_name:
 * @ioc: the channel object
 * @name: the name of the channel
 *
 * Sets the name of the channel, which serves as an aid
 * to debugging. The name is used when creating GSource
 * watches for this channel.
 */
void qio_channel_set_name(QIOChannel *ioc,
                          const char *name);

/**
 * qio_channel_readv_full:
 * @ioc: the channel object
 * @iov: the array of memory regions to read data into
 * @niov: the length of the @iov array
 * @fds: pointer to an array that will receive file handles
 * @nfds: pointer filled with number of elements in @fds on return
 * @errp: pointer to a NULL-initialized error object
 *
 * Read data from the IO channel, storing it in the
 * memory regions referenced by @iov. Each element
 * in the @iov will be fully populated with data
 * before the next one is used. The @niov parameter
 * specifies the total number of elements in @iov.
 *
 * It is not required for all @iov to be filled with
 * data. If the channel is in blocking mode, at least
 * one byte of data will be read, but no more is
 * guaranteed. If the channel is non-blocking and no
 * data is available, it will return QIO_CHANNEL_ERR_BLOCK.
 *
 * If the channel has passed any file descriptors,
 * the @fds array pointer will be allocated and
 * the elements filled with the received file
 * descriptors. The @nfds pointer will be updated
 * to indicate the size of the @fds array that
 * was allocated. It is the caller's responsibility
 * to call close() on each file descriptor and to
 * call g_free() on the array pointer in @fds.
 *
 * It is an error to pass a non-NULL @fds parameter
 * unless qio_channel_has_feature() returns a true
 * value for the QIO_CHANNEL_FEATURE_FD_PASS constant.
 *
 * Returns: the number of bytes read, or -1 on error,
 * or QIO_CHANNEL_ERR_BLOCK if no data is available
 * and the channel is non-blocking
 */
ssize_t qio_channel_readv_full(QIOChannel *ioc,
                               const struct iovec *iov,
                               size_t niov,
                               int **fds,
                               size_t *nfds,
                               Error **errp);
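
/*
 * Illustrative sketch (not part of the original header): reading into two
 * buffers while collecting any passed file descriptors. The buffer names,
 * sizes and error handling strategy shown here are assumptions.
 *
 *   struct iovec iov[2] = {
 *       { .iov_base = hdr, .iov_len = sizeof(hdr) },
 *       { .iov_base = payload, .iov_len = payload_len },
 *   };
 *   int *fds = NULL;
 *   size_t nfds = 0;
 *   Error *err = NULL;
 *
 *   ssize_t ret = qio_channel_readv_full(ioc, iov, G_N_ELEMENTS(iov),
 *                                        &fds, &nfds, &err);
 *   if (ret == QIO_CHANNEL_ERR_BLOCK) {
 *       // non-blocking channel and no data yet; wait and retry
 *   } else if (ret < 0) {
 *       error_report_err(err);
 *   } else {
 *       for (size_t i = 0; i < nfds; i++) {
 *           close(fds[i]);      // caller owns the received descriptors
 *       }
 *       g_free(fds);
 *   }
 */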


/**
 * qio_channel_writev_full:
 * @ioc: the channel object
 * @iov: the array of memory regions to write data from
 * @niov: the length of the @iov array
 * @fds: an array of file handles to send
 * @nfds: number of file handles in @fds
 * @errp: pointer to a NULL-initialized error object
 *
 * Write data to the IO channel, reading it from the
 * memory regions referenced by @iov. Each element
 * in the @iov will be fully sent, before the next
 * one is used. The @niov parameter specifies the
 * total number of elements in @iov.
 *
 * It is not required for all @iov data to be fully
 * sent. If the channel is in blocking mode, at least
 * one byte of data will be sent, but no more is
 * guaranteed. If the channel is non-blocking and no
 * data can be sent, it will return QIO_CHANNEL_ERR_BLOCK.
 *
 * If there are file descriptors to send, the @fds
 * array should be non-NULL and provide the handles.
 * All file descriptors will be sent if at least one
 * byte of data was sent.
 *
 * It is an error to pass a non-NULL @fds parameter
 * unless qio_channel_has_feature() returns a true
 * value for the QIO_CHANNEL_FEATURE_FD_PASS constant.
 *
 * Returns: the number of bytes sent, or -1 on error,
 * or QIO_CHANNEL_ERR_BLOCK if no data can be sent
 * and the channel is non-blocking
 */
ssize_t qio_channel_writev_full(QIOChannel *ioc,
                                const struct iovec *iov,
                                size_t niov,
                                int *fds,
                                size_t nfds,
                                Error **errp);
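
/*
 * Illustrative sketch (not part of the original header): sending a buffer
 * along with a file descriptor, after first confirming that the channel
 * supports descriptor passing. A blocking channel and the variable names
 * (buf, buflen, memfd) are assumptions.
 *
 *   if (qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_FD_PASS)) {
 *       struct iovec iov = { .iov_base = buf, .iov_len = buflen };
 *       int fds[1] = { memfd };
 *       Error *err = NULL;
 *
 *       if (qio_channel_writev_full(ioc, &iov, 1, fds,
 *                                   G_N_ELEMENTS(fds), &err) < 0) {
 *           error_report_err(err);
 *       }
 *   }
 */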

/**
 * qio_channel_readv_all_eof:
 * @ioc: the channel object
 * @iov: the array of memory regions to read data into
 * @niov: the length of the @iov array
 * @errp: pointer to a NULL-initialized error object
 *
 * Read data from the IO channel, storing it in the
 * memory regions referenced by @iov. Each element
 * in the @iov will be fully populated with data
 * before the next one is used. The @niov parameter
 * specifies the total number of elements in @iov.
 *
 * The function will wait for all requested data
 * to be read, yielding from the current coroutine
 * if required.
 *
 * If end-of-file occurs before any data is read,
 * no error is reported; otherwise, if it occurs
 * before all requested data has been read, an error
 * will be reported.
 *
 * Returns: 1 if all bytes were read, 0 if end-of-file
 * occurs without data, or -1 on error
 */
int qio_channel_readv_all_eof(QIOChannel *ioc,
                              const struct iovec *iov,
                              size_t niov,
                              Error **errp);

/**
 * qio_channel_readv_all:
 * @ioc: the channel object
 * @iov: the array of memory regions to read data into
 * @niov: the length of the @iov array
 * @errp: pointer to a NULL-initialized error object
 *
 * Read data from the IO channel, storing it in the
 * memory regions referenced by @iov. Each element
 * in the @iov will be fully populated with data
 * before the next one is used. The @niov parameter
 * specifies the total number of elements in @iov.
 *
 * The function will wait for all requested data
 * to be read, yielding from the current coroutine
 * if required.
 *
 * If end-of-file occurs before all requested data
 * has been read, an error will be reported.
 *
 * Returns: 0 if all bytes were read, or -1 on error
 */
int qio_channel_readv_all(QIOChannel *ioc,
                          const struct iovec *iov,
                          size_t niov,
                          Error **errp);


/**
 * qio_channel_writev_all:
 * @ioc: the channel object
 * @iov: the array of memory regions to write data from
 * @niov: the length of the @iov array
 * @errp: pointer to a NULL-initialized error object
 *
 * Write data to the IO channel, reading it from the
 * memory regions referenced by @iov. Each element
 * in the @iov will be fully sent, before the next
 * one is used. The @niov parameter specifies the
 * total number of elements in @iov.
 *
 * The function will wait for all requested data
 * to be written, yielding from the current coroutine
 * if required.
 *
 * Returns: 0 if all bytes were written, or -1 on error
 */
int qio_channel_writev_all(QIOChannel *ioc,
                           const struct iovec *iov,
                           size_t niov,
                           Error **errp);
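
/*
 * Illustrative sketch (not part of the original header): writing a
 * two-element vector in full from coroutine context. The function name,
 * parameters and surrounding coroutine are assumptions.
 *
 *   static void coroutine_fn example_send(QIOChannel *ioc,
 *                                         void *hdr, size_t hdrlen,
 *                                         void *body, size_t bodylen,
 *                                         Error **errp)
 *   {
 *       struct iovec iov[2] = {
 *           { .iov_base = hdr, .iov_len = hdrlen },
 *           { .iov_base = body, .iov_len = bodylen },
 *       };
 *
 *       // Yields internally until every byte has been written
 *       qio_channel_writev_all(ioc, iov, G_N_ELEMENTS(iov), errp);
 *   }
 */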

/**
 * qio_channel_readv:
 * @ioc: the channel object
 * @iov: the array of memory regions to read data into
 * @niov: the length of the @iov array
 * @errp: pointer to a NULL-initialized error object
 *
 * Behaves as qio_channel_readv_full() but does not support
 * receiving of file handles.
 */
ssize_t qio_channel_readv(QIOChannel *ioc,
                          const struct iovec *iov,
                          size_t niov,
                          Error **errp);

/**
 * qio_channel_writev:
 * @ioc: the channel object
 * @iov: the array of memory regions to write data from
 * @niov: the length of the @iov array
 * @errp: pointer to a NULL-initialized error object
 *
 * Behaves as qio_channel_writev_full() but does not support
 * sending of file handles.
 */
ssize_t qio_channel_writev(QIOChannel *ioc,
                           const struct iovec *iov,
                           size_t niov,
                           Error **errp);

/**
 * qio_channel_read:
 * @ioc: the channel object
 * @buf: the memory region to read data into
 * @buflen: the length of @buf
 * @errp: pointer to a NULL-initialized error object
 *
 * Behaves as qio_channel_readv_full() but does not support
 * receiving of file handles, and only supports reading into
 * a single memory region.
 */
ssize_t qio_channel_read(QIOChannel *ioc,
                         char *buf,
                         size_t buflen,
                         Error **errp);

/**
 * qio_channel_write:
 * @ioc: the channel object
 * @buf: the memory region to send data from
 * @buflen: the length of @buf
 * @errp: pointer to a NULL-initialized error object
 *
 * Behaves as qio_channel_writev_full() but does not support
 * sending of file handles, and only supports writing from a
 * single memory region.
 */
ssize_t qio_channel_write(QIOChannel *ioc,
                          const char *buf,
                          size_t buflen,
                          Error **errp);

/**
 * qio_channel_read_all_eof:
 * @ioc: the channel object
 * @buf: the memory region to read data into
 * @buflen: the number of bytes to read into @buf
 * @errp: pointer to a NULL-initialized error object
 *
 * Reads @buflen bytes into @buf, possibly blocking or (if the
 * channel is non-blocking) yielding from the current coroutine
 * multiple times until the entire content is read. If end-of-file
 * occurs immediately it is not an error, but if it occurs after
 * data has been read it will return an error rather than a
 * short-read. Otherwise behaves as qio_channel_read().
 *
 * Returns: 1 if all bytes were read, 0 if end-of-file occurs
 * without data, or -1 on error
 */
int qio_channel_read_all_eof(QIOChannel *ioc,
                             char *buf,
                             size_t buflen,
                             Error **errp);

/**
 * qio_channel_read_all:
 * @ioc: the channel object
 * @buf: the memory region to read data into
 * @buflen: the number of bytes to read into @buf
 * @errp: pointer to a NULL-initialized error object
 *
 * Reads @buflen bytes into @buf, possibly blocking or (if the
 * channel is non-blocking) yielding from the current coroutine
 * multiple times until the entire content is read. If end-of-file
 * occurs it will return an error rather than a short-read. Otherwise
 * behaves as qio_channel_read().
 *
 * Returns: 0 if all bytes were read, or -1 on error
 */
int qio_channel_read_all(QIOChannel *ioc,
                         char *buf,
                         size_t buflen,
                         Error **errp);

/**
 * qio_channel_write_all:
 * @ioc: the channel object
 * @buf: the memory region to write data from
 * @buflen: the number of bytes to write from @buf
 * @errp: pointer to a NULL-initialized error object
 *
 * Writes @buflen bytes from @buf, possibly blocking or (if the
 * channel is non-blocking) yielding from the current coroutine
 * multiple times until the entire content is written. Otherwise
 * behaves as qio_channel_write().
 *
 * Returns: 0 if all bytes were written, or -1 on error
 */
int qio_channel_write_all(QIOChannel *ioc,
                          const char *buf,
                          size_t buflen,
                          Error **errp);
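
/*
 * Illustrative sketch (not part of the original header): a fixed-size
 * request/response exchange using the "all" helpers, which hide short
 * reads and writes from the caller. The message layout is an assumption.
 *
 *   char request[64];
 *   char response[64];
 *   Error *err = NULL;
 *
 *   if (qio_channel_write_all(ioc, request, sizeof(request), &err) < 0 ||
 *       qio_channel_read_all(ioc, response, sizeof(response), &err) < 0) {
 *       error_report_err(err);
 *   }
 */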

/**
 * qio_channel_set_blocking:
 * @ioc: the channel object
 * @enabled: the blocking flag state
 * @errp: pointer to a NULL-initialized error object
 *
 * If @enabled is true, then the channel is put into
 * blocking mode, otherwise it will be non-blocking.
 *
 * In non-blocking mode, read/write operations may
 * return QIO_CHANNEL_ERR_BLOCK if they would otherwise
 * block on I/O.
 */
int qio_channel_set_blocking(QIOChannel *ioc,
                             bool enabled,
                             Error **errp);
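
/*
 * Illustrative sketch (not part of the original header): switching a
 * channel to non-blocking mode and handling a read that would block.
 * The retry strategy and variable names are assumptions.
 *
 *   Error *err = NULL;
 *
 *   if (qio_channel_set_blocking(ioc, false, &err) < 0) {
 *       error_report_err(err);
 *       return;
 *   }
 *
 *   ssize_t ret = qio_channel_read(ioc, buf, buflen, &err);
 *   if (ret == QIO_CHANNEL_ERR_BLOCK) {
 *       // No data yet; wait for G_IO_IN via a watch, qio_channel_wait()
 *       // or qio_channel_yield() before retrying
 *   } else if (ret < 0) {
 *       error_report_err(err);
 *   }
 */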

/**
 * qio_channel_close:
 * @ioc: the channel object
 * @errp: pointer to a NULL-initialized error object
 *
 * Close the channel, flushing any pending I/O.
 *
 * Returns: 0 on success, -1 on error
 */
int qio_channel_close(QIOChannel *ioc,
                      Error **errp);

/**
 * qio_channel_shutdown:
 * @ioc: the channel object
 * @how: the direction to shutdown
 * @errp: pointer to a NULL-initialized error object
 *
 * Shuts down transmission and/or receiving of data
 * without closing the underlying transport.
 *
 * Not all implementations will support this facility,
 * and those that do not may report an error. To avoid
 * errors, the caller may check for the feature flag
 * QIO_CHANNEL_FEATURE_SHUTDOWN prior to calling
 * this method.
 *
 * Returns: 0 on success, -1 on error
 */
int qio_channel_shutdown(QIOChannel *ioc,
                         QIOChannelShutdown how,
                         Error **errp);
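
/*
 * Illustrative sketch (not part of the original header): signalling
 * end-of-transmission to the peer while still being able to read its
 * reply, guarded by the feature check recommended above.
 *
 *   Error *err = NULL;
 *
 *   if (qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_SHUTDOWN)) {
 *       if (qio_channel_shutdown(ioc, QIO_CHANNEL_SHUTDOWN_WRITE, &err) < 0) {
 *           error_report_err(err);
 *       }
 *   }
 */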

/**
 * qio_channel_set_delay:
 * @ioc: the channel object
 * @enabled: the new flag state
 *
 * Controls whether the underlying transport is
 * permitted to delay writes in order to merge
 * small packets. If @enabled is true, then the
 * writes may be delayed in order to opportunistically
 * merge small packets into larger ones. If @enabled
 * is false, writes are dispatched immediately with
 * no delay.
 *
 * When @enabled is false, applications may wish to
 * use the qio_channel_set_cork() method to explicitly
 * control write merging.
 *
 * On channels which are backed by a socket, this
 * API corresponds to the inverse of the TCP_NODELAY flag,
 * controlling whether the Nagle algorithm is active.
 *
 * This setting is merely a hint, so implementations are
 * free to ignore this without it being considered an
 * error.
 */
void qio_channel_set_delay(QIOChannel *ioc,
                           bool enabled);

/**
 * qio_channel_set_cork:
 * @ioc: the channel object
 * @enabled: the new flag state
 *
 * Controls whether the underlying transport is
 * permitted to dispatch data that is written.
 * If @enabled is true, then any data written will
 * be queued in local buffers until @enabled is
 * set to false once again.
 *
 * This feature is typically used when the automatic
 * write coalescing facility is disabled via the
 * qio_channel_set_delay() method.
 *
 * On channels which are backed by a socket, this
 * API corresponds to the TCP_CORK flag.
 *
 * This setting is merely a hint, so implementations are
 * free to ignore this without it being considered an
 * error.
 */
void qio_channel_set_cork(QIOChannel *ioc,
                          bool enabled);
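
/*
 * Illustrative sketch (not part of the original header): corking the
 * channel so that a small header and its payload leave the transport
 * as a single packet where possible. Names and sizes are assumptions.
 *
 *   Error *err = NULL;
 *
 *   qio_channel_set_cork(ioc, true);
 *   if (qio_channel_write_all(ioc, (const char *)hdr, hdrlen, &err) < 0 ||
 *       qio_channel_write_all(ioc, (const char *)payload, payloadlen, &err) < 0) {
 *       error_report_err(err);
 *   }
 *   qio_channel_set_cork(ioc, false);   // allow queued data to be flushed
 */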


/**
 * qio_channel_io_seek:
 * @ioc: the channel object
 * @offset: the position to seek to, relative to @whence
 * @whence: one of the (POSIX) SEEK_* constants listed below
 * @errp: pointer to a NULL-initialized error object
 *
 * Moves the current I/O position within the channel
 * @ioc, to be @offset. The value of @offset is
 * interpreted relative to @whence:
 *
 * SEEK_SET - the position is set to @offset bytes
 * SEEK_CUR - the position is moved by @offset bytes
 * SEEK_END - the position is set to end of the file plus @offset bytes
 *
 * Not all implementations will support this facility,
 * and those that do not may report an error.
 *
 * Returns: the new position on success, (off_t)-1 on failure
 */
off_t qio_channel_io_seek(QIOChannel *ioc,
                          off_t offset,
                          int whence,
                          Error **errp);


/**
 * qio_channel_create_watch:
 * @ioc: the channel object
 * @condition: the I/O condition to monitor
 *
 * Create a new main loop source that is used to watch
 * for the I/O condition @condition. Typically the
 * qio_channel_add_watch() method would be used instead
 * of this, since it directly attaches a callback to
 * the source.
 *
 * Returns: the new main loop source.
 */
GSource *qio_channel_create_watch(QIOChannel *ioc,
                                  GIOCondition condition);

/**
 * qio_channel_add_watch:
 * @ioc: the channel object
 * @condition: the I/O condition to monitor
 * @func: callback to invoke when the source becomes ready
 * @user_data: opaque data to pass to @func
 * @notify: callback to free @user_data
 *
 * Create a new main loop source that is used to watch
 * for the I/O condition @condition. The callback @func
 * will be registered against the source, to be invoked
 * when the source becomes ready. The optional @user_data
 * will be passed to @func when it is invoked. The @notify
 * callback will be used to free @user_data when the
 * watch is deleted.
 *
 * The returned source ID can be used with g_source_remove()
 * to remove and free the source when no longer required.
 * Alternatively the @func callback can return a FALSE
 * value.
 *
 * Returns: the source ID
 */
guint qio_channel_add_watch(QIOChannel *ioc,
                            GIOCondition condition,
                            QIOChannelFunc func,
                            gpointer user_data,
                            GDestroyNotify notify);
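
/*
 * Illustrative sketch (not part of the original header): registering a
 * watch that fires when the channel becomes readable. The callback name
 * and its body are assumptions.
 *
 *   static gboolean example_can_read(QIOChannel *ioc,
 *                                    GIOCondition condition,
 *                                    gpointer opaque)
 *   {
 *       // ... read from ioc ...
 *       return G_SOURCE_CONTINUE;   // keep the watch registered
 *   }
 *
 *   guint tag = qio_channel_add_watch(ioc, G_IO_IN,
 *                                     example_can_read, NULL, NULL);
 *   // later: g_source_remove(tag) when the watch is no longer needed
 */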

/**
 * qio_channel_add_watch_full:
 * @ioc: the channel object
 * @condition: the I/O condition to monitor
 * @func: callback to invoke when the source becomes ready
 * @user_data: opaque data to pass to @func
 * @notify: callback to free @user_data
 * @context: the context to run the watch source
 *
 * Similar to qio_channel_add_watch(), but allows specifying the
 * context in which to run the watch source.
 *
 * Returns: the source ID
 */
guint qio_channel_add_watch_full(QIOChannel *ioc,
                                 GIOCondition condition,
                                 QIOChannelFunc func,
                                 gpointer user_data,
                                 GDestroyNotify notify,
                                 GMainContext *context);

/**
 * qio_channel_add_watch_source:
 * @ioc: the channel object
 * @condition: the I/O condition to monitor
 * @func: callback to invoke when the source becomes ready
 * @user_data: opaque data to pass to @func
 * @notify: callback to free @user_data
 * @context: the GMainContext to bind the source to
 *
 * Similar to qio_channel_add_watch(), but allows specifying the
 * context in which to run the watch source, and returns the GSource
 * object instead of a tag ID, with the GSource already referenced.
 *
 * Note: the caller is responsible for unreffing the source when it
 * is no longer needed.
 *
 * Returns: the source pointer
 */
GSource *qio_channel_add_watch_source(QIOChannel *ioc,
                                      GIOCondition condition,
                                      QIOChannelFunc func,
                                      gpointer user_data,
                                      GDestroyNotify notify,
                                      GMainContext *context);

/**
 * qio_channel_attach_aio_context:
 * @ioc: the channel object
 * @ctx: the #AioContext to set the handlers on
 *
 * Request that qio_channel_yield() set I/O handlers on
 * the given #AioContext. If @ctx is %NULL, qio_channel_yield()
 * uses QEMU's main thread event loop.
 *
 * You can move a #QIOChannel from one #AioContext to another even if
 * I/O handlers are set for a coroutine. However, #QIOChannel provides
 * no synchronization between the calls to qio_channel_yield() and
 * qio_channel_attach_aio_context().
 *
 * Therefore you should first call qio_channel_detach_aio_context()
 * to ensure that the coroutine is not entered concurrently. Then,
 * while the coroutine has yielded, call qio_channel_attach_aio_context(),
 * and then aio_co_schedule() to place the coroutine on the new
 * #AioContext. The calls to qio_channel_detach_aio_context()
 * and qio_channel_attach_aio_context() should be protected with
 * aio_context_acquire() and aio_context_release().
 */
void qio_channel_attach_aio_context(QIOChannel *ioc,
                                    AioContext *ctx);

/**
 * qio_channel_detach_aio_context:
 * @ioc: the channel object
 *
 * Disable any I/O handlers set by qio_channel_yield(). With the
 * help of aio_co_schedule(), this allows moving a coroutine that was
 * paused by qio_channel_yield() to another context.
 */
void qio_channel_detach_aio_context(QIOChannel *ioc);
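
/*
 * Illustrative sketch (not part of the original header): moving a channel
 * and its yielded coroutine to another #AioContext, following the sequence
 * described above. The variable names, and exactly which context lock is
 * held across each call, are assumptions.
 *
 *   aio_context_acquire(old_ctx);
 *   qio_channel_detach_aio_context(ioc);
 *   aio_context_release(old_ctx);
 *
 *   aio_context_acquire(new_ctx);
 *   qio_channel_attach_aio_context(ioc, new_ctx);
 *   aio_context_release(new_ctx);
 *
 *   // Resume the coroutine that was paused in qio_channel_yield()
 *   aio_co_schedule(new_ctx, co);
 */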

/**
 * qio_channel_yield:
 * @ioc: the channel object
 * @condition: the I/O condition to wait for
 *
 * Yields execution from the current coroutine until the condition
 * indicated by @condition becomes available. @condition must
 * be either %G_IO_IN or %G_IO_OUT; it cannot contain both. In
 * addition, no two coroutines can be waiting on the same condition
 * and channel at the same time.
 *
 * This must only be called from coroutine context.
 */
void qio_channel_yield(QIOChannel *ioc,
                       GIOCondition condition);
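
/*
 * Illustrative sketch (not part of the original header): a coroutine
 * read loop on a non-blocking channel, yielding whenever no data is
 * ready. The function name and parameters are assumptions.
 *
 *   static ssize_t coroutine_fn example_co_read(QIOChannel *ioc,
 *                                               char *buf, size_t len,
 *                                               Error **errp)
 *   {
 *       ssize_t ret;
 *
 *       do {
 *           ret = qio_channel_read(ioc, buf, len, errp);
 *           if (ret == QIO_CHANNEL_ERR_BLOCK) {
 *               // Suspend this coroutine until the channel is readable
 *               qio_channel_yield(ioc, G_IO_IN);
 *           }
 *       } while (ret == QIO_CHANNEL_ERR_BLOCK);
 *
 *       return ret;
 *   }
 */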

/**
 * qio_channel_wait:
 * @ioc: the channel object
 * @condition: the I/O condition to wait for
 *
 * Block execution from the current thread until
 * the condition indicated by @condition becomes
 * available.
 *
 * This will enter a nested event loop to perform
 * the wait.
 */
void qio_channel_wait(QIOChannel *ioc,
                      GIOCondition condition);
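
/*
 * Illustrative sketch (not part of the original header): waiting from
 * ordinary (non-coroutine) context for the channel to become readable
 * before retrying a non-blocking read. Names are assumptions.
 *
 *   ssize_t ret;
 *
 *   do {
 *       ret = qio_channel_read(ioc, buf, buflen, &err);
 *       if (ret == QIO_CHANNEL_ERR_BLOCK) {
 *           // Runs a nested main loop until G_IO_IN is signalled
 *           qio_channel_wait(ioc, G_IO_IN);
 *       }
 *   } while (ret == QIO_CHANNEL_ERR_BLOCK);
 */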

/**
 * qio_channel_set_aio_fd_handler:
 * @ioc: the channel object
 * @ctx: the AioContext to set the handlers on
 * @io_read: the read handler
 * @io_write: the write handler
 * @opaque: the opaque value passed to the handler
 *
 * This is used internally by qio_channel_yield(). It can
 * be used by channel implementations to forward the handlers
 * to another channel (e.g. from #QIOChannelTLS to the
 * underlying socket).
 */
void qio_channel_set_aio_fd_handler(QIOChannel *ioc,
                                    AioContext *ctx,
                                    IOHandler *io_read,
                                    IOHandler *io_write,
                                    void *opaque);

#endif /* QIO_CHANNEL_H */