     QLIST_ENTRY(AioHandler) node;
 };
+static void aio_remove_fd_handler(AioContext *ctx, AioHandler *node)
+{
+    /* If aio_poll is in progress, just mark the node as deleted */
+    if (qemu_lockcnt_count(&ctx->list_lock)) {
+        node->deleted = 1;
+        node->pfd.revents = 0;
+    } else {
+        /* Otherwise, delete it for real. We can't just mark it as
+         * deleted because deleted nodes are only cleaned up after
+         * releasing the list_lock.
+         */
+        QLIST_REMOVE(node, node);
+        g_free(node);
+    }
+}
+
 void aio_set_fd_handler(AioContext *ctx,
                         int fd,
                         bool is_external,
                         IOHandler *io_read,
                         IOHandler *io_write,
                         AioPollFn *io_poll,
                         void *opaque)
 {
     /* fd is a SOCKET in our case */
-    AioHandler *node;
+    AioHandler *old_node;
+    AioHandler *node = NULL;
     qemu_lockcnt_lock(&ctx->list_lock);
-    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
-        if (node->pfd.fd == fd && !node->deleted) {
+    QLIST_FOREACH(old_node, &ctx->aio_handlers, node) {
+        if (old_node->pfd.fd == fd && !old_node->deleted) {
             break;
         }
     }
-    /* Are we deleting the fd handler? */
-    if (!io_read && !io_write) {
-        if (node) {
-            /* If aio_poll is in progress, just mark the node as deleted */
-            if (qemu_lockcnt_count(&ctx->list_lock)) {
-                node->deleted = 1;
-                node->pfd.revents = 0;
-            } else {
-                /* Otherwise, delete it for real. We can't just mark it as
-                 * deleted because deleted nodes are only cleaned up after
-                 * releasing the list_lock.
-                 */
-                QLIST_REMOVE(node, node);
-                g_free(node);
-            }
-        }
-    } else {
+    if (io_read || io_write) {
         HANDLE event;
         long bitmask = 0;
-        if (node == NULL) {
-            /* Alloc and insert if it's not already there */
-            node = g_new0(AioHandler, 1);
-            node->pfd.fd = fd;
-            QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node);
-        }
+        /* Alloc and insert if it's not already there */
+        node = g_new0(AioHandler, 1);
+        node->pfd.fd = fd;
         node->pfd.events = 0;
         if (io_read) {
             bitmask |= FD_READ | FD_ACCEPT | FD_CLOSE;
         }
         if (io_write) {
             bitmask |= FD_WRITE | FD_CONNECT;
         }
+        QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node);
         event = event_notifier_get_handle(&ctx->notifier);
         WSAEventSelect(node->pfd.fd, event, bitmask);
     }
+    if (old_node) {
+        aio_remove_fd_handler(ctx, old_node);
+    }
     qemu_lockcnt_unlock(&ctx->list_lock);
     aio_notify(ctx);
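A minimal stand-alone sketch of the pattern used above (and reused by the event-notifier deletion path below): a handler is freed on the spot only when no list traversal is in progress, otherwise it is merely flagged as deleted, and a replacement node is published before the old one is retired. All names here (handler, walkers, retire, set_handler) are illustrative rather than QEMU interfaces, and the delete-only case of the patch is omitted for brevity:

#include <stdlib.h>

struct handler {
    int fd;
    int deleted;                  /* flagged instead of freed during a walk */
    struct handler *next;
};

static struct handler *handlers;  /* singly-linked list of handlers */
static int walkers;               /* in-progress traversals, cf. the lockcnt count */

static void retire(struct handler *old)
{
    if (walkers) {
        /* A walker may still hold a pointer to this node; reap it later,
         * just as the patch reaps nodes after list_lock is released. */
        old->deleted = 1;
    } else {
        struct handler **p = &handlers;
        while (*p != old) {
            p = &(*p)->next;
        }
        *p = old->next;           /* nobody is iterating: unlink and free now */
        free(old);
    }
}

static void set_handler(int fd)
{
    struct handler *old, *node;

    for (old = handlers; old; old = old->next) {
        if (old->fd == fd && !old->deleted) {
            break;
        }
    }

    /* Publish a fresh node first, then retire the one it replaces, so a
     * concurrent walker always finds a live (or at worst flagged) entry. */
    node = calloc(1, sizeof(*node));
    node->fd = fd;
    node->next = handlers;
    handlers = node;

    if (old) {
        retire(old);
    }
}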
         if (node) {
             g_source_remove_poll(&ctx->source, &node->pfd);
-            /* aio_poll is in progress, just mark the node as deleted */
-            if (qemu_lockcnt_count(&ctx->list_lock)) {
-                node->deleted = 1;
-                node->pfd.revents = 0;
-            } else {
-                /* Otherwise, delete it for real. We can't just mark it as
-                 * deleted because deleted nodes are only cleaned up after
-                 * releasing the list_lock.
-                 */
-                QLIST_REMOVE(node, node);
-                g_free(node);
-            }
+            aio_remove_fd_handler(ctx, node);
         }
     } else {
         if (node == NULL) {
     int count;
     int timeout;
+    /*
+     * There cannot be two concurrent aio_poll calls for the same AioContext (or
+     * an aio_poll concurrent with a GSource prepare/check/dispatch callback).
+     * We rely on this below to avoid slow locked accesses to ctx->notify_me.
+     */
+    assert(in_aio_context_home_thread(ctx));
     progress = false;
     /* aio_notify can avoid the expensive event_notifier_set if
      * everything (file descriptors, bottom halves, timers) will
      * be re-evaluated before the next blocking poll(). This is
      * already true when aio_poll is called with blocking == false;
      * if blocking == true, it is only true after poll() returns,
      * so disable the optimization now.
      */
     if (blocking) {
-        atomic_add(&ctx->notify_me, 2);
+        atomic_set(&ctx->notify_me, atomic_read(&ctx->notify_me) + 2);
+        /*
+         * Write ctx->notify_me before computing the timeout
+         * (reading bottom half flags, etc.). Pairs with
+         * smp_mb in aio_notify().
+         */
+        smp_mb();
     }
     qemu_lockcnt_inc(&ctx->list_lock);
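The notify_me handling above and below follows the classic Dekker-style store/load protocol: the poller publishes notify_me before it reads the flags that feed the timeout computation, while aio_notify publishes its work before it reads notify_me, so at least one side is guaranteed to see the other. Since only the context's home thread (asserted at the top) ever writes notify_me, a plain set plus a full barrier on entry, and a store-release on exit, is enough; no locked read-modify-write is needed. A stand-alone sketch of the same protocol using C11 atomics instead of QEMU's atomic_*() and smp_mb() helpers, with illustrative names:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int  notify_me;   /* nonzero: a poller is about to block */
static atomic_bool have_work;   /* something was queued for the poller */

/* Notifier side: publish the work first, then decide whether a kick is needed. */
static bool notify(void)
{
    atomic_store_explicit(&have_work, true, memory_order_relaxed);
    atomic_thread_fence(memory_order_seq_cst);    /* pairs with the poller's fence */
    return atomic_load_explicit(&notify_me, memory_order_relaxed) != 0;
}

/* Poller side, always the same thread: announce the intent to block, then look
 * for work. Only this thread writes notify_me, so a plain store is sufficient. */
static bool poll_once(void)
{
    atomic_store_explicit(&notify_me,
                          atomic_load_explicit(&notify_me, memory_order_relaxed) + 2,
                          memory_order_relaxed);
    atomic_thread_fence(memory_order_seq_cst);    /* pairs with the notifier's fence */

    bool work = atomic_load_explicit(&have_work, memory_order_relaxed);
    /* ...block here only if !work... */

    atomic_store_explicit(&notify_me,
                          atomic_load_explicit(&notify_me, memory_order_relaxed) - 2,
                          memory_order_release);
    return work;
}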
         ret = WaitForMultipleObjects(count, events, FALSE, timeout);
         if (blocking) {
             assert(first);
-            assert(in_aio_context_home_thread(ctx));
-            atomic_sub(&ctx->notify_me, 2);
+            atomic_store_release(&ctx->notify_me, atomic_read(&ctx->notify_me) - 2);
aio_notify_accept(ctx);
}