*/
struct rdma_cm_id *cm_id; /* connection manager ID */
struct rdma_cm_id *listen_id;
+ bool connected;
struct ibv_context *verbs;
struct rdma_event_channel *channel;
int *resp_idx,
int (*callback)(RDMAContext *rdma));
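For context, a minimal sketch of the lifecycle the new flag is meant to have (the struct and helper below are hypothetical stand-ins; only cm_id, connected, and the librdmacm calls come from the patch):

#include <stdbool.h>
#include <rdma/rdma_cma.h>

struct conn_state {                 /* stand-in for RDMAContext */
    struct rdma_cm_id *cm_id;
    bool connected;                 /* set true once ESTABLISHED */
};

static void conn_teardown(struct conn_state *s)
{
    if (s->cm_id && s->connected) {
        rdma_disconnect(s->cm_id);  /* drop the connection... */
        s->connected = false;       /* ...but keep cm_id for QP cleanup */
    }
}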
-static inline uint64_t ram_chunk_index(uint8_t *start, uint8_t *host)
+static inline uint64_t ram_chunk_index(const uint8_t *start,
+ const uint8_t *host)
{
return ((uintptr_t) host - (uintptr_t) start) >> RDMA_REG_CHUNK_SHIFT;
}
-static inline uint8_t *ram_chunk_start(RDMALocalBlock *rdma_ram_block,
+static inline uint8_t *ram_chunk_start(const RDMALocalBlock *rdma_ram_block,
uint64_t i)
{
return (uint8_t *) (((uintptr_t) rdma_ram_block->local_host_addr)
+ (i << RDMA_REG_CHUNK_SHIFT));
}
-static inline uint8_t *ram_chunk_end(RDMALocalBlock *rdma_ram_block, uint64_t i)
+static inline uint8_t *ram_chunk_end(const RDMALocalBlock *rdma_ram_block,
+ uint64_t i)
{
uint8_t *result = ram_chunk_start(rdma_ram_block, i) +
(1UL << RDMA_REG_CHUNK_SHIFT);
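A quick illustration of how the three helpers relate (a sketch only: block is assumed to be a fully initialized RDMALocalBlock spanning at least four chunks, and any clamping of the final chunk is ignored):

#include <assert.h>
#include <stdint.h>

static void chunk_math_sanity(const RDMALocalBlock *block)
{
    /* An address 42 bytes into chunk 3 of the block... */
    const uint8_t *host = block->local_host_addr
                        + (3UL << RDMA_REG_CHUNK_SHIFT) + 42;
    uint64_t i = ram_chunk_index(block->local_host_addr, host);

    assert(i == 3);                             /* ...maps back to chunk 3 */
    assert(ram_chunk_start(block, i) <= host);  /* inclusive chunk base */
    assert(host < ram_chunk_end(block, i));     /* exclusive chunk end */
}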
* connections (both IPv4 and IPv6) if the destination machine does not have
* a regular infiniband network available for use.
*
- * The only way to gaurantee that an error is thrown for broken kernels is
+ * The only way to guarantee that an error is thrown for broken kernels is
* for the management software to choose a *specific* interface at bind time
- * and validate what time of hardware it is.
+ * and validate what type of hardware it is.
*
* Infiniband.
*
* If we detect that we have a *pure* RoCE environment, then we can safely
- * thrown an error even if the management sofware has specified '[::]' as the
+ * throw an error even if the management software has specified '[::]' as the
* bind address.
*
- * However, if there is are multiple hetergeneous devices, then we cannot make
+ * However, if there are multiple heterogeneous devices, then we cannot make
* devices (non-ethernet).
*
* If not, then we can safely proceed with the migration.
- * Otherwise, there are no gaurantees until the bug is fixed in linux.
+ * Otherwise, there are no guarantees until the bug is fixed in Linux.
*/
if (!verbs) {
int num_devices, x;
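To make the detection described above concrete, here is a minimal sketch of how such a scan could classify the environment by link layer (illustrative only, not the body of qemu_rdma_broken_ipv6_kernel; scan_link_layers is a hypothetical name, and only port 1 of each device is checked):

#include <errno.h>
#include <stdbool.h>
#include <infiniband/verbs.h>

static int scan_link_layers(bool *saw_roce, bool *saw_ib)
{
    int num_devices, x;
    struct ibv_device **dev_list = ibv_get_device_list(&num_devices);

    if (!dev_list) {
        return -ENODEV;
    }
    for (x = 0; x < num_devices; x++) {
        struct ibv_context *ctx = ibv_open_device(dev_list[x]);
        struct ibv_port_attr attr;

        if (!ctx) {
            continue;
        }
        if (!ibv_query_port(ctx, 1, &attr)) {   /* ports are 1-based */
            if (attr.link_layer == IBV_LINK_LAYER_ETHERNET) {
                *saw_roce = true;               /* RoCE port found */
            } else {
                *saw_ib = true;                 /* infiniband port found */
            }
        }
        ibv_close_device(ctx);
    }
    ibv_free_device_list(dev_list);
    /* A "pure RoCE" environment is *saw_roce && !*saw_ib. */
    return 0;
}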
ret = rdma_resolve_addr(rdma->cm_id, NULL, e->ai_dst_addr,
RDMA_RESOLVE_TIMEOUT_MS);
if (!ret) {
- ret = qemu_rdma_broken_ipv6_kernel(errp, rdma->cm_id->verbs);
- if (ret) {
- continue;
+ if (e->ai_family == AF_INET6) {
+ ret = qemu_rdma_broken_ipv6_kernel(errp, rdma->cm_id->verbs);
+ if (ret) {
+ continue;
+ }
}
goto route;
}
ERROR(errp, "result not equal to event_addr_resolved %s",
rdma_event_str(cm_event->event));
perror("rdma_resolve_addr");
+ rdma_ack_cm_event(cm_event);
ret = -EINVAL;
goto err_resolve_get_addr;
}
}
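The added rdma_ack_cm_event() matters because librdmacm requires every event returned by rdma_get_cm_event() to be acknowledged, even on error paths; an unacked event is leaked and can block rdma_destroy_id() later. A sketch of the pairing (the helper name is hypothetical; the calls and event constant are real):

#include <errno.h>
#include <rdma/rdma_cma.h>

static int wait_addr_resolved(struct rdma_event_channel *channel)
{
    struct rdma_cm_event *cm_event;

    if (rdma_get_cm_event(channel, &cm_event) < 0) {
        return -EINVAL;
    }
    if (cm_event->event != RDMA_CM_EVENT_ADDR_RESOLVED) {
        rdma_ack_cm_event(cm_event);   /* ack before bailing out */
        return -EINVAL;
    }
    rdma_ack_cm_event(cm_event);       /* ack on the success path too */
    return 0;
}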
- if (ibv_post_send(rdma->qp, &send_wr, &bad_wr)) {
- return -1;
- }
+ ret = ibv_post_send(rdma->qp, &send_wr, &bad_wr);
- if (ret < 0) {
+ if (ret > 0) {
fprintf(stderr, "Failed to use post IB SEND for control!\n");
- return ret;
+ return -ret;
}
ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_SEND_CONTROL, NULL);
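The rewritten check reflects the verbs convention: ibv_post_send() returns 0 on success and a positive errno value on failure, so the old 'ret < 0' test could never fire. A hypothetical wrapper showing the pattern:

#include <stdio.h>
#include <string.h>
#include <infiniband/verbs.h>

static int post_send_checked(struct ibv_qp *qp, struct ibv_send_wr *wr)
{
    struct ibv_send_wr *bad_wr;
    int ret = ibv_post_send(qp, wr, &bad_wr);

    if (ret > 0) {                      /* positive errno, not -1 */
        fprintf(stderr, "ibv_post_send failed: %s\n", strerror(ret));
        return -ret;                    /* map to negative-errno style */
    }
    return 0;
}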
struct rdma_cm_event *cm_event;
int ret, idx;
- if (rdma->cm_id) {
+ if (rdma->cm_id && rdma->connected) {
if (rdma->error_state) {
RDMAControlHeader head = { .len = 0,
.type = RDMA_CONTROL_ERROR,
}
}
DDPRINTF("Disconnected.\n");
- rdma->cm_id = NULL;
+ rdma->connected = false;
}
g_free(rdma->block);
}
}
- if (rdma->qp) {
- ibv_destroy_qp(rdma->qp);
- rdma->qp = NULL;
- }
if (rdma->cq) {
ibv_destroy_cq(rdma->cq);
rdma->cq = NULL;
rdma->listen_id = NULL;
}
if (rdma->cm_id) {
+ if (rdma->qp) {
+ rdma_destroy_qp(rdma->cm_id);
+ rdma->qp = NULL;
+ }
rdma_destroy_id(rdma->cm_id);
rdma->cm_id = NULL;
}
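The reordering above encodes a librdmacm rule worth spelling out: a QP created with rdma_create_qp() belongs to its cm_id and must be released with rdma_destroy_qp() while the cm_id is still alive. A condensed sketch of the resulting teardown (field names as in this file):

#include <rdma/rdma_cma.h>

static void destroy_ids_sketch(RDMAContext *rdma)
{
    if (rdma->cm_id) {
        if (rdma->qp) {
            rdma_destroy_qp(rdma->cm_id);   /* releases the cm_id's QP */
            rdma->qp = NULL;
        }
        rdma_destroy_id(rdma->cm_id);       /* now safe to destroy the id */
        rdma->cm_id = NULL;
    }
}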
rdma->cm_id = NULL;
goto err_rdma_source_connect;
}
+ rdma->connected = true;
memcpy(&cap, cm_event->param.conn.private_data, sizeof(cap));
network_to_caps(&cap);
} else {
ERROR(errp, "bad RDMA migration address '%s'", host_port);
g_free(rdma);
- return NULL;
+ rdma = NULL;
}
+
+ qapi_free_InetSocketAddress(addr);
}
return rdma;
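The change above converts an early return into a single exit so the parsed address is always freed. A simplified sketch of the shape (inet_parse() and the host copy are approximations of the surrounding QEMU code, and the QAPI free function is NULL-safe):

static RDMAContext *context_alloc_sketch(const char *host_port, Error **errp)
{
    RDMAContext *rdma = g_malloc0(sizeof(*rdma));
    InetSocketAddress *addr = inet_parse(host_port, NULL);

    if (addr != NULL) {
        rdma->host = g_strdup(addr->host);   /* keep what we need */
    } else {
        ERROR(errp, "bad RDMA migration address '%s'", host_port);
        g_free(rdma);
        rdma = NULL;                         /* fall through to free addr */
    }

    qapi_free_InetSocketAddress(addr);       /* safe even when addr is NULL */
    return rdma;
}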
/*
* QEMUFile interface to the control channel.
* SEND messages for control only.
- * pc.ram is handled with regular RDMA messages.
+ * The VM's RAM is handled with regular RDMA messages.
*/
static int qemu_rdma_put_buffer(void *opaque, const uint8_t *buf,
int64_t pos, int size)
/*
* Push out any writes that
- * we're queued up for pc.ram.
+ * we have queued up for the VM's RAM.
*/
ret = qemu_rdma_write_flush(f, rdma);
if (ret < 0) {
}
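For orientation, a simplified sketch of the ordering qemu_rdma_put_buffer() enforces: anything queued for the VM's RAM is flushed over RDMA before control bytes go out on the SEND channel, so control messages never overtake the RAM they describe. Chunking and size checks are omitted; the exchange_send tail parameters match the prototype quoted near the top of this excerpt, and RDMA_CONTROL_QEMU_FILE is the control type this file uses:

static int put_buffer_sketch(QEMUFile *f, RDMAContext *rdma,
                             uint8_t *buf, int size)
{
    RDMAControlHeader head = { .len = size, .type = RDMA_CONTROL_QEMU_FILE };
    int ret = qemu_rdma_write_flush(f, rdma);    /* drain queued RAM writes */

    if (ret < 0) {
        return ret;
    }
    return qemu_rdma_exchange_send(rdma, &head, buf, NULL, NULL, NULL);
}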
rdma_ack_cm_event(cm_event);
+ rdma->connected = true;
ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY);
if (ret) {
}
ret = qemu_rdma_source_init(rdma, &local_err,
- s->enabled_capabilities[MIGRATION_CAPABILITY_X_RDMA_PIN_ALL]);
+ s->enabled_capabilities[MIGRATION_CAPABILITY_RDMA_PIN_ALL]);
if (ret) {
goto err;