#include <poll.h>
#include <math.h>
#include <arpa/inet.h>
-#include "qemu-common.h"
+#include "sysemu/sysemu.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/bitops.h"
#include "qemu/uuid.h"
#include "sysemu/replay.h"
#include "qapi/error.h"
-#include "qapi/qapi-commands-misc.h"
+#include "qapi/qapi-commands-machine.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qstring.h"
#include "crypto/secret.h"
iTask->status = status;
iTask->do_retry = 0;
+ iTask->err_code = 0;
iTask->task = task;
if (status != SCSI_STATUS_GOOD) {
+ iTask->err_code = -EIO;
if (iTask->retries++ < ISCSI_CMD_RETRIES) {
if (status == SCSI_STATUS_BUSY ||
status == SCSI_STATUS_TIMEOUT ||
timer_mod(&iTask->retry_timer,
qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + retry_time);
iTask->do_retry = 1;
- }
- } else if (status == SCSI_STATUS_CHECK_CONDITION) {
- int error = iscsi_translate_sense(&task->sense);
- if (error == EAGAIN) {
- error_report("iSCSI CheckCondition: %s",
- iscsi_get_error(iscsi));
- iTask->do_retry = 1;
- } else {
- iTask->err_code = -error;
- iTask->err_str = g_strdup(iscsi_get_error(iscsi));
+ } else if (status == SCSI_STATUS_CHECK_CONDITION) {
+ int error = iscsi_translate_sense(&task->sense);
+ if (error == EAGAIN) {
+ error_report("iSCSI CheckCondition: %s",
+ iscsi_get_error(iscsi));
+ iTask->do_retry = 1;
+ } else {
+ iTask->err_code = -error;
+ iTask->err_str = g_strdup(iscsi_get_error(iscsi));
+ }
}
}
}
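With this restructuring, err_code starts at 0, is pre-set to -EIO for any status other than GOOD, and is only refined to the translated sense error when a CHECK CONDITION arrives inside the retry window; do_retry is what sends the caller back around the command. A simplified, illustrative consumer of those fields (not verbatim from this file; names follow the iTask/iscsilun convention used above):

    iscsi_co_init_iscsitask(iscsilun, &iTask);
retry:
    /* ... issue the SCSI command with iscsi_co_generic_cb as its callback ... */
    while (!iTask.complete) {
        iscsi_set_events(iscsilun);
        qemu_mutex_unlock(&iscsilun->mutex);
        qemu_coroutine_yield();
        qemu_mutex_lock(&iscsilun->mutex);
    }
    if (iTask.do_retry) {
        iTask.complete = 0;
        goto retry;                     /* BUSY/TIMEOUT timer or EAGAIN sense */
    }
    if (iTask.status != SCSI_STATUS_GOOD) {
        error_report("iSCSI request failed: %s", iTask.err_str);
        r = iTask.err_code;             /* -EIO unless the sense data said more */
    }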
IscsiAIOCB *acb = (IscsiAIOCB *)blockacb;
IscsiLun *iscsilun = acb->iscsilun;
- qemu_mutex_lock(&iscsilun->mutex);
+ WITH_QEMU_LOCK_GUARD(&iscsilun->mutex) {
- /* If it was cancelled or completed already, our work is done here */
- if (acb->cancelled || acb->status != -EINPROGRESS) {
- qemu_mutex_unlock(&iscsilun->mutex);
- return;
- }
+ /* If it was cancelled or completed already, our work is done here */
+ if (acb->cancelled || acb->status != -EINPROGRESS) {
+ return;
+ }
- acb->cancelled = true;
+ acb->cancelled = true;
- qemu_aio_ref(acb); /* released in iscsi_abort_task_cb() */
+ qemu_aio_ref(acb); /* released in iscsi_abort_task_cb() */
- /* send a task mgmt call to the target to cancel the task on the target */
- if (iscsi_task_mgmt_abort_task_async(iscsilun->iscsi, acb->task,
- iscsi_abort_task_cb, acb) < 0) {
- qemu_aio_unref(acb); /* since iscsi_abort_task_cb() won't be called */
+ /* send a task mgmt call to the target to cancel the task on the target */
+ if (iscsi_task_mgmt_abort_task_async(iscsilun->iscsi, acb->task,
+ iscsi_abort_task_cb, acb) < 0) {
+ qemu_aio_unref(acb); /* since iscsi_abort_task_cb() won't be called */
+ }
}
-
- qemu_mutex_unlock(&iscsilun->mutex);
}
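The conversion relies on WITH_QEMU_LOCK_GUARD() from qemu/lockable.h: the mutex is held only for the attached block and is released on every exit path, which is why the early return above no longer needs an explicit qemu_mutex_unlock(). A minimal sketch of the pattern, with illustrative names that are not part of this driver:

    #include "qemu/osdep.h"
    #include "qemu/lockable.h"

    static QemuMutex table_lock;            /* qemu_mutex_init() run at setup */

    static int lookup_locked(int key)
    {
        WITH_QEMU_LOCK_GUARD(&table_lock) {
            if (key < 0) {
                return -EINVAL;             /* lock dropped automatically here */
            }
            /* ... touch the shared state while the lock is held ... */
        }                                   /* ... and dropped on fall-through */
        return 0;
    }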
static const AIOCBInfo iscsi_aiocb_info = {
false,
(ev & POLLIN) ? iscsi_process_read : NULL,
(ev & POLLOUT) ? iscsi_process_write : NULL,
- NULL,
+ NULL, NULL,
iscsilun);
iscsilun->events = ev;
}
{
IscsiLun *iscsilun = opaque;
- qemu_mutex_lock(&iscsilun->mutex);
+ WITH_QEMU_LOCK_GUARD(&iscsilun->mutex) {
+ /* check for timed out requests */
+ iscsi_service(iscsilun->iscsi, 0);
- /* check for timed out requests */
- iscsi_service(iscsilun->iscsi, 0);
+ if (iscsilun->request_timed_out) {
+ iscsilun->request_timed_out = false;
+ iscsi_reconnect(iscsilun->iscsi);
+ }
- if (iscsilun->request_timed_out) {
- iscsilun->request_timed_out = false;
- iscsi_reconnect(iscsilun->iscsi);
+ /*
+ * newer versions of libiscsi may return zero events. Ensure we are
+ * able to return to service once this situation changes.
+ */
+ iscsi_set_events(iscsilun);
}
- /* newer versions of libiscsi may return zero events. Ensure we are able
- * to return to service once this situation changes. */
- iscsi_set_events(iscsilun);
-
- qemu_mutex_unlock(&iscsilun->mutex);
-
timer_mod(iscsilun->event_timer,
qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + EVENT_INTERVAL);
}
return sector * BDRV_SECTOR_SIZE / iscsilun->block_size;
}
-static bool is_byte_request_lun_aligned(int64_t offset, int count,
+static bool is_byte_request_lun_aligned(int64_t offset, int64_t bytes,
IscsiLun *iscsilun)
{
- if (offset % iscsilun->block_size || count % iscsilun->block_size) {
+ if (offset % iscsilun->block_size || bytes % iscsilun->block_size) {
error_report("iSCSI misaligned request: "
"iscsilun->block_size %u, offset %" PRIi64
- ", count %d",
- iscsilun->block_size, offset, count);
+ ", bytes %" PRIi64,
+ iscsilun->block_size, offset, bytes);
return false;
}
return true;
iscsi_allocmap_set_allocated(iscsilun, offset, *pnum);
}
- if (*pnum > bytes) {
- *pnum = bytes;
- }
out_unlock:
qemu_mutex_unlock(&iscsilun->mutex);
g_free(iTask.err_str);
acb->ioh->driver_status |= SG_ERR_DRIVER_SENSE;
acb->ioh->sb_len_wr = acb->task->datain.size - 2;
- ss = (acb->ioh->mx_sb_len >= acb->ioh->sb_len_wr) ?
- acb->ioh->mx_sb_len : acb->ioh->sb_len_wr;
+ ss = MIN(acb->ioh->mx_sb_len, acb->ioh->sb_len_wr);
memcpy(acb->ioh->sbp, &acb->task->datain.data[2], ss);
}
}
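The old expression selected the larger of the two lengths, so the memcpy() either read past the sense data the target actually returned (mx_sb_len > sb_len_wr) or wrote past the caller's sense buffer (sb_len_wr > mx_sb_len). Bounding with MIN() keeps the copy within both; in outline, with the field meanings as used above:

    /* mx_sb_len: sense-buffer capacity supplied by the SG_IO caller
     * sb_len_wr: sense bytes the target returned (2-byte header stripped)
     * ss:        amount that is safe to copy into acb->ioh->sbp           */
    ss = MIN(acb->ioh->mx_sb_len, acb->ioh->sb_len_wr);
    memcpy(acb->ioh->sbp, &acb->task->datain.data[2], ss);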
static int
-coroutine_fn iscsi_co_pdiscard(BlockDriverState *bs, int64_t offset, int bytes)
+coroutine_fn iscsi_co_pdiscard(BlockDriverState *bs, int64_t offset,
+ int64_t bytes)
{
IscsiLun *iscsilun = bs->opaque;
struct IscsiTask iTask;
return 0;
}
+ /*
+ * We don't want to overflow list.num which is uint32_t.
+ * We rely on our max_pdiscard.
+ */
+ assert(bytes / iscsilun->block_size <= UINT32_MAX);
+
list.lba = offset / iscsilun->block_size;
list.num = bytes / iscsilun->block_size;
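The new assertion leans on the max_pdiscard limit installed in iscsi_refresh_limits() further down: the block layer never submits a discard larger than that limit, so the block count always fits the uint32_t list.num. The arithmetic, assuming bytes is block-aligned as the driver's alignment check requires:

    /* bytes <= bs->bl.max_pdiscard <= (uint64_t)UINT32_MAX * block_size
     *   =>  bytes / block_size <= UINT32_MAX
     * so storing the quotient into the uint32_t list.num cannot truncate. */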
static int
coroutine_fn iscsi_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
- int bytes, BdrvRequestFlags flags)
+ int64_t bytes, BdrvRequestFlags flags)
{
IscsiLun *iscsilun = bs->opaque;
struct IscsiTask iTask;
uint64_t lba;
- uint32_t nb_blocks;
+ uint64_t nb_blocks;
bool use_16_for_ws = iscsilun->use_16_for_rw;
int r = 0;
iscsi_co_init_iscsitask(iscsilun, &iTask);
retry:
if (use_16_for_ws) {
+ /*
+ * iscsi_writesame16_task num_blocks argument is uint32_t. We rely here
+ * on our max_pwrite_zeroes limit.
+ */
+ assert(nb_blocks <= UINT32_MAX);
iTask.task = iscsi_writesame16_task(iscsilun->iscsi, iscsilun->lun, lba,
iscsilun->zeroblock, iscsilun->block_size,
nb_blocks, 0, !!(flags & BDRV_REQ_MAY_UNMAP),
0, 0, iscsi_co_generic_cb, &iTask);
} else {
+ /*
+ * iscsi_writesame10_task num_blocks argument is uint16_t. We rely here
+ * on our max_pwrite_zeroes limit.
+ */
+ assert(nb_blocks <= UINT16_MAX);
iTask.task = iscsi_writesame10_task(iscsilun->iscsi, iscsilun->lun, lba,
iscsilun->zeroblock, iscsilun->block_size,
nb_blocks, 0, !!(flags & BDRV_REQ_MAY_UNMAP),
{
IscsiLun *iscsilun = opaque;
- qemu_mutex_lock(&iscsilun->mutex);
+ QEMU_LOCK_GUARD(&iscsilun->mutex);
if (iscsi_get_nops_in_flight(iscsilun->iscsi) >= MAX_NOP_FAILURES) {
error_report("iSCSI: NOP timeout. Reconnecting...");
iscsilun->request_timed_out = true;
} else if (iscsi_nop_out_async(iscsilun->iscsi, NULL, NULL, 0, NULL) != 0) {
error_report("iSCSI: failed to sent NOP-Out. Disabling NOP messages.");
- goto out;
+ return;
}
timer_mod(iscsilun->nop_timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + NOP_INTERVAL);
iscsi_set_events(iscsilun);
-
-out:
- qemu_mutex_unlock(&iscsilun->mutex);
}
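QEMU_LOCK_GUARD() is the scope-long sibling of WITH_QEMU_LOCK_GUARD(): it holds the mutex until the enclosing scope ends, so every return unlocks automatically and the out: label becomes unnecessary. A sketch with made-up names:

    static unsigned nop_count;
    static QemuMutex nop_lock;              /* illustrative only */

    static void count_nop(bool ok)
    {
        QEMU_LOCK_GUARD(&nop_lock);         /* held until the function returns */
        if (!ok) {
            return;                         /* unlocks here; no goto needed */
        }
        nop_count++;
    }                                       /* unlocks here as well */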
static void iscsi_readcapacity_sync(IscsiLun *iscsilun, Error **errp)
IscsiLun *iscsilun = bs->opaque;
aio_set_fd_handler(iscsilun->aio_context, iscsi_get_fd(iscsilun->iscsi),
- false, NULL, NULL, NULL, NULL);
+ false, NULL, NULL, NULL, NULL, NULL);
iscsilun->events = 0;
if (iscsilun->nop_timer) {
- timer_del(iscsilun->nop_timer);
timer_free(iscsilun->nop_timer);
iscsilun->nop_timer = NULL;
}
if (iscsilun->event_timer) {
- timer_del(iscsilun->event_timer);
timer_free(iscsilun->event_timer);
iscsilun->event_timer = NULL;
}
int i, ret = 0, timeout = 0, lun;
opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
- qemu_opts_absorb_qdict(opts, options, &local_err);
- if (local_err) {
- error_propagate(errp, local_err);
+ if (!qemu_opts_absorb_qdict(opts, options, errp)) {
ret = -EINVAL;
goto out;
}
}
if (iscsilun->lbp.lbpu) {
- if (iscsilun->bl.max_unmap < 0xffffffff / block_size) {
- bs->bl.max_pdiscard =
- iscsilun->bl.max_unmap * iscsilun->block_size;
- }
+ bs->bl.max_pdiscard =
+ MIN_NON_ZERO(iscsilun->bl.max_unmap * iscsilun->block_size,
+ (uint64_t)UINT32_MAX * iscsilun->block_size);
bs->bl.pdiscard_alignment =
iscsilun->bl.opt_unmap_gran * iscsilun->block_size;
} else {
bs->bl.pdiscard_alignment = iscsilun->block_size;
}
- if (iscsilun->bl.max_ws_len < 0xffffffff / block_size) {
- bs->bl.max_pwrite_zeroes =
- iscsilun->bl.max_ws_len * iscsilun->block_size;
- }
+ bs->bl.max_pwrite_zeroes =
+ MIN_NON_ZERO(iscsilun->bl.max_ws_len * iscsilun->block_size,
+ max_xfer_len * iscsilun->block_size);
+
if (iscsilun->lbp.lbpws) {
bs->bl.pwrite_zeroes_alignment =
iscsilun->bl.opt_unmap_gran * iscsilun->block_size;
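MIN_NON_ZERO() treats a zero operand as "no limit", so a target that reports no UNMAP or WRITE SAME limit is now still capped at what the request's block-count field can encode (UINT32_MAX blocks for UNMAP, max_xfer_len blocks for the write-zeroes path), rather than being left unlimited as with the old conditional assignments. Roughly, per its definition in qemu/osdep.h:

    /* Smaller of two limits, where 0 means "unlimited" and is ignored
     * unless both operands are 0. */
    #define MIN_NON_ZERO(a, b) \
        ((a) == 0 ? (b) : ((b) == 0 ? (a) : MIN(a, b)))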
static int coroutine_fn iscsi_co_truncate(BlockDriverState *bs, int64_t offset,
bool exact, PreallocMode prealloc,
- Error **errp)
+ BdrvRequestFlags flags, Error **errp)
{
IscsiLun *iscsilun = bs->opaque;
int64_t cur_length;
static int iscsi_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
IscsiLun *iscsilun = bs->opaque;
- bdi->unallocated_blocks_are_zero = iscsilun->lbprz;
bdi->cluster_size = iscsilun->cluster_size;
return 0;
}
static int coroutine_fn iscsi_co_copy_range_from(BlockDriverState *bs,
BdrvChild *src,
- uint64_t src_offset,
+ int64_t src_offset,
BdrvChild *dst,
- uint64_t dst_offset,
- uint64_t bytes,
+ int64_t dst_offset,
+ int64_t bytes,
BdrvRequestFlags read_flags,
BdrvRequestFlags write_flags)
{
static int coroutine_fn iscsi_co_copy_range_to(BlockDriverState *bs,
BdrvChild *src,
- uint64_t src_offset,
+ int64_t src_offset,
BdrvChild *dst,
- uint64_t dst_offset,
- uint64_t bytes,
+ int64_t dst_offset,
+ int64_t bytes,
BdrvRequestFlags read_flags,
BdrvRequestFlags write_flags)
{