#include "config-host.h"
#include <poll.h>
+#include <math.h>
#include <arpa/inet.h>
#include "qemu-common.h"
#include "qemu/config-file.h"
#include "qemu/bitops.h"
#include "qemu/bitmap.h"
#include "block/block_int.h"
-#include "trace.h"
#include "block/scsi.h"
#include "qemu/iov.h"
#include "sysemu/sysemu.h"
typedef struct IscsiLun {
struct iscsi_context *iscsi;
+ AioContext *aio_context;
int lun;
enum scsi_inquiry_peripheral_device_type type;
int block_size;
unsigned char *zeroblock;
unsigned long *allocationmap;
int cluster_sectors;
+ bool use_16_for_rw;
} IscsiLun;
typedef struct IscsiTask {
struct scsi_task *task;
Coroutine *co;
QEMUBH *bh;
+ IscsiLun *iscsilun;
+ QEMUTimer retry_timer;
} IscsiTask;
typedef struct IscsiAIOCB {
- BlockDriverAIOCB common;
+ BlockAIOCB common;
QEMUIOVector *qiov;
QEMUBH *bh;
IscsiLun *iscsilun;
struct scsi_task *task;
uint8_t *buf;
int status;
- int canceled;
- int retries;
int64_t sector_num;
int nb_sectors;
#ifdef __linux__
#define NOP_INTERVAL 5000
#define MAX_NOP_FAILURES 3
-#define ISCSI_CMD_RETRIES 5
+#define ISCSI_CMD_RETRIES ARRAY_SIZE(iscsi_retry_times)
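+/* mean retry delays in milliseconds, one entry per retry attempt */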
+static const unsigned iscsi_retry_times[] = {8, 32, 128, 512, 2048};
-/* this threshhold is a trade-off knob to choose between
+/* this threshold is a trade-off knob to choose between
* the potential additional overhead of an extra GET_LBA_STATUS request
* vs. unnecessarily reading a lot of zero sectors over the wire.
* If a read request is greater than or equal to ISCSI_CHECKALLOC_THRES
g_free(acb->buf);
acb->buf = NULL;
- if (acb->canceled == 0) {
- acb->common.cb(acb->common.opaque, acb->status);
- }
+ acb->common.cb(acb->common.opaque, acb->status);
if (acb->task != NULL) {
scsi_free_scsi_task(acb->task);
acb->task = NULL;
}
- qemu_aio_release(acb);
+ qemu_aio_unref(acb);
}
static void
if (acb->bh) {
return;
}
- acb->bh = qemu_bh_new(iscsi_bh_cb, acb);
+ acb->bh = aio_bh_new(acb->iscsilun->aio_context, iscsi_bh_cb, acb);
qemu_bh_schedule(acb->bh);
}
static void iscsi_co_generic_bh_cb(void *opaque)
{
struct IscsiTask *iTask = opaque;
+ iTask->complete = 1;
qemu_bh_delete(iTask->bh);
qemu_coroutine_enter(iTask->co, NULL);
}
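+/* Fires once the retry back-off delay has elapsed: mark the task complete
+ * and re-enter the coroutine so the request can be re-issued. */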
+static void iscsi_retry_timer_expired(void *opaque)
+{
+ struct IscsiTask *iTask = opaque;
+ iTask->complete = 1;
+ if (iTask->co) {
+ qemu_coroutine_enter(iTask->co, NULL);
+ }
+}
+
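+/* Return an exponentially distributed random value with the given mean,
+ * used to add jitter to the retry delays below. */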
+static inline unsigned exp_random(double mean)
+{
+ return -mean * log((double)rand() / RAND_MAX);
+}
+
static void
iscsi_co_generic_cb(struct iscsi_context *iscsi, int status,
void *command_data, void *opaque)
struct IscsiTask *iTask = opaque;
struct scsi_task *task = command_data;
- iTask->complete = 1;
iTask->status = status;
iTask->do_retry = 0;
iTask->task = task;
- if (iTask->retries-- > 0 && status == SCSI_STATUS_CHECK_CONDITION
- && task->sense.key == SCSI_SENSE_UNIT_ATTENTION) {
- error_report("iSCSI CheckCondition: %s", iscsi_get_error(iscsi));
- iTask->do_retry = 1;
- goto out;
- }
-
if (status != SCSI_STATUS_GOOD) {
+ if (iTask->retries++ < ISCSI_CMD_RETRIES) {
+ if (status == SCSI_STATUS_CHECK_CONDITION
+ && task->sense.key == SCSI_SENSE_UNIT_ATTENTION) {
+ error_report("iSCSI CheckCondition: %s",
+ iscsi_get_error(iscsi));
+ iTask->do_retry = 1;
+ goto out;
+ }
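+ /* On a BUSY status, arm a one-shot timer so the request is retried
+ * after a randomized delay taken from iscsi_retry_times[]. */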
+ if (status == SCSI_STATUS_BUSY) {
+ unsigned retry_time =
+ exp_random(iscsi_retry_times[iTask->retries - 1]);
+ error_report("iSCSI Busy (retry #%u in %u ms): %s",
+ iTask->retries, retry_time,
+ iscsi_get_error(iscsi));
+ aio_timer_init(iTask->iscsilun->aio_context,
+ &iTask->retry_timer, QEMU_CLOCK_REALTIME,
+ SCALE_MS, iscsi_retry_timer_expired, iTask);
+ timer_mod(&iTask->retry_timer,
+ qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + retry_time);
+ iTask->do_retry = 1;
+ return;
+ }
+ }
error_report("iSCSI Failure: %s", iscsi_get_error(iscsi));
}
out:
if (iTask->co) {
- iTask->bh = qemu_bh_new(iscsi_co_generic_bh_cb, iTask);
+ iTask->bh = aio_bh_new(iTask->iscsilun->aio_context,
+ iscsi_co_generic_bh_cb, iTask);
qemu_bh_schedule(iTask->bh);
+ } else {
+ iTask->complete = 1;
}
}
{
*iTask = (struct IscsiTask) {
.co = qemu_coroutine_self(),
- .retries = ISCSI_CMD_RETRIES,
+ .iscsilun = iscsilun,
};
}
}
static void
-iscsi_aio_cancel(BlockDriverAIOCB *blockacb)
+iscsi_aio_cancel(BlockAIOCB *blockacb)
{
IscsiAIOCB *acb = (IscsiAIOCB *)blockacb;
IscsiLun *iscsilun = acb->iscsilun;
return;
}
- acb->canceled = 1;
-
/* send a task mgmt call to the target to cancel the task on the target */
iscsi_task_mgmt_abort_task_async(iscsilun->iscsi, acb->task,
iscsi_abort_task_cb, acb);
- while (acb->status == -EINPROGRESS) {
- qemu_aio_wait();
- }
}
static const AIOCBInfo iscsi_aiocb_info = {
.aiocb_size = sizeof(IscsiAIOCB),
- .cancel = iscsi_aio_cancel,
+ .cancel_async = iscsi_aio_cancel,
};
ev = POLLIN;
ev |= iscsi_which_events(iscsi);
if (ev != iscsilun->events) {
- qemu_aio_set_fd_handler(iscsi_get_fd(iscsi),
- iscsi_process_read,
- (ev & POLLOUT) ? iscsi_process_write : NULL,
- iscsilun);
+ aio_set_fd_handler(iscsilun->aio_context,
+ iscsi_get_fd(iscsi),
+ iscsi_process_read,
+ (ev & POLLOUT) ? iscsi_process_write : NULL,
+ iscsilun);
}
return 1;
}
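+/* One bit per cluster_sectors-sized cluster of the LUN; returns NULL if
+ * the bitmap cannot be allocated, so the caller can report -ENOMEM. */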
+static unsigned long *iscsi_allocationmap_init(IscsiLun *iscsilun)
+{
+ return bitmap_try_new(DIV_ROUND_UP(sector_lun2qemu(iscsilun->num_blocks,
+ iscsilun),
+ iscsilun->cluster_sectors));
+}
+
static void iscsi_allocationmap_set(IscsiLun *iscsilun, int64_t sector_num,
int nb_sectors)
{
struct IscsiTask iTask;
uint64_t lba;
uint32_t num_sectors;
- uint8_t *data = NULL;
- uint8_t *buf = NULL;
if (!is_request_lun_aligned(sector_num, nb_sectors, iscsilun)) {
return -EINVAL;
}
+ if (bs->bl.max_transfer_length && nb_sectors > bs->bl.max_transfer_length) {
+ error_report("iSCSI Error: Write of %d sectors exceeds max_xfer_len "
+ "of %d sectors", nb_sectors, bs->bl.max_transfer_length);
+ return -EINVAL;
+ }
+
lba = sector_qemu2lun(sector_num, iscsilun);
num_sectors = sector_qemu2lun(nb_sectors, iscsilun);
-#if !defined(LIBISCSI_FEATURE_IOVECTOR)
- /* if the iovec only contains one buffer we can pass it directly */
- if (iov->niov == 1) {
- data = iov->iov[0].iov_base;
- } else {
- size_t size = MIN(nb_sectors * BDRV_SECTOR_SIZE, iov->size);
- buf = g_malloc(size);
- qemu_iovec_to_buf(iov, 0, buf, size);
- data = buf;
- }
-#endif
iscsi_co_init_iscsitask(iscsilun, &iTask);
retry:
- iTask.task = iscsi_write16_task(iscsilun->iscsi, iscsilun->lun, lba,
- data, num_sectors * iscsilun->block_size,
- iscsilun->block_size, 0, 0, 0, 0, 0,
- iscsi_co_generic_cb, &iTask);
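+ /* WRITE(16) is required for LBAs beyond 32 bits; WRITE(10) suffices
+ * otherwise (see use_16_for_rw, set from READ CAPACITY(16)). */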
+ if (iscsilun->use_16_for_rw) {
+ iTask.task = iscsi_write16_task(iscsilun->iscsi, iscsilun->lun, lba,
+ NULL, num_sectors * iscsilun->block_size,
+ iscsilun->block_size, 0, 0, 0, 0, 0,
+ iscsi_co_generic_cb, &iTask);
+ } else {
+ iTask.task = iscsi_write10_task(iscsilun->iscsi, iscsilun->lun, lba,
+ NULL, num_sectors * iscsilun->block_size,
+ iscsilun->block_size, 0, 0, 0, 0, 0,
+ iscsi_co_generic_cb, &iTask);
+ }
if (iTask.task == NULL) {
- g_free(buf);
return -ENOMEM;
}
-#if defined(LIBISCSI_FEATURE_IOVECTOR)
scsi_task_set_iov_out(iTask.task, (struct scsi_iovec *) iov->iov,
iov->niov);
-#endif
while (!iTask.complete) {
iscsi_set_events(iscsilun);
qemu_coroutine_yield();
goto retry;
}
- g_free(buf);
-
if (iTask.status != SCSI_STATUS_GOOD) {
return -EIO;
}
}
-#if defined(LIBISCSI_FEATURE_IOVECTOR)
static bool iscsi_allocationmap_is_allocated(IscsiLun *iscsilun,
int64_t sector_num, int nb_sectors)
{
return ret;
}
-#endif /* LIBISCSI_FEATURE_IOVECTOR */
-
-
static int coroutine_fn iscsi_co_readv(BlockDriverState *bs,
int64_t sector_num, int nb_sectors,
QEMUIOVector *iov)
struct IscsiTask iTask;
uint64_t lba;
uint32_t num_sectors;
-#if !defined(LIBISCSI_FEATURE_IOVECTOR)
- int i;
-#endif
if (!is_request_lun_aligned(sector_num, nb_sectors, iscsilun)) {
return -EINVAL;
}
-#if defined(LIBISCSI_FEATURE_IOVECTOR)
+ if (bs->bl.max_transfer_length && nb_sectors > bs->bl.max_transfer_length) {
+ error_report("iSCSI Error: Read of %d sectors exceeds max_xfer_len "
+ "of %d sectors", nb_sectors, bs->bl.max_transfer_length);
+ return -EINVAL;
+ }
+
if (iscsilun->lbprz && nb_sectors >= ISCSI_CHECKALLOC_THRES &&
!iscsi_allocationmap_is_allocated(iscsilun, sector_num, nb_sectors)) {
int64_t ret;
return 0;
}
}
-#endif
lba = sector_qemu2lun(sector_num, iscsilun);
num_sectors = sector_qemu2lun(nb_sectors, iscsilun);
iscsi_co_init_iscsitask(iscsilun, &iTask);
retry:
- switch (iscsilun->type) {
- case TYPE_DISK:
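+ /* as for writes: READ(16) for LBAs beyond 32 bits, READ(10) otherwise */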
+ if (iscsilun->use_16_for_rw) {
iTask.task = iscsi_read16_task(iscsilun->iscsi, iscsilun->lun, lba,
num_sectors * iscsilun->block_size,
iscsilun->block_size, 0, 0, 0, 0, 0,
iscsi_co_generic_cb, &iTask);
- break;
- default:
+ } else {
iTask.task = iscsi_read10_task(iscsilun->iscsi, iscsilun->lun, lba,
num_sectors * iscsilun->block_size,
iscsilun->block_size,
-#if !defined(CONFIG_LIBISCSI_1_4) /* API change from 1.4.0 to 1.5.0 */
0, 0, 0, 0, 0,
-#endif
iscsi_co_generic_cb, &iTask);
- break;
}
if (iTask.task == NULL) {
return -ENOMEM;
}
-#if defined(LIBISCSI_FEATURE_IOVECTOR)
scsi_task_set_iov_in(iTask.task, (struct scsi_iovec *) iov->iov, iov->niov);
-#else
- for (i = 0; i < iov->niov; i++) {
- scsi_task_add_data_in_buffer(iTask.task,
- iov->iov[i].iov_len,
- iov->iov[i].iov_base);
- }
-#endif
while (!iTask.complete) {
iscsi_set_events(iscsilun);
g_free(acb->buf);
acb->buf = NULL;
- if (acb->canceled != 0) {
- return;
- }
-
acb->status = 0;
if (status < 0) {
error_report("Failed to ioctl(SG_IO) to iSCSI lun. %s",
iscsi_schedule_bh(acb);
}
-static BlockDriverAIOCB *iscsi_aio_ioctl(BlockDriverState *bs,
+static BlockAIOCB *iscsi_aio_ioctl(BlockDriverState *bs,
unsigned long int req, void *buf,
- BlockDriverCompletionFunc *cb, void *opaque)
+ BlockCompletionFunc *cb, void *opaque)
{
IscsiLun *iscsilun = bs->opaque;
struct iscsi_context *iscsi = iscsilun->iscsi;
acb = qemu_aio_get(&iscsi_aiocb_info, bs, cb, opaque);
acb->iscsilun = iscsilun;
- acb->canceled = 0;
acb->bh = NULL;
acb->status = -EINPROGRESS;
acb->buf = NULL;
if (acb->task == NULL) {
error_report("iSCSI: Failed to allocate task for scsi command. %s",
iscsi_get_error(iscsi));
- qemu_aio_release(acb);
+ qemu_aio_unref(acb);
return NULL;
}
memset(acb->task, 0, sizeof(struct scsi_task));
data.data = acb->ioh->dxferp;
data.size = acb->ioh->dxfer_len;
} else {
-#if defined(LIBISCSI_FEATURE_IOVECTOR)
scsi_task_set_iov_out(acb->task,
(struct scsi_iovec *) acb->ioh->dxferp,
acb->ioh->iovec_count);
-#else
- struct iovec *iov = (struct iovec *)acb->ioh->dxferp;
-
- acb->buf = g_malloc(acb->ioh->dxfer_len);
- data.data = acb->buf;
- data.size = iov_to_buf(iov, acb->ioh->iovec_count, 0,
- acb->buf, acb->ioh->dxfer_len);
-#endif
}
}
(data.size > 0) ? &data : NULL,
acb) != 0) {
scsi_free_scsi_task(acb->task);
- qemu_aio_release(acb);
+ qemu_aio_unref(acb);
return NULL;
}
acb->ioh->dxfer_len,
acb->ioh->dxferp);
} else {
-#if defined(LIBISCSI_FEATURE_IOVECTOR)
scsi_task_set_iov_in(acb->task,
(struct scsi_iovec *) acb->ioh->dxferp,
acb->ioh->iovec_count);
-#else
- int i;
- for (i = 0; i < acb->ioh->iovec_count; i++) {
- struct iovec *iov = (struct iovec *)acb->ioh->dxferp;
-
- scsi_task_add_data_in_buffer(acb->task,
- iov[i].iov_len,
- iov[i].iov_base);
- }
-#endif
}
}
return &acb->common;
}
-
static void ioctl_cb(void *opaque, int status)
{
int *p_status = opaque;
iscsi_aio_ioctl(bs, req, buf, ioctl_cb, &status);
while (status == -EINPROGRESS) {
- qemu_aio_wait();
+ aio_poll(iscsilun->aio_context, true);
}
return 0;
return 0;
}
-#if defined(SCSI_SENSE_ASCQ_CAPACITY_DATA_HAS_CHANGED)
-
static int
coroutine_fn iscsi_co_write_zeroes(BlockDriverState *bs, int64_t sector_num,
int nb_sectors, BdrvRequestFlags flags)
struct IscsiTask iTask;
uint64_t lba;
uint32_t nb_blocks;
+ bool use_16_for_ws = iscsilun->use_16_for_rw;
if (!is_request_lun_aligned(sector_num, nb_sectors, iscsilun)) {
return -EINVAL;
}
- if ((flags & BDRV_REQ_MAY_UNMAP) && !iscsilun->lbp.lbpws) {
- /* WRITE SAME with UNMAP is not supported by the target,
- * fall back and try WRITE SAME without UNMAP */
- flags &= ~BDRV_REQ_MAY_UNMAP;
+ if (flags & BDRV_REQ_MAY_UNMAP) {
+ if (!use_16_for_ws && !iscsilun->lbp.lbpws10) {
+ /* WRITESAME10 with UNMAP is unsupported, try WRITESAME16 */
+ use_16_for_ws = true;
+ }
+ if (use_16_for_ws && !iscsilun->lbp.lbpws) {
+ /* WRITESAME16 with UNMAP is not supported by the target,
+ * fall back and try WRITESAME10/16 without UNMAP */
+ flags &= ~BDRV_REQ_MAY_UNMAP;
+ use_16_for_ws = iscsilun->use_16_for_rw;
+ }
}
if (!(flags & BDRV_REQ_MAY_UNMAP) && !iscsilun->has_write_same) {
- /* WRITE SAME without UNMAP is not supported by the target */
+ /* WRITESAME without UNMAP is not supported by the target */
return -ENOTSUP;
}
nb_blocks = sector_qemu2lun(nb_sectors, iscsilun);
if (iscsilun->zeroblock == NULL) {
- iscsilun->zeroblock = g_malloc0(iscsilun->block_size);
+ iscsilun->zeroblock = g_try_malloc0(iscsilun->block_size);
+ if (iscsilun->zeroblock == NULL) {
+ return -ENOMEM;
+ }
}
iscsi_co_init_iscsitask(iscsilun, &iTask);
retry:
- if (iscsi_writesame16_task(iscsilun->iscsi, iscsilun->lun, lba,
- iscsilun->zeroblock, iscsilun->block_size,
- nb_blocks, 0, !!(flags & BDRV_REQ_MAY_UNMAP),
- 0, 0, iscsi_co_generic_cb, &iTask) == NULL) {
+ if (use_16_for_ws) {
+ iTask.task = iscsi_writesame16_task(iscsilun->iscsi, iscsilun->lun, lba,
+ iscsilun->zeroblock, iscsilun->block_size,
+ nb_blocks, 0, !!(flags & BDRV_REQ_MAY_UNMAP),
+ 0, 0, iscsi_co_generic_cb, &iTask);
+ } else {
+ iTask.task = iscsi_writesame10_task(iscsilun->iscsi, iscsilun->lun, lba,
+ iscsilun->zeroblock, iscsilun->block_size,
+ nb_blocks, 0, !!(flags & BDRV_REQ_MAY_UNMAP),
+ 0, 0, iscsi_co_generic_cb, &iTask);
+ }
+ if (iTask.task == NULL) {
return -ENOMEM;
}
return 0;
}
-#endif /* SCSI_SENSE_ASCQ_CAPACITY_DATA_HAS_CHANGED */
-
static void parse_chap(struct iscsi_context *iscsi, const char *target,
Error **errp)
{
return iscsi_name;
}
-#if defined(LIBISCSI_FEATURE_NOP_COUNTER)
static void iscsi_nop_timed_event(void *opaque)
{
IscsiLun *iscsilun = opaque;
timer_mod(iscsilun->nop_timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + NOP_INTERVAL);
iscsi_set_events(iscsilun);
}
-#endif
static void iscsi_readcapacity_sync(IscsiLun *iscsilun, Error **errp)
{
iscsilun->num_blocks = rc16->returned_lba + 1;
iscsilun->lbpme = rc16->lbpme;
iscsilun->lbprz = rc16->lbprz;
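+ /* LBAs that do not fit in 32 bits require the 16-byte CDB variants */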
+ iscsilun->use_16_for_rw = (rc16->returned_lba > 0xffffffff);
}
}
break;
return NULL;
}
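+/* Stop polling the iSCSI socket and drop the NOP timer; called on close
+ * and whenever the BDS leaves its current AioContext. */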
+static void iscsi_detach_aio_context(BlockDriverState *bs)
+{
+ IscsiLun *iscsilun = bs->opaque;
+
+ aio_set_fd_handler(iscsilun->aio_context,
+ iscsi_get_fd(iscsilun->iscsi),
+ NULL, NULL, NULL);
+ iscsilun->events = 0;
+
+ if (iscsilun->nop_timer) {
+ timer_del(iscsilun->nop_timer);
+ timer_free(iscsilun->nop_timer);
+ iscsilun->nop_timer = NULL;
+ }
+}
+
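+/* Resume socket polling in the new AioContext and re-create the periodic
+ * NOP timer there. */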
+static void iscsi_attach_aio_context(BlockDriverState *bs,
+ AioContext *new_context)
+{
+ IscsiLun *iscsilun = bs->opaque;
+
+ iscsilun->aio_context = new_context;
+ iscsi_set_events(iscsilun);
+
+ /* Set up a timer for sending out iSCSI NOPs */
+ iscsilun->nop_timer = aio_timer_new(iscsilun->aio_context,
+ QEMU_CLOCK_REALTIME, SCALE_MS,
+ iscsi_nop_timed_event, iscsilun);
+ timer_mod(iscsilun->nop_timer,
+ qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + NOP_INTERVAL);
+}
+
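+/* Check the Write Protect bit (bit 7 of the device-specific parameter in
+ * the MODE SENSE(6) data); on any failure the LUN is assumed writable. */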
+static bool iscsi_is_write_protected(IscsiLun *iscsilun)
+{
+ struct scsi_task *task;
+ struct scsi_mode_sense *ms = NULL;
+ bool wrprotected = false;
+
+ task = iscsi_modesense6_sync(iscsilun->iscsi, iscsilun->lun,
+ 1, SCSI_MODESENSE_PC_CURRENT,
+ 0x3F, 0, 255);
+ if (task == NULL) {
+ error_report("iSCSI: Failed to send MODE_SENSE(6) command: %s",
+ iscsi_get_error(iscsilun->iscsi));
+ goto out;
+ }
+
+ if (task->status != SCSI_STATUS_GOOD) {
+ error_report("iSCSI: Failed MODE_SENSE(6), LUN assumed writable");
+ goto out;
+ }
+ ms = scsi_datain_unmarshall(task);
+ if (!ms) {
+ error_report("iSCSI: Failed to unmarshall MODE_SENSE(6) data: %s",
+ iscsi_get_error(iscsilun->iscsi));
+ goto out;
+ }
+ wrprotected = ms->device_specific_parameter & 0x80;
+
+out:
+ if (task) {
+ scsi_free_scsi_task(task);
+ }
+ return wrprotected;
+}
+
/*
* We support iSCSI URLs of the form
* iscsi://[<username>%<password>@]<host>[:<port>]/<targetname>/<lun>
QemuOpts *opts;
Error *local_err = NULL;
const char *filename;
- int i, ret;
+ int i, ret = 0;
if ((BDRV_SECTOR_SIZE % 512) != 0) {
error_setg(errp, "iSCSI: Invalid BDRV_SECTOR_SIZE. "
}
iscsilun->iscsi = iscsi;
+ iscsilun->aio_context = bdrv_get_aio_context(bs);
iscsilun->lun = iscsi_url->lun;
iscsilun->has_write_same = true;
scsi_free_scsi_task(task);
task = NULL;
+ /* Check the write protect flag of the LUN if we want to write */
+ if (iscsilun->type == TYPE_DISK && (flags & BDRV_O_RDWR) &&
+ iscsi_is_write_protected(iscsilun)) {
+ error_setg(errp, "Cannot open a write protected LUN as read-write");
+ ret = -EACCES;
+ goto out;
+ }
+
iscsi_readcapacity_sync(iscsilun, &local_err);
if (local_err != NULL) {
error_propagate(errp, local_err);
scsi_free_scsi_task(task);
task = NULL;
-#if defined(LIBISCSI_FEATURE_NOP_COUNTER)
- /* Set up a timer for sending out iSCSI NOPs */
- iscsilun->nop_timer = timer_new_ms(QEMU_CLOCK_REALTIME, iscsi_nop_timed_event, iscsilun);
- timer_mod(iscsilun->nop_timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + NOP_INTERVAL);
-#endif
+ iscsi_attach_aio_context(bs, iscsilun->aio_context);
/* Guess the internal cluster (page) size of the iscsi target by means
* of opt_unmap_gran. Transfer the unmap granularity only if it has a
iscsilun->bl.opt_unmap_gran * iscsilun->block_size <= 16 * 1024 * 1024) {
iscsilun->cluster_sectors = (iscsilun->bl.opt_unmap_gran *
iscsilun->block_size) >> BDRV_SECTOR_BITS;
-#if defined(LIBISCSI_FEATURE_IOVECTOR)
if (iscsilun->lbprz && !(bs->open_flags & BDRV_O_NOCACHE)) {
- iscsilun->allocationmap =
- bitmap_new(DIV_ROUND_UP(bs->total_sectors,
- iscsilun->cluster_sectors));
+ iscsilun->allocationmap = iscsi_allocationmap_init(iscsilun);
+ if (iscsilun->allocationmap == NULL) {
+ ret = -ENOMEM;
+ }
}
-#endif
}
out:
qemu_opts_del(opts);
- if (initiator_name != NULL) {
- g_free(initiator_name);
- }
+ g_free(initiator_name);
if (iscsi_url != NULL) {
iscsi_destroy_url(iscsi_url);
}
IscsiLun *iscsilun = bs->opaque;
struct iscsi_context *iscsi = iscsilun->iscsi;
- if (iscsilun->nop_timer) {
- timer_del(iscsilun->nop_timer);
- timer_free(iscsilun->nop_timer);
- }
- qemu_aio_set_fd_handler(iscsi_get_fd(iscsi), NULL, NULL, NULL);
+ iscsi_detach_aio_context(bs);
iscsi_destroy_context(iscsi);
g_free(iscsilun->zeroblock);
g_free(iscsilun->allocationmap);
memset(iscsilun, 0, sizeof(IscsiLun));
}
-static int iscsi_refresh_limits(BlockDriverState *bs)
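+/* Convert a limit expressed in LUN blocks to BDRV_SECTOR_SIZE sectors,
+ * clamped so it still fits the int fields of BlockLimits. */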
+static int sector_limits_lun2qemu(int64_t sector, IscsiLun *iscsilun)
{
- IscsiLun *iscsilun = bs->opaque;
+ return MIN(sector_lun2qemu(sector, iscsilun), INT_MAX / 2 + 1);
+}
+static void iscsi_refresh_limits(BlockDriverState *bs, Error **errp)
+{
/* We don't actually refresh here, but just return data queried in
* iscsi_open(): iscsi targets don't change their limits. */
+
+ IscsiLun *iscsilun = bs->opaque;
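+ /* READ/WRITE(10) carry a 16-bit transfer length, READ/WRITE(16) a
+ * 32-bit one; the target may restrict this further via max_xfer_len. */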
+ uint32_t max_xfer_len = iscsilun->use_16_for_rw ? 0xffffffff : 0xffff;
+
+ if (iscsilun->bl.max_xfer_len) {
+ max_xfer_len = MIN(max_xfer_len, iscsilun->bl.max_xfer_len);
+ }
+
+ bs->bl.max_transfer_length = sector_limits_lun2qemu(max_xfer_len, iscsilun);
+
if (iscsilun->lbp.lbpu) {
if (iscsilun->bl.max_unmap < 0xffffffff) {
- bs->bl.max_discard = sector_lun2qemu(iscsilun->bl.max_unmap,
- iscsilun);
+ bs->bl.max_discard =
+ sector_limits_lun2qemu(iscsilun->bl.max_unmap, iscsilun);
}
- bs->bl.discard_alignment = sector_lun2qemu(iscsilun->bl.opt_unmap_gran,
- iscsilun);
+ bs->bl.discard_alignment =
+ sector_limits_lun2qemu(iscsilun->bl.opt_unmap_gran, iscsilun);
}
if (iscsilun->bl.max_ws_len < 0xffffffff) {
- bs->bl.max_write_zeroes = sector_lun2qemu(iscsilun->bl.max_ws_len,
- iscsilun);
+ bs->bl.max_write_zeroes =
+ sector_limits_lun2qemu(iscsilun->bl.max_ws_len, iscsilun);
}
if (iscsilun->lbp.lbpws) {
- bs->bl.write_zeroes_alignment = sector_lun2qemu(iscsilun->bl.opt_unmap_gran,
- iscsilun);
+ bs->bl.write_zeroes_alignment =
+ sector_limits_lun2qemu(iscsilun->bl.opt_unmap_gran, iscsilun);
}
- bs->bl.opt_transfer_length = sector_lun2qemu(iscsilun->bl.opt_xfer_len,
- iscsilun);
- return 0;
+ bs->bl.opt_transfer_length =
+ sector_limits_lun2qemu(iscsilun->bl.opt_xfer_len, iscsilun);
}
/* Since iscsi_open() ignores bdrv_flags, there is nothing to do here in
if (iscsilun->allocationmap != NULL) {
g_free(iscsilun->allocationmap);
- iscsilun->allocationmap =
- bitmap_new(DIV_ROUND_UP(bs->total_sectors,
- iscsilun->cluster_sectors));
+ iscsilun->allocationmap = iscsi_allocationmap_init(iscsilun);
}
return 0;
}
-static int iscsi_create(const char *filename, QEMUOptionParameter *options,
- Error **errp)
+static int iscsi_create(const char *filename, QemuOpts *opts, Error **errp)
{
int ret = 0;
int64_t total_size = 0;
IscsiLun *iscsilun = NULL;
QDict *bs_options;
- bs = bdrv_new("", &error_abort);
+ bs = bdrv_new();
/* Read out options */
- while (options && options->name) {
- if (!strcmp(options->name, "size")) {
- total_size = options->value.n / BDRV_SECTOR_SIZE;
- }
- options++;
- }
-
- bs->opaque = g_malloc0(sizeof(struct IscsiLun));
+ total_size = DIV_ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
+ BDRV_SECTOR_SIZE);
+ bs->opaque = g_new0(struct IscsiLun, 1);
iscsilun = bs->opaque;
bs_options = qdict_new();
if (ret != 0) {
goto out;
}
- if (iscsilun->nop_timer) {
- timer_del(iscsilun->nop_timer);
- timer_free(iscsilun->nop_timer);
- }
+ iscsi_detach_aio_context(bs);
if (iscsilun->type != TYPE_DISK) {
ret = -ENODEV;
goto out;
return 0;
}
-static QEMUOptionParameter iscsi_create_options[] = {
- {
- .name = BLOCK_OPT_SIZE,
- .type = OPT_SIZE,
- .help = "Virtual disk size"
- },
- { NULL }
+static QemuOptsList iscsi_create_opts = {
+ .name = "iscsi-create-opts",
+ .head = QTAILQ_HEAD_INITIALIZER(iscsi_create_opts.head),
+ .desc = {
+ {
+ .name = BLOCK_OPT_SIZE,
+ .type = QEMU_OPT_SIZE,
+ .help = "Virtual disk size"
+ },
+ { /* end of list */ }
+ }
};
static BlockDriver bdrv_iscsi = {
.bdrv_file_open = iscsi_open,
.bdrv_close = iscsi_close,
.bdrv_create = iscsi_create,
- .create_options = iscsi_create_options,
+ .create_opts = &iscsi_create_opts,
.bdrv_reopen_prepare = iscsi_reopen_prepare,
.bdrv_getlength = iscsi_getlength,
.bdrv_truncate = iscsi_truncate,
.bdrv_refresh_limits = iscsi_refresh_limits,
-#if defined(LIBISCSI_FEATURE_IOVECTOR)
.bdrv_co_get_block_status = iscsi_co_get_block_status,
-#endif
.bdrv_co_discard = iscsi_co_discard,
-#if defined(SCSI_SENSE_ASCQ_CAPACITY_DATA_HAS_CHANGED)
.bdrv_co_write_zeroes = iscsi_co_write_zeroes,
-#endif
.bdrv_co_readv = iscsi_co_readv,
.bdrv_co_writev = iscsi_co_writev,
.bdrv_co_flush_to_disk = iscsi_co_flush,
.bdrv_ioctl = iscsi_ioctl,
.bdrv_aio_ioctl = iscsi_aio_ioctl,
#endif
+
+ .bdrv_detach_aio_context = iscsi_detach_aio_context,
+ .bdrv_attach_aio_context = iscsi_attach_aio_context,
};
static QemuOptsList qemu_iscsi_opts = {