* THE SOFTWARE.
*/
#include "qemu/osdep.h"
-#include <hw/hw.h>
-#include <hw/i386/pc.h>
-#include <hw/pci/pci.h>
-#include <hw/isa/isa.h>
+#include "hw/hw.h"
+#include "hw/i386/pc.h"
+#include "hw/pci/pci.h"
+#include "hw/isa/isa.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "sysemu/block-backend.h"
#include "qemu/cutils.h"
-#include <hw/ide/internal.h>
+#include "hw/ide/internal.h"
/* These values were based on a Seagate ST3500418AS but have been modified
to make more sense in QEMU */
{ 190, 0x03, 0x00, 0x45, 0x45, 0x1f, 0x00, 0x1f, 0x1f, 0x00, 0x00, 0x32},
};
-static int ide_handle_rw_error(IDEState *s, int error, int op);
static void ide_dummy_transfer_stop(IDEState *s);
static void padstr(char *str, const char *src, int len)
}
/* Got an entry! Submit and exit. */
- iocb->aiocb = blk_aio_discard(iocb->blk, sector, count,
- ide_issue_trim_cb, opaque);
+ iocb->aiocb = blk_aio_pdiscard(iocb->blk,
+ sector << BDRV_SECTOR_BITS,
+ count << BDRV_SECTOR_BITS,
+ ide_issue_trim_cb, opaque);
return;
}
}
}
-BlockAIOCB *ide_issue_trim(BlockBackend *blk,
- int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
- BlockCompletionFunc *cb, void *opaque)
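+/* DMAIOFunc for the TRIM path: the DMA helpers pass their completion
+ * callback via @cb/@cb_opaque, while the BlockBackend handed to
+ * dma_blk_io() in ide_dma_cb() arrives here as @opaque. */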
+BlockAIOCB *ide_issue_trim(
+ int64_t offset, QEMUIOVector *qiov,
+ BlockCompletionFunc *cb, void *cb_opaque, void *opaque)
{
+ BlockBackend *blk = opaque;
TrimAIOCB *iocb;
- iocb = blk_aio_get(&trim_aiocb_info, blk, cb, opaque);
+ iocb = blk_aio_get(&trim_aiocb_info, blk, cb, cb_opaque);
iocb->blk = blk;
iocb->bh = qemu_bh_new(ide_trim_bh_cb, iocb);
iocb->ret = 0;
s->error = ABRT_ERR;
}
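+/* Remember the request being issued so that it can be replayed by
+ * ide_restart_bh() after a rerror/werror=stop pause. */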
+static void ide_set_retry(IDEState *s)
+{
+ s->bus->retry_unit = s->unit;
+ s->bus->retry_sector_num = ide_get_sector(s);
+ s->bus->retry_nsector = s->nsector;
+}
+
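+/* Forget the recorded request, e.g. once it has completed. */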
+static void ide_clear_retry(IDEState *s)
+{
+ s->bus->retry_unit = -1;
+ s->bus->retry_sector_num = 0;
+ s->bus->retry_nsector = 0;
+}
+
/* prepare data transfer and tell what to do after */
void ide_transfer_start(IDEState *s, uint8_t *buf, int size,
EndTransferFunc *end_transfer_func)
s->end_transfer_func = end_transfer_func;
s->data_ptr = buf;
s->data_end = buf + size;
+ ide_set_retry(s);
if (!(s->status & ERR_STAT)) {
s->status |= DRQ_STAT;
}
req->iov.iov_len = iov->size;
qemu_iovec_init_external(&req->qiov, &req->iov, 1);
- aioreq = blk_aio_readv(s->blk, sector_num, &req->qiov, nb_sectors,
- ide_buffered_readv_cb, req);
+ aioreq = blk_aio_preadv(s->blk, sector_num << BDRV_SECTOR_BITS,
+ &req->qiov, 0, ide_buffered_readv_cb, req);
QLIST_INSERT_HEAD(&s->buffered_requests, req, list);
return aioreq;
void ide_set_inactive(IDEState *s, bool more)
{
s->bus->dma->aiocb = NULL;
- s->bus->retry_unit = -1;
- s->bus->retry_sector_num = 0;
- s->bus->retry_nsector = 0;
+ ide_clear_retry(s);
if (s->bus->dma->ops->set_inactive) {
s->bus->dma->ops->set_inactive(s->bus->dma, more);
}
ide_set_irq(s->bus);
}
-static int ide_handle_rw_error(IDEState *s, int error, int op)
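+/* Also used by the ATAPI code for its rerror/werror handling. */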
+int ide_handle_rw_error(IDEState *s, int error, int op)
{
bool is_read = (op & IDE_RETRY_READ) != 0;
BlockErrorAction action = blk_get_error_action(s->blk, is_read, error);
s->bus->error_status = op;
} else if (action == BLOCK_ERROR_ACTION_REPORT) {
block_acct_failed(blk_get_stats(s->blk), &s->acct);
- if (op & IDE_RETRY_DMA) {
+ if (IS_IDE_RETRY_DMA(op)) {
ide_dma_error(s);
+ } else if (IS_IDE_RETRY_ATAPI(op)) {
+ ide_atapi_io_error(s, -error);
} else {
ide_rw_error(s);
}
IDEState *s = opaque;
int n;
int64_t sector_num;
+ uint64_t offset;
bool stay_active = false;
if (ret == -ECANCELED) {
return;
}
if (ret < 0) {
- int op = IDE_RETRY_DMA;
-
- if (s->dma_cmd == IDE_DMA_READ)
- op |= IDE_RETRY_READ;
- else if (s->dma_cmd == IDE_DMA_TRIM)
- op |= IDE_RETRY_TRIM;
-
- if (ide_handle_rw_error(s, -ret, op)) {
+ if (ide_handle_rw_error(s, -ret, ide_dma_cmd_to_retry(s->dma_cmd))) {
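+ /* The error was reported or queued for retry: drop the aiocb reference
+ * and release the scatter/gather mapping before bailing out. */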
+ s->bus->dma->aiocb = NULL;
+ dma_buf_commit(s, 0);
return;
}
}
return;
}
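+ /* The dma_blk_* helpers take a byte offset and the transfer alignment
+ * (here BDRV_SECTOR_SIZE). */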
+ offset = sector_num << BDRV_SECTOR_BITS;
switch (s->dma_cmd) {
case IDE_DMA_READ:
- s->bus->dma->aiocb = dma_blk_read(s->blk, &s->sg, sector_num,
- ide_dma_cb, s);
+ s->bus->dma->aiocb = dma_blk_read(s->blk, &s->sg, offset,
+ BDRV_SECTOR_SIZE, ide_dma_cb, s);
break;
case IDE_DMA_WRITE:
- s->bus->dma->aiocb = dma_blk_write(s->blk, &s->sg, sector_num,
- ide_dma_cb, s);
+ s->bus->dma->aiocb = dma_blk_write(s->blk, &s->sg, offset,
+ BDRV_SECTOR_SIZE, ide_dma_cb, s);
break;
case IDE_DMA_TRIM:
- s->bus->dma->aiocb = dma_blk_io(s->blk, &s->sg, sector_num,
- ide_issue_trim, ide_dma_cb, s,
+ s->bus->dma->aiocb = dma_blk_io(blk_get_aio_context(s->blk),
+ &s->sg, offset, BDRV_SECTOR_SIZE,
+ ide_issue_trim, s->blk, ide_dma_cb, s,
DMA_DIRECTION_TO_DEVICE);
break;
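+ /* IDE_DMA_ATAPI is driven from the ATAPI code and never reaches this switch. */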
+ default:
+ abort();
}
return;
static void ide_sector_start_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
{
- s->status = READY_STAT | SEEK_STAT | DRQ_STAT | BUSY_STAT;
+ s->status = READY_STAT | SEEK_STAT | DRQ_STAT;
s->io_buffer_size = 0;
s->dma_cmd = dma_cmd;
void ide_start_dma(IDEState *s, BlockCompletionFunc *cb)
{
s->io_buffer_index = 0;
- s->bus->retry_unit = s->unit;
- s->bus->retry_sector_num = ide_get_sector(s);
- s->bus->retry_nsector = s->nsector;
+ ide_set_retry(s);
if (s->bus->dma->ops->start_dma) {
s->bus->dma->ops->start_dma(s->bus->dma, s, cb);
}
block_acct_start(blk_get_stats(s->blk), &s->acct,
n * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
- s->pio_aiocb = blk_aio_writev(s->blk, sector_num, &s->qiov, n,
- ide_sector_write_cb, s);
+ s->pio_aiocb = blk_aio_pwritev(s->blk, sector_num << BDRV_SECTOR_BITS,
+ &s->qiov, 0, ide_sector_write_cb, s);
}
static void ide_flush_cb(void *opaque, int ret)
}
s->status |= BUSY_STAT;
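+ /* Record retry state so a failed flush can be restarted (IDE_RETRY_FLUSH). */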
+ ide_set_retry(s);
block_acct_start(blk_get_stats(s->blk), &s->acct, 0, BLOCK_ACCT_FLUSH);
s->pio_aiocb = blk_aio_flush(s->blk, ide_flush_cb, s);
}
}
/* called when the inserted state of the media has changed */
-static void ide_cd_change_cb(void *opaque, bool load)
+static void ide_cd_change_cb(void *opaque, bool load, Error **errp)
{
IDEState *s = opaque;
uint64_t nb_sectors;
s->status = READY_STAT | SEEK_STAT;
s->atapi_dma = s->feature & 1;
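+ /* Tag the pending command as ATAPI DMA so that a failed transfer is
+ * retried through the ATAPI restart path. */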
+ if (s->atapi_dma) {
+ s->dma_cmd = IDE_DMA_ATAPI;
+ }
s->nsector = 1;
ide_transfer_start(s, s->io_buffer, ATAPI_PACKET_SIZE,
ide_atapi_cmd);
if (s->bus->dma->ops->restart) {
s->bus->dma->ops->restart(s->bus->dma);
}
- }
-
- if (error_status & IDE_RETRY_DMA) {
+ } else if (IS_IDE_RETRY_DMA(error_status)) {
if (error_status & IDE_RETRY_TRIM) {
ide_restart_dma(s, IDE_DMA_TRIM);
} else {
ide_restart_dma(s, is_read ? IDE_DMA_READ : IDE_DMA_WRITE);
}
- } else if (error_status & IDE_RETRY_PIO) {
+ } else if (IS_IDE_RETRY_PIO(error_status)) {
if (is_read) {
ide_sector_read(s);
} else {
}
} else if (error_status & IDE_RETRY_FLUSH) {
ide_flush_cache(s);
+ } else if (IS_IDE_RETRY_ATAPI(error_status)) {
+ assert(s->end_transfer_func == ide_atapi_cmd);
+ ide_atapi_dma_restart(s);
} else {
- /*
- * We've not got any bits to tell us about ATAPI - but
- * we do have the end_transfer_func that tells us what
- * we're trying to do.
- */
- if (s->end_transfer_func == ide_atapi_cmd) {
- ide_atapi_dma_restart(s);
- }
+ abort();
}
}
void ide_register_restart_cb(IDEBus *bus)
{
if (bus->dma->ops->restart_dma) {
- qemu_add_vm_change_state_handler(ide_restart_cb, bus);
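+ /* Keep the handle so the notifier can be removed when the bus goes away. */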
+ bus->vmstate = qemu_add_vm_change_state_handler(ide_restart_cb, bus);
}
}
bus->dma = &ide_dma_nop;
}
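+/* Free the per-drive timer and I/O buffers when the device is unrealized. */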
+void ide_exit(IDEState *s)
+{
+ timer_del(s->sector_write_timer);
+ timer_free(s->sector_write_timer);
+ qemu_vfree(s->smart_selftest_data);
+ qemu_vfree(s->io_buffer);
+}
+
static const MemoryRegionPortio ide_portio_list[] = {
{ 0, 8, 1, .read = ide_ioport_read, .write = ide_ioport_write },
{ 0, 1, 2, .read = ide_data_readw, .write = ide_data_writew },
{
/* ??? Assume only ISA and PCI configurations, and that the PCI-ISA
bridge has been set up properly to always register with ISA. */
- isa_register_portio_list(dev, iobase, ide_portio_list, bus, "ide");
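+ /* The PortioLists are kept on the bus so the I/O ranges can be
+ * unregistered again when the device is destroyed. */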
+ isa_register_portio_list(dev, &bus->portio_list,
+ iobase, ide_portio_list, bus, "ide");
if (iobase2) {
- isa_register_portio_list(dev, iobase2, ide_portio2_list, bus, "ide");
+ isa_register_portio_list(dev, &bus->portio2_list,
+ iobase2, ide_portio2_list, bus, "ide");
}
}
void ide_drive_get(DriveInfo **hd, int n)
{
int i;
- int highest_bus = drive_get_max_bus(IF_IDE) + 1;
- int max_devs = drive_get_max_devs(IF_IDE);
- int n_buses = max_devs ? (n / max_devs) : n;
-
- /*
- * Note: The number of actual buses available is not known.
- * We compute this based on the size of the DriveInfo* array, n.
- * If it is less than max_devs * <num_real_buses>,
- * We will stop looking for drives prematurely instead of overfilling
- * the array.
- */
-
- if (highest_bus > n_buses) {
- error_report("Too many IDE buses defined (%d > %d)",
- highest_bus, n_buses);
- exit(1);
- }
for (i = 0; i < n; i++) {
hd[i] = drive_get_by_index(IF_IDE, i);