*/
#include "qemu/osdep.h"
#include "hw/hw.h"
-#include "hw/i386/pc.h"
#include "hw/pci/pci.h"
#include "hw/isa/isa.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
+#include "sysemu/blockdev.h"
#include "sysemu/dma.h"
#include "hw/block/block.h"
#include "sysemu/block-backend.h"
#include "qemu/cutils.h"
#include "hw/ide/internal.h"
+#include "trace.h"
/* These values were based on a Seagate ST3500418AS but have been modified
to make more sense in QEMU */
{ 190, 0x03, 0x00, 0x45, 0x45, 0x1f, 0x00, 0x1f, 0x1f, 0x00, 0x00, 0x32},
};
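+/* Human-readable names for enum ide_dma_cmd, consumed by the DMA trace
+ * events below through the bounds-checked IDE_DMA_CMD_str() helper. */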
+const char *IDE_DMA_CMD_lookup[IDE_DMA__COUNT] = {
+ [IDE_DMA_READ] = "DMA READ",
+ [IDE_DMA_WRITE] = "DMA WRITE",
+ [IDE_DMA_TRIM] = "DMA TRIM",
+ [IDE_DMA_ATAPI] = "DMA ATAPI"
+};
+
+static const char *IDE_DMA_CMD_str(enum ide_dma_cmd enval)
+{
+ if ((unsigned)enval < IDE_DMA__COUNT) {
+ return IDE_DMA_CMD_lookup[enval];
+ }
+ return "DMA UNKNOWN CMD";
+}
+
static void ide_dummy_transfer_stop(IDEState *s);
static void padstr(char *str, const char *src, int len)
if (dev && dev->conf.discard_granularity) {
put_le16(p + 169, 1); /* TRIM support */
}
+ if (dev) {
+ put_le16(p + 217, dev->rotation_rate); /* Nominal media rotation rate */
+ }
ide_identify_size(s);
s->identify_set = 1;
}
}
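+/* Return true iff [sector, sector + nb_sectors) lies entirely within the
+ * drive's capacity as reported by blk_get_geometry(). */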
+static bool ide_sect_range_ok(IDEState *s,
+ uint64_t sector, uint64_t nb_sectors)
+{
+ uint64_t total_sectors;
+
+ blk_get_geometry(s->blk, &total_sectors);
+ if (sector > total_sectors || nb_sectors > total_sectors - sector) {
+ return false;
+ }
+ return true;
+}
+
typedef struct TrimAIOCB {
BlockAIOCB common;
- BlockBackend *blk;
+ IDEState *s;
QEMUBH *bh;
int ret;
QEMUIOVector *qiov;
BlockAIOCB *aiocb;
int i, j;
+ bool is_invalid;
} TrimAIOCB;
static void trim_aio_cancel(BlockAIOCB *acb)
{
TrimAIOCB *iocb = opaque;
- iocb->common.cb(iocb->common.opaque, iocb->ret);
-
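+ /* A request whose TRIM range check failed is completed with a DMA error
+ * instead of invoking the original completion callback. */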
+ if (iocb->is_invalid) {
+ ide_dma_error(iocb->s);
+ } else {
+ iocb->common.cb(iocb->common.opaque, iocb->ret);
+ }
qemu_bh_delete(iocb->bh);
iocb->bh = NULL;
qemu_aio_unref(iocb);
static void ide_issue_trim_cb(void *opaque, int ret)
{
TrimAIOCB *iocb = opaque;
+ IDEState *s = iocb->s;
+
if (ret >= 0) {
while (iocb->j < iocb->qiov->niov) {
int j = iocb->j;
continue;
}
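+ /* Reject ranges that extend past the end of the drive; the error is
+ * reported from the completion BH. */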
+ if (!ide_sect_range_ok(s, sector, count)) {
+ iocb->is_invalid = true;
+ goto done;
+ }
+
/* Got an entry! Submit and exit. */
- iocb->aiocb = blk_aio_discard(iocb->blk, sector, count,
- ide_issue_trim_cb, opaque);
+ iocb->aiocb = blk_aio_pdiscard(s->blk,
+ sector << BDRV_SECTOR_BITS,
+ count << BDRV_SECTOR_BITS,
+ ide_issue_trim_cb, opaque);
return;
}
iocb->ret = ret;
}
+done:
iocb->aiocb = NULL;
if (iocb->bh) {
qemu_bh_schedule(iocb->bh);
int64_t offset, QEMUIOVector *qiov,
BlockCompletionFunc *cb, void *cb_opaque, void *opaque)
{
- BlockBackend *blk = opaque;
+ IDEState *s = opaque;
TrimAIOCB *iocb;
- iocb = blk_aio_get(&trim_aiocb_info, blk, cb, cb_opaque);
- iocb->blk = blk;
+ iocb = blk_aio_get(&trim_aiocb_info, s->blk, cb, cb_opaque);
+ iocb->s = s;
iocb->bh = qemu_bh_new(ide_trim_bh_cb, iocb);
iocb->ret = 0;
iocb->qiov = qiov;
iocb->i = -1;
iocb->j = 0;
+ iocb->is_invalid = false;
ide_issue_trim_cb(iocb, 0);
return &iocb->common;
}
s->error = ABRT_ERR;
}
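+/* Remember (or clear) the unit, sector and count of the current request so
+ * it can be restarted later, e.g. after a rerror/werror stop. */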
+static void ide_set_retry(IDEState *s)
+{
+ s->bus->retry_unit = s->unit;
+ s->bus->retry_sector_num = ide_get_sector(s);
+ s->bus->retry_nsector = s->nsector;
+}
+
+static void ide_clear_retry(IDEState *s)
+{
+ s->bus->retry_unit = -1;
+ s->bus->retry_sector_num = 0;
+ s->bus->retry_nsector = 0;
+}
+
/* prepare data transfer and tell what to do after */
void ide_transfer_start(IDEState *s, uint8_t *buf, int size,
EndTransferFunc *end_transfer_func)
s->end_transfer_func = end_transfer_func;
s->data_ptr = buf;
s->data_end = buf + size;
+ ide_set_retry(s);
if (!(s->status & ERR_STAT)) {
s->status |= DRQ_STAT;
}
ide_set_irq(s->bus);
}
-static bool ide_sect_range_ok(IDEState *s,
- uint64_t sector, uint64_t nb_sectors)
-{
- uint64_t total_sectors;
-
- blk_get_geometry(s->blk, &total_sectors);
- if (sector > total_sectors || nb_sectors > total_sectors - sector) {
- return false;
- }
- return true;
-}
-
static void ide_buffered_readv_cb(void *opaque, int ret)
{
IDEBufferedRequest *req = opaque;
* write requests) pending and we can avoid to drain. */
QLIST_FOREACH(req, &s->buffered_requests, list) {
if (!req->orphaned) {
-#ifdef DEBUG_IDE
- printf("%s: invoking cb %p of buffered request %p with"
- " -ECANCELED\n", __func__, req->original_cb, req);
-#endif
+ trace_ide_cancel_dma_sync_buffered(req->original_cb, req);
req->original_cb(req->original_opaque, -ECANCELED);
}
req->orphaned = true;
* aio operation with preadv/pwritev.
*/
if (s->bus->dma->aiocb) {
-#ifdef DEBUG_IDE
- printf("%s: draining all remaining requests", __func__);
-#endif
+ trace_ide_cancel_dma_sync_remaining();
blk_drain(s->blk);
assert(s->bus->dma->aiocb == NULL);
}
n = s->req_nb_sectors;
}
-#if defined(DEBUG_IDE)
- printf("sector=%" PRId64 "\n", sector_num);
-#endif
+ trace_ide_sector_read(sector_num, n);
if (!ide_sect_range_ok(s, sector_num, n)) {
ide_rw_error(s);
void ide_set_inactive(IDEState *s, bool more)
{
s->bus->dma->aiocb = NULL;
- s->bus->retry_unit = -1;
- s->bus->retry_sector_num = 0;
- s->bus->retry_nsector = 0;
+ ide_clear_retry(s);
if (s->bus->dma->ops->set_inactive) {
s->bus->dma->ops->set_inactive(s->bus->dma, more);
}
}
if (ret < 0) {
if (ide_handle_rw_error(s, -ret, ide_dma_cmd_to_retry(s->dma_cmd))) {
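+ /* The error has been handled; drop the aiocb reference and release the
+ * scatter/gather mapping before returning. */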
+ s->bus->dma->aiocb = NULL;
+ dma_buf_commit(s, 0);
return;
}
}
goto eot;
}
-#ifdef DEBUG_AIO
- printf("ide_dma_cb: sector_num=%" PRId64 " n=%d, cmd_cmd=%d\n",
- sector_num, n, s->dma_cmd);
-#endif
+ trace_ide_dma_cb(s, sector_num, n, IDE_DMA_CMD_str(s->dma_cmd));
if ((s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) &&
!ide_sect_range_ok(s, sector_num, n)) {
switch (s->dma_cmd) {
case IDE_DMA_READ:
s->bus->dma->aiocb = dma_blk_read(s->blk, &s->sg, offset,
- ide_dma_cb, s);
+ BDRV_SECTOR_SIZE, ide_dma_cb, s);
break;
case IDE_DMA_WRITE:
s->bus->dma->aiocb = dma_blk_write(s->blk, &s->sg, offset,
- ide_dma_cb, s);
+ BDRV_SECTOR_SIZE, ide_dma_cb, s);
break;
case IDE_DMA_TRIM:
s->bus->dma->aiocb = dma_blk_io(blk_get_aio_context(s->blk),
- &s->sg, offset,
- ide_issue_trim, s->blk, ide_dma_cb, s,
+ &s->sg, offset, BDRV_SECTOR_SIZE,
+ ide_issue_trim, s, ide_dma_cb, s,
DMA_DIRECTION_TO_DEVICE);
break;
default:
static void ide_sector_start_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
{
- s->status = READY_STAT | SEEK_STAT | DRQ_STAT | BUSY_STAT;
+ s->status = READY_STAT | SEEK_STAT | DRQ_STAT;
s->io_buffer_size = 0;
s->dma_cmd = dma_cmd;
void ide_start_dma(IDEState *s, BlockCompletionFunc *cb)
{
s->io_buffer_index = 0;
- s->bus->retry_unit = s->unit;
- s->bus->retry_sector_num = ide_get_sector(s);
- s->bus->retry_nsector = s->nsector;
+ ide_set_retry(s);
if (s->bus->dma->ops->start_dma) {
s->bus->dma->ops->start_dma(s->bus->dma, s, cb);
}
s->status = READY_STAT | SEEK_STAT | BUSY_STAT;
sector_num = ide_get_sector(s);
-#if defined(DEBUG_IDE)
- printf("sector=%" PRId64 "\n", sector_num);
-#endif
+
n = s->nsector;
if (n > s->req_nb_sectors) {
n = s->req_nb_sectors;
}
+ trace_ide_sector_write(sector_num, n);
+
if (!ide_sect_range_ok(s, sector_num, n)) {
ide_rw_error(s);
block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_WRITE);
}
s->status |= BUSY_STAT;
+ ide_set_retry(s);
block_acct_start(blk_get_stats(s->blk), &s->acct, 0, BLOCK_ACCT_FLUSH);
- s->pio_aiocb = blk_aio_flush(s->blk, ide_flush_cb, s);
+
+ if (blk_bs(s->blk)) {
+ s->pio_aiocb = blk_aio_flush(s->blk, ide_flush_cb, s);
+ } else {
+ /* XXX blk_aio_flush() crashes when blk_bs(blk) is NULL, remove this
+ * temporary workaround when blk_aio_*() functions handle NULL blk_bs.
+ */
+ ide_flush_cb(s, 0);
+ }
}
static void ide_cfata_metadata_inquiry(IDEState *s)
}
/* called when the inserted state of the media has changed */
-static void ide_cd_change_cb(void *opaque, bool load)
+static void ide_cd_change_cb(void *opaque, bool load, Error **errp)
{
IDEState *s = opaque;
uint64_t nb_sectors;
bus->ifs[1].select &= ~(1 << 7);
}
+/* IOport [W]rite [R]egisters */
+enum ATA_IOPORT_WR {
+ ATA_IOPORT_WR_DATA = 0,
+ ATA_IOPORT_WR_FEATURES = 1,
+ ATA_IOPORT_WR_SECTOR_COUNT = 2,
+ ATA_IOPORT_WR_SECTOR_NUMBER = 3,
+ ATA_IOPORT_WR_CYLINDER_LOW = 4,
+ ATA_IOPORT_WR_CYLINDER_HIGH = 5,
+ ATA_IOPORT_WR_DEVICE_HEAD = 6,
+ ATA_IOPORT_WR_COMMAND = 7,
+ ATA_IOPORT_WR_NUM_REGISTERS,
+};
+
+const char *ATA_IOPORT_WR_lookup[ATA_IOPORT_WR_NUM_REGISTERS] = {
+ [ATA_IOPORT_WR_DATA] = "Data",
+ [ATA_IOPORT_WR_FEATURES] = "Features",
+ [ATA_IOPORT_WR_SECTOR_COUNT] = "Sector Count",
+ [ATA_IOPORT_WR_SECTOR_NUMBER] = "Sector Number",
+ [ATA_IOPORT_WR_CYLINDER_LOW] = "Cylinder Low",
+ [ATA_IOPORT_WR_CYLINDER_HIGH] = "Cylinder High",
+ [ATA_IOPORT_WR_DEVICE_HEAD] = "Device/Head",
+ [ATA_IOPORT_WR_COMMAND] = "Command"
+};
+
void ide_ioport_write(void *opaque, uint32_t addr, uint32_t val)
{
IDEBus *bus = opaque;
+ IDEState *s = idebus_active_if(bus);
+ int reg_num = addr & 7;
-#ifdef DEBUG_IDE
- printf("IDE: write addr=0x%x val=0x%02x\n", addr, val);
-#endif
-
- addr &= 7;
+ trace_ide_ioport_write(addr, ATA_IOPORT_WR_lookup[reg_num], val, bus, s);
/* ignore writes to command block while busy with previous command */
- if (addr != 7 && (idebus_active_if(bus)->status & (BUSY_STAT|DRQ_STAT)))
+ if (reg_num != 7 && (s->status & (BUSY_STAT|DRQ_STAT))) {
return;
+ }
- switch(addr) {
+ switch (reg_num) {
case 0:
break;
- case 1:
- ide_clear_hob(bus);
+ case ATA_IOPORT_WR_FEATURES:
+ ide_clear_hob(bus);
/* NOTE: data is written to the two drives */
- bus->ifs[0].hob_feature = bus->ifs[0].feature;
- bus->ifs[1].hob_feature = bus->ifs[1].feature;
+ bus->ifs[0].hob_feature = bus->ifs[0].feature;
+ bus->ifs[1].hob_feature = bus->ifs[1].feature;
bus->ifs[0].feature = val;
bus->ifs[1].feature = val;
break;
- case 2:
+ case ATA_IOPORT_WR_SECTOR_COUNT:
ide_clear_hob(bus);
bus->ifs[0].hob_nsector = bus->ifs[0].nsector;
bus->ifs[1].hob_nsector = bus->ifs[1].nsector;
bus->ifs[0].nsector = val;
bus->ifs[1].nsector = val;
break;
- case 3:
+ case ATA_IOPORT_WR_SECTOR_NUMBER:
ide_clear_hob(bus);
bus->ifs[0].hob_sector = bus->ifs[0].sector;
bus->ifs[1].hob_sector = bus->ifs[1].sector;
bus->ifs[0].sector = val;
bus->ifs[1].sector = val;
break;
- case 4:
+ case ATA_IOPORT_WR_CYLINDER_LOW:
ide_clear_hob(bus);
bus->ifs[0].hob_lcyl = bus->ifs[0].lcyl;
bus->ifs[1].hob_lcyl = bus->ifs[1].lcyl;
bus->ifs[0].lcyl = val;
bus->ifs[1].lcyl = val;
break;
- case 5:
+ case ATA_IOPORT_WR_CYLINDER_HIGH:
ide_clear_hob(bus);
bus->ifs[0].hob_hcyl = bus->ifs[0].hcyl;
bus->ifs[1].hob_hcyl = bus->ifs[1].hcyl;
bus->ifs[0].hcyl = val;
bus->ifs[1].hcyl = val;
break;
- case 6:
+ case ATA_IOPORT_WR_DEVICE_HEAD:
/* FIXME: HOB readback uses bit 7 */
bus->ifs[0].select = (val & ~0x10) | 0xa0;
bus->ifs[1].select = (val | 0x10) | 0xa0;
bus->unit = (val >> 4) & 1;
break;
default:
- case 7:
+ case ATA_IOPORT_WR_COMMAND:
/* command */
ide_exec_cmd(bus, val);
break;
static void ide_reset(IDEState *s)
{
-#ifdef DEBUG_IDE
- printf("ide: reset\n");
-#endif
+ trace_ide_reset(s);
if (s->pio_aiocb) {
blk_aio_cancel(s->pio_aiocb);
IDEState *s;
bool complete;
-#if defined(DEBUG_IDE)
- printf("ide: CMD=%02x\n", val);
-#endif
s = idebus_active_if(bus);
+ trace_ide_exec_cmd(bus, s, val);
+
/* ignore commands to non existent slave */
if (s != bus->ifs && !s->blk) {
return;
}
}
-uint32_t ide_ioport_read(void *opaque, uint32_t addr1)
+/* IOport [R]ead [R]egisters */
+enum ATA_IOPORT_RR {
+ ATA_IOPORT_RR_DATA = 0,
+ ATA_IOPORT_RR_ERROR = 1,
+ ATA_IOPORT_RR_SECTOR_COUNT = 2,
+ ATA_IOPORT_RR_SECTOR_NUMBER = 3,
+ ATA_IOPORT_RR_CYLINDER_LOW = 4,
+ ATA_IOPORT_RR_CYLINDER_HIGH = 5,
+ ATA_IOPORT_RR_DEVICE_HEAD = 6,
+ ATA_IOPORT_RR_STATUS = 7,
+ ATA_IOPORT_RR_NUM_REGISTERS,
+};
+
+const char *ATA_IOPORT_RR_lookup[ATA_IOPORT_RR_NUM_REGISTERS] = {
+ [ATA_IOPORT_RR_DATA] = "Data",
+ [ATA_IOPORT_RR_ERROR] = "Error",
+ [ATA_IOPORT_RR_SECTOR_COUNT] = "Sector Count",
+ [ATA_IOPORT_RR_SECTOR_NUMBER] = "Sector Number",
+ [ATA_IOPORT_RR_CYLINDER_LOW] = "Cylinder Low",
+ [ATA_IOPORT_RR_CYLINDER_HIGH] = "Cylinder High",
+ [ATA_IOPORT_RR_DEVICE_HEAD] = "Device/Head",
+ [ATA_IOPORT_RR_STATUS] = "Status"
+};
+
+uint32_t ide_ioport_read(void *opaque, uint32_t addr)
{
IDEBus *bus = opaque;
IDEState *s = idebus_active_if(bus);
- uint32_t addr;
+ uint32_t reg_num;
int ret, hob;
- addr = addr1 & 7;
+ reg_num = addr & 7;
/* FIXME: HOB readback uses bit 7, but it's always set right now */
//hob = s->select & (1 << 7);
hob = 0;
- switch(addr) {
- case 0:
+ switch (reg_num) {
+ case ATA_IOPORT_RR_DATA:
ret = 0xff;
break;
- case 1:
+ case ATA_IOPORT_RR_ERROR:
if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
(s != bus->ifs && !s->blk)) {
ret = 0;
ret = s->hob_feature;
}
break;
- case 2:
+ case ATA_IOPORT_RR_SECTOR_COUNT:
if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
ret = 0;
} else if (!hob) {
ret = s->hob_nsector;
}
break;
- case 3:
+ case ATA_IOPORT_RR_SECTOR_NUMBER:
if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
ret = 0;
} else if (!hob) {
ret = s->hob_sector;
}
break;
- case 4:
+ case ATA_IOPORT_RR_CYLINDER_LOW:
if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
ret = 0;
} else if (!hob) {
ret = s->hob_lcyl;
}
break;
- case 5:
+ case ATA_IOPORT_RR_CYLINDER_HIGH:
if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
ret = 0;
} else if (!hob) {
ret = s->hob_hcyl;
}
break;
- case 6:
+ case ATA_IOPORT_RR_DEVICE_HEAD:
if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
ret = 0;
} else {
}
break;
default:
- case 7:
+ case ATA_IOPORT_RR_STATUS:
if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
(s != bus->ifs && !s->blk)) {
ret = 0;
qemu_irq_lower(bus->irq);
break;
}
-#ifdef DEBUG_IDE
- printf("ide: read addr=0x%x val=%02x\n", addr1, ret);
-#endif
+
+ trace_ide_ioport_read(addr, ATA_IOPORT_RR_lookup[reg_num], ret, bus, s);
return ret;
}
} else {
ret = s->status;
}
-#ifdef DEBUG_IDE
- printf("ide: read status addr=0x%x val=%02x\n", addr, ret);
-#endif
+
+ trace_ide_status_read(addr, ret, bus, s);
return ret;
}
IDEState *s;
int i;
-#ifdef DEBUG_IDE
- printf("ide: write control addr=0x%x val=%02x\n", addr, val);
-#endif
+ trace_ide_cmd_write(addr, val, bus);
+
/* common for both drives */
if (!(bus->cmd & IDE_CMD_RESET) &&
(val & IDE_CMD_RESET)) {
IDEState *s = idebus_active_if(bus);
uint8_t *p;
+ trace_ide_data_writew(addr, val, bus, s);
+
/* PIO data access allowed only when DRQ bit is set. The result of a write
* during PIO out is indeterminate, just ignore it. */
if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
s->status &= ~DRQ_STAT;
s->end_transfer_func(s);
}
+
+ trace_ide_data_readw(addr, ret, bus, s);
return ret;
}
IDEState *s = idebus_active_if(bus);
uint8_t *p;
+ trace_ide_data_writel(addr, val, bus, s);
+
/* PIO data access allowed only when DRQ bit is set. The result of a write
* during PIO out is indeterminate, just ignore it. */
if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
/* PIO data access allowed only when DRQ bit is set. The result of a read
* during PIO in is indeterminate, return 0 and don't move forward. */
if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
- return 0;
+ ret = 0;
+ goto out;
}
p = s->data_ptr;
s->status &= ~DRQ_STAT;
s->end_transfer_func(s);
}
+
+out:
+ trace_ide_data_readl(addr, ret, bus, s);
return ret;
}
/* pending async DMA */
if (bus->dma->aiocb) {
-#ifdef DEBUG_AIO
- printf("aio_cancel\n");
-#endif
+ trace_ide_bus_reset_aio();
blk_aio_cancel(bus->dma->aiocb);
bus->dma->aiocb = NULL;
}
const char *version, const char *serial, const char *model,
uint64_t wwn,
uint32_t cylinders, uint32_t heads, uint32_t secs,
- int chs_trans)
+ int chs_trans, Error **errp)
{
uint64_t nb_sectors;
blk_set_guest_block_size(blk, 2048);
} else {
if (!blk_is_inserted(s->blk)) {
- error_report("Device needs media, but drive is empty");
+ error_setg(errp, "Device needs media, but drive is empty");
return -1;
}
if (blk_is_read_only(blk)) {
- error_report("Can't use a read-only drive");
+ error_setg(errp, "Can't use a read-only drive");
return -1;
}
blk_set_dev_ops(blk, &ide_hd_block_ops, s);
void ide_register_restart_cb(IDEBus *bus)
{
if (bus->dma->ops->restart_dma) {
- qemu_add_vm_change_state_handler(ide_restart_cb, bus);
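+ /* Keep the returned handler in the bus so it can later be removed
+ * (e.g. when the bus is torn down). */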
+ bus->vmstate = qemu_add_vm_change_state_handler(ide_restart_cb, bus);
}
}
bus->dma = &ide_dma_nop;
}
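+/* Counterpart to ide_init1(): release the sector-write timer and the
+ * drive's SMART self-test and I/O buffers. */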
+void ide_exit(IDEState *s)
+{
+ timer_del(s->sector_write_timer);
+ timer_free(s->sector_write_timer);
+ qemu_vfree(s->smart_selftest_data);
+ qemu_vfree(s->io_buffer);
+}
+
static const MemoryRegionPortio ide_portio_list[] = {
{ 0, 8, 1, .read = ide_ioport_read, .write = ide_ioport_write },
{ 0, 1, 2, .read = ide_data_readw, .write = ide_data_writew },
{
/* ??? Assume only ISA and PCI configurations, and that the PCI-ISA
bridge has been setup properly to always register with ISA. */
- isa_register_portio_list(dev, iobase, ide_portio_list, bus, "ide");
+ isa_register_portio_list(dev, &bus->portio_list,
+ iobase, ide_portio_list, bus, "ide");
if (iobase2) {
- isa_register_portio_list(dev, iobase2, ide_portio2_list, bus, "ide");
+ isa_register_portio_list(dev, &bus->portio2_list,
+ iobase2, ide_portio2_list, bus, "ide");
}
}
return 0;
}
-static void ide_drive_pio_pre_save(void *opaque)
+static int ide_drive_pio_pre_save(void *opaque)
{
IDEState *s = opaque;
int idx;
} else {
s->end_transfer_fn_idx = idx;
}
+
+ return 0;
}
static bool ide_drive_pio_state_needed(void *opaque)
void ide_drive_get(DriveInfo **hd, int n)
{
int i;
- int highest_bus = drive_get_max_bus(IF_IDE) + 1;
- int max_devs = drive_get_max_devs(IF_IDE);
- int n_buses = max_devs ? (n / max_devs) : n;
-
- /*
- * Note: The number of actual buses available is not known.
- * We compute this based on the size of the DriveInfo* array, n.
- * If it is less than max_devs * <num_real_buses>,
- * We will stop looking for drives prematurely instead of overfilling
- * the array.
- */
-
- if (highest_bus > n_buses) {
- error_report("Too many IDE buses defined (%d > %d)",
- highest_bus, n_buses);
- exit(1);
- }
for (i = 0; i < n; i++) {
hd[i] = drive_get_by_index(IF_IDE, i);