* THE SOFTWARE.
*/
#include <hw/hw.h>
-#include <hw/pc.h>
+#include <hw/i386/pc.h>
#include <hw/pci/pci.h>
-#include <hw/isa.h>
-#include "block/block.h"
+#include <hw/isa/isa.h>
+#include "sysemu/block-backend.h"
#include "sysemu/dma.h"
-
+#include "qemu/error-report.h"
#include <hw/ide/pci.h>
#define BMDMA_PAGE_SIZE 4096
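+/* Status bits that used to live in the BMDMA status register but have
+ * moved to IDEBus.error_status; ide_bmdma_pre_save() folds them back
+ * into the migrated status byte for older QEMU versions. */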
+#define BM_MIGRATION_COMPAT_STATUS_BITS \
+ (IDE_RETRY_DMA | IDE_RETRY_PIO | \
+ IDE_RETRY_READ | IDE_RETRY_FLUSH)
+
static void bmdma_start_dma(IDEDMA *dma, IDEState *s,
- BlockDriverCompletionFunc *dma_cb)
+ BlockCompletionFunc *dma_cb)
{
BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
- bm->unit = s->unit;
bm->dma_cb = dma_cb;
bm->cur_prd_last = 0;
bm->cur_prd_addr = 0;
bm->cur_prd_len = 0;
- bm->sector_num = ide_get_sector(s);
- bm->nsector = s->nsector;
if (bm->status & BM_STATUS_DMAING) {
bm->dma_cb(bmdma_active_if(bm), 0);
}
}
-/* return 0 if buffer completed */
-static int bmdma_prepare_buf(IDEDMA *dma, int is_write)
+/**
+ * Prepare an sglist based on available PRDs.
+ * @limit: How many bytes to prepare total.
+ *
+ * Returns the number of bytes prepared, -1 on error.
+ * IDEState.io_buffer_size will contain the number of bytes described
+ * by the PRDs, whether or not we added them to the sglist.
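+ *
+ * For example, if the PRD table describes 1 MiB of guest memory but
+ * @limit is 512 KiB, the sglist receives (and the function returns)
+ * 512 KiB, while io_buffer_size is left at the full 1 MiB.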
+ */
+static int32_t bmdma_prepare_buf(IDEDMA *dma, int32_t limit)
{
BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
IDEState *s = bmdma_active_if(bm);
+ PCIDevice *pci_dev = PCI_DEVICE(bm->pci_dev);
struct {
uint32_t addr;
uint32_t size;
} prd;
int l, len;
- pci_dma_sglist_init(&s->sg, &bm->pci_dev->dev,
+ pci_dma_sglist_init(&s->sg, pci_dev,
s->nsector / (BMDMA_PAGE_SIZE / 512) + 1);
s->io_buffer_size = 0;
for(;;) {
if (bm->cur_prd_len == 0) {
/* end of table (with a fail safe of one page) */
if (bm->cur_prd_last ||
- (bm->cur_addr - bm->addr) >= BMDMA_PAGE_SIZE)
- return s->io_buffer_size != 0;
- pci_dma_read(&bm->pci_dev->dev, bm->cur_addr, &prd, 8);
+ (bm->cur_addr - bm->addr) >= BMDMA_PAGE_SIZE) {
+ return s->sg.size;
+ }
+ pci_dma_read(pci_dev, bm->cur_addr, &prd, 8);
bm->cur_addr += 8;
prd.addr = le32_to_cpu(prd.addr);
prd.size = le32_to_cpu(prd.size);
}
l = bm->cur_prd_len;
if (l > 0) {
- qemu_sglist_add(&s->sg, bm->cur_prd_addr, l);
+ uint64_t sg_len;
+
+ /* Don't add extra bytes to the SGList; consume any remaining
+ * PRDs from the guest, but ignore them. */
+ sg_len = MIN(limit - s->sg.size, bm->cur_prd_len);
+ if (sg_len) {
+ qemu_sglist_add(&s->sg, bm->cur_prd_addr, sg_len);
+ }
+
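+            /* The PRD itself is consumed in full either way, so
+             * io_buffer_size counts every byte the guest described,
+             * not just the bytes added to the sglist above. */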
bm->cur_prd_addr += l;
bm->cur_prd_len -= l;
s->io_buffer_size += l;
}
}
- return 1;
+
+ qemu_sglist_destroy(&s->sg);
+ s->io_buffer_size = 0;
+ return -1;
}
/* return 0 if buffer completed */
static int bmdma_rw_buf(IDEDMA *dma, int is_write)
{
BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
IDEState *s = bmdma_active_if(bm);
+ PCIDevice *pci_dev = PCI_DEVICE(bm->pci_dev);
struct {
uint32_t addr;
uint32_t size;
if (bm->cur_prd_last ||
(bm->cur_addr - bm->addr) >= BMDMA_PAGE_SIZE)
return 0;
- pci_dma_read(&bm->pci_dev->dev, bm->cur_addr, &prd, 8);
+ pci_dma_read(pci_dev, bm->cur_addr, &prd, 8);
bm->cur_addr += 8;
prd.addr = le32_to_cpu(prd.addr);
prd.size = le32_to_cpu(prd.size);
l = bm->cur_prd_len;
if (l > 0) {
if (is_write) {
- pci_dma_write(&bm->pci_dev->dev, bm->cur_prd_addr,
+ pci_dma_write(pci_dev, bm->cur_prd_addr,
s->io_buffer + s->io_buffer_index, l);
} else {
- pci_dma_read(&bm->pci_dev->dev, bm->cur_prd_addr,
+ pci_dma_read(pci_dev, bm->cur_prd_addr,
s->io_buffer + s->io_buffer_index, l);
}
bm->cur_prd_addr += l;
return 1;
}
-static int bmdma_set_unit(IDEDMA *dma, int unit)
-{
- BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
- bm->unit = unit;
-
- return 0;
-}
-
-static int bmdma_add_status(IDEDMA *dma, int status)
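+/* Deactivate the DMA engine for the current request.  With @more set,
+ * the transfer is expected to continue, so BM_STATUS_DMAING stays set. */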
+static void bmdma_set_inactive(IDEDMA *dma, bool more)
{
BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
- bm->status |= status;
- return 0;
-}
-
-static int bmdma_set_inactive(IDEDMA *dma)
-{
- BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
-
- bm->status &= ~BM_STATUS_DMAING;
bm->dma_cb = NULL;
- bm->unit = -1;
-
- return 0;
-}
-
-static void bmdma_restart_dma(BMDMAState *bm, enum ide_dma_cmd dma_cmd)
-{
- IDEState *s = bmdma_active_if(bm);
-
- ide_set_sector(s, bm->sector_num);
- s->io_buffer_index = 0;
- s->io_buffer_size = 0;
- s->nsector = bm->nsector;
- s->dma_cmd = dma_cmd;
- bm->cur_addr = bm->addr;
- bm->dma_cb = ide_dma_cb;
- bmdma_start_dma(&bm->dma, s, bm->dma_cb);
-}
-
-/* TODO This should be common IDE code */
-static void bmdma_restart_bh(void *opaque)
-{
- BMDMAState *bm = opaque;
- IDEBus *bus = bm->bus;
- bool is_read;
- int error_status;
-
- qemu_bh_delete(bm->bh);
- bm->bh = NULL;
-
- if (bm->unit == (uint8_t) -1) {
- return;
- }
-
- is_read = (bus->error_status & BM_STATUS_RETRY_READ) != 0;
-
- /* The error status must be cleared before resubmitting the request: The
- * request may fail again, and this case can only be distinguished if the
- * called function can set a new error status. */
- error_status = bus->error_status;
- bus->error_status = 0;
-
- if (error_status & BM_STATUS_DMA_RETRY) {
- if (error_status & BM_STATUS_RETRY_TRIM) {
- bmdma_restart_dma(bm, IDE_DMA_TRIM);
- } else {
- bmdma_restart_dma(bm, is_read ? IDE_DMA_READ : IDE_DMA_WRITE);
- }
- } else if (error_status & BM_STATUS_PIO_RETRY) {
- if (is_read) {
- ide_sector_read(bmdma_active_if(bm));
- } else {
- ide_sector_write(bmdma_active_if(bm));
- }
- } else if (error_status & BM_STATUS_RETRY_FLUSH) {
- ide_flush_cache(bmdma_active_if(bm));
+ if (more) {
+ bm->status |= BM_STATUS_DMAING;
+ } else {
+ bm->status &= ~BM_STATUS_DMAING;
}
}
-static void bmdma_restart_cb(void *opaque, int running, RunState state)
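+/* Request restarting is handled by common IDE code via the IDEBus retry_*
+ * fields now; on restart the BMDMA engine only rewinds its PRD pointer. */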
+static void bmdma_restart_dma(IDEDMA *dma)
{
- IDEDMA *dma = opaque;
BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
- if (!running)
- return;
-
- if (!bm->bh) {
- bm->bh = qemu_bh_new(bmdma_restart_bh, &bm->dma);
- qemu_bh_schedule(bm->bh);
- }
+ bm->cur_addr = bm->addr;
}
static void bmdma_cancel(BMDMAState *bm)
{
if (bm->status & BM_STATUS_DMAING) {
/* cancel DMA request */
- bmdma_set_inactive(&bm->dma);
+ bmdma_set_inactive(&bm->dma, false);
}
}
-static int bmdma_reset(IDEDMA *dma)
+static void bmdma_reset(IDEDMA *dma)
{
BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
bm->cur_prd_last = 0;
bm->cur_prd_addr = 0;
bm->cur_prd_len = 0;
- bm->sector_num = 0;
- bm->nsector = 0;
-
- return 0;
-}
-
-static int bmdma_start_transfer(IDEDMA *dma)
-{
- return 0;
}
static void bmdma_irq(void *opaque, int n, int level)
    /* Ignore writes to SSBM that keep the old value */
if ((val & BM_CMD_START) != (bm->cmd & BM_CMD_START)) {
if (!(val & BM_CMD_START)) {
+            /* First invoke the callbacks of all buffered requests
+             * and flag those requests as orphaned. Ideally there
+             * are no unbuffered (Scatter Gather DMA or write)
+             * requests pending, so we can avoid draining. */
+ IDEBufferedRequest *req;
+ IDEState *s = idebus_active_if(bm->bus);
+ QLIST_FOREACH(req, &s->buffered_requests, list) {
+ if (!req->orphaned) {
+#ifdef DEBUG_IDE
+ printf("%s: invoking cb %p of buffered request %p with"
+ " -ECANCELED\n", __func__, req->original_cb, req);
+#endif
+ req->original_cb(req->original_opaque, -ECANCELED);
+ }
+ req->orphaned = true;
+ }
            /*
             * We can't cancel Scatter Gather DMA in the middle of the
             * operation or a partial (not full) DMA transfer would reach
             * the storage, so we wait for completion instead (we behave
             * as if the DMA had completed by the time the guest tried to
             * cancel it with BM_CMD_START cleared).
             *
             * In the future we will be able to safely cancel the I/O if
             * the whole DMA operation is submitted to disk with a single
             * aio operation with preadv/pwritev.
             */
if (bm->bus->dma->aiocb) {
- bdrv_drain_all();
+#ifdef DEBUG_IDE
+                printf("%s: draining all remaining requests\n", __func__);
+#endif
+ blk_drain_all();
assert(bm->bus->dma->aiocb == NULL);
}
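+            /* The guest cleared SSBM: the DMA engine is now stopped. */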
+ bm->status &= ~BM_STATUS_DMAING;
} else {
bm->cur_addr = bm->addr;
if (!(bm->status & BM_STATUS_DMAING)) {
BMDMAState *bm = opaque;
uint8_t abused_bits = BM_MIGRATION_COMPAT_STATUS_BITS;
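+
+    /* Mirror the common-IDE retry state into the BMDMAState fields where
+     * older QEMU versions expect to find it in the migration stream. */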
+ bm->migration_retry_unit = bm->bus->retry_unit;
+ bm->migration_retry_sector_num = bm->bus->retry_sector_num;
+ bm->migration_retry_nsector = bm->bus->retry_nsector;
bm->migration_compat_status =
(bm->status & ~abused_bits) | (bm->bus->error_status & abused_bits);
}
bm->status = bm->migration_compat_status & ~abused_bits;
bm->bus->error_status |= bm->migration_compat_status & abused_bits;
}
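+
+    /* A non-zero error_status means a request was in flight when the VM
+     * state was saved; only then are the retry parameters meaningful. */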
+ if (bm->bus->error_status) {
+ bm->bus->retry_sector_num = bm->migration_retry_sector_num;
+ bm->bus->retry_nsector = bm->migration_retry_nsector;
+ bm->bus->retry_unit = bm->migration_retry_unit;
+ }
return 0;
}
.name = "ide bmdma_current",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
- .fields = (VMStateField []) {
+ .needed = ide_bmdma_current_needed,
+ .fields = (VMStateField[]) {
VMSTATE_UINT32(cur_addr, BMDMAState),
VMSTATE_UINT32(cur_prd_last, BMDMAState),
VMSTATE_UINT32(cur_prd_addr, BMDMAState),
}
};
-const VMStateDescription vmstate_bmdma_status = {
+static const VMStateDescription vmstate_bmdma_status = {
    .name = "ide bmdma/status",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
- .fields = (VMStateField []) {
+ .needed = ide_bmdma_status_needed,
+ .fields = (VMStateField[]) {
VMSTATE_UINT8(status, BMDMAState),
VMSTATE_END_OF_LIST()
}
.name = "ide bmdma",
.version_id = 3,
.minimum_version_id = 0,
- .minimum_version_id_old = 0,
.pre_save = ide_bmdma_pre_save,
- .fields = (VMStateField []) {
+ .fields = (VMStateField[]) {
VMSTATE_UINT8(cmd, BMDMAState),
VMSTATE_UINT8(migration_compat_status, BMDMAState),
VMSTATE_UINT32(addr, BMDMAState),
- VMSTATE_INT64(sector_num, BMDMAState),
- VMSTATE_UINT32(nsector, BMDMAState),
- VMSTATE_UINT8(unit, BMDMAState),
+ VMSTATE_INT64(migration_retry_sector_num, BMDMAState),
+ VMSTATE_UINT32(migration_retry_nsector, BMDMAState),
+ VMSTATE_UINT8(migration_retry_unit, BMDMAState),
VMSTATE_END_OF_LIST()
},
- .subsections = (VMStateSubsection []) {
- {
- .vmsd = &vmstate_bmdma_current,
- .needed = ide_bmdma_current_needed,
- }, {
- .vmsd = &vmstate_bmdma_status,
- .needed = ide_bmdma_status_needed,
- }, {
- /* empty */
- }
+ .subsections = (const VMStateDescription*[]) {
+ &vmstate_bmdma_current,
+ &vmstate_bmdma_status,
+ NULL
}
};
for(i = 0; i < 2; i++) {
    /* current versions always store 0/1, but older versions
       stored bigger values. We only need the last bit */
- d->bmdma[i].unit &= 1;
+ d->bmdma[i].migration_retry_unit &= 1;
ide_bmdma_post_load(&d->bmdma[i], -1);
}
.name = "ide",
.version_id = 3,
.minimum_version_id = 0,
- .minimum_version_id_old = 0,
.post_load = ide_pci_post_load,
- .fields = (VMStateField []) {
- VMSTATE_PCI_DEVICE(dev, PCIIDEState),
+ .fields = (VMStateField[]) {
+ VMSTATE_PCI_DEVICE(parent_obj, PCIIDEState),
VMSTATE_STRUCT_ARRAY(bmdma, PCIIDEState, 2, 0,
vmstate_bmdma, BMDMAState),
VMSTATE_IDE_BUS_ARRAY(bus, PCIIDEState, 2),
void pci_ide_create_devs(PCIDevice *dev, DriveInfo **hd_table)
{
- PCIIDEState *d = DO_UPCAST(PCIIDEState, dev, dev);
+ PCIIDEState *d = PCI_IDE(dev);
static const int bus[4] = { 0, 0, 1, 1 };
static const int unit[4] = { 0, 1, 0, 1 };
int i;
static const struct IDEDMAOps bmdma_ops = {
.start_dma = bmdma_start_dma,
- .start_transfer = bmdma_start_transfer,
.prepare_buf = bmdma_prepare_buf,
.rw_buf = bmdma_rw_buf,
- .set_unit = bmdma_set_unit,
- .add_status = bmdma_add_status,
+ .restart_dma = bmdma_restart_dma,
.set_inactive = bmdma_set_inactive,
- .restart_cb = bmdma_restart_cb,
.reset = bmdma_reset,
};
void bmdma_init(IDEBus *bus, BMDMAState *bm, PCIIDEState *d)
{
- qemu_irq *irq;
-
if (bus->dma == &bm->dma) {
return;
}
bm->dma.ops = &bmdma_ops;
bus->dma = &bm->dma;
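+    /* Interpose bmdma_irq between the bus and its original IRQ line so
+     * the BMDMA engine sees the drive's interrupts before forwarding
+     * them via bm->irq. */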
bm->irq = bus->irq;
- irq = qemu_allocate_irqs(bmdma_irq, bm, 1);
- bus->irq = *irq;
+ bus->irq = qemu_allocate_irq(bmdma_irq, bm, 0);
bm->pci_dev = d;
}
+
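+/* Abstract base type: the concrete PCI IDE controller models subclass
+ * this and share PCIIDEState. */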
+static const TypeInfo pci_ide_type_info = {
+ .name = TYPE_PCI_IDE,
+ .parent = TYPE_PCI_DEVICE,
+ .instance_size = sizeof(PCIIDEState),
+ .abstract = true,
+};
+
+static void pci_ide_register_types(void)
+{
+ type_register_static(&pci_ide_type_info);
+}
+
+type_init(pci_ide_register_types)