/*
 * QEMU IDE disk and CD/DVD-ROM Emulator
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2006 Openedhand Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <hw/i386/pc.h>
#include <hw/pci/pci.h>
#include <hw/isa/isa.h>
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "sysemu/dma.h"
#include "hw/block/block.h"
#include "sysemu/block-backend.h"

#include <hw/ide/internal.h>
/* These values were based on a Seagate ST3500418AS but have been modified
   to make more sense in QEMU */
static const int smart_attributes[][12] = {
    /* id,  flags, hflags, val, wrst, raw (6 bytes), threshold */
    /* raw read error rate */
    { 0x01, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06},
    /* spin up */
    { 0x03, 0x03, 0x00, 0x64, 0x64, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
    /* start stop count */
    { 0x04, 0x02, 0x00, 0x64, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14},
    /* remapped sectors */
    { 0x05, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24},
    /* power on hours */
    { 0x09, 0x03, 0x00, 0x64, 0x64, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
    /* power cycle count */
    { 0x0c, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
    /* airflow-temperature-celsius */
    { 190,  0x03, 0x00, 0x45, 0x45, 0x1f, 0x00, 0x1f, 0x1f, 0x00, 0x00, 0x32},
};
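/*
 * Illustrative note (not from the original source): bytes 0-10 of each row
 * above are copied into the 512-byte SMART READ DATA reply at offset
 * 2 + n * 12, while byte 11 is the per-attribute threshold that
 * SMART READ THRESH reports alongside the attribute id (see cmd_smart()).
 */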
static int ide_handle_rw_error(IDEState *s, int error, int op);
static void ide_dummy_transfer_stop(IDEState *s);

/* Pad a string with spaces and byte-swap it into an ATA IDENTIFY field */
static void padstr(char *str, const char *src, int len)
{
    int i, v;
    for (i = 0; i < len; i++) {
        v = *src ? *src++ : ' ';
        str[i ^ 1] = v;
    }
}

static void put_le16(uint16_t *p, unsigned int v)
{
    *p = cpu_to_le16(v);
}
static void ide_identify_size(IDEState *s)
{
    uint16_t *p = (uint16_t *)s->identify_data;
    put_le16(p + 60, s->nb_sectors);
    put_le16(p + 61, s->nb_sectors >> 16);
    put_le16(p + 100, s->nb_sectors);
    put_le16(p + 101, s->nb_sectors >> 16);
    put_le16(p + 102, s->nb_sectors >> 32);
    put_le16(p + 103, s->nb_sectors >> 48);
}
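/*
 * Worked example (illustrative): a 40 GB drive has nb_sectors = 78125000
 * = 0x04A817C8, so words 60/61 (28-bit capacity) and words 100-103 (48-bit
 * capacity) become 0x17C8, 0x04A8, 0x0000, 0x0000; guests reassemble the
 * low-order word first to recover the full sector count.
 */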
static void ide_identify(IDEState *s)
{
    uint16_t *p;
    unsigned int oldsize;
    IDEDevice *dev = s->unit ? s->bus->slave : s->bus->master;

    p = (uint16_t *)s->identify_data;
    if (s->identify_set) {
        goto fill_buffer;
    }
    memset(p, 0, sizeof(s->identify_data));

    put_le16(p + 0, 0x0040);
    put_le16(p + 1, s->cylinders);
    put_le16(p + 3, s->heads);
    put_le16(p + 4, 512 * s->sectors); /* XXX: retired, remove ? */
    put_le16(p + 5, 512); /* XXX: retired, remove ? */
    put_le16(p + 6, s->sectors);
    padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
    put_le16(p + 20, 3); /* XXX: retired, remove ? */
    put_le16(p + 21, 512); /* cache size in sectors */
    put_le16(p + 22, 4); /* ecc bytes */
    padstr((char *)(p + 23), s->version, 8); /* firmware version */
    padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
#if MAX_MULT_SECTORS > 1
    put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
#endif
    put_le16(p + 48, 1); /* dword I/O */
    put_le16(p + 49, (1 << 11) | (1 << 9) | (1 << 8)); /* DMA and LBA supported */
    put_le16(p + 51, 0x200); /* PIO transfer cycle */
    put_le16(p + 52, 0x200); /* DMA transfer cycle */
    put_le16(p + 53, 1 | (1 << 1) | (1 << 2)); /* words 54-58,64-70,88 are valid */
    put_le16(p + 54, s->cylinders);
    put_le16(p + 55, s->heads);
    put_le16(p + 56, s->sectors);
    oldsize = s->cylinders * s->heads * s->sectors;
    put_le16(p + 57, oldsize);
    put_le16(p + 58, oldsize >> 16);
    if (s->mult_sectors)
        put_le16(p + 59, 0x100 | s->mult_sectors);
    /* *(p + 60) := nb_sectors       -- see ide_identify_size */
    /* *(p + 61) := nb_sectors >> 16 -- see ide_identify_size */
    put_le16(p + 62, 0x07); /* single word dma0-2 supported */
    put_le16(p + 63, 0x07); /* mdma0-2 supported */
    put_le16(p + 64, 0x03); /* pio3-4 supported */
    put_le16(p + 65, 120);
    put_le16(p + 66, 120);
    put_le16(p + 67, 120);
    put_le16(p + 68, 120);
    if (dev && dev->conf.discard_granularity) {
        put_le16(p + 69, (1 << 14)); /* determinate TRIM behavior */
    }

    if (s->ncq_queues) {
        put_le16(p + 75, s->ncq_queues - 1);
        /* NCQ supported */
        put_le16(p + 76, (1 << 8));
    }

    put_le16(p + 80, 0xf0); /* ata3 -> ata6 supported */
    put_le16(p + 81, 0x16); /* conforms to ata5 */
    /* 14=NOP supported, 5=WCACHE supported, 0=SMART supported */
    put_le16(p + 82, (1 << 14) | (1 << 5) | 1);
    /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
    put_le16(p + 83, (1 << 14) | (1 << 13) | (1 << 12) | (1 << 10));
    /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
    if (s->wwn) {
        put_le16(p + 84, (1 << 14) | (1 << 8) | 0);
    } else {
        put_le16(p + 84, (1 << 14) | 0);
    }
    /* 14 = NOP supported, 5=WCACHE enabled, 0=SMART feature set enabled */
    if (blk_enable_write_cache(s->blk)) {
        put_le16(p + 85, (1 << 14) | (1 << 5) | 1);
    } else {
        put_le16(p + 85, (1 << 14) | 1);
    }
    /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
    put_le16(p + 86, (1 << 13) | (1 << 12) | (1 << 10));
    /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
    if (s->wwn) {
        put_le16(p + 87, (1 << 14) | (1 << 8) | 0);
    } else {
        put_le16(p + 87, (1 << 14) | 0);
    }
    put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
    put_le16(p + 93, 1 | (1 << 14) | 0x2000);
    /* *(p + 100) := nb_sectors       -- see ide_identify_size */
    /* *(p + 101) := nb_sectors >> 16 -- see ide_identify_size */
    /* *(p + 102) := nb_sectors >> 32 -- see ide_identify_size */
    /* *(p + 103) := nb_sectors >> 48 -- see ide_identify_size */

    if (dev && dev->conf.physical_block_size)
        put_le16(p + 106, 0x6000 | get_physical_block_exp(&dev->conf));
    if (s->wwn) {
        /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
        put_le16(p + 108, s->wwn >> 48);
        put_le16(p + 109, s->wwn >> 32);
        put_le16(p + 110, s->wwn >> 16);
        put_le16(p + 111, s->wwn);
    }
    if (dev && dev->conf.discard_granularity) {
        put_le16(p + 169, 1); /* TRIM support */
    }

    ide_identify_size(s);
    s->identify_set = 1;

fill_buffer:
    memcpy(s->io_buffer, p, sizeof(s->identify_data));
}
201 static void ide_atapi_identify(IDEState *s)
205 p = (uint16_t *)s->identify_data;
206 if (s->identify_set) {
209 memset(p, 0, sizeof(s->identify_data));
211 /* Removable CDROM, 50us response, 12 byte packets */
212 put_le16(p + 0, (2 << 14) | (5 << 8) | (1 << 7) | (2 << 5) | (0 << 0));
213 padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
214 put_le16(p + 20, 3); /* buffer type */
215 put_le16(p + 21, 512); /* cache size in sectors */
216 put_le16(p + 22, 4); /* ecc bytes */
217 padstr((char *)(p + 23), s->version, 8); /* firmware version */
218 padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
219 put_le16(p + 48, 1); /* dword I/O (XXX: should not be set on CDROM) */
221 put_le16(p + 49, 1 << 9 | 1 << 8); /* DMA and LBA supported */
222 put_le16(p + 53, 7); /* words 64-70, 54-58, 88 valid */
223 put_le16(p + 62, 7); /* single word dma0-2 supported */
224 put_le16(p + 63, 7); /* mdma0-2 supported */
226 put_le16(p + 49, 1 << 9); /* LBA supported, no DMA */
227 put_le16(p + 53, 3); /* words 64-70, 54-58 valid */
228 put_le16(p + 63, 0x103); /* DMA modes XXX: may be incorrect */
230 put_le16(p + 64, 3); /* pio3-4 supported */
231 put_le16(p + 65, 0xb4); /* minimum DMA multiword tx cycle time */
232 put_le16(p + 66, 0xb4); /* recommended DMA multiword tx cycle time */
233 put_le16(p + 67, 0x12c); /* minimum PIO cycle time without flow control */
234 put_le16(p + 68, 0xb4); /* minimum PIO cycle time with IORDY flow control */
236 put_le16(p + 71, 30); /* in ns */
237 put_le16(p + 72, 30); /* in ns */
240 put_le16(p + 75, s->ncq_queues - 1);
242 put_le16(p + 76, (1 << 8));
245 put_le16(p + 80, 0x1e); /* support up to ATA/ATAPI-4 */
247 put_le16(p + 84, (1 << 8)); /* supports WWN for words 108-111 */
248 put_le16(p + 87, (1 << 8)); /* WWN enabled */
252 put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
256 /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
257 put_le16(p + 108, s->wwn >> 48);
258 put_le16(p + 109, s->wwn >> 32);
259 put_le16(p + 110, s->wwn >> 16);
260 put_le16(p + 111, s->wwn);
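/*
 * Example (illustrative): wwn = 0x5000c50012345678 is reported as
 * word 108 = 0x5000, word 109 = 0xc500, word 110 = 0x1234 and
 * word 111 = 0x5678; put_le16() truncates each shifted value to 16 bits.
 */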
266 memcpy(s->io_buffer, p, sizeof(s->identify_data));
static void ide_cfata_identify_size(IDEState *s)
{
    uint16_t *p = (uint16_t *)s->identify_data;
    put_le16(p + 7, s->nb_sectors >> 16);  /* Sectors per card */
    put_le16(p + 8, s->nb_sectors);        /* Sectors per card */
    put_le16(p + 60, s->nb_sectors);       /* Total LBA sectors */
    put_le16(p + 61, s->nb_sectors >> 16); /* Total LBA sectors */
}
278 static void ide_cfata_identify(IDEState *s)
283 p = (uint16_t *)s->identify_data;
284 if (s->identify_set) {
287 memset(p, 0, sizeof(s->identify_data));
289 cur_sec = s->cylinders * s->heads * s->sectors;
291 put_le16(p + 0, 0x848a); /* CF Storage Card signature */
292 put_le16(p + 1, s->cylinders); /* Default cylinders */
293 put_le16(p + 3, s->heads); /* Default heads */
294 put_le16(p + 6, s->sectors); /* Default sectors per track */
295 /* *(p + 7) := nb_sectors >> 16 -- see ide_cfata_identify_size */
296 /* *(p + 8) := nb_sectors -- see ide_cfata_identify_size */
297 padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
298 put_le16(p + 22, 0x0004); /* ECC bytes */
299 padstr((char *) (p + 23), s->version, 8); /* Firmware Revision */
300 padstr((char *) (p + 27), s->drive_model_str, 40);/* Model number */
301 #if MAX_MULT_SECTORS > 1
302 put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
304 put_le16(p + 47, 0x0000);
306 put_le16(p + 49, 0x0f00); /* Capabilities */
307 put_le16(p + 51, 0x0002); /* PIO cycle timing mode */
308 put_le16(p + 52, 0x0001); /* DMA cycle timing mode */
309 put_le16(p + 53, 0x0003); /* Translation params valid */
310 put_le16(p + 54, s->cylinders); /* Current cylinders */
311 put_le16(p + 55, s->heads); /* Current heads */
312 put_le16(p + 56, s->sectors); /* Current sectors */
313 put_le16(p + 57, cur_sec); /* Current capacity */
314 put_le16(p + 58, cur_sec >> 16); /* Current capacity */
315 if (s->mult_sectors) /* Multiple sector setting */
316 put_le16(p + 59, 0x100 | s->mult_sectors);
317 /* *(p + 60) := nb_sectors -- see ide_cfata_identify_size */
318 /* *(p + 61) := nb_sectors >> 16 -- see ide_cfata_identify_size */
319 put_le16(p + 63, 0x0203); /* Multiword DMA capability */
320 put_le16(p + 64, 0x0001); /* Flow Control PIO support */
321 put_le16(p + 65, 0x0096); /* Min. Multiword DMA cycle */
322 put_le16(p + 66, 0x0096); /* Rec. Multiword DMA cycle */
323 put_le16(p + 68, 0x00b4); /* Min. PIO cycle time */
324 put_le16(p + 82, 0x400c); /* Command Set supported */
325 put_le16(p + 83, 0x7068); /* Command Set supported */
326 put_le16(p + 84, 0x4000); /* Features supported */
327 put_le16(p + 85, 0x000c); /* Command Set enabled */
328 put_le16(p + 86, 0x7044); /* Command Set enabled */
329 put_le16(p + 87, 0x4000); /* Features enabled */
330 put_le16(p + 91, 0x4060); /* Current APM level */
331 put_le16(p + 129, 0x0002); /* Current features option */
332 put_le16(p + 130, 0x0005); /* Reassigned sectors */
333 put_le16(p + 131, 0x0001); /* Initial power mode */
334 put_le16(p + 132, 0x0000); /* User signature */
335 put_le16(p + 160, 0x8100); /* Power requirement */
336 put_le16(p + 161, 0x8001); /* CF command set */
338 ide_cfata_identify_size(s);
342 memcpy(s->io_buffer, p, sizeof(s->identify_data));
345 static void ide_set_signature(IDEState *s)
347 s->select &= 0xf0; /* clear head */
351 if (s->drive_kind == IDE_CD) {
363 typedef struct TrimAIOCB {
373 static void trim_aio_cancel(BlockAIOCB *acb)
375 TrimAIOCB *iocb = container_of(acb, TrimAIOCB, common);
377 /* Exit the loop so ide_issue_trim_cb will not continue */
378 iocb->j = iocb->qiov->niov - 1;
379 iocb->i = (iocb->qiov->iov[iocb->j].iov_len / 8) - 1;
381 iocb->ret = -ECANCELED;
384 blk_aio_cancel_async(iocb->aiocb);
389 static const AIOCBInfo trim_aiocb_info = {
390 .aiocb_size = sizeof(TrimAIOCB),
391 .cancel_async = trim_aio_cancel,
394 static void ide_trim_bh_cb(void *opaque)
396 TrimAIOCB *iocb = opaque;
398 iocb->common.cb(iocb->common.opaque, iocb->ret);
400 qemu_bh_delete(iocb->bh);
402 qemu_aio_unref(iocb);
405 static void ide_issue_trim_cb(void *opaque, int ret)
407 TrimAIOCB *iocb = opaque;
409 while (iocb->j < iocb->qiov->niov) {
411 while (++iocb->i < iocb->qiov->iov[j].iov_len / 8) {
413 uint64_t *buffer = iocb->qiov->iov[j].iov_base;
415 /* 6-byte LBA + 2-byte range per entry */
416 uint64_t entry = le64_to_cpu(buffer[i]);
417 uint64_t sector = entry & 0x0000ffffffffffffULL;
418 uint16_t count = entry >> 48;
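/*
 * Example (illustrative): entry 0x0008000000001000 requests a discard of
 * 8 sectors starting at LBA 0x1000; an entry with a zero count is unused.
 */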
424 /* Got an entry! Submit and exit. */
425 iocb->aiocb = blk_aio_discard(iocb->blk, sector, count,
426 ide_issue_trim_cb, opaque);
439 qemu_bh_schedule(iocb->bh);
443 BlockAIOCB *ide_issue_trim(BlockBackend *blk,
444 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
445 BlockCompletionFunc *cb, void *opaque)
449 iocb = blk_aio_get(&trim_aiocb_info, blk, cb, opaque);
451 iocb->bh = qemu_bh_new(ide_trim_bh_cb, iocb);
456 ide_issue_trim_cb(iocb, 0);
457 return &iocb->common;
460 static inline void ide_abort_command(IDEState *s)
462 ide_transfer_stop(s);
463 s->status = READY_STAT | ERR_STAT;
467 /* prepare data transfer and tell what to do after */
468 void ide_transfer_start(IDEState *s, uint8_t *buf, int size,
469 EndTransferFunc *end_transfer_func)
471 s->end_transfer_func = end_transfer_func;
473 s->data_end = buf + size;
474 if (!(s->status & ERR_STAT)) {
475 s->status |= DRQ_STAT;
477 if (s->bus->dma->ops->start_transfer) {
478 s->bus->dma->ops->start_transfer(s->bus->dma);
482 static void ide_cmd_done(IDEState *s)
484 if (s->bus->dma->ops->cmd_done) {
485 s->bus->dma->ops->cmd_done(s->bus->dma);
489 void ide_transfer_stop(IDEState *s)
491 s->end_transfer_func = ide_transfer_stop;
492 s->data_ptr = s->io_buffer;
493 s->data_end = s->io_buffer;
494 s->status &= ~DRQ_STAT;
498 int64_t ide_get_sector(IDEState *s)
501 if (s->select & 0x40) {
504 sector_num = ((s->select & 0x0f) << 24) | (s->hcyl << 16) |
505 (s->lcyl << 8) | s->sector;
507 sector_num = ((int64_t)s->hob_hcyl << 40) |
508 ((int64_t) s->hob_lcyl << 32) |
509 ((int64_t) s->hob_sector << 24) |
510 ((int64_t) s->hcyl << 16) |
511 ((int64_t) s->lcyl << 8) | s->sector;
514 sector_num = ((s->hcyl << 8) | s->lcyl) * s->heads * s->sectors +
515 (s->select & 0x0f) * s->sectors + (s->sector - 1);
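/*
 * Example (illustrative): in CHS mode with 16 heads and 63 sectors/track,
 * C/H/S = 2/3/4 resolves to (2 * 16 + 3) * 63 + (4 - 1) = LBA 2208.
 */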
520 void ide_set_sector(IDEState *s, int64_t sector_num)
523 if (s->select & 0x40) {
525 s->select = (s->select & 0xf0) | (sector_num >> 24);
526 s->hcyl = (sector_num >> 16);
527 s->lcyl = (sector_num >> 8);
528 s->sector = (sector_num);
530 s->sector = sector_num;
531 s->lcyl = sector_num >> 8;
532 s->hcyl = sector_num >> 16;
533 s->hob_sector = sector_num >> 24;
534 s->hob_lcyl = sector_num >> 32;
535 s->hob_hcyl = sector_num >> 40;
538 cyl = sector_num / (s->heads * s->sectors);
539 r = sector_num % (s->heads * s->sectors);
542 s->select = (s->select & 0xf0) | ((r / s->sectors) & 0x0f);
543 s->sector = (r % s->sectors) + 1;
547 static void ide_rw_error(IDEState *s) {
548 ide_abort_command(s);
552 static bool ide_sect_range_ok(IDEState *s,
553 uint64_t sector, uint64_t nb_sectors)
555 uint64_t total_sectors;
557 blk_get_geometry(s->blk, &total_sectors);
558 if (sector > total_sectors || nb_sectors > total_sectors - sector) {
564 static void ide_sector_read(IDEState *s);
566 static void ide_sector_read_cb(void *opaque, int ret)
568 IDEState *s = opaque;
572 s->status &= ~BUSY_STAT;
574 if (ret == -ECANCELED) {
577 block_acct_done(blk_get_stats(s->blk), &s->acct);
579 if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO |
586 if (n > s->req_nb_sectors) {
587 n = s->req_nb_sectors;
590 /* Allow the guest to read the io_buffer */
591 ide_transfer_start(s, s->io_buffer, n * BDRV_SECTOR_SIZE, ide_sector_read);
595 ide_set_sector(s, ide_get_sector(s) + n);
597 s->io_buffer_offset += 512 * n;
600 static void ide_sector_read(IDEState *s)
605 s->status = READY_STAT | SEEK_STAT;
606 s->error = 0; /* not needed by IDE spec, but needed by Windows */
607 sector_num = ide_get_sector(s);
611 ide_transfer_stop(s);
615 s->status |= BUSY_STAT;
617 if (n > s->req_nb_sectors) {
618 n = s->req_nb_sectors;
621 #if defined(DEBUG_IDE)
622 printf("sector=%" PRId64 "\n", sector_num);
625 if (!ide_sect_range_ok(s, sector_num, n)) {
630 s->iov.iov_base = s->io_buffer;
631 s->iov.iov_len = n * BDRV_SECTOR_SIZE;
632 qemu_iovec_init_external(&s->qiov, &s->iov, 1);
634 block_acct_start(blk_get_stats(s->blk), &s->acct,
635 n * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
636 s->pio_aiocb = blk_aio_readv(s->blk, sector_num, &s->qiov, n,
637 ide_sector_read_cb, s);
640 static void dma_buf_commit(IDEState *s, uint32_t tx_bytes)
642 if (s->bus->dma->ops->commit_buf) {
643 s->bus->dma->ops->commit_buf(s->bus->dma, tx_bytes);
645 qemu_sglist_destroy(&s->sg);
648 void ide_set_inactive(IDEState *s, bool more)
650 s->bus->dma->aiocb = NULL;
651 s->bus->retry_unit = -1;
652 s->bus->retry_sector_num = 0;
653 s->bus->retry_nsector = 0;
654 if (s->bus->dma->ops->set_inactive) {
655 s->bus->dma->ops->set_inactive(s->bus->dma, more);
660 void ide_dma_error(IDEState *s)
662 dma_buf_commit(s, 0);
663 ide_abort_command(s);
664 ide_set_inactive(s, false);
668 static int ide_handle_rw_error(IDEState *s, int error, int op)
670 bool is_read = (op & IDE_RETRY_READ) != 0;
671 BlockErrorAction action = blk_get_error_action(s->blk, is_read, error);
673 if (action == BLOCK_ERROR_ACTION_STOP) {
674 assert(s->bus->retry_unit == s->unit);
675 s->bus->error_status = op;
676 } else if (action == BLOCK_ERROR_ACTION_REPORT) {
677 if (op & IDE_RETRY_DMA) {
683 blk_error_action(s->blk, action, is_read, error);
684 return action != BLOCK_ERROR_ACTION_IGNORE;
687 static void ide_dma_cb(void *opaque, int ret)
689 IDEState *s = opaque;
692 bool stay_active = false;
694 if (ret == -ECANCELED) {
698 int op = IDE_RETRY_DMA;
700 if (s->dma_cmd == IDE_DMA_READ)
701 op |= IDE_RETRY_READ;
702 else if (s->dma_cmd == IDE_DMA_TRIM)
703 op |= IDE_RETRY_TRIM;
705 if (ide_handle_rw_error(s, -ret, op)) {
710 n = s->io_buffer_size >> 9;
711 if (n > s->nsector) {
712 /* The PRDs were longer than needed for this request. Shorten them so
713 * we don't get a negative remainder. The Active bit must remain set
714 * after the request completes. */
719 sector_num = ide_get_sector(s);
721 assert(s->io_buffer_size == s->sg.size);
722 dma_buf_commit(s, s->io_buffer_size);
724 ide_set_sector(s, sector_num);
728 /* end of transfer ? */
729 if (s->nsector == 0) {
730 s->status = READY_STAT | SEEK_STAT;
735 /* launch next transfer */
737 s->io_buffer_index = 0;
738 s->io_buffer_size = n * 512;
739 if (s->bus->dma->ops->prepare_buf(s->bus->dma, ide_cmd_is_read(s)) < 512) {
740 /* The PRDs were too short. Reset the Active bit, but don't raise an
742 s->status = READY_STAT | SEEK_STAT;
743 dma_buf_commit(s, 0);
748 printf("ide_dma_cb: sector_num=%" PRId64 " n=%d, cmd_cmd=%d\n",
749 sector_num, n, s->dma_cmd);
752 if ((s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) &&
753 !ide_sect_range_ok(s, sector_num, n)) {
758 switch (s->dma_cmd) {
760 s->bus->dma->aiocb = dma_blk_read(s->blk, &s->sg, sector_num,
764 s->bus->dma->aiocb = dma_blk_write(s->blk, &s->sg, sector_num,
768 s->bus->dma->aiocb = dma_blk_io(s->blk, &s->sg, sector_num,
769 ide_issue_trim, ide_dma_cb, s,
770 DMA_DIRECTION_TO_DEVICE);
776 if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
777 block_acct_done(blk_get_stats(s->blk), &s->acct);
779 ide_set_inactive(s, stay_active);
782 static void ide_sector_start_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
784 s->status = READY_STAT | SEEK_STAT | DRQ_STAT | BUSY_STAT;
785 s->io_buffer_size = 0;
786 s->dma_cmd = dma_cmd;
790 block_acct_start(blk_get_stats(s->blk), &s->acct,
791 s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
794 block_acct_start(blk_get_stats(s->blk), &s->acct,
795 s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
801 ide_start_dma(s, ide_dma_cb);
804 void ide_start_dma(IDEState *s, BlockCompletionFunc *cb)
806 s->io_buffer_index = 0;
807 s->bus->retry_unit = s->unit;
808 s->bus->retry_sector_num = ide_get_sector(s);
809 s->bus->retry_nsector = s->nsector;
810 if (s->bus->dma->ops->start_dma) {
811 s->bus->dma->ops->start_dma(s->bus->dma, s, cb);
815 static void ide_sector_write(IDEState *s);
817 static void ide_sector_write_timer_cb(void *opaque)
819 IDEState *s = opaque;
823 static void ide_sector_write_cb(void *opaque, int ret)
825 IDEState *s = opaque;
828 if (ret == -ECANCELED) {
831 block_acct_done(blk_get_stats(s->blk), &s->acct);
834 s->status &= ~BUSY_STAT;
837 if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO)) {
843 if (n > s->req_nb_sectors) {
844 n = s->req_nb_sectors;
847 s->io_buffer_offset += 512 * n;
849 if (s->nsector == 0) {
850 /* no more sectors to write */
851 ide_transfer_stop(s);
854 if (n1 > s->req_nb_sectors) {
855 n1 = s->req_nb_sectors;
857 ide_transfer_start(s, s->io_buffer, n1 * BDRV_SECTOR_SIZE,
860 ide_set_sector(s, ide_get_sector(s) + n);
    if (win2k_install_hack && ((++s->irq_count % 16) == 0)) {
        /* It seems there is a bug in the Windows 2000 installer HDD
           IDE driver which fills the disk with empty logs when the
           IDE write IRQ comes too early. This hack tries to correct
           that at the expense of slower write performance. Use this
           option _only_ to install Windows 2000. You must disable it
           for normal use. */
        timer_mod(s->sector_write_timer,
                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + (get_ticks_per_sec() / 1000));
876 static void ide_sector_write(IDEState *s)
881 s->status = READY_STAT | SEEK_STAT | BUSY_STAT;
882 sector_num = ide_get_sector(s);
883 #if defined(DEBUG_IDE)
884 printf("sector=%" PRId64 "\n", sector_num);
887 if (n > s->req_nb_sectors) {
888 n = s->req_nb_sectors;
891 if (!ide_sect_range_ok(s, sector_num, n)) {
896 s->iov.iov_base = s->io_buffer;
897 s->iov.iov_len = n * BDRV_SECTOR_SIZE;
898 qemu_iovec_init_external(&s->qiov, &s->iov, 1);
900 block_acct_start(blk_get_stats(s->blk), &s->acct,
901 n * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
902 s->pio_aiocb = blk_aio_writev(s->blk, sector_num, &s->qiov, n,
903 ide_sector_write_cb, s);
906 static void ide_flush_cb(void *opaque, int ret)
908 IDEState *s = opaque;
912 if (ret == -ECANCELED) {
916 /* XXX: What sector number to set here? */
917 if (ide_handle_rw_error(s, -ret, IDE_RETRY_FLUSH)) {
923 block_acct_done(blk_get_stats(s->blk), &s->acct);
925 s->status = READY_STAT | SEEK_STAT;
930 static void ide_flush_cache(IDEState *s)
932 if (s->blk == NULL) {
937 s->status |= BUSY_STAT;
938 block_acct_start(blk_get_stats(s->blk), &s->acct, 0, BLOCK_ACCT_FLUSH);
939 s->pio_aiocb = blk_aio_flush(s->blk, ide_flush_cb, s);
942 static void ide_cfata_metadata_inquiry(IDEState *s)
947 p = (uint16_t *) s->io_buffer;
949 spd = ((s->mdata_size - 1) >> 9) + 1;
951 put_le16(p + 0, 0x0001); /* Data format revision */
952 put_le16(p + 1, 0x0000); /* Media property: silicon */
953 put_le16(p + 2, s->media_changed); /* Media status */
954 put_le16(p + 3, s->mdata_size & 0xffff); /* Capacity in bytes (low) */
955 put_le16(p + 4, s->mdata_size >> 16); /* Capacity in bytes (high) */
956 put_le16(p + 5, spd & 0xffff); /* Sectors per device (low) */
957 put_le16(p + 6, spd >> 16); /* Sectors per device (high) */
960 static void ide_cfata_metadata_read(IDEState *s)
964 if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
965 s->status = ERR_STAT;
970 p = (uint16_t *) s->io_buffer;
973 put_le16(p + 0, s->media_changed); /* Media status */
974 memcpy(p + 1, s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
975 MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
976 s->nsector << 9), 0x200 - 2));
979 static void ide_cfata_metadata_write(IDEState *s)
981 if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
982 s->status = ERR_STAT;
987 s->media_changed = 0;
989 memcpy(s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
991 MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
992 s->nsector << 9), 0x200 - 2));
995 /* called when the inserted state of the media has changed */
996 static void ide_cd_change_cb(void *opaque, bool load)
998 IDEState *s = opaque;
1001 s->tray_open = !load;
1002 blk_get_geometry(s->blk, &nb_sectors);
1003 s->nb_sectors = nb_sectors;
1006 * First indicate to the guest that a CD has been removed. That's
1007 * done on the next command the guest sends us.
1009 * Then we set UNIT_ATTENTION, by which the guest will
1010 * detect a new CD in the drive. See ide_atapi_cmd() for details.
1012 s->cdrom_changed = 1;
1013 s->events.new_media = true;
1014 s->events.eject_request = false;
1015 ide_set_irq(s->bus);
1018 static void ide_cd_eject_request_cb(void *opaque, bool force)
1020 IDEState *s = opaque;
1022 s->events.eject_request = true;
1024 s->tray_locked = false;
1026 ide_set_irq(s->bus);
1029 static void ide_cmd_lba48_transform(IDEState *s, int lba48)
1033 /* handle the 'magic' 0 nsector count conversion here. to avoid
1034 * fiddling with the rest of the read logic, we just store the
1035 * full sector count in ->nsector and ignore ->hob_nsector from now
1041 if (!s->nsector && !s->hob_nsector)
1044 int lo = s->nsector;
1045 int hi = s->hob_nsector;
1047 s->nsector = (hi << 8) | lo;
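/*
 * Illustrative examples: in LBA28 mode a sector count register of 0 means
 * 256 sectors; in LBA48 mode nsector == hob_nsector == 0 means 65536
 * sectors, and otherwise the count is (hob_nsector << 8) | nsector,
 * e.g. hob_nsector = 0x01, nsector = 0x00 gives 256.
 */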
1052 static void ide_clear_hob(IDEBus *bus)
1054 /* any write clears HOB high bit of device control register */
1055 bus->ifs[0].select &= ~(1 << 7);
1056 bus->ifs[1].select &= ~(1 << 7);
1059 void ide_ioport_write(void *opaque, uint32_t addr, uint32_t val)
1061 IDEBus *bus = opaque;
1064 printf("IDE: write addr=0x%x val=0x%02x\n", addr, val);
1069 /* ignore writes to command block while busy with previous command */
1070 if (addr != 7 && (idebus_active_if(bus)->status & (BUSY_STAT|DRQ_STAT)))
1078 /* NOTE: data is written to the two drives */
1079 bus->ifs[0].hob_feature = bus->ifs[0].feature;
1080 bus->ifs[1].hob_feature = bus->ifs[1].feature;
1081 bus->ifs[0].feature = val;
1082 bus->ifs[1].feature = val;
1086 bus->ifs[0].hob_nsector = bus->ifs[0].nsector;
1087 bus->ifs[1].hob_nsector = bus->ifs[1].nsector;
1088 bus->ifs[0].nsector = val;
1089 bus->ifs[1].nsector = val;
1093 bus->ifs[0].hob_sector = bus->ifs[0].sector;
1094 bus->ifs[1].hob_sector = bus->ifs[1].sector;
1095 bus->ifs[0].sector = val;
1096 bus->ifs[1].sector = val;
1100 bus->ifs[0].hob_lcyl = bus->ifs[0].lcyl;
1101 bus->ifs[1].hob_lcyl = bus->ifs[1].lcyl;
1102 bus->ifs[0].lcyl = val;
1103 bus->ifs[1].lcyl = val;
1107 bus->ifs[0].hob_hcyl = bus->ifs[0].hcyl;
1108 bus->ifs[1].hob_hcyl = bus->ifs[1].hcyl;
1109 bus->ifs[0].hcyl = val;
1110 bus->ifs[1].hcyl = val;
1113 /* FIXME: HOB readback uses bit 7 */
1114 bus->ifs[0].select = (val & ~0x10) | 0xa0;
1115 bus->ifs[1].select = (val | 0x10) | 0xa0;
1117 bus->unit = (val >> 4) & 1;
1122 ide_exec_cmd(bus, val);
1127 static bool cmd_nop(IDEState *s, uint8_t cmd)
1132 static bool cmd_data_set_management(IDEState *s, uint8_t cmd)
1134 switch (s->feature) {
1137 ide_sector_start_dma(s, IDE_DMA_TRIM);
1143 ide_abort_command(s);
1147 static bool cmd_identify(IDEState *s, uint8_t cmd)
1149 if (s->blk && s->drive_kind != IDE_CD) {
1150 if (s->drive_kind != IDE_CFATA) {
1153 ide_cfata_identify(s);
1155 s->status = READY_STAT | SEEK_STAT;
1156 ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1157 ide_set_irq(s->bus);
1160 if (s->drive_kind == IDE_CD) {
1161 ide_set_signature(s);
1163 ide_abort_command(s);
1169 static bool cmd_verify(IDEState *s, uint8_t cmd)
1171 bool lba48 = (cmd == WIN_VERIFY_EXT);
1173 /* do sector number check ? */
1174 ide_cmd_lba48_transform(s, lba48);
1179 static bool cmd_set_multiple_mode(IDEState *s, uint8_t cmd)
1181 if (s->drive_kind == IDE_CFATA && s->nsector == 0) {
1182 /* Disable Read and Write Multiple */
1183 s->mult_sectors = 0;
1184 } else if ((s->nsector & 0xff) != 0 &&
1185 ((s->nsector & 0xff) > MAX_MULT_SECTORS ||
1186 (s->nsector & (s->nsector - 1)) != 0)) {
1187 ide_abort_command(s);
1189 s->mult_sectors = s->nsector & 0xff;
1195 static bool cmd_read_multiple(IDEState *s, uint8_t cmd)
1197 bool lba48 = (cmd == WIN_MULTREAD_EXT);
1199 if (!s->blk || !s->mult_sectors) {
1200 ide_abort_command(s);
1204 ide_cmd_lba48_transform(s, lba48);
1205 s->req_nb_sectors = s->mult_sectors;
1210 static bool cmd_write_multiple(IDEState *s, uint8_t cmd)
1212 bool lba48 = (cmd == WIN_MULTWRITE_EXT);
1215 if (!s->blk || !s->mult_sectors) {
1216 ide_abort_command(s);
1220 ide_cmd_lba48_transform(s, lba48);
1222 s->req_nb_sectors = s->mult_sectors;
1223 n = MIN(s->nsector, s->req_nb_sectors);
1225 s->status = SEEK_STAT | READY_STAT;
1226 ide_transfer_start(s, s->io_buffer, 512 * n, ide_sector_write);
1228 s->media_changed = 1;
1233 static bool cmd_read_pio(IDEState *s, uint8_t cmd)
1235 bool lba48 = (cmd == WIN_READ_EXT);
1237 if (s->drive_kind == IDE_CD) {
1238 ide_set_signature(s); /* odd, but ATA4 8.27.5.2 requires it */
1239 ide_abort_command(s);
1244 ide_abort_command(s);
1248 ide_cmd_lba48_transform(s, lba48);
1249 s->req_nb_sectors = 1;
1255 static bool cmd_write_pio(IDEState *s, uint8_t cmd)
1257 bool lba48 = (cmd == WIN_WRITE_EXT);
1260 ide_abort_command(s);
1264 ide_cmd_lba48_transform(s, lba48);
1266 s->req_nb_sectors = 1;
1267 s->status = SEEK_STAT | READY_STAT;
1268 ide_transfer_start(s, s->io_buffer, 512, ide_sector_write);
1270 s->media_changed = 1;
1275 static bool cmd_read_dma(IDEState *s, uint8_t cmd)
1277 bool lba48 = (cmd == WIN_READDMA_EXT);
1280 ide_abort_command(s);
1284 ide_cmd_lba48_transform(s, lba48);
1285 ide_sector_start_dma(s, IDE_DMA_READ);
1290 static bool cmd_write_dma(IDEState *s, uint8_t cmd)
1292 bool lba48 = (cmd == WIN_WRITEDMA_EXT);
1295 ide_abort_command(s);
1299 ide_cmd_lba48_transform(s, lba48);
1300 ide_sector_start_dma(s, IDE_DMA_WRITE);
1302 s->media_changed = 1;
1307 static bool cmd_flush_cache(IDEState *s, uint8_t cmd)
1313 static bool cmd_seek(IDEState *s, uint8_t cmd)
1315 /* XXX: Check that seek is within bounds */
1319 static bool cmd_read_native_max(IDEState *s, uint8_t cmd)
1321 bool lba48 = (cmd == WIN_READ_NATIVE_MAX_EXT);
1323 /* Refuse if no sectors are addressable (e.g. medium not inserted) */
1324 if (s->nb_sectors == 0) {
1325 ide_abort_command(s);
1329 ide_cmd_lba48_transform(s, lba48);
1330 ide_set_sector(s, s->nb_sectors - 1);
1335 static bool cmd_check_power_mode(IDEState *s, uint8_t cmd)
1337 s->nsector = 0xff; /* device active or idle */
1341 static bool cmd_set_features(IDEState *s, uint8_t cmd)
1343 uint16_t *identify_data;
1346 ide_abort_command(s);
1350 /* XXX: valid for CDROM ? */
1351 switch (s->feature) {
1352 case 0x02: /* write cache enable */
1353 blk_set_enable_write_cache(s->blk, true);
1354 identify_data = (uint16_t *)s->identify_data;
1355 put_le16(identify_data + 85, (1 << 14) | (1 << 5) | 1);
1357 case 0x82: /* write cache disable */
1358 blk_set_enable_write_cache(s->blk, false);
1359 identify_data = (uint16_t *)s->identify_data;
1360 put_le16(identify_data + 85, (1 << 14) | 1);
1363 case 0xcc: /* reverting to power-on defaults enable */
1364 case 0x66: /* reverting to power-on defaults disable */
1365 case 0xaa: /* read look-ahead enable */
1366 case 0x55: /* read look-ahead disable */
1367 case 0x05: /* set advanced power management mode */
1368 case 0x85: /* disable advanced power management mode */
1369 case 0x69: /* NOP */
1370 case 0x67: /* NOP */
1371 case 0x96: /* NOP */
1372 case 0x9a: /* NOP */
1373 case 0x42: /* enable Automatic Acoustic Mode */
1374 case 0xc2: /* disable Automatic Acoustic Mode */
1376 case 0x03: /* set transfer mode */
1378 uint8_t val = s->nsector & 0x07;
1379 identify_data = (uint16_t *)s->identify_data;
1381 switch (s->nsector >> 3) {
1382 case 0x00: /* pio default */
1383 case 0x01: /* pio mode */
1384 put_le16(identify_data + 62, 0x07);
1385 put_le16(identify_data + 63, 0x07);
1386 put_le16(identify_data + 88, 0x3f);
case 0x02: /* single word dma mode */
1389 put_le16(identify_data + 62, 0x07 | (1 << (val + 8)));
1390 put_le16(identify_data + 63, 0x07);
1391 put_le16(identify_data + 88, 0x3f);
1393 case 0x04: /* mdma mode */
1394 put_le16(identify_data + 62, 0x07);
1395 put_le16(identify_data + 63, 0x07 | (1 << (val + 8)));
1396 put_le16(identify_data + 88, 0x3f);
1398 case 0x08: /* udma mode */
1399 put_le16(identify_data + 62, 0x07);
1400 put_le16(identify_data + 63, 0x07);
1401 put_le16(identify_data + 88, 0x3f | (1 << (val + 8)));
1411 ide_abort_command(s);
1416 /*** ATAPI commands ***/
1418 static bool cmd_identify_packet(IDEState *s, uint8_t cmd)
1420 ide_atapi_identify(s);
1421 s->status = READY_STAT | SEEK_STAT;
1422 ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1423 ide_set_irq(s->bus);
1427 static bool cmd_exec_dev_diagnostic(IDEState *s, uint8_t cmd)
1429 ide_set_signature(s);
1431 if (s->drive_kind == IDE_CD) {
1432 s->status = 0; /* ATAPI spec (v6) section 9.10 defines packet
1433 * devices to return a clear status register
1434 * with READY_STAT *not* set. */
1437 s->status = READY_STAT | SEEK_STAT;
1438 /* The bits of the error register are not as usual for this command!
1439 * They are part of the regular output (this is why ERR_STAT isn't set)
1440 * Device 0 passed, Device 1 passed or not present. */
1442 ide_set_irq(s->bus);
1448 static bool cmd_device_reset(IDEState *s, uint8_t cmd)
1450 ide_set_signature(s);
1451 s->status = 0x00; /* NOTE: READY is _not_ set */
1457 static bool cmd_packet(IDEState *s, uint8_t cmd)
1459 /* overlapping commands not supported */
1460 if (s->feature & 0x02) {
1461 ide_abort_command(s);
1465 s->status = READY_STAT | SEEK_STAT;
1466 s->atapi_dma = s->feature & 1;
1468 ide_transfer_start(s, s->io_buffer, ATAPI_PACKET_SIZE,
1474 /*** CF-ATA commands ***/
1476 static bool cmd_cfa_req_ext_error_code(IDEState *s, uint8_t cmd)
1478 s->error = 0x09; /* miscellaneous error */
1479 s->status = READY_STAT | SEEK_STAT;
1480 ide_set_irq(s->bus);
1485 static bool cmd_cfa_erase_sectors(IDEState *s, uint8_t cmd)
1487 /* WIN_SECURITY_FREEZE_LOCK has the same ID as CFA_WEAR_LEVEL and is
1488 * required for Windows 8 to work with AHCI */
1490 if (cmd == CFA_WEAR_LEVEL) {
1494 if (cmd == CFA_ERASE_SECTORS) {
1495 s->media_changed = 1;
1501 static bool cmd_cfa_translate_sector(IDEState *s, uint8_t cmd)
1503 s->status = READY_STAT | SEEK_STAT;
1505 memset(s->io_buffer, 0, 0x200);
1506 s->io_buffer[0x00] = s->hcyl; /* Cyl MSB */
1507 s->io_buffer[0x01] = s->lcyl; /* Cyl LSB */
1508 s->io_buffer[0x02] = s->select; /* Head */
1509 s->io_buffer[0x03] = s->sector; /* Sector */
1510 s->io_buffer[0x04] = ide_get_sector(s) >> 16; /* LBA MSB */
1511 s->io_buffer[0x05] = ide_get_sector(s) >> 8; /* LBA */
1512 s->io_buffer[0x06] = ide_get_sector(s) >> 0; /* LBA LSB */
1513 s->io_buffer[0x13] = 0x00; /* Erase flag */
1514 s->io_buffer[0x18] = 0x00; /* Hot count */
1515 s->io_buffer[0x19] = 0x00; /* Hot count */
1516 s->io_buffer[0x1a] = 0x01; /* Hot count */
1518 ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1519 ide_set_irq(s->bus);
1524 static bool cmd_cfa_access_metadata_storage(IDEState *s, uint8_t cmd)
1526 switch (s->feature) {
1527 case 0x02: /* Inquiry Metadata Storage */
1528 ide_cfata_metadata_inquiry(s);
1530 case 0x03: /* Read Metadata Storage */
1531 ide_cfata_metadata_read(s);
1533 case 0x04: /* Write Metadata Storage */
1534 ide_cfata_metadata_write(s);
1537 ide_abort_command(s);
1541 ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1542 s->status = 0x00; /* NOTE: READY is _not_ set */
1543 ide_set_irq(s->bus);
1548 static bool cmd_ibm_sense_condition(IDEState *s, uint8_t cmd)
1550 switch (s->feature) {
1551 case 0x01: /* sense temperature in device */
1552 s->nsector = 0x50; /* +20 C */
1555 ide_abort_command(s);
1563 /*** SMART commands ***/
1565 static bool cmd_smart(IDEState *s, uint8_t cmd)
1569 if (s->hcyl != 0xc2 || s->lcyl != 0x4f) {
1573 if (!s->smart_enabled && s->feature != SMART_ENABLE) {
1577 switch (s->feature) {
1579 s->smart_enabled = 0;
1583 s->smart_enabled = 1;
1586 case SMART_ATTR_AUTOSAVE:
1587 switch (s->sector) {
1589 s->smart_autosave = 0;
1592 s->smart_autosave = 1;
1600 if (!s->smart_errors) {
1609 case SMART_READ_THRESH:
1610 memset(s->io_buffer, 0, 0x200);
1611 s->io_buffer[0] = 0x01; /* smart struct version */
1613 for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
1614 s->io_buffer[2 + 0 + (n * 12)] = smart_attributes[n][0];
1615 s->io_buffer[2 + 1 + (n * 12)] = smart_attributes[n][11];
1619 for (n = 0; n < 511; n++) {
1620 s->io_buffer[511] += s->io_buffer[n];
1622 s->io_buffer[511] = 0x100 - s->io_buffer[511];
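/*
 * Illustrative note: bytes 0-510 are summed and byte 511 is set to the
 * two's complement of that sum, so the whole 512-byte structure sums to
 * zero modulo 256 (the ATA data structure checksum convention).
 */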
1624 s->status = READY_STAT | SEEK_STAT;
1625 ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1626 ide_set_irq(s->bus);
1629 case SMART_READ_DATA:
1630 memset(s->io_buffer, 0, 0x200);
1631 s->io_buffer[0] = 0x01; /* smart struct version */
1633 for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
1635 for (i = 0; i < 11; i++) {
1636 s->io_buffer[2 + i + (n * 12)] = smart_attributes[n][i];
1640 s->io_buffer[362] = 0x02 | (s->smart_autosave ? 0x80 : 0x00);
1641 if (s->smart_selftest_count == 0) {
1642 s->io_buffer[363] = 0;
1645 s->smart_selftest_data[3 +
1646 (s->smart_selftest_count - 1) *
1649 s->io_buffer[364] = 0x20;
1650 s->io_buffer[365] = 0x01;
1651 /* offline data collection capacity: execute + self-test*/
1652 s->io_buffer[367] = (1 << 4 | 1 << 3 | 1);
1653 s->io_buffer[368] = 0x03; /* smart capability (1) */
1654 s->io_buffer[369] = 0x00; /* smart capability (2) */
1655 s->io_buffer[370] = 0x01; /* error logging supported */
1656 s->io_buffer[372] = 0x02; /* minutes for poll short test */
1657 s->io_buffer[373] = 0x36; /* minutes for poll ext test */
1658 s->io_buffer[374] = 0x01; /* minutes for poll conveyance */
1660 for (n = 0; n < 511; n++) {
1661 s->io_buffer[511] += s->io_buffer[n];
1663 s->io_buffer[511] = 0x100 - s->io_buffer[511];
1665 s->status = READY_STAT | SEEK_STAT;
1666 ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1667 ide_set_irq(s->bus);
1670 case SMART_READ_LOG:
1671 switch (s->sector) {
1672 case 0x01: /* summary smart error log */
1673 memset(s->io_buffer, 0, 0x200);
1674 s->io_buffer[0] = 0x01;
1675 s->io_buffer[1] = 0x00; /* no error entries */
1676 s->io_buffer[452] = s->smart_errors & 0xff;
1677 s->io_buffer[453] = (s->smart_errors & 0xff00) >> 8;
1679 for (n = 0; n < 511; n++) {
1680 s->io_buffer[511] += s->io_buffer[n];
1682 s->io_buffer[511] = 0x100 - s->io_buffer[511];
1684 case 0x06: /* smart self test log */
1685 memset(s->io_buffer, 0, 0x200);
1686 s->io_buffer[0] = 0x01;
1687 if (s->smart_selftest_count == 0) {
1688 s->io_buffer[508] = 0;
1690 s->io_buffer[508] = s->smart_selftest_count;
1691 for (n = 2; n < 506; n++) {
1692 s->io_buffer[n] = s->smart_selftest_data[n];
1696 for (n = 0; n < 511; n++) {
1697 s->io_buffer[511] += s->io_buffer[n];
1699 s->io_buffer[511] = 0x100 - s->io_buffer[511];
1704 s->status = READY_STAT | SEEK_STAT;
1705 ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1706 ide_set_irq(s->bus);
1709 case SMART_EXECUTE_OFFLINE:
1710 switch (s->sector) {
1711 case 0: /* off-line routine */
1712 case 1: /* short self test */
1713 case 2: /* extended self test */
1714 s->smart_selftest_count++;
1715 if (s->smart_selftest_count > 21) {
1716 s->smart_selftest_count = 1;
1718 n = 2 + (s->smart_selftest_count - 1) * 24;
1719 s->smart_selftest_data[n] = s->sector;
1720 s->smart_selftest_data[n + 1] = 0x00; /* OK and finished */
1721 s->smart_selftest_data[n + 2] = 0x34; /* hour count lsb */
1722 s->smart_selftest_data[n + 3] = 0x12; /* hour count msb */
1731 ide_abort_command(s);
1735 #define HD_OK (1u << IDE_HD)
1736 #define CD_OK (1u << IDE_CD)
1737 #define CFA_OK (1u << IDE_CFATA)
1738 #define HD_CFA_OK (HD_OK | CFA_OK)
1739 #define ALL_OK (HD_OK | CD_OK | CFA_OK)
1741 /* Set the Disk Seek Completed status bit during completion */
1742 #define SET_DSC (1u << 8)
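/*
 * Illustrative note: the low bits of each table entry's flags form a
 * per-drive-kind permission mask tested by ide_cmd_permitted() via
 * (1u << s->drive_kind), while SET_DSC makes ide_exec_cmd() raise
 * SEEK_STAT once the handler completes without error.
 */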
1744 /* See ACS-2 T13/2015-D Table B.2 Command codes */
1745 static const struct {
1746 /* Returns true if the completion code should be run */
1747 bool (*handler)(IDEState *s, uint8_t cmd);
1749 } ide_cmd_table[0x100] = {
1750 /* NOP not implemented, mandatory for CD */
1751 [CFA_REQ_EXT_ERROR_CODE] = { cmd_cfa_req_ext_error_code, CFA_OK },
1752 [WIN_DSM] = { cmd_data_set_management, ALL_OK },
1753 [WIN_DEVICE_RESET] = { cmd_device_reset, CD_OK },
1754 [WIN_RECAL] = { cmd_nop, HD_CFA_OK | SET_DSC},
1755 [WIN_READ] = { cmd_read_pio, ALL_OK },
1756 [WIN_READ_ONCE] = { cmd_read_pio, ALL_OK },
1757 [WIN_READ_EXT] = { cmd_read_pio, HD_CFA_OK },
1758 [WIN_READDMA_EXT] = { cmd_read_dma, HD_CFA_OK },
1759 [WIN_READ_NATIVE_MAX_EXT] = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
1760 [WIN_MULTREAD_EXT] = { cmd_read_multiple, HD_CFA_OK },
1761 [WIN_WRITE] = { cmd_write_pio, HD_CFA_OK },
1762 [WIN_WRITE_ONCE] = { cmd_write_pio, HD_CFA_OK },
1763 [WIN_WRITE_EXT] = { cmd_write_pio, HD_CFA_OK },
1764 [WIN_WRITEDMA_EXT] = { cmd_write_dma, HD_CFA_OK },
1765 [CFA_WRITE_SECT_WO_ERASE] = { cmd_write_pio, CFA_OK },
1766 [WIN_MULTWRITE_EXT] = { cmd_write_multiple, HD_CFA_OK },
1767 [WIN_WRITE_VERIFY] = { cmd_write_pio, HD_CFA_OK },
1768 [WIN_VERIFY] = { cmd_verify, HD_CFA_OK | SET_DSC },
1769 [WIN_VERIFY_ONCE] = { cmd_verify, HD_CFA_OK | SET_DSC },
1770 [WIN_VERIFY_EXT] = { cmd_verify, HD_CFA_OK | SET_DSC },
1771 [WIN_SEEK] = { cmd_seek, HD_CFA_OK | SET_DSC },
1772 [CFA_TRANSLATE_SECTOR] = { cmd_cfa_translate_sector, CFA_OK },
1773 [WIN_DIAGNOSE] = { cmd_exec_dev_diagnostic, ALL_OK },
1774 [WIN_SPECIFY] = { cmd_nop, HD_CFA_OK | SET_DSC },
1775 [WIN_STANDBYNOW2] = { cmd_nop, ALL_OK },
1776 [WIN_IDLEIMMEDIATE2] = { cmd_nop, ALL_OK },
1777 [WIN_STANDBY2] = { cmd_nop, ALL_OK },
1778 [WIN_SETIDLE2] = { cmd_nop, ALL_OK },
1779 [WIN_CHECKPOWERMODE2] = { cmd_check_power_mode, ALL_OK | SET_DSC },
1780 [WIN_SLEEPNOW2] = { cmd_nop, ALL_OK },
1781 [WIN_PACKETCMD] = { cmd_packet, CD_OK },
1782 [WIN_PIDENTIFY] = { cmd_identify_packet, CD_OK },
1783 [WIN_SMART] = { cmd_smart, HD_CFA_OK | SET_DSC },
1784 [CFA_ACCESS_METADATA_STORAGE] = { cmd_cfa_access_metadata_storage, CFA_OK },
1785 [CFA_ERASE_SECTORS] = { cmd_cfa_erase_sectors, CFA_OK | SET_DSC },
1786 [WIN_MULTREAD] = { cmd_read_multiple, HD_CFA_OK },
1787 [WIN_MULTWRITE] = { cmd_write_multiple, HD_CFA_OK },
1788 [WIN_SETMULT] = { cmd_set_multiple_mode, HD_CFA_OK | SET_DSC },
1789 [WIN_READDMA] = { cmd_read_dma, HD_CFA_OK },
1790 [WIN_READDMA_ONCE] = { cmd_read_dma, HD_CFA_OK },
1791 [WIN_WRITEDMA] = { cmd_write_dma, HD_CFA_OK },
1792 [WIN_WRITEDMA_ONCE] = { cmd_write_dma, HD_CFA_OK },
1793 [CFA_WRITE_MULTI_WO_ERASE] = { cmd_write_multiple, CFA_OK },
1794 [WIN_STANDBYNOW1] = { cmd_nop, ALL_OK },
1795 [WIN_IDLEIMMEDIATE] = { cmd_nop, ALL_OK },
1796 [WIN_STANDBY] = { cmd_nop, ALL_OK },
1797 [WIN_SETIDLE1] = { cmd_nop, ALL_OK },
1798 [WIN_CHECKPOWERMODE1] = { cmd_check_power_mode, ALL_OK | SET_DSC },
1799 [WIN_SLEEPNOW1] = { cmd_nop, ALL_OK },
1800 [WIN_FLUSH_CACHE] = { cmd_flush_cache, ALL_OK },
1801 [WIN_FLUSH_CACHE_EXT] = { cmd_flush_cache, HD_CFA_OK },
1802 [WIN_IDENTIFY] = { cmd_identify, ALL_OK },
1803 [WIN_SETFEATURES] = { cmd_set_features, ALL_OK | SET_DSC },
1804 [IBM_SENSE_CONDITION] = { cmd_ibm_sense_condition, CFA_OK | SET_DSC },
1805 [CFA_WEAR_LEVEL] = { cmd_cfa_erase_sectors, HD_CFA_OK | SET_DSC },
1806 [WIN_READ_NATIVE_MAX] = { cmd_read_native_max, ALL_OK | SET_DSC },
1809 static bool ide_cmd_permitted(IDEState *s, uint32_t cmd)
1811 return cmd < ARRAY_SIZE(ide_cmd_table)
1812 && (ide_cmd_table[cmd].flags & (1u << s->drive_kind));
1815 void ide_exec_cmd(IDEBus *bus, uint32_t val)
1820 #if defined(DEBUG_IDE)
1821 printf("ide: CMD=%02x\n", val);
1823 s = idebus_active_if(bus);
/* ignore commands to non-existent slave */
1825 if (s != bus->ifs && !s->blk) {
1829 /* Only DEVICE RESET is allowed while BSY or/and DRQ are set */
1830 if ((s->status & (BUSY_STAT|DRQ_STAT)) && val != WIN_DEVICE_RESET)
1833 if (!ide_cmd_permitted(s, val)) {
1834 ide_abort_command(s);
1835 ide_set_irq(s->bus);
1839 s->status = READY_STAT | BUSY_STAT;
1841 s->io_buffer_offset = 0;
1843 complete = ide_cmd_table[val].handler(s, val);
1845 s->status &= ~BUSY_STAT;
1846 assert(!!s->error == !!(s->status & ERR_STAT));
1848 if ((ide_cmd_table[val].flags & SET_DSC) && !s->error) {
1849 s->status |= SEEK_STAT;
1853 ide_set_irq(s->bus);
1857 uint32_t ide_ioport_read(void *opaque, uint32_t addr1)
1859 IDEBus *bus = opaque;
1860 IDEState *s = idebus_active_if(bus);
1865 /* FIXME: HOB readback uses bit 7, but it's always set right now */
1866 //hob = s->select & (1 << 7);
1873 if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
1874 (s != bus->ifs && !s->blk)) {
1879 ret = s->hob_feature;
1883 if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
1886 ret = s->nsector & 0xff;
1888 ret = s->hob_nsector;
1892 if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
1897 ret = s->hob_sector;
1901 if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
1910 if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
1919 if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
1927 if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
1928 (s != bus->ifs && !s->blk)) {
1933 qemu_irq_lower(bus->irq);
1937 printf("ide: read addr=0x%x val=%02x\n", addr1, ret);
1942 uint32_t ide_status_read(void *opaque, uint32_t addr)
1944 IDEBus *bus = opaque;
1945 IDEState *s = idebus_active_if(bus);
1948 if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
1949 (s != bus->ifs && !s->blk)) {
1955 printf("ide: read status addr=0x%x val=%02x\n", addr, ret);
1960 void ide_cmd_write(void *opaque, uint32_t addr, uint32_t val)
1962 IDEBus *bus = opaque;
1967 printf("ide: write control addr=0x%x val=%02x\n", addr, val);
1969 /* common for both drives */
1970 if (!(bus->cmd & IDE_CMD_RESET) &&
1971 (val & IDE_CMD_RESET)) {
1972 /* reset low to high */
1973 for(i = 0;i < 2; i++) {
1975 s->status = BUSY_STAT | SEEK_STAT;
1978 } else if ((bus->cmd & IDE_CMD_RESET) &&
1979 !(val & IDE_CMD_RESET)) {
1981 for(i = 0;i < 2; i++) {
1983 if (s->drive_kind == IDE_CD)
1984 s->status = 0x00; /* NOTE: READY is _not_ set */
1986 s->status = READY_STAT | SEEK_STAT;
1987 ide_set_signature(s);
1995 * Returns true if the running PIO transfer is a PIO out (i.e. data is
1996 * transferred from the device to the guest), false if it's a PIO in
1998 static bool ide_is_pio_out(IDEState *s)
2000 if (s->end_transfer_func == ide_sector_write ||
2001 s->end_transfer_func == ide_atapi_cmd) {
2003 } else if (s->end_transfer_func == ide_sector_read ||
2004 s->end_transfer_func == ide_transfer_stop ||
2005 s->end_transfer_func == ide_atapi_cmd_reply_end ||
2006 s->end_transfer_func == ide_dummy_transfer_stop) {
2013 void ide_data_writew(void *opaque, uint32_t addr, uint32_t val)
2015 IDEBus *bus = opaque;
2016 IDEState *s = idebus_active_if(bus);
2019 /* PIO data access allowed only when DRQ bit is set. The result of a write
2020 * during PIO out is indeterminate, just ignore it. */
2021 if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2026 *(uint16_t *)p = le16_to_cpu(val);
2029 if (p >= s->data_end)
2030 s->end_transfer_func(s);
2033 uint32_t ide_data_readw(void *opaque, uint32_t addr)
2035 IDEBus *bus = opaque;
2036 IDEState *s = idebus_active_if(bus);
2040 /* PIO data access allowed only when DRQ bit is set. The result of a read
2041 * during PIO in is indeterminate, return 0 and don't move forward. */
2042 if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2047 ret = cpu_to_le16(*(uint16_t *)p);
2050 if (p >= s->data_end)
2051 s->end_transfer_func(s);
2055 void ide_data_writel(void *opaque, uint32_t addr, uint32_t val)
2057 IDEBus *bus = opaque;
2058 IDEState *s = idebus_active_if(bus);
2061 /* PIO data access allowed only when DRQ bit is set. The result of a write
2062 * during PIO out is indeterminate, just ignore it. */
2063 if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2068 *(uint32_t *)p = le32_to_cpu(val);
2071 if (p >= s->data_end)
2072 s->end_transfer_func(s);
2075 uint32_t ide_data_readl(void *opaque, uint32_t addr)
2077 IDEBus *bus = opaque;
2078 IDEState *s = idebus_active_if(bus);
2082 /* PIO data access allowed only when DRQ bit is set. The result of a read
2083 * during PIO in is indeterminate, return 0 and don't move forward. */
2084 if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2089 ret = cpu_to_le32(*(uint32_t *)p);
2092 if (p >= s->data_end)
2093 s->end_transfer_func(s);
2097 static void ide_dummy_transfer_stop(IDEState *s)
2099 s->data_ptr = s->io_buffer;
2100 s->data_end = s->io_buffer;
2101 s->io_buffer[0] = 0xff;
2102 s->io_buffer[1] = 0xff;
2103 s->io_buffer[2] = 0xff;
2104 s->io_buffer[3] = 0xff;
2107 static void ide_reset(IDEState *s)
2110 printf("ide: reset\n");
2114 blk_aio_cancel(s->pio_aiocb);
2115 s->pio_aiocb = NULL;
2118 if (s->drive_kind == IDE_CFATA)
2119 s->mult_sectors = 0;
2121 s->mult_sectors = MAX_MULT_SECTORS;
2138 s->status = READY_STAT | SEEK_STAT;
2142 /* ATAPI specific */
2145 s->cdrom_changed = 0;
2146 s->packet_transfer_size = 0;
2147 s->elementary_transfer_size = 0;
2148 s->io_buffer_index = 0;
2149 s->cd_sector_size = 0;
2154 s->io_buffer_size = 0;
2155 s->req_nb_sectors = 0;
2157 ide_set_signature(s);
2158 /* init the transfer handler so that 0xffff is returned on data
2160 s->end_transfer_func = ide_dummy_transfer_stop;
2161 ide_dummy_transfer_stop(s);
2162 s->media_changed = 0;
2165 void ide_bus_reset(IDEBus *bus)
2169 ide_reset(&bus->ifs[0]);
2170 ide_reset(&bus->ifs[1]);
2173 /* pending async DMA */
2174 if (bus->dma->aiocb) {
2176 printf("aio_cancel\n");
2178 blk_aio_cancel(bus->dma->aiocb);
2179 bus->dma->aiocb = NULL;
2182 /* reset dma provider too */
2183 if (bus->dma->ops->reset) {
2184 bus->dma->ops->reset(bus->dma);
2188 static bool ide_cd_is_tray_open(void *opaque)
2190 return ((IDEState *)opaque)->tray_open;
2193 static bool ide_cd_is_medium_locked(void *opaque)
2195 return ((IDEState *)opaque)->tray_locked;
2198 static void ide_resize_cb(void *opaque)
2200 IDEState *s = opaque;
2201 uint64_t nb_sectors;
2203 if (!s->identify_set) {
2207 blk_get_geometry(s->blk, &nb_sectors);
2208 s->nb_sectors = nb_sectors;
2210 /* Update the identify data buffer. */
2211 if (s->drive_kind == IDE_CFATA) {
2212 ide_cfata_identify_size(s);
2214 /* IDE_CD uses a different set of callbacks entirely. */
2215 assert(s->drive_kind != IDE_CD);
2216 ide_identify_size(s);
2220 static const BlockDevOps ide_cd_block_ops = {
2221 .change_media_cb = ide_cd_change_cb,
2222 .eject_request_cb = ide_cd_eject_request_cb,
2223 .is_tray_open = ide_cd_is_tray_open,
2224 .is_medium_locked = ide_cd_is_medium_locked,
2227 static const BlockDevOps ide_hd_block_ops = {
2228 .resize_cb = ide_resize_cb,
2231 int ide_init_drive(IDEState *s, BlockBackend *blk, IDEDriveKind kind,
2232 const char *version, const char *serial, const char *model,
2234 uint32_t cylinders, uint32_t heads, uint32_t secs,
2237 uint64_t nb_sectors;
2240 s->drive_kind = kind;
2242 blk_get_geometry(blk, &nb_sectors);
2243 s->cylinders = cylinders;
2246 s->chs_trans = chs_trans;
2247 s->nb_sectors = nb_sectors;
2249 /* The SMART values should be preserved across power cycles
2251 s->smart_enabled = 1;
2252 s->smart_autosave = 1;
2253 s->smart_errors = 0;
2254 s->smart_selftest_count = 0;
2255 if (kind == IDE_CD) {
2256 blk_set_dev_ops(blk, &ide_cd_block_ops, s);
2257 blk_set_guest_block_size(blk, 2048);
2259 if (!blk_is_inserted(s->blk)) {
2260 error_report("Device needs media, but drive is empty");
2263 if (blk_is_read_only(blk)) {
2264 error_report("Can't use a read-only drive");
2267 blk_set_dev_ops(blk, &ide_hd_block_ops, s);
2270 pstrcpy(s->drive_serial_str, sizeof(s->drive_serial_str), serial);
2272 snprintf(s->drive_serial_str, sizeof(s->drive_serial_str),
2273 "QM%05d", s->drive_serial);
2276 pstrcpy(s->drive_model_str, sizeof(s->drive_model_str), model);
2280 strcpy(s->drive_model_str, "QEMU DVD-ROM");
2283 strcpy(s->drive_model_str, "QEMU MICRODRIVE");
2286 strcpy(s->drive_model_str, "QEMU HARDDISK");
2292 pstrcpy(s->version, sizeof(s->version), version);
2294 pstrcpy(s->version, sizeof(s->version), qemu_get_version());
2298 blk_iostatus_enable(blk);
2302 static void ide_init1(IDEBus *bus, int unit)
2304 static int drive_serial = 1;
2305 IDEState *s = &bus->ifs[unit];
2309 s->drive_serial = drive_serial++;
2310 /* we need at least 2k alignment for accessing CDROMs using O_DIRECT */
2311 s->io_buffer_total_len = IDE_DMA_BUF_SECTORS*512 + 4;
2312 s->io_buffer = qemu_memalign(2048, s->io_buffer_total_len);
2313 memset(s->io_buffer, 0, s->io_buffer_total_len);
2315 s->smart_selftest_data = blk_blockalign(s->blk, 512);
2316 memset(s->smart_selftest_data, 0, 512);
2318 s->sector_write_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
2319 ide_sector_write_timer_cb, s);
2322 static int ide_nop_int(IDEDMA *dma, int x)
2327 static void ide_nop(IDEDMA *dma)
2331 static int32_t ide_nop_int32(IDEDMA *dma, int x)
2336 static const IDEDMAOps ide_dma_nop_ops = {
2337 .prepare_buf = ide_nop_int32,
2338 .restart_dma = ide_nop,
2339 .rw_buf = ide_nop_int,
2342 static void ide_restart_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
2344 s->unit = s->bus->retry_unit;
2345 ide_set_sector(s, s->bus->retry_sector_num);
2346 s->nsector = s->bus->retry_nsector;
2347 s->bus->dma->ops->restart_dma(s->bus->dma);
2348 s->io_buffer_size = 0;
2349 s->dma_cmd = dma_cmd;
2350 ide_start_dma(s, ide_dma_cb);
2353 static void ide_restart_bh(void *opaque)
2355 IDEBus *bus = opaque;
2360 qemu_bh_delete(bus->bh);
2363 error_status = bus->error_status;
2364 if (bus->error_status == 0) {
2368 s = idebus_active_if(bus);
2369 is_read = (bus->error_status & IDE_RETRY_READ) != 0;
2371 /* The error status must be cleared before resubmitting the request: The
2372 * request may fail again, and this case can only be distinguished if the
2373 * called function can set a new error status. */
2374 bus->error_status = 0;
2376 if (error_status & IDE_RETRY_DMA) {
2377 if (error_status & IDE_RETRY_TRIM) {
2378 ide_restart_dma(s, IDE_DMA_TRIM);
2380 ide_restart_dma(s, is_read ? IDE_DMA_READ : IDE_DMA_WRITE);
2382 } else if (error_status & IDE_RETRY_PIO) {
2386 ide_sector_write(s);
2388 } else if (error_status & IDE_RETRY_FLUSH) {
2392 * We've not got any bits to tell us about ATAPI - but
2393 * we do have the end_transfer_func that tells us what
2394 * we're trying to do.
2396 if (s->end_transfer_func == ide_atapi_cmd) {
2397 ide_atapi_dma_restart(s);
2402 static void ide_restart_cb(void *opaque, int running, RunState state)
2404 IDEBus *bus = opaque;
2410 bus->bh = qemu_bh_new(ide_restart_bh, bus);
2411 qemu_bh_schedule(bus->bh);
2415 void ide_register_restart_cb(IDEBus *bus)
2417 if (bus->dma->ops->restart_dma) {
2418 qemu_add_vm_change_state_handler(ide_restart_cb, bus);
2422 static IDEDMA ide_dma_nop = {
2423 .ops = &ide_dma_nop_ops,
2427 void ide_init2(IDEBus *bus, qemu_irq irq)
2431 for(i = 0; i < 2; i++) {
2433 ide_reset(&bus->ifs[i]);
2436 bus->dma = &ide_dma_nop;
2439 static const MemoryRegionPortio ide_portio_list[] = {
2440 { 0, 8, 1, .read = ide_ioport_read, .write = ide_ioport_write },
2441 { 0, 2, 2, .read = ide_data_readw, .write = ide_data_writew },
2442 { 0, 4, 4, .read = ide_data_readl, .write = ide_data_writel },
2443 PORTIO_END_OF_LIST(),
2446 static const MemoryRegionPortio ide_portio2_list[] = {
2447 { 0, 1, 1, .read = ide_status_read, .write = ide_cmd_write },
2448 PORTIO_END_OF_LIST(),
2451 void ide_init_ioport(IDEBus *bus, ISADevice *dev, int iobase, int iobase2)
2453 /* ??? Assume only ISA and PCI configurations, and that the PCI-ISA
2454 bridge has been setup properly to always register with ISA. */
2455 isa_register_portio_list(dev, iobase, ide_portio_list, bus, "ide");
2458 isa_register_portio_list(dev, iobase2, ide_portio2_list, bus, "ide");
2462 static bool is_identify_set(void *opaque, int version_id)
2464 IDEState *s = opaque;
2466 return s->identify_set != 0;
2469 static EndTransferFunc* transfer_end_table[] = {
2473 ide_atapi_cmd_reply_end,
2475 ide_dummy_transfer_stop,
2478 static int transfer_end_table_idx(EndTransferFunc *fn)
2482 for (i = 0; i < ARRAY_SIZE(transfer_end_table); i++)
2483 if (transfer_end_table[i] == fn)
2489 static int ide_drive_post_load(void *opaque, int version_id)
2491 IDEState *s = opaque;
2493 if (s->blk && s->identify_set) {
2494 blk_set_enable_write_cache(s->blk, !!(s->identify_data[85] & (1 << 5)));
2499 static int ide_drive_pio_post_load(void *opaque, int version_id)
2501 IDEState *s = opaque;
2503 if (s->end_transfer_fn_idx >= ARRAY_SIZE(transfer_end_table)) {
2506 s->end_transfer_func = transfer_end_table[s->end_transfer_fn_idx];
2507 s->data_ptr = s->io_buffer + s->cur_io_buffer_offset;
2508 s->data_end = s->data_ptr + s->cur_io_buffer_len;
2509 s->atapi_dma = s->feature & 1; /* as per cmd_packet */
2514 static void ide_drive_pio_pre_save(void *opaque)
2516 IDEState *s = opaque;
2519 s->cur_io_buffer_offset = s->data_ptr - s->io_buffer;
2520 s->cur_io_buffer_len = s->data_end - s->data_ptr;
2522 idx = transfer_end_table_idx(s->end_transfer_func);
2524 fprintf(stderr, "%s: invalid end_transfer_func for DRQ_STAT\n",
2526 s->end_transfer_fn_idx = 2;
2528 s->end_transfer_fn_idx = idx;
2532 static bool ide_drive_pio_state_needed(void *opaque)
2534 IDEState *s = opaque;
2536 return ((s->status & DRQ_STAT) != 0)
2537 || (s->bus->error_status & IDE_RETRY_PIO);
2540 static bool ide_tray_state_needed(void *opaque)
2542 IDEState *s = opaque;
2544 return s->tray_open || s->tray_locked;
2547 static bool ide_atapi_gesn_needed(void *opaque)
2549 IDEState *s = opaque;
2551 return s->events.new_media || s->events.eject_request;
2554 static bool ide_error_needed(void *opaque)
2556 IDEBus *bus = opaque;
2558 return (bus->error_status != 0);
2561 /* Fields for GET_EVENT_STATUS_NOTIFICATION ATAPI command */
2562 static const VMStateDescription vmstate_ide_atapi_gesn_state = {
.name = "ide_drive/atapi/gesn_state",
2565 .minimum_version_id = 1,
2566 .fields = (VMStateField[]) {
2567 VMSTATE_BOOL(events.new_media, IDEState),
2568 VMSTATE_BOOL(events.eject_request, IDEState),
2569 VMSTATE_END_OF_LIST()
2573 static const VMStateDescription vmstate_ide_tray_state = {
2574 .name = "ide_drive/tray_state",
2576 .minimum_version_id = 1,
2577 .fields = (VMStateField[]) {
2578 VMSTATE_BOOL(tray_open, IDEState),
2579 VMSTATE_BOOL(tray_locked, IDEState),
2580 VMSTATE_END_OF_LIST()
2584 static const VMStateDescription vmstate_ide_drive_pio_state = {
2585 .name = "ide_drive/pio_state",
2587 .minimum_version_id = 1,
2588 .pre_save = ide_drive_pio_pre_save,
2589 .post_load = ide_drive_pio_post_load,
2590 .fields = (VMStateField[]) {
2591 VMSTATE_INT32(req_nb_sectors, IDEState),
2592 VMSTATE_VARRAY_INT32(io_buffer, IDEState, io_buffer_total_len, 1,
2593 vmstate_info_uint8, uint8_t),
2594 VMSTATE_INT32(cur_io_buffer_offset, IDEState),
2595 VMSTATE_INT32(cur_io_buffer_len, IDEState),
2596 VMSTATE_UINT8(end_transfer_fn_idx, IDEState),
2597 VMSTATE_INT32(elementary_transfer_size, IDEState),
2598 VMSTATE_INT32(packet_transfer_size, IDEState),
2599 VMSTATE_END_OF_LIST()
2603 const VMStateDescription vmstate_ide_drive = {
2604 .name = "ide_drive",
2606 .minimum_version_id = 0,
2607 .post_load = ide_drive_post_load,
2608 .fields = (VMStateField[]) {
2609 VMSTATE_INT32(mult_sectors, IDEState),
2610 VMSTATE_INT32(identify_set, IDEState),
2611 VMSTATE_BUFFER_TEST(identify_data, IDEState, is_identify_set),
2612 VMSTATE_UINT8(feature, IDEState),
2613 VMSTATE_UINT8(error, IDEState),
2614 VMSTATE_UINT32(nsector, IDEState),
2615 VMSTATE_UINT8(sector, IDEState),
2616 VMSTATE_UINT8(lcyl, IDEState),
2617 VMSTATE_UINT8(hcyl, IDEState),
2618 VMSTATE_UINT8(hob_feature, IDEState),
2619 VMSTATE_UINT8(hob_sector, IDEState),
2620 VMSTATE_UINT8(hob_nsector, IDEState),
2621 VMSTATE_UINT8(hob_lcyl, IDEState),
2622 VMSTATE_UINT8(hob_hcyl, IDEState),
2623 VMSTATE_UINT8(select, IDEState),
2624 VMSTATE_UINT8(status, IDEState),
2625 VMSTATE_UINT8(lba48, IDEState),
2626 VMSTATE_UINT8(sense_key, IDEState),
2627 VMSTATE_UINT8(asc, IDEState),
2628 VMSTATE_UINT8_V(cdrom_changed, IDEState, 3),
2629 VMSTATE_END_OF_LIST()
2631 .subsections = (VMStateSubsection []) {
2633 .vmsd = &vmstate_ide_drive_pio_state,
2634 .needed = ide_drive_pio_state_needed,
2636 .vmsd = &vmstate_ide_tray_state,
2637 .needed = ide_tray_state_needed,
2639 .vmsd = &vmstate_ide_atapi_gesn_state,
2640 .needed = ide_atapi_gesn_needed,
2647 static const VMStateDescription vmstate_ide_error_status = {
.name = "ide_bus/error",
2650 .minimum_version_id = 1,
2651 .fields = (VMStateField[]) {
2652 VMSTATE_INT32(error_status, IDEBus),
2653 VMSTATE_INT64_V(retry_sector_num, IDEBus, 2),
2654 VMSTATE_UINT32_V(retry_nsector, IDEBus, 2),
2655 VMSTATE_UINT8_V(retry_unit, IDEBus, 2),
2656 VMSTATE_END_OF_LIST()
2660 const VMStateDescription vmstate_ide_bus = {
2663 .minimum_version_id = 1,
2664 .fields = (VMStateField[]) {
2665 VMSTATE_UINT8(cmd, IDEBus),
2666 VMSTATE_UINT8(unit, IDEBus),
2667 VMSTATE_END_OF_LIST()
2669 .subsections = (VMStateSubsection []) {
2671 .vmsd = &vmstate_ide_error_status,
2672 .needed = ide_error_needed,
2679 void ide_drive_get(DriveInfo **hd, int n)
2682 int highest_bus = drive_get_max_bus(IF_IDE) + 1;
2683 int max_devs = drive_get_max_devs(IF_IDE);
2684 int n_buses = max_devs ? (n / max_devs) : n;
2687 * Note: The number of actual buses available is not known.
2688 * We compute this based on the size of the DriveInfo* array, n.
2689 * If it is less than max_devs * <num_real_buses>,
2690 * We will stop looking for drives prematurely instead of overfilling
2694 if (highest_bus > n_buses) {
2695 error_report("Too many IDE buses defined (%d > %d)",
2696 highest_bus, n_buses);
2700 for (i = 0; i < n; i++) {
2701 hd[i] = drive_get_by_index(IF_IDE, i);