/*
 * QEMU IDE disk and CD/DVD-ROM Emulator
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2006 Openedhand Ltd.
7 * Permission is hereby granted, free of charge, to any person obtaining a copy
8 * of this software and associated documentation files (the "Software"), to deal
9 * in the Software without restriction, including without limitation the rights
10 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 * copies of the Software, and to permit persons to whom the Software is
12 * furnished to do so, subject to the following conditions:
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
26 #include "qemu/osdep.h"
28 #include "hw/isa/isa.h"
29 #include "qemu/error-report.h"
30 #include "qemu/timer.h"
31 #include "sysemu/sysemu.h"
32 #include "sysemu/blockdev.h"
33 #include "sysemu/dma.h"
34 #include "hw/block/block.h"
35 #include "sysemu/block-backend.h"
36 #include "qapi/error.h"
37 #include "qemu/cutils.h"
39 #include "hw/ide/internal.h"
/* These values were based on a Seagate ST3500418AS but have been modified
   to make more sense in QEMU */
static const int smart_attributes[][12] = {
    /* id,  flags, hflags, val, wrst, raw (6 bytes), threshold */
    /* raw read error rate*/
    { 0x01, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06},
    /* spin up */
    { 0x03, 0x03, 0x00, 0x64, 0x64, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
    /* start stop count */
    { 0x04, 0x02, 0x00, 0x64, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14},
    /* remapped sectors */
    { 0x05, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24},
    /* power on hours */
    { 0x09, 0x03, 0x00, 0x64, 0x64, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
    /* power cycle count */
    { 0x0c, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
    /* airflow-temperature-celsius */
    { 190,  0x03, 0x00, 0x45, 0x45, 0x1f, 0x00, 0x1f, 0x1f, 0x00, 0x00, 0x32},
};
62 const char *IDE_DMA_CMD_lookup[IDE_DMA__COUNT] = {
63 [IDE_DMA_READ] = "DMA READ",
64 [IDE_DMA_WRITE] = "DMA WRITE",
65 [IDE_DMA_TRIM] = "DMA TRIM",
66 [IDE_DMA_ATAPI] = "DMA ATAPI"
69 static const char *IDE_DMA_CMD_str(enum ide_dma_cmd enval)
71 if ((unsigned)enval < IDE_DMA__COUNT) {
72 return IDE_DMA_CMD_lookup[enval];
74 return "DMA UNKNOWN CMD";
77 static void ide_dummy_transfer_stop(IDEState *s);
/*
 * Fill @str with exactly @len bytes: characters are taken from the
 * NUL-terminated @src and space-padded once @src runs out.  Bytes are
 * stored pair-swapped (str[i ^ 1]) to match the 16-bit word byte order
 * that ATA IDENTIFY string fields require.  @str is not NUL-terminated.
 */
static void padstr(char *str, const char *src, int len)
{
    int i, v;
    for(i = 0; i < len; i++) {
        if (*src)
            v = *src++;
        else
            v = ' ';
        str[i ^ 1] = v;
    }
}
/* Store @v into *@p as a little-endian 16-bit word (body not visible in
 * this chunk; presumably *p = cpu_to_le16(v) — confirm against full file). */
static void put_le16(uint16_t *p, unsigned int v)
96 static void ide_identify_size(IDEState *s)
98 uint16_t *p = (uint16_t *)s->identify_data;
99 put_le16(p + 60, s->nb_sectors);
100 put_le16(p + 61, s->nb_sectors >> 16);
101 put_le16(p + 100, s->nb_sectors);
102 put_le16(p + 101, s->nb_sectors >> 16);
103 put_le16(p + 102, s->nb_sectors >> 32);
104 put_le16(p + 103, s->nb_sectors >> 48);
/*
 * Build the ATA IDENTIFY DEVICE response for a hard disk into
 * s->identify_data and copy it to s->io_buffer.  The page is cached:
 * when identify_set is already true only the capacity words are
 * refreshed via ide_identify_size().
 * NOTE(review): several structural lines (braces, #endif, else arms)
 * are not visible in this chunk — comments only, code untouched.
 */
static void ide_identify(IDEState *s)
    unsigned int oldsize;
    /* per-unit device config: slave or master of this bus */
    IDEDevice *dev = s->unit ? s->bus->slave : s->bus->master;
    p = (uint16_t *)s->identify_data;
    if (s->identify_set) {
    memset(p, 0, sizeof(s->identify_data));
    put_le16(p + 0, 0x0040); /* word 0: fixed (non-removable) ATA device */
    put_le16(p + 1, s->cylinders);
    put_le16(p + 3, s->heads);
    put_le16(p + 4, 512 * s->sectors); /* XXX: retired, remove ? */
    put_le16(p + 5, 512); /* XXX: retired, remove ? */
    put_le16(p + 6, s->sectors);
    padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
    put_le16(p + 20, 3); /* XXX: retired, remove ? */
    put_le16(p + 21, 512); /* cache size in sectors */
    put_le16(p + 22, 4); /* ecc bytes */
    padstr((char *)(p + 23), s->version, 8); /* firmware version */
    padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
#if MAX_MULT_SECTORS > 1
    put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS); /* max sectors per R/W MULTIPLE */
    put_le16(p + 48, 1); /* dword I/O */
    put_le16(p + 49, (1 << 11) | (1 << 9) | (1 << 8)); /* DMA and LBA supported */
    put_le16(p + 51, 0x200); /* PIO transfer cycle */
    put_le16(p + 52, 0x200); /* DMA transfer cycle */
    put_le16(p + 53, 1 | (1 << 1) | (1 << 2)); /* words 54-58,64-70,88 are valid */
    put_le16(p + 54, s->cylinders);
    put_le16(p + 55, s->heads);
    put_le16(p + 56, s->sectors);
    /* current capacity in sectors from the current CHS translation */
    oldsize = s->cylinders * s->heads * s->sectors;
    put_le16(p + 57, oldsize);
    put_le16(p + 58, oldsize >> 16);
    put_le16(p + 59, 0x100 | s->mult_sectors); /* current multiple setting valid */
    /* *(p + 60) := nb_sectors       -- see ide_identify_size */
    /* *(p + 61) := nb_sectors >> 16 -- see ide_identify_size */
    put_le16(p + 62, 0x07); /* single word dma0-2 supported */
    put_le16(p + 63, 0x07); /* mdma0-2 supported */
    put_le16(p + 64, 0x03); /* pio3-4 supported */
    put_le16(p + 65, 120); /* min MDMA cycle time, ns */
    put_le16(p + 66, 120); /* recommended MDMA cycle time, ns */
    put_le16(p + 67, 120); /* min PIO cycle time w/o flow control, ns */
    put_le16(p + 68, 120); /* min PIO cycle time with IORDY, ns */
    if (dev && dev->conf.discard_granularity) {
        put_le16(p + 69, (1 << 14)); /* determinate TRIM behavior */
        put_le16(p + 75, s->ncq_queues - 1); /* NCQ queue depth - 1 */
        put_le16(p + 76, (1 << 8)); /* SATA: NCQ supported */
    put_le16(p + 80, 0xf0); /* ata3 -> ata6 supported */
    put_le16(p + 81, 0x16); /* conforms to ata5 */
    /* 14=NOP supported, 5=WCACHE supported, 0=SMART supported */
    put_le16(p + 82, (1 << 14) | (1 << 5) | 1);
    /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
    put_le16(p + 83, (1 << 14) | (1 << 13) | (1 <<12) | (1 << 10));
    /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
        put_le16(p + 84, (1 << 14) | (1 << 8) | 0); /* WWN present */
        put_le16(p + 84, (1 << 14) | 0); /* no WWN */
    /* 14 = NOP supported, 5=WCACHE enabled, 0=SMART feature set enabled */
    if (blk_enable_write_cache(s->blk)) {
        put_le16(p + 85, (1 << 14) | (1 << 5) | 1);
        put_le16(p + 85, (1 << 14) | 1);
    /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
    put_le16(p + 86, (1 << 13) | (1 <<12) | (1 << 10));
    /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
        put_le16(p + 87, (1 << 14) | (1 << 8) | 0); /* WWN present */
        put_le16(p + 87, (1 << 14) | 0); /* no WWN */
    put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
    put_le16(p + 93, 1 | (1 << 14) | 0x2000); /* hardware reset result */
    /* *(p + 100) := nb_sectors       -- see ide_identify_size */
    /* *(p + 101) := nb_sectors >> 16 -- see ide_identify_size */
    /* *(p + 102) := nb_sectors >> 32 -- see ide_identify_size */
    /* *(p + 103) := nb_sectors >> 48 -- see ide_identify_size */
    if (dev && dev->conf.physical_block_size)
        put_le16(p + 106, 0x6000 | get_physical_block_exp(&dev->conf));
    /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
    put_le16(p + 108, s->wwn >> 48);
    put_le16(p + 109, s->wwn >> 32);
    put_le16(p + 110, s->wwn >> 16);
    put_le16(p + 111, s->wwn);
    if (dev && dev->conf.discard_granularity) {
        put_le16(p + 169, 1); /* TRIM support */
        put_le16(p + 217, dev->rotation_rate); /* Nominal media rotation rate */
    /* fill in the (possibly updated) capacity words */
    ide_identify_size(s);
    memcpy(s->io_buffer, p, sizeof(s->identify_data));
/*
 * Build the IDENTIFY PACKET DEVICE response for an ATAPI CD/DVD drive
 * into s->identify_data and copy it to s->io_buffer.  The page is
 * cached via identify_set.  Branch structure (UDMA vs. non-UDMA,
 * WWN-present) is partially missing from this chunk; code untouched.
 */
static void ide_atapi_identify(IDEState *s)
    p = (uint16_t *)s->identify_data;
    if (s->identify_set) {
    memset(p, 0, sizeof(s->identify_data));
    /* Removable CDROM, 50us response, 12 byte packets */
    put_le16(p + 0, (2 << 14) | (5 << 8) | (1 << 7) | (2 << 5) | (0 << 0));
    padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
    put_le16(p + 20, 3); /* buffer type */
    put_le16(p + 21, 512); /* cache size in sectors */
    put_le16(p + 22, 4); /* ecc bytes */
    padstr((char *)(p + 23), s->version, 8); /* firmware version */
    padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
    put_le16(p + 48, 1); /* dword I/O (XXX: should not be set on CDROM) */
    put_le16(p + 49, 1 << 9 | 1 << 8); /* DMA and LBA supported */
    put_le16(p + 53, 7); /* words 64-70, 54-58, 88 valid */
    put_le16(p + 62, 7); /* single word dma0-2 supported */
    put_le16(p + 63, 7); /* mdma0-2 supported */
    put_le16(p + 49, 1 << 9); /* LBA supported, no DMA */
    put_le16(p + 53, 3); /* words 64-70, 54-58 valid */
    put_le16(p + 63, 0x103); /* DMA modes XXX: may be incorrect */
    put_le16(p + 64, 3); /* pio3-4 supported */
    put_le16(p + 65, 0xb4); /* minimum DMA multiword tx cycle time */
    put_le16(p + 66, 0xb4); /* recommended DMA multiword tx cycle time */
    put_le16(p + 67, 0x12c); /* minimum PIO cycle time without flow control */
    put_le16(p + 68, 0xb4); /* minimum PIO cycle time with IORDY flow control */
    put_le16(p + 71, 30); /* in ns */
    put_le16(p + 72, 30); /* in ns */
    put_le16(p + 75, s->ncq_queues - 1); /* NCQ queue depth - 1 */
    put_le16(p + 76, (1 << 8)); /* SATA: NCQ supported */
    put_le16(p + 80, 0x1e); /* support up to ATA/ATAPI-4 */
    put_le16(p + 84, (1 << 8)); /* supports WWN for words 108-111 */
    put_le16(p + 87, (1 << 8)); /* WWN enabled */
    put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
    /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
    put_le16(p + 108, s->wwn >> 48);
    put_le16(p + 109, s->wwn >> 32);
    put_le16(p + 110, s->wwn >> 16);
    put_le16(p + 111, s->wwn);
    memcpy(s->io_buffer, p, sizeof(s->identify_data));
290 static void ide_cfata_identify_size(IDEState *s)
292 uint16_t *p = (uint16_t *)s->identify_data;
293 put_le16(p + 7, s->nb_sectors >> 16); /* Sectors per card */
294 put_le16(p + 8, s->nb_sectors); /* Sectors per card */
295 put_le16(p + 60, s->nb_sectors); /* Total LBA sectors */
296 put_le16(p + 61, s->nb_sectors >> 16); /* Total LBA sectors */
/*
 * Build the IDENTIFY response for a CompactFlash (CFATA) card into
 * s->identify_data and copy it to s->io_buffer; cached via identify_set.
 * Word meanings follow the CF specification (0x848a signature, etc.).
 */
static void ide_cfata_identify(IDEState *s)
    p = (uint16_t *)s->identify_data;
    if (s->identify_set) {
    memset(p, 0, sizeof(s->identify_data));
    /* capacity implied by the current CHS translation */
    cur_sec = s->cylinders * s->heads * s->sectors;
    put_le16(p + 0, 0x848a);			/* CF Storage Card signature */
    put_le16(p + 1, s->cylinders);		/* Default cylinders */
    put_le16(p + 3, s->heads);			/* Default heads */
    put_le16(p + 6, s->sectors);		/* Default sectors per track */
    /* *(p + 7) := nb_sectors >> 16 -- see ide_cfata_identify_size */
    /* *(p + 8) := nb_sectors       -- see ide_cfata_identify_size */
    padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
    put_le16(p + 22, 0x0004);			/* ECC bytes */
    padstr((char *) (p + 23), s->version, 8);	/* Firmware Revision */
    padstr((char *) (p + 27), s->drive_model_str, 40);/* Model number */
#if MAX_MULT_SECTORS > 1
    put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
    put_le16(p + 47, 0x0000);
    put_le16(p + 49, 0x0f00);			/* Capabilities */
    put_le16(p + 51, 0x0002);			/* PIO cycle timing mode */
    put_le16(p + 52, 0x0001);			/* DMA cycle timing mode */
    put_le16(p + 53, 0x0003);			/* Translation params valid */
    put_le16(p + 54, s->cylinders);		/* Current cylinders */
    put_le16(p + 55, s->heads);			/* Current heads */
    put_le16(p + 56, s->sectors);		/* Current sectors */
    put_le16(p + 57, cur_sec);			/* Current capacity */
    put_le16(p + 58, cur_sec >> 16);		/* Current capacity */
    if (s->mult_sectors)			/* Multiple sector setting */
        put_le16(p + 59, 0x100 | s->mult_sectors);
    /* *(p + 60) := nb_sectors       -- see ide_cfata_identify_size */
    /* *(p + 61) := nb_sectors >> 16 -- see ide_cfata_identify_size */
    put_le16(p + 63, 0x0203);			/* Multiword DMA capability */
    put_le16(p + 64, 0x0001);			/* Flow Control PIO support */
    put_le16(p + 65, 0x0096);			/* Min. Multiword DMA cycle */
    put_le16(p + 66, 0x0096);			/* Rec. Multiword DMA cycle */
    put_le16(p + 68, 0x00b4);			/* Min. PIO cycle time */
    put_le16(p + 82, 0x400c);			/* Command Set supported */
    put_le16(p + 83, 0x7068);			/* Command Set supported */
    put_le16(p + 84, 0x4000);			/* Features supported */
    put_le16(p + 85, 0x000c);			/* Command Set enabled */
    put_le16(p + 86, 0x7044);			/* Command Set enabled */
    put_le16(p + 87, 0x4000);			/* Features enabled */
    put_le16(p + 91, 0x4060);			/* Current APM level */
    put_le16(p + 129, 0x0002);			/* Current features option */
    put_le16(p + 130, 0x0005);			/* Reassigned sectors */
    put_le16(p + 131, 0x0001);			/* Initial power mode */
    put_le16(p + 132, 0x0000);			/* User signature */
    put_le16(p + 160, 0x8100);			/* Power requirement */
    put_le16(p + 161, 0x8001);			/* CF command set */
    /* fill in the (possibly updated) capacity words */
    ide_cfata_identify_size(s);
    memcpy(s->io_buffer, p, sizeof(s->identify_data));
/* Load the ATA signature registers after a (device) reset; the values
 * let the guest distinguish ATA disks from ATAPI devices (ATA8-ACS
 * "Device Signatures").  Body is partially missing from this chunk. */
static void ide_set_signature(IDEState *s)
    s->select &= 0xf0; /* clear head */
    /* ATAPI devices get the packet-device signature */
    if (s->drive_kind == IDE_CD) {
384 static bool ide_sect_range_ok(IDEState *s,
385 uint64_t sector, uint64_t nb_sectors)
387 uint64_t total_sectors;
389 blk_get_geometry(s->blk, &total_sectors);
390 if (sector > total_sectors || nb_sectors > total_sectors - sector) {
/* Per-request state for an asynchronous TRIM (Data Set Management);
 * allocated via blk_aio_get with trim_aiocb_info below.  Struct body is
 * not visible in this chunk. */
typedef struct TrimAIOCB {
/*
 * Asynchronously cancel an in-flight TRIM.  Advancing i/j to the last
 * entry makes ide_issue_trim_cb fall out of its loops; any discard that
 * is already submitted is cancelled via the nested aiocb.
 */
static void trim_aio_cancel(BlockAIOCB *acb)
    TrimAIOCB *iocb = container_of(acb, TrimAIOCB, common);
    /* Exit the loop so ide_issue_trim_cb will not continue  */
    iocb->j = iocb->qiov->niov - 1;
    iocb->i = (iocb->qiov->iov[iocb->j].iov_len / 8) - 1;
    iocb->ret = -ECANCELED;
    /* forward the cancel to the discard currently in flight, if any */
    blk_aio_cancel_async(iocb->aiocb);
422 static const AIOCBInfo trim_aiocb_info = {
423 .aiocb_size = sizeof(TrimAIOCB),
424 .cancel_async = trim_aio_cancel,
/*
 * Bottom half run when a whole TRIM request has finished: deliver the
 * final status to the caller's completion callback, then tear down the
 * bottom half and drop the AIOCB reference.
 */
static void ide_trim_bh_cb(void *opaque)
    TrimAIOCB *iocb = opaque;
    iocb->common.cb(iocb->common.opaque, iocb->ret);
    qemu_bh_delete(iocb->bh);
    qemu_aio_unref(iocb);
/*
 * Completion callback driving the TRIM state machine.  Walks the qiov
 * (j indexes iovecs, i indexes 8-byte entries inside one iovec),
 * submitting one discard per DSM range entry; re-entered as each
 * discard completes.  When all entries are consumed (or an error/cancel
 * occurred), the bottom half delivers the final status.
 */
static void ide_issue_trim_cb(void *opaque, int ret)
    TrimAIOCB *iocb = opaque;
    IDEState *s = iocb->s;
    while (iocb->j < iocb->qiov->niov) {
        while (++iocb->i < iocb->qiov->iov[j].iov_len / 8) {
            uint64_t *buffer = iocb->qiov->iov[j].iov_base;
            /* 6-byte LBA + 2-byte range per entry */
            uint64_t entry = le64_to_cpu(buffer[i]);
            uint64_t sector = entry & 0x0000ffffffffffffULL;
            uint16_t count = entry >> 48;
            /* reject ranges extending past the end of the device */
            if (!ide_sect_range_ok(s, sector, count)) {
            /* Got an entry! Submit and exit.  */
            iocb->aiocb = blk_aio_pdiscard(s->blk,
                                           sector << BDRV_SECTOR_BITS,
                                           count << BDRV_SECTOR_BITS,
                                           ide_issue_trim_cb, opaque);
    /* done (or cancelled/error): report final status via bottom half */
    qemu_bh_schedule(iocb->bh);
/*
 * Entry point for a TRIM DMA operation: allocate the TrimAIOCB, create
 * its completion bottom half and start the state machine by invoking
 * ide_issue_trim_cb with ret == 0.  @opaque is the IDEState; @cb is
 * called with the overall result once every range has been discarded.
 */
BlockAIOCB *ide_issue_trim(
        int64_t offset, QEMUIOVector *qiov,
        BlockCompletionFunc *cb, void *cb_opaque, void *opaque)
    IDEState *s = opaque;
    iocb = blk_aio_get(&trim_aiocb_info, s->blk, cb, cb_opaque);
    iocb->bh = qemu_bh_new(ide_trim_bh_cb, iocb);
    /* kick off the first discard */
    ide_issue_trim_cb(iocb, 0);
    return &iocb->common;
/* Abort the current command: stop any PIO transfer and report an error
 * status to the guest (error register setup not visible in this chunk). */
void ide_abort_command(IDEState *s)
    ide_transfer_stop(s);
    s->status = READY_STAT | ERR_STAT;
511 static void ide_set_retry(IDEState *s)
513 s->bus->retry_unit = s->unit;
514 s->bus->retry_sector_num = ide_get_sector(s);
515 s->bus->retry_nsector = s->nsector;
518 static void ide_clear_retry(IDEState *s)
520 s->bus->retry_unit = -1;
521 s->bus->retry_sector_num = 0;
522 s->bus->retry_nsector = 0;
/* prepare data transfer and tell what to do after */
/*
 * Arm a PIO transfer of @size bytes at @buf: raise DRQ (unless an error
 * is pending) and register @end_transfer_func to run when the guest has
 * consumed/produced the data.  If the DMA backend implements
 * pio_transfer, the transfer is pushed through it immediately instead.
 */
void ide_transfer_start(IDEState *s, uint8_t *buf, int size,
                        EndTransferFunc *end_transfer_func)
    s->data_end = buf + size;
    if (!(s->status & ERR_STAT)) {
        s->status |= DRQ_STAT;
    if (!s->bus->dma->ops->pio_transfer) {
        s->end_transfer_func = end_transfer_func;
    /* backend moves the data itself, then we complete synchronously */
    s->bus->dma->ops->pio_transfer(s->bus->dma);
    end_transfer_func(s);
543 static void ide_cmd_done(IDEState *s)
545 if (s->bus->dma->ops->cmd_done) {
546 s->bus->dma->ops->cmd_done(s->bus->dma);
550 static void ide_transfer_halt(IDEState *s)
552 s->end_transfer_func = ide_transfer_stop;
553 s->data_ptr = s->io_buffer;
554 s->data_end = s->io_buffer;
555 s->status &= ~DRQ_STAT;
/* Public wrapper: halt the current PIO transfer (remaining body not
 * visible in this chunk; presumably also calls ide_cmd_done — confirm). */
void ide_transfer_stop(IDEState *s)
    ide_transfer_halt(s);
/*
 * Decode the current sector number from the task-file registers.
 * Three addressing modes: LBA28 (select bit 6 set, head bits hold
 * LBA 27:24), LBA48 (HOB registers hold the high bytes) and CHS
 * (cylinder/head/sector converted to a linear sector number).
 */
int64_t ide_get_sector(IDEState *s)
    if (s->select & 0x40) {
        /* LBA28: low 4 bits of select are LBA bits 27:24 */
        sector_num = ((s->select & 0x0f) << 24) | (s->hcyl << 16) |
            (s->lcyl << 8) | s->sector;
        /* LBA48: HOB registers supply bytes 5:3 */
        sector_num = ((int64_t)s->hob_hcyl << 40) |
            ((int64_t) s->hob_lcyl << 32) |
            ((int64_t) s->hob_sector << 24) |
            ((int64_t) s->hcyl << 16) |
            ((int64_t) s->lcyl << 8) | s->sector;
        /* CHS: sector register is 1-based */
        sector_num = ((s->hcyl << 8) | s->lcyl) * s->heads * s->sectors +
            (s->select & 0x0f) * s->sectors + (s->sector - 1);
/*
 * Encode @sector_num back into the task-file registers, using the same
 * addressing mode (LBA28 / LBA48 / CHS) that ide_get_sector decodes.
 */
void ide_set_sector(IDEState *s, int64_t sector_num)
    if (s->select & 0x40) {
        /* LBA28: bits 27:24 go into the low nibble of select */
        s->select = (s->select & 0xf0) | (sector_num >> 24);
        s->hcyl = (sector_num >> 16);
        s->lcyl = (sector_num >> 8);
        s->sector = (sector_num);
        /* LBA48: high bytes go into the HOB registers */
        s->sector = sector_num;
        s->lcyl = sector_num >> 8;
        s->hcyl = sector_num >> 16;
        s->hob_sector = sector_num >> 24;
        s->hob_lcyl = sector_num >> 32;
        s->hob_hcyl = sector_num >> 40;
    /* CHS: split into cylinder, head and 1-based sector */
    cyl = sector_num / (s->heads * s->sectors);
    r = sector_num % (s->heads * s->sectors);
    s->select = (s->select & 0xf0) | ((r / s->sectors) & 0x0f);
    s->sector = (r % s->sectors) + 1;
/* Report a read/write failure to the guest by aborting the command
 * (IRQ raise not visible in this chunk). */
static void ide_rw_error(IDEState *s) {
    ide_abort_command(s);
/*
 * Completion callback for a buffered read.  Unless the request was
 * orphaned by ide_cancel_dma_sync, copy the bounce buffer back into the
 * caller's qiov and invoke the original callback; in all cases free the
 * bounce buffer and unlink the request.
 */
static void ide_buffered_readv_cb(void *opaque, int ret)
    IDEBufferedRequest *req = opaque;
    if (!req->orphaned) {
        qemu_iovec_from_buf(req->original_qiov, 0, req->iov.iov_base,
                            req->original_qiov->size);
        req->original_cb(req->original_opaque, ret);
    QLIST_REMOVE(req, list);
    qemu_vfree(req->iov.iov_base);
633 #define MAX_BUFFERED_REQS 16
/*
 * Issue a read through a private bounce buffer so the request can be
 * safely "orphaned" on cancellation (see ide_cancel_dma_sync) without
 * the backend scribbling over guest memory afterwards.  The number of
 * outstanding buffered requests is capped at MAX_BUFFERED_REQS.
 */
BlockAIOCB *ide_buffered_readv(IDEState *s, int64_t sector_num,
                               QEMUIOVector *iov, int nb_sectors,
                               BlockCompletionFunc *cb, void *opaque)
    IDEBufferedRequest *req;
    /* count requests already in flight */
    QLIST_FOREACH(req, &s->buffered_requests, list) {
    if (c > MAX_BUFFERED_REQS) {
        return blk_abort_aio_request(s->blk, cb, opaque, -EIO);
    req = g_new0(IDEBufferedRequest, 1);
    req->original_qiov = iov;
    req->original_cb = cb;
    req->original_opaque = opaque;
    /* bounce buffer matching the caller's total size */
    req->iov.iov_base = qemu_blockalign(blk_bs(s->blk), iov->size);
    req->iov.iov_len = iov->size;
    qemu_iovec_init_external(&req->qiov, &req->iov, 1);
    aioreq = blk_aio_preadv(s->blk, sector_num << BDRV_SECTOR_BITS,
                            &req->qiov, 0, ide_buffered_readv_cb, req);
    QLIST_INSERT_HEAD(&s->buffered_requests, req, list);
 * Cancel all pending DMA requests.
 * Any buffered DMA requests are instantly canceled,
 * but any pending unbuffered DMA requests must be waited on.
void ide_cancel_dma_sync(IDEState *s)
    IDEBufferedRequest *req;
    /* First invoke the callbacks of all buffered requests
     * and flag those requests as orphaned. Ideally there
     * are no unbuffered (Scatter Gather DMA Requests or
     * write requests) pending and we can avoid to drain. */
    QLIST_FOREACH(req, &s->buffered_requests, list) {
        if (!req->orphaned) {
            trace_ide_cancel_dma_sync_buffered(req->original_cb, req);
            req->original_cb(req->original_opaque, -ECANCELED);
        req->orphaned = true;
    /*
     * We can't cancel Scatter Gather DMA in the middle of the
     * operation or a partial (not full) DMA transfer would reach
     * the storage so we wait for completion instead (we behave
     * as if the DMA was completed by the time the guest tried
     * to cancel dma with bmdma_cmd_writeb with BM_CMD_START not
     * set).
     *
     * In the future we'll be able to safely cancel the I/O if the
     * whole DMA operation will be submitted to disk with a single
     * aio operation with preadv/pwritev.
     */
    if (s->bus->dma->aiocb) {
        trace_ide_cancel_dma_sync_remaining();
        /* after draining, no unbuffered request may remain in flight */
        assert(s->bus->dma->aiocb == NULL);
705 static void ide_sector_read(IDEState *s);
/*
 * Completion callback for a PIO sector read: on success, account the
 * I/O, advance the current sector and arm a PIO transfer so the guest
 * can read the data out of io_buffer; ide_sector_read continues the
 * chain when the guest has drained the buffer.
 */
static void ide_sector_read_cb(void *opaque, int ret)
    IDEState *s = opaque;
    s->status &= ~BUSY_STAT;
    if (ret == -ECANCELED) {
    /* on error, possibly latch a retry and bail out */
    if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO |
    block_acct_done(blk_get_stats(s->blk), &s->acct);
    if (n > s->req_nb_sectors) {
        n = s->req_nb_sectors;
    ide_set_sector(s, ide_get_sector(s) + n);
    /* Allow the guest to read the io_buffer */
    ide_transfer_start(s, s->io_buffer, n * BDRV_SECTOR_SIZE, ide_sector_read);
/*
 * Start (or continue) a PIO sector read: validate the requested range,
 * then issue a buffered async read of up to req_nb_sectors sectors into
 * io_buffer; ide_sector_read_cb finishes the step.
 */
static void ide_sector_read(IDEState *s)
    s->status = READY_STAT | SEEK_STAT;
    s->error = 0; /* not needed by IDE spec, but needed by Windows */
    sector_num = ide_get_sector(s);
    /* nothing left to read: end the transfer */
    ide_transfer_stop(s);
    s->status |= BUSY_STAT;
    if (n > s->req_nb_sectors) {
        n = s->req_nb_sectors;
    trace_ide_sector_read(sector_num, n);
    if (!ide_sect_range_ok(s, sector_num, n)) {
        /* out-of-range request: fail it and account as invalid */
        block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_READ);
    s->iov.iov_base = s->io_buffer;
    s->iov.iov_len  = n * BDRV_SECTOR_SIZE;
    qemu_iovec_init_external(&s->qiov, &s->iov, 1);
    block_acct_start(blk_get_stats(s->blk), &s->acct,
                     n * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
    s->pio_aiocb = ide_buffered_readv(s, sector_num, &s->qiov, n,
                                      ide_sector_read_cb, s);
/*
 * Tell the DMA backend that @tx_bytes of the scatter/gather buffer have
 * been consumed, advance the io_buffer offset accordingly and tear down
 * the sglist for this round.
 */
void dma_buf_commit(IDEState *s, uint32_t tx_bytes)
    if (s->bus->dma->ops->commit_buf) {
        s->bus->dma->ops->commit_buf(s->bus->dma, tx_bytes);
    s->io_buffer_offset += tx_bytes;
    qemu_sglist_destroy(&s->sg);
/*
 * Mark the DMA engine idle: drop the aiocb reference and let the
 * backend update its status bits; @more indicates whether the guest
 * should expect further transfers for this command.
 */
void ide_set_inactive(IDEState *s, bool more)
    s->bus->dma->aiocb = NULL;
    if (s->bus->dma->ops->set_inactive) {
        s->bus->dma->ops->set_inactive(s->bus->dma, more);
/* Fail the current DMA command: discard the staged buffer, abort the
 * command and deactivate the DMA engine (IRQ raise not visible here). */
void ide_dma_error(IDEState *s)
    dma_buf_commit(s, 0);
    ide_abort_command(s);
    ide_set_inactive(s, false);
/*
 * Apply the block device's configured error policy to a failed request.
 * @error is a positive errno, @op encodes the retry flags for the
 * failed operation.  Returns nonzero unless the policy is "ignore"
 * (i.e. the caller must not complete the request normally).
 */
int ide_handle_rw_error(IDEState *s, int error, int op)
    bool is_read = (op & IDE_RETRY_READ) != 0;
    BlockErrorAction action = blk_get_error_action(s->blk, is_read, error);
    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* VM is stopped; remember what to retry on resume */
        assert(s->bus->retry_unit == s->unit);
        s->bus->error_status = op;
    } else if (action == BLOCK_ERROR_ACTION_REPORT) {
        block_acct_failed(blk_get_stats(s->blk), &s->acct);
        if (IS_IDE_RETRY_DMA(op)) {
        } else if (IS_IDE_RETRY_ATAPI(op)) {
            ide_atapi_io_error(s, -error);
    blk_error_action(s->blk, action, is_read, error);
    return action != BLOCK_ERROR_ACTION_IGNORE;
/*
 * Main DMA completion callback, re-entered after every chunk.  Commits
 * the finished scatter/gather round, advances the current sector, and
 * either completes the command (nsector == 0), or prepares the next
 * PRD round and re-submits a read/write/trim via the dma_blk_* helpers
 * with itself as the callback.
 */
static void ide_dma_cb(void *opaque, int ret)
    IDEState *s = opaque;
    bool stay_active = false;
    if (ret == -ECANCELED) {
    if (ret == -EINVAL) {
    /* apply the configured rerror/werror policy */
    if (ide_handle_rw_error(s, -ret, ide_dma_cmd_to_retry(s->dma_cmd))) {
        s->bus->dma->aiocb = NULL;
        dma_buf_commit(s, 0);
    /* sectors transferred in the round that just finished */
    n = s->io_buffer_size >> 9;
    if (n > s->nsector) {
        /* The PRDs were longer than needed for this request. Shorten them so
         * we don't get a negative remainder. The Active bit must remain set
         * after the request completes. */
    sector_num = ide_get_sector(s);
        assert(n * 512 == s->sg.size);
        dma_buf_commit(s, s->sg.size);
        ide_set_sector(s, sector_num);
    /* end of transfer ? */
    if (s->nsector == 0) {
        s->status = READY_STAT | SEEK_STAT;
    /* launch next transfer */
    s->io_buffer_index = 0;
    s->io_buffer_size = n * 512;
    if (s->bus->dma->ops->prepare_buf(s->bus->dma, s->io_buffer_size) < 512) {
        /* The PRDs were too short. Reset the Active bit, but don't raise an
         * interrupt. */
        s->status = READY_STAT | SEEK_STAT;
        dma_buf_commit(s, 0);
    trace_ide_dma_cb(s, sector_num, n, IDE_DMA_CMD_str(s->dma_cmd));
    if ((s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) &&
        !ide_sect_range_ok(s, sector_num, n)) {
        /* range falls outside the device: fail the DMA */
        block_acct_invalid(blk_get_stats(s->blk), s->acct.type);
    offset = sector_num << BDRV_SECTOR_BITS;
    switch (s->dma_cmd) {
        s->bus->dma->aiocb = dma_blk_read(s->blk, &s->sg, offset,
                                          BDRV_SECTOR_SIZE, ide_dma_cb, s);
        s->bus->dma->aiocb = dma_blk_write(s->blk, &s->sg, offset,
                                           BDRV_SECTOR_SIZE, ide_dma_cb, s);
        s->bus->dma->aiocb = dma_blk_io(blk_get_aio_context(s->blk),
                                        &s->sg, offset, BDRV_SECTOR_SIZE,
                                        ide_issue_trim, s, ide_dma_cb, s,
                                        DMA_DIRECTION_TO_DEVICE);
    if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
        block_acct_done(blk_get_stats(s->blk), &s->acct);
    ide_set_inactive(s, stay_active);
/*
 * Begin a sector-addressed DMA command (read/write/trim): set DRQ,
 * remember the command kind, start I/O accounting for reads and writes,
 * then hand control to the DMA backend via ide_start_dma.
 */
static void ide_sector_start_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
    s->status = READY_STAT | SEEK_STAT | DRQ_STAT;
    s->io_buffer_size = 0;
    s->dma_cmd = dma_cmd;
    /* read accounting */
    block_acct_start(blk_get_stats(s->blk), &s->acct,
                     s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
    /* write accounting */
    block_acct_start(blk_get_stats(s->blk), &s->acct,
                     s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
    ide_start_dma(s, ide_dma_cb);
948 void ide_start_dma(IDEState *s, BlockCompletionFunc *cb)
950 s->io_buffer_index = 0;
952 if (s->bus->dma->ops->start_dma) {
953 s->bus->dma->ops->start_dma(s->bus->dma, s, cb);
957 static void ide_sector_write(IDEState *s);
/* Timer callback used by the win2k install hack to delay the write-IRQ
 * path (body not fully visible in this chunk). */
static void ide_sector_write_timer_cb(void *opaque)
    IDEState *s = opaque;
/*
 * Completion callback for a PIO sector write: account the I/O, advance
 * the current sector and either stop the transfer (all sectors written)
 * or arm the next PIO window.  The IRQ may be delayed via a timer to
 * work around a Windows 2000 installer bug.
 */
static void ide_sector_write_cb(void *opaque, int ret)
    IDEState *s = opaque;
    if (ret == -ECANCELED) {
    s->status &= ~BUSY_STAT;
    /* on error, possibly latch a retry and bail out */
    if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO)) {
    block_acct_done(blk_get_stats(s->blk), &s->acct);
    if (n > s->req_nb_sectors) {
        n = s->req_nb_sectors;
    ide_set_sector(s, ide_get_sector(s) + n);
    if (s->nsector == 0) {
        /* no more sectors to write */
        ide_transfer_stop(s);
    if (n1 > s->req_nb_sectors) {
        n1 = s->req_nb_sectors;
    ide_transfer_start(s, s->io_buffer, n1 * BDRV_SECTOR_SIZE,
    if (win2k_install_hack && ((++s->irq_count % 16) == 0)) {
        /* It seems there is a bug in the Windows 2000 installer HDD
           IDE driver which fills the disk with empty logs when the
           IDE write IRQ comes too early. This hack tries to correct
           that at the expense of slower write performances. Use this
           option _only_ to install Windows 2000. You must disable it
           for normal use. */
        timer_mod(s->sector_write_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                  (NANOSECONDS_PER_SECOND / 1000));
    ide_set_irq(s->bus);
/*
 * Write the sectors the guest just placed in io_buffer: validate the
 * range and issue an async write of up to req_nb_sectors sectors;
 * ide_sector_write_cb continues the chain.
 */
static void ide_sector_write(IDEState *s)
    s->status = READY_STAT | SEEK_STAT | BUSY_STAT;
    sector_num = ide_get_sector(s);
    if (n > s->req_nb_sectors) {
        n = s->req_nb_sectors;
    trace_ide_sector_write(sector_num, n);
    if (!ide_sect_range_ok(s, sector_num, n)) {
        /* out-of-range request: fail it and account as invalid */
        block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_WRITE);
    s->iov.iov_base = s->io_buffer;
    s->iov.iov_len  = n * BDRV_SECTOR_SIZE;
    qemu_iovec_init_external(&s->qiov, &s->iov, 1);
    block_acct_start(blk_get_stats(s->blk), &s->acct,
                     n * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
    s->pio_aiocb = blk_aio_pwritev(s->blk, sector_num << BDRV_SECTOR_BITS,
                                   &s->qiov, 0, ide_sector_write_cb, s);
/*
 * Completion callback for FLUSH CACHE: apply the error policy on
 * failure, otherwise account the flush, report ready status and raise
 * the completion IRQ.
 */
static void ide_flush_cb(void *opaque, int ret)
    IDEState *s = opaque;
    s->pio_aiocb = NULL;
    if (ret == -ECANCELED) {
    /* XXX: What sector number to set here? */
    if (ide_handle_rw_error(s, -ret, IDE_RETRY_FLUSH)) {
    block_acct_done(blk_get_stats(s->blk), &s->acct);
    s->status = READY_STAT | SEEK_STAT;
    ide_set_irq(s->bus);
/*
 * Handle FLUSH CACHE: mark the drive busy and issue an async flush of
 * the backend; drives without media complete immediately (that branch
 * is not fully visible in this chunk).
 */
static void ide_flush_cache(IDEState *s)
    if (s->blk == NULL) {
    s->status |= BUSY_STAT;
    block_acct_start(blk_get_stats(s->blk), &s->acct, 0, BLOCK_ACCT_FLUSH);
    s->pio_aiocb = blk_aio_flush(s->blk, ide_flush_cb, s);
1086 static void ide_cfata_metadata_inquiry(IDEState *s)
1091 p = (uint16_t *) s->io_buffer;
1092 memset(p, 0, 0x200);
1093 spd = ((s->mdata_size - 1) >> 9) + 1;
1095 put_le16(p + 0, 0x0001); /* Data format revision */
1096 put_le16(p + 1, 0x0000); /* Media property: silicon */
1097 put_le16(p + 2, s->media_changed); /* Media status */
1098 put_le16(p + 3, s->mdata_size & 0xffff); /* Capacity in bytes (low) */
1099 put_le16(p + 4, s->mdata_size >> 16); /* Capacity in bytes (high) */
1100 put_le16(p + 5, spd & 0xffff); /* Sectors per device (low) */
1101 put_le16(p + 6, spd >> 16); /* Sectors per device (high) */
/*
 * CFATA METADATA READ: copy up to one 512-byte sector of the metadata
 * storage (addressed by hcyl:lcyl in 512-byte units) into io_buffer,
 * preceded by the media-changed word.  Out-of-range offsets abort with
 * ERR_STAT/ABRT_ERR.
 */
static void ide_cfata_metadata_read(IDEState *s)
    if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
        s->status = ERR_STAT;
        s->error = ABRT_ERR;
    p = (uint16_t *) s->io_buffer;
    memset(p, 0, 0x200);
    put_le16(p + 0, s->media_changed);	/* Media status */
    /* clamp the copy to the metadata size and to one sector payload */
    memcpy(p + 1, s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
           MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
                   s->nsector << 9), 0x200 - 2));
/*
 * CFATA METADATA WRITE: copy guest data from io_buffer into the
 * metadata storage at the offset given by hcyl:lcyl (512-byte units)
 * and clear the media-changed flag.  Out-of-range offsets abort with
 * ERR_STAT/ABRT_ERR.
 */
static void ide_cfata_metadata_write(IDEState *s)
    if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
        s->status = ERR_STAT;
        s->error = ABRT_ERR;
    s->media_changed = 0;
    /* clamp the copy to the metadata size and to one sector payload */
    memcpy(s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
           MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
                   s->nsector << 9), 0x200 - 2));
1139 /* called when the inserted state of the media has changed */
1140 static void ide_cd_change_cb(void *opaque, bool load, Error **errp)
1142 IDEState *s = opaque;
1143 uint64_t nb_sectors;
1145 s->tray_open = !load;
1146 blk_get_geometry(s->blk, &nb_sectors);
1147 s->nb_sectors = nb_sectors;
1150 * First indicate to the guest that a CD has been removed. That's
1151 * done on the next command the guest sends us.
1153 * Then we set UNIT_ATTENTION, by which the guest will
1154 * detect a new CD in the drive. See ide_atapi_cmd() for details.
1156 s->cdrom_changed = 1;
1157 s->events.new_media = true;
1158 s->events.eject_request = false;
1159 ide_set_irq(s->bus);
1162 static void ide_cd_eject_request_cb(void *opaque, bool force)
1164 IDEState *s = opaque;
1166 s->events.eject_request = true;
1168 s->tray_locked = false;
1170 ide_set_irq(s->bus);
/*
 * Normalize the sector count for the current command.  For LBA48
 * commands the 16-bit count is assembled from nsector/hob_nsector;
 * the "magic" count of 0 means the maximum transfer size.
 */
static void ide_cmd_lba48_transform(IDEState *s, int lba48)
    /* handle the 'magic' 0 nsector count conversion here. to avoid
     * fiddling with the rest of the read logic, we just store the
     * full sector count in ->nsector and ignore ->hob_nsector from now
     */
    if (!s->nsector && !s->hob_nsector)
    int lo = s->nsector;
    int hi = s->hob_nsector;
    s->nsector = (hi << 8) | lo;
1196 static void ide_clear_hob(IDEBus *bus)
1198 /* any write clears HOB high bit of device control register */
1199 bus->ifs[0].select &= ~(1 << 7);
1200 bus->ifs[1].select &= ~(1 << 7);
/* IOport [W]rite [R]egisters */
enum ATA_IOPORT_WR {
    ATA_IOPORT_WR_DATA = 0,
    ATA_IOPORT_WR_FEATURES = 1,
    ATA_IOPORT_WR_SECTOR_COUNT = 2,
    ATA_IOPORT_WR_SECTOR_NUMBER = 3,
    ATA_IOPORT_WR_CYLINDER_LOW = 4,
    ATA_IOPORT_WR_CYLINDER_HIGH = 5,
    ATA_IOPORT_WR_DEVICE_HEAD = 6,
    ATA_IOPORT_WR_COMMAND = 7,
    ATA_IOPORT_WR_NUM_REGISTERS,
};
1216 const char *ATA_IOPORT_WR_lookup[ATA_IOPORT_WR_NUM_REGISTERS] = {
1217 [ATA_IOPORT_WR_DATA] = "Data",
1218 [ATA_IOPORT_WR_FEATURES] = "Features",
1219 [ATA_IOPORT_WR_SECTOR_COUNT] = "Sector Count",
1220 [ATA_IOPORT_WR_SECTOR_NUMBER] = "Sector Number",
1221 [ATA_IOPORT_WR_CYLINDER_LOW] = "Cylinder Low",
1222 [ATA_IOPORT_WR_CYLINDER_HIGH] = "Cylinder High",
1223 [ATA_IOPORT_WR_DEVICE_HEAD] = "Device/Head",
1224 [ATA_IOPORT_WR_COMMAND] = "Command"
/*
 * Guest write to one of the eight command-block IO ports.  Non-command
 * registers are written to both drives on the bus (only the selected
 * one responds to commands); each write shifts the previous value into
 * the corresponding HOB register for LBA48 support.
 */
void ide_ioport_write(void *opaque, uint32_t addr, uint32_t val)
    IDEBus *bus = opaque;
    IDEState *s = idebus_active_if(bus);
    int reg_num = addr & 7;
    trace_ide_ioport_write(addr, ATA_IOPORT_WR_lookup[reg_num], val, bus, s);
    /* ignore writes to command block while busy with previous command */
    if (reg_num != 7 && (s->status & (BUSY_STAT|DRQ_STAT))) {
    case ATA_IOPORT_WR_FEATURES:
        /* NOTE: data is written to the two drives */
        bus->ifs[0].hob_feature = bus->ifs[0].feature;
        bus->ifs[1].hob_feature = bus->ifs[1].feature;
        bus->ifs[0].feature = val;
        bus->ifs[1].feature = val;
    case ATA_IOPORT_WR_SECTOR_COUNT:
        bus->ifs[0].hob_nsector = bus->ifs[0].nsector;
        bus->ifs[1].hob_nsector = bus->ifs[1].nsector;
        bus->ifs[0].nsector = val;
        bus->ifs[1].nsector = val;
    case ATA_IOPORT_WR_SECTOR_NUMBER:
        bus->ifs[0].hob_sector = bus->ifs[0].sector;
        bus->ifs[1].hob_sector = bus->ifs[1].sector;
        bus->ifs[0].sector = val;
        bus->ifs[1].sector = val;
    case ATA_IOPORT_WR_CYLINDER_LOW:
        bus->ifs[0].hob_lcyl = bus->ifs[0].lcyl;
        bus->ifs[1].hob_lcyl = bus->ifs[1].lcyl;
        bus->ifs[0].lcyl = val;
        bus->ifs[1].lcyl = val;
    case ATA_IOPORT_WR_CYLINDER_HIGH:
        bus->ifs[0].hob_hcyl = bus->ifs[0].hcyl;
        bus->ifs[1].hob_hcyl = bus->ifs[1].hcyl;
        bus->ifs[0].hcyl = val;
        bus->ifs[1].hcyl = val;
    case ATA_IOPORT_WR_DEVICE_HEAD:
        /* FIXME: HOB readback uses bit 7 */
        bus->ifs[0].select = (val & ~0x10) | 0xa0;
        bus->ifs[1].select = (val | 0x10) | 0xa0;
        /* bit 4 selects master (0) or slave (1) */
        bus->unit = (val >> 4) & 1;
    case ATA_IOPORT_WR_COMMAND:
        ide_exec_cmd(bus, val);
/*
 * Soft-reset one drive interface: cancel in-flight PIO AIO, restore
 * power-on register/transfer state, and park the data port on the dummy
 * transfer handler so reads return 0xffff.
 * NOTE(review): interior lines are elided in this extract.
 */
1294 static void ide_reset(IDEState *s)
1299 blk_aio_cancel(s->pio_aiocb);
1300 s->pio_aiocb = NULL;
/* CFATA powers up with multi-sector mode disabled; others default to max. */
1303 if (s->drive_kind == IDE_CFATA)
1304 s->mult_sectors = 0;
1306 s->mult_sectors = MAX_MULT_SECTORS;
1323 s->status = READY_STAT | SEEK_STAT;
1327 /* ATAPI specific */
1330 s->cdrom_changed = 0;
1331 s->packet_transfer_size = 0;
1332 s->elementary_transfer_size = 0;
1333 s->io_buffer_index = 0;
1334 s->cd_sector_size = 0;
1339 s->io_buffer_size = 0;
1340 s->req_nb_sectors = 0;
1342 ide_set_signature(s);
1343 /* init the transfer handler so that 0xffff is returned on data reads */
1345 s->end_transfer_func = ide_dummy_transfer_stop;
1346 ide_dummy_transfer_stop(s);
1347 s->media_changed = 0;
/*
 * ATA command handlers.  Each returns true if ide_exec_cmd() should run
 * the generic completion path (clear BUSY, raise IRQ).
 * NOTE(review): bodies below have interior lines elided in this extract;
 * code is kept byte-identical.
 */
/* 0x00 NOP and friends: accept the command, do nothing. */
1350 static bool cmd_nop(IDEState *s, uint8_t cmd)
/* DEVICE RESET (ATAPI only, per ide_cmd_table): abort in-flight transfers
 * and restore the device signature without clobbering the status reg. */
1355 static bool cmd_device_reset(IDEState *s, uint8_t cmd)
1357 /* Halt PIO (in the DRQ phase), then DMA */
1358 ide_transfer_halt(s);
1359 ide_cancel_dma_sync(s);
1361 /* Reset any PIO commands, reset signature, etc */
1364 /* RESET: ATA8-ACS3 7.10.4 "Normal Outputs";
1365 * ATA8-ACS3 Table 184 "Device Signatures for Normal Output" */
1368 /* Do not overwrite status register */
/* DATA SET MANAGEMENT: only the TRIM feature (via DMA) is handled;
 * anything else aborts. */
1372 static bool cmd_data_set_management(IDEState *s, uint8_t cmd)
1374 switch (s->feature) {
1377 ide_sector_start_dma(s, IDE_DMA_TRIM);
1383 ide_abort_command(s);
/* IDENTIFY DEVICE: hand back the 512-byte identify block via PIO;
 * ATAPI devices instead set their signature and abort (per spec). */
1387 static bool cmd_identify(IDEState *s, uint8_t cmd)
1389 if (s->blk && s->drive_kind != IDE_CD) {
1390 if (s->drive_kind != IDE_CFATA) {
1393 ide_cfata_identify(s);
1395 s->status = READY_STAT | SEEK_STAT;
1396 ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1397 ide_set_irq(s->bus);
1400 if (s->drive_kind == IDE_CD) {
1401 ide_set_signature(s);
1403 ide_abort_command(s);
/* VERIFY (and _EXT): no data is read; just applies LBA48 transform. */
1409 static bool cmd_verify(IDEState *s, uint8_t cmd)
1411 bool lba48 = (cmd == WIN_VERIFY_EXT);
1413 /* do sector number check ? */
1414 ide_cmd_lba48_transform(s, lba48);
/* SET MULTIPLE MODE: sector count must be a power of two and at most
 * MAX_MULT_SECTORS; CFATA with nsector==0 disables multiple mode. */
1419 static bool cmd_set_multiple_mode(IDEState *s, uint8_t cmd)
1421 if (s->drive_kind == IDE_CFATA && s->nsector == 0) {
1422 /* Disable Read and Write Multiple */
1423 s->mult_sectors = 0;
1424 } else if ((s->nsector & 0xff) != 0 &&
1425 ((s->nsector & 0xff) > MAX_MULT_SECTORS ||
1426 (s->nsector & (s->nsector - 1)) != 0)) {
1427 ide_abort_command(s);
1429 s->mult_sectors = s->nsector & 0xff;
/* READ MULTIPLE: PIO read in mult_sectors-sized bursts; requires a
 * backend and a prior successful SET MULTIPLE MODE. */
1435 static bool cmd_read_multiple(IDEState *s, uint8_t cmd)
1437 bool lba48 = (cmd == WIN_MULTREAD_EXT);
1439 if (!s->blk || !s->mult_sectors) {
1440 ide_abort_command(s);
1444 ide_cmd_lba48_transform(s, lba48);
1445 s->req_nb_sectors = s->mult_sectors;
/* WRITE MULTIPLE: same preconditions; primes the first PIO burst. */
1450 static bool cmd_write_multiple(IDEState *s, uint8_t cmd)
1452 bool lba48 = (cmd == WIN_MULTWRITE_EXT);
1455 if (!s->blk || !s->mult_sectors) {
1456 ide_abort_command(s);
1460 ide_cmd_lba48_transform(s, lba48);
1462 s->req_nb_sectors = s->mult_sectors;
1463 n = MIN(s->nsector, s->req_nb_sectors);
1465 s->status = SEEK_STAT | READY_STAT;
1466 ide_transfer_start(s, s->io_buffer, 512 * n, ide_sector_write);
1468 s->media_changed = 1;
/* READ SECTOR(S), one sector per DRQ block.  ATAPI devices set the
 * signature and abort instead. */
1473 static bool cmd_read_pio(IDEState *s, uint8_t cmd)
1475 bool lba48 = (cmd == WIN_READ_EXT);
1477 if (s->drive_kind == IDE_CD) {
1478 ide_set_signature(s); /* odd, but ATA4 8.27.5.2 requires it */
1479 ide_abort_command(s);
1484 ide_abort_command(s);
1488 ide_cmd_lba48_transform(s, lba48);
1489 s->req_nb_sectors = 1;
/* WRITE SECTOR(S), one sector per DRQ block. */
1495 static bool cmd_write_pio(IDEState *s, uint8_t cmd)
1497 bool lba48 = (cmd == WIN_WRITE_EXT);
1500 ide_abort_command(s);
1504 ide_cmd_lba48_transform(s, lba48);
1506 s->req_nb_sectors = 1;
1507 s->status = SEEK_STAT | READY_STAT;
1508 ide_transfer_start(s, s->io_buffer, 512, ide_sector_write);
1510 s->media_changed = 1;
/* READ DMA / READ DMA EXT. */
1515 static bool cmd_read_dma(IDEState *s, uint8_t cmd)
1517 bool lba48 = (cmd == WIN_READDMA_EXT);
1520 ide_abort_command(s);
1524 ide_cmd_lba48_transform(s, lba48);
1525 ide_sector_start_dma(s, IDE_DMA_READ);
/* WRITE DMA / WRITE DMA EXT. */
1530 static bool cmd_write_dma(IDEState *s, uint8_t cmd)
1532 bool lba48 = (cmd == WIN_WRITEDMA_EXT);
1535 ide_abort_command(s);
1539 ide_cmd_lba48_transform(s, lba48);
1540 ide_sector_start_dma(s, IDE_DMA_WRITE);
1542 s->media_changed = 1;
/* FLUSH CACHE (and _EXT): body elided in this extract. */
1547 static bool cmd_flush_cache(IDEState *s, uint8_t cmd)
/* SEEK: accepted unconditionally. */
1553 static bool cmd_seek(IDEState *s, uint8_t cmd)
1555 /* XXX: Check that seek is within bounds */
/* READ NATIVE MAX ADDRESS: report the last addressable sector. */
1559 static bool cmd_read_native_max(IDEState *s, uint8_t cmd)
1561 bool lba48 = (cmd == WIN_READ_NATIVE_MAX_EXT);
1563 /* Refuse if no sectors are addressable (e.g. medium not inserted) */
1564 if (s->nb_sectors == 0) {
1565 ide_abort_command(s);
1569 ide_cmd_lba48_transform(s, lba48);
1570 ide_set_sector(s, s->nb_sectors - 1);
/* CHECK POWER MODE: 0xff in the sector-count reg = active/idle. */
1575 static bool cmd_check_power_mode(IDEState *s, uint8_t cmd)
1577 s->nsector = 0xff; /* device active or idle */
/*
 * SET FEATURES: subcommand in the Features register.  Write-cache
 * enable/disable updates both the block backend and identify word 85;
 * subcommand 0x03 records the negotiated transfer mode in identify
 * words 62/63/88.  Unknown subcommands abort.
 * NOTE(review): interior lines (breaks, closing braces) are elided in
 * this extract; code is kept byte-identical.
 */
1581 static bool cmd_set_features(IDEState *s, uint8_t cmd)
1583 uint16_t *identify_data;
1586 ide_abort_command(s);
1590 /* XXX: valid for CDROM ? */
1591 switch (s->feature) {
1592 case 0x02: /* write cache enable */
1593 blk_set_enable_write_cache(s->blk, true);
1594 identify_data = (uint16_t *)s->identify_data;
1595 put_le16(identify_data + 85, (1 << 14) | (1 << 5) | 1);
1597 case 0x82: /* write cache disable */
1598 blk_set_enable_write_cache(s->blk, false);
1599 identify_data = (uint16_t *)s->identify_data;
1600 put_le16(identify_data + 85, (1 << 14) | 1);
/* The following subcommands are accepted as no-ops. */
1603 case 0xcc: /* reverting to power-on defaults enable */
1604 case 0x66: /* reverting to power-on defaults disable */
1605 case 0xaa: /* read look-ahead enable */
1606 case 0x55: /* read look-ahead disable */
1607 case 0x05: /* set advanced power management mode */
1608 case 0x85: /* disable advanced power management mode */
1609 case 0x69: /* NOP */
1610 case 0x67: /* NOP */
1611 case 0x96: /* NOP */
1612 case 0x9a: /* NOP */
1613 case 0x42: /* enable Automatic Acoustic Mode */
1614 case 0xc2: /* disable Automatic Acoustic Mode */
1616 case 0x03: /* set transfer mode */
/* nsector encodes mode class (bits 7:3) and level (bits 2:0). */
1618 uint8_t val = s->nsector & 0x07;
1619 identify_data = (uint16_t *)s->identify_data;
1621 switch (s->nsector >> 3) {
1622 case 0x00: /* pio default */
1623 case 0x01: /* pio mode */
1624 put_le16(identify_data + 62, 0x07);
1625 put_le16(identify_data + 63, 0x07);
1626 put_le16(identify_data + 88, 0x3f);
1628 case 0x02: /* single word dma mode */
1629 put_le16(identify_data + 62, 0x07 | (1 << (val + 8)));
1630 put_le16(identify_data + 63, 0x07);
1631 put_le16(identify_data + 88, 0x3f);
1633 case 0x04: /* mdma mode */
1634 put_le16(identify_data + 62, 0x07);
1635 put_le16(identify_data + 63, 0x07 | (1 << (val + 8)));
1636 put_le16(identify_data + 88, 0x3f);
1638 case 0x08: /* udma mode */
1639 put_le16(identify_data + 62, 0x07);
1640 put_le16(identify_data + 63, 0x07);
1641 put_le16(identify_data + 88, 0x3f | (1 << (val + 8)));
1651 ide_abort_command(s);
1656 /*** ATAPI commands ***/
/* IDENTIFY PACKET DEVICE: return the ATAPI identify block via PIO. */
1658 static bool cmd_identify_packet(IDEState *s, uint8_t cmd)
1660 ide_atapi_identify(s);
1661 s->status = READY_STAT | SEEK_STAT;
1662 ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1663 ide_set_irq(s->bus);
/* EXECUTE DEVICE DIAGNOSTIC: set signature; packet devices report a
 * cleared status register, others READY|SEEK. */
1667 static bool cmd_exec_dev_diagnostic(IDEState *s, uint8_t cmd)
1669 ide_set_signature(s);
1671 if (s->drive_kind == IDE_CD) {
1672 s->status = 0; /* ATAPI spec (v6) section 9.10 defines packet
1673 * devices to return a clear status register
1674 * with READY_STAT *not* set. */
1677 s->status = READY_STAT | SEEK_STAT;
1678 /* The bits of the error register are not as usual for this command!
1679 * They are part of the regular output (this is why ERR_STAT isn't set)
1680 * Device 0 passed, Device 1 passed or not present. */
1682 ide_set_irq(s->bus);
/* PACKET: begin a 12-byte ATAPI command packet transfer; feature bit 0
 * selects DMA for the data phase, bit 1 (overlap) is unsupported. */
1688 static bool cmd_packet(IDEState *s, uint8_t cmd)
1690 /* overlapping commands not supported */
1691 if (s->feature & 0x02) {
1692 ide_abort_command(s);
1696 s->status = READY_STAT | SEEK_STAT;
1697 s->atapi_dma = s->feature & 1;
1699 s->dma_cmd = IDE_DMA_ATAPI;
1702 ide_transfer_start(s, s->io_buffer, ATAPI_PACKET_SIZE,
1708 /*** CF-ATA commands ***/
/* CFA REQUEST EXTENDED ERROR CODE: always reports "miscellaneous". */
1710 static bool cmd_cfa_req_ext_error_code(IDEState *s, uint8_t cmd)
1712 s->error = 0x09; /* miscellaneous error */
1713 s->status = READY_STAT | SEEK_STAT;
1714 ide_set_irq(s->bus);
/* CFA ERASE SECTORS — also bound to CFA_WEAR_LEVEL, which shares its
 * opcode with WIN_SECURITY_FREEZE_LOCK (needed by Windows 8 on AHCI). */
1719 static bool cmd_cfa_erase_sectors(IDEState *s, uint8_t cmd)
1721 /* WIN_SECURITY_FREEZE_LOCK has the same ID as CFA_WEAR_LEVEL and is
1722 * required for Windows 8 to work with AHCI */
1724 if (cmd == CFA_WEAR_LEVEL) {
1728 if (cmd == CFA_ERASE_SECTORS) {
1729 s->media_changed = 1;
/* CFA TRANSLATE SECTOR: fill a 512-byte response with the CHS/LBA of
 * the addressed sector plus erase/hot-count fields. */
1735 static bool cmd_cfa_translate_sector(IDEState *s, uint8_t cmd)
1737 s->status = READY_STAT | SEEK_STAT;
1739 memset(s->io_buffer, 0, 0x200);
1740 s->io_buffer[0x00] = s->hcyl; /* Cyl MSB */
1741 s->io_buffer[0x01] = s->lcyl; /* Cyl LSB */
1742 s->io_buffer[0x02] = s->select; /* Head */
1743 s->io_buffer[0x03] = s->sector; /* Sector */
1744 s->io_buffer[0x04] = ide_get_sector(s) >> 16; /* LBA MSB */
1745 s->io_buffer[0x05] = ide_get_sector(s) >> 8; /* LBA */
1746 s->io_buffer[0x06] = ide_get_sector(s) >> 0; /* LBA LSB */
1747 s->io_buffer[0x13] = 0x00; /* Erase flag */
1748 s->io_buffer[0x18] = 0x00; /* Hot count */
1749 s->io_buffer[0x19] = 0x00; /* Hot count */
1750 s->io_buffer[0x1a] = 0x01; /* Hot count */
1752 ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1753 ide_set_irq(s->bus);
/* CFA ACCESS METADATA STORAGE: inquiry/read/write subcommands in the
 * Features register; anything else aborts. */
1758 static bool cmd_cfa_access_metadata_storage(IDEState *s, uint8_t cmd)
1760 switch (s->feature) {
1761 case 0x02: /* Inquiry Metadata Storage */
1762 ide_cfata_metadata_inquiry(s);
1764 case 0x03: /* Read Metadata Storage */
1765 ide_cfata_metadata_read(s);
1767 case 0x04: /* Write Metadata Storage */
1768 ide_cfata_metadata_write(s);
1771 ide_abort_command(s);
1775 ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1776 s->status = 0x00; /* NOTE: READY is _not_ set */
1777 ide_set_irq(s->bus);
/* IBM SENSE CONDITION: only temperature sense (0x01) is implemented,
 * always reporting a fixed +20 C. */
1782 static bool cmd_ibm_sense_condition(IDEState *s, uint8_t cmd)
1784 switch (s->feature) {
1785 case 0x01: /* sense temperature in device */
1786 s->nsector = 0x50; /* +20 C */
1789 ide_abort_command(s);
1797 /*** SMART commands ***/
/*
 * SMART: subcommand dispatch on the Features register.  Requires the
 * SMART "key" 0xc24f in cyl-high/low; all subcommands except ENABLE are
 * refused while SMART is disabled.  The 512-byte reply buffers end with
 * a two's-complement checksum in byte 511.
 * NOTE(review): interior lines (case labels, breaks, braces) are elided
 * in this extract; code is kept byte-identical.
 */
1799 static bool cmd_smart(IDEState *s, uint8_t cmd)
1803 if (s->hcyl != 0xc2 || s->lcyl != 0x4f) {
1807 if (!s->smart_enabled && s->feature != SMART_ENABLE) {
1811 switch (s->feature) {
1813 s->smart_enabled = 0;
1817 s->smart_enabled = 1;
1820 case SMART_ATTR_AUTOSAVE:
1821 switch (s->sector) {
1823 s->smart_autosave = 0;
1826 s->smart_autosave = 1;
1834 if (!s->smart_errors) {
/* READ THRESHOLDS: id + threshold per attribute, 12 bytes each. */
1843 case SMART_READ_THRESH:
1844 memset(s->io_buffer, 0, 0x200);
1845 s->io_buffer[0] = 0x01; /* smart struct version */
1847 for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
1848 s->io_buffer[2 + 0 + (n * 12)] = smart_attributes[n][0];
1849 s->io_buffer[2 + 1 + (n * 12)] = smart_attributes[n][11];
/* checksum: byte 511 = 0x100 - (sum of bytes 0..510) */
1853 for (n = 0; n < 511; n++) {
1854 s->io_buffer[511] += s->io_buffer[n];
1856 s->io_buffer[511] = 0x100 - s->io_buffer[511];
1858 s->status = READY_STAT | SEEK_STAT;
1859 ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1860 ide_set_irq(s->bus);
/* READ DATA: full attribute table plus capability/self-test fields. */
1863 case SMART_READ_DATA:
1864 memset(s->io_buffer, 0, 0x200);
1865 s->io_buffer[0] = 0x01; /* smart struct version */
1867 for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
1869 for (i = 0; i < 11; i++) {
1870 s->io_buffer[2 + i + (n * 12)] = smart_attributes[n][i];
1874 s->io_buffer[362] = 0x02 | (s->smart_autosave ? 0x80 : 0x00);
1875 if (s->smart_selftest_count == 0) {
1876 s->io_buffer[363] = 0;
1879 s->smart_selftest_data[3 +
1880 (s->smart_selftest_count - 1) *
1883 s->io_buffer[364] = 0x20;
1884 s->io_buffer[365] = 0x01;
1885 /* offline data collection capacity: execute + self-test*/
1886 s->io_buffer[367] = (1 << 4 | 1 << 3 | 1);
1887 s->io_buffer[368] = 0x03; /* smart capability (1) */
1888 s->io_buffer[369] = 0x00; /* smart capability (2) */
1889 s->io_buffer[370] = 0x01; /* error logging supported */
1890 s->io_buffer[372] = 0x02; /* minutes for poll short test */
1891 s->io_buffer[373] = 0x36; /* minutes for poll ext test */
1892 s->io_buffer[374] = 0x01; /* minutes for poll conveyance */
1894 for (n = 0; n < 511; n++) {
1895 s->io_buffer[511] += s->io_buffer[n];
1897 s->io_buffer[511] = 0x100 - s->io_buffer[511];
1899 s->status = READY_STAT | SEEK_STAT;
1900 ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1901 ide_set_irq(s->bus);
/* READ LOG: sector reg selects the log page. */
1904 case SMART_READ_LOG:
1905 switch (s->sector) {
1906 case 0x01: /* summary smart error log */
1907 memset(s->io_buffer, 0, 0x200);
1908 s->io_buffer[0] = 0x01;
1909 s->io_buffer[1] = 0x00; /* no error entries */
1910 s->io_buffer[452] = s->smart_errors & 0xff;
1911 s->io_buffer[453] = (s->smart_errors & 0xff00) >> 8;
1913 for (n = 0; n < 511; n++) {
1914 s->io_buffer[511] += s->io_buffer[n];
1916 s->io_buffer[511] = 0x100 - s->io_buffer[511];
1918 case 0x06: /* smart self test log */
1919 memset(s->io_buffer, 0, 0x200);
1920 s->io_buffer[0] = 0x01;
1921 if (s->smart_selftest_count == 0) {
1922 s->io_buffer[508] = 0;
1924 s->io_buffer[508] = s->smart_selftest_count;
1925 for (n = 2; n < 506; n++) {
1926 s->io_buffer[n] = s->smart_selftest_data[n];
1930 for (n = 0; n < 511; n++) {
1931 s->io_buffer[511] += s->io_buffer[n];
1933 s->io_buffer[511] = 0x100 - s->io_buffer[511];
1938 s->status = READY_STAT | SEEK_STAT;
1939 ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1940 ide_set_irq(s->bus);
/* EXECUTE OFFLINE: log a fake always-passing self-test entry (24 bytes
 * per entry, ring of 21, fixed bogus hour count 0x1234). */
1943 case SMART_EXECUTE_OFFLINE:
1944 switch (s->sector) {
1945 case 0: /* off-line routine */
1946 case 1: /* short self test */
1947 case 2: /* extended self test */
1948 s->smart_selftest_count++;
1949 if (s->smart_selftest_count > 21) {
1950 s->smart_selftest_count = 1;
1952 n = 2 + (s->smart_selftest_count - 1) * 24;
1953 s->smart_selftest_data[n] = s->sector;
1954 s->smart_selftest_data[n + 1] = 0x00; /* OK and finished */
1955 s->smart_selftest_data[n + 2] = 0x34; /* hour count lsb */
1956 s->smart_selftest_data[n + 3] = 0x12; /* hour count msb */
1965 ide_abort_command(s);
/*
 * Per-command permission masks: bit (1 << drive_kind) says which device
 * kinds accept the opcode; SET_DSC asks ide_exec_cmd() to set SEEK_STAT
 * on successful completion.
 */
1969 #define HD_OK (1u << IDE_HD)
1970 #define CD_OK (1u << IDE_CD)
1971 #define CFA_OK (1u << IDE_CFATA)
1972 #define HD_CFA_OK (HD_OK | CFA_OK)
1973 #define ALL_OK (HD_OK | CD_OK | CFA_OK)
1975 /* Set the Disk Seek Completed status bit during completion */
1976 #define SET_DSC (1u << 8)
1978 /* See ACS-2 T13/2015-D Table B.2 Command codes */
1979 static const struct {
1980 /* Returns true if the completion code should be run */
1981 bool (*handler)(IDEState *s, uint8_t cmd);
1983 } ide_cmd_table[0x100] = {
1984 /* NOP not implemented, mandatory for CD */
1985 [CFA_REQ_EXT_ERROR_CODE] = { cmd_cfa_req_ext_error_code, CFA_OK },
1986 [WIN_DSM] = { cmd_data_set_management, HD_CFA_OK },
1987 [WIN_DEVICE_RESET] = { cmd_device_reset, CD_OK },
1988 [WIN_RECAL] = { cmd_nop, HD_CFA_OK | SET_DSC},
1989 [WIN_READ] = { cmd_read_pio, ALL_OK },
1990 [WIN_READ_ONCE] = { cmd_read_pio, HD_CFA_OK },
1991 [WIN_READ_EXT] = { cmd_read_pio, HD_CFA_OK },
1992 [WIN_READDMA_EXT] = { cmd_read_dma, HD_CFA_OK },
1993 [WIN_READ_NATIVE_MAX_EXT] = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
1994 [WIN_MULTREAD_EXT] = { cmd_read_multiple, HD_CFA_OK },
1995 [WIN_WRITE] = { cmd_write_pio, HD_CFA_OK },
1996 [WIN_WRITE_ONCE] = { cmd_write_pio, HD_CFA_OK },
1997 [WIN_WRITE_EXT] = { cmd_write_pio, HD_CFA_OK },
1998 [WIN_WRITEDMA_EXT] = { cmd_write_dma, HD_CFA_OK },
1999 [CFA_WRITE_SECT_WO_ERASE] = { cmd_write_pio, CFA_OK },
2000 [WIN_MULTWRITE_EXT] = { cmd_write_multiple, HD_CFA_OK },
2001 [WIN_WRITE_VERIFY] = { cmd_write_pio, HD_CFA_OK },
2002 [WIN_VERIFY] = { cmd_verify, HD_CFA_OK | SET_DSC },
2003 [WIN_VERIFY_ONCE] = { cmd_verify, HD_CFA_OK | SET_DSC },
2004 [WIN_VERIFY_EXT] = { cmd_verify, HD_CFA_OK | SET_DSC },
2005 [WIN_SEEK] = { cmd_seek, HD_CFA_OK | SET_DSC },
2006 [CFA_TRANSLATE_SECTOR] = { cmd_cfa_translate_sector, CFA_OK },
2007 [WIN_DIAGNOSE] = { cmd_exec_dev_diagnostic, ALL_OK },
2008 [WIN_SPECIFY] = { cmd_nop, HD_CFA_OK | SET_DSC },
2009 [WIN_STANDBYNOW2] = { cmd_nop, HD_CFA_OK },
2010 [WIN_IDLEIMMEDIATE2] = { cmd_nop, HD_CFA_OK },
2011 [WIN_STANDBY2] = { cmd_nop, HD_CFA_OK },
2012 [WIN_SETIDLE2] = { cmd_nop, HD_CFA_OK },
2013 [WIN_CHECKPOWERMODE2] = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
2014 [WIN_SLEEPNOW2] = { cmd_nop, HD_CFA_OK },
2015 [WIN_PACKETCMD] = { cmd_packet, CD_OK },
2016 [WIN_PIDENTIFY] = { cmd_identify_packet, CD_OK },
2017 [WIN_SMART] = { cmd_smart, HD_CFA_OK | SET_DSC },
2018 [CFA_ACCESS_METADATA_STORAGE] = { cmd_cfa_access_metadata_storage, CFA_OK },
2019 [CFA_ERASE_SECTORS] = { cmd_cfa_erase_sectors, CFA_OK | SET_DSC },
2020 [WIN_MULTREAD] = { cmd_read_multiple, HD_CFA_OK },
2021 [WIN_MULTWRITE] = { cmd_write_multiple, HD_CFA_OK },
2022 [WIN_SETMULT] = { cmd_set_multiple_mode, HD_CFA_OK | SET_DSC },
2023 [WIN_READDMA] = { cmd_read_dma, HD_CFA_OK },
2024 [WIN_READDMA_ONCE] = { cmd_read_dma, HD_CFA_OK },
2025 [WIN_WRITEDMA] = { cmd_write_dma, HD_CFA_OK },
2026 [WIN_WRITEDMA_ONCE] = { cmd_write_dma, HD_CFA_OK },
2027 [CFA_WRITE_MULTI_WO_ERASE] = { cmd_write_multiple, CFA_OK },
2028 [WIN_STANDBYNOW1] = { cmd_nop, HD_CFA_OK },
2029 [WIN_IDLEIMMEDIATE] = { cmd_nop, HD_CFA_OK },
2030 [WIN_STANDBY] = { cmd_nop, HD_CFA_OK },
2031 [WIN_SETIDLE1] = { cmd_nop, HD_CFA_OK },
2032 [WIN_CHECKPOWERMODE1] = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
2033 [WIN_SLEEPNOW1] = { cmd_nop, HD_CFA_OK },
2034 [WIN_FLUSH_CACHE] = { cmd_flush_cache, ALL_OK },
2035 [WIN_FLUSH_CACHE_EXT] = { cmd_flush_cache, HD_CFA_OK },
2036 [WIN_IDENTIFY] = { cmd_identify, ALL_OK },
2037 [WIN_SETFEATURES] = { cmd_set_features, ALL_OK | SET_DSC },
2038 [IBM_SENSE_CONDITION] = { cmd_ibm_sense_condition, CFA_OK | SET_DSC },
/* CFA_WEAR_LEVEL shares its opcode with WIN_SECURITY_FREEZE_LOCK, hence
 * HD_CFA_OK (see cmd_cfa_erase_sectors). */
2039 [CFA_WEAR_LEVEL] = { cmd_cfa_erase_sectors, HD_CFA_OK | SET_DSC },
2040 [WIN_READ_NATIVE_MAX] = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
/* True if @cmd is implemented and allowed for this device kind. */
2043 static bool ide_cmd_permitted(IDEState *s, uint32_t cmd)
2045 return cmd < ARRAY_SIZE(ide_cmd_table)
2046 && (ide_cmd_table[cmd].flags & (1u << s->drive_kind));
/*
 * Execute an ATA command byte written to the Command register on the
 * currently selected drive.  If the handler returns true, the generic
 * completion path clears BUSY, applies SET_DSC, and raises the IRQ.
 * NOTE(review): interior lines are elided in this extract.
 */
2049 void ide_exec_cmd(IDEBus *bus, uint32_t val)
2054 s = idebus_active_if(bus);
2055 trace_ide_exec_cmd(bus, s, val);
2057 /* ignore commands to non existent slave */
2058 if (s != bus->ifs && !s->blk) {
2062 /* Only RESET is allowed while BSY and/or DRQ are set,
2063 * and only to ATAPI devices. */
2064 if (s->status & (BUSY_STAT|DRQ_STAT)) {
2065 if (val != WIN_DEVICE_RESET || s->drive_kind != IDE_CD) {
2070 if (!ide_cmd_permitted(s, val)) {
2071 ide_abort_command(s);
2072 ide_set_irq(s->bus);
2076 s->status = READY_STAT | BUSY_STAT;
2078 s->io_buffer_offset = 0;
2080 complete = ide_cmd_table[val].handler(s, val);
2082 s->status &= ~BUSY_STAT;
/* Invariant: ERR_STAT and the error register agree after a command. */
2083 assert(!!s->error == !!(s->status & ERR_STAT));
2085 if ((ide_cmd_table[val].flags & SET_DSC) && !s->error) {
2086 s->status |= SEEK_STAT;
2090 ide_set_irq(s->bus);
2094 /* IOport [R]ead [R]egisters */
2095 enum ATA_IOPORT_RR {
2096 ATA_IOPORT_RR_DATA = 0,
2097 ATA_IOPORT_RR_ERROR = 1,
2098 ATA_IOPORT_RR_SECTOR_COUNT = 2,
2099 ATA_IOPORT_RR_SECTOR_NUMBER = 3,
2100 ATA_IOPORT_RR_CYLINDER_LOW = 4,
2101 ATA_IOPORT_RR_CYLINDER_HIGH = 5,
2102 ATA_IOPORT_RR_DEVICE_HEAD = 6,
2103 ATA_IOPORT_RR_STATUS = 7,
2104 ATA_IOPORT_RR_NUM_REGISTERS,
/* Names indexed by the enum above; used by the read tracepoint. */
2107 const char *ATA_IOPORT_RR_lookup[ATA_IOPORT_RR_NUM_REGISTERS] = {
2108 [ATA_IOPORT_RR_DATA] = "Data",
2109 [ATA_IOPORT_RR_ERROR] = "Error",
2110 [ATA_IOPORT_RR_SECTOR_COUNT] = "Sector Count",
2111 [ATA_IOPORT_RR_SECTOR_NUMBER] = "Sector Number",
2112 [ATA_IOPORT_RR_CYLINDER_LOW] = "Cylinder Low",
2113 [ATA_IOPORT_RR_CYLINDER_HIGH] = "Cylinder High",
2114 [ATA_IOPORT_RR_DEVICE_HEAD] = "Device/Head",
2115 [ATA_IOPORT_RR_STATUS] = "Status"
/*
 * Guest IN from a command-block register.  An empty bus (no backends)
 * reads as open-bus; reading Status also lowers the IRQ line (standard
 * ATA behavior).  NOTE(review): interior lines (0xff returns, hob
 * branches) are elided in this extract.
 */
2118 uint32_t ide_ioport_read(void *opaque, uint32_t addr)
2120 IDEBus *bus = opaque;
2121 IDEState *s = idebus_active_if(bus);
2126 /* FIXME: HOB readback uses bit 7, but it's always set right now */
2127 //hob = s->select & (1 << 7);
2130 case ATA_IOPORT_RR_DATA:
2133 case ATA_IOPORT_RR_ERROR:
2134 if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2135 (s != bus->ifs && !s->blk)) {
2140 ret = s->hob_feature;
2143 case ATA_IOPORT_RR_SECTOR_COUNT:
2144 if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2147 ret = s->nsector & 0xff;
2149 ret = s->hob_nsector;
2152 case ATA_IOPORT_RR_SECTOR_NUMBER:
2153 if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2158 ret = s->hob_sector;
2161 case ATA_IOPORT_RR_CYLINDER_LOW:
2162 if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2170 case ATA_IOPORT_RR_CYLINDER_HIGH:
2171 if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2179 case ATA_IOPORT_RR_DEVICE_HEAD:
2180 if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2187 case ATA_IOPORT_RR_STATUS:
2188 if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2189 (s != bus->ifs && !s->blk)) {
/* Reading the Status register acknowledges the interrupt. */
2194 qemu_irq_lower(bus->irq);
2198 trace_ide_ioport_read(addr, ATA_IOPORT_RR_lookup[reg_num], ret, bus, s);
/*
 * Alternate Status read: same value as Status but does NOT clear the
 * interrupt.  Empty bus / missing slave read paths are elided here.
 */
2202 uint32_t ide_status_read(void *opaque, uint32_t addr)
2204 IDEBus *bus = opaque;
2205 IDEState *s = idebus_active_if(bus);
2208 if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2209 (s != bus->ifs && !s->blk)) {
2215 trace_ide_status_read(addr, ret, bus, s);
/*
 * Device Control register write: the SRST bit resets BOTH drives on a
 * low-to-high transition and completes the reset on high-to-low, at
 * which point the device signature and status are restored.
 */
2219 void ide_cmd_write(void *opaque, uint32_t addr, uint32_t val)
2221 IDEBus *bus = opaque;
2225 trace_ide_cmd_write(addr, val, bus);
2227 /* common for both drives */
2228 if (!(bus->cmd & IDE_CMD_RESET) &&
2229 (val & IDE_CMD_RESET)) {
2230 /* reset low to high */
2231 for(i = 0;i < 2; i++) {
2233 s->status = BUSY_STAT | SEEK_STAT;
2236 } else if ((bus->cmd & IDE_CMD_RESET) &&
2237 !(val & IDE_CMD_RESET)) {
2239 for(i = 0;i < 2; i++) {
2241 if (s->drive_kind == IDE_CD)
2242 s->status = 0x00; /* NOTE: READY is _not_ set */
2244 s->status = READY_STAT | SEEK_STAT;
2245 ide_set_signature(s);
2253 * Returns true if the running PIO transfer is a PIO out (i.e. data is
2254 * transferred from the device to the guest), false if it's a PIO in
/* Direction is inferred from which end-of-transfer handler is armed. */
2256 static bool ide_is_pio_out(IDEState *s)
2258 if (s->end_transfer_func == ide_sector_write ||
2259 s->end_transfer_func == ide_atapi_cmd) {
2261 } else if (s->end_transfer_func == ide_sector_read ||
2262 s->end_transfer_func == ide_transfer_stop ||
2263 s->end_transfer_func == ide_atapi_cmd_reply_end ||
2264 s->end_transfer_func == ide_dummy_transfer_stop) {
/*
 * 16-bit Data register write: append to the PIO buffer; when the buffer
 * is full, clear DRQ and invoke the end-of-transfer handler.
 * NOTE(review): the bounds-exceeded path is elided in this extract.
 */
2271 void ide_data_writew(void *opaque, uint32_t addr, uint32_t val)
2273 IDEBus *bus = opaque;
2274 IDEState *s = idebus_active_if(bus);
2277 trace_ide_data_writew(addr, val, bus, s);
2279 /* PIO data access allowed only when DRQ bit is set. The result of a write
2280 * during PIO out is indeterminate, just ignore it. */
2281 if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2286 if (p + 2 > s->data_end) {
2290 *(uint16_t *)p = le16_to_cpu(val);
2293 if (p >= s->data_end) {
2294 s->status &= ~DRQ_STAT;
2295 s->end_transfer_func(s);
/* 16-bit Data register read: mirror of ide_data_writew. */
2299 uint32_t ide_data_readw(void *opaque, uint32_t addr)
2301 IDEBus *bus = opaque;
2302 IDEState *s = idebus_active_if(bus);
2306 /* PIO data access allowed only when DRQ bit is set. The result of a read
2307 * during PIO in is indeterminate, return 0 and don't move forward. */
2308 if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2313 if (p + 2 > s->data_end) {
2317 ret = cpu_to_le16(*(uint16_t *)p);
2320 if (p >= s->data_end) {
2321 s->status &= ~DRQ_STAT;
2322 s->end_transfer_func(s);
2325 trace_ide_data_readw(addr, ret, bus, s);
/* 32-bit Data register write: as writew but two words at a time. */
2329 void ide_data_writel(void *opaque, uint32_t addr, uint32_t val)
2331 IDEBus *bus = opaque;
2332 IDEState *s = idebus_active_if(bus);
2335 trace_ide_data_writel(addr, val, bus, s);
2337 /* PIO data access allowed only when DRQ bit is set. The result of a write
2338 * during PIO out is indeterminate, just ignore it. */
2339 if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2344 if (p + 4 > s->data_end) {
2348 *(uint32_t *)p = le32_to_cpu(val);
2351 if (p >= s->data_end) {
2352 s->status &= ~DRQ_STAT;
2353 s->end_transfer_func(s);
/* 32-bit Data register read: mirror of ide_data_writel. */
2357 uint32_t ide_data_readl(void *opaque, uint32_t addr)
2359 IDEBus *bus = opaque;
2360 IDEState *s = idebus_active_if(bus);
2364 /* PIO data access allowed only when DRQ bit is set. The result of a read
2365 * during PIO in is indeterminate, return 0 and don't move forward. */
2366 if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2372 if (p + 4 > s->data_end) {
2376 ret = cpu_to_le32(*(uint32_t *)p);
2379 if (p >= s->data_end) {
2380 s->status &= ~DRQ_STAT;
2381 s->end_transfer_func(s);
2385 trace_ide_data_readl(addr, ret, bus, s);
/*
 * Park the data port: data_ptr == data_end means no PIO in progress,
 * and the 0xff fill makes stray data reads return all-ones.
 */
2389 static void ide_dummy_transfer_stop(IDEState *s)
2391 s->data_ptr = s->io_buffer;
2392 s->data_end = s->io_buffer;
2393 s->io_buffer[0] = 0xff;
2394 s->io_buffer[1] = 0xff;
2395 s->io_buffer[2] = 0xff;
2396 s->io_buffer[3] = 0xff;
/* Hard-reset the whole bus: both drives, pending DMA, and the DMA provider. */
2399 void ide_bus_reset(IDEBus *bus)
2403 ide_reset(&bus->ifs[0]);
2404 ide_reset(&bus->ifs[1]);
2407 /* pending async DMA */
2408 if (bus->dma->aiocb) {
2409 trace_ide_bus_reset_aio();
2410 blk_aio_cancel(bus->dma->aiocb);
2411 bus->dma->aiocb = NULL;
2414 /* reset dma provider too */
2415 if (bus->dma->ops->reset) {
2416 bus->dma->ops->reset(bus->dma);
/* BlockDevOps callback: report CD tray state to the block layer. */
2420 static bool ide_cd_is_tray_open(void *opaque)
2422 return ((IDEState *)opaque)->tray_open;
/* BlockDevOps callback: report medium-lock state to the block layer. */
2425 static bool ide_cd_is_medium_locked(void *opaque)
2427 return ((IDEState *)opaque)->tray_locked;
/*
 * BlockDevOps resize callback (HD/CFATA only): refresh the cached sector
 * count and the size fields of the identify data.
 */
2430 static void ide_resize_cb(void *opaque)
2432 IDEState *s = opaque;
2433 uint64_t nb_sectors;
2435 if (!s->identify_set) {
2439 blk_get_geometry(s->blk, &nb_sectors);
2440 s->nb_sectors = nb_sectors;
2442 /* Update the identify data buffer. */
2443 if (s->drive_kind == IDE_CFATA) {
2444 ide_cfata_identify_size(s);
2446 /* IDE_CD uses a different set of callbacks entirely. */
2447 assert(s->drive_kind != IDE_CD);
2448 ide_identify_size(s);
/* Block-layer callbacks for CD drives (tray/medium handling). */
2452 static const BlockDevOps ide_cd_block_ops = {
2453 .change_media_cb = ide_cd_change_cb,
2454 .eject_request_cb = ide_cd_eject_request_cb,
2455 .is_tray_open = ide_cd_is_tray_open,
2456 .is_medium_locked = ide_cd_is_medium_locked,
/* Block-layer callbacks for hard disks (resize only). */
2459 static const BlockDevOps ide_hd_block_ops = {
2460 .resize_cb = ide_resize_cb,
/*
 * Configure one IDE drive from its block backend: geometry, SMART
 * defaults, block-layer callbacks, serial/model/version strings.
 * Returns nonzero via *errp-setting paths on failure (exact return
 * values elided in this extract).
 */
2463 int ide_init_drive(IDEState *s, BlockBackend *blk, IDEDriveKind kind,
2464 const char *version, const char *serial, const char *model,
2466 uint32_t cylinders, uint32_t heads, uint32_t secs,
2467 int chs_trans, Error **errp)
2469 uint64_t nb_sectors;
2472 s->drive_kind = kind;
2474 blk_get_geometry(blk, &nb_sectors);
2475 s->cylinders = cylinders;
2478 s->chs_trans = chs_trans;
2479 s->nb_sectors = nb_sectors;
2481 /* The SMART values should be preserved across power cycles
/* but they are reinitialized here for simplicity. */
2483 s->smart_enabled = 1;
2484 s->smart_autosave = 1;
2485 s->smart_errors = 0;
2486 s->smart_selftest_count = 0;
2487 if (kind == IDE_CD) {
2488 blk_set_dev_ops(blk, &ide_cd_block_ops, s);
2489 blk_set_guest_block_size(blk, 2048);
/* Non-CD drives: medium must be present and writable. */
2491 if (!blk_is_inserted(s->blk)) {
2492 error_setg(errp, "Device needs media, but drive is empty");
2495 if (blk_is_read_only(blk)) {
2496 error_setg(errp, "Can't use a read-only drive");
2499 blk_set_dev_ops(blk, &ide_hd_block_ops, s);
2502 pstrcpy(s->drive_serial_str, sizeof(s->drive_serial_str), serial);
/* Fallback serial when none was supplied. */
2504 snprintf(s->drive_serial_str, sizeof(s->drive_serial_str),
2505 "QM%05d", s->drive_serial);
2508 pstrcpy(s->drive_model_str, sizeof(s->drive_model_str), model);
/* Default model strings per drive kind. */
2512 strcpy(s->drive_model_str, "QEMU DVD-ROM");
2515 strcpy(s->drive_model_str, "QEMU MICRODRIVE");
2518 strcpy(s->drive_model_str, "QEMU HARDDISK");
2524 pstrcpy(s->version, sizeof(s->version), version);
2526 pstrcpy(s->version, sizeof(s->version), qemu_hw_version());
2530 blk_iostatus_enable(blk);
/*
 * Per-interface one-time init: allocate the PIO/DMA buffer (2k aligned
 * for O_DIRECT CD access), SMART self-test log, and the sector-write
 * throttling timer; assign a monotonically increasing serial number.
 */
2534 static void ide_init1(IDEBus *bus, int unit)
2536 static int drive_serial = 1;
2537 IDEState *s = &bus->ifs[unit];
2541 s->drive_serial = drive_serial++;
2542 /* we need at least 2k alignment for accessing CDROMs using O_DIRECT */
2543 s->io_buffer_total_len = IDE_DMA_BUF_SECTORS*512 + 4;
2544 s->io_buffer = qemu_memalign(2048, s->io_buffer_total_len);
2545 memset(s->io_buffer, 0, s->io_buffer_total_len);
2547 s->smart_selftest_data = blk_blockalign(s->blk, 512);
2548 memset(s->smart_selftest_data, 0, 512);
2550 s->sector_write_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
2551 ide_sector_write_timer_cb, s);
/* Do-nothing IDEDMA callbacks for buses with no real DMA controller. */
2554 static int ide_nop_int(IDEDMA *dma, int x)
2559 static void ide_nop(IDEDMA *dma)
2563 static int32_t ide_nop_int32(IDEDMA *dma, int32_t l)
2568 static const IDEDMAOps ide_dma_nop_ops = {
2569 .prepare_buf = ide_nop_int32,
2570 .restart_dma = ide_nop,
2571 .rw_buf = ide_nop_int,
/*
 * Re-arm and resubmit an interrupted DMA transfer from the retry state
 * the bus recorded before the failure.
 */
2574 static void ide_restart_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
2576 s->unit = s->bus->retry_unit;
2577 ide_set_sector(s, s->bus->retry_sector_num);
2578 s->nsector = s->bus->retry_nsector;
2579 s->bus->dma->ops->restart_dma(s->bus->dma);
2580 s->io_buffer_size = 0;
2581 s->dma_cmd = dma_cmd;
2582 ide_start_dma(s, ide_dma_cb);
/*
 * Bottom half run after the VM resumes: re-dispatch whatever request
 * failed (DMA, PIO, flush, ATAPI) according to bus->error_status.
 * NOTE(review): interior branches are elided in this extract.
 */
2585 static void ide_restart_bh(void *opaque)
2587 IDEBus *bus = opaque;
2592 qemu_bh_delete(bus->bh);
2595 error_status = bus->error_status;
2596 if (bus->error_status == 0) {
2600 s = idebus_active_if(bus);
2601 is_read = (bus->error_status & IDE_RETRY_READ) != 0;
2603 /* The error status must be cleared before resubmitting the request: The
2604 * request may fail again, and this case can only be distinguished if the
2605 * called function can set a new error status. */
2606 bus->error_status = 0;
2608 /* The HBA has generically asked to be kicked on retry */
2609 if (error_status & IDE_RETRY_HBA) {
2610 if (s->bus->dma->ops->restart) {
2611 s->bus->dma->ops->restart(s->bus->dma);
2613 } else if (IS_IDE_RETRY_DMA(error_status)) {
2614 if (error_status & IDE_RETRY_TRIM) {
2615 ide_restart_dma(s, IDE_DMA_TRIM);
2617 ide_restart_dma(s, is_read ? IDE_DMA_READ : IDE_DMA_WRITE);
2619 } else if (IS_IDE_RETRY_PIO(error_status)) {
2623 ide_sector_write(s);
2625 } else if (error_status & IDE_RETRY_FLUSH) {
2627 } else if (IS_IDE_RETRY_ATAPI(error_status)) {
2628 assert(s->end_transfer_func == ide_atapi_cmd);
2629 ide_atapi_dma_restart(s);
/* VM run-state change handler: schedule the retry BH when resuming. */
2635 static void ide_restart_cb(void *opaque, int running, RunState state)
2637 IDEBus *bus = opaque;
2643 bus->bh = qemu_bh_new(ide_restart_bh, bus);
2644 qemu_bh_schedule(bus->bh);
/* Register the run-state handler only for buses whose DMA can restart. */
2648 void ide_register_restart_cb(IDEBus *bus)
2650 if (bus->dma->ops->restart_dma) {
2651 bus->vmstate = qemu_add_vm_change_state_handler(ide_restart_cb, bus);
/* Default DMA object installed on buses until a real provider attaches. */
2655 static IDEDMA ide_dma_nop = {
2656 .ops = &ide_dma_nop_ops,
/* Initialize both drive slots on a bus, wire the IRQ, set nop DMA. */
2660 void ide_init2(IDEBus *bus, qemu_irq irq)
2664 for(i = 0; i < 2; i++) {
2666 ide_reset(&bus->ifs[i]);
2669 bus->dma = &ide_dma_nop;
/* Tear down per-drive resources allocated in ide_init1(). */
2672 void ide_exit(IDEState *s)
2674 timer_del(s->sector_write_timer);
2675 timer_free(s->sector_write_timer);
2676 qemu_vfree(s->smart_selftest_data);
2677 qemu_vfree(s->io_buffer);
/* ISA port layout: 8 command-block byte ports, plus 16/32-bit data port. */
2680 static const MemoryRegionPortio ide_portio_list[] = {
2681 { 0, 8, 1, .read = ide_ioport_read, .write = ide_ioport_write },
2682 { 0, 1, 2, .read = ide_data_readw, .write = ide_data_writew },
2683 { 0, 1, 4, .read = ide_data_readl, .write = ide_data_writel },
2684 PORTIO_END_OF_LIST(),
/* Control-block port: alternate status read / device control write. */
2687 static const MemoryRegionPortio ide_portio2_list[] = {
2688 { 0, 1, 1, .read = ide_status_read, .write = ide_cmd_write },
2689 PORTIO_END_OF_LIST(),
/* Map the command and (optionally) control port lists on an ISA device. */
2692 void ide_init_ioport(IDEBus *bus, ISADevice *dev, int iobase, int iobase2)
2694 /* ??? Assume only ISA and PCI configurations, and that the PCI-ISA
2695 bridge has been setup properly to always register with ISA. */
2696 isa_register_portio_list(dev, &bus->portio_list,
2697 iobase, ide_portio_list, bus, "ide");
2700 isa_register_portio_list(dev, &bus->portio2_list,
2701 iobase2, ide_portio2_list, bus, "ide");
2705 static bool is_identify_set(void *opaque, int version_id)
2707 IDEState *s = opaque;
2709 return s->identify_set != 0;
2712 static EndTransferFunc* transfer_end_table[] = {
2716 ide_atapi_cmd_reply_end,
2718 ide_dummy_transfer_stop,
2721 static int transfer_end_table_idx(EndTransferFunc *fn)
2725 for (i = 0; i < ARRAY_SIZE(transfer_end_table); i++)
2726 if (transfer_end_table[i] == fn)
/*
 * Post-load hook for vmstate_ide_drive: re-apply the guest's write
 * cache setting to the block backend.  Bit 5 of identify_data[85]
 * presumably mirrors the IDENTIFY "write cache enabled" flag set via
 * SET FEATURES — confirm against the IDENTIFY layout used elsewhere
 * in this file.
 */
static int ide_drive_post_load(void *opaque, int version_id)
{
    IDEState *s = opaque;

    if (s->blk && s->identify_set) {
        blk_set_enable_write_cache(s->blk, !!(s->identify_data[85] & (1 << 5)));
    }
    return 0;
}
/*
 * Post-load hook for the pio_state subsection: rebuild the transient
 * pointers from the migrated offsets.  The table index comes from the
 * wire, so it must be range-checked before use (a corrupt or malicious
 * stream must not index past transfer_end_table).
 */
static int ide_drive_pio_post_load(void *opaque, int version_id)
{
    IDEState *s = opaque;

    if (s->end_transfer_fn_idx >= ARRAY_SIZE(transfer_end_table)) {
        return -EINVAL;
    }
    s->end_transfer_func = transfer_end_table[s->end_transfer_fn_idx];
    /* Turn the migrated offsets back into pointers into io_buffer. */
    s->data_ptr = s->io_buffer + s->cur_io_buffer_offset;
    s->data_end = s->data_ptr + s->cur_io_buffer_len;
    s->atapi_dma = s->feature & 1; /* as per cmd_packet */

    return 0;
}
/*
 * Pre-save hook for the pio_state subsection: convert the transient
 * data_ptr/data_end pointers into offsets relative to io_buffer, and
 * encode end_transfer_func as an index into transfer_end_table.
 * NOTE(review): the if/else around the idx test is reconstructed from
 * the visible fallback/assignment pair — confirm against upstream.
 */
static int ide_drive_pio_pre_save(void *opaque)
{
    IDEState *s = opaque;
    int idx;

    s->cur_io_buffer_offset = s->data_ptr - s->io_buffer;
    s->cur_io_buffer_len = s->data_end - s->data_ptr;

    idx = transfer_end_table_idx(s->end_transfer_func);
    if (idx == -1) {
        /* Should not happen; fall back to ide_transfer_stop (index 2). */
        fprintf(stderr, "%s: invalid end_transfer_func for DRQ_STAT\n",
                        __func__);
        s->end_transfer_fn_idx = 2;
    } else {
        s->end_transfer_fn_idx = idx;
    }

    return 0;
}
2777 static bool ide_drive_pio_state_needed(void *opaque)
2779 IDEState *s = opaque;
2781 return ((s->status & DRQ_STAT) != 0)
2782 || (s->bus->error_status & IDE_RETRY_PIO);
2785 static bool ide_tray_state_needed(void *opaque)
2787 IDEState *s = opaque;
2789 return s->tray_open || s->tray_locked;
2792 static bool ide_atapi_gesn_needed(void *opaque)
2794 IDEState *s = opaque;
2796 return s->events.new_media || s->events.eject_request;
2799 static bool ide_error_needed(void *opaque)
2801 IDEBus *bus = opaque;
2803 return (bus->error_status != 0);
/* Fields for GET_EVENT_STATUS_NOTIFICATION ATAPI command */
static const VMStateDescription vmstate_ide_atapi_gesn_state = {
    .name = "ide_drive/atapi/gesn_state",
    .version_id = 1,  /* NOTE(review): reconstructed; line hidden in excerpt */
    .minimum_version_id = 1,
    .needed = ide_atapi_gesn_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(events.new_media, IDEState),
        VMSTATE_BOOL(events.eject_request, IDEState),
        VMSTATE_END_OF_LIST()
    }
};
/* CD/DVD tray position and lock state (see ide_tray_state_needed). */
static const VMStateDescription vmstate_ide_tray_state = {
    .name = "ide_drive/tray_state",
    .version_id = 1,  /* NOTE(review): reconstructed; line hidden in excerpt */
    .minimum_version_id = 1,
    .needed = ide_tray_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(tray_open, IDEState),
        VMSTATE_BOOL(tray_locked, IDEState),
        VMSTATE_END_OF_LIST()
    }
};
/*
 * In-flight PIO transfer state.  The transient pointers are migrated
 * as offsets into io_buffer (computed in ide_drive_pio_pre_save) and
 * rebuilt in ide_drive_pio_post_load.
 */
static const VMStateDescription vmstate_ide_drive_pio_state = {
    .name = "ide_drive/pio_state",
    .version_id = 1,  /* NOTE(review): reconstructed; line hidden in excerpt */
    .minimum_version_id = 1,
    .pre_save = ide_drive_pio_pre_save,
    .post_load = ide_drive_pio_post_load,
    .needed = ide_drive_pio_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(req_nb_sectors, IDEState),
        /* io_buffer contents, length given by io_buffer_total_len */
        VMSTATE_VARRAY_INT32(io_buffer, IDEState, io_buffer_total_len, 1,
                             vmstate_info_uint8, uint8_t),
        VMSTATE_INT32(cur_io_buffer_offset, IDEState),
        VMSTATE_INT32(cur_io_buffer_len, IDEState),
        /* index into transfer_end_table, not a pointer */
        VMSTATE_UINT8(end_transfer_fn_idx, IDEState),
        VMSTATE_INT32(elementary_transfer_size, IDEState),
        VMSTATE_INT32(packet_transfer_size, IDEState),
        VMSTATE_END_OF_LIST()
    }
};
/*
 * Top-level migration state for one IDE drive (IDEState).  Version 3
 * is implied by the versioned cdrom_changed field below; streams as old
 * as version 0 are accepted (minimum_version_id).
 */
const VMStateDescription vmstate_ide_drive = {
    .name = "ide_drive",
    .version_id = 3,  /* NOTE(review): reconstructed from _V(..., 3) field */
    .minimum_version_id = 0,
    .post_load = ide_drive_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(mult_sectors, IDEState),
        VMSTATE_INT32(identify_set, IDEState),
        /* only sent once the guest has populated IDENTIFY data */
        VMSTATE_BUFFER_TEST(identify_data, IDEState, is_identify_set),
        VMSTATE_UINT8(feature, IDEState),
        VMSTATE_UINT8(error, IDEState),
        VMSTATE_UINT32(nsector, IDEState),
        VMSTATE_UINT8(sector, IDEState),
        VMSTATE_UINT8(lcyl, IDEState),
        VMSTATE_UINT8(hcyl, IDEState),
        /* hob_* = high-order bytes of the LBA48 task file */
        VMSTATE_UINT8(hob_feature, IDEState),
        VMSTATE_UINT8(hob_sector, IDEState),
        VMSTATE_UINT8(hob_nsector, IDEState),
        VMSTATE_UINT8(hob_lcyl, IDEState),
        VMSTATE_UINT8(hob_hcyl, IDEState),
        VMSTATE_UINT8(select, IDEState),
        VMSTATE_UINT8(status, IDEState),
        VMSTATE_UINT8(lba48, IDEState),
        VMSTATE_UINT8(sense_key, IDEState),
        VMSTATE_UINT8(asc, IDEState),
        /* added in stream version 3 */
        VMSTATE_UINT8_V(cdrom_changed, IDEState, 3),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_ide_drive_pio_state,
        &vmstate_ide_tray_state,
        &vmstate_ide_atapi_gesn_state,
        NULL
    }
};
/* Per-bus error/retry state; only sent while an error is outstanding. */
static const VMStateDescription vmstate_ide_error_status = {
    .name = "ide_bus/error",
    .version_id = 2,  /* NOTE(review): reconstructed from _V(..., 2) fields */
    .minimum_version_id = 1,
    .needed = ide_error_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(error_status, IDEBus),
        /* retry_* fields were added in stream version 2 */
        VMSTATE_INT64_V(retry_sector_num, IDEBus, 2),
        VMSTATE_UINT32_V(retry_nsector, IDEBus, 2),
        VMSTATE_UINT8_V(retry_unit, IDEBus, 2),
        VMSTATE_END_OF_LIST()
    }
};
/*
 * Migration state for the bus itself: the last device-control write
 * (cmd) and the currently selected unit.
 * NOTE(review): the .name value is hidden in this excerpt and
 * reconstructed as "ide_bus" — confirm.
 */
const VMStateDescription vmstate_ide_bus = {
    .name = "ide_bus",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(cmd, IDEBus),
        VMSTATE_UINT8(unit, IDEBus),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_ide_error_status,
        NULL
    }
};
/*
 * Fill hd[0..n-1] with the IDE drives configured on the command line.
 * Entries for absent indexes receive whatever drive_get_by_index
 * returns — presumably NULL; callers appear to rely on that (confirm).
 */
void ide_drive_get(DriveInfo **hd, int n)
{
    int i;

    for (i = 0; i < n; i++) {
        hd[i] = drive_get_by_index(IF_IDE, i);
    }
}