/*
 * QEMU IDE disk and CD/DVD-ROM Emulator
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2006 Openedhand Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "hw/i386/pc.h"
#include "hw/pci/pci.h"
#include "hw/isa/isa.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "sysemu/dma.h"
#include "hw/block/block.h"
#include "sysemu/block-backend.h"
#include "qemu/cutils.h"

#include "hw/ide/internal.h"

/* These values were based on a Seagate ST3500418AS but have been modified
   to make more sense in QEMU */
static const int smart_attributes[][12] = {
    /* id,  flags, hflags, val, wrst, raw (6 bytes), threshold */
    /* raw read error rate */
    { 0x01, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06},
    /* spin up */
    { 0x03, 0x03, 0x00, 0x64, 0x64, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
    /* start stop count */
    { 0x04, 0x02, 0x00, 0x64, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14},
    /* remapped sectors */
    { 0x05, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24},
    /* power on hours */
    { 0x09, 0x03, 0x00, 0x64, 0x64, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
    /* power cycle count */
    { 0x0c, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
    /* airflow-temperature-celsius */
    { 190,  0x03, 0x00, 0x45, 0x45, 0x1f, 0x00, 0x1f, 0x1f, 0x00, 0x00, 0x32},
};
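/*
 * Layout of each row, matching the header comment above: [0] attribute ID,
 * [1] flags, [2] hflags, [3] current normalized value, [4] worst value,
 * [5..10] six raw-value bytes, [11] threshold. SMART_READ_DATA below copies
 * bytes 0-10 of a row into its 12-byte attribute record, while
 * SMART_READ_THRESH reports only bytes 0 and 11. The values themselves are
 * synthetic; they merely need to stay self-consistent for guest SMART tools.
 */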

static void ide_dummy_transfer_stop(IDEState *s);

static void padstr(char *str, const char *src, int len)
{
    int i, v;
    for(i = 0; i < len; i++) {
        v = *src ? *src++ : ' ';
        str[i ^ 1] = v; /* IDENTIFY strings are byte-swapped within each word */
    }
}

static void put_le16(uint16_t *p, unsigned int v)
{
    *p = cpu_to_le16(v);
}
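/*
 * IDENTIFY data is kept in guest (little-endian) byte order regardless of
 * host endianness, so every 16-bit word written into identify_data goes
 * through put_le16()/cpu_to_le16() above.
 */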

static void ide_identify_size(IDEState *s)
{
    uint16_t *p = (uint16_t *)s->identify_data;
    put_le16(p + 60, s->nb_sectors);
    put_le16(p + 61, s->nb_sectors >> 16);
    put_le16(p + 100, s->nb_sectors);
    put_le16(p + 101, s->nb_sectors >> 16);
    put_le16(p + 102, s->nb_sectors >> 32);
    put_le16(p + 103, s->nb_sectors >> 48);
}
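/*
 * Words 60-61 of IDENTIFY hold the 28-bit LBA capacity and words 100-103
 * the 48-bit capacity; e.g. nb_sectors = 0x12345678 is stored as
 * word 60 = 0x5678 and word 61 = 0x1234. The capacity words are patched
 * separately here so that ide_resize_cb() can update them without
 * regenerating the whole IDENTIFY block.
 */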

static void ide_identify(IDEState *s)
{
    uint16_t *p;
    unsigned int oldsize;
    IDEDevice *dev = s->unit ? s->bus->slave : s->bus->master;

    p = (uint16_t *)s->identify_data;
    if (s->identify_set) {
        goto fill_buffer;
    }
    memset(p, 0, sizeof(s->identify_data));

    put_le16(p + 0, 0x0040);
    put_le16(p + 1, s->cylinders);
    put_le16(p + 3, s->heads);
    put_le16(p + 4, 512 * s->sectors); /* XXX: retired, remove ? */
    put_le16(p + 5, 512); /* XXX: retired, remove ? */
    put_le16(p + 6, s->sectors);
    padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
    put_le16(p + 20, 3); /* XXX: retired, remove ? */
    put_le16(p + 21, 512); /* cache size in sectors */
    put_le16(p + 22, 4); /* ecc bytes */
    padstr((char *)(p + 23), s->version, 8); /* firmware version */
    padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
#if MAX_MULT_SECTORS > 1
    put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
#endif
    put_le16(p + 48, 1); /* dword I/O */
    put_le16(p + 49, (1 << 11) | (1 << 9) | (1 << 8)); /* DMA and LBA supported */
    put_le16(p + 51, 0x200); /* PIO transfer cycle */
    put_le16(p + 52, 0x200); /* DMA transfer cycle */
    put_le16(p + 53, 1 | (1 << 1) | (1 << 2)); /* words 54-58,64-70,88 are valid */
    put_le16(p + 54, s->cylinders);
    put_le16(p + 55, s->heads);
    put_le16(p + 56, s->sectors);
    oldsize = s->cylinders * s->heads * s->sectors;
    put_le16(p + 57, oldsize);
    put_le16(p + 58, oldsize >> 16);
    if (s->mult_sectors)
        put_le16(p + 59, 0x100 | s->mult_sectors);
    /* *(p + 60) := nb_sectors       -- see ide_identify_size */
    /* *(p + 61) := nb_sectors >> 16 -- see ide_identify_size */
    put_le16(p + 62, 0x07); /* single word dma0-2 supported */
    put_le16(p + 63, 0x07); /* mdma0-2 supported */
    put_le16(p + 64, 0x03); /* pio3-4 supported */
    put_le16(p + 65, 120);
    put_le16(p + 66, 120);
    put_le16(p + 67, 120);
    put_le16(p + 68, 120);
    if (dev && dev->conf.discard_granularity) {
        put_le16(p + 69, (1 << 14)); /* determinate TRIM behavior */
    }

    if (s->ncq_queues) {
        put_le16(p + 75, s->ncq_queues - 1);
        /* NCQ supported */
        put_le16(p + 76, (1 << 8));
    }

    put_le16(p + 80, 0xf0); /* ata3 -> ata6 supported */
    put_le16(p + 81, 0x16); /* conforms to ata5 */
    /* 14=NOP supported, 5=WCACHE supported, 0=SMART supported */
    put_le16(p + 82, (1 << 14) | (1 << 5) | 1);
    /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
    put_le16(p + 83, (1 << 14) | (1 << 13) | (1 << 12) | (1 << 10));
    /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
    if (s->wwn) {
        put_le16(p + 84, (1 << 14) | (1 << 8) | 0);
    } else {
        put_le16(p + 84, (1 << 14) | 0);
    }
    /* 14 = NOP supported, 5=WCACHE enabled, 0=SMART feature set enabled */
    if (blk_enable_write_cache(s->blk)) {
        put_le16(p + 85, (1 << 14) | (1 << 5) | 1);
    } else {
        put_le16(p + 85, (1 << 14) | 1);
    }
    /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
    put_le16(p + 86, (1 << 13) | (1 << 12) | (1 << 10));
    /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
    if (s->wwn) {
        put_le16(p + 87, (1 << 14) | (1 << 8) | 0);
    } else {
        put_le16(p + 87, (1 << 14) | 0);
    }
    put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
    put_le16(p + 93, 1 | (1 << 14) | 0x2000);
    /* *(p + 100) := nb_sectors       -- see ide_identify_size */
    /* *(p + 101) := nb_sectors >> 16 -- see ide_identify_size */
    /* *(p + 102) := nb_sectors >> 32 -- see ide_identify_size */
    /* *(p + 103) := nb_sectors >> 48 -- see ide_identify_size */

    if (dev && dev->conf.physical_block_size)
        put_le16(p + 106, 0x6000 | get_physical_block_exp(&dev->conf));
    if (s->wwn) {
        /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
        put_le16(p + 108, s->wwn >> 48);
        put_le16(p + 109, s->wwn >> 32);
        put_le16(p + 110, s->wwn >> 16);
        put_le16(p + 111, s->wwn);
    }
    if (dev && dev->conf.discard_granularity) {
        put_le16(p + 169, 1); /* TRIM support */
    }

    ide_identify_size(s);
    s->identify_set = 1;

fill_buffer:
    memcpy(s->io_buffer, p, sizeof(s->identify_data));
}
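/*
 * Note on word 106 above: per the ATA8 IDENTIFY layout, 0x6000 sets the
 * "word valid" marker (bit 14) plus "multiple logical sectors per physical
 * sector" (bit 13), and get_physical_block_exp() supplies the log2 ratio in
 * the low bits, e.g. a 4K-physical/512-logical drive reports an exponent
 * of 3. (This reading of the flag bits is an editorial gloss; only the
 * exponent actually comes from the device configuration.)
 */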

static void ide_atapi_identify(IDEState *s)
{
    uint16_t *p;

    p = (uint16_t *)s->identify_data;
    if (s->identify_set) {
        goto fill_buffer;
    }
    memset(p, 0, sizeof(s->identify_data));

    /* Removable CDROM, 50us response, 12 byte packets */
    put_le16(p + 0, (2 << 14) | (5 << 8) | (1 << 7) | (2 << 5) | (0 << 0));
    padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
    put_le16(p + 20, 3); /* buffer type */
    put_le16(p + 21, 512); /* cache size in sectors */
    put_le16(p + 22, 4); /* ecc bytes */
    padstr((char *)(p + 23), s->version, 8); /* firmware version */
    padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
    put_le16(p + 48, 1); /* dword I/O (XXX: should not be set on CDROM) */
#ifdef USE_DMA_CDROM
    put_le16(p + 49, 1 << 9 | 1 << 8); /* DMA and LBA supported */
    put_le16(p + 53, 7); /* words 64-70, 54-58, 88 valid */
    put_le16(p + 62, 7); /* single word dma0-2 supported */
    put_le16(p + 63, 7); /* mdma0-2 supported */
#else
    put_le16(p + 49, 1 << 9); /* LBA supported, no DMA */
    put_le16(p + 53, 3); /* words 64-70, 54-58 valid */
    put_le16(p + 63, 0x103); /* DMA modes XXX: may be incorrect */
#endif
    put_le16(p + 64, 3); /* pio3-4 supported */
    put_le16(p + 65, 0xb4); /* minimum DMA multiword tx cycle time */
    put_le16(p + 66, 0xb4); /* recommended DMA multiword tx cycle time */
    put_le16(p + 67, 0x12c); /* minimum PIO cycle time without flow control */
    put_le16(p + 68, 0xb4); /* minimum PIO cycle time with IORDY flow control */
    put_le16(p + 71, 30); /* in ns */
    put_le16(p + 72, 30); /* in ns */

    if (s->ncq_queues) {
        put_le16(p + 75, s->ncq_queues - 1);
        /* NCQ supported */
        put_le16(p + 76, (1 << 8));
    }

    put_le16(p + 80, 0x1e); /* support up to ATA/ATAPI-4 */
    if (s->wwn) {
        put_le16(p + 84, (1 << 8)); /* supports WWN for words 108-111 */
        put_le16(p + 87, (1 << 8)); /* WWN enabled */
    }

#ifdef USE_DMA_CDROM
    put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
#endif

    if (s->wwn) {
        /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
        put_le16(p + 108, s->wwn >> 48);
        put_le16(p + 109, s->wwn >> 32);
        put_le16(p + 110, s->wwn >> 16);
        put_le16(p + 111, s->wwn);
    }

    s->identify_set = 1;

fill_buffer:
    memcpy(s->io_buffer, p, sizeof(s->identify_data));
}

static void ide_cfata_identify_size(IDEState *s)
{
    uint16_t *p = (uint16_t *)s->identify_data;
    put_le16(p + 7, s->nb_sectors >> 16);  /* Sectors per card */
    put_le16(p + 8, s->nb_sectors);        /* Sectors per card */
    put_le16(p + 60, s->nb_sectors);       /* Total LBA sectors */
    put_le16(p + 61, s->nb_sectors >> 16); /* Total LBA sectors */
}

static void ide_cfata_identify(IDEState *s)
{
    uint16_t *p;
    uint32_t cur_sec;

    p = (uint16_t *)s->identify_data;
    if (s->identify_set) {
        goto fill_buffer;
    }
    memset(p, 0, sizeof(s->identify_data));

    cur_sec = s->cylinders * s->heads * s->sectors;

    put_le16(p + 0, 0x848a);            /* CF Storage Card signature */
    put_le16(p + 1, s->cylinders);      /* Default cylinders */
    put_le16(p + 3, s->heads);          /* Default heads */
    put_le16(p + 6, s->sectors);        /* Default sectors per track */
    /* *(p + 7) := nb_sectors >> 16 -- see ide_cfata_identify_size */
    /* *(p + 8) := nb_sectors       -- see ide_cfata_identify_size */
    padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
    put_le16(p + 22, 0x0004);           /* ECC bytes */
    padstr((char *) (p + 23), s->version, 8);          /* Firmware Revision */
    padstr((char *) (p + 27), s->drive_model_str, 40); /* Model number */
#if MAX_MULT_SECTORS > 1
    put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
#else
    put_le16(p + 47, 0x0000);
#endif
    put_le16(p + 49, 0x0f00);           /* Capabilities */
    put_le16(p + 51, 0x0002);           /* PIO cycle timing mode */
    put_le16(p + 52, 0x0001);           /* DMA cycle timing mode */
    put_le16(p + 53, 0x0003);           /* Translation params valid */
    put_le16(p + 54, s->cylinders);     /* Current cylinders */
    put_le16(p + 55, s->heads);         /* Current heads */
    put_le16(p + 56, s->sectors);       /* Current sectors */
    put_le16(p + 57, cur_sec);          /* Current capacity */
    put_le16(p + 58, cur_sec >> 16);    /* Current capacity */
    if (s->mult_sectors)                /* Multiple sector setting */
        put_le16(p + 59, 0x100 | s->mult_sectors);
    /* *(p + 60) := nb_sectors       -- see ide_cfata_identify_size */
    /* *(p + 61) := nb_sectors >> 16 -- see ide_cfata_identify_size */
    put_le16(p + 63, 0x0203);           /* Multiword DMA capability */
    put_le16(p + 64, 0x0001);           /* Flow Control PIO support */
    put_le16(p + 65, 0x0096);           /* Min. Multiword DMA cycle */
    put_le16(p + 66, 0x0096);           /* Rec. Multiword DMA cycle */
    put_le16(p + 68, 0x00b4);           /* Min. PIO cycle time */
    put_le16(p + 82, 0x400c);           /* Command Set supported */
    put_le16(p + 83, 0x7068);           /* Command Set supported */
    put_le16(p + 84, 0x4000);           /* Features supported */
    put_le16(p + 85, 0x000c);           /* Command Set enabled */
    put_le16(p + 86, 0x7044);           /* Command Set enabled */
    put_le16(p + 87, 0x4000);           /* Features enabled */
    put_le16(p + 91, 0x4060);           /* Current APM level */
    put_le16(p + 129, 0x0002);          /* Current features option */
    put_le16(p + 130, 0x0005);          /* Reassigned sectors */
    put_le16(p + 131, 0x0001);          /* Initial power mode */
    put_le16(p + 132, 0x0000);          /* User signature */
    put_le16(p + 160, 0x8100);          /* Power requirement */
    put_le16(p + 161, 0x8001);          /* CF command set */

    ide_cfata_identify_size(s);
    s->identify_set = 1;

fill_buffer:
    memcpy(s->io_buffer, p, sizeof(s->identify_data));
}

static void ide_set_signature(IDEState *s)
{
    s->select &= 0xf0; /* clear head */
    /* put signature */
    s->nsector = 1;
    s->sector = 1;
    if (s->drive_kind == IDE_CD) {
        s->lcyl = 0x14;
        s->hcyl = 0xeb;
    } else if (s->blk) {
        s->lcyl = 0;
        s->hcyl = 0;
    } else {
        s->lcyl = 0xff;
        s->hcyl = 0xff;
    }
}

typedef struct TrimAIOCB {
    BlockAIOCB common;
    BlockBackend *blk;
    QEMUBH *bh;
    int ret;
    QEMUIOVector *qiov;
    BlockAIOCB *aiocb;
    int i, j;
} TrimAIOCB;

static void trim_aio_cancel(BlockAIOCB *acb)
{
    TrimAIOCB *iocb = container_of(acb, TrimAIOCB, common);

    /* Exit the loop so ide_issue_trim_cb will not continue */
    iocb->j = iocb->qiov->niov - 1;
    iocb->i = (iocb->qiov->iov[iocb->j].iov_len / 8) - 1;

    iocb->ret = -ECANCELED;

    if (iocb->aiocb) {
        blk_aio_cancel_async(iocb->aiocb);
        iocb->aiocb = NULL;
    }
}

static const AIOCBInfo trim_aiocb_info = {
    .aiocb_size         = sizeof(TrimAIOCB),
    .cancel_async       = trim_aio_cancel,
};

static void ide_trim_bh_cb(void *opaque)
{
    TrimAIOCB *iocb = opaque;

    iocb->common.cb(iocb->common.opaque, iocb->ret);

    qemu_bh_delete(iocb->bh);
    iocb->bh = NULL;
    qemu_aio_unref(iocb);
}

static void ide_issue_trim_cb(void *opaque, int ret)
{
    TrimAIOCB *iocb = opaque;
    if (ret >= 0) {
        while (iocb->j < iocb->qiov->niov) {
            int j = iocb->j;
            while (++iocb->i < iocb->qiov->iov[j].iov_len / 8) {
                int i = iocb->i;
                uint64_t *buffer = iocb->qiov->iov[j].iov_base;

                /* 6-byte LBA + 2-byte range per entry */
                uint64_t entry = le64_to_cpu(buffer[i]);
                uint64_t sector = entry & 0x0000ffffffffffffULL;
                uint16_t count = entry >> 48;

                if (count == 0) {
                    continue;
                }

                /* Got an entry! Submit and exit. */
                iocb->aiocb = blk_aio_pdiscard(iocb->blk,
                                               sector << BDRV_SECTOR_BITS,
                                               count << BDRV_SECTOR_BITS,
                                               ide_issue_trim_cb, opaque);
                return;
            }

            iocb->j++;
            iocb->i = -1;
        }
    } else {
        iocb->ret = ret;
    }

    iocb->aiocb = NULL;
    if (iocb->bh) {
        qemu_bh_schedule(iocb->bh);
    }
}
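/*
 * Worked example of the DSM/TRIM payload decoded above: each 8-byte entry
 * packs a 48-bit starting LBA and a 16-bit sector count, so the
 * little-endian entry 0x0008000000001000 means "discard 8 sectors starting
 * at LBA 0x1000", which becomes a blk_aio_pdiscard() of 8 * 512 bytes at
 * byte offset 0x1000 * 512. A count of zero marks an unused entry and is
 * skipped.
 */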

BlockAIOCB *ide_issue_trim(
        int64_t offset, QEMUIOVector *qiov,
        BlockCompletionFunc *cb, void *cb_opaque, void *opaque)
{
    BlockBackend *blk = opaque;
    TrimAIOCB *iocb;

    iocb = blk_aio_get(&trim_aiocb_info, blk, cb, cb_opaque);
    iocb->blk = blk;
    iocb->bh = qemu_bh_new(ide_trim_bh_cb, iocb);
    iocb->ret = 0;
    iocb->qiov = qiov;
    iocb->i = -1;
    iocb->j = 0;
    ide_issue_trim_cb(iocb, 0);
    return &iocb->common;
}

void ide_abort_command(IDEState *s)
{
    ide_transfer_stop(s);
    s->status = READY_STAT | ERR_STAT;
    s->error = ABRT_ERR;
}

static void ide_set_retry(IDEState *s)
{
    s->bus->retry_unit = s->unit;
    s->bus->retry_sector_num = ide_get_sector(s);
    s->bus->retry_nsector = s->nsector;
}

static void ide_clear_retry(IDEState *s)
{
    s->bus->retry_unit = -1;
    s->bus->retry_sector_num = 0;
    s->bus->retry_nsector = 0;
}

/* prepare data transfer and tell what to do after */
void ide_transfer_start(IDEState *s, uint8_t *buf, int size,
                        EndTransferFunc *end_transfer_func)
{
    s->end_transfer_func = end_transfer_func;
    s->data_ptr = buf;
    s->data_end = buf + size;
    ide_set_retry(s);
    if (!(s->status & ERR_STAT)) {
        s->status |= DRQ_STAT;
    }
    if (s->bus->dma->ops->start_transfer) {
        s->bus->dma->ops->start_transfer(s->bus->dma);
    }
}

static void ide_cmd_done(IDEState *s)
{
    if (s->bus->dma->ops->cmd_done) {
        s->bus->dma->ops->cmd_done(s->bus->dma);
    }
}

static void ide_transfer_halt(IDEState *s,
                              void(*end_transfer_func)(IDEState *),
                              bool notify)
{
    s->end_transfer_func = end_transfer_func;
    s->data_ptr = s->io_buffer;
    s->data_end = s->io_buffer;
    s->status &= ~DRQ_STAT;
    if (notify) {
        ide_cmd_done(s);
    }
}

void ide_transfer_stop(IDEState *s)
{
    ide_transfer_halt(s, ide_transfer_stop, true);
}

static void ide_transfer_cancel(IDEState *s)
{
    ide_transfer_halt(s, ide_transfer_cancel, false);
}

int64_t ide_get_sector(IDEState *s)
{
    int64_t sector_num;
    if (s->select & 0x40) {
        /* lba */
        if (!s->lba48) {
            sector_num = ((s->select & 0x0f) << 24) | (s->hcyl << 16) |
                (s->lcyl << 8) | s->sector;
        } else {
            sector_num = ((int64_t)s->hob_hcyl << 40) |
                ((int64_t) s->hob_lcyl << 32) |
                ((int64_t) s->hob_sector << 24) |
                ((int64_t) s->hcyl << 16) |
                ((int64_t) s->lcyl << 8) | s->sector;
        }
    } else {
        sector_num = ((s->hcyl << 8) | s->lcyl) * s->heads * s->sectors +
            (s->select & 0x0f) * s->sectors + (s->sector - 1);
    }
    return sector_num;
}
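/*
 * In the CHS branch above the linear sector number is
 *   LBA = ((cyl * heads) + head) * sectors + (sector - 1)
 * e.g. with heads=16, sectors=63: C/H/S 1/0/1 maps to LBA 1008 (16 * 63).
 * ide_set_sector() below performs the inverse split.
 */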

void ide_set_sector(IDEState *s, int64_t sector_num)
{
    unsigned int cyl, r;
    if (s->select & 0x40) {
        if (!s->lba48) {
            s->select = (s->select & 0xf0) | (sector_num >> 24);
            s->hcyl = (sector_num >> 16);
            s->lcyl = (sector_num >> 8);
            s->sector = (sector_num);
        } else {
            s->sector = sector_num;
            s->lcyl = sector_num >> 8;
            s->hcyl = sector_num >> 16;
            s->hob_sector = sector_num >> 24;
            s->hob_lcyl = sector_num >> 32;
            s->hob_hcyl = sector_num >> 40;
        }
    } else {
        cyl = sector_num / (s->heads * s->sectors);
        r = sector_num % (s->heads * s->sectors);
        s->hcyl = cyl >> 8;
        s->lcyl = cyl;
        s->select = (s->select & 0xf0) | ((r / s->sectors) & 0x0f);
        s->sector = (r % s->sectors) + 1;
    }
}

static void ide_rw_error(IDEState *s) {
    ide_abort_command(s);
    ide_set_irq(s->bus);
}

static bool ide_sect_range_ok(IDEState *s,
                              uint64_t sector, uint64_t nb_sectors)
{
    uint64_t total_sectors;

    blk_get_geometry(s->blk, &total_sectors);
    if (sector > total_sectors || nb_sectors > total_sectors - sector) {
        return false;
    }
    return true;
}

static void ide_buffered_readv_cb(void *opaque, int ret)
{
    IDEBufferedRequest *req = opaque;
    if (!req->orphaned) {
        if (!ret) {
            qemu_iovec_from_buf(req->original_qiov, 0, req->iov.iov_base,
                                req->original_qiov->size);
        }
        req->original_cb(req->original_opaque, ret);
    }
    QLIST_REMOVE(req, list);
    qemu_vfree(req->iov.iov_base);
    g_free(req);
}

#define MAX_BUFFERED_REQS 16

BlockAIOCB *ide_buffered_readv(IDEState *s, int64_t sector_num,
                               QEMUIOVector *iov, int nb_sectors,
                               BlockCompletionFunc *cb, void *opaque)
{
    BlockAIOCB *aioreq;
    IDEBufferedRequest *req;
    int c = 0;

    QLIST_FOREACH(req, &s->buffered_requests, list) {
        c++;
    }
    if (c > MAX_BUFFERED_REQS) {
        return blk_abort_aio_request(s->blk, cb, opaque, -EIO);
    }

    req = g_new0(IDEBufferedRequest, 1);
    req->original_qiov = iov;
    req->original_cb = cb;
    req->original_opaque = opaque;
    req->iov.iov_base = qemu_blockalign(blk_bs(s->blk), iov->size);
    req->iov.iov_len = iov->size;
    qemu_iovec_init_external(&req->qiov, &req->iov, 1);

    aioreq = blk_aio_preadv(s->blk, sector_num << BDRV_SECTOR_BITS,
                            &req->qiov, 0, ide_buffered_readv_cb, req);

    QLIST_INSERT_HEAD(&s->buffered_requests, req, list);
    return aioreq;
}

/**
 * Cancel all pending DMA requests.
 * Any buffered DMA requests are instantly canceled,
 * but any pending unbuffered DMA requests must be waited on.
 */
void ide_cancel_dma_sync(IDEState *s)
{
    IDEBufferedRequest *req;

    /* First invoke the callbacks of all buffered requests
     * and flag those requests as orphaned. Ideally there
     * are no unbuffered (Scatter Gather DMA Requests or
     * write requests) pending and we can avoid to drain. */
    QLIST_FOREACH(req, &s->buffered_requests, list) {
        if (!req->orphaned) {
            trace_ide_cancel_dma_sync_buffered(req->original_cb, req);
            req->original_cb(req->original_opaque, -ECANCELED);
        }
        req->orphaned = true;
    }

    /*
     * We can't cancel Scatter Gather DMA in the middle of the
     * operation or a partial (not full) DMA transfer would reach
     * the storage, so we wait for completion instead (we behave
     * as if the DMA had already completed by the time the guest
     * tried to cancel it via bmdma_cmd_writeb with BM_CMD_START
     * not set).
     *
     * In the future we'll be able to safely cancel the I/O if the
     * whole DMA operation will be submitted to disk with a single
     * aio operation with preadv/pwritev.
     */
    if (s->bus->dma->aiocb) {
        trace_ide_cancel_dma_sync_remaining();
        blk_drain(s->blk);
        assert(s->bus->dma->aiocb == NULL);
    }
}

static void ide_sector_read(IDEState *s);

static void ide_sector_read_cb(void *opaque, int ret)
{
    IDEState *s = opaque;
    int n;

    s->pio_aiocb = NULL;
    s->status &= ~BUSY_STAT;

    if (ret == -ECANCELED) {
        return;
    }
    if (ret != 0) {
        if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO |
                                IDE_RETRY_READ)) {
            return;
        }
    }

    block_acct_done(blk_get_stats(s->blk), &s->acct);

    n = s->nsector;
    if (n > s->req_nb_sectors) {
        n = s->req_nb_sectors;
    }

    ide_set_sector(s, ide_get_sector(s) + n);
    s->nsector -= n;
    /* Allow the guest to read the io_buffer */
    ide_transfer_start(s, s->io_buffer, n * BDRV_SECTOR_SIZE, ide_sector_read);
    ide_set_irq(s->bus);
}

static void ide_sector_read(IDEState *s)
{
    int64_t sector_num;
    int n;

    s->status = READY_STAT | SEEK_STAT;
    s->error = 0; /* not needed by IDE spec, but needed by Windows */
    sector_num = ide_get_sector(s);
    n = s->nsector;

    if (n == 0) {
        ide_transfer_stop(s);
        return;
    }

    s->status |= BUSY_STAT;

    if (n > s->req_nb_sectors) {
        n = s->req_nb_sectors;
    }

    trace_ide_sector_read(sector_num, n);

    if (!ide_sect_range_ok(s, sector_num, n)) {
        ide_rw_error(s);
        block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_READ);
        return;
    }

    s->iov.iov_base = s->io_buffer;
    s->iov.iov_len  = n * BDRV_SECTOR_SIZE;
    qemu_iovec_init_external(&s->qiov, &s->iov, 1);

    block_acct_start(blk_get_stats(s->blk), &s->acct,
                     n * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
    s->pio_aiocb = ide_buffered_readv(s, sector_num, &s->qiov, n,
                                      ide_sector_read_cb, s);
}

void dma_buf_commit(IDEState *s, uint32_t tx_bytes)
{
    if (s->bus->dma->ops->commit_buf) {
        s->bus->dma->ops->commit_buf(s->bus->dma, tx_bytes);
    }
    s->io_buffer_offset += tx_bytes;
    qemu_sglist_destroy(&s->sg);
}

void ide_set_inactive(IDEState *s, bool more)
{
    s->bus->dma->aiocb = NULL;
    ide_clear_retry(s);
    if (s->bus->dma->ops->set_inactive) {
        s->bus->dma->ops->set_inactive(s->bus->dma, more);
    }
    ide_cmd_done(s);
}

void ide_dma_error(IDEState *s)
{
    dma_buf_commit(s, 0);
    ide_abort_command(s);
    ide_set_inactive(s, false);
    ide_set_irq(s->bus);
}

int ide_handle_rw_error(IDEState *s, int error, int op)
{
    bool is_read = (op & IDE_RETRY_READ) != 0;
    BlockErrorAction action = blk_get_error_action(s->blk, is_read, error);

    if (action == BLOCK_ERROR_ACTION_STOP) {
        assert(s->bus->retry_unit == s->unit);
        s->bus->error_status = op;
    } else if (action == BLOCK_ERROR_ACTION_REPORT) {
        block_acct_failed(blk_get_stats(s->blk), &s->acct);
        if (IS_IDE_RETRY_DMA(op)) {
            ide_dma_error(s);
        } else if (IS_IDE_RETRY_ATAPI(op)) {
            ide_atapi_io_error(s, -error);
        } else {
            ide_rw_error(s);
        }
    }
    blk_error_action(s->blk, action, is_read, error);
    return action != BLOCK_ERROR_ACTION_IGNORE;
}

static void ide_dma_cb(void *opaque, int ret)
{
    IDEState *s = opaque;
    int n;
    int64_t sector_num;
    uint64_t offset;
    bool stay_active = false;

    if (ret == -ECANCELED) {
        return;
    }
    if (ret < 0) {
        if (ide_handle_rw_error(s, -ret, ide_dma_cmd_to_retry(s->dma_cmd))) {
            s->bus->dma->aiocb = NULL;
            dma_buf_commit(s, 0);
            return;
        }
    }

    n = s->io_buffer_size >> 9;
    if (n > s->nsector) {
        /* The PRDs were longer than needed for this request. Shorten them so
         * we don't get a negative remainder. The Active bit must remain set
         * after the request completes. */
        n = s->nsector;
        stay_active = true;
    }

    sector_num = ide_get_sector(s);
    if (n > 0) {
        assert(n * 512 == s->sg.size);
        dma_buf_commit(s, s->sg.size);
        sector_num += n;
        ide_set_sector(s, sector_num);
        s->nsector -= n;
    }

    /* end of transfer ? */
    if (s->nsector == 0) {
        s->status = READY_STAT | SEEK_STAT;
        ide_set_irq(s->bus);
        goto eot;
    }

    /* launch next transfer */
    n = s->nsector;
    s->io_buffer_index = 0;
    s->io_buffer_size = n * 512;
    if (s->bus->dma->ops->prepare_buf(s->bus->dma, s->io_buffer_size) < 512) {
        /* The PRDs were too short. Reset the Active bit, but don't raise an
         * interrupt. */
        s->status = READY_STAT | SEEK_STAT;
        dma_buf_commit(s, 0);
        goto eot;
    }

#ifdef DEBUG_IDE
    printf("ide_dma_cb: sector_num=%" PRId64 " n=%d, dma_cmd=%d\n",
           sector_num, n, s->dma_cmd);
#endif

    if ((s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) &&
        !ide_sect_range_ok(s, sector_num, n)) {
        ide_dma_error(s);
        block_acct_invalid(blk_get_stats(s->blk), s->acct.type);
        return;
    }

    offset = sector_num << BDRV_SECTOR_BITS;
    switch (s->dma_cmd) {
    case IDE_DMA_READ:
        s->bus->dma->aiocb = dma_blk_read(s->blk, &s->sg, offset,
                                          BDRV_SECTOR_SIZE, ide_dma_cb, s);
        break;
    case IDE_DMA_WRITE:
        s->bus->dma->aiocb = dma_blk_write(s->blk, &s->sg, offset,
                                           BDRV_SECTOR_SIZE, ide_dma_cb, s);
        break;
    case IDE_DMA_TRIM:
        s->bus->dma->aiocb = dma_blk_io(blk_get_aio_context(s->blk),
                                        &s->sg, offset, BDRV_SECTOR_SIZE,
                                        ide_issue_trim, s->blk, ide_dma_cb, s,
                                        DMA_DIRECTION_TO_DEVICE);
        break;
    default:
        abort();
    }
    return;

eot:
    if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
        block_acct_done(blk_get_stats(s->blk), &s->acct);
    }
    ide_set_inactive(s, stay_active);
}

static void ide_sector_start_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
{
    s->status = READY_STAT | SEEK_STAT | DRQ_STAT;
    s->io_buffer_size = 0;
    s->dma_cmd = dma_cmd;

    switch (dma_cmd) {
    case IDE_DMA_READ:
        block_acct_start(blk_get_stats(s->blk), &s->acct,
                         s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
        break;
    case IDE_DMA_WRITE:
        block_acct_start(blk_get_stats(s->blk), &s->acct,
                         s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
        break;
    default:
        break;
    }

    ide_start_dma(s, ide_dma_cb);
}

void ide_start_dma(IDEState *s, BlockCompletionFunc *cb)
{
    s->io_buffer_index = 0;
    ide_set_retry(s);
    if (s->bus->dma->ops->start_dma) {
        s->bus->dma->ops->start_dma(s->bus->dma, s, cb);
    }
}

static void ide_sector_write(IDEState *s);

static void ide_sector_write_timer_cb(void *opaque)
{
    IDEState *s = opaque;

    ide_set_irq(s->bus);
}

static void ide_sector_write_cb(void *opaque, int ret)
{
    IDEState *s = opaque;
    int n;

    if (ret == -ECANCELED) {
        return;
    }

    s->pio_aiocb = NULL;
    s->status &= ~BUSY_STAT;

    if (ret != 0) {
        if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO)) {
            return;
        }
    }

    block_acct_done(blk_get_stats(s->blk), &s->acct);

    n = s->nsector;
    if (n > s->req_nb_sectors) {
        n = s->req_nb_sectors;
    }
    s->nsector -= n;

    ide_set_sector(s, ide_get_sector(s) + n);
    if (s->nsector == 0) {
        /* no more sectors to write */
        ide_transfer_stop(s);
    } else {
        int n1 = s->nsector;
        if (n1 > s->req_nb_sectors) {
            n1 = s->req_nb_sectors;
        }
        ide_transfer_start(s, s->io_buffer, n1 * BDRV_SECTOR_SIZE,
                           ide_sector_write);
    }

    if (win2k_install_hack && ((++s->irq_count % 16) == 0)) {
        /* It seems there is a bug in the Windows 2000 installer HDD
           IDE driver which fills the disk with empty logs when the
           IDE write IRQ comes too early. This hack tries to correct
           that at the expense of slower write performance. Use this
           option _only_ to install Windows 2000. You must disable it
           for normal use. */
        timer_mod(s->sector_write_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                  (NANOSECONDS_PER_SECOND / 1000));
    } else {
        ide_set_irq(s->bus);
    }
}

static void ide_sector_write(IDEState *s)
{
    int64_t sector_num;
    int n;

    s->status = READY_STAT | SEEK_STAT | BUSY_STAT;
    sector_num = ide_get_sector(s);
    n = s->nsector;
    if (n > s->req_nb_sectors) {
        n = s->req_nb_sectors;
    }

    trace_ide_sector_write(sector_num, n);

    if (!ide_sect_range_ok(s, sector_num, n)) {
        ide_rw_error(s);
        block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_WRITE);
        return;
    }

    s->iov.iov_base = s->io_buffer;
    s->iov.iov_len  = n * BDRV_SECTOR_SIZE;
    qemu_iovec_init_external(&s->qiov, &s->iov, 1);

    block_acct_start(blk_get_stats(s->blk), &s->acct,
                     n * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
    s->pio_aiocb = blk_aio_pwritev(s->blk, sector_num << BDRV_SECTOR_BITS,
                                   &s->qiov, 0, ide_sector_write_cb, s);
}

static void ide_flush_cb(void *opaque, int ret)
{
    IDEState *s = opaque;

    s->pio_aiocb = NULL;

    if (ret == -ECANCELED) {
        return;
    }
    if (ret < 0) {
        /* XXX: What sector number to set here? */
        if (ide_handle_rw_error(s, -ret, IDE_RETRY_FLUSH)) {
            return;
        }
    }

    if (s->blk) {
        block_acct_done(blk_get_stats(s->blk), &s->acct);
    }
    s->status = READY_STAT | SEEK_STAT;
    ide_cmd_done(s);
    ide_set_irq(s->bus);
}

static void ide_flush_cache(IDEState *s)
{
    if (s->blk == NULL) {
        ide_flush_cb(s, 0);
        return;
    }

    s->status |= BUSY_STAT;
    ide_set_retry(s);
    block_acct_start(blk_get_stats(s->blk), &s->acct, 0, BLOCK_ACCT_FLUSH);

    if (blk_bs(s->blk)) {
        s->pio_aiocb = blk_aio_flush(s->blk, ide_flush_cb, s);
    } else {
        /* XXX blk_aio_flush() crashes when blk_bs(blk) is NULL, remove this
         * temporary workaround when blk_aio_*() functions handle NULL blk_bs.
         */
        ide_flush_cb(s, 0);
    }
}

static void ide_cfata_metadata_inquiry(IDEState *s)
{
    uint16_t *p;
    uint32_t spd;

    p = (uint16_t *) s->io_buffer;
    memset(p, 0, 0x200);
    spd = ((s->mdata_size - 1) >> 9) + 1;

    put_le16(p + 0, 0x0001);                    /* Data format revision */
    put_le16(p + 1, 0x0000);                    /* Media property: silicon */
    put_le16(p + 2, s->media_changed);          /* Media status */
    put_le16(p + 3, s->mdata_size & 0xffff);    /* Capacity in bytes (low) */
    put_le16(p + 4, s->mdata_size >> 16);       /* Capacity in bytes (high) */
    put_le16(p + 5, spd & 0xffff);              /* Sectors per device (low) */
    put_le16(p + 6, spd >> 16);                 /* Sectors per device (high) */
}

static void ide_cfata_metadata_read(IDEState *s)
{
    uint16_t *p;

    if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
        s->status = ERR_STAT;
        s->error = ABRT_ERR;
        return;
    }

    p = (uint16_t *) s->io_buffer;
    memset(p, 0, 0x200);

    put_le16(p + 0, s->media_changed);          /* Media status */
    memcpy(p + 1, s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
           MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
                   s->nsector << 9), 0x200 - 2));
}

static void ide_cfata_metadata_write(IDEState *s)
{
    if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
        s->status = ERR_STAT;
        s->error = ABRT_ERR;
        return;
    }

    s->media_changed = 0;

    memcpy(s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
           s->io_buffer + 2,
           MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
                   s->nsector << 9), 0x200 - 2));
}

/* called when the inserted state of the media has changed */
static void ide_cd_change_cb(void *opaque, bool load, Error **errp)
{
    IDEState *s = opaque;
    uint64_t nb_sectors;

    s->tray_open = !load;
    blk_get_geometry(s->blk, &nb_sectors);
    s->nb_sectors = nb_sectors;

    /*
     * First indicate to the guest that a CD has been removed. That's
     * done on the next command the guest sends us.
     *
     * Then we set UNIT_ATTENTION, by which the guest will
     * detect a new CD in the drive. See ide_atapi_cmd() for details.
     */
    s->cdrom_changed = 1;
    s->events.new_media = true;
    s->events.eject_request = false;
    ide_set_irq(s->bus);
}

static void ide_cd_eject_request_cb(void *opaque, bool force)
{
    IDEState *s = opaque;

    s->events.eject_request = true;
    if (force) {
        s->tray_locked = false;
    }
    ide_set_irq(s->bus);
}

static void ide_cmd_lba48_transform(IDEState *s, int lba48)
{
    s->lba48 = lba48;

    /* handle the 'magic' 0 nsector count conversion here. To avoid
     * fiddling with the rest of the read logic, we just store the
     * full sector count in ->nsector and ignore ->hob_nsector from now
     */
    if (!s->lba48) {
        if (!s->nsector)
            s->nsector = 256;
    } else {
        if (!s->nsector && !s->hob_nsector)
            s->nsector = 65536;
        else {
            int lo = s->nsector;
            int hi = s->hob_nsector;

            s->nsector = (hi << 8) | lo;
        }
    }
}
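/*
 * A zero sector count is ATA shorthand for the maximum transfer: 256
 * sectors for 28-bit commands and 65536 for 48-bit ones, which is what
 * the conversion above stores into ->nsector. E.g. a READ SECTORS with
 * nsector=0 reads 256 sectors (128 KiB at 512 bytes per sector).
 */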

static void ide_clear_hob(IDEBus *bus)
{
    /* any write clears HOB high bit of device control register */
    bus->ifs[0].select &= ~(1 << 7);
    bus->ifs[1].select &= ~(1 << 7);
}

void ide_ioport_write(void *opaque, uint32_t addr, uint32_t val)
{
    IDEBus *bus = opaque;
    IDEState *s = idebus_active_if(bus);
    int reg_num = addr & 7;

    trace_ide_ioport_write(addr, val, bus, s);

    /* ignore writes to command block while busy with previous command */
    if (reg_num != 7 && (s->status & (BUSY_STAT|DRQ_STAT))) {
        return;
    }

    switch (reg_num) {
    case 0:
        /* ignore */
        break;
    case 1:
        ide_clear_hob(bus);
        /* NOTE: data is written to the two drives */
        bus->ifs[0].hob_feature = bus->ifs[0].feature;
        bus->ifs[1].hob_feature = bus->ifs[1].feature;
        bus->ifs[0].feature = val;
        bus->ifs[1].feature = val;
        break;
    case 2:
        ide_clear_hob(bus);
        bus->ifs[0].hob_nsector = bus->ifs[0].nsector;
        bus->ifs[1].hob_nsector = bus->ifs[1].nsector;
        bus->ifs[0].nsector = val;
        bus->ifs[1].nsector = val;
        break;
    case 3:
        ide_clear_hob(bus);
        bus->ifs[0].hob_sector = bus->ifs[0].sector;
        bus->ifs[1].hob_sector = bus->ifs[1].sector;
        bus->ifs[0].sector = val;
        bus->ifs[1].sector = val;
        break;
    case 4:
        ide_clear_hob(bus);
        bus->ifs[0].hob_lcyl = bus->ifs[0].lcyl;
        bus->ifs[1].hob_lcyl = bus->ifs[1].lcyl;
        bus->ifs[0].lcyl = val;
        bus->ifs[1].lcyl = val;
        break;
    case 5:
        ide_clear_hob(bus);
        bus->ifs[0].hob_hcyl = bus->ifs[0].hcyl;
        bus->ifs[1].hob_hcyl = bus->ifs[1].hcyl;
        bus->ifs[0].hcyl = val;
        bus->ifs[1].hcyl = val;
        break;
    case 6:
        /* FIXME: HOB readback uses bit 7 */
        bus->ifs[0].select = (val & ~0x10) | 0xa0;
        bus->ifs[1].select = (val | 0x10) | 0xa0;
        /* select drive */
        bus->unit = (val >> 4) & 1;
        break;
    default:
    case 7:
        /* command */
        ide_exec_cmd(bus, val);
        break;
    }
}

static void ide_reset(IDEState *s)
{
    if (s->pio_aiocb) {
        blk_aio_cancel(s->pio_aiocb);
        s->pio_aiocb = NULL;
    }

    if (s->drive_kind == IDE_CFATA)
        s->mult_sectors = 0;
    else
        s->mult_sectors = MAX_MULT_SECTORS;
    /* ide regs */
    s->feature = 0;
    s->error = 0;
    s->nsector = 0;
    s->sector = 0;
    s->lcyl = 0;
    s->hcyl = 0;

    /* lba48 */
    s->hob_feature = 0;
    s->hob_sector = 0;
    s->hob_nsector = 0;
    s->hob_lcyl = 0;
    s->hob_hcyl = 0;

    s->select = 0xa0;
    s->status = READY_STAT | SEEK_STAT;

    s->lba48 = 0;

    /* ATAPI specific */
    s->sense_key = 0;
    s->asc = 0;
    s->cdrom_changed = 0;
    s->packet_transfer_size = 0;
    s->elementary_transfer_size = 0;
    s->io_buffer_index = 0;
    s->cd_sector_size = 0;
    s->atapi_dma = 0;
    s->tray_locked = 0;
    s->tray_open = 0;
    /* ATA DMA state */
    s->io_buffer_size = 0;
    s->req_nb_sectors = 0;

    ide_set_signature(s);
    /* init the transfer handler so that 0xffff is returned on data
       accesses */
    s->end_transfer_func = ide_dummy_transfer_stop;
    ide_dummy_transfer_stop(s);
    s->media_changed = 0;
}

static bool cmd_nop(IDEState *s, uint8_t cmd)
{
    return true;
}

static bool cmd_device_reset(IDEState *s, uint8_t cmd)
{
    /* Halt PIO (in the DRQ phase), then DMA */
    ide_transfer_cancel(s);
    ide_cancel_dma_sync(s);

    /* Reset any PIO commands, reset signature, etc */
    ide_reset(s);

    /* RESET: ATA8-ACS3 7.10.4 "Normal Outputs";
     * ATA8-ACS3 Table 184 "Device Signatures for Normal Output" */
    s->status = 0x00;

    /* Do not overwrite status register */
    return false;
}

static bool cmd_data_set_management(IDEState *s, uint8_t cmd)
{
    switch (s->feature) {
    case DSM_TRIM:
        if (s->blk) {
            ide_sector_start_dma(s, IDE_DMA_TRIM);
            return false;
        }
        break;
    }

    ide_abort_command(s);
    return true;
}

static bool cmd_identify(IDEState *s, uint8_t cmd)
{
    if (s->blk && s->drive_kind != IDE_CD) {
        if (s->drive_kind != IDE_CFATA) {
            ide_identify(s);
        } else {
            ide_cfata_identify(s);
        }
        s->status = READY_STAT | SEEK_STAT;
        ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
        ide_set_irq(s->bus);
        return false;
    } else {
        if (s->drive_kind == IDE_CD) {
            ide_set_signature(s);
        }
        ide_abort_command(s);
    }

    return true;
}

static bool cmd_verify(IDEState *s, uint8_t cmd)
{
    bool lba48 = (cmd == WIN_VERIFY_EXT);

    /* do sector number check ? */
    ide_cmd_lba48_transform(s, lba48);

    return true;
}

static bool cmd_set_multiple_mode(IDEState *s, uint8_t cmd)
{
    if (s->drive_kind == IDE_CFATA && s->nsector == 0) {
        /* Disable Read and Write Multiple */
        s->mult_sectors = 0;
    } else if ((s->nsector & 0xff) != 0 &&
               ((s->nsector & 0xff) > MAX_MULT_SECTORS ||
                (s->nsector & (s->nsector - 1)) != 0)) {
        ide_abort_command(s);
    } else {
        s->mult_sectors = s->nsector & 0xff;
    }

    return true;
}

static bool cmd_read_multiple(IDEState *s, uint8_t cmd)
{
    bool lba48 = (cmd == WIN_MULTREAD_EXT);

    if (!s->blk || !s->mult_sectors) {
        ide_abort_command(s);
        return true;
    }

    ide_cmd_lba48_transform(s, lba48);
    s->req_nb_sectors = s->mult_sectors;
    ide_sector_read(s);
    return false;
}

static bool cmd_write_multiple(IDEState *s, uint8_t cmd)
{
    bool lba48 = (cmd == WIN_MULTWRITE_EXT);
    int n;

    if (!s->blk || !s->mult_sectors) {
        ide_abort_command(s);
        return true;
    }

    ide_cmd_lba48_transform(s, lba48);

    s->req_nb_sectors = s->mult_sectors;
    n = MIN(s->nsector, s->req_nb_sectors);

    s->status = SEEK_STAT | READY_STAT;
    ide_transfer_start(s, s->io_buffer, 512 * n, ide_sector_write);

    s->media_changed = 1;

    return false;
}

static bool cmd_read_pio(IDEState *s, uint8_t cmd)
{
    bool lba48 = (cmd == WIN_READ_EXT);

    if (s->drive_kind == IDE_CD) {
        ide_set_signature(s); /* odd, but ATA4 8.27.5.2 requires it */
        ide_abort_command(s);
        return true;
    }

    if (!s->blk) {
        ide_abort_command(s);
        return true;
    }

    ide_cmd_lba48_transform(s, lba48);
    s->req_nb_sectors = 1;
    ide_sector_read(s);

    return false;
}

static bool cmd_write_pio(IDEState *s, uint8_t cmd)
{
    bool lba48 = (cmd == WIN_WRITE_EXT);

    if (!s->blk) {
        ide_abort_command(s);
        return true;
    }

    ide_cmd_lba48_transform(s, lba48);

    s->req_nb_sectors = 1;
    s->status = SEEK_STAT | READY_STAT;
    ide_transfer_start(s, s->io_buffer, 512, ide_sector_write);

    s->media_changed = 1;

    return false;
}

static bool cmd_read_dma(IDEState *s, uint8_t cmd)
{
    bool lba48 = (cmd == WIN_READDMA_EXT);

    if (!s->blk) {
        ide_abort_command(s);
        return true;
    }

    ide_cmd_lba48_transform(s, lba48);
    ide_sector_start_dma(s, IDE_DMA_READ);

    return false;
}

static bool cmd_write_dma(IDEState *s, uint8_t cmd)
{
    bool lba48 = (cmd == WIN_WRITEDMA_EXT);

    if (!s->blk) {
        ide_abort_command(s);
        return true;
    }

    ide_cmd_lba48_transform(s, lba48);
    ide_sector_start_dma(s, IDE_DMA_WRITE);

    s->media_changed = 1;

    return false;
}

static bool cmd_flush_cache(IDEState *s, uint8_t cmd)
{
    ide_flush_cache(s);
    return false;
}

static bool cmd_seek(IDEState *s, uint8_t cmd)
{
    /* XXX: Check that seek is within bounds */
    return true;
}

static bool cmd_read_native_max(IDEState *s, uint8_t cmd)
{
    bool lba48 = (cmd == WIN_READ_NATIVE_MAX_EXT);

    /* Refuse if no sectors are addressable (e.g. medium not inserted) */
    if (s->nb_sectors == 0) {
        ide_abort_command(s);
        return true;
    }

    ide_cmd_lba48_transform(s, lba48);
    ide_set_sector(s, s->nb_sectors - 1);

    return true;
}

static bool cmd_check_power_mode(IDEState *s, uint8_t cmd)
{
    s->nsector = 0xff; /* device active or idle */
    return true;
}

static bool cmd_set_features(IDEState *s, uint8_t cmd)
{
    uint16_t *identify_data;

    if (!s->blk) {
        ide_abort_command(s);
        return true;
    }

    /* XXX: valid for CDROM ? */
    switch (s->feature) {
    case 0x02: /* write cache enable */
        blk_set_enable_write_cache(s->blk, true);
        identify_data = (uint16_t *)s->identify_data;
        put_le16(identify_data + 85, (1 << 14) | (1 << 5) | 1);
        return true;
    case 0x82: /* write cache disable */
        blk_set_enable_write_cache(s->blk, false);
        identify_data = (uint16_t *)s->identify_data;
        put_le16(identify_data + 85, (1 << 14) | 1);
        ide_flush_cache(s);
        return false;
    case 0xcc: /* reverting to power-on defaults enable */
    case 0x66: /* reverting to power-on defaults disable */
    case 0xaa: /* read look-ahead enable */
    case 0x55: /* read look-ahead disable */
    case 0x05: /* set advanced power management mode */
    case 0x85: /* disable advanced power management mode */
    case 0x69: /* NOP */
    case 0x67: /* NOP */
    case 0x96: /* NOP */
    case 0x9a: /* NOP */
    case 0x42: /* enable Automatic Acoustic Mode */
    case 0xc2: /* disable Automatic Acoustic Mode */
        return true;
    case 0x03: /* set transfer mode */
        {
            uint8_t val = s->nsector & 0x07;
            identify_data = (uint16_t *)s->identify_data;

            switch (s->nsector >> 3) {
            case 0x00: /* pio default */
            case 0x01: /* pio mode */
                put_le16(identify_data + 62, 0x07);
                put_le16(identify_data + 63, 0x07);
                put_le16(identify_data + 88, 0x3f);
                break;
            case 0x02: /* single word dma mode */
                put_le16(identify_data + 62, 0x07 | (1 << (val + 8)));
                put_le16(identify_data + 63, 0x07);
                put_le16(identify_data + 88, 0x3f);
                break;
            case 0x04: /* mdma mode */
                put_le16(identify_data + 62, 0x07);
                put_le16(identify_data + 63, 0x07 | (1 << (val + 8)));
                put_le16(identify_data + 88, 0x3f);
                break;
            case 0x08: /* udma mode */
                put_le16(identify_data + 62, 0x07);
                put_le16(identify_data + 63, 0x07);
                put_le16(identify_data + 88, 0x3f | (1 << (val + 8)));
                break;
            default:
                goto abort_cmd;
            }
            return true;
        }
    }

abort_cmd:
    ide_abort_command(s);
    return true;
}

/*** ATAPI commands ***/

static bool cmd_identify_packet(IDEState *s, uint8_t cmd)
{
    ide_atapi_identify(s);
    s->status = READY_STAT | SEEK_STAT;
    ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
    ide_set_irq(s->bus);
    return false;
}

static bool cmd_exec_dev_diagnostic(IDEState *s, uint8_t cmd)
{
    ide_set_signature(s);

    if (s->drive_kind == IDE_CD) {
        s->status = 0; /* ATAPI spec (v6) section 9.10 defines packet
                        * devices to return a clear status register
                        * with READY_STAT *not* set. */
        s->error = 0x01;
    } else {
        s->status = READY_STAT | SEEK_STAT;
        /* The bits of the error register are not as usual for this command!
         * They are part of the regular output (this is why ERR_STAT isn't set)
         * Device 0 passed, Device 1 passed or not present. */
        s->error = 0x01;
    }

    ide_set_irq(s->bus);
    return false;
}

static bool cmd_packet(IDEState *s, uint8_t cmd)
{
    /* overlapping commands not supported */
    if (s->feature & 0x02) {
        ide_abort_command(s);
        return true;
    }

    s->status = READY_STAT | SEEK_STAT;
    s->atapi_dma = s->feature & 1;
    if (s->atapi_dma) {
        s->dma_cmd = IDE_DMA_ATAPI;
    }
    s->nsector = 1;
    ide_transfer_start(s, s->io_buffer, ATAPI_PACKET_SIZE,
                       ide_atapi_cmd);
    return false;
}

/*** CF-ATA commands ***/

static bool cmd_cfa_req_ext_error_code(IDEState *s, uint8_t cmd)
{
    s->error = 0x09;    /* miscellaneous error */
    s->status = READY_STAT | SEEK_STAT;
    ide_set_irq(s->bus);

    return false;
}

static bool cmd_cfa_erase_sectors(IDEState *s, uint8_t cmd)
{
    /* WIN_SECURITY_FREEZE_LOCK has the same ID as CFA_WEAR_LEVEL and is
     * required for Windows 8 to work with AHCI */

    if (cmd == CFA_WEAR_LEVEL) {
        s->nsector = 0;
    }

    if (cmd == CFA_ERASE_SECTORS) {
        s->media_changed = 1;
    }

    return true;
}

static bool cmd_cfa_translate_sector(IDEState *s, uint8_t cmd)
{
    s->status = READY_STAT | SEEK_STAT;

    memset(s->io_buffer, 0, 0x200);
    s->io_buffer[0x00] = s->hcyl;                   /* Cyl MSB */
    s->io_buffer[0x01] = s->lcyl;                   /* Cyl LSB */
    s->io_buffer[0x02] = s->select;                 /* Head */
    s->io_buffer[0x03] = s->sector;                 /* Sector */
    s->io_buffer[0x04] = ide_get_sector(s) >> 16;   /* LBA MSB */
    s->io_buffer[0x05] = ide_get_sector(s) >> 8;    /* LBA */
    s->io_buffer[0x06] = ide_get_sector(s) >> 0;    /* LBA LSB */
    s->io_buffer[0x13] = 0x00;                      /* Erase flag */
    s->io_buffer[0x18] = 0x00;                      /* Hot count */
    s->io_buffer[0x19] = 0x00;                      /* Hot count */
    s->io_buffer[0x1a] = 0x01;                      /* Hot count */

    ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
    ide_set_irq(s->bus);

    return false;
}

static bool cmd_cfa_access_metadata_storage(IDEState *s, uint8_t cmd)
{
    switch (s->feature) {
    case 0x02:  /* Inquiry Metadata Storage */
        ide_cfata_metadata_inquiry(s);
        break;
    case 0x03:  /* Read Metadata Storage */
        ide_cfata_metadata_read(s);
        break;
    case 0x04:  /* Write Metadata Storage */
        ide_cfata_metadata_write(s);
        break;
    default:
        ide_abort_command(s);
        return true;
    }

    ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
    s->status = 0x00; /* NOTE: READY is _not_ set */
    ide_set_irq(s->bus);

    return false;
}

static bool cmd_ibm_sense_condition(IDEState *s, uint8_t cmd)
{
    switch (s->feature) {
    case 0x01:  /* sense temperature in device */
        s->nsector = 0x50;      /* +20 C */
        break;
    default:
        ide_abort_command(s);
        return true;
    }

    return true;
}

/*** SMART commands ***/

static bool cmd_smart(IDEState *s, uint8_t cmd)
{
    int n;

    if (s->hcyl != 0xc2 || s->lcyl != 0x4f) {
        goto abort_cmd;
    }

    if (!s->smart_enabled && s->feature != SMART_ENABLE) {
        goto abort_cmd;
    }

    switch (s->feature) {
    case SMART_DISABLE:
        s->smart_enabled = 0;
        return true;

    case SMART_ENABLE:
        s->smart_enabled = 1;
        return true;

    case SMART_ATTR_AUTOSAVE:
        switch (s->sector) {
        case 0x00:
            s->smart_autosave = 0;
            break;
        case 0xf1:
            s->smart_autosave = 1;
            break;
        default:
            goto abort_cmd;
        }
        return true;

    case SMART_STATUS:
        if (!s->smart_errors) {
            s->hcyl = 0xc2;
            s->lcyl = 0x4f;
        } else {
            s->hcyl = 0x2c;
            s->lcyl = 0xf4;
        }
        return true;

    case SMART_READ_THRESH:
        memset(s->io_buffer, 0, 0x200);
        s->io_buffer[0] = 0x01; /* smart struct version */

        for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
            s->io_buffer[2 + 0 + (n * 12)] = smart_attributes[n][0];
            s->io_buffer[2 + 1 + (n * 12)] = smart_attributes[n][11];
        }

        /* checksum */
        for (n = 0; n < 511; n++) {
            s->io_buffer[511] += s->io_buffer[n];
        }
        s->io_buffer[511] = 0x100 - s->io_buffer[511];
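        /*
         * The SMART data structure checksum: byte 511 is chosen so that
         * all 512 bytes sum to zero mod 256, i.e. checksum =
         * 0x100 - (sum of bytes 0..510). The same scheme is repeated for
         * SMART_READ_DATA and SMART_READ_LOG below.
         */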
        s->status = READY_STAT | SEEK_STAT;
        ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
        ide_set_irq(s->bus);
        return false;

    case SMART_READ_DATA:
        memset(s->io_buffer, 0, 0x200);
        s->io_buffer[0] = 0x01; /* smart struct version */

        for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
            int i;
            for (i = 0; i < 11; i++) {
                s->io_buffer[2 + i + (n * 12)] = smart_attributes[n][i];
            }
        }

        s->io_buffer[362] = 0x02 | (s->smart_autosave ? 0x80 : 0x00);
        if (s->smart_selftest_count == 0) {
            s->io_buffer[363] = 0;
        } else {
            s->io_buffer[363] =
                s->smart_selftest_data[3 +
                           (s->smart_selftest_count - 1) *
                           24];
        }
        s->io_buffer[364] = 0x20;
        s->io_buffer[365] = 0x01;
        /* offline data collection capacity: execute + self-test */
        s->io_buffer[367] = (1 << 4 | 1 << 3 | 1);
        s->io_buffer[368] = 0x03; /* smart capability (1) */
        s->io_buffer[369] = 0x00; /* smart capability (2) */
        s->io_buffer[370] = 0x01; /* error logging supported */
        s->io_buffer[372] = 0x02; /* minutes for poll short test */
        s->io_buffer[373] = 0x36; /* minutes for poll ext test */
        s->io_buffer[374] = 0x01; /* minutes for poll conveyance */

        for (n = 0; n < 511; n++) {
            s->io_buffer[511] += s->io_buffer[n];
        }
        s->io_buffer[511] = 0x100 - s->io_buffer[511];

        s->status = READY_STAT | SEEK_STAT;
        ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
        ide_set_irq(s->bus);
        return false;

    case SMART_READ_LOG:
        switch (s->sector) {
        case 0x01: /* summary smart error log */
            memset(s->io_buffer, 0, 0x200);
            s->io_buffer[0] = 0x01;
            s->io_buffer[1] = 0x00; /* no error entries */
            s->io_buffer[452] = s->smart_errors & 0xff;
            s->io_buffer[453] = (s->smart_errors & 0xff00) >> 8;

            for (n = 0; n < 511; n++) {
                s->io_buffer[511] += s->io_buffer[n];
            }
            s->io_buffer[511] = 0x100 - s->io_buffer[511];
            break;
        case 0x06: /* smart self test log */
            memset(s->io_buffer, 0, 0x200);
            s->io_buffer[0] = 0x01;
            if (s->smart_selftest_count == 0) {
                s->io_buffer[508] = 0;
            } else {
                s->io_buffer[508] = s->smart_selftest_count;
                for (n = 2; n < 506; n++) {
                    s->io_buffer[n] = s->smart_selftest_data[n];
                }
            }

            for (n = 0; n < 511; n++) {
                s->io_buffer[511] += s->io_buffer[n];
            }
            s->io_buffer[511] = 0x100 - s->io_buffer[511];
            break;
        default:
            goto abort_cmd;
        }
        s->status = READY_STAT | SEEK_STAT;
        ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
        ide_set_irq(s->bus);
        return false;

    case SMART_EXECUTE_OFFLINE:
        switch (s->sector) {
        case 0: /* off-line routine */
        case 1: /* short self test */
        case 2: /* extended self test */
            s->smart_selftest_count++;
            if (s->smart_selftest_count > 21) {
                s->smart_selftest_count = 1;
            }
            n = 2 + (s->smart_selftest_count - 1) * 24;
            s->smart_selftest_data[n] = s->sector;
            s->smart_selftest_data[n + 1] = 0x00; /* OK and finished */
            s->smart_selftest_data[n + 2] = 0x34; /* hour count lsb */
            s->smart_selftest_data[n + 3] = 0x12; /* hour count msb */
            break;
        default:
            goto abort_cmd;
        }
        return true;
    }

abort_cmd:
    ide_abort_command(s);
    return true;
}

#define HD_OK (1u << IDE_HD)
#define CD_OK (1u << IDE_CD)
#define CFA_OK (1u << IDE_CFATA)
#define HD_CFA_OK (HD_OK | CFA_OK)
#define ALL_OK (HD_OK | CD_OK | CFA_OK)

/* Set the Disk Seek Completed status bit during completion */
#define SET_DSC (1u << 8)
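/*
 * The low byte of each command-table entry's flags is a bitmask of drive
 * kinds the command is valid for; ide_cmd_permitted() below tests it with
 * (1u << s->drive_kind). For example WIN_PACKETCMD is marked CD_OK only,
 * so a packet command sent to a hard disk aborts with ERR_STAT.
 */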

/* See ACS-2 T13/2015-D Table B.2 Command codes */
static const struct {
    /* Returns true if the completion code should be run */
    bool (*handler)(IDEState *s, uint8_t cmd);
    int flags;
} ide_cmd_table[0x100] = {
    /* NOP not implemented, mandatory for CD */
    [CFA_REQ_EXT_ERROR_CODE]      = { cmd_cfa_req_ext_error_code, CFA_OK },
    [WIN_DSM]                     = { cmd_data_set_management, HD_CFA_OK },
    [WIN_DEVICE_RESET]            = { cmd_device_reset, CD_OK },
    [WIN_RECAL]                   = { cmd_nop, HD_CFA_OK | SET_DSC},
    [WIN_READ]                    = { cmd_read_pio, ALL_OK },
    [WIN_READ_ONCE]               = { cmd_read_pio, HD_CFA_OK },
    [WIN_READ_EXT]                = { cmd_read_pio, HD_CFA_OK },
    [WIN_READDMA_EXT]             = { cmd_read_dma, HD_CFA_OK },
    [WIN_READ_NATIVE_MAX_EXT]     = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
    [WIN_MULTREAD_EXT]            = { cmd_read_multiple, HD_CFA_OK },
    [WIN_WRITE]                   = { cmd_write_pio, HD_CFA_OK },
    [WIN_WRITE_ONCE]              = { cmd_write_pio, HD_CFA_OK },
    [WIN_WRITE_EXT]               = { cmd_write_pio, HD_CFA_OK },
    [WIN_WRITEDMA_EXT]            = { cmd_write_dma, HD_CFA_OK },
    [CFA_WRITE_SECT_WO_ERASE]     = { cmd_write_pio, CFA_OK },
    [WIN_MULTWRITE_EXT]           = { cmd_write_multiple, HD_CFA_OK },
    [WIN_WRITE_VERIFY]            = { cmd_write_pio, HD_CFA_OK },
    [WIN_VERIFY]                  = { cmd_verify, HD_CFA_OK | SET_DSC },
    [WIN_VERIFY_ONCE]             = { cmd_verify, HD_CFA_OK | SET_DSC },
    [WIN_VERIFY_EXT]              = { cmd_verify, HD_CFA_OK | SET_DSC },
    [WIN_SEEK]                    = { cmd_seek, HD_CFA_OK | SET_DSC },
    [CFA_TRANSLATE_SECTOR]        = { cmd_cfa_translate_sector, CFA_OK },
    [WIN_DIAGNOSE]                = { cmd_exec_dev_diagnostic, ALL_OK },
    [WIN_SPECIFY]                 = { cmd_nop, HD_CFA_OK | SET_DSC },
    [WIN_STANDBYNOW2]             = { cmd_nop, HD_CFA_OK },
    [WIN_IDLEIMMEDIATE2]          = { cmd_nop, HD_CFA_OK },
    [WIN_STANDBY2]                = { cmd_nop, HD_CFA_OK },
    [WIN_SETIDLE2]                = { cmd_nop, HD_CFA_OK },
    [WIN_CHECKPOWERMODE2]         = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
    [WIN_SLEEPNOW2]               = { cmd_nop, HD_CFA_OK },
    [WIN_PACKETCMD]               = { cmd_packet, CD_OK },
    [WIN_PIDENTIFY]               = { cmd_identify_packet, CD_OK },
    [WIN_SMART]                   = { cmd_smart, HD_CFA_OK | SET_DSC },
    [CFA_ACCESS_METADATA_STORAGE] = { cmd_cfa_access_metadata_storage, CFA_OK },
    [CFA_ERASE_SECTORS]           = { cmd_cfa_erase_sectors, CFA_OK | SET_DSC },
    [WIN_MULTREAD]                = { cmd_read_multiple, HD_CFA_OK },
    [WIN_MULTWRITE]               = { cmd_write_multiple, HD_CFA_OK },
    [WIN_SETMULT]                 = { cmd_set_multiple_mode, HD_CFA_OK | SET_DSC },
    [WIN_READDMA]                 = { cmd_read_dma, HD_CFA_OK },
    [WIN_READDMA_ONCE]            = { cmd_read_dma, HD_CFA_OK },
    [WIN_WRITEDMA]                = { cmd_write_dma, HD_CFA_OK },
    [WIN_WRITEDMA_ONCE]           = { cmd_write_dma, HD_CFA_OK },
    [CFA_WRITE_MULTI_WO_ERASE]    = { cmd_write_multiple, CFA_OK },
    [WIN_STANDBYNOW1]             = { cmd_nop, HD_CFA_OK },
    [WIN_IDLEIMMEDIATE]           = { cmd_nop, HD_CFA_OK },
    [WIN_STANDBY]                 = { cmd_nop, HD_CFA_OK },
    [WIN_SETIDLE1]                = { cmd_nop, HD_CFA_OK },
    [WIN_CHECKPOWERMODE1]         = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
    [WIN_SLEEPNOW1]               = { cmd_nop, HD_CFA_OK },
    [WIN_FLUSH_CACHE]             = { cmd_flush_cache, ALL_OK },
    [WIN_FLUSH_CACHE_EXT]         = { cmd_flush_cache, HD_CFA_OK },
    [WIN_IDENTIFY]                = { cmd_identify, ALL_OK },
    [WIN_SETFEATURES]             = { cmd_set_features, ALL_OK | SET_DSC },
    [IBM_SENSE_CONDITION]         = { cmd_ibm_sense_condition, CFA_OK | SET_DSC },
    [CFA_WEAR_LEVEL]              = { cmd_cfa_erase_sectors, HD_CFA_OK | SET_DSC },
    [WIN_READ_NATIVE_MAX]         = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
};

static bool ide_cmd_permitted(IDEState *s, uint32_t cmd)
{
    return cmd < ARRAY_SIZE(ide_cmd_table)
        && (ide_cmd_table[cmd].flags & (1u << s->drive_kind));
}

void ide_exec_cmd(IDEBus *bus, uint32_t val)
{
    IDEState *s;
    bool complete;

    s = idebus_active_if(bus);
    trace_ide_exec_cmd(bus, s, val);

    /* ignore commands to non-existent slave */
    if (s != bus->ifs && !s->blk) {
        return;
    }

    /* Only RESET is allowed while BSY and/or DRQ are set,
     * and only to ATAPI devices. */
    if (s->status & (BUSY_STAT|DRQ_STAT)) {
        if (val != WIN_DEVICE_RESET || s->drive_kind != IDE_CD) {
            return;
        }
    }

    if (!ide_cmd_permitted(s, val)) {
        ide_abort_command(s);
        ide_set_irq(s->bus);
        return;
    }

    s->status = READY_STAT | BUSY_STAT;
    s->error = 0;
    s->io_buffer_offset = 0;

    complete = ide_cmd_table[val].handler(s, val);
    if (complete) {
        s->status &= ~BUSY_STAT;

        assert(!!s->error == !!(s->status & ERR_STAT));

        if ((ide_cmd_table[val].flags & SET_DSC) && !s->error) {
            s->status |= SEEK_STAT;
        }

        ide_cmd_done(s);
        ide_set_irq(s->bus);
    }
}

uint32_t ide_ioport_read(void *opaque, uint32_t addr)
{
    IDEBus *bus = opaque;
    IDEState *s = idebus_active_if(bus);
    uint32_t reg_num;
    int ret, hob;

    reg_num = addr & 7;
    /* FIXME: HOB readback uses bit 7, but it's always set right now */
    //hob = s->select & (1 << 7);
    hob = 0;
    switch (reg_num) {
    case 0:
        ret = 0xff;
        break;
    case 1:
        if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
            (s != bus->ifs && !s->blk)) {
            ret = 0;
        } else if (!hob) {
            ret = s->error;
        } else {
            ret = s->hob_feature;
        }
        break;
    case 2:
        if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
            ret = 0;
        } else if (!hob) {
            ret = s->nsector & 0xff;
        } else {
            ret = s->hob_nsector;
        }
        break;
    case 3:
        if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
            ret = 0;
        } else if (!hob) {
            ret = s->sector;
        } else {
            ret = s->hob_sector;
        }
        break;
    case 4:
        if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
            ret = 0;
        } else if (!hob) {
            ret = s->lcyl;
        } else {
            ret = s->hob_lcyl;
        }
        break;
    case 5:
        if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
            ret = 0;
        } else if (!hob) {
            ret = s->hcyl;
        } else {
            ret = s->hob_hcyl;
        }
        break;
    case 6:
        if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
            ret = 0;
        } else {
            ret = s->select;
        }
        break;
    default:
    case 7:
        if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
            (s != bus->ifs && !s->blk)) {
            ret = 0;
        } else {
            ret = s->status;
        }
        qemu_irq_lower(bus->irq);
        break;
    }

    trace_ide_ioport_read(addr, ret, bus, s);
    return ret;
}

uint32_t ide_status_read(void *opaque, uint32_t addr)
{
    IDEBus *bus = opaque;
    IDEState *s = idebus_active_if(bus);
    int ret;

    if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
        (s != bus->ifs && !s->blk)) {
        ret = 0;
    } else {
        ret = s->status;
    }

    trace_ide_status_read(addr, ret, bus, s);
    return ret;
}

void ide_cmd_write(void *opaque, uint32_t addr, uint32_t val)
{
    IDEBus *bus = opaque;
    IDEState *s;
    int i;

    trace_ide_cmd_write(addr, val, bus);

    /* common for both drives */
    if (!(bus->cmd & IDE_CMD_RESET) &&
        (val & IDE_CMD_RESET)) {
        /* reset low to high */
        for(i = 0;i < 2; i++) {
            s = &bus->ifs[i];
            s->status = BUSY_STAT | SEEK_STAT;
            s->error = 0x01;
        }
    } else if ((bus->cmd & IDE_CMD_RESET) &&
               !(val & IDE_CMD_RESET)) {
        /* high to low */
        for(i = 0;i < 2; i++) {
            s = &bus->ifs[i];
            if (s->drive_kind == IDE_CD)
                s->status = 0x00; /* NOTE: READY is _not_ set */
            else
                s->status = READY_STAT | SEEK_STAT;
            ide_set_signature(s);
        }
    }

    bus->cmd = val;
}

/*
 * Returns true if the running PIO transfer is a PIO out (i.e. data is
 * transferred from the device to the guest), false if it's a PIO in
 */
static bool ide_is_pio_out(IDEState *s)
{
    if (s->end_transfer_func == ide_sector_write ||
        s->end_transfer_func == ide_atapi_cmd) {
        return false;
    } else if (s->end_transfer_func == ide_sector_read ||
               s->end_transfer_func == ide_transfer_stop ||
               s->end_transfer_func == ide_atapi_cmd_reply_end ||
               s->end_transfer_func == ide_dummy_transfer_stop) {
        return true;
    }

    abort();
}

void ide_data_writew(void *opaque, uint32_t addr, uint32_t val)
{
    IDEBus *bus = opaque;
    IDEState *s = idebus_active_if(bus);
    uint8_t *p;

    /* PIO data access allowed only when DRQ bit is set. The result of a write
     * during PIO out is indeterminate, just ignore it. */
    if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
        return;
    }

    p = s->data_ptr;
    if (p + 2 > s->data_end) {
        return;
    }

    *(uint16_t *)p = le16_to_cpu(val);
    p += 2;
    s->data_ptr = p;
    if (p >= s->data_end) {
        s->status &= ~DRQ_STAT;
        s->end_transfer_func(s);
    }
}

uint32_t ide_data_readw(void *opaque, uint32_t addr)
{
    IDEBus *bus = opaque;
    IDEState *s = idebus_active_if(bus);
    uint8_t *p;
    int ret;

    /* PIO data access allowed only when DRQ bit is set. The result of a read
     * during PIO in is indeterminate, return 0 and don't move forward. */
    if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
        return 0;
    }

    p = s->data_ptr;
    if (p + 2 > s->data_end) {
        return 0;
    }

    ret = cpu_to_le16(*(uint16_t *)p);
    p += 2;
    s->data_ptr = p;
    if (p >= s->data_end) {
        s->status &= ~DRQ_STAT;
        s->end_transfer_func(s);
    }
    return ret;
}

void ide_data_writel(void *opaque, uint32_t addr, uint32_t val)
{
    IDEBus *bus = opaque;
    IDEState *s = idebus_active_if(bus);
    uint8_t *p;

    /* PIO data access allowed only when DRQ bit is set. The result of a write
     * during PIO out is indeterminate, just ignore it. */
    if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
        return;
    }

    p = s->data_ptr;
    if (p + 4 > s->data_end) {
        return;
    }

    *(uint32_t *)p = le32_to_cpu(val);
    p += 4;
    s->data_ptr = p;
    if (p >= s->data_end) {
        s->status &= ~DRQ_STAT;
        s->end_transfer_func(s);
    }
}

uint32_t ide_data_readl(void *opaque, uint32_t addr)
{
    IDEBus *bus = opaque;
    IDEState *s = idebus_active_if(bus);
    uint8_t *p;
    int ret;

    /* PIO data access allowed only when DRQ bit is set. The result of a read
     * during PIO in is indeterminate, return 0 and don't move forward. */
    if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
        return 0;
    }

    p = s->data_ptr;
    if (p + 4 > s->data_end) {
        return 0;
    }

    ret = cpu_to_le32(*(uint32_t *)p);
    p += 4;
    s->data_ptr = p;
    if (p >= s->data_end) {
        s->status &= ~DRQ_STAT;
        s->end_transfer_func(s);
    }
    return ret;
}

static void ide_dummy_transfer_stop(IDEState *s)
{
    s->data_ptr = s->io_buffer;
    s->data_end = s->io_buffer;
    s->io_buffer[0] = 0xff;
    s->io_buffer[1] = 0xff;
    s->io_buffer[2] = 0xff;
    s->io_buffer[3] = 0xff;
}
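/*
 * Priming the buffer with 0xff and parking data_ptr at data_end is what the
 * "0xffff is returned on data accesses" comment in ide_reset() refers to:
 * historically a data-port read with no transfer in flight consumed these
 * bytes and re-armed itself via end_transfer_func. With the DRQ_STAT checks
 * now in ide_data_readw()/ide_data_readl(), such reads return 0 before
 * touching the buffer, so this mostly serves as a safe parking state.
 */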

void ide_bus_reset(IDEBus *bus)
{
    bus->unit = 0;
    bus->cmd = 0;
    ide_reset(&bus->ifs[0]);
    ide_reset(&bus->ifs[1]);
    ide_clear_hob(bus);

    /* pending async DMA */
    if (bus->dma->aiocb) {
#ifdef DEBUG_AIO
        printf("aio_cancel\n");
#endif
        blk_aio_cancel(bus->dma->aiocb);
        bus->dma->aiocb = NULL;
    }

    /* reset dma provider too */
    if (bus->dma->ops->reset) {
        bus->dma->ops->reset(bus->dma);
    }
}

static bool ide_cd_is_tray_open(void *opaque)
{
    return ((IDEState *)opaque)->tray_open;
}

static bool ide_cd_is_medium_locked(void *opaque)
{
    return ((IDEState *)opaque)->tray_locked;
}

static void ide_resize_cb(void *opaque)
{
    IDEState *s = opaque;
    uint64_t nb_sectors;

    if (!s->identify_set) {
        return;
    }

    blk_get_geometry(s->blk, &nb_sectors);
    s->nb_sectors = nb_sectors;

    /* Update the identify data buffer. */
    if (s->drive_kind == IDE_CFATA) {
        ide_cfata_identify_size(s);
    } else {
        /* IDE_CD uses a different set of callbacks entirely. */
        assert(s->drive_kind != IDE_CD);
        ide_identify_size(s);
    }
}

static const BlockDevOps ide_cd_block_ops = {
    .change_media_cb = ide_cd_change_cb,
    .eject_request_cb = ide_cd_eject_request_cb,
    .is_tray_open = ide_cd_is_tray_open,
    .is_medium_locked = ide_cd_is_medium_locked,
};

static const BlockDevOps ide_hd_block_ops = {
    .resize_cb = ide_resize_cb,
};
int ide_init_drive(IDEState *s, BlockBackend *blk, IDEDriveKind kind,
                   const char *version, const char *serial, const char *model,
                   uint64_t wwn,
                   uint32_t cylinders, uint32_t heads, uint32_t secs,
                   int chs_trans)
{
    uint64_t nb_sectors;

    s->blk = blk;
    s->drive_kind = kind;

    blk_get_geometry(blk, &nb_sectors);
    s->cylinders = cylinders;
    s->heads = heads;
    s->sectors = secs;
    s->chs_trans = chs_trans;
    s->nb_sectors = nb_sectors;
    s->wwn = wwn;
    /* The SMART values should be preserved across power cycles
       but they aren't.  */
    s->smart_enabled = 1;
    s->smart_autosave = 1;
    s->smart_errors = 0;
    s->smart_selftest_count = 0;
    if (kind == IDE_CD) {
        blk_set_dev_ops(blk, &ide_cd_block_ops, s);
        blk_set_guest_block_size(blk, 2048);
    } else {
        if (!blk_is_inserted(s->blk)) {
            error_report("Device needs media, but drive is empty");
            return -1;
        }
        if (blk_is_read_only(blk)) {
            error_report("Can't use a read-only drive");
            return -1;
        }
        blk_set_dev_ops(blk, &ide_hd_block_ops, s);
    }
    if (serial) {
        pstrcpy(s->drive_serial_str, sizeof(s->drive_serial_str), serial);
    } else {
        snprintf(s->drive_serial_str, sizeof(s->drive_serial_str),
                 "QM%05d", s->drive_serial);
    }
    if (model) {
        pstrcpy(s->drive_model_str, sizeof(s->drive_model_str), model);
    } else {
        switch (kind) {
        case IDE_CD:
            strcpy(s->drive_model_str, "QEMU DVD-ROM");
            break;
        case IDE_CFATA:
            strcpy(s->drive_model_str, "QEMU MICRODRIVE");
            break;
        default:
            strcpy(s->drive_model_str, "QEMU HARDDISK");
            break;
        }
    }

    if (version) {
        pstrcpy(s->version, sizeof(s->version), version);
    } else {
        pstrcpy(s->version, sizeof(s->version), qemu_hw_version());
    }

    ide_reset(s);
    blk_iostatus_enable(blk);
    return 0;
}
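
/*
 * Per-drive, one-time initialization.  drive_serial is a process-wide
 * counter so every drive created by this QEMU instance gets a distinct
 * default serial number.
 */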
static void ide_init1(IDEBus *bus, int unit)
{
    static int drive_serial = 1;
    IDEState *s = &bus->ifs[unit];

    s->bus = bus;
    s->unit = unit;
    s->drive_serial = drive_serial++;
    /* we need at least 2k alignment for accessing CDROMs using O_DIRECT */
    s->io_buffer_total_len = IDE_DMA_BUF_SECTORS*512 + 4;
    s->io_buffer = qemu_memalign(2048, s->io_buffer_total_len);
    memset(s->io_buffer, 0, s->io_buffer_total_len);

    s->smart_selftest_data = blk_blockalign(s->blk, 512);
    memset(s->smart_selftest_data, 0, 512);

    s->sector_write_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                         ide_sector_write_timer_cb, s);
}
static int ide_nop_int(IDEDMA *dma, int x)
{
    return 0;
}

static void ide_nop(IDEDMA *dma)
{
}

static int32_t ide_nop_int32(IDEDMA *dma, int32_t l)
{
    return 0;
}

static const IDEDMAOps ide_dma_nop_ops = {
    .prepare_buf = ide_nop_int32,
    .restart_dma = ide_nop,
    .rw_buf = ide_nop_int,
};
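
/*
 * Request-retry machinery: when a request fails under a rerror/werror
 * policy of "stop", the failed command's parameters are recorded in the
 * bus (retry_unit/retry_sector_num/retry_nsector) and the VM pauses.
 * The helpers below resubmit that command once the VM resumes.
 */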
static void ide_restart_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
{
    s->unit = s->bus->retry_unit;
    ide_set_sector(s, s->bus->retry_sector_num);
    s->nsector = s->bus->retry_nsector;
    s->bus->dma->ops->restart_dma(s->bus->dma);
    s->io_buffer_size = 0;
    s->dma_cmd = dma_cmd;
    ide_start_dma(s, ide_dma_cb);
}
static void ide_restart_bh(void *opaque)
{
    IDEBus *bus = opaque;
    IDEState *s;
    bool is_read;
    int error_status;

    qemu_bh_delete(bus->bh);
    bus->bh = NULL;

    error_status = bus->error_status;
    if (bus->error_status == 0) {
        return;
    }

    s = idebus_active_if(bus);
    is_read = (bus->error_status & IDE_RETRY_READ) != 0;

    /* The error status must be cleared before resubmitting the request: The
     * request may fail again, and this case can only be distinguished if the
     * called function can set a new error status. */
    bus->error_status = 0;

    /* The HBA has generically asked to be kicked on retry */
    if (error_status & IDE_RETRY_HBA) {
        if (s->bus->dma->ops->restart) {
            s->bus->dma->ops->restart(s->bus->dma);
        }
    } else if (IS_IDE_RETRY_DMA(error_status)) {
        if (error_status & IDE_RETRY_TRIM) {
            ide_restart_dma(s, IDE_DMA_TRIM);
        } else {
            ide_restart_dma(s, is_read ? IDE_DMA_READ : IDE_DMA_WRITE);
        }
    } else if (IS_IDE_RETRY_PIO(error_status)) {
        if (is_read) {
            ide_sector_read(s);
        } else {
            ide_sector_write(s);
        }
    } else if (error_status & IDE_RETRY_FLUSH) {
        ide_flush_cache(s);
    } else if (IS_IDE_RETRY_ATAPI(error_status)) {
        assert(s->end_transfer_func == ide_atapi_cmd);
        ide_atapi_dma_restart(s);
    } else {
        abort();
    }
}
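
/*
 * VM state-change hook: runs on every transition, but only acts when the
 * VM is resuming.  The actual retry is deferred to a bottom half so it
 * executes in a safe context rather than inside the notifier itself.
 */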
static void ide_restart_cb(void *opaque, int running, RunState state)
{
    IDEBus *bus = opaque;

    if (!running) {
        return;
    }

    if (!bus->bh) {
        bus->bh = qemu_bh_new(ide_restart_bh, bus);
        qemu_bh_schedule(bus->bh);
    }
}
void ide_register_restart_cb(IDEBus *bus)
{
    if (bus->dma->ops->restart_dma) {
        bus->vmstate = qemu_add_vm_change_state_handler(ide_restart_cb, bus);
    }
}

static IDEDMA ide_dma_nop = {
    .ops = &ide_dma_nop_ops,
    .aiocb = NULL,
};
void ide_init2(IDEBus *bus, qemu_irq irq)
{
    int i;

    for(i = 0; i < 2; i++) {
        ide_init1(bus, i);
        ide_reset(&bus->ifs[i]);
    }
    bus->irq = irq;
    bus->dma = &ide_dma_nop;
}
void ide_exit(IDEState *s)
{
    timer_del(s->sector_write_timer);
    timer_free(s->sector_write_timer);
    qemu_vfree(s->smart_selftest_data);
    qemu_vfree(s->io_buffer);
}
static const MemoryRegionPortio ide_portio_list[] = {
    { 0, 8, 1, .read = ide_ioport_read, .write = ide_ioport_write },
    { 0, 1, 2, .read = ide_data_readw, .write = ide_data_writew },
    { 0, 1, 4, .read = ide_data_readl, .write = ide_data_writel },
    PORTIO_END_OF_LIST(),
};

static const MemoryRegionPortio ide_portio2_list[] = {
    { 0, 1, 1, .read = ide_status_read, .write = ide_cmd_write },
    PORTIO_END_OF_LIST(),
};
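
/*
 * Illustrative example (hypothetical caller): a legacy ISA IDE controller
 * would register its primary channel as
 *
 *     ide_init_ioport(bus, dev, 0x1f0, 0x3f6);
 *
 * i.e. the command block at 0x1f0-0x1f7 and the control block at 0x3f6.
 */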
void ide_init_ioport(IDEBus *bus, ISADevice *dev, int iobase, int iobase2)
{
    /* ??? Assume only ISA and PCI configurations, and that the PCI-ISA
       bridge has been set up properly to always register with ISA.  */
    isa_register_portio_list(dev, &bus->portio_list,
                             iobase, ide_portio_list, bus, "ide");

    if (iobase2) {
        isa_register_portio_list(dev, &bus->portio2_list,
                                 iobase2, ide_portio2_list, bus, "ide");
    }
}
static bool is_identify_set(void *opaque, int version_id)
{
    IDEState *s = opaque;

    return s->identify_set != 0;
}
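
/*
 * Function pointers cannot be migrated directly, so end_transfer_func is
 * serialized as an index into this table (see end_transfer_fn_idx in the
 * pio_state subsection below).  The table order is migration ABI and
 * must not change.
 */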
static EndTransferFunc* transfer_end_table[] = {
        ide_sector_read,
        ide_sector_write,
        ide_transfer_stop,
        ide_atapi_cmd_reply_end,
        ide_atapi_cmd,
        ide_dummy_transfer_stop,
};

static int transfer_end_table_idx(EndTransferFunc *fn)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(transfer_end_table); i++)
        if (transfer_end_table[i] == fn)
            return i;

    return -1;
}
static int ide_drive_post_load(void *opaque, int version_id)
{
    IDEState *s = opaque;

    if (s->blk && s->identify_set) {
        blk_set_enable_write_cache(s->blk, !!(s->identify_data[85] & (1 << 5)));
    }
    return 0;
}
static int ide_drive_pio_post_load(void *opaque, int version_id)
{
    IDEState *s = opaque;

    if (s->end_transfer_fn_idx >= ARRAY_SIZE(transfer_end_table)) {
        return -EINVAL;
    }
    s->end_transfer_func = transfer_end_table[s->end_transfer_fn_idx];
    s->data_ptr = s->io_buffer + s->cur_io_buffer_offset;
    s->data_end = s->data_ptr + s->cur_io_buffer_len;
    s->atapi_dma = s->feature & 1; /* as per cmd_packet */

    return 0;
}
static void ide_drive_pio_pre_save(void *opaque)
{
    IDEState *s = opaque;
    int idx;

    s->cur_io_buffer_offset = s->data_ptr - s->io_buffer;
    s->cur_io_buffer_len = s->data_end - s->data_ptr;

    idx = transfer_end_table_idx(s->end_transfer_func);
    if (idx == -1) {
        fprintf(stderr, "%s: invalid end_transfer_func for DRQ_STAT\n",
                        __func__);
        s->end_transfer_fn_idx = 2;
    } else {
        s->end_transfer_fn_idx = idx;
    }
}
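
/*
 * The .needed predicates below gate the optional migration subsections:
 * a subsection is sent only when it carries information, which keeps the
 * wire format compatible with older QEMU versions that don't know about
 * it.
 */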
static bool ide_drive_pio_state_needed(void *opaque)
{
    IDEState *s = opaque;

    return ((s->status & DRQ_STAT) != 0)
        || (s->bus->error_status & IDE_RETRY_PIO);
}

static bool ide_tray_state_needed(void *opaque)
{
    IDEState *s = opaque;

    return s->tray_open || s->tray_locked;
}

static bool ide_atapi_gesn_needed(void *opaque)
{
    IDEState *s = opaque;

    return s->events.new_media || s->events.eject_request;
}

static bool ide_error_needed(void *opaque)
{
    IDEBus *bus = opaque;

    return (bus->error_status != 0);
}
/* Fields for GET_EVENT_STATUS_NOTIFICATION ATAPI command */
static const VMStateDescription vmstate_ide_atapi_gesn_state = {
    .name = "ide_drive/atapi/gesn_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = ide_atapi_gesn_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(events.new_media, IDEState),
        VMSTATE_BOOL(events.eject_request, IDEState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_ide_tray_state = {
    .name = "ide_drive/tray_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = ide_tray_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(tray_open, IDEState),
        VMSTATE_BOOL(tray_locked, IDEState),
        VMSTATE_END_OF_LIST()
    }
};
static const VMStateDescription vmstate_ide_drive_pio_state = {
    .name = "ide_drive/pio_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = ide_drive_pio_pre_save,
    .post_load = ide_drive_pio_post_load,
    .needed = ide_drive_pio_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(req_nb_sectors, IDEState),
        VMSTATE_VARRAY_INT32(io_buffer, IDEState, io_buffer_total_len, 1,
                             vmstate_info_uint8, uint8_t),
        VMSTATE_INT32(cur_io_buffer_offset, IDEState),
        VMSTATE_INT32(cur_io_buffer_len, IDEState),
        VMSTATE_UINT8(end_transfer_fn_idx, IDEState),
        VMSTATE_INT32(elementary_transfer_size, IDEState),
        VMSTATE_INT32(packet_transfer_size, IDEState),
        VMSTATE_END_OF_LIST()
    }
};
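
/*
 * Main per-drive migration state.  Fields tagged with a version number
 * (e.g. cdrom_changed at version 3) are only sent to and expected from
 * streams of at least that version; a minimum_version_id of 0 keeps very
 * old streams loadable.
 */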
const VMStateDescription vmstate_ide_drive = {
    .name = "ide_drive",
    .version_id = 3,
    .minimum_version_id = 0,
    .post_load = ide_drive_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(mult_sectors, IDEState),
        VMSTATE_INT32(identify_set, IDEState),
        VMSTATE_BUFFER_TEST(identify_data, IDEState, is_identify_set),
        VMSTATE_UINT8(feature, IDEState),
        VMSTATE_UINT8(error, IDEState),
        VMSTATE_UINT32(nsector, IDEState),
        VMSTATE_UINT8(sector, IDEState),
        VMSTATE_UINT8(lcyl, IDEState),
        VMSTATE_UINT8(hcyl, IDEState),
        VMSTATE_UINT8(hob_feature, IDEState),
        VMSTATE_UINT8(hob_sector, IDEState),
        VMSTATE_UINT8(hob_nsector, IDEState),
        VMSTATE_UINT8(hob_lcyl, IDEState),
        VMSTATE_UINT8(hob_hcyl, IDEState),
        VMSTATE_UINT8(select, IDEState),
        VMSTATE_UINT8(status, IDEState),
        VMSTATE_UINT8(lba48, IDEState),
        VMSTATE_UINT8(sense_key, IDEState),
        VMSTATE_UINT8(asc, IDEState),
        VMSTATE_UINT8_V(cdrom_changed, IDEState, 3),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_ide_drive_pio_state,
        &vmstate_ide_tray_state,
        &vmstate_ide_atapi_gesn_state,
        NULL
    }
};
static const VMStateDescription vmstate_ide_error_status = {
    .name = "ide_bus/error",
    .version_id = 2,
    .minimum_version_id = 1,
    .needed = ide_error_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(error_status, IDEBus),
        VMSTATE_INT64_V(retry_sector_num, IDEBus, 2),
        VMSTATE_UINT32_V(retry_nsector, IDEBus, 2),
        VMSTATE_UINT8_V(retry_unit, IDEBus, 2),
        VMSTATE_END_OF_LIST()
    }
};
const VMStateDescription vmstate_ide_bus = {
    .name = "ide_bus",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(cmd, IDEBus),
        VMSTATE_UINT8(unit, IDEBus),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_ide_error_status,
        NULL
    }
};
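
/*
 * Collect up to n IF_IDE drives defined on the command line, in index
 * order, so board code can wire them to successive IDE slots.  Missing
 * indexes simply leave NULL entries in hd[].
 */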
void ide_drive_get(DriveInfo **hd, int n)
{
    int i;

    for (i = 0; i < n; i++) {
        hd[i] = drive_get_by_index(IF_IDE, i);
    }
}