2 * QEMU IDE disk and CD/DVD-ROM Emulator
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2006 Openedhand Ltd.
7 * Permission is hereby granted, free of charge, to any person obtaining a copy
8 * of this software and associated documentation files (the "Software"), to deal
9 * in the Software without restriction, including without limitation the rights
10 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 * copies of the Software, and to permit persons to whom the Software is
12 * furnished to do so, subject to the following conditions:
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
25 #include "qemu/osdep.h"
27 #include "hw/pci/pci.h"
28 #include "hw/isa/isa.h"
29 #include "qemu/error-report.h"
30 #include "qemu/timer.h"
31 #include "sysemu/sysemu.h"
32 #include "sysemu/blockdev.h"
33 #include "sysemu/dma.h"
34 #include "hw/block/block.h"
35 #include "sysemu/block-backend.h"
36 #include "qemu/cutils.h"
38 #include "hw/ide/internal.h"
41 /* These values were based on a Seagate ST3500418AS but have been modified
42 to make more sense in QEMU */
/*
 * SMART attribute table returned for SMART READ DATA / READ THRESHOLDS.
 * Each row is one attribute in on-wire layout; column meanings below.
 */
43 static const int smart_attributes[][12] = {
44 /* id, flags, hflags, val, wrst, raw (6 bytes), threshold */
45 /* raw read error rate*/
46 { 0x01, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06},
/* spin-up time */
48 { 0x03, 0x03, 0x00, 0x64, 0x64, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
49 /* start stop count */
50 { 0x04, 0x02, 0x00, 0x64, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14},
51 /* remapped sectors */
52 { 0x05, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24},
/* power-on hours */
54 { 0x09, 0x03, 0x00, 0x64, 0x64, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
55 /* power cycle count */
56 { 0x0c, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
57 /* airflow-temperature-celsius */
58 { 190, 0x03, 0x00, 0x45, 0x45, 0x1f, 0x00, 0x1f, 0x1f, 0x00, 0x00, 0x32},
/* Human-readable names for enum ide_dma_cmd values (used for tracing). */
61 const char *IDE_DMA_CMD_lookup[IDE_DMA__COUNT] = {
62 [IDE_DMA_READ] = "DMA READ",
63 [IDE_DMA_WRITE] = "DMA WRITE",
64 [IDE_DMA_TRIM] = "DMA TRIM",
65 [IDE_DMA_ATAPI] = "DMA ATAPI"
/* Map a DMA command enum to its name; never returns NULL, even for
 * out-of-range values. */
68 static const char *IDE_DMA_CMD_str(enum ide_dma_cmd enval)
70 if ((unsigned)enval < IDE_DMA__COUNT) {
71 return IDE_DMA_CMD_lookup[enval];
73 return "DMA UNKNOWN CMD";
76 static void ide_dummy_transfer_stop(IDEState *s);
/* Fill a fixed-width IDENTIFY string field of 'len' bytes from 'src'.
 * NOTE(review): assumed to space-pad and byte-swap per the ATA string
 * convention — confirm against the full function body. */
78 static void padstr(char *str, const char *src, int len)
81 for(i = 0; i < len; i++) {
/* Store 'v' at '*p' as a little-endian 16-bit word. */
90 static void put_le16(uint16_t *p, unsigned int v)
/*
 * Refresh the capacity words of the cached IDENTIFY DEVICE data:
 * words 60-61 hold the 28-bit LBA sector count, words 100-103 the
 * 48-bit LBA sector count.  Called when s->nb_sectors changes.
 */
95 static void ide_identify_size(IDEState *s)
97 uint16_t *p = (uint16_t *)s->identify_data;
98 put_le16(p + 60, s->nb_sectors);
99 put_le16(p + 61, s->nb_sectors >> 16);
100 put_le16(p + 100, s->nb_sectors);
101 put_le16(p + 101, s->nb_sectors >> 16);
102 put_le16(p + 102, s->nb_sectors >> 32);
103 put_le16(p + 103, s->nb_sectors >> 48);
/*
 * Build the IDENTIFY DEVICE response for an ATA hard disk in
 * s->identify_data (cached via s->identify_set) and copy it into
 * s->io_buffer for transfer to the guest.  Word numbers follow the
 * ATA/ATAPI IDENTIFY DEVICE layout.
 */
106 static void ide_identify(IDEState *s)
109 unsigned int oldsize;
110 IDEDevice *dev = s->unit ? s->bus->slave : s->bus->master;
112 p = (uint16_t *)s->identify_data;
/* If the identify data was already generated, reuse the cached copy. */
113 if (s->identify_set) {
116 memset(p, 0, sizeof(s->identify_data));
118 put_le16(p + 0, 0x0040);
119 put_le16(p + 1, s->cylinders);
120 put_le16(p + 3, s->heads);
121 put_le16(p + 4, 512 * s->sectors); /* XXX: retired, remove ? */
122 put_le16(p + 5, 512); /* XXX: retired, remove ? */
123 put_le16(p + 6, s->sectors);
124 padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
125 put_le16(p + 20, 3); /* XXX: retired, remove ? */
126 put_le16(p + 21, 512); /* cache size in sectors */
127 put_le16(p + 22, 4); /* ecc bytes */
128 padstr((char *)(p + 23), s->version, 8); /* firmware version */
129 padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
130 #if MAX_MULT_SECTORS > 1
131 put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
133 put_le16(p + 48, 1); /* dword I/O */
134 put_le16(p + 49, (1 << 11) | (1 << 9) | (1 << 8)); /* DMA and LBA supported */
135 put_le16(p + 51, 0x200); /* PIO transfer cycle */
136 put_le16(p + 52, 0x200); /* DMA transfer cycle */
137 put_le16(p + 53, 1 | (1 << 1) | (1 << 2)); /* words 54-58,64-70,88 are valid */
/* Current (possibly translated) CHS geometry and resulting capacity. */
138 put_le16(p + 54, s->cylinders);
139 put_le16(p + 55, s->heads);
140 put_le16(p + 56, s->sectors);
141 oldsize = s->cylinders * s->heads * s->sectors;
142 put_le16(p + 57, oldsize);
143 put_le16(p + 58, oldsize >> 16);
145 put_le16(p + 59, 0x100 | s->mult_sectors);
146 /* *(p + 60) := nb_sectors -- see ide_identify_size */
147 /* *(p + 61) := nb_sectors >> 16 -- see ide_identify_size */
148 put_le16(p + 62, 0x07); /* single word dma0-2 supported */
149 put_le16(p + 63, 0x07); /* mdma0-2 supported */
150 put_le16(p + 64, 0x03); /* pio3-4 supported */
151 put_le16(p + 65, 120);
152 put_le16(p + 66, 120);
153 put_le16(p + 67, 120);
154 put_le16(p + 68, 120);
155 if (dev && dev->conf.discard_granularity) {
156 put_le16(p + 69, (1 << 14)); /* determinate TRIM behavior */
/* NCQ depth / SATA capability words, only meaningful behind AHCI. */
160 put_le16(p + 75, s->ncq_queues - 1);
162 put_le16(p + 76, (1 << 8));
165 put_le16(p + 80, 0xf0); /* ata3 -> ata6 supported */
166 put_le16(p + 81, 0x16); /* conforms to ata5 */
167 /* 14=NOP supported, 5=WCACHE supported, 0=SMART supported */
168 put_le16(p + 82, (1 << 14) | (1 << 5) | 1);
169 /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
170 put_le16(p + 83, (1 << 14) | (1 << 13) | (1 <<12) | (1 << 10));
171 /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
/* Word 84: the WWN bit is only advertised when a WWN is configured. */
173 put_le16(p + 84, (1 << 14) | (1 << 8) | 0);
175 put_le16(p + 84, (1 << 14) | 0);
177 /* 14 = NOP supported, 5=WCACHE enabled, 0=SMART feature set enabled */
178 if (blk_enable_write_cache(s->blk)) {
179 put_le16(p + 85, (1 << 14) | (1 << 5) | 1);
181 put_le16(p + 85, (1 << 14) | 1);
183 /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
184 put_le16(p + 86, (1 << 13) | (1 <<12) | (1 << 10));
185 /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
187 put_le16(p + 87, (1 << 14) | (1 << 8) | 0);
189 put_le16(p + 87, (1 << 14) | 0);
191 put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
192 put_le16(p + 93, 1 | (1 << 14) | 0x2000);
193 /* *(p + 100) := nb_sectors -- see ide_identify_size */
194 /* *(p + 101) := nb_sectors >> 16 -- see ide_identify_size */
195 /* *(p + 102) := nb_sectors >> 32 -- see ide_identify_size */
196 /* *(p + 103) := nb_sectors >> 48 -- see ide_identify_size */
198 if (dev && dev->conf.physical_block_size)
199 put_le16(p + 106, 0x6000 | get_physical_block_exp(&dev->conf));
201 /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
202 put_le16(p + 108, s->wwn >> 48);
203 put_le16(p + 109, s->wwn >> 32);
204 put_le16(p + 110, s->wwn >> 16);
205 put_le16(p + 111, s->wwn);
207 if (dev && dev->conf.discard_granularity) {
208 put_le16(p + 169, 1); /* TRIM support */
211 put_le16(p + 217, dev->rotation_rate); /* Nominal media rotation rate */
/* Fill in the capacity words, then cache and publish the result. */
214 ide_identify_size(s);
218 memcpy(s->io_buffer, p, sizeof(s->identify_data));
/*
 * Build the IDENTIFY PACKET DEVICE response for an ATAPI CD/DVD drive,
 * caching it in s->identify_data and copying it to s->io_buffer.
 */
221 static void ide_atapi_identify(IDEState *s)
225 p = (uint16_t *)s->identify_data;
/* Reuse the cached identify data when it was already generated. */
226 if (s->identify_set) {
229 memset(p, 0, sizeof(s->identify_data));
231 /* Removable CDROM, 50us response, 12 byte packets */
232 put_le16(p + 0, (2 << 14) | (5 << 8) | (1 << 7) | (2 << 5) | (0 << 0));
233 padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
234 put_le16(p + 20, 3); /* buffer type */
235 put_le16(p + 21, 512); /* cache size in sectors */
236 put_le16(p + 22, 4); /* ecc bytes */
237 padstr((char *)(p + 23), s->version, 8); /* firmware version */
238 padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
239 put_le16(p + 48, 1); /* dword I/O (XXX: should not be set on CDROM) */
/* Capability words differ depending on whether DMA is advertised. */
241 put_le16(p + 49, 1 << 9 | 1 << 8); /* DMA and LBA supported */
242 put_le16(p + 53, 7); /* words 64-70, 54-58, 88 valid */
243 put_le16(p + 62, 7); /* single word dma0-2 supported */
244 put_le16(p + 63, 7); /* mdma0-2 supported */
246 put_le16(p + 49, 1 << 9); /* LBA supported, no DMA */
247 put_le16(p + 53, 3); /* words 64-70, 54-58 valid */
248 put_le16(p + 63, 0x103); /* DMA modes XXX: may be incorrect */
250 put_le16(p + 64, 3); /* pio3-4 supported */
251 put_le16(p + 65, 0xb4); /* minimum DMA multiword tx cycle time */
252 put_le16(p + 66, 0xb4); /* recommended DMA multiword tx cycle time */
253 put_le16(p + 67, 0x12c); /* minimum PIO cycle time without flow control */
254 put_le16(p + 68, 0xb4); /* minimum PIO cycle time with IORDY flow control */
256 put_le16(p + 71, 30); /* in ns */
257 put_le16(p + 72, 30); /* in ns */
/* NCQ / SATA words, only meaningful behind AHCI. */
260 put_le16(p + 75, s->ncq_queues - 1);
262 put_le16(p + 76, (1 << 8));
265 put_le16(p + 80, 0x1e); /* support up to ATA/ATAPI-4 */
267 put_le16(p + 84, (1 << 8)); /* supports WWN for words 108-111 */
268 put_le16(p + 87, (1 << 8)); /* WWN enabled */
272 put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
276 /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
277 put_le16(p + 108, s->wwn >> 48);
278 put_le16(p + 109, s->wwn >> 32);
279 put_le16(p + 110, s->wwn >> 16);
280 put_le16(p + 111, s->wwn);
286 memcpy(s->io_buffer, p, sizeof(s->identify_data));
/*
 * Refresh the capacity words of the cached CFATA identify data:
 * words 7-8 (sectors per card, big-word first) and words 60-61
 * (total LBA sectors).  Called when s->nb_sectors changes.
 */
289 static void ide_cfata_identify_size(IDEState *s)
291 uint16_t *p = (uint16_t *)s->identify_data;
292 put_le16(p + 7, s->nb_sectors >> 16); /* Sectors per card */
293 put_le16(p + 8, s->nb_sectors); /* Sectors per card */
294 put_le16(p + 60, s->nb_sectors); /* Total LBA sectors */
295 put_le16(p + 61, s->nb_sectors >> 16); /* Total LBA sectors */
/*
 * Build the IDENTIFY response for a CompactFlash (CFATA) card,
 * caching it in s->identify_data and copying it to s->io_buffer.
 */
298 static void ide_cfata_identify(IDEState *s)
303 p = (uint16_t *)s->identify_data;
/* Reuse the cached identify data when it was already generated. */
304 if (s->identify_set) {
307 memset(p, 0, sizeof(s->identify_data));
/* Current capacity derived from the CHS geometry. */
309 cur_sec = s->cylinders * s->heads * s->sectors;
311 put_le16(p + 0, 0x848a); /* CF Storage Card signature */
312 put_le16(p + 1, s->cylinders); /* Default cylinders */
313 put_le16(p + 3, s->heads); /* Default heads */
314 put_le16(p + 6, s->sectors); /* Default sectors per track */
315 /* *(p + 7) := nb_sectors >> 16 -- see ide_cfata_identify_size */
316 /* *(p + 8) := nb_sectors -- see ide_cfata_identify_size */
317 padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
318 put_le16(p + 22, 0x0004); /* ECC bytes */
319 padstr((char *) (p + 23), s->version, 8); /* Firmware Revision */
320 padstr((char *) (p + 27), s->drive_model_str, 40);/* Model number */
321 #if MAX_MULT_SECTORS > 1
322 put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
324 put_le16(p + 47, 0x0000);
326 put_le16(p + 49, 0x0f00); /* Capabilities */
327 put_le16(p + 51, 0x0002); /* PIO cycle timing mode */
328 put_le16(p + 52, 0x0001); /* DMA cycle timing mode */
329 put_le16(p + 53, 0x0003); /* Translation params valid */
330 put_le16(p + 54, s->cylinders); /* Current cylinders */
331 put_le16(p + 55, s->heads); /* Current heads */
332 put_le16(p + 56, s->sectors); /* Current sectors */
333 put_le16(p + 57, cur_sec); /* Current capacity */
334 put_le16(p + 58, cur_sec >> 16); /* Current capacity */
335 if (s->mult_sectors) /* Multiple sector setting */
336 put_le16(p + 59, 0x100 | s->mult_sectors);
337 /* *(p + 60) := nb_sectors -- see ide_cfata_identify_size */
338 /* *(p + 61) := nb_sectors >> 16 -- see ide_cfata_identify_size */
339 put_le16(p + 63, 0x0203); /* Multiword DMA capability */
340 put_le16(p + 64, 0x0001); /* Flow Control PIO support */
341 put_le16(p + 65, 0x0096); /* Min. Multiword DMA cycle */
342 put_le16(p + 66, 0x0096); /* Rec. Multiword DMA cycle */
343 put_le16(p + 68, 0x00b4); /* Min. PIO cycle time */
344 put_le16(p + 82, 0x400c); /* Command Set supported */
345 put_le16(p + 83, 0x7068); /* Command Set supported */
346 put_le16(p + 84, 0x4000); /* Features supported */
347 put_le16(p + 85, 0x000c); /* Command Set enabled */
348 put_le16(p + 86, 0x7044); /* Command Set enabled */
349 put_le16(p + 87, 0x4000); /* Features enabled */
350 put_le16(p + 91, 0x4060); /* Current APM level */
351 put_le16(p + 129, 0x0002); /* Current features option */
352 put_le16(p + 130, 0x0005); /* Reassigned sectors */
353 put_le16(p + 131, 0x0001); /* Initial power mode */
354 put_le16(p + 132, 0x0000); /* User signature */
355 put_le16(p + 160, 0x8100); /* Power requirement */
356 put_le16(p + 161, 0x8001); /* CF command set */
/* Fill in the capacity words, then cache and publish the result. */
358 ide_cfata_identify_size(s);
362 memcpy(s->io_buffer, p, sizeof(s->identify_data));
/* Load the ATA/ATAPI device signature into the task-file registers;
 * the signature differs for CD (ATAPI) devices. */
365 static void ide_set_signature(IDEState *s)
367 s->select &= 0xf0; /* clear head */
371 if (s->drive_kind == IDE_CD) {
/*
 * Check that [sector, sector + nb_sectors) lies within the backing
 * device.  Written to avoid overflow of "sector + nb_sectors".
 */
383 static bool ide_sect_range_ok(IDEState *s,
384 uint64_t sector, uint64_t nb_sectors)
386 uint64_t total_sectors;
388 blk_get_geometry(s->blk, &total_sectors);
389 if (sector > total_sectors || nb_sectors > total_sectors - sector) {
395 typedef struct TrimAIOCB {
/*
 * Async-cancel callback for a TRIM operation: force the iteration
 * indices past the end so ide_issue_trim_cb stops, record -ECANCELED,
 * and cancel any in-flight discard.
 */
406 static void trim_aio_cancel(BlockAIOCB *acb)
408 TrimAIOCB *iocb = container_of(acb, TrimAIOCB, common);
410 /* Exit the loop so ide_issue_trim_cb will not continue */
411 iocb->j = iocb->qiov->niov - 1;
412 iocb->i = (iocb->qiov->iov[iocb->j].iov_len / 8) - 1;
414 iocb->ret = -ECANCELED;
417 blk_aio_cancel_async(iocb->aiocb);
/* AIOCB descriptor for TRIM requests; supports async cancellation. */
422 static const AIOCBInfo trim_aiocb_info = {
423 .aiocb_size = sizeof(TrimAIOCB),
424 .cancel_async = trim_aio_cancel,
/*
 * Bottom half run when a TRIM finishes: report a DMA error for an
 * invalid range, otherwise complete the caller's callback; then free
 * the bottom half and the AIOCB.
 */
427 static void ide_trim_bh_cb(void *opaque)
429 TrimAIOCB *iocb = opaque;
431 if (iocb->is_invalid) {
432 ide_dma_error(iocb->s);
434 iocb->common.cb(iocb->common.opaque, iocb->ret);
436 qemu_bh_delete(iocb->bh);
438 qemu_aio_unref(iocb);
/*
 * Process the guest's DSM TRIM payload one range entry at a time.
 * Each 8-byte entry packs a 6-byte LBA and a 2-byte sector count.
 * Submits one discard per call and re-enters itself as the discard's
 * completion callback; schedules the completion BH when done.
 */
441 static void ide_issue_trim_cb(void *opaque, int ret)
443 TrimAIOCB *iocb = opaque;
444 IDEState *s = iocb->s;
447 while (iocb->j < iocb->qiov->niov) {
449 while (++iocb->i < iocb->qiov->iov[j].iov_len / 8) {
451 uint64_t *buffer = iocb->qiov->iov[j].iov_base;
453 /* 6-byte LBA + 2-byte range per entry */
454 uint64_t entry = le64_to_cpu(buffer[i]);
455 uint64_t sector = entry & 0x0000ffffffffffffULL;
456 uint16_t count = entry >> 48;
/* Reject ranges that fall outside the device. */
462 if (!ide_sect_range_ok(s, sector, count)) {
463 iocb->is_invalid = true;
467 /* Got an entry! Submit and exit. */
468 iocb->aiocb = blk_aio_pdiscard(s->blk,
469 sector << BDRV_SECTOR_BITS,
470 count << BDRV_SECTOR_BITS,
471 ide_issue_trim_cb, opaque);
485 qemu_bh_schedule(iocb->bh);
/*
 * Entry point for a DSM TRIM DMA command: allocate the TrimAIOCB,
 * create its completion bottom half, and kick off processing of the
 * range list via ide_issue_trim_cb.  'opaque' is the IDEState.
 */
489 BlockAIOCB *ide_issue_trim(
490 int64_t offset, QEMUIOVector *qiov,
491 BlockCompletionFunc *cb, void *cb_opaque, void *opaque)
493 IDEState *s = opaque;
496 iocb = blk_aio_get(&trim_aiocb_info, s->blk, cb, cb_opaque);
498 iocb->bh = qemu_bh_new(ide_trim_bh_cb, iocb);
503 iocb->is_invalid = false;
504 ide_issue_trim_cb(iocb, 0);
505 return &iocb->common;
/* Abort the current command: stop any PIO transfer and report
 * READY|ERR status to the guest. */
508 void ide_abort_command(IDEState *s)
510 ide_transfer_stop(s);
511 s->status = READY_STAT | ERR_STAT;
/* Record the current unit/sector/count on the bus so that a failed
 * request can be retried after an error-stop resume. */
515 static void ide_set_retry(IDEState *s)
517 s->bus->retry_unit = s->unit;
518 s->bus->retry_sector_num = ide_get_sector(s);
519 s->bus->retry_nsector = s->nsector;
/* Clear the saved retry state on the bus (no request to replay). */
522 static void ide_clear_retry(IDEState *s)
524 s->bus->retry_unit = -1;
525 s->bus->retry_sector_num = 0;
526 s->bus->retry_nsector = 0;
529 /* prepare data transfer and tell what to do after */
530 void ide_transfer_start(IDEState *s, uint8_t *buf, int size,
531 EndTransferFunc *end_transfer_func)
/* 'end_transfer_func' runs when the guest has consumed/produced the
 * whole [buf, buf+size) window via the data register. */
533 s->end_transfer_func = end_transfer_func;
535 s->data_end = buf + size;
/* Raise DRQ only on success; an error keeps the guest from reading. */
537 if (!(s->status & ERR_STAT)) {
538 s->status |= DRQ_STAT;
540 if (s->bus->dma->ops->start_transfer) {
541 s->bus->dma->ops->start_transfer(s->bus->dma);
/* Notify the DMA backend (if it cares) that a command has completed. */
545 static void ide_cmd_done(IDEState *s)
547 if (s->bus->dma->ops->cmd_done) {
548 s->bus->dma->ops->cmd_done(s->bus->dma);
/*
 * Common helper for stopping/cancelling a PIO transfer: reset the data
 * window to an empty io_buffer, install 'end_transfer_func' as the
 * (idempotent) handler, and drop DRQ.
 */
552 static void ide_transfer_halt(IDEState *s,
553 void(*end_transfer_func)(IDEState *),
556 s->end_transfer_func = end_transfer_func;
557 s->data_ptr = s->io_buffer;
558 s->data_end = s->io_buffer;
559 s->status &= ~DRQ_STAT;
/* Stop the current PIO transfer (command-completion path). */
565 void ide_transfer_stop(IDEState *s)
567 ide_transfer_halt(s, ide_transfer_stop, true);
/* Cancel the current PIO transfer without the completion side effects. */
570 static void ide_transfer_cancel(IDEState *s)
572 ide_transfer_halt(s, ide_transfer_cancel, false);
/*
 * Decode the current sector number from the task-file registers,
 * handling LBA28, LBA48 and CHS addressing modes.
 */
575 int64_t ide_get_sector(IDEState *s)
/* Bit 6 of the select register chooses LBA addressing. */
578 if (s->select & 0x40) {
/* LBA28: 4 bits from select, plus hcyl/lcyl/sector. */
581 sector_num = ((s->select & 0x0f) << 24) | (s->hcyl << 16) |
582 (s->lcyl << 8) | s->sector;
/* LBA48: high-order bytes come from the HOB registers. */
584 sector_num = ((int64_t)s->hob_hcyl << 40) |
585 ((int64_t) s->hob_lcyl << 32) |
586 ((int64_t) s->hob_sector << 24) |
587 ((int64_t) s->hcyl << 16) |
588 ((int64_t) s->lcyl << 8) | s->sector;
/* CHS: sector numbers are 1-based within a track. */
591 sector_num = ((s->hcyl << 8) | s->lcyl) * s->heads * s->sectors +
592 (s->select & 0x0f) * s->sectors + (s->sector - 1);
/*
 * Encode 'sector_num' back into the task-file registers, using the
 * same LBA28 / LBA48 / CHS mode selection as ide_get_sector().
 */
597 void ide_set_sector(IDEState *s, int64_t sector_num)
600 if (s->select & 0x40) {
/* LBA28: top 4 bits land in the select register's head field. */
602 s->select = (s->select & 0xf0) | (sector_num >> 24);
603 s->hcyl = (sector_num >> 16);
604 s->lcyl = (sector_num >> 8);
605 s->sector = (sector_num);
/* LBA48: bytes 3-5 go to the HOB registers. */
607 s->sector = sector_num;
608 s->lcyl = sector_num >> 8;
609 s->hcyl = sector_num >> 16;
610 s->hob_sector = sector_num >> 24;
611 s->hob_lcyl = sector_num >> 32;
612 s->hob_hcyl = sector_num >> 40;
/* CHS: split into cylinder, head and 1-based sector. */
615 cyl = sector_num / (s->heads * s->sectors);
616 r = sector_num % (s->heads * s->sectors);
619 s->select = (s->select & 0xf0) | ((r / s->sectors) & 0x0f);
620 s->sector = (r % s->sectors) + 1;
/* Report a read/write error to the guest by aborting the command. */
624 static void ide_rw_error(IDEState *s) {
625 ide_abort_command(s);
/*
 * Completion callback for buffered reads: copy the bounce buffer back
 * into the caller's qiov and invoke the original callback — unless the
 * request was orphaned by cancellation, in which case the result is
 * simply dropped.  Always unlinks and frees the bounce buffer.
 */
629 static void ide_buffered_readv_cb(void *opaque, int ret)
631 IDEBufferedRequest *req = opaque;
632 if (!req->orphaned) {
634 qemu_iovec_from_buf(req->original_qiov, 0, req->iov.iov_base,
635 req->original_qiov->size);
637 req->original_cb(req->original_opaque, ret);
639 QLIST_REMOVE(req, list);
640 qemu_vfree(req->iov.iov_base);
644 #define MAX_BUFFERED_REQS 16
/*
 * Issue a read through a private bounce buffer so the request can be
 * safely cancelled (orphaned) without the backend writing into guest
 * memory afterwards.  Fails with -EIO once MAX_BUFFERED_REQS requests
 * are already outstanding.
 */
646 BlockAIOCB *ide_buffered_readv(IDEState *s, int64_t sector_num,
647 QEMUIOVector *iov, int nb_sectors,
648 BlockCompletionFunc *cb, void *opaque)
651 IDEBufferedRequest *req;
/* Count outstanding buffered requests to bound memory usage. */
654 QLIST_FOREACH(req, &s->buffered_requests, list) {
657 if (c > MAX_BUFFERED_REQS) {
658 return blk_abort_aio_request(s->blk, cb, opaque, -EIO);
661 req = g_new0(IDEBufferedRequest, 1);
662 req->original_qiov = iov;
663 req->original_cb = cb;
664 req->original_opaque = opaque;
665 req->iov.iov_base = qemu_blockalign(blk_bs(s->blk), iov->size);
666 req->iov.iov_len = iov->size;
667 qemu_iovec_init_external(&req->qiov, &req->iov, 1);
669 aioreq = blk_aio_preadv(s->blk, sector_num << BDRV_SECTOR_BITS,
670 &req->qiov, 0, ide_buffered_readv_cb, req);
672 QLIST_INSERT_HEAD(&s->buffered_requests, req, list);
677 * Cancel all pending DMA requests.
678 * Any buffered DMA requests are instantly canceled,
679 * but any pending unbuffered DMA requests must be waited on.
681 void ide_cancel_dma_sync(IDEState *s)
683 IDEBufferedRequest *req;
685 /* First invoke the callbacks of all buffered requests
686 * and flag those requests as orphaned. Ideally there
687 * are no unbuffered (Scatter Gather DMA Requests or
688 * write requests) pending and we can avoid to drain. */
689 QLIST_FOREACH(req, &s->buffered_requests, list) {
690 if (!req->orphaned) {
691 trace_ide_cancel_dma_sync_buffered(req->original_cb, req);
692 req->original_cb(req->original_opaque, -ECANCELED);
694 req->orphaned = true;
698 * We can't cancel Scatter Gather DMA in the middle of the
699 * operation or a partial (not full) DMA transfer would reach
700 * the storage so we wait for completion instead (we behave
701 * as if the DMA was completed by the time the guest tried
702 * to cancel dma with bmdma_cmd_writeb with BM_CMD_START not
705 * In the future we'll be able to safely cancel the I/O if the
706 * whole DMA operation will be submitted to disk with a single
707 * aio operation with preadv/pwritev.
709 if (s->bus->dma->aiocb) {
710 trace_ide_cancel_dma_sync_remaining();
/* After draining there must be nothing left in flight. */
712 assert(s->bus->dma->aiocb == NULL);
716 static void ide_sector_read(IDEState *s);
/*
 * Completion callback for a PIO sector read: on success, advance the
 * current sector and hand the data to the guest via the io_buffer;
 * on error, go through the rerror policy (retry/stop/report).
 */
718 static void ide_sector_read_cb(void *opaque, int ret)
720 IDEState *s = opaque;
724 s->status &= ~BUSY_STAT;
726 if (ret == -ECANCELED) {
730 if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO |
736 block_acct_done(blk_get_stats(s->blk), &s->acct);
/* Clamp to the per-command sector limit (multiple-sector setting). */
739 if (n > s->req_nb_sectors) {
740 n = s->req_nb_sectors;
743 ide_set_sector(s, ide_get_sector(s) + n);
745 /* Allow the guest to read the io_buffer */
746 ide_transfer_start(s, s->io_buffer, n * BDRV_SECTOR_SIZE, ide_sector_read);
/*
 * Start (or continue) a PIO sector read: validate the range, account
 * the I/O, and issue a buffered read whose callback feeds the data to
 * the guest.  Re-entered as the end-transfer function until nsector
 * reaches zero.
 */
750 static void ide_sector_read(IDEState *s)
755 s->status = READY_STAT | SEEK_STAT;
756 s->error = 0; /* not needed by IDE spec, but needed by Windows */
757 sector_num = ide_get_sector(s);
761 ide_transfer_stop(s);
765 s->status |= BUSY_STAT;
767 if (n > s->req_nb_sectors) {
768 n = s->req_nb_sectors;
771 trace_ide_sector_read(sector_num, n);
/* Out-of-range requests are rejected before touching the backend. */
773 if (!ide_sect_range_ok(s, sector_num, n)) {
775 block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_READ);
779 s->iov.iov_base = s->io_buffer;
780 s->iov.iov_len = n * BDRV_SECTOR_SIZE;
781 qemu_iovec_init_external(&s->qiov, &s->iov, 1);
783 block_acct_start(blk_get_stats(s->blk), &s->acct,
784 n * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
785 s->pio_aiocb = ide_buffered_readv(s, sector_num, &s->qiov, n,
786 ide_sector_read_cb, s);
/*
 * Commit 'tx_bytes' of a DMA transfer: notify the DMA backend,
 * advance the buffer offset, and tear down the scatter-gather list.
 */
789 void dma_buf_commit(IDEState *s, uint32_t tx_bytes)
791 if (s->bus->dma->ops->commit_buf) {
792 s->bus->dma->ops->commit_buf(s->bus->dma, tx_bytes);
794 s->io_buffer_offset += tx_bytes;
795 qemu_sglist_destroy(&s->sg);
/* Mark the DMA engine idle; 'more' tells the backend whether the
 * command expects further PRD entries. */
798 void ide_set_inactive(IDEState *s, bool more)
800 s->bus->dma->aiocb = NULL;
802 if (s->bus->dma->ops->set_inactive) {
803 s->bus->dma->ops->set_inactive(s->bus->dma, more);
/* Fail the current DMA command: discard uncommitted data, abort the
 * command and deactivate the DMA engine. */
808 void ide_dma_error(IDEState *s)
810 dma_buf_commit(s, 0);
811 ide_abort_command(s);
812 ide_set_inactive(s, false);
/*
 * Apply the drive's werror/rerror policy to a failed request.
 * 'op' is a bitmask of IDE_RETRY_* flags describing the request type.
 * Returns nonzero unless the policy is to ignore the error (in which
 * case the caller should continue as if it succeeded).
 */
816 int ide_handle_rw_error(IDEState *s, int error, int op)
818 bool is_read = (op & IDE_RETRY_READ) != 0;
819 BlockErrorAction action = blk_get_error_action(s->blk, is_read, error);
/* STOP: pause the VM and remember the op so it can be retried. */
821 if (action == BLOCK_ERROR_ACTION_STOP) {
822 assert(s->bus->retry_unit == s->unit);
823 s->bus->error_status = op;
824 } else if (action == BLOCK_ERROR_ACTION_REPORT) {
825 block_acct_failed(blk_get_stats(s->blk), &s->acct);
826 if (IS_IDE_RETRY_DMA(op)) {
828 } else if (IS_IDE_RETRY_ATAPI(op)) {
829 ide_atapi_io_error(s, -error);
834 blk_error_action(s->blk, action, is_read, error);
835 return action != BLOCK_ERROR_ACTION_IGNORE;
/*
 * Central DMA state machine callback.  Invoked after each chunk
 * completes: commits the transferred bytes, advances the sector
 * position, and either finishes the command or prepares the next
 * PRD-described chunk and re-submits itself as the completion
 * callback for the next read/write/trim.
 */
838 static void ide_dma_cb(void *opaque, int ret)
840 IDEState *s = opaque;
844 bool stay_active = false;
846 if (ret == -ECANCELED) {
850 if (ide_handle_rw_error(s, -ret, ide_dma_cmd_to_retry(s->dma_cmd))) {
851 s->bus->dma->aiocb = NULL;
852 dma_buf_commit(s, 0);
/* Number of sectors covered by the scatter-gather list just done. */
857 n = s->io_buffer_size >> 9;
858 if (n > s->nsector) {
859 /* The PRDs were longer than needed for this request. Shorten them so
860 * we don't get a negative remainder. The Active bit must remain set
861 * after the request completes. */
866 sector_num = ide_get_sector(s);
868 assert(n * 512 == s->sg.size);
869 dma_buf_commit(s, s->sg.size);
871 ide_set_sector(s, sector_num);
875 /* end of transfer ? */
876 if (s->nsector == 0) {
877 s->status = READY_STAT | SEEK_STAT;
882 /* launch next transfer */
884 s->io_buffer_index = 0;
885 s->io_buffer_size = n * 512;
886 if (s->bus->dma->ops->prepare_buf(s->bus->dma, s->io_buffer_size) < 512) {
887 /* The PRDs were too short. Reset the Active bit, but don't raise an
889 s->status = READY_STAT | SEEK_STAT;
890 dma_buf_commit(s, 0);
894 trace_ide_dma_cb(s, sector_num, n, IDE_DMA_CMD_str(s->dma_cmd));
/* Range-check reads/writes before touching the backend. */
896 if ((s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) &&
897 !ide_sect_range_ok(s, sector_num, n)) {
899 block_acct_invalid(blk_get_stats(s->blk), s->acct.type);
903 offset = sector_num << BDRV_SECTOR_BITS;
/* Dispatch the next chunk according to the DMA command type. */
904 switch (s->dma_cmd) {
906 s->bus->dma->aiocb = dma_blk_read(s->blk, &s->sg, offset,
907 BDRV_SECTOR_SIZE, ide_dma_cb, s);
910 s->bus->dma->aiocb = dma_blk_write(s->blk, &s->sg, offset,
911 BDRV_SECTOR_SIZE, ide_dma_cb, s);
914 s->bus->dma->aiocb = dma_blk_io(blk_get_aio_context(s->blk),
915 &s->sg, offset, BDRV_SECTOR_SIZE,
916 ide_issue_trim, s, ide_dma_cb, s,
917 DMA_DIRECTION_TO_DEVICE);
925 if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
926 block_acct_done(blk_get_stats(s->blk), &s->acct);
928 ide_set_inactive(s, stay_active);
/*
 * Begin a DMA disk command: set DRQ, record the command type, start
 * I/O accounting for reads/writes, and kick the DMA engine with
 * ide_dma_cb as the chunk-completion callback.
 */
931 static void ide_sector_start_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
933 s->status = READY_STAT | SEEK_STAT | DRQ_STAT;
934 s->io_buffer_size = 0;
935 s->dma_cmd = dma_cmd;
939 block_acct_start(blk_get_stats(s->blk), &s->acct,
940 s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
943 block_acct_start(blk_get_stats(s->blk), &s->acct,
944 s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
950 ide_start_dma(s, ide_dma_cb);
/* Reset the buffer index and hand control to the bus's DMA backend,
 * which will invoke 'cb' as transfers complete. */
953 void ide_start_dma(IDEState *s, BlockCompletionFunc *cb)
955 s->io_buffer_index = 0;
957 if (s->bus->dma->ops->start_dma) {
958 s->bus->dma->ops->start_dma(s->bus->dma, s, cb);
962 static void ide_sector_write(IDEState *s);
/* Timer callback used by the win2k install hack to delay the write IRQ. */
964 static void ide_sector_write_timer_cb(void *opaque)
966 IDEState *s = opaque;
/*
 * Completion callback for a PIO sector write: on success advance the
 * sector position and either finish the command or open the io_buffer
 * window for the next chunk; on error, apply the werror policy.
 */
970 static void ide_sector_write_cb(void *opaque, int ret)
972 IDEState *s = opaque;
975 if (ret == -ECANCELED) {
980 s->status &= ~BUSY_STAT;
983 if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO)) {
988 block_acct_done(blk_get_stats(s->blk), &s->acct);
/* Clamp to the per-command sector limit (multiple-sector setting). */
991 if (n > s->req_nb_sectors) {
992 n = s->req_nb_sectors;
996 ide_set_sector(s, ide_get_sector(s) + n);
997 if (s->nsector == 0) {
998 /* no more sectors to write */
999 ide_transfer_stop(s);
1001 int n1 = s->nsector;
1002 if (n1 > s->req_nb_sectors) {
1003 n1 = s->req_nb_sectors;
1005 ide_transfer_start(s, s->io_buffer, n1 * BDRV_SECTOR_SIZE,
1009 if (win2k_install_hack && ((++s->irq_count % 16) == 0)) {
1010 /* It seems there is a bug in the Windows 2000 installer HDD
1011 IDE driver which fills the disk with empty logs when the
1012 IDE write IRQ comes too early. This hack tries to correct
1013 that at the expense of slower write performance. Use this
1014 option _only_ to install Windows 2000. You must disable it
1016 timer_mod(s->sector_write_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
1017 (NANOSECONDS_PER_SECOND / 1000));
1019 ide_set_irq(s->bus);
/*
 * Flush the guest-filled io_buffer to disk for a PIO write command:
 * validate the range, account the I/O, and submit an async write
 * whose callback continues or completes the command.
 */
1023 static void ide_sector_write(IDEState *s)
1028 s->status = READY_STAT | SEEK_STAT | BUSY_STAT;
1029 sector_num = ide_get_sector(s);
1032 if (n > s->req_nb_sectors) {
1033 n = s->req_nb_sectors;
1036 trace_ide_sector_write(sector_num, n);
/* Out-of-range requests are rejected before touching the backend. */
1038 if (!ide_sect_range_ok(s, sector_num, n)) {
1040 block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_WRITE);
1044 s->iov.iov_base = s->io_buffer;
1045 s->iov.iov_len = n * BDRV_SECTOR_SIZE;
1046 qemu_iovec_init_external(&s->qiov, &s->iov, 1);
1048 block_acct_start(blk_get_stats(s->blk), &s->acct,
1049 n * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
1050 s->pio_aiocb = blk_aio_pwritev(s->blk, sector_num << BDRV_SECTOR_BITS,
1051 &s->qiov, 0, ide_sector_write_cb, s);
/*
 * Completion callback for FLUSH CACHE: apply the error policy on
 * failure, otherwise finish accounting, set READY status and raise
 * the completion interrupt.
 */
1054 static void ide_flush_cb(void *opaque, int ret)
1056 IDEState *s = opaque;
1058 s->pio_aiocb = NULL;
1060 if (ret == -ECANCELED) {
1064 /* XXX: What sector number to set here? */
1065 if (ide_handle_rw_error(s, -ret, IDE_RETRY_FLUSH)) {
1071 block_acct_done(blk_get_stats(s->blk), &s->acct);
1073 s->status = READY_STAT | SEEK_STAT;
1075 ide_set_irq(s->bus);
/*
 * Handle the FLUSH CACHE command: mark the drive busy, start flush
 * accounting and submit an async flush (guarding against a NULL
 * BlockDriverState, which blk_aio_flush cannot handle).
 */
1078 static void ide_flush_cache(IDEState *s)
1080 if (s->blk == NULL) {
1085 s->status |= BUSY_STAT;
1087 block_acct_start(blk_get_stats(s->blk), &s->acct, 0, BLOCK_ACCT_FLUSH);
1089 if (blk_bs(s->blk)) {
1090 s->pio_aiocb = blk_aio_flush(s->blk, ide_flush_cb, s);
1092 /* XXX blk_aio_flush() crashes when blk_bs(blk) is NULL, remove this
1093 * temporary workaround when blk_aio_*() functions handle NULL blk_bs.
/*
 * CFATA metadata INQUIRY: fill io_buffer with a 512-byte descriptor of
 * the metadata area (format revision, media type, capacity, sectors).
 */
1099 static void ide_cfata_metadata_inquiry(IDEState *s)
1104 p = (uint16_t *) s->io_buffer;
1105 memset(p, 0, 0x200);
/* Number of 512-byte sectors needed to hold the metadata, rounded up. */
1106 spd = ((s->mdata_size - 1) >> 9) + 1;
1108 put_le16(p + 0, 0x0001); /* Data format revision */
1109 put_le16(p + 1, 0x0000); /* Media property: silicon */
1110 put_le16(p + 2, s->media_changed); /* Media status */
1111 put_le16(p + 3, s->mdata_size & 0xffff); /* Capacity in bytes (low) */
1112 put_le16(p + 4, s->mdata_size >> 16); /* Capacity in bytes (high) */
1113 put_le16(p + 5, spd & 0xffff); /* Sectors per device (low) */
1114 put_le16(p + 6, spd >> 16); /* Sectors per device (high) */
/*
 * CFATA metadata READ: copy a chunk of the metadata storage into
 * io_buffer.  The sector offset comes from hcyl:lcyl; requests past
 * the end of the metadata area are rejected with ABRT.
 */
1117 static void ide_cfata_metadata_read(IDEState *s)
1121 if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
1122 s->status = ERR_STAT;
1123 s->error = ABRT_ERR;
1127 p = (uint16_t *) s->io_buffer;
1128 memset(p, 0, 0x200);
1130 put_le16(p + 0, s->media_changed); /* Media status */
/* Copy at most one sector (minus the 2-byte header), clamped to the
 * remaining metadata and to the requested sector count. */
1131 memcpy(p + 1, s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
1132 MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
1133 s->nsector << 9), 0x200 - 2));
/*
 * CFATA metadata WRITE: copy io_buffer contents into the metadata
 * storage at the sector offset given by hcyl:lcyl; out-of-range
 * requests are rejected with ABRT.  Clears the media-changed flag.
 */
1136 static void ide_cfata_metadata_write(IDEState *s)
1138 if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
1139 s->status = ERR_STAT;
1140 s->error = ABRT_ERR;
1144 s->media_changed = 0;
/* Copy at most one sector (minus the 2-byte header), clamped to the
 * remaining metadata and to the requested sector count. */
1146 memcpy(s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
1148 MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
1149 s->nsector << 9), 0x200 - 2));
1152 /* called when the inserted state of the media has changed */
1153 static void ide_cd_change_cb(void *opaque, bool load, Error **errp)
1155 IDEState *s = opaque;
1156 uint64_t nb_sectors;
/* Track tray state and refresh the capacity of the (new) medium. */
1158 s->tray_open = !load;
1159 blk_get_geometry(s->blk, &nb_sectors);
1160 s->nb_sectors = nb_sectors;
1163 * First indicate to the guest that a CD has been removed. That's
1164 * done on the next command the guest sends us.
1166 * Then we set UNIT_ATTENTION, by which the guest will
1167 * detect a new CD in the drive. See ide_atapi_cmd() for details.
1169 s->cdrom_changed = 1;
1170 s->events.new_media = true;
1171 s->events.eject_request = false;
1172 ide_set_irq(s->bus);
/*
 * Host requested a medium eject (e.g. monitor command): flag the
 * event for the guest; a forced eject also releases the tray lock.
 */
1175 static void ide_cd_eject_request_cb(void *opaque, bool force)
1177 IDEState *s = opaque;
1179 s->events.eject_request = true;
1181 s->tray_locked = false;
1183 ide_set_irq(s->bus);
/*
 * Normalize the sector count for a command, folding the LBA48 HOB
 * byte into s->nsector and applying the "0 means maximum" rule.
 */
1186 static void ide_cmd_lba48_transform(IDEState *s, int lba48)
1190 /* handle the 'magic' 0 nsector count conversion here. to avoid
1191 * fiddling with the rest of the read logic, we just store the
1192 * full sector count in ->nsector and ignore ->hob_nsector from now
1198 if (!s->nsector && !s->hob_nsector)
1201 int lo = s->nsector;
1202 int hi = s->hob_nsector;
1204 s->nsector = (hi << 8) | lo;
/* Clear the HOB (high-order byte) select bit on both drives of the bus. */
1209 static void ide_clear_hob(IDEBus *bus)
1211 /* any write clears HOB high bit of device control register */
1212 bus->ifs[0].select &= ~(1 << 7);
1213 bus->ifs[1].select &= ~(1 << 7);
1216 /* IOport [W]rite [R]egisters */
/* Register offsets within the ATA command block (addr & 7). */
1217 enum ATA_IOPORT_WR {
1218 ATA_IOPORT_WR_DATA = 0,
1219 ATA_IOPORT_WR_FEATURES = 1,
1220 ATA_IOPORT_WR_SECTOR_COUNT = 2,
1221 ATA_IOPORT_WR_SECTOR_NUMBER = 3,
1222 ATA_IOPORT_WR_CYLINDER_LOW = 4,
1223 ATA_IOPORT_WR_CYLINDER_HIGH = 5,
1224 ATA_IOPORT_WR_DEVICE_HEAD = 6,
1225 ATA_IOPORT_WR_COMMAND = 7,
1226 ATA_IOPORT_WR_NUM_REGISTERS,
/* Human-readable register names for write tracing. */
1229 const char *ATA_IOPORT_WR_lookup[ATA_IOPORT_WR_NUM_REGISTERS] = {
1230 [ATA_IOPORT_WR_DATA] = "Data",
1231 [ATA_IOPORT_WR_FEATURES] = "Features",
1232 [ATA_IOPORT_WR_SECTOR_COUNT] = "Sector Count",
1233 [ATA_IOPORT_WR_SECTOR_NUMBER] = "Sector Number",
1234 [ATA_IOPORT_WR_CYLINDER_LOW] = "Cylinder Low",
1235 [ATA_IOPORT_WR_CYLINDER_HIGH] = "Cylinder High",
1236 [ATA_IOPORT_WR_DEVICE_HEAD] = "Device/Head",
1237 [ATA_IOPORT_WR_COMMAND] = "Command"
/*
 * Guest write to an ATA command-block I/O port.  Task-file register
 * writes go to BOTH drives on the bus (per the ATA spec, only the
 * selected drive acts on them); each write shifts the previous value
 * into the corresponding HOB register for LBA48 addressing.
 */
1240 void ide_ioport_write(void *opaque, uint32_t addr, uint32_t val)
1242 IDEBus *bus = opaque;
1243 IDEState *s = idebus_active_if(bus);
1244 int reg_num = addr & 7;
1246 trace_ide_ioport_write(addr, ATA_IOPORT_WR_lookup[reg_num], val, bus, s);
1248 /* ignore writes to command block while busy with previous command */
1249 if (reg_num != 7 && (s->status & (BUSY_STAT|DRQ_STAT))) {
1256 case ATA_IOPORT_WR_FEATURES:
1258 /* NOTE: data is written to the two drives */
1259 bus->ifs[0].hob_feature = bus->ifs[0].feature;
1260 bus->ifs[1].hob_feature = bus->ifs[1].feature;
1261 bus->ifs[0].feature = val;
1262 bus->ifs[1].feature = val;
1264 case ATA_IOPORT_WR_SECTOR_COUNT:
1266 bus->ifs[0].hob_nsector = bus->ifs[0].nsector;
1267 bus->ifs[1].hob_nsector = bus->ifs[1].nsector;
1268 bus->ifs[0].nsector = val;
1269 bus->ifs[1].nsector = val;
1271 case ATA_IOPORT_WR_SECTOR_NUMBER:
1273 bus->ifs[0].hob_sector = bus->ifs[0].sector;
1274 bus->ifs[1].hob_sector = bus->ifs[1].sector;
1275 bus->ifs[0].sector = val;
1276 bus->ifs[1].sector = val;
1278 case ATA_IOPORT_WR_CYLINDER_LOW:
1280 bus->ifs[0].hob_lcyl = bus->ifs[0].lcyl;
1281 bus->ifs[1].hob_lcyl = bus->ifs[1].lcyl;
1282 bus->ifs[0].lcyl = val;
1283 bus->ifs[1].lcyl = val;
1285 case ATA_IOPORT_WR_CYLINDER_HIGH:
1287 bus->ifs[0].hob_hcyl = bus->ifs[0].hcyl;
1288 bus->ifs[1].hob_hcyl = bus->ifs[1].hcyl;
1289 bus->ifs[0].hcyl = val;
1290 bus->ifs[1].hcyl = val;
1292 case ATA_IOPORT_WR_DEVICE_HEAD:
1293 /* FIXME: HOB readback uses bit 7 */
/* Bit 4 of the value selects master (0) or slave (1). */
1294 bus->ifs[0].select = (val & ~0x10) | 0xa0;
1295 bus->ifs[1].select = (val | 0x10) | 0xa0;
1297 bus->unit = (val >> 4) & 1;
1300 case ATA_IOPORT_WR_COMMAND:
1302 ide_exec_cmd(bus, val);
/*
 * Reset a single drive to its power-on state: cancel any in-flight
 * PIO aio, reset multi-sector mode, clear ATAPI transfer state, set
 * the device signature and park the transfer handler on the dummy
 * stop function so data-port reads return 0xffff.
 */
1307 static void ide_reset(IDEState *s)
/* Cancel any outstanding PIO request synchronously before wiping state. */
1312         blk_aio_cancel(s->pio_aiocb);
1313         s->pio_aiocb = NULL;
/* CFATA starts with Read/Write Multiple disabled; others at the max. */
1316     if (s->drive_kind == IDE_CFATA)
1317         s->mult_sectors = 0;
1319         s->mult_sectors = MAX_MULT_SECTORS;
1336     s->status = READY_STAT | SEEK_STAT;
1340     /* ATAPI specific */
1343     s->cdrom_changed = 0;
1344     s->packet_transfer_size = 0;
1345     s->elementary_transfer_size = 0;
1346     s->io_buffer_index = 0;
1347     s->cd_sector_size = 0;
1352     s->io_buffer_size = 0;
1353     s->req_nb_sectors = 0;
1355     ide_set_signature(s);
1356     /* init the transfer handler so that 0xffff is returned on data
1358     s->end_transfer_func = ide_dummy_transfer_stop;
1359     ide_dummy_transfer_stop(s);
1360     s->media_changed = 0;
/*
 * Basic ATA command handlers.  Each handler follows the ide_cmd_table
 * contract: return true if ide_exec_cmd() should run the completion
 * path (clear BUSY, optionally set DSC, raise IRQ), false if the
 * handler started an asynchronous transfer and completes later.
 */
/* NOP-style commands (RECAL, SPECIFY, STANDBY, ...): accept and do nothing. */
1363 static bool cmd_nop(IDEState *s, uint8_t cmd)
/* DEVICE RESET (ATAPI only per the command table): abort PIO and DMA,
 * then restore the reset signature. */
1368 static bool cmd_device_reset(IDEState *s, uint8_t cmd)
1370     /* Halt PIO (in the DRQ phase), then DMA */
1371     ide_transfer_cancel(s);
1372     ide_cancel_dma_sync(s);
1374     /* Reset any PIO commands, reset signature, etc */
1377     /* RESET: ATA8-ACS3 7.10.4 "Normal Outputs";
1378      * ATA8-ACS3 Table 184 "Device Signatures for Normal Output" */
1381     /* Do not overwrite status register */
/* DATA SET MANAGEMENT: only the TRIM feature is dispatched (as a DMA
 * transfer); other feature values abort the command. */
1385 static bool cmd_data_set_management(IDEState *s, uint8_t cmd)
1387     switch (s->feature) {
1390         ide_sector_start_dma(s, IDE_DMA_TRIM);
1396         ide_abort_command(s);
/* IDENTIFY DEVICE: valid for HD/CFATA with media; a CD answers by
 * setting the ATAPI signature and aborting, so the guest retries with
 * IDENTIFY PACKET DEVICE. */
1400 static bool cmd_identify(IDEState *s, uint8_t cmd)
1402     if (s->blk && s->drive_kind != IDE_CD) {
1403         if (s->drive_kind != IDE_CFATA) {
1406             ide_cfata_identify(s);
1408         s->status = READY_STAT | SEEK_STAT;
1409         ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1410         ide_set_irq(s->bus);
1413         if (s->drive_kind == IDE_CD) {
1414             ide_set_signature(s);
1416         ide_abort_command(s);
/* READ VERIFY (and EXT variant): no data transfer is performed here,
 * only the LBA48 register transform. */
1422 static bool cmd_verify(IDEState *s, uint8_t cmd)
1424     bool lba48 = (cmd == WIN_VERIFY_EXT);
1426     /* do sector number check ? */
1427     ide_cmd_lba48_transform(s, lba48);
/*
 * Multi-sector mode and the PIO/DMA read/write command handlers.
 * NOTE(review): embedded line numbers jump; returns/braces between the
 * visible statements are not shown in this listing.
 */
/* SET MULTIPLE MODE: nsector low byte must be 0 (CFATA disable), or a
 * power of two no larger than MAX_MULT_SECTORS; otherwise abort. */
1432 static bool cmd_set_multiple_mode(IDEState *s, uint8_t cmd)
1434     if (s->drive_kind == IDE_CFATA && s->nsector == 0) {
1435         /* Disable Read and Write Multiple */
1436         s->mult_sectors = 0;
1437     } else if ((s->nsector & 0xff) != 0 &&
1438                ((s->nsector & 0xff) > MAX_MULT_SECTORS ||
1439                 (s->nsector & (s->nsector - 1)) != 0)) {
1440         ide_abort_command(s);
1442         s->mult_sectors = s->nsector & 0xff;
/* READ MULTIPLE: requires media and an enabled multiple mode. */
1448 static bool cmd_read_multiple(IDEState *s, uint8_t cmd)
1450     bool lba48 = (cmd == WIN_MULTREAD_EXT);
1452     if (!s->blk || !s->mult_sectors) {
1453         ide_abort_command(s);
1457     ide_cmd_lba48_transform(s, lba48);
1458     s->req_nb_sectors = s->mult_sectors;
/* WRITE MULTIPLE: starts a PIO-in transfer of up to mult_sectors
 * sectors at a time; marks media changed for CFATA bookkeeping. */
1463 static bool cmd_write_multiple(IDEState *s, uint8_t cmd)
1465     bool lba48 = (cmd == WIN_MULTWRITE_EXT);
1468     if (!s->blk || !s->mult_sectors) {
1469         ide_abort_command(s);
1473     ide_cmd_lba48_transform(s, lba48);
1475     s->req_nb_sectors = s->mult_sectors;
1476     n = MIN(s->nsector, s->req_nb_sectors);
1478     s->status = SEEK_STAT | READY_STAT;
1479     ide_transfer_start(s, s->io_buffer, 512 * n, ide_sector_write);
1481     s->media_changed = 1;
/* READ SECTOR(S) PIO: a CD answers with the ATAPI signature + abort
 * (odd, but mandated — see comment below); otherwise one sector per
 * DRQ block. */
1486 static bool cmd_read_pio(IDEState *s, uint8_t cmd)
1488     bool lba48 = (cmd == WIN_READ_EXT);
1490     if (s->drive_kind == IDE_CD) {
1491         ide_set_signature(s); /* odd, but ATA4 8.27.5.2 requires it */
1492         ide_abort_command(s);
1497         ide_abort_command(s);
1501     ide_cmd_lba48_transform(s, lba48);
1502     s->req_nb_sectors = 1;
/* WRITE SECTOR(S) PIO: one sector per DRQ block. */
1508 static bool cmd_write_pio(IDEState *s, uint8_t cmd)
1510     bool lba48 = (cmd == WIN_WRITE_EXT);
1513         ide_abort_command(s);
1517     ide_cmd_lba48_transform(s, lba48);
1519     s->req_nb_sectors = 1;
1520     s->status = SEEK_STAT | READY_STAT;
1521     ide_transfer_start(s, s->io_buffer, 512, ide_sector_write);
1523     s->media_changed = 1;
/* READ DMA: hand off to the bus-master DMA engine. */
1528 static bool cmd_read_dma(IDEState *s, uint8_t cmd)
1530     bool lba48 = (cmd == WIN_READDMA_EXT);
1533         ide_abort_command(s);
1537     ide_cmd_lba48_transform(s, lba48);
1538     ide_sector_start_dma(s, IDE_DMA_READ);
/* WRITE DMA: as above, in the write direction. */
1543 static bool cmd_write_dma(IDEState *s, uint8_t cmd)
1545     bool lba48 = (cmd == WIN_WRITEDMA_EXT);
1548         ide_abort_command(s);
1552     ide_cmd_lba48_transform(s, lba48);
1553     ide_sector_start_dma(s, IDE_DMA_WRITE);
1555     s->media_changed = 1;
/* FLUSH CACHE (and EXT): body not visible in this listing. */
1560 static bool cmd_flush_cache(IDEState *s, uint8_t cmd)
/* SEEK: accepted unconditionally; bounds checking is a known TODO. */
1566 static bool cmd_seek(IDEState *s, uint8_t cmd)
1568     /* XXX: Check that seek is within bounds */
/* READ NATIVE MAX ADDRESS: report the last addressable sector through
 * the task-file registers; abort when no media is addressable. */
1572 static bool cmd_read_native_max(IDEState *s, uint8_t cmd)
1574     bool lba48 = (cmd == WIN_READ_NATIVE_MAX_EXT);
1576     /* Refuse if no sectors are addressable (e.g. medium not inserted) */
1577     if (s->nb_sectors == 0) {
1578         ide_abort_command(s);
1582     ide_cmd_lba48_transform(s, lba48);
1583     ide_set_sector(s, s->nb_sectors - 1);
/* CHECK POWER MODE: always reports "active or idle" (0xff). */
1588 static bool cmd_check_power_mode(IDEState *s, uint8_t cmd)
1590     s->nsector = 0xff; /* device active or idle */
/*
 * SET FEATURES: dispatch on the feature register.  Write-cache
 * enable/disable updates both the block backend and IDENTIFY word 85;
 * subfeature 0x03 (set transfer mode) updates IDENTIFY words 62/63/88
 * to advertise the selected SWDMA/MDMA/UDMA mode.  Unknown features
 * abort the command.
 */
1594 static bool cmd_set_features(IDEState *s, uint8_t cmd)
1596     uint16_t *identify_data;
1599         ide_abort_command(s);
1603     /* XXX: valid for CDROM ? */
1604     switch (s->feature) {
1605     case 0x02: /* write cache enable */
1606         blk_set_enable_write_cache(s->blk, true);
1607         identify_data = (uint16_t *)s->identify_data;
1608         put_le16(identify_data + 85, (1 << 14) | (1 << 5) | 1);
1610     case 0x82: /* write cache disable */
1611         blk_set_enable_write_cache(s->blk, false);
1612         identify_data = (uint16_t *)s->identify_data;
1613         put_le16(identify_data + 85, (1 << 14) | 1);
/* The following subcommands are accepted but have no effect here. */
1616     case 0xcc: /* reverting to power-on defaults enable */
1617     case 0x66: /* reverting to power-on defaults disable */
1618     case 0xaa: /* read look-ahead enable */
1619     case 0x55: /* read look-ahead disable */
1620     case 0x05: /* set advanced power management mode */
1621     case 0x85: /* disable advanced power management mode */
1622     case 0x69: /* NOP */
1623     case 0x67: /* NOP */
1624     case 0x96: /* NOP */
1625     case 0x9a: /* NOP */
1626     case 0x42: /* enable Automatic Acoustic Mode */
1627     case 0xc2: /* disable Automatic Acoustic Mode */
1629     case 0x03: /* set transfer mode */
/* nsector = (mode class << 3) | mode number. */
1631         uint8_t val = s->nsector & 0x07;
1632         identify_data = (uint16_t *)s->identify_data;
1634         switch (s->nsector >> 3) {
1635         case 0x00: /* pio default */
1636         case 0x01: /* pio mode */
1637             put_le16(identify_data + 62, 0x07);
1638             put_le16(identify_data + 63, 0x07);
1639             put_le16(identify_data + 88, 0x3f);
1641         case 0x02: /* sigle word dma mode*/
1642             put_le16(identify_data + 62, 0x07 | (1 << (val + 8)));
1643             put_le16(identify_data + 63, 0x07);
1644             put_le16(identify_data + 88, 0x3f);
1646         case 0x04: /* mdma mode */
1647             put_le16(identify_data + 62, 0x07);
1648             put_le16(identify_data + 63, 0x07 | (1 << (val + 8)));
1649             put_le16(identify_data + 88, 0x3f);
1651         case 0x08: /* udma mode */
1652             put_le16(identify_data + 62, 0x07);
1653             put_le16(identify_data + 63, 0x07);
1654             put_le16(identify_data + 88, 0x3f | (1 << (val + 8)));
1664     ide_abort_command(s);
1669 /*** ATAPI commands ***/
/* IDENTIFY PACKET DEVICE: build the ATAPI identify block and start a
 * 512-byte PIO-out transfer. */
1671 static bool cmd_identify_packet(IDEState *s, uint8_t cmd)
1673     ide_atapi_identify(s);
1674     s->status = READY_STAT | SEEK_STAT;
1675     ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1676     ide_set_irq(s->bus);
/* EXECUTE DEVICE DIAGNOSTIC: set the signature; packet devices must
 * report a cleared status (READY not set), others READY|SEEK. */
1680 static bool cmd_exec_dev_diagnostic(IDEState *s, uint8_t cmd)
1682     ide_set_signature(s);
1684     if (s->drive_kind == IDE_CD) {
1685         s->status = 0; /* ATAPI spec (v6) section 9.10 defines packet
1686                         * devices to return a clear status register
1687                         * with READY_STAT *not* set. */
1690         s->status = READY_STAT | SEEK_STAT;
1691         /* The bits of the error register are not as usual for this command!
1692          * They are part of the regular output (this is why ERR_STAT isn't set)
1693          * Device 0 passed, Device 1 passed or not present. */
1695     ide_set_irq(s->bus);
/* PACKET: start reception of the ATAPI command packet.  Feature bit 1
 * (overlap) is unsupported and aborts; feature bit 0 selects DMA for
 * the data phase. */
1701 static bool cmd_packet(IDEState *s, uint8_t cmd)
1703     /* overlapping commands not supported */
1704     if (s->feature & 0x02) {
1705         ide_abort_command(s);
1709     s->status = READY_STAT | SEEK_STAT;
1710     s->atapi_dma = s->feature & 1;
1712         s->dma_cmd = IDE_DMA_ATAPI;
1715     ide_transfer_start(s, s->io_buffer, ATAPI_PACKET_SIZE,
1721 /*** CF-ATA commands ***/
/* CFA REQUEST EXTENDED ERROR CODE: always reports 0x09. */
1723 static bool cmd_cfa_req_ext_error_code(IDEState *s, uint8_t cmd)
1725     s->error = 0x09;    /* miscellaneous error */
1726     s->status = READY_STAT | SEEK_STAT;
1727     ide_set_irq(s->bus);
/* CFA ERASE SECTORS / WEAR LEVEL.  WEAR LEVEL shares its opcode with
 * WIN_SECURITY_FREEZE_LOCK, which Windows 8 issues over AHCI, so it is
 * special-cased here. */
1732 static bool cmd_cfa_erase_sectors(IDEState *s, uint8_t cmd)
1734     /* WIN_SECURITY_FREEZE_LOCK has the same ID as CFA_WEAR_LEVEL and is
1735      * required for Windows 8 to work with AHCI */
1737     if (cmd == CFA_WEAR_LEVEL) {
1741     if (cmd == CFA_ERASE_SECTORS) {
1742         s->media_changed = 1;
/* CFA TRANSLATE SECTOR: return a 512-byte sector-information block
 * echoing the CHS registers and the current LBA. */
1748 static bool cmd_cfa_translate_sector(IDEState *s, uint8_t cmd)
1750     s->status = READY_STAT | SEEK_STAT;
1752     memset(s->io_buffer, 0, 0x200);
1753     s->io_buffer[0x00] = s->hcyl;                   /* Cyl MSB */
1754     s->io_buffer[0x01] = s->lcyl;                   /* Cyl LSB */
1755     s->io_buffer[0x02] = s->select;                 /* Head */
1756     s->io_buffer[0x03] = s->sector;                 /* Sector */
1757     s->io_buffer[0x04] = ide_get_sector(s) >> 16;   /* LBA MSB */
1758     s->io_buffer[0x05] = ide_get_sector(s) >> 8;    /* LBA */
1759     s->io_buffer[0x06] = ide_get_sector(s) >> 0;    /* LBA LSB */
1760     s->io_buffer[0x13] = 0x00;                      /* Erase flag */
1761     s->io_buffer[0x18] = 0x00;                      /* Hot count */
1762     s->io_buffer[0x19] = 0x00;                      /* Hot count */
1763     s->io_buffer[0x1a] = 0x01;                      /* Hot count */
1765     ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1766     ide_set_irq(s->bus);
/* CFA ACCESS METADATA STORAGE: inquiry/read/write dispatched on the
 * feature register; other values abort. */
1771 static bool cmd_cfa_access_metadata_storage(IDEState *s, uint8_t cmd)
1773     switch (s->feature) {
1774     case 0x02:  /* Inquiry Metadata Storage */
1775         ide_cfata_metadata_inquiry(s);
1777     case 0x03:  /* Read Metadata Storage */
1778         ide_cfata_metadata_read(s);
1780     case 0x04:  /* Write Metadata Storage */
1781         ide_cfata_metadata_write(s);
1784         ide_abort_command(s);
1788     ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1789     s->status = 0x00; /* NOTE: READY is _not_ set */
1790     ide_set_irq(s->bus);
/* IBM SENSE CONDITION: only feature 0x01 (temperature) is supported
 * and reports a fixed +20 C. */
1795 static bool cmd_ibm_sense_condition(IDEState *s, uint8_t cmd)
1797     switch (s->feature) {
1798     case 0x01:  /* sense temperature in device */
1799         s->nsector = 0x50;      /* +20 C */
1802         ide_abort_command(s);
1810 /*** SMART commands ***/
/*
 * WIN_SMART dispatcher.  The SMART key (lcyl/hcyl == 0x4f/0xc2) must
 * be present, and except for SMART ENABLE the feature set must already
 * be enabled.  Sub-commands build a 512-byte data block whose last
 * byte is the two's-complement checksum of the first 511 bytes, as the
 * ATA SMART structures require.
 */
1812 static bool cmd_smart(IDEState *s, uint8_t cmd)
/* All SMART commands carry the fixed key in the cylinder registers. */
1816     if (s->hcyl != 0xc2 || s->lcyl != 0x4f) {
1820     if (!s->smart_enabled && s->feature != SMART_ENABLE) {
1824     switch (s->feature) {
1826         s->smart_enabled = 0;
1830         s->smart_enabled = 1;
1833     case SMART_ATTR_AUTOSAVE:
1834         switch (s->sector) {
1836             s->smart_autosave = 0;
1839             s->smart_autosave = 1;
1847         if (!s->smart_errors) {
/* READ THRESHOLDS: one (id, threshold) pair per attribute. */
1856     case SMART_READ_THRESH:
1857         memset(s->io_buffer, 0, 0x200);
1858         s->io_buffer[0] = 0x01; /* smart struct version */
1860         for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
1861             s->io_buffer[2 + 0 + (n * 12)] = smart_attributes[n][0];
1862             s->io_buffer[2 + 1 + (n * 12)] = smart_attributes[n][11];
/* Checksum over bytes 0..510, stored in byte 511. */
1866         for (n = 0; n < 511; n++) {
1867             s->io_buffer[511] += s->io_buffer[n];
1869         s->io_buffer[511] = 0x100 - s->io_buffer[511];
1871         s->status = READY_STAT | SEEK_STAT;
1872         ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1873         ide_set_irq(s->bus);
/* READ DATA: full attribute table plus capability/self-test fields. */
1876     case SMART_READ_DATA:
1877         memset(s->io_buffer, 0, 0x200);
1878         s->io_buffer[0] = 0x01; /* smart struct version */
1880         for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
1882             for (i = 0; i < 11; i++) {
1883                 s->io_buffer[2 + i + (n * 12)] = smart_attributes[n][i];
1887         s->io_buffer[362] = 0x02 | (s->smart_autosave ? 0x80 : 0x00);
1888         if (s->smart_selftest_count == 0) {
1889             s->io_buffer[363] = 0;
1892                 s->smart_selftest_data[3 +
1893                                        (s->smart_selftest_count - 1) *
1896         s->io_buffer[364] = 0x20;
1897         s->io_buffer[365] = 0x01;
1898         /* offline data collection capacity: execute + self-test*/
1899         s->io_buffer[367] = (1 << 4 | 1 << 3 | 1);
1900         s->io_buffer[368] = 0x03; /* smart capability (1) */
1901         s->io_buffer[369] = 0x00; /* smart capability (2) */
1902         s->io_buffer[370] = 0x01; /* error logging supported */
1903         s->io_buffer[372] = 0x02; /* minutes for poll short test */
1904         s->io_buffer[373] = 0x36; /* minutes for poll ext test */
1905         s->io_buffer[374] = 0x01; /* minutes for poll conveyance */
1907         for (n = 0; n < 511; n++) {
1908             s->io_buffer[511] += s->io_buffer[n];
1910         s->io_buffer[511] = 0x100 - s->io_buffer[511];
1912         s->status = READY_STAT | SEEK_STAT;
1913         ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1914         ide_set_irq(s->bus);
/* READ LOG: sector selects the log page (error summary / self-test). */
1917     case SMART_READ_LOG:
1918         switch (s->sector) {
1919         case 0x01: /* summary smart error log */
1920             memset(s->io_buffer, 0, 0x200);
1921             s->io_buffer[0] = 0x01;
1922             s->io_buffer[1] = 0x00; /* no error entries */
1923             s->io_buffer[452] = s->smart_errors & 0xff;
1924             s->io_buffer[453] = (s->smart_errors & 0xff00) >> 8;
1926             for (n = 0; n < 511; n++) {
1927                 s->io_buffer[511] += s->io_buffer[n];
1929             s->io_buffer[511] = 0x100 - s->io_buffer[511];
1931         case 0x06: /* smart self test log */
1932             memset(s->io_buffer, 0, 0x200);
1933             s->io_buffer[0] = 0x01;
1934             if (s->smart_selftest_count == 0) {
1935                 s->io_buffer[508] = 0;
1937                 s->io_buffer[508] = s->smart_selftest_count;
1938                 for (n = 2; n < 506; n++) {
1939                     s->io_buffer[n] = s->smart_selftest_data[n];
1943             for (n = 0; n < 511; n++) {
1944                 s->io_buffer[511] += s->io_buffer[n];
1946             s->io_buffer[511] = 0x100 - s->io_buffer[511];
1951         s->status = READY_STAT | SEEK_STAT;
1952         ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1953         ide_set_irq(s->bus);
/* EXECUTE OFFLINE: record a (always passing) self-test entry in a
 * 24-byte-per-entry ring of at most 21 entries. */
1956     case SMART_EXECUTE_OFFLINE:
1957         switch (s->sector) {
1958         case 0: /* off-line routine */
1959         case 1: /* short self test */
1960         case 2: /* extended self test */
1961             s->smart_selftest_count++;
1962             if (s->smart_selftest_count > 21) {
1963                 s->smart_selftest_count = 1;
1965             n = 2 + (s->smart_selftest_count - 1) * 24;
1966             s->smart_selftest_data[n] = s->sector;
1967             s->smart_selftest_data[n + 1] = 0x00; /* OK and finished */
1968             s->smart_selftest_data[n + 2] = 0x34; /* hour count lsb */
1969             s->smart_selftest_data[n + 3] = 0x12; /* hour count msb */
1978     ide_abort_command(s);
/*
 * Per-drive-kind permission bits for each opcode, plus SET_DSC to make
 * ide_exec_cmd() raise SEEK_STAT on successful completion.
 */
1982 #define HD_OK (1u << IDE_HD)
1983 #define CD_OK (1u << IDE_CD)
1984 #define CFA_OK (1u << IDE_CFATA)
1985 #define HD_CFA_OK (HD_OK | CFA_OK)
1986 #define ALL_OK (HD_OK | CD_OK | CFA_OK)
1988 /* Set the Disk Seek Completed status bit during completion */
1989 #define SET_DSC (1u << 8)
1991 /* See ACS-2 T13/2015-D Table B.2 Command codes */
1992 static const struct {
1993     /* Returns true if the completion code should be run */
1994     bool (*handler)(IDEState *s, uint8_t cmd);
/* Opcode-indexed dispatch table: handler + permission/DSC flags.
 * Unlisted opcodes have a NULL handler and are rejected by
 * ide_cmd_permitted(). */
1996 } ide_cmd_table[0x100] = {
1997     /* NOP not implemented, mandatory for CD */
1998     [CFA_REQ_EXT_ERROR_CODE]      = { cmd_cfa_req_ext_error_code, CFA_OK },
1999     [WIN_DSM]                     = { cmd_data_set_management, HD_CFA_OK },
2000     [WIN_DEVICE_RESET]            = { cmd_device_reset, CD_OK },
2001     [WIN_RECAL]                   = { cmd_nop, HD_CFA_OK | SET_DSC},
2002     [WIN_READ]                    = { cmd_read_pio, ALL_OK },
2003     [WIN_READ_ONCE]               = { cmd_read_pio, HD_CFA_OK },
2004     [WIN_READ_EXT]                = { cmd_read_pio, HD_CFA_OK },
2005     [WIN_READDMA_EXT]             = { cmd_read_dma, HD_CFA_OK },
2006     [WIN_READ_NATIVE_MAX_EXT]     = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
2007     [WIN_MULTREAD_EXT]            = { cmd_read_multiple, HD_CFA_OK },
2008     [WIN_WRITE]                   = { cmd_write_pio, HD_CFA_OK },
2009     [WIN_WRITE_ONCE]              = { cmd_write_pio, HD_CFA_OK },
2010     [WIN_WRITE_EXT]               = { cmd_write_pio, HD_CFA_OK },
2011     [WIN_WRITEDMA_EXT]            = { cmd_write_dma, HD_CFA_OK },
2012     [CFA_WRITE_SECT_WO_ERASE]     = { cmd_write_pio, CFA_OK },
2013     [WIN_MULTWRITE_EXT]           = { cmd_write_multiple, HD_CFA_OK },
2014     [WIN_WRITE_VERIFY]            = { cmd_write_pio, HD_CFA_OK },
2015     [WIN_VERIFY]                  = { cmd_verify, HD_CFA_OK | SET_DSC },
2016     [WIN_VERIFY_ONCE]             = { cmd_verify, HD_CFA_OK | SET_DSC },
2017     [WIN_VERIFY_EXT]              = { cmd_verify, HD_CFA_OK | SET_DSC },
2018     [WIN_SEEK]                    = { cmd_seek, HD_CFA_OK | SET_DSC },
2019     [CFA_TRANSLATE_SECTOR]        = { cmd_cfa_translate_sector, CFA_OK },
2020     [WIN_DIAGNOSE]                = { cmd_exec_dev_diagnostic, ALL_OK },
2021     [WIN_SPECIFY]                 = { cmd_nop, HD_CFA_OK | SET_DSC },
2022     [WIN_STANDBYNOW2]             = { cmd_nop, HD_CFA_OK },
2023     [WIN_IDLEIMMEDIATE2]          = { cmd_nop, HD_CFA_OK },
2024     [WIN_STANDBY2]                = { cmd_nop, HD_CFA_OK },
2025     [WIN_SETIDLE2]                = { cmd_nop, HD_CFA_OK },
2026     [WIN_CHECKPOWERMODE2]         = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
2027     [WIN_SLEEPNOW2]               = { cmd_nop, HD_CFA_OK },
2028     [WIN_PACKETCMD]               = { cmd_packet, CD_OK },
2029     [WIN_PIDENTIFY]               = { cmd_identify_packet, CD_OK },
2030     [WIN_SMART]                   = { cmd_smart, HD_CFA_OK | SET_DSC },
2031     [CFA_ACCESS_METADATA_STORAGE] = { cmd_cfa_access_metadata_storage, CFA_OK },
2032     [CFA_ERASE_SECTORS]           = { cmd_cfa_erase_sectors, CFA_OK | SET_DSC },
2033     [WIN_MULTREAD]                = { cmd_read_multiple, HD_CFA_OK },
2034     [WIN_MULTWRITE]               = { cmd_write_multiple, HD_CFA_OK },
2035     [WIN_SETMULT]                 = { cmd_set_multiple_mode, HD_CFA_OK | SET_DSC },
2036     [WIN_READDMA]                 = { cmd_read_dma, HD_CFA_OK },
2037     [WIN_READDMA_ONCE]            = { cmd_read_dma, HD_CFA_OK },
2038     [WIN_WRITEDMA]                = { cmd_write_dma, HD_CFA_OK },
2039     [WIN_WRITEDMA_ONCE]           = { cmd_write_dma, HD_CFA_OK },
2040     [CFA_WRITE_MULTI_WO_ERASE]    = { cmd_write_multiple, CFA_OK },
2041     [WIN_STANDBYNOW1]             = { cmd_nop, HD_CFA_OK },
2042     [WIN_IDLEIMMEDIATE]           = { cmd_nop, HD_CFA_OK },
2043     [WIN_STANDBY]                 = { cmd_nop, HD_CFA_OK },
2044     [WIN_SETIDLE1]                = { cmd_nop, HD_CFA_OK },
2045     [WIN_CHECKPOWERMODE1]         = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
2046     [WIN_SLEEPNOW1]               = { cmd_nop, HD_CFA_OK },
2047     [WIN_FLUSH_CACHE]             = { cmd_flush_cache, ALL_OK },
2048     [WIN_FLUSH_CACHE_EXT]         = { cmd_flush_cache, HD_CFA_OK },
2049     [WIN_IDENTIFY]                = { cmd_identify, ALL_OK },
2050     [WIN_SETFEATURES]             = { cmd_set_features, ALL_OK | SET_DSC },
2051     [IBM_SENSE_CONDITION]         = { cmd_ibm_sense_condition, CFA_OK | SET_DSC },
2052     [CFA_WEAR_LEVEL]              = { cmd_cfa_erase_sectors, HD_CFA_OK | SET_DSC },
2053     [WIN_READ_NATIVE_MAX]         = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
/* A command is permitted when it is in range and its flags include the
 * bit for this drive's kind (HD/CD/CFATA). */
2056 static bool ide_cmd_permitted(IDEState *s, uint32_t cmd)
2058     return cmd < ARRAY_SIZE(ide_cmd_table)
2059         && (ide_cmd_table[cmd].flags & (1u << s->drive_kind));
/*
 * Execute an ATA command written to the command register.  Filters out
 * commands to a non-existent slave, enforces that only DEVICE RESET
 * (and only on ATAPI) is accepted while BSY/DRQ are set, checks the
 * permission table, then runs the handler.  If the handler returns
 * true the completion path runs: clear BUSY, optionally set DSC, and
 * raise the IRQ.
 */
2062 void ide_exec_cmd(IDEBus *bus, uint32_t val)
2067     s = idebus_active_if(bus);
2068     trace_ide_exec_cmd(bus, s, val);
2070     /* ignore commands to non existent slave */
2071     if (s != bus->ifs && !s->blk) {
2075     /* Only RESET is allowed while BSY and/or DRQ are set,
2076      * and only to ATAPI devices. */
2077     if (s->status & (BUSY_STAT|DRQ_STAT)) {
2078         if (val != WIN_DEVICE_RESET || s->drive_kind != IDE_CD) {
2083     if (!ide_cmd_permitted(s, val)) {
2084         ide_abort_command(s);
2085         ide_set_irq(s->bus);
2089     s->status = READY_STAT | BUSY_STAT;
2091     s->io_buffer_offset = 0;
2093     complete = ide_cmd_table[val].handler(s, val);
2095         s->status &= ~BUSY_STAT;
/* Handlers signal failure only via ide_abort_command(), which keeps
 * s->error and ERR_STAT in sync. */
2096         assert(!!s->error == !!(s->status & ERR_STAT));
2098         if ((ide_cmd_table[val].flags & SET_DSC) && !s->error) {
2099             s->status |= SEEK_STAT;
2103         ide_set_irq(s->bus);
/*
 * Symbolic names for the eight ATA command-block read registers
 * (decoded from addr & 7), plus a parallel name table used by tracing
 * in ide_ioport_read().
 */
2107 /* IOport [R]ead [R]egisters */
2108 enum ATA_IOPORT_RR {
2109     ATA_IOPORT_RR_DATA = 0,
2110     ATA_IOPORT_RR_ERROR = 1,
2111     ATA_IOPORT_RR_SECTOR_COUNT = 2,
2112     ATA_IOPORT_RR_SECTOR_NUMBER = 3,
2113     ATA_IOPORT_RR_CYLINDER_LOW = 4,
2114     ATA_IOPORT_RR_CYLINDER_HIGH = 5,
2115     ATA_IOPORT_RR_DEVICE_HEAD = 6,
2116     ATA_IOPORT_RR_STATUS = 7,
2117     ATA_IOPORT_RR_NUM_REGISTERS,
/* Register names indexed by the enum above; used for tracing. */
2120 const char *ATA_IOPORT_RR_lookup[ATA_IOPORT_RR_NUM_REGISTERS] = {
2121     [ATA_IOPORT_RR_DATA] = "Data",
2122     [ATA_IOPORT_RR_ERROR] = "Error",
2123     [ATA_IOPORT_RR_SECTOR_COUNT] = "Sector Count",
2124     [ATA_IOPORT_RR_SECTOR_NUMBER] = "Sector Number",
2125     [ATA_IOPORT_RR_CYLINDER_LOW] = "Cylinder Low",
2126     [ATA_IOPORT_RR_CYLINDER_HIGH] = "Cylinder High",
2127     [ATA_IOPORT_RR_DEVICE_HEAD] = "Device/Head",
2128     [ATA_IOPORT_RR_STATUS] = "Status"
/*
 * Guest read of an ATA command-block register.  Registers read as a
 * default value (not visible here) when the bus has no media at all;
 * the HOB path returns the latched hob_* copy of each register.
 * Reading the status register also lowers the IRQ line.
 *
 * NOTE(review): line numbers jump; the hob selection branches and
 * several return paths are not visible in this listing.
 */
2131 uint32_t ide_ioport_read(void *opaque, uint32_t addr)
2133     IDEBus *bus = opaque;
2134     IDEState *s = idebus_active_if(bus);
2139     /* FIXME: HOB readback uses bit 7, but it's always set right now */
2140     //hob = s->select & (1 << 7);
2143     case ATA_IOPORT_RR_DATA:
2146     case ATA_IOPORT_RR_ERROR:
2147         if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2148             (s != bus->ifs && !s->blk)) {
2153             ret = s->hob_feature;
2156     case ATA_IOPORT_RR_SECTOR_COUNT:
2157         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2160             ret = s->nsector & 0xff;
2162             ret = s->hob_nsector;
2165     case ATA_IOPORT_RR_SECTOR_NUMBER:
2166         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2171             ret = s->hob_sector;
2174     case ATA_IOPORT_RR_CYLINDER_LOW:
2175         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2183     case ATA_IOPORT_RR_CYLINDER_HIGH:
2184         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2192     case ATA_IOPORT_RR_DEVICE_HEAD:
2193         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2200     case ATA_IOPORT_RR_STATUS:
2201         if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2202             (s != bus->ifs && !s->blk)) {
/* Status read acknowledges the interrupt. */
2207         qemu_irq_lower(bus->irq);
2211     trace_ide_ioport_read(addr, ATA_IOPORT_RR_lookup[reg_num], ret, bus, s);
/*
 * Alternate-status read (control block): returns the status register
 * without the IRQ-acknowledge side effect of ide_ioport_read().
 * An empty bus / non-existent slave takes the (not visible) default
 * branch.
 */
2215 uint32_t ide_status_read(void *opaque, uint32_t addr)
2217     IDEBus *bus = opaque;
2218     IDEState *s = idebus_active_if(bus);
2221     if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2222         (s != bus->ifs && !s->blk)) {
2228     trace_ide_status_read(addr, ret, bus, s);
/*
 * Device-control register write: handles the software-reset handshake.
 * On the 0->1 edge of IDE_CMD_RESET both drives go BUSY; on the 1->0
 * edge both are reset, status is set per drive kind (ATAPI devices
 * report a cleared status) and the signature registers are restored.
 */
2232 void ide_cmd_write(void *opaque, uint32_t addr, uint32_t val)
2234     IDEBus *bus = opaque;
2238     trace_ide_cmd_write(addr, val, bus);
2240     /* common for both drives */
2241     if (!(bus->cmd & IDE_CMD_RESET) &&
2242         (val & IDE_CMD_RESET)) {
2243         /* reset low to high */
2244         for(i = 0;i < 2; i++) {
2246             s->status = BUSY_STAT | SEEK_STAT;
2249     } else if ((bus->cmd & IDE_CMD_RESET) &&
2250                !(val & IDE_CMD_RESET)) {
2252         for(i = 0;i < 2; i++) {
2254             if (s->drive_kind == IDE_CD)
2255                 s->status = 0x00; /* NOTE: READY is _not_ set */
2257                 s->status = READY_STAT | SEEK_STAT;
2258             ide_set_signature(s);
2266  * Returns true if the running PIO transfer is a PIO out (i.e. data is
2267  * transferred from the device to the guest), false if it's a PIO in
/* Direction is inferred from which end_transfer_func is installed:
 * write/atapi-cmd handlers mean the guest is sending data (PIO in),
 * the read/reply/stop handlers mean the device is sending (PIO out). */
2269 static bool ide_is_pio_out(IDEState *s)
2271     if (s->end_transfer_func == ide_sector_write ||
2272         s->end_transfer_func == ide_atapi_cmd) {
2274     } else if (s->end_transfer_func == ide_sector_read ||
2275                s->end_transfer_func == ide_transfer_stop ||
2276                s->end_transfer_func == ide_atapi_cmd_reply_end ||
2277                s->end_transfer_func == ide_dummy_transfer_stop) {
/*
 * PIO data-port accessors (16- and 32-bit).  Common pattern: access is
 * only valid while DRQ is set and in the matching direction; data
 * moves through [data_ptr, data_end) with a bounds check before each
 * access, and when the window is exhausted DRQ is cleared and the
 * installed end_transfer_func is invoked to continue or finish the
 * command.  Values cross the port in little-endian byte order.
 */
/* 16-bit guest write to the data port (PIO in). */
2284 void ide_data_writew(void *opaque, uint32_t addr, uint32_t val)
2286     IDEBus *bus = opaque;
2287     IDEState *s = idebus_active_if(bus);
2290     trace_ide_data_writew(addr, val, bus, s);
2292     /* PIO data access allowed only when DRQ bit is set. The result of a write
2293      * during PIO out is indeterminate, just ignore it. */
2294     if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2299     if (p + 2 > s->data_end) {
2303     *(uint16_t *)p = le16_to_cpu(val);
2306     if (p >= s->data_end) {
2307         s->status &= ~DRQ_STAT;
2308         s->end_transfer_func(s);
/* 16-bit guest read from the data port (PIO out). */
2312 uint32_t ide_data_readw(void *opaque, uint32_t addr)
2314     IDEBus *bus = opaque;
2315     IDEState *s = idebus_active_if(bus);
2319     /* PIO data access allowed only when DRQ bit is set. The result of a read
2320      * during PIO in is indeterminate, return 0 and don't move forward. */
2321     if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2326     if (p + 2 > s->data_end) {
2330     ret = cpu_to_le16(*(uint16_t *)p);
2333     if (p >= s->data_end) {
2334         s->status &= ~DRQ_STAT;
2335         s->end_transfer_func(s);
2338     trace_ide_data_readw(addr, ret, bus, s);
/* 32-bit guest write to the data port. */
2342 void ide_data_writel(void *opaque, uint32_t addr, uint32_t val)
2344     IDEBus *bus = opaque;
2345     IDEState *s = idebus_active_if(bus);
2348     trace_ide_data_writel(addr, val, bus, s);
2350     /* PIO data access allowed only when DRQ bit is set. The result of a write
2351      * during PIO out is indeterminate, just ignore it. */
2352     if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2357     if (p + 4 > s->data_end) {
2361     *(uint32_t *)p = le32_to_cpu(val);
2364     if (p >= s->data_end) {
2365         s->status &= ~DRQ_STAT;
2366         s->end_transfer_func(s);
/* 32-bit guest read from the data port. */
2370 uint32_t ide_data_readl(void *opaque, uint32_t addr)
2372     IDEBus *bus = opaque;
2373     IDEState *s = idebus_active_if(bus);
2377     /* PIO data access allowed only when DRQ bit is set. The result of a read
2378      * during PIO in is indeterminate, return 0 and don't move forward. */
2379     if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2385     if (p + 4 > s->data_end) {
2389     ret = cpu_to_le32(*(uint32_t *)p);
2392     if (p >= s->data_end) {
2393         s->status &= ~DRQ_STAT;
2394         s->end_transfer_func(s);
2398     trace_ide_data_readl(addr, ret, bus, s);
/*
 * Default transfer handler used after reset: collapse the data window
 * to an empty range and fill the first bytes with 0xff so stray data
 * port reads return all-ones.
 */
2402 static void ide_dummy_transfer_stop(IDEState *s)
2404     s->data_ptr = s->io_buffer;
2405     s->data_end = s->io_buffer;
2406     s->io_buffer[0] = 0xff;
2407     s->io_buffer[1] = 0xff;
2408     s->io_buffer[2] = 0xff;
2409     s->io_buffer[3] = 0xff;
/*
 * Full bus reset: reset both drives, cancel any pending async DMA on
 * the bus, then let the DMA provider reset itself.
 */
2412 void ide_bus_reset(IDEBus *bus)
2416     ide_reset(&bus->ifs[0]);
2417     ide_reset(&bus->ifs[1]);
2420     /* pending async DMA */
2421     if (bus->dma->aiocb) {
2422         trace_ide_bus_reset_aio();
2423         blk_aio_cancel(bus->dma->aiocb);
2424         bus->dma->aiocb = NULL;
2427     /* reset dma provider too */
2428     if (bus->dma->ops->reset) {
2429         bus->dma->ops->reset(bus->dma);
/* BlockDevOps callbacks for CD media: report tray state to the block layer. */
2433 static bool ide_cd_is_tray_open(void *opaque)
2435     return ((IDEState *)opaque)->tray_open;
2438 static bool ide_cd_is_medium_locked(void *opaque)
2440     return ((IDEState *)opaque)->tray_locked;
/*
 * Block-backend resize callback for HD/CFATA drives: refresh
 * nb_sectors from the backend geometry and update the size fields of
 * the cached IDENTIFY data.  CD drives use a different callback set
 * and never reach this function (asserted below).
 */
2443 static void ide_resize_cb(void *opaque)
2445     IDEState *s = opaque;
2446     uint64_t nb_sectors;
2448     if (!s->identify_set) {
2452     blk_get_geometry(s->blk, &nb_sectors);
2453     s->nb_sectors = nb_sectors;
2455     /* Update the identify data buffer. */
2456     if (s->drive_kind == IDE_CFATA) {
2457         ide_cfata_identify_size(s);
2459         /* IDE_CD uses a different set of callbacks entirely. */
2460         assert(s->drive_kind != IDE_CD);
2461         ide_identify_size(s);
/* BlockDevOps: CD drives get media-change/tray callbacks, hard disks
 * only need resize notification. */
2465 static const BlockDevOps ide_cd_block_ops = {
2466     .change_media_cb = ide_cd_change_cb,
2467     .eject_request_cb = ide_cd_eject_request_cb,
2468     .is_tray_open = ide_cd_is_tray_open,
2469     .is_medium_locked = ide_cd_is_medium_locked,
2472 static const BlockDevOps ide_hd_block_ops = {
2473     .resize_cb = ide_resize_cb,
/*
 * Attach a block backend to an IDEState and initialize drive identity:
 * geometry, SMART defaults, BlockDevOps per drive kind, serial/model/
 * version strings (with defaults when the caller passes none).
 * Returns an error via errp for an empty CD-less drive or a read-only
 * backend used as a disk.
 */
2476 int ide_init_drive(IDEState *s, BlockBackend *blk, IDEDriveKind kind,
2477                    const char *version, const char *serial, const char *model,
2479                    uint32_t cylinders, uint32_t heads, uint32_t secs,
2480                    int chs_trans, Error **errp)
2482     uint64_t nb_sectors;
2485     s->drive_kind = kind;
2487     blk_get_geometry(blk, &nb_sectors);
2488     s->cylinders = cylinders;
2491     s->chs_trans = chs_trans;
2492     s->nb_sectors = nb_sectors;
2494     /* The SMART values should be preserved across power cycles
2496     s->smart_enabled = 1;
2497     s->smart_autosave = 1;
2498     s->smart_errors = 0;
2499     s->smart_selftest_count = 0;
2500     if (kind == IDE_CD) {
2501         blk_set_dev_ops(blk, &ide_cd_block_ops, s);
2502         blk_set_guest_block_size(blk, 2048);
2504         if (!blk_is_inserted(s->blk)) {
2505             error_setg(errp, "Device needs media, but drive is empty");
2508         if (blk_is_read_only(blk)) {
2509             error_setg(errp, "Can't use a read-only drive");
2512         blk_set_dev_ops(blk, &ide_hd_block_ops, s);
/* Serial: caller-supplied, otherwise "QM%05d" from the per-device counter. */
2515         pstrcpy(s->drive_serial_str, sizeof(s->drive_serial_str), serial);
2517         snprintf(s->drive_serial_str, sizeof(s->drive_serial_str),
2518                  "QM%05d", s->drive_serial);
/* Model: caller-supplied, otherwise a per-kind default. */
2521         pstrcpy(s->drive_model_str, sizeof(s->drive_model_str), model);
2525             strcpy(s->drive_model_str, "QEMU DVD-ROM");
2528             strcpy(s->drive_model_str, "QEMU MICRODRIVE");
2531             strcpy(s->drive_model_str, "QEMU HARDDISK");
2537         pstrcpy(s->version, sizeof(s->version), version);
2539         pstrcpy(s->version, sizeof(s->version), qemu_hw_version());
2543     blk_iostatus_enable(blk);
/*
 * One-time per-drive initialization: assign a unique serial number,
 * allocate the (2k-aligned, for O_DIRECT CD access) I/O buffer and the
 * SMART self-test log, and create the sector-write throttling timer.
 */
2547 static void ide_init1(IDEBus *bus, int unit)
2549     static int drive_serial = 1;
2550     IDEState *s = &bus->ifs[unit];
2554     s->drive_serial = drive_serial++;
2555     /* we need at least 2k alignment for accessing CDROMs using O_DIRECT */
2556     s->io_buffer_total_len = IDE_DMA_BUF_SECTORS*512 + 4;
2557     s->io_buffer = qemu_memalign(2048, s->io_buffer_total_len);
2558     memset(s->io_buffer, 0, s->io_buffer_total_len);
2560     s->smart_selftest_data = blk_blockalign(s->blk, 512);
2561     memset(s->smart_selftest_data, 0, 512);
2563     s->sector_write_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
2564                                          ide_sector_write_timer_cb, s);
/* No-op IDEDMA callbacks used when no real DMA provider is attached. */
2567 static int ide_nop_int(IDEDMA *dma, int x)
2572 static void ide_nop(IDEDMA *dma)
2576 static int32_t ide_nop_int32(IDEDMA *dma, int32_t l)
2581 static const IDEDMAOps ide_dma_nop_ops = {
2582     .prepare_buf = ide_nop_int32,
2583     .restart_dma = ide_nop,
2584     .rw_buf = ide_nop_int,
/*
 * Restart a DMA transfer after an error/vm-stop: restore the unit,
 * sector and count saved in the bus retry fields, re-arm the HBA's DMA
 * engine, and kick the transfer again.
 */
2587 static void ide_restart_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
2589     s->unit = s->bus->retry_unit;
2590     ide_set_sector(s, s->bus->retry_sector_num);
2591     s->nsector = s->bus->retry_nsector;
2592     s->bus->dma->ops->restart_dma(s->bus->dma);
2593     s->io_buffer_size = 0;
2594     s->dma_cmd = dma_cmd;
2595     ide_start_dma(s, ide_dma_cb);
/*
 * Bottom half run when the VM resumes: re-dispatch whatever request
 * was pending according to bus->error_status (HBA kick, DMA, PIO,
 * flush or ATAPI retry).  The status is cleared up-front so a repeated
 * failure can set a fresh one.
 */
2598 static void ide_restart_bh(void *opaque)
2600     IDEBus *bus = opaque;
2605     qemu_bh_delete(bus->bh);
2608     error_status = bus->error_status;
2609     if (bus->error_status == 0) {
2613     s = idebus_active_if(bus);
2614     is_read = (bus->error_status & IDE_RETRY_READ) != 0;
2616     /* The error status must be cleared before resubmitting the request: The
2617      * request may fail again, and this case can only be distinguished if the
2618      * called function can set a new error status. */
2619     bus->error_status = 0;
2621     /* The HBA has generically asked to be kicked on retry */
2622     if (error_status & IDE_RETRY_HBA) {
2623         if (s->bus->dma->ops->restart) {
2624             s->bus->dma->ops->restart(s->bus->dma);
2626     } else if (IS_IDE_RETRY_DMA(error_status)) {
2627         if (error_status & IDE_RETRY_TRIM) {
2628             ide_restart_dma(s, IDE_DMA_TRIM);
2630             ide_restart_dma(s, is_read ? IDE_DMA_READ : IDE_DMA_WRITE);
2632     } else if (IS_IDE_RETRY_PIO(error_status)) {
2636             ide_sector_write(s);
2638     } else if (error_status & IDE_RETRY_FLUSH) {
2640     } else if (IS_IDE_RETRY_ATAPI(error_status)) {
2641         assert(s->end_transfer_func == ide_atapi_cmd);
2642         ide_atapi_dma_restart(s);
/*
 * VM-state change handler: when the VM resumes, schedule the restart
 * bottom half so interrupted requests are re-issued outside the
 * callback context.
 */
2648 static void ide_restart_cb(void *opaque, int running, RunState state)
2650     IDEBus *bus = opaque;
2656         bus->bh = qemu_bh_new(ide_restart_bh, bus);
2657         qemu_bh_schedule(bus->bh);
/* Only register the restart handler for DMA providers that can
 * actually restart a transfer. */
2661 void ide_register_restart_cb(IDEBus *bus)
2663     if (bus->dma->ops->restart_dma) {
2664         bus->vmstate = qemu_add_vm_change_state_handler(ide_restart_cb, bus);
/* Placeholder DMA provider installed until a real one is attached. */
2668 static IDEDMA ide_dma_nop = {
2669     .ops = &ide_dma_nop_ops,
/* Second-stage bus init: reset both drives, wire the IRQ (not visible
 * here) and install the nop DMA provider. */
2673 void ide_init2(IDEBus *bus, qemu_irq irq)
2677     for(i = 0; i < 2; i++) {
2679         ide_reset(&bus->ifs[i]);
2682     bus->dma = &ide_dma_nop;
/* Tear down per-drive resources allocated in ide_init1(). */
2685 void ide_exit(IDEState *s)
2687     timer_del(s->sector_write_timer);
2688     timer_free(s->sector_write_timer);
2689     qemu_vfree(s->smart_selftest_data);
2690     qemu_vfree(s->io_buffer);
/*
 * Legacy I/O port layout: byte access to the eight command-block
 * registers, plus 16- and 32-bit access to the data port at offset 0;
 * the control block is a single byte for alt-status/device-control.
 */
2693 static const MemoryRegionPortio ide_portio_list[] = {
2694     { 0, 8, 1, .read = ide_ioport_read, .write = ide_ioport_write },
2695     { 0, 1, 2, .read = ide_data_readw, .write = ide_data_writew },
2696     { 0, 1, 4, .read = ide_data_readl, .write = ide_data_writel },
2697     PORTIO_END_OF_LIST(),
2700 static const MemoryRegionPortio ide_portio2_list[] = {
2701     { 0, 1, 1, .read = ide_status_read, .write = ide_cmd_write },
2702     PORTIO_END_OF_LIST(),
/* Register the two port ranges with the ISA bus. */
2705 void ide_init_ioport(IDEBus *bus, ISADevice *dev, int iobase, int iobase2)
2707     /* ??? Assume only ISA and PCI configurations, and that the PCI-ISA
2708        bridge has been setup properly to always register with ISA.  */
2709     isa_register_portio_list(dev, &bus->portio_list,
2710                              iobase, ide_portio_list, bus, "ide");
2713         isa_register_portio_list(dev, &bus->portio2_list,
2714                                  iobase2, ide_portio2_list, bus, "ide");
/*
 * vmstate field-exists test: the identify_data buffer is only migrated
 * once the drive has populated it (identify_set != 0).
 */
2718 static bool is_identify_set(void *opaque, int version_id)
2720     IDEState *s = opaque;
2722     return s->identify_set != 0;
/*
 * Table mapping end_transfer_func function pointers to small integer
 * indices, so the pointer can be serialised for migration as
 * end_transfer_fn_idx (see ide_drive_pio_pre_save/post_load).
 * NOTE(review): further entries (sector read/write, transfer stop, ...)
 * appear elided from this excerpt; the index values matter for
 * cross-version migration compatibility, so ordering must not change.
 */
2725 static EndTransferFunc* transfer_end_table[] = {
2729     ide_atapi_cmd_reply_end,
2731     ide_dummy_transfer_stop,
/*
 * Return the index of @fn in transfer_end_table.  The miss/return path is
 * not visible in this excerpt; callers (ide_drive_pio_pre_save) treat a
 * negative result as "not found".
 */
2734 static int transfer_end_table_idx(EndTransferFunc *fn)
2738     for (i = 0; i < ARRAY_SIZE(transfer_end_table); i++)
2739         if (transfer_end_table[i] == fn)
/*
 * Post-load hook for vmstate_ide_drive: re-derive the block layer's
 * write-cache enable state from the migrated IDENTIFY data (word 85,
 * bit 5 = write cache enabled) instead of migrating the setting itself.
 */
2745 static int ide_drive_post_load(void *opaque, int version_id)
2747     IDEState *s = opaque;
2749     if (s->blk && s->identify_set) {
2750         blk_set_enable_write_cache(s->blk, !!(s->identify_data[85] & (1 << 5)));
/*
 * Post-load hook for the PIO-state subsection: validate the migrated
 * end-transfer function index (reject out-of-range values from a hostile
 * or corrupted stream), then rebuild the live data_ptr/data_end pointers
 * from the saved offset/length relative to io_buffer, and re-derive the
 * ATAPI DMA flag from the saved feature register.
 */
2755 static int ide_drive_pio_post_load(void *opaque, int version_id)
2757     IDEState *s = opaque;
2759     if (s->end_transfer_fn_idx >= ARRAY_SIZE(transfer_end_table)) {
2762     s->end_transfer_func = transfer_end_table[s->end_transfer_fn_idx];
2763     s->data_ptr = s->io_buffer + s->cur_io_buffer_offset;
2764     s->data_end = s->data_ptr + s->cur_io_buffer_len;
2765     s->atapi_dma = s->feature & 1; /* as per cmd_packet */
/*
 * Pre-save hook for the PIO-state subsection: pointers cannot be
 * migrated, so convert data_ptr/data_end into an offset and length
 * relative to io_buffer, and the end_transfer_func pointer into its
 * transfer_end_table index.  An unrecognised function pointer is
 * reported on stderr and index 2 is used as a fallback.
 */
2770 static int ide_drive_pio_pre_save(void *opaque)
2772     IDEState *s = opaque;
2775     s->cur_io_buffer_offset = s->data_ptr - s->io_buffer;
2776     s->cur_io_buffer_len = s->data_end - s->data_ptr;
2778     idx = transfer_end_table_idx(s->end_transfer_func);
2780         fprintf(stderr, "%s: invalid end_transfer_func for DRQ_STAT\n",
2782         s->end_transfer_fn_idx = 2;
2784         s->end_transfer_fn_idx = idx;
/*
 * Subsection predicate: PIO state is only worth migrating while a data
 * transfer is in flight (DRQ set) or a PIO retry is pending on the bus.
 */
2790 static bool ide_drive_pio_state_needed(void *opaque)
2792     IDEState *s = opaque;
2794     return ((s->status & DRQ_STAT) != 0)
2795         || (s->bus->error_status & IDE_RETRY_PIO);
/*
 * Subsection predicate: only migrate tray state when it differs from the
 * default (tray closed and unlocked).
 */
2798 static bool ide_tray_state_needed(void *opaque)
2800     IDEState *s = opaque;
2802     return s->tray_open || s->tray_locked;
/*
 * Subsection predicate: only migrate GET EVENT STATUS NOTIFICATION state
 * when an event (new media or eject request) is actually pending.
 */
2805 static bool ide_atapi_gesn_needed(void *opaque)
2807     IDEState *s = opaque;
2809     return s->events.new_media || s->events.eject_request;
/*
 * Subsection predicate: only migrate bus error/retry state when an error
 * is outstanding.
 */
2812 static bool ide_error_needed(void *opaque)
2814     IDEBus *bus = opaque;
2816     return (bus->error_status != 0);
2819 /* Fields for GET_EVENT_STATUS_NOTIFICATION ATAPI command */
/*
 * Optional subsection of vmstate_ide_drive; emitted only when
 * ide_atapi_gesn_needed() reports a pending media event, so streams from
 * versions without this subsection still load.
 */
2820 static const VMStateDescription vmstate_ide_atapi_gesn_state = {
2821     .name ="ide_drive/atapi/gesn_state",
2823     .minimum_version_id = 1,
2824     .needed = ide_atapi_gesn_needed,
2825     .fields = (VMStateField[]) {
2826         VMSTATE_BOOL(events.new_media, IDEState),
2827         VMSTATE_BOOL(events.eject_request, IDEState),
2828         VMSTATE_END_OF_LIST()
/*
 * Optional subsection of vmstate_ide_drive carrying CD-ROM tray state;
 * emitted only when the tray is open or locked (ide_tray_state_needed).
 */
2832 static const VMStateDescription vmstate_ide_tray_state = {
2833     .name = "ide_drive/tray_state",
2835     .minimum_version_id = 1,
2836     .needed = ide_tray_state_needed,
2837     .fields = (VMStateField[]) {
2838         VMSTATE_BOOL(tray_open, IDEState),
2839         VMSTATE_BOOL(tray_locked, IDEState),
2840         VMSTATE_END_OF_LIST()
/*
 * Optional subsection of vmstate_ide_drive carrying in-flight PIO
 * transfer state: the io_buffer contents (variable length, bounded by
 * io_buffer_total_len), the pointer window encoded as offset/length by
 * ide_drive_pio_pre_save, and the end-transfer callback encoded as a
 * table index.  Restored by ide_drive_pio_post_load.
 */
2844 static const VMStateDescription vmstate_ide_drive_pio_state = {
2845     .name = "ide_drive/pio_state",
2847     .minimum_version_id = 1,
2848     .pre_save = ide_drive_pio_pre_save,
2849     .post_load = ide_drive_pio_post_load,
2850     .needed = ide_drive_pio_state_needed,
2851     .fields = (VMStateField[]) {
2852         VMSTATE_INT32(req_nb_sectors, IDEState),
2853         VMSTATE_VARRAY_INT32(io_buffer, IDEState, io_buffer_total_len, 1,
2854                              vmstate_info_uint8, uint8_t),
2855         VMSTATE_INT32(cur_io_buffer_offset, IDEState),
2856         VMSTATE_INT32(cur_io_buffer_len, IDEState),
2857         VMSTATE_UINT8(end_transfer_fn_idx, IDEState),
2858         VMSTATE_INT32(elementary_transfer_size, IDEState),
2859         VMSTATE_INT32(packet_transfer_size, IDEState),
2860         VMSTATE_END_OF_LIST()
/*
 * Main migration description for a single IDE drive: task-file
 * registers, their HOB (high-order byte, LBA48) shadows, status and
 * ATAPI sense data.  identify_data is only sent when IDENTIFY has been
 * populated (is_identify_set); ide_drive_post_load re-derives the
 * write-cache setting from it.  Field order and version tags are part of
 * the wire format and must not change.
 */
2864 const VMStateDescription vmstate_ide_drive = {
2865     .name = "ide_drive",
2867     .minimum_version_id = 0,
2868     .post_load = ide_drive_post_load,
2869     .fields = (VMStateField[]) {
2870         VMSTATE_INT32(mult_sectors, IDEState),
2871         VMSTATE_INT32(identify_set, IDEState),
2872         VMSTATE_BUFFER_TEST(identify_data, IDEState, is_identify_set),
2873         VMSTATE_UINT8(feature, IDEState),
2874         VMSTATE_UINT8(error, IDEState),
2875         VMSTATE_UINT32(nsector, IDEState),
2876         VMSTATE_UINT8(sector, IDEState),
2877         VMSTATE_UINT8(lcyl, IDEState),
2878         VMSTATE_UINT8(hcyl, IDEState),
2879         VMSTATE_UINT8(hob_feature, IDEState),
2880         VMSTATE_UINT8(hob_sector, IDEState),
2881         VMSTATE_UINT8(hob_nsector, IDEState),
2882         VMSTATE_UINT8(hob_lcyl, IDEState),
2883         VMSTATE_UINT8(hob_hcyl, IDEState),
2884         VMSTATE_UINT8(select, IDEState),
2885         VMSTATE_UINT8(status, IDEState),
2886         VMSTATE_UINT8(lba48, IDEState),
2887         VMSTATE_UINT8(sense_key, IDEState),
2888         VMSTATE_UINT8(asc, IDEState),
         /* only sent by streams of version >= 3 */
2889         VMSTATE_UINT8_V(cdrom_changed, IDEState, 3),
2890         VMSTATE_END_OF_LIST()
     /* conditionally-present extra state; each has its own .needed test */
2892     .subsections = (const VMStateDescription*[]) {
2893         &vmstate_ide_drive_pio_state,
2894         &vmstate_ide_tray_state,
2895         &vmstate_ide_atapi_gesn_state,
/*
 * Optional subsection of vmstate_ide_bus carrying pending error/retry
 * state; emitted only when error_status is non-zero (ide_error_needed).
 * The retry_* fields were added in version 2 of this subsection.
 */
2900 static const VMStateDescription vmstate_ide_error_status = {
2901     .name ="ide_bus/error",
2903     .minimum_version_id = 1,
2904     .needed = ide_error_needed,
2905     .fields = (VMStateField[]) {
2906         VMSTATE_INT32(error_status, IDEBus),
2907         VMSTATE_INT64_V(retry_sector_num, IDEBus, 2),
2908         VMSTATE_UINT32_V(retry_nsector, IDEBus, 2),
2909         VMSTATE_UINT8_V(retry_unit, IDEBus, 2),
2910         VMSTATE_END_OF_LIST()
/*
 * Migration description for the bus itself: the device-control register
 * (cmd) and the currently selected unit, plus the error/retry subsection
 * when an error is outstanding.
 */
2914 const VMStateDescription vmstate_ide_bus = {
2917     .minimum_version_id = 1,
2918     .fields = (VMStateField[]) {
2919         VMSTATE_UINT8(cmd, IDEBus),
2920         VMSTATE_UINT8(unit, IDEBus),
2921         VMSTATE_END_OF_LIST()
2923     .subsections = (const VMStateDescription*[]) {
2924         &vmstate_ide_error_status,
2929 void ide_drive_get(DriveInfo **hd, int n)
2933 for (i = 0; i < n; i++) {
2934 hd[i] = drive_get_by_index(IF_IDE, i);