2 * QEMU IDE disk and CD/DVD-ROM Emulator
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2006 Openedhand Ltd.
7 * Permission is hereby granted, free of charge, to any person obtaining a copy
8 * of this software and associated documentation files (the "Software"), to deal
9 * in the Software without restriction, including without limitation the rights
10 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 * copies of the Software, and to permit persons to whom the Software is
12 * furnished to do so, subject to the following conditions:
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
25 #include "qemu/osdep.h"
27 #include <hw/i386/pc.h>
28 #include <hw/pci/pci.h>
29 #include <hw/isa/isa.h>
30 #include "qemu/error-report.h"
31 #include "qemu/timer.h"
32 #include "sysemu/sysemu.h"
33 #include "sysemu/dma.h"
34 #include "hw/block/block.h"
35 #include "sysemu/block-backend.h"
37 #include <hw/ide/internal.h>
39 /* These values were based on a Seagate ST3500418AS but have been modified
40 to make more sense in QEMU */
41 static const int smart_attributes[][12] = {
42 /* id, flags, hflags, val, wrst, raw (6 bytes), threshold */
43 /* raw read error rate*/
44 { 0x01, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06},
/* spin up time (standard SMART attribute id 0x03) */
46 { 0x03, 0x03, 0x00, 0x64, 0x64, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
47 /* start stop count */
48 { 0x04, 0x02, 0x00, 0x64, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14},
49 /* remapped sectors */
50 { 0x05, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24},
/* power on hours (standard SMART attribute id 0x09) */
52 { 0x09, 0x03, 0x00, 0x64, 0x64, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
53 /* power cycle count */
54 { 0x0c, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
55 /* airflow-temperature-celsius */
56 { 190, 0x03, 0x00, 0x45, 0x45, 0x1f, 0x00, 0x1f, 0x1f, 0x00, 0x00, 0x32},
59 static int ide_handle_rw_error(IDEState *s, int error, int op);
60 static void ide_dummy_transfer_stop(IDEState *s);
/*
 * Copy src into str, padded out to len bytes.
 * NOTE(review): loop body elided in this view — ATA identify strings are
 * stored byte-swapped within each 16-bit word and space-padded; confirm the
 * elided body writes str[i ^ 1] with ' ' once src is exhausted.
 */
62 static void padstr(char *str, const char *src, int len)
65 for(i = 0; i < len; i++) {
/* Store the low 16 bits of v at *p in little-endian byte order
 * (body elided here — presumably *p = cpu_to_le16(v); confirm). */
74 static void put_le16(uint16_t *p, unsigned int v)
79 static void ide_identify_size(IDEState *s)
81 uint16_t *p = (uint16_t *)s->identify_data;
82 put_le16(p + 60, s->nb_sectors);
83 put_le16(p + 61, s->nb_sectors >> 16);
84 put_le16(p + 100, s->nb_sectors);
85 put_le16(p + 101, s->nb_sectors >> 16);
86 put_le16(p + 102, s->nb_sectors >> 32);
87 put_le16(p + 103, s->nb_sectors >> 48);
/*
 * Build the 512-byte ATA IDENTIFY DEVICE response for a hard disk, caching
 * it in s->identify_data (the identify_set early-out reuses the cache) and
 * copying the result into s->io_buffer for the PIO transfer.
 * Word numbering follows the ATA/ATAPI IDENTIFY DEVICE layout.
 */
90 static void ide_identify(IDEState *s)
94 IDEDevice *dev = s->unit ? s->bus->slave : s->bus->master;
96 p = (uint16_t *)s->identify_data;
97 if (s->identify_set) {
100 memset(p, 0, sizeof(s->identify_data));
102 put_le16(p + 0, 0x0040);
103 put_le16(p + 1, s->cylinders);
104 put_le16(p + 3, s->heads);
105 put_le16(p + 4, 512 * s->sectors); /* XXX: retired, remove ? */
106 put_le16(p + 5, 512); /* XXX: retired, remove ? */
107 put_le16(p + 6, s->sectors);
108 padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
109 put_le16(p + 20, 3); /* XXX: retired, remove ? */
110 put_le16(p + 21, 512); /* cache size in sectors */
111 put_le16(p + 22, 4); /* ecc bytes */
112 padstr((char *)(p + 23), s->version, 8); /* firmware version */
113 padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
114 #if MAX_MULT_SECTORS > 1
115 put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
117 put_le16(p + 48, 1); /* dword I/O */
118 put_le16(p + 49, (1 << 11) | (1 << 9) | (1 << 8)); /* DMA and LBA supported */
119 put_le16(p + 51, 0x200); /* PIO transfer cycle */
120 put_le16(p + 52, 0x200); /* DMA transfer cycle */
121 put_le16(p + 53, 1 | (1 << 1) | (1 << 2)); /* words 54-58,64-70,88 are valid */
122 put_le16(p + 54, s->cylinders);
123 put_le16(p + 55, s->heads);
124 put_le16(p + 56, s->sectors);
/* current capacity in sectors from the current CHS translation */
125 oldsize = s->cylinders * s->heads * s->sectors;
126 put_le16(p + 57, oldsize);
127 put_le16(p + 58, oldsize >> 16);
129 put_le16(p + 59, 0x100 | s->mult_sectors);
130 /* *(p + 60) := nb_sectors -- see ide_identify_size */
131 /* *(p + 61) := nb_sectors >> 16 -- see ide_identify_size */
132 put_le16(p + 62, 0x07); /* single word dma0-2 supported */
133 put_le16(p + 63, 0x07); /* mdma0-2 supported */
134 put_le16(p + 64, 0x03); /* pio3-4 supported */
135 put_le16(p + 65, 120);
136 put_le16(p + 66, 120);
137 put_le16(p + 67, 120);
138 put_le16(p + 68, 120);
139 if (dev && dev->conf.discard_granularity) {
140 put_le16(p + 69, (1 << 14)); /* determinate TRIM behavior */
/* NCQ depth / SATA capability words — guarded by elided conditionals */
144 put_le16(p + 75, s->ncq_queues - 1);
146 put_le16(p + 76, (1 << 8));
149 put_le16(p + 80, 0xf0); /* ata3 -> ata6 supported */
150 put_le16(p + 81, 0x16); /* conforms to ata5 */
151 /* 14=NOP supported, 5=WCACHE supported, 0=SMART supported */
152 put_le16(p + 82, (1 << 14) | (1 << 5) | 1);
153 /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
154 put_le16(p + 83, (1 << 14) | (1 << 13) | (1 <<12) | (1 << 10));
155 /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
/* the two stores to word 84 are the arms of an elided if (s->wwn) */
157 put_le16(p + 84, (1 << 14) | (1 << 8) | 0);
159 put_le16(p + 84, (1 << 14) | 0);
161 /* 14 = NOP supported, 5=WCACHE enabled, 0=SMART feature set enabled */
162 if (blk_enable_write_cache(s->blk)) {
163 put_le16(p + 85, (1 << 14) | (1 << 5) | 1);
165 put_le16(p + 85, (1 << 14) | 1);
167 /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
168 put_le16(p + 86, (1 << 13) | (1 <<12) | (1 << 10));
169 /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
/* the two stores to word 87 are the arms of an elided if (s->wwn) */
171 put_le16(p + 87, (1 << 14) | (1 << 8) | 0);
173 put_le16(p + 87, (1 << 14) | 0);
175 put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
176 put_le16(p + 93, 1 | (1 << 14) | 0x2000);
177 /* *(p + 100) := nb_sectors -- see ide_identify_size */
178 /* *(p + 101) := nb_sectors >> 16 -- see ide_identify_size */
179 /* *(p + 102) := nb_sectors >> 32 -- see ide_identify_size */
180 /* *(p + 103) := nb_sectors >> 48 -- see ide_identify_size */
182 if (dev && dev->conf.physical_block_size)
183 put_le16(p + 106, 0x6000 | get_physical_block_exp(&dev->conf));
185 /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
186 put_le16(p + 108, s->wwn >> 48);
187 put_le16(p + 109, s->wwn >> 32);
188 put_le16(p + 110, s->wwn >> 16);
189 put_le16(p + 111, s->wwn);
191 if (dev && dev->conf.discard_granularity) {
192 put_le16(p + 169, 1); /* TRIM support */
195 ide_identify_size(s);
199 memcpy(s->io_buffer, p, sizeof(s->identify_data));
/*
 * Build the IDENTIFY PACKET DEVICE response for an ATAPI CD/DVD drive,
 * caching it in s->identify_data and copying it into s->io_buffer.
 * The paired stores to words 49/53/62/63 are the arms of an elided
 * conditional (DMA-capable vs. PIO-only).
 */
202 static void ide_atapi_identify(IDEState *s)
206 p = (uint16_t *)s->identify_data;
207 if (s->identify_set) {
210 memset(p, 0, sizeof(s->identify_data));
212 /* Removable CDROM, 50us response, 12 byte packets */
213 put_le16(p + 0, (2 << 14) | (5 << 8) | (1 << 7) | (2 << 5) | (0 << 0));
214 padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
215 put_le16(p + 20, 3); /* buffer type */
216 put_le16(p + 21, 512); /* cache size in sectors */
217 put_le16(p + 22, 4); /* ecc bytes */
218 padstr((char *)(p + 23), s->version, 8); /* firmware version */
219 padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
220 put_le16(p + 48, 1); /* dword I/O (XXX: should not be set on CDROM) */
222 put_le16(p + 49, 1 << 9 | 1 << 8); /* DMA and LBA supported */
223 put_le16(p + 53, 7); /* words 64-70, 54-58, 88 valid */
224 put_le16(p + 62, 7); /* single word dma0-2 supported */
225 put_le16(p + 63, 7); /* mdma0-2 supported */
227 put_le16(p + 49, 1 << 9); /* LBA supported, no DMA */
228 put_le16(p + 53, 3); /* words 64-70, 54-58 valid */
229 put_le16(p + 63, 0x103); /* DMA modes XXX: may be incorrect */
231 put_le16(p + 64, 3); /* pio3-4 supported */
232 put_le16(p + 65, 0xb4); /* minimum DMA multiword tx cycle time */
233 put_le16(p + 66, 0xb4); /* recommended DMA multiword tx cycle time */
234 put_le16(p + 67, 0x12c); /* minimum PIO cycle time without flow control */
235 put_le16(p + 68, 0xb4); /* minimum PIO cycle time with IORDY flow control */
237 put_le16(p + 71, 30); /* in ns */
238 put_le16(p + 72, 30); /* in ns */
/* NCQ/WWN words below are guarded by elided conditionals */
241 put_le16(p + 75, s->ncq_queues - 1);
243 put_le16(p + 76, (1 << 8));
246 put_le16(p + 80, 0x1e); /* support up to ATA/ATAPI-4 */
248 put_le16(p + 84, (1 << 8)); /* supports WWN for words 108-111 */
249 put_le16(p + 87, (1 << 8)); /* WWN enabled */
253 put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
257 /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
258 put_le16(p + 108, s->wwn >> 48);
259 put_le16(p + 109, s->wwn >> 32);
260 put_le16(p + 110, s->wwn >> 16);
261 put_le16(p + 111, s->wwn);
267 memcpy(s->io_buffer, p, sizeof(s->identify_data));
270 static void ide_cfata_identify_size(IDEState *s)
272 uint16_t *p = (uint16_t *)s->identify_data;
273 put_le16(p + 7, s->nb_sectors >> 16); /* Sectors per card */
274 put_le16(p + 8, s->nb_sectors); /* Sectors per card */
275 put_le16(p + 60, s->nb_sectors); /* Total LBA sectors */
276 put_le16(p + 61, s->nb_sectors >> 16); /* Total LBA sectors */
/*
 * Build the CFATA (CompactFlash) identify response, caching it in
 * s->identify_data and copying it into s->io_buffer.
 */
279 static void ide_cfata_identify(IDEState *s)
284 p = (uint16_t *)s->identify_data;
285 if (s->identify_set) {
288 memset(p, 0, sizeof(s->identify_data));
/* current capacity from the CHS translation geometry */
290 cur_sec = s->cylinders * s->heads * s->sectors;
292 put_le16(p + 0, 0x848a); /* CF Storage Card signature */
293 put_le16(p + 1, s->cylinders); /* Default cylinders */
294 put_le16(p + 3, s->heads); /* Default heads */
295 put_le16(p + 6, s->sectors); /* Default sectors per track */
296 /* *(p + 7) := nb_sectors >> 16 -- see ide_cfata_identify_size */
297 /* *(p + 8) := nb_sectors -- see ide_cfata_identify_size */
298 padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
299 put_le16(p + 22, 0x0004); /* ECC bytes */
300 padstr((char *) (p + 23), s->version, 8); /* Firmware Revision */
301 padstr((char *) (p + 27), s->drive_model_str, 40);/* Model number */
302 #if MAX_MULT_SECTORS > 1
303 put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
305 put_le16(p + 47, 0x0000);
307 put_le16(p + 49, 0x0f00); /* Capabilities */
308 put_le16(p + 51, 0x0002); /* PIO cycle timing mode */
309 put_le16(p + 52, 0x0001); /* DMA cycle timing mode */
310 put_le16(p + 53, 0x0003); /* Translation params valid */
311 put_le16(p + 54, s->cylinders); /* Current cylinders */
312 put_le16(p + 55, s->heads); /* Current heads */
313 put_le16(p + 56, s->sectors); /* Current sectors */
314 put_le16(p + 57, cur_sec); /* Current capacity */
315 put_le16(p + 58, cur_sec >> 16); /* Current capacity */
316 if (s->mult_sectors) /* Multiple sector setting */
317 put_le16(p + 59, 0x100 | s->mult_sectors);
318 /* *(p + 60) := nb_sectors -- see ide_cfata_identify_size */
319 /* *(p + 61) := nb_sectors >> 16 -- see ide_cfata_identify_size */
320 put_le16(p + 63, 0x0203); /* Multiword DMA capability */
321 put_le16(p + 64, 0x0001); /* Flow Control PIO support */
322 put_le16(p + 65, 0x0096); /* Min. Multiword DMA cycle */
323 put_le16(p + 66, 0x0096); /* Rec. Multiword DMA cycle */
324 put_le16(p + 68, 0x00b4); /* Min. PIO cycle time */
325 put_le16(p + 82, 0x400c); /* Command Set supported */
326 put_le16(p + 83, 0x7068); /* Command Set supported */
327 put_le16(p + 84, 0x4000); /* Features supported */
328 put_le16(p + 85, 0x000c); /* Command Set enabled */
329 put_le16(p + 86, 0x7044); /* Command Set enabled */
330 put_le16(p + 87, 0x4000); /* Features enabled */
331 put_le16(p + 91, 0x4060); /* Current APM level */
332 put_le16(p + 129, 0x0002); /* Current features option */
333 put_le16(p + 130, 0x0005); /* Reassigned sectors */
334 put_le16(p + 131, 0x0001); /* Initial power mode */
335 put_le16(p + 132, 0x0000); /* User signature */
336 put_le16(p + 160, 0x8100); /* Power requirement */
337 put_le16(p + 161, 0x8001); /* CF command set */
339 ide_cfata_identify_size(s);
343 memcpy(s->io_buffer, p, sizeof(s->identify_data));
/*
 * Load the post-reset ATA signature into the task-file registers.
 * ATAPI (CD) drives get a distinct signature in the IDE_CD branch below;
 * the remaining register assignments are elided in this view.
 */
346 static void ide_set_signature(IDEState *s)
348 s->select &= 0xf0; /* clear head */
352 if (s->drive_kind == IDE_CD) {
/* Per-request state for an in-flight TRIM (DSM) operation: tracks the
 * current position (i, j) within the range-entry qiov, the pending discard
 * aiocb, a completion BH and the final return code (fields elided here). */
364 typedef struct TrimAIOCB {
/*
 * Async-cancel hook for a TRIM request: fast-forward the iteration
 * indices past the last range entry so ide_issue_trim_cb stops, mark the
 * request cancelled, and cancel any discard currently in flight.
 */
374 static void trim_aio_cancel(BlockAIOCB *acb)
376 TrimAIOCB *iocb = container_of(acb, TrimAIOCB, common);
378 /* Exit the loop so ide_issue_trim_cb will not continue */
379 iocb->j = iocb->qiov->niov - 1;
380 iocb->i = (iocb->qiov->iov[iocb->j].iov_len / 8) - 1;
382 iocb->ret = -ECANCELED;
385 blk_aio_cancel_async(iocb->aiocb);
/* AIOCB type descriptor for TRIM requests; wires up async cancellation. */
390 static const AIOCBInfo trim_aiocb_info = {
391 .aiocb_size = sizeof(TrimAIOCB),
392 .cancel_async = trim_aio_cancel,
/*
 * Bottom half run when a TRIM request finishes: deliver the final return
 * code to the caller's completion callback, then tear down the BH and
 * release the AIOCB reference.
 */
395 static void ide_trim_bh_cb(void *opaque)
397 TrimAIOCB *iocb = opaque;
399 iocb->common.cb(iocb->common.opaque, iocb->ret);
401 qemu_bh_delete(iocb->bh);
403 qemu_aio_unref(iocb);
/*
 * Completion callback driving a TRIM request: walk the qiov of packed
 * 8-byte DSM range entries (resuming at indices i, j), submit one discard
 * per non-empty entry and re-enter here on its completion. When all
 * entries are consumed, the completion BH is scheduled.
 */
406 static void ide_issue_trim_cb(void *opaque, int ret)
408 TrimAIOCB *iocb = opaque;
410 while (iocb->j < iocb->qiov->niov) {
412 while (++iocb->i < iocb->qiov->iov[j].iov_len / 8) {
414 uint64_t *buffer = iocb->qiov->iov[j].iov_base;
416 /* 6-byte LBA + 2-byte range per entry */
417 uint64_t entry = le64_to_cpu(buffer[i]);
418 uint64_t sector = entry & 0x0000ffffffffffffULL;
419 uint16_t count = entry >> 48;
425 /* Got an entry! Submit and exit. */
426 iocb->aiocb = blk_aio_discard(iocb->blk, sector, count,
427 ide_issue_trim_cb, opaque);
440 qemu_bh_schedule(iocb->bh);
/*
 * Entry point for a TRIM (DATA SET MANAGEMENT) request: allocate the
 * TrimAIOCB, create its completion BH, and kick off the range-entry walk
 * by calling ide_issue_trim_cb with a success status.
 */
444 BlockAIOCB *ide_issue_trim(BlockBackend *blk,
445 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
446 BlockCompletionFunc *cb, void *opaque)
450 iocb = blk_aio_get(&trim_aiocb_info, blk, cb, opaque);
452 iocb->bh = qemu_bh_new(ide_trim_bh_cb, iocb);
457 ide_issue_trim_cb(iocb, 0);
458 return &iocb->common;
/* Abort the current command: stop any PIO transfer in progress and
 * report ready + error status back to the guest. */
461 void ide_abort_command(IDEState *s)
463 ide_transfer_stop(s);
464 s->status = READY_STAT | ERR_STAT;
468 /* prepare data transfer and tell what to do after */
/*
 * Arm a PIO data transfer over [buf, buf + size) and record the
 * continuation to invoke when the guest drains/fills the window.
 * DRQ is only raised when no error is pending; the DMA backend is
 * notified so it can service the transfer itself if it wants to.
 */
469 void ide_transfer_start(IDEState *s, uint8_t *buf, int size,
470 EndTransferFunc *end_transfer_func)
472 s->end_transfer_func = end_transfer_func;
474 s->data_end = buf + size;
475 if (!(s->status & ERR_STAT)) {
476 s->status |= DRQ_STAT;
478 if (s->bus->dma->ops->start_transfer) {
479 s->bus->dma->ops->start_transfer(s->bus->dma);
483 static void ide_cmd_done(IDEState *s)
485 if (s->bus->dma->ops->cmd_done) {
486 s->bus->dma->ops->cmd_done(s->bus->dma);
/*
 * Tear down the PIO transfer window: reset the data pointers to an empty
 * io_buffer range, make this function the (no-op) continuation, and drop
 * DRQ so the guest stops transferring data.
 */
490 void ide_transfer_stop(IDEState *s)
492 s->end_transfer_func = ide_transfer_stop;
493 s->data_ptr = s->io_buffer;
494 s->data_end = s->io_buffer;
495 s->status &= ~DRQ_STAT;
/*
 * Decode the current task-file registers into an absolute sector number.
 * Bit 6 of the select register chooses LBA addressing; within that, the
 * two assignments below are the LBA28 and LBA48 decodings (the guarding
 * lba48 conditional is elided in this view). The final branch is the
 * CHS (cylinder/head/sector) translation.
 */
499 int64_t ide_get_sector(IDEState *s)
502 if (s->select & 0x40) {
505 sector_num = ((s->select & 0x0f) << 24) | (s->hcyl << 16) |
506 (s->lcyl << 8) | s->sector;
508 sector_num = ((int64_t)s->hob_hcyl << 40) |
509 ((int64_t) s->hob_lcyl << 32) |
510 ((int64_t) s->hob_sector << 24) |
511 ((int64_t) s->hcyl << 16) |
512 ((int64_t) s->lcyl << 8) | s->sector;
/* CHS: sectors are 1-based, heads and cylinders 0-based */
515 sector_num = ((s->hcyl << 8) | s->lcyl) * s->heads * s->sectors +
516 (s->select & 0x0f) * s->sectors + (s->sector - 1);
/*
 * Inverse of ide_get_sector: write an absolute sector number back into
 * the task-file registers using the currently selected addressing mode
 * (LBA28, LBA48, or CHS; the lba48 conditional is elided in this view).
 */
521 void ide_set_sector(IDEState *s, int64_t sector_num)
524 if (s->select & 0x40) {
526 s->select = (s->select & 0xf0) | (sector_num >> 24);
527 s->hcyl = (sector_num >> 16);
528 s->lcyl = (sector_num >> 8);
529 s->sector = (sector_num);
531 s->sector = sector_num;
532 s->lcyl = sector_num >> 8;
533 s->hcyl = sector_num >> 16;
534 s->hob_sector = sector_num >> 24;
535 s->hob_lcyl = sector_num >> 32;
536 s->hob_hcyl = sector_num >> 40;
/* CHS: split into cylinder, head (select low nibble) and 1-based sector */
539 cyl = sector_num / (s->heads * s->sectors);
540 r = sector_num % (s->heads * s->sectors);
543 s->select = (s->select & 0xf0) | ((r / s->sectors) & 0x0f);
544 s->sector = (r % s->sectors) + 1;
/* Report a read/write failure to the guest by aborting the command. */
548 static void ide_rw_error(IDEState *s) {
549 ide_abort_command(s);
/*
 * Validate that [sector, sector + nb_sectors) lies within the backing
 * device. The subtraction form of the second test avoids unsigned
 * overflow for large nb_sectors values.
 */
553 static bool ide_sect_range_ok(IDEState *s,
554 uint64_t sector, uint64_t nb_sectors)
556 uint64_t total_sectors;
558 blk_get_geometry(s->blk, &total_sectors);
559 if (sector > total_sectors || nb_sectors > total_sectors - sector) {
/*
 * Completion for a buffered PIO read: unless the request was orphaned
 * (its owner went away), copy the bounce buffer back into the caller's
 * qiov and invoke the original callback; then unlink the request and
 * free the bounce buffer.
 */
565 static void ide_buffered_readv_cb(void *opaque, int ret)
567 IDEBufferedRequest *req = opaque;
568 if (!req->orphaned) {
570 qemu_iovec_from_buf(req->original_qiov, 0, req->iov.iov_base,
571 req->original_qiov->size);
573 req->original_cb(req->original_opaque, ret);
575 QLIST_REMOVE(req, list);
576 qemu_vfree(req->iov.iov_base);
/* Cap on concurrently buffered PIO read requests per drive
 * (enforced in ide_buffered_readv). */
580 #define MAX_BUFFERED_REQS 16
/*
 * Issue an async read through a private bounce buffer so the guest cannot
 * observe or modify the data while the request is in flight. Fails with
 * -EIO once more than MAX_BUFFERED_REQS requests are already queued.
 * The request is tracked on s->buffered_requests until completion.
 */
582 BlockAIOCB *ide_buffered_readv(IDEState *s, int64_t sector_num,
583 QEMUIOVector *iov, int nb_sectors,
584 BlockCompletionFunc *cb, void *opaque)
587 IDEBufferedRequest *req;
/* count pending buffered requests (loop body elided in this view) */
590 QLIST_FOREACH(req, &s->buffered_requests, list) {
593 if (c > MAX_BUFFERED_REQS) {
594 return blk_abort_aio_request(s->blk, cb, opaque, -EIO);
597 req = g_new0(IDEBufferedRequest, 1);
598 req->original_qiov = iov;
599 req->original_cb = cb;
600 req->original_opaque = opaque;
601 req->iov.iov_base = qemu_blockalign(blk_bs(s->blk), iov->size);
602 req->iov.iov_len = iov->size;
603 qemu_iovec_init_external(&req->qiov, &req->iov, 1);
605 aioreq = blk_aio_readv(s->blk, sector_num, &req->qiov, nb_sectors,
606 ide_buffered_readv_cb, req);
608 QLIST_INSERT_HEAD(&s->buffered_requests, req, list);
612 static void ide_sector_read(IDEState *s);
/*
 * Completion for a PIO sector read: on success, account the I/O, advance
 * the current sector past the sectors just read, and re-arm the PIO
 * window so the guest can fetch them (continuing with ide_sector_read
 * for any remainder). Errors are routed through ide_handle_rw_error.
 */
614 static void ide_sector_read_cb(void *opaque, int ret)
616 IDEState *s = opaque;
620 s->status &= ~BUSY_STAT;
622 if (ret == -ECANCELED) {
626 if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO |
632 block_acct_done(blk_get_stats(s->blk), &s->acct);
/* clamp the chunk to the request size negotiated for this command */
635 if (n > s->req_nb_sectors) {
636 n = s->req_nb_sectors;
639 ide_set_sector(s, ide_get_sector(s) + n);
641 /* Allow the guest to read the io_buffer */
642 ide_transfer_start(s, s->io_buffer, n * BDRV_SECTOR_SIZE, ide_sector_read);
/*
 * Start (or continue) a PIO sector read: compute the next chunk (at most
 * req_nb_sectors), validate it against the device size, and kick off a
 * buffered async read that completes in ide_sector_read_cb.
 */
646 static void ide_sector_read(IDEState *s)
651 s->status = READY_STAT | SEEK_STAT;
652 s->error = 0; /* not needed by IDE spec, but needed by Windows */
653 sector_num = ide_get_sector(s);
/* nothing left to read: close the PIO window */
657 ide_transfer_stop(s);
661 s->status |= BUSY_STAT;
663 if (n > s->req_nb_sectors) {
664 n = s->req_nb_sectors;
667 #if defined(DEBUG_IDE)
668 printf("sector=%" PRId64 "\n", sector_num);
671 if (!ide_sect_range_ok(s, sector_num, n)) {
673 block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_READ);
677 s->iov.iov_base = s->io_buffer;
678 s->iov.iov_len = n * BDRV_SECTOR_SIZE;
679 qemu_iovec_init_external(&s->qiov, &s->iov, 1);
681 block_acct_start(blk_get_stats(s->blk), &s->acct,
682 n * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
683 s->pio_aiocb = ide_buffered_readv(s, sector_num, &s->qiov, n,
684 ide_sector_read_cb, s);
687 void dma_buf_commit(IDEState *s, uint32_t tx_bytes)
689 if (s->bus->dma->ops->commit_buf) {
690 s->bus->dma->ops->commit_buf(s->bus->dma, tx_bytes);
692 s->io_buffer_offset += tx_bytes;
693 qemu_sglist_destroy(&s->sg);
/*
 * Mark the DMA engine idle: clear the pending aiocb and the retry state
 * recorded by ide_start_dma, then inform the DMA backend ('more' tells it
 * whether further transfers for this command are expected).
 */
696 void ide_set_inactive(IDEState *s, bool more)
698 s->bus->dma->aiocb = NULL;
699 s->bus->retry_unit = -1;
700 s->bus->retry_sector_num = 0;
701 s->bus->retry_nsector = 0;
702 if (s->bus->dma->ops->set_inactive) {
703 s->bus->dma->ops->set_inactive(s->bus->dma, more);
/* Fail the current DMA command: discard uncommitted DMA state, abort the
 * command and deactivate the DMA engine. */
708 void ide_dma_error(IDEState *s)
710 dma_buf_commit(s, 0);
711 ide_abort_command(s);
712 ide_set_inactive(s, false);
/*
 * Apply the configured block-error policy to a failed request.
 * 'op' is a bitmask of IDE_RETRY_* flags describing what must be retried.
 * STOP: stash 'op' in bus->error_status so the request can be restarted
 * when the VM resumes. REPORT: account the failure and raise the guest-
 * visible error (DMA branch elided here). Returns non-zero unless the
 * policy is IGNORE, i.e. unless the caller should carry on.
 */
716 static int ide_handle_rw_error(IDEState *s, int error, int op)
718 bool is_read = (op & IDE_RETRY_READ) != 0;
719 BlockErrorAction action = blk_get_error_action(s->blk, is_read, error);
721 if (action == BLOCK_ERROR_ACTION_STOP) {
722 assert(s->bus->retry_unit == s->unit);
723 s->bus->error_status = op;
724 } else if (action == BLOCK_ERROR_ACTION_REPORT) {
725 block_acct_failed(blk_get_stats(s->blk), &s->acct);
726 if (op & IDE_RETRY_DMA) {
732 blk_error_action(s->blk, action, is_read, error);
733 return action != BLOCK_ERROR_ACTION_IGNORE;
/*
 * Central DMA completion callback, re-entered after every chunk.
 * On error, build the IDE_RETRY_* mask and defer to ide_handle_rw_error.
 * Otherwise: commit the finished scatter/gather chunk, advance the
 * current sector and remaining count, and either finish the command
 * (nsector == 0) or prepare the next PRD-described chunk and resubmit
 * the read/write/trim. Several branch bodies are elided in this view.
 */
736 static void ide_dma_cb(void *opaque, int ret)
738 IDEState *s = opaque;
741 bool stay_active = false;
743 if (ret == -ECANCELED) {
747 int op = IDE_RETRY_DMA;
749 if (s->dma_cmd == IDE_DMA_READ)
750 op |= IDE_RETRY_READ;
751 else if (s->dma_cmd == IDE_DMA_TRIM)
752 op |= IDE_RETRY_TRIM;
754 if (ide_handle_rw_error(s, -ret, op)) {
759 n = s->io_buffer_size >> 9;
760 if (n > s->nsector) {
761 /* The PRDs were longer than needed for this request. Shorten them so
762 * we don't get a negative remainder. The Active bit must remain set
763 * after the request completes. */
768 sector_num = ide_get_sector(s);
770 assert(n * 512 == s->sg.size);
771 dma_buf_commit(s, s->sg.size);
773 ide_set_sector(s, sector_num);
777 /* end of transfer ? */
778 if (s->nsector == 0) {
779 s->status = READY_STAT | SEEK_STAT;
784 /* launch next transfer */
786 s->io_buffer_index = 0;
787 s->io_buffer_size = n * 512;
788 if (s->bus->dma->ops->prepare_buf(s->bus->dma, s->io_buffer_size) < 512) {
789 /* The PRDs were too short. Reset the Active bit, but don't raise an
791 s->status = READY_STAT | SEEK_STAT;
792 dma_buf_commit(s, 0);
797 printf("ide_dma_cb: sector_num=%" PRId64 " n=%d, cmd_cmd=%d\n",
798 sector_num, n, s->dma_cmd);
/* reject transfers that run past the end of the device */
801 if ((s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) &&
802 !ide_sect_range_ok(s, sector_num, n)) {
804 block_acct_invalid(blk_get_stats(s->blk), s->acct.type);
808 switch (s->dma_cmd) {
810 s->bus->dma->aiocb = dma_blk_read(s->blk, &s->sg, sector_num,
814 s->bus->dma->aiocb = dma_blk_write(s->blk, &s->sg, sector_num,
818 s->bus->dma->aiocb = dma_blk_io(s->blk, &s->sg, sector_num,
819 ide_issue_trim, ide_dma_cb, s,
820 DMA_DIRECTION_TO_DEVICE);
/* error path: close out accounting before deactivating the engine */
826 if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
827 block_acct_done(blk_get_stats(s->blk), &s->acct);
829 ide_set_inactive(s, stay_active);
/*
 * Begin a DMA command of the given kind (read/write/trim): raise
 * BUSY|DRQ, record the command, start I/O accounting for read/write
 * (the selecting switch is elided here), and hand off to ide_start_dma
 * with ide_dma_cb as the per-chunk completion callback.
 */
832 static void ide_sector_start_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
834 s->status = READY_STAT | SEEK_STAT | DRQ_STAT | BUSY_STAT;
835 s->io_buffer_size = 0;
836 s->dma_cmd = dma_cmd;
840 block_acct_start(blk_get_stats(s->blk), &s->acct,
841 s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
844 block_acct_start(blk_get_stats(s->blk), &s->acct,
845 s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
851 ide_start_dma(s, ide_dma_cb);
854 void ide_start_dma(IDEState *s, BlockCompletionFunc *cb)
856 s->io_buffer_index = 0;
857 s->bus->retry_unit = s->unit;
858 s->bus->retry_sector_num = ide_get_sector(s);
859 s->bus->retry_nsector = s->nsector;
860 if (s->bus->dma->ops->start_dma) {
861 s->bus->dma->ops->start_dma(s->bus->dma, s, cb);
865 static void ide_sector_write(IDEState *s);
/* Timer callback used by the win2k install hack in ide_sector_write_cb
 * to delay the write-completion IRQ (body elided in this view). */
867 static void ide_sector_write_timer_cb(void *opaque)
869 IDEState *s = opaque;
/*
 * Completion for a PIO sector write: account the I/O, advance the current
 * sector, and either finish the command (nsector == 0) or re-open the PIO
 * window for the next chunk. The IRQ is normally raised immediately, but
 * with win2k_install_hack every 16th IRQ is deferred via a timer.
 */
873 static void ide_sector_write_cb(void *opaque, int ret)
875 IDEState *s = opaque;
878 if (ret == -ECANCELED) {
883 s->status &= ~BUSY_STAT;
886 if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO)) {
891 block_acct_done(blk_get_stats(s->blk), &s->acct);
894 if (n > s->req_nb_sectors) {
895 n = s->req_nb_sectors;
899 ide_set_sector(s, ide_get_sector(s) + n);
900 if (s->nsector == 0) {
901 /* no more sectors to write */
902 ide_transfer_stop(s);
/* n1 = size of the next guest-visible PIO chunk */
905 if (n1 > s->req_nb_sectors) {
906 n1 = s->req_nb_sectors;
908 ide_transfer_start(s, s->io_buffer, n1 * BDRV_SECTOR_SIZE,
912 if (win2k_install_hack && ((++s->irq_count % 16) == 0)) {
913 /* It seems there is a bug in the Windows 2000 installer HDD
914 IDE driver which fills the disk with empty logs when the
915 IDE write IRQ comes too early. This hack tries to correct
916 that at the expense of slower write performances. Use this
917 option _only_ to install Windows 2000. You must disable it
919 timer_mod(s->sector_write_timer,
920 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + (get_ticks_per_sec() / 1000));
/*
 * Flush the sectors the guest just placed in io_buffer to disk: compute
 * the chunk size (at most req_nb_sectors), validate the range, and issue
 * an async write completing in ide_sector_write_cb.
 */
926 static void ide_sector_write(IDEState *s)
931 s->status = READY_STAT | SEEK_STAT | BUSY_STAT;
932 sector_num = ide_get_sector(s);
933 #if defined(DEBUG_IDE)
934 printf("sector=%" PRId64 "\n", sector_num);
937 if (n > s->req_nb_sectors) {
938 n = s->req_nb_sectors;
941 if (!ide_sect_range_ok(s, sector_num, n)) {
943 block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_WRITE);
947 s->iov.iov_base = s->io_buffer;
948 s->iov.iov_len = n * BDRV_SECTOR_SIZE;
949 qemu_iovec_init_external(&s->qiov, &s->iov, 1);
951 block_acct_start(blk_get_stats(s->blk), &s->acct,
952 n * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
953 s->pio_aiocb = blk_aio_writev(s->blk, sector_num, &s->qiov, n,
954 ide_sector_write_cb, s);
/*
 * Completion for a cache-flush request: route failures through the
 * block-error policy (retrying as IDE_RETRY_FLUSH), otherwise account
 * the flush and report ready status to the guest.
 */
957 static void ide_flush_cb(void *opaque, int ret)
959 IDEState *s = opaque;
963 if (ret == -ECANCELED) {
967 /* XXX: What sector number to set here? */
968 if (ide_handle_rw_error(s, -ret, IDE_RETRY_FLUSH)) {
974 block_acct_done(blk_get_stats(s->blk), &s->acct);
976 s->status = READY_STAT | SEEK_STAT;
/*
 * Issue an async flush of the backing device's write cache, completing
 * in ide_flush_cb. The s->blk == NULL branch (body elided here) covers
 * drives with no medium attached.
 */
981 static void ide_flush_cache(IDEState *s)
983 if (s->blk == NULL) {
988 s->status |= BUSY_STAT;
989 block_acct_start(blk_get_stats(s->blk), &s->acct, 0, BLOCK_ACCT_FLUSH);
990 s->pio_aiocb = blk_aio_flush(s->blk, ide_flush_cb, s);
/*
 * Fill io_buffer with the CFATA metadata-inquiry response: format
 * revision, media type/status, and the metadata capacity both in bytes
 * and in 512-byte sectors (spd).
 */
993 static void ide_cfata_metadata_inquiry(IDEState *s)
998 p = (uint16_t *) s->io_buffer;
/* number of 512-byte sectors needed to hold mdata_size bytes */
1000 spd = ((s->mdata_size - 1) >> 9) + 1;
1002 put_le16(p + 0, 0x0001); /* Data format revision */
1003 put_le16(p + 1, 0x0000); /* Media property: silicon */
1004 put_le16(p + 2, s->media_changed); /* Media status */
1005 put_le16(p + 3, s->mdata_size & 0xffff); /* Capacity in bytes (low) */
1006 put_le16(p + 4, s->mdata_size >> 16); /* Capacity in bytes (high) */
1007 put_le16(p + 5, spd & 0xffff); /* Sectors per device (low) */
1008 put_le16(p + 6, spd >> 16); /* Sectors per device (high) */
/*
 * Read CFATA metadata into io_buffer. The byte offset is taken from
 * hcyl:lcyl (in 512-byte units); out-of-range offsets abort the command.
 * At most 0x200 - 2 payload bytes are returned after the leading
 * media-status word, clamped to the remaining metadata and the
 * requested sector count.
 */
1011 static void ide_cfata_metadata_read(IDEState *s)
1015 if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
1016 s->status = ERR_STAT;
1017 s->error = ABRT_ERR;
1021 p = (uint16_t *) s->io_buffer;
1022 memset(p, 0, 0x200);
1024 put_le16(p + 0, s->media_changed); /* Media status */
1025 memcpy(p + 1, s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
1026 MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
1027 s->nsector << 9), 0x200 - 2));
/*
 * Write CFATA metadata from io_buffer (mirror of ide_cfata_metadata_read):
 * validate the hcyl:lcyl offset, clear the media-changed flag, and copy
 * at most 0x200 - 2 bytes into mdata_storage, clamped to the remaining
 * metadata and the requested sector count.
 */
1030 static void ide_cfata_metadata_write(IDEState *s)
1032 if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
1033 s->status = ERR_STAT;
1034 s->error = ABRT_ERR;
1038 s->media_changed = 0;
1040 memcpy(s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
1042 MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
1043 s->nsector << 9), 0x200 - 2));
1046 /* called when the inserted state of the media has changed */
1047 static void ide_cd_change_cb(void *opaque, bool load)
1049 IDEState *s = opaque;
1050 uint64_t nb_sectors;
/* track tray state and pick up the new medium's size */
1052 s->tray_open = !load;
1053 blk_get_geometry(s->blk, &nb_sectors);
1054 s->nb_sectors = nb_sectors;
1057 * First indicate to the guest that a CD has been removed. That's
1058 * done on the next command the guest sends us.
1060 * Then we set UNIT_ATTENTION, by which the guest will
1061 * detect a new CD in the drive. See ide_atapi_cmd() for details.
1063 s->cdrom_changed = 1;
1064 s->events.new_media = true;
1065 s->events.eject_request = false;
1066 ide_set_irq(s->bus);
/*
 * Host requested a medium eject: latch the eject-request event for the
 * guest and raise an IRQ. The tray_locked clearing is guarded by an
 * elided 'force' conditional.
 */
1069 static void ide_cd_eject_request_cb(void *opaque, bool force)
1071 IDEState *s = opaque;
1073 s->events.eject_request = true;
1075 s->tray_locked = false;
1077 ide_set_irq(s->bus);
/*
 * Normalize the sector count for the current command. For LBA48 commands
 * the 16-bit count is assembled from hob_nsector:nsector (with 0 meaning
 * 65536); for 28-bit commands 0 means 256. Several branches are elided
 * in this view.
 */
1080 static void ide_cmd_lba48_transform(IDEState *s, int lba48)
1084 /* handle the 'magic' 0 nsector count conversion here. to avoid
1085 * fiddling with the rest of the read logic, we just store the
1086 * full sector count in ->nsector and ignore ->hob_nsector from now
1092 if (!s->nsector && !s->hob_nsector)
1095 int lo = s->nsector;
1096 int hi = s->hob_nsector;
1098 s->nsector = (hi << 8) | lo;
1103 static void ide_clear_hob(IDEBus *bus)
1105 /* any write clears HOB high bit of device control register */
1106 bus->ifs[0].select &= ~(1 << 7);
1107 bus->ifs[1].select &= ~(1 << 7);
/*
 * Guest write to an IDE command-block register. The dispatching switch
 * on the register offset is elided in this view; each case latches the
 * previous value into the corresponding hob_* register and stores the
 * new value into both drives. Offset 6 selects the active drive and
 * offset 7 dispatches the command via ide_exec_cmd.
 */
1110 void ide_ioport_write(void *opaque, uint32_t addr, uint32_t val)
1112 IDEBus *bus = opaque;
1115 printf("IDE: write addr=0x%x val=0x%02x\n", addr, val);
1120 /* ignore writes to command block while busy with previous command */
1121 if (addr != 7 && (idebus_active_if(bus)->status & (BUSY_STAT|DRQ_STAT)))
1129 /* NOTE: data is written to the two drives */
1130 bus->ifs[0].hob_feature = bus->ifs[0].feature;
1131 bus->ifs[1].hob_feature = bus->ifs[1].feature;
1132 bus->ifs[0].feature = val;
1133 bus->ifs[1].feature = val;
1137 bus->ifs[0].hob_nsector = bus->ifs[0].nsector;
1138 bus->ifs[1].hob_nsector = bus->ifs[1].nsector;
1139 bus->ifs[0].nsector = val;
1140 bus->ifs[1].nsector = val;
1144 bus->ifs[0].hob_sector = bus->ifs[0].sector;
1145 bus->ifs[1].hob_sector = bus->ifs[1].sector;
1146 bus->ifs[0].sector = val;
1147 bus->ifs[1].sector = val;
1151 bus->ifs[0].hob_lcyl = bus->ifs[0].lcyl;
1152 bus->ifs[1].hob_lcyl = bus->ifs[1].lcyl;
1153 bus->ifs[0].lcyl = val;
1154 bus->ifs[1].lcyl = val;
1158 bus->ifs[0].hob_hcyl = bus->ifs[0].hcyl;
1159 bus->ifs[1].hob_hcyl = bus->ifs[1].hcyl;
1160 bus->ifs[0].hcyl = val;
1161 bus->ifs[1].hcyl = val;
1164 /* FIXME: HOB readback uses bit 7 */
1165 bus->ifs[0].select = (val & ~0x10) | 0xa0;
1166 bus->ifs[1].select = (val | 0x10) | 0xa0;
/* select the active drive from bit 4 of the select register */
1168 bus->unit = (val >> 4) & 1;
1173 ide_exec_cmd(bus, val);
/* NOP command handler (body elided in this view). */
1178 static bool cmd_nop(IDEState *s, uint8_t cmd)
/*
 * DATA SET MANAGEMENT: dispatch on the feature register (case labels
 * elided) — the TRIM sub-command starts a DMA TRIM; anything else is
 * aborted.
 */
1183 static bool cmd_data_set_management(IDEState *s, uint8_t cmd)
1185 switch (s->feature) {
1188 ide_sector_start_dma(s, IDE_DMA_TRIM);
1194 ide_abort_command(s);
/*
 * IDENTIFY DEVICE: for disks/CF cards, build the identify data
 * (ide_identify or ide_cfata_identify; the selecting branch bodies are
 * partly elided), start the 512-byte PIO transfer and raise an IRQ.
 * For ATAPI drives the ATA signature is set instead and the command is
 * aborted (the guest must use IDENTIFY PACKET DEVICE).
 */
1198 static bool cmd_identify(IDEState *s, uint8_t cmd)
1200 if (s->blk && s->drive_kind != IDE_CD) {
1201 if (s->drive_kind != IDE_CFATA) {
1204 ide_cfata_identify(s);
1206 s->status = READY_STAT | SEEK_STAT;
1207 ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1208 ide_set_irq(s->bus);
1211 if (s->drive_kind == IDE_CD) {
1212 ide_set_signature(s);
1214 ide_abort_command(s);
/* READ VERIFY SECTOR(S) (and its EXT variant): only normalizes the
 * sector count — no actual media check is performed. */
1220 static bool cmd_verify(IDEState *s, uint8_t cmd)
1222 bool lba48 = (cmd == WIN_VERIFY_EXT);
1224 /* do sector number check ? */
1225 ide_cmd_lba48_transform(s, lba48);
/*
 * SET MULTIPLE MODE: configure how many sectors READ/WRITE MULTIPLE
 * transfer per DRQ block. CFATA accepts 0 as "disable"; otherwise the
 * count must be a power of two no larger than MAX_MULT_SECTORS or the
 * command is aborted.
 */
1230 static bool cmd_set_multiple_mode(IDEState *s, uint8_t cmd)
1232 if (s->drive_kind == IDE_CFATA && s->nsector == 0) {
1233 /* Disable Read and Write Multiple */
1234 s->mult_sectors = 0;
1235 } else if ((s->nsector & 0xff) != 0 &&
1236 ((s->nsector & 0xff) > MAX_MULT_SECTORS ||
1237 (s->nsector & (s->nsector - 1)) != 0)) {
1238 ide_abort_command(s);
1240 s->mult_sectors = s->nsector & 0xff;
/*
 * READ MULTIPLE (and EXT): abort unless a medium is present and
 * SET MULTIPLE MODE has configured a block size, then read
 * mult_sectors sectors per DRQ block.
 */
1246 static bool cmd_read_multiple(IDEState *s, uint8_t cmd)
1248 bool lba48 = (cmd == WIN_MULTREAD_EXT);
1250 if (!s->blk || !s->mult_sectors) {
1251 ide_abort_command(s);
1255 ide_cmd_lba48_transform(s, lba48);
1256 s->req_nb_sectors = s->mult_sectors;
/*
 * WRITE MULTIPLE (and EXT): abort unless a medium is present and a
 * multiple-block size is configured, then open a PIO window of up to
 * mult_sectors sectors feeding ide_sector_write.
 */
1261 static bool cmd_write_multiple(IDEState *s, uint8_t cmd)
1263 bool lba48 = (cmd == WIN_MULTWRITE_EXT);
1266 if (!s->blk || !s->mult_sectors) {
1267 ide_abort_command(s);
1271 ide_cmd_lba48_transform(s, lba48);
1273 s->req_nb_sectors = s->mult_sectors;
1274 n = MIN(s->nsector, s->req_nb_sectors);
1276 s->status = SEEK_STAT | READY_STAT;
1277 ide_transfer_start(s, s->io_buffer, 512 * n, ide_sector_write);
1279 s->media_changed = 1;
/*
 * READ SECTOR(S) (and EXT): ATAPI drives respond with the signature and
 * an abort; otherwise read one sector per DRQ block via the PIO path.
 * The no-medium abort at 1295 is guarded by an elided conditional.
 */
1284 static bool cmd_read_pio(IDEState *s, uint8_t cmd)
1286 bool lba48 = (cmd == WIN_READ_EXT);
1288 if (s->drive_kind == IDE_CD) {
1289 ide_set_signature(s); /* odd, but ATA4 8.27.5.2 requires it */
1290 ide_abort_command(s);
1295 ide_abort_command(s);
1299 ide_cmd_lba48_transform(s, lba48);
1300 s->req_nb_sectors = 1;
/*
 * WRITE SECTOR(S) (and EXT): abort when no medium is present (guarding
 * conditional elided), otherwise open a one-sector PIO window feeding
 * ide_sector_write.
 */
1306 static bool cmd_write_pio(IDEState *s, uint8_t cmd)
1308 bool lba48 = (cmd == WIN_WRITE_EXT);
1311 ide_abort_command(s);
1315 ide_cmd_lba48_transform(s, lba48);
1317 s->req_nb_sectors = 1;
1318 s->status = SEEK_STAT | READY_STAT;
1319 ide_transfer_start(s, s->io_buffer, 512, ide_sector_write);
1321 s->media_changed = 1;
/* READ DMA (and EXT): abort without a medium (conditional elided),
 * otherwise start a DMA read of the requested range. */
1326 static bool cmd_read_dma(IDEState *s, uint8_t cmd)
1328 bool lba48 = (cmd == WIN_READDMA_EXT);
1331 ide_abort_command(s);
1335 ide_cmd_lba48_transform(s, lba48);
1336 ide_sector_start_dma(s, IDE_DMA_READ);
/* WRITE DMA (and EXT): abort without a medium (conditional elided),
 * otherwise start a DMA write and mark the medium modified. */
1341 static bool cmd_write_dma(IDEState *s, uint8_t cmd)
1343 bool lba48 = (cmd == WIN_WRITEDMA_EXT);
1346 ide_abort_command(s);
1350 ide_cmd_lba48_transform(s, lba48);
1351 ide_sector_start_dma(s, IDE_DMA_WRITE);
1353 s->media_changed = 1;
1358 static bool cmd_flush_cache(IDEState *s, uint8_t cmd)
1364 static bool cmd_seek(IDEState *s, uint8_t cmd)
1366 /* XXX: Check that seek is within bounds */
1370 static bool cmd_read_native_max(IDEState *s, uint8_t cmd)
1372 bool lba48 = (cmd == WIN_READ_NATIVE_MAX_EXT);
1374 /* Refuse if no sectors are addressable (e.g. medium not inserted) */
1375 if (s->nb_sectors == 0) {
1376 ide_abort_command(s);
1380 ide_cmd_lba48_transform(s, lba48);
1381 ide_set_sector(s, s->nb_sectors - 1);
1386 static bool cmd_check_power_mode(IDEState *s, uint8_t cmd)
1388 s->nsector = 0xff; /* device active or idle */
1392 static bool cmd_set_features(IDEState *s, uint8_t cmd)
1394 uint16_t *identify_data;
1397 ide_abort_command(s);
1401 /* XXX: valid for CDROM ? */
1402 switch (s->feature) {
1403 case 0x02: /* write cache enable */
1404 blk_set_enable_write_cache(s->blk, true);
1405 identify_data = (uint16_t *)s->identify_data;
1406 put_le16(identify_data + 85, (1 << 14) | (1 << 5) | 1);
1408 case 0x82: /* write cache disable */
1409 blk_set_enable_write_cache(s->blk, false);
1410 identify_data = (uint16_t *)s->identify_data;
1411 put_le16(identify_data + 85, (1 << 14) | 1);
1414 case 0xcc: /* reverting to power-on defaults enable */
1415 case 0x66: /* reverting to power-on defaults disable */
1416 case 0xaa: /* read look-ahead enable */
1417 case 0x55: /* read look-ahead disable */
1418 case 0x05: /* set advanced power management mode */
1419 case 0x85: /* disable advanced power management mode */
1420 case 0x69: /* NOP */
1421 case 0x67: /* NOP */
1422 case 0x96: /* NOP */
1423 case 0x9a: /* NOP */
1424 case 0x42: /* enable Automatic Acoustic Mode */
1425 case 0xc2: /* disable Automatic Acoustic Mode */
1427 case 0x03: /* set transfer mode */
1429 uint8_t val = s->nsector & 0x07;
1430 identify_data = (uint16_t *)s->identify_data;
1432 switch (s->nsector >> 3) {
1433 case 0x00: /* pio default */
1434 case 0x01: /* pio mode */
1435 put_le16(identify_data + 62, 0x07);
1436 put_le16(identify_data + 63, 0x07);
1437 put_le16(identify_data + 88, 0x3f);
1439 case 0x02: /* sigle word dma mode*/
1440 put_le16(identify_data + 62, 0x07 | (1 << (val + 8)));
1441 put_le16(identify_data + 63, 0x07);
1442 put_le16(identify_data + 88, 0x3f);
1444 case 0x04: /* mdma mode */
1445 put_le16(identify_data + 62, 0x07);
1446 put_le16(identify_data + 63, 0x07 | (1 << (val + 8)));
1447 put_le16(identify_data + 88, 0x3f);
1449 case 0x08: /* udma mode */
1450 put_le16(identify_data + 62, 0x07);
1451 put_le16(identify_data + 63, 0x07);
1452 put_le16(identify_data + 88, 0x3f | (1 << (val + 8)));
1462 ide_abort_command(s);
1467 /*** ATAPI commands ***/
1469 static bool cmd_identify_packet(IDEState *s, uint8_t cmd)
1471 ide_atapi_identify(s);
1472 s->status = READY_STAT | SEEK_STAT;
1473 ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1474 ide_set_irq(s->bus);
1478 static bool cmd_exec_dev_diagnostic(IDEState *s, uint8_t cmd)
1480 ide_set_signature(s);
1482 if (s->drive_kind == IDE_CD) {
1483 s->status = 0; /* ATAPI spec (v6) section 9.10 defines packet
1484 * devices to return a clear status register
1485 * with READY_STAT *not* set. */
1488 s->status = READY_STAT | SEEK_STAT;
1489 /* The bits of the error register are not as usual for this command!
1490 * They are part of the regular output (this is why ERR_STAT isn't set)
1491 * Device 0 passed, Device 1 passed or not present. */
1493 ide_set_irq(s->bus);
1499 static bool cmd_device_reset(IDEState *s, uint8_t cmd)
1501 ide_set_signature(s);
1502 s->status = 0x00; /* NOTE: READY is _not_ set */
1508 static bool cmd_packet(IDEState *s, uint8_t cmd)
1510 /* overlapping commands not supported */
1511 if (s->feature & 0x02) {
1512 ide_abort_command(s);
1516 s->status = READY_STAT | SEEK_STAT;
1517 s->atapi_dma = s->feature & 1;
1519 ide_transfer_start(s, s->io_buffer, ATAPI_PACKET_SIZE,
1525 /*** CF-ATA commands ***/
1527 static bool cmd_cfa_req_ext_error_code(IDEState *s, uint8_t cmd)
1529 s->error = 0x09; /* miscellaneous error */
1530 s->status = READY_STAT | SEEK_STAT;
1531 ide_set_irq(s->bus);
1536 static bool cmd_cfa_erase_sectors(IDEState *s, uint8_t cmd)
1538 /* WIN_SECURITY_FREEZE_LOCK has the same ID as CFA_WEAR_LEVEL and is
1539 * required for Windows 8 to work with AHCI */
1541 if (cmd == CFA_WEAR_LEVEL) {
1545 if (cmd == CFA_ERASE_SECTORS) {
1546 s->media_changed = 1;
1552 static bool cmd_cfa_translate_sector(IDEState *s, uint8_t cmd)
1554 s->status = READY_STAT | SEEK_STAT;
1556 memset(s->io_buffer, 0, 0x200);
1557 s->io_buffer[0x00] = s->hcyl; /* Cyl MSB */
1558 s->io_buffer[0x01] = s->lcyl; /* Cyl LSB */
1559 s->io_buffer[0x02] = s->select; /* Head */
1560 s->io_buffer[0x03] = s->sector; /* Sector */
1561 s->io_buffer[0x04] = ide_get_sector(s) >> 16; /* LBA MSB */
1562 s->io_buffer[0x05] = ide_get_sector(s) >> 8; /* LBA */
1563 s->io_buffer[0x06] = ide_get_sector(s) >> 0; /* LBA LSB */
1564 s->io_buffer[0x13] = 0x00; /* Erase flag */
1565 s->io_buffer[0x18] = 0x00; /* Hot count */
1566 s->io_buffer[0x19] = 0x00; /* Hot count */
1567 s->io_buffer[0x1a] = 0x01; /* Hot count */
1569 ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1570 ide_set_irq(s->bus);
1575 static bool cmd_cfa_access_metadata_storage(IDEState *s, uint8_t cmd)
1577 switch (s->feature) {
1578 case 0x02: /* Inquiry Metadata Storage */
1579 ide_cfata_metadata_inquiry(s);
1581 case 0x03: /* Read Metadata Storage */
1582 ide_cfata_metadata_read(s);
1584 case 0x04: /* Write Metadata Storage */
1585 ide_cfata_metadata_write(s);
1588 ide_abort_command(s);
1592 ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1593 s->status = 0x00; /* NOTE: READY is _not_ set */
1594 ide_set_irq(s->bus);
1599 static bool cmd_ibm_sense_condition(IDEState *s, uint8_t cmd)
1601 switch (s->feature) {
1602 case 0x01: /* sense temperature in device */
1603 s->nsector = 0x50; /* +20 C */
1606 ide_abort_command(s);
1614 /*** SMART commands ***/
/*
 * WIN_SMART dispatcher: validates the SMART "key" (hcyl:lcyl must hold
 * 0xc2:0x4f) and then dispatches on the subcommand in the Features
 * register.  Result buffers are 512 bytes with a two's-complement
 * checksum in the last byte.
 *
 * NOTE(review): this excerpt elides many interior lines (several case
 * labels, braces, break/return statements).  Visible code is kept
 * byte-identical; elisions are flagged inline.
 */
static bool cmd_smart(IDEState *s, uint8_t cmd)
    /* SMART commands require the 0xc24f magic in the cylinder registers */
    if (s->hcyl != 0xc2 || s->lcyl != 0x4f) {
    /* while SMART is disabled, only SMART_ENABLE is accepted */
    if (!s->smart_enabled && s->feature != SMART_ENABLE) {
    switch (s->feature) {
    /* NOTE(review): case label elided — presumably SMART_DISABLE */
        s->smart_enabled = 0;
    /* NOTE(review): case label elided — presumably SMART_ENABLE */
        s->smart_enabled = 1;
    case SMART_ATTR_AUTOSAVE:
        switch (s->sector) {
            /* sector selects autosave off/on; labels elided */
            s->smart_autosave = 0;
            s->smart_autosave = 1;
        /* NOTE(review): elided branch — presumably SMART status report */
        if (!s->smart_errors) {
    case SMART_READ_THRESH:
        memset(s->io_buffer, 0, 0x200);
        s->io_buffer[0] = 0x01; /* smart struct version */
        /* one 12-byte record per attribute: id + threshold */
        for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
            s->io_buffer[2 + 0 + (n * 12)] = smart_attributes[n][0];
            s->io_buffer[2 + 1 + (n * 12)] = smart_attributes[n][11];
        /* two's-complement checksum over bytes 0..510 into byte 511 */
        for (n = 0; n < 511; n++) {
            s->io_buffer[511] += s->io_buffer[n];
        s->io_buffer[511] = 0x100 - s->io_buffer[511];
        s->status = READY_STAT | SEEK_STAT;
        ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
        ide_set_irq(s->bus);
    case SMART_READ_DATA:
        memset(s->io_buffer, 0, 0x200);
        s->io_buffer[0] = 0x01; /* smart struct version */
        /* copy the first 11 bytes of each attribute record */
        for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
            for (i = 0; i < 11; i++) {
                s->io_buffer[2 + i + (n * 12)] = smart_attributes[n][i];
        s->io_buffer[362] = 0x02 | (s->smart_autosave ? 0x80 : 0x00);
        if (s->smart_selftest_count == 0) {
            s->io_buffer[363] = 0;
        /* NOTE(review): else-branch elided — copies status of the most
         * recent self-test from smart_selftest_data */
            s->smart_selftest_data[3 +
                                   (s->smart_selftest_count - 1) *
        s->io_buffer[364] = 0x20;
        s->io_buffer[365] = 0x01;
        /* offline data collection capacity: execute + self-test*/
        s->io_buffer[367] = (1 << 4 | 1 << 3 | 1);
        s->io_buffer[368] = 0x03; /* smart capability (1) */
        s->io_buffer[369] = 0x00; /* smart capability (2) */
        s->io_buffer[370] = 0x01; /* error logging supported */
        s->io_buffer[372] = 0x02; /* minutes for poll short test */
        s->io_buffer[373] = 0x36; /* minutes for poll ext test */
        s->io_buffer[374] = 0x01; /* minutes for poll conveyance */
        /* checksum as above */
        for (n = 0; n < 511; n++) {
            s->io_buffer[511] += s->io_buffer[n];
        s->io_buffer[511] = 0x100 - s->io_buffer[511];
        s->status = READY_STAT | SEEK_STAT;
        ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
        ide_set_irq(s->bus);
    case SMART_READ_LOG:
        switch (s->sector) {
        case 0x01: /* summary smart error log */
            memset(s->io_buffer, 0, 0x200);
            s->io_buffer[0] = 0x01;
            s->io_buffer[1] = 0x00; /* no error entries */
            s->io_buffer[452] = s->smart_errors & 0xff;
            s->io_buffer[453] = (s->smart_errors & 0xff00) >> 8;
            for (n = 0; n < 511; n++) {
                s->io_buffer[511] += s->io_buffer[n];
            s->io_buffer[511] = 0x100 - s->io_buffer[511];
        case 0x06: /* smart self test log */
            memset(s->io_buffer, 0, 0x200);
            s->io_buffer[0] = 0x01;
            if (s->smart_selftest_count == 0) {
                s->io_buffer[508] = 0;
            /* NOTE(review): else-branch brace elided */
                s->io_buffer[508] = s->smart_selftest_count;
                for (n = 2; n < 506; n++) {
                    s->io_buffer[n] = s->smart_selftest_data[n];
            for (n = 0; n < 511; n++) {
                s->io_buffer[511] += s->io_buffer[n];
            s->io_buffer[511] = 0x100 - s->io_buffer[511];
        s->status = READY_STAT | SEEK_STAT;
        ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
        ide_set_irq(s->bus);
    case SMART_EXECUTE_OFFLINE:
        switch (s->sector) {
        case 0: /* off-line routine */
        case 1: /* short self test */
        case 2: /* extended self test */
            /* self-test log wraps after 21 entries of 24 bytes each */
            s->smart_selftest_count++;
            if (s->smart_selftest_count > 21) {
                s->smart_selftest_count = 1;
            n = 2 + (s->smart_selftest_count - 1) * 24;
            s->smart_selftest_data[n] = s->sector;
            s->smart_selftest_data[n + 1] = 0x00; /* OK and finished */
            s->smart_selftest_data[n + 2] = 0x34; /* hour count lsb */
            s->smart_selftest_data[n + 3] = 0x12; /* hour count msb */
    /* unknown subcommand: abort (default label elided in excerpt) */
    ide_abort_command(s);
/* Per-drive-kind permission masks for ide_cmd_table[].flags; tested as
 * (1u << s->drive_kind) in ide_cmd_permitted(). */
#define HD_OK (1u << IDE_HD)
#define CD_OK (1u << IDE_CD)
#define CFA_OK (1u << IDE_CFATA)
#define HD_CFA_OK (HD_OK | CFA_OK)
#define ALL_OK (HD_OK | CD_OK | CFA_OK)

/* Set the Disk Seek Completed status bit during completion */
#define SET_DSC (1u << 8)
/* See ACS-2 T13/2015-D Table B.2 Command codes */
/* Opcode-indexed dispatch table: each entry pairs a handler with a mask
 * of drive kinds allowed to execute it (plus the SET_DSC flag). */
static const struct {
    /* Returns true if the completion code should be run */
    bool (*handler)(IDEState *s, uint8_t cmd);
    /* NOTE(review): the flags member declaration is elided from this
     * excerpt; it is referenced as ide_cmd_table[cmd].flags below. */
} ide_cmd_table[0x100] = {
    /* NOP not implemented, mandatory for CD */
    [CFA_REQ_EXT_ERROR_CODE]      = { cmd_cfa_req_ext_error_code, CFA_OK },
    [WIN_DSM]                     = { cmd_data_set_management, HD_CFA_OK },
    [WIN_DEVICE_RESET]            = { cmd_device_reset, CD_OK },
    [WIN_RECAL]                   = { cmd_nop, HD_CFA_OK | SET_DSC},
    [WIN_READ]                    = { cmd_read_pio, ALL_OK },
    [WIN_READ_ONCE]               = { cmd_read_pio, HD_CFA_OK },
    [WIN_READ_EXT]                = { cmd_read_pio, HD_CFA_OK },
    [WIN_READDMA_EXT]             = { cmd_read_dma, HD_CFA_OK },
    [WIN_READ_NATIVE_MAX_EXT]     = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
    [WIN_MULTREAD_EXT]            = { cmd_read_multiple, HD_CFA_OK },
    [WIN_WRITE]                   = { cmd_write_pio, HD_CFA_OK },
    [WIN_WRITE_ONCE]              = { cmd_write_pio, HD_CFA_OK },
    [WIN_WRITE_EXT]               = { cmd_write_pio, HD_CFA_OK },
    [WIN_WRITEDMA_EXT]            = { cmd_write_dma, HD_CFA_OK },
    [CFA_WRITE_SECT_WO_ERASE]     = { cmd_write_pio, CFA_OK },
    [WIN_MULTWRITE_EXT]           = { cmd_write_multiple, HD_CFA_OK },
    [WIN_WRITE_VERIFY]            = { cmd_write_pio, HD_CFA_OK },
    [WIN_VERIFY]                  = { cmd_verify, HD_CFA_OK | SET_DSC },
    [WIN_VERIFY_ONCE]             = { cmd_verify, HD_CFA_OK | SET_DSC },
    [WIN_VERIFY_EXT]              = { cmd_verify, HD_CFA_OK | SET_DSC },
    [WIN_SEEK]                    = { cmd_seek, HD_CFA_OK | SET_DSC },
    [CFA_TRANSLATE_SECTOR]        = { cmd_cfa_translate_sector, CFA_OK },
    [WIN_DIAGNOSE]                = { cmd_exec_dev_diagnostic, ALL_OK },
    [WIN_SPECIFY]                 = { cmd_nop, HD_CFA_OK | SET_DSC },
    [WIN_STANDBYNOW2]             = { cmd_nop, HD_CFA_OK },
    [WIN_IDLEIMMEDIATE2]          = { cmd_nop, HD_CFA_OK },
    [WIN_STANDBY2]                = { cmd_nop, HD_CFA_OK },
    [WIN_SETIDLE2]                = { cmd_nop, HD_CFA_OK },
    [WIN_CHECKPOWERMODE2]         = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
    [WIN_SLEEPNOW2]               = { cmd_nop, HD_CFA_OK },
    [WIN_PACKETCMD]               = { cmd_packet, CD_OK },
    [WIN_PIDENTIFY]               = { cmd_identify_packet, CD_OK },
    [WIN_SMART]                   = { cmd_smart, HD_CFA_OK | SET_DSC },
    [CFA_ACCESS_METADATA_STORAGE] = { cmd_cfa_access_metadata_storage, CFA_OK },
    [CFA_ERASE_SECTORS]           = { cmd_cfa_erase_sectors, CFA_OK | SET_DSC },
    [WIN_MULTREAD]                = { cmd_read_multiple, HD_CFA_OK },
    [WIN_MULTWRITE]               = { cmd_write_multiple, HD_CFA_OK },
    [WIN_SETMULT]                 = { cmd_set_multiple_mode, HD_CFA_OK | SET_DSC },
    [WIN_READDMA]                 = { cmd_read_dma, HD_CFA_OK },
    [WIN_READDMA_ONCE]            = { cmd_read_dma, HD_CFA_OK },
    [WIN_WRITEDMA]                = { cmd_write_dma, HD_CFA_OK },
    [WIN_WRITEDMA_ONCE]           = { cmd_write_dma, HD_CFA_OK },
    [CFA_WRITE_MULTI_WO_ERASE]    = { cmd_write_multiple, CFA_OK },
    [WIN_STANDBYNOW1]             = { cmd_nop, HD_CFA_OK },
    [WIN_IDLEIMMEDIATE]           = { cmd_nop, HD_CFA_OK },
    [WIN_STANDBY]                 = { cmd_nop, HD_CFA_OK },
    [WIN_SETIDLE1]                = { cmd_nop, HD_CFA_OK },
    [WIN_CHECKPOWERMODE1]         = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
    [WIN_SLEEPNOW1]               = { cmd_nop, HD_CFA_OK },
    [WIN_FLUSH_CACHE]             = { cmd_flush_cache, ALL_OK },
    [WIN_FLUSH_CACHE_EXT]         = { cmd_flush_cache, HD_CFA_OK },
    [WIN_IDENTIFY]                = { cmd_identify, ALL_OK },
    [WIN_SETFEATURES]             = { cmd_set_features, ALL_OK | SET_DSC },
    [IBM_SENSE_CONDITION]         = { cmd_ibm_sense_condition, CFA_OK | SET_DSC },
    [CFA_WEAR_LEVEL]              = { cmd_cfa_erase_sectors, HD_CFA_OK | SET_DSC },
    [WIN_READ_NATIVE_MAX]         = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
1860 static bool ide_cmd_permitted(IDEState *s, uint32_t cmd)
1862 return cmd < ARRAY_SIZE(ide_cmd_table)
1863 && (ide_cmd_table[cmd].flags & (1u << s->drive_kind));
1866 void ide_exec_cmd(IDEBus *bus, uint32_t val)
1871 #if defined(DEBUG_IDE)
1872 printf("ide: CMD=%02x\n", val);
1874 s = idebus_active_if(bus);
1875 /* ignore commands to non existent slave */
1876 if (s != bus->ifs && !s->blk) {
1880 /* Only DEVICE RESET is allowed while BSY or/and DRQ are set */
1881 if ((s->status & (BUSY_STAT|DRQ_STAT)) && val != WIN_DEVICE_RESET)
1884 if (!ide_cmd_permitted(s, val)) {
1885 ide_abort_command(s);
1886 ide_set_irq(s->bus);
1890 s->status = READY_STAT | BUSY_STAT;
1892 s->io_buffer_offset = 0;
1894 complete = ide_cmd_table[val].handler(s, val);
1896 s->status &= ~BUSY_STAT;
1897 assert(!!s->error == !!(s->status & ERR_STAT));
1899 if ((ide_cmd_table[val].flags & SET_DSC) && !s->error) {
1900 s->status |= SEEK_STAT;
1904 ide_set_irq(s->bus);
/*
 * Read one of the eight task-file registers (selected by the low bits of
 * @addr1).  Registers echo their "HOB" shadow copies for the LBA48 high
 * bytes; most reads return 0 when neither drive on the bus has a backing
 * block device.
 *
 * NOTE(review): the register-select switch skeleton (case labels, the
 * hob test, default arm and return) is elided from this excerpt; only
 * representative arms are visible and kept byte-identical.
 */
uint32_t ide_ioport_read(void *opaque, uint32_t addr1)
    IDEBus *bus = opaque;
    IDEState *s = idebus_active_if(bus);
    /* FIXME: HOB readback uses bit 7, but it's always set right now */
    //hob = s->select & (1 << 7);
    /* data/error register: needs a responding device on the bus */
    if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
        (s != bus->ifs && !s->blk)) {
        ret = s->hob_feature;
    if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
        ret = s->nsector & 0xff;
        ret = s->hob_nsector;
    if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
        ret = s->hob_sector;
    if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
    if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
    if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
    /* status register read: also de-asserts the IRQ line below */
    if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
        (s != bus->ifs && !s->blk)) {
    qemu_irq_lower(bus->irq);
    printf("ide: read addr=0x%x val=%02x\n", addr1, ret);
1993 uint32_t ide_status_read(void *opaque, uint32_t addr)
1995 IDEBus *bus = opaque;
1996 IDEState *s = idebus_active_if(bus);
1999 if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2000 (s != bus->ifs && !s->blk)) {
2006 printf("ide: read status addr=0x%x val=%02x\n", addr, ret);
2011 void ide_cmd_write(void *opaque, uint32_t addr, uint32_t val)
2013 IDEBus *bus = opaque;
2018 printf("ide: write control addr=0x%x val=%02x\n", addr, val);
2020 /* common for both drives */
2021 if (!(bus->cmd & IDE_CMD_RESET) &&
2022 (val & IDE_CMD_RESET)) {
2023 /* reset low to high */
2024 for(i = 0;i < 2; i++) {
2026 s->status = BUSY_STAT | SEEK_STAT;
2029 } else if ((bus->cmd & IDE_CMD_RESET) &&
2030 !(val & IDE_CMD_RESET)) {
2032 for(i = 0;i < 2; i++) {
2034 if (s->drive_kind == IDE_CD)
2035 s->status = 0x00; /* NOTE: READY is _not_ set */
2037 s->status = READY_STAT | SEEK_STAT;
2038 ide_set_signature(s);
2046 * Returns true if the running PIO transfer is a PIO out (i.e. data is
2047 * transferred from the device to the guest), false if it's a PIO in
2049 static bool ide_is_pio_out(IDEState *s)
2051 if (s->end_transfer_func == ide_sector_write ||
2052 s->end_transfer_func == ide_atapi_cmd) {
2054 } else if (s->end_transfer_func == ide_sector_read ||
2055 s->end_transfer_func == ide_transfer_stop ||
2056 s->end_transfer_func == ide_atapi_cmd_reply_end ||
2057 s->end_transfer_func == ide_dummy_transfer_stop) {
2064 void ide_data_writew(void *opaque, uint32_t addr, uint32_t val)
2066 IDEBus *bus = opaque;
2067 IDEState *s = idebus_active_if(bus);
2070 /* PIO data access allowed only when DRQ bit is set. The result of a write
2071 * during PIO out is indeterminate, just ignore it. */
2072 if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2077 if (p + 2 > s->data_end) {
2081 *(uint16_t *)p = le16_to_cpu(val);
2084 if (p >= s->data_end) {
2085 s->status &= ~DRQ_STAT;
2086 s->end_transfer_func(s);
2090 uint32_t ide_data_readw(void *opaque, uint32_t addr)
2092 IDEBus *bus = opaque;
2093 IDEState *s = idebus_active_if(bus);
2097 /* PIO data access allowed only when DRQ bit is set. The result of a read
2098 * during PIO in is indeterminate, return 0 and don't move forward. */
2099 if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2104 if (p + 2 > s->data_end) {
2108 ret = cpu_to_le16(*(uint16_t *)p);
2111 if (p >= s->data_end) {
2112 s->status &= ~DRQ_STAT;
2113 s->end_transfer_func(s);
2118 void ide_data_writel(void *opaque, uint32_t addr, uint32_t val)
2120 IDEBus *bus = opaque;
2121 IDEState *s = idebus_active_if(bus);
2124 /* PIO data access allowed only when DRQ bit is set. The result of a write
2125 * during PIO out is indeterminate, just ignore it. */
2126 if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2131 if (p + 4 > s->data_end) {
2135 *(uint32_t *)p = le32_to_cpu(val);
2138 if (p >= s->data_end) {
2139 s->status &= ~DRQ_STAT;
2140 s->end_transfer_func(s);
2144 uint32_t ide_data_readl(void *opaque, uint32_t addr)
2146 IDEBus *bus = opaque;
2147 IDEState *s = idebus_active_if(bus);
2151 /* PIO data access allowed only when DRQ bit is set. The result of a read
2152 * during PIO in is indeterminate, return 0 and don't move forward. */
2153 if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2158 if (p + 4 > s->data_end) {
2162 ret = cpu_to_le32(*(uint32_t *)p);
2165 if (p >= s->data_end) {
2166 s->status &= ~DRQ_STAT;
2167 s->end_transfer_func(s);
2172 static void ide_dummy_transfer_stop(IDEState *s)
2174 s->data_ptr = s->io_buffer;
2175 s->data_end = s->io_buffer;
2176 s->io_buffer[0] = 0xff;
2177 s->io_buffer[1] = 0xff;
2178 s->io_buffer[2] = 0xff;
2179 s->io_buffer[3] = 0xff;
/*
 * Reset a single drive interface to power-on state: cancel any pending
 * PIO aio, reset the multiple-sector setting, clear task-file and ATAPI
 * state, and park the transfer handler on the dummy stop function.
 *
 * NOTE(review): long runs of field resets (task-file registers, sense
 * data, etc.) are elided from this excerpt; visible lines kept as-is.
 */
static void ide_reset(IDEState *s)
    printf("ide: reset\n");
    /* cancel an in-flight PIO request before touching state */
    blk_aio_cancel(s->pio_aiocb);
    s->pio_aiocb = NULL;
    /* CFA devices start with multiple mode disabled */
    if (s->drive_kind == IDE_CFATA)
        s->mult_sectors = 0;
        s->mult_sectors = MAX_MULT_SECTORS;
    s->status = READY_STAT | SEEK_STAT;
    /* ATAPI specific */
    s->cdrom_changed = 0;
    s->packet_transfer_size = 0;
    s->elementary_transfer_size = 0;
    s->io_buffer_index = 0;
    s->cd_sector_size = 0;
    s->io_buffer_size = 0;
    s->req_nb_sectors = 0;
    ide_set_signature(s);
    /* init the transfer handler so that 0xffff is returned on data
    s->end_transfer_func = ide_dummy_transfer_stop;
    ide_dummy_transfer_stop(s);
    s->media_changed = 0;
2240 void ide_bus_reset(IDEBus *bus)
2244 ide_reset(&bus->ifs[0]);
2245 ide_reset(&bus->ifs[1]);
2248 /* pending async DMA */
2249 if (bus->dma->aiocb) {
2251 printf("aio_cancel\n");
2253 blk_aio_cancel(bus->dma->aiocb);
2254 bus->dma->aiocb = NULL;
2257 /* reset dma provider too */
2258 if (bus->dma->ops->reset) {
2259 bus->dma->ops->reset(bus->dma);
2263 static bool ide_cd_is_tray_open(void *opaque)
2265 return ((IDEState *)opaque)->tray_open;
2268 static bool ide_cd_is_medium_locked(void *opaque)
2270 return ((IDEState *)opaque)->tray_locked;
2273 static void ide_resize_cb(void *opaque)
2275 IDEState *s = opaque;
2276 uint64_t nb_sectors;
2278 if (!s->identify_set) {
2282 blk_get_geometry(s->blk, &nb_sectors);
2283 s->nb_sectors = nb_sectors;
2285 /* Update the identify data buffer. */
2286 if (s->drive_kind == IDE_CFATA) {
2287 ide_cfata_identify_size(s);
2289 /* IDE_CD uses a different set of callbacks entirely. */
2290 assert(s->drive_kind != IDE_CD);
2291 ide_identify_size(s);
2295 static const BlockDevOps ide_cd_block_ops = {
2296 .change_media_cb = ide_cd_change_cb,
2297 .eject_request_cb = ide_cd_eject_request_cb,
2298 .is_tray_open = ide_cd_is_tray_open,
2299 .is_medium_locked = ide_cd_is_medium_locked,
2302 static const BlockDevOps ide_hd_block_ops = {
2303 .resize_cb = ide_resize_cb,
/*
 * Bind a BlockBackend to an IDEState and initialise drive identity:
 * geometry, SMART defaults, dev-ops callbacks, serial/model/version
 * strings.  Returns 0 on success, nonzero on error (empty or read-only
 * media for non-CD kinds).
 *
 * NOTE(review): several interior lines are elided in this excerpt
 * (remaining parameters, heads/secs assignment, error-return paths,
 * the kind switch's case labels and the final return); visible lines
 * are kept byte-identical.
 */
int ide_init_drive(IDEState *s, BlockBackend *blk, IDEDriveKind kind,
                   const char *version, const char *serial, const char *model,
                   uint32_t cylinders, uint32_t heads, uint32_t secs,
    uint64_t nb_sectors;
    s->drive_kind = kind;
    blk_get_geometry(blk, &nb_sectors);
    s->cylinders = cylinders;
    s->chs_trans = chs_trans;
    s->nb_sectors = nb_sectors;
    /* The SMART values should be preserved across power cycles
    s->smart_enabled = 1;
    s->smart_autosave = 1;
    s->smart_errors = 0;
    s->smart_selftest_count = 0;
    if (kind == IDE_CD) {
        blk_set_dev_ops(blk, &ide_cd_block_ops, s);
        /* CD-ROMs expose 2048-byte logical blocks to the guest */
        blk_set_guest_block_size(blk, 2048);
        /* non-CD drives must have present, writable media */
        if (!blk_is_inserted(s->blk)) {
            error_report("Device needs media, but drive is empty");
        if (blk_is_read_only(blk)) {
            error_report("Can't use a read-only drive");
        blk_set_dev_ops(blk, &ide_hd_block_ops, s);
        pstrcpy(s->drive_serial_str, sizeof(s->drive_serial_str), serial);
        /* no serial given: synthesise one from the running counter */
        snprintf(s->drive_serial_str, sizeof(s->drive_serial_str),
                 "QM%05d", s->drive_serial);
        pstrcpy(s->drive_model_str, sizeof(s->drive_model_str), model);
        /* default model strings per drive kind (labels elided) */
        strcpy(s->drive_model_str, "QEMU DVD-ROM");
        strcpy(s->drive_model_str, "QEMU MICRODRIVE");
        strcpy(s->drive_model_str, "QEMU HARDDISK");
        pstrcpy(s->version, sizeof(s->version), version);
        pstrcpy(s->version, sizeof(s->version), qemu_hw_version());
    blk_iostatus_enable(blk);
2377 static void ide_init1(IDEBus *bus, int unit)
2379 static int drive_serial = 1;
2380 IDEState *s = &bus->ifs[unit];
2384 s->drive_serial = drive_serial++;
2385 /* we need at least 2k alignment for accessing CDROMs using O_DIRECT */
2386 s->io_buffer_total_len = IDE_DMA_BUF_SECTORS*512 + 4;
2387 s->io_buffer = qemu_memalign(2048, s->io_buffer_total_len);
2388 memset(s->io_buffer, 0, s->io_buffer_total_len);
2390 s->smart_selftest_data = blk_blockalign(s->blk, 512);
2391 memset(s->smart_selftest_data, 0, 512);
2393 s->sector_write_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
2394 ide_sector_write_timer_cb, s);
2397 static int ide_nop_int(IDEDMA *dma, int x)
2402 static void ide_nop(IDEDMA *dma)
2406 static int32_t ide_nop_int32(IDEDMA *dma, int32_t l)
2411 static const IDEDMAOps ide_dma_nop_ops = {
2412 .prepare_buf = ide_nop_int32,
2413 .restart_dma = ide_nop,
2414 .rw_buf = ide_nop_int,
2417 static void ide_restart_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
2419 s->unit = s->bus->retry_unit;
2420 ide_set_sector(s, s->bus->retry_sector_num);
2421 s->nsector = s->bus->retry_nsector;
2422 s->bus->dma->ops->restart_dma(s->bus->dma);
2423 s->io_buffer_size = 0;
2424 s->dma_cmd = dma_cmd;
2425 ide_start_dma(s, ide_dma_cb);
2428 static void ide_restart_bh(void *opaque)
2430 IDEBus *bus = opaque;
2435 qemu_bh_delete(bus->bh);
2438 error_status = bus->error_status;
2439 if (bus->error_status == 0) {
2443 s = idebus_active_if(bus);
2444 is_read = (bus->error_status & IDE_RETRY_READ) != 0;
2446 /* The error status must be cleared before resubmitting the request: The
2447 * request may fail again, and this case can only be distinguished if the
2448 * called function can set a new error status. */
2449 bus->error_status = 0;
2451 /* The HBA has generically asked to be kicked on retry */
2452 if (error_status & IDE_RETRY_HBA) {
2453 if (s->bus->dma->ops->restart) {
2454 s->bus->dma->ops->restart(s->bus->dma);
2458 if (error_status & IDE_RETRY_DMA) {
2459 if (error_status & IDE_RETRY_TRIM) {
2460 ide_restart_dma(s, IDE_DMA_TRIM);
2462 ide_restart_dma(s, is_read ? IDE_DMA_READ : IDE_DMA_WRITE);
2464 } else if (error_status & IDE_RETRY_PIO) {
2468 ide_sector_write(s);
2470 } else if (error_status & IDE_RETRY_FLUSH) {
2474 * We've not got any bits to tell us about ATAPI - but
2475 * we do have the end_transfer_func that tells us what
2476 * we're trying to do.
2478 if (s->end_transfer_func == ide_atapi_cmd) {
2479 ide_atapi_dma_restart(s);
2484 static void ide_restart_cb(void *opaque, int running, RunState state)
2486 IDEBus *bus = opaque;
2492 bus->bh = qemu_bh_new(ide_restart_bh, bus);
2493 qemu_bh_schedule(bus->bh);
2497 void ide_register_restart_cb(IDEBus *bus)
2499 if (bus->dma->ops->restart_dma) {
2500 qemu_add_vm_change_state_handler(ide_restart_cb, bus);
2504 static IDEDMA ide_dma_nop = {
2505 .ops = &ide_dma_nop_ops,
2509 void ide_init2(IDEBus *bus, qemu_irq irq)
2513 for(i = 0; i < 2; i++) {
2515 ide_reset(&bus->ifs[i]);
2518 bus->dma = &ide_dma_nop;
2521 static const MemoryRegionPortio ide_portio_list[] = {
2522 { 0, 8, 1, .read = ide_ioport_read, .write = ide_ioport_write },
2523 { 0, 1, 2, .read = ide_data_readw, .write = ide_data_writew },
2524 { 0, 1, 4, .read = ide_data_readl, .write = ide_data_writel },
2525 PORTIO_END_OF_LIST(),
2528 static const MemoryRegionPortio ide_portio2_list[] = {
2529 { 0, 1, 1, .read = ide_status_read, .write = ide_cmd_write },
2530 PORTIO_END_OF_LIST(),
2533 void ide_init_ioport(IDEBus *bus, ISADevice *dev, int iobase, int iobase2)
2535 /* ??? Assume only ISA and PCI configurations, and that the PCI-ISA
2536 bridge has been setup properly to always register with ISA. */
2537 isa_register_portio_list(dev, iobase, ide_portio_list, bus, "ide");
2540 isa_register_portio_list(dev, iobase2, ide_portio2_list, bus, "ide");
2544 static bool is_identify_set(void *opaque, int version_id)
2546 IDEState *s = opaque;
2548 return s->identify_set != 0;
/* Maps end_transfer_func pointers to small integers for migration
 * (end_transfer_fn_idx).  The index order is migration ABI — do not
 * reorder.  NOTE(review): several entries are elided in this excerpt;
 * only two of the table's functions are visible. */
static EndTransferFunc* transfer_end_table[] = {
    ide_atapi_cmd_reply_end,
    ide_dummy_transfer_stop,
};
2560 static int transfer_end_table_idx(EndTransferFunc *fn)
2564 for (i = 0; i < ARRAY_SIZE(transfer_end_table); i++)
2565 if (transfer_end_table[i] == fn)
2571 static int ide_drive_post_load(void *opaque, int version_id)
2573 IDEState *s = opaque;
2575 if (s->blk && s->identify_set) {
2576 blk_set_enable_write_cache(s->blk, !!(s->identify_data[85] & (1 << 5)));
2581 static int ide_drive_pio_post_load(void *opaque, int version_id)
2583 IDEState *s = opaque;
2585 if (s->end_transfer_fn_idx >= ARRAY_SIZE(transfer_end_table)) {
2588 s->end_transfer_func = transfer_end_table[s->end_transfer_fn_idx];
2589 s->data_ptr = s->io_buffer + s->cur_io_buffer_offset;
2590 s->data_end = s->data_ptr + s->cur_io_buffer_len;
2591 s->atapi_dma = s->feature & 1; /* as per cmd_packet */
2596 static void ide_drive_pio_pre_save(void *opaque)
2598 IDEState *s = opaque;
2601 s->cur_io_buffer_offset = s->data_ptr - s->io_buffer;
2602 s->cur_io_buffer_len = s->data_end - s->data_ptr;
2604 idx = transfer_end_table_idx(s->end_transfer_func);
2606 fprintf(stderr, "%s: invalid end_transfer_func for DRQ_STAT\n",
2608 s->end_transfer_fn_idx = 2;
2610 s->end_transfer_fn_idx = idx;
2614 static bool ide_drive_pio_state_needed(void *opaque)
2616 IDEState *s = opaque;
2618 return ((s->status & DRQ_STAT) != 0)
2619 || (s->bus->error_status & IDE_RETRY_PIO);
2622 static bool ide_tray_state_needed(void *opaque)
2624 IDEState *s = opaque;
2626 return s->tray_open || s->tray_locked;
2629 static bool ide_atapi_gesn_needed(void *opaque)
2631 IDEState *s = opaque;
2633 return s->events.new_media || s->events.eject_request;
2636 static bool ide_error_needed(void *opaque)
2638 IDEBus *bus = opaque;
2640 return (bus->error_status != 0);
2643 /* Fields for GET_EVENT_STATUS_NOTIFICATION ATAPI command */
2644 static const VMStateDescription vmstate_ide_atapi_gesn_state = {
2645 .name ="ide_drive/atapi/gesn_state",
2647 .minimum_version_id = 1,
2648 .needed = ide_atapi_gesn_needed,
2649 .fields = (VMStateField[]) {
2650 VMSTATE_BOOL(events.new_media, IDEState),
2651 VMSTATE_BOOL(events.eject_request, IDEState),
2652 VMSTATE_END_OF_LIST()
2656 static const VMStateDescription vmstate_ide_tray_state = {
2657 .name = "ide_drive/tray_state",
2659 .minimum_version_id = 1,
2660 .needed = ide_tray_state_needed,
2661 .fields = (VMStateField[]) {
2662 VMSTATE_BOOL(tray_open, IDEState),
2663 VMSTATE_BOOL(tray_locked, IDEState),
2664 VMSTATE_END_OF_LIST()
2668 static const VMStateDescription vmstate_ide_drive_pio_state = {
2669 .name = "ide_drive/pio_state",
2671 .minimum_version_id = 1,
2672 .pre_save = ide_drive_pio_pre_save,
2673 .post_load = ide_drive_pio_post_load,
2674 .needed = ide_drive_pio_state_needed,
2675 .fields = (VMStateField[]) {
2676 VMSTATE_INT32(req_nb_sectors, IDEState),
2677 VMSTATE_VARRAY_INT32(io_buffer, IDEState, io_buffer_total_len, 1,
2678 vmstate_info_uint8, uint8_t),
2679 VMSTATE_INT32(cur_io_buffer_offset, IDEState),
2680 VMSTATE_INT32(cur_io_buffer_len, IDEState),
2681 VMSTATE_UINT8(end_transfer_fn_idx, IDEState),
2682 VMSTATE_INT32(elementary_transfer_size, IDEState),
2683 VMSTATE_INT32(packet_transfer_size, IDEState),
2684 VMSTATE_END_OF_LIST()
2688 const VMStateDescription vmstate_ide_drive = {
2689 .name = "ide_drive",
2691 .minimum_version_id = 0,
2692 .post_load = ide_drive_post_load,
2693 .fields = (VMStateField[]) {
2694 VMSTATE_INT32(mult_sectors, IDEState),
2695 VMSTATE_INT32(identify_set, IDEState),
2696 VMSTATE_BUFFER_TEST(identify_data, IDEState, is_identify_set),
2697 VMSTATE_UINT8(feature, IDEState),
2698 VMSTATE_UINT8(error, IDEState),
2699 VMSTATE_UINT32(nsector, IDEState),
2700 VMSTATE_UINT8(sector, IDEState),
2701 VMSTATE_UINT8(lcyl, IDEState),
2702 VMSTATE_UINT8(hcyl, IDEState),
2703 VMSTATE_UINT8(hob_feature, IDEState),
2704 VMSTATE_UINT8(hob_sector, IDEState),
2705 VMSTATE_UINT8(hob_nsector, IDEState),
2706 VMSTATE_UINT8(hob_lcyl, IDEState),
2707 VMSTATE_UINT8(hob_hcyl, IDEState),
2708 VMSTATE_UINT8(select, IDEState),
2709 VMSTATE_UINT8(status, IDEState),
2710 VMSTATE_UINT8(lba48, IDEState),
2711 VMSTATE_UINT8(sense_key, IDEState),
2712 VMSTATE_UINT8(asc, IDEState),
2713 VMSTATE_UINT8_V(cdrom_changed, IDEState, 3),
2714 VMSTATE_END_OF_LIST()
2716 .subsections = (const VMStateDescription*[]) {
2717 &vmstate_ide_drive_pio_state,
2718 &vmstate_ide_tray_state,
2719 &vmstate_ide_atapi_gesn_state,
2724 static const VMStateDescription vmstate_ide_error_status = {
2725 .name ="ide_bus/error",
2727 .minimum_version_id = 1,
2728 .needed = ide_error_needed,
2729 .fields = (VMStateField[]) {
2730 VMSTATE_INT32(error_status, IDEBus),
2731 VMSTATE_INT64_V(retry_sector_num, IDEBus, 2),
2732 VMSTATE_UINT32_V(retry_nsector, IDEBus, 2),
2733 VMSTATE_UINT8_V(retry_unit, IDEBus, 2),
2734 VMSTATE_END_OF_LIST()
2738 const VMStateDescription vmstate_ide_bus = {
2741 .minimum_version_id = 1,
2742 .fields = (VMStateField[]) {
2743 VMSTATE_UINT8(cmd, IDEBus),
2744 VMSTATE_UINT8(unit, IDEBus),
2745 VMSTATE_END_OF_LIST()
2747 .subsections = (const VMStateDescription*[]) {
2748 &vmstate_ide_error_status,
2753 void ide_drive_get(DriveInfo **hd, int n)
2756 int highest_bus = drive_get_max_bus(IF_IDE) + 1;
2757 int max_devs = drive_get_max_devs(IF_IDE);
2758 int n_buses = max_devs ? (n / max_devs) : n;
2761 * Note: The number of actual buses available is not known.
2762 * We compute this based on the size of the DriveInfo* array, n.
2763 * If it is less than max_devs * <num_real_buses>,
2764 * We will stop looking for drives prematurely instead of overfilling
2768 if (highest_bus > n_buses) {
2769 error_report("Too many IDE buses defined (%d > %d)",
2770 highest_bus, n_buses);
2774 for (i = 0; i < n; i++) {
2775 hd[i] = drive_get_by_index(IF_IDE, i);