1/*
2 * QEMU IDE disk and CD/DVD-ROM Emulator
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2006 Openedhand Ltd.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a copy
8 * of this software and associated documentation files (the "Software"), to deal
9 * in the Software without restriction, including without limitation the rights
10 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 * copies of the Software, and to permit persons to whom the Software is
12 * furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23 * THE SOFTWARE.
24 */
25#include "qemu/osdep.h"
26#include "hw/hw.h"
27#include "hw/i386/pc.h"
28#include "hw/pci/pci.h"
29#include "hw/isa/isa.h"
30#include "qemu/error-report.h"
31#include "qemu/timer.h"
32#include "sysemu/sysemu.h"
33#include "sysemu/dma.h"
34#include "hw/block/block.h"
35#include "sysemu/block-backend.h"
36#include "qemu/cutils.h"
37
38#include "hw/ide/internal.h"
39#include "trace.h"
40
41/* These values were based on a Seagate ST3500418AS but have been modified
42 to make more sense in QEMU */
43static const int smart_attributes[][12] = {
44 /* id, flags, hflags, val, wrst, raw (6 bytes), threshold */
45 /* raw read error rate*/
46 { 0x01, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06},
47 /* spin up */
48 { 0x03, 0x03, 0x00, 0x64, 0x64, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
49 /* start stop count */
50 { 0x04, 0x02, 0x00, 0x64, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14},
51 /* remapped sectors */
52 { 0x05, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24},
53 /* power on hours */
54 { 0x09, 0x03, 0x00, 0x64, 0x64, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
55 /* power cycle count */
56 { 0x0c, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
57 /* airflow-temperature-celsius */
58 { 190, 0x03, 0x00, 0x45, 0x45, 0x1f, 0x00, 0x1f, 0x1f, 0x00, 0x00, 0x32},
59};
60
61static void ide_dummy_transfer_stop(IDEState *s);
62
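/*
 * Fill an IDENTIFY-data string field: copy src, pad with spaces, and swap
 * the bytes within each 16-bit word (the i^1 index), because ATA strings
 * store the first character of every pair in the high byte.
 */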
63static void padstr(char *str, const char *src, int len)
64{
65 int i, v;
66 for(i = 0; i < len; i++) {
67 if (*src)
68 v = *src++;
69 else
70 v = ' ';
71 str[i^1] = v;
72 }
73}
74
75static void put_le16(uint16_t *p, unsigned int v)
76{
77 *p = cpu_to_le16(v);
78}
79
80static void ide_identify_size(IDEState *s)
81{
82 uint16_t *p = (uint16_t *)s->identify_data;
83 put_le16(p + 60, s->nb_sectors);
84 put_le16(p + 61, s->nb_sectors >> 16);
85 put_le16(p + 100, s->nb_sectors);
86 put_le16(p + 101, s->nb_sectors >> 16);
87 put_le16(p + 102, s->nb_sectors >> 32);
88 put_le16(p + 103, s->nb_sectors >> 48);
89}
90
91static void ide_identify(IDEState *s)
92{
93 uint16_t *p;
94 unsigned int oldsize;
95 IDEDevice *dev = s->unit ? s->bus->slave : s->bus->master;
96
97 p = (uint16_t *)s->identify_data;
98 if (s->identify_set) {
99 goto fill_buffer;
100 }
101 memset(p, 0, sizeof(s->identify_data));
102
103 put_le16(p + 0, 0x0040);
104 put_le16(p + 1, s->cylinders);
105 put_le16(p + 3, s->heads);
106 put_le16(p + 4, 512 * s->sectors); /* XXX: retired, remove ? */
107 put_le16(p + 5, 512); /* XXX: retired, remove ? */
108 put_le16(p + 6, s->sectors);
109 padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
110 put_le16(p + 20, 3); /* XXX: retired, remove ? */
111 put_le16(p + 21, 512); /* cache size in sectors */
112 put_le16(p + 22, 4); /* ecc bytes */
113 padstr((char *)(p + 23), s->version, 8); /* firmware version */
114 padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
115#if MAX_MULT_SECTORS > 1
116 put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
117#endif
118 put_le16(p + 48, 1); /* dword I/O */
 119 put_le16(p + 49, (1 << 11) | (1 << 9) | (1 << 8)); /* IORDY, DMA and LBA supported */
120 put_le16(p + 51, 0x200); /* PIO transfer cycle */
121 put_le16(p + 52, 0x200); /* DMA transfer cycle */
122 put_le16(p + 53, 1 | (1 << 1) | (1 << 2)); /* words 54-58,64-70,88 are valid */
123 put_le16(p + 54, s->cylinders);
124 put_le16(p + 55, s->heads);
125 put_le16(p + 56, s->sectors);
126 oldsize = s->cylinders * s->heads * s->sectors;
127 put_le16(p + 57, oldsize);
128 put_le16(p + 58, oldsize >> 16);
129 if (s->mult_sectors)
130 put_le16(p + 59, 0x100 | s->mult_sectors);
131 /* *(p + 60) := nb_sectors -- see ide_identify_size */
132 /* *(p + 61) := nb_sectors >> 16 -- see ide_identify_size */
133 put_le16(p + 62, 0x07); /* single word dma0-2 supported */
134 put_le16(p + 63, 0x07); /* mdma0-2 supported */
135 put_le16(p + 64, 0x03); /* pio3-4 supported */
136 put_le16(p + 65, 120);
137 put_le16(p + 66, 120);
138 put_le16(p + 67, 120);
139 put_le16(p + 68, 120);
140 if (dev && dev->conf.discard_granularity) {
 141 put_le16(p + 69, (1 << 14)); /* deterministic TRIM behavior */
142 }
143
144 if (s->ncq_queues) {
145 put_le16(p + 75, s->ncq_queues - 1);
146 /* NCQ supported */
147 put_le16(p + 76, (1 << 8));
148 }
149
150 put_le16(p + 80, 0xf0); /* ata3 -> ata6 supported */
151 put_le16(p + 81, 0x16); /* conforms to ata5 */
152 /* 14=NOP supported, 5=WCACHE supported, 0=SMART supported */
153 put_le16(p + 82, (1 << 14) | (1 << 5) | 1);
154 /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
 155 put_le16(p + 83, (1 << 14) | (1 << 13) | (1 << 12) | (1 << 10));
156 /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
157 if (s->wwn) {
158 put_le16(p + 84, (1 << 14) | (1 << 8) | 0);
159 } else {
160 put_le16(p + 84, (1 << 14) | 0);
161 }
162 /* 14 = NOP supported, 5=WCACHE enabled, 0=SMART feature set enabled */
163 if (blk_enable_write_cache(s->blk)) {
164 put_le16(p + 85, (1 << 14) | (1 << 5) | 1);
165 } else {
166 put_le16(p + 85, (1 << 14) | 1);
167 }
168 /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
 169 put_le16(p + 86, (1 << 13) | (1 << 12) | (1 << 10));
170 /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
171 if (s->wwn) {
172 put_le16(p + 87, (1 << 14) | (1 << 8) | 0);
173 } else {
174 put_le16(p + 87, (1 << 14) | 0);
175 }
176 put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
177 put_le16(p + 93, 1 | (1 << 14) | 0x2000);
178 /* *(p + 100) := nb_sectors -- see ide_identify_size */
179 /* *(p + 101) := nb_sectors >> 16 -- see ide_identify_size */
180 /* *(p + 102) := nb_sectors >> 32 -- see ide_identify_size */
181 /* *(p + 103) := nb_sectors >> 48 -- see ide_identify_size */
182
183 if (dev && dev->conf.physical_block_size)
184 put_le16(p + 106, 0x6000 | get_physical_block_exp(&dev->conf));
185 if (s->wwn) {
186 /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
187 put_le16(p + 108, s->wwn >> 48);
188 put_le16(p + 109, s->wwn >> 32);
189 put_le16(p + 110, s->wwn >> 16);
190 put_le16(p + 111, s->wwn);
191 }
192 if (dev && dev->conf.discard_granularity) {
193 put_le16(p + 169, 1); /* TRIM support */
194 }
195
196 ide_identify_size(s);
197 s->identify_set = 1;
198
199fill_buffer:
200 memcpy(s->io_buffer, p, sizeof(s->identify_data));
201}
202
203static void ide_atapi_identify(IDEState *s)
204{
205 uint16_t *p;
206
207 p = (uint16_t *)s->identify_data;
208 if (s->identify_set) {
209 goto fill_buffer;
210 }
211 memset(p, 0, sizeof(s->identify_data));
212
213 /* Removable CDROM, 50us response, 12 byte packets */
214 put_le16(p + 0, (2 << 14) | (5 << 8) | (1 << 7) | (2 << 5) | (0 << 0));
215 padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
216 put_le16(p + 20, 3); /* buffer type */
217 put_le16(p + 21, 512); /* cache size in sectors */
218 put_le16(p + 22, 4); /* ecc bytes */
219 padstr((char *)(p + 23), s->version, 8); /* firmware version */
220 padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
221 put_le16(p + 48, 1); /* dword I/O (XXX: should not be set on CDROM) */
222#ifdef USE_DMA_CDROM
223 put_le16(p + 49, 1 << 9 | 1 << 8); /* DMA and LBA supported */
224 put_le16(p + 53, 7); /* words 64-70, 54-58, 88 valid */
225 put_le16(p + 62, 7); /* single word dma0-2 supported */
226 put_le16(p + 63, 7); /* mdma0-2 supported */
227#else
228 put_le16(p + 49, 1 << 9); /* LBA supported, no DMA */
229 put_le16(p + 53, 3); /* words 64-70, 54-58 valid */
230 put_le16(p + 63, 0x103); /* DMA modes XXX: may be incorrect */
231#endif
232 put_le16(p + 64, 3); /* pio3-4 supported */
233 put_le16(p + 65, 0xb4); /* minimum DMA multiword tx cycle time */
234 put_le16(p + 66, 0xb4); /* recommended DMA multiword tx cycle time */
235 put_le16(p + 67, 0x12c); /* minimum PIO cycle time without flow control */
236 put_le16(p + 68, 0xb4); /* minimum PIO cycle time with IORDY flow control */
237
238 put_le16(p + 71, 30); /* in ns */
239 put_le16(p + 72, 30); /* in ns */
240
241 if (s->ncq_queues) {
242 put_le16(p + 75, s->ncq_queues - 1);
243 /* NCQ supported */
244 put_le16(p + 76, (1 << 8));
245 }
246
247 put_le16(p + 80, 0x1e); /* support up to ATA/ATAPI-4 */
248 if (s->wwn) {
249 put_le16(p + 84, (1 << 8)); /* supports WWN for words 108-111 */
250 put_le16(p + 87, (1 << 8)); /* WWN enabled */
251 }
252
253#ifdef USE_DMA_CDROM
254 put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
255#endif
256
257 if (s->wwn) {
258 /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
259 put_le16(p + 108, s->wwn >> 48);
260 put_le16(p + 109, s->wwn >> 32);
261 put_le16(p + 110, s->wwn >> 16);
262 put_le16(p + 111, s->wwn);
263 }
264
265 s->identify_set = 1;
266
267fill_buffer:
268 memcpy(s->io_buffer, p, sizeof(s->identify_data));
269}
270
271static void ide_cfata_identify_size(IDEState *s)
272{
273 uint16_t *p = (uint16_t *)s->identify_data;
274 put_le16(p + 7, s->nb_sectors >> 16); /* Sectors per card */
275 put_le16(p + 8, s->nb_sectors); /* Sectors per card */
276 put_le16(p + 60, s->nb_sectors); /* Total LBA sectors */
277 put_le16(p + 61, s->nb_sectors >> 16); /* Total LBA sectors */
278}
279
280static void ide_cfata_identify(IDEState *s)
281{
282 uint16_t *p;
283 uint32_t cur_sec;
284
285 p = (uint16_t *)s->identify_data;
286 if (s->identify_set) {
287 goto fill_buffer;
288 }
289 memset(p, 0, sizeof(s->identify_data));
290
291 cur_sec = s->cylinders * s->heads * s->sectors;
292
293 put_le16(p + 0, 0x848a); /* CF Storage Card signature */
294 put_le16(p + 1, s->cylinders); /* Default cylinders */
295 put_le16(p + 3, s->heads); /* Default heads */
296 put_le16(p + 6, s->sectors); /* Default sectors per track */
297 /* *(p + 7) := nb_sectors >> 16 -- see ide_cfata_identify_size */
298 /* *(p + 8) := nb_sectors -- see ide_cfata_identify_size */
299 padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
300 put_le16(p + 22, 0x0004); /* ECC bytes */
301 padstr((char *) (p + 23), s->version, 8); /* Firmware Revision */
302 padstr((char *) (p + 27), s->drive_model_str, 40);/* Model number */
303#if MAX_MULT_SECTORS > 1
304 put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
305#else
306 put_le16(p + 47, 0x0000);
307#endif
308 put_le16(p + 49, 0x0f00); /* Capabilities */
309 put_le16(p + 51, 0x0002); /* PIO cycle timing mode */
310 put_le16(p + 52, 0x0001); /* DMA cycle timing mode */
311 put_le16(p + 53, 0x0003); /* Translation params valid */
312 put_le16(p + 54, s->cylinders); /* Current cylinders */
313 put_le16(p + 55, s->heads); /* Current heads */
314 put_le16(p + 56, s->sectors); /* Current sectors */
315 put_le16(p + 57, cur_sec); /* Current capacity */
316 put_le16(p + 58, cur_sec >> 16); /* Current capacity */
317 if (s->mult_sectors) /* Multiple sector setting */
318 put_le16(p + 59, 0x100 | s->mult_sectors);
319 /* *(p + 60) := nb_sectors -- see ide_cfata_identify_size */
320 /* *(p + 61) := nb_sectors >> 16 -- see ide_cfata_identify_size */
321 put_le16(p + 63, 0x0203); /* Multiword DMA capability */
322 put_le16(p + 64, 0x0001); /* Flow Control PIO support */
323 put_le16(p + 65, 0x0096); /* Min. Multiword DMA cycle */
324 put_le16(p + 66, 0x0096); /* Rec. Multiword DMA cycle */
325 put_le16(p + 68, 0x00b4); /* Min. PIO cycle time */
326 put_le16(p + 82, 0x400c); /* Command Set supported */
327 put_le16(p + 83, 0x7068); /* Command Set supported */
328 put_le16(p + 84, 0x4000); /* Features supported */
329 put_le16(p + 85, 0x000c); /* Command Set enabled */
330 put_le16(p + 86, 0x7044); /* Command Set enabled */
331 put_le16(p + 87, 0x4000); /* Features enabled */
332 put_le16(p + 91, 0x4060); /* Current APM level */
333 put_le16(p + 129, 0x0002); /* Current features option */
334 put_le16(p + 130, 0x0005); /* Reassigned sectors */
335 put_le16(p + 131, 0x0001); /* Initial power mode */
336 put_le16(p + 132, 0x0000); /* User signature */
337 put_le16(p + 160, 0x8100); /* Power requirement */
338 put_le16(p + 161, 0x8001); /* CF command set */
339
340 ide_cfata_identify_size(s);
341 s->identify_set = 1;
342
343fill_buffer:
344 memcpy(s->io_buffer, p, sizeof(s->identify_data));
345}
346
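/*
 * Load the ATA/ATAPI device signature into the task file registers:
 * cylinder 0xEB14 identifies a PACKET (ATAPI) device, 0x0000 an ATA disk,
 * and 0xFFFF is used when no backing storage is attached.
 */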
347static void ide_set_signature(IDEState *s)
348{
349 s->select &= 0xf0; /* clear head */
350 /* put signature */
351 s->nsector = 1;
352 s->sector = 1;
353 if (s->drive_kind == IDE_CD) {
354 s->lcyl = 0x14;
355 s->hcyl = 0xeb;
356 } else if (s->blk) {
357 s->lcyl = 0;
358 s->hcyl = 0;
359 } else {
360 s->lcyl = 0xff;
361 s->hcyl = 0xff;
362 }
363}
364
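/*
 * State of one in-flight DSM TRIM operation: j indexes the current iovec of
 * the guest-supplied range list, i the current 8-byte range entry within it,
 * and ret records the first error (or -ECANCELED on cancellation).
 */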
365typedef struct TrimAIOCB {
366 BlockAIOCB common;
367 BlockBackend *blk;
368 QEMUBH *bh;
369 int ret;
370 QEMUIOVector *qiov;
371 BlockAIOCB *aiocb;
372 int i, j;
373} TrimAIOCB;
374
375static void trim_aio_cancel(BlockAIOCB *acb)
376{
377 TrimAIOCB *iocb = container_of(acb, TrimAIOCB, common);
378
379 /* Exit the loop so ide_issue_trim_cb will not continue */
380 iocb->j = iocb->qiov->niov - 1;
381 iocb->i = (iocb->qiov->iov[iocb->j].iov_len / 8) - 1;
382
383 iocb->ret = -ECANCELED;
384
385 if (iocb->aiocb) {
386 blk_aio_cancel_async(iocb->aiocb);
387 iocb->aiocb = NULL;
388 }
389}
390
391static const AIOCBInfo trim_aiocb_info = {
392 .aiocb_size = sizeof(TrimAIOCB),
393 .cancel_async = trim_aio_cancel,
394};
395
396static void ide_trim_bh_cb(void *opaque)
397{
398 TrimAIOCB *iocb = opaque;
399
400 iocb->common.cb(iocb->common.opaque, iocb->ret);
401
402 qemu_bh_delete(iocb->bh);
403 iocb->bh = NULL;
404 qemu_aio_unref(iocb);
405}
406
407static void ide_issue_trim_cb(void *opaque, int ret)
408{
409 TrimAIOCB *iocb = opaque;
410 if (ret >= 0) {
411 while (iocb->j < iocb->qiov->niov) {
412 int j = iocb->j;
413 while (++iocb->i < iocb->qiov->iov[j].iov_len / 8) {
414 int i = iocb->i;
415 uint64_t *buffer = iocb->qiov->iov[j].iov_base;
416
417 /* 6-byte LBA + 2-byte range per entry */
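 /* e.g. an entry of 0x0008000000001000 covers 8 sectors from LBA 0x1000 */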
418 uint64_t entry = le64_to_cpu(buffer[i]);
419 uint64_t sector = entry & 0x0000ffffffffffffULL;
420 uint16_t count = entry >> 48;
421
422 if (count == 0) {
423 continue;
424 }
425
426 /* Got an entry! Submit and exit. */
427 iocb->aiocb = blk_aio_pdiscard(iocb->blk,
428 sector << BDRV_SECTOR_BITS,
429 count << BDRV_SECTOR_BITS,
430 ide_issue_trim_cb, opaque);
431 return;
432 }
433
434 iocb->j++;
435 iocb->i = -1;
436 }
437 } else {
438 iocb->ret = ret;
439 }
440
441 iocb->aiocb = NULL;
442 if (iocb->bh) {
443 qemu_bh_schedule(iocb->bh);
444 }
445}
446
447BlockAIOCB *ide_issue_trim(
448 int64_t offset, QEMUIOVector *qiov,
449 BlockCompletionFunc *cb, void *cb_opaque, void *opaque)
450{
451 BlockBackend *blk = opaque;
452 TrimAIOCB *iocb;
453
454 iocb = blk_aio_get(&trim_aiocb_info, blk, cb, cb_opaque);
455 iocb->blk = blk;
456 iocb->bh = qemu_bh_new(ide_trim_bh_cb, iocb);
457 iocb->ret = 0;
458 iocb->qiov = qiov;
459 iocb->i = -1;
460 iocb->j = 0;
461 ide_issue_trim_cb(iocb, 0);
462 return &iocb->common;
463}
464
465void ide_abort_command(IDEState *s)
466{
467 ide_transfer_stop(s);
468 s->status = READY_STAT | ERR_STAT;
469 s->error = ABRT_ERR;
470}
471
472static void ide_set_retry(IDEState *s)
473{
474 s->bus->retry_unit = s->unit;
475 s->bus->retry_sector_num = ide_get_sector(s);
476 s->bus->retry_nsector = s->nsector;
477}
478
479static void ide_clear_retry(IDEState *s)
480{
481 s->bus->retry_unit = -1;
482 s->bus->retry_sector_num = 0;
483 s->bus->retry_nsector = 0;
484}
485
486/* prepare data transfer and tell what to do after */
487void ide_transfer_start(IDEState *s, uint8_t *buf, int size,
488 EndTransferFunc *end_transfer_func)
489{
490 s->end_transfer_func = end_transfer_func;
491 s->data_ptr = buf;
492 s->data_end = buf + size;
493 ide_set_retry(s);
494 if (!(s->status & ERR_STAT)) {
495 s->status |= DRQ_STAT;
496 }
497 if (s->bus->dma->ops->start_transfer) {
498 s->bus->dma->ops->start_transfer(s->bus->dma);
499 }
500}
501
502static void ide_cmd_done(IDEState *s)
503{
504 if (s->bus->dma->ops->cmd_done) {
505 s->bus->dma->ops->cmd_done(s->bus->dma);
506 }
507}
508
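/*
 * Terminate the current PIO transfer: point the data window at an empty
 * io_buffer, clear DRQ and, when notify is set, tell the DMA/bus layer
 * that the command has finished.
 */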
509static void ide_transfer_halt(IDEState *s,
510 void(*end_transfer_func)(IDEState *),
511 bool notify)
512{
513 s->end_transfer_func = end_transfer_func;
514 s->data_ptr = s->io_buffer;
515 s->data_end = s->io_buffer;
516 s->status &= ~DRQ_STAT;
517 if (notify) {
518 ide_cmd_done(s);
519 }
520}
521
522void ide_transfer_stop(IDEState *s)
523{
524 ide_transfer_halt(s, ide_transfer_stop, true);
525}
526
527static void ide_transfer_cancel(IDEState *s)
528{
529 ide_transfer_halt(s, ide_transfer_cancel, false);
530}
531
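/*
 * Decode the starting sector of the current command from the task file:
 * LBA48 takes the upper bytes from the HOB registers, LBA28 packs the top
 * nibble into the drive/head register, and CHS mode falls back to
 * cylinder/head/sector arithmetic.
 */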
532int64_t ide_get_sector(IDEState *s)
533{
534 int64_t sector_num;
535 if (s->select & 0x40) {
536 /* lba */
537 if (!s->lba48) {
538 sector_num = ((s->select & 0x0f) << 24) | (s->hcyl << 16) |
539 (s->lcyl << 8) | s->sector;
540 } else {
541 sector_num = ((int64_t)s->hob_hcyl << 40) |
542 ((int64_t) s->hob_lcyl << 32) |
543 ((int64_t) s->hob_sector << 24) |
544 ((int64_t) s->hcyl << 16) |
545 ((int64_t) s->lcyl << 8) | s->sector;
546 }
547 } else {
548 sector_num = ((s->hcyl << 8) | s->lcyl) * s->heads * s->sectors +
549 (s->select & 0x0f) * s->sectors + (s->sector - 1);
550 }
551 return sector_num;
552}
553
554void ide_set_sector(IDEState *s, int64_t sector_num)
555{
556 unsigned int cyl, r;
557 if (s->select & 0x40) {
558 if (!s->lba48) {
559 s->select = (s->select & 0xf0) | (sector_num >> 24);
560 s->hcyl = (sector_num >> 16);
561 s->lcyl = (sector_num >> 8);
562 s->sector = (sector_num);
563 } else {
564 s->sector = sector_num;
565 s->lcyl = sector_num >> 8;
566 s->hcyl = sector_num >> 16;
567 s->hob_sector = sector_num >> 24;
568 s->hob_lcyl = sector_num >> 32;
569 s->hob_hcyl = sector_num >> 40;
570 }
571 } else {
572 cyl = sector_num / (s->heads * s->sectors);
573 r = sector_num % (s->heads * s->sectors);
574 s->hcyl = cyl >> 8;
575 s->lcyl = cyl;
576 s->select = (s->select & 0xf0) | ((r / s->sectors) & 0x0f);
577 s->sector = (r % s->sectors) + 1;
578 }
579}
580
581static void ide_rw_error(IDEState *s) {
582 ide_abort_command(s);
583 ide_set_irq(s->bus);
584}
585
586static bool ide_sect_range_ok(IDEState *s,
587 uint64_t sector, uint64_t nb_sectors)
588{
589 uint64_t total_sectors;
590
591 blk_get_geometry(s->blk, &total_sectors);
592 if (sector > total_sectors || nb_sectors > total_sectors - sector) {
593 return false;
594 }
595 return true;
596}
597
598static void ide_buffered_readv_cb(void *opaque, int ret)
599{
600 IDEBufferedRequest *req = opaque;
601 if (!req->orphaned) {
602 if (!ret) {
603 qemu_iovec_from_buf(req->original_qiov, 0, req->iov.iov_base,
604 req->original_qiov->size);
605 }
606 req->original_cb(req->original_opaque, ret);
607 }
608 QLIST_REMOVE(req, list);
609 qemu_vfree(req->iov.iov_base);
610 g_free(req);
611}
612
613#define MAX_BUFFERED_REQS 16
614
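/*
 * Read into a private bounce buffer instead of the caller's qiov so that
 * ide_cancel_dma_sync() can orphan the request: a completion that arrives
 * after cancellation then only touches the bounce buffer and never writes
 * into guest-visible memory.
 */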
615BlockAIOCB *ide_buffered_readv(IDEState *s, int64_t sector_num,
616 QEMUIOVector *iov, int nb_sectors,
617 BlockCompletionFunc *cb, void *opaque)
618{
619 BlockAIOCB *aioreq;
620 IDEBufferedRequest *req;
621 int c = 0;
622
623 QLIST_FOREACH(req, &s->buffered_requests, list) {
624 c++;
625 }
626 if (c > MAX_BUFFERED_REQS) {
627 return blk_abort_aio_request(s->blk, cb, opaque, -EIO);
628 }
629
630 req = g_new0(IDEBufferedRequest, 1);
631 req->original_qiov = iov;
632 req->original_cb = cb;
633 req->original_opaque = opaque;
634 req->iov.iov_base = qemu_blockalign(blk_bs(s->blk), iov->size);
635 req->iov.iov_len = iov->size;
636 qemu_iovec_init_external(&req->qiov, &req->iov, 1);
637
638 aioreq = blk_aio_preadv(s->blk, sector_num << BDRV_SECTOR_BITS,
639 &req->qiov, 0, ide_buffered_readv_cb, req);
640
641 QLIST_INSERT_HEAD(&s->buffered_requests, req, list);
642 return aioreq;
643}
644
645/**
646 * Cancel all pending DMA requests.
647 * Any buffered DMA requests are instantly canceled,
648 * but any pending unbuffered DMA requests must be waited on.
649 */
650void ide_cancel_dma_sync(IDEState *s)
651{
652 IDEBufferedRequest *req;
653
 654 /* First invoke the callbacks of all buffered requests
 655 * and flag those requests as orphaned. Ideally there
 656 * are no unbuffered requests (scatter-gather DMA or
 657 * write requests) pending, so we can avoid draining. */
658 QLIST_FOREACH(req, &s->buffered_requests, list) {
659 if (!req->orphaned) {
660 trace_ide_cancel_dma_sync_buffered(req->original_cb, req);
661 req->original_cb(req->original_opaque, -ECANCELED);
662 }
663 req->orphaned = true;
664 }
665
666 /*
 667 * We can't cancel scatter-gather DMA in the middle of the
 668 * operation, or a partial (not full) DMA transfer would reach
 669 * the storage, so we wait for completion instead (we behave
 670 * as if the DMA had already completed by the time the guest
 671 * tried to cancel it via bmdma_cmd_writeb with BM_CMD_START
 672 * not set).
 673 *
 674 * In the future we'll be able to safely cancel the I/O if the
 675 * whole DMA operation is submitted to disk with a single
 676 * aio operation using preadv/pwritev.
677 */
678 if (s->bus->dma->aiocb) {
679 trace_ide_cancel_dma_sync_remaining();
680 blk_drain(s->blk);
681 assert(s->bus->dma->aiocb == NULL);
682 }
683}
684
685static void ide_sector_read(IDEState *s);
686
687static void ide_sector_read_cb(void *opaque, int ret)
688{
689 IDEState *s = opaque;
690 int n;
691
692 s->pio_aiocb = NULL;
693 s->status &= ~BUSY_STAT;
694
695 if (ret == -ECANCELED) {
696 return;
697 }
698 if (ret != 0) {
699 if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO |
700 IDE_RETRY_READ)) {
701 return;
702 }
703 }
704
705 block_acct_done(blk_get_stats(s->blk), &s->acct);
706
707 n = s->nsector;
708 if (n > s->req_nb_sectors) {
709 n = s->req_nb_sectors;
710 }
711
712 ide_set_sector(s, ide_get_sector(s) + n);
713 s->nsector -= n;
714 /* Allow the guest to read the io_buffer */
715 ide_transfer_start(s, s->io_buffer, n * BDRV_SECTOR_SIZE, ide_sector_read);
716 ide_set_irq(s->bus);
717}
718
719static void ide_sector_read(IDEState *s)
720{
721 int64_t sector_num;
722 int n;
723
724 s->status = READY_STAT | SEEK_STAT;
725 s->error = 0; /* not needed by IDE spec, but needed by Windows */
726 sector_num = ide_get_sector(s);
727 n = s->nsector;
728
729 if (n == 0) {
730 ide_transfer_stop(s);
731 return;
732 }
733
734 s->status |= BUSY_STAT;
735
736 if (n > s->req_nb_sectors) {
737 n = s->req_nb_sectors;
738 }
739
740 trace_ide_sector_read(sector_num, n);
741
742 if (!ide_sect_range_ok(s, sector_num, n)) {
743 ide_rw_error(s);
744 block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_READ);
745 return;
746 }
747
748 s->iov.iov_base = s->io_buffer;
749 s->iov.iov_len = n * BDRV_SECTOR_SIZE;
750 qemu_iovec_init_external(&s->qiov, &s->iov, 1);
751
752 block_acct_start(blk_get_stats(s->blk), &s->acct,
753 n * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
754 s->pio_aiocb = ide_buffered_readv(s, sector_num, &s->qiov, n,
755 ide_sector_read_cb, s);
756}
757
758void dma_buf_commit(IDEState *s, uint32_t tx_bytes)
759{
760 if (s->bus->dma->ops->commit_buf) {
761 s->bus->dma->ops->commit_buf(s->bus->dma, tx_bytes);
762 }
763 s->io_buffer_offset += tx_bytes;
764 qemu_sglist_destroy(&s->sg);
765}
766
767void ide_set_inactive(IDEState *s, bool more)
768{
769 s->bus->dma->aiocb = NULL;
770 ide_clear_retry(s);
771 if (s->bus->dma->ops->set_inactive) {
772 s->bus->dma->ops->set_inactive(s->bus->dma, more);
773 }
774 ide_cmd_done(s);
775}
776
777void ide_dma_error(IDEState *s)
778{
779 dma_buf_commit(s, 0);
780 ide_abort_command(s);
781 ide_set_inactive(s, false);
782 ide_set_irq(s->bus);
783}
784
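/*
 * Apply the block device's error policy to a failed request. Returns true
 * when the caller must bail out (the error was reported to the guest or the
 * VM was stopped for a later retry); returns false only for the "ignore"
 * policy, in which case the caller continues as if the request succeeded.
 */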
785int ide_handle_rw_error(IDEState *s, int error, int op)
786{
787 bool is_read = (op & IDE_RETRY_READ) != 0;
788 BlockErrorAction action = blk_get_error_action(s->blk, is_read, error);
789
790 if (action == BLOCK_ERROR_ACTION_STOP) {
791 assert(s->bus->retry_unit == s->unit);
792 s->bus->error_status = op;
793 } else if (action == BLOCK_ERROR_ACTION_REPORT) {
794 block_acct_failed(blk_get_stats(s->blk), &s->acct);
795 if (IS_IDE_RETRY_DMA(op)) {
796 ide_dma_error(s);
797 } else if (IS_IDE_RETRY_ATAPI(op)) {
798 ide_atapi_io_error(s, -error);
799 } else {
800 ide_rw_error(s);
801 }
802 }
803 blk_error_action(s->blk, action, is_read, error);
804 return action != BLOCK_ERROR_ACTION_IGNORE;
805}
806
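/*
 * Completion callback for one chunk of an ATA DMA transfer: commit the bytes
 * that went through the scatter/gather list, advance the current sector, and
 * either map the next chunk described by the PRD table and resubmit, or
 * finish the command and raise an IRQ.
 */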
807static void ide_dma_cb(void *opaque, int ret)
808{
809 IDEState *s = opaque;
810 int n;
811 int64_t sector_num;
812 uint64_t offset;
813 bool stay_active = false;
814
815 if (ret == -ECANCELED) {
816 return;
817 }
818 if (ret < 0) {
819 if (ide_handle_rw_error(s, -ret, ide_dma_cmd_to_retry(s->dma_cmd))) {
820 s->bus->dma->aiocb = NULL;
821 dma_buf_commit(s, 0);
822 return;
823 }
824 }
825
826 n = s->io_buffer_size >> 9;
827 if (n > s->nsector) {
828 /* The PRDs were longer than needed for this request. Shorten them so
829 * we don't get a negative remainder. The Active bit must remain set
830 * after the request completes. */
831 n = s->nsector;
832 stay_active = true;
833 }
834
835 sector_num = ide_get_sector(s);
836 if (n > 0) {
837 assert(n * 512 == s->sg.size);
838 dma_buf_commit(s, s->sg.size);
839 sector_num += n;
840 ide_set_sector(s, sector_num);
841 s->nsector -= n;
842 }
843
844 /* end of transfer ? */
845 if (s->nsector == 0) {
846 s->status = READY_STAT | SEEK_STAT;
847 ide_set_irq(s->bus);
848 goto eot;
849 }
850
851 /* launch next transfer */
852 n = s->nsector;
853 s->io_buffer_index = 0;
854 s->io_buffer_size = n * 512;
855 if (s->bus->dma->ops->prepare_buf(s->bus->dma, s->io_buffer_size) < 512) {
856 /* The PRDs were too short. Reset the Active bit, but don't raise an
857 * interrupt. */
858 s->status = READY_STAT | SEEK_STAT;
859 dma_buf_commit(s, 0);
860 goto eot;
861 }
862
863#ifdef DEBUG_AIO
864 printf("ide_dma_cb: sector_num=%" PRId64 " n=%d, cmd_cmd=%d\n",
865 sector_num, n, s->dma_cmd);
866#endif
867
868 if ((s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) &&
869 !ide_sect_range_ok(s, sector_num, n)) {
870 ide_dma_error(s);
871 block_acct_invalid(blk_get_stats(s->blk), s->acct.type);
872 return;
873 }
874
875 offset = sector_num << BDRV_SECTOR_BITS;
876 switch (s->dma_cmd) {
877 case IDE_DMA_READ:
878 s->bus->dma->aiocb = dma_blk_read(s->blk, &s->sg, offset,
879 BDRV_SECTOR_SIZE, ide_dma_cb, s);
880 break;
881 case IDE_DMA_WRITE:
882 s->bus->dma->aiocb = dma_blk_write(s->blk, &s->sg, offset,
883 BDRV_SECTOR_SIZE, ide_dma_cb, s);
884 break;
885 case IDE_DMA_TRIM:
886 s->bus->dma->aiocb = dma_blk_io(blk_get_aio_context(s->blk),
887 &s->sg, offset, BDRV_SECTOR_SIZE,
888 ide_issue_trim, s->blk, ide_dma_cb, s,
889 DMA_DIRECTION_TO_DEVICE);
890 break;
891 default:
892 abort();
893 }
894 return;
895
896eot:
897 if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
898 block_acct_done(blk_get_stats(s->blk), &s->acct);
899 }
900 ide_set_inactive(s, stay_active);
901}
902
903static void ide_sector_start_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
904{
905 s->status = READY_STAT | SEEK_STAT | DRQ_STAT;
906 s->io_buffer_size = 0;
907 s->dma_cmd = dma_cmd;
908
909 switch (dma_cmd) {
910 case IDE_DMA_READ:
911 block_acct_start(blk_get_stats(s->blk), &s->acct,
912 s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
913 break;
914 case IDE_DMA_WRITE:
915 block_acct_start(blk_get_stats(s->blk), &s->acct,
916 s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
917 break;
918 default:
919 break;
920 }
921
922 ide_start_dma(s, ide_dma_cb);
923}
924
925void ide_start_dma(IDEState *s, BlockCompletionFunc *cb)
926{
927 s->io_buffer_index = 0;
928 ide_set_retry(s);
929 if (s->bus->dma->ops->start_dma) {
930 s->bus->dma->ops->start_dma(s->bus->dma, s, cb);
931 }
932}
933
934static void ide_sector_write(IDEState *s);
935
936static void ide_sector_write_timer_cb(void *opaque)
937{
938 IDEState *s = opaque;
939 ide_set_irq(s->bus);
940}
941
942static void ide_sector_write_cb(void *opaque, int ret)
943{
944 IDEState *s = opaque;
945 int n;
946
947 if (ret == -ECANCELED) {
948 return;
949 }
950
951 s->pio_aiocb = NULL;
952 s->status &= ~BUSY_STAT;
953
954 if (ret != 0) {
955 if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO)) {
956 return;
957 }
958 }
959
960 block_acct_done(blk_get_stats(s->blk), &s->acct);
961
962 n = s->nsector;
963 if (n > s->req_nb_sectors) {
964 n = s->req_nb_sectors;
965 }
966 s->nsector -= n;
967
968 ide_set_sector(s, ide_get_sector(s) + n);
969 if (s->nsector == 0) {
970 /* no more sectors to write */
971 ide_transfer_stop(s);
972 } else {
973 int n1 = s->nsector;
974 if (n1 > s->req_nb_sectors) {
975 n1 = s->req_nb_sectors;
976 }
977 ide_transfer_start(s, s->io_buffer, n1 * BDRV_SECTOR_SIZE,
978 ide_sector_write);
979 }
980
981 if (win2k_install_hack && ((++s->irq_count % 16) == 0)) {
982 /* It seems there is a bug in the Windows 2000 installer HDD
983 IDE driver which fills the disk with empty logs when the
984 IDE write IRQ comes too early. This hack tries to correct
 985 that at the expense of slower write performance. Use this
986 option _only_ to install Windows 2000. You must disable it
987 for normal use. */
988 timer_mod(s->sector_write_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
989 (NANOSECONDS_PER_SECOND / 1000));
990 } else {
991 ide_set_irq(s->bus);
992 }
993}
994
995static void ide_sector_write(IDEState *s)
996{
997 int64_t sector_num;
998 int n;
999
1000 s->status = READY_STAT | SEEK_STAT | BUSY_STAT;
1001 sector_num = ide_get_sector(s);
1002
1003 n = s->nsector;
1004 if (n > s->req_nb_sectors) {
1005 n = s->req_nb_sectors;
1006 }
1007
1008 trace_ide_sector_write(sector_num, n);
1009
1010 if (!ide_sect_range_ok(s, sector_num, n)) {
1011 ide_rw_error(s);
1012 block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_WRITE);
1013 return;
1014 }
1015
1016 s->iov.iov_base = s->io_buffer;
1017 s->iov.iov_len = n * BDRV_SECTOR_SIZE;
1018 qemu_iovec_init_external(&s->qiov, &s->iov, 1);
1019
1020 block_acct_start(blk_get_stats(s->blk), &s->acct,
1021 n * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
1022 s->pio_aiocb = blk_aio_pwritev(s->blk, sector_num << BDRV_SECTOR_BITS,
1023 &s->qiov, 0, ide_sector_write_cb, s);
1024}
1025
1026static void ide_flush_cb(void *opaque, int ret)
1027{
1028 IDEState *s = opaque;
1029
1030 s->pio_aiocb = NULL;
1031
1032 if (ret == -ECANCELED) {
1033 return;
1034 }
1035 if (ret < 0) {
1036 /* XXX: What sector number to set here? */
1037 if (ide_handle_rw_error(s, -ret, IDE_RETRY_FLUSH)) {
1038 return;
1039 }
1040 }
1041
1042 if (s->blk) {
1043 block_acct_done(blk_get_stats(s->blk), &s->acct);
1044 }
1045 s->status = READY_STAT | SEEK_STAT;
1046 ide_cmd_done(s);
1047 ide_set_irq(s->bus);
1048}
1049
1050static void ide_flush_cache(IDEState *s)
1051{
1052 if (s->blk == NULL) {
1053 ide_flush_cb(s, 0);
1054 return;
1055 }
1056
1057 s->status |= BUSY_STAT;
1058 ide_set_retry(s);
1059 block_acct_start(blk_get_stats(s->blk), &s->acct, 0, BLOCK_ACCT_FLUSH);
1060
1061 if (blk_bs(s->blk)) {
1062 s->pio_aiocb = blk_aio_flush(s->blk, ide_flush_cb, s);
1063 } else {
1064 /* XXX blk_aio_flush() crashes when blk_bs(blk) is NULL, remove this
1065 * temporary workaround when blk_aio_*() functions handle NULL blk_bs.
1066 */
1067 ide_flush_cb(s, 0);
1068 }
1069}
1070
1071static void ide_cfata_metadata_inquiry(IDEState *s)
1072{
1073 uint16_t *p;
1074 uint32_t spd;
1075
1076 p = (uint16_t *) s->io_buffer;
1077 memset(p, 0, 0x200);
1078 spd = ((s->mdata_size - 1) >> 9) + 1;
1079
1080 put_le16(p + 0, 0x0001); /* Data format revision */
1081 put_le16(p + 1, 0x0000); /* Media property: silicon */
1082 put_le16(p + 2, s->media_changed); /* Media status */
1083 put_le16(p + 3, s->mdata_size & 0xffff); /* Capacity in bytes (low) */
1084 put_le16(p + 4, s->mdata_size >> 16); /* Capacity in bytes (high) */
1085 put_le16(p + 5, spd & 0xffff); /* Sectors per device (low) */
1086 put_le16(p + 6, spd >> 16); /* Sectors per device (high) */
1087}
1088
1089static void ide_cfata_metadata_read(IDEState *s)
1090{
1091 uint16_t *p;
1092
1093 if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
1094 s->status = ERR_STAT;
1095 s->error = ABRT_ERR;
1096 return;
1097 }
1098
1099 p = (uint16_t *) s->io_buffer;
1100 memset(p, 0, 0x200);
1101
1102 put_le16(p + 0, s->media_changed); /* Media status */
1103 memcpy(p + 1, s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
1104 MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
1105 s->nsector << 9), 0x200 - 2));
1106}
1107
1108static void ide_cfata_metadata_write(IDEState *s)
1109{
1110 if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
1111 s->status = ERR_STAT;
1112 s->error = ABRT_ERR;
1113 return;
1114 }
1115
1116 s->media_changed = 0;
1117
1118 memcpy(s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
1119 s->io_buffer + 2,
1120 MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
1121 s->nsector << 9), 0x200 - 2));
1122}
1123
1124/* called when the inserted state of the media has changed */
1125static void ide_cd_change_cb(void *opaque, bool load, Error **errp)
1126{
1127 IDEState *s = opaque;
1128 uint64_t nb_sectors;
1129
1130 s->tray_open = !load;
1131 blk_get_geometry(s->blk, &nb_sectors);
1132 s->nb_sectors = nb_sectors;
1133
1134 /*
1135 * First indicate to the guest that a CD has been removed. That's
1136 * done on the next command the guest sends us.
1137 *
1138 * Then we set UNIT_ATTENTION, by which the guest will
1139 * detect a new CD in the drive. See ide_atapi_cmd() for details.
1140 */
1141 s->cdrom_changed = 1;
1142 s->events.new_media = true;
1143 s->events.eject_request = false;
1144 ide_set_irq(s->bus);
1145}
1146
1147static void ide_cd_eject_request_cb(void *opaque, bool force)
1148{
1149 IDEState *s = opaque;
1150
1151 s->events.eject_request = true;
1152 if (force) {
1153 s->tray_locked = false;
1154 }
1155 ide_set_irq(s->bus);
1156}
1157
1158static void ide_cmd_lba48_transform(IDEState *s, int lba48)
1159{
1160 s->lba48 = lba48;
1161
 1162 /* Handle the 'magic' 0 nsector count conversion here. To avoid
 1163 * fiddling with the rest of the read logic, we just store the
 1164 * full sector count in ->nsector and ignore ->hob_nsector from now on.
 1165 */
1166 if (!s->lba48) {
1167 if (!s->nsector)
1168 s->nsector = 256;
1169 } else {
1170 if (!s->nsector && !s->hob_nsector)
1171 s->nsector = 65536;
1172 else {
1173 int lo = s->nsector;
1174 int hi = s->hob_nsector;
1175
1176 s->nsector = (hi << 8) | lo;
1177 }
1178 }
1179}
1180
1181static void ide_clear_hob(IDEBus *bus)
1182{
1183 /* any write clears HOB high bit of device control register */
1184 bus->ifs[0].select &= ~(1 << 7);
1185 bus->ifs[1].select &= ~(1 << 7);
1186}
1187
1188void ide_ioport_write(void *opaque, uint32_t addr, uint32_t val)
1189{
1190 IDEBus *bus = opaque;
1191 IDEState *s = idebus_active_if(bus);
1192 int reg_num = addr & 7;
1193
1194 trace_ide_ioport_write(addr, val, bus, s);
1195
1196 /* ignore writes to command block while busy with previous command */
1197 if (reg_num != 7 && (s->status & (BUSY_STAT|DRQ_STAT))) {
1198 return;
1199 }
1200
1201 switch (reg_num) {
1202 case 0:
1203 break;
1204 case 1:
1205 ide_clear_hob(bus);
1206 /* NOTE: data is written to the two drives */
1207 bus->ifs[0].hob_feature = bus->ifs[0].feature;
1208 bus->ifs[1].hob_feature = bus->ifs[1].feature;
1209 bus->ifs[0].feature = val;
1210 bus->ifs[1].feature = val;
1211 break;
1212 case 2:
1213 ide_clear_hob(bus);
1214 bus->ifs[0].hob_nsector = bus->ifs[0].nsector;
1215 bus->ifs[1].hob_nsector = bus->ifs[1].nsector;
1216 bus->ifs[0].nsector = val;
1217 bus->ifs[1].nsector = val;
1218 break;
1219 case 3:
1220 ide_clear_hob(bus);
1221 bus->ifs[0].hob_sector = bus->ifs[0].sector;
1222 bus->ifs[1].hob_sector = bus->ifs[1].sector;
1223 bus->ifs[0].sector = val;
1224 bus->ifs[1].sector = val;
1225 break;
1226 case 4:
1227 ide_clear_hob(bus);
1228 bus->ifs[0].hob_lcyl = bus->ifs[0].lcyl;
1229 bus->ifs[1].hob_lcyl = bus->ifs[1].lcyl;
1230 bus->ifs[0].lcyl = val;
1231 bus->ifs[1].lcyl = val;
1232 break;
1233 case 5:
1234 ide_clear_hob(bus);
1235 bus->ifs[0].hob_hcyl = bus->ifs[0].hcyl;
1236 bus->ifs[1].hob_hcyl = bus->ifs[1].hcyl;
1237 bus->ifs[0].hcyl = val;
1238 bus->ifs[1].hcyl = val;
1239 break;
1240 case 6:
1241 /* FIXME: HOB readback uses bit 7 */
1242 bus->ifs[0].select = (val & ~0x10) | 0xa0;
1243 bus->ifs[1].select = (val | 0x10) | 0xa0;
1244 /* select drive */
1245 bus->unit = (val >> 4) & 1;
1246 break;
1247 default:
1248 case 7:
1249 /* command */
1250 ide_exec_cmd(bus, val);
1251 break;
1252 }
1253}
1254
1255static void ide_reset(IDEState *s)
1256{
1257 trace_ide_reset(s);
1258
1259 if (s->pio_aiocb) {
1260 blk_aio_cancel(s->pio_aiocb);
1261 s->pio_aiocb = NULL;
1262 }
1263
1264 if (s->drive_kind == IDE_CFATA)
1265 s->mult_sectors = 0;
1266 else
1267 s->mult_sectors = MAX_MULT_SECTORS;
1268 /* ide regs */
1269 s->feature = 0;
1270 s->error = 0;
1271 s->nsector = 0;
1272 s->sector = 0;
1273 s->lcyl = 0;
1274 s->hcyl = 0;
1275
1276 /* lba48 */
1277 s->hob_feature = 0;
1278 s->hob_sector = 0;
1279 s->hob_nsector = 0;
1280 s->hob_lcyl = 0;
1281 s->hob_hcyl = 0;
1282
1283 s->select = 0xa0;
1284 s->status = READY_STAT | SEEK_STAT;
1285
1286 s->lba48 = 0;
1287
1288 /* ATAPI specific */
1289 s->sense_key = 0;
1290 s->asc = 0;
1291 s->cdrom_changed = 0;
1292 s->packet_transfer_size = 0;
1293 s->elementary_transfer_size = 0;
1294 s->io_buffer_index = 0;
1295 s->cd_sector_size = 0;
1296 s->atapi_dma = 0;
1297 s->tray_locked = 0;
1298 s->tray_open = 0;
1299 /* ATA DMA state */
1300 s->io_buffer_size = 0;
1301 s->req_nb_sectors = 0;
1302
1303 ide_set_signature(s);
1304 /* init the transfer handler so that 0xffff is returned on data
1305 accesses */
1306 s->end_transfer_func = ide_dummy_transfer_stop;
1307 ide_dummy_transfer_stop(s);
1308 s->media_changed = 0;
1309}
1310
1311static bool cmd_nop(IDEState *s, uint8_t cmd)
1312{
1313 return true;
1314}
1315
1316static bool cmd_device_reset(IDEState *s, uint8_t cmd)
1317{
1318 /* Halt PIO (in the DRQ phase), then DMA */
1319 ide_transfer_cancel(s);
1320 ide_cancel_dma_sync(s);
1321
1322 /* Reset any PIO commands, reset signature, etc */
1323 ide_reset(s);
1324
1325 /* RESET: ATA8-ACS3 7.10.4 "Normal Outputs";
1326 * ATA8-ACS3 Table 184 "Device Signatures for Normal Output" */
1327 s->status = 0x00;
1328
1329 /* Do not overwrite status register */
1330 return false;
1331}
1332
1333static bool cmd_data_set_management(IDEState *s, uint8_t cmd)
1334{
1335 switch (s->feature) {
1336 case DSM_TRIM:
1337 if (s->blk) {
1338 ide_sector_start_dma(s, IDE_DMA_TRIM);
1339 return false;
1340 }
1341 break;
1342 }
1343
1344 ide_abort_command(s);
1345 return true;
1346}
1347
1348static bool cmd_identify(IDEState *s, uint8_t cmd)
1349{
1350 if (s->blk && s->drive_kind != IDE_CD) {
1351 if (s->drive_kind != IDE_CFATA) {
1352 ide_identify(s);
1353 } else {
1354 ide_cfata_identify(s);
1355 }
1356 s->status = READY_STAT | SEEK_STAT;
1357 ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1358 ide_set_irq(s->bus);
1359 return false;
1360 } else {
1361 if (s->drive_kind == IDE_CD) {
1362 ide_set_signature(s);
1363 }
1364 ide_abort_command(s);
1365 }
1366
1367 return true;
1368}
1369
1370static bool cmd_verify(IDEState *s, uint8_t cmd)
1371{
1372 bool lba48 = (cmd == WIN_VERIFY_EXT);
1373
1374 /* do sector number check ? */
1375 ide_cmd_lba48_transform(s, lba48);
1376
1377 return true;
1378}
1379
1380static bool cmd_set_multiple_mode(IDEState *s, uint8_t cmd)
1381{
1382 if (s->drive_kind == IDE_CFATA && s->nsector == 0) {
1383 /* Disable Read and Write Multiple */
1384 s->mult_sectors = 0;
1385 } else if ((s->nsector & 0xff) != 0 &&
1386 ((s->nsector & 0xff) > MAX_MULT_SECTORS ||
1387 (s->nsector & (s->nsector - 1)) != 0)) {
1388 ide_abort_command(s);
1389 } else {
1390 s->mult_sectors = s->nsector & 0xff;
1391 }
1392
1393 return true;
1394}
1395
1396static bool cmd_read_multiple(IDEState *s, uint8_t cmd)
1397{
1398 bool lba48 = (cmd == WIN_MULTREAD_EXT);
1399
1400 if (!s->blk || !s->mult_sectors) {
1401 ide_abort_command(s);
1402 return true;
1403 }
1404
1405 ide_cmd_lba48_transform(s, lba48);
1406 s->req_nb_sectors = s->mult_sectors;
1407 ide_sector_read(s);
1408 return false;
1409}
1410
1411static bool cmd_write_multiple(IDEState *s, uint8_t cmd)
1412{
1413 bool lba48 = (cmd == WIN_MULTWRITE_EXT);
1414 int n;
1415
1416 if (!s->blk || !s->mult_sectors) {
1417 ide_abort_command(s);
1418 return true;
1419 }
1420
1421 ide_cmd_lba48_transform(s, lba48);
1422
1423 s->req_nb_sectors = s->mult_sectors;
1424 n = MIN(s->nsector, s->req_nb_sectors);
1425
1426 s->status = SEEK_STAT | READY_STAT;
1427 ide_transfer_start(s, s->io_buffer, 512 * n, ide_sector_write);
1428
1429 s->media_changed = 1;
1430
1431 return false;
1432}
1433
1434static bool cmd_read_pio(IDEState *s, uint8_t cmd)
1435{
1436 bool lba48 = (cmd == WIN_READ_EXT);
1437
1438 if (s->drive_kind == IDE_CD) {
1439 ide_set_signature(s); /* odd, but ATA4 8.27.5.2 requires it */
1440 ide_abort_command(s);
1441 return true;
1442 }
1443
1444 if (!s->blk) {
1445 ide_abort_command(s);
1446 return true;
1447 }
1448
1449 ide_cmd_lba48_transform(s, lba48);
1450 s->req_nb_sectors = 1;
1451 ide_sector_read(s);
1452
1453 return false;
1454}
1455
1456static bool cmd_write_pio(IDEState *s, uint8_t cmd)
1457{
1458 bool lba48 = (cmd == WIN_WRITE_EXT);
1459
1460 if (!s->blk) {
1461 ide_abort_command(s);
1462 return true;
1463 }
1464
1465 ide_cmd_lba48_transform(s, lba48);
1466
1467 s->req_nb_sectors = 1;
1468 s->status = SEEK_STAT | READY_STAT;
1469 ide_transfer_start(s, s->io_buffer, 512, ide_sector_write);
1470
1471 s->media_changed = 1;
1472
1473 return false;
1474}
1475
1476static bool cmd_read_dma(IDEState *s, uint8_t cmd)
1477{
1478 bool lba48 = (cmd == WIN_READDMA_EXT);
1479
1480 if (!s->blk) {
1481 ide_abort_command(s);
1482 return true;
1483 }
1484
1485 ide_cmd_lba48_transform(s, lba48);
1486 ide_sector_start_dma(s, IDE_DMA_READ);
1487
1488 return false;
1489}
1490
1491static bool cmd_write_dma(IDEState *s, uint8_t cmd)
1492{
1493 bool lba48 = (cmd == WIN_WRITEDMA_EXT);
1494
1495 if (!s->blk) {
1496 ide_abort_command(s);
1497 return true;
1498 }
1499
1500 ide_cmd_lba48_transform(s, lba48);
1501 ide_sector_start_dma(s, IDE_DMA_WRITE);
1502
1503 s->media_changed = 1;
1504
1505 return false;
1506}
1507
1508static bool cmd_flush_cache(IDEState *s, uint8_t cmd)
1509{
1510 ide_flush_cache(s);
1511 return false;
1512}
1513
1514static bool cmd_seek(IDEState *s, uint8_t cmd)
1515{
1516 /* XXX: Check that seek is within bounds */
1517 return true;
1518}
1519
1520static bool cmd_read_native_max(IDEState *s, uint8_t cmd)
1521{
1522 bool lba48 = (cmd == WIN_READ_NATIVE_MAX_EXT);
1523
1524 /* Refuse if no sectors are addressable (e.g. medium not inserted) */
1525 if (s->nb_sectors == 0) {
1526 ide_abort_command(s);
1527 return true;
1528 }
1529
1530 ide_cmd_lba48_transform(s, lba48);
1531 ide_set_sector(s, s->nb_sectors - 1);
1532
1533 return true;
1534}
1535
1536static bool cmd_check_power_mode(IDEState *s, uint8_t cmd)
1537{
1538 s->nsector = 0xff; /* device active or idle */
1539 return true;
1540}
1541
1542static bool cmd_set_features(IDEState *s, uint8_t cmd)
1543{
1544 uint16_t *identify_data;
1545
1546 if (!s->blk) {
1547 ide_abort_command(s);
1548 return true;
1549 }
1550
1551 /* XXX: valid for CDROM ? */
1552 switch (s->feature) {
1553 case 0x02: /* write cache enable */
1554 blk_set_enable_write_cache(s->blk, true);
1555 identify_data = (uint16_t *)s->identify_data;
1556 put_le16(identify_data + 85, (1 << 14) | (1 << 5) | 1);
1557 return true;
1558 case 0x82: /* write cache disable */
1559 blk_set_enable_write_cache(s->blk, false);
1560 identify_data = (uint16_t *)s->identify_data;
1561 put_le16(identify_data + 85, (1 << 14) | 1);
1562 ide_flush_cache(s);
1563 return false;
1564 case 0xcc: /* reverting to power-on defaults enable */
1565 case 0x66: /* reverting to power-on defaults disable */
1566 case 0xaa: /* read look-ahead enable */
1567 case 0x55: /* read look-ahead disable */
1568 case 0x05: /* set advanced power management mode */
1569 case 0x85: /* disable advanced power management mode */
1570 case 0x69: /* NOP */
1571 case 0x67: /* NOP */
1572 case 0x96: /* NOP */
1573 case 0x9a: /* NOP */
1574 case 0x42: /* enable Automatic Acoustic Mode */
1575 case 0xc2: /* disable Automatic Acoustic Mode */
1576 return true;
1577 case 0x03: /* set transfer mode */
1578 {
1579 uint8_t val = s->nsector & 0x07;
1580 identify_data = (uint16_t *)s->identify_data;
1581
1582 switch (s->nsector >> 3) {
1583 case 0x00: /* pio default */
1584 case 0x01: /* pio mode */
1585 put_le16(identify_data + 62, 0x07);
1586 put_le16(identify_data + 63, 0x07);
1587 put_le16(identify_data + 88, 0x3f);
1588 break;
 1589 case 0x02: /* single word dma mode */
1590 put_le16(identify_data + 62, 0x07 | (1 << (val + 8)));
1591 put_le16(identify_data + 63, 0x07);
1592 put_le16(identify_data + 88, 0x3f);
1593 break;
1594 case 0x04: /* mdma mode */
1595 put_le16(identify_data + 62, 0x07);
1596 put_le16(identify_data + 63, 0x07 | (1 << (val + 8)));
1597 put_le16(identify_data + 88, 0x3f);
1598 break;
1599 case 0x08: /* udma mode */
1600 put_le16(identify_data + 62, 0x07);
1601 put_le16(identify_data + 63, 0x07);
1602 put_le16(identify_data + 88, 0x3f | (1 << (val + 8)));
1603 break;
1604 default:
1605 goto abort_cmd;
1606 }
1607 return true;
1608 }
1609 }
1610
1611abort_cmd:
1612 ide_abort_command(s);
1613 return true;
1614}
1615
1616
1617/*** ATAPI commands ***/
1618
1619static bool cmd_identify_packet(IDEState *s, uint8_t cmd)
1620{
1621 ide_atapi_identify(s);
1622 s->status = READY_STAT | SEEK_STAT;
1623 ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1624 ide_set_irq(s->bus);
1625 return false;
1626}
1627
1628static bool cmd_exec_dev_diagnostic(IDEState *s, uint8_t cmd)
1629{
1630 ide_set_signature(s);
1631
1632 if (s->drive_kind == IDE_CD) {
1633 s->status = 0; /* ATAPI spec (v6) section 9.10 defines packet
1634 * devices to return a clear status register
1635 * with READY_STAT *not* set. */
1636 s->error = 0x01;
1637 } else {
1638 s->status = READY_STAT | SEEK_STAT;
1639 /* The bits of the error register are not as usual for this command!
1640 * They are part of the regular output (this is why ERR_STAT isn't set)
1641 * Device 0 passed, Device 1 passed or not present. */
1642 s->error = 0x01;
1643 ide_set_irq(s->bus);
1644 }
1645
1646 return false;
1647}
1648
1649static bool cmd_packet(IDEState *s, uint8_t cmd)
1650{
1651 /* overlapping commands not supported */
1652 if (s->feature & 0x02) {
1653 ide_abort_command(s);
1654 return true;
1655 }
1656
1657 s->status = READY_STAT | SEEK_STAT;
1658 s->atapi_dma = s->feature & 1;
1659 if (s->atapi_dma) {
1660 s->dma_cmd = IDE_DMA_ATAPI;
1661 }
1662 s->nsector = 1;
1663 ide_transfer_start(s, s->io_buffer, ATAPI_PACKET_SIZE,
1664 ide_atapi_cmd);
1665 return false;
1666}
1667
1668
1669/*** CF-ATA commands ***/
1670
1671static bool cmd_cfa_req_ext_error_code(IDEState *s, uint8_t cmd)
1672{
1673 s->error = 0x09; /* miscellaneous error */
1674 s->status = READY_STAT | SEEK_STAT;
1675 ide_set_irq(s->bus);
1676
1677 return false;
1678}
1679
1680static bool cmd_cfa_erase_sectors(IDEState *s, uint8_t cmd)
1681{
1682 /* WIN_SECURITY_FREEZE_LOCK has the same ID as CFA_WEAR_LEVEL and is
1683 * required for Windows 8 to work with AHCI */
1684
1685 if (cmd == CFA_WEAR_LEVEL) {
1686 s->nsector = 0;
1687 }
1688
1689 if (cmd == CFA_ERASE_SECTORS) {
1690 s->media_changed = 1;
1691 }
1692
1693 return true;
1694}
1695
1696static bool cmd_cfa_translate_sector(IDEState *s, uint8_t cmd)
1697{
1698 s->status = READY_STAT | SEEK_STAT;
1699
1700 memset(s->io_buffer, 0, 0x200);
1701 s->io_buffer[0x00] = s->hcyl; /* Cyl MSB */
1702 s->io_buffer[0x01] = s->lcyl; /* Cyl LSB */
1703 s->io_buffer[0x02] = s->select; /* Head */
1704 s->io_buffer[0x03] = s->sector; /* Sector */
1705 s->io_buffer[0x04] = ide_get_sector(s) >> 16; /* LBA MSB */
1706 s->io_buffer[0x05] = ide_get_sector(s) >> 8; /* LBA */
1707 s->io_buffer[0x06] = ide_get_sector(s) >> 0; /* LBA LSB */
1708 s->io_buffer[0x13] = 0x00; /* Erase flag */
1709 s->io_buffer[0x18] = 0x00; /* Hot count */
1710 s->io_buffer[0x19] = 0x00; /* Hot count */
1711 s->io_buffer[0x1a] = 0x01; /* Hot count */
1712
1713 ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1714 ide_set_irq(s->bus);
1715
1716 return false;
1717}
1718
1719static bool cmd_cfa_access_metadata_storage(IDEState *s, uint8_t cmd)
1720{
1721 switch (s->feature) {
1722 case 0x02: /* Inquiry Metadata Storage */
1723 ide_cfata_metadata_inquiry(s);
1724 break;
1725 case 0x03: /* Read Metadata Storage */
1726 ide_cfata_metadata_read(s);
1727 break;
1728 case 0x04: /* Write Metadata Storage */
1729 ide_cfata_metadata_write(s);
1730 break;
1731 default:
1732 ide_abort_command(s);
1733 return true;
1734 }
1735
1736 ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1737 s->status = 0x00; /* NOTE: READY is _not_ set */
1738 ide_set_irq(s->bus);
1739
1740 return false;
1741}
1742
1743static bool cmd_ibm_sense_condition(IDEState *s, uint8_t cmd)
1744{
1745 switch (s->feature) {
1746 case 0x01: /* sense temperature in device */
1747 s->nsector = 0x50; /* +20 C */
1748 break;
1749 default:
1750 ide_abort_command(s);
1751 return true;
1752 }
1753
1754 return true;
1755}
1756
1757
1758/*** SMART commands ***/
1759
1760static bool cmd_smart(IDEState *s, uint8_t cmd)
1761{
1762 int n;
1763
1764 if (s->hcyl != 0xc2 || s->lcyl != 0x4f) {
1765 goto abort_cmd;
1766 }
1767
1768 if (!s->smart_enabled && s->feature != SMART_ENABLE) {
1769 goto abort_cmd;
1770 }
1771
1772 switch (s->feature) {
1773 case SMART_DISABLE:
1774 s->smart_enabled = 0;
1775 return true;
1776
1777 case SMART_ENABLE:
1778 s->smart_enabled = 1;
1779 return true;
1780
1781 case SMART_ATTR_AUTOSAVE:
1782 switch (s->sector) {
1783 case 0x00:
1784 s->smart_autosave = 0;
1785 break;
1786 case 0xf1:
1787 s->smart_autosave = 1;
1788 break;
1789 default:
1790 goto abort_cmd;
1791 }
1792 return true;
1793
1794 case SMART_STATUS:
1795 if (!s->smart_errors) {
1796 s->hcyl = 0xc2;
1797 s->lcyl = 0x4f;
1798 } else {
1799 s->hcyl = 0x2c;
1800 s->lcyl = 0xf4;
1801 }
1802 return true;
1803
1804 case SMART_READ_THRESH:
1805 memset(s->io_buffer, 0, 0x200);
1806 s->io_buffer[0] = 0x01; /* smart struct version */
1807
1808 for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
1809 s->io_buffer[2 + 0 + (n * 12)] = smart_attributes[n][0];
1810 s->io_buffer[2 + 1 + (n * 12)] = smart_attributes[n][11];
1811 }
1812
 1813 /* checksum: the final byte makes all 512 bytes sum to zero (mod 256) */
1814 for (n = 0; n < 511; n++) {
1815 s->io_buffer[511] += s->io_buffer[n];
1816 }
1817 s->io_buffer[511] = 0x100 - s->io_buffer[511];
1818
1819 s->status = READY_STAT | SEEK_STAT;
1820 ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1821 ide_set_irq(s->bus);
1822 return false;
1823
1824 case SMART_READ_DATA:
1825 memset(s->io_buffer, 0, 0x200);
1826 s->io_buffer[0] = 0x01; /* smart struct version */
1827
1828 for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
1829 int i;
1830 for (i = 0; i < 11; i++) {
1831 s->io_buffer[2 + i + (n * 12)] = smart_attributes[n][i];
1832 }
1833 }
1834
1835 s->io_buffer[362] = 0x02 | (s->smart_autosave ? 0x80 : 0x00);
1836 if (s->smart_selftest_count == 0) {
1837 s->io_buffer[363] = 0;
1838 } else {
1839 s->io_buffer[363] =
1840 s->smart_selftest_data[3 +
1841 (s->smart_selftest_count - 1) *
1842 24];
1843 }
1844 s->io_buffer[364] = 0x20;
1845 s->io_buffer[365] = 0x01;
 1846 /* offline data collection capability: execute + self-test */
1847 s->io_buffer[367] = (1 << 4 | 1 << 3 | 1);
1848 s->io_buffer[368] = 0x03; /* smart capability (1) */
1849 s->io_buffer[369] = 0x00; /* smart capability (2) */
1850 s->io_buffer[370] = 0x01; /* error logging supported */
1851 s->io_buffer[372] = 0x02; /* minutes for poll short test */
1852 s->io_buffer[373] = 0x36; /* minutes for poll ext test */
1853 s->io_buffer[374] = 0x01; /* minutes for poll conveyance */
1854
1855 for (n = 0; n < 511; n++) {
1856 s->io_buffer[511] += s->io_buffer[n];
1857 }
1858 s->io_buffer[511] = 0x100 - s->io_buffer[511];
1859
1860 s->status = READY_STAT | SEEK_STAT;
1861 ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1862 ide_set_irq(s->bus);
1863 return false;
1864
1865 case SMART_READ_LOG:
1866 switch (s->sector) {
1867 case 0x01: /* summary smart error log */
1868 memset(s->io_buffer, 0, 0x200);
1869 s->io_buffer[0] = 0x01;
1870 s->io_buffer[1] = 0x00; /* no error entries */
1871 s->io_buffer[452] = s->smart_errors & 0xff;
1872 s->io_buffer[453] = (s->smart_errors & 0xff00) >> 8;
1873
1874 for (n = 0; n < 511; n++) {
1875 s->io_buffer[511] += s->io_buffer[n];
1876 }
1877 s->io_buffer[511] = 0x100 - s->io_buffer[511];
1878 break;
1879 case 0x06: /* smart self test log */
1880 memset(s->io_buffer, 0, 0x200);
1881 s->io_buffer[0] = 0x01;
1882 if (s->smart_selftest_count == 0) {
1883 s->io_buffer[508] = 0;
1884 } else {
1885 s->io_buffer[508] = s->smart_selftest_count;
1886 for (n = 2; n < 506; n++) {
1887 s->io_buffer[n] = s->smart_selftest_data[n];
1888 }
1889 }
1890
1891 for (n = 0; n < 511; n++) {
1892 s->io_buffer[511] += s->io_buffer[n];
1893 }
1894 s->io_buffer[511] = 0x100 - s->io_buffer[511];
1895 break;
1896 default:
1897 goto abort_cmd;
1898 }
1899 s->status = READY_STAT | SEEK_STAT;
1900 ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1901 ide_set_irq(s->bus);
1902 return false;
1903
1904 case SMART_EXECUTE_OFFLINE:
1905 switch (s->sector) {
1906 case 0: /* off-line routine */
1907 case 1: /* short self test */
1908 case 2: /* extended self test */
1909 s->smart_selftest_count++;
1910 if (s->smart_selftest_count > 21) {
1911 s->smart_selftest_count = 1;
1912 }
1913 n = 2 + (s->smart_selftest_count - 1) * 24;
1914 s->smart_selftest_data[n] = s->sector;
1915 s->smart_selftest_data[n + 1] = 0x00; /* OK and finished */
1916 s->smart_selftest_data[n + 2] = 0x34; /* hour count lsb */
1917 s->smart_selftest_data[n + 3] = 0x12; /* hour count msb */
1918 break;
1919 default:
1920 goto abort_cmd;
1921 }
1922 return true;
1923 }
1924
1925abort_cmd:
1926 ide_abort_command(s);
1927 return true;
1928}
1929
1930#define HD_OK (1u << IDE_HD)
1931#define CD_OK (1u << IDE_CD)
1932#define CFA_OK (1u << IDE_CFATA)
1933#define HD_CFA_OK (HD_OK | CFA_OK)
1934#define ALL_OK (HD_OK | CD_OK | CFA_OK)
1935
1936/* Set the Disk Seek Completed status bit during completion */
1937#define SET_DSC (1u << 8)
1938
1939/* See ACS-2 T13/2015-D Table B.2 Command codes */
1940static const struct {
1941 /* Returns true if the completion code should be run */
1942 bool (*handler)(IDEState *s, uint8_t cmd);
1943 int flags;
1944} ide_cmd_table[0x100] = {
1945 /* NOP not implemented, mandatory for CD */
1946 [CFA_REQ_EXT_ERROR_CODE] = { cmd_cfa_req_ext_error_code, CFA_OK },
1947 [WIN_DSM] = { cmd_data_set_management, HD_CFA_OK },
1948 [WIN_DEVICE_RESET] = { cmd_device_reset, CD_OK },
1949 [WIN_RECAL] = { cmd_nop, HD_CFA_OK | SET_DSC},
1950 [WIN_READ] = { cmd_read_pio, ALL_OK },
1951 [WIN_READ_ONCE] = { cmd_read_pio, HD_CFA_OK },
1952 [WIN_READ_EXT] = { cmd_read_pio, HD_CFA_OK },
1953 [WIN_READDMA_EXT] = { cmd_read_dma, HD_CFA_OK },
1954 [WIN_READ_NATIVE_MAX_EXT] = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
1955 [WIN_MULTREAD_EXT] = { cmd_read_multiple, HD_CFA_OK },
1956 [WIN_WRITE] = { cmd_write_pio, HD_CFA_OK },
1957 [WIN_WRITE_ONCE] = { cmd_write_pio, HD_CFA_OK },
1958 [WIN_WRITE_EXT] = { cmd_write_pio, HD_CFA_OK },
1959 [WIN_WRITEDMA_EXT] = { cmd_write_dma, HD_CFA_OK },
1960 [CFA_WRITE_SECT_WO_ERASE] = { cmd_write_pio, CFA_OK },
1961 [WIN_MULTWRITE_EXT] = { cmd_write_multiple, HD_CFA_OK },
1962 [WIN_WRITE_VERIFY] = { cmd_write_pio, HD_CFA_OK },
1963 [WIN_VERIFY] = { cmd_verify, HD_CFA_OK | SET_DSC },
1964 [WIN_VERIFY_ONCE] = { cmd_verify, HD_CFA_OK | SET_DSC },
1965 [WIN_VERIFY_EXT] = { cmd_verify, HD_CFA_OK | SET_DSC },
1966 [WIN_SEEK] = { cmd_seek, HD_CFA_OK | SET_DSC },
1967 [CFA_TRANSLATE_SECTOR] = { cmd_cfa_translate_sector, CFA_OK },
1968 [WIN_DIAGNOSE] = { cmd_exec_dev_diagnostic, ALL_OK },
1969 [WIN_SPECIFY] = { cmd_nop, HD_CFA_OK | SET_DSC },
1970 [WIN_STANDBYNOW2] = { cmd_nop, HD_CFA_OK },
1971 [WIN_IDLEIMMEDIATE2] = { cmd_nop, HD_CFA_OK },
1972 [WIN_STANDBY2] = { cmd_nop, HD_CFA_OK },
1973 [WIN_SETIDLE2] = { cmd_nop, HD_CFA_OK },
1974 [WIN_CHECKPOWERMODE2] = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
1975 [WIN_SLEEPNOW2] = { cmd_nop, HD_CFA_OK },
1976 [WIN_PACKETCMD] = { cmd_packet, CD_OK },
1977 [WIN_PIDENTIFY] = { cmd_identify_packet, CD_OK },
1978 [WIN_SMART] = { cmd_smart, HD_CFA_OK | SET_DSC },
1979 [CFA_ACCESS_METADATA_STORAGE] = { cmd_cfa_access_metadata_storage, CFA_OK },
1980 [CFA_ERASE_SECTORS] = { cmd_cfa_erase_sectors, CFA_OK | SET_DSC },
1981 [WIN_MULTREAD] = { cmd_read_multiple, HD_CFA_OK },
1982 [WIN_MULTWRITE] = { cmd_write_multiple, HD_CFA_OK },
1983 [WIN_SETMULT] = { cmd_set_multiple_mode, HD_CFA_OK | SET_DSC },
1984 [WIN_READDMA] = { cmd_read_dma, HD_CFA_OK },
1985 [WIN_READDMA_ONCE] = { cmd_read_dma, HD_CFA_OK },
1986 [WIN_WRITEDMA] = { cmd_write_dma, HD_CFA_OK },
1987 [WIN_WRITEDMA_ONCE] = { cmd_write_dma, HD_CFA_OK },
1988 [CFA_WRITE_MULTI_WO_ERASE] = { cmd_write_multiple, CFA_OK },
1989 [WIN_STANDBYNOW1] = { cmd_nop, HD_CFA_OK },
1990 [WIN_IDLEIMMEDIATE] = { cmd_nop, HD_CFA_OK },
1991 [WIN_STANDBY] = { cmd_nop, HD_CFA_OK },
1992 [WIN_SETIDLE1] = { cmd_nop, HD_CFA_OK },
1993 [WIN_CHECKPOWERMODE1] = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
1994 [WIN_SLEEPNOW1] = { cmd_nop, HD_CFA_OK },
1995 [WIN_FLUSH_CACHE] = { cmd_flush_cache, ALL_OK },
1996 [WIN_FLUSH_CACHE_EXT] = { cmd_flush_cache, HD_CFA_OK },
1997 [WIN_IDENTIFY] = { cmd_identify, ALL_OK },
1998 [WIN_SETFEATURES] = { cmd_set_features, ALL_OK | SET_DSC },
1999 [IBM_SENSE_CONDITION] = { cmd_ibm_sense_condition, CFA_OK | SET_DSC },
2000 [CFA_WEAR_LEVEL] = { cmd_cfa_erase_sectors, HD_CFA_OK | SET_DSC },
2001 [WIN_READ_NATIVE_MAX] = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
2002};
2003
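/* A command opcode is permitted only if ide_cmd_table has a handler for it
 * and the entry's flags include the bit for this drive kind (HD_OK, CD_OK
 * or CFA_OK). SET_DSC lives above bit 7 and never matches a drive kind. */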
2004static bool ide_cmd_permitted(IDEState *s, uint32_t cmd)
2005{
2006 return cmd < ARRAY_SIZE(ide_cmd_table)
2007 && (ide_cmd_table[cmd].flags & (1u << s->drive_kind));
2008}
2009
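/* Top-level command dispatch. BSY is set while the handler runs; a handler
 * returning true means the command completed synchronously, so BSY is
 * cleared, DSC is set when the table entry requests it, and an IRQ is
 * raised. A handler returning false defers completion to a later transfer
 * or AIO callback. */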
2010void ide_exec_cmd(IDEBus *bus, uint32_t val)
2011{
2012 IDEState *s;
2013 bool complete;
2014
2015 s = idebus_active_if(bus);
2016 trace_ide_exec_cmd(bus, s, val);
2017
2018 /* ignore commands to a non-existent slave */
2019 if (s != bus->ifs && !s->blk) {
2020 return;
2021 }
2022
2023 /* Only RESET is allowed while BSY and/or DRQ are set,
2024 * and only to ATAPI devices. */
2025 if (s->status & (BUSY_STAT|DRQ_STAT)) {
2026 if (val != WIN_DEVICE_RESET || s->drive_kind != IDE_CD) {
2027 return;
2028 }
2029 }
2030
2031 if (!ide_cmd_permitted(s, val)) {
2032 ide_abort_command(s);
2033 ide_set_irq(s->bus);
2034 return;
2035 }
2036
2037 s->status = READY_STAT | BUSY_STAT;
2038 s->error = 0;
2039 s->io_buffer_offset = 0;
2040
2041 complete = ide_cmd_table[val].handler(s, val);
2042 if (complete) {
2043 s->status &= ~BUSY_STAT;
2044 assert(!!s->error == !!(s->status & ERR_STAT));
2045
2046 if ((ide_cmd_table[val].flags & SET_DSC) && !s->error) {
2047 s->status |= SEEK_STAT;
2048 }
2049
2050 ide_cmd_done(s);
2051 ide_set_irq(s->bus);
2052 }
2053}
2054
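/* Command block register reads: addr & 7 selects Error, Sector Count, LBA
 * low/mid/high, Device or Status. Reading the Status register (offset 7)
 * also deasserts the IRQ line, unlike the Alternate Status read below. */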
2055uint32_t ide_ioport_read(void *opaque, uint32_t addr)
2056{
2057 IDEBus *bus = opaque;
2058 IDEState *s = idebus_active_if(bus);
2059 uint32_t reg_num;
2060 int ret, hob;
2061
2062 reg_num = addr & 7;
2063 /* FIXME: HOB readback uses bit 7, but it's always set right now */
2064 //hob = s->select & (1 << 7);
2065 hob = 0;
2066 switch (reg_num) {
2067 case 0:
2068 ret = 0xff;
2069 break;
2070 case 1:
2071 if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2072 (s != bus->ifs && !s->blk)) {
2073 ret = 0;
2074 } else if (!hob) {
2075 ret = s->error;
2076 } else {
2077 ret = s->hob_feature;
2078 }
2079 break;
2080 case 2:
2081 if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2082 ret = 0;
2083 } else if (!hob) {
2084 ret = s->nsector & 0xff;
2085 } else {
2086 ret = s->hob_nsector;
2087 }
2088 break;
2089 case 3:
2090 if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2091 ret = 0;
2092 } else if (!hob) {
2093 ret = s->sector;
2094 } else {
2095 ret = s->hob_sector;
2096 }
2097 break;
2098 case 4:
2099 if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2100 ret = 0;
2101 } else if (!hob) {
2102 ret = s->lcyl;
2103 } else {
2104 ret = s->hob_lcyl;
2105 }
2106 break;
2107 case 5:
2108 if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2109 ret = 0;
2110 } else if (!hob) {
2111 ret = s->hcyl;
2112 } else {
2113 ret = s->hob_hcyl;
2114 }
2115 break;
2116 case 6:
2117 if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2118 ret = 0;
2119 } else {
2120 ret = s->select;
2121 }
2122 break;
2123 default:
2124 case 7:
2125 if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2126 (s != bus->ifs && !s->blk)) {
2127 ret = 0;
2128 } else {
2129 ret = s->status;
2130 }
2131 qemu_irq_lower(bus->irq);
2132 break;
2133 }
2134
2135 trace_ide_ioport_read(addr, ret, bus, s);
2136 return ret;
2137}
2138
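/* Alternate Status (control block): returns the same value as the Status
 * register but does not clear the pending interrupt. */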
2139uint32_t ide_status_read(void *opaque, uint32_t addr)
2140{
2141 IDEBus *bus = opaque;
2142 IDEState *s = idebus_active_if(bus);
2143 int ret;
2144
2145 if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2146 (s != bus->ifs && !s->blk)) {
2147 ret = 0;
2148 } else {
2149 ret = s->status;
2150 }
2151
2152 trace_ide_status_read(addr, ret, bus, s);
2153 return ret;
2154}
2155
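/* Device Control register write. A rising edge on SRST puts both drives
 * into BSY; the falling edge completes the reset and loads the diagnostic
 * signature (ATAPI devices come back with READY clear). */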
2156void ide_cmd_write(void *opaque, uint32_t addr, uint32_t val)
2157{
2158 IDEBus *bus = opaque;
2159 IDEState *s;
2160 int i;
2161
2162 trace_ide_cmd_write(addr, val, bus);
2163
2164 /* common for both drives */
2165 if (!(bus->cmd & IDE_CMD_RESET) &&
2166 (val & IDE_CMD_RESET)) {
2167 /* reset low to high */
2168 for (i = 0; i < 2; i++) {
2169 s = &bus->ifs[i];
2170 s->status = BUSY_STAT | SEEK_STAT;
2171 s->error = 0x01;
2172 }
2173 } else if ((bus->cmd & IDE_CMD_RESET) &&
2174 !(val & IDE_CMD_RESET)) {
2175 /* high to low */
2176 for (i = 0; i < 2; i++) {
2177 s = &bus->ifs[i];
2178 if (s->drive_kind == IDE_CD)
2179 s->status = 0x00; /* NOTE: READY is _not_ set */
2180 else
2181 s->status = READY_STAT | SEEK_STAT;
2182 ide_set_signature(s);
2183 }
2184 }
2185
2186 bus->cmd = val;
2187}
2188
2189/*
2190 * Returns true if the running PIO transfer is a PIO out (i.e. data is
2191 * transferred from the device to the guest), false if it's a PIO in
2192 */
2193static bool ide_is_pio_out(IDEState *s)
2194{
2195 if (s->end_transfer_func == ide_sector_write ||
2196 s->end_transfer_func == ide_atapi_cmd) {
2197 return false;
2198 } else if (s->end_transfer_func == ide_sector_read ||
2199 s->end_transfer_func == ide_transfer_stop ||
2200 s->end_transfer_func == ide_atapi_cmd_reply_end ||
2201 s->end_transfer_func == ide_dummy_transfer_stop) {
2202 return true;
2203 }
2204
2205 abort();
2206}
2207
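/* PIO data port accessors. Each access copies 2 or 4 bytes between the
 * guest and io_buffer through data_ptr; once data_ptr reaches data_end,
 * DRQ is cleared and end_transfer_func() decides what happens next. */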
2208void ide_data_writew(void *opaque, uint32_t addr, uint32_t val)
2209{
2210 IDEBus *bus = opaque;
2211 IDEState *s = idebus_active_if(bus);
2212 uint8_t *p;
2213
2214 /* PIO data access allowed only when DRQ bit is set. The result of a write
2215 * during PIO out is indeterminate, just ignore it. */
2216 if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2217 return;
2218 }
2219
2220 p = s->data_ptr;
2221 if (p + 2 > s->data_end) {
2222 return;
2223 }
2224
2225 *(uint16_t *)p = le16_to_cpu(val);
2226 p += 2;
2227 s->data_ptr = p;
2228 if (p >= s->data_end) {
2229 s->status &= ~DRQ_STAT;
2230 s->end_transfer_func(s);
2231 }
2232}
2233
2234uint32_t ide_data_readw(void *opaque, uint32_t addr)
2235{
2236 IDEBus *bus = opaque;
2237 IDEState *s = idebus_active_if(bus);
2238 uint8_t *p;
2239 int ret;
2240
2241 /* PIO data access allowed only when DRQ bit is set. The result of a read
2242 * during PIO in is indeterminate, return 0 and don't move forward. */
2243 if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2244 return 0;
2245 }
2246
2247 p = s->data_ptr;
2248 if (p + 2 > s->data_end) {
2249 return 0;
2250 }
2251
2252 ret = cpu_to_le16(*(uint16_t *)p);
2253 p += 2;
2254 s->data_ptr = p;
2255 if (p >= s->data_end) {
2256 s->status &= ~DRQ_STAT;
2257 s->end_transfer_func(s);
2258 }
2259 return ret;
2260}
2261
2262void ide_data_writel(void *opaque, uint32_t addr, uint32_t val)
2263{
2264 IDEBus *bus = opaque;
2265 IDEState *s = idebus_active_if(bus);
2266 uint8_t *p;
2267
2268 /* PIO data access allowed only when DRQ bit is set. The result of a write
2269 * during PIO out is indeterminate, just ignore it. */
2270 if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2271 return;
2272 }
2273
2274 p = s->data_ptr;
2275 if (p + 4 > s->data_end) {
2276 return;
2277 }
2278
2279 *(uint32_t *)p = le32_to_cpu(val);
2280 p += 4;
2281 s->data_ptr = p;
2282 if (p >= s->data_end) {
2283 s->status &= ~DRQ_STAT;
2284 s->end_transfer_func(s);
2285 }
2286}
2287
2288uint32_t ide_data_readl(void *opaque, uint32_t addr)
2289{
2290 IDEBus *bus = opaque;
2291 IDEState *s = idebus_active_if(bus);
2292 uint8_t *p;
2293 int ret;
2294
2295 /* PIO data access allowed only when DRQ bit is set. The result of a read
2296 * during PIO in is indeterminate, return 0 and don't move forward. */
2297 if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2298 return 0;
2299 }
2300
2301 p = s->data_ptr;
2302 if (p + 4 > s->data_end) {
2303 return 0;
2304 }
2305
2306 ret = cpu_to_le32(*(uint32_t *)p);
2307 p += 4;
2308 s->data_ptr = p;
2309 if (p >= s->data_end) {
2310 s->status &= ~DRQ_STAT;
2311 s->end_transfer_func(s);
2312 }
2313 return ret;
2314}
2315
2316static void ide_dummy_transfer_stop(IDEState *s)
2317{
2318 s->data_ptr = s->io_buffer;
2319 s->data_end = s->io_buffer;
2320 s->io_buffer[0] = 0xff;
2321 s->io_buffer[1] = 0xff;
2322 s->io_buffer[2] = 0xff;
2323 s->io_buffer[3] = 0xff;
2324}
2325
2326void ide_bus_reset(IDEBus *bus)
2327{
2328 bus->unit = 0;
2329 bus->cmd = 0;
2330 ide_reset(&bus->ifs[0]);
2331 ide_reset(&bus->ifs[1]);
2332 ide_clear_hob(bus);
2333
2334 /* pending async DMA */
2335 if (bus->dma->aiocb) {
2336#ifdef DEBUG_AIO
2337 printf("aio_cancel\n");
2338#endif
2339 blk_aio_cancel(bus->dma->aiocb);
2340 bus->dma->aiocb = NULL;
2341 }
2342
2343 /* reset dma provider too */
2344 if (bus->dma->ops->reset) {
2345 bus->dma->ops->reset(bus->dma);
2346 }
2347}
2348
2349static bool ide_cd_is_tray_open(void *opaque)
2350{
2351 return ((IDEState *)opaque)->tray_open;
2352}
2353
2354static bool ide_cd_is_medium_locked(void *opaque)
2355{
2356 return ((IDEState *)opaque)->tray_locked;
2357}
2358
2359static void ide_resize_cb(void *opaque)
2360{
2361 IDEState *s = opaque;
2362 uint64_t nb_sectors;
2363
2364 if (!s->identify_set) {
2365 return;
2366 }
2367
2368 blk_get_geometry(s->blk, &nb_sectors);
2369 s->nb_sectors = nb_sectors;
2370
2371 /* Update the identify data buffer. */
2372 if (s->drive_kind == IDE_CFATA) {
2373 ide_cfata_identify_size(s);
2374 } else {
2375 /* IDE_CD uses a different set of callbacks entirely. */
2376 assert(s->drive_kind != IDE_CD);
2377 ide_identify_size(s);
2378 }
2379}
2380
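/* Block-layer callbacks: CD-ROMs need tray and medium-change notifications,
 * hard disks only need a resize callback to refresh the IDENTIFY data. */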
2381static const BlockDevOps ide_cd_block_ops = {
2382 .change_media_cb = ide_cd_change_cb,
2383 .eject_request_cb = ide_cd_eject_request_cb,
2384 .is_tray_open = ide_cd_is_tray_open,
2385 .is_medium_locked = ide_cd_is_medium_locked,
2386};
2387
2388static const BlockDevOps ide_hd_block_ops = {
2389 .resize_cb = ide_resize_cb,
2390};
2391
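/* One-time drive setup: record geometry, WWN and ID strings, initialise the
 * (non-persistent) SMART state, attach the block-layer callbacks above, and
 * refuse empty or read-only backends for disk-like devices. */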
2392int ide_init_drive(IDEState *s, BlockBackend *blk, IDEDriveKind kind,
2393 const char *version, const char *serial, const char *model,
2394 uint64_t wwn,
2395 uint32_t cylinders, uint32_t heads, uint32_t secs,
2396 int chs_trans)
2397{
2398 uint64_t nb_sectors;
2399
2400 s->blk = blk;
2401 s->drive_kind = kind;
2402
2403 blk_get_geometry(blk, &nb_sectors);
2404 s->cylinders = cylinders;
2405 s->heads = heads;
2406 s->sectors = secs;
2407 s->chs_trans = chs_trans;
2408 s->nb_sectors = nb_sectors;
2409 s->wwn = wwn;
2410 /* The SMART values should be preserved across power cycles
2411 but they aren't. */
2412 s->smart_enabled = 1;
2413 s->smart_autosave = 1;
2414 s->smart_errors = 0;
2415 s->smart_selftest_count = 0;
2416 if (kind == IDE_CD) {
2417 blk_set_dev_ops(blk, &ide_cd_block_ops, s);
2418 blk_set_guest_block_size(blk, 2048);
2419 } else {
2420 if (!blk_is_inserted(s->blk)) {
2421 error_report("Device needs media, but drive is empty");
2422 return -1;
2423 }
2424 if (blk_is_read_only(blk)) {
2425 error_report("Can't use a read-only drive");
2426 return -1;
2427 }
2428 blk_set_dev_ops(blk, &ide_hd_block_ops, s);
2429 }
2430 if (serial) {
2431 pstrcpy(s->drive_serial_str, sizeof(s->drive_serial_str), serial);
2432 } else {
2433 snprintf(s->drive_serial_str, sizeof(s->drive_serial_str),
2434 "QM%05d", s->drive_serial);
2435 }
2436 if (model) {
2437 pstrcpy(s->drive_model_str, sizeof(s->drive_model_str), model);
2438 } else {
2439 switch (kind) {
2440 case IDE_CD:
2441 strcpy(s->drive_model_str, "QEMU DVD-ROM");
2442 break;
2443 case IDE_CFATA:
2444 strcpy(s->drive_model_str, "QEMU MICRODRIVE");
2445 break;
2446 default:
2447 strcpy(s->drive_model_str, "QEMU HARDDISK");
2448 break;
2449 }
2450 }
2451
2452 if (version) {
2453 pstrcpy(s->version, sizeof(s->version), version);
2454 } else {
2455 pstrcpy(s->version, sizeof(s->version), qemu_hw_version());
2456 }
2457
2458 ide_reset(s);
2459 blk_iostatus_enable(blk);
2460 return 0;
2461}
2462
2463static void ide_init1(IDEBus *bus, int unit)
2464{
2465 static int drive_serial = 1;
2466 IDEState *s = &bus->ifs[unit];
2467
2468 s->bus = bus;
2469 s->unit = unit;
2470 s->drive_serial = drive_serial++;
2471 /* we need at least 2k alignment for accessing CDROMs using O_DIRECT */
2472 s->io_buffer_total_len = IDE_DMA_BUF_SECTORS * 512 + 4;
2473 s->io_buffer = qemu_memalign(2048, s->io_buffer_total_len);
2474 memset(s->io_buffer, 0, s->io_buffer_total_len);
2475
2476 s->smart_selftest_data = blk_blockalign(s->blk, 512);
2477 memset(s->smart_selftest_data, 0, 512);
2478
2479 s->sector_write_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
2480 ide_sector_write_timer_cb, s);
2481}
2482
2483static int ide_nop_int(IDEDMA *dma, int x)
2484{
2485 return 0;
2486}
2487
2488static void ide_nop(IDEDMA *dma)
2489{
2490}
2491
2492static int32_t ide_nop_int32(IDEDMA *dma, int32_t l)
2493{
2494 return 0;
2495}
2496
2497static const IDEDMAOps ide_dma_nop_ops = {
2498 .prepare_buf = ide_nop_int32,
2499 .restart_dma = ide_nop,
2500 .rw_buf = ide_nop_int,
2501};
2502
2503static void ide_restart_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
2504{
2505 s->unit = s->bus->retry_unit;
2506 ide_set_sector(s, s->bus->retry_sector_num);
2507 s->nsector = s->bus->retry_nsector;
2508 s->bus->dma->ops->restart_dma(s->bus->dma);
2509 s->io_buffer_size = 0;
2510 s->dma_cmd = dma_cmd;
2511 ide_start_dma(s, ide_dma_cb);
2512}
2513
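/* Bottom half scheduled when the VM resumes: decode bus->error_status and
 * replay whichever request (DMA, PIO, flush or ATAPI DMA) was interrupted,
 * after clearing the status so a repeated failure can be told apart. */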
2514static void ide_restart_bh(void *opaque)
2515{
2516 IDEBus *bus = opaque;
2517 IDEState *s;
2518 bool is_read;
2519 int error_status;
2520
2521 qemu_bh_delete(bus->bh);
2522 bus->bh = NULL;
2523
2524 error_status = bus->error_status;
2525 if (bus->error_status == 0) {
2526 return;
2527 }
2528
2529 s = idebus_active_if(bus);
2530 is_read = (bus->error_status & IDE_RETRY_READ) != 0;
2531
2532 /* The error status must be cleared before resubmitting the request: The
2533 * request may fail again, and this case can only be distinguished if the
2534 * called function can set a new error status. */
2535 bus->error_status = 0;
2536
2537 /* The HBA has generically asked to be kicked on retry */
2538 if (error_status & IDE_RETRY_HBA) {
2539 if (s->bus->dma->ops->restart) {
2540 s->bus->dma->ops->restart(s->bus->dma);
2541 }
2542 } else if (IS_IDE_RETRY_DMA(error_status)) {
2543 if (error_status & IDE_RETRY_TRIM) {
2544 ide_restart_dma(s, IDE_DMA_TRIM);
2545 } else {
2546 ide_restart_dma(s, is_read ? IDE_DMA_READ : IDE_DMA_WRITE);
2547 }
2548 } else if (IS_IDE_RETRY_PIO(error_status)) {
2549 if (is_read) {
2550 ide_sector_read(s);
2551 } else {
2552 ide_sector_write(s);
2553 }
2554 } else if (error_status & IDE_RETRY_FLUSH) {
2555 ide_flush_cache(s);
2556 } else if (IS_IDE_RETRY_ATAPI(error_status)) {
2557 assert(s->end_transfer_func == ide_atapi_cmd);
2558 ide_atapi_dma_restart(s);
2559 } else {
2560 abort();
2561 }
2562}
2563
2564static void ide_restart_cb(void *opaque, int running, RunState state)
2565{
2566 IDEBus *bus = opaque;
2567
2568 if (!running)
2569 return;
2570
2571 if (!bus->bh) {
2572 bus->bh = qemu_bh_new(ide_restart_bh, bus);
2573 qemu_bh_schedule(bus->bh);
2574 }
2575}
2576
2577void ide_register_restart_cb(IDEBus *bus)
2578{
2579 if (bus->dma->ops->restart_dma) {
2580 bus->vmstate = qemu_add_vm_change_state_handler(ide_restart_cb, bus);
2581 }
2582}
2583
2584static IDEDMA ide_dma_nop = {
2585 .ops = &ide_dma_nop_ops,
2586 .aiocb = NULL,
2587};
2588
2589void ide_init2(IDEBus *bus, qemu_irq irq)
2590{
2591 int i;
2592
2593 for (i = 0; i < 2; i++) {
2594 ide_init1(bus, i);
2595 ide_reset(&bus->ifs[i]);
2596 }
2597 bus->irq = irq;
2598 bus->dma = &ide_dma_nop;
2599}
2600
2601void ide_exit(IDEState *s)
2602{
2603 timer_del(s->sector_write_timer);
2604 timer_free(s->sector_write_timer);
2605 qemu_vfree(s->smart_selftest_data);
2606 qemu_vfree(s->io_buffer);
2607}
2608
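/* Legacy ISA port layout: the command block occupies eight ports at iobase
 * (offset 0 doubles as the 16/32-bit data port), while iobase2 maps the
 * control block (Alternate Status on read, Device Control on write). */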
2609static const MemoryRegionPortio ide_portio_list[] = {
2610 { 0, 8, 1, .read = ide_ioport_read, .write = ide_ioport_write },
2611 { 0, 1, 2, .read = ide_data_readw, .write = ide_data_writew },
2612 { 0, 1, 4, .read = ide_data_readl, .write = ide_data_writel },
2613 PORTIO_END_OF_LIST(),
2614};
2615
2616static const MemoryRegionPortio ide_portio2_list[] = {
2617 { 0, 1, 1, .read = ide_status_read, .write = ide_cmd_write },
2618 PORTIO_END_OF_LIST(),
2619};
2620
2621void ide_init_ioport(IDEBus *bus, ISADevice *dev, int iobase, int iobase2)
2622{
2623 /* ??? Assume only ISA and PCI configurations, and that the PCI-ISA
2624 bridge has been set up properly to always register with ISA. */
2625 isa_register_portio_list(dev, &bus->portio_list,
2626 iobase, ide_portio_list, bus, "ide");
2627
2628 if (iobase2) {
2629 isa_register_portio_list(dev, &bus->portio2_list,
2630 iobase2, ide_portio2_list, bus, "ide");
2631 }
2632}
2633
2634static bool is_identify_set(void *opaque, int version_id)
2635{
2636 IDEState *s = opaque;
2637
2638 return s->identify_set != 0;
2639}
2640
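/* end_transfer_func is a function pointer and cannot be migrated directly,
 * so it is encoded as an index into this table; data_ptr and data_end are
 * likewise saved as offsets into io_buffer (see the pre_save/post_load
 * hooks below). */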
2641static EndTransferFunc* transfer_end_table[] = {
2642 ide_sector_read,
2643 ide_sector_write,
2644 ide_transfer_stop,
2645 ide_atapi_cmd_reply_end,
2646 ide_atapi_cmd,
2647 ide_dummy_transfer_stop,
2648};
2649
2650static int transfer_end_table_idx(EndTransferFunc *fn)
2651{
2652 int i;
2653
2654 for (i = 0; i < ARRAY_SIZE(transfer_end_table); i++)
2655 if (transfer_end_table[i] == fn)
2656 return i;
2657
2658 return -1;
2659}
2660
2661static int ide_drive_post_load(void *opaque, int version_id)
2662{
2663 IDEState *s = opaque;
2664
2665 if (s->blk && s->identify_set) {
2666 blk_set_enable_write_cache(s->blk, !!(s->identify_data[85] & (1 << 5)));
2667 }
2668 return 0;
2669}
2670
2671static int ide_drive_pio_post_load(void *opaque, int version_id)
2672{
2673 IDEState *s = opaque;
2674
2675 if (s->end_transfer_fn_idx >= ARRAY_SIZE(transfer_end_table)) {
2676 return -EINVAL;
2677 }
2678 s->end_transfer_func = transfer_end_table[s->end_transfer_fn_idx];
2679 s->data_ptr = s->io_buffer + s->cur_io_buffer_offset;
2680 s->data_end = s->data_ptr + s->cur_io_buffer_len;
2681 s->atapi_dma = s->feature & 1; /* as per cmd_packet */
2682
2683 return 0;
2684}
2685
2686static void ide_drive_pio_pre_save(void *opaque)
2687{
2688 IDEState *s = opaque;
2689 int idx;
2690
2691 s->cur_io_buffer_offset = s->data_ptr - s->io_buffer;
2692 s->cur_io_buffer_len = s->data_end - s->data_ptr;
2693
2694 idx = transfer_end_table_idx(s->end_transfer_func);
2695 if (idx == -1) {
2696 fprintf(stderr, "%s: invalid end_transfer_func for DRQ_STAT\n",
2697 __func__);
2698 s->end_transfer_fn_idx = 2;
2699 } else {
2700 s->end_transfer_fn_idx = idx;
2701 }
2702}
2703
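/* The .needed predicates below gate optional migration subsections so that
 * PIO, tray, GESN event and error/retry state are only sent when they
 * actually carry information. */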
2704static bool ide_drive_pio_state_needed(void *opaque)
2705{
2706 IDEState *s = opaque;
2707
2708 return ((s->status & DRQ_STAT) != 0)
2709 || (s->bus->error_status & IDE_RETRY_PIO);
2710}
2711
2712static bool ide_tray_state_needed(void *opaque)
2713{
2714 IDEState *s = opaque;
2715
2716 return s->tray_open || s->tray_locked;
2717}
2718
2719static bool ide_atapi_gesn_needed(void *opaque)
2720{
2721 IDEState *s = opaque;
2722
2723 return s->events.new_media || s->events.eject_request;
2724}
2725
2726static bool ide_error_needed(void *opaque)
2727{
2728 IDEBus *bus = opaque;
2729
2730 return (bus->error_status != 0);
2731}
2732
2733/* Fields for GET_EVENT_STATUS_NOTIFICATION ATAPI command */
2734static const VMStateDescription vmstate_ide_atapi_gesn_state = {
2735 .name = "ide_drive/atapi/gesn_state",
2736 .version_id = 1,
2737 .minimum_version_id = 1,
2738 .needed = ide_atapi_gesn_needed,
2739 .fields = (VMStateField[]) {
2740 VMSTATE_BOOL(events.new_media, IDEState),
2741 VMSTATE_BOOL(events.eject_request, IDEState),
2742 VMSTATE_END_OF_LIST()
2743 }
2744};
2745
2746static const VMStateDescription vmstate_ide_tray_state = {
2747 .name = "ide_drive/tray_state",
2748 .version_id = 1,
2749 .minimum_version_id = 1,
2750 .needed = ide_tray_state_needed,
2751 .fields = (VMStateField[]) {
2752 VMSTATE_BOOL(tray_open, IDEState),
2753 VMSTATE_BOOL(tray_locked, IDEState),
2754 VMSTATE_END_OF_LIST()
2755 }
2756};
2757
2758static const VMStateDescription vmstate_ide_drive_pio_state = {
2759 .name = "ide_drive/pio_state",
2760 .version_id = 1,
2761 .minimum_version_id = 1,
2762 .pre_save = ide_drive_pio_pre_save,
2763 .post_load = ide_drive_pio_post_load,
2764 .needed = ide_drive_pio_state_needed,
2765 .fields = (VMStateField[]) {
2766 VMSTATE_INT32(req_nb_sectors, IDEState),
2767 VMSTATE_VARRAY_INT32(io_buffer, IDEState, io_buffer_total_len, 1,
2768 vmstate_info_uint8, uint8_t),
2769 VMSTATE_INT32(cur_io_buffer_offset, IDEState),
2770 VMSTATE_INT32(cur_io_buffer_len, IDEState),
2771 VMSTATE_UINT8(end_transfer_fn_idx, IDEState),
2772 VMSTATE_INT32(elementary_transfer_size, IDEState),
2773 VMSTATE_INT32(packet_transfer_size, IDEState),
2774 VMSTATE_END_OF_LIST()
2775 }
2776};
2777
2778const VMStateDescription vmstate_ide_drive = {
2779 .name = "ide_drive",
2780 .version_id = 3,
2781 .minimum_version_id = 0,
2782 .post_load = ide_drive_post_load,
2783 .fields = (VMStateField[]) {
2784 VMSTATE_INT32(mult_sectors, IDEState),
2785 VMSTATE_INT32(identify_set, IDEState),
2786 VMSTATE_BUFFER_TEST(identify_data, IDEState, is_identify_set),
2787 VMSTATE_UINT8(feature, IDEState),
2788 VMSTATE_UINT8(error, IDEState),
2789 VMSTATE_UINT32(nsector, IDEState),
2790 VMSTATE_UINT8(sector, IDEState),
2791 VMSTATE_UINT8(lcyl, IDEState),
2792 VMSTATE_UINT8(hcyl, IDEState),
2793 VMSTATE_UINT8(hob_feature, IDEState),
2794 VMSTATE_UINT8(hob_sector, IDEState),
2795 VMSTATE_UINT8(hob_nsector, IDEState),
2796 VMSTATE_UINT8(hob_lcyl, IDEState),
2797 VMSTATE_UINT8(hob_hcyl, IDEState),
2798 VMSTATE_UINT8(select, IDEState),
2799 VMSTATE_UINT8(status, IDEState),
2800 VMSTATE_UINT8(lba48, IDEState),
2801 VMSTATE_UINT8(sense_key, IDEState),
2802 VMSTATE_UINT8(asc, IDEState),
2803 VMSTATE_UINT8_V(cdrom_changed, IDEState, 3),
2804 VMSTATE_END_OF_LIST()
2805 },
2806 .subsections = (const VMStateDescription*[]) {
2807 &vmstate_ide_drive_pio_state,
2808 &vmstate_ide_tray_state,
2809 &vmstate_ide_atapi_gesn_state,
2810 NULL
2811 }
2812};
2813
2814static const VMStateDescription vmstate_ide_error_status = {
2815 .name = "ide_bus/error",
2816 .version_id = 2,
2817 .minimum_version_id = 1,
2818 .needed = ide_error_needed,
2819 .fields = (VMStateField[]) {
2820 VMSTATE_INT32(error_status, IDEBus),
2821 VMSTATE_INT64_V(retry_sector_num, IDEBus, 2),
2822 VMSTATE_UINT32_V(retry_nsector, IDEBus, 2),
2823 VMSTATE_UINT8_V(retry_unit, IDEBus, 2),
2824 VMSTATE_END_OF_LIST()
2825 }
2826};
2827
2828const VMStateDescription vmstate_ide_bus = {
2829 .name = "ide_bus",
2830 .version_id = 1,
2831 .minimum_version_id = 1,
2832 .fields = (VMStateField[]) {
2833 VMSTATE_UINT8(cmd, IDEBus),
2834 VMSTATE_UINT8(unit, IDEBus),
2835 VMSTATE_END_OF_LIST()
2836 },
2837 .subsections = (const VMStateDescription*[]) {
2838 &vmstate_ide_error_status,
2839 NULL
2840 }
2841};
2842
2843void ide_drive_get(DriveInfo **hd, int n)
2844{
2845 int i;
2846
2847 for (i = 0; i < n; i++) {
2848 hd[i] = drive_get_by_index(IF_IDE, i);
2849 }
2850}