#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/ide.h>
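
/*
 * PM for legacy IDE devices is driven by special block-layer requests
 * (ATA_PRIV_PM_SUSPEND / ATA_PRIV_PM_RESUME) whose rq->special points
 * at a struct ide_pm_state; each request is walked through the step
 * machine in ide_start_power_step() / ide_complete_power_step() below.
 */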
int generic_ide_suspend(struct device *dev, pm_message_t mesg)
{
	ide_drive_t *drive = to_ide_device(dev);
	ide_drive_t *pair = ide_get_pair_dev(drive);
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq;
	struct ide_pm_state rqpm;
	int ret;

	if (ide_port_acpi(hwif)) {
		/* call ACPI _GTM only once */
		if ((drive->dn & 1) == 0 || pair == NULL)
			ide_acpi_get_timing(hwif);
	}

	memset(&rqpm, 0, sizeof(rqpm));
	rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
	ide_req(rq)->type = ATA_PRIV_PM_SUSPEND;
	rq->special = &rqpm;
	rqpm.pm_step = IDE_PM_START_SUSPEND;
	if (mesg.event == PM_EVENT_PRETHAW)
		mesg.event = PM_EVENT_FREEZE;
	rqpm.pm_state = mesg.event;

	blk_execute_rq(drive->queue, NULL, rq, 0);
	ret = scsi_req(rq)->result ? -EIO : 0;
	blk_put_request(rq);

	if (ret == 0 && ide_port_acpi(hwif)) {
		/* call ACPI _PS3 only after both devices are suspended */
		if ((drive->dn & 1) || pair == NULL)
			ide_acpi_set_state(hwif, 0);
	}

	return ret;
}
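
/*
 * end_io handler for the synchronous PM requests issued by
 * ide_pm_execute_rq() below: wake up the waiting task.
 */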
static void ide_end_sync_rq(struct request *rq, blk_status_t error)
{
	complete(rq->end_io_data);
}
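
/*
 * Execute a PM request synchronously. blk_execute_rq() is not usable
 * here: the queue is stopped while the device is suspended (see
 * ide_complete_pm_rq()) and only restarted once the resume request is
 * processed, so the request must be inserted at the head of the queue
 * and the queue run unconditionally.
 */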
static int ide_pm_execute_rq(struct request *rq)
{
	struct request_queue *q = rq->q;
	DECLARE_COMPLETION_ONSTACK(wait);

	rq->end_io_data = &wait;
	rq->end_io = ide_end_sync_rq;

	spin_lock_irq(q->queue_lock);
	if (unlikely(blk_queue_dying(q))) {
		rq->rq_flags |= RQF_QUIET;
		scsi_req(rq)->result = -ENXIO;
		__blk_end_request_all(rq, BLK_STS_OK);
		spin_unlock_irq(q->queue_lock);
		return -ENXIO;
	}
	__elv_add_request(q, rq, ELEVATOR_INSERT_FRONT);
	__blk_run_queue_uncond(q);
	spin_unlock_irq(q->queue_lock);

	wait_for_completion_io(&wait);

	return scsi_req(rq)->result ? -EIO : 0;
}
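
/*
 * Resume mirrors generic_ide_suspend(): restore the ACPI state first,
 * then push an ATA_PRIV_PM_RESUME request through the still-stopped
 * queue. RQF_PREEMPT marks the request as one that may be dispatched
 * while the device is blocked for power management.
 */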
int generic_ide_resume(struct device *dev)
{
	ide_drive_t *drive = to_ide_device(dev);
	ide_drive_t *pair = ide_get_pair_dev(drive);
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq;
	struct ide_pm_state rqpm;
	int err;

	if (ide_port_acpi(hwif)) {
		/* call ACPI _PS0 / _STM only once */
		if ((drive->dn & 1) == 0 || pair == NULL) {
			ide_acpi_set_state(hwif, 1);
			ide_acpi_push_timing(hwif);
		}

		ide_acpi_exec_tfs(drive);
	}

	memset(&rqpm, 0, sizeof(rqpm));
	rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
	ide_req(rq)->type = ATA_PRIV_PM_RESUME;
	rq->rq_flags |= RQF_PREEMPT;
	rq->special = &rqpm;
	rqpm.pm_step = IDE_PM_START_RESUME;
	rqpm.pm_state = PM_EVENT_ON;

	err = ide_pm_execute_rq(rq);
	blk_put_request(rq);

	if (err == 0 && dev->driver) {
		struct ide_driver *drv = to_ide_driver(dev->driver);

		if (drv->resume)
			drv->resume(drive);
	}

	return err;
}
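
/*
 * Advance pm->pm_step once the current step has finished. Suspend
 * walks flush cache -> standby -> completed (a freeze skips the
 * standby step); resume walks restore PIO -> idle -> restore DMA.
 */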
void ide_complete_power_step(ide_drive_t *drive, struct request *rq)
{
	struct ide_pm_state *pm = rq->special;

#ifdef DEBUG_PM
	printk(KERN_INFO "%s: complete_power_step(step: %d)\n",
		drive->name, pm->pm_step);
#endif
	if (drive->media != ide_disk)
		return;

	switch (pm->pm_step) {
	case IDE_PM_FLUSH_CACHE:	/* Suspend step 1 (flush cache) */
		if (pm->pm_state == PM_EVENT_FREEZE)
			pm->pm_step = IDE_PM_COMPLETED;
		else
			pm->pm_step = IDE_PM_STANDBY;
		break;
	case IDE_PM_STANDBY:		/* Suspend step 2 (standby) */
		pm->pm_step = IDE_PM_COMPLETED;
		break;
	case IDE_PM_RESTORE_PIO:	/* Resume step 1 (restore PIO) */
		pm->pm_step = IDE_PM_IDLE;
		break;
	case IDE_PM_IDLE:		/* Resume step 2 (idle) */
		pm->pm_step = IDE_PM_RESTORE_DMA;
		break;
	}
}
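
/*
 * Kick off the current PM step: either issue the corresponding
 * taskfile command (flush/standby/idle) or, for the PIO/DMA restore
 * steps, reprogram the transfer mode directly with no drive command.
 */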
ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
{
	struct ide_pm_state *pm = rq->special;
	struct ide_cmd cmd = { };

	switch (pm->pm_step) {
	case IDE_PM_FLUSH_CACHE:	/* Suspend step 1 (flush cache) */
		if (drive->media != ide_disk)
			break;
		/* Not supported? Switch to next step now. */
		if (ata_id_flush_enabled(drive->id) == 0 ||
		    (drive->dev_flags & IDE_DFLAG_WCACHE) == 0) {
			ide_complete_power_step(drive, rq);
			return ide_stopped;
		}
		if (ata_id_flush_ext_enabled(drive->id))
			cmd.tf.command = ATA_CMD_FLUSH_EXT;
		else
			cmd.tf.command = ATA_CMD_FLUSH;
		goto out_do_tf;
	case IDE_PM_STANDBY:		/* Suspend step 2 (standby) */
		cmd.tf.command = ATA_CMD_STANDBYNOW1;
		goto out_do_tf;
	case IDE_PM_RESTORE_PIO:	/* Resume step 1 (restore PIO) */
		ide_set_max_pio(drive);
		/*
		 * skip IDE_PM_IDLE for ATAPI devices
		 */
		if (drive->media != ide_disk)
			pm->pm_step = IDE_PM_RESTORE_DMA;
		else
			ide_complete_power_step(drive, rq);
		return ide_stopped;
	case IDE_PM_IDLE:		/* Resume step 2 (idle) */
		cmd.tf.command = ATA_CMD_IDLEIMMEDIATE;
		goto out_do_tf;
	case IDE_PM_RESTORE_DMA:	/* Resume step 3 (restore DMA) */
		/*
		 * Right now, all we do is call ide_set_dma(drive),
		 * we could be smarter and check for current xfer_speed
		 * in struct drive etc...
		 */
		if (drive->hwif->dma_ops == NULL)
			break;
		/*
		 * TODO: respect IDE_DFLAG_USING_DMA
		 */
		ide_set_dma(drive);
		break;
	}

	pm->pm_step = IDE_PM_COMPLETED;

	return ide_stopped;

out_do_tf:
	cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
	cmd.valid.in.tf  = IDE_VALID_IN_TF  | IDE_VALID_DEVICE;
	cmd.protocol = ATA_PROT_NODATA;

	return do_rw_taskfile(drive, &cmd);
}

/**
 *	ide_complete_pm_rq - end the current Power Management request
 *	@drive: target drive
 *	@rq: request
 *
 *	This function cleans up the current PM request and stops the queue
 *	if necessary.
 */
void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq)
{
	struct request_queue *q = drive->queue;
	struct ide_pm_state *pm = rq->special;
	unsigned long flags;

	ide_complete_power_step(drive, rq);
	if (pm->pm_step != IDE_PM_COMPLETED)
		return;

#ifdef DEBUG_PM
	printk("%s: completing PM request, %s\n", drive->name,
	       (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND) ? "suspend" : "resume");
#endif
	spin_lock_irqsave(q->queue_lock, flags);
	if (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND)
		blk_stop_queue(q);
	else
		drive->dev_flags &= ~IDE_DFLAG_BLOCKED;
	spin_unlock_irqrestore(q->queue_lock, flags);

	drive->hwif->rq = NULL;

	if (blk_end_request(rq, BLK_STS_OK, 0))
		BUG();
}
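
/*
 * Called when a PM request reaches the head of the queue: mark the
 * drive blocked at the start of suspend; at the start of resume, wait
 * for the hardware to report not-busy and restart the stopped queue.
 */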
void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
{
	struct ide_pm_state *pm = rq->special;

	if (blk_rq_is_private(rq) &&
	    ide_req(rq)->type == ATA_PRIV_PM_SUSPEND &&
	    pm->pm_step == IDE_PM_START_SUSPEND)
		/* Mark drive blocked when starting the suspend sequence. */
		drive->dev_flags |= IDE_DFLAG_BLOCKED;
	else if (blk_rq_is_private(rq) &&
		 ide_req(rq)->type == ATA_PRIV_PM_RESUME &&
		 pm->pm_step == IDE_PM_START_RESUME) {
		/*
		 * The first thing we do on wakeup is to wait for BSY bit to
		 * go away (with a looong timeout) as a drive on this hwif may
		 * just be POSTing itself.
		 * We do that before even selecting as the "other" device on
		 * the bus may be broken enough to walk on our toes at this
		 * point.
		 */
		ide_hwif_t *hwif = drive->hwif;
		const struct ide_tp_ops *tp_ops = hwif->tp_ops;
		struct request_queue *q = drive->queue;
		unsigned long flags;
		int rc;
#ifdef DEBUG_PM
		printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
#endif
		rc = ide_wait_not_busy(hwif, 35000);
		if (rc)
			printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
		tp_ops->dev_select(drive);
		tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
		rc = ide_wait_not_busy(hwif, 100000);
		if (rc)
			printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}