#include <linux/kernel.h>
#include <linux/ide.h>
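
/*
 * generic_ide_suspend - suspend handler for IDE devices
 *
 * Issues a REQ_TYPE_PM_SUSPEND request through the drive's queue so the
 * power management state machine (cache flush, standby) runs via the
 * normal request path.  ACPI _GTM is invoked once per port before the
 * request and _PS3 once after both devices on the port are suspended.
 */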
int generic_ide_suspend(struct device *dev, pm_message_t mesg)
{
	ide_drive_t *drive = dev_get_drvdata(dev);
	ide_drive_t *pair = ide_get_pair_dev(drive);
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq;
	struct request_pm_state rqpm;
	int ret;

	if (ide_port_acpi(hwif)) {
		/* call ACPI _GTM only once */
		if ((drive->dn & 1) == 0 || pair == NULL)
			ide_acpi_get_timing(hwif);
	}

	memset(&rqpm, 0, sizeof(rqpm));
	rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
	rq->cmd_type = REQ_TYPE_PM_SUSPEND;
	rq->special = &rqpm;
	rqpm.pm_step = IDE_PM_START_SUSPEND;
	if (mesg.event == PM_EVENT_PRETHAW)
		mesg.event = PM_EVENT_FREEZE;
	rqpm.pm_state = mesg.event;

	ret = blk_execute_rq(drive->queue, NULL, rq, 0);
	blk_put_request(rq);

	if (ret == 0 && ide_port_acpi(hwif)) {
		/* call ACPI _PS3 only after both devices are suspended */
		if ((drive->dn & 1) || pair == NULL)
			ide_acpi_set_state(hwif, 0);
	}

	return ret;
}
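
/*
 * generic_ide_resume - resume handler for IDE devices
 *
 * Restores the port's ACPI state and timings once per port, then issues a
 * preempting REQ_TYPE_PM_RESUME request so the resume steps (restore PIO,
 * idle, restore DMA) run through the drive's queue.  On success the
 * attached ide_driver's ->resume() method is called if it exists.
 */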
int generic_ide_resume(struct device *dev)
{
	ide_drive_t *drive = dev_get_drvdata(dev);
	ide_drive_t *pair = ide_get_pair_dev(drive);
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq;
	struct request_pm_state rqpm;
	int err;

	if (ide_port_acpi(hwif)) {
		/* call ACPI _PS0 / _STM only once */
		if ((drive->dn & 1) == 0 || pair == NULL) {
			ide_acpi_set_state(hwif, 1);
			ide_acpi_push_timing(hwif);
		}

		ide_acpi_exec_tfs(drive);
	}

	memset(&rqpm, 0, sizeof(rqpm));
	rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
	rq->cmd_type = REQ_TYPE_PM_RESUME;
	rq->cmd_flags |= REQ_PREEMPT;
	rq->special = &rqpm;
	rqpm.pm_step = IDE_PM_START_RESUME;
	rqpm.pm_state = PM_EVENT_ON;

	err = blk_execute_rq(drive->queue, NULL, rq, 1);
	blk_put_request(rq);

	if (err == 0 && dev->driver) {
		struct ide_driver *drv = to_ide_driver(dev->driver);

		if (drv->resume)
			drv->resume(drive);
	}

	return err;
}
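
/*
 * ide_complete_power_step - advance to the next power management step
 *
 * Updates pm->pm_step once the current step has completed.  Only disk
 * devices step through the full sequence; for other media this is a no-op.
 */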
void ide_complete_power_step(ide_drive_t *drive, struct request *rq)
{
	struct request_pm_state *pm = rq->special;

#ifdef DEBUG_PM
	printk(KERN_INFO "%s: complete_power_step(step: %d)\n",
		drive->name, pm->pm_step);
#endif
	if (drive->media != ide_disk)
		return;

	switch (pm->pm_step) {
	case IDE_PM_FLUSH_CACHE:	/* Suspend step 1 (flush cache) */
		if (pm->pm_state == PM_EVENT_FREEZE)
			pm->pm_step = IDE_PM_COMPLETED;
		else
			pm->pm_step = IDE_PM_STANDBY;
		break;
	case IDE_PM_STANDBY:		/* Suspend step 2 (standby) */
		pm->pm_step = IDE_PM_COMPLETED;
		break;
	case IDE_PM_RESTORE_PIO:	/* Resume step 1 (restore PIO) */
		pm->pm_step = IDE_PM_IDLE;
		break;
	case IDE_PM_IDLE:		/* Resume step 2 (idle) */
		pm->pm_step = IDE_PM_RESTORE_DMA;
		break;
	}
}
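
/*
 * ide_start_power_step - start the current power management step
 *
 * Builds and issues the taskfile for command-based steps (cache flush,
 * standby, idle immediate) and handles the non-command steps (restore PIO
 * and DMA settings) directly, returning ide_stopped in that case.
 */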
ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
{
	struct request_pm_state *pm = rq->special;
	struct ide_cmd cmd = { };

	switch (pm->pm_step) {
	case IDE_PM_FLUSH_CACHE:	/* Suspend step 1 (flush cache) */
		if (drive->media != ide_disk)
			break;
		/* Not supported? Switch to next step now. */
		if (ata_id_flush_enabled(drive->id) == 0 ||
		    (drive->dev_flags & IDE_DFLAG_WCACHE) == 0) {
			ide_complete_power_step(drive, rq);
			return ide_stopped;
		}
		if (ata_id_flush_ext_enabled(drive->id))
			cmd.tf.command = ATA_CMD_FLUSH_EXT;
		else
			cmd.tf.command = ATA_CMD_FLUSH;
		goto out_do_tf;
	case IDE_PM_STANDBY:		/* Suspend step 2 (standby) */
		cmd.tf.command = ATA_CMD_STANDBYNOW1;
		goto out_do_tf;
	case IDE_PM_RESTORE_PIO:	/* Resume step 1 (restore PIO) */
		ide_set_max_pio(drive);
		/*
		 * skip IDE_PM_IDLE for ATAPI devices
		 */
		if (drive->media != ide_disk)
			pm->pm_step = IDE_PM_RESTORE_DMA;
		else
			ide_complete_power_step(drive, rq);
		return ide_stopped;
	case IDE_PM_IDLE:		/* Resume step 2 (idle) */
		cmd.tf.command = ATA_CMD_IDLEIMMEDIATE;
		goto out_do_tf;
	case IDE_PM_RESTORE_DMA:	/* Resume step 3 (restore DMA) */
		/*
		 * Right now, all we do is call ide_set_dma(drive),
		 * we could be smarter and check for current xfer_speed
		 * in struct drive etc...
		 */
		if (drive->hwif->dma_ops == NULL)
			break;
		/*
		 * TODO: respect IDE_DFLAG_USING_DMA
		 */
		ide_set_dma(drive);
		break;
	}

	pm->pm_step = IDE_PM_COMPLETED;

	return ide_stopped;

out_do_tf:
	cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
	cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
	cmd.protocol = ATA_PROT_NODATA;

	return do_rw_taskfile(drive, &cmd);
}

/**
 *	ide_complete_pm_rq - end the current Power Management request
 *	@drive: target drive
 *	@rq: request
 *
 *	This function cleans up the current PM request and stops the queue
 *	if necessary.
 */
void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq)
{
	struct request_queue *q = drive->queue;
	struct request_pm_state *pm = rq->special;
	unsigned long flags;

	ide_complete_power_step(drive, rq);
	if (pm->pm_step != IDE_PM_COMPLETED)
		return;

#ifdef DEBUG_PM
	printk("%s: completing PM request, %s\n", drive->name,
	       blk_pm_suspend_request(rq) ? "suspend" : "resume");
#endif
	spin_lock_irqsave(q->queue_lock, flags);
	if (blk_pm_suspend_request(rq))
		blk_stop_queue(q);
	else
		drive->dev_flags &= ~IDE_DFLAG_BLOCKED;
	spin_unlock_irqrestore(q->queue_lock, flags);

	drive->hwif->rq = NULL;

	if (blk_end_request(rq, 0, 0))
		BUG();
}
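
/*
 * ide_check_pm_state - prepare the drive when a PM request is started
 *
 * Marks the drive as blocked at the start of the suspend sequence.  At the
 * start of the resume sequence it waits for the port to report not-busy,
 * reselects the device, and restarts the request queue.
 */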
void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
{
	struct request_pm_state *pm = rq->special;

	if (blk_pm_suspend_request(rq) &&
	    pm->pm_step == IDE_PM_START_SUSPEND)
		/* Mark drive blocked when starting the suspend sequence. */
		drive->dev_flags |= IDE_DFLAG_BLOCKED;
	else if (blk_pm_resume_request(rq) &&
		 pm->pm_step == IDE_PM_START_RESUME) {
		/*
		 * The first thing we do on wakeup is to wait for BSY bit to
		 * go away (with a looong timeout) as a drive on this hwif may
		 * just be POSTing itself.
		 * We do that before even selecting as the "other" device on
		 * the bus may be broken enough to walk on our toes at this
		 * point.
		 */
		ide_hwif_t *hwif = drive->hwif;
		const struct ide_tp_ops *tp_ops = hwif->tp_ops;
		struct request_queue *q = drive->queue;
		unsigned long flags;
		int rc;
#ifdef DEBUG_PM
		printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
#endif
		rc = ide_wait_not_busy(hwif, 35000);
		if (rc)
			printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
		tp_ops->dev_select(drive);
		tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
		rc = ide_wait_not_busy(hwif, 100000);
		if (rc)
			printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}