/*
 * Libata based driver for Apple "macio" family of PATA controllers
 *
 * Copyright 2008/2009 Benjamin Herrenschmidt, IBM Corp
 *                     <[email protected]>
 *
 * Some bits and pieces from drivers/ide/ppc/pmac.c
 *
 */

#undef DEBUG
#undef DEBUG_DMA

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/ata.h>
#include <linux/libata.h>
#include <linux/adb.h>
#include <linux/pmu.h>
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/gfp.h>
#include <linux/pci.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>

#include <asm/macio.h>
#include <asm/io.h>
#include <asm/dbdma.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/mediabay.h>
#ifdef DEBUG_DMA
#define dev_dbgdma(dev, format, arg...)		\
	dev_printk(KERN_DEBUG , dev , format , ## arg)
#else
#define dev_dbgdma(dev, format, arg...)		\
	({ if (0) dev_printk(KERN_DEBUG, dev, format, ##arg); 0; })
#endif

#define DRV_NAME	"pata_macio"
#define DRV_VERSION	"0.9"

/* Models of macio ATA controller */
enum {
	controller_ohare,	/* OHare based */
	controller_heathrow,	/* Heathrow/Paddington */
	controller_kl_ata3,	/* KeyLargo ATA-3 */
	controller_kl_ata4,	/* KeyLargo ATA-4 */
	controller_un_ata6,	/* UniNorth2 ATA-6 */
	controller_k2_ata6,	/* K2 ATA-6 */
	controller_sh_ata6,	/* Shasta ATA-6 */
};

static const char* macio_ata_names[] = {
	"OHare ATA",		/* OHare based */
	"Heathrow ATA",		/* Heathrow/Paddington */
	"KeyLargo ATA-3",	/* KeyLargo ATA-3 (MDMA only) */
	"KeyLargo ATA-4",	/* KeyLargo ATA-4 (UDMA/66) */
	"UniNorth ATA-6",	/* UniNorth2 ATA-6 (UDMA/100) */
	"K2 ATA-6",		/* K2 ATA-6 (UDMA/100) */
	"Shasta ATA-6",		/* Shasta ATA-6 (UDMA/133) */
};

/*
 * Extra registers, both 32-bit little-endian
 */
#define IDE_TIMING_CONFIG	0x200
#define IDE_INTERRUPT		0x300

/* Kauai (U2) ATA has different register setup */
#define IDE_KAUAI_PIO_CONFIG	0x200
#define IDE_KAUAI_ULTRA_CONFIG	0x210
#define IDE_KAUAI_POLL_CONFIG	0x220

/*
 * Timing configuration register definitions
 */

/* Number of IDE_SYSCLK_NS ticks, argument is in nanoseconds */
#define SYSCLK_TICKS(t)		(((t) + IDE_SYSCLK_NS - 1) / IDE_SYSCLK_NS)
#define SYSCLK_TICKS_66(t)	(((t) + IDE_SYSCLK_66_NS - 1) / IDE_SYSCLK_66_NS)
#define IDE_SYSCLK_NS		30	/* 33Mhz cell */
#define IDE_SYSCLK_66_NS	15	/* 66Mhz cell */
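
/*
 * Example: with the 33Mhz cell's 30ns clock, SYSCLK_TICKS(120) rounds a
 * 120ns interval up to (120 + 29) / 30 = 4 ticks, while SYSCLK_TICKS_66(120)
 * gives (120 + 14) / 15 = 8 ticks on the 66Mhz cell.
 */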

/* 133Mhz cell, found in shasta.
 * See comments about the 100 Mhz Uninorth 2...
 * Note that PIO_MASK and MDMA_MASK seem to overlap, that's just
 * weird and I don't know why... at this stage
 */
#define TR_133_PIOREG_PIO_MASK		0xff000fff
#define TR_133_PIOREG_MDMA_MASK		0x00fff800
#define TR_133_UDMAREG_UDMA_MASK	0x0003ffff
#define TR_133_UDMAREG_UDMA_EN		0x00000001

/* 100Mhz cell, found in Uninorth 2 and K2. It appears as a pci device
 * (106b/0033) on the uninorth or K2 internal PCI bus and its clock is
 * controlled like gem or fw. It appears to be an evolution of keylargo
 * ATA4 with the timing register extended to 2x 32-bit registers (one
 * for PIO & MWDMA and one for UDMA), and a similar DBDMA channel.
 * It has its own local feature control register as well.
 *
 * After scratching my mind over the timing values, at least for PIO
 * and MDMA, I think I've figured out the format of the timing register,
 * though I use pre-calculated tables for UDMA as usual...
 */
#define TR_100_PIO_ADDRSETUP_MASK	0xff000000 /* Size of field unknown */
#define TR_100_PIO_ADDRSETUP_SHIFT	24
#define TR_100_MDMA_MASK		0x00fff000
#define TR_100_MDMA_RECOVERY_MASK	0x00fc0000
#define TR_100_MDMA_RECOVERY_SHIFT	18
#define TR_100_MDMA_ACCESS_MASK		0x0003f000
#define TR_100_MDMA_ACCESS_SHIFT	12
#define TR_100_PIO_MASK			0xff000fff
#define TR_100_PIO_RECOVERY_MASK	0x00000fc0
#define TR_100_PIO_RECOVERY_SHIFT	6
#define TR_100_PIO_ACCESS_MASK		0x0000003f
#define TR_100_PIO_ACCESS_SHIFT		0

#define TR_100_UDMAREG_UDMA_MASK	0x0000ffff
#define TR_100_UDMAREG_UDMA_EN		0x00000001
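
/*
 * Worked example of the PIO fields above: the Kauai XFER_PIO_0 entry in the
 * table further down is 0x08000a92, which decodes as address setup 0x08
 * (bits 31-24), recovery (0xa92 >> 6) & 0x3f = 0x2a and access
 * 0xa92 & 0x3f = 0x12, i.e. recovery 42 and access 18, presumably in the
 * cell's clock ticks.
 */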


/* 66Mhz cell, found in KeyLargo. Can do ultra modes 0 to 2 on a
 * 40-conductor cable and up to 4 on an 80-conductor one.
 * Clock unit is 15ns (66Mhz)
 *
 * 3 Values can be programmed:
 *  - Write data setup, which appears to match the cycle time. They
 *    also call it DIOW setup.
 *  - Ready to pause time (from spec)
 *  - Address setup. That one is weird. I don't see where exactly
 *    it fits in UDMA cycles, I got its name from an obscure piece
 *    of commented out code in Darwin. They leave it to 0, we do as
 *    well, despite a comment that would lead one to think it has a
 *    min value of 45ns.
 * Apple also adds 60ns to the write data setup (or cycle time ?) on
 * reads.
 */
#define TR_66_UDMA_MASK			0xfff00000
#define TR_66_UDMA_EN			0x00100000 /* Enable Ultra mode for DMA */
#define TR_66_PIO_ADDRSETUP_MASK	0xe0000000 /* Address setup */
#define TR_66_PIO_ADDRSETUP_SHIFT	29
#define TR_66_UDMA_RDY2PAUS_MASK	0x1e000000 /* Ready 2 pause time */
#define TR_66_UDMA_RDY2PAUS_SHIFT	25
#define TR_66_UDMA_WRDATASETUP_MASK	0x01e00000 /* Write data setup time */
#define TR_66_UDMA_WRDATASETUP_SHIFT	21
#define TR_66_MDMA_MASK			0x000ffc00
#define TR_66_MDMA_RECOVERY_MASK	0x000f8000
#define TR_66_MDMA_RECOVERY_SHIFT	15
#define TR_66_MDMA_ACCESS_MASK		0x00007c00
#define TR_66_MDMA_ACCESS_SHIFT		10
#define TR_66_PIO_MASK			0xe00003ff
#define TR_66_PIO_RECOVERY_MASK		0x000003e0
#define TR_66_PIO_RECOVERY_SHIFT	5
#define TR_66_PIO_ACCESS_MASK		0x0000001f
#define TR_66_PIO_ACCESS_SHIFT		0
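
/*
 * For instance, the KeyLargo ATA-4 XFER_UDMA_0 value 0x19100000 used in the
 * table below has the UDMA enable bit set, a ready-to-pause field of 0xc
 * (12 ticks, 180ns) and a write data setup field of 0x8 (8 ticks, 120ns).
 */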

/* 33Mhz cell, found in OHare, Heathrow (& Paddington) and KeyLargo
 * Can do pio & mdma modes, clock unit is 30ns (33Mhz)
 *
 * The access time and recovery time can be programmed. Some older
 * Darwin code bases limit OHare to a 150ns cycle time. I decided to do
 * the same here for safety against broken old hardware ;)
 * The HalfTick bit, when set, adds half a clock (15ns) to the access
 * time and removes one from recovery. It's not supported on the KeyLargo
 * implementation afaik. The E bit appears to be set for PIO mode 0 and
 * is used to reach the long timings used in this mode.
 */
#define TR_33_MDMA_MASK			0x003ff800
#define TR_33_MDMA_RECOVERY_MASK	0x001f0000
#define TR_33_MDMA_RECOVERY_SHIFT	16
#define TR_33_MDMA_ACCESS_MASK		0x0000f800
#define TR_33_MDMA_ACCESS_SHIFT		11
#define TR_33_MDMA_HALFTICK		0x00200000
#define TR_33_PIO_MASK			0x000007ff
#define TR_33_PIO_E			0x00000400
#define TR_33_PIO_RECOVERY_MASK		0x000003e0
#define TR_33_PIO_RECOVERY_SHIFT	5
#define TR_33_PIO_ACCESS_MASK		0x0000001f
#define TR_33_PIO_ACCESS_SHIFT		0
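
/*
 * As an illustration, the OHare/Heathrow XFER_PIO_0 value 0x00000526 below
 * has the E bit set, recovery (0x526 & 0x3e0) >> 5 = 9 and access
 * 0x526 & 0x1f = 6, i.e. roughly 270ns recovery and 180ns access at 30ns
 * per tick.
 */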

/*
 * Interrupt register definitions. Only present on newer cells
 * (Keylargo and later afaik) so we don't use it.
 */
#define IDE_INTR_DMA			0x80000000
#define IDE_INTR_DEVICE			0x40000000

/*
 * FCR Register on Kauai. Not sure what bit 0x4 is ...
 */
#define KAUAI_FCR_UATA_MAGIC		0x00000004
#define KAUAI_FCR_UATA_RESET_N		0x00000002
#define KAUAI_FCR_UATA_ENABLE		0x00000001


/* Allow up to 256 DBDMA commands per xfer */
#define MAX_DCMDS		256

/* Don't let a DMA segment go all the way to 64K */
#define MAX_DBDMA_SEG		0xff00
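
/*
 * Note that pata_macio_common_init() also passes MAX_DBDMA_SEG to
 * dma_set_max_seg_size(), so the scatterlist segments handed to
 * pata_macio_qc_prep() should normally already fit in a single DBDMA
 * descriptor; the split loop there acts as an extra safeguard.
 */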


/*
 * Wait 1s for disk to answer on IDE bus after a hard reset
 * of the device (via GPIO/FCR).
 *
 * Some devices seem to "pollute" the bus even after dropping
 * the BSY bit (typically some combo drives slave on the UDMA
 * bus) after a hard reset. Since we hard reset all drives on
 * KeyLargo ATA66, we have to keep that delay around. I may end
 * up not hard resetting anymore on these and keep the delay only
 * for older interfaces instead (we have to reset when coming
 * from MacOS...) --BenH.
 */
#define IDE_WAKEUP_DELAY_MS	1000

struct pata_macio_timing;

struct pata_macio_priv {
	int				kind;
	int				aapl_bus_id;
	int				mediabay : 1;
	struct device_node		*node;
	struct macio_dev		*mdev;
	struct pci_dev			*pdev;
	struct device			*dev;
	int				irq;
	u32				treg[2][2];
	void __iomem			*tfregs;
	void __iomem			*kauai_fcr;
	struct dbdma_cmd *		dma_table_cpu;
	dma_addr_t			dma_table_dma;
	struct ata_host			*host;
	const struct pata_macio_timing	*timings;
};

/* Previous variants of this driver used to calculate timings
 * for various variants of the chip and use tables for others.
 *
 * Not only was this confusing, but in addition, it isn't clear
 * whether our calculation code was correct. It didn't entirely
 * match the darwin code and whatever documentation I could find
 * on these cells.
 *
 * I decided to entirely rely on a table instead for this version
 * of the driver. Also, because I don't really care about derated
 * modes and really old HW other than making it work, I'm not going
 * to calculate / snoop timing values for anything other than the
 * standard modes.
 */
struct pata_macio_timing {
	int	mode;
	u32	reg1;	/* Bits to set in first timing reg */
	u32	reg2;	/* Bits to set in second timing reg */
};

static const struct pata_macio_timing pata_macio_ohare_timings[] = {
	{ XFER_PIO_0,		0x00000526,	0, },
	{ XFER_PIO_1,		0x00000085,	0, },
	{ XFER_PIO_2,		0x00000025,	0, },
	{ XFER_PIO_3,		0x00000025,	0, },
	{ XFER_PIO_4,		0x00000025,	0, },
	{ XFER_MW_DMA_0,	0x00074000,	0, },
	{ XFER_MW_DMA_1,	0x00221000,	0, },
	{ XFER_MW_DMA_2,	0x00211000,	0, },
	{ -1, 0, 0 }
};

static const struct pata_macio_timing pata_macio_heathrow_timings[] = {
	{ XFER_PIO_0,		0x00000526,	0, },
	{ XFER_PIO_1,		0x00000085,	0, },
	{ XFER_PIO_2,		0x00000025,	0, },
	{ XFER_PIO_3,		0x00000025,	0, },
	{ XFER_PIO_4,		0x00000025,	0, },
	{ XFER_MW_DMA_0,	0x00074000,	0, },
	{ XFER_MW_DMA_1,	0x00221000,	0, },
	{ XFER_MW_DMA_2,	0x00211000,	0, },
	{ -1, 0, 0 }
};

static const struct pata_macio_timing pata_macio_kl33_timings[] = {
	{ XFER_PIO_0,		0x00000526,	0, },
	{ XFER_PIO_1,		0x00000085,	0, },
	{ XFER_PIO_2,		0x00000025,	0, },
	{ XFER_PIO_3,		0x00000025,	0, },
	{ XFER_PIO_4,		0x00000025,	0, },
	{ XFER_MW_DMA_0,	0x00084000,	0, },
	{ XFER_MW_DMA_1,	0x00021800,	0, },
	{ XFER_MW_DMA_2,	0x00011800,	0, },
	{ -1, 0, 0 }
};

static const struct pata_macio_timing pata_macio_kl66_timings[] = {
	{ XFER_PIO_0,		0x0000038c,	0, },
	{ XFER_PIO_1,		0x0000020a,	0, },
	{ XFER_PIO_2,		0x00000127,	0, },
	{ XFER_PIO_3,		0x000000c6,	0, },
	{ XFER_PIO_4,		0x00000065,	0, },
	{ XFER_MW_DMA_0,	0x00084000,	0, },
	{ XFER_MW_DMA_1,	0x00029800,	0, },
	{ XFER_MW_DMA_2,	0x00019400,	0, },
	{ XFER_UDMA_0,		0x19100000,	0, },
	{ XFER_UDMA_1,		0x14d00000,	0, },
	{ XFER_UDMA_2,		0x10900000,	0, },
	{ XFER_UDMA_3,		0x0c700000,	0, },
	{ XFER_UDMA_4,		0x0c500000,	0, },
	{ -1, 0, 0 }
};

static const struct pata_macio_timing pata_macio_kauai_timings[] = {
	{ XFER_PIO_0,		0x08000a92,	0, },
	{ XFER_PIO_1,		0x0800060f,	0, },
	{ XFER_PIO_2,		0x0800038b,	0, },
	{ XFER_PIO_3,		0x05000249,	0, },
	{ XFER_PIO_4,		0x04000148,	0, },
	{ XFER_MW_DMA_0,	0x00618000,	0, },
	{ XFER_MW_DMA_1,	0x00209000,	0, },
	{ XFER_MW_DMA_2,	0x00148000,	0, },
	{ XFER_UDMA_0,		0,		0x000070c1, },
	{ XFER_UDMA_1,		0,		0x00005d81, },
	{ XFER_UDMA_2,		0,		0x00004a61, },
	{ XFER_UDMA_3,		0,		0x00003a51, },
	{ XFER_UDMA_4,		0,		0x00002a31, },
	{ XFER_UDMA_5,		0,		0x00002921, },
	{ -1, 0, 0 }
};

static const struct pata_macio_timing pata_macio_shasta_timings[] = {
	{ XFER_PIO_0,		0x0a000c97,	0, },
	{ XFER_PIO_1,		0x07000712,	0, },
	{ XFER_PIO_2,		0x040003cd,	0, },
	{ XFER_PIO_3,		0x0500028b,	0, },
	{ XFER_PIO_4,		0x0400010a,	0, },
	{ XFER_MW_DMA_0,	0x00820800,	0, },
	{ XFER_MW_DMA_1,	0x0028b000,	0, },
	{ XFER_MW_DMA_2,	0x001ca000,	0, },
	{ XFER_UDMA_0,		0,		0x00035901, },
	{ XFER_UDMA_1,		0,		0x000348b1, },
	{ XFER_UDMA_2,		0,		0x00033881, },
	{ XFER_UDMA_3,		0,		0x00033861, },
	{ XFER_UDMA_4,		0,		0x00033841, },
	{ XFER_UDMA_5,		0,		0x00033031, },
	{ XFER_UDMA_6,		0,		0x00033021, },
	{ -1, 0, 0 }
};

static const struct pata_macio_timing *pata_macio_find_timing(
						struct pata_macio_priv *priv,
						int mode)
{
	int i;

	for (i = 0; priv->timings[i].mode > 0; i++) {
		if (priv->timings[i].mode == mode)
			return &priv->timings[i];
	}
	return NULL;
}


static void pata_macio_apply_timings(struct ata_port *ap, unsigned int device)
{
	struct pata_macio_priv *priv = ap->private_data;
	void __iomem *rbase = ap->ioaddr.cmd_addr;

	if (priv->kind == controller_sh_ata6 ||
	    priv->kind == controller_un_ata6 ||
	    priv->kind == controller_k2_ata6) {
		writel(priv->treg[device][0], rbase + IDE_KAUAI_PIO_CONFIG);
		writel(priv->treg[device][1], rbase + IDE_KAUAI_ULTRA_CONFIG);
	} else
		writel(priv->treg[device][0], rbase + IDE_TIMING_CONFIG);
}

static void pata_macio_dev_select(struct ata_port *ap, unsigned int device)
{
	ata_sff_dev_select(ap, device);

	/* Apply timings */
	pata_macio_apply_timings(ap, device);
}

static void pata_macio_set_timings(struct ata_port *ap,
				   struct ata_device *adev)
{
	struct pata_macio_priv *priv = ap->private_data;
	const struct pata_macio_timing *t;

	dev_dbg(priv->dev, "Set timings: DEV=%d,PIO=0x%x (%s),DMA=0x%x (%s)\n",
		adev->devno,
		adev->pio_mode,
		ata_mode_string(ata_xfer_mode2mask(adev->pio_mode)),
		adev->dma_mode,
		ata_mode_string(ata_xfer_mode2mask(adev->dma_mode)));

	/* First clear timings */
	priv->treg[adev->devno][0] = priv->treg[adev->devno][1] = 0;

	/* Now get the PIO timings */
	t = pata_macio_find_timing(priv, adev->pio_mode);
	if (t == NULL) {
		dev_warn(priv->dev, "Invalid PIO timing requested: 0x%x\n",
			 adev->pio_mode);
		t = pata_macio_find_timing(priv, XFER_PIO_0);
	}
	BUG_ON(t == NULL);

	/* PIO timings only ever use the first treg */
	priv->treg[adev->devno][0] |= t->reg1;

	/* Now get DMA timings */
	t = pata_macio_find_timing(priv, adev->dma_mode);
	if (t == NULL || (t->reg1 == 0 && t->reg2 == 0)) {
		dev_dbg(priv->dev, "DMA timing not set yet, using MW_DMA_0\n");
		t = pata_macio_find_timing(priv, XFER_MW_DMA_0);
	}
	BUG_ON(t == NULL);

	/* DMA timings can use both tregs */
	priv->treg[adev->devno][0] |= t->reg1;
	priv->treg[adev->devno][1] |= t->reg2;

	dev_dbg(priv->dev, " -> %08x %08x\n",
		priv->treg[adev->devno][0],
		priv->treg[adev->devno][1]);

	/* Apply to hardware */
	pata_macio_apply_timings(ap, adev->devno);
}

/*
 * Blast some well known "safe" values to the timing registers at init or
 * wakeup from sleep time, before we do the real calculation
 */
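/*
 * (The "safe" values below are each cell's XFER_PIO_0 and XFER_MW_DMA_0
 * table entries OR'ed together; on Kauai/Shasta the second word matches
 * the XFER_UDMA_5 entry.)
 */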
static void pata_macio_default_timings(struct pata_macio_priv *priv)
{
	unsigned int value, value2 = 0;

	switch(priv->kind) {
	case controller_sh_ata6:
		value = 0x0a820c97;
		value2 = 0x00033031;
		break;
	case controller_un_ata6:
	case controller_k2_ata6:
		value = 0x08618a92;
		value2 = 0x00002921;
		break;
	case controller_kl_ata4:
		value = 0x0008438c;
		break;
	case controller_kl_ata3:
		value = 0x00084526;
		break;
	case controller_heathrow:
	case controller_ohare:
	default:
		value = 0x00074526;
		break;
	}
	priv->treg[0][0] = priv->treg[1][0] = value;
	priv->treg[0][1] = priv->treg[1][1] = value2;
}

static int pata_macio_cable_detect(struct ata_port *ap)
{
	struct pata_macio_priv *priv = ap->private_data;

	/* Get cable type from device-tree */
	if (priv->kind == controller_kl_ata4 ||
	    priv->kind == controller_un_ata6 ||
	    priv->kind == controller_k2_ata6 ||
	    priv->kind == controller_sh_ata6) {
		const char* cable = of_get_property(priv->node, "cable-type",
						    NULL);
		struct device_node *root = of_find_node_by_path("/");
		const char *model = of_get_property(root, "model", NULL);

		if (cable && !strncmp(cable, "80-", 3)) {
			/* Some drives fail to detect the 80c cable in a
			 * PowerBook. These machines use a proprietary short
			 * IDE cable anyway
			 */
			if (!strncmp(model, "PowerBook", 9))
				return ATA_CBL_PATA40_SHORT;
			else
				return ATA_CBL_PATA80;
		}
	}

	/* G5's seem to have incorrect cable type in the device-tree.
	 * Let's assume they always have an 80 conductor cable, this seems to
	 * always be the case unless the user mucked around
	 */
	if (of_device_is_compatible(priv->node, "K2-UATA") ||
	    of_device_is_compatible(priv->node, "shasta-ata"))
		return ATA_CBL_PATA80;

	/* Anything else is 40 conductors */
	return ATA_CBL_PATA40;
}

static void pata_macio_qc_prep(struct ata_queued_cmd *qc)
{
	unsigned int write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct ata_port *ap = qc->ap;
	struct pata_macio_priv *priv = ap->private_data;
	struct scatterlist *sg;
	struct dbdma_cmd *table;
	unsigned int si, pi;

	dev_dbgdma(priv->dev, "%s: qc %p flags %lx, write %d dev %d\n",
		   __func__, qc, qc->flags, write, qc->dev->devno);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;

	table = (struct dbdma_cmd *) priv->dma_table_cpu;

	pi = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, sg_len, len;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			/* table overflow should never happen */
			BUG_ON (pi++ >= MAX_DCMDS);

			len = (sg_len < MAX_DBDMA_SEG) ? sg_len : MAX_DBDMA_SEG;
			table->command = cpu_to_le16(write ? OUTPUT_MORE: INPUT_MORE);
			table->req_count = cpu_to_le16(len);
			table->phy_addr = cpu_to_le32(addr);
			table->cmd_dep = 0;
			table->xfer_status = 0;
			table->res_count = 0;
			addr += len;
			sg_len -= len;
			++table;
		}
	}

	/* Should never happen according to Tejun */
	BUG_ON(!pi);

	/* Convert the last command to an input/output */
	table--;
	table->command = cpu_to_le16(write ? OUTPUT_LAST: INPUT_LAST);
	table++;

	/* Add the stop command to the end of the list */
	memset(table, 0, sizeof(struct dbdma_cmd));
	table->command = cpu_to_le16(DBDMA_STOP);

	dev_dbgdma(priv->dev, "%s: %d DMA list entries\n", __func__, pi);
}


static void pata_macio_freeze(struct ata_port *ap)
{
	struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;

	if (dma_regs) {
		unsigned int timeout = 1000000;

		/* Make sure DMA controller is stopped */
		writel((RUN|PAUSE|FLUSH|WAKE|DEAD) << 16, &dma_regs->control);
		while (--timeout && (readl(&dma_regs->status) & RUN))
			udelay(1);
	}

	ata_sff_freeze(ap);
}


static void pata_macio_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct pata_macio_priv *priv = ap->private_data;
	struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
	int dev = qc->dev->devno;

	dev_dbgdma(priv->dev, "%s: qc %p\n", __func__, qc);

	/* Make sure DMA commands updates are visible */
	writel(priv->dma_table_dma, &dma_regs->cmdptr);

	/* On KeyLargo 66Mhz cell, we need to add 60ns to wrDataSetup on
	 * UDMA reads
	 */
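	/* (0x00800000 below is 4 << TR_66_UDMA_WRDATASETUP_SHIFT, i.e. four
	 * extra 15ns ticks = 60ns added to the write data setup field.)
	 */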
	if (priv->kind == controller_kl_ata4 &&
	    (priv->treg[dev][0] & TR_66_UDMA_EN)) {
		void __iomem *rbase = ap->ioaddr.cmd_addr;
		u32 reg = priv->treg[dev][0];

		if (!(qc->tf.flags & ATA_TFLAG_WRITE))
			reg += 0x00800000;
		writel(reg, rbase + IDE_TIMING_CONFIG);
	}

	/* issue r/w command */
	ap->ops->sff_exec_command(ap, &qc->tf);
}

static void pata_macio_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct pata_macio_priv *priv = ap->private_data;
	struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;

	dev_dbgdma(priv->dev, "%s: qc %p\n", __func__, qc);

	writel((RUN << 16) | RUN, &dma_regs->control);
	/* Make sure it gets to the controller right now */
	(void)readl(&dma_regs->control);
}

static void pata_macio_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct pata_macio_priv *priv = ap->private_data;
	struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
	unsigned int timeout = 1000000;

	dev_dbgdma(priv->dev, "%s: qc %p\n", __func__, qc);

	/* Stop the DMA engine and wait for it to fully halt */
	writel (((RUN|WAKE|DEAD) << 16), &dma_regs->control);
	while (--timeout && (readl(&dma_regs->status) & RUN))
		udelay(1);
}

static u8 pata_macio_bmdma_status(struct ata_port *ap)
{
	struct pata_macio_priv *priv = ap->private_data;
	struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
	u32 dstat, rstat = ATA_DMA_INTR;
	unsigned long timeout = 0;

	dstat = readl(&dma_regs->status);

	dev_dbgdma(priv->dev, "%s: dstat=%x\n", __func__, dstat);

	/* We have two things to deal with here:
	 *
	 * - The dbdma won't stop if the command was started
	 * but completed with an error without transferring all
	 * the data. This happens when bad blocks are met during
	 * a multi-block transfer.
	 *
	 * - The dbdma fifo hasn't yet finished flushing to
	 * system memory when the disk interrupt occurs.
	 */

	/* First check for errors */
	if ((dstat & (RUN|DEAD)) != RUN)
		rstat |= ATA_DMA_ERR;

	/* If ACTIVE is cleared, the STOP command has been hit and
	 * the transfer is complete. If not, we have to flush the
	 * channel.
	 */
	if ((dstat & ACTIVE) == 0)
		return rstat;

	dev_dbgdma(priv->dev, "%s: DMA still active, flushing...\n", __func__);

	/* If dbdma didn't execute the STOP command yet, the
	 * active bit is still set. We consider that we aren't
	 * sharing interrupts (which is hopefully the case with
	 * those controllers) and so we just try to flush the
	 * channel for pending data in the fifo
	 */
	udelay(1);
	writel((FLUSH << 16) | FLUSH, &dma_regs->control);
	for (;;) {
		udelay(1);
		dstat = readl(&dma_regs->status);
		if ((dstat & FLUSH) == 0)
			break;
		if (++timeout > 1000) {
			dev_warn(priv->dev, "timeout flushing DMA\n");
			rstat |= ATA_DMA_ERR;
			break;
		}
	}
	return rstat;
}

/* port_start is when we allocate the DMA command list */
static int pata_macio_port_start(struct ata_port *ap)
{
	struct pata_macio_priv *priv = ap->private_data;

	if (ap->ioaddr.bmdma_addr == NULL)
		return 0;

	/* Allocate space for the DBDMA commands.
	 *
	 * The +2 is +1 for the stop command and +1 to allow for
	 * aligning the start address to a multiple of 16 bytes.
	 */
	priv->dma_table_cpu =
		dmam_alloc_coherent(priv->dev,
				    (MAX_DCMDS + 2) * sizeof(struct dbdma_cmd),
				    &priv->dma_table_dma, GFP_KERNEL);
	if (priv->dma_table_cpu == NULL) {
		dev_err(priv->dev, "Unable to allocate DMA command list\n");
		ap->ioaddr.bmdma_addr = NULL;
		ap->mwdma_mask = 0;
		ap->udma_mask = 0;
	}
	return 0;
}

static void pata_macio_irq_clear(struct ata_port *ap)
{
	struct pata_macio_priv *priv = ap->private_data;

	/* Nothing to do here */

	dev_dbgdma(priv->dev, "%s\n", __func__);
}

static void pata_macio_reset_hw(struct pata_macio_priv *priv, int resume)
{
	dev_dbg(priv->dev, "Enabling & resetting...\n");

	if (priv->mediabay)
		return;

	if (priv->kind == controller_ohare && !resume) {
		/* The code below is having trouble on some ohare machines
		 * (timing related ?). Until I can put my hands on one of these
		 * units, I keep the old way
		 */
		ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, priv->node, 0, 1);
	} else {
		int rc;

		/* Reset and enable controller */
		rc = ppc_md.feature_call(PMAC_FTR_IDE_RESET,
					 priv->node, priv->aapl_bus_id, 1);
		ppc_md.feature_call(PMAC_FTR_IDE_ENABLE,
				    priv->node, priv->aapl_bus_id, 1);
		msleep(10);
		/* Only bother waiting if there's a reset control */
		if (rc == 0) {
			ppc_md.feature_call(PMAC_FTR_IDE_RESET,
					    priv->node, priv->aapl_bus_id, 0);
			msleep(IDE_WAKEUP_DELAY_MS);
		}
	}

	/* If resuming a PCI device, restore the config space here */
	if (priv->pdev && resume) {
		int rc;

		pci_restore_state(priv->pdev);
		rc = pcim_enable_device(priv->pdev);
		if (rc)
			dev_err(&priv->pdev->dev,
				"Failed to enable device after resume (%d)\n",
				rc);
		else
			pci_set_master(priv->pdev);
	}

	/* On Kauai, initialize the FCR. We don't perform a reset; it doesn't
	 * really seem necessary and skipping it speeds up the boot process
	 */
	if (priv->kauai_fcr)
		writel(KAUAI_FCR_UATA_MAGIC |
		       KAUAI_FCR_UATA_RESET_N |
		       KAUAI_FCR_UATA_ENABLE, priv->kauai_fcr);
}

/* Hook the standard slave config to fixup some HW related alignment
 * restrictions
 */
static int pata_macio_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct pata_macio_priv *priv = ap->private_data;
	struct ata_device *dev;
	u16 cmd;
	int rc;

	/* First call original */
	rc = ata_scsi_slave_config(sdev);
	if (rc)
		return rc;

	/* This is lifted from sata_nv */
	dev = &ap->link.device[sdev->id];

	/* OHare has issues with non cache aligned DMA on some chipsets */
	if (priv->kind == controller_ohare) {
		blk_queue_update_dma_alignment(sdev->request_queue, 31);
		blk_queue_update_dma_pad(sdev->request_queue, 31);

		/* Tell the world about it */
		ata_dev_info(dev, "OHare alignment limits applied\n");
		return 0;
	}

	/* We only have issues with ATAPI */
	if (dev->class != ATA_DEV_ATAPI)
		return 0;

	/* Shasta and K2 seem to have "issues" with reads ... */
	if (priv->kind == controller_sh_ata6 ||
	    priv->kind == controller_k2_ata6) {
		/* All right, these are bad, apply restrictions */
		blk_queue_update_dma_alignment(sdev->request_queue, 15);
		blk_queue_update_dma_pad(sdev->request_queue, 15);

		/* We enable MWI and hack the cache line size directly here;
		 * this is specific to this chipset and not normal values, and
		 * we happen to somewhat know what we are doing here (which is
		 * basically to do the same Apple does and pray they did not
		 * get it wrong :-)
		 */
		BUG_ON(!priv->pdev);
		pci_write_config_byte(priv->pdev, PCI_CACHE_LINE_SIZE, 0x08);
		pci_read_config_word(priv->pdev, PCI_COMMAND, &cmd);
		pci_write_config_word(priv->pdev, PCI_COMMAND,
				      cmd | PCI_COMMAND_INVALIDATE);

		/* Tell the world about it */
		ata_dev_info(dev, "K2/Shasta alignment limits applied\n");
	}

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int pata_macio_do_suspend(struct pata_macio_priv *priv, pm_message_t mesg)
{
	int rc;

	/* First, core libata suspend to do most of the work */
	rc = ata_host_suspend(priv->host, mesg);
	if (rc)
		return rc;

	/* Restore to default timings */
	pata_macio_default_timings(priv);

	/* Mask interrupt. Not strictly necessary but the old driver did
	 * it and I'd rather not change that here */
	disable_irq(priv->irq);

	/* The media bay will handle itself just fine */
	if (priv->mediabay)
		return 0;

	/* Kauai has bus control FCRs directly here */
	if (priv->kauai_fcr) {
		u32 fcr = readl(priv->kauai_fcr);
		fcr &= ~(KAUAI_FCR_UATA_RESET_N | KAUAI_FCR_UATA_ENABLE);
		writel(fcr, priv->kauai_fcr);
	}

	/* For PCI, save state and disable DMA. No need to call
	 * pci_set_power_state(), the HW doesn't do D states that
	 * way, the platform code will take care of suspending the
	 * ASIC properly
	 */
	if (priv->pdev) {
		pci_save_state(priv->pdev);
		pci_disable_device(priv->pdev);
	}

	/* Disable the bus on older machines and the cell on kauai */
	ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, priv->node,
			    priv->aapl_bus_id, 0);

	return 0;
}

static int pata_macio_do_resume(struct pata_macio_priv *priv)
{
	/* Reset and re-enable the HW */
	pata_macio_reset_hw(priv, 1);

	/* Sanitize drive timings */
	pata_macio_apply_timings(priv->host->ports[0], 0);

	/* We want our IRQ back ! */
	enable_irq(priv->irq);

	/* Let the libata core take it from there */
	ata_host_resume(priv->host);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static struct scsi_host_template pata_macio_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= MAX_DCMDS,
	/* We may not need that strict one */
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= pata_macio_slave_config,
};

static struct ata_port_operations pata_macio_ops = {
	.inherits		= &ata_bmdma_port_ops,

	.freeze			= pata_macio_freeze,
	.set_piomode		= pata_macio_set_timings,
	.set_dmamode		= pata_macio_set_timings,
	.cable_detect		= pata_macio_cable_detect,
	.sff_dev_select		= pata_macio_dev_select,
	.qc_prep		= pata_macio_qc_prep,
	.bmdma_setup		= pata_macio_bmdma_setup,
	.bmdma_start		= pata_macio_bmdma_start,
	.bmdma_stop		= pata_macio_bmdma_stop,
	.bmdma_status		= pata_macio_bmdma_status,
	.port_start		= pata_macio_port_start,
	.sff_irq_clear		= pata_macio_irq_clear,
};

static void pata_macio_invariants(struct pata_macio_priv *priv)
{
	const int *bidp;

	/* Identify the type of controller */
	if (of_device_is_compatible(priv->node, "shasta-ata")) {
		priv->kind = controller_sh_ata6;
		priv->timings = pata_macio_shasta_timings;
	} else if (of_device_is_compatible(priv->node, "kauai-ata")) {
		priv->kind = controller_un_ata6;
		priv->timings = pata_macio_kauai_timings;
	} else if (of_device_is_compatible(priv->node, "K2-UATA")) {
		priv->kind = controller_k2_ata6;
		priv->timings = pata_macio_kauai_timings;
	} else if (of_device_is_compatible(priv->node, "keylargo-ata")) {
		if (strcmp(priv->node->name, "ata-4") == 0) {
			priv->kind = controller_kl_ata4;
			priv->timings = pata_macio_kl66_timings;
		} else {
			priv->kind = controller_kl_ata3;
			priv->timings = pata_macio_kl33_timings;
		}
	} else if (of_device_is_compatible(priv->node, "heathrow-ata")) {
		priv->kind = controller_heathrow;
		priv->timings = pata_macio_heathrow_timings;
	} else {
		priv->kind = controller_ohare;
		priv->timings = pata_macio_ohare_timings;
	}

	/* XXX FIXME --- setup priv->mediabay here */

	/* Get Apple bus ID (for clock and ASIC control) */
	bidp = of_get_property(priv->node, "AAPL,bus-id", NULL);
	priv->aapl_bus_id = bidp ? *bidp : 0;

	/* Fixup missing Apple bus ID in case of media-bay */
	if (priv->mediabay && bidp == 0)
		priv->aapl_bus_id = 1;
}

static void pata_macio_setup_ios(struct ata_ioports *ioaddr,
				 void __iomem * base, void __iomem * dma)
{
	/* cmd_addr is the base of regs for that port */
	ioaddr->cmd_addr	= base;

	/* taskfile registers */
	ioaddr->data_addr	= base + (ATA_REG_DATA    << 4);
	ioaddr->error_addr	= base + (ATA_REG_ERR     << 4);
	ioaddr->feature_addr	= base + (ATA_REG_FEATURE << 4);
	ioaddr->nsect_addr	= base + (ATA_REG_NSECT   << 4);
	ioaddr->lbal_addr	= base + (ATA_REG_LBAL    << 4);
	ioaddr->lbam_addr	= base + (ATA_REG_LBAM    << 4);
	ioaddr->lbah_addr	= base + (ATA_REG_LBAH    << 4);
	ioaddr->device_addr	= base + (ATA_REG_DEVICE  << 4);
	ioaddr->status_addr	= base + (ATA_REG_STATUS  << 4);
	ioaddr->command_addr	= base + (ATA_REG_CMD     << 4);
	ioaddr->altstatus_addr	= base + 0x160;
	ioaddr->ctl_addr	= base + 0x160;
	ioaddr->bmdma_addr	= dma;
}
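
/*
 * (The shifts above reflect the macio cell's register spacing: the classic
 * taskfile registers sit 0x10 apart, e.g. data at base + 0x00 and
 * status/command at base + 0x70, with the control/altstatus register at
 * base + 0x160 and the timing registers starting at base + 0x200.)
 */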

static void pmac_macio_calc_timing_masks(struct pata_macio_priv *priv,
					 struct ata_port_info *pinfo)
{
	int i = 0;

	pinfo->pio_mask		= 0;
	pinfo->mwdma_mask	= 0;
	pinfo->udma_mask	= 0;

	while (priv->timings[i].mode > 0) {
		unsigned int mask = 1U << (priv->timings[i].mode & 0x0f);
		switch(priv->timings[i].mode & 0xf0) {
		case 0x00: /* PIO */
			pinfo->pio_mask |= (mask >> 8);
			break;
		case 0x20: /* MWDMA */
			pinfo->mwdma_mask |= mask;
			break;
		case 0x40: /* UDMA */
			pinfo->udma_mask |= mask;
			break;
		}
		i++;
	}
	dev_dbg(priv->dev, "Supported masks: PIO=%lx, MWDMA=%lx, UDMA=%lx\n",
		pinfo->pio_mask, pinfo->mwdma_mask, pinfo->udma_mask);
}
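
/*
 * (This relies on libata's XFER_* encoding, where PIO modes start at 0x08,
 * MWDMA modes at 0x20 and UDMA modes at 0x40: for example XFER_PIO_4 (0x0c)
 * sets bit 4 of pio_mask after the >> 8, and XFER_UDMA_5 (0x45) sets bit 5
 * of udma_mask.)
 */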

static int pata_macio_common_init(struct pata_macio_priv *priv,
				  resource_size_t tfregs,
				  resource_size_t dmaregs,
				  resource_size_t fcregs,
				  unsigned long irq)
{
	struct ata_port_info pinfo;
	const struct ata_port_info *ppi[] = { &pinfo, NULL };
	void __iomem *dma_regs = NULL;

	/* Fill up privates with various invariants collected from the
	 * device-tree
	 */
	pata_macio_invariants(priv);

	/* Make sure we have sane initial timings in the cache */
	pata_macio_default_timings(priv);

	/* Not sure what the real max is but we know it's less than 64K, let's
	 * use 64K minus 256
	 */
	dma_set_max_seg_size(priv->dev, MAX_DBDMA_SEG);

	/* Allocate libata host for 1 port */
	memset(&pinfo, 0, sizeof(struct ata_port_info));
	pmac_macio_calc_timing_masks(priv, &pinfo);
	pinfo.flags		= ATA_FLAG_SLAVE_POSS;
	pinfo.port_ops		= &pata_macio_ops;
	pinfo.private_data	= priv;

	priv->host = ata_host_alloc_pinfo(priv->dev, ppi, 1);
	if (priv->host == NULL) {
		dev_err(priv->dev, "Failed to allocate ATA port structure\n");
		return -ENOMEM;
	}

	/* Setup the private data in host too */
	priv->host->private_data = priv;

	/* Map base registers */
	priv->tfregs = devm_ioremap(priv->dev, tfregs, 0x100);
	if (priv->tfregs == NULL) {
		dev_err(priv->dev, "Failed to map ATA ports\n");
		return -ENOMEM;
	}
	priv->host->iomap = &priv->tfregs;

	/* Map DMA regs */
	if (dmaregs != 0) {
		dma_regs = devm_ioremap(priv->dev, dmaregs,
					sizeof(struct dbdma_regs));
		if (dma_regs == NULL)
			dev_warn(priv->dev, "Failed to map ATA DMA registers\n");
	}

	/* If chip has local feature control, map those regs too */
	if (fcregs != 0) {
		priv->kauai_fcr = devm_ioremap(priv->dev, fcregs, 4);
		if (priv->kauai_fcr == NULL) {
			dev_err(priv->dev, "Failed to map ATA FCR register\n");
			return -ENOMEM;
		}
	}

	/* Setup port data structure */
	pata_macio_setup_ios(&priv->host->ports[0]->ioaddr,
			     priv->tfregs, dma_regs);
	priv->host->ports[0]->private_data = priv;

	/* hard-reset the controller */
	pata_macio_reset_hw(priv, 0);
	pata_macio_apply_timings(priv->host->ports[0], 0);

	/* Enable bus master if necessary */
	if (priv->pdev && dma_regs)
		pci_set_master(priv->pdev);

	dev_info(priv->dev, "Activating pata-macio chipset %s, Apple bus ID %d\n",
		 macio_ata_names[priv->kind], priv->aapl_bus_id);

	/* Start it up */
	priv->irq = irq;
	return ata_host_activate(priv->host, irq, ata_bmdma_interrupt, 0,
				 &pata_macio_sht);
}

static int pata_macio_attach(struct macio_dev *mdev,
			     const struct of_device_id *match)
{
	struct pata_macio_priv	*priv;
	resource_size_t		tfregs, dmaregs = 0;
	unsigned long		irq;
	int			rc;

	/* Check for broken device-trees */
	if (macio_resource_count(mdev) == 0) {
		dev_err(&mdev->ofdev.dev,
			"No addresses for controller\n");
		return -ENXIO;
	}

	/* Enable managed resources */
	macio_enable_devres(mdev);

	/* Allocate and init private data structure */
	priv = devm_kzalloc(&mdev->ofdev.dev,
			    sizeof(struct pata_macio_priv), GFP_KERNEL);
	if (priv == NULL) {
		dev_err(&mdev->ofdev.dev,
			"Failed to allocate private memory\n");
		return -ENOMEM;
	}
	priv->node = of_node_get(mdev->ofdev.dev.of_node);
	priv->mdev = mdev;
	priv->dev = &mdev->ofdev.dev;

	/* Request memory resource for taskfile registers */
	if (macio_request_resource(mdev, 0, "pata-macio")) {
		dev_err(&mdev->ofdev.dev,
			"Cannot obtain taskfile resource\n");
		return -EBUSY;
	}
	tfregs = macio_resource_start(mdev, 0);

	/* Request resources for DMA registers if any */
	if (macio_resource_count(mdev) >= 2) {
		if (macio_request_resource(mdev, 1, "pata-macio-dma"))
			dev_err(&mdev->ofdev.dev,
				"Cannot obtain DMA resource\n");
		else
			dmaregs = macio_resource_start(mdev, 1);
	}

	/*
	 * Fixup missing IRQ for some old implementations with broken
	 * device-trees.
	 *
	 * This is a bit bogus, it should be fixed in the device-tree itself,
	 * via the existing macio fixups, based on the type of interrupt
	 * controller in the machine. However, I have no test HW for this case,
	 * and this trick works well enough on those old machines...
	 */
	if (macio_irq_count(mdev) == 0) {
		dev_warn(&mdev->ofdev.dev,
			 "No interrupts for controller, using 13\n");
		irq = irq_create_mapping(NULL, 13);
	} else
		irq = macio_irq(mdev, 0);

	/* Prevent media bay callbacks until fully registered */
	lock_media_bay(priv->mdev->media_bay);

	/* Get register addresses and call common initialization */
	rc = pata_macio_common_init(priv,
				    tfregs,		/* Taskfile regs */
				    dmaregs,		/* DBDMA regs */
				    0,			/* Feature control */
				    irq);
	unlock_media_bay(priv->mdev->media_bay);

	return rc;
}

static int pata_macio_detach(struct macio_dev *mdev)
{
	struct ata_host *host = macio_get_drvdata(mdev);
	struct pata_macio_priv *priv = host->private_data;

	lock_media_bay(priv->mdev->media_bay);

	/* Make sure the mediabay callback doesn't try to access
	 * dead stuff
	 */
	priv->host->private_data = NULL;

	ata_host_detach(host);

	unlock_media_bay(priv->mdev->media_bay);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int pata_macio_suspend(struct macio_dev *mdev, pm_message_t mesg)
{
	struct ata_host *host = macio_get_drvdata(mdev);

	return pata_macio_do_suspend(host->private_data, mesg);
}

static int pata_macio_resume(struct macio_dev *mdev)
{
	struct ata_host *host = macio_get_drvdata(mdev);

	return pata_macio_do_resume(host->private_data);
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PMAC_MEDIABAY
static void pata_macio_mb_event(struct macio_dev* mdev, int mb_state)
{
	struct ata_host *host = macio_get_drvdata(mdev);
	struct ata_port *ap;
	struct ata_eh_info *ehi;
	struct ata_device *dev;
	unsigned long flags;

	if (!host || !host->private_data)
		return;
	ap = host->ports[0];
	spin_lock_irqsave(ap->lock, flags);
	ehi = &ap->link.eh_info;
	if (mb_state == MB_CD) {
		ata_ehi_push_desc(ehi, "mediabay plug");
		ata_ehi_hotplugged(ehi);
		ata_port_freeze(ap);
	} else {
		ata_ehi_push_desc(ehi, "mediabay unplug");
		ata_for_each_dev(dev, &ap->link, ALL)
			dev->flags |= ATA_DFLAG_DETACH;
		ata_port_abort(ap);
	}
	spin_unlock_irqrestore(ap->lock, flags);
}
#endif /* CONFIG_PMAC_MEDIABAY */


static int pata_macio_pci_attach(struct pci_dev *pdev,
				 const struct pci_device_id *id)
{
	struct pata_macio_priv	*priv;
	struct device_node	*np;
	resource_size_t		rbase;

	/* We cannot use a MacIO controller without its OF device node */
	np = pci_device_to_OF_node(pdev);
	if (np == NULL) {
		dev_err(&pdev->dev,
			"Cannot find OF device node for controller\n");
		return -ENODEV;
	}

	/* Check that it can be enabled */
	if (pcim_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot enable controller PCI device\n");
		return -ENXIO;
	}

	/* Allocate and init private data structure */
	priv = devm_kzalloc(&pdev->dev,
			    sizeof(struct pata_macio_priv), GFP_KERNEL);
	if (priv == NULL) {
		dev_err(&pdev->dev,
			"Failed to allocate private memory\n");
		return -ENOMEM;
	}
	priv->node = of_node_get(np);
	priv->pdev = pdev;
	priv->dev = &pdev->dev;

	/* Get MMIO regions */
	if (pci_request_regions(pdev, "pata-macio")) {
		dev_err(&pdev->dev,
			"Cannot obtain PCI resources\n");
		return -EBUSY;
	}

	/* Get register addresses and call common initialization */
	rbase = pci_resource_start(pdev, 0);
	if (pata_macio_common_init(priv,
				   rbase + 0x2000,	/* Taskfile regs */
				   rbase + 0x1000,	/* DBDMA regs */
				   rbase,		/* Feature control */
				   pdev->irq))
		return -ENXIO;

	return 0;
}

static void pata_macio_pci_detach(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);

	ata_host_detach(host);
}

#ifdef CONFIG_PM_SLEEP
static int pata_macio_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = pci_get_drvdata(pdev);

	return pata_macio_do_suspend(host->private_data, mesg);
}

static int pata_macio_pci_resume(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);

	return pata_macio_do_resume(host->private_data);
}
#endif /* CONFIG_PM_SLEEP */

static const struct of_device_id pata_macio_match[] =
{
	{
	.name		= "IDE",
	},
	{
	.name		= "ATA",
	},
	{
	.type		= "ide",
	},
	{
	.type		= "ata",
	},
	{},
};
MODULE_DEVICE_TABLE(of, pata_macio_match);

static struct macio_driver pata_macio_driver =
{
	.driver = {
		.name		= "pata-macio",
		.owner		= THIS_MODULE,
		.of_match_table	= pata_macio_match,
	},
	.probe		= pata_macio_attach,
	.remove		= pata_macio_detach,
#ifdef CONFIG_PM_SLEEP
	.suspend	= pata_macio_suspend,
	.resume		= pata_macio_resume,
#endif
#ifdef CONFIG_PMAC_MEDIABAY
	.mediabay_event	= pata_macio_mb_event,
#endif
};

static const struct pci_device_id pata_macio_pci_match[] = {
	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_UNI_N_ATA),	0 },
	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_IPID_ATA100),	0 },
	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_K2_ATA100),	0 },
	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_SH_ATA),	0 },
	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_IPID2_ATA),	0 },
	{},
};

static struct pci_driver pata_macio_pci_driver = {
	.name		= "pata-pci-macio",
	.id_table	= pata_macio_pci_match,
	.probe		= pata_macio_pci_attach,
	.remove		= pata_macio_pci_detach,
#ifdef CONFIG_PM_SLEEP
	.suspend	= pata_macio_pci_suspend,
	.resume		= pata_macio_pci_resume,
#endif
	.driver = {
		.owner		= THIS_MODULE,
	},
};
MODULE_DEVICE_TABLE(pci, pata_macio_pci_match);


static int __init pata_macio_init(void)
{
	int rc;

	if (!machine_is(powermac))
		return -ENODEV;

	rc = pci_register_driver(&pata_macio_pci_driver);
	if (rc)
		return rc;
	rc = macio_register_driver(&pata_macio_driver);
	if (rc) {
		pci_unregister_driver(&pata_macio_pci_driver);
		return rc;
	}
	return 0;
}

static void __exit pata_macio_exit(void)
{
	macio_unregister_driver(&pata_macio_driver);
	pci_unregister_driver(&pata_macio_pci_driver);
}

module_init(pata_macio_init);
module_exit(pata_macio_exit);

MODULE_AUTHOR("Benjamin Herrenschmidt");
MODULE_DESCRIPTION("Apple MacIO PATA driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);