/*
 *  sata_nv.c - NVIDIA nForce SATA
 *
 *  Copyright 2004 NVIDIA Corp.  All rights reserved.
 *  Copyright 2004 Andrew Chew
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  No hardware documentation available outside of NVIDIA.
 *  This driver programs the NVIDIA SATA controller in a similar
 *  fashion as with other PCI IDE BMDMA controllers, with a few
 *  NV-specific details such as register offsets, SATA phy location,
 *  hotplug info, etc.
 *
 *  CK804/MCP04 controllers support an alternate programming interface
 *  similar to the ADMA specification (with some modifications).
 *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
 *  sent through the legacy interface.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME			"sata_nv"
#define DRV_VERSION			"3.3"

#define NV_ADMA_DMA_BOUNDARY		0xffffffffUL
enum {
	NV_MMIO_BAR			= 5,

	NV_PIO_MASK			= 0x1f,
	NV_MWDMA_MASK			= 0x07,
	NV_UDMA_MASK			= 0x7f,

	NV_PORT0_SCR_REG_OFFSET		= 0x00,
	NV_PORT1_SCR_REG_OFFSET		= 0x40,

	/* INT_STATUS/ENABLE */
	NV_INT_STATUS			= 0x10,
	NV_INT_ENABLE			= 0x11,
	NV_INT_STATUS_CK804		= 0x440,
	NV_INT_ENABLE_CK804		= 0x441,

	/* INT_STATUS/ENABLE bits */
	NV_INT_DEV			= 0x01,
	NV_INT_PM			= 0x02,
	NV_INT_ADDED			= 0x04,
	NV_INT_REMOVED			= 0x08,

	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */

	NV_INT_ALL			= 0x0f,
	NV_INT_MASK			= NV_INT_DEV |
					  NV_INT_ADDED | NV_INT_REMOVED,

	/* INT_CONFIG */
	NV_INT_CONFIG			= 0x12,
	NV_INT_CONFIG_METHD		= 0x01, // 0 = INT, 1 = SMI

	// For PCI config register 20
	NV_MCP_SATA_CFG_20		= 0x50,
	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),

	NV_ADMA_MAX_CPBS		= 32,
	NV_ADMA_CPB_SZ			= 128,
	NV_ADMA_APRD_SZ			= 16,
	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
					   NV_ADMA_APRD_SZ,
	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
	NV_ADMA_SGTBL_SZ		= NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
	NV_ADMA_PORT_PRIV_DMA_SZ	= NV_ADMA_MAX_CPBS *
					  (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),

	/* BAR5 offset to ADMA general registers */
	NV_ADMA_GEN			= 0x400,
	NV_ADMA_GEN_CTL			= 0x00,
	NV_ADMA_NOTIFIER_CLEAR		= 0x30,

	/* BAR5 offset to ADMA ports */
	NV_ADMA_PORT			= 0x480,

	/* size of ADMA port register space */
	NV_ADMA_PORT_SIZE		= 0x100,

	/* ADMA port registers */
	NV_ADMA_CTL			= 0x40,
	NV_ADMA_CPB_COUNT		= 0x42,
	NV_ADMA_NEXT_CPB_IDX		= 0x43,
	NV_ADMA_STAT			= 0x44,
	NV_ADMA_CPB_BASE_LOW		= 0x48,
	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
	NV_ADMA_APPEND			= 0x50,
	NV_ADMA_NOTIFIER		= 0x68,
	NV_ADMA_NOTIFIER_ERROR		= 0x6C,

	/* NV_ADMA_CTL register bits */
	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
	NV_ADMA_CTL_GO			= (1 << 7),
	NV_ADMA_CTL_AIEN		= (1 << 8),
	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),

	/* CPB response flag bits */
	NV_CPB_RESP_DONE		= (1 << 0),
	NV_CPB_RESP_ATA_ERR		= (1 << 3),
	NV_CPB_RESP_CMD_ERR		= (1 << 4),
	NV_CPB_RESP_CPB_ERR		= (1 << 7),

	/* CPB control flag bits */
	NV_CPB_CTL_CPB_VALID		= (1 << 0),
	NV_CPB_CTL_QUEUE		= (1 << 1),
	NV_CPB_CTL_APRD_VALID		= (1 << 2),
	NV_CPB_CTL_IEN			= (1 << 3),
	NV_CPB_CTL_FPDMA		= (1 << 4),

	/* APRD flags */
	NV_APRD_WRITE			= (1 << 1),
	NV_APRD_END			= (1 << 2),
	NV_APRD_CONT			= (1 << 3),

	/* NV_ADMA_STAT flags */
	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
	NV_ADMA_STAT_CPBERR		= (1 << 4),
	NV_ADMA_STAT_SERROR		= (1 << 5),
	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
	NV_ADMA_STAT_IDLE		= (1 << 8),
	NV_ADMA_STAT_LEGACY		= (1 << 9),
	NV_ADMA_STAT_STOPPED		= (1 << 10),
	NV_ADMA_STAT_DONE		= (1 << 12),
	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
					  NV_ADMA_STAT_TIMEOUT,

	/* port flags */
	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),
};
/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
	__le64			addr;
	__le32			len;
	u8			flags;
	u8			packet_len;
	__le16			reserved;
};
enum nv_adma_regbits {
	CMDEND	= (1 << 15),		/* end of command list */
	WNB	= (1 << 14),		/* wait-not-BSY */
	IGN	= (1 << 13),		/* ignore this entry */
	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
	DA2	= (1 << (2 + 8)),
	DA1	= (1 << (1 + 8)),
	DA0	= (1 << (0 + 8)),
};
/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd. */
struct nv_adma_cpb {
	u8			resp_flags;    /* 0 */
	u8			reserved1;     /* 1 */
	u8			ctl_flags;     /* 2 */
	/* len is length of taskfile in 64 bit words */
	u8			len;           /* 3 */
	u8			tag;           /* 4 */
	u8			next_cpb_idx;  /* 5 */
	__le16			reserved2;     /* 6-7 */
	__le16			tf[12];        /* 8-31 */
	struct nv_adma_prd	aprd[5];       /* 32-111 */
	__le64			next_aprd;     /* 112-119 */
	__le64			reserved3;     /* 120-127 */
};
struct nv_adma_port_priv {
	struct nv_adma_cpb	*cpb;
	dma_addr_t		cpb_dma;
	struct nv_adma_prd	*aprd;
	dma_addr_t		aprd_dma;
	void __iomem		*ctl_block;
	void __iomem		*gen_block;
	void __iomem		*notifier_clear_block;
	u8			flags;
	int			last_issue_ncq;
};

struct nv_host_priv {
	unsigned long		type;
};
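/* The ADMA general control register exposes one interrupt-pending flag per
   port; port N's flag sits at bit 19 + 12 * N of GEN_CTL. */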
#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
static void nv_remove_one (struct pci_dev *pdev);
#ifdef CONFIG_PM
static int nv_pci_device_resume(struct pci_dev *pdev);
#endif
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg);
static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);

static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static void nv_error_handler(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_adma_port_resume(struct ata_port *ap);
#endif
static void nv_adma_freeze(struct ata_port *ap);
static void nv_adma_thaw(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
enum nv_host_type
{
	GENERIC,
	NFORCE2,
	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
	CK804,
	ADMA
};
static const struct pci_device_id nv_pci_tbl[] = {
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
		PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
		PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID<<8, 0xffff00, GENERIC },

	{ } /* terminate list */
};
static struct pci_driver nv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= nv_pci_tbl,
	.probe			= nv_init_one,
#ifdef CONFIG_PM
	.suspend		= ata_pci_device_suspend,
	.resume			= nv_pci_device_resume,
#endif
	.remove			= nv_remove_one,
};
static struct scsi_host_template nv_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
#ifdef CONFIG_PM
	.suspend		= ata_scsi_device_suspend,
	.resume			= ata_scsi_device_resume,
#endif
};
static struct scsi_host_template nv_adma_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= NV_ADMA_MAX_CPBS,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
	.slave_configure	= nv_adma_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
#ifdef CONFIG_PM
	.suspend		= ata_scsi_device_suspend,
	.resume			= ata_scsi_device_resume,
#endif
};
static const struct ata_port_operations nv_generic_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= ata_bmdma_freeze,
	.thaw			= ata_bmdma_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
};
static const struct ata_port_operations nv_nf2_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= nv_nf2_freeze,
	.thaw			= nv_nf2_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
};
static const struct ata_port_operations nv_ck804_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
	.host_stop		= nv_ck804_host_stop,
};
static const struct ata_port_operations nv_adma_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= nv_adma_tf_read,
	.check_atapi_dma	= nv_adma_check_atapi_dma,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= nv_adma_qc_prep,
	.qc_issue		= nv_adma_qc_issue,
	.freeze			= nv_adma_freeze,
	.thaw			= nv_adma_thaw,
	.error_handler		= nv_adma_error_handler,
	.post_internal_cmd	= nv_adma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_clear		= nv_adma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= nv_adma_port_start,
	.port_stop		= nv_adma_port_stop,
#ifdef CONFIG_PM
	.port_suspend		= nv_adma_port_suspend,
	.port_resume		= nv_adma_port_resume,
#endif
	.host_stop		= nv_adma_host_stop,
};
static struct ata_port_info nv_port_info[] = {
	/* generic */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
		.irq_handler	= nv_generic_interrupt,
	},
	/* nforce2/3 */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_nf2_ops,
		.irq_handler	= nv_nf2_interrupt,
	},
	/* ck804 */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_ck804_ops,
		.irq_handler	= nv_ck804_interrupt,
	},
	/* ADMA */
	{
		.sht		= &nv_adma_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME |
				  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_adma_ops,
		.irq_handler	= nv_adma_interrupt,
	},
};
MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static int adma_enabled = 1;
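/* Switch a port from ADMA mode back to legacy register mode: wait for the
   engine to go idle, clear the GO bit, then wait for the LEGACY status bit.
   Required before the taskfile registers can be driven directly. */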
static void nv_adma_register_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		return;

	status = readw(mmio + NV_ADMA_STAT);
	while(!(status & NV_ADMA_STAT_IDLE) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if(count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA IDLE, stat=0x%hx\n",
			status);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	count = 0;
	status = readw(mmio + NV_ADMA_STAT);
	while(!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if(count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA LEGACY, stat=0x%hx\n",
			status);

	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}
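/* Counterpart to nv_adma_register_mode(): set the GO bit and wait for the
   engine to leave legacy state and go idle, so CPBs can be executed. */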
static void nv_adma_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
		return;

	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	status = readw(mmio + NV_ADMA_STAT);
	while(((status & NV_ADMA_STAT_LEGACY) ||
	      !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if(count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
			status);

	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}
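/* Per-device setup. ATAPI devices must use the legacy interface, which only
   supports 32-bit DMA, so the DMA limits are tightened and ADMA is disabled
   in PCI config space for this port; disks get the full ADMA limits. */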
static int nv_adma_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct nv_adma_port_priv *pp = ap->private_data;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u64 bounce_limit;
	unsigned long segment_boundary;
	unsigned short sg_tablesize;
	int rc;
	int adma_enable;
	u32 current_reg, new_reg, config_mask;

	rc = ata_scsi_slave_config(sdev);

	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	if (ap->device[sdev->id].class == ATA_DEV_ATAPI) {
		/*
		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
		 * Therefore ATAPI commands are sent through the legacy interface.
		 * However, the legacy interface only supports 32-bit DMA.
		 * Restrict DMA parameters as required by the legacy interface
		 * when an ATAPI device is connected.
		 */
		bounce_limit = ATA_DMA_MASK;
		segment_boundary = ATA_DMA_BOUNDARY;
		/* Subtract 1 since an extra entry may be needed for padding, see
		   libata-scsi.c */
		sg_tablesize = LIBATA_MAX_PRD - 1;

		/* Since the legacy DMA engine is in use, we need to disable ADMA
		   on the port. */
		adma_enable = 0;
		nv_adma_register_mode(ap);
	}
	else {
		bounce_limit = *ap->dev->dma_mask;
		segment_boundary = NV_ADMA_DMA_BOUNDARY;
		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
		adma_enable = 1;
	}

	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

	if(ap->port_no == 1)
		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
	else
		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

	if(adma_enable) {
		new_reg = current_reg | config_mask;
		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
	}
	else {
		new_reg = current_reg & ~config_mask;
		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
	}

	if(current_reg != new_reg)
		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

	blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
	blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
	ata_port_printk(ap, KERN_INFO,
		"bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
		(unsigned long long)bounce_limit, segment_boundary, sg_tablesize);

	return rc;
}
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
}
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	/* Since commands where a result TF is requested are not
	   executed in ADMA mode, the only time this function will be called
	   in ADMA mode will be if a command fails. In this case we
	   don't care about going into register mode with ADMA commands
	   pending, as the commands will all shortly be aborted anyway. */
	nv_adma_register_mode(ap);

	ata_tf_read(ap, tf);
}
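/* Translate a taskfile into the CPB's sequence of (register, value) words,
   terminated by CMDEND on the command-register write; unused slots of the
   12-entry area are padded with IGN. Returns the number of words used. */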
static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
{
	unsigned int idx = 0;

	if(tf->flags & ATA_TFLAG_ISADDR) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB);
			cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
		} else
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature | WNB);

		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
	}

	if(tf->flags & ATA_TFLAG_DEVICE)
		cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);

	cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);

	while(idx < 12)
		cpb[idx++] = cpu_to_le16(IGN);

	return idx;
}
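/* Examine one CPB's response flags. On error, describe the failure to EH
   and freeze or abort the port; on normal completion, finish the qc that
   owns the tag. Returns nonzero if EH was invoked and the caller should
   stop checking further CPBs. */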
static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 flags = pp->cpb[cpb_num].resp_flags;

	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

	if (unlikely((force_err ||
		     flags & (NV_CPB_RESP_ATA_ERR |
			      NV_CPB_RESP_CMD_ERR |
			      NV_CPB_RESP_CPB_ERR)))) {
		struct ata_eh_info *ehi = &ap->eh_info;
		int freeze = 0;

		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x", flags);
		if (flags & NV_CPB_RESP_ATA_ERR) {
			ata_ehi_push_desc(ehi, ": ATA error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CMD_ERR) {
			ata_ehi_push_desc(ehi, ": CMD error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CPB_ERR) {
			ata_ehi_push_desc(ehi, ": CPB error");
			ehi->err_mask |= AC_ERR_SYSTEM;
			freeze = 1;
		} else {
			/* notifier error, but no error in CPB flags? */
			ehi->err_mask |= AC_ERR_OTHER;
			freeze = 1;
		}
		/* Kill all commands. EH will determine what actually failed. */
		if (freeze)
			ata_port_freeze(ap);
		else
			ata_port_abort(ap);
		return 1;
	}

	if (likely(flags & NV_CPB_RESP_DONE)) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
		VPRINTK("CPB flags done, flags=0x%x\n", flags);
		if (likely(qc)) {
			DPRINTK("Completing qc from tag %d\n",cpb_num);
			ata_qc_complete(qc);
		} else {
			struct ata_eh_info *ehi = &ap->eh_info;
			/* Notifier bits set without a command may indicate the drive
			   is misbehaving. Raise host state machine violation on this
			   condition. */
			ata_port_printk(ap, KERN_ERR, "notifier for tag %d with no command?\n",
				cpb_num);
			ehi->err_mask |= AC_ERR_HSM;
			ehi->action |= ATA_EH_SOFTRESET;
			ata_port_freeze(ap);
			return 1;
		}
	}
	return 0;
}
static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);

	/* freeze if hotplugged */
	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
		ata_port_freeze(ap);
		return 1;
	}

	/* bail out if not our interrupt */
	if (!(irq_stat & NV_INT_DEV))
		return 0;

	/* DEV interrupt w/ no active qc? */
	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
		ata_check_status(ap);
		return 1;
	}

	/* handle interrupt */
	return ata_host_intr(ap, qc);
}
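/* Interrupt handler for ports running in ADMA mode. Legacy and register-mode
   interrupts are forwarded to nv_host_intr(); otherwise the ADMA status and
   notifier registers are sampled and each outstanding CPB is checked. */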
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	int i, handled = 0;
	u32 notifier_clears[2];

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		notifier_clears[i] = 0;

		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
			struct nv_adma_port_priv *pp = ap->private_data;
			void __iomem *mmio = pp->ctl_block;
			u16 status;
			u32 gen_ctl;
			u32 notifier, notifier_error;

			/* if ADMA is disabled, use standard ata interrupt handler */
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
					>> (NV_INT_PORT_SHIFT * i);
				handled += nv_host_intr(ap, irq_stat);
				continue;
			}

			/* if in ATA register mode, check for standard interrupts */
			if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
					>> (NV_INT_PORT_SHIFT * i);
				if(ata_tag_valid(ap->active_tag))
					/** NV_INT_DEV indication seems unreliable at times
					    at least in ADMA mode. Force it on always when a
					    command is active, to prevent losing interrupts. */
					irq_stat |= NV_INT_DEV;
				handled += nv_host_intr(ap, irq_stat);
			}

			notifier = readl(mmio + NV_ADMA_NOTIFIER);
			notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			notifier_clears[i] = notifier | notifier_error;

			gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

			if( !NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
			    !notifier_error)
				/* Nothing to do */
				continue;

			status = readw(mmio + NV_ADMA_STAT);

			/* Clear status. Ensure the controller sees the clearing before we start
			   looking at any of the CPB statuses, so that any CPB completions after
			   this point in the handler will raise another interrupt. */
			writew(status, mmio + NV_ADMA_STAT);
			readw(mmio + NV_ADMA_STAT); /* flush posted write */
			rmb();

			handled++; /* irq handled if we got here */

			/* freeze if hotplugged or controller error */
			if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
					       NV_ADMA_STAT_HOTUNPLUG |
					       NV_ADMA_STAT_TIMEOUT |
					       NV_ADMA_STAT_SERROR))) {
				struct ata_eh_info *ehi = &ap->eh_info;

				ata_ehi_clear_desc(ehi);
				ata_ehi_push_desc(ehi, "ADMA status 0x%08x", status);
				if (status & NV_ADMA_STAT_TIMEOUT) {
					ehi->err_mask |= AC_ERR_SYSTEM;
					ata_ehi_push_desc(ehi, ": timeout");
				} else if (status & NV_ADMA_STAT_HOTPLUG) {
					ata_ehi_hotplugged(ehi);
					ata_ehi_push_desc(ehi, ": hotplug");
				} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
					ata_ehi_hotplugged(ehi);
					ata_ehi_push_desc(ehi, ": hot unplug");
				} else if (status & NV_ADMA_STAT_SERROR) {
					/* let libata analyze SError and figure out the cause */
					ata_ehi_push_desc(ehi, ": SError");
				}
				ata_port_freeze(ap);
				continue;
			}

			if (status & (NV_ADMA_STAT_DONE |
				      NV_ADMA_STAT_CPBERR)) {
				u32 check_commands;
				int pos, error = 0;

				if(ata_tag_valid(ap->active_tag))
					check_commands = 1 << ap->active_tag;
				else
					check_commands = ap->sactive;

				/** Check CPBs for completed commands */
				while ((pos = ffs(check_commands)) && !error) {
					pos--;
					error = nv_adma_check_cpb(ap, pos,
						notifier_error & (1 << pos) );
					check_commands &= ~(1 << pos );
				}
			}
		}
	}

	if(notifier_clears[0] || notifier_clears[1]) {
		/* Note: Both notifier clear registers must be written
		   if either is set, even if one is zero, according to NVIDIA. */
		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
		writel(notifier_clears[0], pp->notifier_clear_block);
		pp = host->ports[1]->private_data;
		writel(notifier_clears[1], pp->notifier_clear_block);
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
static void nv_adma_freeze(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	nv_ck804_freeze(ap);

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return;

	/* clear any outstanding CK804 notifications */
	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

	/* Disable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
		mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
}
static void nv_adma_thaw(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	nv_ck804_thaw(ap);

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return;

	/* Enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
		mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
}
static void nv_adma_irq_clear(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u32 notifier_clears[2];

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
		ata_bmdma_irq_clear(ap);
		return;
	}

	/* clear any outstanding CK804 notifications */
	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

	/* clear ADMA status */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* clear notifiers - note both ports need to be written with
	   something even though we are only clearing on one */
	if (ap->port_no == 0) {
		notifier_clears[0] = 0xFFFFFFFF;
		notifier_clears[1] = 0;
	} else {
		notifier_clears[0] = 0;
		notifier_clears[1] = 0xFFFFFFFF;
	}
	pp = ap->host->ports[0]->private_data;
	writel(notifier_clears[0], pp->notifier_clear_block);
	pp = ap->host->ports[1]->private_data;
	writel(notifier_clears[1], pp->notifier_clear_block);
}
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	if(pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		ata_bmdma_post_internal_cmd(qc);
}
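/* Allocate and wire up per-port ADMA state: a coherent DMA block holding one
   128-byte CPB per command tag followed by the per-tag APRD tables, then
   bring the engine up in register mode with interrupts enabled. */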
static int nv_adma_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp;
	int rc;
	void *mem;
	dma_addr_t mem_dma;
	void __iomem *mmio;
	u16 tmp;

	VPRINTK("ENTER\n");

	rc = ata_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
	       ap->port_no * NV_ADMA_PORT_SIZE;
	pp->ctl_block = mmio;
	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
	pp->notifier_clear_block = pp->gen_block +
	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
				  &mem_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory:
	 * 128-byte command parameter block (CPB)
	 * one for each command tag
	 */
	pp->cpb     = mem;
	pp->cpb_dma = mem_dma;

	writel(mem_dma & 0xFFFFFFFF,	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((mem_dma >> 16 ) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

	/*
	 * Second item: block of ADMA_SGTBL_LEN s/g entries
	 */
	pp->aprd = mem;
	pp->aprd_dma = mem_dma;

	ap->private_data = pp;

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags = NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}
static void nv_adma_port_stop(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	VPRINTK("ENTER\n");
	writew(0, mmio + NV_ADMA_CTL);
}
#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	/* Go to register mode - clears GO */
	nv_adma_register_mode(ap);

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* disable interrupt, shut down port */
	writew(0, mmio + NV_ADMA_CTL);

	return 0;
}

static int nv_adma_port_resume(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	/* set CPB block location */
	writel(pp->cpb_dma & 0xFFFFFFFF,	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((pp->cpb_dma >> 16 ) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}
#endif
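/* Point the libata taskfile accessors at the register images inside this
   port's ADMA register space; each taskfile register is 4 bytes apart. */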
static void nv_adma_setup_port(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	struct ata_ioports *ioport = &ap->ioaddr;

	VPRINTK("ENTER\n");

	mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;

	ioport->cmd_addr	= mmio;
	ioport->data_addr	= mmio + (ATA_REG_DATA * 4);
	ioport->error_addr	=
	ioport->feature_addr	= mmio + (ATA_REG_ERR * 4);
	ioport->nsect_addr	= mmio + (ATA_REG_NSECT * 4);
	ioport->lbal_addr	= mmio + (ATA_REG_LBAL * 4);
	ioport->lbam_addr	= mmio + (ATA_REG_LBAM * 4);
	ioport->lbah_addr	= mmio + (ATA_REG_LBAH * 4);
	ioport->device_addr	= mmio + (ATA_REG_DEVICE * 4);
	ioport->status_addr	=
	ioport->command_addr	= mmio + (ATA_REG_STATUS * 4);
	ioport->altstatus_addr	=
	ioport->ctl_addr	= mmio + 0x20;
}
static int nv_adma_host_init(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	unsigned int i;
	u32 tmp32;

	VPRINTK("ENTER\n");

	/* enable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		 NV_MCP_SATA_CFG_20_PORT1_EN |
		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	for (i = 0; i < host->n_ports; i++)
		nv_adma_setup_port(host->ports[i]);

	return 0;
}
static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
			      struct scatterlist *sg,
			      int idx,
			      struct nv_adma_prd *aprd)
{
	u8 flags = 0;

	if (qc->tf.flags & ATA_TFLAG_WRITE)
		flags |= NV_APRD_WRITE;
	if (idx == qc->n_elem - 1)
		flags |= NV_APRD_END;
	else if (idx != 4)
		flags |= NV_APRD_CONT;

	aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
	aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
	aprd->flags = flags;
	aprd->packet_len = 0;
}
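/* Build the scatter/gather list for a CPB: the first five APRDs live inside
   the CPB itself, any remainder goes into this tag's slice of the external
   APRD table, linked via next_aprd. */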
static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	unsigned int idx;
	struct nv_adma_prd *aprd;
	struct scatterlist *sg;

	VPRINTK("ENTER\n");

	idx = 0;

	ata_for_each_sg(sg, qc) {
		aprd = (idx < 5) ? &cpb->aprd[idx] : &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx-5)];
		nv_adma_fill_aprd(qc, sg, idx, aprd);
		idx++;
	}
	if (idx > 5)
		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
	else
		cpb->next_aprd = cpu_to_le64(0);
}
static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	/* ADMA engine can only be used for non-ATAPI DMA commands,
	   or interrupt-driven no-data commands, where a result taskfile
	   is not required */
	if((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
	   (qc->tf.flags & ATA_TFLAG_POLLING) ||
	   (qc->flags & ATA_QCFLAG_RESULT_TF))
		return 1;

	if((qc->flags & ATA_QCFLAG_DMAMAP) ||
	   (qc->tf.protocol == ATA_PROT_NODATA))
		return 0;

	return 1;
}
static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
		       NV_CPB_CTL_IEN;

	if (nv_adma_use_reg_mode(qc)) {
		nv_adma_register_mode(qc->ap);
		ata_qc_prep(qc);
		return;
	}

	cpb->resp_flags = NV_CPB_RESP_DONE;
	wmb();
	cpb->ctl_flags = 0;
	wmb();

	cpb->len		= 3;
	cpb->tag		= qc->tag;
	cpb->next_cpb_idx	= 0;

	/* turn on NCQ flags for NCQ commands */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

	VPRINTK("qc->flags = 0x%lx\n", qc->flags);

	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

	if(qc->flags & ATA_QCFLAG_DMAMAP) {
		nv_adma_fill_sg(qc, cpb);
		ctl_flags |= NV_CPB_CTL_APRD_VALID;
	} else
		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);

	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID until we are
	   finished filling in all of the contents */
	wmb();
	cpb->ctl_flags = ctl_flags;
	wmb();
	cpb->resp_flags = 0;
}
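/* Issue a command: fall back to the legacy path for register-mode commands,
   otherwise make sure the port is in ADMA mode and kick the engine by
   writing the tag to the APPEND register. */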
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);

	VPRINTK("ENTER\n");

	if (nv_adma_use_reg_mode(qc)) {
		/* use ATA register mode */
		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
		nv_adma_register_mode(qc->ap);
		return ata_qc_issue_prot(qc);
	} else
		nv_adma_mode(qc->ap);

	/* write append register, command tag in lower 8 bits
	   and (number of cpbs to append -1) in top 8 bits */
	wmb();

	if(curr_ncq != pp->last_issue_ncq) {
		/* Seems to need some delay before switching between NCQ and non-NCQ
		   commands, else we get command timeouts and such. */
		udelay(20);
		pp->last_issue_ncq = curr_ncq;
	}

	writew(qc->tag, mmio + NV_ADMA_APPEND);

	DPRINTK("Issued tag %u\n",qc->tag);

	return 0;
}
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
				handled += ata_host_intr(ap, qc);
			else
				// No request pending? Clear interrupt status
				// anyway, in case there's one pending.
				ap->ops->check_status(ap);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}
static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
{
	int i, handled = 0;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap && !(ap->flags & ATA_FLAG_DISABLED))
			handled += nv_host_intr(ap, irq_stat);

		irq_stat >>= NV_INT_PORT_SHIFT;
	}

	return IRQ_RETVAL(handled);
}
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}

static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}
static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg)
{
	if (sc_reg > SCR_CONTROL)
		return 0xffffffffU;

	return ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
}

static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return;

	iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
}
static void nv_nf2_freeze(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask &= ~(NV_INT_ALL << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_nf2_thaw(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask |= (NV_INT_MASK << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}
static void nv_ck804_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask &= ~(NV_INT_ALL << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static void nv_ck804_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask |= (NV_INT_MASK << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}
static int nv_hardreset(struct ata_port *ap, unsigned int *class,
			unsigned long deadline)
{
	unsigned int dummy;

	/* SATA hardreset fails to retrieve proper device signature on
	 * some controllers.  Don't classify on hardreset.  For more
	 * info, see http://bugme.osdl.org/show_bug.cgi?id=3352
	 */
	return sata_std_hardreset(ap, &dummy, deadline);
}

static void nv_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}
static void nv_adma_error_handler(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		void __iomem *mmio = pp->ctl_block;
		int i;
		u16 tmp;

		if(ata_tag_valid(ap->active_tag) || ap->sactive) {
			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
			u32 status = readw(mmio + NV_ADMA_STAT);
			u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
			u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);

			ata_port_printk(ap, KERN_ERR, "EH in ADMA mode, notifier 0x%X "
				"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
				"next cpb count 0x%X next cpb idx 0x%x\n",
				notifier, notifier_error, gen_ctl, status,
				cpb_count, next_cpb_idx);

			for( i=0;i<NV_ADMA_MAX_CPBS;i++) {
				struct nv_adma_cpb *cpb = &pp->cpb[i];
				if( (ata_tag_valid(ap->active_tag) && i == ap->active_tag) ||
				    ap->sactive & (1 << i) )
					ata_port_printk(ap, KERN_ERR,
						"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
						i, cpb->ctl_flags, cpb->resp_flags);
			}
		}

		/* Push us back into port register mode for error handling. */
		nv_adma_register_mode(ap);

		/* Mark all of the CPBs as invalid to prevent them from being executed */
		for( i=0;i<NV_ADMA_MAX_CPBS;i++)
			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;

		/* clear CPB fetch count */
		writew(0, mmio + NV_ADMA_CPB_COUNT);

		/* Reset channel */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
		udelay(1);
		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	}

	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}
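/* PCI probe. Filters out IDE-mode controllers (the SATA parts expose six
   BARs), selects port info by chip type, upgrades CK804/MCP04 to ADMA mode
   when enabled, and sets up SCR access and interrupt handling. */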
static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version = 0;
	const struct ata_port_info *ppi[2];
	struct ata_host *host;
	struct nv_host_priv *hpriv;
	int rc;
	u32 bar;
	void __iomem *base;
	unsigned long type = ent->driver_data;

	// Make sure this is a SATA controller by counting the number of bars
	// (NVIDIA SATA controllers will always have six bars).  Otherwise,
	// it's an IDE controller and we ignore it.
	for (bar=0; bar<6; bar++)
		if (pci_resource_start(pdev, bar) == 0)
			return -ENODEV;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* determine type and allocate host */
	if (type >= CK804 && adma_enabled) {
		dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
		type = ADMA;
	}

	ppi[0] = ppi[1] = &nv_port_info[type];
	rc = ata_pci_prepare_native_host(pdev, ppi, 2, &host);
	if (rc)
		return rc;

	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;
	hpriv->type = type;
	host->private_data = hpriv;

	/* set 64bit dma masks, may fail */
	if (type == ADMA) {
		if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0)
			pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
	}

	/* request and iomap NV_MMIO_BAR */
	rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
	if (rc)
		return rc;

	/* configure SCR access */
	base = host->iomap[NV_MMIO_BAR];
	host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
	host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;

	/* enable SATA space for CK804 */
	if (type >= CK804) {
		u8 regval;

		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
	}

	/* init ADMA */
	if (type == ADMA) {
		rc = nv_adma_host_init(host);
		if (rc)
			return rc;
	}

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, ppi[0]->irq_handler,
				 IRQF_SHARED, ppi[0]->sht);
}
static void nv_remove_one (struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct nv_host_priv *hpriv = host->private_data;

	ata_pci_remove_one(pdev);
	kfree(hpriv);
}

#ifdef CONFIG_PM
static int nv_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct nv_host_priv *hpriv = host->private_data;
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if(rc)
		return rc;

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		if(hpriv->type >= CK804) {
			u8 regval;

			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
		}
		if(hpriv->type == ADMA) {
			u32 tmp32;
			struct nv_adma_port_priv *pp;
			/* enable/disable ADMA on the ports appropriately */
			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);

			pp = host->ports[0]->private_data;
			if(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			else
				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT0_EN |
					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			pp = host->ports[1]->private_data;
			if(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
			else
				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT1_EN |
					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
		}
	}

	ata_host_resume(host);

	return 0;
}
#endif
static void nv_ck804_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u8 regval;

	/* disable SATA space for CK804 */
	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
}
static void nv_adma_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u32 tmp32;

	/* disable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		   NV_MCP_SATA_CFG_20_PORT1_EN |
		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	nv_ck804_host_stop(host);
}
static int __init nv_init(void)
{
	return pci_register_driver(&nv_pci_driver);
}

static void __exit nv_exit(void)
{
	pci_unregister_driver(&nv_pci_driver);
}

module_init(nv_init);
module_exit(nv_exit);
module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");