2 * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver
4 * Copyright (c) 2008-2009 USI Co., Ltd.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification.
13 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14 * substantially similar to the "NO WARRANTY" disclaimer below
15 * ("Disclaimer") and any redistribution must be conditioned upon
16 * including a substantially similar Disclaimer requirement for further
17 * binary redistribution.
18 * 3. Neither the names of the above-listed copyright holders nor the names
19 * of any contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
22 * Alternatively, this software may be distributed under the terms of the
23 * GNU General Public License ("GPL") version 2 as published by the Free
24 * Software Foundation.
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
35 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
36 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGES.
41 #include <linux/slab.h>
42 #include "pm8001_sas.h"
43 #include "pm8001_chips.h"
44 #include "pm80xx_hwi.h"
46 static ulong logging_level = PM8001_FAIL_LOGGING | PM8001_IOERR_LOGGING |
47 PM8001_EVENT_LOGGING | PM8001_INIT_LOGGING;
48 module_param(logging_level, ulong, 0644);
49 MODULE_PARM_DESC(logging_level, "Bit mask of logging categories to enable.");
51 static ulong link_rate = LINKRATE_15 | LINKRATE_30 | LINKRATE_60 | LINKRATE_120;
52 module_param(link_rate, ulong, 0644);
53 MODULE_PARM_DESC(link_rate, "Enable link rate.\n"
54 " 1: Link rate 1.5G\n"
55 " 2: Link rate 3.0G\n"
56 " 4: Link rate 6.0G\n"
57 " 8: Link rate 12.0G\n");
59 bool pm8001_use_msix = true;
60 module_param_named(use_msix, pm8001_use_msix, bool, 0444);
61 MODULE_PARM_DESC(use_msix, "Use MSIX interrupts. Default: true");
63 static bool pm8001_use_tasklet = true;
64 module_param_named(use_tasklet, pm8001_use_tasklet, bool, 0444);
65 MODULE_PARM_DESC(use_tasklet, "Use tasklets for deferred interrupt handling. Default: true");
67 static bool pm8001_read_wwn = true;
68 module_param_named(read_wwn, pm8001_read_wwn, bool, 0444);
69 MODULE_PARM_DESC(read_wwn, "Get WWN from the controller. Default: true");
71 static struct scsi_transport_template *pm8001_stt;
72 static int pm8001_init_ccb_tag(struct pm8001_hba_info *);
75 * Chip info structure identifying key per-chip functionality: whether
76 * encryption is available, the number of PHYs, and the HW-specific dispatch functions.
78 static const struct pm8001_chip_info pm8001_chips[] = {
79 [chip_8001] = {0, 8, &pm8001_8001_dispatch,},
80 [chip_8008] = {0, 8, &pm8001_80xx_dispatch,},
81 [chip_8009] = {1, 8, &pm8001_80xx_dispatch,},
82 [chip_8018] = {0, 16, &pm8001_80xx_dispatch,},
83 [chip_8019] = {1, 16, &pm8001_80xx_dispatch,},
84 [chip_8074] = {0, 8, &pm8001_80xx_dispatch,},
85 [chip_8076] = {0, 16, &pm8001_80xx_dispatch,},
86 [chip_8077] = {0, 16, &pm8001_80xx_dispatch,},
87 [chip_8006] = {0, 16, &pm8001_80xx_dispatch,},
88 [chip_8070] = {0, 8, &pm8001_80xx_dispatch,},
89 [chip_8072] = {0, 16, &pm8001_80xx_dispatch,},
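/*
 * Each entry above follows the pm8001_chip_info layout (see pm8001_sas.h):
 * { encrypt, n_phy, dispatch }, i.e. whether inline encryption is present,
 * the number of PHYs, and the chip-specific dispatch table.
 */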
95 struct workqueue_struct *pm8001_wq;
97 static void pm8001_map_queues(struct Scsi_Host *shost)
99 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
100 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
101 struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
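	/*
	 * Interrupt vector 0 is kept for housekeeping (see pm8001_setup_msix()),
	 * so the hardware queues are mapped onto the remaining vectors, hence
	 * the offset of 1 passed to blk_mq_pci_map_queues() below.
	 */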
103 if (pm8001_ha->number_of_intr > 1)
104 blk_mq_pci_map_queues(qmap, pm8001_ha->pdev, 1);
106 return blk_mq_map_queues(qmap);
110 * The main structure which the LLDD must register with the SCSI core.
112 static const struct scsi_host_template pm8001_sht = {
114 .scan_finished = pm8001_scan_finished,
115 .scan_start = pm8001_scan_start,
117 .sg_tablesize = PM8001_MAX_DMA_SG,
118 .shost_groups = pm8001_host_groups,
119 .sdev_groups = pm8001_sdev_groups,
120 .track_queue_depth = 1,
122 .map_queues = pm8001_map_queues,
126 * The SAS layer calls these functions to execute specific tasks.
128 static struct sas_domain_function_template pm8001_transport_ops = {
129 .lldd_dev_found = pm8001_dev_found,
130 .lldd_dev_gone = pm8001_dev_gone,
132 .lldd_execute_task = pm8001_queue_command,
133 .lldd_control_phy = pm8001_phy_control,
135 .lldd_abort_task = pm8001_abort_task,
136 .lldd_abort_task_set = sas_abort_task_set,
137 .lldd_clear_task_set = pm8001_clear_task_set,
138 .lldd_I_T_nexus_reset = pm8001_I_T_nexus_reset,
139 .lldd_lu_reset = pm8001_lu_reset,
140 .lldd_query_task = pm8001_query_task,
141 .lldd_port_formed = pm8001_port_formed,
142 .lldd_tmf_exec_complete = pm8001_setds_completion,
143 .lldd_tmf_aborted = pm8001_tmf_aborted,
147 * pm8001_phy_init - initialize one of our adapter PHYs
148 * @pm8001_ha: our hba structure.
149 * @phy_id: PHY id to initialize.
151 static void pm8001_phy_init(struct pm8001_hba_info *pm8001_ha, int phy_id)
153 struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
154 struct asd_sas_phy *sas_phy = &phy->sas_phy;
155 phy->phy_state = PHY_LINK_DISABLE;
156 phy->pm8001_ha = pm8001_ha;
157 phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
158 phy->maximum_linkrate = SAS_LINK_RATE_6_0_GBPS;
159 sas_phy->enabled = (phy_id < pm8001_ha->chip->n_phy) ? 1 : 0;
160 sas_phy->iproto = SAS_PROTOCOL_ALL;
162 sas_phy->role = PHY_ROLE_INITIATOR;
163 sas_phy->oob_mode = OOB_NOT_CONNECTED;
164 sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
165 sas_phy->id = phy_id;
166 sas_phy->sas_addr = (u8 *)&phy->dev_sas_addr;
167 sas_phy->frame_rcvd = &phy->frame_rcvd[0];
168 sas_phy->ha = (struct sas_ha_struct *)pm8001_ha->shost->hostdata;
169 sas_phy->lldd_phy = phy;
173 * pm8001_free - free hba
174 * @pm8001_ha: our hba structure.
176 static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
183 for (i = 0; i < USI_MAX_MEMCNT; i++) {
184 if (pm8001_ha->memoryMap.region[i].virt_ptr != NULL) {
185 dma_free_coherent(&pm8001_ha->pdev->dev,
186 (pm8001_ha->memoryMap.region[i].total_len +
187 pm8001_ha->memoryMap.region[i].alignment),
188 pm8001_ha->memoryMap.region[i].virt_ptr,
189 pm8001_ha->memoryMap.region[i].phys_addr);
192 PM8001_CHIP_DISP->chip_iounmap(pm8001_ha);
193 flush_workqueue(pm8001_wq);
194 bitmap_free(pm8001_ha->rsvd_tags);
199 * pm8001_tasklet() - tasklet for 64 msi-x interrupt handler
200 * @opaque: the passed general host adapter struct
201 * Note: pm8001_tasklet is common for pm8001 & pm80xx
203 static void pm8001_tasklet(unsigned long opaque)
205 struct isr_param *irq_vector = (struct isr_param *)opaque;
206 struct pm8001_hba_info *pm8001_ha = irq_vector->drv_inst;
208 if (WARN_ON_ONCE(!pm8001_ha))
211 PM8001_CHIP_DISP->isr(pm8001_ha, irq_vector->irq_id);
214 static void pm8001_init_tasklet(struct pm8001_hba_info *pm8001_ha)
218 if (!pm8001_use_tasklet)
221 /* Tasklet for non msi-x interrupt handler */
222 if ((!pm8001_ha->pdev->msix_cap || !pci_msi_enabled()) ||
223 (pm8001_ha->chip_id == chip_8001)) {
224 tasklet_init(&pm8001_ha->tasklet[0], pm8001_tasklet,
225 (unsigned long)&(pm8001_ha->irq_vector[0]));
228 for (i = 0; i < PM8001_MAX_MSIX_VEC; i++)
229 tasklet_init(&pm8001_ha->tasklet[i], pm8001_tasklet,
230 (unsigned long)&(pm8001_ha->irq_vector[i]));
233 static void pm8001_kill_tasklet(struct pm8001_hba_info *pm8001_ha)
237 if (!pm8001_use_tasklet)
240 /* For non-msix and msix interrupts */
241 if ((!pm8001_ha->pdev->msix_cap || !pci_msi_enabled()) ||
242 (pm8001_ha->chip_id == chip_8001)) {
243 tasklet_kill(&pm8001_ha->tasklet[0]);
247 for (i = 0; i < PM8001_MAX_MSIX_VEC; i++)
248 tasklet_kill(&pm8001_ha->tasklet[i]);
251 static irqreturn_t pm8001_handle_irq(struct pm8001_hba_info *pm8001_ha,
254 if (unlikely(!pm8001_ha))
257 if (!PM8001_CHIP_DISP->is_our_interrupt(pm8001_ha))
260 if (!pm8001_use_tasklet)
261 return PM8001_CHIP_DISP->isr(pm8001_ha, irq);
263 tasklet_schedule(&pm8001_ha->tasklet[irq]);
268 * pm8001_interrupt_handler_msix - main MSIX interrupt handler.
269 * It obtains the vector number and calls the equivalent bottom
270 * half or services directly.
271 * @irq: interrupt number
272 * @opaque: the passed outbound queue/vector. Host structure is
273 * retrieved from the same.
275 static irqreturn_t pm8001_interrupt_handler_msix(int irq, void *opaque)
277 struct isr_param *irq_vector = (struct isr_param *)opaque;
278 struct pm8001_hba_info *pm8001_ha = irq_vector->drv_inst;
280 return pm8001_handle_irq(pm8001_ha, irq_vector->irq_id);
284 * pm8001_interrupt_handler_intx - main INTx interrupt handler.
285 * @irq: interrupt number
286 * @dev_id: sas_ha structure. The HBA is retrieved from sas_ha structure.
289 static irqreturn_t pm8001_interrupt_handler_intx(int irq, void *dev_id)
291 struct sas_ha_struct *sha = dev_id;
292 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
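	/* Legacy INTx provides a single vector, so always service vector 0. */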
294 return pm8001_handle_irq(pm8001_ha, 0);
297 static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha);
298 static void pm8001_free_irq(struct pm8001_hba_info *pm8001_ha);
301 * pm8001_alloc - initialize our hba structure and its DMA memory areas.
302 * @pm8001_ha: our hba structure.
303 * @ent: PCI device ID structure to match on
305 static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,
306 const struct pci_device_id *ent)
308 int i, count = 0, rc = 0;
309 u32 ci_offset, ib_offset, ob_offset, pi_offset;
310 struct inbound_queue_table *ibq;
311 struct outbound_queue_table *obq;
313 spin_lock_init(&pm8001_ha->lock);
314 spin_lock_init(&pm8001_ha->bitmap_lock);
315 pm8001_dbg(pm8001_ha, INIT, "pm8001_alloc: PHY:%x\n",
316 pm8001_ha->chip->n_phy);
318 /* Request Interrupt */
319 rc = pm8001_request_irq(pm8001_ha);
323 count = pm8001_ha->max_q_num;
324 /* Queues are chosen based on the number of cores/msix availability */
325 ib_offset = pm8001_ha->ib_offset = USI_MAX_MEMCNT_BASE;
326 ci_offset = pm8001_ha->ci_offset = ib_offset + count;
327 ob_offset = pm8001_ha->ob_offset = ci_offset + count;
328 pi_offset = pm8001_ha->pi_offset = ob_offset + count;
329 pm8001_ha->max_memcnt = pi_offset + count;
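	/*
	 * memoryMap.region[] layout: the fixed regions (AAP1, IOP, NVMD,
	 * FW_FLASH, FORENSIC_MEM, ...) occupy the first USI_MAX_MEMCNT_BASE
	 * slots, followed by 'count' inbound queue regions, then the inbound
	 * CIs, the outbound queues and the outbound PIs, as computed above.
	 */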
331 for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
332 pm8001_phy_init(pm8001_ha, i);
333 pm8001_ha->port[i].wide_port_phymap = 0;
334 pm8001_ha->port[i].port_attached = 0;
335 pm8001_ha->port[i].port_state = 0;
336 INIT_LIST_HEAD(&pm8001_ha->port[i].list);
339 /* MPI Memory region 1 for AAP Event Log for fw */
340 pm8001_ha->memoryMap.region[AAP1].num_elements = 1;
341 pm8001_ha->memoryMap.region[AAP1].element_size = PM8001_EVENT_LOG_SIZE;
342 pm8001_ha->memoryMap.region[AAP1].total_len = PM8001_EVENT_LOG_SIZE;
343 pm8001_ha->memoryMap.region[AAP1].alignment = 32;
345 /* MPI Memory region 2 for IOP Event Log for fw */
346 pm8001_ha->memoryMap.region[IOP].num_elements = 1;
347 pm8001_ha->memoryMap.region[IOP].element_size = PM8001_EVENT_LOG_SIZE;
348 pm8001_ha->memoryMap.region[IOP].total_len = PM8001_EVENT_LOG_SIZE;
349 pm8001_ha->memoryMap.region[IOP].alignment = 32;
351 for (i = 0; i < count; i++) {
352 ibq = &pm8001_ha->inbnd_q_tbl[i];
353 spin_lock_init(&ibq->iq_lock);
354 /* MPI Memory region 3 for consumer Index of inbound queues */
355 pm8001_ha->memoryMap.region[ci_offset+i].num_elements = 1;
356 pm8001_ha->memoryMap.region[ci_offset+i].element_size = 4;
357 pm8001_ha->memoryMap.region[ci_offset+i].total_len = 4;
358 pm8001_ha->memoryMap.region[ci_offset+i].alignment = 4;
360 if ((ent->driver_data) != chip_8001) {
361 /* MPI Memory region 5 inbound queues */
362 pm8001_ha->memoryMap.region[ib_offset+i].num_elements =
364 pm8001_ha->memoryMap.region[ib_offset+i].element_size
366 pm8001_ha->memoryMap.region[ib_offset+i].total_len =
367 PM8001_MPI_QUEUE * 128;
368 pm8001_ha->memoryMap.region[ib_offset+i].alignment
371 pm8001_ha->memoryMap.region[ib_offset+i].num_elements =
373 pm8001_ha->memoryMap.region[ib_offset+i].element_size
375 pm8001_ha->memoryMap.region[ib_offset+i].total_len =
376 PM8001_MPI_QUEUE * 64;
377 pm8001_ha->memoryMap.region[ib_offset+i].alignment = 64;
381 for (i = 0; i < count; i++) {
382 obq = &pm8001_ha->outbnd_q_tbl[i];
383 spin_lock_init(&obq->oq_lock);
384 /* MPI Memory region 4 for producer Index of outbound queues */
385 pm8001_ha->memoryMap.region[pi_offset+i].num_elements = 1;
386 pm8001_ha->memoryMap.region[pi_offset+i].element_size = 4;
387 pm8001_ha->memoryMap.region[pi_offset+i].total_len = 4;
388 pm8001_ha->memoryMap.region[pi_offset+i].alignment = 4;
390 if (ent->driver_data != chip_8001) {
391 /* MPI Memory region 6 Outbound queues */
392 pm8001_ha->memoryMap.region[ob_offset+i].num_elements =
394 pm8001_ha->memoryMap.region[ob_offset+i].element_size
396 pm8001_ha->memoryMap.region[ob_offset+i].total_len =
397 PM8001_MPI_QUEUE * 128;
398 pm8001_ha->memoryMap.region[ob_offset+i].alignment
401 /* MPI Memory region 6 Outbound queues */
402 pm8001_ha->memoryMap.region[ob_offset+i].num_elements =
404 pm8001_ha->memoryMap.region[ob_offset+i].element_size
406 pm8001_ha->memoryMap.region[ob_offset+i].total_len =
407 PM8001_MPI_QUEUE * 64;
408 pm8001_ha->memoryMap.region[ob_offset+i].alignment = 64;
412 /* Memory region write DMA*/
413 pm8001_ha->memoryMap.region[NVMD].num_elements = 1;
414 pm8001_ha->memoryMap.region[NVMD].element_size = 4096;
415 pm8001_ha->memoryMap.region[NVMD].total_len = 4096;
417 /* Memory region for fw flash */
418 pm8001_ha->memoryMap.region[FW_FLASH].total_len = 4096;
420 pm8001_ha->memoryMap.region[FORENSIC_MEM].num_elements = 1;
421 pm8001_ha->memoryMap.region[FORENSIC_MEM].total_len = 0x10000;
422 pm8001_ha->memoryMap.region[FORENSIC_MEM].element_size = 0x10000;
423 pm8001_ha->memoryMap.region[FORENSIC_MEM].alignment = 0x10000;
424 for (i = 0; i < pm8001_ha->max_memcnt; i++) {
425 struct mpi_mem *region = &pm8001_ha->memoryMap.region[i];
427 if (pm8001_mem_alloc(pm8001_ha->pdev,
430 &region->phys_addr_hi,
431 &region->phys_addr_lo,
433 region->alignment) != 0) {
434 pm8001_dbg(pm8001_ha, FAIL, "Mem%d alloc failed\n", i);
439 /* Memory region for devices*/
440 pm8001_ha->devices = kzalloc(PM8001_MAX_DEVICES
441 * sizeof(struct pm8001_device), GFP_KERNEL);
442 if (!pm8001_ha->devices) {
446 for (i = 0; i < PM8001_MAX_DEVICES; i++) {
447 pm8001_ha->devices[i].dev_type = SAS_PHY_UNUSED;
448 pm8001_ha->devices[i].id = i;
449 pm8001_ha->devices[i].device_id = PM8001_MAX_DEVICES;
450 atomic_set(&pm8001_ha->devices[i].running_req, 0);
452 pm8001_ha->flags = PM8001F_INIT_TIME;
456 for (i = 0; i < pm8001_ha->max_memcnt; i++) {
457 if (pm8001_ha->memoryMap.region[i].virt_ptr != NULL) {
458 dma_free_coherent(&pm8001_ha->pdev->dev,
459 (pm8001_ha->memoryMap.region[i].total_len +
460 pm8001_ha->memoryMap.region[i].alignment),
461 pm8001_ha->memoryMap.region[i].virt_ptr,
462 pm8001_ha->memoryMap.region[i].phys_addr);
470 * pm8001_ioremap - remap the PCI BAR physical addresses to kernel virtual
471 * addresses so that we can access them.
472 * @pm8001_ha: our hba structure.
474 static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha)
478 struct pci_dev *pdev;
480 pdev = pm8001_ha->pdev;
481 /* map pci mem (PMC pci base 0-3)*/
482 for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
484 ** logical BARs for SPC:
485 ** bar 0 and 1 - logical BAR0
486 ** bar 2 and 3 - logical BAR1
487 ** bar4 - logical BAR2
488 ** bar5 - logical BAR3
489 ** Skip the appropriate assignments:
491 if ((bar == 1) || (bar == 3))
493 if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
494 pm8001_ha->io_mem[logicalBar].membase =
495 pci_resource_start(pdev, bar);
496 pm8001_ha->io_mem[logicalBar].memsize =
497 pci_resource_len(pdev, bar);
498 pm8001_ha->io_mem[logicalBar].memvirtaddr =
499 ioremap(pm8001_ha->io_mem[logicalBar].membase,
500 pm8001_ha->io_mem[logicalBar].memsize);
501 if (!pm8001_ha->io_mem[logicalBar].memvirtaddr) {
502 pm8001_dbg(pm8001_ha, INIT,
503 "Failed to ioremap bar %d, logicalBar %d",
507 pm8001_dbg(pm8001_ha, INIT,
508 "base addr %llx virt_addr=%llx len=%d\n",
509 (u64)pm8001_ha->io_mem[logicalBar].membase,
511 pm8001_ha->io_mem[logicalBar].memvirtaddr,
512 pm8001_ha->io_mem[logicalBar].memsize);
514 pm8001_ha->io_mem[logicalBar].membase = 0;
515 pm8001_ha->io_mem[logicalBar].memsize = 0;
516 pm8001_ha->io_mem[logicalBar].memvirtaddr = NULL;
524 * pm8001_pci_alloc - initialize our ha card structure
525 * @pdev: pci device.
526 * @ent: pci device id.
527 * @shost: scsi host struct which has been initialized before.
529 static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,
530 const struct pci_device_id *ent,
531 struct Scsi_Host *shost)
534 struct pm8001_hba_info *pm8001_ha;
535 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
537 pm8001_ha = sha->lldd_ha;
541 pm8001_ha->pdev = pdev;
542 pm8001_ha->dev = &pdev->dev;
543 pm8001_ha->chip_id = ent->driver_data;
544 pm8001_ha->chip = &pm8001_chips[pm8001_ha->chip_id];
545 pm8001_ha->irq = pdev->irq;
546 pm8001_ha->sas = sha;
547 pm8001_ha->shost = shost;
548 pm8001_ha->id = pm8001_id++;
549 pm8001_ha->logging_level = logging_level;
550 pm8001_ha->non_fatal_count = 0;
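	/*
	 * The module parameter takes the raw 1/2/4/8 bit values documented at
	 * the top of this file; the controller-facing LINKRATE_* masks sit
	 * eight bits higher, hence the shift below. Out-of-range values
	 * enable all link rates.
	 */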
551 if (link_rate >= 1 && link_rate <= 15)
552 pm8001_ha->link_rate = (link_rate << 8);
554 pm8001_ha->link_rate = LINKRATE_15 | LINKRATE_30 |
555 LINKRATE_60 | LINKRATE_120;
556 pm8001_dbg(pm8001_ha, FAIL,
557 "Setting link rate to default value\n");
559 sprintf(pm8001_ha->name, "%s%d", DRV_NAME, pm8001_ha->id);
560 /* IOMB size is 128 for 8088/89 controllers */
561 if (pm8001_ha->chip_id != chip_8001)
562 pm8001_ha->iomb_size = IOMB_SIZE_SPCV;
564 pm8001_ha->iomb_size = IOMB_SIZE_SPC;
566 pm8001_init_tasklet(pm8001_ha);
568 if (pm8001_ioremap(pm8001_ha))
569 goto failed_pci_alloc;
570 if (!pm8001_alloc(pm8001_ha, ent))
573 pm8001_free(pm8001_ha);
578 * pci_go_44 - pm8001-specific: DMA addressing is 44-bit rather than 64-bit.
579 * @pdev: pci device.
581 static int pci_go_44(struct pci_dev *pdev)
585 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
587 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
589 dev_printk(KERN_ERR, &pdev->dev,
590 "32-bit DMA enable failed\n");
596 * pm8001_prep_sas_ha_init - allocate memory in the general hba struct and initialize it.
597 * @shost: scsi host which has been allocated outside.
598 * @chip_info: our ha struct.
600 static int pm8001_prep_sas_ha_init(struct Scsi_Host *shost,
601 const struct pm8001_chip_info *chip_info)
604 struct asd_sas_phy **arr_phy;
605 struct asd_sas_port **arr_port;
606 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
608 phy_nr = chip_info->n_phy;
610 memset(sha, 0x00, sizeof(*sha));
611 arr_phy = kcalloc(phy_nr, sizeof(void *), GFP_KERNEL);
614 arr_port = kcalloc(port_nr, sizeof(void *), GFP_KERNEL);
618 sha->sas_phy = arr_phy;
619 sha->sas_port = arr_port;
620 sha->lldd_ha = kzalloc(sizeof(struct pm8001_hba_info), GFP_KERNEL);
624 shost->transportt = pm8001_stt;
625 shost->max_id = PM8001_MAX_DEVICES;
626 shost->unique_id = pm8001_id;
627 shost->max_cmd_len = 16;
638 * pm8001_post_sas_ha_init - initialize general hba struct defined in libsas
639 * @shost: scsi host which has been allocated outside
640 * @chip_info: our ha struct.
642 static void pm8001_post_sas_ha_init(struct Scsi_Host *shost,
643 const struct pm8001_chip_info *chip_info)
646 struct pm8001_hba_info *pm8001_ha;
647 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
649 pm8001_ha = sha->lldd_ha;
650 for (i = 0; i < chip_info->n_phy; i++) {
651 sha->sas_phy[i] = &pm8001_ha->phy[i].sas_phy;
652 sha->sas_port[i] = &pm8001_ha->port[i].sas_port;
653 sha->sas_phy[i]->sas_addr =
654 (u8 *)&pm8001_ha->phy[i].dev_sas_addr;
656 sha->sas_ha_name = DRV_NAME;
657 sha->dev = pm8001_ha->dev;
658 sha->strict_wide_ports = 1;
659 sha->sas_addr = &pm8001_ha->sas_addr[0];
660 sha->num_phys = chip_info->n_phy;
665 * pm8001_init_sas_add - initialize the SAS address
666 * @pm8001_ha: our ha struct.
668 * Read the SAS address from the controller's flash/EEPROM; if read_wwn is
669 * disabled, fall back to a fixed manufacturing default address.
671 static int pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
673 DECLARE_COMPLETION_ONSTACK(completion);
674 struct pm8001_ioctl_payload payload;
675 unsigned long time_remaining;
681 if (!pm8001_read_wwn) {
682 __be64 dev_sas_addr = cpu_to_be64(0x50010c600047f9d0ULL);
684 for (i = 0; i < pm8001_ha->chip->n_phy; i++)
685 memcpy(&pm8001_ha->phy[i].dev_sas_addr, &dev_sas_addr,
687 memcpy(pm8001_ha->sas_addr, &pm8001_ha->phy[0].dev_sas_addr,
693 * For new SPC controllers the WWN is stored in flash vpd. For SPC/SPCve
694 * controllers the WWN is stored in EEPROM, and for older SPC controllers
695 * it is stored in NVMD.
697 if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) {
698 pm8001_dbg(pm8001_ha, FAIL, "controller is in fatal error state\n");
702 pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
703 pm8001_ha->nvmd_completion = &completion;
705 if (pm8001_ha->chip_id == chip_8001) {
706 if (deviceid == 0x8081 || deviceid == 0x0042) {
707 payload.minor_function = 4;
708 payload.rd_length = 4096;
710 payload.minor_function = 0;
711 payload.rd_length = 128;
713 } else if ((pm8001_ha->chip_id == chip_8070 ||
714 pm8001_ha->chip_id == chip_8072) &&
715 pm8001_ha->pdev->subsystem_vendor == PCI_VENDOR_ID_ATTO) {
716 payload.minor_function = 4;
717 payload.rd_length = 4096;
719 payload.minor_function = 1;
720 payload.rd_length = 4096;
723 payload.func_specific = kzalloc(payload.rd_length, GFP_KERNEL);
724 if (!payload.func_specific) {
725 pm8001_dbg(pm8001_ha, FAIL, "mem alloc fail\n");
728 rc = PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
730 kfree(payload.func_specific);
731 pm8001_dbg(pm8001_ha, FAIL, "nvmd failed\n");
734 time_remaining = wait_for_completion_timeout(&completion,
735 msecs_to_jiffies(60*1000)); // 1 min
736 if (!time_remaining) {
737 kfree(payload.func_specific);
738 pm8001_dbg(pm8001_ha, FAIL, "get_nvmd_req timeout\n");
743 for (i = 0, j = 0; i <= 7; i++, j++) {
744 if (pm8001_ha->chip_id == chip_8001) {
745 if (deviceid == 0x8081)
746 pm8001_ha->sas_addr[j] =
747 payload.func_specific[0x704 + i];
748 else if (deviceid == 0x0042)
749 pm8001_ha->sas_addr[j] =
750 payload.func_specific[0x010 + i];
751 } else if ((pm8001_ha->chip_id == chip_8070 ||
752 pm8001_ha->chip_id == chip_8072) &&
753 pm8001_ha->pdev->subsystem_vendor == PCI_VENDOR_ID_ATTO) {
754 pm8001_ha->sas_addr[j] =
755 payload.func_specific[0x010 + i];
757 pm8001_ha->sas_addr[j] =
758 payload.func_specific[0x804 + i];
760 memcpy(sas_add, pm8001_ha->sas_addr, SAS_ADDR_SIZE);
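	/*
	 * Give each group of four PHYs its own SAS address: PHYs 0-3 use the
	 * base address read above, PHYs 4-7 use base + 4, and so on (one
	 * address per potential 4-PHY wide port).
	 */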
761 for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
762 if (i && ((i % 4) == 0))
763 sas_add[7] = sas_add[7] + 4;
764 memcpy(&pm8001_ha->phy[i].dev_sas_addr,
765 sas_add, SAS_ADDR_SIZE);
766 pm8001_dbg(pm8001_ha, INIT, "phy %d sas_addr = %016llx\n", i,
767 pm8001_ha->phy[i].dev_sas_addr);
769 kfree(payload.func_specific);
775 * pm8001_get_phy_settings_info : Read phy setting values.
776 * @pm8001_ha : our hba.
778 static int pm8001_get_phy_settings_info(struct pm8001_hba_info *pm8001_ha)
780 DECLARE_COMPLETION_ONSTACK(completion);
781 struct pm8001_ioctl_payload payload;
784 if (!pm8001_read_wwn)
787 pm8001_ha->nvmd_completion = &completion;
788 /* SAS ADDRESS read from flash / EEPROM */
789 payload.minor_function = 6;
791 payload.rd_length = 4096;
792 payload.func_specific = kzalloc(4096, GFP_KERNEL);
793 if (!payload.func_specific)
795 /* Read phy setting values from flash */
796 rc = PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
798 kfree(payload.func_specific);
799 pm8001_dbg(pm8001_ha, INIT, "nvmd failed\n");
802 wait_for_completion(&completion);
803 pm8001_set_phy_profile(pm8001_ha, sizeof(u8), payload.func_specific);
804 kfree(payload.func_specific);
809 struct pm8001_mpi3_phy_pg_trx_config {
822 * pm8001_get_internal_phy_settings - Retrieves the internal PHY settings
823 * @pm8001_ha : our adapter
824 * @phycfg : PHY config page to populate
827 void pm8001_get_internal_phy_settings(struct pm8001_hba_info *pm8001_ha,
828 struct pm8001_mpi3_phy_pg_trx_config *phycfg)
830 phycfg->LaneLosCfg = 0x00000132;
831 phycfg->LanePgaCfg1 = 0x00203949;
832 phycfg->LanePisoCfg1 = 0x000000FF;
833 phycfg->LanePisoCfg2 = 0xFF000001;
834 phycfg->LanePisoCfg3 = 0xE7011300;
835 phycfg->LanePisoCfg4 = 0x631C40C0;
836 phycfg->LanePisoCfg5 = 0xF8102036;
837 phycfg->LanePisoCfg6 = 0xF74A1000;
838 phycfg->LaneBctCtrl = 0x00FB33F8;
842 * pm8001_get_external_phy_settings - Retrieves the external PHY settings
843 * @pm8001_ha : our adapter
844 * @phycfg : PHY config page to populate
847 void pm8001_get_external_phy_settings(struct pm8001_hba_info *pm8001_ha,
848 struct pm8001_mpi3_phy_pg_trx_config *phycfg)
850 phycfg->LaneLosCfg = 0x00000132;
851 phycfg->LanePgaCfg1 = 0x00203949;
852 phycfg->LanePisoCfg1 = 0x000000FF;
853 phycfg->LanePisoCfg2 = 0xFF000001;
854 phycfg->LanePisoCfg3 = 0xE7011300;
855 phycfg->LanePisoCfg4 = 0x63349140;
856 phycfg->LanePisoCfg5 = 0xF8102036;
857 phycfg->LanePisoCfg6 = 0xF80D9300;
858 phycfg->LaneBctCtrl = 0x00FB33F8;
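/*
 * Note: the internal and external settings above differ only in the
 * LanePisoCfg4 and LanePisoCfg6 values; the remaining lane parameters are
 * identical.
 */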
862 * pm8001_get_phy_mask - Retrieves the mask that denotes if a PHY is int/ext
863 * @pm8001_ha : our adapter
864 * @phymask : The PHY mask
867 void pm8001_get_phy_mask(struct pm8001_hba_info *pm8001_ha, int *phymask)
869 switch (pm8001_ha->pdev->subsystem_device) {
870 case 0x0070: /* H1280 - 8 external 0 internal */
871 case 0x0072: /* H12F0 - 16 external 0 internal */
875 case 0x0071: /* H1208 - 0 external 8 internal */
876 case 0x0073: /* H120F - 0 external 16 internal */
880 case 0x0080: /* H1244 - 4 external 4 internal */
884 case 0x0081: /* H1248 - 4 external 8 internal */
888 case 0x0082: /* H1288 - 8 external 8 internal */
893 pm8001_dbg(pm8001_ha, INIT,
894 "Unknown subsystem device=0x%.04x\n",
895 pm8001_ha->pdev->subsystem_device);
900 * pm8001_set_phy_settings_ven_117c_12G() - Configure ATTO 12Gb PHY settings
901 * @pm8001_ha : our adapter
904 int pm8001_set_phy_settings_ven_117c_12G(struct pm8001_hba_info *pm8001_ha)
906 struct pm8001_mpi3_phy_pg_trx_config phycfg_int;
907 struct pm8001_mpi3_phy_pg_trx_config phycfg_ext;
911 memset(&phycfg_int, 0, sizeof(phycfg_int));
912 memset(&phycfg_ext, 0, sizeof(phycfg_ext));
914 pm8001_get_internal_phy_settings(pm8001_ha, &phycfg_int);
915 pm8001_get_external_phy_settings(pm8001_ha, &phycfg_ext);
916 pm8001_get_phy_mask(pm8001_ha, &phymask);
918 for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
919 if (phymask & (1 << i)) {/* Internal PHY */
920 pm8001_set_phy_profile_single(pm8001_ha, i,
921 sizeof(phycfg_int) / sizeof(u32),
924 } else { /* External PHY */
925 pm8001_set_phy_profile_single(pm8001_ha, i,
926 sizeof(phycfg_ext) / sizeof(u32),
935 * pm8001_configure_phy_settings - Configures PHY settings based on vendor ID.
936 * @pm8001_ha : our hba.
938 static int pm8001_configure_phy_settings(struct pm8001_hba_info *pm8001_ha)
940 switch (pm8001_ha->pdev->subsystem_vendor) {
941 case PCI_VENDOR_ID_ATTO:
942 if (pm8001_ha->pdev->device == 0x0042) /* 6Gb */
945 return pm8001_set_phy_settings_ven_117c_12G(pm8001_ha);
947 case PCI_VENDOR_ID_ADAPTEC2:
952 return pm8001_get_phy_settings_info(pm8001_ha);
957 * pm8001_setup_msix - enable MSI-X interrupt
958 * @pm8001_ha: our ha struct.
960 static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
962 unsigned int allocated_irq_vectors;
965 /* SPCv controllers support 64 MSI-X vectors */
966 if (pm8001_ha->chip_id == chip_8001) {
967 rc = pci_alloc_irq_vectors(pm8001_ha->pdev, 1, 1,
971 * Queue index #0 is always used for housekeeping, so don't
972 * include it in the affinity spreading.
974 struct irq_affinity desc = {
977 rc = pci_alloc_irq_vectors_affinity(
978 pm8001_ha->pdev, 2, PM8001_MAX_MSIX_VEC,
979 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc);
982 allocated_irq_vectors = rc;
986 /* Assigns the number of interrupts */
987 pm8001_ha->number_of_intr = allocated_irq_vectors;
989 /* Update the maximum queue number in the HBA structure */
990 pm8001_ha->max_q_num = allocated_irq_vectors;
992 pm8001_dbg(pm8001_ha, INIT,
993 "pci_alloc_irq_vectors request ret:%d no of intr %d\n",
994 rc, pm8001_ha->number_of_intr);
998 static u32 pm8001_request_msix(struct pm8001_hba_info *pm8001_ha)
1001 int flag = 0, rc = 0;
1002 int nr_irqs = pm8001_ha->number_of_intr;
1004 if (pm8001_ha->chip_id != chip_8001)
1005 flag &= ~IRQF_SHARED;
1007 pm8001_dbg(pm8001_ha, INIT,
1008 "pci_enable_msix request number of intr %d\n",
1009 pm8001_ha->number_of_intr);
1011 if (nr_irqs > ARRAY_SIZE(pm8001_ha->intr_drvname))
1012 nr_irqs = ARRAY_SIZE(pm8001_ha->intr_drvname);
1014 for (i = 0; i < nr_irqs; i++) {
1015 snprintf(pm8001_ha->intr_drvname[i],
1016 sizeof(pm8001_ha->intr_drvname[0]),
1017 "%s-%d", pm8001_ha->name, i);
1018 pm8001_ha->irq_vector[i].irq_id = i;
1019 pm8001_ha->irq_vector[i].drv_inst = pm8001_ha;
1021 rc = request_irq(pci_irq_vector(pm8001_ha->pdev, i),
1022 pm8001_interrupt_handler_msix, flag,
1023 pm8001_ha->intr_drvname[i],
1024 &(pm8001_ha->irq_vector[i]));
1026 for (j = 0; j < i; j++) {
1027 free_irq(pci_irq_vector(pm8001_ha->pdev, i),
1028 &(pm8001_ha->irq_vector[i]));
1030 pci_free_irq_vectors(pm8001_ha->pdev);
1039 * pm8001_request_irq - register interrupt
1040 * @pm8001_ha: our ha struct.
1042 static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha)
1044 struct pci_dev *pdev = pm8001_ha->pdev;
1047 if (pm8001_use_msix && pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
1048 rc = pm8001_setup_msix(pm8001_ha);
1050 pm8001_dbg(pm8001_ha, FAIL,
1051 "pm8001_setup_irq failed [ret: %d]\n", rc);
1055 if (!pdev->msix_cap || !pci_msi_enabled())
1058 rc = pm8001_request_msix(pm8001_ha);
1062 pm8001_ha->use_msix = true;
1068 /* Initialize the INT-X interrupt */
1069 pm8001_dbg(pm8001_ha, INIT, "MSIX not supported!!!\n");
1070 pm8001_ha->use_msix = false;
1071 pm8001_ha->irq_vector[0].irq_id = 0;
1072 pm8001_ha->irq_vector[0].drv_inst = pm8001_ha;
1074 return request_irq(pdev->irq, pm8001_interrupt_handler_intx,
1075 IRQF_SHARED, pm8001_ha->name,
1076 SHOST_TO_SAS_HA(pm8001_ha->shost));
1079 static void pm8001_free_irq(struct pm8001_hba_info *pm8001_ha)
1081 struct pci_dev *pdev = pm8001_ha->pdev;
1084 if (pm8001_ha->use_msix) {
1085 for (i = 0; i < pm8001_ha->number_of_intr; i++)
1086 synchronize_irq(pci_irq_vector(pdev, i));
1088 for (i = 0; i < pm8001_ha->number_of_intr; i++)
1089 free_irq(pci_irq_vector(pdev, i), &pm8001_ha->irq_vector[i]);
1091 pci_free_irq_vectors(pdev);
1096 free_irq(pm8001_ha->irq, pm8001_ha->sas);
1100 * pm8001_pci_probe - probe supported device
1101 * @pdev: pci device which kernel has been prepared for.
1102 * @ent: pci device id
1104 * This is the main initialization function: it is invoked when a new PCI
1105 * device bound to this driver is probed. All structure and hardware
1106 * initialization, including interrupt registration, is done here.
1108 static int pm8001_pci_probe(struct pci_dev *pdev,
1109 const struct pci_device_id *ent)
1114 struct pm8001_hba_info *pm8001_ha;
1115 struct Scsi_Host *shost = NULL;
1116 const struct pm8001_chip_info *chip;
1117 struct sas_ha_struct *sha;
1119 dev_printk(KERN_INFO, &pdev->dev,
1120 "pm80xx: driver version %s\n", DRV_VERSION);
1121 rc = pci_enable_device(pdev);
1123 goto err_out_enable;
1124 pci_set_master(pdev);
1126 * Enable pci slot busmaster by setting pci command register.
1127 * This is required by FW for Cyclone card.
1130 pci_read_config_dword(pdev, PCI_COMMAND, &pci_reg);
1132 pci_write_config_dword(pdev, PCI_COMMAND, pci_reg);
1133 rc = pci_request_regions(pdev, DRV_NAME);
1135 goto err_out_disable;
1136 rc = pci_go_44(pdev);
1138 goto err_out_regions;
1140 shost = scsi_host_alloc(&pm8001_sht, sizeof(void *));
1143 goto err_out_regions;
1145 chip = &pm8001_chips[ent->driver_data];
1146 sha = kzalloc(sizeof(struct sas_ha_struct), GFP_KERNEL);
1149 goto err_out_free_host;
1151 SHOST_TO_SAS_HA(shost) = sha;
1153 rc = pm8001_prep_sas_ha_init(shost, chip);
1158 pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost));
1159 /* ent->driver_data is used to differentiate between controllers */
1160 pm8001_ha = pm8001_pci_alloc(pdev, ent, shost);
1166 PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
1167 rc = PM8001_CHIP_DISP->chip_init(pm8001_ha);
1169 pm8001_dbg(pm8001_ha, FAIL,
1170 "chip_init failed [ret: %d]\n", rc);
1171 goto err_out_ha_free;
1174 rc = pm8001_init_ccb_tag(pm8001_ha);
1176 goto err_out_enable;
1179 PM8001_CHIP_DISP->chip_post_init(pm8001_ha);
1181 if (pm8001_ha->number_of_intr > 1) {
1182 shost->nr_hw_queues = pm8001_ha->number_of_intr - 1;
1184 * For now, ensure we're not sent too many commands by setting
1185 * host_tagset. This is also required if we start using request
1188 shost->host_tagset = 1;
1191 rc = scsi_add_host(shost, &pdev->dev);
1193 goto err_out_ha_free;
1195 PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, 0);
1196 if (pm8001_ha->chip_id != chip_8001) {
1197 for (i = 1; i < pm8001_ha->number_of_intr; i++)
1198 PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, i);
1199 /* setup thermal configuration. */
1200 pm80xx_set_thermal_config(pm8001_ha);
1203 rc = pm8001_init_sas_add(pm8001_ha);
1206 /* phy setting support for motherboard controller */
1207 rc = pm8001_configure_phy_settings(pm8001_ha);
1211 pm8001_post_sas_ha_init(shost, chip);
1212 rc = sas_register_ha(SHOST_TO_SAS_HA(shost));
1214 pm8001_dbg(pm8001_ha, FAIL,
1215 "sas_register_ha failed [ret: %d]\n", rc);
1218 list_add_tail(&pm8001_ha->list, &hba_list);
1219 pm8001_ha->flags = PM8001F_RUN_TIME;
1220 scsi_scan_host(pm8001_ha->shost);
1224 scsi_remove_host(pm8001_ha->shost);
1226 pm8001_free(pm8001_ha);
1230 scsi_host_put(shost);
1232 pci_release_regions(pdev);
1234 pci_disable_device(pdev);
1240 * pm8001_init_ccb_tag - allocate memory for CCBs and tags.
1241 * @pm8001_ha: our hba card information.
1243 static int pm8001_init_ccb_tag(struct pm8001_hba_info *pm8001_ha)
1245 struct Scsi_Host *shost = pm8001_ha->shost;
1246 struct device *dev = pm8001_ha->dev;
1247 u32 max_out_io, ccb_count;
1250 max_out_io = pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_out_io;
1251 ccb_count = min_t(int, PM8001_MAX_CCB, max_out_io);
1253 shost->can_queue = ccb_count - PM8001_RESERVE_SLOT;
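	/*
	 * A few CCBs/tags (PM8001_RESERVE_SLOT) are held back for internal
	 * commands such as TMFs, so the queue depth advertised to the SCSI
	 * midlayer is reduced accordingly.
	 */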
1255 pm8001_ha->rsvd_tags = bitmap_zalloc(PM8001_RESERVE_SLOT, GFP_KERNEL);
1256 if (!pm8001_ha->rsvd_tags)
1259 /* Memory region for ccb_info*/
1260 pm8001_ha->ccb_count = ccb_count;
1261 pm8001_ha->ccb_info =
1262 kcalloc(ccb_count, sizeof(struct pm8001_ccb_info), GFP_KERNEL);
1263 if (!pm8001_ha->ccb_info) {
1264 pm8001_dbg(pm8001_ha, FAIL,
1265 "Unable to allocate memory for ccb\n");
1268 for (i = 0; i < ccb_count; i++) {
1269 pm8001_ha->ccb_info[i].buf_prd = dma_alloc_coherent(dev,
1270 sizeof(struct pm8001_prd) * PM8001_MAX_DMA_SG,
1271 &pm8001_ha->ccb_info[i].ccb_dma_handle,
1273 if (!pm8001_ha->ccb_info[i].buf_prd) {
1274 pm8001_dbg(pm8001_ha, FAIL,
1275 "ccb prd memory allocation error\n");
1278 pm8001_ha->ccb_info[i].task = NULL;
1279 pm8001_ha->ccb_info[i].ccb_tag = PM8001_INVALID_TAG;
1280 pm8001_ha->ccb_info[i].device = NULL;
1286 kfree(pm8001_ha->devices);
1291 static void pm8001_pci_remove(struct pci_dev *pdev)
1293 struct sas_ha_struct *sha = pci_get_drvdata(pdev);
1294 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
1297 sas_unregister_ha(sha);
1298 sas_remove_host(pm8001_ha->shost);
1299 list_del(&pm8001_ha->list);
1300 PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF);
1301 PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
1303 pm8001_free_irq(pm8001_ha);
1304 pm8001_kill_tasklet(pm8001_ha);
1305 scsi_host_put(pm8001_ha->shost);
1307 for (i = 0; i < pm8001_ha->ccb_count; i++) {
1308 dma_free_coherent(&pm8001_ha->pdev->dev,
1309 sizeof(struct pm8001_prd) * PM8001_MAX_DMA_SG,
1310 pm8001_ha->ccb_info[i].buf_prd,
1311 pm8001_ha->ccb_info[i].ccb_dma_handle);
1313 kfree(pm8001_ha->ccb_info);
1314 kfree(pm8001_ha->devices);
1316 pm8001_free(pm8001_ha);
1317 kfree(sha->sas_phy);
1318 kfree(sha->sas_port);
1320 pci_release_regions(pdev);
1321 pci_disable_device(pdev);
1325 * pm8001_pci_suspend - power management suspend main entry point
1326 * @dev: Device struct
1328 * Return: 0 on success, anything else on error.
1330 static int __maybe_unused pm8001_pci_suspend(struct device *dev)
1332 struct pci_dev *pdev = to_pci_dev(dev);
1333 struct sas_ha_struct *sha = pci_get_drvdata(pdev);
1334 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
1336 sas_suspend_ha(sha);
1337 flush_workqueue(pm8001_wq);
1338 scsi_block_requests(pm8001_ha->shost);
1339 if (!pdev->pm_cap) {
1340 dev_err(dev, " PCI PM not supported\n");
1343 PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF);
1344 PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
1346 pm8001_free_irq(pm8001_ha);
1347 pm8001_kill_tasklet(pm8001_ha);
1349 pm8001_info(pm8001_ha, "pdev=0x%p, slot=%s, entering "
1350 "suspended state\n", pdev,
1356 * pm8001_pci_resume - power management resume main entry point
1357 * @dev: Device struct
1359 * Return: 0 on success, anything else on error.
1361 static int __maybe_unused pm8001_pci_resume(struct device *dev)
1363 struct pci_dev *pdev = to_pci_dev(dev);
1364 struct sas_ha_struct *sha = pci_get_drvdata(pdev);
1365 struct pm8001_hba_info *pm8001_ha;
1368 DECLARE_COMPLETION_ONSTACK(completion);
1370 pm8001_ha = sha->lldd_ha;
1372 pm8001_info(pm8001_ha,
1373 "pdev=0x%p, slot=%s, resuming from previous operating state [D%d]\n",
1374 pdev, pm8001_ha->name, pdev->current_state);
1376 rc = pci_go_44(pdev);
1378 goto err_out_disable;
1379 sas_prep_resume_ha(sha);
1380 /* chip soft rst only for spc */
1381 if (pm8001_ha->chip_id == chip_8001) {
1382 PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
1383 pm8001_dbg(pm8001_ha, INIT, "chip soft reset successful\n");
1385 rc = PM8001_CHIP_DISP->chip_init(pm8001_ha);
1387 goto err_out_disable;
1389 /* disable all the interrupt bits */
1390 PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF);
1392 rc = pm8001_request_irq(pm8001_ha);
1394 goto err_out_disable;
1396 pm8001_init_tasklet(pm8001_ha);
1398 PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, 0);
1399 if (pm8001_ha->chip_id != chip_8001) {
1400 for (i = 1; i < pm8001_ha->number_of_intr; i++)
1401 PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, i);
1404 /* Chip documentation for the 8070 and 8072 SPCv */
1405 /* states that a 500ms minimum delay is required */
1406 /* before issuing commands. Otherwise, the firmware */
1407 /* will enter an unrecoverable state. */
1409 if (pm8001_ha->chip_id == chip_8070 ||
1410 pm8001_ha->chip_id == chip_8072) {
1414 /* Spin up the PHYs */
1416 pm8001_ha->flags = PM8001F_RUN_TIME;
1417 for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
1418 pm8001_ha->phy[i].enable_completion = &completion;
1419 PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i);
1420 wait_for_completion(&completion);
1426 scsi_remove_host(pm8001_ha->shost);
1431 /* PCI device, vendor ID and driver_data entries, with a unique
1432 * value for each supported controller
1434 static struct pci_device_id pm8001_pci_table[] = {
1435 { PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001 },
1436 { PCI_VDEVICE(PMC_Sierra, 0x8006), chip_8006 },
1437 { PCI_VDEVICE(ADAPTEC2, 0x8006), chip_8006 },
1438 { PCI_VDEVICE(ATTO, 0x0042), chip_8001 },
1439 /* Support for SPC/SPCv/SPCve controllers */
1440 { PCI_VDEVICE(ADAPTEC2, 0x8001), chip_8001 },
1441 { PCI_VDEVICE(PMC_Sierra, 0x8008), chip_8008 },
1442 { PCI_VDEVICE(ADAPTEC2, 0x8008), chip_8008 },
1443 { PCI_VDEVICE(PMC_Sierra, 0x8018), chip_8018 },
1444 { PCI_VDEVICE(ADAPTEC2, 0x8018), chip_8018 },
1445 { PCI_VDEVICE(PMC_Sierra, 0x8009), chip_8009 },
1446 { PCI_VDEVICE(ADAPTEC2, 0x8009), chip_8009 },
1447 { PCI_VDEVICE(PMC_Sierra, 0x8019), chip_8019 },
1448 { PCI_VDEVICE(ADAPTEC2, 0x8019), chip_8019 },
1449 { PCI_VDEVICE(PMC_Sierra, 0x8074), chip_8074 },
1450 { PCI_VDEVICE(ADAPTEC2, 0x8074), chip_8074 },
1451 { PCI_VDEVICE(PMC_Sierra, 0x8076), chip_8076 },
1452 { PCI_VDEVICE(ADAPTEC2, 0x8076), chip_8076 },
1453 { PCI_VDEVICE(PMC_Sierra, 0x8077), chip_8077 },
1454 { PCI_VDEVICE(ADAPTEC2, 0x8077), chip_8077 },
1455 { PCI_VENDOR_ID_ADAPTEC2, 0x8081,
1456 PCI_VENDOR_ID_ADAPTEC2, 0x0400, 0, 0, chip_8001 },
1457 { PCI_VENDOR_ID_ADAPTEC2, 0x8081,
1458 PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8001 },
1459 { PCI_VENDOR_ID_ADAPTEC2, 0x8088,
1460 PCI_VENDOR_ID_ADAPTEC2, 0x0008, 0, 0, chip_8008 },
1461 { PCI_VENDOR_ID_ADAPTEC2, 0x8088,
1462 PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8008 },
1463 { PCI_VENDOR_ID_ADAPTEC2, 0x8089,
1464 PCI_VENDOR_ID_ADAPTEC2, 0x0008, 0, 0, chip_8009 },
1465 { PCI_VENDOR_ID_ADAPTEC2, 0x8089,
1466 PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8009 },
1467 { PCI_VENDOR_ID_ADAPTEC2, 0x8088,
1468 PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8018 },
1469 { PCI_VENDOR_ID_ADAPTEC2, 0x8088,
1470 PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8018 },
1471 { PCI_VENDOR_ID_ADAPTEC2, 0x8089,
1472 PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8019 },
1473 { PCI_VENDOR_ID_ADAPTEC2, 0x8089,
1474 PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8019 },
1475 { PCI_VENDOR_ID_ADAPTEC2, 0x8074,
1476 PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8074 },
1477 { PCI_VENDOR_ID_ADAPTEC2, 0x8076,
1478 PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8076 },
1479 { PCI_VENDOR_ID_ADAPTEC2, 0x8077,
1480 PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8077 },
1481 { PCI_VENDOR_ID_ADAPTEC2, 0x8074,
1482 PCI_VENDOR_ID_ADAPTEC2, 0x0008, 0, 0, chip_8074 },
1483 { PCI_VENDOR_ID_ADAPTEC2, 0x8076,
1484 PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8076 },
1485 { PCI_VENDOR_ID_ADAPTEC2, 0x8077,
1486 PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8077 },
1487 { PCI_VENDOR_ID_ADAPTEC2, 0x8076,
1488 PCI_VENDOR_ID_ADAPTEC2, 0x0808, 0, 0, chip_8076 },
1489 { PCI_VENDOR_ID_ADAPTEC2, 0x8077,
1490 PCI_VENDOR_ID_ADAPTEC2, 0x0808, 0, 0, chip_8077 },
1491 { PCI_VENDOR_ID_ADAPTEC2, 0x8074,
1492 PCI_VENDOR_ID_ADAPTEC2, 0x0404, 0, 0, chip_8074 },
1493 { PCI_VENDOR_ID_ATTO, 0x8070,
1494 PCI_VENDOR_ID_ATTO, 0x0070, 0, 0, chip_8070 },
1495 { PCI_VENDOR_ID_ATTO, 0x8070,
1496 PCI_VENDOR_ID_ATTO, 0x0071, 0, 0, chip_8070 },
1497 { PCI_VENDOR_ID_ATTO, 0x8072,
1498 PCI_VENDOR_ID_ATTO, 0x0072, 0, 0, chip_8072 },
1499 { PCI_VENDOR_ID_ATTO, 0x8072,
1500 PCI_VENDOR_ID_ATTO, 0x0073, 0, 0, chip_8072 },
1501 { PCI_VENDOR_ID_ATTO, 0x8070,
1502 PCI_VENDOR_ID_ATTO, 0x0080, 0, 0, chip_8070 },
1503 { PCI_VENDOR_ID_ATTO, 0x8072,
1504 PCI_VENDOR_ID_ATTO, 0x0081, 0, 0, chip_8072 },
1505 { PCI_VENDOR_ID_ATTO, 0x8072,
1506 PCI_VENDOR_ID_ATTO, 0x0082, 0, 0, chip_8072 },
1507 {} /* terminate list */
1510 static SIMPLE_DEV_PM_OPS(pm8001_pci_pm_ops,
1514 static struct pci_driver pm8001_pci_driver = {
1516 .id_table = pm8001_pci_table,
1517 .probe = pm8001_pci_probe,
1518 .remove = pm8001_pci_remove,
1519 .driver.pm = &pm8001_pci_pm_ops,
1523 * pm8001_init - initialize scsi transport template
1525 static int __init pm8001_init(void)
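	/*
	 * Tasklet-based deferred interrupt handling is only used together
	 * with MSI-X; force it off when MSI-X is disabled.
	 */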
1529 if (pm8001_use_tasklet && !pm8001_use_msix)
1530 pm8001_use_tasklet = false;
1532 pm8001_wq = alloc_workqueue("pm80xx", 0, 0);
1537 pm8001_stt = sas_domain_attach_transport(&pm8001_transport_ops);
1540 rc = pci_register_driver(&pm8001_pci_driver);
1546 sas_release_transport(pm8001_stt);
1548 destroy_workqueue(pm8001_wq);
1553 static void __exit pm8001_exit(void)
1555 pci_unregister_driver(&pm8001_pci_driver);
1556 sas_release_transport(pm8001_stt);
1557 destroy_workqueue(pm8001_wq);
1560 module_init(pm8001_init);
1561 module_exit(pm8001_exit);
1568 "PMC-Sierra PM8001/8006/8081/8088/8089/8074/8076/8077/8070/8072 "
1569 "SAS/SATA controller driver");
1570 MODULE_VERSION(DRV_VERSION);
1571 MODULE_LICENSE("GPL");
1572 MODULE_DEVICE_TABLE(pci, pm8001_pci_table);