/*
 * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
 *
 * Copyright (c) 2008-2009 USI Co., Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */
#include <linux/slab.h>
#include "pm8001_sas.h"
#include "pm8001_hwi.h"
#include "pm8001_chips.h"
#include "pm8001_ctl.h"
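
/*
 * Register access in this file goes through the helpers declared in
 * pm8001_sas.h: pm8001_cr32()/pm8001_cw32() access a 32-bit chip register
 * relative to a logical PCI BAR, pm8001_mr32()/pm8001_mw32() access an
 * already ioremapped table address, and pm8001_read_32()/pm8001_write_32()
 * touch host (DMA) memory such as the queue index mirror buffers.
 */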
/**
 * read_main_config_table - read the main configuration table and save it.
 * @pm8001_ha: our hba card information
 */
static void read_main_config_table(struct pm8001_hba_info *pm8001_ha)
{
	void __iomem *address = pm8001_ha->main_cfg_tbl_addr;

	pm8001_ha->main_cfg_tbl.pm8001_tbl.signature =
		pm8001_mr32(address, 0x00);
	pm8001_ha->main_cfg_tbl.pm8001_tbl.interface_rev =
		pm8001_mr32(address, 0x04);
	pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev =
		pm8001_mr32(address, 0x08);
	pm8001_ha->main_cfg_tbl.pm8001_tbl.max_out_io =
		pm8001_mr32(address, 0x0C);
	pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl =
		pm8001_mr32(address, 0x10);
	pm8001_ha->main_cfg_tbl.pm8001_tbl.ctrl_cap_flag =
		pm8001_mr32(address, 0x14);
	pm8001_ha->main_cfg_tbl.pm8001_tbl.gst_offset =
		pm8001_mr32(address, 0x18);
	pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_queue_offset =
		pm8001_mr32(address, MAIN_IBQ_OFFSET);
	pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_queue_offset =
		pm8001_mr32(address, MAIN_OBQ_OFFSET);
	pm8001_ha->main_cfg_tbl.pm8001_tbl.hda_mode_flag =
		pm8001_mr32(address, MAIN_HDA_FLAGS_OFFSET);

	/* read Analog Setting offset from the configuration table */
	pm8001_ha->main_cfg_tbl.pm8001_tbl.anolog_setup_table_offset =
		pm8001_mr32(address, MAIN_ANALOG_SETUP_OFFSET);

	/* read Error Dump Offset and Length */
	pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_offset0 =
		pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_OFFSET);
	pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_length0 =
		pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_LENGTH);
	pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_offset1 =
		pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_OFFSET);
	pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_length1 =
		pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_LENGTH);
}
/**
 * read_general_status_table - read the general status table and save it.
 * @pm8001_ha: our hba card information
 */
static void read_general_status_table(struct pm8001_hba_info *pm8001_ha)
{
	void __iomem *address = pm8001_ha->general_stat_tbl_addr;

	pm8001_ha->gs_tbl.pm8001_tbl.gst_len_mpistate =
		pm8001_mr32(address, 0x00);
	pm8001_ha->gs_tbl.pm8001_tbl.iq_freeze_state0 =
		pm8001_mr32(address, 0x04);
	pm8001_ha->gs_tbl.pm8001_tbl.iq_freeze_state1 =
		pm8001_mr32(address, 0x08);
	pm8001_ha->gs_tbl.pm8001_tbl.msgu_tcnt =
		pm8001_mr32(address, 0x0C);
	pm8001_ha->gs_tbl.pm8001_tbl.iop_tcnt =
		pm8001_mr32(address, 0x10);
	pm8001_ha->gs_tbl.pm8001_tbl.rsvd =
		pm8001_mr32(address, 0x14);
	pm8001_ha->gs_tbl.pm8001_tbl.phy_state[0] =
		pm8001_mr32(address, 0x18);
	pm8001_ha->gs_tbl.pm8001_tbl.phy_state[1] =
		pm8001_mr32(address, 0x1C);
	pm8001_ha->gs_tbl.pm8001_tbl.phy_state[2] =
		pm8001_mr32(address, 0x20);
	pm8001_ha->gs_tbl.pm8001_tbl.phy_state[3] =
		pm8001_mr32(address, 0x24);
	pm8001_ha->gs_tbl.pm8001_tbl.phy_state[4] =
		pm8001_mr32(address, 0x28);
	pm8001_ha->gs_tbl.pm8001_tbl.phy_state[5] =
		pm8001_mr32(address, 0x2C);
	pm8001_ha->gs_tbl.pm8001_tbl.phy_state[6] =
		pm8001_mr32(address, 0x30);
	pm8001_ha->gs_tbl.pm8001_tbl.phy_state[7] =
		pm8001_mr32(address, 0x34);
	pm8001_ha->gs_tbl.pm8001_tbl.gpio_input_val =
		pm8001_mr32(address, 0x38);
	pm8001_ha->gs_tbl.pm8001_tbl.rsvd1[0] =
		pm8001_mr32(address, 0x3C);
	pm8001_ha->gs_tbl.pm8001_tbl.rsvd1[1] =
		pm8001_mr32(address, 0x40);
	pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[0] =
		pm8001_mr32(address, 0x44);
	pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[1] =
		pm8001_mr32(address, 0x48);
	pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[2] =
		pm8001_mr32(address, 0x4C);
	pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[3] =
		pm8001_mr32(address, 0x50);
	pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[4] =
		pm8001_mr32(address, 0x54);
	pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[5] =
		pm8001_mr32(address, 0x58);
	pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[6] =
		pm8001_mr32(address, 0x5C);
	pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[7] =
		pm8001_mr32(address, 0x60);
}
/**
 * read_inbnd_queue_table - read the inbound queue table and save it.
 * @pm8001_ha: our hba card information
 */
static void read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
{
	int i;
	void __iomem *address = pm8001_ha->inbnd_q_tbl_addr;

	for (i = 0; i < PM8001_MAX_INB_NUM; i++) {
		u32 offset = i * 0x20;

		pm8001_ha->inbnd_q_tbl[i].pi_pci_bar =
			get_pci_bar_index(pm8001_mr32(address, (offset + 0x14)));
		pm8001_ha->inbnd_q_tbl[i].pi_offset =
			pm8001_mr32(address, (offset + 0x18));
	}
}
/**
 * read_outbnd_queue_table - read the outbound queue table and save it.
 * @pm8001_ha: our hba card information
 */
static void read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
{
	int i;
	void __iomem *address = pm8001_ha->outbnd_q_tbl_addr;

	for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) {
		u32 offset = i * 0x24;

		pm8001_ha->outbnd_q_tbl[i].ci_pci_bar =
			get_pci_bar_index(pm8001_mr32(address, (offset + 0x14)));
		pm8001_ha->outbnd_q_tbl[i].ci_offset =
			pm8001_mr32(address, (offset + 0x18));
	}
}
/**
 * init_default_table_values - init the default table.
 * @pm8001_ha: our hba card information
 */
static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
{
	int i;
	u32 offsetib, offsetob;
	void __iomem *addressib = pm8001_ha->inbnd_q_tbl_addr;
	void __iomem *addressob = pm8001_ha->outbnd_q_tbl_addr;
	u32 ib_offset = pm8001_ha->ib_offset;
	u32 ob_offset = pm8001_ha->ob_offset;
	u32 ci_offset = pm8001_ha->ci_offset;
	u32 pi_offset = pm8001_ha->pi_offset;

	pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_q_nppd_hppd = 0;
	pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid0_3 = 0;
	pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid4_7 = 0;
	pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid0_3 = 0;
	pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid4_7 = 0;
	pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ITNexus_event_pid0_3 =
		0;
	pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ITNexus_event_pid4_7 =
		0;
	pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ssp_event_pid0_3 = 0;
	pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ssp_event_pid4_7 = 0;
	pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_smp_event_pid0_3 = 0;
	pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_smp_event_pid4_7 = 0;

	pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_event_log_addr =
		pm8001_ha->memoryMap.region[AAP1].phys_addr_hi;
	pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_event_log_addr =
		pm8001_ha->memoryMap.region[AAP1].phys_addr_lo;
	pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_size =
		PM8001_EVENT_LOG_SIZE;
	pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_option = 0x01;
	pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_iop_event_log_addr =
		pm8001_ha->memoryMap.region[IOP].phys_addr_hi;
	pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_iop_event_log_addr =
		pm8001_ha->memoryMap.region[IOP].phys_addr_lo;
	pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_size =
		PM8001_EVENT_LOG_SIZE;
	pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_option = 0x01;
	pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt = 0x01;
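	/*
	 * Each inbound/outbound queue descriptor below packs the element
	 * count into bits [15:0] and the element (IOMB) size in bytes into
	 * bits [29:16]; bit 30 is only set for the outbound queues.  The
	 * ci_virt/pi_virt buffers are host memory locations the firmware
	 * updates with its consumer/producer index for each queue.
	 */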
	for (i = 0; i < pm8001_ha->max_q_num; i++) {
		pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt =
			PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x00<<30);
		pm8001_ha->inbnd_q_tbl[i].upper_base_addr =
			pm8001_ha->memoryMap.region[ib_offset + i].phys_addr_hi;
		pm8001_ha->inbnd_q_tbl[i].lower_base_addr =
			pm8001_ha->memoryMap.region[ib_offset + i].phys_addr_lo;
		pm8001_ha->inbnd_q_tbl[i].base_virt =
			(u8 *)pm8001_ha->memoryMap.region[ib_offset + i].virt_ptr;
		pm8001_ha->inbnd_q_tbl[i].total_length =
			pm8001_ha->memoryMap.region[ib_offset + i].total_len;
		pm8001_ha->inbnd_q_tbl[i].ci_upper_base_addr =
			pm8001_ha->memoryMap.region[ci_offset + i].phys_addr_hi;
		pm8001_ha->inbnd_q_tbl[i].ci_lower_base_addr =
			pm8001_ha->memoryMap.region[ci_offset + i].phys_addr_lo;
		pm8001_ha->inbnd_q_tbl[i].ci_virt =
			pm8001_ha->memoryMap.region[ci_offset + i].virt_ptr;
		pm8001_write_32(pm8001_ha->inbnd_q_tbl[i].ci_virt, 0, 0);
		offsetib = i * 0x20;
		pm8001_ha->inbnd_q_tbl[i].pi_pci_bar =
			get_pci_bar_index(pm8001_mr32(addressib,
				(offsetib + 0x14)));
		pm8001_ha->inbnd_q_tbl[i].pi_offset =
			pm8001_mr32(addressib, (offsetib + 0x18));
		pm8001_ha->inbnd_q_tbl[i].producer_idx = 0;
		pm8001_ha->inbnd_q_tbl[i].consumer_index = 0;
	}
	for (i = 0; i < pm8001_ha->max_q_num; i++) {
		pm8001_ha->outbnd_q_tbl[i].element_size_cnt =
			PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x01<<30);
		pm8001_ha->outbnd_q_tbl[i].upper_base_addr =
			pm8001_ha->memoryMap.region[ob_offset + i].phys_addr_hi;
		pm8001_ha->outbnd_q_tbl[i].lower_base_addr =
			pm8001_ha->memoryMap.region[ob_offset + i].phys_addr_lo;
		pm8001_ha->outbnd_q_tbl[i].base_virt =
			(u8 *)pm8001_ha->memoryMap.region[ob_offset + i].virt_ptr;
		pm8001_ha->outbnd_q_tbl[i].total_length =
			pm8001_ha->memoryMap.region[ob_offset + i].total_len;
		pm8001_ha->outbnd_q_tbl[i].pi_upper_base_addr =
			pm8001_ha->memoryMap.region[pi_offset + i].phys_addr_hi;
		pm8001_ha->outbnd_q_tbl[i].pi_lower_base_addr =
			pm8001_ha->memoryMap.region[pi_offset + i].phys_addr_lo;
		pm8001_ha->outbnd_q_tbl[i].interrup_vec_cnt_delay =
			0 | (10 << 16) | (i << 24);
		pm8001_ha->outbnd_q_tbl[i].pi_virt =
			pm8001_ha->memoryMap.region[pi_offset + i].virt_ptr;
		pm8001_write_32(pm8001_ha->outbnd_q_tbl[i].pi_virt, 0, 0);
		offsetob = i * 0x24;
		pm8001_ha->outbnd_q_tbl[i].ci_pci_bar =
			get_pci_bar_index(pm8001_mr32(addressob,
				offsetob + 0x14));
		pm8001_ha->outbnd_q_tbl[i].ci_offset =
			pm8001_mr32(addressob, (offsetob + 0x18));
		pm8001_ha->outbnd_q_tbl[i].consumer_idx = 0;
		pm8001_ha->outbnd_q_tbl[i].producer_index = 0;
	}
}
/**
 * update_main_config_table - update the main default table to the HBA.
 * @pm8001_ha: our hba card information
 */
static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
{
	void __iomem *address = pm8001_ha->main_cfg_tbl_addr;

	pm8001_mw32(address, 0x24,
		pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_q_nppd_hppd);
	pm8001_mw32(address, 0x28,
		pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid0_3);
	pm8001_mw32(address, 0x2C,
		pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid4_7);
	pm8001_mw32(address, 0x30,
		pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid0_3);
	pm8001_mw32(address, 0x34,
		pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid4_7);
	pm8001_mw32(address, 0x38,
		pm8001_ha->main_cfg_tbl.pm8001_tbl.
					outbound_tgt_ITNexus_event_pid0_3);
	pm8001_mw32(address, 0x3C,
		pm8001_ha->main_cfg_tbl.pm8001_tbl.
					outbound_tgt_ITNexus_event_pid4_7);
	pm8001_mw32(address, 0x40,
		pm8001_ha->main_cfg_tbl.pm8001_tbl.
					outbound_tgt_ssp_event_pid0_3);
	pm8001_mw32(address, 0x44,
		pm8001_ha->main_cfg_tbl.pm8001_tbl.
					outbound_tgt_ssp_event_pid4_7);
	pm8001_mw32(address, 0x48,
		pm8001_ha->main_cfg_tbl.pm8001_tbl.
					outbound_tgt_smp_event_pid0_3);
	pm8001_mw32(address, 0x4C,
		pm8001_ha->main_cfg_tbl.pm8001_tbl.
					outbound_tgt_smp_event_pid4_7);
	pm8001_mw32(address, 0x50,
		pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_event_log_addr);
	pm8001_mw32(address, 0x54,
		pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_event_log_addr);
	pm8001_mw32(address, 0x58,
		pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_size);
	pm8001_mw32(address, 0x5C,
		pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_option);
	pm8001_mw32(address, 0x60,
		pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_iop_event_log_addr);
	pm8001_mw32(address, 0x64,
		pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_iop_event_log_addr);
	pm8001_mw32(address, 0x68,
		pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_size);
	pm8001_mw32(address, 0x6C,
		pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_option);
	pm8001_mw32(address, 0x70,
		pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt);
}
/**
 * update_inbnd_queue_table - update the inbound queue table to the HBA.
 * @pm8001_ha: our hba card information
 * @number: entry in the queue
 */
static void update_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha,
				     int number)
{
	void __iomem *address = pm8001_ha->inbnd_q_tbl_addr;
	u16 offset = number * 0x20;

	pm8001_mw32(address, offset + 0x00,
		pm8001_ha->inbnd_q_tbl[number].element_pri_size_cnt);
	pm8001_mw32(address, offset + 0x04,
		pm8001_ha->inbnd_q_tbl[number].upper_base_addr);
	pm8001_mw32(address, offset + 0x08,
		pm8001_ha->inbnd_q_tbl[number].lower_base_addr);
	pm8001_mw32(address, offset + 0x0C,
		pm8001_ha->inbnd_q_tbl[number].ci_upper_base_addr);
	pm8001_mw32(address, offset + 0x10,
		pm8001_ha->inbnd_q_tbl[number].ci_lower_base_addr);
}
/**
 * update_outbnd_queue_table - update the outbound queue table to the HBA.
 * @pm8001_ha: our hba card information
 * @number: entry in the queue
 */
static void update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha,
				      int number)
{
	void __iomem *address = pm8001_ha->outbnd_q_tbl_addr;
	u16 offset = number * 0x24;

	pm8001_mw32(address, offset + 0x00,
		pm8001_ha->outbnd_q_tbl[number].element_size_cnt);
	pm8001_mw32(address, offset + 0x04,
		pm8001_ha->outbnd_q_tbl[number].upper_base_addr);
	pm8001_mw32(address, offset + 0x08,
		pm8001_ha->outbnd_q_tbl[number].lower_base_addr);
	pm8001_mw32(address, offset + 0x0C,
		pm8001_ha->outbnd_q_tbl[number].pi_upper_base_addr);
	pm8001_mw32(address, offset + 0x10,
		pm8001_ha->outbnd_q_tbl[number].pi_lower_base_addr);
	pm8001_mw32(address, offset + 0x1C,
		pm8001_ha->outbnd_q_tbl[number].interrup_vec_cnt_delay);
}
/**
 * pm8001_bar4_shift - function is called to shift BAR base address
 * @pm8001_ha: our hba card information
 * @shiftValue: shifting value in memory bar.
 */
int pm8001_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue)
{
	u32 regVal;
	unsigned long start;

	/* program the inbound AXI translation Lower Address */
	pm8001_cw32(pm8001_ha, 1, SPC_IBW_AXI_TRANSLATION_LOW, shiftValue);

	/* confirm the setting is written */
	start = jiffies + HZ; /* 1 sec */
	do {
		regVal = pm8001_cr32(pm8001_ha, 1, SPC_IBW_AXI_TRANSLATION_LOW);
	} while ((regVal != shiftValue) && time_before(jiffies, start));

	if (regVal != shiftValue) {
		pm8001_dbg(pm8001_ha, INIT,
			   "TIMEOUT:SPC_IBW_AXI_TRANSLATION_LOW = 0x%x\n",
			   regVal);
		return -1;
	}
	return 0;
}
/**
 * mpi_set_phys_g3_with_ssc - configure SSC for all G3 phys
 * @pm8001_ha: our hba card information
 * @SSCbit: set SSCbit to 0 to disable all phys ssc; 1 to enable all phys ssc.
 */
static void mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha,
				     u32 SSCbit)
{
	u32 offset, i;
	unsigned long flags;

#define SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR 0x00030000
#define SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR 0x00040000
#define SAS2_SETTINGS_LOCAL_PHY_0_3_OFFSET 0x1074
#define SAS2_SETTINGS_LOCAL_PHY_4_7_OFFSET 0x1074
#define PHY_G3_WITHOUT_SSC_BIT_SHIFT 12
#define PHY_G3_WITH_SSC_BIT_SHIFT 13
#define SNW3_PHY_CAPABILITIES_PARITY 31

	/*
	 * Using shifted destination address 0x3_0000:0x1074 + 0x4000*N (N=0:3)
	 * Using shifted destination address 0x4_0000:0x1074 + 0x4000*(N-4) (N=4:7)
	 */
	spin_lock_irqsave(&pm8001_ha->lock, flags);
	if (-1 == pm8001_bar4_shift(pm8001_ha,
				SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR)) {
		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
		return;
	}

	for (i = 0; i < 4; i++) {
		offset = SAS2_SETTINGS_LOCAL_PHY_0_3_OFFSET + 0x4000 * i;
		pm8001_cw32(pm8001_ha, 2, offset, 0x80001501);
	}
	/* shift membase 3 for SAS2_SETTINGS_LOCAL_PHY 4 - 7 */
	if (-1 == pm8001_bar4_shift(pm8001_ha,
				SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR)) {
		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
		return;
	}
	for (i = 4; i < 8; i++) {
		offset = SAS2_SETTINGS_LOCAL_PHY_4_7_OFFSET + 0x4000 * (i-4);
		pm8001_cw32(pm8001_ha, 2, offset, 0x80001501);
	}
	/*************************************************************
	Change the SSC upspreading value to 0x0 so that upspreading is disabled.
	Device MABC SMOD0 Controls
	Address: (via MEMBASE-III):
	Using shifted destination address 0x0_0000: with Offset 0xD8

	31:28 R/W Reserved Do not change
	27:24 R/W SAS_SMOD_SPRDUP 0000
	23:20 R/W SAS_SMOD_SPRDDN 0000
	19:0  R/W Reserved Do not change
	Upon power-up this register will read as 0x8990c016,
	and I would like you to change the SAS_SMOD_SPRDUP bits to 0b0000
	so that the written value will be 0x8090c016.
	This will ensure only down-spreading SSC is enabled on the SPC.
	*************************************************************/
	pm8001_cr32(pm8001_ha, 2, 0xd8);
	pm8001_cw32(pm8001_ha, 2, 0xd8, 0x8000C016);

	/* set the shifted destination address to 0x0 to avoid error operation */
	pm8001_bar4_shift(pm8001_ha, 0x0);
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
}
/**
 * mpi_set_open_retry_interval_reg - set the retry interval for OPEN_REJECT
 * @pm8001_ha: our hba card information
 * @interval: interval time for each OPEN_REJECT (RETRY). The units are in 1us.
 */
static void mpi_set_open_retry_interval_reg(struct pm8001_hba_info *pm8001_ha,
					    u32 interval)
{
	u32 offset;
	u32 value;
	u32 i;
	unsigned long flags;

#define OPEN_RETRY_INTERVAL_PHY_0_3_SHIFT_ADDR 0x00030000
#define OPEN_RETRY_INTERVAL_PHY_4_7_SHIFT_ADDR 0x00040000
#define OPEN_RETRY_INTERVAL_PHY_0_3_OFFSET 0x30B4
#define OPEN_RETRY_INTERVAL_PHY_4_7_OFFSET 0x30B4
#define OPEN_RETRY_INTERVAL_REG_MASK 0x0000FFFF

	value = interval & OPEN_RETRY_INTERVAL_REG_MASK;
	spin_lock_irqsave(&pm8001_ha->lock, flags);
	/* shift bar and set the OPEN_REJECT(RETRY) interval time of PHY 0 - 3 */
	if (-1 == pm8001_bar4_shift(pm8001_ha,
			     OPEN_RETRY_INTERVAL_PHY_0_3_SHIFT_ADDR)) {
		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
		return;
	}
	for (i = 0; i < 4; i++) {
		offset = OPEN_RETRY_INTERVAL_PHY_0_3_OFFSET + 0x4000 * i;
		pm8001_cw32(pm8001_ha, 2, offset, value);
	}

	if (-1 == pm8001_bar4_shift(pm8001_ha,
			     OPEN_RETRY_INTERVAL_PHY_4_7_SHIFT_ADDR)) {
		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
		return;
	}
	for (i = 4; i < 8; i++) {
		offset = OPEN_RETRY_INTERVAL_PHY_4_7_OFFSET + 0x4000 * (i-4);
		pm8001_cw32(pm8001_ha, 2, offset, value);
	}
	/* set the shifted destination address to 0x0 to avoid error operation */
	pm8001_bar4_shift(pm8001_ha, 0x0);
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
}
/**
 * mpi_init_check - check firmware initialization status.
 * @pm8001_ha: our hba card information
 */
static int mpi_init_check(struct pm8001_hba_info *pm8001_ha)
{
	u32 max_wait_count;
	u32 value;
	u32 gst_len_mpistate;

	/* Write bit0=1 to Inbound DoorBell Register to tell the SPC FW the
	table is updated */
	pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPC_MSGU_CFG_TABLE_UPDATE);
	/* wait until Inbound DoorBell Clear Register toggled */
	max_wait_count = 1 * 1000 * 1000;/* 1 sec */
	do {
		udelay(1);
		value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET);
		value &= SPC_MSGU_CFG_TABLE_UPDATE;
	} while ((value != 0) && (--max_wait_count));

	if (!max_wait_count)
		return -1;
	/* check the MPI-State for initialization */
	gst_len_mpistate =
		pm8001_mr32(pm8001_ha->general_stat_tbl_addr,
		GST_GSTLEN_MPIS_OFFSET);
	if (GST_MPI_STATE_INIT != (gst_len_mpistate & GST_MPI_STATE_MASK))
		return -1;
	/* check MPI Initialization error */
	gst_len_mpistate = gst_len_mpistate >> 16;
	if (0x0000 != gst_len_mpistate)
		return -1;
	return 0;
}
/**
 * check_fw_ready - The LLDD check if the FW is ready, if not, return error.
 * @pm8001_ha: our hba card information
 */
static int check_fw_ready(struct pm8001_hba_info *pm8001_ha)
{
	u32 value, value1;
	u32 max_wait_count;

	/* check error state */
	value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
	value1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2);
	/* check AAP error */
	if (SCRATCH_PAD1_ERR == (value & SCRATCH_PAD_STATE_MASK)) {
		value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0);
		return -1;
	}
	/* check IOP error */
	if (SCRATCH_PAD2_ERR == (value1 & SCRATCH_PAD_STATE_MASK)) {
		value1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3);
		return -1;
	}
	/* bit 4-31 of scratch pad1 should be zeros if it is not
	in error state */
	if (value & SCRATCH_PAD1_STATE_MASK) {
		pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0);
		return -1;
	}
	/* bit 2, 4-31 of scratch pad2 should be zeros if it is not
	in error state */
	if (value1 & SCRATCH_PAD2_STATE_MASK)
		return -1;

	max_wait_count = 1 * 1000 * 1000;/* 1 sec timeout */
	/* wait until scratch pad 1 and 2 registers are in the ready state */
	do {
		udelay(1);
		value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1)
			& SCRATCH_PAD1_RDY;
		value1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2)
			& SCRATCH_PAD2_RDY;
		if ((--max_wait_count) == 0)
			return -1;
	} while ((value != SCRATCH_PAD1_RDY) || (value1 != SCRATCH_PAD2_RDY));
	return 0;
}
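
/*
 * init_pci_device_addresses - locate the MPI tables behind the message unit.
 * Scratchpad 0 (offset 0x44 of logical BAR 0) encodes the PCI BAR that holds
 * the main configuration table in its upper 6 bits and the byte offset of the
 * table in its lower 26 bits; the general status, inbound queue and outbound
 * queue tables are then found through offsets stored in the main table.
 */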
static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
{
	void __iomem *base_addr;
	u32 value;
	u32 offset;
	u32 pcibar;
	u32 pcilogic;

	value = pm8001_cr32(pm8001_ha, 0, 0x44);
	offset = value & 0x03FFFFFF;
	pm8001_dbg(pm8001_ha, INIT, "Scratchpad 0 Offset: %x\n", offset);
	pcilogic = (value & 0xFC000000) >> 26;
	pcibar = get_pci_bar_index(pcilogic);
	pm8001_dbg(pm8001_ha, INIT, "Scratchpad 0 PCI BAR: %d\n", pcibar);
	pm8001_ha->main_cfg_tbl_addr = base_addr =
		pm8001_ha->io_mem[pcibar].memvirtaddr + offset;
	pm8001_ha->general_stat_tbl_addr =
		base_addr + pm8001_cr32(pm8001_ha, pcibar, offset + 0x18);
	pm8001_ha->inbnd_q_tbl_addr =
		base_addr + pm8001_cr32(pm8001_ha, pcibar, offset + 0x1C);
	pm8001_ha->outbnd_q_tbl_addr =
		base_addr + pm8001_cr32(pm8001_ha, pcibar, offset + 0x20);
}
/**
 * pm8001_chip_init - the main init function that initializes the whole PM8001 chip.
 * @pm8001_ha: our hba card information
 */
static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
{
	u8 i = 0;
	u16 deviceid;

	pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
	/* 8081 controllers need BAR shift to access MPI space
	 * as this is shared with BIOS data */
	if (deviceid == 0x8081 || deviceid == 0x0042) {
		if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_SM_BASE)) {
			pm8001_dbg(pm8001_ha, FAIL,
				   "Shift Bar4 to 0x%x failed\n",
				   GSM_SM_BASE);
			return -1;
		}
	}
	/* check the firmware status */
	if (-1 == check_fw_ready(pm8001_ha)) {
		pm8001_dbg(pm8001_ha, FAIL, "Firmware is not ready!\n");
		return -EBUSY;
	}

	/* Initialize pci space address eg: mpi offset */
	init_pci_device_addresses(pm8001_ha);
	init_default_table_values(pm8001_ha);
	read_main_config_table(pm8001_ha);
	read_general_status_table(pm8001_ha);
	read_inbnd_queue_table(pm8001_ha);
	read_outbnd_queue_table(pm8001_ha);
	/* update main config table, inbound table and outbound table */
	update_main_config_table(pm8001_ha);
	for (i = 0; i < pm8001_ha->max_q_num; i++)
		update_inbnd_queue_table(pm8001_ha, i);
	for (i = 0; i < pm8001_ha->max_q_num; i++)
		update_outbnd_queue_table(pm8001_ha, i);
	/* 8081 controllers do not require these operations */
	if (deviceid != 0x8081 && deviceid != 0x0042) {
		mpi_set_phys_g3_with_ssc(pm8001_ha, 0);
		/* 7->130ms, 34->500ms, 119->1.5s */
		mpi_set_open_retry_interval_reg(pm8001_ha, 119);
	}
	/* notify firmware update finished and check initialization status */
	if (0 == mpi_init_check(pm8001_ha)) {
		pm8001_dbg(pm8001_ha, INIT, "MPI initialize successful!\n");
	} else
		return -EBUSY;
	/* This register is a 16-bit timer with a resolution of 1us. This is the
	timer used for interrupt delay/coalescing in the PCIe Application Layer.
	Zero is not a valid value. A value of 1 in the register will cause the
	interrupts to be normal. A value greater than 1 will cause coalescing
	delays. */
	pm8001_cw32(pm8001_ha, 1, 0x0033c0, 0x1);
	pm8001_cw32(pm8001_ha, 1, 0x0033c4, 0x0);
	return 0;
}
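
/*
 * mpi_uninit_check - ring the inbound doorbell with SPC_MSGU_CFG_TABLE_RESET
 * and wait (up to ~1 s each) for the doorbell bit to clear and for the MPI
 * state machine in the general status table to report GST_MPI_STATE_UNINIT.
 */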
static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
{
	u32 max_wait_count;
	u32 value;
	u32 gst_len_mpistate;
	u16 deviceid;

	pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
	if (deviceid == 0x8081 || deviceid == 0x0042) {
		if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_SM_BASE)) {
			pm8001_dbg(pm8001_ha, FAIL,
				   "Shift Bar4 to 0x%x failed\n",
				   GSM_SM_BASE);
			return -1;
		}
	}
	init_pci_device_addresses(pm8001_ha);
	/* Write bit1=1 to Inbound DoorBell Register to tell the SPC FW the
	table is stopped */
	pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPC_MSGU_CFG_TABLE_RESET);

	/* wait until Inbound DoorBell Clear Register toggled */
	max_wait_count = 1 * 1000 * 1000;/* 1 sec */
	do {
		udelay(1);
		value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET);
		value &= SPC_MSGU_CFG_TABLE_RESET;
	} while ((value != 0) && (--max_wait_count));

	if (!max_wait_count) {
		pm8001_dbg(pm8001_ha, FAIL, "TIMEOUT:IBDB value/=0x%x\n",
			   pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET));
		return -1;
	}

	/* check the MPI-State for termination in progress */
	/* wait until Inbound DoorBell Clear Register toggled */
	max_wait_count = 1 * 1000 * 1000; /* 1 sec */
	do {
		udelay(1);
		gst_len_mpistate =
			pm8001_mr32(pm8001_ha->general_stat_tbl_addr,
			GST_GSTLEN_MPIS_OFFSET);
		if (GST_MPI_STATE_UNINIT ==
			(gst_len_mpistate & GST_MPI_STATE_MASK))
			break;
	} while (--max_wait_count);
	if (!max_wait_count) {
		pm8001_dbg(pm8001_ha, FAIL, " TIME OUT MPI State = 0x%x\n",
			   gst_len_mpistate & GST_MPI_STATE_MASK);
		return -1;
	}
	return 0;
}
/**
 * soft_reset_ready_check - Function to check FW is ready for soft reset.
 * @pm8001_ha: our hba card information
 */
static u32 soft_reset_ready_check(struct pm8001_hba_info *pm8001_ha)
{
	u32 regVal, regVal1, regVal2;

	if (mpi_uninit_check(pm8001_ha) != 0) {
		pm8001_dbg(pm8001_ha, FAIL, "MPI state is not ready\n");
		return -1;
	}
	/* read the scratch pad 2 register bit 2 */
	regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2)
		& SCRATCH_PAD2_FWRDY_RST;
	if (regVal == SCRATCH_PAD2_FWRDY_RST) {
		pm8001_dbg(pm8001_ha, INIT, "Firmware is ready for reset.\n");
	} else {
		unsigned long flags;

		/* Trigger NMI twice via RB6 */
		spin_lock_irqsave(&pm8001_ha->lock, flags);
		if (-1 == pm8001_bar4_shift(pm8001_ha, RB6_ACCESS_REG)) {
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			pm8001_dbg(pm8001_ha, FAIL,
				   "Shift Bar4 to 0x%x failed\n",
				   RB6_ACCESS_REG);
			return -1;
		}
		pm8001_cw32(pm8001_ha, 2, SPC_RB6_OFFSET,
			RB6_MAGIC_NUMBER_RST);
		pm8001_cw32(pm8001_ha, 2, SPC_RB6_OFFSET, RB6_MAGIC_NUMBER_RST);
		/* wait for 100 ms */
		mdelay(100);
		regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2) &
			SCRATCH_PAD2_FWRDY_RST;
		if (regVal != SCRATCH_PAD2_FWRDY_RST) {
			regVal1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
			regVal2 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2);
			pm8001_dbg(pm8001_ha, FAIL, "TIMEOUT:MSGU_SCRATCH_PAD1=0x%x, MSGU_SCRATCH_PAD2=0x%x\n",
				   regVal1, regVal2);
			pm8001_dbg(pm8001_ha, FAIL,
				   "SCRATCH_PAD0 value = 0x%x\n",
				   pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0));
			pm8001_dbg(pm8001_ha, FAIL,
				   "SCRATCH_PAD3 value = 0x%x\n",
				   pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3));
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			return -1;
		}
		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
	}
	return 0;
}
/**
 * pm8001_chip_soft_rst - soft reset the PM8001 chip, so that all of the FW
 * register state is returned to its original status.
 * @pm8001_ha: our hba card information
 */
static int
pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
{
	u32 regVal, toggleVal;
	u32 max_wait_count;
	u32 regVal1, regVal2, regVal3;
	u32 signature = 0x252acbcd; /* for host scratch pad0 */
	unsigned long flags;

	/* step 1: Check FW is ready for soft reset */
	if (soft_reset_ready_check(pm8001_ha) != 0) {
		pm8001_dbg(pm8001_ha, FAIL, "FW is not ready\n");
		return -1;
	}

	/* step 2: clear NMI status register on AAP1 and IOP, write the same
	value to clear */
	/* map 0x60000 to BAR4(0x20), BAR2(win) */
	spin_lock_irqsave(&pm8001_ha->lock, flags);
	if (-1 == pm8001_bar4_shift(pm8001_ha, MBIC_AAP1_ADDR_BASE)) {
		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
		pm8001_dbg(pm8001_ha, FAIL, "Shift Bar4 to 0x%x failed\n",
			   MBIC_AAP1_ADDR_BASE);
		return -1;
	}
	regVal = pm8001_cr32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_IOP);
	pm8001_dbg(pm8001_ha, INIT, "MBIC - NMI Enable VPE0 (IOP)= 0x%x\n",
		   regVal);
	pm8001_cw32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_IOP, 0x0);
	/* map 0x70000 to BAR4(0x20), BAR2(win) */
	if (-1 == pm8001_bar4_shift(pm8001_ha, MBIC_IOP_ADDR_BASE)) {
		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
		pm8001_dbg(pm8001_ha, FAIL, "Shift Bar4 to 0x%x failed\n",
			   MBIC_IOP_ADDR_BASE);
		return -1;
	}
	regVal = pm8001_cr32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_AAP1);
	pm8001_dbg(pm8001_ha, INIT, "MBIC - NMI Enable VPE0 (AAP1)= 0x%x\n",
		   regVal);
	pm8001_cw32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_AAP1, 0x0);

	regVal = pm8001_cr32(pm8001_ha, 1, PCIE_EVENT_INTERRUPT_ENABLE);
	pm8001_dbg(pm8001_ha, INIT, "PCIE - Event Interrupt Enable = 0x%x\n",
		   regVal);
	pm8001_cw32(pm8001_ha, 1, PCIE_EVENT_INTERRUPT_ENABLE, 0x0);

	regVal = pm8001_cr32(pm8001_ha, 1, PCIE_EVENT_INTERRUPT);
	pm8001_dbg(pm8001_ha, INIT, "PCIE - Event Interrupt = 0x%x\n",
		   regVal);
	pm8001_cw32(pm8001_ha, 1, PCIE_EVENT_INTERRUPT, regVal);

	regVal = pm8001_cr32(pm8001_ha, 1, PCIE_ERROR_INTERRUPT_ENABLE);
	pm8001_dbg(pm8001_ha, INIT, "PCIE - Error Interrupt Enable = 0x%x\n",
		   regVal);
	pm8001_cw32(pm8001_ha, 1, PCIE_ERROR_INTERRUPT_ENABLE, 0x0);

	regVal = pm8001_cr32(pm8001_ha, 1, PCIE_ERROR_INTERRUPT);
	pm8001_dbg(pm8001_ha, INIT, "PCIE - Error Interrupt = 0x%x\n", regVal);
	pm8001_cw32(pm8001_ha, 1, PCIE_ERROR_INTERRUPT, regVal);

	/* read the scratch pad 1 register bit 2 */
	regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1)
		& SCRATCH_PAD1_RST;
	toggleVal = regVal ^ SCRATCH_PAD1_RST;

	/* set signature in host scratch pad0 register to tell SPC that the
	host performs the soft reset */
	pm8001_cw32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_0, signature);
	/* read required registers for confirming */
	/* map 0x0700000 to BAR4(0x20), BAR2(win) */
	if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_ADDR_BASE)) {
		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
		pm8001_dbg(pm8001_ha, FAIL, "Shift Bar4 to 0x%x failed\n",
			   GSM_ADDR_BASE);
		return -1;
	}
	pm8001_dbg(pm8001_ha, INIT,
		   "GSM 0x0(0x00007b88)-GSM Configuration and Reset = 0x%x\n",
		   pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET));

	/* step 3: host read GSM Configuration and Reset register */
	regVal = pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET);
	/* Put those bits to low */
	/* GSM XCBI offset = 0x70 0000
	0x00 Bit 13 COM_SLV_SW_RSTB 1
	0x00 Bit 12 QSSP_SW_RSTB 1
	0x00 Bit 11 RAAE_SW_RSTB 1
	0x00 Bit 9 RB_1_SW_RSTB 1
	0x00 Bit 8 SM_SW_RSTB 1
	*/
	regVal &= ~(0x00003b00);
	/* host write GSM Configuration and Reset register */
	pm8001_cw32(pm8001_ha, 2, GSM_CONFIG_RESET, regVal);
	pm8001_dbg(pm8001_ha, INIT,
		   "GSM 0x0 (0x00007b88 ==> 0x00004088) - GSM Configuration and Reset is set to = 0x%x\n",
		   pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET));

	/* step 4: disable GSM - Read Address Parity Check */
	regVal1 = pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK);
	pm8001_dbg(pm8001_ha, INIT,
		   "GSM 0x700038 - Read Address Parity Check Enable = 0x%x\n",
		   regVal1);
	pm8001_cw32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK, 0x0);
	pm8001_dbg(pm8001_ha, INIT,
		   "GSM 0x700038 - Read Address Parity Check Enable is set to = 0x%x\n",
		   pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK));

	/* disable GSM - Write Address Parity Check */
	regVal2 = pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK);
	pm8001_dbg(pm8001_ha, INIT,
		   "GSM 0x700040 - Write Address Parity Check Enable = 0x%x\n",
		   regVal2);
	pm8001_cw32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK, 0x0);
	pm8001_dbg(pm8001_ha, INIT,
		   "GSM 0x700040 - Write Address Parity Check Enable is set to = 0x%x\n",
		   pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK));

	/* disable GSM - Write Data Parity Check */
	regVal3 = pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK);
	pm8001_dbg(pm8001_ha, INIT, "GSM 0x300048 - Write Data Parity Check Enable = 0x%x\n",
		   regVal3);
	pm8001_cw32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK, 0x0);
	pm8001_dbg(pm8001_ha, INIT,
		   "GSM 0x300048 - Write Data Parity Check Enable is set to = 0x%x\n",
		   pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK));
	/* step 5: delay 10 usec */
	udelay(10);
	/* step 5-b: set GPIO-0 output control to tristate anyway */
	if (-1 == pm8001_bar4_shift(pm8001_ha, GPIO_ADDR_BASE)) {
		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
		pm8001_dbg(pm8001_ha, INIT, "Shift Bar4 to 0x%x failed\n",
			   GPIO_ADDR_BASE);
		return -1;
	}
	regVal = pm8001_cr32(pm8001_ha, 2, GPIO_GPIO_0_0UTPUT_CTL_OFFSET);
	pm8001_dbg(pm8001_ha, INIT, "GPIO Output Control Register: = 0x%x\n",
		   regVal);
	/* set GPIO-0 output control to tri-state */
	regVal &= 0xFFFFFFFC;
	pm8001_cw32(pm8001_ha, 2, GPIO_GPIO_0_0UTPUT_CTL_OFFSET, regVal);

	/* Step 6: Reset the IOP and AAP1 */
	/* map 0x00000 to BAR4(0x20), BAR2(win) */
	if (-1 == pm8001_bar4_shift(pm8001_ha, SPC_TOP_LEVEL_ADDR_BASE)) {
		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
		pm8001_dbg(pm8001_ha, FAIL, "SPC Shift Bar4 to 0x%x failed\n",
			   SPC_TOP_LEVEL_ADDR_BASE);
		return -1;
	}
	regVal = pm8001_cr32(pm8001_ha, 2, SPC_REG_RESET);
	pm8001_dbg(pm8001_ha, INIT, "Top Register before resetting IOP/AAP1:= 0x%x\n",
		   regVal);
	regVal &= ~(SPC_REG_RESET_PCS_IOP_SS | SPC_REG_RESET_PCS_AAP1_SS);
	pm8001_cw32(pm8001_ha, 2, SPC_REG_RESET, regVal);

	/* step 7: Reset the BDMA/OSSP */
	regVal = pm8001_cr32(pm8001_ha, 2, SPC_REG_RESET);
	pm8001_dbg(pm8001_ha, INIT, "Top Register before resetting BDMA/OSSP: = 0x%x\n",
		   regVal);
	regVal &= ~(SPC_REG_RESET_BDMA_CORE | SPC_REG_RESET_OSSP);
	pm8001_cw32(pm8001_ha, 2, SPC_REG_RESET, regVal);

	/* step 8: delay 10 usec */
	udelay(10);

	/* step 9: bring the BDMA and OSSP out of reset */
	regVal = pm8001_cr32(pm8001_ha, 2, SPC_REG_RESET);
	pm8001_dbg(pm8001_ha, INIT,
		   "Top Register before bringing up BDMA/OSSP:= 0x%x\n",
		   regVal);
	regVal |= (SPC_REG_RESET_BDMA_CORE | SPC_REG_RESET_OSSP);
	pm8001_cw32(pm8001_ha, 2, SPC_REG_RESET, regVal);

	/* step 10: delay 10 usec */
	udelay(10);
	/* step 11: read and set the GSM Configuration and Reset Register */
	/* map 0x0700000 to BAR4(0x20), BAR2(win) */
	if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_ADDR_BASE)) {
		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
		pm8001_dbg(pm8001_ha, FAIL, "SPC Shift Bar4 to 0x%x failed\n",
			   GSM_ADDR_BASE);
		return -1;
	}
	pm8001_dbg(pm8001_ha, INIT,
		   "GSM 0x0 (0x00007b88)-GSM Configuration and Reset = 0x%x\n",
		   pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET));
	regVal = pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET);
	/* Put those bits to high */
	/* GSM XCBI offset = 0x70 0000
	0x00 Bit 13 COM_SLV_SW_RSTB 1
	0x00 Bit 12 QSSP_SW_RSTB 1
	0x00 Bit 11 RAAE_SW_RSTB 1
	0x00 Bit 9 RB_1_SW_RSTB 1
	0x00 Bit 8 SM_SW_RSTB 1
	*/
	regVal |= (GSM_CONFIG_RESET_VALUE);
	pm8001_cw32(pm8001_ha, 2, GSM_CONFIG_RESET, regVal);
	pm8001_dbg(pm8001_ha, INIT, "GSM (0x00004088 ==> 0x00007b88) - GSM Configuration and Reset is set to = 0x%x\n",
		   pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET));

	/* step 12: Restore GSM - Read Address Parity Check */
	regVal = pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK);
	/* just for debugging */
	pm8001_dbg(pm8001_ha, INIT,
		   "GSM 0x700038 - Read Address Parity Check Enable = 0x%x\n",
		   regVal);
	pm8001_cw32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK, regVal1);
	pm8001_dbg(pm8001_ha, INIT, "GSM 0x700038 - Read Address Parity Check Enable is set to = 0x%x\n",
		   pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK));
	/* Restore GSM - Write Address Parity Check */
	regVal = pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK);
	pm8001_cw32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK, regVal2);
	pm8001_dbg(pm8001_ha, INIT,
		   "GSM 0x700040 - Write Address Parity Check Enable is set to = 0x%x\n",
		   pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK));
	/* Restore GSM - Write Data Parity Check */
	regVal = pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK);
	pm8001_cw32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK, regVal3);
	pm8001_dbg(pm8001_ha, INIT,
		   "GSM 0x700048 - Write Data Parity Check Enable is set to = 0x%x\n",
		   pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK));
	/* step 13: bring the IOP and AAP1 out of reset */
	/* map 0x00000 to BAR4(0x20), BAR2(win) */
	if (-1 == pm8001_bar4_shift(pm8001_ha, SPC_TOP_LEVEL_ADDR_BASE)) {
		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
		pm8001_dbg(pm8001_ha, FAIL, "Shift Bar4 to 0x%x failed\n",
			   SPC_TOP_LEVEL_ADDR_BASE);
		return -1;
	}
	regVal = pm8001_cr32(pm8001_ha, 2, SPC_REG_RESET);
	regVal |= (SPC_REG_RESET_PCS_IOP_SS | SPC_REG_RESET_PCS_AAP1_SS);
	pm8001_cw32(pm8001_ha, 2, SPC_REG_RESET, regVal);

	/* step 14: delay 10 usec - Normal Mode */
	udelay(10);
	/* check Soft Reset Normal mode or Soft Reset HDA mode */
	if (signature == SPC_SOFT_RESET_SIGNATURE) {
		/* step 15 (Normal Mode): wait until scratch pad1 register
		bit 2 toggled */
		max_wait_count = 2 * 1000 * 1000;/* 2 sec */
		do {
			udelay(1);
			regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1) &
				SCRATCH_PAD1_RST;
		} while ((regVal != toggleVal) && (--max_wait_count));

		if (!max_wait_count) {
			regVal = pm8001_cr32(pm8001_ha, 0,
				MSGU_SCRATCH_PAD_1);
			pm8001_dbg(pm8001_ha, FAIL, "TIMEOUT : ToggleVal 0x%x,MSGU_SCRATCH_PAD1 = 0x%x\n",
				   toggleVal, regVal);
			pm8001_dbg(pm8001_ha, FAIL,
				   "SCRATCH_PAD0 value = 0x%x\n",
				   pm8001_cr32(pm8001_ha, 0,
					       MSGU_SCRATCH_PAD_0));
			pm8001_dbg(pm8001_ha, FAIL,
				   "SCRATCH_PAD2 value = 0x%x\n",
				   pm8001_cr32(pm8001_ha, 0,
					       MSGU_SCRATCH_PAD_2));
			pm8001_dbg(pm8001_ha, FAIL,
				   "SCRATCH_PAD3 value = 0x%x\n",
				   pm8001_cr32(pm8001_ha, 0,
					       MSGU_SCRATCH_PAD_3));
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			return -1;
		}

		/* step 16 (Normal) - Clear ODMR and ODCR */
		pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL);
		pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_CLEAR_ALL);

		/* step 17 (Normal Mode): wait for the FW and IOP to get
		ready - 1 sec timeout */
		/* Wait for the SPC Configuration Table to be ready */
		if (check_fw_ready(pm8001_ha) == -1) {
			regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
			/* return error if MPI Configuration Table not ready */
			pm8001_dbg(pm8001_ha, INIT,
				   "FW not ready SCRATCH_PAD1 = 0x%x\n",
				   regVal);
			regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2);
			/* return error if MPI Configuration Table not ready */
			pm8001_dbg(pm8001_ha, INIT,
				   "FW not ready SCRATCH_PAD2 = 0x%x\n",
				   regVal);
			pm8001_dbg(pm8001_ha, INIT,
				   "SCRATCH_PAD0 value = 0x%x\n",
				   pm8001_cr32(pm8001_ha, 0,
					       MSGU_SCRATCH_PAD_0));
			pm8001_dbg(pm8001_ha, INIT,
				   "SCRATCH_PAD3 value = 0x%x\n",
				   pm8001_cr32(pm8001_ha, 0,
					       MSGU_SCRATCH_PAD_3));
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			return -EBUSY;
		}
	}
	pm8001_bar4_shift(pm8001_ha, 0);
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);

	pm8001_dbg(pm8001_ha, INIT, "SPC soft reset Complete\n");
	return 0;
}
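
/*
 * pm8001_hw_chip_rst - hard reset the SPC by toggling SPC_REG_RESET_DEVICE in
 * the top-level reset register, then wait roughly 20 ms for the firmware to
 * finish reloading.
 */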
static void pm8001_hw_chip_rst(struct pm8001_hba_info *pm8001_ha)
{
	u32 i = 0;
	u32 regVal;

	pm8001_dbg(pm8001_ha, INIT, "chip reset start\n");

	/* do SPC chip reset. */
	regVal = pm8001_cr32(pm8001_ha, 1, SPC_REG_RESET);
	regVal &= ~(SPC_REG_RESET_DEVICE);
	pm8001_cw32(pm8001_ha, 1, SPC_REG_RESET, regVal);
	/* delay 10 usec */
	udelay(10);

	/* bring chip reset out of reset */
	regVal = pm8001_cr32(pm8001_ha, 1, SPC_REG_RESET);
	regVal |= SPC_REG_RESET_DEVICE;
	pm8001_cw32(pm8001_ha, 1, SPC_REG_RESET, regVal);
	/* delay 10 usec */
	udelay(10);

	/* wait for 20 msec until the firmware gets reloaded */
	i = 20;
	do {
		mdelay(1);
	} while ((--i) != 0);

	pm8001_dbg(pm8001_ha, INIT, "chip reset finished\n");
}
/**
 * pm8001_chip_iounmap - unmap the regions that were mapped at init time.
 * @pm8001_ha: our hba card information
 */
void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha)
{
	s8 bar, logical = 0;

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		/*
		** logical BARs for SPC:
		** bar 0 and 1 - logical BAR0
		** bar 2 and 3 - logical BAR1
		** bar4 - logical BAR2
		** bar5 - logical BAR3
		** Skip the appropriate assignments:
		*/
		if ((bar == 1) || (bar == 3))
			continue;
		if (pm8001_ha->io_mem[logical].memvirtaddr)
			iounmap(pm8001_ha->io_mem[logical].memvirtaddr);
		logical++;
	}
}
#ifndef PM8001_USE_MSIX
/**
 * pm8001_chip_intx_interrupt_enable - enable PM8001 chip interrupt
 * @pm8001_ha: our hba card information
 */
static void
pm8001_chip_intx_interrupt_enable(struct pm8001_hba_info *pm8001_ha)
{
	pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_CLEAR_ALL);
	pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL);
}

/**
 * pm8001_chip_intx_interrupt_disable - disable PM8001 chip interrupt
 * @pm8001_ha: our hba card information
 */
static void
pm8001_chip_intx_interrupt_disable(struct pm8001_hba_info *pm8001_ha)
{
	pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_MASK_ALL);
}

#else

/**
 * pm8001_chip_msix_interrupt_enable - enable PM8001 chip interrupt
 * @pm8001_ha: our hba card information
 * @int_vec_idx: interrupt number to enable
 */
static void
pm8001_chip_msix_interrupt_enable(struct pm8001_hba_info *pm8001_ha,
	u32 int_vec_idx)
{
	u32 msi_index;
	u32 value;

	msi_index = int_vec_idx * MSIX_TABLE_ELEMENT_SIZE;
	msi_index += MSIX_TABLE_BASE;
	pm8001_cw32(pm8001_ha, 0, msi_index, MSIX_INTERRUPT_ENABLE);
	value = (1 << int_vec_idx);
	pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, value);
}

/**
 * pm8001_chip_msix_interrupt_disable - disable PM8001 chip interrupt
 * @pm8001_ha: our hba card information
 * @int_vec_idx: interrupt number to disable
 */
static void
pm8001_chip_msix_interrupt_disable(struct pm8001_hba_info *pm8001_ha,
	u32 int_vec_idx)
{
	u32 msi_index;

	msi_index = int_vec_idx * MSIX_TABLE_ELEMENT_SIZE;
	msi_index += MSIX_TABLE_BASE;
	pm8001_cw32(pm8001_ha, 0, msi_index, MSIX_INTERRUPT_DISABLE);
}
#endif

/**
 * pm8001_chip_interrupt_enable - enable PM8001 chip interrupt
 * @pm8001_ha: our hba card information
 * @vec: unused
 */
static void
pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
#ifdef PM8001_USE_MSIX
	pm8001_chip_msix_interrupt_enable(pm8001_ha, 0);
#else
	pm8001_chip_intx_interrupt_enable(pm8001_ha);
#endif
}

/**
 * pm8001_chip_interrupt_disable - disable PM8001 chip interrupt
 * @pm8001_ha: our hba card information
 * @vec: unused
 */
static void
pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
#ifdef PM8001_USE_MSIX
	pm8001_chip_msix_interrupt_disable(pm8001_ha, 0);
#else
	pm8001_chip_intx_interrupt_disable(pm8001_ha);
#endif
}
/**
 * pm8001_mpi_msg_free_get - get a free message buffer for transfer on the
 * inbound queue.
 * @circularQ: the inbound queue we want to transfer to HBA.
 * @messageSize: the message size of this transfer, normally it is 64 bytes
 * @messagePtr: the pointer to message.
 */
int pm8001_mpi_msg_free_get(struct inbound_queue_table *circularQ,
			    u16 messageSize, void **messagePtr)
{
	u32 offset, consumer_index;
	struct mpi_msg_hdr *msgHeader;
	u8 bcCount = 1; /* only support single buffer */

	/* Check if the requested message size can be allocated in this queue */
	if (messageSize > IOMB_SIZE_SPCV) {
		*messagePtr = NULL;
		return -1;
	}

	/* Store the new consumer index */
	consumer_index = pm8001_read_32(circularQ->ci_virt);
	circularQ->consumer_index = cpu_to_le32(consumer_index);
	if (((circularQ->producer_idx + bcCount) % PM8001_MPI_QUEUE) ==
		le32_to_cpu(circularQ->consumer_index)) {
		*messagePtr = NULL;
		return -1;
	}
	/* get memory IOMB buffer address */
	offset = circularQ->producer_idx * messageSize;
	/* increment to next bcCount element */
	circularQ->producer_idx = (circularQ->producer_idx + bcCount)
				% PM8001_MPI_QUEUE;
	/* Add that distance to the base of the region virtual address plus
	the message header size */
	msgHeader = (struct mpi_msg_hdr *)(circularQ->base_virt + offset);
	*messagePtr = ((void *)msgHeader) + sizeof(struct mpi_msg_hdr);
	return 0;
}
/**
 * pm8001_mpi_build_cmd - build the message queue for transfer and update the
 * PI so the FW fetches this message from the IOMB.
 * @pm8001_ha: our hba card information
 * @circularQ: the inbound queue we want to transfer to HBA.
 * @opCode: the operation code represents commands which LLDD and fw recognized.
 * @payload: the command payload of each operation command.
 * @nb: size in bytes of the command payload
 * @responseQueue: queue to interrupt on w/ command response (if any)
 */
int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
			 struct inbound_queue_table *circularQ,
			 u32 opCode, void *payload, size_t nb,
			 u32 responseQueue)
{
	u32 Header = 0, hpriority = 0, bc = 1, category = 0x02;
	void *pMessage;
	unsigned long flags;
	int q_index = circularQ - pm8001_ha->inbnd_q_tbl;
	int rv;

	WARN_ON(q_index >= PM8001_MAX_INB_NUM);
	spin_lock_irqsave(&circularQ->iq_lock, flags);
	rv = pm8001_mpi_msg_free_get(circularQ, pm8001_ha->iomb_size,
				     &pMessage);
	if (rv < 0) {
		pm8001_dbg(pm8001_ha, IO, "No free mpi buffer\n");
		rv = -ENOMEM;
		goto done;
	}

	if (nb > (pm8001_ha->iomb_size - sizeof(struct mpi_msg_hdr)))
		nb = pm8001_ha->iomb_size - sizeof(struct mpi_msg_hdr);
	memcpy(pMessage, payload, nb);
	if (nb + sizeof(struct mpi_msg_hdr) < pm8001_ha->iomb_size)
		memset(pMessage + nb, 0, pm8001_ha->iomb_size -
				(nb + sizeof(struct mpi_msg_hdr)));
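
	/*
	 * IOMB header layout built below: bit 31 marks the entry valid,
	 * bit 30 is the high-priority flag, bits [28:24] the buffer count,
	 * bits [21:16] the outbound queue for the response, bits [15:12]
	 * the command category and bits [11:0] the opcode.
	 */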
	/* Build the header */
	Header = ((1 << 31) | (hpriority << 30) | ((bc & 0x1f) << 24)
		| ((responseQueue & 0x3F) << 16)
		| ((category & 0xF) << 12) | (opCode & 0xFFF));

	pm8001_write_32((pMessage - 4), 0, cpu_to_le32(Header));
	/* Update the PI to the firmware */
	pm8001_cw32(pm8001_ha, circularQ->pi_pci_bar,
		circularQ->pi_offset, circularQ->producer_idx);
	pm8001_dbg(pm8001_ha, DEVIO,
		   "INB Q %x OPCODE:%x , UPDATED PI=%d CI=%d\n",
		   responseQueue, opCode, circularQ->producer_idx,
		   circularQ->consumer_index);
done:
	spin_unlock_irqrestore(&circularQ->iq_lock, flags);
	return rv;
}
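
/*
 * pm8001_mpi_msg_free_set - mark an outbound queue entry as consumed: advance
 * the local consumer index by @bc buffer elements, write it back to the chip
 * through the queue's CI register, and re-read the firmware's producer index.
 */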
u32 pm8001_mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
			    struct outbound_queue_table *circularQ, u8 bc)
{
	u32 producer_index;
	struct mpi_msg_hdr *msgHeader;
	struct mpi_msg_hdr *pOutBoundMsgHeader;

	msgHeader = (struct mpi_msg_hdr *)(pMsg - sizeof(struct mpi_msg_hdr));
	pOutBoundMsgHeader = (struct mpi_msg_hdr *)(circularQ->base_virt +
				circularQ->consumer_idx * pm8001_ha->iomb_size);
	if (pOutBoundMsgHeader != msgHeader) {
		pm8001_dbg(pm8001_ha, FAIL,
			   "consumer_idx = %d msgHeader = %p\n",
			   circularQ->consumer_idx, msgHeader);

		/* Update the producer index from SPC */
		producer_index = pm8001_read_32(circularQ->pi_virt);
		circularQ->producer_index = cpu_to_le32(producer_index);
		pm8001_dbg(pm8001_ha, FAIL,
			   "consumer_idx = %d producer_index = %d msgHeader = %p\n",
			   circularQ->consumer_idx,
			   circularQ->producer_index, msgHeader);
		return 0;
	}
	/* free the circular queue buffer elements associated with the message */
	circularQ->consumer_idx = (circularQ->consumer_idx + bc)
				% PM8001_MPI_QUEUE;
	/* update the CI of outbound queue */
	pm8001_cw32(pm8001_ha, circularQ->ci_pci_bar, circularQ->ci_offset,
		circularQ->consumer_idx);
	/* Update the producer index from SPC */
	producer_index = pm8001_read_32(circularQ->pi_virt);
	circularQ->producer_index = cpu_to_le32(producer_index);
	pm8001_dbg(pm8001_ha, IO, " CI=%d PI=%d\n",
		   circularQ->consumer_idx, circularQ->producer_index);
	return 0;
}
/**
 * pm8001_mpi_msg_consume - get the MPI message from the outbound queue
 * message table.
 * @pm8001_ha: our hba card information
 * @circularQ: the outbound queue table.
 * @messagePtr1: the message contents of this outbound message.
 * @pBC: the message size.
 */
u32 pm8001_mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
			   struct outbound_queue_table *circularQ,
			   void **messagePtr1, u8 *pBC)
{
	struct mpi_msg_hdr *msgHeader;
	__le32 msgHeader_tmp;
	u32 header_tmp;

	do {
		/* If there are not-yet-delivered messages ... */
		if (le32_to_cpu(circularQ->producer_index)
			!= circularQ->consumer_idx) {
			/* Get the pointer to the circular queue buffer element */
			msgHeader = (struct mpi_msg_hdr *)
				(circularQ->base_virt +
				circularQ->consumer_idx * pm8001_ha->iomb_size);
			/* read header */
			header_tmp = pm8001_read_32(msgHeader);
			msgHeader_tmp = cpu_to_le32(header_tmp);
			pm8001_dbg(pm8001_ha, DEVIO,
				   "outbound opcode msgheader:%x ci=%d pi=%d\n",
				   msgHeader_tmp, circularQ->consumer_idx,
				   circularQ->producer_index);
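			/*
			 * Header bit 31 is the firmware's "valid" flag for
			 * this entry; bits [28:24] give the number of buffer
			 * elements it occupies and bits [11:0] the outbound
			 * opcode (OPC_OUB_SKIP_ENTRY entries are skipped).
			 */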
			if (0 != (le32_to_cpu(msgHeader_tmp) & 0x80000000)) {
				if (OPC_OUB_SKIP_ENTRY !=
					(le32_to_cpu(msgHeader_tmp) & 0xfff)) {
					*messagePtr1 =
						((u8 *)msgHeader) +
						sizeof(struct mpi_msg_hdr);
					*pBC = (u8)((le32_to_cpu(msgHeader_tmp)
						>> 24) & 0x1f);
					pm8001_dbg(pm8001_ha, IO,
						   ": CI=%d PI=%d msgHeader=%x\n",
						   circularQ->consumer_idx,
						   circularQ->producer_index,
						   msgHeader_tmp);
					return MPI_IO_STATUS_SUCCESS;
				} else {
					circularQ->consumer_idx =
						(circularQ->consumer_idx +
						((le32_to_cpu(msgHeader_tmp)
						  >> 24) & 0x1f))
						% PM8001_MPI_QUEUE;
					msgHeader_tmp = 0;
					pm8001_write_32(msgHeader, 0, 0);
					/* update the CI of outbound queue */
					pm8001_cw32(pm8001_ha,
						circularQ->ci_pci_bar,
						circularQ->ci_offset,
						circularQ->consumer_idx);
				}
			} else {
				circularQ->consumer_idx =
					(circularQ->consumer_idx +
					((le32_to_cpu(msgHeader_tmp) >> 24) &
					0x1f)) % PM8001_MPI_QUEUE;
				msgHeader_tmp = 0;
				pm8001_write_32(msgHeader, 0, 0);
				/* update the CI of outbound queue */
				pm8001_cw32(pm8001_ha, circularQ->ci_pci_bar,
					circularQ->ci_offset,
					circularQ->consumer_idx);
				return MPI_IO_STATUS_FAIL;
			}
		} else {
			u32 producer_index;
			void *pi_virt = circularQ->pi_virt;
			/* spurious interrupt during setup if
			 * kexec-ing and driver doing a doorbell access
			 * with the pre-kexec oq interrupt setup
			 */
			if (!pi_virt)
				break;
			/* Update the producer index from SPC */
			producer_index = pm8001_read_32(pi_virt);
			circularQ->producer_index = cpu_to_le32(producer_index);
		}
	} while (le32_to_cpu(circularQ->producer_index) !=
		circularQ->consumer_idx);
	/* while we don't have any more not-yet-delivered messages */
	return MPI_IO_STATUS_BUSY;
}
void pm8001_work_fn(struct work_struct *work)
{
	struct pm8001_work *pw = container_of(work, struct pm8001_work, work);
	struct pm8001_device *pm8001_dev;
	struct domain_device *dev;

	/*
	 * So far, all users of this stash an associated structure here.
	 * If we get here, and this pointer is null, then the action
	 * was cancelled. This nullification happens when the device
	 * goes away.
	 */
	if (pw->handler != IO_FATAL_ERROR) {
		pm8001_dev = pw->data; /* Most stash device structure */
		if ((pm8001_dev == NULL)
		    || ((pw->handler != IO_XFER_ERROR_BREAK)
			&& (pm8001_dev->dev_type == SAS_PHY_UNUSED))) {
			kfree(pw);
			return;
		}
	}

	switch (pw->handler) {
	case IO_XFER_ERROR_BREAK:
	{	/* This one stashes the sas_task instead */
		struct sas_task *t = (struct sas_task *)pm8001_dev;
		u32 tag;
		struct pm8001_ccb_info *ccb;
		struct pm8001_hba_info *pm8001_ha = pw->pm8001_ha;
		unsigned long flags, flags1;
		struct task_status_struct *ts;
		int i;

		if (pm8001_query_task(t) == TMF_RESP_FUNC_SUCC)
			break; /* Task still on lu */
		spin_lock_irqsave(&pm8001_ha->lock, flags);

		spin_lock_irqsave(&t->task_state_lock, flags1);
		if (unlikely((t->task_state_flags & SAS_TASK_STATE_DONE))) {
			spin_unlock_irqrestore(&t->task_state_lock, flags1);
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			break; /* Task got completed by another */
		}
		spin_unlock_irqrestore(&t->task_state_lock, flags1);

		/* Search for a possible ccb that matches the task */
		for (i = 0; ccb = NULL, i < PM8001_MAX_CCB; i++) {
			ccb = &pm8001_ha->ccb_info[i];
			tag = ccb->ccb_tag;
			if ((tag != 0xFFFFFFFF) && (ccb->task == t))
				break;
		}
		if (!ccb) {
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			break; /* Task got freed by another */
		}
		ts = &t->task_status;
		ts->resp = SAS_TASK_COMPLETE;
		/* Force the midlayer to retry */
		ts->stat = SAS_QUEUE_FULL;
		pm8001_dev = ccb->device;
		if (pm8001_dev)
			atomic_dec(&pm8001_dev->running_req);
		spin_lock_irqsave(&t->task_state_lock, flags1);
		t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
		t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
		t->task_state_flags |= SAS_TASK_STATE_DONE;
		if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
			spin_unlock_irqrestore(&t->task_state_lock, flags1);
			pm8001_dbg(pm8001_ha, FAIL, "task 0x%p done with event 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
				   t, pw->handler, ts->resp, ts->stat);
			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
		} else {
			spin_unlock_irqrestore(&t->task_state_lock, flags1);
			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
			mb();/* in order to force CPU ordering */
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			t->task_done(t);
		}
	}	break;
	case IO_XFER_OPEN_RETRY_TIMEOUT:
	{	/* This one stashes the sas_task instead */
		struct sas_task *t = (struct sas_task *)pm8001_dev;
		u32 tag;
		struct pm8001_ccb_info *ccb;
		struct pm8001_hba_info *pm8001_ha = pw->pm8001_ha;
		unsigned long flags, flags1;
		int i, ret = 0;

		pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n");

		ret = pm8001_query_task(t);

		if (ret == TMF_RESP_FUNC_SUCC)
			pm8001_dbg(pm8001_ha, IO, "...Task on lu\n");
		else if (ret == TMF_RESP_FUNC_COMPLETE)
			pm8001_dbg(pm8001_ha, IO, "...Task NOT on lu\n");
		else
			pm8001_dbg(pm8001_ha, DEVIO, "...query task failed!!!\n");

		spin_lock_irqsave(&pm8001_ha->lock, flags);

		spin_lock_irqsave(&t->task_state_lock, flags1);

		if (unlikely((t->task_state_flags & SAS_TASK_STATE_DONE))) {
			spin_unlock_irqrestore(&t->task_state_lock, flags1);
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			if (ret == TMF_RESP_FUNC_SUCC) /* task on lu */
				(void)pm8001_abort_task(t);
			break; /* Task got completed by another */
		}

		spin_unlock_irqrestore(&t->task_state_lock, flags1);

		/* Search for a possible ccb that matches the task */
		for (i = 0; ccb = NULL, i < PM8001_MAX_CCB; i++) {
			ccb = &pm8001_ha->ccb_info[i];
			tag = ccb->ccb_tag;
			if ((tag != 0xFFFFFFFF) && (ccb->task == t))
				break;
		}
		if (!ccb) {
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			if (ret == TMF_RESP_FUNC_SUCC) /* task on lu */
				(void)pm8001_abort_task(t);
			break; /* Task got freed by another */
		}

		pm8001_dev = ccb->device;
		dev = pm8001_dev->sas_device;

		switch (ret) {
		case TMF_RESP_FUNC_SUCC: /* task on lu */
			ccb->open_retry = 1; /* Snub completion */
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			ret = pm8001_abort_task(t);
			ccb->open_retry = 0;
			switch (ret) {
			case TMF_RESP_FUNC_SUCC:
			case TMF_RESP_FUNC_COMPLETE:
				break;
			default: /* device misbehavior */
				ret = TMF_RESP_FUNC_FAILED;
				pm8001_dbg(pm8001_ha, IO, "...Reset phy\n");
				pm8001_I_T_nexus_reset(dev);
				break;
			}
			break;
		case TMF_RESP_FUNC_COMPLETE: /* task not on lu */
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			/* Do we need to abort the task locally? */
			break;
		default: /* device misbehavior */
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			ret = TMF_RESP_FUNC_FAILED;
			pm8001_dbg(pm8001_ha, IO, "...Reset phy\n");
			pm8001_I_T_nexus_reset(dev);
		}

		if (ret == TMF_RESP_FUNC_FAILED)
			t = NULL;
		pm8001_open_reject_retry(pm8001_ha, t, pm8001_dev);
		pm8001_dbg(pm8001_ha, IO, "...Complete\n");
	}	break;
	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
		dev = pm8001_dev->sas_device;
		pm8001_I_T_nexus_event_handler(dev);
		break;
	case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
		dev = pm8001_dev->sas_device;
		pm8001_I_T_nexus_reset(dev);
		break;
	case IO_DS_IN_ERROR:
		dev = pm8001_dev->sas_device;
		pm8001_I_T_nexus_reset(dev);
		break;
	case IO_DS_NON_OPERATIONAL:
		dev = pm8001_dev->sas_device;
		pm8001_I_T_nexus_reset(dev);
		break;
1675 case IO_FATAL_ERROR:
1677 struct pm8001_hba_info *pm8001_ha = pw->pm8001_ha;
1678 struct pm8001_ccb_info *ccb;
1679 struct task_status_struct *ts;
1680 struct sas_task *task;
1684 for (i = 0; ccb = NULL, i < PM8001_MAX_CCB; i++) {
1685 ccb = &pm8001_ha->ccb_info[i];
1687 ts = &task->task_status;
1689 /* check if tag is NULL */
1691 pm8001_dbg(pm8001_ha, FAIL,
1698 pm8001_dbg(pm8001_ha, FAIL,
1702 /* complete the sas task and report the result to the upper layer */
1703 pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
1704 ts->resp = SAS_TASK_COMPLETE;
1705 task->task_done(task);
1706 } else if (tag != 0xFFFFFFFF) {
1707 /* complete the internal commands/non-sas task */
1708 pm8001_dev = ccb->device;
1709 if (pm8001_dev->dcompletion) {
1710 complete(pm8001_dev->dcompletion);
1711 pm8001_dev->dcompletion = NULL;
1713 complete(pm8001_ha->nvmd_completion);
1714 pm8001_tag_free(pm8001_ha, tag);
1717 /* Deregister all the device ids */
1718 for (i = 0; i < PM8001_MAX_DEVICES; i++) {
1719 pm8001_dev = &pm8001_ha->devices[i];
1720 device_id = pm8001_dev->device_id;
1722 PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id);
1723 pm8001_free_dev(pm8001_dev);
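/*
 * pm8001_handle_event() defers event handling to the pm8001 workqueue.
 * It is typically called from interrupt/completion context, so the work
 * item is allocated with GFP_ATOMIC and later run by pm8001_work_fn().
 */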
1731 int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data,
1734 struct pm8001_work *pw;
1737 pw = kmalloc(sizeof(struct pm8001_work), GFP_ATOMIC);
1739 pw->pm8001_ha = pm8001_ha;
1741 pw->handler = handler;
1742 INIT_WORK(&pw->work, pm8001_work_fn);
1743 queue_work(pm8001_wq, &pw->work);
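/*
 * pm8001_send_abort_all() builds an OPC_INB_SATA_ABORT IOMB with the
 * abort_all flag set and posts it on inbound queue 0, aborting every
 * outstanding command on the given SATA device (used as part of NCQ
 * error recovery).
 */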
1750 static void pm8001_send_abort_all(struct pm8001_hba_info *pm8001_ha,
1751 struct pm8001_device *pm8001_ha_dev)
1755 struct pm8001_ccb_info *ccb;
1756 struct sas_task *task = NULL;
1757 struct task_abort_req task_abort;
1758 struct inbound_queue_table *circularQ;
1759 u32 opc = OPC_INB_SATA_ABORT;
1762 if (!pm8001_ha_dev) {
1763 pm8001_dbg(pm8001_ha, FAIL, "dev is null\n");
1767 task = sas_alloc_slow_task(GFP_ATOMIC);
1770 pm8001_dbg(pm8001_ha, FAIL, "cannot allocate task\n");
1774 task->task_done = pm8001_task_done;
1776 res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
1780 ccb = &pm8001_ha->ccb_info[ccb_tag];
1781 ccb->device = pm8001_ha_dev;
1782 ccb->ccb_tag = ccb_tag;
1785 circularQ = &pm8001_ha->inbnd_q_tbl[0];
1787 memset(&task_abort, 0, sizeof(task_abort));
1788 task_abort.abort_all = cpu_to_le32(1);
1789 task_abort.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
1790 task_abort.tag = cpu_to_le32(ccb_tag);
1792 ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort,
1793 sizeof(task_abort), 0);
1795 pm8001_tag_free(pm8001_ha, ccb_tag);
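/*
 * pm8001_send_read_log() issues a READ LOG EXT command to a SATA drive
 * after an NCQ error. Reading the NCQ command error log clears the
 * drive's error state; once the read completes, all outstanding commands
 * on the device are aborted (see pm8001_send_abort_all()).
 */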
1799 static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha,
1800 struct pm8001_device *pm8001_ha_dev)
1802 struct sata_start_req sata_cmd;
1805 struct pm8001_ccb_info *ccb;
1806 struct sas_task *task = NULL;
1807 struct host_to_dev_fis fis;
1808 struct domain_device *dev;
1809 struct inbound_queue_table *circularQ;
1810 u32 opc = OPC_INB_SATA_HOST_OPSTART;
1812 task = sas_alloc_slow_task(GFP_ATOMIC);
1815 pm8001_dbg(pm8001_ha, FAIL, "cannot allocate task !!!\n");
1818 task->task_done = pm8001_task_done;
1820 res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
1822 sas_free_task(task);
1823 pm8001_dbg(pm8001_ha, FAIL, "cannot allocate tag !!!\n");
1827 /* allocate the domain device ourselves, as libsas
1828 * is not going to provide one
1830 dev = kzalloc(sizeof(struct domain_device), GFP_ATOMIC);
1832 sas_free_task(task);
1833 pm8001_tag_free(pm8001_ha, ccb_tag);
1834 pm8001_dbg(pm8001_ha, FAIL,
1835 "Domain device cannot be allocated\n");
1839 task->dev->lldd_dev = pm8001_ha_dev;
1841 ccb = &pm8001_ha->ccb_info[ccb_tag];
1842 ccb->device = pm8001_ha_dev;
1843 ccb->ccb_tag = ccb_tag;
1845 pm8001_ha_dev->id |= NCQ_READ_LOG_FLAG;
1846 pm8001_ha_dev->id |= NCQ_2ND_RLE_FLAG;
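/*
 * The upper bits of the device id field double as state flags:
 * NCQ_READ_LOG_FLAG marks that a READ LOG EXT is in flight so that its
 * completion can be told apart from normal IO in mpi_sata_completion().
 */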
1848 memset(&sata_cmd, 0, sizeof(sata_cmd));
1849 circularQ = &pm8001_ha->inbnd_q_tbl[0];
1851 /* construct read log FIS */
1852 memset(&fis, 0, sizeof(struct host_to_dev_fis));
1853 fis.fis_type = 0x27;
1855 fis.command = ATA_CMD_READ_LOG_EXT;
1857 fis.sector_count = 0x1;
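/*
 * fis_type 0x27 is a host-to-device register FIS; together with
 * ATA_CMD_READ_LOG_EXT and a sector count of 1 this requests one
 * sector of the error log from the drive.
 */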
1859 sata_cmd.tag = cpu_to_le32(ccb_tag);
1860 sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
1861 sata_cmd.ncqtag_atap_dir_m |= ((0x1 << 7) | (0x5 << 9));
1862 memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis));
1864 res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd,
1865 sizeof(sata_cmd), 0);
1867 sas_free_task(task);
1868 pm8001_tag_free(pm8001_ha, ccb_tag);
1874 * mpi_ssp_completion - process the completion event the FW posts for an SSP request.
1875 * @pm8001_ha: our hba card information
1876 * @piomb: the message contents of this outbound message.
1878 * When the FW has completed an SSP request, for example an IO request, and
1879 * has filled the SG buffers with data, it triggers this event to indicate
1880 * that it has finished the job; check the corresponding buffer.
1881 * We then notify the caller that may be waiting for the result so the upper
1882 * layer can be told that the task has finished.
1885 mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
1888 struct pm8001_ccb_info *ccb;
1889 unsigned long flags;
1893 struct ssp_completion_resp *psspPayload;
1894 struct task_status_struct *ts;
1895 struct ssp_response_iu *iu;
1896 struct pm8001_device *pm8001_dev;
1897 psspPayload = (struct ssp_completion_resp *)(piomb + 4);
1898 status = le32_to_cpu(psspPayload->status);
1899 tag = le32_to_cpu(psspPayload->tag);
1900 ccb = &pm8001_ha->ccb_info[tag];
1901 if ((status == IO_ABORTED) && ccb->open_retry) {
1902 /* Being completed by another */
1903 ccb->open_retry = 0;
1906 pm8001_dev = ccb->device;
1907 param = le32_to_cpu(psspPayload->param);
1911 if (status && status != IO_UNDERFLOW)
1912 pm8001_dbg(pm8001_ha, FAIL, "sas IO status 0x%x\n", status);
1913 if (unlikely(!t || !t->lldd_task || !t->dev))
1915 ts = &t->task_status;
1916 /* Print the SAS address of the device on which the IO failed */
1917 if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
1918 (status != IO_UNDERFLOW))
1919 pm8001_dbg(pm8001_ha, FAIL, "SAS Address of IO Failure Drive:%016llx\n",
1920 SAS_ADDR(t->dev->sas_addr));
1923 pm8001_dbg(pm8001_ha, IOERR,
1924 "status:0x%x, tag:0x%x, task:0x%p\n",
1929 pm8001_dbg(pm8001_ha, IO, "IO_SUCCESS,param = %d\n",
1932 ts->resp = SAS_TASK_COMPLETE;
1933 ts->stat = SAS_SAM_STAT_GOOD;
1935 ts->resp = SAS_TASK_COMPLETE;
1936 ts->stat = SAS_PROTO_RESPONSE;
1937 ts->residual = param;
1938 iu = &psspPayload->ssp_resp_iu;
1939 sas_ssp_task_response(pm8001_ha->dev, t, iu);
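/*
 * A non-zero param means the target returned an SSP response IU
 * (e.g. sense data); sas_ssp_task_response() above copies it into
 * the task for the upper layer.
 */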
1942 atomic_dec(&pm8001_dev->running_req);
1945 pm8001_dbg(pm8001_ha, IO, "IO_ABORTED IOMB Tag\n");
1946 ts->resp = SAS_TASK_COMPLETE;
1947 ts->stat = SAS_ABORTED_TASK;
1950 /* SSP Completion with error */
1951 pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW,param = %d\n",
1953 ts->resp = SAS_TASK_COMPLETE;
1954 ts->stat = SAS_DATA_UNDERRUN;
1955 ts->residual = param;
1957 atomic_dec(&pm8001_dev->running_req);
1960 pm8001_dbg(pm8001_ha, IO, "IO_NO_DEVICE\n");
1961 ts->resp = SAS_TASK_UNDELIVERED;
1962 ts->stat = SAS_PHY_DOWN;
1964 case IO_XFER_ERROR_BREAK:
1965 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
1966 ts->resp = SAS_TASK_COMPLETE;
1967 ts->stat = SAS_OPEN_REJECT;
1968 /* Force the midlayer to retry */
1969 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
1971 case IO_XFER_ERROR_PHY_NOT_READY:
1972 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n");
1973 ts->resp = SAS_TASK_COMPLETE;
1974 ts->stat = SAS_OPEN_REJECT;
1975 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
1977 case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
1978 pm8001_dbg(pm8001_ha, IO,
1979 "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n");
1980 ts->resp = SAS_TASK_COMPLETE;
1981 ts->stat = SAS_OPEN_REJECT;
1982 ts->open_rej_reason = SAS_OREJ_EPROTO;
1984 case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
1985 pm8001_dbg(pm8001_ha, IO,
1986 "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n");
1987 ts->resp = SAS_TASK_COMPLETE;
1988 ts->stat = SAS_OPEN_REJECT;
1989 ts->open_rej_reason = SAS_OREJ_UNKNOWN;
1991 case IO_OPEN_CNX_ERROR_BREAK:
1992 pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n");
1993 ts->resp = SAS_TASK_COMPLETE;
1994 ts->stat = SAS_OPEN_REJECT;
1995 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
1997 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
1998 pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n");
1999 ts->resp = SAS_TASK_COMPLETE;
2000 ts->stat = SAS_OPEN_REJECT;
2001 ts->open_rej_reason = SAS_OREJ_UNKNOWN;
2003 pm8001_handle_event(pm8001_ha,
2005 IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
2007 case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
2008 pm8001_dbg(pm8001_ha, IO,
2009 "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n");
2010 ts->resp = SAS_TASK_COMPLETE;
2011 ts->stat = SAS_OPEN_REJECT;
2012 ts->open_rej_reason = SAS_OREJ_BAD_DEST;
2014 case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
2015 pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n");
2016 ts->resp = SAS_TASK_COMPLETE;
2017 ts->stat = SAS_OPEN_REJECT;
2018 ts->open_rej_reason = SAS_OREJ_CONN_RATE;
2020 case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
2021 pm8001_dbg(pm8001_ha, IO,
2022 "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n");
2023 ts->resp = SAS_TASK_UNDELIVERED;
2024 ts->stat = SAS_OPEN_REJECT;
2025 ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
2027 case IO_XFER_ERROR_NAK_RECEIVED:
2028 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_NAK_RECEIVED\n");
2029 ts->resp = SAS_TASK_COMPLETE;
2030 ts->stat = SAS_OPEN_REJECT;
2031 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
2033 case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
2034 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_ACK_NAK_TIMEOUT\n");
2035 ts->resp = SAS_TASK_COMPLETE;
2036 ts->stat = SAS_NAK_R_ERR;
2038 case IO_XFER_ERROR_DMA:
2039 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_DMA\n");
2040 ts->resp = SAS_TASK_COMPLETE;
2041 ts->stat = SAS_OPEN_REJECT;
2043 case IO_XFER_OPEN_RETRY_TIMEOUT:
2044 pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n");
2045 ts->resp = SAS_TASK_COMPLETE;
2046 ts->stat = SAS_OPEN_REJECT;
2047 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
2049 case IO_XFER_ERROR_OFFSET_MISMATCH:
2050 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_OFFSET_MISMATCH\n");
2051 ts->resp = SAS_TASK_COMPLETE;
2052 ts->stat = SAS_OPEN_REJECT;
2054 case IO_PORT_IN_RESET:
2055 pm8001_dbg(pm8001_ha, IO, "IO_PORT_IN_RESET\n");
2056 ts->resp = SAS_TASK_COMPLETE;
2057 ts->stat = SAS_OPEN_REJECT;
2059 case IO_DS_NON_OPERATIONAL:
2060 pm8001_dbg(pm8001_ha, IO, "IO_DS_NON_OPERATIONAL\n");
2061 ts->resp = SAS_TASK_COMPLETE;
2062 ts->stat = SAS_OPEN_REJECT;
2064 pm8001_handle_event(pm8001_ha,
2066 IO_DS_NON_OPERATIONAL);
2068 case IO_DS_IN_RECOVERY:
2069 pm8001_dbg(pm8001_ha, IO, "IO_DS_IN_RECOVERY\n");
2070 ts->resp = SAS_TASK_COMPLETE;
2071 ts->stat = SAS_OPEN_REJECT;
2073 case IO_TM_TAG_NOT_FOUND:
2074 pm8001_dbg(pm8001_ha, IO, "IO_TM_TAG_NOT_FOUND\n");
2075 ts->resp = SAS_TASK_COMPLETE;
2076 ts->stat = SAS_OPEN_REJECT;
2078 case IO_SSP_EXT_IU_ZERO_LEN_ERROR:
2079 pm8001_dbg(pm8001_ha, IO, "IO_SSP_EXT_IU_ZERO_LEN_ERROR\n");
2080 ts->resp = SAS_TASK_COMPLETE;
2081 ts->stat = SAS_OPEN_REJECT;
2083 case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
2084 pm8001_dbg(pm8001_ha, IO,
2085 "IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n");
2086 ts->resp = SAS_TASK_COMPLETE;
2087 ts->stat = SAS_OPEN_REJECT;
2088 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
2091 pm8001_dbg(pm8001_ha, DEVIO, "Unknown status 0x%x\n", status);
2092 /* not allowed case. Therefore, return failed status */
2093 ts->resp = SAS_TASK_COMPLETE;
2094 ts->stat = SAS_OPEN_REJECT;
2097 pm8001_dbg(pm8001_ha, IO, "scsi_status = %x\n",
2098 psspPayload->ssp_resp_iu.status);
2099 spin_lock_irqsave(&t->task_state_lock, flags);
2100 t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
2101 t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
2102 t->task_state_flags |= SAS_TASK_STATE_DONE;
2103 if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
2104 spin_unlock_irqrestore(&t->task_state_lock, flags);
2105 pm8001_dbg(pm8001_ha, FAIL, "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
2106 t, status, ts->resp, ts->stat);
2107 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2109 spin_unlock_irqrestore(&t->task_state_lock, flags);
2110 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2111 mb();/* in order to force CPU ordering */
2116 /* See the comments for mpi_ssp_completion */
2117 static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
2120 unsigned long flags;
2121 struct task_status_struct *ts;
2122 struct pm8001_ccb_info *ccb;
2123 struct pm8001_device *pm8001_dev;
2124 struct ssp_event_resp *psspPayload =
2125 (struct ssp_event_resp *)(piomb + 4);
2126 u32 event = le32_to_cpu(psspPayload->event);
2127 u32 tag = le32_to_cpu(psspPayload->tag);
2128 u32 port_id = le32_to_cpu(psspPayload->port_id);
2129 u32 dev_id = le32_to_cpu(psspPayload->device_id);
2131 ccb = &pm8001_ha->ccb_info[tag];
2133 pm8001_dev = ccb->device;
2135 pm8001_dbg(pm8001_ha, FAIL, "sas IO status 0x%x\n", event);
2136 if (unlikely(!t || !t->lldd_task || !t->dev))
2138 ts = &t->task_status;
2139 pm8001_dbg(pm8001_ha, DEVIO, "port_id = %x,device_id = %x\n",
2143 pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW\n");
2144 ts->resp = SAS_TASK_COMPLETE;
2145 ts->stat = SAS_DATA_OVERRUN;
2148 atomic_dec(&pm8001_dev->running_req);
2150 case IO_XFER_ERROR_BREAK:
2151 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
2152 pm8001_handle_event(pm8001_ha, t, IO_XFER_ERROR_BREAK);
2154 case IO_XFER_ERROR_PHY_NOT_READY:
2155 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n");
2156 ts->resp = SAS_TASK_COMPLETE;
2157 ts->stat = SAS_OPEN_REJECT;
2158 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
2160 case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
2161 pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n");
2162 ts->resp = SAS_TASK_COMPLETE;
2163 ts->stat = SAS_OPEN_REJECT;
2164 ts->open_rej_reason = SAS_OREJ_EPROTO;
2166 case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
2167 pm8001_dbg(pm8001_ha, IO,
2168 "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n");
2169 ts->resp = SAS_TASK_COMPLETE;
2170 ts->stat = SAS_OPEN_REJECT;
2171 ts->open_rej_reason = SAS_OREJ_UNKNOWN;
2173 case IO_OPEN_CNX_ERROR_BREAK:
2174 pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n");
2175 ts->resp = SAS_TASK_COMPLETE;
2176 ts->stat = SAS_OPEN_REJECT;
2177 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
2179 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
2180 pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n");
2181 ts->resp = SAS_TASK_COMPLETE;
2182 ts->stat = SAS_OPEN_REJECT;
2183 ts->open_rej_reason = SAS_OREJ_UNKNOWN;
2185 pm8001_handle_event(pm8001_ha,
2187 IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
2189 case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
2190 pm8001_dbg(pm8001_ha, IO,
2191 "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n");
2192 ts->resp = SAS_TASK_COMPLETE;
2193 ts->stat = SAS_OPEN_REJECT;
2194 ts->open_rej_reason = SAS_OREJ_BAD_DEST;
2196 case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
2197 pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n");
2198 ts->resp = SAS_TASK_COMPLETE;
2199 ts->stat = SAS_OPEN_REJECT;
2200 ts->open_rej_reason = SAS_OREJ_CONN_RATE;
2202 case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
2203 pm8001_dbg(pm8001_ha, IO,
2204 "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n");
2205 ts->resp = SAS_TASK_COMPLETE;
2206 ts->stat = SAS_OPEN_REJECT;
2207 ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
2209 case IO_XFER_ERROR_NAK_RECEIVED:
2210 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_NAK_RECEIVED\n");
2211 ts->resp = SAS_TASK_COMPLETE;
2212 ts->stat = SAS_OPEN_REJECT;
2213 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
2215 case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
2216 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_ACK_NAK_TIMEOUT\n");
2217 ts->resp = SAS_TASK_COMPLETE;
2218 ts->stat = SAS_NAK_R_ERR;
2220 case IO_XFER_OPEN_RETRY_TIMEOUT:
2221 pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n");
2222 pm8001_handle_event(pm8001_ha, t, IO_XFER_OPEN_RETRY_TIMEOUT);
2224 case IO_XFER_ERROR_UNEXPECTED_PHASE:
2225 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_UNEXPECTED_PHASE\n");
2226 ts->resp = SAS_TASK_COMPLETE;
2227 ts->stat = SAS_DATA_OVERRUN;
2229 case IO_XFER_ERROR_XFER_RDY_OVERRUN:
2230 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_XFER_RDY_OVERRUN\n");
2231 ts->resp = SAS_TASK_COMPLETE;
2232 ts->stat = SAS_DATA_OVERRUN;
2234 case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED:
2235 pm8001_dbg(pm8001_ha, IO,
2236 "IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n");
2237 ts->resp = SAS_TASK_COMPLETE;
2238 ts->stat = SAS_DATA_OVERRUN;
2240 case IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT:
2241 pm8001_dbg(pm8001_ha, IO,
2242 "IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT\n");
2243 ts->resp = SAS_TASK_COMPLETE;
2244 ts->stat = SAS_DATA_OVERRUN;
2246 case IO_XFER_ERROR_OFFSET_MISMATCH:
2247 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_OFFSET_MISMATCH\n");
2248 ts->resp = SAS_TASK_COMPLETE;
2249 ts->stat = SAS_DATA_OVERRUN;
2251 case IO_XFER_ERROR_XFER_ZERO_DATA_LEN:
2252 pm8001_dbg(pm8001_ha, IO,
2253 "IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n");
2254 ts->resp = SAS_TASK_COMPLETE;
2255 ts->stat = SAS_DATA_OVERRUN;
2257 case IO_XFER_CMD_FRAME_ISSUED:
2258 pm8001_dbg(pm8001_ha, IO, "IO_XFER_CMD_FRAME_ISSUED\n");
2261 pm8001_dbg(pm8001_ha, DEVIO, "Unknown status 0x%x\n", event);
2262 /* not allowed case. Therefore, return failed status */
2263 ts->resp = SAS_TASK_COMPLETE;
2264 ts->stat = SAS_DATA_OVERRUN;
2267 spin_lock_irqsave(&t->task_state_lock, flags);
2268 t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
2269 t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
2270 t->task_state_flags |= SAS_TASK_STATE_DONE;
2271 if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
2272 spin_unlock_irqrestore(&t->task_state_lock, flags);
2273 pm8001_dbg(pm8001_ha, FAIL, "task 0x%p done with event 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
2274 t, event, ts->resp, ts->stat);
2275 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2277 spin_unlock_irqrestore(&t->task_state_lock, flags);
2278 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2279 mb();/* in order to force CPU ordering */
2284 /* See the comments for mpi_ssp_completion */
2286 mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
2289 struct pm8001_ccb_info *ccb;
2294 u8 sata_addr_low[4];
2295 u32 temp_sata_addr_low;
2297 u32 temp_sata_addr_hi;
2298 struct sata_completion_resp *psataPayload;
2299 struct task_status_struct *ts;
2300 struct ata_task_resp *resp;
2302 struct pm8001_device *pm8001_dev;
2303 unsigned long flags;
2305 psataPayload = (struct sata_completion_resp *)(piomb + 4);
2306 status = le32_to_cpu(psataPayload->status);
2307 tag = le32_to_cpu(psataPayload->tag);
2310 pm8001_dbg(pm8001_ha, FAIL, "tag null\n");
2313 ccb = &pm8001_ha->ccb_info[tag];
2314 param = le32_to_cpu(psataPayload->param);
2317 pm8001_dev = ccb->device;
2319 pm8001_dbg(pm8001_ha, FAIL, "ccb null\n");
2324 if (t->dev && (t->dev->lldd_dev))
2325 pm8001_dev = t->dev->lldd_dev;
2327 pm8001_dbg(pm8001_ha, FAIL, "task null\n");
2331 if ((pm8001_dev && !(pm8001_dev->id & NCQ_READ_LOG_FLAG))
2332 && unlikely(!t || !t->lldd_task || !t->dev)) {
2333 pm8001_dbg(pm8001_ha, FAIL, "task or dev null\n");
2337 ts = &t->task_status;
2339 pm8001_dbg(pm8001_ha, FAIL, "ts null\n");
2344 pm8001_dbg(pm8001_ha, IOERR,
2345 "status:0x%x, tag:0x%x, task::0x%p\n",
2348 /* Print the SAS address of the device on which the IO failed */
2349 if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
2350 (status != IO_UNDERFLOW)) {
2351 if (!((t->dev->parent) &&
2352 (dev_is_expander(t->dev->parent->dev_type)))) {
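/*
 * Directly attached SATA device: no real SAS address is reported, so
 * print one derived from the HA SAS address plus the attached phy id,
 * with each 32-bit half byte-swapped into displayable order.
 */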
2353 for (i = 0, j = 4; j <= 7 && i <= 3; i++, j++)
2354 sata_addr_low[i] = pm8001_ha->sas_addr[j];
2355 for (i = 0, j = 0; j <= 3 && i <= 3; i++, j++)
2356 sata_addr_hi[i] = pm8001_ha->sas_addr[j];
2357 memcpy(&temp_sata_addr_low, sata_addr_low,
2358 sizeof(sata_addr_low));
2359 memcpy(&temp_sata_addr_hi, sata_addr_hi,
2360 sizeof(sata_addr_hi));
2361 temp_sata_addr_hi = (((temp_sata_addr_hi >> 24) & 0xff)
2362 |((temp_sata_addr_hi << 8) &
2364 ((temp_sata_addr_hi >> 8)
2366 ((temp_sata_addr_hi << 24) &
2368 temp_sata_addr_low = ((((temp_sata_addr_low >> 24)
2370 ((temp_sata_addr_low << 8)
2372 ((temp_sata_addr_low >> 8)
2374 ((temp_sata_addr_low << 24)
2376 pm8001_dev->attached_phy +
2378 pm8001_dbg(pm8001_ha, FAIL,
2379 "SAS Address of IO Failure Drive:%08x%08x\n",
2381 temp_sata_addr_low);
2383 pm8001_dbg(pm8001_ha, FAIL,
2384 "SAS Address of IO Failure Drive:%016llx\n",
2385 SAS_ADDR(t->dev->sas_addr));
2390 pm8001_dbg(pm8001_ha, IO, "IO_SUCCESS\n");
2392 ts->resp = SAS_TASK_COMPLETE;
2393 ts->stat = SAS_SAM_STAT_GOOD;
2394 /* check if response is for SEND READ LOG */
2396 (pm8001_dev->id & NCQ_READ_LOG_FLAG)) {
2397 /* set new bit for abort_all */
2398 pm8001_dev->id |= NCQ_ABORT_ALL_FLAG;
2399 /* clear bit for read log */
2400 pm8001_dev->id = pm8001_dev->id & 0x7FFFFFFF;
2401 pm8001_send_abort_all(pm8001_ha, pm8001_dev);
2403 pm8001_tag_free(pm8001_ha, tag);
2409 ts->resp = SAS_TASK_COMPLETE;
2410 ts->stat = SAS_PROTO_RESPONSE;
2411 ts->residual = param;
2412 pm8001_dbg(pm8001_ha, IO,
2413 "SAS_PROTO_RESPONSE len = %d\n",
2415 sata_resp = &psataPayload->sata_resp[0];
2416 resp = (struct ata_task_resp *)ts->buf;
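/*
 * Pick the length of the returned FIS: a PIO setup FIS for non-DMA
 * reads, a set-device-bits FIS for NCQ commands, or a D2H register
 * FIS otherwise; it is then copied into the ata_task_resp buffer.
 */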
2417 if (t->ata_task.dma_xfer == 0 &&
2418 t->data_dir == DMA_FROM_DEVICE) {
2419 len = sizeof(struct pio_setup_fis);
2420 pm8001_dbg(pm8001_ha, IO,
2421 "PIO read len = %d\n", len);
2422 } else if (t->ata_task.use_ncq) {
2423 len = sizeof(struct set_dev_bits_fis);
2424 pm8001_dbg(pm8001_ha, IO, "FPDMA len = %d\n",
2427 len = sizeof(struct dev_to_host_fis);
2428 pm8001_dbg(pm8001_ha, IO, "other len = %d\n",
2431 if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) {
2432 resp->frame_len = len;
2433 memcpy(&resp->ending_fis[0], sata_resp, len);
2434 ts->buf_valid_size = sizeof(*resp);
2436 pm8001_dbg(pm8001_ha, IO,
2437 "response too large\n");
2440 atomic_dec(&pm8001_dev->running_req);
2443 pm8001_dbg(pm8001_ha, IO, "IO_ABORTED IOMB Tag\n");
2444 ts->resp = SAS_TASK_COMPLETE;
2445 ts->stat = SAS_ABORTED_TASK;
2447 atomic_dec(&pm8001_dev->running_req);
2449 /* the following cases are still to-do */
2451 /* SATA Completion with error */
2452 pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW param = %d\n", param);
2453 ts->resp = SAS_TASK_COMPLETE;
2454 ts->stat = SAS_DATA_UNDERRUN;
2455 ts->residual = param;
2457 atomic_dec(&pm8001_dev->running_req);
2460 pm8001_dbg(pm8001_ha, IO, "IO_NO_DEVICE\n");
2461 ts->resp = SAS_TASK_UNDELIVERED;
2462 ts->stat = SAS_PHY_DOWN;
2464 atomic_dec(&pm8001_dev->running_req);
2466 case IO_XFER_ERROR_BREAK:
2467 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
2468 ts->resp = SAS_TASK_COMPLETE;
2469 ts->stat = SAS_INTERRUPTED;
2471 atomic_dec(&pm8001_dev->running_req);
2473 case IO_XFER_ERROR_PHY_NOT_READY:
2474 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n");
2475 ts->resp = SAS_TASK_COMPLETE;
2476 ts->stat = SAS_OPEN_REJECT;
2477 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
2479 atomic_dec(&pm8001_dev->running_req);
2481 case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
2482 pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n");
2483 ts->resp = SAS_TASK_COMPLETE;
2484 ts->stat = SAS_OPEN_REJECT;
2485 ts->open_rej_reason = SAS_OREJ_EPROTO;
2487 atomic_dec(&pm8001_dev->running_req);
2489 case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
2490 pm8001_dbg(pm8001_ha, IO,
2491 "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n");
2492 ts->resp = SAS_TASK_COMPLETE;
2493 ts->stat = SAS_OPEN_REJECT;
2494 ts->open_rej_reason = SAS_OREJ_UNKNOWN;
2496 atomic_dec(&pm8001_dev->running_req);
2498 case IO_OPEN_CNX_ERROR_BREAK:
2499 pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n");
2500 ts->resp = SAS_TASK_COMPLETE;
2501 ts->stat = SAS_OPEN_REJECT;
2502 ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
2504 atomic_dec(&pm8001_dev->running_req);
2506 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
2507 pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n");
2508 ts->resp = SAS_TASK_COMPLETE;
2509 ts->stat = SAS_DEV_NO_RESPONSE;
2510 if (!t->uldd_task) {
2511 pm8001_handle_event(pm8001_ha,
2513 IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
2514 ts->resp = SAS_TASK_UNDELIVERED;
2515 ts->stat = SAS_QUEUE_FULL;
2516 pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
2520 case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
2521 pm8001_dbg(pm8001_ha, IO,
2522 "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n");
2523 ts->resp = SAS_TASK_UNDELIVERED;
2524 ts->stat = SAS_OPEN_REJECT;
2525 ts->open_rej_reason = SAS_OREJ_BAD_DEST;
2526 if (!t->uldd_task) {
2527 pm8001_handle_event(pm8001_ha,
2529 IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
2530 ts->resp = SAS_TASK_UNDELIVERED;
2531 ts->stat = SAS_QUEUE_FULL;
2532 pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
2536 case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
2537 pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n");
2538 ts->resp = SAS_TASK_COMPLETE;
2539 ts->stat = SAS_OPEN_REJECT;
2540 ts->open_rej_reason = SAS_OREJ_CONN_RATE;
2542 atomic_dec(&pm8001_dev->running_req);
2544 case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
2545 pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY\n");
2546 ts->resp = SAS_TASK_COMPLETE;
2547 ts->stat = SAS_DEV_NO_RESPONSE;
2548 if (!t->uldd_task) {
2549 pm8001_handle_event(pm8001_ha,
2551 IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY);
2552 ts->resp = SAS_TASK_UNDELIVERED;
2553 ts->stat = SAS_QUEUE_FULL;
2554 pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
2558 case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
2559 pm8001_dbg(pm8001_ha, IO,
2560 "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n");
2561 ts->resp = SAS_TASK_COMPLETE;
2562 ts->stat = SAS_OPEN_REJECT;
2563 ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
2565 atomic_dec(&pm8001_dev->running_req);
2567 case IO_XFER_ERROR_NAK_RECEIVED:
2568 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_NAK_RECEIVED\n");
2569 ts->resp = SAS_TASK_COMPLETE;
2570 ts->stat = SAS_NAK_R_ERR;
2572 atomic_dec(&pm8001_dev->running_req);
2574 case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
2575 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_ACK_NAK_TIMEOUT\n");
2576 ts->resp = SAS_TASK_COMPLETE;
2577 ts->stat = SAS_NAK_R_ERR;
2579 atomic_dec(&pm8001_dev->running_req);
2581 case IO_XFER_ERROR_DMA:
2582 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_DMA\n");
2583 ts->resp = SAS_TASK_COMPLETE;
2584 ts->stat = SAS_ABORTED_TASK;
2586 atomic_dec(&pm8001_dev->running_req);
2588 case IO_XFER_ERROR_SATA_LINK_TIMEOUT:
2589 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_SATA_LINK_TIMEOUT\n");
2590 ts->resp = SAS_TASK_UNDELIVERED;
2591 ts->stat = SAS_DEV_NO_RESPONSE;
2593 atomic_dec(&pm8001_dev->running_req);
2595 case IO_XFER_ERROR_REJECTED_NCQ_MODE:
2596 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_REJECTED_NCQ_MODE\n");
2597 ts->resp = SAS_TASK_COMPLETE;
2598 ts->stat = SAS_DATA_UNDERRUN;
2600 atomic_dec(&pm8001_dev->running_req);
2602 case IO_XFER_OPEN_RETRY_TIMEOUT:
2603 pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n");
2604 ts->resp = SAS_TASK_COMPLETE;
2605 ts->stat = SAS_OPEN_TO;
2607 atomic_dec(&pm8001_dev->running_req);
2609 case IO_PORT_IN_RESET:
2610 pm8001_dbg(pm8001_ha, IO, "IO_PORT_IN_RESET\n");
2611 ts->resp = SAS_TASK_COMPLETE;
2612 ts->stat = SAS_DEV_NO_RESPONSE;
2614 atomic_dec(&pm8001_dev->running_req);
2616 case IO_DS_NON_OPERATIONAL:
2617 pm8001_dbg(pm8001_ha, IO, "IO_DS_NON_OPERATIONAL\n");
2618 ts->resp = SAS_TASK_COMPLETE;
2619 ts->stat = SAS_DEV_NO_RESPONSE;
2620 if (!t->uldd_task) {
2621 pm8001_handle_event(pm8001_ha, pm8001_dev,
2622 IO_DS_NON_OPERATIONAL);
2623 ts->resp = SAS_TASK_UNDELIVERED;
2624 ts->stat = SAS_QUEUE_FULL;
2625 pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
2629 case IO_DS_IN_RECOVERY:
2630 pm8001_dbg(pm8001_ha, IO, "IO_DS_IN_RECOVERY\n");
2631 ts->resp = SAS_TASK_COMPLETE;
2632 ts->stat = SAS_DEV_NO_RESPONSE;
2634 atomic_dec(&pm8001_dev->running_req);
2636 case IO_DS_IN_ERROR:
2637 pm8001_dbg(pm8001_ha, IO, "IO_DS_IN_ERROR\n");
2638 ts->resp = SAS_TASK_COMPLETE;
2639 ts->stat = SAS_DEV_NO_RESPONSE;
2640 if (!t->uldd_task) {
2641 pm8001_handle_event(pm8001_ha, pm8001_dev,
2643 ts->resp = SAS_TASK_UNDELIVERED;
2644 ts->stat = SAS_QUEUE_FULL;
2645 pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
2649 case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
2650 pm8001_dbg(pm8001_ha, IO,
2651 "IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n");
2652 ts->resp = SAS_TASK_COMPLETE;
2653 ts->stat = SAS_OPEN_REJECT;
2654 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
2656 atomic_dec(&pm8001_dev->running_req);
2659 pm8001_dbg(pm8001_ha, DEVIO, "Unknown status 0x%x\n", status);
2660 /* not allowed case. Therefore, return failed status */
2661 ts->resp = SAS_TASK_COMPLETE;
2662 ts->stat = SAS_DEV_NO_RESPONSE;
2664 atomic_dec(&pm8001_dev->running_req);
2667 spin_lock_irqsave(&t->task_state_lock, flags);
2668 t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
2669 t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
2670 t->task_state_flags |= SAS_TASK_STATE_DONE;
2671 if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
2672 spin_unlock_irqrestore(&t->task_state_lock, flags);
2673 pm8001_dbg(pm8001_ha, FAIL,
2674 "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
2675 t, status, ts->resp, ts->stat);
2676 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2678 spin_unlock_irqrestore(&t->task_state_lock, flags);
2679 pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
2683 /* See the comments for mpi_ssp_completion */
2684 static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
2687 struct task_status_struct *ts;
2688 struct pm8001_ccb_info *ccb;
2689 struct pm8001_device *pm8001_dev;
2690 struct sata_event_resp *psataPayload =
2691 (struct sata_event_resp *)(piomb + 4);
2692 u32 event = le32_to_cpu(psataPayload->event);
2693 u32 tag = le32_to_cpu(psataPayload->tag);
2694 u32 port_id = le32_to_cpu(psataPayload->port_id);
2695 u32 dev_id = le32_to_cpu(psataPayload->device_id);
2696 unsigned long flags;
2698 ccb = &pm8001_ha->ccb_info[tag];
2702 pm8001_dev = ccb->device;
2704 pm8001_dbg(pm8001_ha, FAIL, "No CCB, returning\n");
2707 pm8001_dbg(pm8001_ha, FAIL, "SATA EVENT 0x%x\n", event);
2709 /* Check if this is an NCQ error */
2710 if (event == IO_XFER_ERROR_ABORTED_NCQ_MODE) {
2711 /* find device using device id */
2712 pm8001_dev = pm8001_find_dev(pm8001_ha, dev_id);
2713 /* send read log extension */
2715 pm8001_send_read_log(pm8001_ha, pm8001_dev);
2719 ccb = &pm8001_ha->ccb_info[tag];
2721 pm8001_dev = ccb->device;
2723 pm8001_dbg(pm8001_ha, FAIL, "sata IO status 0x%x\n", event);
2724 if (unlikely(!t || !t->lldd_task || !t->dev))
2726 ts = &t->task_status;
2727 pm8001_dbg(pm8001_ha, DEVIO,
2728 "port_id:0x%x, device_id:0x%x, tag:0x%x, event:0x%x\n",
2729 port_id, dev_id, tag, event);
2732 pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW\n");
2733 ts->resp = SAS_TASK_COMPLETE;
2734 ts->stat = SAS_DATA_OVERRUN;
2737 atomic_dec(&pm8001_dev->running_req);
2739 case IO_XFER_ERROR_BREAK:
2740 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
2741 ts->resp = SAS_TASK_COMPLETE;
2742 ts->stat = SAS_INTERRUPTED;
2744 case IO_XFER_ERROR_PHY_NOT_READY:
2745 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n");
2746 ts->resp = SAS_TASK_COMPLETE;
2747 ts->stat = SAS_OPEN_REJECT;
2748 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
2750 case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
2751 pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n");
2752 ts->resp = SAS_TASK_COMPLETE;
2753 ts->stat = SAS_OPEN_REJECT;
2754 ts->open_rej_reason = SAS_OREJ_EPROTO;
2756 case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
2757 pm8001_dbg(pm8001_ha, IO,
2758 "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n");
2759 ts->resp = SAS_TASK_COMPLETE;
2760 ts->stat = SAS_OPEN_REJECT;
2761 ts->open_rej_reason = SAS_OREJ_UNKNOWN;
2763 case IO_OPEN_CNX_ERROR_BREAK:
2764 pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n");
2765 ts->resp = SAS_TASK_COMPLETE;
2766 ts->stat = SAS_OPEN_REJECT;
2767 ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
2769 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
2770 pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n");
2771 ts->resp = SAS_TASK_UNDELIVERED;
2772 ts->stat = SAS_DEV_NO_RESPONSE;
2773 if (!t->uldd_task) {
2774 pm8001_handle_event(pm8001_ha,
2776 IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
2777 ts->resp = SAS_TASK_COMPLETE;
2778 ts->stat = SAS_QUEUE_FULL;
2779 pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
2783 case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
2784 pm8001_dbg(pm8001_ha, IO,
2785 "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n");
2786 ts->resp = SAS_TASK_UNDELIVERED;
2787 ts->stat = SAS_OPEN_REJECT;
2788 ts->open_rej_reason = SAS_OREJ_BAD_DEST;
2790 case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
2791 pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n");
2792 ts->resp = SAS_TASK_COMPLETE;
2793 ts->stat = SAS_OPEN_REJECT;
2794 ts->open_rej_reason = SAS_OREJ_CONN_RATE;
2796 case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
2797 pm8001_dbg(pm8001_ha, IO,
2798 "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n");
2799 ts->resp = SAS_TASK_COMPLETE;
2800 ts->stat = SAS_OPEN_REJECT;
2801 ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
2803 case IO_XFER_ERROR_NAK_RECEIVED:
2804 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_NAK_RECEIVED\n");
2805 ts->resp = SAS_TASK_COMPLETE;
2806 ts->stat = SAS_NAK_R_ERR;
2808 case IO_XFER_ERROR_PEER_ABORTED:
2809 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PEER_ABORTED\n");
2810 ts->resp = SAS_TASK_COMPLETE;
2811 ts->stat = SAS_NAK_R_ERR;
2813 case IO_XFER_ERROR_REJECTED_NCQ_MODE:
2814 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_REJECTED_NCQ_MODE\n");
2815 ts->resp = SAS_TASK_COMPLETE;
2816 ts->stat = SAS_DATA_UNDERRUN;
2818 case IO_XFER_OPEN_RETRY_TIMEOUT:
2819 pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n");
2820 ts->resp = SAS_TASK_COMPLETE;
2821 ts->stat = SAS_OPEN_TO;
2823 case IO_XFER_ERROR_UNEXPECTED_PHASE:
2824 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_UNEXPECTED_PHASE\n");
2825 ts->resp = SAS_TASK_COMPLETE;
2826 ts->stat = SAS_OPEN_TO;
2828 case IO_XFER_ERROR_XFER_RDY_OVERRUN:
2829 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_XFER_RDY_OVERRUN\n");
2830 ts->resp = SAS_TASK_COMPLETE;
2831 ts->stat = SAS_OPEN_TO;
2833 case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED:
2834 pm8001_dbg(pm8001_ha, IO,
2835 "IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n");
2836 ts->resp = SAS_TASK_COMPLETE;
2837 ts->stat = SAS_OPEN_TO;
2839 case IO_XFER_ERROR_OFFSET_MISMATCH:
2840 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_OFFSET_MISMATCH\n");
2841 ts->resp = SAS_TASK_COMPLETE;
2842 ts->stat = SAS_OPEN_TO;
2844 case IO_XFER_ERROR_XFER_ZERO_DATA_LEN:
2845 pm8001_dbg(pm8001_ha, IO,
2846 "IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n");
2847 ts->resp = SAS_TASK_COMPLETE;
2848 ts->stat = SAS_OPEN_TO;
2850 case IO_XFER_CMD_FRAME_ISSUED:
2851 pm8001_dbg(pm8001_ha, IO, "IO_XFER_CMD_FRAME_ISSUED\n");
2853 case IO_XFER_PIO_SETUP_ERROR:
2854 pm8001_dbg(pm8001_ha, IO, "IO_XFER_PIO_SETUP_ERROR\n");
2855 ts->resp = SAS_TASK_COMPLETE;
2856 ts->stat = SAS_OPEN_TO;
2859 pm8001_dbg(pm8001_ha, DEVIO, "Unknown status 0x%x\n", event);
2860 /* not allowed case. Therefore, return failed status */
2861 ts->resp = SAS_TASK_COMPLETE;
2862 ts->stat = SAS_OPEN_TO;
2865 spin_lock_irqsave(&t->task_state_lock, flags);
2866 t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
2867 t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
2868 t->task_state_flags |= SAS_TASK_STATE_DONE;
2869 if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
2870 spin_unlock_irqrestore(&t->task_state_lock, flags);
2871 pm8001_dbg(pm8001_ha, FAIL,
2872 "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
2873 t, event, ts->resp, ts->stat);
2874 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2876 spin_unlock_irqrestore(&t->task_state_lock, flags);
2877 pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
2881 /* See the comments for mpi_ssp_completion */
2883 mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
2886 struct pm8001_ccb_info *ccb;
2887 unsigned long flags;
2890 struct smp_completion_resp *psmpPayload;
2891 struct task_status_struct *ts;
2892 struct pm8001_device *pm8001_dev;
2894 psmpPayload = (struct smp_completion_resp *)(piomb + 4);
2895 status = le32_to_cpu(psmpPayload->status);
2896 tag = le32_to_cpu(psmpPayload->tag);
2898 ccb = &pm8001_ha->ccb_info[tag];
2900 ts = &t->task_status;
2901 pm8001_dev = ccb->device;
2903 pm8001_dbg(pm8001_ha, FAIL, "smp IO status 0x%x\n", status);
2904 pm8001_dbg(pm8001_ha, IOERR,
2905 "status:0x%x, tag:0x%x, task:0x%p\n",
2908 if (unlikely(!t || !t->lldd_task || !t->dev))
2913 pm8001_dbg(pm8001_ha, IO, "IO_SUCCESS\n");
2914 ts->resp = SAS_TASK_COMPLETE;
2915 ts->stat = SAS_SAM_STAT_GOOD;
2917 atomic_dec(&pm8001_dev->running_req);
2920 pm8001_dbg(pm8001_ha, IO, "IO_ABORTED IOMB\n");
2921 ts->resp = SAS_TASK_COMPLETE;
2922 ts->stat = SAS_ABORTED_TASK;
2924 atomic_dec(&pm8001_dev->running_req);
2927 pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW\n");
2928 ts->resp = SAS_TASK_COMPLETE;
2929 ts->stat = SAS_DATA_OVERRUN;
2932 atomic_dec(&pm8001_dev->running_req);
2935 pm8001_dbg(pm8001_ha, IO, "IO_NO_DEVICE\n");
2936 ts->resp = SAS_TASK_COMPLETE;
2937 ts->stat = SAS_PHY_DOWN;
2939 case IO_ERROR_HW_TIMEOUT:
2940 pm8001_dbg(pm8001_ha, IO, "IO_ERROR_HW_TIMEOUT\n");
2941 ts->resp = SAS_TASK_COMPLETE;
2942 ts->stat = SAS_SAM_STAT_BUSY;
2944 case IO_XFER_ERROR_BREAK:
2945 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
2946 ts->resp = SAS_TASK_COMPLETE;
2947 ts->stat = SAS_SAM_STAT_BUSY;
2949 case IO_XFER_ERROR_PHY_NOT_READY:
2950 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n");
2951 ts->resp = SAS_TASK_COMPLETE;
2952 ts->stat = SAS_SAM_STAT_BUSY;
2954 case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
2955 pm8001_dbg(pm8001_ha, IO,
2956 "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n");
2957 ts->resp = SAS_TASK_COMPLETE;
2958 ts->stat = SAS_OPEN_REJECT;
2959 ts->open_rej_reason = SAS_OREJ_UNKNOWN;
2961 case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
2962 pm8001_dbg(pm8001_ha, IO,
2963 "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n");
2964 ts->resp = SAS_TASK_COMPLETE;
2965 ts->stat = SAS_OPEN_REJECT;
2966 ts->open_rej_reason = SAS_OREJ_UNKNOWN;
2968 case IO_OPEN_CNX_ERROR_BREAK:
2969 pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n");
2970 ts->resp = SAS_TASK_COMPLETE;
2971 ts->stat = SAS_OPEN_REJECT;
2972 ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
2974 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
2975 pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n");
2976 ts->resp = SAS_TASK_COMPLETE;
2977 ts->stat = SAS_OPEN_REJECT;
2978 ts->open_rej_reason = SAS_OREJ_UNKNOWN;
2979 pm8001_handle_event(pm8001_ha,
2981 IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
2983 case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
2984 pm8001_dbg(pm8001_ha, IO,
2985 "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n");
2986 ts->resp = SAS_TASK_COMPLETE;
2987 ts->stat = SAS_OPEN_REJECT;
2988 ts->open_rej_reason = SAS_OREJ_BAD_DEST;
2990 case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
2991 pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n");
2992 ts->resp = SAS_TASK_COMPLETE;
2993 ts->stat = SAS_OPEN_REJECT;
2994 ts->open_rej_reason = SAS_OREJ_CONN_RATE;
2996 case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
2997 pm8001_dbg(pm8001_ha, IO,
2998 "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n");
2999 ts->resp = SAS_TASK_COMPLETE;
3000 ts->stat = SAS_OPEN_REJECT;
3001 ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
3003 case IO_XFER_ERROR_RX_FRAME:
3004 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_RX_FRAME\n");
3005 ts->resp = SAS_TASK_COMPLETE;
3006 ts->stat = SAS_DEV_NO_RESPONSE;
3008 case IO_XFER_OPEN_RETRY_TIMEOUT:
3009 pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n");
3010 ts->resp = SAS_TASK_COMPLETE;
3011 ts->stat = SAS_OPEN_REJECT;
3012 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
3014 case IO_ERROR_INTERNAL_SMP_RESOURCE:
3015 pm8001_dbg(pm8001_ha, IO, "IO_ERROR_INTERNAL_SMP_RESOURCE\n");
3016 ts->resp = SAS_TASK_COMPLETE;
3017 ts->stat = SAS_QUEUE_FULL;
3019 case IO_PORT_IN_RESET:
3020 pm8001_dbg(pm8001_ha, IO, "IO_PORT_IN_RESET\n");
3021 ts->resp = SAS_TASK_COMPLETE;
3022 ts->stat = SAS_OPEN_REJECT;
3023 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
3025 case IO_DS_NON_OPERATIONAL:
3026 pm8001_dbg(pm8001_ha, IO, "IO_DS_NON_OPERATIONAL\n");
3027 ts->resp = SAS_TASK_COMPLETE;
3028 ts->stat = SAS_DEV_NO_RESPONSE;
3030 case IO_DS_IN_RECOVERY:
3031 pm8001_dbg(pm8001_ha, IO, "IO_DS_IN_RECOVERY\n");
3032 ts->resp = SAS_TASK_COMPLETE;
3033 ts->stat = SAS_OPEN_REJECT;
3034 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
3036 case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
3037 pm8001_dbg(pm8001_ha, IO,
3038 "IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n");
3039 ts->resp = SAS_TASK_COMPLETE;
3040 ts->stat = SAS_OPEN_REJECT;
3041 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
3044 pm8001_dbg(pm8001_ha, DEVIO, "Unknown status 0x%x\n", status);
3045 ts->resp = SAS_TASK_COMPLETE;
3046 ts->stat = SAS_DEV_NO_RESPONSE;
3047 /* not allowed case. Therefore, return failed status */
3050 spin_lock_irqsave(&t->task_state_lock, flags);
3051 t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
3052 t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
3053 t->task_state_flags |= SAS_TASK_STATE_DONE;
3054 if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
3055 spin_unlock_irqrestore(&t->task_state_lock, flags);
3056 pm8001_dbg(pm8001_ha, FAIL, "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
3057 t, status, ts->resp, ts->stat);
3058 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
3060 spin_unlock_irqrestore(&t->task_state_lock, flags);
3061 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
3062 mb();/* in order to force CPU ordering */
3067 void pm8001_mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha,
3070 struct set_dev_state_resp *pPayload =
3071 (struct set_dev_state_resp *)(piomb + 4);
3072 u32 tag = le32_to_cpu(pPayload->tag);
3073 struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag];
3074 struct pm8001_device *pm8001_dev = ccb->device;
3075 u32 status = le32_to_cpu(pPayload->status);
3076 u32 device_id = le32_to_cpu(pPayload->device_id);
3077 u8 pds = le32_to_cpu(pPayload->pds_nds) & PDS_BITS;
3078 u8 nds = le32_to_cpu(pPayload->pds_nds) & NDS_BITS;
3079 pm8001_dbg(pm8001_ha, MSG, "Set device id = 0x%x state from 0x%x to 0x%x status = 0x%x!\n",
3080 device_id, pds, nds, status);
3081 complete(pm8001_dev->setds_completion);
3083 ccb->ccb_tag = 0xFFFFFFFF;
3084 pm8001_tag_free(pm8001_ha, tag);
3087 void pm8001_mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
3089 struct get_nvm_data_resp *pPayload =
3090 (struct get_nvm_data_resp *)(piomb + 4);
3091 u32 tag = le32_to_cpu(pPayload->tag);
3092 struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag];
3093 u32 dlen_status = le32_to_cpu(pPayload->dlen_status);
3094 complete(pm8001_ha->nvmd_completion);
3095 pm8001_dbg(pm8001_ha, MSG, "Set nvm data complete!\n");
3096 if ((dlen_status & NVMD_STAT) != 0) {
3097 pm8001_dbg(pm8001_ha, FAIL, "Set nvm data error %x\n",
3101 ccb->ccb_tag = 0xFFFFFFFF;
3102 pm8001_tag_free(pm8001_ha, tag);
3106 pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
3108 struct fw_control_ex *fw_control_context;
3109 struct get_nvm_data_resp *pPayload =
3110 (struct get_nvm_data_resp *)(piomb + 4);
3111 u32 tag = le32_to_cpu(pPayload->tag);
3112 struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag];
3113 u32 dlen_status = le32_to_cpu(pPayload->dlen_status);
3114 u32 ir_tds_bn_dps_das_nvm =
3115 le32_to_cpu(pPayload->ir_tda_bn_dps_das_nvm);
3116 void *virt_addr = pm8001_ha->memoryMap.region[NVMD].virt_ptr;
3117 fw_control_context = ccb->fw_control_context;
3119 pm8001_dbg(pm8001_ha, MSG, "Get nvm data complete!\n");
3120 if ((dlen_status & NVMD_STAT) != 0) {
3121 pm8001_dbg(pm8001_ha, FAIL, "Get nvm data error %x\n",
3123 complete(pm8001_ha->nvmd_completion);
3124 /* We should free the tag on failure as well; the tag is not
3125 * freed anywhere on the requesting path.
3128 ccb->ccb_tag = 0xFFFFFFFF;
3129 pm8001_tag_free(pm8001_ha, tag);
3132 if (ir_tds_bn_dps_das_nvm & IPMode) {
3133 /* indirect mode - IR bit set */
3134 pm8001_dbg(pm8001_ha, MSG, "Get NVMD success, IR=1\n");
3135 if ((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == TWI_DEVICE) {
3136 if (ir_tds_bn_dps_das_nvm == 0x80a80200) {
3137 memcpy(pm8001_ha->sas_addr,
3138 ((u8 *)virt_addr + 4),
3140 pm8001_dbg(pm8001_ha, MSG, "Get SAS address from VPD successfully!\n");
3142 } else if (((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == C_SEEPROM)
3143 || ((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == VPD_FLASH) ||
3144 ((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == EXPAN_ROM)) {
3146 } else if (((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == AAP1_RDUMP)
3147 || ((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == IOP_RDUMP)) {
3150 /* This should not happen */
3151 pm8001_dbg(pm8001_ha, MSG,
3152 "(IR=1)Wrong Device type 0x%x\n",
3153 ir_tds_bn_dps_das_nvm);
3155 } else { /* direct mode */
3156 pm8001_dbg(pm8001_ha, MSG,
3157 "Get NVMD success, IR=0, dataLen=%d\n",
3158 (dlen_status & NVMD_LEN) >> 24);
3160 /* Though fw_control_context is freed below, usrAddr still needs
3161 * to be updated, as it holds the response for the requesting function
3163 memcpy(fw_control_context->usrAddr,
3164 pm8001_ha->memoryMap.region[NVMD].virt_ptr,
3165 fw_control_context->len);
3166 kfree(ccb->fw_control_context);
3167 /* To avoid race condition, complete should be
3168 * called after the message is copied to
3169 * fw_control_context->usrAddr
3171 complete(pm8001_ha->nvmd_completion);
3172 pm8001_dbg(pm8001_ha, MSG, "Get nvmd data complete!\n");
3174 ccb->ccb_tag = 0xFFFFFFFF;
3175 pm8001_tag_free(pm8001_ha, tag);
3178 int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb)
3181 struct local_phy_ctl_resp *pPayload =
3182 (struct local_phy_ctl_resp *)(piomb + 4);
3183 u32 status = le32_to_cpu(pPayload->status);
3184 u32 phy_id = le32_to_cpu(pPayload->phyop_phyid) & ID_BITS;
3185 u32 phy_op = le32_to_cpu(pPayload->phyop_phyid) & OP_BITS;
3186 tag = le32_to_cpu(pPayload->tag);
3188 pm8001_dbg(pm8001_ha, MSG,
3189 "%x phy execute %x phy op failed!\n",
3192 pm8001_dbg(pm8001_ha, MSG,
3193 "%x phy execute %x phy op success!\n",
3195 pm8001_ha->phy[phy_id].reset_success = true;
3197 if (pm8001_ha->phy[phy_id].enable_completion) {
3198 complete(pm8001_ha->phy[phy_id].enable_completion);
3199 pm8001_ha->phy[phy_id].enable_completion = NULL;
3201 pm8001_tag_free(pm8001_ha, tag);
3206 * pm8001_bytes_dmaed - one of the interface functions used to communicate with libsas
3207 * @pm8001_ha: our hba card information
3208 * @i: which phy received the event.
3210 * When the HBA driver receives the identify-done event or the initial FIS
3211 * received event (for SATA), it invokes this function to notify the sas layer
3212 * that the sas topology has formed, so libsas can discover the whole sas
3213 * domain; on a broadcast (change) primitive the sas layer is only told to
3214 * discover the changed domain rather than the whole domain.
3216 void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i)
3218 struct pm8001_phy *phy = &pm8001_ha->phy[i];
3219 struct asd_sas_phy *sas_phy = &phy->sas_phy;
3220 if (!phy->phy_attached)
3224 struct sas_phy *sphy = sas_phy->phy;
3225 sphy->negotiated_linkrate = sas_phy->linkrate;
3226 sphy->minimum_linkrate = phy->minimum_linkrate;
3227 sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
3228 sphy->maximum_linkrate = phy->maximum_linkrate;
3229 sphy->maximum_linkrate_hw = phy->maximum_linkrate;
3232 if (phy->phy_type & PORT_TYPE_SAS) {
3233 struct sas_identify_frame *id;
3234 id = (struct sas_identify_frame *)phy->frame_rcvd;
3235 id->dev_type = phy->identify.device_type;
3236 id->initiator_bits = SAS_PROTOCOL_ALL;
3237 id->target_bits = phy->identify.target_port_protocols;
3238 } else if (phy->phy_type & PORT_TYPE_SATA) {
3241 pm8001_dbg(pm8001_ha, MSG, "phy %d bytes dmaed.\n", i);
3243 sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
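/*
 * PORTE_BYTES_DMAED tells libsas that the identify frame (or initial
 * FIS) has been received, which kicks off discovery of the domain
 * behind this phy.
 */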
3244 sas_notify_port_event(sas_phy, PORTE_BYTES_DMAED, GFP_ATOMIC);
3247 /* Get the link rate */
3248 void pm8001_get_lrate_mode(struct pm8001_phy *phy, u8 link_rate)
3250 struct sas_phy *sas_phy = phy->sas_phy.phy;
3252 switch (link_rate) {
3254 phy->sas_phy.linkrate = SAS_LINK_RATE_12_0_GBPS;
3255 phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_12_0_GBPS;
3258 phy->sas_phy.linkrate = SAS_LINK_RATE_6_0_GBPS;
3259 phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_6_0_GBPS;
3262 phy->sas_phy.linkrate = SAS_LINK_RATE_3_0_GBPS;
3263 phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_3_0_GBPS;
3266 phy->sas_phy.linkrate = SAS_LINK_RATE_1_5_GBPS;
3267 phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_1_5_GBPS;
3270 sas_phy->negotiated_linkrate = phy->sas_phy.linkrate;
3271 sas_phy->maximum_linkrate_hw = SAS_LINK_RATE_6_0_GBPS;
3272 sas_phy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
3273 sas_phy->maximum_linkrate = SAS_LINK_RATE_6_0_GBPS;
3274 sas_phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
3278 * pm8001_get_attached_sas_addr - extract/generate attached SAS address
3279 * @phy: pointer to asd_phy
3280 * @sas_addr: pointer to buffer where the SAS address is to be written
3282 * This function extracts the SAS address from an IDENTIFY frame
3283 * received. If OOB is SATA, then a SAS address is generated from the
3286 * LOCKING: the frame_rcvd_lock needs to be held since this parses the frame
3289 void pm8001_get_attached_sas_addr(struct pm8001_phy *phy,
3292 if (phy->sas_phy.frame_rcvd[0] == 0x34
3293 && phy->sas_phy.oob_mode == SATA_OOB_MODE) {
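/*
 * SATA devices carry no SAS address of their own: frame byte 0 == 0x34
 * is a device-to-host register FIS, so synthesize a unique address
 * from the HA SAS address plus the phy id.
 */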
3294 struct pm8001_hba_info *pm8001_ha = phy->sas_phy.ha->lldd_ha;
3295 /* FIS device-to-host */
3296 u64 addr = be64_to_cpu(*(__be64 *)pm8001_ha->sas_addr);
3297 addr += phy->sas_phy.id;
3298 *(__be64 *)sas_addr = cpu_to_be64(addr);
3300 struct sas_identify_frame *idframe =
3301 (void *) phy->sas_phy.frame_rcvd;
3302 memcpy(sas_addr, idframe->sas_addr, SAS_ADDR_SIZE);
3307 * pm8001_hw_event_ack_req - For PM8001, some events need to be acknowledged to the FW.
3308 * @pm8001_ha: our hba card information
3309 * @Qnum: the outbound queue message number.
3310 * @SEA: source of event to ack
3311 * @port_id: port id.
3313 * @param0: parameter 0.
3314 * @param1: parameter 1.
3316 static void pm8001_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha,
3317 u32 Qnum, u32 SEA, u32 port_id, u32 phyId, u32 param0, u32 param1)
3319 struct hw_event_ack_req payload;
3320 u32 opc = OPC_INB_SAS_HW_EVENT_ACK;
3322 struct inbound_queue_table *circularQ;
3324 memset((u8 *)&payload, 0, sizeof(payload));
3325 circularQ = &pm8001_ha->inbnd_q_tbl[Qnum];
3326 payload.tag = cpu_to_le32(1);
3327 payload.sea_phyid_portid = cpu_to_le32(((SEA & 0xFFFF) << 8) |
3328 ((phyId & 0x0F) << 4) | (port_id & 0x0F));
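/*
 * The IOMB packs the event source (SEA) in bits 8 and above, the phy id
 * in bits 7:4 and the port id in bits 3:0.
 */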
3329 payload.param0 = cpu_to_le32(param0);
3330 payload.param1 = cpu_to_le32(param1);
3331 pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
3332 sizeof(payload), 0);
3335 static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
3336 u32 phyId, u32 phy_op);
3339 * hw_event_sas_phy_up - FW notifies us of a SAS phy up event.
3340 * @pm8001_ha: our hba card information
3341 * @piomb: IO message buffer
3344 hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
3346 struct hw_event_resp *pPayload =
3347 (struct hw_event_resp *)(piomb + 4);
3348 u32 lr_evt_status_phyid_portid =
3349 le32_to_cpu(pPayload->lr_evt_status_phyid_portid);
3351 (u8)((lr_evt_status_phyid_portid & 0xF0000000) >> 28);
3352 u8 port_id = (u8)(lr_evt_status_phyid_portid & 0x0000000F);
3354 (u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
3355 u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate);
3356 u8 portstate = (u8)(npip_portstate & 0x0000000F);
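/*
 * lr_evt_status_phyid_portid packs the link rate in bits 31:28, the phy
 * id in bits 7:4 and the port id in bits 3:0; the low nibble of
 * npip_portstate carries the port state.
 */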
3357 struct pm8001_port *port = &pm8001_ha->port[port_id];
3358 struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
3359 unsigned long flags;
3360 u8 deviceType = pPayload->sas_identify.dev_type;
3362 port->port_id = port_id;
3363 port->port_state = portstate;
3364 phy->phy_state = PHY_STATE_LINK_UP_SPC;
3365 pm8001_dbg(pm8001_ha, MSG,
3366 "HW_EVENT_SAS_PHY_UP port id = %d, phy id = %d\n",
3369 switch (deviceType) {
3370 case SAS_PHY_UNUSED:
3371 pm8001_dbg(pm8001_ha, MSG, "device type no device.\n");
3373 case SAS_END_DEVICE:
3374 pm8001_dbg(pm8001_ha, MSG, "end device.\n");
3375 pm8001_chip_phy_ctl_req(pm8001_ha, phy_id,
3376 PHY_NOTIFY_ENABLE_SPINUP);
3377 port->port_attached = 1;
3378 pm8001_get_lrate_mode(phy, link_rate);
3380 case SAS_EDGE_EXPANDER_DEVICE:
3381 pm8001_dbg(pm8001_ha, MSG, "expander device.\n");
3382 port->port_attached = 1;
3383 pm8001_get_lrate_mode(phy, link_rate);
3385 case SAS_FANOUT_EXPANDER_DEVICE:
3386 pm8001_dbg(pm8001_ha, MSG, "fanout expander device.\n");
3387 port->port_attached = 1;
3388 pm8001_get_lrate_mode(phy, link_rate);
3391 pm8001_dbg(pm8001_ha, DEVIO, "unknown device type(%x)\n",
3395 phy->phy_type |= PORT_TYPE_SAS;
3396 phy->identify.device_type = deviceType;
3397 phy->phy_attached = 1;
3398 if (phy->identify.device_type == SAS_END_DEVICE)
3399 phy->identify.target_port_protocols = SAS_PROTOCOL_SSP;
3400 else if (phy->identify.device_type != SAS_PHY_UNUSED)
3401 phy->identify.target_port_protocols = SAS_PROTOCOL_SMP;
3402 phy->sas_phy.oob_mode = SAS_OOB_MODE;
3403 sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE, GFP_ATOMIC);
3404 spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
3405 memcpy(phy->frame_rcvd, &pPayload->sas_identify,
3406 sizeof(struct sas_identify_frame)-4);
3407 phy->frame_rcvd_size = sizeof(struct sas_identify_frame) - 4;
3408 pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
3409 spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
3410 if (pm8001_ha->flags == PM8001F_RUN_TIME)
3411 mdelay(200); /* delay a moment to wait for the disk to spin up */
3412 pm8001_bytes_dmaed(pm8001_ha, phy_id);
3416 * hw_event_sata_phy_up - FW notifies us of a SATA phy up event.
3417 * @pm8001_ha: our hba card information
3418 * @piomb: IO message buffer
3421 hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
3423 struct hw_event_resp *pPayload =
3424 (struct hw_event_resp *)(piomb + 4);
3425 u32 lr_evt_status_phyid_portid =
3426 le32_to_cpu(pPayload->lr_evt_status_phyid_portid);
3428 (u8)((lr_evt_status_phyid_portid & 0xF0000000) >> 28);
3429 u8 port_id = (u8)(lr_evt_status_phyid_portid & 0x0000000F);
3431 (u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
3432 u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate);
3433 u8 portstate = (u8)(npip_portstate & 0x0000000F);
3434 struct pm8001_port *port = &pm8001_ha->port[port_id];
3435 struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
3436 unsigned long flags;
3437 pm8001_dbg(pm8001_ha, DEVIO, "HW_EVENT_SATA_PHY_UP port id = %d, phy id = %d\n",
3440 port->port_id = port_id;
3441 port->port_state = portstate;
3442 phy->phy_state = PHY_STATE_LINK_UP_SPC;
3443 port->port_attached = 1;
3444 pm8001_get_lrate_mode(phy, link_rate);
3445 phy->phy_type |= PORT_TYPE_SATA;
3446 phy->phy_attached = 1;
3447 phy->sas_phy.oob_mode = SATA_OOB_MODE;
3448 sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE, GFP_ATOMIC);
3449 spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
3450 memcpy(phy->frame_rcvd, ((u8 *)&pPayload->sata_fis - 4),
3451 sizeof(struct dev_to_host_fis));
3452 phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
3453 phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
3454 phy->identify.device_type = SAS_SATA_DEV;
3455 pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
3456 spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
3457 pm8001_bytes_dmaed(pm8001_ha, phy_id);
3461 * hw_event_phy_down - notify libsas that the phy is down.
3462 * @pm8001_ha: our hba card information
3463 * @piomb: IO message buffer
3466 hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
3468 struct hw_event_resp *pPayload =
3469 (struct hw_event_resp *)(piomb + 4);
3470 u32 lr_evt_status_phyid_portid =
3471 le32_to_cpu(pPayload->lr_evt_status_phyid_portid);
3472 u8 port_id = (u8)(lr_evt_status_phyid_portid & 0x0000000F);
3474 (u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
3475 u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate);
3476 u8 portstate = (u8)(npip_portstate & 0x0000000F);
3477 struct pm8001_port *port = &pm8001_ha->port[port_id];
3478 struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
3479 port->port_state = portstate;
3481 phy->identify.device_type = 0;
3482 phy->phy_attached = 0;
3483 memset(&phy->dev_sas_addr, 0, SAS_ADDR_SIZE);
3484 switch (portstate) {
3488 pm8001_dbg(pm8001_ha, MSG, " PortInvalid portID %d\n",
3490 pm8001_dbg(pm8001_ha, MSG,
3491 " Last phy Down and port invalid\n");
3492 port->port_attached = 0;
3493 pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
3494 port_id, phy_id, 0, 0);
3497 pm8001_dbg(pm8001_ha, MSG, " Port In Reset portID %d\n",
3500 case PORT_NOT_ESTABLISHED:
3501 pm8001_dbg(pm8001_ha, MSG,
3502 " phy Down and PORT_NOT_ESTABLISHED\n");
3503 port->port_attached = 0;
3506 pm8001_dbg(pm8001_ha, MSG, " phy Down and PORT_LOSTCOMM\n");
3507 pm8001_dbg(pm8001_ha, MSG,
3508 " Last phy Down and port invalid\n");
3509 port->port_attached = 0;
3510 pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
3511 port_id, phy_id, 0, 0);
3514 port->port_attached = 0;
3515 pm8001_dbg(pm8001_ha, DEVIO, " phy Down and(default) = %x\n",
3523 * pm8001_mpi_reg_resp - process the register device ID response.
3524 * @pm8001_ha: our hba card information
3525 * @piomb: IO message buffer
3527 * When the SAS layer finds a device, it notifies the LLDD and the driver
3528 * registers the domain device with the FW. This response returns the device
3529 * ID that the FW has assigned; from then on, communication with the FW no
3530 * longer uses the SAS address but the FW-assigned device ID.
3532 int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
3537 struct pm8001_ccb_info *ccb;
3538 struct pm8001_device *pm8001_dev;
3539 struct dev_reg_resp *registerRespPayload =
3540 (struct dev_reg_resp *)(piomb + 4);
3542 htag = le32_to_cpu(registerRespPayload->tag);
3543 ccb = &pm8001_ha->ccb_info[htag];
3544 pm8001_dev = ccb->device;
3545 status = le32_to_cpu(registerRespPayload->status);
3546 device_id = le32_to_cpu(registerRespPayload->device_id);
3547 pm8001_dbg(pm8001_ha, MSG, " register device status = %d\n",
3550 case DEVREG_SUCCESS:
3551 pm8001_dbg(pm8001_ha, MSG, "DEVREG_SUCCESS\n");
3552 pm8001_dev->device_id = device_id;
3554 case DEVREG_FAILURE_OUT_OF_RESOURCE:
3555 pm8001_dbg(pm8001_ha, MSG, "DEVREG_FAILURE_OUT_OF_RESOURCE\n");
3557 case DEVREG_FAILURE_DEVICE_ALREADY_REGISTERED:
3558 pm8001_dbg(pm8001_ha, MSG,
3559 "DEVREG_FAILURE_DEVICE_ALREADY_REGISTERED\n");
3561 case DEVREG_FAILURE_INVALID_PHY_ID:
3562 pm8001_dbg(pm8001_ha, MSG, "DEVREG_FAILURE_INVALID_PHY_ID\n");
3564 case DEVREG_FAILURE_PHY_ID_ALREADY_REGISTERED:
3565 pm8001_dbg(pm8001_ha, MSG,
3566 "DEVREG_FAILURE_PHY_ID_ALREADY_REGISTERED\n");
3568 case DEVREG_FAILURE_PORT_ID_OUT_OF_RANGE:
3569 pm8001_dbg(pm8001_ha, MSG,
3570 "DEVREG_FAILURE_PORT_ID_OUT_OF_RANGE\n");
3572 case DEVREG_FAILURE_PORT_NOT_VALID_STATE:
3573 pm8001_dbg(pm8001_ha, MSG,
3574 "DEVREG_FAILURE_PORT_NOT_VALID_STATE\n");
3576 case DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID:
3577 pm8001_dbg(pm8001_ha, MSG,
3578 "DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID\n");
3581 pm8001_dbg(pm8001_ha, MSG,
3582 "DEVREG_FAILURE_DEVICE_TYPE_NOT_SUPPORTED\n");
3585 complete(pm8001_dev->dcompletion);
3587 ccb->ccb_tag = 0xFFFFFFFF;
3588 pm8001_tag_free(pm8001_ha, htag);
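/*
 * Hedged sketch (not the driver's actual call site): pm8001_mpi_reg_resp()
 * above completes pm8001_dev->dcompletion once the FW has handed back a
 * device ID, so a registration caller typically issues the REG_DEV request
 * and then blocks on that completion before using pm8001_dev->device_id.
 * The helper name below is hypothetical and the flag argument semantics are
 * not restated here.
 */
#if 0	/* example only */
static int example_register_and_wait(struct pm8001_hba_info *pm8001_ha,
				     struct pm8001_device *pm8001_dev)
{
	DECLARE_COMPLETION_ONSTACK(completion);

	pm8001_dev->dcompletion = &completion;
	if (pm8001_chip_reg_dev_req(pm8001_ha, pm8001_dev, 0))
		return -EIO;
	/* woken up by complete() in pm8001_mpi_reg_resp() */
	wait_for_completion(&completion);
	return 0;
}
#endif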
3592 int pm8001_mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
3596 struct dev_reg_resp *registerRespPayload =
3597 (struct dev_reg_resp *)(piomb + 4);
3599 status = le32_to_cpu(registerRespPayload->status);
3600 device_id = le32_to_cpu(registerRespPayload->device_id);
3602 pm8001_dbg(pm8001_ha, MSG,
3603 " deregister device failed ,status = %x, device_id = %x\n",
3609 * pm8001_mpi_fw_flash_update_resp - Response from FW for flash update command.
3610 * @pm8001_ha: our hba card information
3611 * @piomb: IO message buffer
3613 int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha,
3617 struct fw_flash_Update_resp *ppayload =
3618 (struct fw_flash_Update_resp *)(piomb + 4);
3619 u32 tag = le32_to_cpu(ppayload->tag);
3620 struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag];
3621 status = le32_to_cpu(ppayload->status);
3623 case FLASH_UPDATE_COMPLETE_PENDING_REBOOT:
3624 pm8001_dbg(pm8001_ha, MSG,
3625 ": FLASH_UPDATE_COMPLETE_PENDING_REBOOT\n");
3627 case FLASH_UPDATE_IN_PROGRESS:
3628 pm8001_dbg(pm8001_ha, MSG, ": FLASH_UPDATE_IN_PROGRESS\n");
3630 case FLASH_UPDATE_HDR_ERR:
3631 pm8001_dbg(pm8001_ha, MSG, ": FLASH_UPDATE_HDR_ERR\n");
3633 case FLASH_UPDATE_OFFSET_ERR:
3634 pm8001_dbg(pm8001_ha, MSG, ": FLASH_UPDATE_OFFSET_ERR\n");
3636 case FLASH_UPDATE_CRC_ERR:
3637 pm8001_dbg(pm8001_ha, MSG, ": FLASH_UPDATE_CRC_ERR\n");
3639 case FLASH_UPDATE_LENGTH_ERR:
3640 pm8001_dbg(pm8001_ha, MSG, ": FLASH_UPDATE_LENGTH_ERR\n");
3642 case FLASH_UPDATE_HW_ERR:
3643 pm8001_dbg(pm8001_ha, MSG, ": FLASH_UPDATE_HW_ERR\n");
3645 case FLASH_UPDATE_DNLD_NOT_SUPPORTED:
3646 pm8001_dbg(pm8001_ha, MSG,
3647 ": FLASH_UPDATE_DNLD_NOT_SUPPORTED\n");
3649 case FLASH_UPDATE_DISABLED:
3650 pm8001_dbg(pm8001_ha, MSG, ": FLASH_UPDATE_DISABLED\n");
3653 pm8001_dbg(pm8001_ha, DEVIO, "No matching status = %d\n",
3657 kfree(ccb->fw_control_context);
3659 ccb->ccb_tag = 0xFFFFFFFF;
3660 pm8001_tag_free(pm8001_ha, tag);
3661 complete(pm8001_ha->nvmd_completion);
3665 int pm8001_mpi_general_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
3669 struct general_event_resp *pPayload =
3670 (struct general_event_resp *)(piomb + 4);
3671 status = le32_to_cpu(pPayload->status);
3672 pm8001_dbg(pm8001_ha, MSG, " status = 0x%x\n", status);
3673 for (i = 0; i < GENERAL_EVENT_PAYLOAD; i++)
3674 pm8001_dbg(pm8001_ha, MSG, "inb_IOMB_payload[0x%x] 0x%x,\n",
3676 pPayload->inb_IOMB_payload[i]);
3680 int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
3683 struct pm8001_ccb_info *ccb;
3684 unsigned long flags;
3687 struct task_status_struct *ts;
3688 struct pm8001_device *pm8001_dev;
3690 struct task_abort_resp *pPayload =
3691 (struct task_abort_resp *)(piomb + 4);
3693 status = le32_to_cpu(pPayload->status);
3694 tag = le32_to_cpu(pPayload->tag);
3696 pm8001_dbg(pm8001_ha, FAIL, " TAG NULL. RETURNING !!!\n");
3700 scp = le32_to_cpu(pPayload->scp);
3701 ccb = &pm8001_ha->ccb_info[tag];
3703 pm8001_dev = ccb->device; /* retrieve device */
3706 pm8001_dbg(pm8001_ha, FAIL, " TASK NULL. RETURNING !!!\n");
3709 ts = &t->task_status;
3711 pm8001_dbg(pm8001_ha, FAIL, "task abort failed status 0x%x, tag = 0x%x, scp = 0x%x\n",
3715 pm8001_dbg(pm8001_ha, EH, "IO_SUCCESS\n");
3716 ts->resp = SAS_TASK_COMPLETE;
3717 ts->stat = SAS_SAM_STAT_GOOD;
3720 pm8001_dbg(pm8001_ha, EH, "IO_NOT_VALID\n");
3721 ts->resp = TMF_RESP_FUNC_FAILED;
3724 spin_lock_irqsave(&t->task_state_lock, flags);
3725 t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
3726 t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
3727 t->task_state_flags |= SAS_TASK_STATE_DONE;
3728 spin_unlock_irqrestore(&t->task_state_lock, flags);
3729 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
3732 if (pm8001_dev->id & NCQ_ABORT_ALL_FLAG) {
3733 pm8001_tag_free(pm8001_ha, tag);
3735 /* clear the flag */
3736 pm8001_dev->id &= 0xBFFFFFFF;
3744 * mpi_hw_event - handle an incoming hw event from the FW.
3745 * @pm8001_ha: our hba card information
3746 * @piomb: IO message buffer
3748 static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
3750 unsigned long flags;
3751 struct hw_event_resp *pPayload =
3752 (struct hw_event_resp *)(piomb + 4);
3753 u32 lr_evt_status_phyid_portid =
3754 le32_to_cpu(pPayload->lr_evt_status_phyid_portid);
3755 u8 port_id = (u8)(lr_evt_status_phyid_portid & 0x0000000F);
3757 (u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
3759 (u16)((lr_evt_status_phyid_portid & 0x00FFFF00) >> 8);
3761 (u8)((lr_evt_status_phyid_portid & 0x0F000000) >> 24);
3762 struct sas_ha_struct *sas_ha = pm8001_ha->sas;
3763 struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
3764 struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id];
3765 pm8001_dbg(pm8001_ha, DEVIO,
3766 "SPC HW event for portid:%d, phyid:%d, event:%x, status:%x\n",
3767 port_id, phy_id, eventType, status);
3768 switch (eventType) {
3769 case HW_EVENT_PHY_START_STATUS:
3770 pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_START_STATUS status = %x\n",
3775 if (pm8001_ha->flags == PM8001F_RUN_TIME &&
3776 phy->enable_completion != NULL) {
3777 complete(phy->enable_completion);
3778 phy->enable_completion = NULL;
3781 case HW_EVENT_SAS_PHY_UP:
3782 pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_SAS_PHY_UP\n");
3783 hw_event_sas_phy_up(pm8001_ha, piomb);
3785 case HW_EVENT_SATA_PHY_UP:
3786 pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_SATA_PHY_UP\n");
3787 hw_event_sata_phy_up(pm8001_ha, piomb);
3789 case HW_EVENT_PHY_STOP_STATUS:
3790 pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_STOP_STATUS status = %x\n",
3795 case HW_EVENT_SATA_SPINUP_HOLD:
3796 pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_SATA_SPINUP_HOLD\n");
3797 sas_notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD,
3800 case HW_EVENT_PHY_DOWN:
3801 pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_DOWN\n");
3802 sas_notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL,
3804 phy->phy_attached = 0;
3806 hw_event_phy_down(pm8001_ha, piomb);
3808 case HW_EVENT_PORT_INVALID:
3809 pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_INVALID\n");
3810 sas_phy_disconnected(sas_phy);
3811 phy->phy_attached = 0;
3812 sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
3815 /* The broadcast change primitive was received; tell libsas about this
3816 * event so it can revalidate the SAS domain. */
3817 case HW_EVENT_BROADCAST_CHANGE:
3818 pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_BROADCAST_CHANGE\n");
3819 pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_BROADCAST_CHANGE,
3820 port_id, phy_id, 1, 0);
3821 spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
3822 sas_phy->sas_prim = HW_EVENT_BROADCAST_CHANGE;
3823 spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
3824 sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD,
3827 case HW_EVENT_PHY_ERROR:
3828 pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_ERROR\n");
3829 sas_phy_disconnected(&phy->sas_phy);
3830 phy->phy_attached = 0;
3831 sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR, GFP_ATOMIC);
3833 case HW_EVENT_BROADCAST_EXP:
3834 pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_BROADCAST_EXP\n");
3835 spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
3836 sas_phy->sas_prim = HW_EVENT_BROADCAST_EXP;
3837 spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
3838 sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD,
3841 case HW_EVENT_LINK_ERR_INVALID_DWORD:
3842 pm8001_dbg(pm8001_ha, MSG,
3843 "HW_EVENT_LINK_ERR_INVALID_DWORD\n");
3844 pm8001_hw_event_ack_req(pm8001_ha, 0,
3845 HW_EVENT_LINK_ERR_INVALID_DWORD, port_id, phy_id, 0, 0);
3846 sas_phy_disconnected(sas_phy);
3847 phy->phy_attached = 0;
3848 sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
3851 case HW_EVENT_LINK_ERR_DISPARITY_ERROR:
3852 pm8001_dbg(pm8001_ha, MSG,
3853 "HW_EVENT_LINK_ERR_DISPARITY_ERROR\n");
3854 pm8001_hw_event_ack_req(pm8001_ha, 0,
3855 HW_EVENT_LINK_ERR_DISPARITY_ERROR,
3856 port_id, phy_id, 0, 0);
3857 sas_phy_disconnected(sas_phy);
3858 phy->phy_attached = 0;
3859 sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
3862 case HW_EVENT_LINK_ERR_CODE_VIOLATION:
3863 pm8001_dbg(pm8001_ha, MSG,
3864 "HW_EVENT_LINK_ERR_CODE_VIOLATION\n");
3865 pm8001_hw_event_ack_req(pm8001_ha, 0,
3866 HW_EVENT_LINK_ERR_CODE_VIOLATION,
3867 port_id, phy_id, 0, 0);
3868 sas_phy_disconnected(sas_phy);
3869 phy->phy_attached = 0;
3870 sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
3873 case HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH:
3874 pm8001_dbg(pm8001_ha, MSG,
3875 "HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH\n");
3876 pm8001_hw_event_ack_req(pm8001_ha, 0,
3877 HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH,
3878 port_id, phy_id, 0, 0);
3879 sas_phy_disconnected(sas_phy);
3880 phy->phy_attached = 0;
3881 sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
3884 case HW_EVENT_MALFUNCTION:
3885 pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_MALFUNCTION\n");
3887 case HW_EVENT_BROADCAST_SES:
3888 pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_BROADCAST_SES\n");
3889 spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
3890 sas_phy->sas_prim = HW_EVENT_BROADCAST_SES;
3891 spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
3892 sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD,
3895 case HW_EVENT_INBOUND_CRC_ERROR:
3896 pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_INBOUND_CRC_ERROR\n");
3897 pm8001_hw_event_ack_req(pm8001_ha, 0,
3898 HW_EVENT_INBOUND_CRC_ERROR,
3899 port_id, phy_id, 0, 0);
3901 case HW_EVENT_HARD_RESET_RECEIVED:
3902 pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_HARD_RESET_RECEIVED\n");
3903 sas_notify_port_event(sas_phy, PORTE_HARD_RESET, GFP_ATOMIC);
3905 case HW_EVENT_ID_FRAME_TIMEOUT:
3906 pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_ID_FRAME_TIMEOUT\n");
3907 sas_phy_disconnected(sas_phy);
3908 phy->phy_attached = 0;
3909 sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
3912 case HW_EVENT_LINK_ERR_PHY_RESET_FAILED:
3913 pm8001_dbg(pm8001_ha, MSG,
3914 "HW_EVENT_LINK_ERR_PHY_RESET_FAILED\n");
3915 pm8001_hw_event_ack_req(pm8001_ha, 0,
3916 HW_EVENT_LINK_ERR_PHY_RESET_FAILED,
3917 port_id, phy_id, 0, 0);
3918 sas_phy_disconnected(sas_phy);
3919 phy->phy_attached = 0;
3920 sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
3923 case HW_EVENT_PORT_RESET_TIMER_TMO:
3924 pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RESET_TIMER_TMO\n");
3925 sas_phy_disconnected(sas_phy);
3926 phy->phy_attached = 0;
3927 sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
3930 case HW_EVENT_PORT_RECOVERY_TIMER_TMO:
3931 pm8001_dbg(pm8001_ha, MSG,
3932 "HW_EVENT_PORT_RECOVERY_TIMER_TMO\n");
3933 sas_phy_disconnected(sas_phy);
3934 phy->phy_attached = 0;
3935 sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
3938 case HW_EVENT_PORT_RECOVER:
3939 pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RECOVER\n");
3941 case HW_EVENT_PORT_RESET_COMPLETE:
3942 pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RESET_COMPLETE\n");
3944 case EVENT_BROADCAST_ASYNCH_EVENT:
3945 pm8001_dbg(pm8001_ha, MSG, "EVENT_BROADCAST_ASYNCH_EVENT\n");
3948 pm8001_dbg(pm8001_ha, DEVIO, "Unknown event type = %x\n",
3956 * process_one_iomb - process one outbound Queue memory block
3957 * @pm8001_ha: our hba card information
3958 * @piomb: IO message buffer
3960 static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
3962 __le32 pHeader = *(__le32 *)piomb;
3963 u8 opc = (u8)((le32_to_cpu(pHeader)) & 0xFFF);
3965 pm8001_dbg(pm8001_ha, MSG, "process_one_iomb:\n");
3969 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_ECHO\n");
3971 case OPC_OUB_HW_EVENT:
3972 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_HW_EVENT\n");
3973 mpi_hw_event(pm8001_ha, piomb);
3975 case OPC_OUB_SSP_COMP:
3976 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SSP_COMP\n");
3977 mpi_ssp_completion(pm8001_ha, piomb);
3979 case OPC_OUB_SMP_COMP:
3980 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SMP_COMP\n");
3981 mpi_smp_completion(pm8001_ha, piomb);
3983 case OPC_OUB_LOCAL_PHY_CNTRL:
3984 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_LOCAL_PHY_CNTRL\n");
3985 pm8001_mpi_local_phy_ctl(pm8001_ha, piomb);
3987 case OPC_OUB_DEV_REGIST:
3988 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_DEV_REGIST\n");
3989 pm8001_mpi_reg_resp(pm8001_ha, piomb);
3991 case OPC_OUB_DEREG_DEV:
3992 pm8001_dbg(pm8001_ha, MSG, "unregister the device\n");
3993 pm8001_mpi_dereg_resp(pm8001_ha, piomb);
3995 case OPC_OUB_GET_DEV_HANDLE:
3996 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GET_DEV_HANDLE\n");
3998 case OPC_OUB_SATA_COMP:
3999 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SATA_COMP\n");
4000 mpi_sata_completion(pm8001_ha, piomb);
4002 case OPC_OUB_SATA_EVENT:
4003 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SATA_EVENT\n");
4004 mpi_sata_event(pm8001_ha, piomb);
4006 case OPC_OUB_SSP_EVENT:
4007 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SSP_EVENT\n");
4008 mpi_ssp_event(pm8001_ha, piomb);
4010 case OPC_OUB_DEV_HANDLE_ARRIV:
4011 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_DEV_HANDLE_ARRIV\n");
4012 /*This is for target*/
4014 case OPC_OUB_SSP_RECV_EVENT:
4015 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SSP_RECV_EVENT\n");
4016 /*This is for target*/
4018 case OPC_OUB_DEV_INFO:
4019 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_DEV_INFO\n");
4021 case OPC_OUB_FW_FLASH_UPDATE:
4022 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_FW_FLASH_UPDATE\n");
4023 pm8001_mpi_fw_flash_update_resp(pm8001_ha, piomb);
4025 case OPC_OUB_GPIO_RESPONSE:
4026 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GPIO_RESPONSE\n");
4028 case OPC_OUB_GPIO_EVENT:
4029 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GPIO_EVENT\n");
4031 case OPC_OUB_GENERAL_EVENT:
4032 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GENERAL_EVENT\n");
4033 pm8001_mpi_general_event(pm8001_ha, piomb);
4035 case OPC_OUB_SSP_ABORT_RSP:
4036 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SSP_ABORT_RSP\n");
4037 pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
4039 case OPC_OUB_SATA_ABORT_RSP:
4040 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SATA_ABORT_RSP\n");
4041 pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
4043 case OPC_OUB_SAS_DIAG_MODE_START_END:
4044 pm8001_dbg(pm8001_ha, MSG,
4045 "OPC_OUB_SAS_DIAG_MODE_START_END\n");
4047 case OPC_OUB_SAS_DIAG_EXECUTE:
4048 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SAS_DIAG_EXECUTE\n");
4050 case OPC_OUB_GET_TIME_STAMP:
4051 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GET_TIME_STAMP\n");
4053 case OPC_OUB_SAS_HW_EVENT_ACK:
4054 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SAS_HW_EVENT_ACK\n");
4056 case OPC_OUB_PORT_CONTROL:
4057 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_PORT_CONTROL\n");
4059 case OPC_OUB_SMP_ABORT_RSP:
4060 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SMP_ABORT_RSP\n");
4061 pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
4063 case OPC_OUB_GET_NVMD_DATA:
4064 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GET_NVMD_DATA\n");
4065 pm8001_mpi_get_nvmd_resp(pm8001_ha, piomb);
4067 case OPC_OUB_SET_NVMD_DATA:
4068 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SET_NVMD_DATA\n");
4069 pm8001_mpi_set_nvmd_resp(pm8001_ha, piomb);
4071 case OPC_OUB_DEVICE_HANDLE_REMOVAL:
4072 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_DEVICE_HANDLE_REMOVAL\n");
4074 case OPC_OUB_SET_DEVICE_STATE:
4075 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SET_DEVICE_STATE\n");
4076 pm8001_mpi_set_dev_state_resp(pm8001_ha, piomb);
4078 case OPC_OUB_GET_DEVICE_STATE:
4079 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GET_DEVICE_STATE\n");
4081 case OPC_OUB_SET_DEV_INFO:
4082 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SET_DEV_INFO\n");
4084 case OPC_OUB_SAS_RE_INITIALIZE:
4085 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SAS_RE_INITIALIZE\n");
4088 pm8001_dbg(pm8001_ha, DEVIO,
4089 "Unknown outbound Queue IOMB OPC = %x\n",
4095 static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
4097 struct outbound_queue_table *circularQ;
4100 u32 ret = MPI_IO_STATUS_FAIL;
4101 unsigned long flags;
4103 spin_lock_irqsave(&pm8001_ha->lock, flags);
4104 circularQ = &pm8001_ha->outbnd_q_tbl[vec];
4106 ret = pm8001_mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc);
4107 if (MPI_IO_STATUS_SUCCESS == ret) {
4108 /* process the outbound message */
4109 process_one_iomb(pm8001_ha, (void *)(pMsg1 - 4));
4110 /* free the message from the outbound circular buffer */
4111 pm8001_mpi_msg_free_set(pm8001_ha, pMsg1,
4114 if (MPI_IO_STATUS_BUSY == ret) {
4115 /* Update the producer index from SPC */
4116 circularQ->producer_index =
4117 cpu_to_le32(pm8001_read_32(circularQ->pi_virt));
4118 if (le32_to_cpu(circularQ->producer_index) ==
4119 circularQ->consumer_idx)
4124 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
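/*
 * Illustrative sketch (generic, not the driver's code): process_oq() above
 * drains one outbound queue by consuming messages until the firmware-owned
 * producer index, re-read from circularQ->pi_virt, catches up with the
 * driver-owned consumer index. The same ring-drain pattern, stripped of the
 * MPI details, looks like this:
 */
#if 0	/* example only */
#include <stdint.h>

struct ring {
	volatile uint32_t *pi_virt;	/* producer index updated by HW   */
	uint32_t consumer_idx;		/* consumer index owned by driver */
	uint32_t num_elements;
};

static void drain_ring(struct ring *q, void (*handle)(uint32_t slot))
{
	for (;;) {
		uint32_t pi = *q->pi_virt;

		if (pi == q->consumer_idx)	/* nothing new produced */
			break;
		while (q->consumer_idx != pi) {
			handle(q->consumer_idx);
			q->consumer_idx =
				(q->consumer_idx + 1) % q->num_elements;
		}
	}
}
#endif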
4128 /* DMA_... to our direction translation. */
4129 static const u8 data_dir_flags[] = {
4130 [DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT, /* UNSPECIFIED */
4131 [DMA_TO_DEVICE] = DATA_DIR_OUT, /* OUTBOUND */
4132 [DMA_FROM_DEVICE] = DATA_DIR_IN, /* INBOUND */
4133 [DMA_NONE] = DATA_DIR_NONE, /* NO TRANSFER */
4136 pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd)
4139 struct scatterlist *sg;
4140 struct pm8001_prd *buf_prd = prd;
4142 for_each_sg(scatter, sg, nr, i) {
4143 buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
4144 buf_prd->im_len.len = cpu_to_le32(sg_dma_len(sg));
4145 buf_prd->im_len.e = 0;
4150 static void build_smp_cmd(u32 deviceID, __le32 hTag, struct smp_req *psmp_cmd)
4152 psmp_cmd->tag = hTag;
4153 psmp_cmd->device_id = cpu_to_le32(deviceID);
4154 psmp_cmd->len_ip_ir = cpu_to_le32(1|(1 << 1));
4158 * pm8001_chip_smp_req - send an SMP task to the FW
4159 * @pm8001_ha: our hba card information.
4160 * @ccb: the ccb information this request used.
4162 static int pm8001_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
4163 struct pm8001_ccb_info *ccb)
4166 struct sas_task *task = ccb->task;
4167 struct domain_device *dev = task->dev;
4168 struct pm8001_device *pm8001_dev = dev->lldd_dev;
4169 struct scatterlist *sg_req, *sg_resp;
4170 u32 req_len, resp_len;
4171 struct smp_req smp_cmd;
4173 struct inbound_queue_table *circularQ;
4175 memset(&smp_cmd, 0, sizeof(smp_cmd));
4177 * DMA-map SMP request, response buffers
4179 sg_req = &task->smp_task.smp_req;
4180 elem = dma_map_sg(pm8001_ha->dev, sg_req, 1, DMA_TO_DEVICE);
4183 req_len = sg_dma_len(sg_req);
4185 sg_resp = &task->smp_task.smp_resp;
4186 elem = dma_map_sg(pm8001_ha->dev, sg_resp, 1, DMA_FROM_DEVICE);
4191 resp_len = sg_dma_len(sg_resp);
4192 /* must be in dwords */
4193 if ((req_len & 0x3) || (resp_len & 0x3)) {
4198 opc = OPC_INB_SMP_REQUEST;
4199 circularQ = &pm8001_ha->inbnd_q_tbl[0];
4200 smp_cmd.tag = cpu_to_le32(ccb->ccb_tag);
4201 smp_cmd.long_smp_req.long_req_addr =
4202 cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req));
4203 smp_cmd.long_smp_req.long_req_size =
4204 cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-4);
4205 smp_cmd.long_smp_req.long_resp_addr =
4206 cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_resp));
4207 smp_cmd.long_smp_req.long_resp_size =
4208 cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4);
4209 build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag, &smp_cmd);
4210 rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
4211 &smp_cmd, sizeof(smp_cmd), 0);
4218 dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_resp, 1,
4221 dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_req, 1,
4227 * pm8001_chip_ssp_io_req - send an SSP task to the FW
4228 * @pm8001_ha: our hba card information.
4229 * @ccb: the ccb information this request used.
4231 static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
4232 struct pm8001_ccb_info *ccb)
4234 struct sas_task *task = ccb->task;
4235 struct domain_device *dev = task->dev;
4236 struct pm8001_device *pm8001_dev = dev->lldd_dev;
4237 struct ssp_ini_io_start_req ssp_cmd;
4238 u32 tag = ccb->ccb_tag;
4241 struct inbound_queue_table *circularQ;
4242 u32 opc = OPC_INB_SSPINIIOSTART;
4243 memset(&ssp_cmd, 0, sizeof(ssp_cmd));
4244 memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8);
4246 cpu_to_le32(data_dir_flags[task->data_dir] << 8 | 0x0);
4247 /* 0 for SAS 1.1 compatible TLR */
4248 ssp_cmd.data_len = cpu_to_le32(task->total_xfer_len);
4249 ssp_cmd.device_id = cpu_to_le32(pm8001_dev->device_id);
4250 ssp_cmd.tag = cpu_to_le32(tag);
4251 if (task->ssp_task.enable_first_burst)
4252 ssp_cmd.ssp_iu.efb_prio_attr |= 0x80;
4253 ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_prio << 3);
4254 ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7);
4255 memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cmd->cmnd,
4256 task->ssp_task.cmd->cmd_len);
4257 circularQ = &pm8001_ha->inbnd_q_tbl[0];
4259 /* fill in PRD (scatter/gather) table, if any */
4260 if (task->num_scatter > 1) {
4261 pm8001_chip_make_sg(task->scatter, ccb->n_elem, ccb->buf_prd);
4262 phys_addr = ccb->ccb_dma_handle;
4263 ssp_cmd.addr_low = cpu_to_le32(lower_32_bits(phys_addr));
4264 ssp_cmd.addr_high = cpu_to_le32(upper_32_bits(phys_addr));
4265 ssp_cmd.esgl = cpu_to_le32(1<<31);
4266 } else if (task->num_scatter == 1) {
4267 u64 dma_addr = sg_dma_address(task->scatter);
4268 ssp_cmd.addr_low = cpu_to_le32(lower_32_bits(dma_addr));
4269 ssp_cmd.addr_high = cpu_to_le32(upper_32_bits(dma_addr));
4270 ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
4272 } else if (task->num_scatter == 0) {
4273 ssp_cmd.addr_low = 0;
4274 ssp_cmd.addr_high = 0;
4275 ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
4278 ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd,
4279 sizeof(ssp_cmd), 0);
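/*
 * Illustrative note: pm8001_chip_ssp_io_req() above and the SATA builder
 * below share two conventions. First, the generic dma_data_direction is
 * translated through data_dir_flags[] and placed at bit 8 of the direction
 * field, i.e. dir = data_dir_flags[task->data_dir] << 8. Second, the data
 * buffer is described in one of three ways depending on task->num_scatter:
 * more than one segment builds a PRD table via pm8001_chip_make_sg() and
 * points addr_low/addr_high at ccb->ccb_dma_handle with the ESGL bit
 * (1 << 31) set; exactly one segment is programmed directly with its DMA
 * address and length; zero segments leaves the address fields at 0.
 */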
4283 static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
4284 struct pm8001_ccb_info *ccb)
4286 struct sas_task *task = ccb->task;
4287 struct domain_device *dev = task->dev;
4288 struct pm8001_device *pm8001_ha_dev = dev->lldd_dev;
4289 u32 tag = ccb->ccb_tag;
4291 struct sata_start_req sata_cmd;
4292 u32 hdr_tag, ncg_tag = 0;
4296 struct inbound_queue_table *circularQ;
4297 unsigned long flags;
4298 u32 opc = OPC_INB_SATA_HOST_OPSTART;
4299 memset(&sata_cmd, 0, sizeof(sata_cmd));
4300 circularQ = &pm8001_ha->inbnd_q_tbl[0];
4301 if (task->data_dir == DMA_NONE) {
4302 ATAP = 0x04; /* no data*/
4303 pm8001_dbg(pm8001_ha, IO, "no data\n");
4304 } else if (likely(!task->ata_task.device_control_reg_update)) {
4305 if (task->ata_task.dma_xfer) {
4306 ATAP = 0x06; /* DMA */
4307 pm8001_dbg(pm8001_ha, IO, "DMA\n");
4309 ATAP = 0x05; /* PIO*/
4310 pm8001_dbg(pm8001_ha, IO, "PIO\n");
4312 if (task->ata_task.use_ncq &&
4313 dev->sata_dev.class != ATA_DEV_ATAPI) {
4314 ATAP = 0x07; /* FPDMA */
4315 pm8001_dbg(pm8001_ha, IO, "FPDMA\n");
4318 if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) {
4319 task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
4322 dir = data_dir_flags[task->data_dir] << 8;
4323 sata_cmd.tag = cpu_to_le32(tag);
4324 sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
4325 sata_cmd.data_len = cpu_to_le32(task->total_xfer_len);
4326 sata_cmd.ncqtag_atap_dir_m =
4327 cpu_to_le32(((ncg_tag & 0xff)<<16)|((ATAP & 0x3f) << 10) | dir);
4328 sata_cmd.sata_fis = task->ata_task.fis;
4329 if (likely(!task->ata_task.device_control_reg_update))
4330 sata_cmd.sata_fis.flags |= 0x80;/* C=1: update ATA cmd reg */
4331 sata_cmd.sata_fis.flags &= 0xF0;/* PM_PORT field shall be 0 */
4332 /* fill in PRD (scatter/gather) table, if any */
4333 if (task->num_scatter > 1) {
4334 pm8001_chip_make_sg(task->scatter, ccb->n_elem, ccb->buf_prd);
4335 phys_addr = ccb->ccb_dma_handle;
4336 sata_cmd.addr_low = lower_32_bits(phys_addr);
4337 sata_cmd.addr_high = upper_32_bits(phys_addr);
4338 sata_cmd.esgl = cpu_to_le32(1 << 31);
4339 } else if (task->num_scatter == 1) {
4340 u64 dma_addr = sg_dma_address(task->scatter);
4341 sata_cmd.addr_low = lower_32_bits(dma_addr);
4342 sata_cmd.addr_high = upper_32_bits(dma_addr);
4343 sata_cmd.len = cpu_to_le32(task->total_xfer_len);
4345 } else if (task->num_scatter == 0) {
4346 sata_cmd.addr_low = 0;
4347 sata_cmd.addr_high = 0;
4348 sata_cmd.len = cpu_to_le32(task->total_xfer_len);
4352 /* Check for read log for failed drive and return */
4353 if (sata_cmd.sata_fis.command == 0x2f) {
4354 if (((pm8001_ha_dev->id & NCQ_READ_LOG_FLAG) ||
4355 (pm8001_ha_dev->id & NCQ_ABORT_ALL_FLAG) ||
4356 (pm8001_ha_dev->id & NCQ_2ND_RLE_FLAG))) {
4357 struct task_status_struct *ts;
4359 pm8001_ha_dev->id &= 0xDFFFFFFF;
4360 ts = &task->task_status;
4362 spin_lock_irqsave(&task->task_state_lock, flags);
4363 ts->resp = SAS_TASK_COMPLETE;
4364 ts->stat = SAS_SAM_STAT_GOOD;
4365 task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
4366 task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
4367 task->task_state_flags |= SAS_TASK_STATE_DONE;
4368 if (unlikely((task->task_state_flags &
4369 SAS_TASK_STATE_ABORTED))) {
4370 spin_unlock_irqrestore(&task->task_state_lock,
4372 pm8001_dbg(pm8001_ha, FAIL,
4373 "task 0x%p resp 0x%x stat 0x%x but aborted by upper layer\n",
4376 pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
4378 spin_unlock_irqrestore(&task->task_state_lock,
4380 pm8001_ccb_task_free_done(pm8001_ha, task,
4387 ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd,
4388 sizeof(sata_cmd), 0);
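/*
 * Illustrative note: for FPDMA (NCQ) commands the queue tag obtained from
 * pm8001_get_ncq_tag() is folded into the H2D FIS sector count field as
 * (hdr_tag << 3) above, which is where SATA NCQ carries the TAG value
 * (sector count bits 7:3); the ncqtag_atap_dir_m dword of the inbound IOMB
 * likewise carries an NCQ tag field in bits [23:16] next to the ATAP
 * protocol code (bits [15:10]) and the direction code.
 */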
4393 * pm8001_chip_phy_start_req - start phy via PHY_START COMMAND
4394 * @pm8001_ha: our hba card information.
4395 * @phy_id: the phy id which we want to start up.
4398 pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
4400 struct phy_start_req payload;
4401 struct inbound_queue_table *circularQ;
4404 u32 opcode = OPC_INB_PHYSTART;
4405 circularQ = &pm8001_ha->inbnd_q_tbl[0];
4406 memset(&payload, 0, sizeof(payload));
4407 payload.tag = cpu_to_le32(tag);
4409 ** [0:7] PHY Identifier
4410 ** [8:11] link rate 1.5G, 3G, 6G
4411 ** [12:13] link mode 01b SAS mode; 10b SATA mode; 11b both
4412 ** [14] 0b disable spin up hold; 1b enable spin up hold
4414 payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
4415 LINKMODE_AUTO | LINKRATE_15 |
4416 LINKRATE_30 | LINKRATE_60 | phy_id);
4417 payload.sas_identify.dev_type = SAS_END_DEVICE;
4418 payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL;
4419 memcpy(payload.sas_identify.sas_addr,
4420 pm8001_ha->sas_addr, SAS_ADDR_SIZE);
4421 payload.sas_identify.phy_id = phy_id;
4422 ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload,
4423 sizeof(payload), 0);
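/*
 * Hedged example: per the bit layout documented in the comment above, the
 * ase_sh_lm_slr_phyid dword is just an OR of the spin-up-hold, link-mode,
 * link-rate and phy-id fields. Written out with explicit shifts (the actual
 * numeric values of SPINHOLD_DISABLE, LINKMODE_AUTO and the LINKRATE_*
 * macros live in the driver headers and are not restated here):
 */
#if 0	/* example only */
static u32 example_phy_start_word(u8 phy_id, u8 link_rate_bits,
				  u8 link_mode, bool spinup_hold)
{
	return ((spinup_hold ? 1 : 0) << 14) |	/* [14]    spin up hold   */
	       ((link_mode & 0x3) << 12) |	/* [13:12] link mode      */
	       ((link_rate_bits & 0xF) << 8) |	/* [11:8]  link rate      */
	       (phy_id & 0xFF);			/* [7:0]   phy identifier */
}
#endif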
4428 * pm8001_chip_phy_stop_req - stop phy via PHY_STOP COMMAND
4429 * @pm8001_ha: our hba card information.
4430 * @phy_id: the phy id which we want to stop.
4432 static int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
4435 struct phy_stop_req payload;
4436 struct inbound_queue_table *circularQ;
4439 u32 opcode = OPC_INB_PHYSTOP;
4440 circularQ = &pm8001_ha->inbnd_q_tbl[0];
4441 memset(&payload, 0, sizeof(payload));
4442 payload.tag = cpu_to_le32(tag);
4443 payload.phy_id = cpu_to_le32(phy_id);
4444 ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload,
4445 sizeof(payload), 0);
4450 * see comments on pm8001_mpi_reg_resp.
4452 static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
4453 struct pm8001_device *pm8001_dev, u32 flag)
4455 struct reg_dev_req payload;
4457 u32 stp_sspsmp_sata = 0x4;
4458 struct inbound_queue_table *circularQ;
4459 u32 linkrate, phy_id;
4460 int rc, tag = 0xdeadbeef;
4461 struct pm8001_ccb_info *ccb;
4463 u16 firstBurstSize = 0;
4465 struct domain_device *dev = pm8001_dev->sas_device;
4466 struct domain_device *parent_dev = dev->parent;
4467 struct pm8001_port *port = dev->port->lldd_port;
4468 circularQ = &pm8001_ha->inbnd_q_tbl[0];
4470 memset(&payload, 0, sizeof(payload));
4471 rc = pm8001_tag_alloc(pm8001_ha, &tag);
4474 ccb = &pm8001_ha->ccb_info[tag];
4475 ccb->device = pm8001_dev;
4477 payload.tag = cpu_to_le32(tag);
4479 stp_sspsmp_sata = 0x02; /*direct attached sata */
4481 if (pm8001_dev->dev_type == SAS_SATA_DEV)
4482 stp_sspsmp_sata = 0x00; /* stp*/
4483 else if (pm8001_dev->dev_type == SAS_END_DEVICE ||
4484 dev_is_expander(pm8001_dev->dev_type))
4485 stp_sspsmp_sata = 0x01; /*ssp or smp*/
4487 if (parent_dev && dev_is_expander(parent_dev->dev_type))
4488 phy_id = parent_dev->ex_dev.ex_phy->phy_id;
4490 phy_id = pm8001_dev->attached_phy;
4491 opc = OPC_INB_REG_DEV;
4492 linkrate = (pm8001_dev->sas_device->linkrate < dev->port->linkrate) ?
4493 pm8001_dev->sas_device->linkrate : dev->port->linkrate;
4494 payload.phyid_portid =
4495 cpu_to_le32(((port->port_id) & 0x0F) |
4496 ((phy_id & 0x0F) << 4));
4497 payload.dtype_dlr_retry = cpu_to_le32((retryFlag & 0x01) |
4498 ((linkrate & 0x0F) * 0x1000000) |
4499 ((stp_sspsmp_sata & 0x03) * 0x10000000));
4500 payload.firstburstsize_ITNexustimeout =
4501 cpu_to_le32(ITNT | (firstBurstSize * 0x10000));
4502 memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr,
4504 rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
4505 sizeof(payload), 0);
4510 * see comments on pm8001_mpi_reg_resp.
4512 int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha,
4515 struct dereg_dev_req payload;
4516 u32 opc = OPC_INB_DEREG_DEV_HANDLE;
4518 struct inbound_queue_table *circularQ;
4520 circularQ = &pm8001_ha->inbnd_q_tbl[0];
4521 memset(&payload, 0, sizeof(payload));
4522 payload.tag = cpu_to_le32(1);
4523 payload.device_id = cpu_to_le32(device_id);
4524 pm8001_dbg(pm8001_ha, MSG, "unregister device device_id = %d\n",
4526 ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
4527 sizeof(payload), 0);
4532 * pm8001_chip_phy_ctl_req - support the local phy operation
4533 * @pm8001_ha: our hba card information.
4534 * @phyId: the phy id which we want to operate on
4535 * @phy_op: the phy operation to request
4537 static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
4538 u32 phyId, u32 phy_op)
4540 struct local_phy_ctl_req payload;
4541 struct inbound_queue_table *circularQ;
4543 u32 opc = OPC_INB_LOCAL_PHY_CONTROL;
4544 memset(&payload, 0, sizeof(payload));
4545 circularQ = &pm8001_ha->inbnd_q_tbl[0];
4546 payload.tag = cpu_to_le32(1);
4547 payload.phyop_phyid =
4548 cpu_to_le32(((phy_op & 0xff) << 8) | (phyId & 0x0F));
4549 ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
4550 sizeof(payload), 0);
4554 static u32 pm8001_chip_is_our_interrupt(struct pm8001_hba_info *pm8001_ha)
4556 #ifdef PM8001_USE_MSIX
4561 value = pm8001_cr32(pm8001_ha, 0, MSGU_ODR);
4569 * pm8001_chip_isr - PM8001 isr handler.
4570 * @pm8001_ha: our hba card information.
4574 pm8001_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec)
4576 pm8001_chip_interrupt_disable(pm8001_ha, vec);
4577 pm8001_dbg(pm8001_ha, DEVIO,
4578 "irq vec %d, ODMR:0x%x\n",
4579 vec, pm8001_cr32(pm8001_ha, 0, 0x30));
4580 process_oq(pm8001_ha, vec);
4581 pm8001_chip_interrupt_enable(pm8001_ha, vec);
4585 static int send_task_abort(struct pm8001_hba_info *pm8001_ha, u32 opc,
4586 u32 dev_id, u8 flag, u32 task_tag, u32 cmd_tag)
4588 struct task_abort_req task_abort;
4589 struct inbound_queue_table *circularQ;
4591 circularQ = &pm8001_ha->inbnd_q_tbl[0];
4592 memset(&task_abort, 0, sizeof(task_abort));
4593 if (ABORT_SINGLE == (flag & ABORT_MASK)) {
4594 task_abort.abort_all = 0;
4595 task_abort.device_id = cpu_to_le32(dev_id);
4596 task_abort.tag_to_abort = cpu_to_le32(task_tag);
4597 task_abort.tag = cpu_to_le32(cmd_tag);
4598 } else if (ABORT_ALL == (flag & ABORT_MASK)) {
4599 task_abort.abort_all = cpu_to_le32(1);
4600 task_abort.device_id = cpu_to_le32(dev_id);
4601 task_abort.tag = cpu_to_le32(cmd_tag);
4603 ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort,
4604 sizeof(task_abort), 0);
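/*
 * Illustrative note: send_task_abort() above encodes two abort modes in the
 * inbound IOMB. With ABORT_SINGLE the tag of the one outstanding command is
 * passed in tag_to_abort; with ABORT_ALL only the device id matters and
 * abort_all is set to 1, asking the FW to abort every outstanding command
 * for that device. pm8001_chip_abort_task() below only picks the opcode
 * (SSP/SATA/SMP abort) according to the device type and forwards the flag
 * unchanged.
 */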
4609 * pm8001_chip_abort_task - abort a SAS task when an error or exception has occurred.
4611 int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha,
4612 struct pm8001_device *pm8001_dev, u8 flag, u32 task_tag, u32 cmd_tag)
4615 int rc = TMF_RESP_FUNC_FAILED;
4616 pm8001_dbg(pm8001_ha, EH, "cmd_tag = %x, abort task tag = 0x%x\n",
4618 if (pm8001_dev->dev_type == SAS_END_DEVICE)
4619 opc = OPC_INB_SSP_ABORT;
4620 else if (pm8001_dev->dev_type == SAS_SATA_DEV)
4621 opc = OPC_INB_SATA_ABORT;
4623 opc = OPC_INB_SMP_ABORT;/* SMP */
4624 device_id = pm8001_dev->device_id;
4625 rc = send_task_abort(pm8001_ha, opc, device_id, flag,
4627 if (rc != TMF_RESP_FUNC_COMPLETE)
4628 pm8001_dbg(pm8001_ha, EH, "rc= %d\n", rc);
4633 * pm8001_chip_ssp_tm_req - build the task management command.
4634 * @pm8001_ha: our hba card information.
4635 * @ccb: the ccb information.
4636 * @tmf: task management function.
4638 int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha,
4639 struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf)
4641 struct sas_task *task = ccb->task;
4642 struct domain_device *dev = task->dev;
4643 struct pm8001_device *pm8001_dev = dev->lldd_dev;
4644 u32 opc = OPC_INB_SSPINITMSTART;
4645 struct inbound_queue_table *circularQ;
4646 struct ssp_ini_tm_start_req sspTMCmd;
4649 memset(&sspTMCmd, 0, sizeof(sspTMCmd));
4650 sspTMCmd.device_id = cpu_to_le32(pm8001_dev->device_id);
4651 sspTMCmd.relate_tag = cpu_to_le32(tmf->tag_of_task_to_be_managed);
4652 sspTMCmd.tmf = cpu_to_le32(tmf->tmf);
4653 memcpy(sspTMCmd.lun, task->ssp_task.LUN, 8);
4654 sspTMCmd.tag = cpu_to_le32(ccb->ccb_tag);
4655 if (pm8001_ha->chip_id != chip_8001)
4656 sspTMCmd.ds_ads_m = 0x08;
4657 circularQ = &pm8001_ha->inbnd_q_tbl[0];
4658 ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd,
4659 sizeof(sspTMCmd), 0);
4663 int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
4666 u32 opc = OPC_INB_GET_NVMD_DATA;
4670 struct pm8001_ccb_info *ccb;
4671 struct inbound_queue_table *circularQ;
4672 struct get_nvm_data_req nvmd_req;
4673 struct fw_control_ex *fw_control_context;
4674 struct pm8001_ioctl_payload *ioctl_payload = payload;
4676 nvmd_type = ioctl_payload->minor_function;
4677 fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL);
4678 if (!fw_control_context)
4680 fw_control_context->usrAddr = (u8 *)ioctl_payload->func_specific;
4681 fw_control_context->len = ioctl_payload->rd_length;
4682 circularQ = &pm8001_ha->inbnd_q_tbl[0];
4683 memset(&nvmd_req, 0, sizeof(nvmd_req));
4684 rc = pm8001_tag_alloc(pm8001_ha, &tag);
4686 kfree(fw_control_context);
4689 ccb = &pm8001_ha->ccb_info[tag];
4691 ccb->fw_control_context = fw_control_context;
4692 nvmd_req.tag = cpu_to_le32(tag);
4694 switch (nvmd_type) {
4696 u32 twi_addr, twi_page_size;
4700 nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | twi_addr << 16 |
4701 twi_page_size << 8 | TWI_DEVICE);
4702 nvmd_req.resp_len = cpu_to_le32(ioctl_payload->rd_length);
4703 nvmd_req.resp_addr_hi =
4704 cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
4705 nvmd_req.resp_addr_lo =
4706 cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
4710 nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | C_SEEPROM);
4711 nvmd_req.resp_len = cpu_to_le32(ioctl_payload->rd_length);
4712 nvmd_req.resp_addr_hi =
4713 cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
4714 nvmd_req.resp_addr_lo =
4715 cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
4719 nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | VPD_FLASH);
4720 nvmd_req.resp_len = cpu_to_le32(ioctl_payload->rd_length);
4721 nvmd_req.resp_addr_hi =
4722 cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
4723 nvmd_req.resp_addr_lo =
4724 cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
4728 nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | EXPAN_ROM);
4729 nvmd_req.resp_len = cpu_to_le32(ioctl_payload->rd_length);
4730 nvmd_req.resp_addr_hi =
4731 cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
4732 nvmd_req.resp_addr_lo =
4733 cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
4737 nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | IOP_RDUMP);
4738 nvmd_req.resp_len = cpu_to_le32(ioctl_payload->rd_length);
4739 nvmd_req.vpd_offset = cpu_to_le32(ioctl_payload->offset);
4740 nvmd_req.resp_addr_hi =
4741 cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
4742 nvmd_req.resp_addr_lo =
4743 cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
4749 rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req,
4750 sizeof(nvmd_req), 0);
4752 kfree(fw_control_context);
4753 pm8001_tag_free(pm8001_ha, tag);
4758 int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
4761 u32 opc = OPC_INB_SET_NVMD_DATA;
4765 struct pm8001_ccb_info *ccb;
4766 struct inbound_queue_table *circularQ;
4767 struct set_nvm_data_req nvmd_req;
4768 struct fw_control_ex *fw_control_context;
4769 struct pm8001_ioctl_payload *ioctl_payload = payload;
4771 nvmd_type = ioctl_payload->minor_function;
4772 fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL);
4773 if (!fw_control_context)
4775 circularQ = &pm8001_ha->inbnd_q_tbl[0];
4776 memcpy(pm8001_ha->memoryMap.region[NVMD].virt_ptr,
4777 &ioctl_payload->func_specific,
4778 ioctl_payload->wr_length);
4779 memset(&nvmd_req, 0, sizeof(nvmd_req));
4780 rc = pm8001_tag_alloc(pm8001_ha, &tag);
4782 kfree(fw_control_context);
4785 ccb = &pm8001_ha->ccb_info[tag];
4786 ccb->fw_control_context = fw_control_context;
4788 nvmd_req.tag = cpu_to_le32(tag);
4789 switch (nvmd_type) {
4791 u32 twi_addr, twi_page_size;
4794 nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98);
4795 nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | twi_addr << 16 |
4796 twi_page_size << 8 | TWI_DEVICE);
4797 nvmd_req.resp_len = cpu_to_le32(ioctl_payload->wr_length);
4798 nvmd_req.resp_addr_hi =
4799 cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
4800 nvmd_req.resp_addr_lo =
4801 cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
4805 nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | C_SEEPROM);
4806 nvmd_req.resp_len = cpu_to_le32(ioctl_payload->wr_length);
4807 nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98);
4808 nvmd_req.resp_addr_hi =
4809 cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
4810 nvmd_req.resp_addr_lo =
4811 cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
4814 nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | VPD_FLASH);
4815 nvmd_req.resp_len = cpu_to_le32(ioctl_payload->wr_length);
4816 nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98);
4817 nvmd_req.resp_addr_hi =
4818 cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
4819 nvmd_req.resp_addr_lo =
4820 cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
4823 nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | EXPAN_ROM);
4824 nvmd_req.resp_len = cpu_to_le32(ioctl_payload->wr_length);
4825 nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98);
4826 nvmd_req.resp_addr_hi =
4827 cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
4828 nvmd_req.resp_addr_lo =
4829 cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
4834 rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req,
4835 sizeof(nvmd_req), 0);
4837 kfree(fw_control_context);
4838 pm8001_tag_free(pm8001_ha, tag);
4844 * pm8001_chip_fw_flash_update_build - support the firmware update operation
4845 * @pm8001_ha: our hba card information.
4846 * @fw_flash_updata_info: firmware flash update param
4847 * @tag: Tag to apply to the payload
4850 pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha,
4851 void *fw_flash_updata_info, u32 tag)
4853 struct fw_flash_Update_req payload;
4854 struct fw_flash_updata_info *info;
4855 struct inbound_queue_table *circularQ;
4857 u32 opc = OPC_INB_FW_FLASH_UPDATE;
4859 memset(&payload, 0, sizeof(struct fw_flash_Update_req));
4860 circularQ = &pm8001_ha->inbnd_q_tbl[0];
4861 info = fw_flash_updata_info;
4862 payload.tag = cpu_to_le32(tag);
4863 payload.cur_image_len = cpu_to_le32(info->cur_image_len);
4864 payload.cur_image_offset = cpu_to_le32(info->cur_image_offset);
4865 payload.total_image_len = cpu_to_le32(info->total_image_len);
4866 payload.len = info->sgl.im_len.len ;
4867 payload.sgl_addr_lo =
4868 cpu_to_le32(lower_32_bits(le64_to_cpu(info->sgl.addr)));
4869 payload.sgl_addr_hi =
4870 cpu_to_le32(upper_32_bits(le64_to_cpu(info->sgl.addr)));
4871 ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
4872 sizeof(payload), 0);
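/*
 * Hedged sketch: pm8001_chip_fw_flash_update_build() above describes a
 * single chunk of the image (cur_image_offset/cur_image_len against
 * total_image_len), so a full update is naturally driven as a loop that
 * advances the offset until the whole image has been written. The loop
 * below is schematic only; the real chunking is driven from the ioctl/ctl
 * layer and its helpers are not shown here.
 */
#if 0	/* example only */
static void example_flash_chunks(u32 total_len, u32 chunk_len)
{
	u32 offset;

	for (offset = 0; offset < total_len; offset += chunk_len) {
		u32 this_len = min(chunk_len, total_len - offset);

		/*
		 * Fill a struct fw_flash_updata_info with
		 * .cur_image_offset = offset, .cur_image_len = this_len and
		 * .total_image_len = total_len, plus the SGL describing the
		 * staged buffer, then hand it to
		 * pm8001_chip_fw_flash_update_build().
		 */
		(void)this_len;
	}
}
#endif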
4877 pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
4880 struct fw_flash_updata_info flash_update_info;
4881 struct fw_control_info *fw_control;
4882 struct fw_control_ex *fw_control_context;
4885 struct pm8001_ccb_info *ccb;
4886 void *buffer = pm8001_ha->memoryMap.region[FW_FLASH].virt_ptr;
4887 dma_addr_t phys_addr = pm8001_ha->memoryMap.region[FW_FLASH].phys_addr;
4888 struct pm8001_ioctl_payload *ioctl_payload = payload;
4890 fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL);
4891 if (!fw_control_context)
4893 fw_control = (struct fw_control_info *)&ioctl_payload->func_specific;
4894 pm8001_dbg(pm8001_ha, DEVIO,
4895 "dma fw_control context input length :%x\n",
4897 memcpy(buffer, fw_control->buffer, fw_control->len);
4898 flash_update_info.sgl.addr = cpu_to_le64(phys_addr);
4899 flash_update_info.sgl.im_len.len = cpu_to_le32(fw_control->len);
4900 flash_update_info.sgl.im_len.e = 0;
4901 flash_update_info.cur_image_offset = fw_control->offset;
4902 flash_update_info.cur_image_len = fw_control->len;
4903 flash_update_info.total_image_len = fw_control->size;
4904 fw_control_context->fw_control = fw_control;
4905 fw_control_context->virtAddr = buffer;
4906 fw_control_context->phys_addr = phys_addr;
4907 fw_control_context->len = fw_control->len;
4908 rc = pm8001_tag_alloc(pm8001_ha, &tag);
4910 kfree(fw_control_context);
4913 ccb = &pm8001_ha->ccb_info[tag];
4914 ccb->fw_control_context = fw_control_context;
4916 rc = pm8001_chip_fw_flash_update_build(pm8001_ha, &flash_update_info,
4922 pm8001_get_gsm_dump(struct device *cdev, u32 length, char *buf)
4924 u32 value, rem, offset = 0, bar = 0;
4925 u32 index, work_offset, dw_length;
4926 u32 shift_value, gsm_base, gsm_dump_offset;
4928 struct Scsi_Host *shost = class_to_shost(cdev);
4929 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
4930 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
4933 gsm_dump_offset = pm8001_ha->fatal_forensic_shift_offset;
4935 /* check max is 1 Mbytes */
4936 if ((length > 0x100000) || (gsm_dump_offset & 3) ||
4937 ((gsm_dump_offset + length) > 0x1000000))
4940 if (pm8001_ha->chip_id == chip_8001)
4945 work_offset = gsm_dump_offset & 0xFFFF0000;
4946 offset = gsm_dump_offset & 0x0000FFFF;
4947 gsm_dump_offset = work_offset;
4948 /* adjust length to dword boundary */
4950 dw_length = length >> 2;
4952 for (index = 0; index < dw_length; index++) {
4953 if ((work_offset + offset) & 0xFFFF0000) {
4954 if (pm8001_ha->chip_id == chip_8001)
4955 shift_value = ((gsm_dump_offset + offset) &
4956 SHIFT_REG_64K_MASK);
4958 shift_value = (((gsm_dump_offset + offset) &
4959 SHIFT_REG_64K_MASK) >>
4960 SHIFT_REG_BIT_SHIFT);
4962 if (pm8001_ha->chip_id == chip_8001) {
4963 gsm_base = GSM_BASE;
4964 if (-1 == pm8001_bar4_shift(pm8001_ha,
4965 (gsm_base + shift_value)))
4969 if (-1 == pm80xx_bar4_shift(pm8001_ha,
4970 (gsm_base + shift_value)))
4973 gsm_dump_offset = (gsm_dump_offset + offset) &
4976 offset = offset & 0x0000FFFF;
4978 value = pm8001_cr32(pm8001_ha, bar, (work_offset + offset) &
4980 direct_data += sprintf(direct_data, "%08x ", value);
4984 value = pm8001_cr32(pm8001_ha, bar, (work_offset + offset) &
4986 /* transfer any trailing non-dword remainder */
4987 direct_data += sprintf(direct_data, "%08x ", value);
4989 /* Shift back to BAR4 original address */
4990 if (-1 == pm8001_bar4_shift(pm8001_ha, 0))
4992 pm8001_ha->fatal_forensic_shift_offset += 1024;
4994 if (pm8001_ha->fatal_forensic_shift_offset >= 0x100000)
4995 pm8001_ha->fatal_forensic_shift_offset = 0;
4996 return direct_data - buf;
5000 pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha,
5001 struct pm8001_device *pm8001_dev, u32 state)
5003 struct set_dev_state_req payload;
5004 struct inbound_queue_table *circularQ;
5005 struct pm8001_ccb_info *ccb;
5008 u32 opc = OPC_INB_SET_DEVICE_STATE;
5009 memset(&payload, 0, sizeof(payload));
5010 rc = pm8001_tag_alloc(pm8001_ha, &tag);
5013 ccb = &pm8001_ha->ccb_info[tag];
5015 ccb->device = pm8001_dev;
5016 circularQ = &pm8001_ha->inbnd_q_tbl[0];
5017 payload.tag = cpu_to_le32(tag);
5018 payload.device_id = cpu_to_le32(pm8001_dev->device_id);
5019 payload.nds = cpu_to_le32(state);
5020 rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
5021 sizeof(payload), 0);
5027 pm8001_chip_sas_re_initialization(struct pm8001_hba_info *pm8001_ha)
5029 struct sas_re_initialization_req payload;
5030 struct inbound_queue_table *circularQ;
5031 struct pm8001_ccb_info *ccb;
5034 u32 opc = OPC_INB_SAS_RE_INITIALIZE;
5035 memset(&payload, 0, sizeof(payload));
5036 rc = pm8001_tag_alloc(pm8001_ha, &tag);
5039 ccb = &pm8001_ha->ccb_info[tag];
5041 circularQ = &pm8001_ha->inbnd_q_tbl[0];
5042 payload.tag = cpu_to_le32(tag);
5043 payload.SSAHOLT = cpu_to_le32(0xd << 25);
5044 payload.sata_hol_tmo = cpu_to_le32(80);
5045 payload.open_reject_cmdretries_data_retries = cpu_to_le32(0xff00ff);
5046 rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
5047 sizeof(payload), 0);
5049 pm8001_tag_free(pm8001_ha, tag);
5054 const struct pm8001_dispatch pm8001_8001_dispatch = {
5056 .chip_init = pm8001_chip_init,
5057 .chip_soft_rst = pm8001_chip_soft_rst,
5058 .chip_rst = pm8001_hw_chip_rst,
5059 .chip_iounmap = pm8001_chip_iounmap,
5060 .isr = pm8001_chip_isr,
5061 .is_our_interrupt = pm8001_chip_is_our_interrupt,
5062 .isr_process_oq = process_oq,
5063 .interrupt_enable = pm8001_chip_interrupt_enable,
5064 .interrupt_disable = pm8001_chip_interrupt_disable,
5065 .make_prd = pm8001_chip_make_sg,
5066 .smp_req = pm8001_chip_smp_req,
5067 .ssp_io_req = pm8001_chip_ssp_io_req,
5068 .sata_req = pm8001_chip_sata_req,
5069 .phy_start_req = pm8001_chip_phy_start_req,
5070 .phy_stop_req = pm8001_chip_phy_stop_req,
5071 .reg_dev_req = pm8001_chip_reg_dev_req,
5072 .dereg_dev_req = pm8001_chip_dereg_dev_req,
5073 .phy_ctl_req = pm8001_chip_phy_ctl_req,
5074 .task_abort = pm8001_chip_abort_task,
5075 .ssp_tm_req = pm8001_chip_ssp_tm_req,
5076 .get_nvmd_req = pm8001_chip_get_nvmd_req,
5077 .set_nvmd_req = pm8001_chip_set_nvmd_req,
5078 .fw_flash_update_req = pm8001_chip_fw_flash_update_req,
5079 .set_dev_state_req = pm8001_chip_set_dev_state_req,
5080 .sas_re_init_req = pm8001_chip_sas_re_initialization,
5081 .fatal_errors = pm80xx_fatal_errors,