/*
 * Marvell UMI driver
 *
 * Copyright 2011 Marvell. <[email protected]>
 *
 * This file is licensed under GPLv2.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/blkdev.h>
#include <linux/io.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_eh.h>
#include <linux/uaccess.h>
#include <linux/kthread.h>

#include "mvumi.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("[email protected]");
MODULE_DESCRIPTION("Marvell UMI Driver");

static const struct pci_device_id mvumi_pci_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9143) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9580) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, mvumi_pci_table);

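/*
 * The tag pool is a simple LIFO stack of free command tags: tag_init()
 * pushes every tag onto the stack, tag_get_one() pops a free tag for a
 * new firmware request, and tag_release_one() pushes a completed tag
 * back.  The BUG_ON()s catch double-release and pool exhaustion.
 */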
static void tag_init(struct mvumi_tag *st, unsigned short size)
{
	unsigned short i;
	BUG_ON(size != st->size);
	st->top = size;
	for (i = 0; i < size; i++)
		st->stack[i] = size - 1 - i;
}

static unsigned short tag_get_one(struct mvumi_hba *mhba, struct mvumi_tag *st)
{
	BUG_ON(st->top <= 0);
	return st->stack[--st->top];
}

static void tag_release_one(struct mvumi_hba *mhba, struct mvumi_tag *st,
							unsigned short tag)
{
	BUG_ON(st->top >= st->size);
	st->stack[st->top++] = tag;
}

static bool tag_is_empty(struct mvumi_tag *st)
{
	if (st->top == 0)
		return 1;
	else
		return 0;
}

static void mvumi_unmap_pci_addr(struct pci_dev *dev, void **addr_array)
{
	int i;

	for (i = 0; i < MAX_BASE_ADDRESS; i++)
		if ((pci_resource_flags(dev, i) & IORESOURCE_MEM) &&
								addr_array[i])
			pci_iounmap(dev, addr_array[i]);
}

static int mvumi_map_pci_addr(struct pci_dev *dev, void **addr_array)
{
	int i;

	for (i = 0; i < MAX_BASE_ADDRESS; i++) {
		if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
			addr_array[i] = pci_iomap(dev, i, 0);
			if (!addr_array[i]) {
				dev_err(&dev->dev, "failed to map Bar[%d]\n",
									i);
				mvumi_unmap_pci_addr(dev, addr_array);
				return -ENOMEM;
			}
		} else
			addr_array[i] = NULL;

		dev_dbg(&dev->dev, "Bar %d : %p.\n", i, addr_array[i]);
	}

	return 0;
}

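/*
 * Every driver allocation is tracked as a struct mvumi_res on
 * mhba->res_list, so mvumi_release_mem_resource() can later walk the
 * list and free each entry by type: kfree() for RESOURCE_CACHED_MEMORY,
 * dma_free_coherent() for RESOURCE_UNCACHED_MEMORY.
 */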
static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba,
				enum resource_type type, unsigned int size)
{
	struct mvumi_res *res = kzalloc(sizeof(*res), GFP_ATOMIC);

	if (!res) {
		dev_err(&mhba->pdev->dev,
			"Failed to allocate memory for resource manager.\n");
		return NULL;
	}

	switch (type) {
	case RESOURCE_CACHED_MEMORY:
		res->virt_addr = kzalloc(size, GFP_ATOMIC);
		if (!res->virt_addr) {
			dev_err(&mhba->pdev->dev,
				"unable to allocate memory, size = %d.\n", size);
			kfree(res);
			return NULL;
		}
		break;

	case RESOURCE_UNCACHED_MEMORY:
		size = round_up(size, 8);
		res->virt_addr = dma_zalloc_coherent(&mhba->pdev->dev, size,
						&res->bus_addr, GFP_KERNEL);
		if (!res->virt_addr) {
			dev_err(&mhba->pdev->dev,
				"unable to allocate consistent mem, size = %d.\n",
				size);
			kfree(res);
			return NULL;
		}
		break;

	default:
		dev_err(&mhba->pdev->dev, "unknown resource type %d.\n", type);
		kfree(res);
		return NULL;
	}

	res->type = type;
	res->size = size;
	INIT_LIST_HEAD(&res->entry);
	list_add_tail(&res->entry, &mhba->res_list);

	return res;
}

static void mvumi_release_mem_resource(struct mvumi_hba *mhba)
{
	struct mvumi_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, &mhba->res_list, entry) {
		switch (res->type) {
		case RESOURCE_UNCACHED_MEMORY:
			dma_free_coherent(&mhba->pdev->dev, res->size,
						res->virt_addr, res->bus_addr);
			break;
		case RESOURCE_CACHED_MEMORY:
			kfree(res->virt_addr);
			break;
		default:
			dev_err(&mhba->pdev->dev,
				"unknown resource type %d\n", res->type);
			break;
		}
		list_del(&res->entry);
		kfree(res);
	}
	mhba->fw_flag &= ~MVUMI_FW_ALLOC;
}

/**
 * mvumi_make_sgl -	Prepares SGL
 * @mhba:		Adapter soft state
 * @scmd:		SCSI command from the mid-layer
 * @sgl_p:		SGL to be filled in
 * @sg_count:		return the number of SG elements
 *
 * If successful, this function returns 0; otherwise, it returns -1.
 */
static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd,
					void *sgl_p, unsigned char *sg_count)
{
	struct scatterlist *sg;
	struct mvumi_sgl *m_sg = (struct mvumi_sgl *) sgl_p;
	unsigned int i;
	unsigned int sgnum = scsi_sg_count(scmd);
	dma_addr_t busaddr;

	sg = scsi_sglist(scmd);
	*sg_count = dma_map_sg(&mhba->pdev->dev, sg, sgnum,
			       scmd->sc_data_direction);
	if (*sg_count > mhba->max_sge) {
		dev_err(&mhba->pdev->dev,
			"sg count[0x%x] is bigger than max sg[0x%x].\n",
			*sg_count, mhba->max_sge);
		dma_unmap_sg(&mhba->pdev->dev, sg, sgnum,
			     scmd->sc_data_direction);
		return -1;
	}
	for (i = 0; i < *sg_count; i++) {
		busaddr = sg_dma_address(&sg[i]);
		m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
		m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
		m_sg->flags = 0;
		sgd_setsz(mhba, m_sg, cpu_to_le32(sg_dma_len(&sg[i])));
		if ((i + 1) == *sg_count)
			m_sg->flags |= 1U << mhba->eot_flag;

		sgd_inc(mhba, m_sg);
	}

	return 0;
}

static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
							unsigned int size)
{
	struct mvumi_sgl *m_sg;
	void *virt_addr;
	dma_addr_t phy_addr;

	if (size == 0)
		return 0;

	virt_addr = dma_zalloc_coherent(&mhba->pdev->dev, size, &phy_addr,
					GFP_KERNEL);
	if (!virt_addr)
		return -1;

	m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
	cmd->frame->sg_counts = 1;
	cmd->data_buf = virt_addr;

	m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(phy_addr));
	m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(phy_addr));
	m_sg->flags = 1U << mhba->eot_flag;
	sgd_setsz(mhba, m_sg, cpu_to_le32(size));

	return 0;
}

static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba,
						unsigned int buf_size)
{
	struct mvumi_cmd *cmd;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd) {
		dev_err(&mhba->pdev->dev, "failed to create an internal cmd\n");
		return NULL;
	}
	INIT_LIST_HEAD(&cmd->queue_pointer);

	cmd->frame = dma_alloc_coherent(&mhba->pdev->dev, mhba->ib_max_size,
					&cmd->frame_phys, GFP_KERNEL);
	if (!cmd->frame) {
		dev_err(&mhba->pdev->dev,
			"failed to allocate memory for FW frame, size = %d.\n",
			mhba->ib_max_size);
		kfree(cmd);
		return NULL;
	}

	if (buf_size) {
		if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) {
			dev_err(&mhba->pdev->dev,
				"failed to allocate memory for internal frame\n");
			dma_free_coherent(&mhba->pdev->dev, mhba->ib_max_size,
					cmd->frame, cmd->frame_phys);
			kfree(cmd);
			return NULL;
		}
	} else
		cmd->frame->sg_counts = 0;

	return cmd;
}

static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba,
						struct mvumi_cmd *cmd)
{
	struct mvumi_sgl *m_sg;
	unsigned int size;
	dma_addr_t phy_addr;

	if (cmd && cmd->frame) {
		if (cmd->frame->sg_counts) {
			m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
			sgd_getsz(mhba, m_sg, size);

			phy_addr = (dma_addr_t) m_sg->baseaddr_l |
				(dma_addr_t) ((m_sg->baseaddr_h << 16) << 16);

			dma_free_coherent(&mhba->pdev->dev, size, cmd->data_buf,
								phy_addr);
		}
		dma_free_coherent(&mhba->pdev->dev, mhba->ib_max_size,
				cmd->frame, cmd->frame_phys);
		kfree(cmd);
	}
}

/**
 * mvumi_get_cmd -	Get a command from the free pool
 * @mhba:		Adapter soft state
 *
 * Returns a free command from the pool
 */
static struct mvumi_cmd *mvumi_get_cmd(struct mvumi_hba *mhba)
{
	struct mvumi_cmd *cmd = NULL;

	if (likely(!list_empty(&mhba->cmd_pool))) {
		cmd = list_entry((&mhba->cmd_pool)->next,
				struct mvumi_cmd, queue_pointer);
		list_del_init(&cmd->queue_pointer);
	} else
		dev_warn(&mhba->pdev->dev, "command pool is empty!\n");

	return cmd;
}

/**
 * mvumi_return_cmd -	Return a cmd to free command pool
 * @mhba:		Adapter soft state
 * @cmd:		Command packet to be returned to free command pool
 */
static inline void mvumi_return_cmd(struct mvumi_hba *mhba,
						struct mvumi_cmd *cmd)
{
	cmd->scmd = NULL;
	list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
}

/**
 * mvumi_free_cmds -	Free all the cmds in the free cmd pool
 * @mhba:		Adapter soft state
 */
static void mvumi_free_cmds(struct mvumi_hba *mhba)
{
	struct mvumi_cmd *cmd;

	while (!list_empty(&mhba->cmd_pool)) {
		cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
							queue_pointer);
		list_del(&cmd->queue_pointer);
		if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))
			kfree(cmd->frame);
		kfree(cmd);
	}
}

/**
 * mvumi_alloc_cmds -	Allocates the command packets
 * @mhba:		Adapter soft state
 *
 */
static int mvumi_alloc_cmds(struct mvumi_hba *mhba)
{
	int i;
	struct mvumi_cmd *cmd;

	for (i = 0; i < mhba->max_io; i++) {
		cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
		if (!cmd)
			goto err_exit;

		INIT_LIST_HEAD(&cmd->queue_pointer);
		list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
		if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
			cmd->frame = mhba->ib_frame + i * mhba->ib_max_size;
			cmd->frame_phys = mhba->ib_frame_phys
						+ i * mhba->ib_max_size;
		} else
			cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL);
		if (!cmd->frame)
			goto err_exit;
	}
	return 0;

err_exit:
	dev_err(&mhba->pdev->dev,
			"failed to allocate memory for cmd[0x%x].\n", i);
	while (!list_empty(&mhba->cmd_pool)) {
		cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
						queue_pointer);
		list_del(&cmd->queue_pointer);
		if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))
			kfree(cmd->frame);
		kfree(cmd);
	}
	return -ENOMEM;
}

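/*
 * Free-slot accounting differs between the two chips: the 9143 compares
 * the firmware's inbound read pointer against the driver's current slot,
 * while the 9580 reads a free-entry count that the firmware maintains in
 * the ib_shadow location (0xffff means the count is not yet valid).
 */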
static unsigned int mvumi_check_ib_list_9143(struct mvumi_hba *mhba)
{
	unsigned int ib_rp_reg;
	struct mvumi_hw_regs *regs = mhba->regs;

	ib_rp_reg = ioread32(mhba->regs->inb_read_pointer);

	if (unlikely(((ib_rp_reg & regs->cl_slot_num_mask) ==
			(mhba->ib_cur_slot & regs->cl_slot_num_mask)) &&
			((ib_rp_reg & regs->cl_pointer_toggle)
			!= (mhba->ib_cur_slot & regs->cl_pointer_toggle)))) {
		dev_warn(&mhba->pdev->dev, "no free slot to use.\n");
		return 0;
	}
	if (atomic_read(&mhba->fw_outstanding) >= mhba->max_io) {
		dev_warn(&mhba->pdev->dev, "firmware io overflow.\n");
		return 0;
	} else {
		return mhba->max_io - atomic_read(&mhba->fw_outstanding);
	}
}

static unsigned int mvumi_check_ib_list_9580(struct mvumi_hba *mhba)
{
	unsigned int count;
	if (atomic_read(&mhba->fw_outstanding) >= (mhba->max_io - 1))
		return 0;
	count = ioread32(mhba->ib_shadow);
	if (count == 0xffff)
		return 0;
	return count;
}

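/*
 * Communication-list pointers pack a slot index (cl_slot_num_mask) and a
 * phase bit (cl_pointer_toggle) into one value.  The phase bit is flipped
 * every time the index wraps past list_num_io, which is how a completely
 * full ring is told apart from an empty one (same index, different phase).
 */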
static void mvumi_get_ib_list_entry(struct mvumi_hba *mhba, void **ib_entry)
{
	unsigned int cur_ib_entry;

	cur_ib_entry = mhba->ib_cur_slot & mhba->regs->cl_slot_num_mask;
	cur_ib_entry++;
	if (cur_ib_entry >= mhba->list_num_io) {
		cur_ib_entry -= mhba->list_num_io;
		mhba->ib_cur_slot ^= mhba->regs->cl_pointer_toggle;
	}
	mhba->ib_cur_slot &= ~mhba->regs->cl_slot_num_mask;
	mhba->ib_cur_slot |= (cur_ib_entry & mhba->regs->cl_slot_num_mask);
	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
		*ib_entry = mhba->ib_list + cur_ib_entry *
				sizeof(struct mvumi_dyn_list_entry);
	} else {
		*ib_entry = mhba->ib_list + cur_ib_entry * mhba->ib_max_size;
	}
	atomic_inc(&mhba->fw_outstanding);
}

static void mvumi_send_ib_list_entry(struct mvumi_hba *mhba)
{
	iowrite32(0xffff, mhba->ib_shadow);
	iowrite32(mhba->ib_cur_slot, mhba->regs->inb_write_pointer);
}

static char mvumi_check_ob_frame(struct mvumi_hba *mhba,
		unsigned int cur_obf, struct mvumi_rsp_frame *p_outb_frame)
{
	unsigned short tag, request_id;

	udelay(1);
	p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
	request_id = p_outb_frame->request_id;
	tag = p_outb_frame->tag;
	if (tag > mhba->tag_pool.size) {
		dev_err(&mhba->pdev->dev, "ob frame data error\n");
		return -1;
	}
	if (mhba->tag_cmd[tag] == NULL) {
		dev_err(&mhba->pdev->dev, "tag[0x%x] with NO command\n", tag);
		return -1;
	} else if (mhba->tag_cmd[tag]->request_id != request_id &&
						mhba->request_id_enabled) {
		dev_err(&mhba->pdev->dev,
			"request ID from FW:0x%x, cmd request ID:0x%x\n",
			request_id, mhba->tag_cmd[tag]->request_id);
		return -1;
	}

	return 0;
}

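/*
 * The firmware publishes its outbound write position twice: in the
 * outb_copy_pointer register and in the ob_shadow location in host
 * memory.  The 9143 path spins until the two agree, so the driver never
 * consumes a slot number that is still being updated.
 */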
static int mvumi_check_ob_list_9143(struct mvumi_hba *mhba,
			unsigned int *cur_obf, unsigned int *assign_obf_end)
{
	unsigned int ob_write, ob_write_shadow;
	struct mvumi_hw_regs *regs = mhba->regs;

	do {
		ob_write = ioread32(regs->outb_copy_pointer);
		ob_write_shadow = ioread32(mhba->ob_shadow);
	} while ((ob_write & regs->cl_slot_num_mask) != ob_write_shadow);

	*cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
	*assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;

	if ((ob_write & regs->cl_pointer_toggle) !=
			(mhba->ob_cur_slot & regs->cl_pointer_toggle)) {
		*assign_obf_end += mhba->list_num_io;
	}
	return 0;
}

static int mvumi_check_ob_list_9580(struct mvumi_hba *mhba,
			unsigned int *cur_obf, unsigned int *assign_obf_end)
{
	unsigned int ob_write;
	struct mvumi_hw_regs *regs = mhba->regs;

	ob_write = ioread32(regs->outb_read_pointer);
	ob_write = ioread32(regs->outb_copy_pointer);
	*cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
	*assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;
	if (*assign_obf_end < *cur_obf)
		*assign_obf_end += mhba->list_num_io;
	else if (*assign_obf_end == *cur_obf)
		return -1;
	return 0;
}

static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba)
{
	unsigned int cur_obf, assign_obf_end, i;
	struct mvumi_ob_data *ob_data;
	struct mvumi_rsp_frame *p_outb_frame;
	struct mvumi_hw_regs *regs = mhba->regs;

	if (mhba->instancet->check_ob_list(mhba, &cur_obf, &assign_obf_end))
		return;

	for (i = (assign_obf_end - cur_obf); i != 0; i--) {
		cur_obf++;
		if (cur_obf >= mhba->list_num_io) {
			cur_obf -= mhba->list_num_io;
			mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
		}

		p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;

		/*
		 * Copy pointer may point to entry in outbound list
		 * before entry has valid data
		 */
		if (unlikely(p_outb_frame->tag > mhba->tag_pool.size ||
			mhba->tag_cmd[p_outb_frame->tag] == NULL ||
			p_outb_frame->request_id !=
				mhba->tag_cmd[p_outb_frame->tag]->request_id))
			if (mvumi_check_ob_frame(mhba, cur_obf, p_outb_frame))
				continue;

		if (!list_empty(&mhba->ob_data_list)) {
			ob_data = (struct mvumi_ob_data *)
				list_first_entry(&mhba->ob_data_list,
					struct mvumi_ob_data, list);
			list_del_init(&ob_data->list);
		} else {
			ob_data = NULL;
			if (cur_obf == 0) {
				cur_obf = mhba->list_num_io - 1;
				mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
			} else
				cur_obf -= 1;
			break;
		}

		memcpy(ob_data->data, p_outb_frame, mhba->ob_max_size);
		p_outb_frame->tag = 0xff;

		list_add_tail(&ob_data->list, &mhba->free_ob_list);
	}
	mhba->ob_cur_slot &= ~regs->cl_slot_num_mask;
	mhba->ob_cur_slot |= (cur_obf & regs->cl_slot_num_mask);
	iowrite32(mhba->ob_cur_slot, regs->outb_read_pointer);
}

static void mvumi_reset(struct mvumi_hba *mhba)
{
	struct mvumi_hw_regs *regs = mhba->regs;

	iowrite32(0, regs->enpointa_mask_reg);
	if (ioread32(regs->arm_to_pciea_msg1) != HANDSHAKE_DONESTATE)
		return;

	iowrite32(DRBL_SOFT_RESET, regs->pciea_to_arm_drbl_reg);
}

static unsigned char mvumi_start(struct mvumi_hba *mhba);

static int mvumi_wait_for_outstanding(struct mvumi_hba *mhba)
{
	mhba->fw_state = FW_STATE_ABORT;
	mvumi_reset(mhba);

	if (mvumi_start(mhba))
		return FAILED;
	else
		return SUCCESS;
}

static int mvumi_wait_for_fw(struct mvumi_hba *mhba)
{
	struct mvumi_hw_regs *regs = mhba->regs;
	u32 tmp;
	unsigned long before;
	before = jiffies;

	iowrite32(0, regs->enpointa_mask_reg);
	tmp = ioread32(regs->arm_to_pciea_msg1);
	while (tmp != HANDSHAKE_READYSTATE) {
		iowrite32(DRBL_MU_RESET, regs->pciea_to_arm_drbl_reg);
		if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
			dev_err(&mhba->pdev->dev,
				"FW reset failed [0x%x].\n", tmp);
			return FAILED;
		}

		msleep(500);
		rmb();
		tmp = ioread32(regs->arm_to_pciea_msg1);
	}

	return SUCCESS;
}

static void mvumi_backup_bar_addr(struct mvumi_hba *mhba)
{
	unsigned char i;

	for (i = 0; i < MAX_BASE_ADDRESS; i++) {
		pci_read_config_dword(mhba->pdev, 0x10 + i * 4,
						&mhba->pci_base[i]);
	}
}

static void mvumi_restore_bar_addr(struct mvumi_hba *mhba)
{
	unsigned char i;

	for (i = 0; i < MAX_BASE_ADDRESS; i++) {
		if (mhba->pci_base[i])
			pci_write_config_dword(mhba->pdev, 0x10 + i * 4,
						mhba->pci_base[i]);
	}
}

static int mvumi_pci_set_master(struct pci_dev *pdev)
{
	int ret = 0;

	pci_set_master(pdev);

	if (IS_DMA64) {
		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
			ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
	} else
		ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));

	return ret;
}

static int mvumi_reset_host_9580(struct mvumi_hba *mhba)
{
	mhba->fw_state = FW_STATE_ABORT;

	iowrite32(0, mhba->regs->reset_enable);
	iowrite32(0xf, mhba->regs->reset_request);

	iowrite32(0x10, mhba->regs->reset_enable);
	iowrite32(0x10, mhba->regs->reset_request);
	msleep(100);
	pci_disable_device(mhba->pdev);

	if (pci_enable_device(mhba->pdev)) {
		dev_err(&mhba->pdev->dev, "enable device failed\n");
		return FAILED;
	}
	if (mvumi_pci_set_master(mhba->pdev)) {
		dev_err(&mhba->pdev->dev, "set master failed\n");
		return FAILED;
	}
	mvumi_restore_bar_addr(mhba);
	if (mvumi_wait_for_fw(mhba) == FAILED)
		return FAILED;

	return mvumi_wait_for_outstanding(mhba);
}

static int mvumi_reset_host_9143(struct mvumi_hba *mhba)
{
	return mvumi_wait_for_outstanding(mhba);
}

static int mvumi_host_reset(struct scsi_cmnd *scmd)
{
	struct mvumi_hba *mhba;

	mhba = (struct mvumi_hba *) scmd->device->host->hostdata;

	scmd_printk(KERN_NOTICE, scmd, "RESET -%ld cmd=%x retries=%x\n",
			scmd->serial_number, scmd->cmnd[0], scmd->retries);

	return mhba->instancet->reset_host(mhba);
}

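/*
 * Issue an internal (driver-generated) frame and sleep until the
 * completion path clears cmd_status or MVUMI_INTERNAL_CMD_WAIT_TIME
 * seconds elapse.  On timeout the tag is reclaimed, and a frame that
 * never reached the firmware is unlinked from the waiting list.
 */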
static int mvumi_issue_blocked_cmd(struct mvumi_hba *mhba,
						struct mvumi_cmd *cmd)
{
	unsigned long flags;

	cmd->cmd_status = REQ_STATUS_PENDING;

	if (atomic_read(&cmd->sync_cmd)) {
		dev_err(&mhba->pdev->dev,
			"last blocked cmd not finished, sync_cmd = %d\n",
						atomic_read(&cmd->sync_cmd));
		BUG_ON(1);
		return -1;
	}
	atomic_inc(&cmd->sync_cmd);
	spin_lock_irqsave(mhba->shost->host_lock, flags);
	mhba->instancet->fire_cmd(mhba, cmd);
	spin_unlock_irqrestore(mhba->shost->host_lock, flags);

	wait_event_timeout(mhba->int_cmd_wait_q,
		(cmd->cmd_status != REQ_STATUS_PENDING),
		MVUMI_INTERNAL_CMD_WAIT_TIME * HZ);

	/* command timeout */
	if (atomic_read(&cmd->sync_cmd)) {
		spin_lock_irqsave(mhba->shost->host_lock, flags);
		atomic_dec(&cmd->sync_cmd);
		if (mhba->tag_cmd[cmd->frame->tag]) {
			mhba->tag_cmd[cmd->frame->tag] = 0;
			dev_warn(&mhba->pdev->dev, "TIMEOUT:release tag [%d]\n",
							cmd->frame->tag);
			tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
		}
		if (!list_empty(&cmd->queue_pointer)) {
			dev_warn(&mhba->pdev->dev,
				"TIMEOUT: an internal command was not sent!\n");
			list_del_init(&cmd->queue_pointer);
		} else
			atomic_dec(&mhba->fw_outstanding);

		spin_unlock_irqrestore(mhba->shost->host_lock, flags);
	}
	return 0;
}

static void mvumi_release_fw(struct mvumi_hba *mhba)
{
	mvumi_free_cmds(mhba);
	mvumi_release_mem_resource(mhba);
	mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
	dma_free_coherent(&mhba->pdev->dev, HSP_MAX_SIZE,
		mhba->handshake_page, mhba->handshake_page_phys);
	kfree(mhba->regs);
	pci_release_regions(mhba->pdev);
}

static unsigned char mvumi_flush_cache(struct mvumi_hba *mhba)
{
	struct mvumi_cmd *cmd;
	struct mvumi_msg_frame *frame;
	unsigned char device_id, retry = 0;
	unsigned char bitcount = sizeof(unsigned char) * 8;

	for (device_id = 0; device_id < mhba->max_target_id; device_id++) {
		if (!(mhba->target_map[device_id / bitcount] &
				(1 << (device_id % bitcount))))
			continue;
get_cmd:	cmd = mvumi_create_internal_cmd(mhba, 0);
		if (!cmd) {
			if (retry++ >= 5) {
				dev_err(&mhba->pdev->dev,
					"failed to get memory for internal flush cache cmd for device %d",
					device_id);
				retry = 0;
				continue;
			} else
				goto get_cmd;
		}
		cmd->scmd = NULL;
		cmd->cmd_status = REQ_STATUS_PENDING;
		atomic_set(&cmd->sync_cmd, 0);
		frame = cmd->frame;
		frame->req_function = CL_FUN_SCSI_CMD;
		frame->device_id = device_id;
		frame->cmd_flag = CMD_FLAG_NON_DATA;
		frame->data_transfer_length = 0;
		frame->cdb_length = MAX_COMMAND_SIZE;
		memset(frame->cdb, 0, MAX_COMMAND_SIZE);
		frame->cdb[0] = SCSI_CMD_MARVELL_SPECIFIC;
		frame->cdb[1] = CDB_CORE_MODULE;
		frame->cdb[2] = CDB_CORE_SHUTDOWN;

		mvumi_issue_blocked_cmd(mhba, cmd);
		if (cmd->cmd_status != SAM_STAT_GOOD) {
			dev_err(&mhba->pdev->dev,
				"device %d flush cache failed, status=0x%x.\n",
				device_id, cmd->cmd_status);
		}

		mvumi_delete_internal_cmd(mhba, cmd);
	}
	return 0;
}

static unsigned char
mvumi_calculate_checksum(struct mvumi_hs_header *p_header,
				unsigned short len)
{
	unsigned char *ptr;
	unsigned char ret = 0, i;

	ptr = (unsigned char *) p_header->frame_content;
	for (i = 0; i < len; i++) {
		ret ^= *ptr;
		ptr++;
	}

	return ret;
}

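/*
 * Handshake pages exchanged with the firmware: page 2 describes the host
 * (OS type, driver version, local boot time), page 3 is firmware control
 * (currently empty), and page 4 carries the inbound/outbound
 * communication-list geometry.  frame_length excludes the 4-byte header,
 * and every page is sealed with an XOR checksum over its contents.
 */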
static void mvumi_hs_build_page(struct mvumi_hba *mhba,
				struct mvumi_hs_header *hs_header)
{
	struct mvumi_hs_page2 *hs_page2;
	struct mvumi_hs_page4 *hs_page4;
	struct mvumi_hs_page3 *hs_page3;
	u64 time;
	u64 local_time;

	switch (hs_header->page_code) {
	case HS_PAGE_HOST_INFO:
		hs_page2 = (struct mvumi_hs_page2 *) hs_header;
		hs_header->frame_length = sizeof(*hs_page2) - 4;
		memset(hs_header->frame_content, 0, hs_header->frame_length);
		hs_page2->host_type = 3; /* 3 means Linux */
		if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)
			hs_page2->host_cap = 0x08; /* host dynamic source mode */
		hs_page2->host_ver.ver_major = VER_MAJOR;
		hs_page2->host_ver.ver_minor = VER_MINOR;
		hs_page2->host_ver.ver_oem = VER_OEM;
		hs_page2->host_ver.ver_build = VER_BUILD;
		hs_page2->system_io_bus = 0;
		hs_page2->slot_number = 0;
		hs_page2->intr_level = 0;
		hs_page2->intr_vector = 0;
		time = ktime_get_real_seconds();
		local_time = (time - (sys_tz.tz_minuteswest * 60));
		hs_page2->seconds_since1970 = local_time;
		hs_header->checksum = mvumi_calculate_checksum(hs_header,
						hs_header->frame_length);
		break;

	case HS_PAGE_FIRM_CTL:
		hs_page3 = (struct mvumi_hs_page3 *) hs_header;
		hs_header->frame_length = sizeof(*hs_page3) - 4;
		memset(hs_header->frame_content, 0, hs_header->frame_length);
		hs_header->checksum = mvumi_calculate_checksum(hs_header,
						hs_header->frame_length);
		break;

	case HS_PAGE_CL_INFO:
		hs_page4 = (struct mvumi_hs_page4 *) hs_header;
		hs_header->frame_length = sizeof(*hs_page4) - 4;
		memset(hs_header->frame_content, 0, hs_header->frame_length);
		hs_page4->ib_baseaddr_l = lower_32_bits(mhba->ib_list_phys);
		hs_page4->ib_baseaddr_h = upper_32_bits(mhba->ib_list_phys);

		hs_page4->ob_baseaddr_l = lower_32_bits(mhba->ob_list_phys);
		hs_page4->ob_baseaddr_h = upper_32_bits(mhba->ob_list_phys);
		hs_page4->ib_entry_size = mhba->ib_max_size_setting;
		hs_page4->ob_entry_size = mhba->ob_max_size_setting;
		if (mhba->hba_capability
			& HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF) {
			hs_page4->ob_depth = find_first_bit((unsigned long *)
							    &mhba->list_num_io,
							    BITS_PER_LONG);
			hs_page4->ib_depth = find_first_bit((unsigned long *)
							    &mhba->list_num_io,
							    BITS_PER_LONG);
		} else {
			hs_page4->ob_depth = (u8) mhba->list_num_io;
			hs_page4->ib_depth = (u8) mhba->list_num_io;
		}
		hs_header->checksum = mvumi_calculate_checksum(hs_header,
						hs_header->frame_length);
		break;

	default:
		dev_err(&mhba->pdev->dev, "cannot build page, code[0x%x]\n",
			hs_header->page_code);
		break;
	}
}

/**
 * mvumi_init_data -	Initialize requested data for FW
 * @mhba:		Adapter soft state
 */
static int mvumi_init_data(struct mvumi_hba *mhba)
{
	struct mvumi_ob_data *ob_pool;
	struct mvumi_res *res_mgnt;
	unsigned int tmp_size, offset, i;
	void *virmem, *v;
	dma_addr_t p;

	if (mhba->fw_flag & MVUMI_FW_ALLOC)
		return 0;

	tmp_size = mhba->ib_max_size * mhba->max_io;
	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)
		tmp_size += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;

	tmp_size += 128 + mhba->ob_max_size * mhba->max_io;
	tmp_size += 8 + sizeof(u32)*2 + 16;

	res_mgnt = mvumi_alloc_mem_resource(mhba,
					RESOURCE_UNCACHED_MEMORY, tmp_size);
	if (!res_mgnt) {
		dev_err(&mhba->pdev->dev,
			"failed to allocate memory for inbound list\n");
		goto fail_alloc_dma_buf;
	}

	p = res_mgnt->bus_addr;
	v = res_mgnt->virt_addr;
	/* ib_list */
	offset = round_up(p, 128) - p;
	p += offset;
	v += offset;
	mhba->ib_list = v;
	mhba->ib_list_phys = p;
	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
		v += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
		p += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
		mhba->ib_frame = v;
		mhba->ib_frame_phys = p;
	}
	v += mhba->ib_max_size * mhba->max_io;
	p += mhba->ib_max_size * mhba->max_io;

	/* ib shadow */
	offset = round_up(p, 8) - p;
	p += offset;
	v += offset;
	mhba->ib_shadow = v;
	mhba->ib_shadow_phys = p;
	p += sizeof(u32)*2;
	v += sizeof(u32)*2;
	/* ob shadow */
	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) {
		offset = round_up(p, 8) - p;
		p += offset;
		v += offset;
		mhba->ob_shadow = v;
		mhba->ob_shadow_phys = p;
		p += 8;
		v += 8;
	} else {
		offset = round_up(p, 4) - p;
		p += offset;
		v += offset;
		mhba->ob_shadow = v;
		mhba->ob_shadow_phys = p;
		p += 4;
		v += 4;
	}

	/* ob list */
	offset = round_up(p, 128) - p;
	p += offset;
	v += offset;

	mhba->ob_list = v;
	mhba->ob_list_phys = p;

	/* ob data pool */
	tmp_size = mhba->max_io * (mhba->ob_max_size + sizeof(*ob_pool));
	tmp_size = round_up(tmp_size, 8);

	res_mgnt = mvumi_alloc_mem_resource(mhba,
				RESOURCE_CACHED_MEMORY, tmp_size);
	if (!res_mgnt) {
		dev_err(&mhba->pdev->dev,
			"failed to allocate memory for outbound data buffer\n");
		goto fail_alloc_dma_buf;
	}
	virmem = res_mgnt->virt_addr;

	for (i = mhba->max_io; i != 0; i--) {
		ob_pool = (struct mvumi_ob_data *) virmem;
		list_add_tail(&ob_pool->list, &mhba->ob_data_list);
		virmem += mhba->ob_max_size + sizeof(*ob_pool);
	}

	tmp_size = sizeof(unsigned short) * mhba->max_io +
				sizeof(struct mvumi_cmd *) * mhba->max_io;
	tmp_size += round_up(mhba->max_target_id, sizeof(unsigned char) * 8) /
						(sizeof(unsigned char) * 8);

	res_mgnt = mvumi_alloc_mem_resource(mhba,
				RESOURCE_CACHED_MEMORY, tmp_size);
	if (!res_mgnt) {
		dev_err(&mhba->pdev->dev,
			"failed to allocate memory for tag and target map\n");
		goto fail_alloc_dma_buf;
	}

	virmem = res_mgnt->virt_addr;
	mhba->tag_pool.stack = virmem;
	mhba->tag_pool.size = mhba->max_io;
	tag_init(&mhba->tag_pool, mhba->max_io);
	virmem += sizeof(unsigned short) * mhba->max_io;

	mhba->tag_cmd = virmem;
	virmem += sizeof(struct mvumi_cmd *) * mhba->max_io;

	mhba->target_map = virmem;

	mhba->fw_flag |= MVUMI_FW_ALLOC;
	return 0;

fail_alloc_dma_buf:
	mvumi_release_mem_resource(mhba);
	return -1;
}

static int mvumi_hs_process_page(struct mvumi_hba *mhba,
				struct mvumi_hs_header *hs_header)
{
	struct mvumi_hs_page1 *hs_page1;
	unsigned char page_checksum;

	page_checksum = mvumi_calculate_checksum(hs_header,
						hs_header->frame_length);
	if (page_checksum != hs_header->checksum) {
		dev_err(&mhba->pdev->dev, "checksum error\n");
		return -1;
	}

	switch (hs_header->page_code) {
	case HS_PAGE_FIRM_CAP:
		hs_page1 = (struct mvumi_hs_page1 *) hs_header;

		mhba->max_io = hs_page1->max_io_support;
		mhba->list_num_io = hs_page1->cl_inout_list_depth;
		mhba->max_transfer_size = hs_page1->max_transfer_size;
		mhba->max_target_id = hs_page1->max_devices_support;
		mhba->hba_capability = hs_page1->capability;
		mhba->ib_max_size_setting = hs_page1->cl_in_max_entry_size;
		mhba->ib_max_size = (1 << hs_page1->cl_in_max_entry_size) << 2;

		mhba->ob_max_size_setting = hs_page1->cl_out_max_entry_size;
		mhba->ob_max_size = (1 << hs_page1->cl_out_max_entry_size) << 2;

		dev_dbg(&mhba->pdev->dev, "FW version:%d\n",
						hs_page1->fw_ver.ver_build);

		if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_COMPACT_SG)
			mhba->eot_flag = 22;
		else
			mhba->eot_flag = 27;
		if (mhba->hba_capability & HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF)
			mhba->list_num_io = 1 << hs_page1->cl_inout_list_depth;
		break;
	default:
		dev_err(&mhba->pdev->dev, "handshake: page code error\n");
		return -1;
	}
	return 0;
}

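/*
 * Handshake state machine, one step per doorbell exchange:
 *   HS_S_START -> HS_S_RESET -> HS_S_PAGE_ADDR ->
 *   (HS_S_SEND_PAGE | HS_S_QUERY_PAGE)* -> HS_S_END
 * Each step posts the next state in pciea_to_arm_msg0 and rings
 * DRBL_HANDSHAKE; pages are exchanged through the handshake_page buffer.
 */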
/**
 * mvumi_handshake -	Move the FW to READY state
 * @mhba:		Adapter soft state
 *
 * During initialization, the FW can be in any one of several possible
 * states. If the FW is in an operational or waiting-for-handshake state,
 * the driver must take steps to bring it to the ready state. Otherwise,
 * it has to wait for the ready state.
 */
static int mvumi_handshake(struct mvumi_hba *mhba)
{
	unsigned int hs_state, tmp, hs_fun;
	struct mvumi_hs_header *hs_header;
	struct mvumi_hw_regs *regs = mhba->regs;

	if (mhba->fw_state == FW_STATE_STARTING)
		hs_state = HS_S_START;
	else {
		tmp = ioread32(regs->arm_to_pciea_msg0);
		hs_state = HS_GET_STATE(tmp);
		dev_dbg(&mhba->pdev->dev, "handshake state[0x%x].\n", hs_state);
		if (HS_GET_STATUS(tmp) != HS_STATUS_OK) {
			mhba->fw_state = FW_STATE_STARTING;
			return -1;
		}
	}

	hs_fun = 0;
	switch (hs_state) {
	case HS_S_START:
		mhba->fw_state = FW_STATE_HANDSHAKING;
		HS_SET_STATUS(hs_fun, HS_STATUS_OK);
		HS_SET_STATE(hs_fun, HS_S_RESET);
		iowrite32(HANDSHAKE_SIGNATURE, regs->pciea_to_arm_msg1);
		iowrite32(hs_fun, regs->pciea_to_arm_msg0);
		iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
		break;

	case HS_S_RESET:
		iowrite32(lower_32_bits(mhba->handshake_page_phys),
					regs->pciea_to_arm_msg1);
		iowrite32(upper_32_bits(mhba->handshake_page_phys),
					regs->arm_to_pciea_msg1);
		HS_SET_STATUS(hs_fun, HS_STATUS_OK);
		HS_SET_STATE(hs_fun, HS_S_PAGE_ADDR);
		iowrite32(hs_fun, regs->pciea_to_arm_msg0);
		iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
		break;

	case HS_S_PAGE_ADDR:
	case HS_S_QUERY_PAGE:
	case HS_S_SEND_PAGE:
		hs_header = (struct mvumi_hs_header *) mhba->handshake_page;
		if (hs_header->page_code == HS_PAGE_FIRM_CAP) {
			mhba->hba_total_pages =
			((struct mvumi_hs_page1 *) hs_header)->total_pages;

			if (mhba->hba_total_pages == 0)
				mhba->hba_total_pages = HS_PAGE_TOTAL-1;
		}

		if (hs_state == HS_S_QUERY_PAGE) {
			if (mvumi_hs_process_page(mhba, hs_header)) {
				HS_SET_STATE(hs_fun, HS_S_ABORT);
				return -1;
			}
			if (mvumi_init_data(mhba)) {
				HS_SET_STATE(hs_fun, HS_S_ABORT);
				return -1;
			}
		} else if (hs_state == HS_S_PAGE_ADDR) {
			hs_header->page_code = 0;
			mhba->hba_total_pages = HS_PAGE_TOTAL-1;
		}

		if ((hs_header->page_code + 1) <= mhba->hba_total_pages) {
			hs_header->page_code++;
			if (hs_header->page_code != HS_PAGE_FIRM_CAP) {
				mvumi_hs_build_page(mhba, hs_header);
				HS_SET_STATE(hs_fun, HS_S_SEND_PAGE);
			} else
				HS_SET_STATE(hs_fun, HS_S_QUERY_PAGE);
		} else
			HS_SET_STATE(hs_fun, HS_S_END);

		HS_SET_STATUS(hs_fun, HS_STATUS_OK);
		iowrite32(hs_fun, regs->pciea_to_arm_msg0);
		iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
		break;

	case HS_S_END:
		/* Set communication list ISR */
		tmp = ioread32(regs->enpointa_mask_reg);
		tmp |= regs->int_comaout | regs->int_comaerr;
		iowrite32(tmp, regs->enpointa_mask_reg);
		iowrite32(mhba->list_num_io, mhba->ib_shadow);
		/* Set InBound List Available count shadow */
		iowrite32(lower_32_bits(mhba->ib_shadow_phys),
					regs->inb_aval_count_basel);
		iowrite32(upper_32_bits(mhba->ib_shadow_phys),
					regs->inb_aval_count_baseh);

		if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143) {
			/* Set OutBound List Available count shadow */
			iowrite32((mhba->list_num_io-1) |
							regs->cl_pointer_toggle,
							mhba->ob_shadow);
			iowrite32(lower_32_bits(mhba->ob_shadow_phys),
							regs->outb_copy_basel);
			iowrite32(upper_32_bits(mhba->ob_shadow_phys),
							regs->outb_copy_baseh);
		}

		mhba->ib_cur_slot = (mhba->list_num_io - 1) |
						regs->cl_pointer_toggle;
		mhba->ob_cur_slot = (mhba->list_num_io - 1) |
						regs->cl_pointer_toggle;
		mhba->fw_state = FW_STATE_STARTED;

		break;
	default:
		dev_err(&mhba->pdev->dev, "unknown handshake state [0x%x].\n",
								hs_state);
		return -1;
	}
	return 0;
}

static unsigned char mvumi_handshake_event(struct mvumi_hba *mhba)
{
	unsigned int isr_status;
	unsigned long before;

	before = jiffies;
	mvumi_handshake(mhba);
	do {
		isr_status = mhba->instancet->read_fw_status_reg(mhba);

		if (mhba->fw_state == FW_STATE_STARTED)
			return 0;
		if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
			dev_err(&mhba->pdev->dev,
				"no handshake response at state 0x%x.\n",
				mhba->fw_state);
			dev_err(&mhba->pdev->dev,
				"isr : global=0x%x,status=0x%x.\n",
				mhba->global_isr, isr_status);
			return -1;
		}
		rmb();
		usleep_range(1000, 2000);
	} while (!(isr_status & DRBL_HANDSHAKE_ISR));

	return 0;
}

static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba)
{
	unsigned int tmp;
	unsigned long before;

	before = jiffies;
	tmp = ioread32(mhba->regs->arm_to_pciea_msg1);
	while ((tmp != HANDSHAKE_READYSTATE) && (tmp != HANDSHAKE_DONESTATE)) {
		if (tmp != HANDSHAKE_READYSTATE)
			iowrite32(DRBL_MU_RESET,
					mhba->regs->pciea_to_arm_drbl_reg);
		if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
			dev_err(&mhba->pdev->dev,
				"invalid signature [0x%x].\n", tmp);
			return -1;
		}
		usleep_range(1000, 2000);
		rmb();
		tmp = ioread32(mhba->regs->arm_to_pciea_msg1);
	}

	mhba->fw_state = FW_STATE_STARTING;
	dev_dbg(&mhba->pdev->dev, "start firmware handshake...\n");
	do {
		if (mvumi_handshake_event(mhba)) {
			dev_err(&mhba->pdev->dev,
					"handshake failed at state 0x%x.\n",
					mhba->fw_state);
			return -1;
		}
	} while (mhba->fw_state != FW_STATE_STARTED);

	dev_dbg(&mhba->pdev->dev, "firmware handshake done\n");

	return 0;
}

static unsigned char mvumi_start(struct mvumi_hba *mhba)
{
	unsigned int tmp;
	struct mvumi_hw_regs *regs = mhba->regs;

	/* clear Door bell */
	tmp = ioread32(regs->arm_to_pciea_drbl_reg);
	iowrite32(tmp, regs->arm_to_pciea_drbl_reg);

	iowrite32(regs->int_drbl_int_mask, regs->arm_to_pciea_mask_reg);
	tmp = ioread32(regs->enpointa_mask_reg) | regs->int_dl_cpu2pciea;
	iowrite32(tmp, regs->enpointa_mask_reg);
	msleep(100);
	if (mvumi_check_handshake(mhba))
		return -1;

	return 0;
}

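/*
 * scmd->result encoding: the SAM status lives in byte 0, the host byte
 * (DID_*) in bits 16-23, and the driver byte (DRIVER_*) in bits 24-31.
 */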
/**
 * mvumi_complete_cmd -	Completes a command
 * @mhba:		Adapter soft state
 * @cmd:		Command to be completed
 * @ob_frame:		Response frame returned by the firmware
 */
static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
					struct mvumi_rsp_frame *ob_frame)
{
	struct scsi_cmnd *scmd = cmd->scmd;

	cmd->scmd->SCp.ptr = NULL;
	scmd->result = ob_frame->req_status;

	switch (ob_frame->req_status) {
	case SAM_STAT_GOOD:
		scmd->result |= DID_OK << 16;
		break;
	case SAM_STAT_BUSY:
		scmd->result |= DID_BUS_BUSY << 16;
		break;
	case SAM_STAT_CHECK_CONDITION:
		scmd->result |= (DID_OK << 16);
		if (ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) {
			memcpy(cmd->scmd->sense_buffer, ob_frame->payload,
				sizeof(struct mvumi_sense_data));
			scmd->result |= (DRIVER_SENSE << 24);
		}
		break;
	default:
		scmd->result |= (DRIVER_INVALID << 24) | (DID_ABORT << 16);
		break;
	}

	if (scsi_bufflen(scmd))
		dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd),
			     scsi_sg_count(scmd),
			     scmd->sc_data_direction);
	cmd->scmd->scsi_done(scmd);
	mvumi_return_cmd(mhba, cmd);
}

static void mvumi_complete_internal_cmd(struct mvumi_hba *mhba,
						struct mvumi_cmd *cmd,
					struct mvumi_rsp_frame *ob_frame)
{
	if (atomic_read(&cmd->sync_cmd)) {
		cmd->cmd_status = ob_frame->req_status;

		if ((ob_frame->req_status == SAM_STAT_CHECK_CONDITION) &&
				(ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) &&
				cmd->data_buf) {
			memcpy(cmd->data_buf, ob_frame->payload,
				sizeof(struct mvumi_sense_data));
		}
		atomic_dec(&cmd->sync_cmd);
		wake_up(&mhba->int_cmd_wait_q);
	}
}

static void mvumi_show_event(struct mvumi_hba *mhba,
			struct mvumi_driver_event *ptr)
{
	unsigned int i;

	dev_warn(&mhba->pdev->dev,
		"Event[0x%x] id[0x%x] severity[0x%x] device id[0x%x]\n",
		ptr->sequence_no, ptr->event_id, ptr->severity, ptr->device_id);
	if (ptr->param_count) {
		printk(KERN_WARNING "Event param(len 0x%x): ",
						ptr->param_count);
		for (i = 0; i < ptr->param_count; i++)
			printk(KERN_WARNING "0x%x ", ptr->params[i]);

		printk(KERN_WARNING "\n");
	}

	if (ptr->sense_data_length) {
		printk(KERN_WARNING "Event sense data(len 0x%x): ",
						ptr->sense_data_length);
		for (i = 0; i < ptr->sense_data_length; i++)
			printk(KERN_WARNING "0x%x ", ptr->sense_data[i]);
		printk(KERN_WARNING "\n");
	}
}
static int mvumi_handle_hotplug(struct mvumi_hba *mhba, u16 devid, int status)
{
	struct scsi_device *sdev;
	int ret = -1;

	if (status == DEVICE_OFFLINE) {
		sdev = scsi_device_lookup(mhba->shost, 0, devid, 0);
		if (sdev) {
			dev_dbg(&mhba->pdev->dev, "remove disk %d-%d-%d.\n", 0,
								sdev->id, 0);
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
			ret = 0;
		} else
			dev_err(&mhba->pdev->dev, " no disk[%d] to remove\n",
									devid);
	} else if (status == DEVICE_ONLINE) {
		sdev = scsi_device_lookup(mhba->shost, 0, devid, 0);
		if (!sdev) {
			scsi_add_device(mhba->shost, 0, devid, 0);
			dev_dbg(&mhba->pdev->dev, " add disk %d-%d-%d.\n", 0,
								devid, 0);
			ret = 0;
		} else {
			dev_err(&mhba->pdev->dev, " don't add disk %d-%d-%d.\n",
								0, devid, 0);
			scsi_device_put(sdev);
		}
	}
	return ret;
}

static u64 mvumi_inquiry(struct mvumi_hba *mhba,
	unsigned int id, struct mvumi_cmd *cmd)
{
	struct mvumi_msg_frame *frame;
	u64 wwid = 0;
	int cmd_alloc = 0;
	int data_buf_len = 64;

	if (!cmd) {
		cmd = mvumi_create_internal_cmd(mhba, data_buf_len);
		if (cmd)
			cmd_alloc = 1;
		else
			return 0;
	} else {
		memset(cmd->data_buf, 0, data_buf_len);
	}
	cmd->scmd = NULL;
	cmd->cmd_status = REQ_STATUS_PENDING;
	atomic_set(&cmd->sync_cmd, 0);
	frame = cmd->frame;
	frame->device_id = (u16) id;
	frame->cmd_flag = CMD_FLAG_DATA_IN;
	frame->req_function = CL_FUN_SCSI_CMD;
	frame->cdb_length = 6;
	frame->data_transfer_length = MVUMI_INQUIRY_LENGTH;
	memset(frame->cdb, 0, frame->cdb_length);
	frame->cdb[0] = INQUIRY;
	frame->cdb[4] = frame->data_transfer_length;

	mvumi_issue_blocked_cmd(mhba, cmd);

	if (cmd->cmd_status == SAM_STAT_GOOD) {
		if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143)
			wwid = id + 1;
		else
			memcpy((void *)&wwid,
			       (cmd->data_buf + MVUMI_INQUIRY_UUID_OFF),
			       MVUMI_INQUIRY_UUID_LEN);
		dev_dbg(&mhba->pdev->dev,
			"inquiry device(0:%d:0) wwid(%llx)\n", id, wwid);
	} else {
		wwid = 0;
	}
	if (cmd_alloc)
		mvumi_delete_internal_cmd(mhba, cmd);

	return wwid;
}

static void mvumi_detach_devices(struct mvumi_hba *mhba)
{
	struct mvumi_device *mv_dev = NULL, *dev_next;
	struct scsi_device *sdev = NULL;

	mutex_lock(&mhba->device_lock);

	/* detach hard disks */
	list_for_each_entry_safe(mv_dev, dev_next,
						&mhba->shost_dev_list, list) {
		mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE);
		list_del_init(&mv_dev->list);
		dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n",
			mv_dev->id, mv_dev->wwid);
		kfree(mv_dev);
	}
	list_for_each_entry_safe(mv_dev, dev_next, &mhba->mhba_dev_list, list) {
		list_del_init(&mv_dev->list);
		dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n",
			mv_dev->id, mv_dev->wwid);
		kfree(mv_dev);
	}

	/* detach virtual device */
	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
		sdev = scsi_device_lookup(mhba->shost, 0,
						mhba->max_target_id - 1, 0);

	if (sdev) {
		scsi_remove_device(sdev);
		scsi_device_put(sdev);
	}

	mutex_unlock(&mhba->device_lock);
}

static void mvumi_rescan_devices(struct mvumi_hba *mhba, int id)
{
	struct scsi_device *sdev;

	sdev = scsi_device_lookup(mhba->shost, 0, id, 0);
	if (sdev) {
		scsi_rescan_device(&sdev->sdev_gendev);
		scsi_device_put(sdev);
	}
}

static int mvumi_match_devices(struct mvumi_hba *mhba, int id, u64 wwid)
{
	struct mvumi_device *mv_dev = NULL;

	list_for_each_entry(mv_dev, &mhba->shost_dev_list, list) {
		if (mv_dev->wwid == wwid) {
			if (mv_dev->id != id) {
				dev_err(&mhba->pdev->dev,
					"%s has same wwid[%llx], but different id[%d %d]\n",
					__func__, mv_dev->wwid, mv_dev->id, id);
				return -1;
			} else {
				if (mhba->pdev->device ==
						PCI_DEVICE_ID_MARVELL_MV9143)
					mvumi_rescan_devices(mhba, id);
				return 1;
			}
		}
	}
	return 0;
}

static void mvumi_remove_devices(struct mvumi_hba *mhba, int id)
{
	struct mvumi_device *mv_dev = NULL, *dev_next;

	list_for_each_entry_safe(mv_dev, dev_next,
				&mhba->shost_dev_list, list) {
		if (mv_dev->id == id) {
			dev_dbg(&mhba->pdev->dev,
				"detach device(0:%d:0) wwid(%llx) from HOST\n",
				mv_dev->id, mv_dev->wwid);
			mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE);
			list_del_init(&mv_dev->list);
			kfree(mv_dev);
		}
	}
}

1564 | static int mvumi_probe_devices(struct mvumi_hba *mhba) | |
1565 | { | |
1566 | int id, maxid; | |
1567 | u64 wwid = 0; | |
1568 | struct mvumi_device *mv_dev = NULL; | |
1569 | struct mvumi_cmd *cmd = NULL; | |
1570 | int found = 0; | |
1571 | ||
1572 | cmd = mvumi_create_internal_cmd(mhba, 64); | |
1573 | if (!cmd) | |
1574 | return -1; | |
1575 | ||
1576 | if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143) | |
1577 | maxid = mhba->max_target_id; | |
1578 | else | |
1579 | maxid = mhba->max_target_id - 1; | |
1580 | ||
1581 | for (id = 0; id < maxid; id++) { | |
1582 | wwid = mvumi_inquiry(mhba, id, cmd); | |
1583 | if (!wwid) { | |
1584 | /* device no response, remove it */ | |
1585 | mvumi_remove_devices(mhba, id); | |
1586 | } else { | |
1587 | /* device response, add it */ | |
1588 | found = mvumi_match_devices(mhba, id, wwid); | |
1589 | if (!found) { | |
1590 | mvumi_remove_devices(mhba, id); | |
1591 | mv_dev = kzalloc(sizeof(struct mvumi_device), | |
1592 | GFP_KERNEL); | |
1593 | if (!mv_dev) { | |
1594 | dev_err(&mhba->pdev->dev, | |
1595 | "%s alloc mv_dev failed\n", | |
1596 | __func__); | |
1597 | continue; | |
1598 | } | |
1599 | mv_dev->id = id; | |
1600 | mv_dev->wwid = wwid; | |
1601 | mv_dev->sdev = NULL; | |
1602 | INIT_LIST_HEAD(&mv_dev->list); | |
1603 | list_add_tail(&mv_dev->list, | |
1604 | &mhba->mhba_dev_list); | |
1605 | dev_dbg(&mhba->pdev->dev, | |
1606 | "probe a new device(0:%d:0)" | |
1607 | " wwid(%llx)\n", id, mv_dev->wwid); | |
1608 | } else if (found == -1) { | |
| /* conflicting wwid: free the internal cmd before bailing out */ | |
| mvumi_delete_internal_cmd(mhba, cmd); | |
1609 | return -1; | |
1610 | } else | |
1611 | continue; | |
1612 | } | |
1613 | } | |
1614 | ||
1615 | if (cmd) | |
1616 | mvumi_delete_internal_cmd(mhba, cmd); | |
1617 | ||
1618 | return 0; | |
1619 | } | |
1620 | ||
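/*
 * Device-manager thread body. It sleeps until mvumi_launch_events() bumps
 * pnp_count on a bus-change doorbell and wakes it, then naps another second
 * so a burst of hotplug interrupts collapses into a single pass, re-inquires
 * every target id, and promotes devices that answered from mhba_dev_list
 * onto shost_dev_list.
 */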
1621 | static int mvumi_rescan_bus(void *data) | |
1622 | { | |
1623 | int ret = 0; | |
1624 | struct mvumi_hba *mhba = (struct mvumi_hba *) data; | |
1625 | struct mvumi_device *mv_dev = NULL, *dev_next; | |
1626 | ||
1627 | while (!kthread_should_stop()) { | |
1628 | ||
1629 | set_current_state(TASK_INTERRUPTIBLE); | |
1630 | if (!atomic_read(&mhba->pnp_count)) | |
1631 | schedule(); | |
1632 | msleep(1000); | |
1633 | atomic_set(&mhba->pnp_count, 0); | |
1634 | __set_current_state(TASK_RUNNING); | |
1635 | ||
1636 | mutex_lock(&mhba->device_lock); | |
1637 | ret = mvumi_probe_devices(mhba); | |
1638 | if (!ret) { | |
1639 | list_for_each_entry_safe(mv_dev, dev_next, | |
1640 | &mhba->mhba_dev_list, list) { | |
1641 | if (mvumi_handle_hotplug(mhba, mv_dev->id, | |
1642 | DEVICE_ONLINE)) { | |
1643 | dev_err(&mhba->pdev->dev, | |
1644 | "%s add device(0:%d:0) failed" | |
1645 | "wwid(%llx) has exist\n", | |
1646 | __func__, | |
1647 | mv_dev->id, mv_dev->wwid); | |
1648 | list_del_init(&mv_dev->list); | |
1649 | kfree(mv_dev); | |
1650 | } else { | |
1651 | list_move_tail(&mv_dev->list, | |
1652 | &mhba->shost_dev_list); | |
1653 | } | |
1654 | } | |
1655 | } | |
1656 | mutex_unlock(&mhba->device_lock); | |
1657 | } | |
1658 | return 0; | |
1659 | } | |
1660 | ||
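/*
 * The APICDB1_HOST_GETEVENT payload evidently carries two bitmaps of
 * param->size bits each: an "attach" map at param->bitmap[0] and a "remove"
 * map starting size/8 bytes later. The encoding is inverted, the firmware
 * clears a bit for each affected target, which is why the loops below scan
 * with find_next_zero_bit() rather than find_next_bit().
 */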
1661 | static void mvumi_proc_msg(struct mvumi_hba *mhba, | |
1662 | struct mvumi_hotplug_event *param) | |
1663 | { | |
1664 | u16 size = param->size; | |
1665 | const unsigned long *ar_bitmap; | |
1666 | const unsigned long *re_bitmap; | |
1667 | int index; | |
1668 | ||
1669 | if (mhba->fw_flag & MVUMI_FW_ATTACH) { | |
1670 | index = -1; | |
1671 | ar_bitmap = (const unsigned long *) param->bitmap; | |
1672 | re_bitmap = (const unsigned long *) &param->bitmap[size >> 3]; | |
1673 | ||
1674 | mutex_lock(&mhba->sas_discovery_mutex); | |
1675 | do { | |
1676 | index = find_next_zero_bit(ar_bitmap, size, index + 1); | |
1677 | if (index >= size) | |
1678 | break; | |
1679 | mvumi_handle_hotplug(mhba, index, DEVICE_ONLINE); | |
1680 | } while (1); | |
1681 | ||
1682 | index = -1; | |
1683 | do { | |
1684 | index = find_next_zero_bit(re_bitmap, size, index + 1); | |
1685 | if (index >= size) | |
1686 | break; | |
1687 | mvumi_handle_hotplug(mhba, index, DEVICE_OFFLINE); | |
1688 | } while (1); | |
1689 | mutex_unlock(&mhba->sas_discovery_mutex); | |
1690 | } | |
1691 | } | |
1692 | ||
f0c568a4 JL |
1693 | static void mvumi_notification(struct mvumi_hba *mhba, u8 msg, void *buffer) |
1694 | { | |
1695 | if (msg == APICDB1_EVENT_GETEVENT) { | |
1696 | int i, count; | |
1697 | struct mvumi_driver_event *param = NULL; | |
1698 | struct mvumi_event_req *er = buffer; | |
1699 | count = er->count; | |
1700 | if (count > MAX_EVENTS_RETURNED) { | |
1701 | dev_err(&mhba->pdev->dev, "event count[0x%x] is bigger" | |
1702 | " than max event count[0x%x].\n", | |
1703 | count, MAX_EVENTS_RETURNED); | |
1704 | return; | |
1705 | } | |
1706 | for (i = 0; i < count; i++) { | |
1707 | param = &er->events[i]; | |
1708 | mvumi_show_event(mhba, param); | |
1709 | } | |
bd756dde SF |
1710 | } else if (msg == APICDB1_HOST_GETEVENT) { |
1711 | mvumi_proc_msg(mhba, buffer); | |
f0c568a4 JL |
1712 | } |
1713 | } | |
1714 | ||
1715 | static int mvumi_get_event(struct mvumi_hba *mhba, unsigned char msg) | |
1716 | { | |
1717 | struct mvumi_cmd *cmd; | |
1718 | struct mvumi_msg_frame *frame; | |
1719 | ||
1720 | cmd = mvumi_create_internal_cmd(mhba, 512); | |
1721 | if (!cmd) | |
1722 | return -1; | |
1723 | cmd->scmd = NULL; | |
1724 | cmd->cmd_status = REQ_STATUS_PENDING; | |
1725 | atomic_set(&cmd->sync_cmd, 0); | |
1726 | frame = cmd->frame; | |
1727 | frame->device_id = 0; | |
1728 | frame->cmd_flag = CMD_FLAG_DATA_IN; | |
1729 | frame->req_function = CL_FUN_SCSI_CMD; | |
1730 | frame->cdb_length = MAX_COMMAND_SIZE; | |
1731 | frame->data_transfer_length = sizeof(struct mvumi_event_req); | |
1732 | memset(frame->cdb, 0, MAX_COMMAND_SIZE); | |
1733 | frame->cdb[0] = APICDB0_EVENT; | |
1734 | frame->cdb[1] = msg; | |
1735 | mvumi_issue_blocked_cmd(mhba, cmd); | |
1736 | ||
1737 | if (cmd->cmd_status != SAM_STAT_GOOD) | |
1738 | dev_err(&mhba->pdev->dev, "get event failed, status=0x%x.\n", | |
1739 | cmd->cmd_status); | |
1740 | else | |
1741 | mvumi_notification(mhba, cmd->frame->cdb[1], cmd->data_buf); | |
1742 | ||
1743 | mvumi_delete_internal_cmd(mhba, cmd); | |
1744 | return 0; | |
1745 | } | |
1746 | ||
1747 | static void mvumi_scan_events(struct work_struct *work) | |
1748 | { | |
1749 | struct mvumi_events_wq *mu_ev = | |
1750 | container_of(work, struct mvumi_events_wq, work_q); | |
1751 | ||
1752 | mvumi_get_event(mu_ev->mhba, mu_ev->event); | |
1753 | kfree(mu_ev); | |
1754 | } | |
1755 | ||
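/*
 * Runs in interrupt context (called from mvumi_isr_handler() below), hence
 * the GFP_ATOMIC allocation. A bus-change doorbell merely pokes the rescan
 * thread; event notifications are deferred to the system workqueue as
 * APICDB1_EVENT_GETEVENT requests.
 */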
bd756dde | 1756 | static void mvumi_launch_events(struct mvumi_hba *mhba, u32 isr_status) |
f0c568a4 JL |
1757 | { |
1758 | struct mvumi_events_wq *mu_ev; | |
1759 | ||
bd756dde SF |
1760 | while (isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY)) { |
1761 | if (isr_status & DRBL_BUS_CHANGE) { | |
1762 | atomic_inc(&mhba->pnp_count); | |
1763 | wake_up_process(mhba->dm_thread); | |
1764 | isr_status &= ~(DRBL_BUS_CHANGE); | |
1765 | continue; | |
1766 | } | |
1767 | ||
1768 | mu_ev = kzalloc(sizeof(*mu_ev), GFP_ATOMIC); | |
1769 | if (mu_ev) { | |
1770 | INIT_WORK(&mu_ev->work_q, mvumi_scan_events); | |
1771 | mu_ev->mhba = mhba; | |
1772 | mu_ev->event = APICDB1_EVENT_GETEVENT; | |
1773 | isr_status &= ~(DRBL_EVENT_NOTIFY); | |
1774 | mu_ev->param = NULL; | |
1775 | schedule_work(&mu_ev->work_q); | |
1776 | } | |
f0c568a4 JL |
1777 | } |
1778 | } | |
1779 | ||
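/*
 * Drain the outbound completion list. Each response frame carries the tag
 * its command was issued with, which indexes straight back into tag_cmd[].
 * The tag is recycled before the completion callback runs so a new request
 * can claim it immediately, and the final fire_cmd(mhba, NULL) flushes
 * whatever queued up on waiting_req_list in the meantime.
 */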
1780 | static void mvumi_handle_clob(struct mvumi_hba *mhba) | |
1781 | { | |
1782 | struct mvumi_rsp_frame *ob_frame; | |
1783 | struct mvumi_cmd *cmd; | |
1784 | struct mvumi_ob_data *pool; | |
1785 | ||
1786 | while (!list_empty(&mhba->free_ob_list)) { | |
1787 | pool = list_first_entry(&mhba->free_ob_list, | |
1788 | struct mvumi_ob_data, list); | |
1789 | list_del_init(&pool->list); | |
1790 | list_add_tail(&pool->list, &mhba->ob_data_list); | |
1791 | ||
1792 | ob_frame = (struct mvumi_rsp_frame *) &pool->data[0]; | |
1793 | cmd = mhba->tag_cmd[ob_frame->tag]; | |
1794 | ||
1795 | atomic_dec(&mhba->fw_outstanding); | |
1796 | mhba->tag_cmd[ob_frame->tag] = NULL; | |
1797 | tag_release_one(mhba, &mhba->tag_pool, ob_frame->tag); | |
1798 | if (cmd->scmd) | |
1799 | mvumi_complete_cmd(mhba, cmd, ob_frame); | |
1800 | else | |
1801 | mvumi_complete_internal_cmd(mhba, cmd, ob_frame); | |
1802 | } | |
1803 | mhba->instancet->fire_cmd(mhba, NULL); | |
1804 | } | |
1805 | ||
1806 | static irqreturn_t mvumi_isr_handler(int irq, void *devp) | |
1807 | { | |
1808 | struct mvumi_hba *mhba = (struct mvumi_hba *) devp; | |
1809 | unsigned long flags; | |
1810 | ||
1811 | spin_lock_irqsave(mhba->shost->host_lock, flags); | |
1812 | if (unlikely(mhba->instancet->clear_intr(mhba) || !mhba->global_isr)) { | |
1813 | spin_unlock_irqrestore(mhba->shost->host_lock, flags); | |
1814 | return IRQ_NONE; | |
1815 | } | |
1816 | ||
bd756dde SF |
1817 | if (mhba->global_isr & mhba->regs->int_dl_cpu2pciea) { |
1818 | if (mhba->isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY)) | |
1819 | mvumi_launch_events(mhba, mhba->isr_status); | |
f0c568a4 JL |
1820 | if (mhba->isr_status & DRBL_HANDSHAKE_ISR) { |
1821 | dev_warn(&mhba->pdev->dev, "enter handshake again!\n"); | |
1822 | mvumi_handshake(mhba); | |
1823 | } | |
bd756dde | 1824 | |
f0c568a4 JL |
1825 | } |
1826 | ||
bd756dde | 1827 | if (mhba->global_isr & mhba->regs->int_comaout) |
f0c568a4 JL |
1828 | mvumi_receive_ob_list_entry(mhba); |
1829 | ||
1830 | mhba->global_isr = 0; | |
1831 | mhba->isr_status = 0; | |
1832 | if (mhba->fw_state == FW_STATE_STARTED) | |
1833 | mvumi_handle_clob(mhba); | |
1834 | spin_unlock_irqrestore(mhba->shost->host_lock, flags); | |
1835 | return IRQ_HANDLED; | |
1836 | } | |
1837 | ||
1838 | static enum mvumi_qc_result mvumi_send_command(struct mvumi_hba *mhba, | |
1839 | struct mvumi_cmd *cmd) | |
1840 | { | |
1841 | void *ib_entry; | |
1842 | struct mvumi_msg_frame *ib_frame; | |
1843 | unsigned int frame_len; | |
1844 | ||
1845 | ib_frame = cmd->frame; | |
1846 | if (unlikely(mhba->fw_state != FW_STATE_STARTED)) { | |
1847 | dev_dbg(&mhba->pdev->dev, "firmware not ready.\n"); | |
1848 | return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE; | |
1849 | } | |
1850 | if (tag_is_empty(&mhba->tag_pool)) { | |
1851 | dev_dbg(&mhba->pdev->dev, "no free tag.\n"); | |
1852 | return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE; | |
1853 | } | |
bd756dde | 1854 | mvumi_get_ib_list_entry(mhba, &ib_entry); |
f0c568a4 JL |
1855 | |
1856 | cmd->frame->tag = tag_get_one(mhba, &mhba->tag_pool); | |
1857 | cmd->frame->request_id = mhba->io_seq++; | |
1858 | cmd->request_id = cmd->frame->request_id; | |
1859 | mhba->tag_cmd[cmd->frame->tag] = cmd; | |
1860 | frame_len = sizeof(*ib_frame) - 4 + | |
1861 | ib_frame->sg_counts * sizeof(struct mvumi_sgl); | |
bd756dde SF |
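/*
 * Hand the frame to the controller. With dynamic source entries
 * (HS_CAPABILITY_SUPPORT_DYN_SRC) only a small descriptor goes into the
 * inbound slot: the frame's bus address plus its length, apparently in
 * 32-bit words given the (frame_len >> 2) packed into a 12-bit field.
 * Without that capability the whole frame is copied into the slot.
 */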
1862 | if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) { |
1863 | struct mvumi_dyn_list_entry *dle; | |
1864 | dle = ib_entry; | |
1865 | dle->src_low_addr = | |
1866 | cpu_to_le32(lower_32_bits(cmd->frame_phys)); | |
1867 | dle->src_high_addr = | |
1868 | cpu_to_le32(upper_32_bits(cmd->frame_phys)); | |
1869 | dle->if_length = (frame_len >> 2) & 0xFFF; | |
1870 | } else { | |
1871 | memcpy(ib_entry, ib_frame, frame_len); | |
1872 | } | |
f0c568a4 JL |
1873 | return MV_QUEUE_COMMAND_RESULT_SENT; |
1874 | } | |
1875 | ||
1876 | static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd) | |
1877 | { | |
1878 | unsigned short num_of_cl_sent = 0; | |
bd756dde | 1879 | unsigned int count; |
f0c568a4 JL |
1880 | enum mvumi_qc_result result; |
1881 | ||
1882 | if (cmd) | |
1883 | list_add_tail(&cmd->queue_pointer, &mhba->waiting_req_list); | |
bd756dde SF |
1884 | count = mhba->instancet->check_ib_list(mhba); |
1885 | if (list_empty(&mhba->waiting_req_list) || !count) | |
1886 | return; | |
f0c568a4 | 1887 | |
bd756dde | 1888 | do { |
f0c568a4 | 1889 | cmd = list_first_entry(&mhba->waiting_req_list, |
bd756dde | 1890 | struct mvumi_cmd, queue_pointer); |
f0c568a4 JL |
1891 | list_del_init(&cmd->queue_pointer); |
1892 | result = mvumi_send_command(mhba, cmd); | |
1893 | switch (result) { | |
1894 | case MV_QUEUE_COMMAND_RESULT_SENT: | |
1895 | num_of_cl_sent++; | |
1896 | break; | |
1897 | case MV_QUEUE_COMMAND_RESULT_NO_RESOURCE: | |
1898 | list_add(&cmd->queue_pointer, &mhba->waiting_req_list); | |
1899 | if (num_of_cl_sent > 0) | |
1900 | mvumi_send_ib_list_entry(mhba); | |
1901 | ||
1902 | return; | |
1903 | } | |
bd756dde SF |
1904 | } while (!list_empty(&mhba->waiting_req_list) && count--); |
1905 | ||
f0c568a4 JL |
1906 | if (num_of_cl_sent > 0) |
1907 | mvumi_send_ib_list_entry(mhba); | |
1908 | } | |
1909 | ||
1910 | /** | |
1911 | * mvumi_enable_intr - Enables interrupts | |
bd756dde | 1912 | * @mhba: Adapter soft state |
f0c568a4 | 1913 | */ |
bd756dde | 1914 | static void mvumi_enable_intr(struct mvumi_hba *mhba) |
f0c568a4 JL |
1915 | { |
1916 | unsigned int mask; | |
bd756dde | 1917 | struct mvumi_hw_regs *regs = mhba->regs; |
f0c568a4 | 1918 | |
bd756dde SF |
1919 | iowrite32(regs->int_drbl_int_mask, regs->arm_to_pciea_mask_reg); |
1920 | mask = ioread32(regs->enpointa_mask_reg); | |
1921 | mask |= regs->int_dl_cpu2pciea | regs->int_comaout | regs->int_comaerr; | |
1922 | iowrite32(mask, regs->enpointa_mask_reg); | |
f0c568a4 JL |
1923 | } |
1924 | ||
1925 | /** | |
1926 | * mvumi_disable_intr - Disables interrupts | |
bd756dde | 1927 | * @mhba: Adapter soft state |
f0c568a4 | 1928 | */ |
bd756dde | 1929 | static void mvumi_disable_intr(struct mvumi_hba *mhba) |
f0c568a4 JL |
1930 | { |
1931 | unsigned int mask; | |
bd756dde | 1932 | struct mvumi_hw_regs *regs = mhba->regs; |
f0c568a4 | 1933 | |
bd756dde SF |
1934 | iowrite32(0, regs->arm_to_pciea_mask_reg); |
1935 | mask = ioread32(regs->enpointa_mask_reg); | |
1936 | mask &= ~(regs->int_dl_cpu2pciea | regs->int_comaout | | |
1937 | regs->int_comaerr); | |
1938 | iowrite32(mask, regs->enpointa_mask_reg); | |
f0c568a4 JL |
1939 | } |
1940 | ||
1941 | static int mvumi_clear_intr(void *extend) | |
1942 | { | |
1943 | struct mvumi_hba *mhba = (struct mvumi_hba *) extend; | |
1944 | unsigned int status, isr_status = 0, tmp = 0; | |
bd756dde | 1945 | struct mvumi_hw_regs *regs = mhba->regs; |
f0c568a4 | 1946 | |
bd756dde SF |
1947 | status = ioread32(regs->main_int_cause_reg); |
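/*
 * A read of all ones means the PCI read itself failed (the device has
 * dropped off the bus), so treat it like "interrupt not ours".
 */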
1948 | if (!(status & regs->int_mu) || status == 0xFFFFFFFF) | |
f0c568a4 | 1949 | return 1; |
bd756dde SF |
1950 | if (unlikely(status & regs->int_comaerr)) { |
1951 | tmp = ioread32(regs->outb_isr_cause); | |
1952 | if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) { | |
1953 | if (tmp & regs->clic_out_err) { | |
1954 | iowrite32(tmp & regs->clic_out_err, | |
1955 | regs->outb_isr_cause); | |
1956 | } | |
1957 | } else { | |
1958 | if (tmp & (regs->clic_in_err | regs->clic_out_err)) | |
1959 | iowrite32(tmp & (regs->clic_in_err | | |
1960 | regs->clic_out_err), | |
1961 | regs->outb_isr_cause); | |
1962 | } | |
1963 | status ^= regs->int_comaerr; | |
f0c568a4 JL |
1964 | /* inbound or outbound parity error, command will timeout */ |
1965 | } | |
bd756dde SF |
1966 | if (status & regs->int_comaout) { |
1967 | tmp = ioread32(regs->outb_isr_cause); | |
1968 | if (tmp & regs->clic_irq) | |
1969 | iowrite32(tmp & regs->clic_irq, regs->outb_isr_cause); | |
f0c568a4 | 1970 | } |
bd756dde SF |
1971 | if (status & regs->int_dl_cpu2pciea) { |
1972 | isr_status = ioread32(regs->arm_to_pciea_drbl_reg); | |
f0c568a4 | 1973 | if (isr_status) |
bd756dde | 1974 | iowrite32(isr_status, regs->arm_to_pciea_drbl_reg); |
f0c568a4 JL |
1975 | } |
1976 | ||
1977 | mhba->global_isr = status; | |
1978 | mhba->isr_status = isr_status; | |
1979 | ||
1980 | return 0; | |
1981 | } | |
1982 | ||
1983 | /** | |
1984 | * mvumi_read_fw_status_reg - returns the current FW status value | |
bd756dde | 1985 | * @mhba: Adapter soft state |
f0c568a4 | 1986 | */ |
bd756dde | 1987 | static unsigned int mvumi_read_fw_status_reg(struct mvumi_hba *mhba) |
f0c568a4 JL |
1988 | { |
1989 | unsigned int status; | |
1990 | ||
bd756dde | 1991 | status = ioread32(mhba->regs->arm_to_pciea_drbl_reg); |
f0c568a4 | 1992 | if (status) |
bd756dde | 1993 | iowrite32(status, mhba->regs->arm_to_pciea_drbl_reg); |
f0c568a4 JL |
1994 | return status; |
1995 | } | |
1996 | ||
bd756dde | 1997 | static struct mvumi_instance_template mvumi_instance_9143 = { |
f0c568a4 JL |
1998 | .fire_cmd = mvumi_fire_cmd, |
1999 | .enable_intr = mvumi_enable_intr, | |
2000 | .disable_intr = mvumi_disable_intr, | |
2001 | .clear_intr = mvumi_clear_intr, | |
2002 | .read_fw_status_reg = mvumi_read_fw_status_reg, | |
bd756dde SF |
2003 | .check_ib_list = mvumi_check_ib_list_9143, |
2004 | .check_ob_list = mvumi_check_ob_list_9143, | |
2005 | .reset_host = mvumi_reset_host_9143, | |
2006 | }; | |
2007 | ||
2008 | static struct mvumi_instance_template mvumi_instance_9580 = { | |
2009 | .fire_cmd = mvumi_fire_cmd, | |
2010 | .enable_intr = mvumi_enable_intr, | |
2011 | .disable_intr = mvumi_disable_intr, | |
2012 | .clear_intr = mvumi_clear_intr, | |
2013 | .read_fw_status_reg = mvumi_read_fw_status_reg, | |
2014 | .check_ib_list = mvumi_check_ib_list_9580, | |
2015 | .check_ob_list = mvumi_check_ob_list_9580, | |
2016 | .reset_host = mvumi_reset_host_9580, | |
f0c568a4 JL |
2017 | }; |
2018 | ||
2019 | static int mvumi_slave_configure(struct scsi_device *sdev) | |
2020 | { | |
2021 | struct mvumi_hba *mhba; | |
2022 | unsigned char bitcount = sizeof(unsigned char) * 8; | |
2023 | ||
2024 | mhba = (struct mvumi_hba *) sdev->host->hostdata; | |
2025 | if (sdev->id >= mhba->max_target_id) | |
2026 | return -EINVAL; | |
2027 | ||
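/* One bit per target id, eight to a byte: e.g. id 10 sets bit 2 of target_map[1]. */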
2028 | mhba->target_map[sdev->id / bitcount] |= (1 << (sdev->id % bitcount)); | |
2029 | return 0; | |
2030 | } | |
2031 | ||
2032 | /** | |
2033 | * mvumi_build_frame - Prepares a direct cdb (DCDB) command | |
2034 | * @mhba: Adapter soft state | |
2035 | * @scmd: SCSI command | |
2036 | * @cmd: Command to be prepared in | |
2037 | * | |
2038 | * This function prepares CDB commands. These are typically pass-through | |
2039 | * commands to the devices. | |
2040 | */ | |
2041 | static unsigned char mvumi_build_frame(struct mvumi_hba *mhba, | |
2042 | struct scsi_cmnd *scmd, struct mvumi_cmd *cmd) | |
2043 | { | |
2044 | struct mvumi_msg_frame *pframe; | |
2045 | ||
2046 | cmd->scmd = scmd; | |
2047 | cmd->cmd_status = REQ_STATUS_PENDING; | |
2048 | pframe = cmd->frame; | |
2049 | pframe->device_id = ((unsigned short) scmd->device->id) | | |
2050 | (((unsigned short) scmd->device->lun) << 8); | |
2051 | pframe->cmd_flag = 0; | |
2052 | ||
2053 | switch (scmd->sc_data_direction) { | |
2054 | case DMA_NONE: | |
2055 | pframe->cmd_flag |= CMD_FLAG_NON_DATA; | |
2056 | break; | |
2057 | case DMA_FROM_DEVICE: | |
2058 | pframe->cmd_flag |= CMD_FLAG_DATA_IN; | |
2059 | break; | |
2060 | case DMA_TO_DEVICE: | |
2061 | pframe->cmd_flag |= CMD_FLAG_DATA_OUT; | |
2062 | break; | |
2063 | case DMA_BIDIRECTIONAL: | |
2064 | default: | |
2065 | dev_warn(&mhba->pdev->dev, "unexpected data direction[%d] " | |
2066 | "cmd[0x%x]\n", scmd->sc_data_direction, scmd->cmnd[0]); | |
2067 | goto error; | |
2068 | } | |
2069 | ||
2070 | pframe->cdb_length = scmd->cmd_len; | |
2071 | memcpy(pframe->cdb, scmd->cmnd, pframe->cdb_length); | |
2072 | pframe->req_function = CL_FUN_SCSI_CMD; | |
2073 | if (scsi_bufflen(scmd)) { | |
2074 | if (mvumi_make_sgl(mhba, scmd, &pframe->payload[0], | |
2075 | &pframe->sg_counts)) | |
2076 | goto error; | |
2077 | ||
2078 | pframe->data_transfer_length = scsi_bufflen(scmd); | |
2079 | } else { | |
2080 | pframe->sg_counts = 0; | |
2081 | pframe->data_transfer_length = 0; | |
2082 | } | |
2083 | return 0; | |
2084 | ||
2085 | error: | |
2086 | scmd->result = (DID_OK << 16) | (DRIVER_SENSE << 24) | | |
2087 | SAM_STAT_CHECK_CONDITION; | |
2088 | scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x24, | |
2089 | 0); | |
2090 | return -1; | |
2091 | } | |
2092 | ||
2093 | /** | |
2094 | * mvumi_queue_command - Queue entry point | |
2095 | * @shost: SCSI host the command is queued to | |
2096 | * @scmd: SCSI command to be queued | |
2097 | */ | |
2098 | static int mvumi_queue_command(struct Scsi_Host *shost, | |
2099 | struct scsi_cmnd *scmd) | |
2100 | { | |
2101 | struct mvumi_cmd *cmd; | |
2102 | struct mvumi_hba *mhba; | |
2103 | unsigned long irq_flags; | |
2104 | ||
2105 | spin_lock_irqsave(shost->host_lock, irq_flags); | |
2106 | scsi_cmd_get_serial(shost, scmd); | |
2107 | ||
2108 | mhba = (struct mvumi_hba *) shost->hostdata; | |
2109 | scmd->result = 0; | |
2110 | cmd = mvumi_get_cmd(mhba); | |
2111 | if (unlikely(!cmd)) { | |
2112 | spin_unlock_irqrestore(shost->host_lock, irq_flags); | |
2113 | return SCSI_MLQUEUE_HOST_BUSY; | |
2114 | } | |
2115 | ||
2116 | if (unlikely(mvumi_build_frame(mhba, scmd, cmd))) | |
2117 | goto out_return_cmd; | |
2118 | ||
2119 | cmd->scmd = scmd; | |
2120 | scmd->SCp.ptr = (char *) cmd; | |
2121 | mhba->instancet->fire_cmd(mhba, cmd); | |
2122 | spin_unlock_irqrestore(shost->host_lock, irq_flags); | |
2123 | return 0; | |
2124 | ||
2125 | out_return_cmd: | |
2126 | mvumi_return_cmd(mhba, cmd); | |
2127 | scmd->scsi_done(scmd); | |
2128 | spin_unlock_irqrestore(shost->host_lock, irq_flags); | |
2129 | return 0; | |
2130 | } | |
2131 | ||
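/*
 * Block-layer timeout hook. The mvumi_cmd is recovered from scmd->SCp.ptr
 * (stashed by mvumi_queue_command() above); its tag, queue linkage and DMA
 * mapping are torn down by hand, and BLK_EH_DONE tells the midlayer the
 * command needs no further handling.
 */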
2132 | static enum blk_eh_timer_return mvumi_timed_out(struct scsi_cmnd *scmd) | |
2133 | { | |
2134 | struct mvumi_cmd *cmd = (struct mvumi_cmd *) scmd->SCp.ptr; | |
2135 | struct Scsi_Host *host = scmd->device->host; | |
2136 | struct mvumi_hba *mhba = shost_priv(host); | |
2137 | unsigned long flags; | |
2138 | ||
2139 | spin_lock_irqsave(mhba->shost->host_lock, flags); | |
2140 | ||
2141 | if (mhba->tag_cmd[cmd->frame->tag]) { | |
2142 | mhba->tag_cmd[cmd->frame->tag] = NULL; | |
2143 | tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag); | |
2144 | } | |
2145 | if (!list_empty(&cmd->queue_pointer)) | |
2146 | list_del_init(&cmd->queue_pointer); | |
2147 | else | |
2148 | atomic_dec(&mhba->fw_outstanding); | |
2149 | ||
2150 | scmd->result = (DRIVER_INVALID << 24) | (DID_ABORT << 16); | |
2151 | scmd->SCp.ptr = NULL; | |
2152 | if (scsi_bufflen(scmd)) { | |
ab8e7f4b | 2153 | dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd), |
4bd13a07 | 2154 | scsi_sg_count(scmd), |
ab8e7f4b | 2155 | scmd->sc_data_direction); |
f0c568a4 JL |
2156 | } |
2157 | mvumi_return_cmd(mhba, cmd); | |
2158 | spin_unlock_irqrestore(mhba->shost->host_lock, flags); | |
2159 | ||
6600593c | 2160 | return BLK_EH_DONE; |
f0c568a4 JL |
2161 | } |
2162 | ||
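/*
 * Fake a disk geometry for the BIOS: 64 heads x 32 sectors (1 MiB per
 * cylinder with 512-byte sectors) for small disks, switching to 255 x 63
 * once capacity reaches 0x200000 sectors (1 GiB) to keep the cylinder count
 * manageable. E.g. a 2 GiB disk of 0x400000 sectors maps to 255/63/261,
 * since 4194304 / (255 * 63) = 261.
 */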
2163 | static int | |
2164 | mvumi_bios_param(struct scsi_device *sdev, struct block_device *bdev, | |
2165 | sector_t capacity, int geom[]) | |
2166 | { | |
2167 | int heads, sectors; | |
2168 | sector_t cylinders; | |
2169 | unsigned long tmp; | |
2170 | ||
2171 | heads = 64; | |
2172 | sectors = 32; | |
2173 | tmp = heads * sectors; | |
2174 | cylinders = capacity; | |
2175 | sector_div(cylinders, tmp); | |
2176 | ||
2177 | if (capacity >= 0x200000) { | |
2178 | heads = 255; | |
2179 | sectors = 63; | |
2180 | tmp = heads * sectors; | |
2181 | cylinders = capacity; | |
2182 | sector_div(cylinders, tmp); | |
2183 | } | |
2184 | geom[0] = heads; | |
2185 | geom[1] = sectors; | |
2186 | geom[2] = cylinders; | |
2187 | ||
2188 | return 0; | |
2189 | } | |
2190 | ||
2191 | static struct scsi_host_template mvumi_template = { | |
2192 | ||
2193 | .module = THIS_MODULE, | |
2194 | .name = "Marvell Storage Controller", | |
2195 | .slave_configure = mvumi_slave_configure, | |
2196 | .queuecommand = mvumi_queue_command, | |
103eb3b5 | 2197 | .eh_timed_out = mvumi_timed_out, |
f0c568a4 JL |
2198 | .eh_host_reset_handler = mvumi_host_reset, |
2199 | .bios_param = mvumi_bios_param, | |
2200 | .this_id = -1, | |
2201 | }; | |
2202 | ||
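/*
 * The 9143 and 9580 expose the same logical register set through different
 * BARs and offsets (BAR0 vs BAR2, doorbell block at 0x204xx vs 0x104xx), so
 * everything is funnelled into one mvumi_hw_regs map filled in per device
 * id; the rest of the driver only ever touches regs->...
 */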
bd756dde SF |
2203 | static int mvumi_cfg_hw_reg(struct mvumi_hba *mhba) |
2204 | { | |
2205 | void *base = NULL; | |
2206 | struct mvumi_hw_regs *regs; | |
2207 | ||
2208 | switch (mhba->pdev->device) { | |
2209 | case PCI_DEVICE_ID_MARVELL_MV9143: | |
2210 | mhba->mmio = mhba->base_addr[0]; | |
2211 | base = mhba->mmio; | |
2212 | if (!mhba->regs) { | |
2213 | mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL); | |
2214 | if (mhba->regs == NULL) | |
2215 | return -ENOMEM; | |
2216 | } | |
2217 | regs = mhba->regs; | |
2218 | ||
2219 | /* For Arm */ | |
2220 | regs->ctrl_sts_reg = base + 0x20104; | |
2221 | regs->rstoutn_mask_reg = base + 0x20108; | |
2222 | regs->sys_soft_rst_reg = base + 0x2010C; | |
2223 | regs->main_int_cause_reg = base + 0x20200; | |
2224 | regs->enpointa_mask_reg = base + 0x2020C; | |
2225 | regs->rstoutn_en_reg = base + 0xF1400; | |
2226 | /* For Doorbell */ | |
2227 | regs->pciea_to_arm_drbl_reg = base + 0x20400; | |
2228 | regs->arm_to_pciea_drbl_reg = base + 0x20408; | |
2229 | regs->arm_to_pciea_mask_reg = base + 0x2040C; | |
2230 | regs->pciea_to_arm_msg0 = base + 0x20430; | |
2231 | regs->pciea_to_arm_msg1 = base + 0x20434; | |
2232 | regs->arm_to_pciea_msg0 = base + 0x20438; | |
2233 | regs->arm_to_pciea_msg1 = base + 0x2043C; | |
2234 | ||
2235 | /* For Message Unit */ | |
2236 | ||
2237 | regs->inb_aval_count_basel = base + 0x508; | |
2238 | regs->inb_aval_count_baseh = base + 0x50C; | |
2239 | regs->inb_write_pointer = base + 0x518; | |
2240 | regs->inb_read_pointer = base + 0x51C; | |
2242 | regs->outb_copy_basel = base + 0x5B0; | |
2243 | regs->outb_copy_baseh = base + 0x5B4; | |
2244 | regs->outb_copy_pointer = base + 0x544; | |
2245 | regs->outb_read_pointer = base + 0x548; | |
2246 | regs->outb_isr_cause = base + 0x560; | |
2247 | regs->outb_coal_cfg = base + 0x568; | |
2248 | /* Bit setting for HW */ | |
2249 | regs->int_comaout = 1 << 8; | |
2250 | regs->int_comaerr = 1 << 6; | |
2251 | regs->int_dl_cpu2pciea = 1 << 1; | |
2252 | regs->cl_pointer_toggle = 1 << 12; | |
2253 | regs->clic_irq = 1 << 1; | |
2254 | regs->clic_in_err = 1 << 8; | |
2255 | regs->clic_out_err = 1 << 12; | |
2256 | regs->cl_slot_num_mask = 0xFFF; | |
2257 | regs->int_drbl_int_mask = 0x3FFFFFFF; | |
2258 | regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout | | |
2259 | regs->int_comaerr; | |
2260 | break; | |
2261 | case PCI_DEVICE_ID_MARVELL_MV9580: | |
2262 | mhba->mmio = mhba->base_addr[2]; | |
2263 | base = mhba->mmio; | |
2264 | if (!mhba->regs) { | |
2265 | mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL); | |
2266 | if (mhba->regs == NULL) | |
2267 | return -ENOMEM; | |
2268 | } | |
2269 | regs = mhba->regs; | |
2270 | /* For Arm */ | |
2271 | regs->ctrl_sts_reg = base + 0x20104; | |
2272 | regs->rstoutn_mask_reg = base + 0x1010C; | |
2273 | regs->sys_soft_rst_reg = base + 0x10108; | |
2274 | regs->main_int_cause_reg = base + 0x10200; | |
2275 | regs->enpointa_mask_reg = base + 0x1020C; | |
2276 | regs->rstoutn_en_reg = base + 0xF1400; | |
2277 | ||
2278 | /* For Doorbell */ | |
2279 | regs->pciea_to_arm_drbl_reg = base + 0x10460; | |
2280 | regs->arm_to_pciea_drbl_reg = base + 0x10480; | |
2281 | regs->arm_to_pciea_mask_reg = base + 0x10484; | |
2282 | regs->pciea_to_arm_msg0 = base + 0x10400; | |
2283 | regs->pciea_to_arm_msg1 = base + 0x10404; | |
2284 | regs->arm_to_pciea_msg0 = base + 0x10420; | |
2285 | regs->arm_to_pciea_msg1 = base + 0x10424; | |
2286 | ||
2287 | /* For reset*/ | |
2288 | regs->reset_request = base + 0x10108; | |
2289 | regs->reset_enable = base + 0x1010c; | |
2290 | ||
2291 | /* For Message Unit */ | |
2292 | regs->inb_aval_count_basel = base + 0x4008; | |
2293 | regs->inb_aval_count_baseh = base + 0x400C; | |
2294 | regs->inb_write_pointer = base + 0x4018; | |
2295 | regs->inb_read_pointer = base + 0x401C; | |
2296 | regs->outb_copy_basel = base + 0x4058; | |
2297 | regs->outb_copy_baseh = base + 0x405C; | |
2298 | regs->outb_copy_pointer = base + 0x406C; | |
2299 | regs->outb_read_pointer = base + 0x4070; | |
2300 | regs->outb_coal_cfg = base + 0x4080; | |
2301 | regs->outb_isr_cause = base + 0x4088; | |
2302 | /* Bit setting for HW */ | |
2303 | regs->int_comaout = 1 << 4; | |
2304 | regs->int_dl_cpu2pciea = 1 << 12; | |
2305 | regs->int_comaerr = 1 << 29; | |
2306 | regs->cl_pointer_toggle = 1 << 14; | |
2307 | regs->cl_slot_num_mask = 0x3FFF; | |
2308 | regs->clic_irq = 1 << 0; | |
2309 | regs->clic_out_err = 1 << 1; | |
2310 | regs->int_drbl_int_mask = 0x3FFFFFFF; | |
2311 | regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout; | |
2312 | break; | |
2313 | default: | |
2314 | return -1; | |
2316 | } | |
2317 | ||
2318 | return 0; | |
2319 | } | |
2320 | ||
f0c568a4 JL |
2321 | /** |
2322 | * mvumi_init_fw - Initializes the FW | |
2323 | * @mhba: Adapter soft state | |
2324 | * | |
2325 | * This is the main function for initializing firmware. | |
2326 | */ | |
2327 | static int mvumi_init_fw(struct mvumi_hba *mhba) | |
2328 | { | |
2329 | int ret = 0; | |
2330 | ||
2331 | if (pci_request_regions(mhba->pdev, MV_DRIVER_NAME)) { | |
2332 | dev_err(&mhba->pdev->dev, "IO memory region busy!\n"); | |
2333 | return -EBUSY; | |
2334 | } | |
2335 | ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr); | |
2336 | if (ret) | |
2337 | goto fail_ioremap; | |
2338 | ||
f0c568a4 JL |
2339 | switch (mhba->pdev->device) { |
2340 | case PCI_DEVICE_ID_MARVELL_MV9143: | |
bd756dde | 2341 | mhba->instancet = &mvumi_instance_9143; |
f0c568a4 JL |
2342 | mhba->io_seq = 0; |
2343 | mhba->max_sge = MVUMI_MAX_SG_ENTRY; | |
2344 | mhba->request_id_enabled = 1; | |
2345 | break; | |
bd756dde SF |
2346 | case PCI_DEVICE_ID_MARVELL_MV9580: |
2347 | mhba->instancet = &mvumi_instance_9580; | |
2348 | mhba->io_seq = 0; | |
2349 | mhba->max_sge = MVUMI_MAX_SG_ENTRY; | |
2350 | break; | |
f0c568a4 JL |
2351 | default: |
2352 | dev_err(&mhba->pdev->dev, "device 0x%x not supported!\n", | |
2353 | mhba->pdev->device); | |
2354 | mhba->instancet = NULL; | |
2355 | ret = -EINVAL; | |
2356 | goto fail_alloc_mem; | |
2357 | } | |
2358 | dev_dbg(&mhba->pdev->dev, "device id : %04X is found.\n", | |
2359 | mhba->pdev->device); | |
bd756dde SF |
2360 | ret = mvumi_cfg_hw_reg(mhba); |
2361 | if (ret) { | |
2362 | dev_err(&mhba->pdev->dev, | |
2363 | "failed to allocate memory for reg\n"); | |
2364 | ret = -ENOMEM; | |
2365 | goto fail_alloc_mem; | |
2366 | } | |
ab8e7f4b CH |
2367 | mhba->handshake_page = dma_alloc_coherent(&mhba->pdev->dev, |
2368 | HSP_MAX_SIZE, &mhba->handshake_page_phys, GFP_KERNEL); | |
f0c568a4 JL |
2369 | if (!mhba->handshake_page) { |
2370 | dev_err(&mhba->pdev->dev, | |
2371 | "failed to allocate memory for handshake\n"); | |
2372 | ret = -ENOMEM; | |
bd756dde | 2373 | goto fail_alloc_page; |
f0c568a4 | 2374 | } |
f0c568a4 JL |
2375 | |
2376 | if (mvumi_start(mhba)) { | |
2377 | ret = -EINVAL; | |
2378 | goto fail_ready_state; | |
2379 | } | |
2380 | ret = mvumi_alloc_cmds(mhba); | |
2381 | if (ret) | |
2382 | goto fail_ready_state; | |
2383 | ||
2384 | return 0; | |
2385 | ||
2386 | fail_ready_state: | |
2387 | mvumi_release_mem_resource(mhba); | |
ab8e7f4b | 2388 | dma_free_coherent(&mhba->pdev->dev, HSP_MAX_SIZE, |
bd756dde SF |
2389 | mhba->handshake_page, mhba->handshake_page_phys); |
2390 | fail_alloc_page: | |
2391 | kfree(mhba->regs); | |
f0c568a4 JL |
2392 | fail_alloc_mem: |
2393 | mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr); | |
2394 | fail_ioremap: | |
2395 | pci_release_regions(mhba->pdev); | |
2396 | ||
2397 | return ret; | |
2398 | } | |
2399 | ||
2400 | /** | |
2401 | * mvumi_io_attach - Attaches this driver to SCSI mid-layer | |
2402 | * @mhba: Adapter soft state | |
2403 | */ | |
2404 | static int mvumi_io_attach(struct mvumi_hba *mhba) | |
2405 | { | |
2406 | struct Scsi_Host *host = mhba->shost; | |
bd756dde | 2407 | struct scsi_device *sdev = NULL; |
f0c568a4 JL |
2408 | int ret; |
2409 | unsigned int max_sg = (mhba->ib_max_size + 4 - | |
2410 | sizeof(struct mvumi_msg_frame)) / sizeof(struct mvumi_sgl); | |
2411 | ||
2412 | host->irq = mhba->pdev->irq; | |
2413 | host->unique_id = mhba->unique_id; | |
2414 | host->can_queue = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1; | |
2415 | host->sg_tablesize = mhba->max_sge > max_sg ? max_sg : mhba->max_sge; | |
2416 | host->max_sectors = mhba->max_transfer_size / 512; | |
bd756dde | 2417 | host->cmd_per_lun = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1; |
f0c568a4 JL |
2418 | host->max_id = mhba->max_target_id; |
2419 | host->max_cmd_len = MAX_COMMAND_SIZE; | |
f0c568a4 JL |
2420 | |
2421 | ret = scsi_add_host(host, &mhba->pdev->dev); | |
2422 | if (ret) { | |
2423 | dev_err(&mhba->pdev->dev, "scsi_add_host failed\n"); | |
2424 | return ret; | |
2425 | } | |
2426 | mhba->fw_flag |= MVUMI_FW_ATTACH; | |
f0c568a4 | 2427 | |
bd756dde SF |
2428 | mutex_lock(&mhba->sas_discovery_mutex); |
2429 | if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) | |
2430 | ret = scsi_add_device(host, 0, mhba->max_target_id - 1, 0); | |
2431 | else | |
2432 | ret = 0; | |
2433 | if (ret) { | |
2434 | dev_err(&mhba->pdev->dev, "add virtual device failed\n"); | |
2435 | mutex_unlock(&mhba->sas_discovery_mutex); | |
2436 | goto fail_add_device; | |
2437 | } | |
2438 | ||
2439 | mhba->dm_thread = kthread_create(mvumi_rescan_bus, | |
2440 | mhba, "mvumi_scanthread"); | |
2441 | if (IS_ERR(mhba->dm_thread)) { | |
2442 | dev_err(&mhba->pdev->dev, | |
2443 | "failed to create device scan thread\n"); | |
2444 | mutex_unlock(&mhba->sas_discovery_mutex); | |
2445 | goto fail_create_thread; | |
2446 | } | |
2447 | atomic_set(&mhba->pnp_count, 1); | |
2448 | wake_up_process(mhba->dm_thread); | |
2449 | ||
2450 | mutex_unlock(&mhba->sas_discovery_mutex); | |
f0c568a4 | 2451 | return 0; |
bd756dde SF |
2452 | |
2453 | fail_create_thread: | |
2454 | if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) | |
2455 | sdev = scsi_device_lookup(mhba->shost, 0, | |
2456 | mhba->max_target_id - 1, 0); | |
2457 | if (sdev) { | |
2458 | scsi_remove_device(sdev); | |
2459 | scsi_device_put(sdev); | |
2460 | } | |
2461 | fail_add_device: | |
2462 | scsi_remove_host(mhba->shost); | |
2463 | return ret; | |
f0c568a4 JL |
2464 | } |
2465 | ||
2466 | /** | |
2467 | * mvumi_probe_one - PCI hotplug entry point | |
2468 | * @pdev: PCI device structure | |
2469 | * @id: PCI ids of supported hotplugged adapter | |
2470 | */ | |
6f039790 | 2471 | static int mvumi_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) |
f0c568a4 JL |
2472 | { |
2473 | struct Scsi_Host *host; | |
2474 | struct mvumi_hba *mhba; | |
2475 | int ret; | |
2476 | ||
2477 | dev_dbg(&pdev->dev, " %#4.04x:%#4.04x:%#4.04x:%#4.04x: ", | |
2478 | pdev->vendor, pdev->device, pdev->subsystem_vendor, | |
2479 | pdev->subsystem_device); | |
2480 | ||
2481 | ret = pci_enable_device(pdev); | |
2482 | if (ret) | |
2483 | return ret; | |
2484 | ||
ab8e7f4b CH |
2485 | ret = mvumi_pci_set_master(pdev); |
2486 | if (ret) | |
2487 | goto fail_set_dma_mask; | |
f0c568a4 JL |
2488 | |
2489 | host = scsi_host_alloc(&mvumi_template, sizeof(*mhba)); | |
2490 | if (!host) { | |
2491 | dev_err(&pdev->dev, "scsi_host_alloc failed\n"); | |
2492 | ret = -ENOMEM; | |
2493 | goto fail_alloc_instance; | |
2494 | } | |
2495 | mhba = shost_priv(host); | |
2496 | ||
2497 | INIT_LIST_HEAD(&mhba->cmd_pool); | |
2498 | INIT_LIST_HEAD(&mhba->ob_data_list); | |
2499 | INIT_LIST_HEAD(&mhba->free_ob_list); | |
2500 | INIT_LIST_HEAD(&mhba->res_list); | |
2501 | INIT_LIST_HEAD(&mhba->waiting_req_list); | |
bd756dde SF |
2502 | mutex_init(&mhba->device_lock); |
2503 | INIT_LIST_HEAD(&mhba->mhba_dev_list); | |
2504 | INIT_LIST_HEAD(&mhba->shost_dev_list); | |
f0c568a4 JL |
2505 | atomic_set(&mhba->fw_outstanding, 0); |
2506 | init_waitqueue_head(&mhba->int_cmd_wait_q); | |
bd756dde | 2507 | mutex_init(&mhba->sas_discovery_mutex); |
f0c568a4 JL |
2508 | |
2509 | mhba->pdev = pdev; | |
2510 | mhba->shost = host; | |
2511 | mhba->unique_id = pdev->bus->number << 8 | pdev->devfn; | |
2512 | ||
2513 | ret = mvumi_init_fw(mhba); | |
2514 | if (ret) | |
2515 | goto fail_init_fw; | |
2516 | ||
2517 | ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED, | |
2518 | "mvumi", mhba); | |
2519 | if (ret) { | |
2520 | dev_err(&pdev->dev, "failed to register IRQ\n"); | |
2521 | goto fail_init_irq; | |
2522 | } | |
bd756dde SF |
2523 | |
2524 | mhba->instancet->enable_intr(mhba); | |
f0c568a4 JL |
2525 | pci_set_drvdata(pdev, mhba); |
2526 | ||
2527 | ret = mvumi_io_attach(mhba); | |
2528 | if (ret) | |
2529 | goto fail_io_attach; | |
bd756dde SF |
2530 | |
2531 | mvumi_backup_bar_addr(mhba); | |
f0c568a4 JL |
2532 | dev_dbg(&pdev->dev, "mvumi driver probed successfully.\n"); | |
2533 | ||
2534 | return 0; | |
2535 | ||
2536 | fail_io_attach: | |
bd756dde | 2537 | mhba->instancet->disable_intr(mhba); |
f0c568a4 JL |
2538 | free_irq(mhba->pdev->irq, mhba); |
2539 | fail_init_irq: | |
2540 | mvumi_release_fw(mhba); | |
2541 | fail_init_fw: | |
2542 | scsi_host_put(host); | |
2543 | ||
2544 | fail_alloc_instance: | |
2545 | fail_set_dma_mask: | |
2546 | pci_disable_device(pdev); | |
2547 | ||
2548 | return ret; | |
2549 | } | |
2550 | ||
2551 | static void mvumi_detach_one(struct pci_dev *pdev) | |
2552 | { | |
2553 | struct Scsi_Host *host; | |
2554 | struct mvumi_hba *mhba; | |
2555 | ||
2556 | mhba = pci_get_drvdata(pdev); | |
bd756dde SF |
2557 | if (mhba->dm_thread) { |
2558 | kthread_stop(mhba->dm_thread); | |
2559 | mhba->dm_thread = NULL; | |
2560 | } | |
2561 | ||
2562 | mvumi_detach_devices(mhba); | |
f0c568a4 JL |
2563 | host = mhba->shost; |
2564 | scsi_remove_host(mhba->shost); | |
2565 | mvumi_flush_cache(mhba); | |
2566 | ||
bd756dde | 2567 | mhba->instancet->disable_intr(mhba); |
f0c568a4 JL |
2568 | free_irq(mhba->pdev->irq, mhba); |
2569 | mvumi_release_fw(mhba); | |
2570 | scsi_host_put(host); | |
f0c568a4 JL |
2571 | pci_disable_device(pdev); |
2572 | dev_dbg(&pdev->dev, "driver is removed!\n"); | |
2573 | } | |
2574 | ||
2575 | /** | |
2576 | * mvumi_shutdown - Shutdown entry point | |
2577 | * @pdev: PCI device structure | |
2578 | */ | |
2579 | static void mvumi_shutdown(struct pci_dev *pdev) | |
2580 | { | |
2581 | struct mvumi_hba *mhba = pci_get_drvdata(pdev); | |
2582 | ||
2583 | mvumi_flush_cache(mhba); | |
2584 | } | |
2585 | ||
fddbeb80 | 2586 | static int __maybe_unused mvumi_suspend(struct pci_dev *pdev, pm_message_t state) |
f0c568a4 JL |
2587 | { |
2588 | struct mvumi_hba *mhba = NULL; | |
2589 | ||
2590 | mhba = pci_get_drvdata(pdev); | |
2591 | mvumi_flush_cache(mhba); | |
2592 | ||
2593 | pci_set_drvdata(pdev, mhba); | |
bd756dde | 2594 | mhba->instancet->disable_intr(mhba); |
f0c568a4 JL |
2595 | free_irq(mhba->pdev->irq, mhba); |
2596 | mvumi_unmap_pci_addr(pdev, mhba->base_addr); | |
2597 | pci_release_regions(pdev); | |
2598 | pci_save_state(pdev); | |
2599 | pci_disable_device(pdev); | |
2600 | pci_set_power_state(pdev, pci_choose_state(pdev, state)); | |
2601 | ||
2602 | return 0; | |
2603 | } | |
2604 | ||
fddbeb80 | 2605 | static int __maybe_unused mvumi_resume(struct pci_dev *pdev) |
f0c568a4 JL |
2606 | { |
2607 | int ret; | |
2608 | struct mvumi_hba *mhba = NULL; | |
2609 | ||
2610 | mhba = pci_get_drvdata(pdev); | |
2611 | ||
2612 | pci_set_power_state(pdev, PCI_D0); | |
2613 | pci_enable_wake(pdev, PCI_D0, 0); | |
2614 | pci_restore_state(pdev); | |
2615 | ||
2616 | ret = pci_enable_device(pdev); | |
2617 | if (ret) { | |
2618 | dev_err(&pdev->dev, "enable device failed\n"); | |
2619 | return ret; | |
2620 | } | |
ab8e7f4b CH |
2621 | |
2622 | ret = mvumi_pci_set_master(pdev); | |
| if (ret) | |
| goto fail; | |
2623 | ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | |
2624 | if (ret) | |
2625 | goto fail; | |
f0c568a4 JL |
2626 | ret = pci_request_regions(mhba->pdev, MV_DRIVER_NAME); |
2627 | if (ret) | |
2628 | goto fail; | |
2629 | ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr); | |
2630 | if (ret) | |
2631 | goto release_regions; | |
2632 | ||
bd756dde SF |
2633 | if (mvumi_cfg_hw_reg(mhba)) { |
2634 | ret = -EINVAL; | |
2635 | goto unmap_pci_addr; | |
2636 | } | |
2637 | ||
f0c568a4 | 2638 | mhba->mmio = mhba->base_addr[0]; |
bd756dde | 2639 | mvumi_reset(mhba); |
f0c568a4 JL |
2640 | |
2641 | if (mvumi_start(mhba)) { | |
2642 | ret = -EINVAL; | |
2643 | goto unmap_pci_addr; | |
2644 | } | |
2645 | ||
2646 | ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED, | |
2647 | "mvumi", mhba); | |
2648 | if (ret) { | |
2649 | dev_err(&pdev->dev, "failed to register IRQ\n"); | |
2650 | goto unmap_pci_addr; | |
2651 | } | |
bd756dde | 2652 | mhba->instancet->enable_intr(mhba); |
f0c568a4 JL |
2653 | |
2654 | return 0; | |
2655 | ||
2656 | unmap_pci_addr: | |
2657 | mvumi_unmap_pci_addr(pdev, mhba->base_addr); | |
2658 | release_regions: | |
2659 | pci_release_regions(pdev); | |
2660 | fail: | |
2661 | pci_disable_device(pdev); | |
2662 | ||
2663 | return ret; | |
2664 | } | |
2665 | ||
2666 | static struct pci_driver mvumi_pci_driver = { | |
2667 | ||
2668 | .name = MV_DRIVER_NAME, | |
2669 | .id_table = mvumi_pci_table, | |
2670 | .probe = mvumi_probe_one, | |
6f039790 | 2671 | .remove = mvumi_detach_one, |
f0c568a4 JL |
2672 | .shutdown = mvumi_shutdown, |
2673 | #ifdef CONFIG_PM | |
2674 | .suspend = mvumi_suspend, | |
2675 | .resume = mvumi_resume, | |
2676 | #endif | |
2677 | }; | |
2678 | ||
f9c25ccf | 2679 | module_pci_driver(mvumi_pci_driver); |