/*
 * HighPoint RR3xxx/4xxx controller driver for Linux
 * Copyright (C) 2006-2015 HighPoint Technologies, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Please report bugs/comments/suggestions to linux@highpoint-tech.com
 *
 * For more information, visit http://www.highpoint-tech.com
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/div64.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>

#include "hptiop.h"

MODULE_AUTHOR("HighPoint Technologies, Inc.");
MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx/4xxx Controller Driver");

static char driver_name[] = "hptiop";
static const char driver_name_long[] = "RocketRAID 3xxx/4xxx Controller driver";
static const char driver_ver[] = "v1.10.0";

static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec);
static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
				struct hpt_iop_request_scsi_command *req);
static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 tag);
static void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag);
static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg);

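/*
 * ITL (Intel-based) controllers hand out request frames through the
 * inbound queue register: a read returns either the offset of a free
 * frame inside the IOP's BAR window or IOPMU_QUEUE_EMPTY.  Firmware
 * readiness is therefore probed by polling the inbound queue until a
 * frame appears, then handing it straight back via the outbound queue.
 */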
static int iop_wait_ready_itl(struct hptiop_hba *hba, u32 millisec)
{
	u32 req = 0;
	int i;

	for (i = 0; i < millisec; i++) {
		req = readl(&hba->u.itl.iop->inbound_queue);
		if (req != IOPMU_QUEUE_EMPTY)
			break;
		msleep(1);
	}

	if (req != IOPMU_QUEUE_EMPTY) {
		writel(req, &hba->u.itl.iop->outbound_queue);
		readl(&hba->u.itl.iop->outbound_intstatus);
		return 0;
	}

	return -1;
}

static int iop_wait_ready_mv(struct hptiop_hba *hba, u32 millisec)
{
	return iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec);
}

static int iop_wait_ready_mvfrey(struct hptiop_hba *hba, u32 millisec)
{
	return iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec);
}

static void hptiop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
{
	if (tag & IOPMU_QUEUE_ADDR_HOST_BIT)
		hptiop_host_request_callback_itl(hba,
				tag & ~IOPMU_QUEUE_ADDR_HOST_BIT);
	else
		hptiop_iop_request_callback_itl(hba, tag);
}

static void hptiop_drain_outbound_queue_itl(struct hptiop_hba *hba)
{
	u32 req;

	while ((req = readl(&hba->u.itl.iop->outbound_queue)) !=
			IOPMU_QUEUE_EMPTY) {

		if (req & IOPMU_QUEUE_MASK_HOST_BITS)
			hptiop_request_callback_itl(hba, req);
		else {
			struct hpt_iop_request_header __iomem *p;

			p = (struct hpt_iop_request_header __iomem *)
				((char __iomem *)hba->u.itl.iop + req);

			if (readl(&p->flags) & IOP_REQUEST_FLAG_SYNC_REQUEST) {
				if (readl(&p->context))
					hptiop_request_callback_itl(hba, req);
				else
					writel(1, &p->context);
			}
			else
				hptiop_request_callback_itl(hba, req);
		}
	}
}

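/*
 * ITL interrupt service.  On boards that sit behind a PLX part
 * (PCI device 0x44xx), the pending bits at PLX offset 0x11C5C are
 * checked and acknowledged through 0x11C60 first; outbound_intstatus
 * is then inspected for a firmware message (MSG0) and for completions
 * posted to the outbound queue.
 */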
static int iop_intr_itl(struct hptiop_hba *hba)
{
	struct hpt_iopmu_itl __iomem *iop = hba->u.itl.iop;
	void __iomem *plx = hba->u.itl.plx;
	u32 status;
	int ret = 0;

	if (plx && readl(plx + 0x11C5C) & 0xf)
		writel(1, plx + 0x11C60);

	status = readl(&iop->outbound_intstatus);

	if (status & IOPMU_OUTBOUND_INT_MSG0) {
		u32 msg = readl(&iop->outbound_msgaddr0);

		dprintk("received outbound msg %x\n", msg);
		writel(IOPMU_OUTBOUND_INT_MSG0, &iop->outbound_intstatus);
		hptiop_message_callback(hba, msg);
		ret = 1;
	}

	if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
		hptiop_drain_outbound_queue_itl(hba);
		ret = 1;
	}

	return ret;
}

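/*
 * MV (Marvell-based) controllers exchange 64-bit request descriptors
 * through small rings in the messaging unit: the host consumes
 * outbound_q entries between outbound_tail and outbound_head, and
 * produces into inbound_q at inbound_head, ringing the inbound
 * doorbell after each write.  Both rings wrap at MVIOP_QUEUE_LEN.
 */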
static u64 mv_outbound_read(struct hpt_iopmu_mv __iomem *mu)
{
	u32 outbound_tail = readl(&mu->outbound_tail);
	u32 outbound_head = readl(&mu->outbound_head);

	if (outbound_tail != outbound_head) {
		u64 p;

		memcpy_fromio(&p, &mu->outbound_q[mu->outbound_tail], 8);
		outbound_tail++;

		if (outbound_tail == MVIOP_QUEUE_LEN)
			outbound_tail = 0;
		writel(outbound_tail, &mu->outbound_tail);
		return p;
	} else
		return 0;
}

static void mv_inbound_write(u64 p, struct hptiop_hba *hba)
{
	u32 inbound_head = readl(&hba->u.mv.mu->inbound_head);
	u32 head = inbound_head + 1;

	if (head == MVIOP_QUEUE_LEN)
		head = 0;

	memcpy_toio(&hba->u.mv.mu->inbound_q[inbound_head], &p, 8);
	writel(head, &hba->u.mv.mu->inbound_head);
	writel(MVIOP_MU_INBOUND_INT_POSTQUEUE,
		&hba->u.mv.regs->inbound_doorbell);
}

static void hptiop_request_callback_mv(struct hptiop_hba *hba, u64 tag)
{
	u32 req_type = (tag >> 5) & 0x7;
	struct hpt_iop_request_scsi_command *req;

	dprintk("hptiop_request_callback_mv: tag=%llx\n", tag);

	BUG_ON((tag & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) == 0);

	switch (req_type) {
	case IOP_REQUEST_TYPE_GET_CONFIG:
	case IOP_REQUEST_TYPE_SET_CONFIG:
		hba->msg_done = 1;
		break;

	case IOP_REQUEST_TYPE_SCSI_COMMAND:
		req = hba->reqs[tag >> 8].req_virt;
		if (likely(tag & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT))
			req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);

		hptiop_finish_scsi_req(hba, tag>>8, req);
		break;

	default:
		break;
	}
}

static int iop_intr_mv(struct hptiop_hba *hba)
{
	u32 status;
	int ret = 0;

	status = readl(&hba->u.mv.regs->outbound_doorbell);
	writel(~status, &hba->u.mv.regs->outbound_doorbell);

	if (status & MVIOP_MU_OUTBOUND_INT_MSG) {
		u32 msg;
		msg = readl(&hba->u.mv.mu->outbound_msg);
		dprintk("received outbound msg %x\n", msg);
		hptiop_message_callback(hba, msg);
		ret = 1;
	}

	if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) {
		u64 tag;

		while ((tag = mv_outbound_read(hba->u.mv.mu)))
			hptiop_request_callback_mv(hba, tag);
		ret = 1;
	}

	return ret;
}

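/*
 * MVFrey completion tags are self-describing: bits 0-3 carry the
 * request type and, for SCSI commands, bits 4-11 carry the request
 * index, so completion needs no separate context lookup.
 */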
static void hptiop_request_callback_mvfrey(struct hptiop_hba *hba, u32 _tag)
{
	u32 req_type = _tag & 0xf;
	struct hpt_iop_request_scsi_command *req;

	switch (req_type) {
	case IOP_REQUEST_TYPE_GET_CONFIG:
	case IOP_REQUEST_TYPE_SET_CONFIG:
		hba->msg_done = 1;
		break;

	case IOP_REQUEST_TYPE_SCSI_COMMAND:
		req = hba->reqs[(_tag >> 4) & 0xff].req_virt;
		if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT))
			req->header.result = IOP_RESULT_SUCCESS;
		hptiop_finish_scsi_req(hba, (_tag >> 4) & 0xff, req);
		break;

	default:
		break;
	}
}

static int iop_intr_mvfrey(struct hptiop_hba *hba)
{
	u32 _tag, status, cptr, cur_rptr;
	int ret = 0;

	if (hba->initialized)
		writel(0, &(hba->u.mvfrey.mu->pcie_f0_int_enable));

	status = readl(&(hba->u.mvfrey.mu->f0_doorbell));
	if (status) {
		writel(status, &(hba->u.mvfrey.mu->f0_doorbell));
		if (status & CPU_TO_F0_DRBL_MSG_BIT) {
			u32 msg = readl(&(hba->u.mvfrey.mu->cpu_to_f0_msg_a));
			dprintk("received outbound msg %x\n", msg);
			hptiop_message_callback(hba, msg);
		}
		ret = 1;
	}

	status = readl(&(hba->u.mvfrey.mu->isr_cause));
	if (status) {
		writel(status, &(hba->u.mvfrey.mu->isr_cause));
		do {
			cptr = *hba->u.mvfrey.outlist_cptr & 0xff;
			cur_rptr = hba->u.mvfrey.outlist_rptr;
			while (cur_rptr != cptr) {
				cur_rptr++;
				if (cur_rptr == hba->u.mvfrey.list_count)
					cur_rptr = 0;

				_tag = hba->u.mvfrey.outlist[cur_rptr].val;
				BUG_ON(!(_tag & IOPMU_QUEUE_MASK_HOST_BITS));
				hptiop_request_callback_mvfrey(hba, _tag);
				ret = 1;
			}
			hba->u.mvfrey.outlist_rptr = cur_rptr;
		} while (cptr != (*hba->u.mvfrey.outlist_cptr & 0xff));
	}

	if (hba->initialized)
		writel(0x1010, &(hba->u.mvfrey.mu->pcie_f0_int_enable));

	return ret;
}

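/*
 * Synchronous requests are used only on slow paths (probe, reset,
 * shutdown): the request is posted with IOP_REQUEST_FLAG_SYNC_REQUEST
 * set and the interrupt routine is then called by hand, once per
 * millisecond up to 'millisec' times, until the completion marker
 * (req->context for ITL, hba->msg_done otherwise) is observed.
 */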
static int iop_send_sync_request_itl(struct hptiop_hba *hba,
					void __iomem *_req, u32 millisec)
{
	struct hpt_iop_request_header __iomem *req = _req;
	u32 i;

	writel(readl(&req->flags) | IOP_REQUEST_FLAG_SYNC_REQUEST, &req->flags);
	writel(0, &req->context);
	writel((unsigned long)req - (unsigned long)hba->u.itl.iop,
			&hba->u.itl.iop->inbound_queue);
	readl(&hba->u.itl.iop->outbound_intstatus);

	for (i = 0; i < millisec; i++) {
		iop_intr_itl(hba);
		if (readl(&req->context))
			return 0;
		msleep(1);
	}

	return -1;
}

static int iop_send_sync_request_mv(struct hptiop_hba *hba,
					u32 size_bits, u32 millisec)
{
	struct hpt_iop_request_header *reqhdr = hba->u.mv.internal_req;
	u32 i;

	hba->msg_done = 0;
	reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_SYNC_REQUEST);
	mv_inbound_write(hba->u.mv.internal_req_phy |
			MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bits, hba);

	for (i = 0; i < millisec; i++) {
		iop_intr_mv(hba);
		if (hba->msg_done)
			return 0;
		msleep(1);
	}
	return -1;
}

static int iop_send_sync_request_mvfrey(struct hptiop_hba *hba,
					u32 size_bits, u32 millisec)
{
	struct hpt_iop_request_header *reqhdr =
		hba->u.mvfrey.internal_req.req_virt;
	u32 i;

	hba->msg_done = 0;
	reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_SYNC_REQUEST);
	hba->ops->post_req(hba, &(hba->u.mvfrey.internal_req));

	for (i = 0; i < millisec; i++) {
		iop_intr_mvfrey(hba);
		if (hba->msg_done)
			break;
		msleep(1);
	}
	return hba->msg_done ? 0 : -1;
}

static void hptiop_post_msg_itl(struct hptiop_hba *hba, u32 msg)
{
	writel(msg, &hba->u.itl.iop->inbound_msgaddr0);
	readl(&hba->u.itl.iop->outbound_intstatus);
}

static void hptiop_post_msg_mv(struct hptiop_hba *hba, u32 msg)
{
	writel(msg, &hba->u.mv.mu->inbound_msg);
	writel(MVIOP_MU_INBOUND_INT_MSG, &hba->u.mv.regs->inbound_doorbell);
	readl(&hba->u.mv.regs->inbound_doorbell);
}

static void hptiop_post_msg_mvfrey(struct hptiop_hba *hba, u32 msg)
{
	writel(msg, &(hba->u.mvfrey.mu->f0_to_cpu_msg_a));
	readl(&(hba->u.mvfrey.mu->f0_to_cpu_msg_a));
}

static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec)
{
	u32 i;

	hba->msg_done = 0;
	hba->ops->disable_intr(hba);
	hba->ops->post_msg(hba, msg);

	for (i = 0; i < millisec; i++) {
		spin_lock_irq(hba->host->host_lock);
		hba->ops->iop_intr(hba);
		spin_unlock_irq(hba->host->host_lock);
		if (hba->msg_done)
			break;
		msleep(1);
	}

	hba->ops->enable_intr(hba);
	return hba->msg_done ? 0 : -1;
}

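/*
 * GET_CONFIG/SET_CONFIG exchange controller parameters (request
 * count, S/G limit, transfer length, ...) at probe time.  ITL builds
 * the request in a firmware-supplied frame, MV reuses its
 * preallocated internal request buffer, and MVFrey reads a config
 * page the firmware exposes directly through BAR 0.
 */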
static int iop_get_config_itl(struct hptiop_hba *hba,
				struct hpt_iop_request_get_config *config)
{
	u32 req32;
	struct hpt_iop_request_get_config __iomem *req;

	req32 = readl(&hba->u.itl.iop->inbound_queue);
	if (req32 == IOPMU_QUEUE_EMPTY)
		return -1;

	req = (struct hpt_iop_request_get_config __iomem *)
			((unsigned long)hba->u.itl.iop + req32);

	writel(0, &req->header.flags);
	writel(IOP_REQUEST_TYPE_GET_CONFIG, &req->header.type);
	writel(sizeof(struct hpt_iop_request_get_config), &req->header.size);
	writel(IOP_RESULT_PENDING, &req->header.result);

	if (iop_send_sync_request_itl(hba, req, 20000)) {
		dprintk("Get config send cmd failed\n");
		return -1;
	}

	memcpy_fromio(config, req, sizeof(*config));
	writel(req32, &hba->u.itl.iop->outbound_queue);
	return 0;
}

static int iop_get_config_mv(struct hptiop_hba *hba,
				struct hpt_iop_request_get_config *config)
{
	struct hpt_iop_request_get_config *req = hba->u.mv.internal_req;

	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG);
	req->header.size =
		cpu_to_le32(sizeof(struct hpt_iop_request_get_config));
	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
	req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG<<5);
	req->header.context_hi32 = 0;

	if (iop_send_sync_request_mv(hba, 0, 20000)) {
		dprintk("Get config send cmd failed\n");
		return -1;
	}

	memcpy(config, req, sizeof(struct hpt_iop_request_get_config));
	return 0;
}

static int iop_get_config_mvfrey(struct hptiop_hba *hba,
				struct hpt_iop_request_get_config *config)
{
	struct hpt_iop_request_get_config *info = hba->u.mvfrey.config;

	if (info->header.size != sizeof(struct hpt_iop_request_get_config) ||
			info->header.type != IOP_REQUEST_TYPE_GET_CONFIG)
		return -1;

	config->interface_version = info->interface_version;
	config->firmware_version = info->firmware_version;
	config->max_requests = info->max_requests;
	config->request_size = info->request_size;
	config->max_sg_count = info->max_sg_count;
	config->data_transfer_length = info->data_transfer_length;
	config->alignment_mask = info->alignment_mask;
	config->max_devices = info->max_devices;
	config->sdram_size = info->sdram_size;

	return 0;
}

static int iop_set_config_itl(struct hptiop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	u32 req32;
	struct hpt_iop_request_set_config __iomem *req;

	req32 = readl(&hba->u.itl.iop->inbound_queue);
	if (req32 == IOPMU_QUEUE_EMPTY)
		return -1;

	req = (struct hpt_iop_request_set_config __iomem *)
			((unsigned long)hba->u.itl.iop + req32);

	memcpy_toio((u8 __iomem *)req + sizeof(struct hpt_iop_request_header),
		(u8 *)config + sizeof(struct hpt_iop_request_header),
		sizeof(struct hpt_iop_request_set_config) -
			sizeof(struct hpt_iop_request_header));

	writel(0, &req->header.flags);
	writel(IOP_REQUEST_TYPE_SET_CONFIG, &req->header.type);
	writel(sizeof(struct hpt_iop_request_set_config), &req->header.size);
	writel(IOP_RESULT_PENDING, &req->header.result);

	if (iop_send_sync_request_itl(hba, req, 20000)) {
		dprintk("Set config send cmd failed\n");
		return -1;
	}

	writel(req32, &hba->u.itl.iop->outbound_queue);
	return 0;
}

static int iop_set_config_mv(struct hptiop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	struct hpt_iop_request_set_config *req = hba->u.mv.internal_req;

	memcpy(req, config, sizeof(struct hpt_iop_request_set_config));
	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG);
	req->header.size =
		cpu_to_le32(sizeof(struct hpt_iop_request_set_config));
	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
	req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG<<5);
	req->header.context_hi32 = 0;

	if (iop_send_sync_request_mv(hba, 0, 20000)) {
		dprintk("Set config send cmd failed\n");
		return -1;
	}

	return 0;
}

static int iop_set_config_mvfrey(struct hptiop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	struct hpt_iop_request_set_config *req =
		hba->u.mvfrey.internal_req.req_virt;

	memcpy(req, config, sizeof(struct hpt_iop_request_set_config));
	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG);
	req->header.size =
		cpu_to_le32(sizeof(struct hpt_iop_request_set_config));
	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
	req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG<<5);
	req->header.context_hi32 = 0;

	if (iop_send_sync_request_mvfrey(hba, 0, 20000)) {
		dprintk("Set config send cmd failed\n");
		return -1;
	}

	return 0;
}

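/*
 * Interrupt enabling differs per family: the ITL outbound_intmask is
 * a mask register (cleared bits are enabled, so the complement of
 * the wanted sources is written), MV takes a plain enable mask, and
 * MVFrey has separate doorbell, ISR and PCIe function enables.
 */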
static void hptiop_enable_intr_itl(struct hptiop_hba *hba)
{
	writel(~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0),
		&hba->u.itl.iop->outbound_intmask);
}

static void hptiop_enable_intr_mv(struct hptiop_hba *hba)
{
	writel(MVIOP_MU_OUTBOUND_INT_POSTQUEUE | MVIOP_MU_OUTBOUND_INT_MSG,
		&hba->u.mv.regs->outbound_intmask);
}

static void hptiop_enable_intr_mvfrey(struct hptiop_hba *hba)
{
	writel(CPU_TO_F0_DRBL_MSG_BIT, &(hba->u.mvfrey.mu->f0_doorbell_enable));
	writel(0x1, &(hba->u.mvfrey.mu->isr_enable));
	writel(0x1010, &(hba->u.mvfrey.mu->pcie_f0_int_enable));
}

static int hptiop_initialize_iop(struct hptiop_hba *hba)
{
	/* enable interrupts */
	hba->ops->enable_intr(hba);

	hba->initialized = 1;

	/* start background tasks */
	if (iop_send_sync_msg(hba,
			IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
		printk(KERN_ERR "scsi%d: fail to start background task\n",
			hba->host->host_no);
		return -1;
	}
	return 0;
}

static void __iomem *hptiop_map_pci_bar(struct hptiop_hba *hba, int index)
{
	u32 mem_base_phy, length;
	void __iomem *mem_base_virt;

	struct pci_dev *pcidev = hba->pcidev;

	if (!(pci_resource_flags(pcidev, index) & IORESOURCE_MEM)) {
		printk(KERN_ERR "scsi%d: pci resource invalid\n",
				hba->host->host_no);
		return NULL;
	}

	mem_base_phy = pci_resource_start(pcidev, index);
	length = pci_resource_len(pcidev, index);
	mem_base_virt = ioremap(mem_base_phy, length);

	if (!mem_base_virt) {
		printk(KERN_ERR "scsi%d: Fail to ioremap memory space\n",
				hba->host->host_no);
		return NULL;
	}
	return mem_base_virt;
}

static int hptiop_map_pci_bar_itl(struct hptiop_hba *hba)
{
	struct pci_dev *pcidev = hba->pcidev;

	hba->u.itl.iop = hptiop_map_pci_bar(hba, 0);
	if (hba->u.itl.iop == NULL)
		return -1;
	if ((pcidev->device & 0xff00) == 0x4400) {
		hba->u.itl.plx = hba->u.itl.iop;
		hba->u.itl.iop = hptiop_map_pci_bar(hba, 2);
		if (hba->u.itl.iop == NULL) {
			iounmap(hba->u.itl.plx);
			return -1;
		}
	}
	return 0;
}

static void hptiop_unmap_pci_bar_itl(struct hptiop_hba *hba)
{
	if (hba->u.itl.plx)
		iounmap(hba->u.itl.plx);
	iounmap(hba->u.itl.iop);
}

static int hptiop_map_pci_bar_mv(struct hptiop_hba *hba)
{
	hba->u.mv.regs = hptiop_map_pci_bar(hba, 0);
	if (hba->u.mv.regs == NULL)
		return -1;

	hba->u.mv.mu = hptiop_map_pci_bar(hba, 2);
	if (hba->u.mv.mu == NULL) {
		iounmap(hba->u.mv.regs);
		return -1;
	}

	return 0;
}

static int hptiop_map_pci_bar_mvfrey(struct hptiop_hba *hba)
{
	hba->u.mvfrey.config = hptiop_map_pci_bar(hba, 0);
	if (hba->u.mvfrey.config == NULL)
		return -1;

	hba->u.mvfrey.mu = hptiop_map_pci_bar(hba, 2);
	if (hba->u.mvfrey.mu == NULL) {
		iounmap(hba->u.mvfrey.config);
		return -1;
	}

	return 0;
}

static void hptiop_unmap_pci_bar_mv(struct hptiop_hba *hba)
{
	iounmap(hba->u.mv.regs);
	iounmap(hba->u.mv.mu);
}

static void hptiop_unmap_pci_bar_mvfrey(struct hptiop_hba *hba)
{
	iounmap(hba->u.mvfrey.config);
	iounmap(hba->u.mvfrey.mu);
}

static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg)
{
	dprintk("iop message 0x%x\n", msg);

	if (msg == IOPMU_INBOUND_MSG0_NOP ||
		msg == IOPMU_INBOUND_MSG0_RESET_COMM)
		hba->msg_done = 1;

	if (!hba->initialized)
		return;

	if (msg == IOPMU_INBOUND_MSG0_RESET) {
		atomic_set(&hba->resetting, 0);
		wake_up(&hba->reset_wq);
	}
	else if (msg <= IOPMU_INBOUND_MSG0_MAX)
		hba->msg_done = 1;
}

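/*
 * Free request slots are kept on a singly linked list headed at
 * hba->req_list.  Callers either hold host->host_lock or run
 * single-threaded at probe time, so the list needs no locking of
 * its own.
 */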
static struct hptiop_request *get_req(struct hptiop_hba *hba)
{
	struct hptiop_request *ret;

	dprintk("get_req : req=%p\n", hba->req_list);

	ret = hba->req_list;
	if (ret)
		hba->req_list = ret->next;

	return ret;
}

static void free_req(struct hptiop_hba *hba, struct hptiop_request *req)
{
	dprintk("free_req(%d, %p)\n", req->index, req);
	req->next = hba->req_list;
	hba->req_list = req;
}

static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
				struct hpt_iop_request_scsi_command *req)
{
	struct scsi_cmnd *scp;

	dprintk("hptiop_finish_scsi_req: req=%p, type=%d, "
			"result=%d, context=0x%x tag=%d\n",
			req, req->header.type, req->header.result,
			req->header.context, tag);

	BUG_ON(!req->header.result);
	BUG_ON(req->header.type != cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND));

	scp = hba->reqs[tag].scp;

	if (HPT_SCP(scp)->mapped)
		scsi_dma_unmap(scp);

	switch (le32_to_cpu(req->header.result)) {
	case IOP_RESULT_SUCCESS:
		scsi_set_resid(scp,
			scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
		scp->result = (DID_OK<<16);
		break;
	case IOP_RESULT_BAD_TARGET:
		scp->result = (DID_BAD_TARGET<<16);
		break;
	case IOP_RESULT_BUSY:
		scp->result = (DID_BUS_BUSY<<16);
		break;
	case IOP_RESULT_RESET:
		scp->result = (DID_RESET<<16);
		break;
	case IOP_RESULT_FAIL:
		scp->result = (DID_ERROR<<16);
		break;
	case IOP_RESULT_INVALID_REQUEST:
		scp->result = (DID_ABORT<<16);
		break;
	case IOP_RESULT_CHECK_CONDITION:
		scsi_set_resid(scp,
			scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
		scp->result = SAM_STAT_CHECK_CONDITION;
		memcpy(scp->sense_buffer, &req->sg_list, SCSI_SENSE_BUFFERSIZE);
		goto skip_resid;

	default:
		scp->result = DRIVER_INVALID << 24 | DID_ABORT << 16;
		break;
	}

	scsi_set_resid(scp,
		scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));

skip_resid:
	dprintk("scsi_done(%p)\n", scp);
	scp->scsi_done(scp);
	free_req(hba, &hba->reqs[tag]);
}

static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 _tag)
{
	struct hpt_iop_request_scsi_command *req;
	u32 tag;

	if (hba->iopintf_v2) {
		tag = _tag & ~IOPMU_QUEUE_REQUEST_RESULT_BIT;
		req = hba->reqs[tag].req_virt;
		if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT))
			req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);
	} else {
		tag = _tag;
		req = hba->reqs[tag].req_virt;
	}

	hptiop_finish_scsi_req(hba, tag, req);
}

static void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
{
	struct hpt_iop_request_header __iomem *req;
	struct hpt_iop_request_ioctl_command __iomem *p;
	struct hpt_ioctl_k *arg;

	req = (struct hpt_iop_request_header __iomem *)
			((unsigned long)hba->u.itl.iop + tag);
	dprintk("hptiop_iop_request_callback_itl: req=%p, type=%d, "
			"result=%d, context=0x%x tag=%d\n",
			req, readl(&req->type), readl(&req->result),
			readl(&req->context), tag);

	BUG_ON(!readl(&req->result));
	BUG_ON(readl(&req->type) != IOP_REQUEST_TYPE_IOCTL_COMMAND);

	p = (struct hpt_iop_request_ioctl_command __iomem *)req;
	arg = (struct hpt_ioctl_k *)(unsigned long)
		(readl(&req->context) |
			((u64)readl(&req->context_hi32)<<32));

	if (readl(&req->result) == IOP_RESULT_SUCCESS) {
		arg->result = HPT_IOCTL_RESULT_OK;

		if (arg->outbuf_size)
			memcpy_fromio(arg->outbuf,
				&p->buf[(readl(&p->inbuf_size) + 3) & ~3],
				arg->outbuf_size);

		if (arg->bytes_returned)
			*arg->bytes_returned = arg->outbuf_size;
	}
	else
		arg->result = HPT_IOCTL_RESULT_FAILED;

	arg->done(arg);
	writel(tag, &hba->u.itl.iop->outbound_queue);
}

static irqreturn_t hptiop_intr(int irq, void *dev_id)
{
	struct hptiop_hba *hba = dev_id;
	int handled;
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	handled = hba->ops->iop_intr(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return handled;
}

static int hptiop_buildsgl(struct scsi_cmnd *scp, struct hpt_iopsg *psg)
{
	struct Scsi_Host *host = scp->device->host;
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
	struct scatterlist *sg;
	int idx, nseg;

	nseg = scsi_dma_map(scp);
	BUG_ON(nseg < 0);
	if (!nseg)
		return 0;

	HPT_SCP(scp)->sgcnt = nseg;
	HPT_SCP(scp)->mapped = 1;

	BUG_ON(HPT_SCP(scp)->sgcnt > hba->max_sg_descriptors);

	scsi_for_each_sg(scp, sg, HPT_SCP(scp)->sgcnt, idx) {
		psg[idx].pci_address = cpu_to_le64(sg_dma_address(sg)) |
			hba->ops->host_phy_flag;
		psg[idx].size = cpu_to_le32(sg_dma_len(sg));
		psg[idx].eot = (idx == HPT_SCP(scp)->sgcnt - 1) ?
			cpu_to_le32(1) : 0;
	}
	return HPT_SCP(scp)->sgcnt;
}

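/*
 * Posting a request encodes its DMA address into the family-specific
 * inbound format: ITL writes the shifted 32-byte-aligned physical
 * address (with size-hint bits on v2 interfaces) to the inbound
 * queue register, MV packs address and a size class into a 64-bit
 * ring entry, and MVFrey appends an entry to its inbound list and
 * advances the write pointer.
 */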
static void hptiop_post_req_itl(struct hptiop_hba *hba,
					struct hptiop_request *_req)
{
	struct hpt_iop_request_header *reqhdr = _req->req_virt;

	reqhdr->context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
							(u32)_req->index);
	reqhdr->context_hi32 = 0;

	if (hba->iopintf_v2) {
		u32 size, size_bits;

		size = le32_to_cpu(reqhdr->size);
		if (size < 256)
			size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
		else if (size < 512)
			size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
		else
			size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT |
						IOPMU_QUEUE_ADDR_HOST_BIT;
		writel(_req->req_shifted_phy | size_bits,
			&hba->u.itl.iop->inbound_queue);
	} else
		writel(_req->req_shifted_phy | IOPMU_QUEUE_ADDR_HOST_BIT,
			&hba->u.itl.iop->inbound_queue);
}

static void hptiop_post_req_mv(struct hptiop_hba *hba,
					struct hptiop_request *_req)
{
	struct hpt_iop_request_header *reqhdr = _req->req_virt;
	u32 size, size_bit;

	reqhdr->context = cpu_to_le32(_req->index<<8 |
					IOP_REQUEST_TYPE_SCSI_COMMAND<<5);
	reqhdr->context_hi32 = 0;
	size = le32_to_cpu(reqhdr->size);

	if (size <= 256)
		size_bit = 0;
	else if (size <= 256*2)
		size_bit = 1;
	else if (size <= 256*3)
		size_bit = 2;
	else
		size_bit = 3;

	mv_inbound_write((_req->req_shifted_phy << 5) |
		MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bit, hba);
}

static void hptiop_post_req_mvfrey(struct hptiop_hba *hba,
					struct hptiop_request *_req)
{
	struct hpt_iop_request_header *reqhdr = _req->req_virt;
	u32 index;

	reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT |
			IOP_REQUEST_FLAG_ADDR_BITS |
			((_req->req_shifted_phy >> 11) & 0xffff0000));
	reqhdr->context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
			(_req->index << 4) | reqhdr->type);
	reqhdr->context_hi32 = cpu_to_le32((_req->req_shifted_phy << 5) &
			0xffffffff);

	hba->u.mvfrey.inlist_wptr++;
	index = hba->u.mvfrey.inlist_wptr & 0x3fff;

	if (index == hba->u.mvfrey.list_count) {
		index = 0;
		hba->u.mvfrey.inlist_wptr &= ~0x3fff;
		hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
	}

	hba->u.mvfrey.inlist[index].addr =
			(dma_addr_t)_req->req_shifted_phy << 5;
	hba->u.mvfrey.inlist[index].intrfc_len = (reqhdr->size + 3) / 4;
	writel(hba->u.mvfrey.inlist_wptr,
		&(hba->u.mvfrey.mu->inbound_write_ptr));
	readl(&(hba->u.mvfrey.mu->inbound_write_ptr));
}

static int hptiop_reset_comm_itl(struct hptiop_hba *hba)
{
	return 0;
}

static int hptiop_reset_comm_mv(struct hptiop_hba *hba)
{
	return 0;
}

static int hptiop_reset_comm_mvfrey(struct hptiop_hba *hba)
{
	u32 list_count = hba->u.mvfrey.list_count;

	if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET_COMM, 3000))
		return -1;

	/* wait 100ms for MCU ready */
	msleep(100);

	writel(cpu_to_le32(hba->u.mvfrey.inlist_phy & 0xffffffff),
			&(hba->u.mvfrey.mu->inbound_base));
	writel(cpu_to_le32((hba->u.mvfrey.inlist_phy >> 16) >> 16),
			&(hba->u.mvfrey.mu->inbound_base_high));

	writel(cpu_to_le32(hba->u.mvfrey.outlist_phy & 0xffffffff),
			&(hba->u.mvfrey.mu->outbound_base));
	writel(cpu_to_le32((hba->u.mvfrey.outlist_phy >> 16) >> 16),
			&(hba->u.mvfrey.mu->outbound_base_high));

	writel(cpu_to_le32(hba->u.mvfrey.outlist_cptr_phy & 0xffffffff),
			&(hba->u.mvfrey.mu->outbound_shadow_base));
	writel(cpu_to_le32((hba->u.mvfrey.outlist_cptr_phy >> 16) >> 16),
			&(hba->u.mvfrey.mu->outbound_shadow_base_high));

	hba->u.mvfrey.inlist_wptr = (list_count - 1) | CL_POINTER_TOGGLE;
	*hba->u.mvfrey.outlist_cptr = (list_count - 1) | CL_POINTER_TOGGLE;
	hba->u.mvfrey.outlist_rptr = list_count - 1;
	return 0;
}

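/*
 * queuecommand: grab a free request slot, validate the target,
 * build the S/G table, fill in the firmware request frame and hand
 * it to the family-specific post_req hook.  An invalid target is
 * completed inline with DID_BAD_TARGET; slot exhaustion returns
 * SCSI_MLQUEUE_HOST_BUSY so the midlayer will retry.
 */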
static int hptiop_queuecommand_lck(struct scsi_cmnd *scp,
				void (*done)(struct scsi_cmnd *))
{
	struct Scsi_Host *host = scp->device->host;
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
	struct hpt_iop_request_scsi_command *req;
	int sg_count = 0;
	struct hptiop_request *_req;

	BUG_ON(!done);
	scp->scsi_done = done;

	_req = get_req(hba);
	if (_req == NULL) {
		dprintk("hptiop_queuecmd : no free req\n");
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	_req->scp = scp;

	dprintk("hptiop_queuecmd(scp=%p) %d/%d/%d/%llu cdb=(%08x-%08x-%08x-%08x) "
			"req_index=%d, req=%p\n",
			scp,
			host->host_no, scp->device->channel,
			scp->device->id, scp->device->lun,
			cpu_to_be32(((u32 *)scp->cmnd)[0]),
			cpu_to_be32(((u32 *)scp->cmnd)[1]),
			cpu_to_be32(((u32 *)scp->cmnd)[2]),
			cpu_to_be32(((u32 *)scp->cmnd)[3]),
			_req->index, _req->req_virt);

	scp->result = 0;

	if (scp->device->channel ||
			(scp->device->id > hba->max_devices) ||
			((scp->device->id == (hba->max_devices-1)) && scp->device->lun)) {
		scp->result = DID_BAD_TARGET << 16;
		free_req(hba, _req);
		goto cmd_done;
	}

	req = _req->req_virt;

	/* build S/G table */
	sg_count = hptiop_buildsgl(scp, req->sg_list);
	if (!sg_count)
		HPT_SCP(scp)->mapped = 0;

	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND);
	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
	req->dataxfer_length = cpu_to_le32(scsi_bufflen(scp));
	req->channel = scp->device->channel;
	req->target = scp->device->id;
	req->lun = scp->device->lun;
	req->header.size = cpu_to_le32(
				sizeof(struct hpt_iop_request_scsi_command)
				- sizeof(struct hpt_iopsg)
				+ sg_count * sizeof(struct hpt_iopsg));

	memcpy(req->cdb, scp->cmnd, sizeof(req->cdb));
	hba->ops->post_req(hba, _req);
	return 0;

cmd_done:
	dprintk("scsi_done(scp=%p)\n", scp);
	scp->scsi_done(scp);
	return 0;
}

static DEF_SCSI_QCMD(hptiop_queuecommand)

static const char *hptiop_info(struct Scsi_Host *host)
{
	return driver_name_long;
}

static int hptiop_reset_hba(struct hptiop_hba *hba)
{
	if (atomic_xchg(&hba->resetting, 1) == 0) {
		atomic_inc(&hba->reset_count);
		hba->ops->post_msg(hba, IOPMU_INBOUND_MSG0_RESET);
	}

	wait_event_timeout(hba->reset_wq,
			atomic_read(&hba->resetting) == 0, 60 * HZ);

	if (atomic_read(&hba->resetting)) {
		/* IOP is in unknown state, abort reset */
		printk(KERN_ERR "scsi%d: reset failed\n", hba->host->host_no);
		return -1;
	}

	if (iop_send_sync_msg(hba,
		IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
		dprintk("scsi%d: fail to start background task\n",
				hba->host->host_no);
	}

	return 0;
}

static int hptiop_reset(struct scsi_cmnd *scp)
{
	struct Scsi_Host *host = scp->device->host;
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

	printk(KERN_WARNING "hptiop_reset(%d/%d/%d) scp=%p\n",
			scp->device->host->host_no, scp->device->channel,
			scp->device->id, scp);

	return hptiop_reset_hba(hba)? FAILED : SUCCESS;
}

static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev,
					  int queue_depth)
{
	struct hptiop_hba *hba = (struct hptiop_hba *)sdev->host->hostdata;

	if (queue_depth > hba->max_requests)
		queue_depth = hba->max_requests;
	return scsi_change_queue_depth(sdev, queue_depth);
}

static ssize_t hptiop_show_version(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", driver_ver);
}

static ssize_t hptiop_show_fw_version(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *host = class_to_shost(dev);
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

	return snprintf(buf, PAGE_SIZE, "%d.%d.%d.%d\n",
				hba->firmware_version >> 24,
				(hba->firmware_version >> 16) & 0xff,
				(hba->firmware_version >> 8) & 0xff,
				hba->firmware_version & 0xff);
}

static struct device_attribute hptiop_attr_version = {
	.attr = {
		.name = "driver-version",
		.mode = S_IRUGO,
	},
	.show = hptiop_show_version,
};

static struct device_attribute hptiop_attr_fw_version = {
	.attr = {
		.name = "firmware-version",
		.mode = S_IRUGO,
	},
	.show = hptiop_show_fw_version,
};

static struct device_attribute *hptiop_attrs[] = {
	&hptiop_attr_version,
	&hptiop_attr_fw_version,
	NULL
};

static int hptiop_slave_config(struct scsi_device *sdev)
{
	if (sdev->type == TYPE_TAPE)
		blk_queue_max_hw_sectors(sdev->request_queue, 8192);

	return 0;
}

static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = driver_name,
	.queuecommand = hptiop_queuecommand,
	.eh_device_reset_handler = hptiop_reset,
	.eh_bus_reset_handler = hptiop_reset,
	.info = hptiop_info,
	.emulated = 0,
	.use_clustering = ENABLE_CLUSTERING,
	.proc_name = driver_name,
	.shost_attrs = hptiop_attrs,
	.slave_configure = hptiop_slave_config,
	.this_id = -1,
	.change_queue_depth = hptiop_adjust_disk_queue_depth,
};

static int hptiop_internal_memalloc_itl(struct hptiop_hba *hba)
{
	return 0;
}

static int hptiop_internal_memalloc_mv(struct hptiop_hba *hba)
{
	hba->u.mv.internal_req = dma_alloc_coherent(&hba->pcidev->dev,
			0x800, &hba->u.mv.internal_req_phy, GFP_KERNEL);
	if (hba->u.mv.internal_req)
		return 0;
	else
		return -1;
}

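/*
 * MVFrey internal memory is one coherent allocation laid out as: a
 * 0x800-byte internal request frame, the inbound descriptor list,
 * the outbound descriptor list (list_count entries each) and a
 * trailing 32-bit outbound copy pointer updated by the firmware.
 * list_count itself comes from the top 16 bits of inbound_conf_ctl.
 */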
static int hptiop_internal_memalloc_mvfrey(struct hptiop_hba *hba)
{
	u32 list_count = readl(&hba->u.mvfrey.mu->inbound_conf_ctl);
	char *p;
	dma_addr_t phy;

	BUG_ON(hba->max_request_size == 0);

	if (list_count == 0) {
		BUG_ON(1);
		return -1;
	}

	list_count >>= 16;

	hba->u.mvfrey.list_count = list_count;
	hba->u.mvfrey.internal_mem_size = 0x800 +
			list_count * sizeof(struct mvfrey_inlist_entry) +
			list_count * sizeof(struct mvfrey_outlist_entry) +
			sizeof(int);

	p = dma_alloc_coherent(&hba->pcidev->dev,
			hba->u.mvfrey.internal_mem_size, &phy, GFP_KERNEL);
	if (!p)
		return -1;

	hba->u.mvfrey.internal_req.req_virt = p;
	hba->u.mvfrey.internal_req.req_shifted_phy = phy >> 5;
	hba->u.mvfrey.internal_req.scp = NULL;
	hba->u.mvfrey.internal_req.next = NULL;

	p += 0x800;
	phy += 0x800;

	hba->u.mvfrey.inlist = (struct mvfrey_inlist_entry *)p;
	hba->u.mvfrey.inlist_phy = phy;

	p += list_count * sizeof(struct mvfrey_inlist_entry);
	phy += list_count * sizeof(struct mvfrey_inlist_entry);

	hba->u.mvfrey.outlist = (struct mvfrey_outlist_entry *)p;
	hba->u.mvfrey.outlist_phy = phy;

	p += list_count * sizeof(struct mvfrey_outlist_entry);
	phy += list_count * sizeof(struct mvfrey_outlist_entry);

	hba->u.mvfrey.outlist_cptr = (__le32 *)p;
	hba->u.mvfrey.outlist_cptr_phy = phy;

	return 0;
}

static int hptiop_internal_memfree_itl(struct hptiop_hba *hba)
{
	return 0;
}

static int hptiop_internal_memfree_mv(struct hptiop_hba *hba)
{
	if (hba->u.mv.internal_req) {
		dma_free_coherent(&hba->pcidev->dev, 0x800,
			hba->u.mv.internal_req, hba->u.mv.internal_req_phy);
		return 0;
	} else
		return -1;
}

static int hptiop_internal_memfree_mvfrey(struct hptiop_hba *hba)
{
	if (hba->u.mvfrey.internal_req.req_virt) {
		dma_free_coherent(&hba->pcidev->dev,
			hba->u.mvfrey.internal_mem_size,
			hba->u.mvfrey.internal_req.req_virt,
			(dma_addr_t)
			hba->u.mvfrey.internal_req.req_shifted_phy << 5);
		return 0;
	} else
		return -1;
}

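/*
 * Probe sequence: enable the PCI device and DMA mask, map the BARs,
 * wait for firmware, run GET_CONFIG/SET_CONFIG, carve out one
 * 32-byte-aligned coherent buffer per request slot, then enable
 * interrupts, start the firmware background task and register the
 * SCSI host.  MV allocates its internal request buffer before
 * GET_CONFIG, while MVFrey does so (and resets its queues)
 * afterwards because it needs values returned by GET_CONFIG.
 */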
6f039790 | 1289 | static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id) |
ede1e6f8 HLT |
1290 | { |
1291 | struct Scsi_Host *host = NULL; | |
1292 | struct hptiop_hba *hba; | |
23f0bb47 | 1293 | struct hptiop_adapter_ops *iop_ops; |
ede1e6f8 HLT |
1294 | struct hpt_iop_request_get_config iop_config; |
1295 | struct hpt_iop_request_set_config set_config; | |
1296 | dma_addr_t start_phy; | |
1297 | void *start_virt; | |
1298 | u32 offset, i, req_size; | |
1299 | ||
1300 | dprintk("hptiop_probe(%p)\n", pcidev); | |
1301 | ||
1302 | if (pci_enable_device(pcidev)) { | |
1303 | printk(KERN_ERR "hptiop: fail to enable pci device\n"); | |
1304 | return -ENODEV; | |
1305 | } | |
1306 | ||
1307 | printk(KERN_INFO "adapter at PCI %d:%d:%d, IRQ %d\n", | |
1308 | pcidev->bus->number, pcidev->devfn >> 3, pcidev->devfn & 7, | |
1309 | pcidev->irq); | |
1310 | ||
1311 | pci_set_master(pcidev); | |
1312 | ||
1313 | /* Enable 64bit DMA if possible */ | |
23f0bb47 HLT |
1314 | iop_ops = (struct hptiop_adapter_ops *)id->driver_data; |
1315 | if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(iop_ops->hw_dma_bit_mask))) { | |
284901a9 | 1316 | if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(32))) { |
ede1e6f8 HLT |
1317 | printk(KERN_ERR "hptiop: fail to set dma_mask\n"); |
1318 | goto disable_pci_device; | |
1319 | } | |
1320 | } | |
1321 | ||
1322 | if (pci_request_regions(pcidev, driver_name)) { | |
1323 | printk(KERN_ERR "hptiop: pci_request_regions failed\n"); | |
1324 | goto disable_pci_device; | |
1325 | } | |
1326 | ||
1327 | host = scsi_host_alloc(&driver_template, sizeof(struct hptiop_hba)); | |
1328 | if (!host) { | |
1329 | printk(KERN_ERR "hptiop: fail to alloc scsi host\n"); | |
1330 | goto free_pci_regions; | |
1331 | } | |
1332 | ||
1333 | hba = (struct hptiop_hba *)host->hostdata; | |
a93429c3 | 1334 | memset(hba, 0, sizeof(struct hptiop_hba)); |
ede1e6f8 | 1335 | |
23f0bb47 | 1336 | hba->ops = iop_ops; |
ede1e6f8 HLT |
1337 | hba->pcidev = pcidev; |
1338 | hba->host = host; | |
1339 | hba->initialized = 0; | |
db9b6e89 | 1340 | hba->iopintf_v2 = 0; |
ede1e6f8 HLT |
1341 | |
1342 | atomic_set(&hba->resetting, 0); | |
1343 | atomic_set(&hba->reset_count, 0); | |
1344 | ||
1345 | init_waitqueue_head(&hba->reset_wq); | |
1346 | init_waitqueue_head(&hba->ioctl_wq); | |
1347 | ||
a93429c3 | 1348 | host->max_lun = 128; |
ede1e6f8 HLT |
1349 | host->max_channel = 0; |
1350 | host->io_port = 0; | |
1351 | host->n_io_port = 0; | |
1352 | host->irq = pcidev->irq; | |
1353 | ||
00f59701 | 1354 | if (hba->ops->map_pci_bar(hba)) |
ede1e6f8 HLT |
1355 | goto free_scsi_host; |
1356 | ||
00f59701 | 1357 | if (hba->ops->iop_wait_ready(hba, 20000)) { |
ede1e6f8 HLT |
1358 | printk(KERN_ERR "scsi%d: firmware not ready\n", |
1359 | hba->host->host_no); | |
1360 | goto unmap_pci_bar; | |
1361 | } | |
1362 | ||
286aa031 | 1363 | if (hba->ops->family == MV_BASED_IOP) { |
00f59701 HLT |
1364 | if (hba->ops->internal_memalloc(hba)) { |
1365 | printk(KERN_ERR "scsi%d: internal_memalloc failed\n", | |
1366 | hba->host->host_no); | |
1367 | goto unmap_pci_bar; | |
1368 | } | |
1369 | } | |
1370 | ||
1371 | if (hba->ops->get_config(hba, &iop_config)) { | |
ede1e6f8 HLT |
1372 | printk(KERN_ERR "scsi%d: get config failed\n", |
1373 | hba->host->host_no); | |
1374 | goto unmap_pci_bar; | |
1375 | } | |
1376 | ||
1377 | hba->max_requests = min(le32_to_cpu(iop_config.max_requests), | |
1378 | HPTIOP_MAX_REQUESTS); | |
1379 | hba->max_devices = le32_to_cpu(iop_config.max_devices); | |
1380 | hba->max_request_size = le32_to_cpu(iop_config.request_size); | |
1381 | hba->max_sg_descriptors = le32_to_cpu(iop_config.max_sg_count); | |
1382 | hba->firmware_version = le32_to_cpu(iop_config.firmware_version); | |
db9b6e89 | 1383 | hba->interface_version = le32_to_cpu(iop_config.interface_version); |
ede1e6f8 HLT |
1384 | hba->sdram_size = le32_to_cpu(iop_config.sdram_size); |
1385 | ||
286aa031 HLT |
1386 | if (hba->ops->family == MVFREY_BASED_IOP) { |
1387 | if (hba->ops->internal_memalloc(hba)) { | |
1388 | printk(KERN_ERR "scsi%d: internal_memalloc failed\n", | |
1389 | hba->host->host_no); | |
1390 | goto unmap_pci_bar; | |
1391 | } | |
1392 | if (hba->ops->reset_comm(hba)) { | |
1393 | printk(KERN_ERR "scsi%d: reset comm failed\n", | |
1394 | hba->host->host_no); | |
1395 | goto unmap_pci_bar; | |
1396 | } | |
1397 | } | |
1398 | ||
db9b6e89 HLT |
1399 | if (hba->firmware_version > 0x01020000 || |
1400 | hba->interface_version > 0x01020000) | |
1401 | hba->iopintf_v2 = 1; | |
1402 | ||
ede1e6f8 HLT |
1403 | host->max_sectors = le32_to_cpu(iop_config.data_transfer_length) >> 9; |
1404 | host->max_id = le32_to_cpu(iop_config.max_devices); | |
1405 | host->sg_tablesize = le32_to_cpu(iop_config.max_sg_count); | |
1406 | host->can_queue = le32_to_cpu(iop_config.max_requests); | |
1407 | host->cmd_per_lun = le32_to_cpu(iop_config.max_requests); | |
1408 | host->max_cmd_len = 16; | |
1409 | ||
db9b6e89 HLT |
1410 | req_size = sizeof(struct hpt_iop_request_scsi_command) |
1411 | + sizeof(struct hpt_iopsg) * (hba->max_sg_descriptors - 1); | |
1412 | if ((req_size & 0x1f) != 0) | |
1413 | req_size = (req_size + 0x1f) & ~0x1f; | |
1414 | ||
1415 | memset(&set_config, 0, sizeof(struct hpt_iop_request_set_config)); | |
ede1e6f8 | 1416 | set_config.iop_id = cpu_to_le32(host->host_no); |
db9b6e89 HLT |
1417 | set_config.vbus_id = cpu_to_le16(host->host_no); |
1418 | set_config.max_host_request_size = cpu_to_le16(req_size); | |
ede1e6f8 | 1419 | |
00f59701 | 1420 | if (hba->ops->set_config(hba, &set_config)) { |
ede1e6f8 HLT |
1421 | printk(KERN_ERR "scsi%d: set config failed\n", |
1422 | hba->host->host_no); | |
1423 | goto unmap_pci_bar; | |
1424 | } | |
1425 | ||
ede1e6f8 HLT |
1426 | pci_set_drvdata(pcidev, host); |
1427 | ||
1d6f359a | 1428 | if (request_irq(pcidev->irq, hptiop_intr, IRQF_SHARED, |
ede1e6f8 HLT |
1429 | driver_name, hba)) { |
1430 | printk(KERN_ERR "scsi%d: request irq %d failed\n", | |
1431 | hba->host->host_no, pcidev->irq); | |
3e74051b | 1432 | goto unmap_pci_bar; |
ede1e6f8 HLT |
1433 | } |
1434 | ||
1435 | /* Allocate request mem */ | |
ede1e6f8 HLT |
1436 | |
1437 | dprintk("req_size=%d, max_requests=%d\n", req_size, hba->max_requests); | |
1438 | ||
1439 | hba->req_size = req_size; | |
a93429c3 | 1440 | hba->req_list = NULL; |
ede1e6f8 | 1441 | |
a93429c3 | 1442 | for (i = 0; i < hba->max_requests; i++) { |
1443 | start_virt = dma_alloc_coherent(&pcidev->dev, | |
1444 | hba->req_size + 0x20, | |
1445 | &start_phy, GFP_KERNEL); | |
1446 | ||
1447 | if (!start_virt) { | |
1448 | printk(KERN_ERR "scsi%d: fail to alloc request mem\n", | |
1449 | hba->host->host_no); | |
1450 | goto free_request_mem; | |
1451 | } | |
ede1e6f8 | 1452 | |
a93429c3 | 1453 | hba->dma_coherent[i] = start_virt; |
1454 | hba->dma_coherent_handle[i] = start_phy; | |
ede1e6f8 | 1455 | |
a93429c3 | 1456 | if ((start_phy & 0x1f) != 0) { |
1457 | offset = ((start_phy + 0x1f) & ~0x1f) - start_phy; | |
1458 | start_phy += offset; | |
1459 | start_virt += offset; | |
1460 | } | |
ede1e6f8 | 1461 | |
ede1e6f8 HLT |
1462 | hba->reqs[i].next = NULL; |
1463 | hba->reqs[i].req_virt = start_virt; | |
1464 | hba->reqs[i].req_shifted_phy = start_phy >> 5; | |
1465 | hba->reqs[i].index = i; | |
1466 | free_req(hba, &hba->reqs[i]); | |
ede1e6f8 HLT |
1467 | } |
1468 | ||
1469 | /* Enable Interrupt and start background task */ | |
1470 | if (hptiop_initialize_iop(hba)) | |
1471 | goto free_request_mem; | |
1472 | ||
3e74051b CH |
1473 | if (scsi_add_host(host, &pcidev->dev)) { |
1474 | printk(KERN_ERR "scsi%d: scsi_add_host failed\n", | |
1475 | hba->host->host_no); | |
1476 | goto free_request_mem; | |
1477 | } | |
1478 | ||
ede1e6f8 HLT |
1479 | scsi_scan_host(host); |
1480 | ||
1481 | dprintk("scsi%d: hptiop_probe successfully\n", hba->host->host_no); | |
1482 | return 0; | |
1483 | ||
1484 | free_request_mem: | |
a93429c3 | 1485 | for (i = 0; i < hba->max_requests; i++) { |
1486 | if (hba->dma_coherent[i] && hba->dma_coherent_handle[i]) | |
1487 | dma_free_coherent(&hba->pcidev->dev, | |
1488 | hba->req_size + 0x20, | |
1489 | hba->dma_coherent[i], | |
1490 | hba->dma_coherent_handle[i]); | |
1491 | else | |
1492 | break; | |
1493 | } | |
ede1e6f8 | 1494 | |
ede1e6f8 HLT |
1495 | free_irq(hba->pcidev->irq, hba); |
1496 | ||
ede1e6f8 | 1497 | unmap_pci_bar: |
286aa031 | 1498 | hba->ops->internal_memfree(hba); |
ede1e6f8 | 1499 | |
00f59701 | 1500 | hba->ops->unmap_pci_bar(hba); |
ede1e6f8 HLT |
1501 | |
1502 | free_scsi_host: | |
1503 | scsi_host_put(host); | |
1504 | ||
00f59701 HLT |
1505 | free_pci_regions: |
1506 | pci_release_regions(pcidev); | |
1507 | ||
ede1e6f8 HLT |
1508 | disable_pci_device: |
1509 | pci_disable_device(pcidev); | |
1510 | ||
1db90ea2 | 1511 | dprintk("scsi%d: hptiop_probe fail\n", host ? host->host_no : 0); |
ede1e6f8 HLT |
1512 | return -ENODEV; |
1513 | } | |
static void hptiop_shutdown(struct pci_dev *pcidev)
{
	struct Scsi_Host *host = pci_get_drvdata(pcidev);
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

	dprintk("hptiop_shutdown(%p)\n", hba);

	/* stop the iop */
	if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
		printk(KERN_ERR "scsi%d: iop shutdown timed out\n",
				hba->host->host_no);

	/* disable all outbound interrupts */
	hba->ops->disable_intr(hba);
}
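
/*
 * Per-family interrupt masking.  Each variant reads the register back
 * after writing so the posted PCI write is flushed and the mask is in
 * effect before the function returns.
 */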
static void hptiop_disable_intr_itl(struct hptiop_hba *hba)
{
	u32 int_mask;

	int_mask = readl(&hba->u.itl.iop->outbound_intmask);
	writel(int_mask |
		IOPMU_OUTBOUND_INT_MSG0 | IOPMU_OUTBOUND_INT_POSTQUEUE,
		&hba->u.itl.iop->outbound_intmask);
	readl(&hba->u.itl.iop->outbound_intmask);
}

static void hptiop_disable_intr_mv(struct hptiop_hba *hba)
{
	writel(0, &hba->u.mv.regs->outbound_intmask);
	readl(&hba->u.mv.regs->outbound_intmask);
}

static void hptiop_disable_intr_mvfrey(struct hptiop_hba *hba)
{
	writel(0, &(hba->u.mvfrey.mu->f0_doorbell_enable));
	readl(&(hba->u.mvfrey.mu->f0_doorbell_enable));
	writel(0, &(hba->u.mvfrey.mu->isr_enable));
	readl(&(hba->u.mvfrey.mu->isr_enable));
	writel(0, &(hba->u.mvfrey.mu->pcie_f0_int_enable));
	readl(&(hba->u.mvfrey.mu->pcie_f0_int_enable));
}
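
/*
 * Device teardown mirrors probe in reverse: detach the SCSI host first so
 * no new commands arrive, quiesce the IOP, release the IRQ and request
 * buffers, then unmap and release the PCI resources.
 */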
static void hptiop_remove(struct pci_dev *pcidev)
{
	struct Scsi_Host *host = pci_get_drvdata(pcidev);
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
	u32 i;

	dprintk("scsi%d: hptiop_remove\n", hba->host->host_no);

	scsi_remove_host(host);

	hptiop_shutdown(pcidev);

	free_irq(hba->pcidev->irq, hba);

	for (i = 0; i < hba->max_requests; i++) {
		if (hba->dma_coherent[i] && hba->dma_coherent_handle[i])
			dma_free_coherent(&hba->pcidev->dev,
					hba->req_size + 0x20,
					hba->dma_coherent[i],
					hba->dma_coherent_handle[i]);
		else
			break;
	}

	hba->ops->internal_memfree(hba);

	hba->ops->unmap_pci_bar(hba);

	pci_release_regions(hba->pcidev);
	pci_set_drvdata(hba->pcidev, NULL);
	pci_disable_device(hba->pcidev);

	scsi_host_put(host);
}
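
/*
 * Per-family method tables.  The PCI id table below stores a pointer to
 * one of these in driver_data, and the probe path dispatches every
 * chip-specific operation (BAR mapping, interrupt control, request
 * posting) through hba->ops.  Note the Marvell-based parts are limited
 * to a 33-bit DMA mask, while the Intel- and Frey-based parts support
 * 64-bit addressing.
 */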
static struct hptiop_adapter_ops hptiop_itl_ops = {
	.family = INTEL_BASED_IOP,
	.iop_wait_ready = iop_wait_ready_itl,
	.internal_memalloc = hptiop_internal_memalloc_itl,
	.internal_memfree = hptiop_internal_memfree_itl,
	.map_pci_bar = hptiop_map_pci_bar_itl,
	.unmap_pci_bar = hptiop_unmap_pci_bar_itl,
	.enable_intr = hptiop_enable_intr_itl,
	.disable_intr = hptiop_disable_intr_itl,
	.get_config = iop_get_config_itl,
	.set_config = iop_set_config_itl,
	.iop_intr = iop_intr_itl,
	.post_msg = hptiop_post_msg_itl,
	.post_req = hptiop_post_req_itl,
	.hw_dma_bit_mask = 64,
	.reset_comm = hptiop_reset_comm_itl,
	.host_phy_flag = cpu_to_le64(0),
};

static struct hptiop_adapter_ops hptiop_mv_ops = {
	.family = MV_BASED_IOP,
	.iop_wait_ready = iop_wait_ready_mv,
	.internal_memalloc = hptiop_internal_memalloc_mv,
	.internal_memfree = hptiop_internal_memfree_mv,
	.map_pci_bar = hptiop_map_pci_bar_mv,
	.unmap_pci_bar = hptiop_unmap_pci_bar_mv,
	.enable_intr = hptiop_enable_intr_mv,
	.disable_intr = hptiop_disable_intr_mv,
	.get_config = iop_get_config_mv,
	.set_config = iop_set_config_mv,
	.iop_intr = iop_intr_mv,
	.post_msg = hptiop_post_msg_mv,
	.post_req = hptiop_post_req_mv,
	.hw_dma_bit_mask = 33,
	.reset_comm = hptiop_reset_comm_mv,
	.host_phy_flag = cpu_to_le64(0),
};

static struct hptiop_adapter_ops hptiop_mvfrey_ops = {
	.family = MVFREY_BASED_IOP,
	.iop_wait_ready = iop_wait_ready_mvfrey,
	.internal_memalloc = hptiop_internal_memalloc_mvfrey,
	.internal_memfree = hptiop_internal_memfree_mvfrey,
	.map_pci_bar = hptiop_map_pci_bar_mvfrey,
	.unmap_pci_bar = hptiop_unmap_pci_bar_mvfrey,
	.enable_intr = hptiop_enable_intr_mvfrey,
	.disable_intr = hptiop_disable_intr_mvfrey,
	.get_config = iop_get_config_mvfrey,
	.set_config = iop_set_config_mvfrey,
	.iop_intr = iop_intr_mvfrey,
	.post_msg = hptiop_post_msg_mvfrey,
	.post_req = hptiop_post_req_mvfrey,
	.hw_dma_bit_mask = 64,
	.reset_comm = hptiop_reset_comm_mvfrey,
	.host_phy_flag = cpu_to_le64(1),
};

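/*
 * Supported devices.  driver_data carries the hptiop_adapter_ops table
 * for each chip family; hptiop_probe() picks it up from the matching
 * pci_device_id entry.
 */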
static const struct pci_device_id hptiop_id_table[] = {
	{ PCI_VDEVICE(TTI, 0x3220), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3320), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3410), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3510), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3511), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3520), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3521), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3522), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3530), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3540), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3560), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4210), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4211), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4310), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4311), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4320), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4321), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4322), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4400), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3120), (kernel_ulong_t)&hptiop_mv_ops },
	{ PCI_VDEVICE(TTI, 0x3122), (kernel_ulong_t)&hptiop_mv_ops },
	{ PCI_VDEVICE(TTI, 0x3020), (kernel_ulong_t)&hptiop_mv_ops },
	{ PCI_VDEVICE(TTI, 0x4520), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x4522), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3610), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3611), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3620), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3622), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3640), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3660), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3680), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3690), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{},
};

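/*
 * Exporting the id table as module alias information lets udev/modprobe
 * autoload the driver when a matching PCI device is discovered.
 */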
MODULE_DEVICE_TABLE(pci, hptiop_id_table);

static struct pci_driver hptiop_pci_driver = {
	.name = driver_name,
	.id_table = hptiop_id_table,
	.probe = hptiop_probe,
	.remove = hptiop_remove,
	.shutdown = hptiop_shutdown,
};

static int __init hptiop_module_init(void)
{
	printk(KERN_INFO "%s %s\n", driver_name_long, driver_ver);
	return pci_register_driver(&hptiop_pci_driver);
}

static void __exit hptiop_module_exit(void)
{
	pci_unregister_driver(&hptiop_pci_driver);
}

module_init(hptiop_module_init);
module_exit(hptiop_module_exit);

MODULE_LICENSE("GPL");