// SPDX-License-Identifier: GPL-2.0-only
/*
 * HighPoint RR3xxx/4xxx controller driver for Linux
 * Copyright (C) 2006-2015 HighPoint Technologies, Inc. All Rights Reserved.
 *
 * Please report bugs/comments/suggestions to [email protected]
 *
 * For more information, visit http://www.highpoint-tech.com
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/uaccess.h>

#include <asm/io.h>
#include <asm/div64.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>

#include "hptiop.h"

MODULE_AUTHOR("HighPoint Technologies, Inc.");
MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx/4xxx Controller Driver");

static char driver_name[] = "hptiop";
static const char driver_name_long[] = "RocketRAID 3xxx/4xxx Controller driver";
static const char driver_ver[] = "v1.10.0";

static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec);
static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
				struct hpt_iop_request_scsi_command *req);
static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 tag);
static void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag);
static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg);

static int iop_wait_ready_itl(struct hptiop_hba *hba, u32 millisec)
{
	u32 req = 0;
	int i;

	for (i = 0; i < millisec; i++) {
		req = readl(&hba->u.itl.iop->inbound_queue);
		if (req != IOPMU_QUEUE_EMPTY)
			break;
		msleep(1);
	}

	if (req != IOPMU_QUEUE_EMPTY) {
		writel(req, &hba->u.itl.iop->outbound_queue);
		readl(&hba->u.itl.iop->outbound_intstatus);
		return 0;
	}

	return -1;
}

static int iop_wait_ready_mv(struct hptiop_hba *hba, u32 millisec)
{
	return iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec);
}

static int iop_wait_ready_mvfrey(struct hptiop_hba *hba, u32 millisec)
{
	return iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec);
}

static void hptiop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
{
	if (tag & IOPMU_QUEUE_ADDR_HOST_BIT)
		hptiop_host_request_callback_itl(hba,
				tag & ~IOPMU_QUEUE_ADDR_HOST_BIT);
	else
		hptiop_iop_request_callback_itl(hba, tag);
}

static void hptiop_drain_outbound_queue_itl(struct hptiop_hba *hba)
{
	u32 req;

	while ((req = readl(&hba->u.itl.iop->outbound_queue)) !=
						IOPMU_QUEUE_EMPTY) {

		if (req & IOPMU_QUEUE_MASK_HOST_BITS)
			hptiop_request_callback_itl(hba, req);
		else {
			struct hpt_iop_request_header __iomem *p;

			p = (struct hpt_iop_request_header __iomem *)
				((char __iomem *)hba->u.itl.iop + req);

			if (readl(&p->flags) & IOP_REQUEST_FLAG_SYNC_REQUEST) {
				if (readl(&p->context))
					hptiop_request_callback_itl(hba, req);
				else
					writel(1, &p->context);
			}
			else
				hptiop_request_callback_itl(hba, req);
		}
	}
}

static int iop_intr_itl(struct hptiop_hba *hba)
{
	struct hpt_iopmu_itl __iomem *iop = hba->u.itl.iop;
	void __iomem *plx = hba->u.itl.plx;
	u32 status;
	int ret = 0;

	if (plx && readl(plx + 0x11C5C) & 0xf)
		writel(1, plx + 0x11C60);

	status = readl(&iop->outbound_intstatus);

	if (status & IOPMU_OUTBOUND_INT_MSG0) {
		u32 msg = readl(&iop->outbound_msgaddr0);

		dprintk("received outbound msg %x\n", msg);
		writel(IOPMU_OUTBOUND_INT_MSG0, &iop->outbound_intstatus);
		hptiop_message_callback(hba, msg);
		ret = 1;
	}

	if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
		hptiop_drain_outbound_queue_itl(hba);
		ret = 1;
	}

	return ret;
}

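/*
 * The MV message unit exchanges 64-bit descriptors through small circular
 * queues whose head/tail indices live in device registers, so every index
 * update below goes through readl()/writel().
 */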
static u64 mv_outbound_read(struct hpt_iopmu_mv __iomem *mu)
{
	u32 outbound_tail = readl(&mu->outbound_tail);
	u32 outbound_head = readl(&mu->outbound_head);

	if (outbound_tail != outbound_head) {
		u64 p;

		memcpy_fromio(&p, &mu->outbound_q[mu->outbound_tail], 8);
		outbound_tail++;

		if (outbound_tail == MVIOP_QUEUE_LEN)
			outbound_tail = 0;
		writel(outbound_tail, &mu->outbound_tail);
		return p;
	} else
		return 0;
}

static void mv_inbound_write(u64 p, struct hptiop_hba *hba)
{
	u32 inbound_head = readl(&hba->u.mv.mu->inbound_head);
	u32 head = inbound_head + 1;

	if (head == MVIOP_QUEUE_LEN)
		head = 0;

	memcpy_toio(&hba->u.mv.mu->inbound_q[inbound_head], &p, 8);
	writel(head, &hba->u.mv.mu->inbound_head);
	writel(MVIOP_MU_INBOUND_INT_POSTQUEUE,
		&hba->u.mv.regs->inbound_doorbell);
}

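/*
 * MV completion tags carry the request type in bits 5-7 and, for SCSI
 * commands, the request index from bit 8 upward; separate flag bits mark
 * returned-context descriptors and successful completion.
 */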
static void hptiop_request_callback_mv(struct hptiop_hba *hba, u64 tag)
{
	u32 req_type = (tag >> 5) & 0x7;
	struct hpt_iop_request_scsi_command *req;

	dprintk("hptiop_request_callback_mv: tag=%llx\n", tag);

	BUG_ON((tag & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) == 0);

	switch (req_type) {
	case IOP_REQUEST_TYPE_GET_CONFIG:
	case IOP_REQUEST_TYPE_SET_CONFIG:
		hba->msg_done = 1;
		break;

	case IOP_REQUEST_TYPE_SCSI_COMMAND:
		req = hba->reqs[tag >> 8].req_virt;
		if (likely(tag & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT))
			req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);

		hptiop_finish_scsi_req(hba, tag>>8, req);
		break;

	default:
		break;
	}
}

static int iop_intr_mv(struct hptiop_hba *hba)
{
	u32 status;
	int ret = 0;

	status = readl(&hba->u.mv.regs->outbound_doorbell);
	writel(~status, &hba->u.mv.regs->outbound_doorbell);

	if (status & MVIOP_MU_OUTBOUND_INT_MSG) {
		u32 msg;

		msg = readl(&hba->u.mv.mu->outbound_msg);
		dprintk("received outbound msg %x\n", msg);
		hptiop_message_callback(hba, msg);
		ret = 1;
	}

	if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) {
		u64 tag;

		while ((tag = mv_outbound_read(hba->u.mv.mu)))
			hptiop_request_callback_mv(hba, tag);
		ret = 1;
	}

	return ret;
}

static void hptiop_request_callback_mvfrey(struct hptiop_hba *hba, u32 _tag)
{
	u32 req_type = _tag & 0xf;
	struct hpt_iop_request_scsi_command *req;

	switch (req_type) {
	case IOP_REQUEST_TYPE_GET_CONFIG:
	case IOP_REQUEST_TYPE_SET_CONFIG:
		hba->msg_done = 1;
		break;

	case IOP_REQUEST_TYPE_SCSI_COMMAND:
		req = hba->reqs[(_tag >> 4) & 0xff].req_virt;
		if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT))
			req->header.result = IOP_RESULT_SUCCESS;
		hptiop_finish_scsi_req(hba, (_tag >> 4) & 0xff, req);
		break;

	default:
		break;
	}
}

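/*
 * mvfrey completions are consumed from an outbound list in host memory:
 * the firmware publishes its producer index through *outlist_cptr and the
 * driver chases it with outlist_rptr, re-reading the shadow copy until no
 * new entries appear.
 */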
static int iop_intr_mvfrey(struct hptiop_hba *hba)
{
	u32 _tag, status, cptr, cur_rptr;
	int ret = 0;

	if (hba->initialized)
		writel(0, &(hba->u.mvfrey.mu->pcie_f0_int_enable));

	status = readl(&(hba->u.mvfrey.mu->f0_doorbell));
	if (status) {
		writel(status, &(hba->u.mvfrey.mu->f0_doorbell));
		if (status & CPU_TO_F0_DRBL_MSG_BIT) {
			u32 msg = readl(&(hba->u.mvfrey.mu->cpu_to_f0_msg_a));
			dprintk("received outbound msg %x\n", msg);
			hptiop_message_callback(hba, msg);
		}
		ret = 1;
	}

	status = readl(&(hba->u.mvfrey.mu->isr_cause));
	if (status) {
		writel(status, &(hba->u.mvfrey.mu->isr_cause));
		do {
			cptr = *hba->u.mvfrey.outlist_cptr & 0xff;
			cur_rptr = hba->u.mvfrey.outlist_rptr;
			while (cur_rptr != cptr) {
				cur_rptr++;
				if (cur_rptr == hba->u.mvfrey.list_count)
					cur_rptr = 0;

				_tag = hba->u.mvfrey.outlist[cur_rptr].val;
				BUG_ON(!(_tag & IOPMU_QUEUE_MASK_HOST_BITS));
				hptiop_request_callback_mvfrey(hba, _tag);
				ret = 1;
			}
			hba->u.mvfrey.outlist_rptr = cur_rptr;
		} while (cptr != (*hba->u.mvfrey.outlist_cptr & 0xff));
	}

	if (hba->initialized)
		writel(0x1010, &(hba->u.mvfrey.mu->pcie_f0_int_enable));

	return ret;
}

static int iop_send_sync_request_itl(struct hptiop_hba *hba,
					void __iomem *_req, u32 millisec)
{
	struct hpt_iop_request_header __iomem *req = _req;
	u32 i;

	writel(readl(&req->flags) | IOP_REQUEST_FLAG_SYNC_REQUEST, &req->flags);
	writel(0, &req->context);
	writel((unsigned long)req - (unsigned long)hba->u.itl.iop,
			&hba->u.itl.iop->inbound_queue);
	readl(&hba->u.itl.iop->outbound_intstatus);

	for (i = 0; i < millisec; i++) {
		iop_intr_itl(hba);
		if (readl(&req->context))
			return 0;
		msleep(1);
	}

	return -1;
}

static int iop_send_sync_request_mv(struct hptiop_hba *hba,
					u32 size_bits, u32 millisec)
{
	struct hpt_iop_request_header *reqhdr = hba->u.mv.internal_req;
	u32 i;

	hba->msg_done = 0;
	reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_SYNC_REQUEST);
	mv_inbound_write(hba->u.mv.internal_req_phy |
			MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bits, hba);

	for (i = 0; i < millisec; i++) {
		iop_intr_mv(hba);
		if (hba->msg_done)
			return 0;
		msleep(1);
	}
	return -1;
}

static int iop_send_sync_request_mvfrey(struct hptiop_hba *hba,
					u32 size_bits, u32 millisec)
{
	struct hpt_iop_request_header *reqhdr =
		hba->u.mvfrey.internal_req.req_virt;
	u32 i;

	hba->msg_done = 0;
	reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_SYNC_REQUEST);
	hba->ops->post_req(hba, &(hba->u.mvfrey.internal_req));

	for (i = 0; i < millisec; i++) {
		iop_intr_mvfrey(hba);
		if (hba->msg_done)
			break;
		msleep(1);
	}
	return hba->msg_done ? 0 : -1;
}

static void hptiop_post_msg_itl(struct hptiop_hba *hba, u32 msg)
{
	writel(msg, &hba->u.itl.iop->inbound_msgaddr0);
	readl(&hba->u.itl.iop->outbound_intstatus);
}

static void hptiop_post_msg_mv(struct hptiop_hba *hba, u32 msg)
{
	writel(msg, &hba->u.mv.mu->inbound_msg);
	writel(MVIOP_MU_INBOUND_INT_MSG, &hba->u.mv.regs->inbound_doorbell);
	readl(&hba->u.mv.regs->inbound_doorbell);
}

static void hptiop_post_msg_mvfrey(struct hptiop_hba *hba, u32 msg)
{
	writel(msg, &(hba->u.mvfrey.mu->f0_to_cpu_msg_a));
	readl(&(hba->u.mvfrey.mu->f0_to_cpu_msg_a));
}

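/*
 * Synchronous messages are posted with interrupts disabled; completion is
 * detected by polling the family interrupt handler under the host lock,
 * one millisecond at a time, until the message callback sets
 * hba->msg_done.
 */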
static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec)
{
	u32 i;

	hba->msg_done = 0;
	hba->ops->disable_intr(hba);
	hba->ops->post_msg(hba, msg);

	for (i = 0; i < millisec; i++) {
		spin_lock_irq(hba->host->host_lock);
		hba->ops->iop_intr(hba);
		spin_unlock_irq(hba->host->host_lock);
		if (hba->msg_done)
			break;
		msleep(1);
	}

	hba->ops->enable_intr(hba);
	return hba->msg_done ? 0 : -1;
}

static int iop_get_config_itl(struct hptiop_hba *hba,
				struct hpt_iop_request_get_config *config)
{
	u32 req32;
	struct hpt_iop_request_get_config __iomem *req;

	req32 = readl(&hba->u.itl.iop->inbound_queue);
	if (req32 == IOPMU_QUEUE_EMPTY)
		return -1;

	req = (struct hpt_iop_request_get_config __iomem *)
			((unsigned long)hba->u.itl.iop + req32);

	writel(0, &req->header.flags);
	writel(IOP_REQUEST_TYPE_GET_CONFIG, &req->header.type);
	writel(sizeof(struct hpt_iop_request_get_config), &req->header.size);
	writel(IOP_RESULT_PENDING, &req->header.result);

	if (iop_send_sync_request_itl(hba, req, 20000)) {
		dprintk("Get config send cmd failed\n");
		return -1;
	}

	memcpy_fromio(config, req, sizeof(*config));
	writel(req32, &hba->u.itl.iop->outbound_queue);
	return 0;
}

static int iop_get_config_mv(struct hptiop_hba *hba,
				struct hpt_iop_request_get_config *config)
{
	struct hpt_iop_request_get_config *req = hba->u.mv.internal_req;

	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG);
	req->header.size =
		cpu_to_le32(sizeof(struct hpt_iop_request_get_config));
	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
	req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG<<5);
	req->header.context_hi32 = 0;

	if (iop_send_sync_request_mv(hba, 0, 20000)) {
		dprintk("Get config send cmd failed\n");
		return -1;
	}

	memcpy(config, req, sizeof(struct hpt_iop_request_get_config));
	return 0;
}

static int iop_get_config_mvfrey(struct hptiop_hba *hba,
				struct hpt_iop_request_get_config *config)
{
	struct hpt_iop_request_get_config *info = hba->u.mvfrey.config;

	if (info->header.size != sizeof(struct hpt_iop_request_get_config) ||
	    info->header.type != IOP_REQUEST_TYPE_GET_CONFIG)
		return -1;

	config->interface_version = info->interface_version;
	config->firmware_version = info->firmware_version;
	config->max_requests = info->max_requests;
	config->request_size = info->request_size;
	config->max_sg_count = info->max_sg_count;
	config->data_transfer_length = info->data_transfer_length;
	config->alignment_mask = info->alignment_mask;
	config->max_devices = info->max_devices;
	config->sdram_size = info->sdram_size;

	return 0;
}

static int iop_set_config_itl(struct hptiop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	u32 req32;
	struct hpt_iop_request_set_config __iomem *req;

	req32 = readl(&hba->u.itl.iop->inbound_queue);
	if (req32 == IOPMU_QUEUE_EMPTY)
		return -1;

	req = (struct hpt_iop_request_set_config __iomem *)
			((unsigned long)hba->u.itl.iop + req32);

	memcpy_toio((u8 __iomem *)req + sizeof(struct hpt_iop_request_header),
		(u8 *)config + sizeof(struct hpt_iop_request_header),
		sizeof(struct hpt_iop_request_set_config) -
			sizeof(struct hpt_iop_request_header));

	writel(0, &req->header.flags);
	writel(IOP_REQUEST_TYPE_SET_CONFIG, &req->header.type);
	writel(sizeof(struct hpt_iop_request_set_config), &req->header.size);
	writel(IOP_RESULT_PENDING, &req->header.result);

	if (iop_send_sync_request_itl(hba, req, 20000)) {
		dprintk("Set config send cmd failed\n");
		return -1;
	}

	writel(req32, &hba->u.itl.iop->outbound_queue);
	return 0;
}

static int iop_set_config_mv(struct hptiop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	struct hpt_iop_request_set_config *req = hba->u.mv.internal_req;

	memcpy(req, config, sizeof(struct hpt_iop_request_set_config));
	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG);
	req->header.size =
		cpu_to_le32(sizeof(struct hpt_iop_request_set_config));
	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
	req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG<<5);
	req->header.context_hi32 = 0;

	if (iop_send_sync_request_mv(hba, 0, 20000)) {
		dprintk("Set config send cmd failed\n");
		return -1;
	}

	return 0;
}

static int iop_set_config_mvfrey(struct hptiop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	struct hpt_iop_request_set_config *req =
		hba->u.mvfrey.internal_req.req_virt;

	memcpy(req, config, sizeof(struct hpt_iop_request_set_config));
	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG);
	req->header.size =
		cpu_to_le32(sizeof(struct hpt_iop_request_set_config));
	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
	req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG<<5);
	req->header.context_hi32 = 0;

	if (iop_send_sync_request_mvfrey(hba, 0, 20000)) {
		dprintk("Set config send cmd failed\n");
		return -1;
	}

	return 0;
}

static void hptiop_enable_intr_itl(struct hptiop_hba *hba)
{
	writel(~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0),
		&hba->u.itl.iop->outbound_intmask);
}

static void hptiop_enable_intr_mv(struct hptiop_hba *hba)
{
	writel(MVIOP_MU_OUTBOUND_INT_POSTQUEUE | MVIOP_MU_OUTBOUND_INT_MSG,
		&hba->u.mv.regs->outbound_intmask);
}

static void hptiop_enable_intr_mvfrey(struct hptiop_hba *hba)
{
	writel(CPU_TO_F0_DRBL_MSG_BIT, &(hba->u.mvfrey.mu->f0_doorbell_enable));
	writel(0x1, &(hba->u.mvfrey.mu->isr_enable));
	writel(0x1010, &(hba->u.mvfrey.mu->pcie_f0_int_enable));
}

static int hptiop_initialize_iop(struct hptiop_hba *hba)
{
	/* enable interrupts */
	hba->ops->enable_intr(hba);

	hba->initialized = 1;

	/* start background tasks */
	if (iop_send_sync_msg(hba,
			IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
		printk(KERN_ERR "scsi%d: fail to start background task\n",
				hba->host->host_no);
		return -1;
	}
	return 0;
}

static void __iomem *hptiop_map_pci_bar(struct hptiop_hba *hba, int index)
{
	u32 mem_base_phy, length;
	void __iomem *mem_base_virt;
	struct pci_dev *pcidev = hba->pcidev;

	if (!(pci_resource_flags(pcidev, index) & IORESOURCE_MEM)) {
		printk(KERN_ERR "scsi%d: pci resource invalid\n",
				hba->host->host_no);
		return NULL;
	}

	mem_base_phy = pci_resource_start(pcidev, index);
	length = pci_resource_len(pcidev, index);
	mem_base_virt = ioremap(mem_base_phy, length);

	if (!mem_base_virt) {
		printk(KERN_ERR "scsi%d: Fail to ioremap memory space\n",
				hba->host->host_no);
		return NULL;
	}
	return mem_base_virt;
}

static int hptiop_map_pci_bar_itl(struct hptiop_hba *hba)
{
	struct pci_dev *pcidev = hba->pcidev;

	hba->u.itl.iop = hptiop_map_pci_bar(hba, 0);
	if (hba->u.itl.iop == NULL)
		return -1;
	if ((pcidev->device & 0xff00) == 0x4400) {
		hba->u.itl.plx = hba->u.itl.iop;
		hba->u.itl.iop = hptiop_map_pci_bar(hba, 2);
		if (hba->u.itl.iop == NULL) {
			iounmap(hba->u.itl.plx);
			return -1;
		}
	}
	return 0;
}

static void hptiop_unmap_pci_bar_itl(struct hptiop_hba *hba)
{
	if (hba->u.itl.plx)
		iounmap(hba->u.itl.plx);
	iounmap(hba->u.itl.iop);
}

static int hptiop_map_pci_bar_mv(struct hptiop_hba *hba)
{
	hba->u.mv.regs = hptiop_map_pci_bar(hba, 0);
	if (hba->u.mv.regs == NULL)
		return -1;

	hba->u.mv.mu = hptiop_map_pci_bar(hba, 2);
	if (hba->u.mv.mu == NULL) {
		iounmap(hba->u.mv.regs);
		return -1;
	}

	return 0;
}

static int hptiop_map_pci_bar_mvfrey(struct hptiop_hba *hba)
{
	hba->u.mvfrey.config = hptiop_map_pci_bar(hba, 0);
	if (hba->u.mvfrey.config == NULL)
		return -1;

	hba->u.mvfrey.mu = hptiop_map_pci_bar(hba, 2);
	if (hba->u.mvfrey.mu == NULL) {
		iounmap(hba->u.mvfrey.config);
		return -1;
	}

	return 0;
}

static void hptiop_unmap_pci_bar_mv(struct hptiop_hba *hba)
{
	iounmap(hba->u.mv.regs);
	iounmap(hba->u.mv.mu);
}

static void hptiop_unmap_pci_bar_mvfrey(struct hptiop_hba *hba)
{
	iounmap(hba->u.mvfrey.config);
	iounmap(hba->u.mvfrey.mu);
}

static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg)
{
	dprintk("iop message 0x%x\n", msg);

	if (msg == IOPMU_INBOUND_MSG0_NOP ||
		msg == IOPMU_INBOUND_MSG0_RESET_COMM)
		hba->msg_done = 1;

	if (!hba->initialized)
		return;

	if (msg == IOPMU_INBOUND_MSG0_RESET) {
		atomic_set(&hba->resetting, 0);
		wake_up(&hba->reset_wq);
	}
	else if (msg <= IOPMU_INBOUND_MSG0_MAX)
		hba->msg_done = 1;
}

static struct hptiop_request *get_req(struct hptiop_hba *hba)
{
	struct hptiop_request *ret;

	dprintk("get_req : req=%p\n", hba->req_list);

	ret = hba->req_list;
	if (ret)
		hba->req_list = ret->next;

	return ret;
}

static void free_req(struct hptiop_hba *hba, struct hptiop_request *req)
{
	dprintk("free_req(%d, %p)\n", req->index, req);
	req->next = hba->req_list;
	hba->req_list = req;
}

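/*
 * Common completion path for all families: translate the IOP result code
 * into a SCSI midlayer result, copy sense data on check conditions, set
 * the residual and hand the command back to the midlayer.
 */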
static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
				struct hpt_iop_request_scsi_command *req)
{
	struct scsi_cmnd *scp;

	dprintk("hptiop_finish_scsi_req: req=%p, type=%d, "
			"result=%d, context=0x%x tag=%d\n",
			req, req->header.type, req->header.result,
			req->header.context, tag);

	BUG_ON(!req->header.result);
	BUG_ON(req->header.type != cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND));

	scp = hba->reqs[tag].scp;

	if (HPT_SCP(scp)->mapped)
		scsi_dma_unmap(scp);

	switch (le32_to_cpu(req->header.result)) {
	case IOP_RESULT_SUCCESS:
		scp->result = (DID_OK<<16);
		break;
	case IOP_RESULT_BAD_TARGET:
		scp->result = (DID_BAD_TARGET<<16);
		break;
	case IOP_RESULT_BUSY:
		scp->result = (DID_BUS_BUSY<<16);
		break;
	case IOP_RESULT_RESET:
		scp->result = (DID_RESET<<16);
		break;
	case IOP_RESULT_FAIL:
		scp->result = (DID_ERROR<<16);
		break;
	case IOP_RESULT_INVALID_REQUEST:
		scp->result = (DID_ABORT<<16);
		break;
	case IOP_RESULT_CHECK_CONDITION:
		scsi_set_resid(scp,
			scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
		scp->result = SAM_STAT_CHECK_CONDITION;
		memcpy(scp->sense_buffer, &req->sg_list, SCSI_SENSE_BUFFERSIZE);
		goto skip_resid;

	default:
		scp->result = DID_ABORT << 16;
		break;
	}

	scsi_set_resid(scp,
		scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));

skip_resid:
	dprintk("scsi_done(%p)\n", scp);
	scsi_done(scp);
	free_req(hba, &hba->reqs[tag]);
}

static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 _tag)
{
	struct hpt_iop_request_scsi_command *req;
	u32 tag;

	if (hba->iopintf_v2) {
		tag = _tag & ~IOPMU_QUEUE_REQUEST_RESULT_BIT;
		req = hba->reqs[tag].req_virt;
		if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT))
			req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);
	} else {
		tag = _tag;
		req = hba->reqs[tag].req_virt;
	}

	hptiop_finish_scsi_req(hba, tag, req);
}

static void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
{
	struct hpt_iop_request_header __iomem *req;
	struct hpt_iop_request_ioctl_command __iomem *p;
	struct hpt_ioctl_k *arg;

	req = (struct hpt_iop_request_header __iomem *)
			((unsigned long)hba->u.itl.iop + tag);
	dprintk("hptiop_iop_request_callback_itl: req=%p, type=%d, "
			"result=%d, context=0x%x tag=%d\n",
			req, readl(&req->type), readl(&req->result),
			readl(&req->context), tag);

	BUG_ON(!readl(&req->result));
	BUG_ON(readl(&req->type) != IOP_REQUEST_TYPE_IOCTL_COMMAND);

	p = (struct hpt_iop_request_ioctl_command __iomem *)req;
	arg = (struct hpt_ioctl_k *)(unsigned long)
		(readl(&req->context) |
			((u64)readl(&req->context_hi32)<<32));

	if (readl(&req->result) == IOP_RESULT_SUCCESS) {
		arg->result = HPT_IOCTL_RESULT_OK;

		if (arg->outbuf_size)
			memcpy_fromio(arg->outbuf,
				&p->buf[(readl(&p->inbuf_size) + 3) & ~3],
				arg->outbuf_size);

		if (arg->bytes_returned)
			*arg->bytes_returned = arg->outbuf_size;
	}
	else
		arg->result = HPT_IOCTL_RESULT_FAILED;

	arg->done(arg);
	writel(tag, &hba->u.itl.iop->outbound_queue);
}

static irqreturn_t hptiop_intr(int irq, void *dev_id)
{
	struct hptiop_hba *hba = dev_id;
	int handled;
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	handled = hba->ops->iop_intr(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return handled;
}

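/*
 * Map the command's scatterlist with the DMA API and convert it into the
 * controller's hpt_iopsg format, marking the last element with the
 * end-of-table bit.
 */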
static int hptiop_buildsgl(struct scsi_cmnd *scp, struct hpt_iopsg *psg)
{
	struct Scsi_Host *host = scp->device->host;
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
	struct scatterlist *sg;
	int idx, nseg;

	nseg = scsi_dma_map(scp);
	BUG_ON(nseg < 0);
	if (!nseg)
		return 0;

	HPT_SCP(scp)->sgcnt = nseg;
	HPT_SCP(scp)->mapped = 1;

	BUG_ON(HPT_SCP(scp)->sgcnt > hba->max_sg_descriptors);

	scsi_for_each_sg(scp, sg, HPT_SCP(scp)->sgcnt, idx) {
		psg[idx].pci_address = cpu_to_le64(sg_dma_address(sg)) |
			hba->ops->host_phy_flag;
		psg[idx].size = cpu_to_le32(sg_dma_len(sg));
		psg[idx].eot = (idx == HPT_SCP(scp)->sgcnt - 1) ?
			cpu_to_le32(1) : 0;
	}
	return HPT_SCP(scp)->sgcnt;
}

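/*
 * Request posting differs per family: ITL writes a shifted physical
 * address (plus size-hint bits on v2 interfaces) to the inbound queue
 * register, MV writes a 64-bit descriptor into the inbound ring, and
 * mvfrey appends an inbound list entry and bumps the write pointer.
 */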
static void hptiop_post_req_itl(struct hptiop_hba *hba,
					struct hptiop_request *_req)
{
	struct hpt_iop_request_header *reqhdr = _req->req_virt;

	reqhdr->context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
							(u32)_req->index);
	reqhdr->context_hi32 = 0;

	if (hba->iopintf_v2) {
		u32 size, size_bits;

		size = le32_to_cpu(reqhdr->size);
		if (size < 256)
			size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
		else if (size < 512)
			size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
		else
			size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT |
						IOPMU_QUEUE_ADDR_HOST_BIT;
		writel(_req->req_shifted_phy | size_bits,
			&hba->u.itl.iop->inbound_queue);
	} else
		writel(_req->req_shifted_phy | IOPMU_QUEUE_ADDR_HOST_BIT,
					&hba->u.itl.iop->inbound_queue);
}

static void hptiop_post_req_mv(struct hptiop_hba *hba,
					struct hptiop_request *_req)
{
	struct hpt_iop_request_header *reqhdr = _req->req_virt;
	u32 size, size_bit;

	reqhdr->context = cpu_to_le32(_req->index<<8 |
					IOP_REQUEST_TYPE_SCSI_COMMAND<<5);
	reqhdr->context_hi32 = 0;
	size = le32_to_cpu(reqhdr->size);

	if (size <= 256)
		size_bit = 0;
	else if (size <= 256*2)
		size_bit = 1;
	else if (size <= 256*3)
		size_bit = 2;
	else
		size_bit = 3;

	mv_inbound_write((_req->req_shifted_phy << 5) |
		MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bit, hba);
}

static void hptiop_post_req_mvfrey(struct hptiop_hba *hba,
					struct hptiop_request *_req)
{
	struct hpt_iop_request_header *reqhdr = _req->req_virt;
	u32 index;

	reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT |
			IOP_REQUEST_FLAG_ADDR_BITS |
			((_req->req_shifted_phy >> 11) & 0xffff0000));
	reqhdr->context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
			(_req->index << 4) | reqhdr->type);
	reqhdr->context_hi32 = cpu_to_le32((_req->req_shifted_phy << 5) &
			0xffffffff);

	hba->u.mvfrey.inlist_wptr++;
	index = hba->u.mvfrey.inlist_wptr & 0x3fff;

	if (index == hba->u.mvfrey.list_count) {
		index = 0;
		hba->u.mvfrey.inlist_wptr &= ~0x3fff;
		hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
	}

	hba->u.mvfrey.inlist[index].addr =
			(dma_addr_t)_req->req_shifted_phy << 5;
	hba->u.mvfrey.inlist[index].intrfc_len = (reqhdr->size + 3) / 4;
	writel(hba->u.mvfrey.inlist_wptr,
		&(hba->u.mvfrey.mu->inbound_write_ptr));
	readl(&(hba->u.mvfrey.mu->inbound_write_ptr));
}

static int hptiop_reset_comm_itl(struct hptiop_hba *hba)
{
	return 0;
}

static int hptiop_reset_comm_mv(struct hptiop_hba *hba)
{
	return 0;
}

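/*
 * After a RESET_COMM message the mvfrey inbound/outbound list bases and
 * the shadow outbound pointer must be re-programmed, and the ring indices
 * reset to their initial wrapped position.
 */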
static int hptiop_reset_comm_mvfrey(struct hptiop_hba *hba)
{
	u32 list_count = hba->u.mvfrey.list_count;

	if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET_COMM, 3000))
		return -1;

	/* wait 100ms for MCU ready */
	msleep(100);

	writel(cpu_to_le32(hba->u.mvfrey.inlist_phy & 0xffffffff),
			&(hba->u.mvfrey.mu->inbound_base));
	writel(cpu_to_le32((hba->u.mvfrey.inlist_phy >> 16) >> 16),
			&(hba->u.mvfrey.mu->inbound_base_high));

	writel(cpu_to_le32(hba->u.mvfrey.outlist_phy & 0xffffffff),
			&(hba->u.mvfrey.mu->outbound_base));
	writel(cpu_to_le32((hba->u.mvfrey.outlist_phy >> 16) >> 16),
			&(hba->u.mvfrey.mu->outbound_base_high));

	writel(cpu_to_le32(hba->u.mvfrey.outlist_cptr_phy & 0xffffffff),
			&(hba->u.mvfrey.mu->outbound_shadow_base));
	writel(cpu_to_le32((hba->u.mvfrey.outlist_cptr_phy >> 16) >> 16),
			&(hba->u.mvfrey.mu->outbound_shadow_base_high));

	hba->u.mvfrey.inlist_wptr = (list_count - 1) | CL_POINTER_TOGGLE;
	*hba->u.mvfrey.outlist_cptr = (list_count - 1) | CL_POINTER_TOGGLE;
	hba->u.mvfrey.outlist_rptr = list_count - 1;
	return 0;
}

static int hptiop_queuecommand_lck(struct scsi_cmnd *scp)
{
	struct Scsi_Host *host = scp->device->host;
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
	struct hpt_iop_request_scsi_command *req;
	int sg_count = 0;
	struct hptiop_request *_req;

	_req = get_req(hba);
	if (_req == NULL) {
		dprintk("hptiop_queuecmd : no free req\n");
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	_req->scp = scp;

	dprintk("hptiop_queuecmd(scp=%p) %d/%d/%d/%llu cdb=(%08x-%08x-%08x-%08x) "
			"req_index=%d, req=%p\n",
			scp,
			host->host_no, scp->device->channel,
			scp->device->id, scp->device->lun,
			cpu_to_be32(((u32 *)scp->cmnd)[0]),
			cpu_to_be32(((u32 *)scp->cmnd)[1]),
			cpu_to_be32(((u32 *)scp->cmnd)[2]),
			cpu_to_be32(((u32 *)scp->cmnd)[3]),
			_req->index, _req->req_virt);

	scp->result = 0;

	if (scp->device->channel ||
			(scp->device->id > hba->max_devices) ||
			((scp->device->id == (hba->max_devices-1)) && scp->device->lun)) {
		scp->result = DID_BAD_TARGET << 16;
		free_req(hba, _req);
		goto cmd_done;
	}

	req = _req->req_virt;

	/* build S/G table */
	sg_count = hptiop_buildsgl(scp, req->sg_list);
	if (!sg_count)
		HPT_SCP(scp)->mapped = 0;

	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND);
	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
	req->dataxfer_length = cpu_to_le32(scsi_bufflen(scp));
	req->channel = scp->device->channel;
	req->target = scp->device->id;
	req->lun = scp->device->lun;
	req->header.size = cpu_to_le32(struct_size(req, sg_list, sg_count));

	memcpy(req->cdb, scp->cmnd, sizeof(req->cdb));
	hba->ops->post_req(hba, _req);
	return 0;

cmd_done:
	dprintk("scsi_done(scp=%p)\n", scp);
	scsi_done(scp);
	return 0;
}

static DEF_SCSI_QCMD(hptiop_queuecommand)

static const char *hptiop_info(struct Scsi_Host *host)
{
	return driver_name_long;
}

static int hptiop_reset_hba(struct hptiop_hba *hba)
{
	if (atomic_xchg(&hba->resetting, 1) == 0) {
		atomic_inc(&hba->reset_count);
		hba->ops->post_msg(hba, IOPMU_INBOUND_MSG0_RESET);
	}

	wait_event_timeout(hba->reset_wq,
			atomic_read(&hba->resetting) == 0, 60 * HZ);

	if (atomic_read(&hba->resetting)) {
		/* IOP is in unknown state, abort reset */
		printk(KERN_ERR "scsi%d: reset failed\n", hba->host->host_no);
		return -1;
	}

	if (iop_send_sync_msg(hba,
		IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
		dprintk("scsi%d: fail to start background task\n",
				hba->host->host_no);
	}

	return 0;
}

static int hptiop_reset(struct scsi_cmnd *scp)
{
	struct hptiop_hba *hba = (struct hptiop_hba *)scp->device->host->hostdata;

	printk(KERN_WARNING "hptiop_reset(%d/%d/%d)\n",
	       scp->device->host->host_no, -1, -1);

	return hptiop_reset_hba(hba) ? FAILED : SUCCESS;
}

static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev,
					  int queue_depth)
{
	struct hptiop_hba *hba = (struct hptiop_hba *)sdev->host->hostdata;

	if (queue_depth > hba->max_requests)
		queue_depth = hba->max_requests;
	return scsi_change_queue_depth(sdev, queue_depth);
}

static ssize_t hptiop_show_version(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", driver_ver);
}

static ssize_t hptiop_show_fw_version(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *host = class_to_shost(dev);
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

	return snprintf(buf, PAGE_SIZE, "%d.%d.%d.%d\n",
				hba->firmware_version >> 24,
				(hba->firmware_version >> 16) & 0xff,
				(hba->firmware_version >> 8) & 0xff,
				hba->firmware_version & 0xff);
}

static struct device_attribute hptiop_attr_version = {
	.attr = {
		.name = "driver-version",
		.mode = S_IRUGO,
	},
	.show = hptiop_show_version,
};

static struct device_attribute hptiop_attr_fw_version = {
	.attr = {
		.name = "firmware-version",
		.mode = S_IRUGO,
	},
	.show = hptiop_show_fw_version,
};

static struct attribute *hptiop_host_attrs[] = {
	&hptiop_attr_version.attr,
	&hptiop_attr_fw_version.attr,
	NULL
};

ATTRIBUTE_GROUPS(hptiop_host);

static int hptiop_sdev_configure(struct scsi_device *sdev,
				 struct queue_limits *lim)
{
	if (sdev->type == TYPE_TAPE)
		lim->max_hw_sectors = 8192;
	return 0;
}

static const struct scsi_host_template driver_template = {
	.module                 = THIS_MODULE,
	.name                   = driver_name,
	.queuecommand           = hptiop_queuecommand,
	.eh_host_reset_handler  = hptiop_reset,
	.info                   = hptiop_info,
	.emulated               = 0,
	.proc_name              = driver_name,
	.shost_groups           = hptiop_host_groups,
	.sdev_configure         = hptiop_sdev_configure,
	.this_id                = -1,
	.change_queue_depth     = hptiop_adjust_disk_queue_depth,
	.cmd_size               = sizeof(struct hpt_cmd_priv),
};

static int hptiop_internal_memalloc_itl(struct hptiop_hba *hba)
{
	return 0;
}

static int hptiop_internal_memalloc_mv(struct hptiop_hba *hba)
{
	hba->u.mv.internal_req = dma_alloc_coherent(&hba->pcidev->dev,
			0x800, &hba->u.mv.internal_req_phy, GFP_KERNEL);
	if (hba->u.mv.internal_req)
		return 0;
	else
		return -1;
}

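/*
 * The mvfrey family carves its internal request, the inbound/outbound
 * lists and the shadow copy of the outbound pointer out of one coherent
 * DMA allocation; list_count is taken from the inbound_conf_ctl register.
 */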
static int hptiop_internal_memalloc_mvfrey(struct hptiop_hba *hba)
{
	u32 list_count = readl(&hba->u.mvfrey.mu->inbound_conf_ctl);
	char *p;
	dma_addr_t phy;

	BUG_ON(hba->max_request_size == 0);

	if (list_count == 0) {
		BUG_ON(1);
		return -1;
	}

	list_count >>= 16;

	hba->u.mvfrey.list_count = list_count;
	hba->u.mvfrey.internal_mem_size = 0x800 +
			list_count * sizeof(struct mvfrey_inlist_entry) +
			list_count * sizeof(struct mvfrey_outlist_entry) +
			sizeof(int);

	p = dma_alloc_coherent(&hba->pcidev->dev,
			hba->u.mvfrey.internal_mem_size, &phy, GFP_KERNEL);
	if (!p)
		return -1;

	hba->u.mvfrey.internal_req.req_virt = p;
	hba->u.mvfrey.internal_req.req_shifted_phy = phy >> 5;
	hba->u.mvfrey.internal_req.scp = NULL;
	hba->u.mvfrey.internal_req.next = NULL;

	p += 0x800;
	phy += 0x800;

	hba->u.mvfrey.inlist = (struct mvfrey_inlist_entry *)p;
	hba->u.mvfrey.inlist_phy = phy;

	p += list_count * sizeof(struct mvfrey_inlist_entry);
	phy += list_count * sizeof(struct mvfrey_inlist_entry);

	hba->u.mvfrey.outlist = (struct mvfrey_outlist_entry *)p;
	hba->u.mvfrey.outlist_phy = phy;

	p += list_count * sizeof(struct mvfrey_outlist_entry);
	phy += list_count * sizeof(struct mvfrey_outlist_entry);

	hba->u.mvfrey.outlist_cptr = (__le32 *)p;
	hba->u.mvfrey.outlist_cptr_phy = phy;

	return 0;
}

static int hptiop_internal_memfree_itl(struct hptiop_hba *hba)
{
	return 0;
}

static int hptiop_internal_memfree_mv(struct hptiop_hba *hba)
{
	if (hba->u.mv.internal_req) {
		dma_free_coherent(&hba->pcidev->dev, 0x800,
			hba->u.mv.internal_req, hba->u.mv.internal_req_phy);
		return 0;
	} else
		return -1;
}

static int hptiop_internal_memfree_mvfrey(struct hptiop_hba *hba)
{
	if (hba->u.mvfrey.internal_req.req_virt) {
		dma_free_coherent(&hba->pcidev->dev,
			hba->u.mvfrey.internal_mem_size,
			hba->u.mvfrey.internal_req.req_virt,
			(dma_addr_t)
			hba->u.mvfrey.internal_req.req_shifted_phy << 5);
		return 0;
	} else
		return -1;
}

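/*
 * Probe sequence: enable the PCI device, set the DMA mask advertised by
 * the adapter ops, map the BARs, wait for firmware, exchange get/set
 * config requests, then allocate the per-request coherent buffers before
 * registering the SCSI host.
 */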
static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
{
	struct Scsi_Host *host = NULL;
	struct hptiop_hba *hba;
	struct hptiop_adapter_ops *iop_ops;
	struct hpt_iop_request_get_config iop_config;
	struct hpt_iop_request_set_config set_config;
	dma_addr_t start_phy;
	void *start_virt;
	u32 offset, i, req_size;
	int rc;

	dprintk("hptiop_probe(%p)\n", pcidev);

	if (pci_enable_device(pcidev)) {
		printk(KERN_ERR "hptiop: fail to enable pci device\n");
		return -ENODEV;
	}

	printk(KERN_INFO "adapter at PCI %d:%d:%d, IRQ %d\n",
		pcidev->bus->number, pcidev->devfn >> 3, pcidev->devfn & 7,
		pcidev->irq);

	pci_set_master(pcidev);

	/* Enable 64bit DMA if possible */
	iop_ops = (struct hptiop_adapter_ops *)id->driver_data;
	rc = dma_set_mask(&pcidev->dev,
			  DMA_BIT_MASK(iop_ops->hw_dma_bit_mask));
	if (rc)
		rc = dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32));

	if (rc) {
		printk(KERN_ERR "hptiop: fail to set dma_mask\n");
		goto disable_pci_device;
	}

	if (pci_request_regions(pcidev, driver_name)) {
		printk(KERN_ERR "hptiop: pci_request_regions failed\n");
		goto disable_pci_device;
	}

	host = scsi_host_alloc(&driver_template, sizeof(struct hptiop_hba));
	if (!host) {
		printk(KERN_ERR "hptiop: fail to alloc scsi host\n");
		goto free_pci_regions;
	}

	hba = (struct hptiop_hba *)host->hostdata;
	memset(hba, 0, sizeof(struct hptiop_hba));

	hba->ops = iop_ops;
	hba->pcidev = pcidev;
	hba->host = host;
	hba->initialized = 0;
	hba->iopintf_v2 = 0;

	atomic_set(&hba->resetting, 0);
	atomic_set(&hba->reset_count, 0);

	init_waitqueue_head(&hba->reset_wq);
	init_waitqueue_head(&hba->ioctl_wq);

	host->max_lun = 128;
	host->max_channel = 0;
	host->io_port = 0;
	host->n_io_port = 0;
	host->irq = pcidev->irq;

	if (hba->ops->map_pci_bar(hba))
		goto free_scsi_host;

	if (hba->ops->iop_wait_ready(hba, 20000)) {
		printk(KERN_ERR "scsi%d: firmware not ready\n",
				hba->host->host_no);
		goto unmap_pci_bar;
	}

	if (hba->ops->family == MV_BASED_IOP) {
		if (hba->ops->internal_memalloc(hba)) {
			printk(KERN_ERR "scsi%d: internal_memalloc failed\n",
				hba->host->host_no);
			goto unmap_pci_bar;
		}
	}

	if (hba->ops->get_config(hba, &iop_config)) {
		printk(KERN_ERR "scsi%d: get config failed\n",
				hba->host->host_no);
		goto unmap_pci_bar;
	}

	hba->max_requests = min(le32_to_cpu(iop_config.max_requests),
				HPTIOP_MAX_REQUESTS);
	hba->max_devices = le32_to_cpu(iop_config.max_devices);
	hba->max_request_size = le32_to_cpu(iop_config.request_size);
	hba->max_sg_descriptors = le32_to_cpu(iop_config.max_sg_count);
	hba->firmware_version = le32_to_cpu(iop_config.firmware_version);
	hba->interface_version = le32_to_cpu(iop_config.interface_version);
	hba->sdram_size = le32_to_cpu(iop_config.sdram_size);

	if (hba->ops->family == MVFREY_BASED_IOP) {
		if (hba->ops->internal_memalloc(hba)) {
			printk(KERN_ERR "scsi%d: internal_memalloc failed\n",
				hba->host->host_no);
			goto unmap_pci_bar;
		}
		if (hba->ops->reset_comm(hba)) {
			printk(KERN_ERR "scsi%d: reset comm failed\n",
					hba->host->host_no);
			goto unmap_pci_bar;
		}
	}

	if (hba->firmware_version > 0x01020000 ||
			hba->interface_version > 0x01020000)
		hba->iopintf_v2 = 1;

	host->max_sectors = le32_to_cpu(iop_config.data_transfer_length) >> 9;
	host->max_id = le32_to_cpu(iop_config.max_devices);
	host->sg_tablesize = le32_to_cpu(iop_config.max_sg_count);
	host->can_queue = le32_to_cpu(iop_config.max_requests);
	host->cmd_per_lun = le32_to_cpu(iop_config.max_requests);
	host->max_cmd_len = 16;

	req_size = struct_size_t(struct hpt_iop_request_scsi_command,
				 sg_list, hba->max_sg_descriptors);
	if ((req_size & 0x1f) != 0)
		req_size = (req_size + 0x1f) & ~0x1f;

	memset(&set_config, 0, sizeof(struct hpt_iop_request_set_config));
	set_config.iop_id = cpu_to_le32(host->host_no);
	set_config.vbus_id = cpu_to_le16(host->host_no);
	set_config.max_host_request_size = cpu_to_le16(req_size);

	if (hba->ops->set_config(hba, &set_config)) {
		printk(KERN_ERR "scsi%d: set config failed\n",
				hba->host->host_no);
		goto unmap_pci_bar;
	}

	pci_set_drvdata(pcidev, host);

	if (request_irq(pcidev->irq, hptiop_intr, IRQF_SHARED,
					driver_name, hba)) {
		printk(KERN_ERR "scsi%d: request irq %d failed\n",
					hba->host->host_no, pcidev->irq);
		goto unmap_pci_bar;
	}

	/* Allocate request mem */

	dprintk("req_size=%d, max_requests=%d\n", req_size, hba->max_requests);

	hba->req_size = req_size;
	hba->req_list = NULL;

	for (i = 0; i < hba->max_requests; i++) {
		start_virt = dma_alloc_coherent(&pcidev->dev,
					hba->req_size + 0x20,
					&start_phy, GFP_KERNEL);

		if (!start_virt) {
			printk(KERN_ERR "scsi%d: fail to alloc request mem\n",
					hba->host->host_no);
			goto free_request_mem;
		}

		hba->dma_coherent[i] = start_virt;
		hba->dma_coherent_handle[i] = start_phy;

		if ((start_phy & 0x1f) != 0) {
			offset = ((start_phy + 0x1f) & ~0x1f) - start_phy;
			start_phy += offset;
			start_virt += offset;
		}

		hba->reqs[i].next = NULL;
		hba->reqs[i].req_virt = start_virt;
		hba->reqs[i].req_shifted_phy = start_phy >> 5;
		hba->reqs[i].index = i;
		free_req(hba, &hba->reqs[i]);
	}

	/* Enable Interrupt and start background task */
	if (hptiop_initialize_iop(hba))
		goto free_request_mem;

	if (scsi_add_host(host, &pcidev->dev)) {
		printk(KERN_ERR "scsi%d: scsi_add_host failed\n",
					hba->host->host_no);
		goto free_request_mem;
	}

	scsi_scan_host(host);

	dprintk("scsi%d: hptiop_probe successfully\n", hba->host->host_no);
	return 0;

free_request_mem:
	for (i = 0; i < hba->max_requests; i++) {
		if (hba->dma_coherent[i] && hba->dma_coherent_handle[i])
			dma_free_coherent(&hba->pcidev->dev,
				hba->req_size + 0x20,
				hba->dma_coherent[i],
				hba->dma_coherent_handle[i]);
		else
			break;
	}

	free_irq(hba->pcidev->irq, hba);

unmap_pci_bar:
	hba->ops->internal_memfree(hba);

	hba->ops->unmap_pci_bar(hba);

free_scsi_host:
	scsi_host_put(host);

free_pci_regions:
	pci_release_regions(pcidev);

disable_pci_device:
	pci_disable_device(pcidev);

	dprintk("scsi%d: hptiop_probe fail\n", host ? host->host_no : 0);
	return -ENODEV;
}

static void hptiop_shutdown(struct pci_dev *pcidev)
{
	struct Scsi_Host *host = pci_get_drvdata(pcidev);
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

	dprintk("hptiop_shutdown(%p)\n", hba);

	/* stop the iop */
	if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
		printk(KERN_ERR "scsi%d: shutdown the iop timeout\n",
					hba->host->host_no);

	/* disable all outbound interrupts */
	hba->ops->disable_intr(hba);
}

static void hptiop_disable_intr_itl(struct hptiop_hba *hba)
{
	u32 int_mask;

	int_mask = readl(&hba->u.itl.iop->outbound_intmask);
	writel(int_mask |
		IOPMU_OUTBOUND_INT_MSG0 | IOPMU_OUTBOUND_INT_POSTQUEUE,
		&hba->u.itl.iop->outbound_intmask);
	readl(&hba->u.itl.iop->outbound_intmask);
}

static void hptiop_disable_intr_mv(struct hptiop_hba *hba)
{
	writel(0, &hba->u.mv.regs->outbound_intmask);
	readl(&hba->u.mv.regs->outbound_intmask);
}

static void hptiop_disable_intr_mvfrey(struct hptiop_hba *hba)
{
	writel(0, &(hba->u.mvfrey.mu->f0_doorbell_enable));
	readl(&(hba->u.mvfrey.mu->f0_doorbell_enable));
	writel(0, &(hba->u.mvfrey.mu->isr_enable));
	readl(&(hba->u.mvfrey.mu->isr_enable));
	writel(0, &(hba->u.mvfrey.mu->pcie_f0_int_enable));
	readl(&(hba->u.mvfrey.mu->pcie_f0_int_enable));
}

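/*
 * Device removal: tear down in roughly the reverse order of probe.
 * Detach from the SCSI midlayer, quiesce the IOP, then release the
 * IRQ, request buffers, internal memory, BARs and the PCI device.
 */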
static void hptiop_remove(struct pci_dev *pcidev)
{
	struct Scsi_Host *host = pci_get_drvdata(pcidev);
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
	u32 i;

	dprintk("scsi%d: hptiop_remove\n", hba->host->host_no);

	scsi_remove_host(host);

	hptiop_shutdown(pcidev);

	free_irq(hba->pcidev->irq, hba);

	for (i = 0; i < hba->max_requests; i++) {
		if (hba->dma_coherent[i] && hba->dma_coherent_handle[i])
			dma_free_coherent(&hba->pcidev->dev,
				hba->req_size + 0x20,
				hba->dma_coherent[i],
				hba->dma_coherent_handle[i]);
		else
			break;
	}

	hba->ops->internal_memfree(hba);

	hba->ops->unmap_pci_bar(hba);

	pci_release_regions(hba->pcidev);
	pci_set_drvdata(hba->pcidev, NULL);
	pci_disable_device(hba->pcidev);

	scsi_host_put(host);
}

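/*
 * Adapter operation tables, one per controller family.  The matching
 * table is attached to each PCI ID below through driver_data, so the
 * rest of the driver can dispatch family-specific work via hba->ops.
 */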
static struct hptiop_adapter_ops hptiop_itl_ops = {
	.family = INTEL_BASED_IOP,
	.iop_wait_ready = iop_wait_ready_itl,
	.internal_memalloc = hptiop_internal_memalloc_itl,
	.internal_memfree = hptiop_internal_memfree_itl,
	.map_pci_bar = hptiop_map_pci_bar_itl,
	.unmap_pci_bar = hptiop_unmap_pci_bar_itl,
	.enable_intr = hptiop_enable_intr_itl,
	.disable_intr = hptiop_disable_intr_itl,
	.get_config = iop_get_config_itl,
	.set_config = iop_set_config_itl,
	.iop_intr = iop_intr_itl,
	.post_msg = hptiop_post_msg_itl,
	.post_req = hptiop_post_req_itl,
	.hw_dma_bit_mask = 64,
	.reset_comm = hptiop_reset_comm_itl,
	.host_phy_flag = cpu_to_le64(0),
};

static struct hptiop_adapter_ops hptiop_mv_ops = {
	.family = MV_BASED_IOP,
	.iop_wait_ready = iop_wait_ready_mv,
	.internal_memalloc = hptiop_internal_memalloc_mv,
	.internal_memfree = hptiop_internal_memfree_mv,
	.map_pci_bar = hptiop_map_pci_bar_mv,
	.unmap_pci_bar = hptiop_unmap_pci_bar_mv,
	.enable_intr = hptiop_enable_intr_mv,
	.disable_intr = hptiop_disable_intr_mv,
	.get_config = iop_get_config_mv,
	.set_config = iop_set_config_mv,
	.iop_intr = iop_intr_mv,
	.post_msg = hptiop_post_msg_mv,
	.post_req = hptiop_post_req_mv,
	.hw_dma_bit_mask = 33,
	.reset_comm = hptiop_reset_comm_mv,
	.host_phy_flag = cpu_to_le64(0),
};

static struct hptiop_adapter_ops hptiop_mvfrey_ops = {
	.family = MVFREY_BASED_IOP,
	.iop_wait_ready = iop_wait_ready_mvfrey,
	.internal_memalloc = hptiop_internal_memalloc_mvfrey,
	.internal_memfree = hptiop_internal_memfree_mvfrey,
	.map_pci_bar = hptiop_map_pci_bar_mvfrey,
	.unmap_pci_bar = hptiop_unmap_pci_bar_mvfrey,
	.enable_intr = hptiop_enable_intr_mvfrey,
	.disable_intr = hptiop_disable_intr_mvfrey,
	.get_config = iop_get_config_mvfrey,
	.set_config = iop_set_config_mvfrey,
	.iop_intr = iop_intr_mvfrey,
	.post_msg = hptiop_post_msg_mvfrey,
	.post_req = hptiop_post_req_mvfrey,
	.hw_dma_bit_mask = 64,
	.reset_comm = hptiop_reset_comm_mvfrey,
	.host_phy_flag = cpu_to_le64(1),
};

static const struct pci_device_id hptiop_id_table[] = {
	{ PCI_VDEVICE(TTI, 0x3220), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3320), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3410), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3510), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3511), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3520), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3521), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3522), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3530), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3540), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3560), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4210), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4211), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4310), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4311), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4320), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4321), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4322), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4400), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3120), (kernel_ulong_t)&hptiop_mv_ops },
	{ PCI_VDEVICE(TTI, 0x3122), (kernel_ulong_t)&hptiop_mv_ops },
	{ PCI_VDEVICE(TTI, 0x3020), (kernel_ulong_t)&hptiop_mv_ops },
	{ PCI_VDEVICE(TTI, 0x4520), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x4522), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3610), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3611), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3620), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3622), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3640), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3660), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3680), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3690), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{},
};

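/*
 * Exporting the ID table lets userspace (udev/modprobe) auto-load
 * this module when a matching controller appears on the PCI bus.
 */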
MODULE_DEVICE_TABLE(pci, hptiop_id_table);

static struct pci_driver hptiop_pci_driver = {
	.name = driver_name,
	.id_table = hptiop_id_table,
	.probe = hptiop_probe,
	.remove = hptiop_remove,
	.shutdown = hptiop_shutdown,
};

static int __init hptiop_module_init(void)
{
	printk(KERN_INFO "%s %s\n", driver_name_long, driver_ver);
	return pci_register_driver(&hptiop_pci_driver);
}

static void __exit hptiop_module_exit(void)
{
	pci_unregister_driver(&hptiop_pci_driver);
}

module_init(hptiop_module_init);
module_exit(hptiop_module_exit);

MODULE_LICENSE("GPL");