/*
 * HighPoint RR3xxx/4xxx controller driver for Linux
 * Copyright (C) 2006-2009 HighPoint Technologies, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Please report bugs/comments/suggestions to linux@highpoint-tech.com
 *
 * For more information, visit http://www.highpoint-tech.com
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/div64.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>

#include "hptiop.h"

MODULE_AUTHOR("HighPoint Technologies, Inc.");
MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx/4xxx Controller Driver");

static char driver_name[] = "hptiop";
static const char driver_name_long[] = "RocketRAID 3xxx/4xxx Controller driver";
static const char driver_ver[] = "v1.6 (090910)";

static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec);
static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
				struct hpt_iop_request_scsi_command *req);
static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 tag);
static void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag);
static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg);

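/*
 * Wait up to @millisec ms for the ITL firmware to post a request on the
 * inbound queue; a non-empty read means the IOP is up, and the borrowed
 * entry is returned straight to the outbound queue.
 */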
static int iop_wait_ready_itl(struct hptiop_hba *hba, u32 millisec)
{
	u32 req = 0;
	int i;

	for (i = 0; i < millisec; i++) {
		req = readl(&hba->u.itl.iop->inbound_queue);
		if (req != IOPMU_QUEUE_EMPTY)
			break;
		msleep(1);
	}

	if (req != IOPMU_QUEUE_EMPTY) {
		writel(req, &hba->u.itl.iop->outbound_queue);
		readl(&hba->u.itl.iop->outbound_intstatus);
		return 0;
	}

	return -1;
}

static int iop_wait_ready_mv(struct hptiop_hba *hba, u32 millisec)
{
	return iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec);
}

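/*
 * Dispatch an ITL outbound tag: IOPMU_QUEUE_ADDR_HOST_BIT marks a
 * host-allocated request slot, everything else is an offset into the
 * IOP's own memory window.
 */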
static void hptiop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
{
	if (tag & IOPMU_QUEUE_ADDR_HOST_BIT)
		hptiop_host_request_callback_itl(hba,
				tag & ~IOPMU_QUEUE_ADDR_HOST_BIT);
	else
		hptiop_iop_request_callback_itl(hba, tag);
}

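/*
 * Drain the ITL outbound queue.  For IOP-resident synchronous requests the
 * context word doubles as a completion flag: the interrupt path writes 1
 * into it so the polling sender in iop_send_sync_request_itl() can see it.
 */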
static void hptiop_drain_outbound_queue_itl(struct hptiop_hba *hba)
{
	u32 req;

	while ((req = readl(&hba->u.itl.iop->outbound_queue)) !=
			IOPMU_QUEUE_EMPTY) {

		if (req & IOPMU_QUEUE_MASK_HOST_BITS)
			hptiop_request_callback_itl(hba, req);
		else {
			struct hpt_iop_request_header __iomem *p;

			p = (struct hpt_iop_request_header __iomem *)
				((char __iomem *)hba->u.itl.iop + req);

			if (readl(&p->flags) & IOP_REQUEST_FLAG_SYNC_REQUEST) {
				if (readl(&p->context))
					hptiop_request_callback_itl(hba, req);
				else
					writel(1, &p->context);
			}
			else
				hptiop_request_callback_itl(hba, req);
		}
	}
}

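/*
 * ITL interrupt service: dispatch outbound messages and drain the post
 * queue.  On boards behind a PLX bridge (PCI device 0x44xx, see
 * hptiop_map_pci_bar_itl) pending bridge interrupt bits at offset 0x11C5C
 * are cleared first by writing offset 0x11C60.
 */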
static int iop_intr_itl(struct hptiop_hba *hba)
{
	struct hpt_iopmu_itl __iomem *iop = hba->u.itl.iop;
	void __iomem *plx = hba->u.itl.plx;
	u32 status;
	int ret = 0;

	if (plx && readl(plx + 0x11C5C) & 0xf)
		writel(1, plx + 0x11C60);

	status = readl(&iop->outbound_intstatus);

	if (status & IOPMU_OUTBOUND_INT_MSG0) {
		u32 msg = readl(&iop->outbound_msgaddr0);

		dprintk("received outbound msg %x\n", msg);
		writel(IOPMU_OUTBOUND_INT_MSG0, &iop->outbound_intstatus);
		hptiop_message_callback(hba, msg);
		ret = 1;
	}

	if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
		hptiop_drain_outbound_queue_itl(hba);
		ret = 1;
	}

	return ret;
}

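/*
 * MV hardware exchanges 64-bit tags through small rings in controller
 * memory; head/tail indices live in the message unit and wrap at
 * MVIOP_QUEUE_LEN.
 */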
static u64 mv_outbound_read(struct hpt_iopmu_mv __iomem *mu)
{
	u32 outbound_tail = readl(&mu->outbound_tail);
	u32 outbound_head = readl(&mu->outbound_head);

	if (outbound_tail != outbound_head) {
		u64 p;

		memcpy_fromio(&p, &mu->outbound_q[outbound_tail], 8);
		outbound_tail++;

		if (outbound_tail == MVIOP_QUEUE_LEN)
			outbound_tail = 0;
		writel(outbound_tail, &mu->outbound_tail);
		return p;
	} else
		return 0;
}

static void mv_inbound_write(u64 p, struct hptiop_hba *hba)
{
	u32 inbound_head = readl(&hba->u.mv.mu->inbound_head);
	u32 head = inbound_head + 1;

	if (head == MVIOP_QUEUE_LEN)
		head = 0;

	memcpy_toio(&hba->u.mv.mu->inbound_q[inbound_head], &p, 8);
	writel(head, &hba->u.mv.mu->inbound_head);
	writel(MVIOP_MU_INBOUND_INT_POSTQUEUE,
		&hba->u.mv.regs->inbound_doorbell);
}

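/*
 * An MV completion tag packs the request type into bits 5-7 and the request
 * index into bits 8 and up; MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT must be
 * set, and MVIOP_MU_QUEUE_REQUEST_RESULT_BIT marks a successful command.
 */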
static void hptiop_request_callback_mv(struct hptiop_hba *hba, u64 tag)
{
	u32 req_type = (tag >> 5) & 0x7;
	struct hpt_iop_request_scsi_command *req;

	dprintk("hptiop_request_callback_mv: tag=%llx\n", tag);

	BUG_ON((tag & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) == 0);

	switch (req_type) {
	case IOP_REQUEST_TYPE_GET_CONFIG:
	case IOP_REQUEST_TYPE_SET_CONFIG:
		hba->msg_done = 1;
		break;

	case IOP_REQUEST_TYPE_SCSI_COMMAND:
		req = hba->reqs[tag >> 8].req_virt;
		if (likely(tag & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT))
			req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);

		hptiop_finish_scsi_req(hba, tag >> 8, req);
		break;

	default:
		break;
	}
}

static int iop_intr_mv(struct hptiop_hba *hba)
{
	u32 status;
	int ret = 0;

	status = readl(&hba->u.mv.regs->outbound_doorbell);
	writel(~status, &hba->u.mv.regs->outbound_doorbell);

	if (status & MVIOP_MU_OUTBOUND_INT_MSG) {
		u32 msg;
		msg = readl(&hba->u.mv.mu->outbound_msg);
		dprintk("received outbound msg %x\n", msg);
		hptiop_message_callback(hba, msg);
		ret = 1;
	}

	if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) {
		u64 tag;

		while ((tag = mv_outbound_read(hba->u.mv.mu)))
			hptiop_request_callback_mv(hba, tag);
		ret = 1;
	}

	return ret;
}

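/*
 * Post a request flagged IOP_REQUEST_FLAG_SYNC_REQUEST and poll its context
 * word (set to non-zero by the outbound-queue drain) until the firmware
 * completes it or @millisec expires.
 */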
static int iop_send_sync_request_itl(struct hptiop_hba *hba,
					void __iomem *_req, u32 millisec)
{
	struct hpt_iop_request_header __iomem *req = _req;
	u32 i;

	writel(readl(&req->flags) | IOP_REQUEST_FLAG_SYNC_REQUEST, &req->flags);
	writel(0, &req->context);
	writel((unsigned long)req - (unsigned long)hba->u.itl.iop,
			&hba->u.itl.iop->inbound_queue);
	readl(&hba->u.itl.iop->outbound_intstatus);

	for (i = 0; i < millisec; i++) {
		iop_intr_itl(hba);
		if (readl(&req->context))
			return 0;
		msleep(1);
	}

	return -1;
}

static int iop_send_sync_request_mv(struct hptiop_hba *hba,
					u32 size_bits, u32 millisec)
{
	struct hpt_iop_request_header *reqhdr = hba->u.mv.internal_req;
	u32 i;

	hba->msg_done = 0;
	reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_SYNC_REQUEST);
	mv_inbound_write(hba->u.mv.internal_req_phy |
			MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bits, hba);

	for (i = 0; i < millisec; i++) {
		iop_intr_mv(hba);
		if (hba->msg_done)
			return 0;
		msleep(1);
	}
	return -1;
}

static void hptiop_post_msg_itl(struct hptiop_hba *hba, u32 msg)
{
	writel(msg, &hba->u.itl.iop->inbound_msgaddr0);
	readl(&hba->u.itl.iop->outbound_intstatus);
}

static void hptiop_post_msg_mv(struct hptiop_hba *hba, u32 msg)
{
	writel(msg, &hba->u.mv.mu->inbound_msg);
	writel(MVIOP_MU_INBOUND_INT_MSG, &hba->u.mv.regs->inbound_doorbell);
	readl(&hba->u.mv.regs->inbound_doorbell);
}

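/*
 * Send an inbound message and poll the interrupt handler, under host_lock,
 * until hptiop_message_callback() sets msg_done or the timeout expires.
 */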
static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec)
{
	u32 i;

	hba->msg_done = 0;
	hba->ops->post_msg(hba, msg);

	for (i = 0; i < millisec; i++) {
		spin_lock_irq(hba->host->host_lock);
		hba->ops->iop_intr(hba);
		spin_unlock_irq(hba->host->host_lock);
		if (hba->msg_done)
			break;
		msleep(1);
	}

	return hba->msg_done ? 0 : -1;
}

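/*
 * GET_CONFIG on ITL hardware: borrow a firmware-supplied frame from the
 * inbound queue, run it synchronously, copy the result out of IOP memory
 * and hand the frame back via the outbound queue.
 */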
static int iop_get_config_itl(struct hptiop_hba *hba,
				struct hpt_iop_request_get_config *config)
{
	u32 req32;
	struct hpt_iop_request_get_config __iomem *req;

	req32 = readl(&hba->u.itl.iop->inbound_queue);
	if (req32 == IOPMU_QUEUE_EMPTY)
		return -1;

	req = (struct hpt_iop_request_get_config __iomem *)
			((unsigned long)hba->u.itl.iop + req32);

	writel(0, &req->header.flags);
	writel(IOP_REQUEST_TYPE_GET_CONFIG, &req->header.type);
	writel(sizeof(struct hpt_iop_request_get_config), &req->header.size);
	writel(IOP_RESULT_PENDING, &req->header.result);

	if (iop_send_sync_request_itl(hba, req, 20000)) {
		dprintk("Get config send cmd failed\n");
		return -1;
	}

	memcpy_fromio(config, req, sizeof(*config));
	writel(req32, &hba->u.itl.iop->outbound_queue);
	return 0;
}

static int iop_get_config_mv(struct hptiop_hba *hba,
				struct hpt_iop_request_get_config *config)
{
	struct hpt_iop_request_get_config *req = hba->u.mv.internal_req;

	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG);
	req->header.size =
		cpu_to_le32(sizeof(struct hpt_iop_request_get_config));
	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
	req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG << 5);
	req->header.context_hi32 = 0;

	if (iop_send_sync_request_mv(hba, 0, 20000)) {
		dprintk("Get config send cmd failed\n");
		return -1;
	}

	memcpy(config, req, sizeof(struct hpt_iop_request_get_config));
	return 0;
}

static int iop_set_config_itl(struct hptiop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	u32 req32;
	struct hpt_iop_request_set_config __iomem *req;

	req32 = readl(&hba->u.itl.iop->inbound_queue);
	if (req32 == IOPMU_QUEUE_EMPTY)
		return -1;

	req = (struct hpt_iop_request_set_config __iomem *)
			((unsigned long)hba->u.itl.iop + req32);

	memcpy_toio((u8 __iomem *)req + sizeof(struct hpt_iop_request_header),
		(u8 *)config + sizeof(struct hpt_iop_request_header),
		sizeof(struct hpt_iop_request_set_config) -
			sizeof(struct hpt_iop_request_header));

	writel(0, &req->header.flags);
	writel(IOP_REQUEST_TYPE_SET_CONFIG, &req->header.type);
	writel(sizeof(struct hpt_iop_request_set_config), &req->header.size);
	writel(IOP_RESULT_PENDING, &req->header.result);

	if (iop_send_sync_request_itl(hba, req, 20000)) {
		dprintk("Set config send cmd failed\n");
		return -1;
	}

	writel(req32, &hba->u.itl.iop->outbound_queue);
	return 0;
}

static int iop_set_config_mv(struct hptiop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	struct hpt_iop_request_set_config *req = hba->u.mv.internal_req;

	memcpy(req, config, sizeof(struct hpt_iop_request_set_config));
	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG);
	req->header.size =
		cpu_to_le32(sizeof(struct hpt_iop_request_set_config));
	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
	req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG << 5);
	req->header.context_hi32 = 0;

	if (iop_send_sync_request_mv(hba, 0, 20000)) {
		dprintk("Set config send cmd failed\n");
		return -1;
	}

	return 0;
}

static void hptiop_enable_intr_itl(struct hptiop_hba *hba)
{
	writel(~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0),
		&hba->u.itl.iop->outbound_intmask);
}

static void hptiop_enable_intr_mv(struct hptiop_hba *hba)
{
	writel(MVIOP_MU_OUTBOUND_INT_POSTQUEUE | MVIOP_MU_OUTBOUND_INT_MSG,
		&hba->u.mv.regs->outbound_intmask);
}

static int hptiop_initialize_iop(struct hptiop_hba *hba)
{
	/* enable interrupts */
	hba->ops->enable_intr(hba);

	hba->initialized = 1;

	/* start background tasks */
	if (iop_send_sync_msg(hba,
			IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
		printk(KERN_ERR "scsi%d: fail to start background task\n",
			hba->host->host_no);
		return -1;
	}
	return 0;
}

static void __iomem *hptiop_map_pci_bar(struct hptiop_hba *hba, int index)
{
	u32 mem_base_phy, length;
	void __iomem *mem_base_virt;
	struct pci_dev *pcidev = hba->pcidev;

	if (!(pci_resource_flags(pcidev, index) & IORESOURCE_MEM)) {
		printk(KERN_ERR "scsi%d: pci resource invalid\n",
				hba->host->host_no);
		return NULL;
	}

	mem_base_phy = pci_resource_start(pcidev, index);
	length = pci_resource_len(pcidev, index);
	mem_base_virt = ioremap(mem_base_phy, length);

	if (!mem_base_virt) {
		printk(KERN_ERR "scsi%d: Fail to ioremap memory space\n",
				hba->host->host_no);
		return NULL;
	}
	return mem_base_virt;
}

static int hptiop_map_pci_bar_itl(struct hptiop_hba *hba)
{
	struct pci_dev *pcidev = hba->pcidev;

	hba->u.itl.iop = hptiop_map_pci_bar(hba, 0);
	if (hba->u.itl.iop == NULL)
		return -1;
	if ((pcidev->device & 0xff00) == 0x4400) {
		hba->u.itl.plx = hba->u.itl.iop;
		hba->u.itl.iop = hptiop_map_pci_bar(hba, 2);
		if (hba->u.itl.iop == NULL) {
			iounmap(hba->u.itl.plx);
			return -1;
		}
	}
	return 0;
}

static void hptiop_unmap_pci_bar_itl(struct hptiop_hba *hba)
{
	if (hba->u.itl.plx)
		iounmap(hba->u.itl.plx);
	iounmap(hba->u.itl.iop);
}

static int hptiop_map_pci_bar_mv(struct hptiop_hba *hba)
{
	hba->u.mv.regs = hptiop_map_pci_bar(hba, 0);
	if (hba->u.mv.regs == NULL)
		return -1;

	hba->u.mv.mu = hptiop_map_pci_bar(hba, 2);
	if (hba->u.mv.mu == NULL) {
		iounmap(hba->u.mv.regs);
		return -1;
	}

	return 0;
}

static void hptiop_unmap_pci_bar_mv(struct hptiop_hba *hba)
{
	iounmap(hba->u.mv.regs);
	iounmap(hba->u.mv.mu);
}

static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg)
{
	dprintk("iop message 0x%x\n", msg);

	if (msg == IOPMU_INBOUND_MSG0_NOP)
		hba->msg_done = 1;

	if (!hba->initialized)
		return;

	if (msg == IOPMU_INBOUND_MSG0_RESET) {
		atomic_set(&hba->resetting, 0);
		wake_up(&hba->reset_wq);
	}
	else if (msg <= IOPMU_INBOUND_MSG0_MAX)
		hba->msg_done = 1;
}

static struct hptiop_request *get_req(struct hptiop_hba *hba)
{
	struct hptiop_request *ret;

	dprintk("get_req : req=%p\n", hba->req_list);

	ret = hba->req_list;
	if (ret)
		hba->req_list = ret->next;

	return ret;
}

static void free_req(struct hptiop_hba *hba, struct hptiop_request *req)
{
	dprintk("free_req(%d, %p)\n", req->index, req);
	req->next = hba->req_list;
	hba->req_list = req;
}

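/*
 * Complete a SCSI command: unmap its DMA buffers, translate the IOP result
 * code into a SCSI midlayer result (copying sense data on CHECK CONDITION),
 * call scsi_done() and recycle the request slot.
 */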
static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
				struct hpt_iop_request_scsi_command *req)
{
	struct scsi_cmnd *scp;

	dprintk("hptiop_finish_scsi_req: req=%p, type=%d, "
			"result=%d, context=0x%x tag=%d\n",
			req, req->header.type, req->header.result,
			req->header.context, tag);

	BUG_ON(!req->header.result);
	BUG_ON(req->header.type != cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND));

	scp = hba->reqs[tag].scp;

	if (HPT_SCP(scp)->mapped)
		scsi_dma_unmap(scp);

	switch (le32_to_cpu(req->header.result)) {
	case IOP_RESULT_SUCCESS:
		scsi_set_resid(scp,
			scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
		scp->result = (DID_OK<<16);
		break;
	case IOP_RESULT_BAD_TARGET:
		scp->result = (DID_BAD_TARGET<<16);
		break;
	case IOP_RESULT_BUSY:
		scp->result = (DID_BUS_BUSY<<16);
		break;
	case IOP_RESULT_RESET:
		scp->result = (DID_RESET<<16);
		break;
	case IOP_RESULT_FAIL:
		scp->result = (DID_ERROR<<16);
		break;
	case IOP_RESULT_INVALID_REQUEST:
		scp->result = (DID_ABORT<<16);
		break;
	case IOP_RESULT_CHECK_CONDITION:
		scsi_set_resid(scp,
			scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
		scp->result = SAM_STAT_CHECK_CONDITION;
		memcpy(scp->sense_buffer, &req->sg_list,
			min_t(size_t, SCSI_SENSE_BUFFERSIZE,
				le32_to_cpu(req->dataxfer_length)));
		break;

	default:
		scp->result = DRIVER_INVALID << 24 | DID_ABORT << 16;
		break;
	}

	dprintk("scsi_done(%p)\n", scp);
	scp->scsi_done(scp);
	free_req(hba, &hba->reqs[tag]);
}

static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 _tag)
{
	struct hpt_iop_request_scsi_command *req;
	u32 tag;

	if (hba->iopintf_v2) {
		tag = _tag & ~IOPMU_QUEUE_REQUEST_RESULT_BIT;
		req = hba->reqs[tag].req_virt;
		if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT))
			req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);
	} else {
		tag = _tag;
		req = hba->reqs[tag].req_virt;
	}

	hptiop_finish_scsi_req(hba, tag, req);
}

static void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
{
	struct hpt_iop_request_header __iomem *req;
	struct hpt_iop_request_ioctl_command __iomem *p;
	struct hpt_ioctl_k *arg;

	req = (struct hpt_iop_request_header __iomem *)
			((unsigned long)hba->u.itl.iop + tag);
	dprintk("hptiop_iop_request_callback_itl: req=%p, type=%d, "
			"result=%d, context=0x%x tag=%d\n",
			req, readl(&req->type), readl(&req->result),
			readl(&req->context), tag);

	BUG_ON(!readl(&req->result));
	BUG_ON(readl(&req->type) != IOP_REQUEST_TYPE_IOCTL_COMMAND);

	p = (struct hpt_iop_request_ioctl_command __iomem *)req;
	arg = (struct hpt_ioctl_k *)(unsigned long)
		(readl(&req->context) |
			((u64)readl(&req->context_hi32) << 32));

	if (readl(&req->result) == IOP_RESULT_SUCCESS) {
		arg->result = HPT_IOCTL_RESULT_OK;

		if (arg->outbuf_size)
			memcpy_fromio(arg->outbuf,
				&p->buf[(readl(&p->inbuf_size) + 3) & ~3],
				arg->outbuf_size);

		if (arg->bytes_returned)
			*arg->bytes_returned = arg->outbuf_size;
	}
	else
		arg->result = HPT_IOCTL_RESULT_FAILED;

	arg->done(arg);
	writel(tag, &hba->u.itl.iop->outbound_queue);
}

static irqreturn_t hptiop_intr(int irq, void *dev_id)
{
	struct hptiop_hba *hba = dev_id;
	int handled;
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	handled = hba->ops->iop_intr(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return handled;
}

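/*
 * Map the command's scatter/gather list for DMA and convert it into the
 * controller's hpt_iopsg format, flagging the last element with eot
 * (end of table).  Returns the entry count, 0 for commands with no data.
 */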
static int hptiop_buildsgl(struct scsi_cmnd *scp, struct hpt_iopsg *psg)
{
	struct Scsi_Host *host = scp->device->host;
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
	struct scatterlist *sg;
	int idx, nseg;

	nseg = scsi_dma_map(scp);
	BUG_ON(nseg < 0);
	if (!nseg)
		return 0;

	HPT_SCP(scp)->sgcnt = nseg;
	HPT_SCP(scp)->mapped = 1;

	BUG_ON(HPT_SCP(scp)->sgcnt > hba->max_sg_descriptors);

	scsi_for_each_sg(scp, sg, HPT_SCP(scp)->sgcnt, idx) {
		psg[idx].pci_address = cpu_to_le64(sg_dma_address(sg));
		psg[idx].size = cpu_to_le32(sg_dma_len(sg));
		psg[idx].eot = (idx == HPT_SCP(scp)->sgcnt - 1) ?
			cpu_to_le32(1) : 0;
	}
	return HPT_SCP(scp)->sgcnt;
}

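/*
 * Hand a built request to the ITL hardware.  On v2 interfaces the low bits
 * written alongside the shifted physical address tell the firmware which
 * size class (<256, <512, larger) of request to fetch; older interfaces
 * only pass IOPMU_QUEUE_ADDR_HOST_BIT.
 */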
static void hptiop_post_req_itl(struct hptiop_hba *hba,
					struct hptiop_request *_req)
{
	struct hpt_iop_request_header *reqhdr = _req->req_virt;

	reqhdr->context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
					(u32)_req->index);
	reqhdr->context_hi32 = 0;

	if (hba->iopintf_v2) {
		u32 size, size_bits;

		size = le32_to_cpu(reqhdr->size);
		if (size < 256)
			size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
		else if (size < 512)
			size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
		else
			size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT |
					IOPMU_QUEUE_ADDR_HOST_BIT;
		writel(_req->req_shifted_phy | size_bits,
			&hba->u.itl.iop->inbound_queue);
	} else
		writel(_req->req_shifted_phy | IOPMU_QUEUE_ADDR_HOST_BIT,
			&hba->u.itl.iop->inbound_queue);
}

static void hptiop_post_req_mv(struct hptiop_hba *hba,
					struct hptiop_request *_req)
{
	struct hpt_iop_request_header *reqhdr = _req->req_virt;
	u32 size, size_bit;

	reqhdr->context = cpu_to_le32(_req->index << 8 |
					IOP_REQUEST_TYPE_SCSI_COMMAND << 5);
	reqhdr->context_hi32 = 0;
	size = le32_to_cpu(reqhdr->size);

	if (size <= 256)
		size_bit = 0;
	else if (size <= 256*2)
		size_bit = 1;
	else if (size <= 256*3)
		size_bit = 2;
	else
		size_bit = 3;

	mv_inbound_write((_req->req_shifted_phy << 5) |
		MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bit, hba);
}

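/*
 * queuecommand body, entered with the host lock held via DEF_SCSI_QCMD
 * below: take a free request slot, build the SG table and request header,
 * and post it; anything outside channel 0/LUN 0 or beyond max_devices is
 * failed early with DID_BAD_TARGET.
 */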
static int hptiop_queuecommand_lck(struct scsi_cmnd *scp,
				void (*done)(struct scsi_cmnd *))
{
	struct Scsi_Host *host = scp->device->host;
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
	struct hpt_iop_request_scsi_command *req;
	int sg_count = 0;
	struct hptiop_request *_req;

	BUG_ON(!done);
	scp->scsi_done = done;

	_req = get_req(hba);
	if (_req == NULL) {
		dprintk("hptiop_queuecmd : no free req\n");
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	_req->scp = scp;

	dprintk("hptiop_queuecmd(scp=%p) %d/%d/%d/%d cdb=(%x-%x-%x) "
			"req_index=%d, req=%p\n",
			scp,
			host->host_no, scp->device->channel,
			scp->device->id, scp->device->lun,
			((u32 *)scp->cmnd)[0],
			((u32 *)scp->cmnd)[1],
			((u32 *)scp->cmnd)[2],
			_req->index, _req->req_virt);

	scp->result = 0;

	if (scp->device->channel || scp->device->lun ||
			scp->device->id > hba->max_devices) {
		scp->result = DID_BAD_TARGET << 16;
		free_req(hba, _req);
		goto cmd_done;
	}

	req = _req->req_virt;

	/* build S/G table */
	sg_count = hptiop_buildsgl(scp, req->sg_list);
	if (!sg_count)
		HPT_SCP(scp)->mapped = 0;

	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND);
	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
	req->dataxfer_length = cpu_to_le32(scsi_bufflen(scp));
	req->channel = scp->device->channel;
	req->target = scp->device->id;
	req->lun = scp->device->lun;
	req->header.size = cpu_to_le32(
				sizeof(struct hpt_iop_request_scsi_command)
				- sizeof(struct hpt_iopsg)
				+ sg_count * sizeof(struct hpt_iopsg));

	memcpy(req->cdb, scp->cmnd, sizeof(req->cdb));
	hba->ops->post_req(hba, _req);
	return 0;

cmd_done:
	dprintk("scsi_done(scp=%p)\n", scp);
	scp->scsi_done(scp);
	return 0;
}

static DEF_SCSI_QCMD(hptiop_queuecommand)

static const char *hptiop_info(struct Scsi_Host *host)
{
	return driver_name_long;
}

static int hptiop_reset_hba(struct hptiop_hba *hba)
{
	if (atomic_xchg(&hba->resetting, 1) == 0) {
		atomic_inc(&hba->reset_count);
		hba->ops->post_msg(hba, IOPMU_INBOUND_MSG0_RESET);
	}

	wait_event_timeout(hba->reset_wq,
			atomic_read(&hba->resetting) == 0, 60 * HZ);

	if (atomic_read(&hba->resetting)) {
		/* IOP is in unknown state, abort reset */
		printk(KERN_ERR "scsi%d: reset failed\n", hba->host->host_no);
		return -1;
	}

	if (iop_send_sync_msg(hba,
			IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
		dprintk("scsi%d: fail to start background task\n",
				hba->host->host_no);
	}

	return 0;
}

static int hptiop_reset(struct scsi_cmnd *scp)
{
	struct Scsi_Host *host = scp->device->host;
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

	printk(KERN_WARNING "hptiop_reset(%d/%d/%d) scp=%p\n",
			scp->device->host->host_no, scp->device->channel,
			scp->device->id, scp);

	return hptiop_reset_hba(hba) ? FAILED : SUCCESS;
}

static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev,
					int queue_depth, int reason)
{
	struct hptiop_hba *hba = (struct hptiop_hba *)sdev->host->hostdata;

	if (reason != SCSI_QDEPTH_DEFAULT)
		return -EOPNOTSUPP;

	if (queue_depth > hba->max_requests)
		queue_depth = hba->max_requests;
	scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
	return queue_depth;
}

static ssize_t hptiop_show_version(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", driver_ver);
}

static ssize_t hptiop_show_fw_version(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *host = class_to_shost(dev);
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

	return snprintf(buf, PAGE_SIZE, "%d.%d.%d.%d\n",
			hba->firmware_version >> 24,
			(hba->firmware_version >> 16) & 0xff,
			(hba->firmware_version >> 8) & 0xff,
			hba->firmware_version & 0xff);
}

static struct device_attribute hptiop_attr_version = {
	.attr = {
		.name = "driver-version",
		.mode = S_IRUGO,
	},
	.show = hptiop_show_version,
};

static struct device_attribute hptiop_attr_fw_version = {
	.attr = {
		.name = "firmware-version",
		.mode = S_IRUGO,
	},
	.show = hptiop_show_fw_version,
};

static struct device_attribute *hptiop_attrs[] = {
	&hptiop_attr_version,
	&hptiop_attr_fw_version,
	NULL
};

static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = driver_name,
	.queuecommand = hptiop_queuecommand,
	.eh_device_reset_handler = hptiop_reset,
	.eh_bus_reset_handler = hptiop_reset,
	.info = hptiop_info,
	.emulated = 0,
	.use_clustering = ENABLE_CLUSTERING,
	.proc_name = driver_name,
	.shost_attrs = hptiop_attrs,
	.this_id = -1,
	.change_queue_depth = hptiop_adjust_disk_queue_depth,
};

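/*
 * MV boards need a 0x800-byte DMA-coherent scratch buffer for the internal
 * (get/set config) requests posted through the message unit.
 */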
static int hptiop_internal_memalloc_mv(struct hptiop_hba *hba)
{
	hba->u.mv.internal_req = dma_alloc_coherent(&hba->pcidev->dev,
			0x800, &hba->u.mv.internal_req_phy, GFP_KERNEL);
	if (hba->u.mv.internal_req)
		return 0;
	else
		return -1;
}

static int hptiop_internal_memfree_mv(struct hptiop_hba *hba)
{
	if (hba->u.mv.internal_req) {
		dma_free_coherent(&hba->pcidev->dev, 0x800,
			hba->u.mv.internal_req, hba->u.mv.internal_req_phy);
		return 0;
	} else
		return -1;
}

static int __devinit hptiop_probe(struct pci_dev *pcidev,
					const struct pci_device_id *id)
{
	struct Scsi_Host *host = NULL;
	struct hptiop_hba *hba;
	struct hpt_iop_request_get_config iop_config;
	struct hpt_iop_request_set_config set_config;
	dma_addr_t start_phy;
	void *start_virt;
	u32 offset, i, req_size;

	dprintk("hptiop_probe(%p)\n", pcidev);

	if (pci_enable_device(pcidev)) {
		printk(KERN_ERR "hptiop: fail to enable pci device\n");
		return -ENODEV;
	}

	printk(KERN_INFO "adapter at PCI %d:%d:%d, IRQ %d\n",
		pcidev->bus->number, pcidev->devfn >> 3, pcidev->devfn & 7,
		pcidev->irq);

	pci_set_master(pcidev);

	/* Enable 64bit DMA if possible */
	if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(64))) {
		if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(32))) {
			printk(KERN_ERR "hptiop: fail to set dma_mask\n");
			goto disable_pci_device;
		}
	}

	if (pci_request_regions(pcidev, driver_name)) {
		printk(KERN_ERR "hptiop: pci_request_regions failed\n");
		goto disable_pci_device;
	}

	host = scsi_host_alloc(&driver_template, sizeof(struct hptiop_hba));
	if (!host) {
		printk(KERN_ERR "hptiop: fail to alloc scsi host\n");
		goto free_pci_regions;
	}

	hba = (struct hptiop_hba *)host->hostdata;

	hba->ops = (struct hptiop_adapter_ops *)id->driver_data;
	hba->pcidev = pcidev;
	hba->host = host;
	hba->initialized = 0;
	hba->iopintf_v2 = 0;

	atomic_set(&hba->resetting, 0);
	atomic_set(&hba->reset_count, 0);

	init_waitqueue_head(&hba->reset_wq);
	init_waitqueue_head(&hba->ioctl_wq);

	host->max_lun = 1;
	host->max_channel = 0;
	host->io_port = 0;
	host->n_io_port = 0;
	host->irq = pcidev->irq;

	if (hba->ops->map_pci_bar(hba))
		goto free_scsi_host;

	if (hba->ops->iop_wait_ready(hba, 20000)) {
		printk(KERN_ERR "scsi%d: firmware not ready\n",
				hba->host->host_no);
		goto unmap_pci_bar;
	}

	if (hba->ops->internal_memalloc) {
		if (hba->ops->internal_memalloc(hba)) {
			printk(KERN_ERR "scsi%d: internal_memalloc failed\n",
				hba->host->host_no);
			goto unmap_pci_bar;
		}
	}

	if (hba->ops->get_config(hba, &iop_config)) {
		printk(KERN_ERR "scsi%d: get config failed\n",
				hba->host->host_no);
		goto unmap_pci_bar;
	}

	hba->max_requests = min(le32_to_cpu(iop_config.max_requests),
				HPTIOP_MAX_REQUESTS);
	hba->max_devices = le32_to_cpu(iop_config.max_devices);
	hba->max_request_size = le32_to_cpu(iop_config.request_size);
	hba->max_sg_descriptors = le32_to_cpu(iop_config.max_sg_count);
	hba->firmware_version = le32_to_cpu(iop_config.firmware_version);
	hba->interface_version = le32_to_cpu(iop_config.interface_version);
	hba->sdram_size = le32_to_cpu(iop_config.sdram_size);

	if (hba->firmware_version > 0x01020000 ||
			hba->interface_version > 0x01020000)
		hba->iopintf_v2 = 1;

	host->max_sectors = le32_to_cpu(iop_config.data_transfer_length) >> 9;
	host->max_id = le32_to_cpu(iop_config.max_devices);
	host->sg_tablesize = le32_to_cpu(iop_config.max_sg_count);
	host->can_queue = le32_to_cpu(iop_config.max_requests);
	host->cmd_per_lun = le32_to_cpu(iop_config.max_requests);
	host->max_cmd_len = 16;

	req_size = sizeof(struct hpt_iop_request_scsi_command)
		+ sizeof(struct hpt_iopsg) * (hba->max_sg_descriptors - 1);
	if ((req_size & 0x1f) != 0)
		req_size = (req_size + 0x1f) & ~0x1f;

	memset(&set_config, 0, sizeof(struct hpt_iop_request_set_config));
	set_config.iop_id = cpu_to_le32(host->host_no);
	set_config.vbus_id = cpu_to_le16(host->host_no);
	set_config.max_host_request_size = cpu_to_le16(req_size);

	if (hba->ops->set_config(hba, &set_config)) {
		printk(KERN_ERR "scsi%d: set config failed\n",
				hba->host->host_no);
		goto unmap_pci_bar;
	}

	pci_set_drvdata(pcidev, host);

	if (request_irq(pcidev->irq, hptiop_intr, IRQF_SHARED,
					driver_name, hba)) {
		printk(KERN_ERR "scsi%d: request irq %d failed\n",
					hba->host->host_no, pcidev->irq);
		goto unmap_pci_bar;
	}

	/* Allocate request mem */

	dprintk("req_size=%d, max_requests=%d\n", req_size, hba->max_requests);

	hba->req_size = req_size;
	start_virt = dma_alloc_coherent(&pcidev->dev,
			hba->req_size*hba->max_requests + 0x20,
			&start_phy, GFP_KERNEL);

	if (!start_virt) {
		printk(KERN_ERR "scsi%d: fail to alloc request mem\n",
					hba->host->host_no);
		goto free_request_irq;
	}

	hba->dma_coherent = start_virt;
	hba->dma_coherent_handle = start_phy;

	if ((start_phy & 0x1f) != 0) {
		offset = ((start_phy + 0x1f) & ~0x1f) - start_phy;
		start_phy += offset;
		start_virt += offset;
	}

	hba->req_list = start_virt;
	for (i = 0; i < hba->max_requests; i++) {
		hba->reqs[i].next = NULL;
		hba->reqs[i].req_virt = start_virt;
		hba->reqs[i].req_shifted_phy = start_phy >> 5;
		hba->reqs[i].index = i;
		free_req(hba, &hba->reqs[i]);
		start_virt = (char *)start_virt + hba->req_size;
		start_phy = start_phy + hba->req_size;
	}

	/* Enable Interrupt and start background task */
	if (hptiop_initialize_iop(hba))
		goto free_request_mem;

	if (scsi_add_host(host, &pcidev->dev)) {
		printk(KERN_ERR "scsi%d: scsi_add_host failed\n",
					hba->host->host_no);
		goto free_request_mem;
	}

	scsi_scan_host(host);

	dprintk("scsi%d: hptiop_probe successfully\n", hba->host->host_no);
	return 0;

free_request_mem:
	dma_free_coherent(&hba->pcidev->dev,
			hba->req_size * hba->max_requests + 0x20,
			hba->dma_coherent, hba->dma_coherent_handle);

free_request_irq:
	free_irq(hba->pcidev->irq, hba);

unmap_pci_bar:
	if (hba->ops->internal_memfree)
		hba->ops->internal_memfree(hba);

	hba->ops->unmap_pci_bar(hba);

free_scsi_host:
	scsi_host_put(host);

free_pci_regions:
	pci_release_regions(pcidev);

disable_pci_device:
	pci_disable_device(pcidev);

	dprintk("scsi%d: hptiop_probe fail\n", host ? host->host_no : 0);
	return -ENODEV;
}

static void hptiop_shutdown(struct pci_dev *pcidev)
{
	struct Scsi_Host *host = pci_get_drvdata(pcidev);
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

	dprintk("hptiop_shutdown(%p)\n", hba);

	/* stop the iop */
	if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
		printk(KERN_ERR "scsi%d: shutdown the iop timeout\n",
					hba->host->host_no);

	/* disable all outbound interrupts */
	hba->ops->disable_intr(hba);
}

static void hptiop_disable_intr_itl(struct hptiop_hba *hba)
{
	u32 int_mask;

	int_mask = readl(&hba->u.itl.iop->outbound_intmask);
	writel(int_mask |
		IOPMU_OUTBOUND_INT_MSG0 | IOPMU_OUTBOUND_INT_POSTQUEUE,
		&hba->u.itl.iop->outbound_intmask);
	readl(&hba->u.itl.iop->outbound_intmask);
}

static void hptiop_disable_intr_mv(struct hptiop_hba *hba)
{
	writel(0, &hba->u.mv.regs->outbound_intmask);
	readl(&hba->u.mv.regs->outbound_intmask);
}

static void hptiop_remove(struct pci_dev *pcidev)
{
	struct Scsi_Host *host = pci_get_drvdata(pcidev);
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

	dprintk("scsi%d: hptiop_remove\n", hba->host->host_no);

	scsi_remove_host(host);

	hptiop_shutdown(pcidev);

	free_irq(hba->pcidev->irq, hba);

	dma_free_coherent(&hba->pcidev->dev,
			hba->req_size * hba->max_requests + 0x20,
			hba->dma_coherent,
			hba->dma_coherent_handle);

	if (hba->ops->internal_memfree)
		hba->ops->internal_memfree(hba);

	hba->ops->unmap_pci_bar(hba);

	pci_release_regions(hba->pcidev);
	pci_set_drvdata(hba->pcidev, NULL);
	pci_disable_device(hba->pcidev);

	scsi_host_put(host);
}

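/*
 * One operation table per messaging interface (ITL and MV); the PCI ID
 * table below points id->driver_data at the matching table.
 */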
static struct hptiop_adapter_ops hptiop_itl_ops = {
	.iop_wait_ready = iop_wait_ready_itl,
	.internal_memalloc = NULL,
	.internal_memfree = NULL,
	.map_pci_bar = hptiop_map_pci_bar_itl,
	.unmap_pci_bar = hptiop_unmap_pci_bar_itl,
	.enable_intr = hptiop_enable_intr_itl,
	.disable_intr = hptiop_disable_intr_itl,
	.get_config = iop_get_config_itl,
	.set_config = iop_set_config_itl,
	.iop_intr = iop_intr_itl,
	.post_msg = hptiop_post_msg_itl,
	.post_req = hptiop_post_req_itl,
};

static struct hptiop_adapter_ops hptiop_mv_ops = {
	.iop_wait_ready = iop_wait_ready_mv,
	.internal_memalloc = hptiop_internal_memalloc_mv,
	.internal_memfree = hptiop_internal_memfree_mv,
	.map_pci_bar = hptiop_map_pci_bar_mv,
	.unmap_pci_bar = hptiop_unmap_pci_bar_mv,
	.enable_intr = hptiop_enable_intr_mv,
	.disable_intr = hptiop_disable_intr_mv,
	.get_config = iop_get_config_mv,
	.set_config = iop_set_config_mv,
	.iop_intr = iop_intr_mv,
	.post_msg = hptiop_post_msg_mv,
	.post_req = hptiop_post_req_mv,
};

static struct pci_device_id hptiop_id_table[] = {
	{ PCI_VDEVICE(TTI, 0x3220), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3320), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3410), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3510), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3511), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3520), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3521), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3522), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3530), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3540), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3560), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4210), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4211), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4310), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4311), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4320), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4321), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4322), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4400), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3120), (kernel_ulong_t)&hptiop_mv_ops },
	{ PCI_VDEVICE(TTI, 0x3122), (kernel_ulong_t)&hptiop_mv_ops },
	{ PCI_VDEVICE(TTI, 0x3020), (kernel_ulong_t)&hptiop_mv_ops },
	{},
};

MODULE_DEVICE_TABLE(pci, hptiop_id_table);

static struct pci_driver hptiop_pci_driver = {
	.name = driver_name,
	.id_table = hptiop_id_table,
	.probe = hptiop_probe,
	.remove = hptiop_remove,
	.shutdown = hptiop_shutdown,
};

static int __init hptiop_module_init(void)
{
	printk(KERN_INFO "%s %s\n", driver_name_long, driver_ver);
	return pci_register_driver(&hptiop_pci_driver);
}

static void __exit hptiop_module_exit(void)
{
	pci_unregister_driver(&hptiop_pci_driver);
}

module_init(hptiop_module_init);
module_exit(hptiop_module_exit);

MODULE_LICENSE("GPL");