/*
 * HighPoint RR3xxx/4xxx controller driver for Linux
 * Copyright (C) 2006-2007 HighPoint Technologies, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Please report bugs/comments/suggestions to linux@highpoint-tech.com
 *
 * For more information, visit http://www.highpoint-tech.com
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/div64.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>

#include "hptiop.h"

MODULE_AUTHOR("HighPoint Technologies, Inc.");
MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx/4xxx Controller Driver");

static char driver_name[] = "hptiop";
static const char driver_name_long[] = "RocketRAID 3xxx/4xxx Controller driver";
static const char driver_ver[] = "v1.3 (071203)";

static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec);
static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
		struct hpt_iop_request_scsi_command *req);
static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 tag);
static void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag);
static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg);
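/*
 * Two hardware interface flavors are supported: "itl" (Intel IOP-style
 * message unit) and "mv" (Marvell-style message unit).  All
 * flavor-specific operations are dispatched through hba->ops, a
 * struct hptiop_adapter_ops bound to the PCI device ID at probe time.
 */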
static int iop_wait_ready_itl(struct hptiop_hba *hba, u32 millisec)
{
	u32 req = 0;
	int i;

	for (i = 0; i < millisec; i++) {
		req = readl(&hba->u.itl.iop->inbound_queue);
		if (req != IOPMU_QUEUE_EMPTY)
			break;
		msleep(1);
	}

	if (req != IOPMU_QUEUE_EMPTY) {
		writel(req, &hba->u.itl.iop->outbound_queue);
		readl(&hba->u.itl.iop->outbound_intstatus);
		return 0;
	}

	return -1;
}

static int iop_wait_ready_mv(struct hptiop_hba *hba, u32 millisec)
{
	return iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec);
}
static void hptiop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
{
	if (tag & IOPMU_QUEUE_ADDR_HOST_BIT)
		hptiop_host_request_callback_itl(hba,
				tag & ~IOPMU_QUEUE_ADDR_HOST_BIT);
	else
		hptiop_iop_request_callback_itl(hba, tag);
}
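/*
 * ITL outbound queue entries are either host request tags
 * (IOPMU_QUEUE_ADDR_HOST_BIT set) or byte offsets of IOP-owned request
 * frames within the BAR0 window.  A synchronous request is acknowledged
 * by setting its context word so the polling sender can see it complete.
 */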
static void hptiop_drain_outbound_queue_itl(struct hptiop_hba *hba)
{
	u32 req;

	while ((req = readl(&hba->u.itl.iop->outbound_queue)) !=
			IOPMU_QUEUE_EMPTY) {

		if (req & IOPMU_QUEUE_MASK_HOST_BITS)
			hptiop_request_callback_itl(hba, req);
		else {
			struct hpt_iop_request_header __iomem *p;

			p = (struct hpt_iop_request_header __iomem *)
				((char __iomem *)hba->u.itl.iop + req);

			if (readl(&p->flags) & IOP_REQUEST_FLAG_SYNC_REQUEST) {
				if (readl(&p->context))
					hptiop_request_callback_itl(hba, req);
				else
					writel(1, &p->context);
			} else
				hptiop_request_callback_itl(hba, req);
		}
	}
}
static int iop_intr_itl(struct hptiop_hba *hba)
{
	struct hpt_iopmu_itl __iomem *iop = hba->u.itl.iop;
	u32 status;
	int ret = 0;

	status = readl(&iop->outbound_intstatus);

	if (status & IOPMU_OUTBOUND_INT_MSG0) {
		u32 msg = readl(&iop->outbound_msgaddr0);

		dprintk("received outbound msg %x\n", msg);
		writel(IOPMU_OUTBOUND_INT_MSG0, &iop->outbound_intstatus);
		hptiop_message_callback(hba, msg);
		ret = 1;
	}

	if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
		hptiop_drain_outbound_queue_itl(hba);
		ret = 1;
	}

	return ret;
}
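/*
 * The MV message unit exchanges 64-bit entries through circular
 * head/tail queues of MVIOP_QUEUE_LEN slots; a doorbell write tells
 * the IOP that the inbound queue has new entries.
 */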
static u64 mv_outbound_read(struct hpt_iopmu_mv __iomem *mu)
{
	u32 outbound_tail = readl(&mu->outbound_tail);
	u32 outbound_head = readl(&mu->outbound_head);

	if (outbound_tail != outbound_head) {
		u64 p;

		memcpy_fromio(&p, &mu->outbound_q[mu->outbound_tail], 8);
		outbound_tail++;

		if (outbound_tail == MVIOP_QUEUE_LEN)
			outbound_tail = 0;
		writel(outbound_tail, &mu->outbound_tail);
		return p;
	} else
		return 0;
}

static void mv_inbound_write(u64 p, struct hptiop_hba *hba)
{
	u32 inbound_head = readl(&hba->u.mv.mu->inbound_head);
	u32 head = inbound_head + 1;

	if (head == MVIOP_QUEUE_LEN)
		head = 0;

	memcpy_toio(&hba->u.mv.mu->inbound_q[inbound_head], &p, 8);
	writel(head, &hba->u.mv.mu->inbound_head);
	writel(MVIOP_MU_INBOUND_INT_POSTQUEUE,
		&hba->u.mv.regs->inbound_doorbell);
}
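/*
 * MV completion tags encode the request type in bits 5-7 and, for SCSI
 * commands, the request index in bits 8 and above.
 */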
static void hptiop_request_callback_mv(struct hptiop_hba *hba, u64 tag)
{
	u32 req_type = (tag >> 5) & 0x7;
	struct hpt_iop_request_scsi_command *req;

	dprintk("hptiop_request_callback_mv: tag=%llx\n", tag);

	BUG_ON((tag & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) == 0);

	switch (req_type) {
	case IOP_REQUEST_TYPE_GET_CONFIG:
	case IOP_REQUEST_TYPE_SET_CONFIG:
		hba->msg_done = 1;
		break;

	case IOP_REQUEST_TYPE_SCSI_COMMAND:
		req = hba->reqs[tag >> 8].req_virt;
		if (likely(tag & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT))
			req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);

		hptiop_finish_scsi_req(hba, tag >> 8, req);
		break;

	default:
		break;
	}
}

static int iop_intr_mv(struct hptiop_hba *hba)
{
	u32 status;
	int ret = 0;

	status = readl(&hba->u.mv.regs->outbound_doorbell);
	writel(~status, &hba->u.mv.regs->outbound_doorbell);

	if (status & MVIOP_MU_OUTBOUND_INT_MSG) {
		u32 msg;
		msg = readl(&hba->u.mv.mu->outbound_msg);
		dprintk("received outbound msg %x\n", msg);
		hptiop_message_callback(hba, msg);
		ret = 1;
	}

	if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) {
		u64 tag;

		while ((tag = mv_outbound_read(hba->u.mv.mu)))
			hptiop_request_callback_mv(hba, tag);
		ret = 1;
	}

	return ret;
}
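/*
 * Synchronous (polled) submission, used during setup: the ITL variant
 * polls the request's context word, the MV variant polls hba->msg_done,
 * both calling the interrupt handler by hand and giving up after
 * roughly @millisec milliseconds.
 */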
static int iop_send_sync_request_itl(struct hptiop_hba *hba,
					void __iomem *_req, u32 millisec)
{
	struct hpt_iop_request_header __iomem *req = _req;
	u32 i;

	writel(readl(&req->flags) | IOP_REQUEST_FLAG_SYNC_REQUEST, &req->flags);
	writel(0, &req->context);
	writel((unsigned long)req - (unsigned long)hba->u.itl.iop,
			&hba->u.itl.iop->inbound_queue);
	readl(&hba->u.itl.iop->outbound_intstatus);

	for (i = 0; i < millisec; i++) {
		iop_intr_itl(hba);
		if (readl(&req->context))
			return 0;
		msleep(1);
	}

	return -1;
}

static int iop_send_sync_request_mv(struct hptiop_hba *hba,
					u32 size_bits, u32 millisec)
{
	struct hpt_iop_request_header *reqhdr = hba->u.mv.internal_req;
	u32 i;

	hba->msg_done = 0;
	reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_SYNC_REQUEST);
	mv_inbound_write(hba->u.mv.internal_req_phy |
			MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bits, hba);

	for (i = 0; i < millisec; i++) {
		iop_intr_mv(hba);
		if (hba->msg_done)
			return 0;
		msleep(1);
	}
	return -1;
}

static void hptiop_post_msg_itl(struct hptiop_hba *hba, u32 msg)
{
	writel(msg, &hba->u.itl.iop->inbound_msgaddr0);
	readl(&hba->u.itl.iop->outbound_intstatus);
}

static void hptiop_post_msg_mv(struct hptiop_hba *hba, u32 msg)
{
	writel(msg, &hba->u.mv.mu->inbound_msg);
	writel(MVIOP_MU_INBOUND_INT_MSG, &hba->u.mv.regs->inbound_doorbell);
	readl(&hba->u.mv.regs->inbound_doorbell);
}
static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec)
{
	u32 i;

	hba->msg_done = 0;
	hba->ops->post_msg(hba, msg);

	for (i = 0; i < millisec; i++) {
		spin_lock_irq(hba->host->host_lock);
		hba->ops->iop_intr(hba);
		spin_unlock_irq(hba->host->host_lock);
		if (hba->msg_done)
			break;
		msleep(1);
	}

	return hba->msg_done ? 0 : -1;
}
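/*
 * GET_CONFIG and SET_CONFIG are exchanged synchronously at probe time:
 * the ITL variants build the request in an IOP-supplied frame, while
 * the MV variants reuse the preallocated internal_req buffer.
 */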
static int iop_get_config_itl(struct hptiop_hba *hba,
				struct hpt_iop_request_get_config *config)
{
	u32 req32;
	struct hpt_iop_request_get_config __iomem *req;

	req32 = readl(&hba->u.itl.iop->inbound_queue);
	if (req32 == IOPMU_QUEUE_EMPTY)
		return -1;

	req = (struct hpt_iop_request_get_config __iomem *)
			((unsigned long)hba->u.itl.iop + req32);

	writel(0, &req->header.flags);
	writel(IOP_REQUEST_TYPE_GET_CONFIG, &req->header.type);
	writel(sizeof(struct hpt_iop_request_get_config), &req->header.size);
	writel(IOP_RESULT_PENDING, &req->header.result);

	if (iop_send_sync_request_itl(hba, req, 20000)) {
		dprintk("Get config send cmd failed\n");
		return -1;
	}

	memcpy_fromio(config, req, sizeof(*config));
	writel(req32, &hba->u.itl.iop->outbound_queue);
	return 0;
}

static int iop_get_config_mv(struct hptiop_hba *hba,
				struct hpt_iop_request_get_config *config)
{
	struct hpt_iop_request_get_config *req = hba->u.mv.internal_req;

	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG);
	req->header.size =
		cpu_to_le32(sizeof(struct hpt_iop_request_get_config));
	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
	req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG<<5);
	req->header.context_hi32 = 0;

	if (iop_send_sync_request_mv(hba, 0, 20000)) {
		dprintk("Get config send cmd failed\n");
		return -1;
	}

	memcpy(config, req, sizeof(struct hpt_iop_request_get_config));
	return 0;
}

static int iop_set_config_itl(struct hptiop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	u32 req32;
	struct hpt_iop_request_set_config __iomem *req;

	req32 = readl(&hba->u.itl.iop->inbound_queue);
	if (req32 == IOPMU_QUEUE_EMPTY)
		return -1;

	req = (struct hpt_iop_request_set_config __iomem *)
			((unsigned long)hba->u.itl.iop + req32);

	memcpy_toio((u8 __iomem *)req + sizeof(struct hpt_iop_request_header),
		(u8 *)config + sizeof(struct hpt_iop_request_header),
		sizeof(struct hpt_iop_request_set_config) -
			sizeof(struct hpt_iop_request_header));

	writel(0, &req->header.flags);
	writel(IOP_REQUEST_TYPE_SET_CONFIG, &req->header.type);
	writel(sizeof(struct hpt_iop_request_set_config), &req->header.size);
	writel(IOP_RESULT_PENDING, &req->header.result);

	if (iop_send_sync_request_itl(hba, req, 20000)) {
		dprintk("Set config send cmd failed\n");
		return -1;
	}

	writel(req32, &hba->u.itl.iop->outbound_queue);
	return 0;
}

static int iop_set_config_mv(struct hptiop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	struct hpt_iop_request_set_config *req = hba->u.mv.internal_req;

	memcpy(req, config, sizeof(struct hpt_iop_request_set_config));
	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG);
	req->header.size =
		cpu_to_le32(sizeof(struct hpt_iop_request_set_config));
	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
	req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG<<5);
	req->header.context_hi32 = 0;

	if (iop_send_sync_request_mv(hba, 0, 20000)) {
		dprintk("Set config send cmd failed\n");
		return -1;
	}

	return 0;
}
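/*
 * Note the opposite mask polarity: ITL masks the interrupts whose bits
 * are set in outbound_intmask, so enabling writes the complement; MV
 * enables the interrupts whose bits are set.
 */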
static void hptiop_enable_intr_itl(struct hptiop_hba *hba)
{
	writel(~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0),
		&hba->u.itl.iop->outbound_intmask);
}

static void hptiop_enable_intr_mv(struct hptiop_hba *hba)
{
	writel(MVIOP_MU_OUTBOUND_INT_POSTQUEUE | MVIOP_MU_OUTBOUND_INT_MSG,
		&hba->u.mv.regs->outbound_intmask);
}

static int hptiop_initialize_iop(struct hptiop_hba *hba)
{
	/* enable interrupts */
	hba->ops->enable_intr(hba);

	hba->initialized = 1;

	/* start background tasks */
	if (iop_send_sync_msg(hba,
			IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
		printk(KERN_ERR "scsi%d: fail to start background task\n",
			hba->host->host_no);
		return -1;
	}
	return 0;
}
static void __iomem *hptiop_map_pci_bar(struct hptiop_hba *hba, int index)
{
	u32 mem_base_phy, length;
	void __iomem *mem_base_virt;

	struct pci_dev *pcidev = hba->pcidev;

	if (!(pci_resource_flags(pcidev, index) & IORESOURCE_MEM)) {
		printk(KERN_ERR "scsi%d: pci resource invalid\n",
				hba->host->host_no);
		return NULL;
	}

	mem_base_phy = pci_resource_start(pcidev, index);
	length = pci_resource_len(pcidev, index);
	mem_base_virt = ioremap(mem_base_phy, length);

	if (!mem_base_virt) {
		printk(KERN_ERR "scsi%d: Fail to ioremap memory space\n",
				hba->host->host_no);
		return NULL;
	}
	return mem_base_virt;
}

static int hptiop_map_pci_bar_itl(struct hptiop_hba *hba)
{
	hba->u.itl.iop = hptiop_map_pci_bar(hba, 0);
	if (hba->u.itl.iop)
		return 0;
	else
		return -1;
}

static void hptiop_unmap_pci_bar_itl(struct hptiop_hba *hba)
{
	iounmap(hba->u.itl.iop);
}
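/* MV adapters expose controller registers in BAR0 and the message unit in BAR2. */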
static int hptiop_map_pci_bar_mv(struct hptiop_hba *hba)
{
	hba->u.mv.regs = hptiop_map_pci_bar(hba, 0);
	if (hba->u.mv.regs == NULL)
		return -1;

	hba->u.mv.mu = hptiop_map_pci_bar(hba, 2);
	if (hba->u.mv.mu == NULL) {
		iounmap(hba->u.mv.regs);
		return -1;
	}

	return 0;
}

static void hptiop_unmap_pci_bar_mv(struct hptiop_hba *hba)
{
	iounmap(hba->u.mv.regs);
	iounmap(hba->u.mv.mu);
}
static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg)
{
	dprintk("iop message 0x%x\n", msg);

	if (msg == IOPMU_INBOUND_MSG0_NOP)
		hba->msg_done = 1;

	if (!hba->initialized)
		return;

	if (msg == IOPMU_INBOUND_MSG0_RESET) {
		atomic_set(&hba->resetting, 0);
		wake_up(&hba->reset_wq);
	} else if (msg <= IOPMU_INBOUND_MSG0_MAX)
		hba->msg_done = 1;
}
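/*
 * Request frames are recycled through a simple singly linked free
 * list; callers already run under the SCSI host lock, so the list
 * needs no locking of its own.
 */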
static struct hptiop_request *get_req(struct hptiop_hba *hba)
{
	struct hptiop_request *ret;

	dprintk("get_req : req=%p\n", hba->req_list);

	ret = hba->req_list;
	if (ret)
		hba->req_list = ret->next;

	return ret;
}

static void free_req(struct hptiop_hba *hba, struct hptiop_request *req)
{
	dprintk("free_req(%d, %p)\n", req->index, req);
	req->next = hba->req_list;
	hba->req_list = req;
}
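/*
 * Translate the IOP result code into a SCSI midlayer result and
 * complete the command.  For CHECK CONDITION the sense data is
 * returned in place of the S/G list area of the request frame.
 */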
static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
				struct hpt_iop_request_scsi_command *req)
{
	struct scsi_cmnd *scp;

	dprintk("hptiop_finish_scsi_req: req=%p, type=%d, "
			"result=%d, context=0x%x tag=%d\n",
			req, req->header.type, req->header.result,
			req->header.context, tag);

	BUG_ON(!req->header.result);
	BUG_ON(req->header.type != cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND));

	scp = hba->reqs[tag].scp;

	if (HPT_SCP(scp)->mapped)
		scsi_dma_unmap(scp);

	switch (le32_to_cpu(req->header.result)) {
	case IOP_RESULT_SUCCESS:
		scsi_set_resid(scp,
			scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
		scp->result = (DID_OK<<16);
		break;
	case IOP_RESULT_BAD_TARGET:
		scp->result = (DID_BAD_TARGET<<16);
		break;
	case IOP_RESULT_BUSY:
		scp->result = (DID_BUS_BUSY<<16);
		break;
	case IOP_RESULT_RESET:
		scp->result = (DID_RESET<<16);
		break;
	case IOP_RESULT_FAIL:
		scp->result = (DID_ERROR<<16);
		break;
	case IOP_RESULT_INVALID_REQUEST:
		scp->result = (DID_ABORT<<16);
		break;
	case IOP_RESULT_CHECK_CONDITION:
		scsi_set_resid(scp,
			scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
		scp->result = SAM_STAT_CHECK_CONDITION;
		memcpy(scp->sense_buffer, &req->sg_list,
				min_t(size_t, SCSI_SENSE_BUFFERSIZE,
					le32_to_cpu(req->dataxfer_length)));
		break;

	default:
		scp->result = ((DRIVER_INVALID|SUGGEST_ABORT)<<24) |
					(DID_ABORT<<16);
		break;
	}

	dprintk("scsi_done(%p)\n", scp);
	scp->scsi_done(scp);
	free_req(hba, &hba->reqs[tag]);
}
static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 _tag)
{
	struct hpt_iop_request_scsi_command *req;
	u32 tag;

	if (hba->iopintf_v2) {
		tag = _tag & ~IOPMU_QUEUE_REQUEST_RESULT_BIT;
		req = hba->reqs[tag].req_virt;
		if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT))
			req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);
	} else {
		tag = _tag;
		req = hba->reqs[tag].req_virt;
	}

	hptiop_finish_scsi_req(hba, tag, req);
}
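/*
 * Completion path for IOCTL requests built in IOP memory: copy any
 * output payload back to the caller described by the saved context
 * pointer, signal completion via arg->done(), then hand the frame
 * back to the IOP through the outbound queue register.
 */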
void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
{
	struct hpt_iop_request_header __iomem *req;
	struct hpt_iop_request_ioctl_command __iomem *p;
	struct hpt_ioctl_k *arg;

	req = (struct hpt_iop_request_header __iomem *)
			((unsigned long)hba->u.itl.iop + tag);
	dprintk("hptiop_iop_request_callback_itl: req=%p, type=%d, "
			"result=%d, context=0x%x tag=%d\n",
			req, readl(&req->type), readl(&req->result),
			readl(&req->context), tag);

	BUG_ON(!readl(&req->result));
	BUG_ON(readl(&req->type) != IOP_REQUEST_TYPE_IOCTL_COMMAND);

	p = (struct hpt_iop_request_ioctl_command __iomem *)req;
	arg = (struct hpt_ioctl_k *)(unsigned long)
		(readl(&req->context) |
			((u64)readl(&req->context_hi32)<<32));

	if (readl(&req->result) == IOP_RESULT_SUCCESS) {
		arg->result = HPT_IOCTL_RESULT_OK;

		if (arg->outbuf_size)
			memcpy_fromio(arg->outbuf,
				&p->buf[(readl(&p->inbuf_size) + 3) & ~3],
				arg->outbuf_size);

		if (arg->bytes_returned)
			*arg->bytes_returned = arg->outbuf_size;
	} else
		arg->result = HPT_IOCTL_RESULT_FAILED;

	arg->done(arg);
	writel(tag, &hba->u.itl.iop->outbound_queue);
}
static irqreturn_t hptiop_intr(int irq, void *dev_id)
{
	struct hptiop_hba *hba = dev_id;
	int handled;
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	handled = hba->ops->iop_intr(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return handled;
}
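/*
 * Map the command's scatterlist for DMA and convert it into the IOP's
 * hardware S/G descriptor format, flagging the final entry with the
 * end-of-table bit.
 */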
static int hptiop_buildsgl(struct scsi_cmnd *scp, struct hpt_iopsg *psg)
{
	struct Scsi_Host *host = scp->device->host;
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
	struct scatterlist *sg;
	int idx, nseg;

	nseg = scsi_dma_map(scp);
	BUG_ON(nseg < 0);
	if (!nseg)
		return 0;

	HPT_SCP(scp)->sgcnt = nseg;
	HPT_SCP(scp)->mapped = 1;

	BUG_ON(HPT_SCP(scp)->sgcnt > hba->max_sg_descriptors);

	scsi_for_each_sg(scp, sg, HPT_SCP(scp)->sgcnt, idx) {
		psg[idx].pci_address = cpu_to_le64(sg_dma_address(sg));
		psg[idx].size = cpu_to_le32(sg_dma_len(sg));
		psg[idx].eot = (idx == HPT_SCP(scp)->sgcnt - 1) ?
			cpu_to_le32(1) : 0;
	}
	return HPT_SCP(scp)->sgcnt;
}
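/*
 * Posting a request also tells the IOP how big the frame is: ITL v2
 * interfaces and MV hardware encode a size hint in the low bits next
 * to the physical address of the request, shifted right by 5 bits.
 */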
static void hptiop_post_req_itl(struct hptiop_hba *hba,
					struct hptiop_request *_req)
{
	struct hpt_iop_request_header *reqhdr = _req->req_virt;

	reqhdr->context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
					(u32)_req->index);
	reqhdr->context_hi32 = 0;

	if (hba->iopintf_v2) {
		u32 size, size_bits;

		size = le32_to_cpu(reqhdr->size);
		if (size < 256)
			size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
		else if (size < 512)
			size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
		else
			size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT |
					IOPMU_QUEUE_ADDR_HOST_BIT;
		writel(_req->req_shifted_phy | size_bits,
			&hba->u.itl.iop->inbound_queue);
	} else
		writel(_req->req_shifted_phy | IOPMU_QUEUE_ADDR_HOST_BIT,
			&hba->u.itl.iop->inbound_queue);
}

static void hptiop_post_req_mv(struct hptiop_hba *hba,
					struct hptiop_request *_req)
{
	struct hpt_iop_request_header *reqhdr = _req->req_virt;
	u32 size, size_bit;

	reqhdr->context = cpu_to_le32(_req->index<<8 |
					IOP_REQUEST_TYPE_SCSI_COMMAND<<5);
	reqhdr->context_hi32 = 0;
	size = le32_to_cpu(reqhdr->size);

	if (size <= 256)
		size_bit = 0;
	else if (size <= 256*2)
		size_bit = 1;
	else if (size <= 256*3)
		size_bit = 2;
	else
		size_bit = 3;

	mv_inbound_write((_req->req_shifted_phy << 5) |
		MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bit, hba);
}
static int hptiop_queuecommand(struct scsi_cmnd *scp,
				void (*done)(struct scsi_cmnd *))
{
	struct Scsi_Host *host = scp->device->host;
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
	struct hpt_iop_request_scsi_command *req;
	int sg_count = 0;
	struct hptiop_request *_req;

	BUG_ON(!done);
	scp->scsi_done = done;

	_req = get_req(hba);
	if (_req == NULL) {
		dprintk("hptiop_queuecmd : no free req\n");
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	_req->scp = scp;

	dprintk("hptiop_queuecmd(scp=%p) %d/%d/%d/%d cdb=(%x-%x-%x) "
			"req_index=%d, req=%p\n",
			scp,
			host->host_no, scp->device->channel,
			scp->device->id, scp->device->lun,
			((u32 *)scp->cmnd)[0],
			((u32 *)scp->cmnd)[1],
			((u32 *)scp->cmnd)[2],
			_req->index, _req->req_virt);

	scp->result = 0;

	if (scp->device->channel || scp->device->lun ||
			scp->device->id > hba->max_devices) {
		scp->result = DID_BAD_TARGET << 16;
		free_req(hba, _req);
		goto cmd_done;
	}

	req = _req->req_virt;

	/* build S/G table */
	sg_count = hptiop_buildsgl(scp, req->sg_list);
	if (!sg_count)
		HPT_SCP(scp)->mapped = 0;

	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND);
	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
	req->dataxfer_length = cpu_to_le32(scsi_bufflen(scp));
	req->channel = scp->device->channel;
	req->target = scp->device->id;
	req->lun = scp->device->lun;
	req->header.size = cpu_to_le32(
			sizeof(struct hpt_iop_request_scsi_command)
			- sizeof(struct hpt_iopsg)
			+ sg_count * sizeof(struct hpt_iopsg));

	memcpy(req->cdb, scp->cmnd, sizeof(req->cdb));
	hba->ops->post_req(hba, _req);
	return 0;

cmd_done:
	dprintk("scsi_done(scp=%p)\n", scp);
	scp->scsi_done(scp);
	return 0;
}
static const char *hptiop_info(struct Scsi_Host *host)
{
	return driver_name_long;
}
static int hptiop_reset_hba(struct hptiop_hba *hba)
{
	if (atomic_xchg(&hba->resetting, 1) == 0) {
		atomic_inc(&hba->reset_count);
		hba->ops->post_msg(hba, IOPMU_INBOUND_MSG0_RESET);
	}

	wait_event_timeout(hba->reset_wq,
			atomic_read(&hba->resetting) == 0, 60 * HZ);

	if (atomic_read(&hba->resetting)) {
		/* IOP is in unknown state, abort reset */
		printk(KERN_ERR "scsi%d: reset failed\n", hba->host->host_no);
		return -1;
	}

	if (iop_send_sync_msg(hba,
		IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
		dprintk("scsi%d: fail to start background task\n",
				hba->host->host_no);
	}

	return 0;
}
static int hptiop_reset(struct scsi_cmnd *scp)
{
	struct Scsi_Host *host = scp->device->host;
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

	printk(KERN_WARNING "hptiop_reset(%d/%d/%d) scp=%p\n",
			scp->device->host->host_no, scp->device->channel,
			scp->device->id, scp);

	return hptiop_reset_hba(hba) ? FAILED : SUCCESS;
}

static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev,
						int queue_depth)
{
	struct hptiop_hba *hba = (struct hptiop_hba *)sdev->host->hostdata;

	if (queue_depth > hba->max_requests)
		queue_depth = hba->max_requests;
	scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
	return queue_depth;
}
static ssize_t hptiop_show_version(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", driver_ver);
}

static ssize_t hptiop_show_fw_version(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *host = class_to_shost(dev);
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

	return snprintf(buf, PAGE_SIZE, "%d.%d.%d.%d\n",
				hba->firmware_version >> 24,
				(hba->firmware_version >> 16) & 0xff,
				(hba->firmware_version >> 8) & 0xff,
				hba->firmware_version & 0xff);
}

static struct device_attribute hptiop_attr_version = {
	.attr = {
		.name = "driver-version",
		.mode = S_IRUGO,
	},
	.show = hptiop_show_version,
};

static struct device_attribute hptiop_attr_fw_version = {
	.attr = {
		.name = "firmware-version",
		.mode = S_IRUGO,
	},
	.show = hptiop_show_fw_version,
};

static struct device_attribute *hptiop_attrs[] = {
	&hptiop_attr_version,
	&hptiop_attr_fw_version,
	NULL
};

static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = driver_name,
	.queuecommand = hptiop_queuecommand,
	.eh_device_reset_handler = hptiop_reset,
	.eh_bus_reset_handler = hptiop_reset,
	.info = hptiop_info,
	.emulated = 0,
	.use_clustering = ENABLE_CLUSTERING,
	.proc_name = driver_name,
	.shost_attrs = hptiop_attrs,
	.this_id = -1,
	.change_queue_depth = hptiop_adjust_disk_queue_depth,
};
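/*
 * MV adapters need a 2KB (0x800) DMA-coherent scratch buffer for the
 * internal synchronous request frame; ITL adapters have no such
 * requirement, so their internal_memalloc/internal_memfree hooks stay NULL.
 */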
static int hptiop_internal_memalloc_mv(struct hptiop_hba *hba)
{
	hba->u.mv.internal_req = dma_alloc_coherent(&hba->pcidev->dev,
			0x800, &hba->u.mv.internal_req_phy, GFP_KERNEL);
	if (hba->u.mv.internal_req)
		return 0;
	else
		return -1;
}

static int hptiop_internal_memfree_mv(struct hptiop_hba *hba)
{
	if (hba->u.mv.internal_req) {
		dma_free_coherent(&hba->pcidev->dev, 0x800,
			hba->u.mv.internal_req, hba->u.mv.internal_req_phy);
		return 0;
	} else
		return -1;
}
938 | static int __devinit hptiop_probe(struct pci_dev *pcidev, |
939 | const struct pci_device_id *id) | |
940 | { | |
941 | struct Scsi_Host *host = NULL; | |
942 | struct hptiop_hba *hba; | |
943 | struct hpt_iop_request_get_config iop_config; | |
944 | struct hpt_iop_request_set_config set_config; | |
945 | dma_addr_t start_phy; | |
946 | void *start_virt; | |
947 | u32 offset, i, req_size; | |
948 | ||
949 | dprintk("hptiop_probe(%p)\n", pcidev); | |
950 | ||
951 | if (pci_enable_device(pcidev)) { | |
952 | printk(KERN_ERR "hptiop: fail to enable pci device\n"); | |
953 | return -ENODEV; | |
954 | } | |
955 | ||
956 | printk(KERN_INFO "adapter at PCI %d:%d:%d, IRQ %d\n", | |
957 | pcidev->bus->number, pcidev->devfn >> 3, pcidev->devfn & 7, | |
958 | pcidev->irq); | |
959 | ||
960 | pci_set_master(pcidev); | |
961 | ||
962 | /* Enable 64bit DMA if possible */ | |
963 | if (pci_set_dma_mask(pcidev, DMA_64BIT_MASK)) { | |
964 | if (pci_set_dma_mask(pcidev, DMA_32BIT_MASK)) { | |
965 | printk(KERN_ERR "hptiop: fail to set dma_mask\n"); | |
966 | goto disable_pci_device; | |
967 | } | |
968 | } | |
969 | ||
970 | if (pci_request_regions(pcidev, driver_name)) { | |
971 | printk(KERN_ERR "hptiop: pci_request_regions failed\n"); | |
972 | goto disable_pci_device; | |
973 | } | |
974 | ||
975 | host = scsi_host_alloc(&driver_template, sizeof(struct hptiop_hba)); | |
976 | if (!host) { | |
977 | printk(KERN_ERR "hptiop: fail to alloc scsi host\n"); | |
978 | goto free_pci_regions; | |
979 | } | |
980 | ||
981 | hba = (struct hptiop_hba *)host->hostdata; | |
982 | ||
00f59701 | 983 | hba->ops = (struct hptiop_adapter_ops *)id->driver_data; |
ede1e6f8 HLT |
984 | hba->pcidev = pcidev; |
985 | hba->host = host; | |
986 | hba->initialized = 0; | |
db9b6e89 | 987 | hba->iopintf_v2 = 0; |
ede1e6f8 HLT |
988 | |
989 | atomic_set(&hba->resetting, 0); | |
990 | atomic_set(&hba->reset_count, 0); | |
991 | ||
992 | init_waitqueue_head(&hba->reset_wq); | |
993 | init_waitqueue_head(&hba->ioctl_wq); | |
994 | ||
995 | host->max_lun = 1; | |
996 | host->max_channel = 0; | |
997 | host->io_port = 0; | |
998 | host->n_io_port = 0; | |
999 | host->irq = pcidev->irq; | |
1000 | ||
00f59701 | 1001 | if (hba->ops->map_pci_bar(hba)) |
ede1e6f8 HLT |
1002 | goto free_scsi_host; |
1003 | ||
00f59701 | 1004 | if (hba->ops->iop_wait_ready(hba, 20000)) { |
ede1e6f8 HLT |
1005 | printk(KERN_ERR "scsi%d: firmware not ready\n", |
1006 | hba->host->host_no); | |
1007 | goto unmap_pci_bar; | |
1008 | } | |
1009 | ||
00f59701 HLT |
1010 | if (hba->ops->internal_memalloc) { |
1011 | if (hba->ops->internal_memalloc(hba)) { | |
1012 | printk(KERN_ERR "scsi%d: internal_memalloc failed\n", | |
1013 | hba->host->host_no); | |
1014 | goto unmap_pci_bar; | |
1015 | } | |
1016 | } | |
1017 | ||
1018 | if (hba->ops->get_config(hba, &iop_config)) { | |
ede1e6f8 HLT |
1019 | printk(KERN_ERR "scsi%d: get config failed\n", |
1020 | hba->host->host_no); | |
1021 | goto unmap_pci_bar; | |
1022 | } | |
1023 | ||
1024 | hba->max_requests = min(le32_to_cpu(iop_config.max_requests), | |
1025 | HPTIOP_MAX_REQUESTS); | |
1026 | hba->max_devices = le32_to_cpu(iop_config.max_devices); | |
1027 | hba->max_request_size = le32_to_cpu(iop_config.request_size); | |
1028 | hba->max_sg_descriptors = le32_to_cpu(iop_config.max_sg_count); | |
1029 | hba->firmware_version = le32_to_cpu(iop_config.firmware_version); | |
db9b6e89 | 1030 | hba->interface_version = le32_to_cpu(iop_config.interface_version); |
ede1e6f8 HLT |
1031 | hba->sdram_size = le32_to_cpu(iop_config.sdram_size); |
1032 | ||
db9b6e89 HLT |
1033 | if (hba->firmware_version > 0x01020000 || |
1034 | hba->interface_version > 0x01020000) | |
1035 | hba->iopintf_v2 = 1; | |
1036 | ||
ede1e6f8 HLT |
1037 | host->max_sectors = le32_to_cpu(iop_config.data_transfer_length) >> 9; |
1038 | host->max_id = le32_to_cpu(iop_config.max_devices); | |
1039 | host->sg_tablesize = le32_to_cpu(iop_config.max_sg_count); | |
1040 | host->can_queue = le32_to_cpu(iop_config.max_requests); | |
1041 | host->cmd_per_lun = le32_to_cpu(iop_config.max_requests); | |
1042 | host->max_cmd_len = 16; | |
1043 | ||
db9b6e89 HLT |
1044 | req_size = sizeof(struct hpt_iop_request_scsi_command) |
1045 | + sizeof(struct hpt_iopsg) * (hba->max_sg_descriptors - 1); | |
1046 | if ((req_size & 0x1f) != 0) | |
1047 | req_size = (req_size + 0x1f) & ~0x1f; | |
1048 | ||
1049 | memset(&set_config, 0, sizeof(struct hpt_iop_request_set_config)); | |
ede1e6f8 | 1050 | set_config.iop_id = cpu_to_le32(host->host_no); |
db9b6e89 HLT |
1051 | set_config.vbus_id = cpu_to_le16(host->host_no); |
1052 | set_config.max_host_request_size = cpu_to_le16(req_size); | |
ede1e6f8 | 1053 | |
00f59701 | 1054 | if (hba->ops->set_config(hba, &set_config)) { |
ede1e6f8 HLT |
1055 | printk(KERN_ERR "scsi%d: set config failed\n", |
1056 | hba->host->host_no); | |
1057 | goto unmap_pci_bar; | |
1058 | } | |
1059 | ||
ede1e6f8 HLT |
1060 | pci_set_drvdata(pcidev, host); |
1061 | ||
1d6f359a | 1062 | if (request_irq(pcidev->irq, hptiop_intr, IRQF_SHARED, |
ede1e6f8 HLT |
1063 | driver_name, hba)) { |
1064 | printk(KERN_ERR "scsi%d: request irq %d failed\n", | |
1065 | hba->host->host_no, pcidev->irq); | |
3e74051b | 1066 | goto unmap_pci_bar; |
ede1e6f8 HLT |
1067 | } |
1068 | ||
1069 | /* Allocate request mem */ | |
ede1e6f8 HLT |
1070 | |
1071 | dprintk("req_size=%d, max_requests=%d\n", req_size, hba->max_requests); | |
1072 | ||
1073 | hba->req_size = req_size; | |
1074 | start_virt = dma_alloc_coherent(&pcidev->dev, | |
1075 | hba->req_size*hba->max_requests + 0x20, | |
1076 | &start_phy, GFP_KERNEL); | |
1077 | ||
1078 | if (!start_virt) { | |
1079 | printk(KERN_ERR "scsi%d: fail to alloc request mem\n", | |
1080 | hba->host->host_no); | |
1081 | goto free_request_irq; | |
1082 | } | |
1083 | ||
1084 | hba->dma_coherent = start_virt; | |
1085 | hba->dma_coherent_handle = start_phy; | |
1086 | ||
1087 | if ((start_phy & 0x1f) != 0) | |
1088 | { | |
1089 | offset = ((start_phy + 0x1f) & ~0x1f) - start_phy; | |
1090 | start_phy += offset; | |
1091 | start_virt += offset; | |
1092 | } | |
1093 | ||
1094 | hba->req_list = start_virt; | |
1095 | for (i = 0; i < hba->max_requests; i++) { | |
1096 | hba->reqs[i].next = NULL; | |
1097 | hba->reqs[i].req_virt = start_virt; | |
1098 | hba->reqs[i].req_shifted_phy = start_phy >> 5; | |
1099 | hba->reqs[i].index = i; | |
1100 | free_req(hba, &hba->reqs[i]); | |
1101 | start_virt = (char *)start_virt + hba->req_size; | |
1102 | start_phy = start_phy + hba->req_size; | |
1103 | } | |
1104 | ||
1105 | /* Enable Interrupt and start background task */ | |
1106 | if (hptiop_initialize_iop(hba)) | |
1107 | goto free_request_mem; | |
1108 | ||
3e74051b CH |
1109 | if (scsi_add_host(host, &pcidev->dev)) { |
1110 | printk(KERN_ERR "scsi%d: scsi_add_host failed\n", | |
1111 | hba->host->host_no); | |
1112 | goto free_request_mem; | |
1113 | } | |
1114 | ||
ede1e6f8 HLT |
1115 | |
1116 | scsi_scan_host(host); | |
1117 | ||
1118 | dprintk("scsi%d: hptiop_probe successfully\n", hba->host->host_no); | |
1119 | return 0; | |
1120 | ||
1121 | free_request_mem: | |
1122 | dma_free_coherent(&hba->pcidev->dev, | |
00f59701 | 1123 | hba->req_size * hba->max_requests + 0x20, |
ede1e6f8 HLT |
1124 | hba->dma_coherent, hba->dma_coherent_handle); |
1125 | ||
1126 | free_request_irq: | |
1127 | free_irq(hba->pcidev->irq, hba); | |
1128 | ||
ede1e6f8 | 1129 | unmap_pci_bar: |
00f59701 HLT |
1130 | if (hba->ops->internal_memfree) |
1131 | hba->ops->internal_memfree(hba); | |
ede1e6f8 | 1132 | |
00f59701 | 1133 | hba->ops->unmap_pci_bar(hba); |
ede1e6f8 HLT |
1134 | |
1135 | free_scsi_host: | |
1136 | scsi_host_put(host); | |
1137 | ||
00f59701 HLT |
1138 | free_pci_regions: |
1139 | pci_release_regions(pcidev); | |
1140 | ||
ede1e6f8 HLT |
1141 | disable_pci_device: |
1142 | pci_disable_device(pcidev); | |
1143 | ||
1144 | dprintk("scsi%d: hptiop_probe fail\n", host->host_no); | |
1145 | return -ENODEV; | |
1146 | } | |
1147 | ||
static void hptiop_shutdown(struct pci_dev *pcidev)
{
	struct Scsi_Host *host = pci_get_drvdata(pcidev);
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

	dprintk("hptiop_shutdown(%p)\n", hba);

	/* stop the iop */
	if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
		printk(KERN_ERR "scsi%d: shutdown the iop timeout\n",
					hba->host->host_no);

	/* disable all outbound interrupts */
	hba->ops->disable_intr(hba);
}

static void hptiop_disable_intr_itl(struct hptiop_hba *hba)
{
	u32 int_mask;

	int_mask = readl(&hba->u.itl.iop->outbound_intmask);
	writel(int_mask |
		IOPMU_OUTBOUND_INT_MSG0 | IOPMU_OUTBOUND_INT_POSTQUEUE,
		&hba->u.itl.iop->outbound_intmask);
	readl(&hba->u.itl.iop->outbound_intmask);
}

static void hptiop_disable_intr_mv(struct hptiop_hba *hba)
{
	writel(0, &hba->u.mv.regs->outbound_intmask);
	readl(&hba->u.mv.regs->outbound_intmask);
}
static void hptiop_remove(struct pci_dev *pcidev)
{
	struct Scsi_Host *host = pci_get_drvdata(pcidev);
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

	dprintk("scsi%d: hptiop_remove\n", hba->host->host_no);

	scsi_remove_host(host);

	hptiop_shutdown(pcidev);

	free_irq(hba->pcidev->irq, hba);

	dma_free_coherent(&hba->pcidev->dev,
			hba->req_size * hba->max_requests + 0x20,
			hba->dma_coherent,
			hba->dma_coherent_handle);

	if (hba->ops->internal_memfree)
		hba->ops->internal_memfree(hba);

	hba->ops->unmap_pci_bar(hba);

	pci_release_regions(hba->pcidev);
	pci_set_drvdata(hba->pcidev, NULL);
	pci_disable_device(hba->pcidev);

	scsi_host_put(host);
}
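/*
 * Per-flavor operation tables; each PCI ID below carries a pointer to
 * one of these in its driver_data.
 */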
static struct hptiop_adapter_ops hptiop_itl_ops = {
	.iop_wait_ready = iop_wait_ready_itl,
	.internal_memalloc = NULL,
	.internal_memfree = NULL,
	.map_pci_bar = hptiop_map_pci_bar_itl,
	.unmap_pci_bar = hptiop_unmap_pci_bar_itl,
	.enable_intr = hptiop_enable_intr_itl,
	.disable_intr = hptiop_disable_intr_itl,
	.get_config = iop_get_config_itl,
	.set_config = iop_set_config_itl,
	.iop_intr = iop_intr_itl,
	.post_msg = hptiop_post_msg_itl,
	.post_req = hptiop_post_req_itl,
};

static struct hptiop_adapter_ops hptiop_mv_ops = {
	.iop_wait_ready = iop_wait_ready_mv,
	.internal_memalloc = hptiop_internal_memalloc_mv,
	.internal_memfree = hptiop_internal_memfree_mv,
	.map_pci_bar = hptiop_map_pci_bar_mv,
	.unmap_pci_bar = hptiop_unmap_pci_bar_mv,
	.enable_intr = hptiop_enable_intr_mv,
	.disable_intr = hptiop_disable_intr_mv,
	.get_config = iop_get_config_mv,
	.set_config = iop_set_config_mv,
	.iop_intr = iop_intr_mv,
	.post_msg = hptiop_post_msg_mv,
	.post_req = hptiop_post_req_mv,
};

static struct pci_device_id hptiop_id_table[] = {
	{ PCI_VDEVICE(TTI, 0x3220), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3320), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3520), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4320), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3510), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3511), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3521), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3522), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3410), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3540), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3120), (kernel_ulong_t)&hptiop_mv_ops },
	{ PCI_VDEVICE(TTI, 0x3122), (kernel_ulong_t)&hptiop_mv_ops },
	{ PCI_VDEVICE(TTI, 0x3020), (kernel_ulong_t)&hptiop_mv_ops },
	{},
};

MODULE_DEVICE_TABLE(pci, hptiop_id_table);

static struct pci_driver hptiop_pci_driver = {
	.name = driver_name,
	.id_table = hptiop_id_table,
	.probe = hptiop_probe,
	.remove = hptiop_remove,
	.shutdown = hptiop_shutdown,
};

static int __init hptiop_module_init(void)
{
	printk(KERN_INFO "%s %s\n", driver_name_long, driver_ver);
	return pci_register_driver(&hptiop_pci_driver);
}

static void __exit hptiop_module_exit(void)
{
	pci_unregister_driver(&hptiop_pci_driver);
}

module_init(hptiop_module_init);
module_exit(hptiop_module_exit);

MODULE_LICENSE("GPL");